grsecurity-2.2.2-2.6.32.45-201108232250.patch
1 diff -urNp linux-2.6.32.45/arch/alpha/include/asm/elf.h linux-2.6.32.45/arch/alpha/include/asm/elf.h
2 --- linux-2.6.32.45/arch/alpha/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3 +++ linux-2.6.32.45/arch/alpha/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
4 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8 +#ifdef CONFIG_PAX_ASLR
9 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10 +
11 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13 +#endif
14 +
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18 diff -urNp linux-2.6.32.45/arch/alpha/include/asm/pgtable.h linux-2.6.32.45/arch/alpha/include/asm/pgtable.h
19 --- linux-2.6.32.45/arch/alpha/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
20 +++ linux-2.6.32.45/arch/alpha/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
21 @@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25 +
26 +#ifdef CONFIG_PAX_PAGEEXEC
27 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30 +#else
31 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
32 +# define PAGE_COPY_NOEXEC PAGE_COPY
33 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
34 +#endif
35 +
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39 diff -urNp linux-2.6.32.45/arch/alpha/kernel/module.c linux-2.6.32.45/arch/alpha/kernel/module.c
40 --- linux-2.6.32.45/arch/alpha/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
41 +++ linux-2.6.32.45/arch/alpha/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
42 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46 - gp = (u64)me->module_core + me->core_size - 0x8000;
47 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51 diff -urNp linux-2.6.32.45/arch/alpha/kernel/osf_sys.c linux-2.6.32.45/arch/alpha/kernel/osf_sys.c
52 --- linux-2.6.32.45/arch/alpha/kernel/osf_sys.c 2011-08-09 18:35:28.000000000 -0400
53 +++ linux-2.6.32.45/arch/alpha/kernel/osf_sys.c 2011-06-13 17:19:47.000000000 -0400
54 @@ -1172,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58 - if (!vma || addr + len <= vma->vm_start)
59 + if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63 @@ -1208,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67 +#ifdef CONFIG_PAX_RANDMMAP
68 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69 +#endif
70 +
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74 @@ -1215,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79 - len, limit);
80 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81 +
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85 diff -urNp linux-2.6.32.45/arch/alpha/mm/fault.c linux-2.6.32.45/arch/alpha/mm/fault.c
86 --- linux-2.6.32.45/arch/alpha/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
87 +++ linux-2.6.32.45/arch/alpha/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
88 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92 +#ifdef CONFIG_PAX_PAGEEXEC
93 +/*
94 + * PaX: decide what to do with offenders (regs->pc = fault address)
95 + *
96 + * returns 1 when task should be killed
97 + * 2 when patched PLT trampoline was detected
98 + * 3 when unpatched PLT trampoline was detected
99 + */
100 +static int pax_handle_fetch_fault(struct pt_regs *regs)
101 +{
102 +
103 +#ifdef CONFIG_PAX_EMUPLT
104 + int err;
105 +
106 + do { /* PaX: patched PLT emulation #1 */
107 + unsigned int ldah, ldq, jmp;
108 +
109 + err = get_user(ldah, (unsigned int *)regs->pc);
110 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112 +
113 + if (err)
114 + break;
115 +
116 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118 + jmp == 0x6BFB0000U)
119 + {
120 + unsigned long r27, addr;
121 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123 +
124 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125 + err = get_user(r27, (unsigned long *)addr);
126 + if (err)
127 + break;
128 +
129 + regs->r27 = r27;
130 + regs->pc = r27;
131 + return 2;
132 + }
133 + } while (0);
134 +
135 + do { /* PaX: patched PLT emulation #2 */
136 + unsigned int ldah, lda, br;
137 +
138 + err = get_user(ldah, (unsigned int *)regs->pc);
139 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
140 + err |= get_user(br, (unsigned int *)(regs->pc+8));
141 +
142 + if (err)
143 + break;
144 +
145 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
147 + (br & 0xFFE00000U) == 0xC3E00000U)
148 + {
149 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152 +
153 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155 + return 2;
156 + }
157 + } while (0);
158 +
159 + do { /* PaX: unpatched PLT emulation */
160 + unsigned int br;
161 +
162 + err = get_user(br, (unsigned int *)regs->pc);
163 +
164 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165 + unsigned int br2, ldq, nop, jmp;
166 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167 +
168 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169 + err = get_user(br2, (unsigned int *)addr);
170 + err |= get_user(ldq, (unsigned int *)(addr+4));
171 + err |= get_user(nop, (unsigned int *)(addr+8));
172 + err |= get_user(jmp, (unsigned int *)(addr+12));
173 + err |= get_user(resolver, (unsigned long *)(addr+16));
174 +
175 + if (err)
176 + break;
177 +
178 + if (br2 == 0xC3600000U &&
179 + ldq == 0xA77B000CU &&
180 + nop == 0x47FF041FU &&
181 + jmp == 0x6B7B0000U)
182 + {
183 + regs->r28 = regs->pc+4;
184 + regs->r27 = addr+16;
185 + regs->pc = resolver;
186 + return 3;
187 + }
188 + }
189 + } while (0);
190 +#endif
191 +
192 + return 1;
193 +}
194 +
195 +void pax_report_insns(void *pc, void *sp)
196 +{
197 + unsigned long i;
198 +
199 + printk(KERN_ERR "PAX: bytes at PC: ");
200 + for (i = 0; i < 5; i++) {
201 + unsigned int c;
202 + if (get_user(c, (unsigned int *)pc+i))
203 + printk(KERN_CONT "???????? ");
204 + else
205 + printk(KERN_CONT "%08x ", c);
206 + }
207 + printk("\n");
208 +}
209 +#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217 - if (!(vma->vm_flags & VM_EXEC))
218 + if (!(vma->vm_flags & VM_EXEC)) {
219 +
220 +#ifdef CONFIG_PAX_PAGEEXEC
221 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222 + goto bad_area;
223 +
224 + up_read(&mm->mmap_sem);
225 + switch (pax_handle_fetch_fault(regs)) {
226 +
227 +#ifdef CONFIG_PAX_EMUPLT
228 + case 2:
229 + case 3:
230 + return;
231 +#endif
232 +
233 + }
234 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235 + do_group_exit(SIGKILL);
236 +#else
237 goto bad_area;
238 +#endif
239 +
240 + }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244 diff -urNp linux-2.6.32.45/arch/arm/include/asm/elf.h linux-2.6.32.45/arch/arm/include/asm/elf.h
245 --- linux-2.6.32.45/arch/arm/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
246 +++ linux-2.6.32.45/arch/arm/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
247 @@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253 +
254 +#ifdef CONFIG_PAX_ASLR
255 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256 +
257 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259 +#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263 diff -urNp linux-2.6.32.45/arch/arm/include/asm/kmap_types.h linux-2.6.32.45/arch/arm/include/asm/kmap_types.h
264 --- linux-2.6.32.45/arch/arm/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
265 +++ linux-2.6.32.45/arch/arm/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
266 @@ -19,6 +19,7 @@ enum km_type {
267 KM_SOFTIRQ0,
268 KM_SOFTIRQ1,
269 KM_L2_CACHE,
270 + KM_CLEARPAGE,
271 KM_TYPE_NR
272 };
273
274 diff -urNp linux-2.6.32.45/arch/arm/include/asm/uaccess.h linux-2.6.32.45/arch/arm/include/asm/uaccess.h
275 --- linux-2.6.32.45/arch/arm/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
276 +++ linux-2.6.32.45/arch/arm/include/asm/uaccess.h 2011-06-29 21:02:24.000000000 -0400
277 @@ -22,6 +22,8 @@
278 #define VERIFY_READ 0
279 #define VERIFY_WRITE 1
280
281 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
282 +
283 /*
284 * The exception table consists of pairs of addresses: the first is the
285 * address of an instruction that is allowed to fault, and the second is
286 @@ -387,8 +389,23 @@ do { \
287
288
289 #ifdef CONFIG_MMU
290 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
291 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
292 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
293 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
294 +
295 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
296 +{
297 + if (!__builtin_constant_p(n))
298 + check_object_size(to, n, false);
299 + return ___copy_from_user(to, from, n);
300 +}
301 +
302 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
303 +{
304 + if (!__builtin_constant_p(n))
305 + check_object_size(from, n, true);
306 + return ___copy_to_user(to, from, n);
307 +}
308 +
309 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
310 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
311 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
312 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
313
314 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
315 {
316 + if ((long)n < 0)
317 + return n;
318 +
319 if (access_ok(VERIFY_READ, from, n))
320 n = __copy_from_user(to, from, n);
321 else /* security hole - plug it */
322 @@ -412,6 +432,9 @@ static inline unsigned long __must_check
323
324 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
325 {
326 + if ((long)n < 0)
327 + return n;
328 +
329 if (access_ok(VERIFY_WRITE, to, n))
330 n = __copy_to_user(to, from, n);
331 return n;
332 diff -urNp linux-2.6.32.45/arch/arm/kernel/armksyms.c linux-2.6.32.45/arch/arm/kernel/armksyms.c
333 --- linux-2.6.32.45/arch/arm/kernel/armksyms.c 2011-03-27 14:31:47.000000000 -0400
334 +++ linux-2.6.32.45/arch/arm/kernel/armksyms.c 2011-07-06 19:51:50.000000000 -0400
335 @@ -118,8 +118,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
336 #ifdef CONFIG_MMU
337 EXPORT_SYMBOL(copy_page);
338
339 -EXPORT_SYMBOL(__copy_from_user);
340 -EXPORT_SYMBOL(__copy_to_user);
341 +EXPORT_SYMBOL(___copy_from_user);
342 +EXPORT_SYMBOL(___copy_to_user);
343 EXPORT_SYMBOL(__clear_user);
344
345 EXPORT_SYMBOL(__get_user_1);
346 diff -urNp linux-2.6.32.45/arch/arm/kernel/kgdb.c linux-2.6.32.45/arch/arm/kernel/kgdb.c
347 --- linux-2.6.32.45/arch/arm/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
348 +++ linux-2.6.32.45/arch/arm/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
349 @@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
350 * and we handle the normal undef case within the do_undefinstr
351 * handler.
352 */
353 -struct kgdb_arch arch_kgdb_ops = {
354 +const struct kgdb_arch arch_kgdb_ops = {
355 #ifndef __ARMEB__
356 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
357 #else /* ! __ARMEB__ */
358 diff -urNp linux-2.6.32.45/arch/arm/kernel/traps.c linux-2.6.32.45/arch/arm/kernel/traps.c
359 --- linux-2.6.32.45/arch/arm/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
360 +++ linux-2.6.32.45/arch/arm/kernel/traps.c 2011-06-13 21:31:18.000000000 -0400
361 @@ -247,6 +247,8 @@ static void __die(const char *str, int e
362
363 DEFINE_SPINLOCK(die_lock);
364
365 +extern void gr_handle_kernel_exploit(void);
366 +
367 /*
368 * This function is protected against re-entrancy.
369 */
370 @@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, str
371 if (panic_on_oops)
372 panic("Fatal exception");
373
374 + gr_handle_kernel_exploit();
375 +
376 do_exit(SIGSEGV);
377 }
378
379 diff -urNp linux-2.6.32.45/arch/arm/lib/copy_from_user.S linux-2.6.32.45/arch/arm/lib/copy_from_user.S
380 --- linux-2.6.32.45/arch/arm/lib/copy_from_user.S 2011-03-27 14:31:47.000000000 -0400
381 +++ linux-2.6.32.45/arch/arm/lib/copy_from_user.S 2011-06-29 20:48:38.000000000 -0400
382 @@ -16,7 +16,7 @@
383 /*
384 * Prototype:
385 *
386 - * size_t __copy_from_user(void *to, const void *from, size_t n)
387 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
388 *
389 * Purpose:
390 *
391 @@ -84,11 +84,11 @@
392
393 .text
394
395 -ENTRY(__copy_from_user)
396 +ENTRY(___copy_from_user)
397
398 #include "copy_template.S"
399
400 -ENDPROC(__copy_from_user)
401 +ENDPROC(___copy_from_user)
402
403 .section .fixup,"ax"
404 .align 0
405 diff -urNp linux-2.6.32.45/arch/arm/lib/copy_to_user.S linux-2.6.32.45/arch/arm/lib/copy_to_user.S
406 --- linux-2.6.32.45/arch/arm/lib/copy_to_user.S 2011-03-27 14:31:47.000000000 -0400
407 +++ linux-2.6.32.45/arch/arm/lib/copy_to_user.S 2011-06-29 20:46:49.000000000 -0400
408 @@ -16,7 +16,7 @@
409 /*
410 * Prototype:
411 *
412 - * size_t __copy_to_user(void *to, const void *from, size_t n)
413 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
414 *
415 * Purpose:
416 *
417 @@ -88,11 +88,11 @@
418 .text
419
420 ENTRY(__copy_to_user_std)
421 -WEAK(__copy_to_user)
422 +WEAK(___copy_to_user)
423
424 #include "copy_template.S"
425
426 -ENDPROC(__copy_to_user)
427 +ENDPROC(___copy_to_user)
428
429 .section .fixup,"ax"
430 .align 0
431 diff -urNp linux-2.6.32.45/arch/arm/lib/uaccess.S linux-2.6.32.45/arch/arm/lib/uaccess.S
432 --- linux-2.6.32.45/arch/arm/lib/uaccess.S 2011-03-27 14:31:47.000000000 -0400
433 +++ linux-2.6.32.45/arch/arm/lib/uaccess.S 2011-06-29 20:48:53.000000000 -0400
434 @@ -19,7 +19,7 @@
435
436 #define PAGE_SHIFT 12
437
438 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
439 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
440 * Purpose : copy a block to user memory from kernel memory
441 * Params : to - user memory
442 * : from - kernel memory
443 @@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fau
444 sub r2, r2, ip
445 b .Lc2u_dest_aligned
446
447 -ENTRY(__copy_to_user)
448 +ENTRY(___copy_to_user)
449 stmfd sp!, {r2, r4 - r7, lr}
450 cmp r2, #4
451 blt .Lc2u_not_enough
452 @@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fau
453 ldrgtb r3, [r1], #0
454 USER( strgtbt r3, [r0], #1) @ May fault
455 b .Lc2u_finished
456 -ENDPROC(__copy_to_user)
457 +ENDPROC(___copy_to_user)
458
459 .section .fixup,"ax"
460 .align 0
461 9001: ldmfd sp!, {r0, r4 - r7, pc}
462 .previous
463
464 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
465 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
466 * Purpose : copy a block from user memory to kernel memory
467 * Params : to - kernel memory
468 * : from - user memory
469 @@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fau
470 sub r2, r2, ip
471 b .Lcfu_dest_aligned
472
473 -ENTRY(__copy_from_user)
474 +ENTRY(___copy_from_user)
475 stmfd sp!, {r0, r2, r4 - r7, lr}
476 cmp r2, #4
477 blt .Lcfu_not_enough
478 @@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fau
479 USER( ldrgtbt r3, [r1], #1) @ May fault
480 strgtb r3, [r0], #1
481 b .Lcfu_finished
482 -ENDPROC(__copy_from_user)
483 +ENDPROC(___copy_from_user)
484
485 .section .fixup,"ax"
486 .align 0
487 diff -urNp linux-2.6.32.45/arch/arm/lib/uaccess_with_memcpy.c linux-2.6.32.45/arch/arm/lib/uaccess_with_memcpy.c
488 --- linux-2.6.32.45/arch/arm/lib/uaccess_with_memcpy.c 2011-03-27 14:31:47.000000000 -0400
489 +++ linux-2.6.32.45/arch/arm/lib/uaccess_with_memcpy.c 2011-06-29 20:44:35.000000000 -0400
490 @@ -97,7 +97,7 @@ out:
491 }
492
493 unsigned long
494 -__copy_to_user(void __user *to, const void *from, unsigned long n)
495 +___copy_to_user(void __user *to, const void *from, unsigned long n)
496 {
497 /*
498 * This test is stubbed out of the main function above to keep
499 diff -urNp linux-2.6.32.45/arch/arm/mach-at91/pm.c linux-2.6.32.45/arch/arm/mach-at91/pm.c
500 --- linux-2.6.32.45/arch/arm/mach-at91/pm.c 2011-03-27 14:31:47.000000000 -0400
501 +++ linux-2.6.32.45/arch/arm/mach-at91/pm.c 2011-04-17 15:56:45.000000000 -0400
502 @@ -348,7 +348,7 @@ static void at91_pm_end(void)
503 }
504
505
506 -static struct platform_suspend_ops at91_pm_ops ={
507 +static const struct platform_suspend_ops at91_pm_ops ={
508 .valid = at91_pm_valid_state,
509 .begin = at91_pm_begin,
510 .enter = at91_pm_enter,
511 diff -urNp linux-2.6.32.45/arch/arm/mach-omap1/pm.c linux-2.6.32.45/arch/arm/mach-omap1/pm.c
512 --- linux-2.6.32.45/arch/arm/mach-omap1/pm.c 2011-03-27 14:31:47.000000000 -0400
513 +++ linux-2.6.32.45/arch/arm/mach-omap1/pm.c 2011-04-17 15:56:45.000000000 -0400
514 @@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq
515
516
517
518 -static struct platform_suspend_ops omap_pm_ops ={
519 +static const struct platform_suspend_ops omap_pm_ops ={
520 .prepare = omap_pm_prepare,
521 .enter = omap_pm_enter,
522 .finish = omap_pm_finish,
523 diff -urNp linux-2.6.32.45/arch/arm/mach-omap2/pm24xx.c linux-2.6.32.45/arch/arm/mach-omap2/pm24xx.c
524 --- linux-2.6.32.45/arch/arm/mach-omap2/pm24xx.c 2011-03-27 14:31:47.000000000 -0400
525 +++ linux-2.6.32.45/arch/arm/mach-omap2/pm24xx.c 2011-04-17 15:56:45.000000000 -0400
526 @@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
527 enable_hlt();
528 }
529
530 -static struct platform_suspend_ops omap_pm_ops = {
531 +static const struct platform_suspend_ops omap_pm_ops = {
532 .prepare = omap2_pm_prepare,
533 .enter = omap2_pm_enter,
534 .finish = omap2_pm_finish,
535 diff -urNp linux-2.6.32.45/arch/arm/mach-omap2/pm34xx.c linux-2.6.32.45/arch/arm/mach-omap2/pm34xx.c
536 --- linux-2.6.32.45/arch/arm/mach-omap2/pm34xx.c 2011-03-27 14:31:47.000000000 -0400
537 +++ linux-2.6.32.45/arch/arm/mach-omap2/pm34xx.c 2011-04-17 15:56:45.000000000 -0400
538 @@ -401,7 +401,7 @@ static void omap3_pm_end(void)
539 return;
540 }
541
542 -static struct platform_suspend_ops omap_pm_ops = {
543 +static const struct platform_suspend_ops omap_pm_ops = {
544 .begin = omap3_pm_begin,
545 .end = omap3_pm_end,
546 .prepare = omap3_pm_prepare,
547 diff -urNp linux-2.6.32.45/arch/arm/mach-pnx4008/pm.c linux-2.6.32.45/arch/arm/mach-pnx4008/pm.c
548 --- linux-2.6.32.45/arch/arm/mach-pnx4008/pm.c 2011-03-27 14:31:47.000000000 -0400
549 +++ linux-2.6.32.45/arch/arm/mach-pnx4008/pm.c 2011-04-17 15:56:45.000000000 -0400
550 @@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_stat
551 (state == PM_SUSPEND_MEM);
552 }
553
554 -static struct platform_suspend_ops pnx4008_pm_ops = {
555 +static const struct platform_suspend_ops pnx4008_pm_ops = {
556 .enter = pnx4008_pm_enter,
557 .valid = pnx4008_pm_valid,
558 };
559 diff -urNp linux-2.6.32.45/arch/arm/mach-pxa/pm.c linux-2.6.32.45/arch/arm/mach-pxa/pm.c
560 --- linux-2.6.32.45/arch/arm/mach-pxa/pm.c 2011-03-27 14:31:47.000000000 -0400
561 +++ linux-2.6.32.45/arch/arm/mach-pxa/pm.c 2011-04-17 15:56:45.000000000 -0400
562 @@ -95,7 +95,7 @@ void pxa_pm_finish(void)
563 pxa_cpu_pm_fns->finish();
564 }
565
566 -static struct platform_suspend_ops pxa_pm_ops = {
567 +static const struct platform_suspend_ops pxa_pm_ops = {
568 .valid = pxa_pm_valid,
569 .enter = pxa_pm_enter,
570 .prepare = pxa_pm_prepare,
571 diff -urNp linux-2.6.32.45/arch/arm/mach-pxa/sharpsl_pm.c linux-2.6.32.45/arch/arm/mach-pxa/sharpsl_pm.c
572 --- linux-2.6.32.45/arch/arm/mach-pxa/sharpsl_pm.c 2011-03-27 14:31:47.000000000 -0400
573 +++ linux-2.6.32.45/arch/arm/mach-pxa/sharpsl_pm.c 2011-04-17 15:56:45.000000000 -0400
574 @@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status
575 }
576
577 #ifdef CONFIG_PM
578 -static struct platform_suspend_ops sharpsl_pm_ops = {
579 +static const struct platform_suspend_ops sharpsl_pm_ops = {
580 .prepare = pxa_pm_prepare,
581 .finish = pxa_pm_finish,
582 .enter = corgi_pxa_pm_enter,
583 diff -urNp linux-2.6.32.45/arch/arm/mach-sa1100/pm.c linux-2.6.32.45/arch/arm/mach-sa1100/pm.c
584 --- linux-2.6.32.45/arch/arm/mach-sa1100/pm.c 2011-03-27 14:31:47.000000000 -0400
585 +++ linux-2.6.32.45/arch/arm/mach-sa1100/pm.c 2011-04-17 15:56:45.000000000 -0400
586 @@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
587 return virt_to_phys(sp);
588 }
589
590 -static struct platform_suspend_ops sa11x0_pm_ops = {
591 +static const struct platform_suspend_ops sa11x0_pm_ops = {
592 .enter = sa11x0_pm_enter,
593 .valid = suspend_valid_only_mem,
594 };
595 diff -urNp linux-2.6.32.45/arch/arm/mm/fault.c linux-2.6.32.45/arch/arm/mm/fault.c
596 --- linux-2.6.32.45/arch/arm/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
597 +++ linux-2.6.32.45/arch/arm/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
598 @@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk,
599 }
600 #endif
601
602 +#ifdef CONFIG_PAX_PAGEEXEC
603 + if (fsr & FSR_LNX_PF) {
604 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
605 + do_group_exit(SIGKILL);
606 + }
607 +#endif
608 +
609 tsk->thread.address = addr;
610 tsk->thread.error_code = fsr;
611 tsk->thread.trap_no = 14;
612 @@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsign
613 }
614 #endif /* CONFIG_MMU */
615
616 +#ifdef CONFIG_PAX_PAGEEXEC
617 +void pax_report_insns(void *pc, void *sp)
618 +{
619 + long i;
620 +
621 + printk(KERN_ERR "PAX: bytes at PC: ");
622 + for (i = 0; i < 20; i++) {
623 + unsigned char c;
624 + if (get_user(c, (__force unsigned char __user *)pc+i))
625 + printk(KERN_CONT "?? ");
626 + else
627 + printk(KERN_CONT "%02x ", c);
628 + }
629 + printk("\n");
630 +
631 + printk(KERN_ERR "PAX: bytes at SP-4: ");
632 + for (i = -1; i < 20; i++) {
633 + unsigned long c;
634 + if (get_user(c, (__force unsigned long __user *)sp+i))
635 + printk(KERN_CONT "???????? ");
636 + else
637 + printk(KERN_CONT "%08lx ", c);
638 + }
639 + printk("\n");
640 +}
641 +#endif
642 +
643 /*
644 * First Level Translation Fault Handler
645 *
646 diff -urNp linux-2.6.32.45/arch/arm/mm/mmap.c linux-2.6.32.45/arch/arm/mm/mmap.c
647 --- linux-2.6.32.45/arch/arm/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
648 +++ linux-2.6.32.45/arch/arm/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
649 @@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp
650 if (len > TASK_SIZE)
651 return -ENOMEM;
652
653 +#ifdef CONFIG_PAX_RANDMMAP
654 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
655 +#endif
656 +
657 if (addr) {
658 if (do_align)
659 addr = COLOUR_ALIGN(addr, pgoff);
660 @@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp
661 addr = PAGE_ALIGN(addr);
662
663 vma = find_vma(mm, addr);
664 - if (TASK_SIZE - len >= addr &&
665 - (!vma || addr + len <= vma->vm_start))
666 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
667 return addr;
668 }
669 if (len > mm->cached_hole_size) {
670 - start_addr = addr = mm->free_area_cache;
671 + start_addr = addr = mm->free_area_cache;
672 } else {
673 - start_addr = addr = TASK_UNMAPPED_BASE;
674 - mm->cached_hole_size = 0;
675 + start_addr = addr = mm->mmap_base;
676 + mm->cached_hole_size = 0;
677 }
678
679 full_search:
680 @@ -94,14 +97,14 @@ full_search:
681 * Start a new search - just in case we missed
682 * some holes.
683 */
684 - if (start_addr != TASK_UNMAPPED_BASE) {
685 - start_addr = addr = TASK_UNMAPPED_BASE;
686 + if (start_addr != mm->mmap_base) {
687 + start_addr = addr = mm->mmap_base;
688 mm->cached_hole_size = 0;
689 goto full_search;
690 }
691 return -ENOMEM;
692 }
693 - if (!vma || addr + len <= vma->vm_start) {
694 + if (check_heap_stack_gap(vma, addr, len)) {
695 /*
696 * Remember the place where we stopped the search:
697 */
698 diff -urNp linux-2.6.32.45/arch/arm/plat-s3c/pm.c linux-2.6.32.45/arch/arm/plat-s3c/pm.c
699 --- linux-2.6.32.45/arch/arm/plat-s3c/pm.c 2011-03-27 14:31:47.000000000 -0400
700 +++ linux-2.6.32.45/arch/arm/plat-s3c/pm.c 2011-04-17 15:56:45.000000000 -0400
701 @@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
702 s3c_pm_check_cleanup();
703 }
704
705 -static struct platform_suspend_ops s3c_pm_ops = {
706 +static const struct platform_suspend_ops s3c_pm_ops = {
707 .enter = s3c_pm_enter,
708 .prepare = s3c_pm_prepare,
709 .finish = s3c_pm_finish,
710 diff -urNp linux-2.6.32.45/arch/avr32/include/asm/elf.h linux-2.6.32.45/arch/avr32/include/asm/elf.h
711 --- linux-2.6.32.45/arch/avr32/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
712 +++ linux-2.6.32.45/arch/avr32/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
713 @@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpreg
714 the loader. We need to make sure that it is out of the way of the program
715 that it will "exec", and that there is sufficient room for the brk. */
716
717 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
718 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
719
720 +#ifdef CONFIG_PAX_ASLR
721 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
722 +
723 +#define PAX_DELTA_MMAP_LEN 15
724 +#define PAX_DELTA_STACK_LEN 15
725 +#endif
726
727 /* This yields a mask that user programs can use to figure out what
728 instruction set this CPU supports. This could be done in user space,
729 diff -urNp linux-2.6.32.45/arch/avr32/include/asm/kmap_types.h linux-2.6.32.45/arch/avr32/include/asm/kmap_types.h
730 --- linux-2.6.32.45/arch/avr32/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
731 +++ linux-2.6.32.45/arch/avr32/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
732 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
733 D(11) KM_IRQ1,
734 D(12) KM_SOFTIRQ0,
735 D(13) KM_SOFTIRQ1,
736 -D(14) KM_TYPE_NR
737 +D(14) KM_CLEARPAGE,
738 +D(15) KM_TYPE_NR
739 };
740
741 #undef D
742 diff -urNp linux-2.6.32.45/arch/avr32/mach-at32ap/pm.c linux-2.6.32.45/arch/avr32/mach-at32ap/pm.c
743 --- linux-2.6.32.45/arch/avr32/mach-at32ap/pm.c 2011-03-27 14:31:47.000000000 -0400
744 +++ linux-2.6.32.45/arch/avr32/mach-at32ap/pm.c 2011-04-17 15:56:45.000000000 -0400
745 @@ -176,7 +176,7 @@ out:
746 return 0;
747 }
748
749 -static struct platform_suspend_ops avr32_pm_ops = {
750 +static const struct platform_suspend_ops avr32_pm_ops = {
751 .valid = avr32_pm_valid_state,
752 .enter = avr32_pm_enter,
753 };
754 diff -urNp linux-2.6.32.45/arch/avr32/mm/fault.c linux-2.6.32.45/arch/avr32/mm/fault.c
755 --- linux-2.6.32.45/arch/avr32/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
756 +++ linux-2.6.32.45/arch/avr32/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
757 @@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
758
759 int exception_trace = 1;
760
761 +#ifdef CONFIG_PAX_PAGEEXEC
762 +void pax_report_insns(void *pc, void *sp)
763 +{
764 + unsigned long i;
765 +
766 + printk(KERN_ERR "PAX: bytes at PC: ");
767 + for (i = 0; i < 20; i++) {
768 + unsigned char c;
769 + if (get_user(c, (unsigned char *)pc+i))
770 + printk(KERN_CONT "???????? ");
771 + else
772 + printk(KERN_CONT "%02x ", c);
773 + }
774 + printk("\n");
775 +}
776 +#endif
777 +
778 /*
779 * This routine handles page faults. It determines the address and the
780 * problem, and then passes it off to one of the appropriate routines.
781 @@ -157,6 +174,16 @@ bad_area:
782 up_read(&mm->mmap_sem);
783
784 if (user_mode(regs)) {
785 +
786 +#ifdef CONFIG_PAX_PAGEEXEC
787 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
788 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
789 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
790 + do_group_exit(SIGKILL);
791 + }
792 + }
793 +#endif
794 +
795 if (exception_trace && printk_ratelimit())
796 printk("%s%s[%d]: segfault at %08lx pc %08lx "
797 "sp %08lx ecr %lu\n",
798 diff -urNp linux-2.6.32.45/arch/blackfin/kernel/kgdb.c linux-2.6.32.45/arch/blackfin/kernel/kgdb.c
799 --- linux-2.6.32.45/arch/blackfin/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
800 +++ linux-2.6.32.45/arch/blackfin/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
801 @@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vecto
802 return -1; /* this means that we do not want to exit from the handler */
803 }
804
805 -struct kgdb_arch arch_kgdb_ops = {
806 +const struct kgdb_arch arch_kgdb_ops = {
807 .gdb_bpt_instr = {0xa1},
808 #ifdef CONFIG_SMP
809 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
810 diff -urNp linux-2.6.32.45/arch/blackfin/mach-common/pm.c linux-2.6.32.45/arch/blackfin/mach-common/pm.c
811 --- linux-2.6.32.45/arch/blackfin/mach-common/pm.c 2011-03-27 14:31:47.000000000 -0400
812 +++ linux-2.6.32.45/arch/blackfin/mach-common/pm.c 2011-04-17 15:56:45.000000000 -0400
813 @@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t
814 return 0;
815 }
816
817 -struct platform_suspend_ops bfin_pm_ops = {
818 +const struct platform_suspend_ops bfin_pm_ops = {
819 .enter = bfin_pm_enter,
820 .valid = bfin_pm_valid,
821 };
822 diff -urNp linux-2.6.32.45/arch/frv/include/asm/kmap_types.h linux-2.6.32.45/arch/frv/include/asm/kmap_types.h
823 --- linux-2.6.32.45/arch/frv/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
824 +++ linux-2.6.32.45/arch/frv/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
825 @@ -23,6 +23,7 @@ enum km_type {
826 KM_IRQ1,
827 KM_SOFTIRQ0,
828 KM_SOFTIRQ1,
829 + KM_CLEARPAGE,
830 KM_TYPE_NR
831 };
832
833 diff -urNp linux-2.6.32.45/arch/frv/mm/elf-fdpic.c linux-2.6.32.45/arch/frv/mm/elf-fdpic.c
834 --- linux-2.6.32.45/arch/frv/mm/elf-fdpic.c 2011-03-27 14:31:47.000000000 -0400
835 +++ linux-2.6.32.45/arch/frv/mm/elf-fdpic.c 2011-04-17 15:56:45.000000000 -0400
836 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
837 if (addr) {
838 addr = PAGE_ALIGN(addr);
839 vma = find_vma(current->mm, addr);
840 - if (TASK_SIZE - len >= addr &&
841 - (!vma || addr + len <= vma->vm_start))
842 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
843 goto success;
844 }
845
846 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
847 for (; vma; vma = vma->vm_next) {
848 if (addr > limit)
849 break;
850 - if (addr + len <= vma->vm_start)
851 + if (check_heap_stack_gap(vma, addr, len))
852 goto success;
853 addr = vma->vm_end;
854 }
855 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
856 for (; vma; vma = vma->vm_next) {
857 if (addr > limit)
858 break;
859 - if (addr + len <= vma->vm_start)
860 + if (check_heap_stack_gap(vma, addr, len))
861 goto success;
862 addr = vma->vm_end;
863 }
864 diff -urNp linux-2.6.32.45/arch/ia64/hp/common/hwsw_iommu.c linux-2.6.32.45/arch/ia64/hp/common/hwsw_iommu.c
865 --- linux-2.6.32.45/arch/ia64/hp/common/hwsw_iommu.c 2011-03-27 14:31:47.000000000 -0400
866 +++ linux-2.6.32.45/arch/ia64/hp/common/hwsw_iommu.c 2011-04-17 15:56:45.000000000 -0400
867 @@ -17,7 +17,7 @@
868 #include <linux/swiotlb.h>
869 #include <asm/machvec.h>
870
871 -extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
872 +extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
873
874 /* swiotlb declarations & definitions: */
875 extern int swiotlb_late_init_with_default_size (size_t size);
876 @@ -33,7 +33,7 @@ static inline int use_swiotlb(struct dev
877 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
878 }
879
880 -struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
881 +const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
882 {
883 if (use_swiotlb(dev))
884 return &swiotlb_dma_ops;
885 diff -urNp linux-2.6.32.45/arch/ia64/hp/common/sba_iommu.c linux-2.6.32.45/arch/ia64/hp/common/sba_iommu.c
886 --- linux-2.6.32.45/arch/ia64/hp/common/sba_iommu.c 2011-03-27 14:31:47.000000000 -0400
887 +++ linux-2.6.32.45/arch/ia64/hp/common/sba_iommu.c 2011-04-17 15:56:45.000000000 -0400
888 @@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_d
889 },
890 };
891
892 -extern struct dma_map_ops swiotlb_dma_ops;
893 +extern const struct dma_map_ops swiotlb_dma_ops;
894
895 static int __init
896 sba_init(void)
897 @@ -2211,7 +2211,7 @@ sba_page_override(char *str)
898
899 __setup("sbapagesize=",sba_page_override);
900
901 -struct dma_map_ops sba_dma_ops = {
902 +const struct dma_map_ops sba_dma_ops = {
903 .alloc_coherent = sba_alloc_coherent,
904 .free_coherent = sba_free_coherent,
905 .map_page = sba_map_page,
906 diff -urNp linux-2.6.32.45/arch/ia64/ia32/binfmt_elf32.c linux-2.6.32.45/arch/ia64/ia32/binfmt_elf32.c
907 --- linux-2.6.32.45/arch/ia64/ia32/binfmt_elf32.c 2011-03-27 14:31:47.000000000 -0400
908 +++ linux-2.6.32.45/arch/ia64/ia32/binfmt_elf32.c 2011-04-17 15:56:45.000000000 -0400
909 @@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_
910
911 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
912
913 +#ifdef CONFIG_PAX_ASLR
914 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
915 +
916 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
917 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
918 +#endif
919 +
920 /* Ugly but avoids duplication */
921 #include "../../../fs/binfmt_elf.c"
922
923 diff -urNp linux-2.6.32.45/arch/ia64/ia32/ia32priv.h linux-2.6.32.45/arch/ia64/ia32/ia32priv.h
924 --- linux-2.6.32.45/arch/ia64/ia32/ia32priv.h 2011-03-27 14:31:47.000000000 -0400
925 +++ linux-2.6.32.45/arch/ia64/ia32/ia32priv.h 2011-04-17 15:56:45.000000000 -0400
926 @@ -296,7 +296,14 @@ typedef struct compat_siginfo {
927 #define ELF_DATA ELFDATA2LSB
928 #define ELF_ARCH EM_386
929
930 -#define IA32_STACK_TOP IA32_PAGE_OFFSET
931 +#ifdef CONFIG_PAX_RANDUSTACK
932 +#define __IA32_DELTA_STACK (current->mm->delta_stack)
933 +#else
934 +#define __IA32_DELTA_STACK 0UL
935 +#endif
936 +
937 +#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
938 +
939 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
940 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
941
942 diff -urNp linux-2.6.32.45/arch/ia64/include/asm/dma-mapping.h linux-2.6.32.45/arch/ia64/include/asm/dma-mapping.h
943 --- linux-2.6.32.45/arch/ia64/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
944 +++ linux-2.6.32.45/arch/ia64/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
945 @@ -12,7 +12,7 @@
946
947 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
948
949 -extern struct dma_map_ops *dma_ops;
950 +extern const struct dma_map_ops *dma_ops;
951 extern struct ia64_machine_vector ia64_mv;
952 extern void set_iommu_machvec(void);
953
954 @@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct d
955 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
956 dma_addr_t *daddr, gfp_t gfp)
957 {
958 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
959 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
960 void *caddr;
961
962 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
963 @@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(s
964 static inline void dma_free_coherent(struct device *dev, size_t size,
965 void *caddr, dma_addr_t daddr)
966 {
967 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
968 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
969 debug_dma_free_coherent(dev, size, caddr, daddr);
970 ops->free_coherent(dev, size, caddr, daddr);
971 }
972 @@ -49,13 +49,13 @@ static inline void dma_free_coherent(str
973
974 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
975 {
976 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
977 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
978 return ops->mapping_error(dev, daddr);
979 }
980
981 static inline int dma_supported(struct device *dev, u64 mask)
982 {
983 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
984 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
985 return ops->dma_supported(dev, mask);
986 }
987
988 diff -urNp linux-2.6.32.45/arch/ia64/include/asm/elf.h linux-2.6.32.45/arch/ia64/include/asm/elf.h
989 --- linux-2.6.32.45/arch/ia64/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
990 +++ linux-2.6.32.45/arch/ia64/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
991 @@ -43,6 +43,13 @@
992 */
993 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
994
995 +#ifdef CONFIG_PAX_ASLR
996 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
997 +
998 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
999 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1000 +#endif
1001 +
1002 #define PT_IA_64_UNWIND 0x70000001
1003
1004 /* IA-64 relocations: */
1005 diff -urNp linux-2.6.32.45/arch/ia64/include/asm/machvec.h linux-2.6.32.45/arch/ia64/include/asm/machvec.h
1006 --- linux-2.6.32.45/arch/ia64/include/asm/machvec.h 2011-03-27 14:31:47.000000000 -0400
1007 +++ linux-2.6.32.45/arch/ia64/include/asm/machvec.h 2011-04-17 15:56:45.000000000 -0400
1008 @@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event
1009 /* DMA-mapping interface: */
1010 typedef void ia64_mv_dma_init (void);
1011 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
1012 -typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1013 +typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1014
1015 /*
1016 * WARNING: The legacy I/O space is _architected_. Platforms are
1017 @@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(co
1018 # endif /* CONFIG_IA64_GENERIC */
1019
1020 extern void swiotlb_dma_init(void);
1021 -extern struct dma_map_ops *dma_get_ops(struct device *);
1022 +extern const struct dma_map_ops *dma_get_ops(struct device *);
1023
1024 /*
1025 * Define default versions so we can extend machvec for new platforms without having
1026 diff -urNp linux-2.6.32.45/arch/ia64/include/asm/pgtable.h linux-2.6.32.45/arch/ia64/include/asm/pgtable.h
1027 --- linux-2.6.32.45/arch/ia64/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1028 +++ linux-2.6.32.45/arch/ia64/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1029 @@ -12,7 +12,7 @@
1030 * David Mosberger-Tang <davidm@hpl.hp.com>
1031 */
1032
1033 -
1034 +#include <linux/const.h>
1035 #include <asm/mman.h>
1036 #include <asm/page.h>
1037 #include <asm/processor.h>
1038 @@ -143,6 +143,17 @@
1039 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1040 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1041 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1042 +
1043 +#ifdef CONFIG_PAX_PAGEEXEC
1044 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1045 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1046 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1047 +#else
1048 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1049 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1050 +# define PAGE_COPY_NOEXEC PAGE_COPY
1051 +#endif
1052 +
1053 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1054 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1055 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1056 diff -urNp linux-2.6.32.45/arch/ia64/include/asm/spinlock.h linux-2.6.32.45/arch/ia64/include/asm/spinlock.h
1057 --- linux-2.6.32.45/arch/ia64/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
1058 +++ linux-2.6.32.45/arch/ia64/include/asm/spinlock.h 2011-04-17 15:56:45.000000000 -0400
1059 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
1060 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1061
1062 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1063 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1064 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1065 }
1066
1067 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
1068 diff -urNp linux-2.6.32.45/arch/ia64/include/asm/uaccess.h linux-2.6.32.45/arch/ia64/include/asm/uaccess.h
1069 --- linux-2.6.32.45/arch/ia64/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
1070 +++ linux-2.6.32.45/arch/ia64/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
1071 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
1072 const void *__cu_from = (from); \
1073 long __cu_len = (n); \
1074 \
1075 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
1076 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1077 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1078 __cu_len; \
1079 })
1080 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
1081 long __cu_len = (n); \
1082 \
1083 __chk_user_ptr(__cu_from); \
1084 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
1085 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1086 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1087 __cu_len; \
1088 })
1089 diff -urNp linux-2.6.32.45/arch/ia64/kernel/dma-mapping.c linux-2.6.32.45/arch/ia64/kernel/dma-mapping.c
1090 --- linux-2.6.32.45/arch/ia64/kernel/dma-mapping.c 2011-03-27 14:31:47.000000000 -0400
1091 +++ linux-2.6.32.45/arch/ia64/kernel/dma-mapping.c 2011-04-17 15:56:45.000000000 -0400
1092 @@ -3,7 +3,7 @@
1093 /* Set this to 1 if there is a HW IOMMU in the system */
1094 int iommu_detected __read_mostly;
1095
1096 -struct dma_map_ops *dma_ops;
1097 +const struct dma_map_ops *dma_ops;
1098 EXPORT_SYMBOL(dma_ops);
1099
1100 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
1101 @@ -16,7 +16,7 @@ static int __init dma_init(void)
1102 }
1103 fs_initcall(dma_init);
1104
1105 -struct dma_map_ops *dma_get_ops(struct device *dev)
1106 +const struct dma_map_ops *dma_get_ops(struct device *dev)
1107 {
1108 return dma_ops;
1109 }
1110 diff -urNp linux-2.6.32.45/arch/ia64/kernel/module.c linux-2.6.32.45/arch/ia64/kernel/module.c
1111 --- linux-2.6.32.45/arch/ia64/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1112 +++ linux-2.6.32.45/arch/ia64/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1113 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
1114 void
1115 module_free (struct module *mod, void *module_region)
1116 {
1117 - if (mod && mod->arch.init_unw_table &&
1118 - module_region == mod->module_init) {
1119 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1120 unw_remove_unwind_table(mod->arch.init_unw_table);
1121 mod->arch.init_unw_table = NULL;
1122 }
1123 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
1124 }
1125
1126 static inline int
1127 +in_init_rx (const struct module *mod, uint64_t addr)
1128 +{
1129 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1130 +}
1131 +
1132 +static inline int
1133 +in_init_rw (const struct module *mod, uint64_t addr)
1134 +{
1135 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1136 +}
1137 +
1138 +static inline int
1139 in_init (const struct module *mod, uint64_t addr)
1140 {
1141 - return addr - (uint64_t) mod->module_init < mod->init_size;
1142 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1143 +}
1144 +
1145 +static inline int
1146 +in_core_rx (const struct module *mod, uint64_t addr)
1147 +{
1148 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1149 +}
1150 +
1151 +static inline int
1152 +in_core_rw (const struct module *mod, uint64_t addr)
1153 +{
1154 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1155 }
1156
1157 static inline int
1158 in_core (const struct module *mod, uint64_t addr)
1159 {
1160 - return addr - (uint64_t) mod->module_core < mod->core_size;
1161 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1162 }
1163
1164 static inline int
1165 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
1166 break;
1167
1168 case RV_BDREL:
1169 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1170 + if (in_init_rx(mod, val))
1171 + val -= (uint64_t) mod->module_init_rx;
1172 + else if (in_init_rw(mod, val))
1173 + val -= (uint64_t) mod->module_init_rw;
1174 + else if (in_core_rx(mod, val))
1175 + val -= (uint64_t) mod->module_core_rx;
1176 + else if (in_core_rw(mod, val))
1177 + val -= (uint64_t) mod->module_core_rw;
1178 break;
1179
1180 case RV_LTV:
1181 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
1182 * addresses have been selected...
1183 */
1184 uint64_t gp;
1185 - if (mod->core_size > MAX_LTOFF)
1186 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1187 /*
1188 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1189 * at the end of the module.
1190 */
1191 - gp = mod->core_size - MAX_LTOFF / 2;
1192 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1193 else
1194 - gp = mod->core_size / 2;
1195 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1196 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1197 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1198 mod->arch.gp = gp;
1199 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1200 }
1201 diff -urNp linux-2.6.32.45/arch/ia64/kernel/pci-dma.c linux-2.6.32.45/arch/ia64/kernel/pci-dma.c
1202 --- linux-2.6.32.45/arch/ia64/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
1203 +++ linux-2.6.32.45/arch/ia64/kernel/pci-dma.c 2011-04-17 15:56:45.000000000 -0400
1204 @@ -43,7 +43,7 @@ struct device fallback_dev = {
1205 .dma_mask = &fallback_dev.coherent_dma_mask,
1206 };
1207
1208 -extern struct dma_map_ops intel_dma_ops;
1209 +extern const struct dma_map_ops intel_dma_ops;
1210
1211 static int __init pci_iommu_init(void)
1212 {
1213 @@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *d
1214 }
1215 EXPORT_SYMBOL(iommu_dma_supported);
1216
1217 +extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1218 +extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1219 +extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1220 +extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1221 +extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1222 +extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1223 +extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1224 +
1225 +static const struct dma_map_ops intel_iommu_dma_ops = {
1226 + /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1227 + .alloc_coherent = intel_alloc_coherent,
1228 + .free_coherent = intel_free_coherent,
1229 + .map_sg = intel_map_sg,
1230 + .unmap_sg = intel_unmap_sg,
1231 + .map_page = intel_map_page,
1232 + .unmap_page = intel_unmap_page,
1233 + .mapping_error = intel_mapping_error,
1234 +
1235 + .sync_single_for_cpu = machvec_dma_sync_single,
1236 + .sync_sg_for_cpu = machvec_dma_sync_sg,
1237 + .sync_single_for_device = machvec_dma_sync_single,
1238 + .sync_sg_for_device = machvec_dma_sync_sg,
1239 + .dma_supported = iommu_dma_supported,
1240 +};
1241 +
1242 void __init pci_iommu_alloc(void)
1243 {
1244 - dma_ops = &intel_dma_ops;
1245 -
1246 - dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1247 - dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1248 - dma_ops->sync_single_for_device = machvec_dma_sync_single;
1249 - dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1250 - dma_ops->dma_supported = iommu_dma_supported;
1251 + dma_ops = &intel_iommu_dma_ops;
1252
1253 /*
1254 * The order of these functions is important for
1255 diff -urNp linux-2.6.32.45/arch/ia64/kernel/pci-swiotlb.c linux-2.6.32.45/arch/ia64/kernel/pci-swiotlb.c
1256 --- linux-2.6.32.45/arch/ia64/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
1257 +++ linux-2.6.32.45/arch/ia64/kernel/pci-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
1258 @@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent
1259 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1260 }
1261
1262 -struct dma_map_ops swiotlb_dma_ops = {
1263 +const struct dma_map_ops swiotlb_dma_ops = {
1264 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1265 .free_coherent = swiotlb_free_coherent,
1266 .map_page = swiotlb_map_page,
1267 diff -urNp linux-2.6.32.45/arch/ia64/kernel/sys_ia64.c linux-2.6.32.45/arch/ia64/kernel/sys_ia64.c
1268 --- linux-2.6.32.45/arch/ia64/kernel/sys_ia64.c 2011-03-27 14:31:47.000000000 -0400
1269 +++ linux-2.6.32.45/arch/ia64/kernel/sys_ia64.c 2011-04-17 15:56:45.000000000 -0400
1270 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
1271 if (REGION_NUMBER(addr) == RGN_HPAGE)
1272 addr = 0;
1273 #endif
1274 +
1275 +#ifdef CONFIG_PAX_RANDMMAP
1276 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1277 + addr = mm->free_area_cache;
1278 + else
1279 +#endif
1280 +
1281 if (!addr)
1282 addr = mm->free_area_cache;
1283
1284 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
1285 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1286 /* At this point: (!vma || addr < vma->vm_end). */
1287 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1288 - if (start_addr != TASK_UNMAPPED_BASE) {
1289 + if (start_addr != mm->mmap_base) {
1290 /* Start a new search --- just in case we missed some holes. */
1291 - addr = TASK_UNMAPPED_BASE;
1292 + addr = mm->mmap_base;
1293 goto full_search;
1294 }
1295 return -ENOMEM;
1296 }
1297 - if (!vma || addr + len <= vma->vm_start) {
1298 + if (check_heap_stack_gap(vma, addr, len)) {
1299 /* Remember the address where we stopped this search: */
1300 mm->free_area_cache = addr + len;
1301 return addr;
1302 diff -urNp linux-2.6.32.45/arch/ia64/kernel/topology.c linux-2.6.32.45/arch/ia64/kernel/topology.c
1303 --- linux-2.6.32.45/arch/ia64/kernel/topology.c 2011-03-27 14:31:47.000000000 -0400
1304 +++ linux-2.6.32.45/arch/ia64/kernel/topology.c 2011-04-17 15:56:45.000000000 -0400
1305 @@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject
1306 return ret;
1307 }
1308
1309 -static struct sysfs_ops cache_sysfs_ops = {
1310 +static const struct sysfs_ops cache_sysfs_ops = {
1311 .show = cache_show
1312 };
1313
1314 diff -urNp linux-2.6.32.45/arch/ia64/kernel/vmlinux.lds.S linux-2.6.32.45/arch/ia64/kernel/vmlinux.lds.S
1315 --- linux-2.6.32.45/arch/ia64/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
1316 +++ linux-2.6.32.45/arch/ia64/kernel/vmlinux.lds.S 2011-04-17 15:56:45.000000000 -0400
1317 @@ -190,7 +190,7 @@ SECTIONS
1318 /* Per-cpu data: */
1319 . = ALIGN(PERCPU_PAGE_SIZE);
1320 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1321 - __phys_per_cpu_start = __per_cpu_load;
1322 + __phys_per_cpu_start = per_cpu_load;
1323 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1324 * into percpu page size
1325 */
1326 diff -urNp linux-2.6.32.45/arch/ia64/mm/fault.c linux-2.6.32.45/arch/ia64/mm/fault.c
1327 --- linux-2.6.32.45/arch/ia64/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1328 +++ linux-2.6.32.45/arch/ia64/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1329 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
1330 return pte_present(pte);
1331 }
1332
1333 +#ifdef CONFIG_PAX_PAGEEXEC
1334 +void pax_report_insns(void *pc, void *sp)
1335 +{
1336 + unsigned long i;
1337 +
1338 + printk(KERN_ERR "PAX: bytes at PC: ");
1339 + for (i = 0; i < 8; i++) {
1340 + unsigned int c;
1341 + if (get_user(c, (unsigned int *)pc+i))
1342 + printk(KERN_CONT "???????? ");
1343 + else
1344 + printk(KERN_CONT "%08x ", c);
1345 + }
1346 + printk("\n");
1347 +}
1348 +#endif
1349 +
1350 void __kprobes
1351 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1352 {
1353 @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
1354 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1355 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1356
1357 - if ((vma->vm_flags & mask) != mask)
1358 + if ((vma->vm_flags & mask) != mask) {
1359 +
1360 +#ifdef CONFIG_PAX_PAGEEXEC
1361 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1362 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1363 + goto bad_area;
1364 +
1365 + up_read(&mm->mmap_sem);
1366 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1367 + do_group_exit(SIGKILL);
1368 + }
1369 +#endif
1370 +
1371 goto bad_area;
1372
1373 + }
1374 +
1375 survive:
1376 /*
1377 * If for any reason at all we couldn't handle the fault, make
1378 diff -urNp linux-2.6.32.45/arch/ia64/mm/hugetlbpage.c linux-2.6.32.45/arch/ia64/mm/hugetlbpage.c
1379 --- linux-2.6.32.45/arch/ia64/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
1380 +++ linux-2.6.32.45/arch/ia64/mm/hugetlbpage.c 2011-04-17 15:56:45.000000000 -0400
1381 @@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(
1382 /* At this point: (!vmm || addr < vmm->vm_end). */
1383 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1384 return -ENOMEM;
1385 - if (!vmm || (addr + len) <= vmm->vm_start)
1386 + if (check_heap_stack_gap(vmm, addr, len))
1387 return addr;
1388 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1389 }
1390 diff -urNp linux-2.6.32.45/arch/ia64/mm/init.c linux-2.6.32.45/arch/ia64/mm/init.c
1391 --- linux-2.6.32.45/arch/ia64/mm/init.c 2011-03-27 14:31:47.000000000 -0400
1392 +++ linux-2.6.32.45/arch/ia64/mm/init.c 2011-04-17 15:56:45.000000000 -0400
1393 @@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1394 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1395 vma->vm_end = vma->vm_start + PAGE_SIZE;
1396 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1397 +
1398 +#ifdef CONFIG_PAX_PAGEEXEC
1399 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1400 + vma->vm_flags &= ~VM_EXEC;
1401 +
1402 +#ifdef CONFIG_PAX_MPROTECT
1403 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1404 + vma->vm_flags &= ~VM_MAYEXEC;
1405 +#endif
1406 +
1407 + }
1408 +#endif
1409 +
1410 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1411 down_write(&current->mm->mmap_sem);
1412 if (insert_vm_struct(current->mm, vma)) {
1413 diff -urNp linux-2.6.32.45/arch/ia64/sn/pci/pci_dma.c linux-2.6.32.45/arch/ia64/sn/pci/pci_dma.c
1414 --- linux-2.6.32.45/arch/ia64/sn/pci/pci_dma.c 2011-03-27 14:31:47.000000000 -0400
1415 +++ linux-2.6.32.45/arch/ia64/sn/pci/pci_dma.c 2011-04-17 15:56:45.000000000 -0400
1416 @@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *
1417 return ret;
1418 }
1419
1420 -static struct dma_map_ops sn_dma_ops = {
1421 +static const struct dma_map_ops sn_dma_ops = {
1422 .alloc_coherent = sn_dma_alloc_coherent,
1423 .free_coherent = sn_dma_free_coherent,
1424 .map_page = sn_dma_map_page,
1425 diff -urNp linux-2.6.32.45/arch/m32r/lib/usercopy.c linux-2.6.32.45/arch/m32r/lib/usercopy.c
1426 --- linux-2.6.32.45/arch/m32r/lib/usercopy.c 2011-03-27 14:31:47.000000000 -0400
1427 +++ linux-2.6.32.45/arch/m32r/lib/usercopy.c 2011-04-17 15:56:45.000000000 -0400
1428 @@ -14,6 +14,9 @@
1429 unsigned long
1430 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1431 {
1432 + if ((long)n < 0)
1433 + return n;
1434 +
1435 prefetch(from);
1436 if (access_ok(VERIFY_WRITE, to, n))
1437 __copy_user(to,from,n);
1438 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1439 unsigned long
1440 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1441 {
1442 + if ((long)n < 0)
1443 + return n;
1444 +
1445 prefetchw(to);
1446 if (access_ok(VERIFY_READ, from, n))
1447 __copy_user_zeroing(to,from,n);
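Both m32r usercopy hunks above add the same "(long)n < 0" guard. A small stand-alone sketch of why that catches lengths produced by an unsigned underflow; the function below only models the added check, not the real copy path:

#include <stdio.h>

static unsigned long guarded_copy(unsigned long n)
{
	if ((long)n < 0)        /* e.g. n == (unsigned long)-1 wraps negative here */
		return n;           /* refuse and report every byte as uncopied       */
	/* the real routine would go on to access_ok()/__copy_user() here */
	return 0;
}

int main(void)
{
	unsigned long underflowed = 0UL - 1UL;   /* a length computed as 0 - 1 */

	printf("guarded_copy(%lu) = %lu\n", underflowed, guarded_copy(underflowed));
	return 0;
}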
1448 diff -urNp linux-2.6.32.45/arch/mips/alchemy/devboards/pm.c linux-2.6.32.45/arch/mips/alchemy/devboards/pm.c
1449 --- linux-2.6.32.45/arch/mips/alchemy/devboards/pm.c 2011-03-27 14:31:47.000000000 -0400
1450 +++ linux-2.6.32.45/arch/mips/alchemy/devboards/pm.c 2011-04-17 15:56:45.000000000 -0400
1451 @@ -78,7 +78,7 @@ static void db1x_pm_end(void)
1452
1453 }
1454
1455 -static struct platform_suspend_ops db1x_pm_ops = {
1456 +static const struct platform_suspend_ops db1x_pm_ops = {
1457 .valid = suspend_valid_only_mem,
1458 .begin = db1x_pm_begin,
1459 .enter = db1x_pm_enter,
1460 diff -urNp linux-2.6.32.45/arch/mips/include/asm/elf.h linux-2.6.32.45/arch/mips/include/asm/elf.h
1461 --- linux-2.6.32.45/arch/mips/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1462 +++ linux-2.6.32.45/arch/mips/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1463 @@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_str
1464 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1465 #endif
1466
1467 +#ifdef CONFIG_PAX_ASLR
1468 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1469 +
1470 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1471 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1472 +#endif
1473 +
1474 #endif /* _ASM_ELF_H */
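For orientation, the PAX_DELTA_* values above are bit counts for the page-granular ASLR offsets, so the window sizes they imply can be checked directly; the PAGE_SHIFT of 12 below is an assumption (4 KiB pages):

#include <stdio.h>

int main(void)
{
	const unsigned page_shift = 12;                     /* assumed 4 KiB pages */
	const unsigned bits[] = { 27 - page_shift, 36 - page_shift };

	/* 15 bits -> 128 MiB window, 24 bits -> 64 GiB window */
	for (unsigned i = 0; i < 2; i++)
		printf("%u random page bits -> %llu KiB window\n",
		       bits[i], (1ULL << bits[i]) * 4ULL);
	return 0;
}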
1475 diff -urNp linux-2.6.32.45/arch/mips/include/asm/page.h linux-2.6.32.45/arch/mips/include/asm/page.h
1476 --- linux-2.6.32.45/arch/mips/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
1477 +++ linux-2.6.32.45/arch/mips/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
1478 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1479 #ifdef CONFIG_CPU_MIPS32
1480 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1481 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1482 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1483 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1484 #else
1485 typedef struct { unsigned long long pte; } pte_t;
1486 #define pte_val(x) ((x).pte)
1487 diff -urNp linux-2.6.32.45/arch/mips/include/asm/reboot.h linux-2.6.32.45/arch/mips/include/asm/reboot.h
1488 --- linux-2.6.32.45/arch/mips/include/asm/reboot.h 2011-03-27 14:31:47.000000000 -0400
1489 +++ linux-2.6.32.45/arch/mips/include/asm/reboot.h 2011-08-21 17:35:02.000000000 -0400
1490 @@ -9,7 +9,7 @@
1491 #ifndef _ASM_REBOOT_H
1492 #define _ASM_REBOOT_H
1493
1494 -extern void (*_machine_restart)(char *command);
1495 -extern void (*_machine_halt)(void);
1496 +extern void (*__noreturn _machine_restart)(char *command);
1497 +extern void (*__noreturn _machine_halt)(void);
1498
1499 #endif /* _ASM_REBOOT_H */
1500 diff -urNp linux-2.6.32.45/arch/mips/include/asm/system.h linux-2.6.32.45/arch/mips/include/asm/system.h
1501 --- linux-2.6.32.45/arch/mips/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
1502 +++ linux-2.6.32.45/arch/mips/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
1503 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1504 */
1505 #define __ARCH_WANT_UNLOCKED_CTXSW
1506
1507 -extern unsigned long arch_align_stack(unsigned long sp);
1508 +#define arch_align_stack(x) ((x) & ~0xfUL)
1509
1510 #endif /* _ASM_SYSTEM_H */
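The replacement macro drops the randomised rounding (the out-of-line arch_align_stack() removed from process.c further below) and simply truncates to a 16-byte boundary; a one-line demonstration of the effect:

#include <stdio.h>

#define arch_align_stack(x) ((x) & ~0xfUL)    /* the replacement macro above */

int main(void)
{
	unsigned long sp = 0x7fff1234UL;

	/* prints 0x7fff1234 -> 0x7fff1230: rounded down to 16 bytes, no jitter */
	printf("%#lx -> %#lx\n", sp, arch_align_stack(sp));
	return 0;
}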
1511 diff -urNp linux-2.6.32.45/arch/mips/kernel/binfmt_elfn32.c linux-2.6.32.45/arch/mips/kernel/binfmt_elfn32.c
1512 --- linux-2.6.32.45/arch/mips/kernel/binfmt_elfn32.c 2011-03-27 14:31:47.000000000 -0400
1513 +++ linux-2.6.32.45/arch/mips/kernel/binfmt_elfn32.c 2011-04-17 15:56:45.000000000 -0400
1514 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1515 #undef ELF_ET_DYN_BASE
1516 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1517
1518 +#ifdef CONFIG_PAX_ASLR
1519 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1520 +
1521 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1522 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1523 +#endif
1524 +
1525 #include <asm/processor.h>
1526 #include <linux/module.h>
1527 #include <linux/elfcore.h>
1528 diff -urNp linux-2.6.32.45/arch/mips/kernel/binfmt_elfo32.c linux-2.6.32.45/arch/mips/kernel/binfmt_elfo32.c
1529 --- linux-2.6.32.45/arch/mips/kernel/binfmt_elfo32.c 2011-03-27 14:31:47.000000000 -0400
1530 +++ linux-2.6.32.45/arch/mips/kernel/binfmt_elfo32.c 2011-04-17 15:56:45.000000000 -0400
1531 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1532 #undef ELF_ET_DYN_BASE
1533 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1534
1535 +#ifdef CONFIG_PAX_ASLR
1536 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1537 +
1538 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1539 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1540 +#endif
1541 +
1542 #include <asm/processor.h>
1543
1544 /*
1545 diff -urNp linux-2.6.32.45/arch/mips/kernel/kgdb.c linux-2.6.32.45/arch/mips/kernel/kgdb.c
1546 --- linux-2.6.32.45/arch/mips/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
1547 +++ linux-2.6.32.45/arch/mips/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
1548 @@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vecto
1549 return -1;
1550 }
1551
1552 +/* cannot be const */
1553 struct kgdb_arch arch_kgdb_ops;
1554
1555 /*
1556 diff -urNp linux-2.6.32.45/arch/mips/kernel/process.c linux-2.6.32.45/arch/mips/kernel/process.c
1557 --- linux-2.6.32.45/arch/mips/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
1558 +++ linux-2.6.32.45/arch/mips/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
1559 @@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_stru
1560 out:
1561 return pc;
1562 }
1563 -
1564 -/*
1565 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1566 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1567 - */
1568 -unsigned long arch_align_stack(unsigned long sp)
1569 -{
1570 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1571 - sp -= get_random_int() & ~PAGE_MASK;
1572 -
1573 - return sp & ALMASK;
1574 -}
1575 diff -urNp linux-2.6.32.45/arch/mips/kernel/reset.c linux-2.6.32.45/arch/mips/kernel/reset.c
1576 --- linux-2.6.32.45/arch/mips/kernel/reset.c 2011-03-27 14:31:47.000000000 -0400
1577 +++ linux-2.6.32.45/arch/mips/kernel/reset.c 2011-08-21 17:35:26.000000000 -0400
1578 @@ -19,8 +19,8 @@
1579 * So handle all using function pointers to machine specific
1580 * functions.
1581 */
1582 -void (*_machine_restart)(char *command);
1583 -void (*_machine_halt)(void);
1584 +void (*__noreturn _machine_restart)(char *command);
1585 +void (*__noreturn _machine_halt)(void);
1586 void (*pm_power_off)(void);
1587
1588 EXPORT_SYMBOL(pm_power_off);
1589 @@ -29,16 +29,19 @@ void machine_restart(char *command)
1590 {
1591 if (_machine_restart)
1592 _machine_restart(command);
1593 + BUG();
1594 }
1595
1596 void machine_halt(void)
1597 {
1598 if (_machine_halt)
1599 _machine_halt();
1600 + BUG();
1601 }
1602
1603 void machine_power_off(void)
1604 {
1605 if (pm_power_off)
1606 pm_power_off();
1607 + BUG();
1608 }
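Marking the pointers __noreturn (the reboot.h hunk earlier) tells the compiler the indirect call never comes back, so the BUG() added here only fires when the pointer was never set. A stand-alone sketch; the __noreturn expansion and the names are assumptions for a user-space build:

#include <stdlib.h>

#define __noreturn __attribute__((noreturn))

static void (*__noreturn machine_halt_hook)(void);   /* models _machine_halt */

static void halt(void)
{
	if (machine_halt_hook)
		machine_halt_hook();   /* the compiler may assume this never returns */
	abort();                   /* models the BUG() added for the NULL case   */
}

static void __noreturn power_off_now(void)
{
	exit(0);
}

int main(void)
{
	machine_halt_hook = power_off_now;
	halt();                    /* does not return */
}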
1609 diff -urNp linux-2.6.32.45/arch/mips/kernel/syscall.c linux-2.6.32.45/arch/mips/kernel/syscall.c
1610 --- linux-2.6.32.45/arch/mips/kernel/syscall.c 2011-03-27 14:31:47.000000000 -0400
1611 +++ linux-2.6.32.45/arch/mips/kernel/syscall.c 2011-04-17 15:56:45.000000000 -0400
1612 @@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(str
1613 do_color_align = 0;
1614 if (filp || (flags & MAP_SHARED))
1615 do_color_align = 1;
1616 +
1617 +#ifdef CONFIG_PAX_RANDMMAP
1618 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1619 +#endif
1620 +
1621 if (addr) {
1622 if (do_color_align)
1623 addr = COLOUR_ALIGN(addr, pgoff);
1624 else
1625 addr = PAGE_ALIGN(addr);
1626 vmm = find_vma(current->mm, addr);
1627 - if (task_size - len >= addr &&
1628 - (!vmm || addr + len <= vmm->vm_start))
1629 + if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
1630 return addr;
1631 }
1632 - addr = TASK_UNMAPPED_BASE;
1633 + addr = current->mm->mmap_base;
1634 if (do_color_align)
1635 addr = COLOUR_ALIGN(addr, pgoff);
1636 else
1637 @@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(str
1638 /* At this point: (!vmm || addr < vmm->vm_end). */
1639 if (task_size - len < addr)
1640 return -ENOMEM;
1641 - if (!vmm || addr + len <= vmm->vm_start)
1642 + if (check_heap_stack_gap(vmm, addr, len))
1643 return addr;
1644 addr = vmm->vm_end;
1645 if (do_color_align)
1646 diff -urNp linux-2.6.32.45/arch/mips/Makefile linux-2.6.32.45/arch/mips/Makefile
1647 --- linux-2.6.32.45/arch/mips/Makefile 2011-03-27 14:31:47.000000000 -0400
1648 +++ linux-2.6.32.45/arch/mips/Makefile 2011-08-21 19:26:52.000000000 -0400
1649 @@ -51,6 +51,8 @@ endif
1650 cflags-y := -ffunction-sections
1651 cflags-y += $(call cc-option, -mno-check-zero-division)
1652
1653 +cflags-y += -Wno-sign-compare -Wno-extra
1654 +
1655 ifdef CONFIG_32BIT
1656 ld-emul = $(32bit-emul)
1657 vmlinux-32 = vmlinux
1658 diff -urNp linux-2.6.32.45/arch/mips/mm/fault.c linux-2.6.32.45/arch/mips/mm/fault.c
1659 --- linux-2.6.32.45/arch/mips/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1660 +++ linux-2.6.32.45/arch/mips/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1661 @@ -26,6 +26,23 @@
1662 #include <asm/ptrace.h>
1663 #include <asm/highmem.h> /* For VMALLOC_END */
1664
1665 +#ifdef CONFIG_PAX_PAGEEXEC
1666 +void pax_report_insns(void *pc, void *sp)
1667 +{
1668 + unsigned long i;
1669 +
1670 + printk(KERN_ERR "PAX: bytes at PC: ");
1671 + for (i = 0; i < 5; i++) {
1672 + unsigned int c;
1673 + if (get_user(c, (unsigned int *)pc+i))
1674 + printk(KERN_CONT "???????? ");
1675 + else
1676 + printk(KERN_CONT "%08x ", c);
1677 + }
1678 + printk("\n");
1679 +}
1680 +#endif
1681 +
1682 /*
1683 * This routine handles page faults. It determines the address,
1684 * and the problem, and then passes it off to one of the appropriate
1685 diff -urNp linux-2.6.32.45/arch/parisc/include/asm/elf.h linux-2.6.32.45/arch/parisc/include/asm/elf.h
1686 --- linux-2.6.32.45/arch/parisc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1687 +++ linux-2.6.32.45/arch/parisc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1688 @@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration..
1689
1690 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1691
1692 +#ifdef CONFIG_PAX_ASLR
1693 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1694 +
1695 +#define PAX_DELTA_MMAP_LEN 16
1696 +#define PAX_DELTA_STACK_LEN 16
1697 +#endif
1698 +
1699 /* This yields a mask that user programs can use to figure out what
1700 instruction set this CPU supports. This could be done in user space,
1701 but it's not easy, and we've already done it here. */
1702 diff -urNp linux-2.6.32.45/arch/parisc/include/asm/pgtable.h linux-2.6.32.45/arch/parisc/include/asm/pgtable.h
1703 --- linux-2.6.32.45/arch/parisc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1704 +++ linux-2.6.32.45/arch/parisc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1705 @@ -207,6 +207,17 @@
1706 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1707 #define PAGE_COPY PAGE_EXECREAD
1708 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1709 +
1710 +#ifdef CONFIG_PAX_PAGEEXEC
1711 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1712 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1713 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1714 +#else
1715 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1716 +# define PAGE_COPY_NOEXEC PAGE_COPY
1717 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1718 +#endif
1719 +
1720 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1721 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
1722 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
1723 diff -urNp linux-2.6.32.45/arch/parisc/kernel/module.c linux-2.6.32.45/arch/parisc/kernel/module.c
1724 --- linux-2.6.32.45/arch/parisc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1725 +++ linux-2.6.32.45/arch/parisc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1726 @@ -95,16 +95,38 @@
1727
1728 /* three functions to determine where in the module core
1729 * or init pieces the location is */
1730 +static inline int in_init_rx(struct module *me, void *loc)
1731 +{
1732 + return (loc >= me->module_init_rx &&
1733 + loc < (me->module_init_rx + me->init_size_rx));
1734 +}
1735 +
1736 +static inline int in_init_rw(struct module *me, void *loc)
1737 +{
1738 + return (loc >= me->module_init_rw &&
1739 + loc < (me->module_init_rw + me->init_size_rw));
1740 +}
1741 +
1742 static inline int in_init(struct module *me, void *loc)
1743 {
1744 - return (loc >= me->module_init &&
1745 - loc <= (me->module_init + me->init_size));
1746 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1747 +}
1748 +
1749 +static inline int in_core_rx(struct module *me, void *loc)
1750 +{
1751 + return (loc >= me->module_core_rx &&
1752 + loc < (me->module_core_rx + me->core_size_rx));
1753 +}
1754 +
1755 +static inline int in_core_rw(struct module *me, void *loc)
1756 +{
1757 + return (loc >= me->module_core_rw &&
1758 + loc < (me->module_core_rw + me->core_size_rw));
1759 }
1760
1761 static inline int in_core(struct module *me, void *loc)
1762 {
1763 - return (loc >= me->module_core &&
1764 - loc <= (me->module_core + me->core_size));
1765 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1766 }
1767
1768 static inline int in_local(struct module *me, void *loc)
1769 @@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_
1770 }
1771
1772 /* align things a bit */
1773 - me->core_size = ALIGN(me->core_size, 16);
1774 - me->arch.got_offset = me->core_size;
1775 - me->core_size += gots * sizeof(struct got_entry);
1776 -
1777 - me->core_size = ALIGN(me->core_size, 16);
1778 - me->arch.fdesc_offset = me->core_size;
1779 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1780 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1781 + me->arch.got_offset = me->core_size_rw;
1782 + me->core_size_rw += gots * sizeof(struct got_entry);
1783 +
1784 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1785 + me->arch.fdesc_offset = me->core_size_rw;
1786 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1787
1788 me->arch.got_max = gots;
1789 me->arch.fdesc_max = fdescs;
1790 @@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module
1791
1792 BUG_ON(value == 0);
1793
1794 - got = me->module_core + me->arch.got_offset;
1795 + got = me->module_core_rw + me->arch.got_offset;
1796 for (i = 0; got[i].addr; i++)
1797 if (got[i].addr == value)
1798 goto out;
1799 @@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module
1800 #ifdef CONFIG_64BIT
1801 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1802 {
1803 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1804 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1805
1806 if (!value) {
1807 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1808 @@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module
1809
1810 /* Create new one */
1811 fdesc->addr = value;
1812 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1813 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1814 return (Elf_Addr)fdesc;
1815 }
1816 #endif /* CONFIG_64BIT */
1817 @@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
1818
1819 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1820 end = table + sechdrs[me->arch.unwind_section].sh_size;
1821 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1822 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1823
1824 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1825 me->arch.unwind_section, table, end, gp);
1826 diff -urNp linux-2.6.32.45/arch/parisc/kernel/sys_parisc.c linux-2.6.32.45/arch/parisc/kernel/sys_parisc.c
1827 --- linux-2.6.32.45/arch/parisc/kernel/sys_parisc.c 2011-03-27 14:31:47.000000000 -0400
1828 +++ linux-2.6.32.45/arch/parisc/kernel/sys_parisc.c 2011-04-17 15:56:45.000000000 -0400
1829 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1830 /* At this point: (!vma || addr < vma->vm_end). */
1831 if (TASK_SIZE - len < addr)
1832 return -ENOMEM;
1833 - if (!vma || addr + len <= vma->vm_start)
1834 + if (check_heap_stack_gap(vma, addr, len))
1835 return addr;
1836 addr = vma->vm_end;
1837 }
1838 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1839 /* At this point: (!vma || addr < vma->vm_end). */
1840 if (TASK_SIZE - len < addr)
1841 return -ENOMEM;
1842 - if (!vma || addr + len <= vma->vm_start)
1843 + if (check_heap_stack_gap(vma, addr, len))
1844 return addr;
1845 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1846 if (addr < vma->vm_end) /* handle wraparound */
1847 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1848 if (flags & MAP_FIXED)
1849 return addr;
1850 if (!addr)
1851 - addr = TASK_UNMAPPED_BASE;
1852 + addr = current->mm->mmap_base;
1853
1854 if (filp) {
1855 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1856 diff -urNp linux-2.6.32.45/arch/parisc/kernel/traps.c linux-2.6.32.45/arch/parisc/kernel/traps.c
1857 --- linux-2.6.32.45/arch/parisc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
1858 +++ linux-2.6.32.45/arch/parisc/kernel/traps.c 2011-04-17 15:56:45.000000000 -0400
1859 @@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1860
1861 down_read(&current->mm->mmap_sem);
1862 vma = find_vma(current->mm,regs->iaoq[0]);
1863 - if (vma && (regs->iaoq[0] >= vma->vm_start)
1864 - && (vma->vm_flags & VM_EXEC)) {
1865 -
1866 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1867 fault_address = regs->iaoq[0];
1868 fault_space = regs->iasq[0];
1869
1870 diff -urNp linux-2.6.32.45/arch/parisc/mm/fault.c linux-2.6.32.45/arch/parisc/mm/fault.c
1871 --- linux-2.6.32.45/arch/parisc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1872 +++ linux-2.6.32.45/arch/parisc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1873 @@ -15,6 +15,7 @@
1874 #include <linux/sched.h>
1875 #include <linux/interrupt.h>
1876 #include <linux/module.h>
1877 +#include <linux/unistd.h>
1878
1879 #include <asm/uaccess.h>
1880 #include <asm/traps.h>
1881 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1882 static unsigned long
1883 parisc_acctyp(unsigned long code, unsigned int inst)
1884 {
1885 - if (code == 6 || code == 16)
1886 + if (code == 6 || code == 7 || code == 16)
1887 return VM_EXEC;
1888
1889 switch (inst & 0xf0000000) {
1890 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1891 }
1892 #endif
1893
1894 +#ifdef CONFIG_PAX_PAGEEXEC
1895 +/*
1896 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1897 + *
1898 + * returns 1 when task should be killed
1899 + * 2 when rt_sigreturn trampoline was detected
1900 + * 3 when unpatched PLT trampoline was detected
1901 + */
1902 +static int pax_handle_fetch_fault(struct pt_regs *regs)
1903 +{
1904 +
1905 +#ifdef CONFIG_PAX_EMUPLT
1906 + int err;
1907 +
1908 + do { /* PaX: unpatched PLT emulation */
1909 + unsigned int bl, depwi;
1910 +
1911 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1912 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1913 +
1914 + if (err)
1915 + break;
1916 +
1917 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1918 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1919 +
1920 + err = get_user(ldw, (unsigned int *)addr);
1921 + err |= get_user(bv, (unsigned int *)(addr+4));
1922 + err |= get_user(ldw2, (unsigned int *)(addr+8));
1923 +
1924 + if (err)
1925 + break;
1926 +
1927 + if (ldw == 0x0E801096U &&
1928 + bv == 0xEAC0C000U &&
1929 + ldw2 == 0x0E881095U)
1930 + {
1931 + unsigned int resolver, map;
1932 +
1933 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1934 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1935 + if (err)
1936 + break;
1937 +
1938 + regs->gr[20] = instruction_pointer(regs)+8;
1939 + regs->gr[21] = map;
1940 + regs->gr[22] = resolver;
1941 + regs->iaoq[0] = resolver | 3UL;
1942 + regs->iaoq[1] = regs->iaoq[0] + 4;
1943 + return 3;
1944 + }
1945 + }
1946 + } while (0);
1947 +#endif
1948 +
1949 +#ifdef CONFIG_PAX_EMUTRAMP
1950 +
1951 +#ifndef CONFIG_PAX_EMUSIGRT
1952 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1953 + return 1;
1954 +#endif
1955 +
1956 + do { /* PaX: rt_sigreturn emulation */
1957 + unsigned int ldi1, ldi2, bel, nop;
1958 +
1959 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1960 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1961 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1962 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1963 +
1964 + if (err)
1965 + break;
1966 +
1967 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1968 + ldi2 == 0x3414015AU &&
1969 + bel == 0xE4008200U &&
1970 + nop == 0x08000240U)
1971 + {
1972 + regs->gr[25] = (ldi1 & 2) >> 1;
1973 + regs->gr[20] = __NR_rt_sigreturn;
1974 + regs->gr[31] = regs->iaoq[1] + 16;
1975 + regs->sr[0] = regs->iasq[1];
1976 + regs->iaoq[0] = 0x100UL;
1977 + regs->iaoq[1] = regs->iaoq[0] + 4;
1978 + regs->iasq[0] = regs->sr[2];
1979 + regs->iasq[1] = regs->sr[2];
1980 + return 2;
1981 + }
1982 + } while (0);
1983 +#endif
1984 +
1985 + return 1;
1986 +}
1987 +
1988 +void pax_report_insns(void *pc, void *sp)
1989 +{
1990 + unsigned long i;
1991 +
1992 + printk(KERN_ERR "PAX: bytes at PC: ");
1993 + for (i = 0; i < 5; i++) {
1994 + unsigned int c;
1995 + if (get_user(c, (unsigned int *)pc+i))
1996 + printk(KERN_CONT "???????? ");
1997 + else
1998 + printk(KERN_CONT "%08x ", c);
1999 + }
2000 + printk("\n");
2001 +}
2002 +#endif
2003 +
2004 int fixup_exception(struct pt_regs *regs)
2005 {
2006 const struct exception_table_entry *fix;
2007 @@ -192,8 +303,33 @@ good_area:
2008
2009 acc_type = parisc_acctyp(code,regs->iir);
2010
2011 - if ((vma->vm_flags & acc_type) != acc_type)
2012 + if ((vma->vm_flags & acc_type) != acc_type) {
2013 +
2014 +#ifdef CONFIG_PAX_PAGEEXEC
2015 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2016 + (address & ~3UL) == instruction_pointer(regs))
2017 + {
2018 + up_read(&mm->mmap_sem);
2019 + switch (pax_handle_fetch_fault(regs)) {
2020 +
2021 +#ifdef CONFIG_PAX_EMUPLT
2022 + case 3:
2023 + return;
2024 +#endif
2025 +
2026 +#ifdef CONFIG_PAX_EMUTRAMP
2027 + case 2:
2028 + return;
2029 +#endif
2030 +
2031 + }
2032 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2033 + do_group_exit(SIGKILL);
2034 + }
2035 +#endif
2036 +
2037 goto bad_area;
2038 + }
2039
2040 /*
2041 * If for any reason at all we couldn't handle the fault, make
2042 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/device.h linux-2.6.32.45/arch/powerpc/include/asm/device.h
2043 --- linux-2.6.32.45/arch/powerpc/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
2044 +++ linux-2.6.32.45/arch/powerpc/include/asm/device.h 2011-04-17 15:56:45.000000000 -0400
2045 @@ -14,7 +14,7 @@ struct dev_archdata {
2046 struct device_node *of_node;
2047
2048 /* DMA operations on that device */
2049 - struct dma_map_ops *dma_ops;
2050 + const struct dma_map_ops *dma_ops;
2051
2052 /*
2053 * When an iommu is in use, dma_data is used as a ptr to the base of the
2054 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/dma-mapping.h linux-2.6.32.45/arch/powerpc/include/asm/dma-mapping.h
2055 --- linux-2.6.32.45/arch/powerpc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
2056 +++ linux-2.6.32.45/arch/powerpc/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
2057 @@ -69,9 +69,9 @@ static inline unsigned long device_to_ma
2058 #ifdef CONFIG_PPC64
2059 extern struct dma_map_ops dma_iommu_ops;
2060 #endif
2061 -extern struct dma_map_ops dma_direct_ops;
2062 +extern const struct dma_map_ops dma_direct_ops;
2063
2064 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2065 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
2066 {
2067 /* We don't handle the NULL dev case for ISA for now. We could
2068 * do it via an out of line call but it is not needed for now. The
2069 @@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dm
2070 return dev->archdata.dma_ops;
2071 }
2072
2073 -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
2074 +static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
2075 {
2076 dev->archdata.dma_ops = ops;
2077 }
2078 @@ -118,7 +118,7 @@ static inline void set_dma_offset(struct
2079
2080 static inline int dma_supported(struct device *dev, u64 mask)
2081 {
2082 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2083 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2084
2085 if (unlikely(dma_ops == NULL))
2086 return 0;
2087 @@ -132,7 +132,7 @@ static inline int dma_supported(struct d
2088
2089 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2090 {
2091 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2092 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2093
2094 if (unlikely(dma_ops == NULL))
2095 return -EIO;
2096 @@ -147,7 +147,7 @@ static inline int dma_set_mask(struct de
2097 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2098 dma_addr_t *dma_handle, gfp_t flag)
2099 {
2100 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2101 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2102 void *cpu_addr;
2103
2104 BUG_ON(!dma_ops);
2105 @@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(s
2106 static inline void dma_free_coherent(struct device *dev, size_t size,
2107 void *cpu_addr, dma_addr_t dma_handle)
2108 {
2109 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2110 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2111
2112 BUG_ON(!dma_ops);
2113
2114 @@ -173,7 +173,7 @@ static inline void dma_free_coherent(str
2115
2116 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
2117 {
2118 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2119 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2120
2121 if (dma_ops->mapping_error)
2122 return dma_ops->mapping_error(dev, dma_addr);
2123 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/elf.h linux-2.6.32.45/arch/powerpc/include/asm/elf.h
2124 --- linux-2.6.32.45/arch/powerpc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
2125 +++ linux-2.6.32.45/arch/powerpc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
2126 @@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
2127 the loader. We need to make sure that it is out of the way of the program
2128 that it will "exec", and that there is sufficient room for the brk. */
2129
2130 -extern unsigned long randomize_et_dyn(unsigned long base);
2131 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2132 +#define ELF_ET_DYN_BASE (0x20000000)
2133 +
2134 +#ifdef CONFIG_PAX_ASLR
2135 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2136 +
2137 +#ifdef __powerpc64__
2138 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2139 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2140 +#else
2141 +#define PAX_DELTA_MMAP_LEN 15
2142 +#define PAX_DELTA_STACK_LEN 15
2143 +#endif
2144 +#endif
2145
2146 /*
2147 * Our registers are always unsigned longs, whether we're a 32 bit
2148 @@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(s
2149 (0x7ff >> (PAGE_SHIFT - 12)) : \
2150 (0x3ffff >> (PAGE_SHIFT - 12)))
2151
2152 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2153 -#define arch_randomize_brk arch_randomize_brk
2154 -
2155 #endif /* __KERNEL__ */
2156
2157 /*
2158 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/iommu.h linux-2.6.32.45/arch/powerpc/include/asm/iommu.h
2159 --- linux-2.6.32.45/arch/powerpc/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
2160 +++ linux-2.6.32.45/arch/powerpc/include/asm/iommu.h 2011-04-17 15:56:45.000000000 -0400
2161 @@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(voi
2162 extern void iommu_init_early_dart(void);
2163 extern void iommu_init_early_pasemi(void);
2164
2165 +/* dma-iommu.c */
2166 +extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
2167 +
2168 #ifdef CONFIG_PCI
2169 extern void pci_iommu_init(void);
2170 extern void pci_direct_iommu_init(void);
2171 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/kmap_types.h linux-2.6.32.45/arch/powerpc/include/asm/kmap_types.h
2172 --- linux-2.6.32.45/arch/powerpc/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
2173 +++ linux-2.6.32.45/arch/powerpc/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
2174 @@ -26,6 +26,7 @@ enum km_type {
2175 KM_SOFTIRQ1,
2176 KM_PPC_SYNC_PAGE,
2177 KM_PPC_SYNC_ICACHE,
2178 + KM_CLEARPAGE,
2179 KM_TYPE_NR
2180 };
2181
2182 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/page_64.h linux-2.6.32.45/arch/powerpc/include/asm/page_64.h
2183 --- linux-2.6.32.45/arch/powerpc/include/asm/page_64.h 2011-03-27 14:31:47.000000000 -0400
2184 +++ linux-2.6.32.45/arch/powerpc/include/asm/page_64.h 2011-04-17 15:56:45.000000000 -0400
2185 @@ -180,15 +180,18 @@ do { \
2186 * stack by default, so in the absense of a PT_GNU_STACK program header
2187 * we turn execute permission off.
2188 */
2189 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2190 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2191 +#define VM_STACK_DEFAULT_FLAGS32 \
2192 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2193 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2194
2195 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2196 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2197
2198 +#ifndef CONFIG_PAX_PAGEEXEC
2199 #define VM_STACK_DEFAULT_FLAGS \
2200 (test_thread_flag(TIF_32BIT) ? \
2201 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2202 +#endif
2203
2204 #include <asm-generic/getorder.h>
2205
2206 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/page.h linux-2.6.32.45/arch/powerpc/include/asm/page.h
2207 --- linux-2.6.32.45/arch/powerpc/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
2208 +++ linux-2.6.32.45/arch/powerpc/include/asm/page.h 2011-08-21 16:07:39.000000000 -0400
2209 @@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
2210 * and needs to be executable. This means the whole heap ends
2211 * up being executable.
2212 */
2213 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2214 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2215 +#define VM_DATA_DEFAULT_FLAGS32 \
2216 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2217 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2218
2219 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2220 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2221 @@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
2222 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2223 #endif
2224
2225 +#define ktla_ktva(addr) (addr)
2226 +#define ktva_ktla(addr) (addr)
2227 +
2228 #ifndef __ASSEMBLY__
2229
2230 #undef STRICT_MM_TYPECHECKS
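Both default-flag hunks above (page_64.h and page.h) gate VM_EXEC on the READ_IMPLIES_EXEC personality bit instead of granting it unconditionally. A simplified model, with the VM_MAY* bits dropped and all constants illustrative rather than the kernel's:

#include <stdio.h>

#define VM_READ            0x1UL
#define VM_WRITE           0x2UL
#define VM_EXEC            0x4UL
#define READ_IMPLIES_EXEC  0x0400000UL        /* illustrative personality bit */

static unsigned long data_default_flags32(unsigned long personality)
{
	return ((personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) |
	       VM_READ | VM_WRITE;
}

int main(void)
{
	printf("default:           %#lx\n", data_default_flags32(0));
	printf("READ_IMPLIES_EXEC: %#lx\n", data_default_flags32(READ_IMPLIES_EXEC));
	return 0;
}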
2231 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/pci.h linux-2.6.32.45/arch/powerpc/include/asm/pci.h
2232 --- linux-2.6.32.45/arch/powerpc/include/asm/pci.h 2011-03-27 14:31:47.000000000 -0400
2233 +++ linux-2.6.32.45/arch/powerpc/include/asm/pci.h 2011-04-17 15:56:45.000000000 -0400
2234 @@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq
2235 }
2236
2237 #ifdef CONFIG_PCI
2238 -extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
2239 -extern struct dma_map_ops *get_pci_dma_ops(void);
2240 +extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
2241 +extern const struct dma_map_ops *get_pci_dma_ops(void);
2242 #else /* CONFIG_PCI */
2243 #define set_pci_dma_ops(d)
2244 #define get_pci_dma_ops() NULL
2245 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/pgtable.h linux-2.6.32.45/arch/powerpc/include/asm/pgtable.h
2246 --- linux-2.6.32.45/arch/powerpc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
2247 +++ linux-2.6.32.45/arch/powerpc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
2248 @@ -2,6 +2,7 @@
2249 #define _ASM_POWERPC_PGTABLE_H
2250 #ifdef __KERNEL__
2251
2252 +#include <linux/const.h>
2253 #ifndef __ASSEMBLY__
2254 #include <asm/processor.h> /* For TASK_SIZE */
2255 #include <asm/mmu.h>
2256 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/pte-hash32.h linux-2.6.32.45/arch/powerpc/include/asm/pte-hash32.h
2257 --- linux-2.6.32.45/arch/powerpc/include/asm/pte-hash32.h 2011-03-27 14:31:47.000000000 -0400
2258 +++ linux-2.6.32.45/arch/powerpc/include/asm/pte-hash32.h 2011-04-17 15:56:45.000000000 -0400
2259 @@ -21,6 +21,7 @@
2260 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2261 #define _PAGE_USER 0x004 /* usermode access allowed */
2262 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2263 +#define _PAGE_EXEC _PAGE_GUARDED
2264 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2265 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2266 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2267 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/ptrace.h linux-2.6.32.45/arch/powerpc/include/asm/ptrace.h
2268 --- linux-2.6.32.45/arch/powerpc/include/asm/ptrace.h 2011-03-27 14:31:47.000000000 -0400
2269 +++ linux-2.6.32.45/arch/powerpc/include/asm/ptrace.h 2011-08-21 15:53:58.000000000 -0400
2270 @@ -103,7 +103,7 @@ extern unsigned long profile_pc(struct p
2271 } while(0)
2272
2273 struct task_struct;
2274 -extern unsigned long ptrace_get_reg(struct task_struct *task, int regno);
2275 +extern unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno);
2276 extern int ptrace_put_reg(struct task_struct *task, int regno,
2277 unsigned long data);
2278
2279 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/reg.h linux-2.6.32.45/arch/powerpc/include/asm/reg.h
2280 --- linux-2.6.32.45/arch/powerpc/include/asm/reg.h 2011-03-27 14:31:47.000000000 -0400
2281 +++ linux-2.6.32.45/arch/powerpc/include/asm/reg.h 2011-04-17 15:56:45.000000000 -0400
2282 @@ -191,6 +191,7 @@
2283 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2284 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2285 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2286 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2287 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2288 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2289 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2290 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/swiotlb.h linux-2.6.32.45/arch/powerpc/include/asm/swiotlb.h
2291 --- linux-2.6.32.45/arch/powerpc/include/asm/swiotlb.h 2011-03-27 14:31:47.000000000 -0400
2292 +++ linux-2.6.32.45/arch/powerpc/include/asm/swiotlb.h 2011-04-17 15:56:45.000000000 -0400
2293 @@ -13,7 +13,7 @@
2294
2295 #include <linux/swiotlb.h>
2296
2297 -extern struct dma_map_ops swiotlb_dma_ops;
2298 +extern const struct dma_map_ops swiotlb_dma_ops;
2299
2300 static inline void dma_mark_clean(void *addr, size_t size) {}
2301
2302 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/system.h linux-2.6.32.45/arch/powerpc/include/asm/system.h
2303 --- linux-2.6.32.45/arch/powerpc/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
2304 +++ linux-2.6.32.45/arch/powerpc/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
2305 @@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
2306 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2307 #endif
2308
2309 -extern unsigned long arch_align_stack(unsigned long sp);
2310 +#define arch_align_stack(x) ((x) & ~0xfUL)
2311
2312 /* Used in very early kernel initialization. */
2313 extern unsigned long reloc_offset(void);
2314 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/uaccess.h linux-2.6.32.45/arch/powerpc/include/asm/uaccess.h
2315 --- linux-2.6.32.45/arch/powerpc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
2316 +++ linux-2.6.32.45/arch/powerpc/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
2317 @@ -13,6 +13,8 @@
2318 #define VERIFY_READ 0
2319 #define VERIFY_WRITE 1
2320
2321 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
2322 +
2323 /*
2324 * The fs value determines whether argument validity checking should be
2325 * performed or not. If get_fs() == USER_DS, checking is performed, with
2326 @@ -327,52 +329,6 @@ do { \
2327 extern unsigned long __copy_tofrom_user(void __user *to,
2328 const void __user *from, unsigned long size);
2329
2330 -#ifndef __powerpc64__
2331 -
2332 -static inline unsigned long copy_from_user(void *to,
2333 - const void __user *from, unsigned long n)
2334 -{
2335 - unsigned long over;
2336 -
2337 - if (access_ok(VERIFY_READ, from, n))
2338 - return __copy_tofrom_user((__force void __user *)to, from, n);
2339 - if ((unsigned long)from < TASK_SIZE) {
2340 - over = (unsigned long)from + n - TASK_SIZE;
2341 - return __copy_tofrom_user((__force void __user *)to, from,
2342 - n - over) + over;
2343 - }
2344 - return n;
2345 -}
2346 -
2347 -static inline unsigned long copy_to_user(void __user *to,
2348 - const void *from, unsigned long n)
2349 -{
2350 - unsigned long over;
2351 -
2352 - if (access_ok(VERIFY_WRITE, to, n))
2353 - return __copy_tofrom_user(to, (__force void __user *)from, n);
2354 - if ((unsigned long)to < TASK_SIZE) {
2355 - over = (unsigned long)to + n - TASK_SIZE;
2356 - return __copy_tofrom_user(to, (__force void __user *)from,
2357 - n - over) + over;
2358 - }
2359 - return n;
2360 -}
2361 -
2362 -#else /* __powerpc64__ */
2363 -
2364 -#define __copy_in_user(to, from, size) \
2365 - __copy_tofrom_user((to), (from), (size))
2366 -
2367 -extern unsigned long copy_from_user(void *to, const void __user *from,
2368 - unsigned long n);
2369 -extern unsigned long copy_to_user(void __user *to, const void *from,
2370 - unsigned long n);
2371 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
2372 - unsigned long n);
2373 -
2374 -#endif /* __powerpc64__ */
2375 -
2376 static inline unsigned long __copy_from_user_inatomic(void *to,
2377 const void __user *from, unsigned long n)
2378 {
2379 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
2380 if (ret == 0)
2381 return 0;
2382 }
2383 +
2384 + if (!__builtin_constant_p(n))
2385 + check_object_size(to, n, false);
2386 +
2387 return __copy_tofrom_user((__force void __user *)to, from, n);
2388 }
2389
2390 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
2391 if (ret == 0)
2392 return 0;
2393 }
2394 +
2395 + if (!__builtin_constant_p(n))
2396 + check_object_size(from, n, true);
2397 +
2398 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2399 }
2400
2401 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
2402 return __copy_to_user_inatomic(to, from, size);
2403 }
2404
2405 +#ifndef __powerpc64__
2406 +
2407 +static inline unsigned long __must_check copy_from_user(void *to,
2408 + const void __user *from, unsigned long n)
2409 +{
2410 + unsigned long over;
2411 +
2412 + if ((long)n < 0)
2413 + return n;
2414 +
2415 + if (access_ok(VERIFY_READ, from, n)) {
2416 + if (!__builtin_constant_p(n))
2417 + check_object_size(to, n, false);
2418 + return __copy_tofrom_user((__force void __user *)to, from, n);
2419 + }
2420 + if ((unsigned long)from < TASK_SIZE) {
2421 + over = (unsigned long)from + n - TASK_SIZE;
2422 + if (!__builtin_constant_p(n - over))
2423 + check_object_size(to, n - over, false);
2424 + return __copy_tofrom_user((__force void __user *)to, from,
2425 + n - over) + over;
2426 + }
2427 + return n;
2428 +}
2429 +
2430 +static inline unsigned long __must_check copy_to_user(void __user *to,
2431 + const void *from, unsigned long n)
2432 +{
2433 + unsigned long over;
2434 +
2435 + if ((long)n < 0)
2436 + return n;
2437 +
2438 + if (access_ok(VERIFY_WRITE, to, n)) {
2439 + if (!__builtin_constant_p(n))
2440 + check_object_size(from, n, true);
2441 + return __copy_tofrom_user(to, (__force void __user *)from, n);
2442 + }
2443 + if ((unsigned long)to < TASK_SIZE) {
2444 + over = (unsigned long)to + n - TASK_SIZE;
2445 + if (!__builtin_constant_p(n))
2446 + check_object_size(from, n - over, true);
2447 + return __copy_tofrom_user(to, (__force void __user *)from,
2448 + n - over) + over;
2449 + }
2450 + return n;
2451 +}
2452 +
2453 +#else /* __powerpc64__ */
2454 +
2455 +#define __copy_in_user(to, from, size) \
2456 + __copy_tofrom_user((to), (from), (size))
2457 +
2458 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2459 +{
2460 + if ((long)n < 0 || n > INT_MAX)
2461 + return n;
2462 +
2463 + if (!__builtin_constant_p(n))
2464 + check_object_size(to, n, false);
2465 +
2466 + if (likely(access_ok(VERIFY_READ, from, n)))
2467 + n = __copy_from_user(to, from, n);
2468 + else
2469 + memset(to, 0, n);
2470 + return n;
2471 +}
2472 +
2473 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2474 +{
2475 + if ((long)n < 0 || n > INT_MAX)
2476 + return n;
2477 +
2478 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
2479 + if (!__builtin_constant_p(n))
2480 + check_object_size(from, n, true);
2481 + n = __copy_to_user(to, from, n);
2482 + }
2483 + return n;
2484 +}
2485 +
2486 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
2487 + unsigned long n);
2488 +
2489 +#endif /* __powerpc64__ */
2490 +
2491 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2492
2493 static inline unsigned long clear_user(void __user *addr, unsigned long size)
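The reworked copy helpers above call check_object_size() whenever the length is not a compile-time constant. A rough user-space model of what that check is assumed to enforce, namely that the kernel buffer stays inside one known object; the real helper added elsewhere in the patch walks slab and stack metadata rather than a table like this:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct object { uintptr_t start; size_t size; };     /* stand-in for slab metadata */

static const struct object objects[] = {
	{ 0x1000, 256 },
	{ 0x2000, 64  },
};

static bool object_size_ok(uintptr_t ptr, size_t n)
{
	for (size_t i = 0; i < sizeof(objects) / sizeof(objects[0]); i++)
		if (ptr >= objects[i].start &&
		    ptr + n <= objects[i].start + objects[i].size)
			return true;
	return false;                  /* overruns, or is not, a known object */
}

int main(void)
{
	printf("%d\n", object_size_ok(0x1000, 256));   /* 1: fits the object    */
	printf("%d\n", object_size_ok(0x1000, 300));   /* 0: would overrun it   */
	return 0;
}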
2494 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/cacheinfo.c linux-2.6.32.45/arch/powerpc/kernel/cacheinfo.c
2495 --- linux-2.6.32.45/arch/powerpc/kernel/cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
2496 +++ linux-2.6.32.45/arch/powerpc/kernel/cacheinfo.c 2011-04-17 15:56:45.000000000 -0400
2497 @@ -642,7 +642,7 @@ static struct kobj_attribute *cache_inde
2498 &cache_assoc_attr,
2499 };
2500
2501 -static struct sysfs_ops cache_index_ops = {
2502 +static const struct sysfs_ops cache_index_ops = {
2503 .show = cache_index_show,
2504 };
2505
2506 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/dma.c linux-2.6.32.45/arch/powerpc/kernel/dma.c
2507 --- linux-2.6.32.45/arch/powerpc/kernel/dma.c 2011-03-27 14:31:47.000000000 -0400
2508 +++ linux-2.6.32.45/arch/powerpc/kernel/dma.c 2011-04-17 15:56:45.000000000 -0400
2509 @@ -134,7 +134,7 @@ static inline void dma_direct_sync_singl
2510 }
2511 #endif
2512
2513 -struct dma_map_ops dma_direct_ops = {
2514 +const struct dma_map_ops dma_direct_ops = {
2515 .alloc_coherent = dma_direct_alloc_coherent,
2516 .free_coherent = dma_direct_free_coherent,
2517 .map_sg = dma_direct_map_sg,
2518 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/dma-iommu.c linux-2.6.32.45/arch/powerpc/kernel/dma-iommu.c
2519 --- linux-2.6.32.45/arch/powerpc/kernel/dma-iommu.c 2011-03-27 14:31:47.000000000 -0400
2520 +++ linux-2.6.32.45/arch/powerpc/kernel/dma-iommu.c 2011-04-17 15:56:45.000000000 -0400
2521 @@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct de
2522 }
2523
2524 /* We support DMA to/from any memory page via the iommu */
2525 -static int dma_iommu_dma_supported(struct device *dev, u64 mask)
2526 +int dma_iommu_dma_supported(struct device *dev, u64 mask)
2527 {
2528 struct iommu_table *tbl = get_iommu_table_base(dev);
2529
2530 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/dma-swiotlb.c linux-2.6.32.45/arch/powerpc/kernel/dma-swiotlb.c
2531 --- linux-2.6.32.45/arch/powerpc/kernel/dma-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
2532 +++ linux-2.6.32.45/arch/powerpc/kernel/dma-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
2533 @@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
2534 * map_page, and unmap_page on highmem, use normal dma_ops
2535 * for everything else.
2536 */
2537 -struct dma_map_ops swiotlb_dma_ops = {
2538 +const struct dma_map_ops swiotlb_dma_ops = {
2539 .alloc_coherent = dma_direct_alloc_coherent,
2540 .free_coherent = dma_direct_free_coherent,
2541 .map_sg = swiotlb_map_sg_attrs,
2542 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/exceptions-64e.S linux-2.6.32.45/arch/powerpc/kernel/exceptions-64e.S
2543 --- linux-2.6.32.45/arch/powerpc/kernel/exceptions-64e.S 2011-03-27 14:31:47.000000000 -0400
2544 +++ linux-2.6.32.45/arch/powerpc/kernel/exceptions-64e.S 2011-04-17 15:56:45.000000000 -0400
2545 @@ -455,6 +455,7 @@ storage_fault_common:
2546 std r14,_DAR(r1)
2547 std r15,_DSISR(r1)
2548 addi r3,r1,STACK_FRAME_OVERHEAD
2549 + bl .save_nvgprs
2550 mr r4,r14
2551 mr r5,r15
2552 ld r14,PACA_EXGEN+EX_R14(r13)
2553 @@ -464,8 +465,7 @@ storage_fault_common:
2554 cmpdi r3,0
2555 bne- 1f
2556 b .ret_from_except_lite
2557 -1: bl .save_nvgprs
2558 - mr r5,r3
2559 +1: mr r5,r3
2560 addi r3,r1,STACK_FRAME_OVERHEAD
2561 ld r4,_DAR(r1)
2562 bl .bad_page_fault
2563 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/exceptions-64s.S linux-2.6.32.45/arch/powerpc/kernel/exceptions-64s.S
2564 --- linux-2.6.32.45/arch/powerpc/kernel/exceptions-64s.S 2011-03-27 14:31:47.000000000 -0400
2565 +++ linux-2.6.32.45/arch/powerpc/kernel/exceptions-64s.S 2011-04-17 15:56:45.000000000 -0400
2566 @@ -818,10 +818,10 @@ handle_page_fault:
2567 11: ld r4,_DAR(r1)
2568 ld r5,_DSISR(r1)
2569 addi r3,r1,STACK_FRAME_OVERHEAD
2570 + bl .save_nvgprs
2571 bl .do_page_fault
2572 cmpdi r3,0
2573 beq+ 13f
2574 - bl .save_nvgprs
2575 mr r5,r3
2576 addi r3,r1,STACK_FRAME_OVERHEAD
2577 lwz r4,_DAR(r1)
2578 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/ibmebus.c linux-2.6.32.45/arch/powerpc/kernel/ibmebus.c
2579 --- linux-2.6.32.45/arch/powerpc/kernel/ibmebus.c 2011-03-27 14:31:47.000000000 -0400
2580 +++ linux-2.6.32.45/arch/powerpc/kernel/ibmebus.c 2011-04-17 15:56:45.000000000 -0400
2581 @@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct
2582 return 1;
2583 }
2584
2585 -static struct dma_map_ops ibmebus_dma_ops = {
2586 +static const struct dma_map_ops ibmebus_dma_ops = {
2587 .alloc_coherent = ibmebus_alloc_coherent,
2588 .free_coherent = ibmebus_free_coherent,
2589 .map_sg = ibmebus_map_sg,
2590 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/kgdb.c linux-2.6.32.45/arch/powerpc/kernel/kgdb.c
2591 --- linux-2.6.32.45/arch/powerpc/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
2592 +++ linux-2.6.32.45/arch/powerpc/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
2593 @@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct
2594 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
2595 return 0;
2596
2597 - if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2598 + if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2599 regs->nip += 4;
2600
2601 return 1;
2602 @@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vecto
2603 /*
2604 * Global data
2605 */
2606 -struct kgdb_arch arch_kgdb_ops = {
2607 +const struct kgdb_arch arch_kgdb_ops = {
2608 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
2609 };
2610
2611 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/module_32.c linux-2.6.32.45/arch/powerpc/kernel/module_32.c
2612 --- linux-2.6.32.45/arch/powerpc/kernel/module_32.c 2011-03-27 14:31:47.000000000 -0400
2613 +++ linux-2.6.32.45/arch/powerpc/kernel/module_32.c 2011-04-17 15:56:45.000000000 -0400
2614 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2615 me->arch.core_plt_section = i;
2616 }
2617 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2618 - printk("Module doesn't contain .plt or .init.plt sections.\n");
2619 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2620 return -ENOEXEC;
2621 }
2622
2623 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2624
2625 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2626 /* Init, or core PLT? */
2627 - if (location >= mod->module_core
2628 - && location < mod->module_core + mod->core_size)
2629 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2630 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2631 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2632 - else
2633 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2634 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2635 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2636 + else {
2637 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2638 + return ~0UL;
2639 + }
2640
2641 /* Find this entry, or if that fails, the next avail. entry */
2642 while (entry->jump[0]) {
2643 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/module.c linux-2.6.32.45/arch/powerpc/kernel/module.c
2644 --- linux-2.6.32.45/arch/powerpc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
2645 +++ linux-2.6.32.45/arch/powerpc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
2646 @@ -31,11 +31,24 @@
2647
2648 LIST_HEAD(module_bug_list);
2649
2650 +#ifdef CONFIG_PAX_KERNEXEC
2651 void *module_alloc(unsigned long size)
2652 {
2653 if (size == 0)
2654 return NULL;
2655
2656 + return vmalloc(size);
2657 +}
2658 +
2659 +void *module_alloc_exec(unsigned long size)
2660 +#else
2661 +void *module_alloc(unsigned long size)
2662 +#endif
2663 +
2664 +{
2665 + if (size == 0)
2666 + return NULL;
2667 +
2668 return vmalloc_exec(size);
2669 }
2670
2671 @@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2672 vfree(module_region);
2673 }
2674
2675 +#ifdef CONFIG_PAX_KERNEXEC
2676 +void module_free_exec(struct module *mod, void *module_region)
2677 +{
2678 + module_free(mod, module_region);
2679 +}
2680 +#endif
2681 +
2682 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2683 const Elf_Shdr *sechdrs,
2684 const char *name)
2685 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/pci-common.c linux-2.6.32.45/arch/powerpc/kernel/pci-common.c
2686 --- linux-2.6.32.45/arch/powerpc/kernel/pci-common.c 2011-03-27 14:31:47.000000000 -0400
2687 +++ linux-2.6.32.45/arch/powerpc/kernel/pci-common.c 2011-04-17 15:56:45.000000000 -0400
2688 @@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
2689 unsigned int ppc_pci_flags = 0;
2690
2691
2692 -static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2693 +static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2694
2695 -void set_pci_dma_ops(struct dma_map_ops *dma_ops)
2696 +void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
2697 {
2698 pci_dma_ops = dma_ops;
2699 }
2700
2701 -struct dma_map_ops *get_pci_dma_ops(void)
2702 +const struct dma_map_ops *get_pci_dma_ops(void)
2703 {
2704 return pci_dma_ops;
2705 }
2706 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/process.c linux-2.6.32.45/arch/powerpc/kernel/process.c
2707 --- linux-2.6.32.45/arch/powerpc/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
2708 +++ linux-2.6.32.45/arch/powerpc/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
2709 @@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
2710 * Lookup NIP late so we have the best change of getting the
2711 * above info out without failing
2712 */
2713 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2714 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2715 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2716 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2717 #endif
2718 show_stack(current, (unsigned long *) regs->gpr[1]);
2719 if (!user_mode(regs))
2720 @@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk,
2721 newsp = stack[0];
2722 ip = stack[STACK_FRAME_LR_SAVE];
2723 if (!firstframe || ip != lr) {
2724 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2725 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2726 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2727 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2728 - printk(" (%pS)",
2729 + printk(" (%pA)",
2730 (void *)current->ret_stack[curr_frame].ret);
2731 curr_frame--;
2732 }
2733 @@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk,
2734 struct pt_regs *regs = (struct pt_regs *)
2735 (sp + STACK_FRAME_OVERHEAD);
2736 lr = regs->link;
2737 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
2738 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
2739 regs->trap, (void *)regs->nip, (void *)lr);
2740 firstframe = 1;
2741 }
2742 @@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
2743 }
2744
2745 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2746 -
2747 -unsigned long arch_align_stack(unsigned long sp)
2748 -{
2749 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2750 - sp -= get_random_int() & ~PAGE_MASK;
2751 - return sp & ~0xf;
2752 -}
2753 -
2754 -static inline unsigned long brk_rnd(void)
2755 -{
2756 - unsigned long rnd = 0;
2757 -
2758 - /* 8MB for 32bit, 1GB for 64bit */
2759 - if (is_32bit_task())
2760 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2761 - else
2762 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2763 -
2764 - return rnd << PAGE_SHIFT;
2765 -}
2766 -
2767 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2768 -{
2769 - unsigned long base = mm->brk;
2770 - unsigned long ret;
2771 -
2772 -#ifdef CONFIG_PPC_STD_MMU_64
2773 - /*
2774 - * If we are using 1TB segments and we are allowed to randomise
2775 - * the heap, we can put it above 1TB so it is backed by a 1TB
2776 - * segment. Otherwise the heap will be in the bottom 1TB
2777 - * which always uses 256MB segments and this may result in a
2778 - * performance penalty.
2779 - */
2780 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2781 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2782 -#endif
2783 -
2784 - ret = PAGE_ALIGN(base + brk_rnd());
2785 -
2786 - if (ret < mm->brk)
2787 - return mm->brk;
2788 -
2789 - return ret;
2790 -}
2791 -
2792 -unsigned long randomize_et_dyn(unsigned long base)
2793 -{
2794 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2795 -
2796 - if (ret < base)
2797 - return base;
2798 -
2799 - return ret;
2800 -}
2801 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/ptrace.c linux-2.6.32.45/arch/powerpc/kernel/ptrace.c
2802 --- linux-2.6.32.45/arch/powerpc/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
2803 +++ linux-2.6.32.45/arch/powerpc/kernel/ptrace.c 2011-08-21 15:53:39.000000000 -0400
2804 @@ -86,7 +86,7 @@ static int set_user_trap(struct task_str
2805 /*
2806 * Get contents of register REGNO in task TASK.
2807 */
2808 -unsigned long ptrace_get_reg(struct task_struct *task, int regno)
2809 +unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno)
2810 {
2811 if (task->thread.regs == NULL)
2812 return -EIO;
2813 @@ -894,7 +894,7 @@ long arch_ptrace(struct task_struct *chi
2814
2815 CHECK_FULL_REGS(child->thread.regs);
2816 if (index < PT_FPR0) {
2817 - tmp = ptrace_get_reg(child, (int) index);
2818 + tmp = ptrace_get_reg(child, index);
2819 } else {
2820 flush_fp_to_thread(child);
2821 tmp = ((unsigned long *)child->thread.fpr)
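
Widening regno to unsigned int, and dropping the (int) cast at the call site, hardens ptrace_get_reg() against the classic signed-index pattern, where a negative register number survives a signed "< limit" comparison and indexes memory before the array. A self-contained illustration of that pattern; the array size and names are made up for the example:

#include <stdio.h>

#define N_REGS 48           /* illustrative, not the real powerpc register count */

static unsigned long regs[N_REGS];

/* signed index: -1 passes the signed comparison and would read regs[-1] */
static unsigned long get_reg_signed(int regno)
{
        if (regno < N_REGS)
                return regs[regno];
        return (unsigned long)-1;
}

/* unsigned index: any "negative" caller value becomes huge and is rejected */
static unsigned long get_reg_unsigned(unsigned int regno)
{
        if (regno < N_REGS)
                return regs[regno];
        return (unsigned long)-1;
}

int main(void)
{
        printf("%lu\n", get_reg_unsigned(5));     /* 0, a real register slot */
        printf("%lu\n", get_reg_unsigned(-1u));   /* rejected: returns -1UL */
        (void)get_reg_signed;                     /* not called: regs[-1] is undefined */
        return 0;
}
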
2822 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/signal_32.c linux-2.6.32.45/arch/powerpc/kernel/signal_32.c
2823 --- linux-2.6.32.45/arch/powerpc/kernel/signal_32.c 2011-03-27 14:31:47.000000000 -0400
2824 +++ linux-2.6.32.45/arch/powerpc/kernel/signal_32.c 2011-04-17 15:56:45.000000000 -0400
2825 @@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig
2826 /* Save user registers on the stack */
2827 frame = &rt_sf->uc.uc_mcontext;
2828 addr = frame;
2829 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2830 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2831 if (save_user_regs(regs, frame, 0, 1))
2832 goto badframe;
2833 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2834 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/signal_64.c linux-2.6.32.45/arch/powerpc/kernel/signal_64.c
2835 --- linux-2.6.32.45/arch/powerpc/kernel/signal_64.c 2011-03-27 14:31:47.000000000 -0400
2836 +++ linux-2.6.32.45/arch/powerpc/kernel/signal_64.c 2011-04-17 15:56:45.000000000 -0400
2837 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct
2838 current->thread.fpscr.val = 0;
2839
2840 /* Set up to return from userspace. */
2841 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2842 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2843 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2844 } else {
2845 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2846 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/sys_ppc32.c linux-2.6.32.45/arch/powerpc/kernel/sys_ppc32.c
2847 --- linux-2.6.32.45/arch/powerpc/kernel/sys_ppc32.c 2011-03-27 14:31:47.000000000 -0400
2848 +++ linux-2.6.32.45/arch/powerpc/kernel/sys_ppc32.c 2011-04-17 15:56:45.000000000 -0400
2849 @@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct
2850 if (oldlenp) {
2851 if (!error) {
2852 if (get_user(oldlen, oldlenp) ||
2853 - put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
2854 + put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
2855 + copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
2856 error = -EFAULT;
2857 }
2858 - copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
2859 }
2860 return error;
2861 }
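
Moving the second copy_to_user() inside the error branch means its return value is now checked: a failure while copying the __unused padding back to user space produces -EFAULT instead of being silently dropped. The general shape of the pattern, with stand-in helpers instead of the real user-copy primitives:

#include <stddef.h>
#include <stdio.h>

#define EFAULT 14

/* Stand-ins for put_user()/copy_to_user(): non-zero means the write to
 * user memory failed. */
static int put_user_stub(size_t val, size_t *uaddr)
{
        *uaddr = val;
        return 0;
}

static int copy_to_user_stub(void *u, const void *k, size_t n)
{
        (void)u; (void)k;
        return n > 64;                 /* pretend large copies fault */
}

static long finish(size_t oldlen, size_t *u_oldlenp,
                   void *u_unused, const void *k_unused, size_t n, long error)
{
        if (u_oldlenp && !error) {
                /* chaining with || reports a failure in any copy-out,
                 * including the previously ignored one, as -EFAULT */
                if (put_user_stub(oldlen, u_oldlenp) ||
                    copy_to_user_stub(u_unused, k_unused, n))
                        error = -EFAULT;
        }
        return error;
}

int main(void)
{
        size_t len_out;
        char pad[16] = { 0 };

        printf("%ld\n", finish(4, &len_out, pad, pad, sizeof(pad), 0));  /* 0 */
        printf("%ld\n", finish(4, &len_out, pad, pad, 128, 0));          /* -14 */
        return 0;
}
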
2862 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/traps.c linux-2.6.32.45/arch/powerpc/kernel/traps.c
2863 --- linux-2.6.32.45/arch/powerpc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
2864 +++ linux-2.6.32.45/arch/powerpc/kernel/traps.c 2011-06-13 21:33:37.000000000 -0400
2865 @@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
2866 static inline void pmac_backlight_unblank(void) { }
2867 #endif
2868
2869 +extern void gr_handle_kernel_exploit(void);
2870 +
2871 int die(const char *str, struct pt_regs *regs, long err)
2872 {
2873 static struct {
2874 @@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs
2875 if (panic_on_oops)
2876 panic("Fatal exception");
2877
2878 + gr_handle_kernel_exploit();
2879 +
2880 oops_exit();
2881 do_exit(err);
2882
2883 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/vdso.c linux-2.6.32.45/arch/powerpc/kernel/vdso.c
2884 --- linux-2.6.32.45/arch/powerpc/kernel/vdso.c 2011-03-27 14:31:47.000000000 -0400
2885 +++ linux-2.6.32.45/arch/powerpc/kernel/vdso.c 2011-04-17 15:56:45.000000000 -0400
2886 @@ -36,6 +36,7 @@
2887 #include <asm/firmware.h>
2888 #include <asm/vdso.h>
2889 #include <asm/vdso_datapage.h>
2890 +#include <asm/mman.h>
2891
2892 #include "setup.h"
2893
2894 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2895 vdso_base = VDSO32_MBASE;
2896 #endif
2897
2898 - current->mm->context.vdso_base = 0;
2899 + current->mm->context.vdso_base = ~0UL;
2900
2901 /* vDSO has a problem and was disabled, just don't "enable" it for the
2902 * process
2903 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2904 vdso_base = get_unmapped_area(NULL, vdso_base,
2905 (vdso_pages << PAGE_SHIFT) +
2906 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2907 - 0, 0);
2908 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2909 if (IS_ERR_VALUE(vdso_base)) {
2910 rc = vdso_base;
2911 goto fail_mmapsem;
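
Two related changes in the vDSO setup: the "no vDSO" sentinel becomes ~0UL rather than 0, and the get_unmapped_area() call passes MAP_PRIVATE | MAP_EXECUTABLE so the allocator knows the mapping will be executable. Presumably the new sentinel keeps a legitimately low vdso_base from being mistaken for "not mapped"; the signal-frame hunks earlier in this patch test against the same ~0UL value. A minimal sketch of the sentinel idiom, with illustrative names:

#include <stdio.h>

#define NO_VDSO (~0UL)    /* sentinel meaning "no vDSO mapped" */

struct ctx { unsigned long vdso_base; };

static void init_ctx(struct ctx *c)                     { c->vdso_base = NO_VDSO; }
static void map_vdso(struct ctx *c, unsigned long addr) { c->vdso_base = addr; }

static int have_vdso(const struct ctx *c)
{
        /* comparing against the sentinel, not against 0, keeps a mapping
         * at a very low address distinguishable from "not mapped" */
        return c->vdso_base != NO_VDSO;
}

int main(void)
{
        struct ctx c;

        init_ctx(&c);
        printf("%d\n", have_vdso(&c));   /* 0 */
        map_vdso(&c, 0x100000UL);
        printf("%d\n", have_vdso(&c));   /* 1 */
        return 0;
}
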
2912 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/vio.c linux-2.6.32.45/arch/powerpc/kernel/vio.c
2913 --- linux-2.6.32.45/arch/powerpc/kernel/vio.c 2011-03-27 14:31:47.000000000 -0400
2914 +++ linux-2.6.32.45/arch/powerpc/kernel/vio.c 2011-04-17 15:56:45.000000000 -0400
2915 @@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struc
2916 vio_cmo_dealloc(viodev, alloc_size);
2917 }
2918
2919 -struct dma_map_ops vio_dma_mapping_ops = {
2920 +static const struct dma_map_ops vio_dma_mapping_ops = {
2921 .alloc_coherent = vio_dma_iommu_alloc_coherent,
2922 .free_coherent = vio_dma_iommu_free_coherent,
2923 .map_sg = vio_dma_iommu_map_sg,
2924 .unmap_sg = vio_dma_iommu_unmap_sg,
2925 + .dma_supported = dma_iommu_dma_supported,
2926 .map_page = vio_dma_iommu_map_page,
2927 .unmap_page = vio_dma_iommu_unmap_page,
2928
2929 @@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vi
2930
2931 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
2932 {
2933 - vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
2934 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
2935 }
2936
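
This is one instance of a constification pattern applied throughout the patch (dma_map_ops here, platform_suspend_ops, sysfs_ops and kgdb_arch later): making an ops table const lets it be placed in a read-only section, so its function pointers cannot be rewritten at run time. The immediate consequence in this hunk is that vio_cmo_set_dma_ops() can no longer patch .dma_supported into the table after the fact, which is why that member moves into the static initializer. A small sketch of that constraint, with an illustrative ops type:

#include <stdio.h>

/* Illustrative ops table; the real dma_map_ops has many more members. */
struct ops {
        int (*supported)(void);
        int (*map)(void);
};

static int my_supported(void) { return 1; }
static int my_map(void)       { return 0; }

/* const table: every member must be filled in by the initializer ... */
static const struct ops good = {
        .supported = my_supported,
        .map       = my_map,
};

/* ... because a later runtime assignment such as
 *
 *         good.supported = my_supported;
 *
 * is rejected: assignment of member 'supported' in read-only object. */

int main(void)
{
        printf("%d %d\n", good.supported(), good.map());
        return 0;
}
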
2937 diff -urNp linux-2.6.32.45/arch/powerpc/lib/usercopy_64.c linux-2.6.32.45/arch/powerpc/lib/usercopy_64.c
2938 --- linux-2.6.32.45/arch/powerpc/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
2939 +++ linux-2.6.32.45/arch/powerpc/lib/usercopy_64.c 2011-04-17 15:56:45.000000000 -0400
2940 @@ -9,22 +9,6 @@
2941 #include <linux/module.h>
2942 #include <asm/uaccess.h>
2943
2944 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2945 -{
2946 - if (likely(access_ok(VERIFY_READ, from, n)))
2947 - n = __copy_from_user(to, from, n);
2948 - else
2949 - memset(to, 0, n);
2950 - return n;
2951 -}
2952 -
2953 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2954 -{
2955 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2956 - n = __copy_to_user(to, from, n);
2957 - return n;
2958 -}
2959 -
2960 unsigned long copy_in_user(void __user *to, const void __user *from,
2961 unsigned long n)
2962 {
2963 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2964 return n;
2965 }
2966
2967 -EXPORT_SYMBOL(copy_from_user);
2968 -EXPORT_SYMBOL(copy_to_user);
2969 EXPORT_SYMBOL(copy_in_user);
2970
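
The out-of-line powerpc copy_from_user()/copy_to_user() wrappers and their exports disappear from this file; the zero-on-failure behaviour visible in the removed copy_from_user() is worth keeping in mind, since it is what prevents a failed copy from leaving stale kernel memory in the destination buffer. A user-space model of that contract, with a stand-in for access_ok():

#include <stdio.h>
#include <string.h>

/* fake_access_ok() stands in for access_ok(); the point is the contract:
 * on failure the destination is cleared, never left with stale contents. */
static int fake_access_ok(const void *p, unsigned long n)
{
        return p != NULL && n <= 4096;
}

static unsigned long model_copy_from_user(void *to, const void *from, unsigned long n)
{
        if (fake_access_ok(from, n)) {
                memcpy(to, from, n);      /* stands in for __copy_from_user() */
                return 0;                 /* everything copied */
        }
        memset(to, 0, n);                 /* zero the destination on failure */
        return n;                         /* nothing copied */
}

int main(void)
{
        char dst[8] = "XXXXXXX";
        unsigned long left = model_copy_from_user(dst, NULL, sizeof(dst));

        printf("%lu left, dst[0] = %d\n", left, dst[0]);   /* 8 left, dst[0] = 0 */
        return 0;
}
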
2971 diff -urNp linux-2.6.32.45/arch/powerpc/Makefile linux-2.6.32.45/arch/powerpc/Makefile
2972 --- linux-2.6.32.45/arch/powerpc/Makefile 2011-03-27 14:31:47.000000000 -0400
2973 +++ linux-2.6.32.45/arch/powerpc/Makefile 2011-08-21 19:27:08.000000000 -0400
2974 @@ -74,6 +74,8 @@ KBUILD_AFLAGS += -Iarch/$(ARCH)
2975 KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
2976 CPP = $(CC) -E $(KBUILD_CFLAGS)
2977
2978 +cflags-y += -Wno-sign-compare -Wno-extra
2979 +
2980 CHECKFLAGS += -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__
2981
2982 ifeq ($(CONFIG_PPC64),y)
2983 diff -urNp linux-2.6.32.45/arch/powerpc/mm/fault.c linux-2.6.32.45/arch/powerpc/mm/fault.c
2984 --- linux-2.6.32.45/arch/powerpc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
2985 +++ linux-2.6.32.45/arch/powerpc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
2986 @@ -30,6 +30,10 @@
2987 #include <linux/kprobes.h>
2988 #include <linux/kdebug.h>
2989 #include <linux/perf_event.h>
2990 +#include <linux/slab.h>
2991 +#include <linux/pagemap.h>
2992 +#include <linux/compiler.h>
2993 +#include <linux/unistd.h>
2994
2995 #include <asm/firmware.h>
2996 #include <asm/page.h>
2997 @@ -40,6 +44,7 @@
2998 #include <asm/uaccess.h>
2999 #include <asm/tlbflush.h>
3000 #include <asm/siginfo.h>
3001 +#include <asm/ptrace.h>
3002
3003
3004 #ifdef CONFIG_KPROBES
3005 @@ -64,6 +69,33 @@ static inline int notify_page_fault(stru
3006 }
3007 #endif
3008
3009 +#ifdef CONFIG_PAX_PAGEEXEC
3010 +/*
3011 + * PaX: decide what to do with offenders (regs->nip = fault address)
3012 + *
3013 + * returns 1 when task should be killed
3014 + */
3015 +static int pax_handle_fetch_fault(struct pt_regs *regs)
3016 +{
3017 + return 1;
3018 +}
3019 +
3020 +void pax_report_insns(void *pc, void *sp)
3021 +{
3022 + unsigned long i;
3023 +
3024 + printk(KERN_ERR "PAX: bytes at PC: ");
3025 + for (i = 0; i < 5; i++) {
3026 + unsigned int c;
3027 + if (get_user(c, (unsigned int __user *)pc+i))
3028 + printk(KERN_CONT "???????? ");
3029 + else
3030 + printk(KERN_CONT "%08x ", c);
3031 + }
3032 + printk("\n");
3033 +}
3034 +#endif
3035 +
3036 /*
3037 * Check whether the instruction at regs->nip is a store using
3038 * an update addressing form which will update r1.
3039 @@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_re
3040 * indicate errors in DSISR but can validly be set in SRR1.
3041 */
3042 if (trap == 0x400)
3043 - error_code &= 0x48200000;
3044 + error_code &= 0x58200000;
3045 else
3046 is_write = error_code & DSISR_ISSTORE;
3047 #else
3048 @@ -250,7 +282,7 @@ good_area:
3049 * "undefined". Of those that can be set, this is the only
3050 * one which seems bad.
3051 */
3052 - if (error_code & 0x10000000)
3053 + if (error_code & DSISR_GUARDED)
3054 /* Guarded storage error. */
3055 goto bad_area;
3056 #endif /* CONFIG_8xx */
3057 @@ -265,7 +297,7 @@ good_area:
3058 * processors use the same I/D cache coherency mechanism
3059 * as embedded.
3060 */
3061 - if (error_code & DSISR_PROTFAULT)
3062 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
3063 goto bad_area;
3064 #endif /* CONFIG_PPC_STD_MMU */
3065
3066 @@ -335,6 +367,23 @@ bad_area:
3067 bad_area_nosemaphore:
3068 /* User mode accesses cause a SIGSEGV */
3069 if (user_mode(regs)) {
3070 +
3071 +#ifdef CONFIG_PAX_PAGEEXEC
3072 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
3073 +#ifdef CONFIG_PPC_STD_MMU
3074 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
3075 +#else
3076 + if (is_exec && regs->nip == address) {
3077 +#endif
3078 + switch (pax_handle_fetch_fault(regs)) {
3079 + }
3080 +
3081 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
3082 + do_group_exit(SIGKILL);
3083 + }
3084 + }
3085 +#endif
3086 +
3087 _exception(SIGSEGV, regs, code, address);
3088 return 0;
3089 }
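
On powerpc the new pax_handle_fetch_fault() has no emulation cases and always returns 1, so the empty switch falls straight through to the report-and-kill path; pax_report_insns() then dumps the five 32-bit words at the faulting PC, printing "????????" for any word get_user() cannot read. A user-space model of that dump format (the opcodes are illustrative):

#include <stdio.h>

/* Model of the instruction dump: five 32-bit words starting at PC,
 * "????????" standing in for words get_user() could not read. */
static void report_insns(const unsigned int *pc, unsigned long n_readable)
{
        unsigned long i;

        printf("PAX: bytes at PC: ");
        for (i = 0; i < 5; i++) {
                if (i < n_readable)
                        printf("%08x ", pc[i]);
                else
                        printf("???????? ");
        }
        printf("\n");
}

int main(void)
{
        /* illustrative 32-bit instruction words */
        unsigned int text[5] = { 0x7c0802a6, 0x9421ffe0, 0x93e1001c,
                                 0x7c3f0b78, 0x38600000 };

        report_insns(text, 5);   /* fully readable */
        report_insns(text, 2);   /* fault part-way through the window */
        return 0;
}
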
3090 diff -urNp linux-2.6.32.45/arch/powerpc/mm/mem.c linux-2.6.32.45/arch/powerpc/mm/mem.c
3091 --- linux-2.6.32.45/arch/powerpc/mm/mem.c 2011-03-27 14:31:47.000000000 -0400
3092 +++ linux-2.6.32.45/arch/powerpc/mm/mem.c 2011-08-21 15:50:39.000000000 -0400
3093 @@ -250,7 +250,7 @@ static int __init mark_nonram_nosave(voi
3094 {
3095 unsigned long lmb_next_region_start_pfn,
3096 lmb_region_max_pfn;
3097 - int i;
3098 + unsigned int i;
3099
3100 for (i = 0; i < lmb.memory.cnt - 1; i++) {
3101 lmb_region_max_pfn =
3102 diff -urNp linux-2.6.32.45/arch/powerpc/mm/mmap_64.c linux-2.6.32.45/arch/powerpc/mm/mmap_64.c
3103 --- linux-2.6.32.45/arch/powerpc/mm/mmap_64.c 2011-03-27 14:31:47.000000000 -0400
3104 +++ linux-2.6.32.45/arch/powerpc/mm/mmap_64.c 2011-04-17 15:56:45.000000000 -0400
3105 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
3106 */
3107 if (mmap_is_legacy()) {
3108 mm->mmap_base = TASK_UNMAPPED_BASE;
3109 +
3110 +#ifdef CONFIG_PAX_RANDMMAP
3111 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3112 + mm->mmap_base += mm->delta_mmap;
3113 +#endif
3114 +
3115 mm->get_unmapped_area = arch_get_unmapped_area;
3116 mm->unmap_area = arch_unmap_area;
3117 } else {
3118 mm->mmap_base = mmap_base();
3119 +
3120 +#ifdef CONFIG_PAX_RANDMMAP
3121 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3122 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3123 +#endif
3124 +
3125 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3126 mm->unmap_area = arch_unmap_area_topdown;
3127 }
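
mm->delta_mmap and mm->delta_stack are new per-mm fields; this hunk only consumes them, and presumably they are filled in elsewhere in the patch from the per-architecture PAX_DELTA_MMAP_LEN / PAX_DELTA_STACK_LEN bit counts that appear in the elf.h hunks. A sketch of how a page-aligned delta with a given number of random bits would be generated and applied; the RNG, bit counts and base addresses are illustrative:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12UL          /* assumption: 4 KiB pages */

/* delta = (random page count, limited to 'bits' bits) << PAGE_SHIFT */
static unsigned long make_delta(unsigned int bits)
{
        unsigned long pages = (unsigned long)rand() & ((1UL << bits) - 1);
        return pages << PAGE_SHIFT;
}

int main(void)
{
        unsigned long task_unmapped_base = 0x0000200000000000UL; /* illustrative */
        unsigned long mmap_base          = 0x00007fffff000000UL; /* illustrative */
        unsigned long delta_mmap  = make_delta(28);  /* illustrative bit count */
        unsigned long delta_stack = make_delta(16);  /* illustrative bit count */

        /* legacy (bottom-up) layout: the base moves up by delta_mmap */
        printf("legacy mmap_base:  %#lx\n", task_unmapped_base + delta_mmap);

        /* top-down layout: the base moves down, leaving room for both deltas */
        printf("topdown mmap_base: %#lx\n", mmap_base - (delta_mmap + delta_stack));
        return 0;
}
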
3128 diff -urNp linux-2.6.32.45/arch/powerpc/mm/slice.c linux-2.6.32.45/arch/powerpc/mm/slice.c
3129 --- linux-2.6.32.45/arch/powerpc/mm/slice.c 2011-03-27 14:31:47.000000000 -0400
3130 +++ linux-2.6.32.45/arch/powerpc/mm/slice.c 2011-04-17 15:56:45.000000000 -0400
3131 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
3132 if ((mm->task_size - len) < addr)
3133 return 0;
3134 vma = find_vma(mm, addr);
3135 - return (!vma || (addr + len) <= vma->vm_start);
3136 + return check_heap_stack_gap(vma, addr, len);
3137 }
3138
3139 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
3140 @@ -256,7 +256,7 @@ full_search:
3141 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
3142 continue;
3143 }
3144 - if (!vma || addr + len <= vma->vm_start) {
3145 + if (check_heap_stack_gap(vma, addr, len)) {
3146 /*
3147 * Remember the place where we stopped the search:
3148 */
3149 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
3150 }
3151 }
3152
3153 - addr = mm->mmap_base;
3154 - while (addr > len) {
3155 + if (mm->mmap_base < len)
3156 + addr = -ENOMEM;
3157 + else
3158 + addr = mm->mmap_base - len;
3159 +
3160 + while (!IS_ERR_VALUE(addr)) {
3161 /* Go down by chunk size */
3162 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
3163 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
3164
3165 /* Check for hit with different page size */
3166 mask = slice_range_to_mask(addr, len);
3167 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
3168 * return with success:
3169 */
3170 vma = find_vma(mm, addr);
3171 - if (!vma || (addr + len) <= vma->vm_start) {
3172 + if (check_heap_stack_gap(vma, addr, len)) {
3173 /* remember the address as a hint for next time */
3174 if (use_cache)
3175 mm->free_area_cache = addr;
3176 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
3177 mm->cached_hole_size = vma->vm_start - addr;
3178
3179 /* try just below the current vma->vm_start */
3180 - addr = vma->vm_start;
3181 + addr = skip_heap_stack_gap(vma, len);
3182 }
3183
3184 /*
3185 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
3186 if (fixed && addr > (mm->task_size - len))
3187 return -EINVAL;
3188
3189 +#ifdef CONFIG_PAX_RANDMMAP
3190 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
3191 + addr = 0;
3192 +#endif
3193 +
3194 /* If hint, make sure it matches our alignment restrictions */
3195 if (!fixed && addr) {
3196 addr = _ALIGN_UP(addr, 1ul << pshift);
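
All of the open-coded "!vma || addr + len <= vma->vm_start" checks in these allocators become check_heap_stack_gap(), and the top-down retry step becomes skip_heap_stack_gap() driven by IS_ERR_VALUE(). Neither helper is defined in this hunk; judging from the call sites, the first accepts a candidate range only if it also leaves a guard gap before the next mapping (the gap that matters is the one below a downward-growing stack), and the second returns the next lower candidate address, or an error value once the search space is exhausted. A hedged sketch of that contract, with a made-up vma type and gap size:

#include <errno.h>
#include <stddef.h>

struct vma { unsigned long vm_start, vm_end; int grows_down; };

#define PAGE_SIZE  4096UL
#define GAP_PAGES  1UL                  /* size of the enforced gap: assumption */

static int check_gap(const struct vma *next, unsigned long addr, unsigned long len)
{
        unsigned long gap = 0;

        if (!next)                      /* nothing above: the range fits */
                return 1;
        if (next->grows_down)           /* stack above us: keep a guard gap */
                gap = GAP_PAGES * PAGE_SIZE;
        return addr + len + gap <= next->vm_start;
}

static unsigned long skip_gap(const struct vma *vma, unsigned long len)
{
        unsigned long gap = vma->grows_down ? GAP_PAGES * PAGE_SIZE : 0;

        if (vma->vm_start < len + gap)
                return -ENOMEM;         /* IS_ERR_VALUE-style failure */
        return vma->vm_start - len - gap;
}

int main(void)
{
        struct vma stack = { 0x7ffff000UL, 0x80000000UL, 1 };

        int too_close = check_gap(&stack, 0x7ffef000UL, 0x10000UL); /* 0: gap violated */
        unsigned long next = skip_gap(&stack, 0x10000UL);           /* 0x7ffee000 */
        int fits = check_gap(&stack, next, 0x10000UL);              /* 1 */

        return !(too_close == 0 && fits == 1);
}
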
3197 diff -urNp linux-2.6.32.45/arch/powerpc/platforms/52xx/lite5200_pm.c linux-2.6.32.45/arch/powerpc/platforms/52xx/lite5200_pm.c
3198 --- linux-2.6.32.45/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-03-27 14:31:47.000000000 -0400
3199 +++ linux-2.6.32.45/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-04-17 15:56:45.000000000 -0400
3200 @@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
3201 lite5200_pm_target_state = PM_SUSPEND_ON;
3202 }
3203
3204 -static struct platform_suspend_ops lite5200_pm_ops = {
3205 +static const struct platform_suspend_ops lite5200_pm_ops = {
3206 .valid = lite5200_pm_valid,
3207 .begin = lite5200_pm_begin,
3208 .prepare = lite5200_pm_prepare,
3209 diff -urNp linux-2.6.32.45/arch/powerpc/platforms/52xx/mpc52xx_pm.c linux-2.6.32.45/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3210 --- linux-2.6.32.45/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-03-27 14:31:47.000000000 -0400
3211 +++ linux-2.6.32.45/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-04-17 15:56:45.000000000 -0400
3212 @@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
3213 iounmap(mbar);
3214 }
3215
3216 -static struct platform_suspend_ops mpc52xx_pm_ops = {
3217 +static const struct platform_suspend_ops mpc52xx_pm_ops = {
3218 .valid = mpc52xx_pm_valid,
3219 .prepare = mpc52xx_pm_prepare,
3220 .enter = mpc52xx_pm_enter,
3221 diff -urNp linux-2.6.32.45/arch/powerpc/platforms/83xx/suspend.c linux-2.6.32.45/arch/powerpc/platforms/83xx/suspend.c
3222 --- linux-2.6.32.45/arch/powerpc/platforms/83xx/suspend.c 2011-03-27 14:31:47.000000000 -0400
3223 +++ linux-2.6.32.45/arch/powerpc/platforms/83xx/suspend.c 2011-04-17 15:56:45.000000000 -0400
3224 @@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
3225 return ret;
3226 }
3227
3228 -static struct platform_suspend_ops mpc83xx_suspend_ops = {
3229 +static const struct platform_suspend_ops mpc83xx_suspend_ops = {
3230 .valid = mpc83xx_suspend_valid,
3231 .begin = mpc83xx_suspend_begin,
3232 .enter = mpc83xx_suspend_enter,
3233 diff -urNp linux-2.6.32.45/arch/powerpc/platforms/cell/iommu.c linux-2.6.32.45/arch/powerpc/platforms/cell/iommu.c
3234 --- linux-2.6.32.45/arch/powerpc/platforms/cell/iommu.c 2011-03-27 14:31:47.000000000 -0400
3235 +++ linux-2.6.32.45/arch/powerpc/platforms/cell/iommu.c 2011-04-17 15:56:45.000000000 -0400
3236 @@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struc
3237
3238 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
3239
3240 -struct dma_map_ops dma_iommu_fixed_ops = {
3241 +const struct dma_map_ops dma_iommu_fixed_ops = {
3242 .alloc_coherent = dma_fixed_alloc_coherent,
3243 .free_coherent = dma_fixed_free_coherent,
3244 .map_sg = dma_fixed_map_sg,
3245 diff -urNp linux-2.6.32.45/arch/powerpc/platforms/ps3/system-bus.c linux-2.6.32.45/arch/powerpc/platforms/ps3/system-bus.c
3246 --- linux-2.6.32.45/arch/powerpc/platforms/ps3/system-bus.c 2011-03-27 14:31:47.000000000 -0400
3247 +++ linux-2.6.32.45/arch/powerpc/platforms/ps3/system-bus.c 2011-04-17 15:56:45.000000000 -0400
3248 @@ -694,7 +694,7 @@ static int ps3_dma_supported(struct devi
3249 return mask >= DMA_BIT_MASK(32);
3250 }
3251
3252 -static struct dma_map_ops ps3_sb_dma_ops = {
3253 +static const struct dma_map_ops ps3_sb_dma_ops = {
3254 .alloc_coherent = ps3_alloc_coherent,
3255 .free_coherent = ps3_free_coherent,
3256 .map_sg = ps3_sb_map_sg,
3257 @@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops
3258 .unmap_page = ps3_unmap_page,
3259 };
3260
3261 -static struct dma_map_ops ps3_ioc0_dma_ops = {
3262 +static const struct dma_map_ops ps3_ioc0_dma_ops = {
3263 .alloc_coherent = ps3_alloc_coherent,
3264 .free_coherent = ps3_free_coherent,
3265 .map_sg = ps3_ioc0_map_sg,
3266 diff -urNp linux-2.6.32.45/arch/powerpc/platforms/pseries/Kconfig linux-2.6.32.45/arch/powerpc/platforms/pseries/Kconfig
3267 --- linux-2.6.32.45/arch/powerpc/platforms/pseries/Kconfig 2011-03-27 14:31:47.000000000 -0400
3268 +++ linux-2.6.32.45/arch/powerpc/platforms/pseries/Kconfig 2011-04-17 15:56:45.000000000 -0400
3269 @@ -2,6 +2,8 @@ config PPC_PSERIES
3270 depends on PPC64 && PPC_BOOK3S
3271 bool "IBM pSeries & new (POWER5-based) iSeries"
3272 select MPIC
3273 + select PCI_MSI
3274 + select XICS
3275 select PPC_I8259
3276 select PPC_RTAS
3277 select RTAS_ERROR_LOGGING
3278 diff -urNp linux-2.6.32.45/arch/s390/include/asm/elf.h linux-2.6.32.45/arch/s390/include/asm/elf.h
3279 --- linux-2.6.32.45/arch/s390/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3280 +++ linux-2.6.32.45/arch/s390/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
3281 @@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
3282 that it will "exec", and that there is sufficient room for the brk. */
3283 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
3284
3285 +#ifdef CONFIG_PAX_ASLR
3286 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
3287 +
3288 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3289 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3290 +#endif
3291 +
3292 /* This yields a mask that user programs can use to figure out what
3293 instruction set this CPU supports. */
3294
3295 diff -urNp linux-2.6.32.45/arch/s390/include/asm/setup.h linux-2.6.32.45/arch/s390/include/asm/setup.h
3296 --- linux-2.6.32.45/arch/s390/include/asm/setup.h 2011-03-27 14:31:47.000000000 -0400
3297 +++ linux-2.6.32.45/arch/s390/include/asm/setup.h 2011-04-17 15:56:45.000000000 -0400
3298 @@ -50,13 +50,13 @@ extern unsigned long memory_end;
3299 void detect_memory_layout(struct mem_chunk chunk[]);
3300
3301 #ifdef CONFIG_S390_SWITCH_AMODE
3302 -extern unsigned int switch_amode;
3303 +#define switch_amode (1)
3304 #else
3305 #define switch_amode (0)
3306 #endif
3307
3308 #ifdef CONFIG_S390_EXEC_PROTECT
3309 -extern unsigned int s390_noexec;
3310 +#define s390_noexec (1)
3311 #else
3312 #define s390_noexec (0)
3313 #endif
3314 diff -urNp linux-2.6.32.45/arch/s390/include/asm/uaccess.h linux-2.6.32.45/arch/s390/include/asm/uaccess.h
3315 --- linux-2.6.32.45/arch/s390/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
3316 +++ linux-2.6.32.45/arch/s390/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
3317 @@ -232,6 +232,10 @@ static inline unsigned long __must_check
3318 copy_to_user(void __user *to, const void *from, unsigned long n)
3319 {
3320 might_fault();
3321 +
3322 + if ((long)n < 0)
3323 + return n;
3324 +
3325 if (access_ok(VERIFY_WRITE, to, n))
3326 n = __copy_to_user(to, from, n);
3327 return n;
3328 @@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void
3329 static inline unsigned long __must_check
3330 __copy_from_user(void *to, const void __user *from, unsigned long n)
3331 {
3332 + if ((long)n < 0)
3333 + return n;
3334 +
3335 if (__builtin_constant_p(n) && (n <= 256))
3336 return uaccess.copy_from_user_small(n, from, to);
3337 else
3338 @@ -283,6 +290,10 @@ static inline unsigned long __must_check
3339 copy_from_user(void *to, const void __user *from, unsigned long n)
3340 {
3341 might_fault();
3342 +
3343 + if ((long)n < 0)
3344 + return n;
3345 +
3346 if (access_ok(VERIFY_READ, from, n))
3347 n = __copy_from_user(to, from, n);
3348 else
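
Each s390 copy helper now rejects, before doing anything else, a length whose sign bit is set. Such lengths usually come from a negative signed value that was implicitly converted to unsigned long, and without the early return they would turn into an attempt to copy nearly the whole address space. A user-space sketch of the failure mode and the guard:

#include <stdio.h>
#include <string.h>

static unsigned long guarded_copy(void *to, const void *from, unsigned long n)
{
        if ((long)n < 0)        /* sign bit set: almost certainly a bug, refuse */
                return n;       /* report everything as "not copied" */

        memcpy(to, from, n);    /* stands in for the real access_ok() + copy */
        return 0;
}

int main(void)
{
        char dst[16], src[16] = "example";
        int  bad_len = -3;                           /* e.g. a failed length calculation */
        unsigned long n = (unsigned long)bad_len;    /* 0xfffffffffffffffd on 64-bit */

        printf("bad:  %lu uncopied\n", guarded_copy(dst, src, n));
        printf("good: %lu uncopied\n", guarded_copy(dst, src, sizeof(src)));
        return 0;
}
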
3349 diff -urNp linux-2.6.32.45/arch/s390/Kconfig linux-2.6.32.45/arch/s390/Kconfig
3350 --- linux-2.6.32.45/arch/s390/Kconfig 2011-03-27 14:31:47.000000000 -0400
3351 +++ linux-2.6.32.45/arch/s390/Kconfig 2011-04-17 15:56:45.000000000 -0400
3352 @@ -194,28 +194,26 @@ config AUDIT_ARCH
3353
3354 config S390_SWITCH_AMODE
3355 bool "Switch kernel/user addressing modes"
3356 + default y
3357 help
3358 This option allows to switch the addressing modes of kernel and user
3359 - space. The kernel parameter switch_amode=on will enable this feature,
3360 - default is disabled. Enabling this (via kernel parameter) on machines
3361 - earlier than IBM System z9-109 EC/BC will reduce system performance.
3362 + space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3363 + will reduce system performance.
3364
3365 Note that this option will also be selected by selecting the execute
3366 - protection option below. Enabling the execute protection via the
3367 - noexec kernel parameter will also switch the addressing modes,
3368 - independent of the switch_amode kernel parameter.
3369 + protection option below. Enabling the execute protection will also
3370 + switch the addressing modes, independent of this option.
3371
3372
3373 config S390_EXEC_PROTECT
3374 bool "Data execute protection"
3375 + default y
3376 select S390_SWITCH_AMODE
3377 help
3378 This option allows to enable a buffer overflow protection for user
3379 space programs and it also selects the addressing mode option above.
3380 - The kernel parameter noexec=on will enable this feature and also
3381 - switch the addressing modes, default is disabled. Enabling this (via
3382 - kernel parameter) on machines earlier than IBM System z9-109 EC/BC
3383 - will reduce system performance.
3384 + Enabling this on machines earlier than IBM System z9-109 EC/BC will
3385 + reduce system performance.
3386
3387 comment "Code generation options"
3388
3389 diff -urNp linux-2.6.32.45/arch/s390/kernel/module.c linux-2.6.32.45/arch/s390/kernel/module.c
3390 --- linux-2.6.32.45/arch/s390/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
3391 +++ linux-2.6.32.45/arch/s390/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
3392 @@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
3393
3394 /* Increase core size by size of got & plt and set start
3395 offsets for got and plt. */
3396 - me->core_size = ALIGN(me->core_size, 4);
3397 - me->arch.got_offset = me->core_size;
3398 - me->core_size += me->arch.got_size;
3399 - me->arch.plt_offset = me->core_size;
3400 - me->core_size += me->arch.plt_size;
3401 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
3402 + me->arch.got_offset = me->core_size_rw;
3403 + me->core_size_rw += me->arch.got_size;
3404 + me->arch.plt_offset = me->core_size_rx;
3405 + me->core_size_rx += me->arch.plt_size;
3406 return 0;
3407 }
3408
3409 @@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3410 if (info->got_initialized == 0) {
3411 Elf_Addr *gotent;
3412
3413 - gotent = me->module_core + me->arch.got_offset +
3414 + gotent = me->module_core_rw + me->arch.got_offset +
3415 info->got_offset;
3416 *gotent = val;
3417 info->got_initialized = 1;
3418 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3419 else if (r_type == R_390_GOTENT ||
3420 r_type == R_390_GOTPLTENT)
3421 *(unsigned int *) loc =
3422 - (val + (Elf_Addr) me->module_core - loc) >> 1;
3423 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3424 else if (r_type == R_390_GOT64 ||
3425 r_type == R_390_GOTPLT64)
3426 *(unsigned long *) loc = val;
3427 @@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3428 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3429 if (info->plt_initialized == 0) {
3430 unsigned int *ip;
3431 - ip = me->module_core + me->arch.plt_offset +
3432 + ip = me->module_core_rx + me->arch.plt_offset +
3433 info->plt_offset;
3434 #ifndef CONFIG_64BIT
3435 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3436 @@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3437 val - loc + 0xffffUL < 0x1ffffeUL) ||
3438 (r_type == R_390_PLT32DBL &&
3439 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3440 - val = (Elf_Addr) me->module_core +
3441 + val = (Elf_Addr) me->module_core_rx +
3442 me->arch.plt_offset +
3443 info->plt_offset;
3444 val += rela->r_addend - loc;
3445 @@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3446 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3447 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3448 val = val + rela->r_addend -
3449 - ((Elf_Addr) me->module_core + me->arch.got_offset);
3450 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3451 if (r_type == R_390_GOTOFF16)
3452 *(unsigned short *) loc = val;
3453 else if (r_type == R_390_GOTOFF32)
3454 @@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3455 break;
3456 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3457 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3458 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
3459 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3460 rela->r_addend - loc;
3461 if (r_type == R_390_GOTPC)
3462 *(unsigned int *) loc = val;
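
The relocation code now sizes the GOT against core_size_rw and the PLT against core_size_rx, and resolves GOT entries relative to module_core_rw but PLT stubs relative to module_core_rx. This assumes the split module layout introduced elsewhere in the patch, in which the single core region becomes a writable data part and an executable code part. A sketch of that bookkeeping, using field names matching the hunk but an otherwise made-up structure:

#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

/* Illustrative subset of the split-module bookkeeping assumed by the hunk:
 * RW (data, GOT) and RX (code, PLT) sizes are tracked separately. */
struct mod_layout {
        unsigned long core_size_rw;   /* writable data accumulated so far */
        unsigned long core_size_rx;   /* executable code accumulated so far */
        unsigned long got_offset;     /* offset inside the RW region */
        unsigned long plt_offset;     /* offset inside the RX region */
};

static void reserve_got_plt(struct mod_layout *m,
                            unsigned long got_size, unsigned long plt_size)
{
        m->core_size_rw  = ALIGN_UP(m->core_size_rw, 4);
        m->got_offset    = m->core_size_rw;
        m->core_size_rw += got_size;

        m->plt_offset    = m->core_size_rx;
        m->core_size_rx += plt_size;
}

int main(void)
{
        struct mod_layout m = { .core_size_rw = 0x1236, .core_size_rx = 0x4000 };

        reserve_got_plt(&m, 0x100, 0x200);
        printf("GOT at rw+%#lx, PLT at rx+%#lx\n", m.got_offset, m.plt_offset);
        printf("rw = %#lx, rx = %#lx\n", m.core_size_rw, m.core_size_rx);
        return 0;
}
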
3463 diff -urNp linux-2.6.32.45/arch/s390/kernel/setup.c linux-2.6.32.45/arch/s390/kernel/setup.c
3464 --- linux-2.6.32.45/arch/s390/kernel/setup.c 2011-03-27 14:31:47.000000000 -0400
3465 +++ linux-2.6.32.45/arch/s390/kernel/setup.c 2011-04-17 15:56:45.000000000 -0400
3466 @@ -306,9 +306,6 @@ static int __init early_parse_mem(char *
3467 early_param("mem", early_parse_mem);
3468
3469 #ifdef CONFIG_S390_SWITCH_AMODE
3470 -unsigned int switch_amode = 0;
3471 -EXPORT_SYMBOL_GPL(switch_amode);
3472 -
3473 static int set_amode_and_uaccess(unsigned long user_amode,
3474 unsigned long user32_amode)
3475 {
3476 @@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigne
3477 return 0;
3478 }
3479 }
3480 -
3481 -/*
3482 - * Switch kernel/user addressing modes?
3483 - */
3484 -static int __init early_parse_switch_amode(char *p)
3485 -{
3486 - switch_amode = 1;
3487 - return 0;
3488 -}
3489 -early_param("switch_amode", early_parse_switch_amode);
3490 -
3491 #else /* CONFIG_S390_SWITCH_AMODE */
3492 static inline int set_amode_and_uaccess(unsigned long user_amode,
3493 unsigned long user32_amode)
3494 @@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(
3495 }
3496 #endif /* CONFIG_S390_SWITCH_AMODE */
3497
3498 -#ifdef CONFIG_S390_EXEC_PROTECT
3499 -unsigned int s390_noexec = 0;
3500 -EXPORT_SYMBOL_GPL(s390_noexec);
3501 -
3502 -/*
3503 - * Enable execute protection?
3504 - */
3505 -static int __init early_parse_noexec(char *p)
3506 -{
3507 - if (!strncmp(p, "off", 3))
3508 - return 0;
3509 - switch_amode = 1;
3510 - s390_noexec = 1;
3511 - return 0;
3512 -}
3513 -early_param("noexec", early_parse_noexec);
3514 -#endif /* CONFIG_S390_EXEC_PROTECT */
3515 -
3516 static void setup_addressing_mode(void)
3517 {
3518 if (s390_noexec) {
3519 diff -urNp linux-2.6.32.45/arch/s390/mm/mmap.c linux-2.6.32.45/arch/s390/mm/mmap.c
3520 --- linux-2.6.32.45/arch/s390/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3521 +++ linux-2.6.32.45/arch/s390/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
3522 @@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_str
3523 */
3524 if (mmap_is_legacy()) {
3525 mm->mmap_base = TASK_UNMAPPED_BASE;
3526 +
3527 +#ifdef CONFIG_PAX_RANDMMAP
3528 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3529 + mm->mmap_base += mm->delta_mmap;
3530 +#endif
3531 +
3532 mm->get_unmapped_area = arch_get_unmapped_area;
3533 mm->unmap_area = arch_unmap_area;
3534 } else {
3535 mm->mmap_base = mmap_base();
3536 +
3537 +#ifdef CONFIG_PAX_RANDMMAP
3538 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3539 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3540 +#endif
3541 +
3542 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3543 mm->unmap_area = arch_unmap_area_topdown;
3544 }
3545 @@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_str
3546 */
3547 if (mmap_is_legacy()) {
3548 mm->mmap_base = TASK_UNMAPPED_BASE;
3549 +
3550 +#ifdef CONFIG_PAX_RANDMMAP
3551 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3552 + mm->mmap_base += mm->delta_mmap;
3553 +#endif
3554 +
3555 mm->get_unmapped_area = s390_get_unmapped_area;
3556 mm->unmap_area = arch_unmap_area;
3557 } else {
3558 mm->mmap_base = mmap_base();
3559 +
3560 +#ifdef CONFIG_PAX_RANDMMAP
3561 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3562 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3563 +#endif
3564 +
3565 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3566 mm->unmap_area = arch_unmap_area_topdown;
3567 }
3568 diff -urNp linux-2.6.32.45/arch/score/include/asm/system.h linux-2.6.32.45/arch/score/include/asm/system.h
3569 --- linux-2.6.32.45/arch/score/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
3570 +++ linux-2.6.32.45/arch/score/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
3571 @@ -17,7 +17,7 @@ do { \
3572 #define finish_arch_switch(prev) do {} while (0)
3573
3574 typedef void (*vi_handler_t)(void);
3575 -extern unsigned long arch_align_stack(unsigned long sp);
3576 +#define arch_align_stack(x) (x)
3577
3578 #define mb() barrier()
3579 #define rmb() barrier()
3580 diff -urNp linux-2.6.32.45/arch/score/kernel/process.c linux-2.6.32.45/arch/score/kernel/process.c
3581 --- linux-2.6.32.45/arch/score/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
3582 +++ linux-2.6.32.45/arch/score/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
3583 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
3584
3585 return task_pt_regs(task)->cp0_epc;
3586 }
3587 -
3588 -unsigned long arch_align_stack(unsigned long sp)
3589 -{
3590 - return sp;
3591 -}
3592 diff -urNp linux-2.6.32.45/arch/sh/boards/mach-hp6xx/pm.c linux-2.6.32.45/arch/sh/boards/mach-hp6xx/pm.c
3593 --- linux-2.6.32.45/arch/sh/boards/mach-hp6xx/pm.c 2011-03-27 14:31:47.000000000 -0400
3594 +++ linux-2.6.32.45/arch/sh/boards/mach-hp6xx/pm.c 2011-04-17 15:56:45.000000000 -0400
3595 @@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_
3596 return 0;
3597 }
3598
3599 -static struct platform_suspend_ops hp6x0_pm_ops = {
3600 +static const struct platform_suspend_ops hp6x0_pm_ops = {
3601 .enter = hp6x0_pm_enter,
3602 .valid = suspend_valid_only_mem,
3603 };
3604 diff -urNp linux-2.6.32.45/arch/sh/kernel/cpu/sh4/sq.c linux-2.6.32.45/arch/sh/kernel/cpu/sh4/sq.c
3605 --- linux-2.6.32.45/arch/sh/kernel/cpu/sh4/sq.c 2011-03-27 14:31:47.000000000 -0400
3606 +++ linux-2.6.32.45/arch/sh/kernel/cpu/sh4/sq.c 2011-04-17 15:56:46.000000000 -0400
3607 @@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[
3608 NULL,
3609 };
3610
3611 -static struct sysfs_ops sq_sysfs_ops = {
3612 +static const struct sysfs_ops sq_sysfs_ops = {
3613 .show = sq_sysfs_show,
3614 .store = sq_sysfs_store,
3615 };
3616 diff -urNp linux-2.6.32.45/arch/sh/kernel/cpu/shmobile/pm.c linux-2.6.32.45/arch/sh/kernel/cpu/shmobile/pm.c
3617 --- linux-2.6.32.45/arch/sh/kernel/cpu/shmobile/pm.c 2011-03-27 14:31:47.000000000 -0400
3618 +++ linux-2.6.32.45/arch/sh/kernel/cpu/shmobile/pm.c 2011-04-17 15:56:46.000000000 -0400
3619 @@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t s
3620 return 0;
3621 }
3622
3623 -static struct platform_suspend_ops sh_pm_ops = {
3624 +static const struct platform_suspend_ops sh_pm_ops = {
3625 .enter = sh_pm_enter,
3626 .valid = suspend_valid_only_mem,
3627 };
3628 diff -urNp linux-2.6.32.45/arch/sh/kernel/kgdb.c linux-2.6.32.45/arch/sh/kernel/kgdb.c
3629 --- linux-2.6.32.45/arch/sh/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
3630 +++ linux-2.6.32.45/arch/sh/kernel/kgdb.c 2011-04-17 15:56:46.000000000 -0400
3631 @@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
3632 {
3633 }
3634
3635 -struct kgdb_arch arch_kgdb_ops = {
3636 +const struct kgdb_arch arch_kgdb_ops = {
3637 /* Breakpoint instruction: trapa #0x3c */
3638 #ifdef CONFIG_CPU_LITTLE_ENDIAN
3639 .gdb_bpt_instr = { 0x3c, 0xc3 },
3640 diff -urNp linux-2.6.32.45/arch/sh/mm/mmap.c linux-2.6.32.45/arch/sh/mm/mmap.c
3641 --- linux-2.6.32.45/arch/sh/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3642 +++ linux-2.6.32.45/arch/sh/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
3643 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
3644 addr = PAGE_ALIGN(addr);
3645
3646 vma = find_vma(mm, addr);
3647 - if (TASK_SIZE - len >= addr &&
3648 - (!vma || addr + len <= vma->vm_start))
3649 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3650 return addr;
3651 }
3652
3653 @@ -106,7 +105,7 @@ full_search:
3654 }
3655 return -ENOMEM;
3656 }
3657 - if (likely(!vma || addr + len <= vma->vm_start)) {
3658 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3659 /*
3660 * Remember the place where we stopped the search:
3661 */
3662 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
3663 addr = PAGE_ALIGN(addr);
3664
3665 vma = find_vma(mm, addr);
3666 - if (TASK_SIZE - len >= addr &&
3667 - (!vma || addr + len <= vma->vm_start))
3668 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3669 return addr;
3670 }
3671
3672 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
3673 /* make sure it can fit in the remaining address space */
3674 if (likely(addr > len)) {
3675 vma = find_vma(mm, addr-len);
3676 - if (!vma || addr <= vma->vm_start) {
3677 + if (check_heap_stack_gap(vma, addr - len, len)) {
3678 /* remember the address as a hint for next time */
3679 return (mm->free_area_cache = addr-len);
3680 }
3681 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
3682 if (unlikely(mm->mmap_base < len))
3683 goto bottomup;
3684
3685 - addr = mm->mmap_base-len;
3686 - if (do_colour_align)
3687 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3688 + addr = mm->mmap_base - len;
3689
3690 do {
3691 + if (do_colour_align)
3692 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3693 /*
3694 * Lookup failure means no vma is above this address,
3695 * else if new region fits below vma->vm_start,
3696 * return with success:
3697 */
3698 vma = find_vma(mm, addr);
3699 - if (likely(!vma || addr+len <= vma->vm_start)) {
3700 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3701 /* remember the address as a hint for next time */
3702 return (mm->free_area_cache = addr);
3703 }
3704 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
3705 mm->cached_hole_size = vma->vm_start - addr;
3706
3707 /* try just below the current vma->vm_start */
3708 - addr = vma->vm_start-len;
3709 - if (do_colour_align)
3710 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3711 - } while (likely(len < vma->vm_start));
3712 + addr = skip_heap_stack_gap(vma, len);
3713 + } while (!IS_ERR_VALUE(addr));
3714
3715 bottomup:
3716 /*
3717 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/atomic_64.h linux-2.6.32.45/arch/sparc/include/asm/atomic_64.h
3718 --- linux-2.6.32.45/arch/sparc/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
3719 +++ linux-2.6.32.45/arch/sparc/include/asm/atomic_64.h 2011-08-18 23:11:34.000000000 -0400
3720 @@ -14,18 +14,40 @@
3721 #define ATOMIC64_INIT(i) { (i) }
3722
3723 #define atomic_read(v) ((v)->counter)
3724 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3725 +{
3726 + return v->counter;
3727 +}
3728 #define atomic64_read(v) ((v)->counter)
3729 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3730 +{
3731 + return v->counter;
3732 +}
3733
3734 #define atomic_set(v, i) (((v)->counter) = i)
3735 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3736 +{
3737 + v->counter = i;
3738 +}
3739 #define atomic64_set(v, i) (((v)->counter) = i)
3740 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3741 +{
3742 + v->counter = i;
3743 +}
3744
3745 extern void atomic_add(int, atomic_t *);
3746 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3747 extern void atomic64_add(long, atomic64_t *);
3748 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3749 extern void atomic_sub(int, atomic_t *);
3750 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3751 extern void atomic64_sub(long, atomic64_t *);
3752 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3753
3754 extern int atomic_add_ret(int, atomic_t *);
3755 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3756 extern long atomic64_add_ret(long, atomic64_t *);
3757 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3758 extern int atomic_sub_ret(int, atomic_t *);
3759 extern long atomic64_sub_ret(long, atomic64_t *);
3760
3761 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
3762 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3763
3764 #define atomic_inc_return(v) atomic_add_ret(1, v)
3765 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3766 +{
3767 + return atomic_add_ret_unchecked(1, v);
3768 +}
3769 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3770 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3771 +{
3772 + return atomic64_add_ret_unchecked(1, v);
3773 +}
3774
3775 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3776 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3777
3778 #define atomic_add_return(i, v) atomic_add_ret(i, v)
3779 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
3780 +{
3781 + return atomic_add_ret_unchecked(i, v);
3782 +}
3783 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
3784 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
3785 +{
3786 + return atomic64_add_ret_unchecked(i, v);
3787 +}
3788
3789 /*
3790 * atomic_inc_and_test - increment and test
3791 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomi
3792 * other cases.
3793 */
3794 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3795 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
3796 +{
3797 + return atomic_inc_return_unchecked(v) == 0;
3798 +}
3799 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3800
3801 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3802 @@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomi
3803 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3804
3805 #define atomic_inc(v) atomic_add(1, v)
3806 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3807 +{
3808 + atomic_add_unchecked(1, v);
3809 +}
3810 #define atomic64_inc(v) atomic64_add(1, v)
3811 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3812 +{
3813 + atomic64_add_unchecked(1, v);
3814 +}
3815
3816 #define atomic_dec(v) atomic_sub(1, v)
3817 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3818 +{
3819 + atomic_sub_unchecked(1, v);
3820 +}
3821 #define atomic64_dec(v) atomic64_sub(1, v)
3822 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3823 +{
3824 + atomic64_sub_unchecked(1, v);
3825 +}
3826
3827 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3828 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3829
3830 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3831 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
3832 +{
3833 + return cmpxchg(&v->counter, old, new);
3834 +}
3835 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3836 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
3837 +{
3838 + return xchg(&v->counter, new);
3839 +}
3840
3841 static inline int atomic_add_unless(atomic_t *v, int a, int u)
3842 {
3843 - int c, old;
3844 + int c, old, new;
3845 c = atomic_read(v);
3846 for (;;) {
3847 - if (unlikely(c == (u)))
3848 + if (unlikely(c == u))
3849 break;
3850 - old = atomic_cmpxchg((v), c, c + (a));
3851 +
3852 + asm volatile("addcc %2, %0, %0\n"
3853 +
3854 +#ifdef CONFIG_PAX_REFCOUNT
3855 + "tvs %%icc, 6\n"
3856 +#endif
3857 +
3858 + : "=r" (new)
3859 + : "0" (c), "ir" (a)
3860 + : "cc");
3861 +
3862 + old = atomic_cmpxchg(v, c, new);
3863 if (likely(old == c))
3864 break;
3865 c = old;
3866 }
3867 - return c != (u);
3868 + return c != u;
3869 }
3870
3871 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
3872 @@ -90,20 +167,35 @@ static inline int atomic_add_unless(atom
3873 #define atomic64_cmpxchg(v, o, n) \
3874 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
3875 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
3876 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
3877 +{
3878 + return xchg(&v->counter, new);
3879 +}
3880
3881 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3882 {
3883 - long c, old;
3884 + long c, old, new;
3885 c = atomic64_read(v);
3886 for (;;) {
3887 - if (unlikely(c == (u)))
3888 + if (unlikely(c == u))
3889 break;
3890 - old = atomic64_cmpxchg((v), c, c + (a));
3891 +
3892 + asm volatile("addcc %2, %0, %0\n"
3893 +
3894 +#ifdef CONFIG_PAX_REFCOUNT
3895 + "tvs %%xcc, 6\n"
3896 +#endif
3897 +
3898 + : "=r" (new)
3899 + : "0" (c), "ir" (a)
3900 + : "cc");
3901 +
3902 + old = atomic64_cmpxchg(v, c, new);
3903 if (likely(old == c))
3904 break;
3905 c = old;
3906 }
3907 - return c != (u);
3908 + return c != u;
3909 }
3910
3911 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
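
Under PAX_REFCOUNT the add inside atomic_add_unless()/atomic64_add_unless() becomes addcc followed by tvs: addcc updates the condition codes and tvs ("trap on overflow set") raises a software trap when the signed addition overflowed, so a reference count can no longer wrap around silently. The new _unchecked variants keep the plain, wrapping behaviour for counters where wrap-around is intentional. A portable C model of the two flavours; the real code relies on the hardware trap rather than an explicit check:

#include <stdio.h>
#include <stdlib.h>

/* checked add: refuses to wrap (the patch traps via "addcc; tvs" instead
 * of calling abort()); unchecked add: deliberately keeps wrapping. */
static int checked_add(int counter, int delta)
{
        int result;

        if (__builtin_add_overflow(counter, delta, &result)) {
                fprintf(stderr, "refcount overflow detected\n");
                abort();                /* stand-in for the hardware trap */
        }
        return result;
}

static int unchecked_add(int counter, int delta)
{
        /* wraps modulo 2^32 (typically landing at INT_MIN for INT_MAX + 1) */
        return (int)((unsigned int)counter + (unsigned int)delta);
}

int main(void)
{
        printf("%d\n", unchecked_add(0x7fffffff, 1));  /* wrapped value */
        printf("%d\n", checked_add(1000, 1));          /* 1001 */
        /* checked_add(0x7fffffff, 1) would abort */
        return 0;
}
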
3912 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/cache.h linux-2.6.32.45/arch/sparc/include/asm/cache.h
3913 --- linux-2.6.32.45/arch/sparc/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
3914 +++ linux-2.6.32.45/arch/sparc/include/asm/cache.h 2011-07-06 19:53:33.000000000 -0400
3915 @@ -8,7 +8,7 @@
3916 #define _SPARC_CACHE_H
3917
3918 #define L1_CACHE_SHIFT 5
3919 -#define L1_CACHE_BYTES 32
3920 +#define L1_CACHE_BYTES 32UL
3921 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
3922
3923 #ifdef CONFIG_SPARC32
3924 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/dma-mapping.h linux-2.6.32.45/arch/sparc/include/asm/dma-mapping.h
3925 --- linux-2.6.32.45/arch/sparc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
3926 +++ linux-2.6.32.45/arch/sparc/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
3927 @@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *d
3928 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
3929 #define dma_is_consistent(d, h) (1)
3930
3931 -extern struct dma_map_ops *dma_ops, pci32_dma_ops;
3932 +extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
3933 extern struct bus_type pci_bus_type;
3934
3935 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3936 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
3937 {
3938 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
3939 if (dev->bus == &pci_bus_type)
3940 @@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dm
3941 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3942 dma_addr_t *dma_handle, gfp_t flag)
3943 {
3944 - struct dma_map_ops *ops = get_dma_ops(dev);
3945 + const struct dma_map_ops *ops = get_dma_ops(dev);
3946 void *cpu_addr;
3947
3948 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
3949 @@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(s
3950 static inline void dma_free_coherent(struct device *dev, size_t size,
3951 void *cpu_addr, dma_addr_t dma_handle)
3952 {
3953 - struct dma_map_ops *ops = get_dma_ops(dev);
3954 + const struct dma_map_ops *ops = get_dma_ops(dev);
3955
3956 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
3957 ops->free_coherent(dev, size, cpu_addr, dma_handle);
3958 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/elf_32.h linux-2.6.32.45/arch/sparc/include/asm/elf_32.h
3959 --- linux-2.6.32.45/arch/sparc/include/asm/elf_32.h 2011-03-27 14:31:47.000000000 -0400
3960 +++ linux-2.6.32.45/arch/sparc/include/asm/elf_32.h 2011-04-17 15:56:46.000000000 -0400
3961 @@ -116,6 +116,13 @@ typedef struct {
3962
3963 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3964
3965 +#ifdef CONFIG_PAX_ASLR
3966 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3967 +
3968 +#define PAX_DELTA_MMAP_LEN 16
3969 +#define PAX_DELTA_STACK_LEN 16
3970 +#endif
3971 +
3972 /* This yields a mask that user programs can use to figure out what
3973 instruction set this cpu supports. This can NOT be done in userspace
3974 on Sparc. */
3975 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/elf_64.h linux-2.6.32.45/arch/sparc/include/asm/elf_64.h
3976 --- linux-2.6.32.45/arch/sparc/include/asm/elf_64.h 2011-03-27 14:31:47.000000000 -0400
3977 +++ linux-2.6.32.45/arch/sparc/include/asm/elf_64.h 2011-04-17 15:56:46.000000000 -0400
3978 @@ -163,6 +163,12 @@ typedef struct {
3979 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3980 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3981
3982 +#ifdef CONFIG_PAX_ASLR
3983 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3984 +
3985 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3986 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3987 +#endif
3988
3989 /* This yields a mask that user programs can use to figure out what
3990 instruction set this cpu supports. */
3991 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/pgtable_32.h linux-2.6.32.45/arch/sparc/include/asm/pgtable_32.h
3992 --- linux-2.6.32.45/arch/sparc/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
3993 +++ linux-2.6.32.45/arch/sparc/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
3994 @@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3995 BTFIXUPDEF_INT(page_none)
3996 BTFIXUPDEF_INT(page_copy)
3997 BTFIXUPDEF_INT(page_readonly)
3998 +
3999 +#ifdef CONFIG_PAX_PAGEEXEC
4000 +BTFIXUPDEF_INT(page_shared_noexec)
4001 +BTFIXUPDEF_INT(page_copy_noexec)
4002 +BTFIXUPDEF_INT(page_readonly_noexec)
4003 +#endif
4004 +
4005 BTFIXUPDEF_INT(page_kernel)
4006
4007 #define PMD_SHIFT SUN4C_PMD_SHIFT
4008 @@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
4009 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
4010 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
4011
4012 +#ifdef CONFIG_PAX_PAGEEXEC
4013 +extern pgprot_t PAGE_SHARED_NOEXEC;
4014 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
4015 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
4016 +#else
4017 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
4018 +# define PAGE_COPY_NOEXEC PAGE_COPY
4019 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
4020 +#endif
4021 +
4022 extern unsigned long page_kernel;
4023
4024 #ifdef MODULE
4025 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.32.45/arch/sparc/include/asm/pgtsrmmu.h
4026 --- linux-2.6.32.45/arch/sparc/include/asm/pgtsrmmu.h 2011-03-27 14:31:47.000000000 -0400
4027 +++ linux-2.6.32.45/arch/sparc/include/asm/pgtsrmmu.h 2011-04-17 15:56:46.000000000 -0400
4028 @@ -115,6 +115,13 @@
4029 SRMMU_EXEC | SRMMU_REF)
4030 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
4031 SRMMU_EXEC | SRMMU_REF)
4032 +
4033 +#ifdef CONFIG_PAX_PAGEEXEC
4034 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
4035 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4036 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4037 +#endif
4038 +
4039 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
4040 SRMMU_DIRTY | SRMMU_REF)
4041
4042 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/spinlock_64.h linux-2.6.32.45/arch/sparc/include/asm/spinlock_64.h
4043 --- linux-2.6.32.45/arch/sparc/include/asm/spinlock_64.h 2011-03-27 14:31:47.000000000 -0400
4044 +++ linux-2.6.32.45/arch/sparc/include/asm/spinlock_64.h 2011-08-18 23:19:30.000000000 -0400
4045 @@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags
4046
4047 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
4048
4049 -static void inline arch_read_lock(raw_rwlock_t *lock)
4050 +static inline void arch_read_lock(raw_rwlock_t *lock)
4051 {
4052 unsigned long tmp1, tmp2;
4053
4054 __asm__ __volatile__ (
4055 "1: ldsw [%2], %0\n"
4056 " brlz,pn %0, 2f\n"
4057 -"4: add %0, 1, %1\n"
4058 +"4: addcc %0, 1, %1\n"
4059 +
4060 +#ifdef CONFIG_PAX_REFCOUNT
4061 +" tvs %%icc, 6\n"
4062 +#endif
4063 +
4064 " cas [%2], %0, %1\n"
4065 " cmp %0, %1\n"
4066 " bne,pn %%icc, 1b\n"
4067 @@ -112,10 +117,10 @@ static void inline arch_read_lock(raw_rw
4068 " .previous"
4069 : "=&r" (tmp1), "=&r" (tmp2)
4070 : "r" (lock)
4071 - : "memory");
4072 + : "memory", "cc");
4073 }
4074
4075 -static int inline arch_read_trylock(raw_rwlock_t *lock)
4076 +static inline int arch_read_trylock(raw_rwlock_t *lock)
4077 {
4078 int tmp1, tmp2;
4079
4080 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_
4081 "1: ldsw [%2], %0\n"
4082 " brlz,a,pn %0, 2f\n"
4083 " mov 0, %0\n"
4084 -" add %0, 1, %1\n"
4085 +" addcc %0, 1, %1\n"
4086 +
4087 +#ifdef CONFIG_PAX_REFCOUNT
4088 +" tvs %%icc, 6\n"
4089 +#endif
4090 +
4091 " cas [%2], %0, %1\n"
4092 " cmp %0, %1\n"
4093 " bne,pn %%icc, 1b\n"
4094 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_
4095 return tmp1;
4096 }
4097
4098 -static void inline arch_read_unlock(raw_rwlock_t *lock)
4099 +static inline void arch_read_unlock(raw_rwlock_t *lock)
4100 {
4101 unsigned long tmp1, tmp2;
4102
4103 __asm__ __volatile__(
4104 "1: lduw [%2], %0\n"
4105 -" sub %0, 1, %1\n"
4106 +" subcc %0, 1, %1\n"
4107 +
4108 +#ifdef CONFIG_PAX_REFCOUNT
4109 +" tvs %%icc, 6\n"
4110 +#endif
4111 +
4112 " cas [%2], %0, %1\n"
4113 " cmp %0, %1\n"
4114 " bne,pn %%xcc, 1b\n"
4115 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_
4116 : "memory");
4117 }
4118
4119 -static void inline arch_write_lock(raw_rwlock_t *lock)
4120 +static inline void arch_write_lock(raw_rwlock_t *lock)
4121 {
4122 unsigned long mask, tmp1, tmp2;
4123
4124 @@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_r
4125 : "memory");
4126 }
4127
4128 -static void inline arch_write_unlock(raw_rwlock_t *lock)
4129 +static inline void arch_write_unlock(raw_rwlock_t *lock)
4130 {
4131 __asm__ __volatile__(
4132 " stw %%g0, [%0]"
4133 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(raw
4134 : "memory");
4135 }
4136
4137 -static int inline arch_write_trylock(raw_rwlock_t *lock)
4138 +static inline int arch_write_trylock(raw_rwlock_t *lock)
4139 {
4140 unsigned long mask, tmp1, tmp2, result;
4141
4142 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/thread_info_32.h linux-2.6.32.45/arch/sparc/include/asm/thread_info_32.h
4143 --- linux-2.6.32.45/arch/sparc/include/asm/thread_info_32.h 2011-03-27 14:31:47.000000000 -0400
4144 +++ linux-2.6.32.45/arch/sparc/include/asm/thread_info_32.h 2011-06-04 20:46:01.000000000 -0400
4145 @@ -50,6 +50,8 @@ struct thread_info {
4146 unsigned long w_saved;
4147
4148 struct restart_block restart_block;
4149 +
4150 + unsigned long lowest_stack;
4151 };
4152
4153 /*
4154 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/thread_info_64.h linux-2.6.32.45/arch/sparc/include/asm/thread_info_64.h
4155 --- linux-2.6.32.45/arch/sparc/include/asm/thread_info_64.h 2011-03-27 14:31:47.000000000 -0400
4156 +++ linux-2.6.32.45/arch/sparc/include/asm/thread_info_64.h 2011-06-04 20:46:21.000000000 -0400
4157 @@ -68,6 +68,8 @@ struct thread_info {
4158 struct pt_regs *kern_una_regs;
4159 unsigned int kern_una_insn;
4160
4161 + unsigned long lowest_stack;
4162 +
4163 unsigned long fpregs[0] __attribute__ ((aligned(64)));
4164 };
4165
4166 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/uaccess_32.h linux-2.6.32.45/arch/sparc/include/asm/uaccess_32.h
4167 --- linux-2.6.32.45/arch/sparc/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
4168 +++ linux-2.6.32.45/arch/sparc/include/asm/uaccess_32.h 2011-04-17 15:56:46.000000000 -0400
4169 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
4170
4171 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4172 {
4173 - if (n && __access_ok((unsigned long) to, n))
4174 + if ((long)n < 0)
4175 + return n;
4176 +
4177 + if (n && __access_ok((unsigned long) to, n)) {
4178 + if (!__builtin_constant_p(n))
4179 + check_object_size(from, n, true);
4180 return __copy_user(to, (__force void __user *) from, n);
4181 - else
4182 + } else
4183 return n;
4184 }
4185
4186 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
4187 {
4188 + if ((long)n < 0)
4189 + return n;
4190 +
4191 + if (!__builtin_constant_p(n))
4192 + check_object_size(from, n, true);
4193 +
4194 return __copy_user(to, (__force void __user *) from, n);
4195 }
4196
4197 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4198 {
4199 - if (n && __access_ok((unsigned long) from, n))
4200 + if ((long)n < 0)
4201 + return n;
4202 +
4203 + if (n && __access_ok((unsigned long) from, n)) {
4204 + if (!__builtin_constant_p(n))
4205 + check_object_size(to, n, false);
4206 return __copy_user((__force void __user *) to, from, n);
4207 - else
4208 + } else
4209 return n;
4210 }
4211
4212 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
4213 {
4214 + if ((long)n < 0)
4215 + return n;
4216 +
4217 return __copy_user((__force void __user *) to, from, n);
4218 }
4219
4220 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/uaccess_64.h linux-2.6.32.45/arch/sparc/include/asm/uaccess_64.h
4221 --- linux-2.6.32.45/arch/sparc/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
4222 +++ linux-2.6.32.45/arch/sparc/include/asm/uaccess_64.h 2011-04-17 15:56:46.000000000 -0400
4223 @@ -9,6 +9,7 @@
4224 #include <linux/compiler.h>
4225 #include <linux/string.h>
4226 #include <linux/thread_info.h>
4227 +#include <linux/kernel.h>
4228 #include <asm/asi.h>
4229 #include <asm/system.h>
4230 #include <asm/spitfire.h>
4231 @@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixu
4232 static inline unsigned long __must_check
4233 copy_from_user(void *to, const void __user *from, unsigned long size)
4234 {
4235 - unsigned long ret = ___copy_from_user(to, from, size);
4236 + unsigned long ret;
4237
4238 + if ((long)size < 0 || size > INT_MAX)
4239 + return size;
4240 +
4241 + if (!__builtin_constant_p(size))
4242 + check_object_size(to, size, false);
4243 +
4244 + ret = ___copy_from_user(to, from, size);
4245 if (unlikely(ret))
4246 ret = copy_from_user_fixup(to, from, size);
4247 return ret;
4248 @@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(
4249 static inline unsigned long __must_check
4250 copy_to_user(void __user *to, const void *from, unsigned long size)
4251 {
4252 - unsigned long ret = ___copy_to_user(to, from, size);
4253 + unsigned long ret;
4254 +
4255 + if ((long)size < 0 || size > INT_MAX)
4256 + return size;
4257 +
4258 + if (!__builtin_constant_p(size))
4259 + check_object_size(from, size, true);
4260
4261 + ret = ___copy_to_user(to, from, size);
4262 if (unlikely(ret))
4263 ret = copy_to_user_fixup(to, from, size);
4264 return ret;
4265 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/uaccess.h linux-2.6.32.45/arch/sparc/include/asm/uaccess.h
4266 --- linux-2.6.32.45/arch/sparc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
4267 +++ linux-2.6.32.45/arch/sparc/include/asm/uaccess.h 2011-04-17 15:56:46.000000000 -0400
4268 @@ -1,5 +1,13 @@
4269 #ifndef ___ASM_SPARC_UACCESS_H
4270 #define ___ASM_SPARC_UACCESS_H
4271 +
4272 +#ifdef __KERNEL__
4273 +#ifndef __ASSEMBLY__
4274 +#include <linux/types.h>
4275 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
4276 +#endif
4277 +#endif
4278 +
4279 #if defined(__sparc__) && defined(__arch64__)
4280 #include <asm/uaccess_64.h>
4281 #else
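
uaccess.h now exposes check_object_size(), the hook the two headers above call before copying non-constant lengths; in the PaX USERCOPY scheme it verifies that the kernel-side buffer really spans the requested number of bytes. The real implementation lives elsewhere in this patch; the fragment below is only a simplified model, and demo_object / demo_check_object_size are names invented for the example.

#include <stdio.h>
#include <stdlib.h>

/* Simplified model: a kernel object whose size is known to the allocator. */
struct demo_object {
    const void *base;
    unsigned long size;
};

/* Refuse any copy that starts inside the object but runs past its end. */
static void demo_check_object_size(const struct demo_object *obj,
                                   const void *ptr, unsigned long n)
{
    unsigned long off = (unsigned long)((const char *)ptr - (const char *)obj->base);

    if (off > obj->size || n > obj->size - off) {
        fprintf(stderr, "usercopy: %lu bytes requested, %lu available\n",
                n, obj->size - off);
        abort();
    }
}

int main(void)
{
    char buf[64];
    struct demo_object obj = { buf, sizeof(buf) };

    demo_check_object_size(&obj, buf + 16, 32);   /* fits: bytes 16..48 of 64 */
    demo_check_object_size(&obj, buf + 16, 128);  /* overruns the object: aborts */
    return 0;
}
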
4282 diff -urNp linux-2.6.32.45/arch/sparc/kernel/iommu.c linux-2.6.32.45/arch/sparc/kernel/iommu.c
4283 --- linux-2.6.32.45/arch/sparc/kernel/iommu.c 2011-03-27 14:31:47.000000000 -0400
4284 +++ linux-2.6.32.45/arch/sparc/kernel/iommu.c 2011-04-17 15:56:46.000000000 -0400
4285 @@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struc
4286 spin_unlock_irqrestore(&iommu->lock, flags);
4287 }
4288
4289 -static struct dma_map_ops sun4u_dma_ops = {
4290 +static const struct dma_map_ops sun4u_dma_ops = {
4291 .alloc_coherent = dma_4u_alloc_coherent,
4292 .free_coherent = dma_4u_free_coherent,
4293 .map_page = dma_4u_map_page,
4294 @@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops
4295 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
4296 };
4297
4298 -struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4299 +const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4300 EXPORT_SYMBOL(dma_ops);
4301
4302 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
4303 diff -urNp linux-2.6.32.45/arch/sparc/kernel/ioport.c linux-2.6.32.45/arch/sparc/kernel/ioport.c
4304 --- linux-2.6.32.45/arch/sparc/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
4305 +++ linux-2.6.32.45/arch/sparc/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
4306 @@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(stru
4307 BUG();
4308 }
4309
4310 -struct dma_map_ops sbus_dma_ops = {
4311 +const struct dma_map_ops sbus_dma_ops = {
4312 .alloc_coherent = sbus_alloc_coherent,
4313 .free_coherent = sbus_free_coherent,
4314 .map_page = sbus_map_page,
4315 @@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
4316 .sync_sg_for_device = sbus_sync_sg_for_device,
4317 };
4318
4319 -struct dma_map_ops *dma_ops = &sbus_dma_ops;
4320 +const struct dma_map_ops *dma_ops = &sbus_dma_ops;
4321 EXPORT_SYMBOL(dma_ops);
4322
4323 static int __init sparc_register_ioport(void)
4324 @@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(str
4325 }
4326 }
4327
4328 -struct dma_map_ops pci32_dma_ops = {
4329 +const struct dma_map_ops pci32_dma_ops = {
4330 .alloc_coherent = pci32_alloc_coherent,
4331 .free_coherent = pci32_free_coherent,
4332 .map_page = pci32_map_page,
4333 diff -urNp linux-2.6.32.45/arch/sparc/kernel/kgdb_32.c linux-2.6.32.45/arch/sparc/kernel/kgdb_32.c
4334 --- linux-2.6.32.45/arch/sparc/kernel/kgdb_32.c 2011-03-27 14:31:47.000000000 -0400
4335 +++ linux-2.6.32.45/arch/sparc/kernel/kgdb_32.c 2011-04-17 15:56:46.000000000 -0400
4336 @@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
4337 {
4338 }
4339
4340 -struct kgdb_arch arch_kgdb_ops = {
4341 +const struct kgdb_arch arch_kgdb_ops = {
4342 /* Breakpoint instruction: ta 0x7d */
4343 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
4344 };
4345 diff -urNp linux-2.6.32.45/arch/sparc/kernel/kgdb_64.c linux-2.6.32.45/arch/sparc/kernel/kgdb_64.c
4346 --- linux-2.6.32.45/arch/sparc/kernel/kgdb_64.c 2011-03-27 14:31:47.000000000 -0400
4347 +++ linux-2.6.32.45/arch/sparc/kernel/kgdb_64.c 2011-04-17 15:56:46.000000000 -0400
4348 @@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
4349 {
4350 }
4351
4352 -struct kgdb_arch arch_kgdb_ops = {
4353 +const struct kgdb_arch arch_kgdb_ops = {
4354 /* Breakpoint instruction: ta 0x72 */
4355 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
4356 };
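
The four hunks above (iommu.c, ioport.c and the two kgdb files) only add const qualifiers to function-pointer tables such as dma_map_ops and kgdb_arch, so the tables can live in read-only memory and any code that tries to overwrite a handler fails to build. A tiny illustrative example of that effect, using an invented struct ops:

struct ops {
    int (*do_thing)(int);
};

static int do_thing_impl(int x) { return x + 1; }

static const struct ops my_ops = {
    .do_thing = do_thing_impl,
};

int main(void)
{
    /* my_ops.do_thing = 0;  -- would fail to compile: assignment of member
     * 'do_thing' in read-only object */
    return my_ops.do_thing(41) == 42 ? 0 : 1;
}
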
4357 diff -urNp linux-2.6.32.45/arch/sparc/kernel/Makefile linux-2.6.32.45/arch/sparc/kernel/Makefile
4358 --- linux-2.6.32.45/arch/sparc/kernel/Makefile 2011-03-27 14:31:47.000000000 -0400
4359 +++ linux-2.6.32.45/arch/sparc/kernel/Makefile 2011-04-17 15:56:46.000000000 -0400
4360 @@ -3,7 +3,7 @@
4361 #
4362
4363 asflags-y := -ansi
4364 -ccflags-y := -Werror
4365 +#ccflags-y := -Werror
4366
4367 extra-y := head_$(BITS).o
4368 extra-y += init_task.o
4369 diff -urNp linux-2.6.32.45/arch/sparc/kernel/pci_sun4v.c linux-2.6.32.45/arch/sparc/kernel/pci_sun4v.c
4370 --- linux-2.6.32.45/arch/sparc/kernel/pci_sun4v.c 2011-03-27 14:31:47.000000000 -0400
4371 +++ linux-2.6.32.45/arch/sparc/kernel/pci_sun4v.c 2011-04-17 15:56:46.000000000 -0400
4372 @@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct devic
4373 spin_unlock_irqrestore(&iommu->lock, flags);
4374 }
4375
4376 -static struct dma_map_ops sun4v_dma_ops = {
4377 +static const struct dma_map_ops sun4v_dma_ops = {
4378 .alloc_coherent = dma_4v_alloc_coherent,
4379 .free_coherent = dma_4v_free_coherent,
4380 .map_page = dma_4v_map_page,
4381 diff -urNp linux-2.6.32.45/arch/sparc/kernel/process_32.c linux-2.6.32.45/arch/sparc/kernel/process_32.c
4382 --- linux-2.6.32.45/arch/sparc/kernel/process_32.c 2011-03-27 14:31:47.000000000 -0400
4383 +++ linux-2.6.32.45/arch/sparc/kernel/process_32.c 2011-04-17 15:56:46.000000000 -0400
4384 @@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
4385 rw->ins[4], rw->ins[5],
4386 rw->ins[6],
4387 rw->ins[7]);
4388 - printk("%pS\n", (void *) rw->ins[7]);
4389 + printk("%pA\n", (void *) rw->ins[7]);
4390 rw = (struct reg_window32 *) rw->ins[6];
4391 }
4392 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
4393 @@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
4394
4395 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
4396 r->psr, r->pc, r->npc, r->y, print_tainted());
4397 - printk("PC: <%pS>\n", (void *) r->pc);
4398 + printk("PC: <%pA>\n", (void *) r->pc);
4399 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4400 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
4401 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
4402 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4403 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
4404 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
4405 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
4406 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
4407
4408 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4409 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
4410 @@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk,
4411 rw = (struct reg_window32 *) fp;
4412 pc = rw->ins[7];
4413 printk("[%08lx : ", pc);
4414 - printk("%pS ] ", (void *) pc);
4415 + printk("%pA ] ", (void *) pc);
4416 fp = rw->ins[6];
4417 } while (++count < 16);
4418 printk("\n");
4419 diff -urNp linux-2.6.32.45/arch/sparc/kernel/process_64.c linux-2.6.32.45/arch/sparc/kernel/process_64.c
4420 --- linux-2.6.32.45/arch/sparc/kernel/process_64.c 2011-03-27 14:31:47.000000000 -0400
4421 +++ linux-2.6.32.45/arch/sparc/kernel/process_64.c 2011-04-17 15:56:46.000000000 -0400
4422 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
4423 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
4424 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
4425 if (regs->tstate & TSTATE_PRIV)
4426 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
4427 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
4428 }
4429
4430 void show_regs(struct pt_regs *regs)
4431 {
4432 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
4433 regs->tpc, regs->tnpc, regs->y, print_tainted());
4434 - printk("TPC: <%pS>\n", (void *) regs->tpc);
4435 + printk("TPC: <%pA>\n", (void *) regs->tpc);
4436 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
4437 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
4438 regs->u_regs[3]);
4439 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
4440 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
4441 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
4442 regs->u_regs[15]);
4443 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
4444 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
4445 show_regwindow(regs);
4446 }
4447
4448 @@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void
4449 ((tp && tp->task) ? tp->task->pid : -1));
4450
4451 if (gp->tstate & TSTATE_PRIV) {
4452 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
4453 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
4454 (void *) gp->tpc,
4455 (void *) gp->o7,
4456 (void *) gp->i7,
4457 diff -urNp linux-2.6.32.45/arch/sparc/kernel/sys_sparc_32.c linux-2.6.32.45/arch/sparc/kernel/sys_sparc_32.c
4458 --- linux-2.6.32.45/arch/sparc/kernel/sys_sparc_32.c 2011-03-27 14:31:47.000000000 -0400
4459 +++ linux-2.6.32.45/arch/sparc/kernel/sys_sparc_32.c 2011-04-17 15:56:46.000000000 -0400
4460 @@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(str
4461 if (ARCH_SUN4C && len > 0x20000000)
4462 return -ENOMEM;
4463 if (!addr)
4464 - addr = TASK_UNMAPPED_BASE;
4465 + addr = current->mm->mmap_base;
4466
4467 if (flags & MAP_SHARED)
4468 addr = COLOUR_ALIGN(addr);
4469 @@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
4470 }
4471 if (TASK_SIZE - PAGE_SIZE - len < addr)
4472 return -ENOMEM;
4473 - if (!vmm || addr + len <= vmm->vm_start)
4474 + if (check_heap_stack_gap(vmm, addr, len))
4475 return addr;
4476 addr = vmm->vm_end;
4477 if (flags & MAP_SHARED)
4478 diff -urNp linux-2.6.32.45/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.45/arch/sparc/kernel/sys_sparc_64.c
4479 --- linux-2.6.32.45/arch/sparc/kernel/sys_sparc_64.c 2011-03-27 14:31:47.000000000 -0400
4480 +++ linux-2.6.32.45/arch/sparc/kernel/sys_sparc_64.c 2011-04-17 15:56:46.000000000 -0400
4481 @@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(str
4482 /* We do not accept a shared mapping if it would violate
4483 * cache aliasing constraints.
4484 */
4485 - if ((flags & MAP_SHARED) &&
4486 + if ((filp || (flags & MAP_SHARED)) &&
4487 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4488 return -EINVAL;
4489 return addr;
4490 @@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(str
4491 if (filp || (flags & MAP_SHARED))
4492 do_color_align = 1;
4493
4494 +#ifdef CONFIG_PAX_RANDMMAP
4495 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4496 +#endif
4497 +
4498 if (addr) {
4499 if (do_color_align)
4500 addr = COLOUR_ALIGN(addr, pgoff);
4501 @@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(str
4502 addr = PAGE_ALIGN(addr);
4503
4504 vma = find_vma(mm, addr);
4505 - if (task_size - len >= addr &&
4506 - (!vma || addr + len <= vma->vm_start))
4507 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4508 return addr;
4509 }
4510
4511 if (len > mm->cached_hole_size) {
4512 - start_addr = addr = mm->free_area_cache;
4513 + start_addr = addr = mm->free_area_cache;
4514 } else {
4515 - start_addr = addr = TASK_UNMAPPED_BASE;
4516 + start_addr = addr = mm->mmap_base;
4517 mm->cached_hole_size = 0;
4518 }
4519
4520 @@ -175,14 +178,14 @@ full_search:
4521 vma = find_vma(mm, VA_EXCLUDE_END);
4522 }
4523 if (unlikely(task_size < addr)) {
4524 - if (start_addr != TASK_UNMAPPED_BASE) {
4525 - start_addr = addr = TASK_UNMAPPED_BASE;
4526 + if (start_addr != mm->mmap_base) {
4527 + start_addr = addr = mm->mmap_base;
4528 mm->cached_hole_size = 0;
4529 goto full_search;
4530 }
4531 return -ENOMEM;
4532 }
4533 - if (likely(!vma || addr + len <= vma->vm_start)) {
4534 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4535 /*
4536 * Remember the place where we stopped the search:
4537 */
4538 @@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct fi
4539 /* We do not accept a shared mapping if it would violate
4540 * cache aliasing constraints.
4541 */
4542 - if ((flags & MAP_SHARED) &&
4543 + if ((filp || (flags & MAP_SHARED)) &&
4544 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4545 return -EINVAL;
4546 return addr;
4547 @@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct fi
4548 addr = PAGE_ALIGN(addr);
4549
4550 vma = find_vma(mm, addr);
4551 - if (task_size - len >= addr &&
4552 - (!vma || addr + len <= vma->vm_start))
4553 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4554 return addr;
4555 }
4556
4557 @@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct fi
4558 /* make sure it can fit in the remaining address space */
4559 if (likely(addr > len)) {
4560 vma = find_vma(mm, addr-len);
4561 - if (!vma || addr <= vma->vm_start) {
4562 + if (check_heap_stack_gap(vma, addr - len, len)) {
4563 /* remember the address as a hint for next time */
4564 return (mm->free_area_cache = addr-len);
4565 }
4566 @@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct fi
4567 if (unlikely(mm->mmap_base < len))
4568 goto bottomup;
4569
4570 - addr = mm->mmap_base-len;
4571 - if (do_color_align)
4572 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4573 + addr = mm->mmap_base - len;
4574
4575 do {
4576 + if (do_color_align)
4577 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4578 /*
4579 * Lookup failure means no vma is above this address,
4580 * else if new region fits below vma->vm_start,
4581 * return with success:
4582 */
4583 vma = find_vma(mm, addr);
4584 - if (likely(!vma || addr+len <= vma->vm_start)) {
4585 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4586 /* remember the address as a hint for next time */
4587 return (mm->free_area_cache = addr);
4588 }
4589 @@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct fi
4590 mm->cached_hole_size = vma->vm_start - addr;
4591
4592 /* try just below the current vma->vm_start */
4593 - addr = vma->vm_start-len;
4594 - if (do_color_align)
4595 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4596 - } while (likely(len < vma->vm_start));
4597 + addr = skip_heap_stack_gap(vma, len);
4598 + } while (!IS_ERR_VALUE(addr));
4599
4600 bottomup:
4601 /*
4602 @@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_str
4603 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
4604 sysctl_legacy_va_layout) {
4605 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4606 +
4607 +#ifdef CONFIG_PAX_RANDMMAP
4608 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4609 + mm->mmap_base += mm->delta_mmap;
4610 +#endif
4611 +
4612 mm->get_unmapped_area = arch_get_unmapped_area;
4613 mm->unmap_area = arch_unmap_area;
4614 } else {
4615 @@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_str
4616 gap = (task_size / 6 * 5);
4617
4618 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4619 +
4620 +#ifdef CONFIG_PAX_RANDMMAP
4621 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4622 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4623 +#endif
4624 +
4625 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4626 mm->unmap_area = arch_unmap_area_topdown;
4627 }
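
Throughout sys_sparc_64.c (and the 32-bit variant before it) the open-coded "!vma || addr + len <= vma->vm_start" tests are replaced by check_heap_stack_gap(), which additionally keeps new mappings away from a stack that could still grow toward them, and the search now starts at mm->mmap_base so the PAX_RANDMMAP delta_mmap offset added in arch_pick_mmap_layout() takes effect. The helper itself is defined elsewhere in this patch; the snippet below is only a stand-alone model, and the DEMO_GAP size and grows_down flag are assumptions made for the demonstration.

#include <stdbool.h>

struct vma_model {
    unsigned long vm_start;
    bool grows_down;                 /* stands in for VM_GROWSDOWN */
};

#define DEMO_GAP (64UL * 1024UL)     /* assumed guard gap below a stack vma */

static bool demo_check_heap_stack_gap(const struct vma_model *vma,
                                      unsigned long addr, unsigned long len)
{
    if (!vma)
        return true;                                    /* nothing above: fits */
    if (vma->grows_down)
        return addr + len + DEMO_GAP <= vma->vm_start;  /* stay clear of the stack */
    return addr + len <= vma->vm_start;                 /* plain overlap test */
}

int main(void)
{
    struct vma_model stack = { 0x7f0000000000UL, true };
    bool ok  = demo_check_heap_stack_gap(&stack, 0x7effff000000UL, 4096);
    bool bad = demo_check_heap_stack_gap(&stack, stack.vm_start - 4096, 4096);

    return (ok && !bad) ? 0 : 1;     /* 0: the gap check behaves as described */
}
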
4628 diff -urNp linux-2.6.32.45/arch/sparc/kernel/traps_32.c linux-2.6.32.45/arch/sparc/kernel/traps_32.c
4629 --- linux-2.6.32.45/arch/sparc/kernel/traps_32.c 2011-03-27 14:31:47.000000000 -0400
4630 +++ linux-2.6.32.45/arch/sparc/kernel/traps_32.c 2011-06-13 21:25:39.000000000 -0400
4631 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
4632 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
4633 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
4634
4635 +extern void gr_handle_kernel_exploit(void);
4636 +
4637 void die_if_kernel(char *str, struct pt_regs *regs)
4638 {
4639 static int die_counter;
4640 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
4641 count++ < 30 &&
4642 (((unsigned long) rw) >= PAGE_OFFSET) &&
4643 !(((unsigned long) rw) & 0x7)) {
4644 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
4645 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
4646 (void *) rw->ins[7]);
4647 rw = (struct reg_window32 *)rw->ins[6];
4648 }
4649 }
4650 printk("Instruction DUMP:");
4651 instruction_dump ((unsigned long *) regs->pc);
4652 - if(regs->psr & PSR_PS)
4653 + if(regs->psr & PSR_PS) {
4654 + gr_handle_kernel_exploit();
4655 do_exit(SIGKILL);
4656 + }
4657 do_exit(SIGSEGV);
4658 }
4659
4660 diff -urNp linux-2.6.32.45/arch/sparc/kernel/traps_64.c linux-2.6.32.45/arch/sparc/kernel/traps_64.c
4661 --- linux-2.6.32.45/arch/sparc/kernel/traps_64.c 2011-03-27 14:31:47.000000000 -0400
4662 +++ linux-2.6.32.45/arch/sparc/kernel/traps_64.c 2011-06-13 21:24:11.000000000 -0400
4663 @@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_
4664 i + 1,
4665 p->trapstack[i].tstate, p->trapstack[i].tpc,
4666 p->trapstack[i].tnpc, p->trapstack[i].tt);
4667 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4668 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4669 }
4670 }
4671
4672 @@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long
4673
4674 lvl -= 0x100;
4675 if (regs->tstate & TSTATE_PRIV) {
4676 +
4677 +#ifdef CONFIG_PAX_REFCOUNT
4678 + if (lvl == 6)
4679 + pax_report_refcount_overflow(regs);
4680 +#endif
4681 +
4682 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4683 die_if_kernel(buffer, regs);
4684 }
4685 @@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long
4686 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4687 {
4688 char buffer[32];
4689 -
4690 +
4691 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4692 0, lvl, SIGTRAP) == NOTIFY_STOP)
4693 return;
4694
4695 +#ifdef CONFIG_PAX_REFCOUNT
4696 + if (lvl == 6)
4697 + pax_report_refcount_overflow(regs);
4698 +#endif
4699 +
4700 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4701
4702 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4703 @@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt
4704 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4705 printk("%s" "ERROR(%d): ",
4706 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4707 - printk("TPC<%pS>\n", (void *) regs->tpc);
4708 + printk("TPC<%pA>\n", (void *) regs->tpc);
4709 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4710 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4711 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4712 @@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type,
4713 smp_processor_id(),
4714 (type & 0x1) ? 'I' : 'D',
4715 regs->tpc);
4716 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4717 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4718 panic("Irrecoverable Cheetah+ parity error.");
4719 }
4720
4721 @@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type,
4722 smp_processor_id(),
4723 (type & 0x1) ? 'I' : 'D',
4724 regs->tpc);
4725 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4726 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4727 }
4728
4729 struct sun4v_error_entry {
4730 @@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_r
4731
4732 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4733 regs->tpc, tl);
4734 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4735 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4736 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4737 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4738 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4739 (void *) regs->u_regs[UREG_I7]);
4740 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4741 "pte[%lx] error[%lx]\n",
4742 @@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_r
4743
4744 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4745 regs->tpc, tl);
4746 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4747 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4748 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4749 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4750 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4751 (void *) regs->u_regs[UREG_I7]);
4752 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4753 "pte[%lx] error[%lx]\n",
4754 @@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk,
4755 fp = (unsigned long)sf->fp + STACK_BIAS;
4756 }
4757
4758 - printk(" [%016lx] %pS\n", pc, (void *) pc);
4759 + printk(" [%016lx] %pA\n", pc, (void *) pc);
4760 } while (++count < 16);
4761 }
4762
4763 @@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_
4764 return (struct reg_window *) (fp + STACK_BIAS);
4765 }
4766
4767 +extern void gr_handle_kernel_exploit(void);
4768 +
4769 void die_if_kernel(char *str, struct pt_regs *regs)
4770 {
4771 static int die_counter;
4772 @@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_
4773 while (rw &&
4774 count++ < 30&&
4775 is_kernel_stack(current, rw)) {
4776 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
4777 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
4778 (void *) rw->ins[7]);
4779
4780 rw = kernel_stack_up(rw);
4781 @@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_
4782 }
4783 user_instruction_dump ((unsigned int __user *) regs->tpc);
4784 }
4785 - if (regs->tstate & TSTATE_PRIV)
4786 + if (regs->tstate & TSTATE_PRIV) {
4787 + gr_handle_kernel_exploit();
4788 do_exit(SIGKILL);
4789 + }
4790 +
4791 do_exit(SIGSEGV);
4792 }
4793 EXPORT_SYMBOL(die_if_kernel);
4794 diff -urNp linux-2.6.32.45/arch/sparc/kernel/una_asm_64.S linux-2.6.32.45/arch/sparc/kernel/una_asm_64.S
4795 --- linux-2.6.32.45/arch/sparc/kernel/una_asm_64.S 2011-03-27 14:31:47.000000000 -0400
4796 +++ linux-2.6.32.45/arch/sparc/kernel/una_asm_64.S 2011-07-13 22:20:05.000000000 -0400
4797 @@ -127,7 +127,7 @@ do_int_load:
4798 wr %o5, 0x0, %asi
4799 retl
4800 mov 0, %o0
4801 - .size __do_int_load, .-__do_int_load
4802 + .size do_int_load, .-do_int_load
4803
4804 .section __ex_table,"a"
4805 .word 4b, __retl_efault
4806 diff -urNp linux-2.6.32.45/arch/sparc/kernel/unaligned_64.c linux-2.6.32.45/arch/sparc/kernel/unaligned_64.c
4807 --- linux-2.6.32.45/arch/sparc/kernel/unaligned_64.c 2011-03-27 14:31:47.000000000 -0400
4808 +++ linux-2.6.32.45/arch/sparc/kernel/unaligned_64.c 2011-04-17 15:56:46.000000000 -0400
4809 @@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs
4810 if (count < 5) {
4811 last_time = jiffies;
4812 count++;
4813 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
4814 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
4815 regs->tpc, (void *) regs->tpc);
4816 }
4817 }
4818 diff -urNp linux-2.6.32.45/arch/sparc/lib/atomic_64.S linux-2.6.32.45/arch/sparc/lib/atomic_64.S
4819 --- linux-2.6.32.45/arch/sparc/lib/atomic_64.S 2011-03-27 14:31:47.000000000 -0400
4820 +++ linux-2.6.32.45/arch/sparc/lib/atomic_64.S 2011-04-17 15:56:46.000000000 -0400
4821 @@ -18,7 +18,12 @@
4822 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4823 BACKOFF_SETUP(%o2)
4824 1: lduw [%o1], %g1
4825 - add %g1, %o0, %g7
4826 + addcc %g1, %o0, %g7
4827 +
4828 +#ifdef CONFIG_PAX_REFCOUNT
4829 + tvs %icc, 6
4830 +#endif
4831 +
4832 cas [%o1], %g1, %g7
4833 cmp %g1, %g7
4834 bne,pn %icc, 2f
4835 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
4836 2: BACKOFF_SPIN(%o2, %o3, 1b)
4837 .size atomic_add, .-atomic_add
4838
4839 + .globl atomic_add_unchecked
4840 + .type atomic_add_unchecked,#function
4841 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4842 + BACKOFF_SETUP(%o2)
4843 +1: lduw [%o1], %g1
4844 + add %g1, %o0, %g7
4845 + cas [%o1], %g1, %g7
4846 + cmp %g1, %g7
4847 + bne,pn %icc, 2f
4848 + nop
4849 + retl
4850 + nop
4851 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4852 + .size atomic_add_unchecked, .-atomic_add_unchecked
4853 +
4854 .globl atomic_sub
4855 .type atomic_sub,#function
4856 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4857 BACKOFF_SETUP(%o2)
4858 1: lduw [%o1], %g1
4859 - sub %g1, %o0, %g7
4860 + subcc %g1, %o0, %g7
4861 +
4862 +#ifdef CONFIG_PAX_REFCOUNT
4863 + tvs %icc, 6
4864 +#endif
4865 +
4866 cas [%o1], %g1, %g7
4867 cmp %g1, %g7
4868 bne,pn %icc, 2f
4869 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
4870 2: BACKOFF_SPIN(%o2, %o3, 1b)
4871 .size atomic_sub, .-atomic_sub
4872
4873 + .globl atomic_sub_unchecked
4874 + .type atomic_sub_unchecked,#function
4875 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4876 + BACKOFF_SETUP(%o2)
4877 +1: lduw [%o1], %g1
4878 + sub %g1, %o0, %g7
4879 + cas [%o1], %g1, %g7
4880 + cmp %g1, %g7
4881 + bne,pn %icc, 2f
4882 + nop
4883 + retl
4884 + nop
4885 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4886 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
4887 +
4888 .globl atomic_add_ret
4889 .type atomic_add_ret,#function
4890 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4891 BACKOFF_SETUP(%o2)
4892 1: lduw [%o1], %g1
4893 - add %g1, %o0, %g7
4894 + addcc %g1, %o0, %g7
4895 +
4896 +#ifdef CONFIG_PAX_REFCOUNT
4897 + tvs %icc, 6
4898 +#endif
4899 +
4900 cas [%o1], %g1, %g7
4901 cmp %g1, %g7
4902 bne,pn %icc, 2f
4903 @@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1
4904 2: BACKOFF_SPIN(%o2, %o3, 1b)
4905 .size atomic_add_ret, .-atomic_add_ret
4906
4907 + .globl atomic_add_ret_unchecked
4908 + .type atomic_add_ret_unchecked,#function
4909 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4910 + BACKOFF_SETUP(%o2)
4911 +1: lduw [%o1], %g1
4912 + addcc %g1, %o0, %g7
4913 + cas [%o1], %g1, %g7
4914 + cmp %g1, %g7
4915 + bne,pn %icc, 2f
4916 + add %g7, %o0, %g7
4917 + sra %g7, 0, %o0
4918 + retl
4919 + nop
4920 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4921 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4922 +
4923 .globl atomic_sub_ret
4924 .type atomic_sub_ret,#function
4925 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4926 BACKOFF_SETUP(%o2)
4927 1: lduw [%o1], %g1
4928 - sub %g1, %o0, %g7
4929 + subcc %g1, %o0, %g7
4930 +
4931 +#ifdef CONFIG_PAX_REFCOUNT
4932 + tvs %icc, 6
4933 +#endif
4934 +
4935 cas [%o1], %g1, %g7
4936 cmp %g1, %g7
4937 bne,pn %icc, 2f
4938 @@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
4939 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4940 BACKOFF_SETUP(%o2)
4941 1: ldx [%o1], %g1
4942 - add %g1, %o0, %g7
4943 + addcc %g1, %o0, %g7
4944 +
4945 +#ifdef CONFIG_PAX_REFCOUNT
4946 + tvs %xcc, 6
4947 +#endif
4948 +
4949 casx [%o1], %g1, %g7
4950 cmp %g1, %g7
4951 bne,pn %xcc, 2f
4952 @@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 =
4953 2: BACKOFF_SPIN(%o2, %o3, 1b)
4954 .size atomic64_add, .-atomic64_add
4955
4956 + .globl atomic64_add_unchecked
4957 + .type atomic64_add_unchecked,#function
4958 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4959 + BACKOFF_SETUP(%o2)
4960 +1: ldx [%o1], %g1
4961 + addcc %g1, %o0, %g7
4962 + casx [%o1], %g1, %g7
4963 + cmp %g1, %g7
4964 + bne,pn %xcc, 2f
4965 + nop
4966 + retl
4967 + nop
4968 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4969 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
4970 +
4971 .globl atomic64_sub
4972 .type atomic64_sub,#function
4973 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4974 BACKOFF_SETUP(%o2)
4975 1: ldx [%o1], %g1
4976 - sub %g1, %o0, %g7
4977 + subcc %g1, %o0, %g7
4978 +
4979 +#ifdef CONFIG_PAX_REFCOUNT
4980 + tvs %xcc, 6
4981 +#endif
4982 +
4983 casx [%o1], %g1, %g7
4984 cmp %g1, %g7
4985 bne,pn %xcc, 2f
4986 @@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
4987 2: BACKOFF_SPIN(%o2, %o3, 1b)
4988 .size atomic64_sub, .-atomic64_sub
4989
4990 + .globl atomic64_sub_unchecked
4991 + .type atomic64_sub_unchecked,#function
4992 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4993 + BACKOFF_SETUP(%o2)
4994 +1: ldx [%o1], %g1
4995 + subcc %g1, %o0, %g7
4996 + casx [%o1], %g1, %g7
4997 + cmp %g1, %g7
4998 + bne,pn %xcc, 2f
4999 + nop
5000 + retl
5001 + nop
5002 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5003 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
5004 +
5005 .globl atomic64_add_ret
5006 .type atomic64_add_ret,#function
5007 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5008 BACKOFF_SETUP(%o2)
5009 1: ldx [%o1], %g1
5010 - add %g1, %o0, %g7
5011 + addcc %g1, %o0, %g7
5012 +
5013 +#ifdef CONFIG_PAX_REFCOUNT
5014 + tvs %xcc, 6
5015 +#endif
5016 +
5017 casx [%o1], %g1, %g7
5018 cmp %g1, %g7
5019 bne,pn %xcc, 2f
5020 @@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o
5021 2: BACKOFF_SPIN(%o2, %o3, 1b)
5022 .size atomic64_add_ret, .-atomic64_add_ret
5023
5024 + .globl atomic64_add_ret_unchecked
5025 + .type atomic64_add_ret_unchecked,#function
5026 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5027 + BACKOFF_SETUP(%o2)
5028 +1: ldx [%o1], %g1
5029 + addcc %g1, %o0, %g7
5030 + casx [%o1], %g1, %g7
5031 + cmp %g1, %g7
5032 + bne,pn %xcc, 2f
5033 + add %g7, %o0, %g7
5034 + mov %g7, %o0
5035 + retl
5036 + nop
5037 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5038 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
5039 +
5040 .globl atomic64_sub_ret
5041 .type atomic64_sub_ret,#function
5042 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5043 BACKOFF_SETUP(%o2)
5044 1: ldx [%o1], %g1
5045 - sub %g1, %o0, %g7
5046 + subcc %g1, %o0, %g7
5047 +
5048 +#ifdef CONFIG_PAX_REFCOUNT
5049 + tvs %xcc, 6
5050 +#endif
5051 +
5052 casx [%o1], %g1, %g7
5053 cmp %g1, %g7
5054 bne,pn %xcc, 2f
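
In atomic_64.S every add/sub becomes addcc/subcc followed by "tvs %icc, 6" (or %xcc for the 64-bit ops), so a signed overflow of the counter raises trap 6, which the traps_64.c hunks route to pax_report_refcount_overflow(); the new *_unchecked entry points keep the old non-trapping behaviour for counters that are allowed to wrap. The C fragment below only models the idea with a compiler builtin; refcount_add_checked() is an invented name for illustration, not something this patch adds.

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int refcount_add_checked(int *counter, int inc)
{
    int result;

    if (__builtin_add_overflow(*counter, inc, &result)) {
        /* the patched assembly raises trap 6 here (tvs %icc, 6) instead */
        fprintf(stderr, "refcount overflow detected\n");
        abort();
    }
    *counter = result;
    return result;
}

int main(void)
{
    int refs = INT_MAX - 1;

    refcount_add_checked(&refs, 1);   /* fine: reaches INT_MAX */
    refcount_add_checked(&refs, 1);   /* would wrap: detected, aborts */
    return 0;
}
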
5055 diff -urNp linux-2.6.32.45/arch/sparc/lib/ksyms.c linux-2.6.32.45/arch/sparc/lib/ksyms.c
5056 --- linux-2.6.32.45/arch/sparc/lib/ksyms.c 2011-03-27 14:31:47.000000000 -0400
5057 +++ linux-2.6.32.45/arch/sparc/lib/ksyms.c 2011-08-19 23:05:14.000000000 -0400
5058 @@ -144,12 +144,18 @@ EXPORT_SYMBOL(__downgrade_write);
5059
5060 /* Atomic counter implementation. */
5061 EXPORT_SYMBOL(atomic_add);
5062 +EXPORT_SYMBOL(atomic_add_unchecked);
5063 EXPORT_SYMBOL(atomic_add_ret);
5064 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
5065 EXPORT_SYMBOL(atomic_sub);
5066 +EXPORT_SYMBOL(atomic_sub_unchecked);
5067 EXPORT_SYMBOL(atomic_sub_ret);
5068 EXPORT_SYMBOL(atomic64_add);
5069 +EXPORT_SYMBOL(atomic64_add_unchecked);
5070 EXPORT_SYMBOL(atomic64_add_ret);
5071 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
5072 EXPORT_SYMBOL(atomic64_sub);
5073 +EXPORT_SYMBOL(atomic64_sub_unchecked);
5074 EXPORT_SYMBOL(atomic64_sub_ret);
5075
5076 /* Atomic bit operations. */
5077 diff -urNp linux-2.6.32.45/arch/sparc/lib/Makefile linux-2.6.32.45/arch/sparc/lib/Makefile
5078 --- linux-2.6.32.45/arch/sparc/lib/Makefile 2011-03-27 14:31:47.000000000 -0400
5079 +++ linux-2.6.32.45/arch/sparc/lib/Makefile 2011-05-17 19:26:34.000000000 -0400
5080 @@ -2,7 +2,7 @@
5081 #
5082
5083 asflags-y := -ansi -DST_DIV0=0x02
5084 -ccflags-y := -Werror
5085 +#ccflags-y := -Werror
5086
5087 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
5088 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
5089 diff -urNp linux-2.6.32.45/arch/sparc/lib/rwsem_64.S linux-2.6.32.45/arch/sparc/lib/rwsem_64.S
5090 --- linux-2.6.32.45/arch/sparc/lib/rwsem_64.S 2011-03-27 14:31:47.000000000 -0400
5091 +++ linux-2.6.32.45/arch/sparc/lib/rwsem_64.S 2011-04-17 15:56:46.000000000 -0400
5092 @@ -11,7 +11,12 @@
5093 .globl __down_read
5094 __down_read:
5095 1: lduw [%o0], %g1
5096 - add %g1, 1, %g7
5097 + addcc %g1, 1, %g7
5098 +
5099 +#ifdef CONFIG_PAX_REFCOUNT
5100 + tvs %icc, 6
5101 +#endif
5102 +
5103 cas [%o0], %g1, %g7
5104 cmp %g1, %g7
5105 bne,pn %icc, 1b
5106 @@ -33,7 +38,12 @@ __down_read:
5107 .globl __down_read_trylock
5108 __down_read_trylock:
5109 1: lduw [%o0], %g1
5110 - add %g1, 1, %g7
5111 + addcc %g1, 1, %g7
5112 +
5113 +#ifdef CONFIG_PAX_REFCOUNT
5114 + tvs %icc, 6
5115 +#endif
5116 +
5117 cmp %g7, 0
5118 bl,pn %icc, 2f
5119 mov 0, %o1
5120 @@ -51,7 +61,12 @@ __down_write:
5121 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5122 1:
5123 lduw [%o0], %g3
5124 - add %g3, %g1, %g7
5125 + addcc %g3, %g1, %g7
5126 +
5127 +#ifdef CONFIG_PAX_REFCOUNT
5128 + tvs %icc, 6
5129 +#endif
5130 +
5131 cas [%o0], %g3, %g7
5132 cmp %g3, %g7
5133 bne,pn %icc, 1b
5134 @@ -77,7 +92,12 @@ __down_write_trylock:
5135 cmp %g3, 0
5136 bne,pn %icc, 2f
5137 mov 0, %o1
5138 - add %g3, %g1, %g7
5139 + addcc %g3, %g1, %g7
5140 +
5141 +#ifdef CONFIG_PAX_REFCOUNT
5142 + tvs %icc, 6
5143 +#endif
5144 +
5145 cas [%o0], %g3, %g7
5146 cmp %g3, %g7
5147 bne,pn %icc, 1b
5148 @@ -90,7 +110,12 @@ __down_write_trylock:
5149 __up_read:
5150 1:
5151 lduw [%o0], %g1
5152 - sub %g1, 1, %g7
5153 + subcc %g1, 1, %g7
5154 +
5155 +#ifdef CONFIG_PAX_REFCOUNT
5156 + tvs %icc, 6
5157 +#endif
5158 +
5159 cas [%o0], %g1, %g7
5160 cmp %g1, %g7
5161 bne,pn %icc, 1b
5162 @@ -118,7 +143,12 @@ __up_write:
5163 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5164 1:
5165 lduw [%o0], %g3
5166 - sub %g3, %g1, %g7
5167 + subcc %g3, %g1, %g7
5168 +
5169 +#ifdef CONFIG_PAX_REFCOUNT
5170 + tvs %icc, 6
5171 +#endif
5172 +
5173 cas [%o0], %g3, %g7
5174 cmp %g3, %g7
5175 bne,pn %icc, 1b
5176 @@ -143,7 +173,12 @@ __downgrade_write:
5177 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
5178 1:
5179 lduw [%o0], %g3
5180 - sub %g3, %g1, %g7
5181 + subcc %g3, %g1, %g7
5182 +
5183 +#ifdef CONFIG_PAX_REFCOUNT
5184 + tvs %icc, 6
5185 +#endif
5186 +
5187 cas [%o0], %g3, %g7
5188 cmp %g3, %g7
5189 bne,pn %icc, 1b
5190 diff -urNp linux-2.6.32.45/arch/sparc/Makefile linux-2.6.32.45/arch/sparc/Makefile
5191 --- linux-2.6.32.45/arch/sparc/Makefile 2011-03-27 14:31:47.000000000 -0400
5192 +++ linux-2.6.32.45/arch/sparc/Makefile 2011-04-17 15:56:46.000000000 -0400
5193 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
5194 # Export what is needed by arch/sparc/boot/Makefile
5195 export VMLINUX_INIT VMLINUX_MAIN
5196 VMLINUX_INIT := $(head-y) $(init-y)
5197 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
5198 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
5199 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
5200 VMLINUX_MAIN += $(drivers-y) $(net-y)
5201
5202 diff -urNp linux-2.6.32.45/arch/sparc/mm/fault_32.c linux-2.6.32.45/arch/sparc/mm/fault_32.c
5203 --- linux-2.6.32.45/arch/sparc/mm/fault_32.c 2011-03-27 14:31:47.000000000 -0400
5204 +++ linux-2.6.32.45/arch/sparc/mm/fault_32.c 2011-04-17 15:56:46.000000000 -0400
5205 @@ -21,6 +21,9 @@
5206 #include <linux/interrupt.h>
5207 #include <linux/module.h>
5208 #include <linux/kdebug.h>
5209 +#include <linux/slab.h>
5210 +#include <linux/pagemap.h>
5211 +#include <linux/compiler.h>
5212
5213 #include <asm/system.h>
5214 #include <asm/page.h>
5215 @@ -167,6 +170,267 @@ static unsigned long compute_si_addr(str
5216 return safe_compute_effective_address(regs, insn);
5217 }
5218
5219 +#ifdef CONFIG_PAX_PAGEEXEC
5220 +#ifdef CONFIG_PAX_DLRESOLVE
5221 +static void pax_emuplt_close(struct vm_area_struct *vma)
5222 +{
5223 + vma->vm_mm->call_dl_resolve = 0UL;
5224 +}
5225 +
5226 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5227 +{
5228 + unsigned int *kaddr;
5229 +
5230 + vmf->page = alloc_page(GFP_HIGHUSER);
5231 + if (!vmf->page)
5232 + return VM_FAULT_OOM;
5233 +
5234 + kaddr = kmap(vmf->page);
5235 + memset(kaddr, 0, PAGE_SIZE);
5236 + kaddr[0] = 0x9DE3BFA8U; /* save */
5237 + flush_dcache_page(vmf->page);
5238 + kunmap(vmf->page);
5239 + return VM_FAULT_MAJOR;
5240 +}
5241 +
5242 +static const struct vm_operations_struct pax_vm_ops = {
5243 + .close = pax_emuplt_close,
5244 + .fault = pax_emuplt_fault
5245 +};
5246 +
5247 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5248 +{
5249 + int ret;
5250 +
5251 + vma->vm_mm = current->mm;
5252 + vma->vm_start = addr;
5253 + vma->vm_end = addr + PAGE_SIZE;
5254 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5255 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5256 + vma->vm_ops = &pax_vm_ops;
5257 +
5258 + ret = insert_vm_struct(current->mm, vma);
5259 + if (ret)
5260 + return ret;
5261 +
5262 + ++current->mm->total_vm;
5263 + return 0;
5264 +}
5265 +#endif
5266 +
5267 +/*
5268 + * PaX: decide what to do with offenders (regs->pc = fault address)
5269 + *
5270 + * returns 1 when task should be killed
5271 + * 2 when patched PLT trampoline was detected
5272 + * 3 when unpatched PLT trampoline was detected
5273 + */
5274 +static int pax_handle_fetch_fault(struct pt_regs *regs)
5275 +{
5276 +
5277 +#ifdef CONFIG_PAX_EMUPLT
5278 + int err;
5279 +
5280 + do { /* PaX: patched PLT emulation #1 */
5281 + unsigned int sethi1, sethi2, jmpl;
5282 +
5283 + err = get_user(sethi1, (unsigned int *)regs->pc);
5284 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
5285 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
5286 +
5287 + if (err)
5288 + break;
5289 +
5290 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5291 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
5292 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
5293 + {
5294 + unsigned int addr;
5295 +
5296 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5297 + addr = regs->u_regs[UREG_G1];
5298 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5299 + regs->pc = addr;
5300 + regs->npc = addr+4;
5301 + return 2;
5302 + }
5303 + } while (0);
5304 +
5305 + { /* PaX: patched PLT emulation #2 */
5306 + unsigned int ba;
5307 +
5308 + err = get_user(ba, (unsigned int *)regs->pc);
5309 +
5310 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5311 + unsigned int addr;
5312 +
5313 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5314 + regs->pc = addr;
5315 + regs->npc = addr+4;
5316 + return 2;
5317 + }
5318 + }
5319 +
5320 + do { /* PaX: patched PLT emulation #3 */
5321 + unsigned int sethi, jmpl, nop;
5322 +
5323 + err = get_user(sethi, (unsigned int *)regs->pc);
5324 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
5325 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
5326 +
5327 + if (err)
5328 + break;
5329 +
5330 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5331 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5332 + nop == 0x01000000U)
5333 + {
5334 + unsigned int addr;
5335 +
5336 + addr = (sethi & 0x003FFFFFU) << 10;
5337 + regs->u_regs[UREG_G1] = addr;
5338 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5339 + regs->pc = addr;
5340 + regs->npc = addr+4;
5341 + return 2;
5342 + }
5343 + } while (0);
5344 +
5345 + do { /* PaX: unpatched PLT emulation step 1 */
5346 + unsigned int sethi, ba, nop;
5347 +
5348 + err = get_user(sethi, (unsigned int *)regs->pc);
5349 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
5350 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
5351 +
5352 + if (err)
5353 + break;
5354 +
5355 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5356 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5357 + nop == 0x01000000U)
5358 + {
5359 + unsigned int addr, save, call;
5360 +
5361 + if ((ba & 0xFFC00000U) == 0x30800000U)
5362 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5363 + else
5364 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
5365 +
5366 + err = get_user(save, (unsigned int *)addr);
5367 + err |= get_user(call, (unsigned int *)(addr+4));
5368 + err |= get_user(nop, (unsigned int *)(addr+8));
5369 + if (err)
5370 + break;
5371 +
5372 +#ifdef CONFIG_PAX_DLRESOLVE
5373 + if (save == 0x9DE3BFA8U &&
5374 + (call & 0xC0000000U) == 0x40000000U &&
5375 + nop == 0x01000000U)
5376 + {
5377 + struct vm_area_struct *vma;
5378 + unsigned long call_dl_resolve;
5379 +
5380 + down_read(&current->mm->mmap_sem);
5381 + call_dl_resolve = current->mm->call_dl_resolve;
5382 + up_read(&current->mm->mmap_sem);
5383 + if (likely(call_dl_resolve))
5384 + goto emulate;
5385 +
5386 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5387 +
5388 + down_write(&current->mm->mmap_sem);
5389 + if (current->mm->call_dl_resolve) {
5390 + call_dl_resolve = current->mm->call_dl_resolve;
5391 + up_write(&current->mm->mmap_sem);
5392 + if (vma)
5393 + kmem_cache_free(vm_area_cachep, vma);
5394 + goto emulate;
5395 + }
5396 +
5397 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5398 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5399 + up_write(&current->mm->mmap_sem);
5400 + if (vma)
5401 + kmem_cache_free(vm_area_cachep, vma);
5402 + return 1;
5403 + }
5404 +
5405 + if (pax_insert_vma(vma, call_dl_resolve)) {
5406 + up_write(&current->mm->mmap_sem);
5407 + kmem_cache_free(vm_area_cachep, vma);
5408 + return 1;
5409 + }
5410 +
5411 + current->mm->call_dl_resolve = call_dl_resolve;
5412 + up_write(&current->mm->mmap_sem);
5413 +
5414 +emulate:
5415 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5416 + regs->pc = call_dl_resolve;
5417 + regs->npc = addr+4;
5418 + return 3;
5419 + }
5420 +#endif
5421 +
5422 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5423 + if ((save & 0xFFC00000U) == 0x05000000U &&
5424 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5425 + nop == 0x01000000U)
5426 + {
5427 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5428 + regs->u_regs[UREG_G2] = addr + 4;
5429 + addr = (save & 0x003FFFFFU) << 10;
5430 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5431 + regs->pc = addr;
5432 + regs->npc = addr+4;
5433 + return 3;
5434 + }
5435 + }
5436 + } while (0);
5437 +
5438 + do { /* PaX: unpatched PLT emulation step 2 */
5439 + unsigned int save, call, nop;
5440 +
5441 + err = get_user(save, (unsigned int *)(regs->pc-4));
5442 + err |= get_user(call, (unsigned int *)regs->pc);
5443 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
5444 + if (err)
5445 + break;
5446 +
5447 + if (save == 0x9DE3BFA8U &&
5448 + (call & 0xC0000000U) == 0x40000000U &&
5449 + nop == 0x01000000U)
5450 + {
5451 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
5452 +
5453 + regs->u_regs[UREG_RETPC] = regs->pc;
5454 + regs->pc = dl_resolve;
5455 + regs->npc = dl_resolve+4;
5456 + return 3;
5457 + }
5458 + } while (0);
5459 +#endif
5460 +
5461 + return 1;
5462 +}
5463 +
5464 +void pax_report_insns(void *pc, void *sp)
5465 +{
5466 + unsigned long i;
5467 +
5468 + printk(KERN_ERR "PAX: bytes at PC: ");
5469 + for (i = 0; i < 8; i++) {
5470 + unsigned int c;
5471 + if (get_user(c, (unsigned int *)pc+i))
5472 + printk(KERN_CONT "???????? ");
5473 + else
5474 + printk(KERN_CONT "%08x ", c);
5475 + }
5476 + printk("\n");
5477 +}
5478 +#endif
5479 +
5480 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
5481 unsigned long address)
5482 {
5483 @@ -231,6 +495,24 @@ good_area:
5484 if(!(vma->vm_flags & VM_WRITE))
5485 goto bad_area;
5486 } else {
5487 +
5488 +#ifdef CONFIG_PAX_PAGEEXEC
5489 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
5490 + up_read(&mm->mmap_sem);
5491 + switch (pax_handle_fetch_fault(regs)) {
5492 +
5493 +#ifdef CONFIG_PAX_EMUPLT
5494 + case 2:
5495 + case 3:
5496 + return;
5497 +#endif
5498 +
5499 + }
5500 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
5501 + do_group_exit(SIGKILL);
5502 + }
5503 +#endif
5504 +
5505 /* Allow reads even for write-only mappings */
5506 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
5507 goto bad_area;
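
The PAGEEXEC fault handler added to fault_32.c recognises PLT trampolines by matching raw SPARC instruction words and then rebuilds the branch target from the sethi imm22 and jmpl simm13 fields. The stand-alone program below replays that decode on a made-up target address using the same masks that appear in pax_handle_fetch_fault(); it is for illustration only.

#include <stdio.h>

int main(void)
{
    unsigned int target = 0x12345678U;

    /* encode: sethi %hi(target), %g1  and  jmpl %g1 + %lo(target), %g0 */
    unsigned int sethi = 0x03000000U | (target >> 10);          /* imm22 field  */
    unsigned int jmpl  = 0x81C06000U | (target & 0x3FFU);       /* simm13 field */

    /* decode with the masks used in pax_handle_fetch_fault() */
    unsigned int addr = (sethi & 0x003FFFFFU) << 10;
    addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);

    printf("encoded 0x%08x, decoded 0x%08x\n", target, addr);
    return 0;
}
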
5508 diff -urNp linux-2.6.32.45/arch/sparc/mm/fault_64.c linux-2.6.32.45/arch/sparc/mm/fault_64.c
5509 --- linux-2.6.32.45/arch/sparc/mm/fault_64.c 2011-03-27 14:31:47.000000000 -0400
5510 +++ linux-2.6.32.45/arch/sparc/mm/fault_64.c 2011-04-17 15:56:46.000000000 -0400
5511 @@ -20,6 +20,9 @@
5512 #include <linux/kprobes.h>
5513 #include <linux/kdebug.h>
5514 #include <linux/percpu.h>
5515 +#include <linux/slab.h>
5516 +#include <linux/pagemap.h>
5517 +#include <linux/compiler.h>
5518
5519 #include <asm/page.h>
5520 #include <asm/pgtable.h>
5521 @@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs
5522 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
5523 regs->tpc);
5524 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
5525 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
5526 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
5527 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
5528 dump_stack();
5529 unhandled_fault(regs->tpc, current, regs);
5530 @@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_a
5531 show_regs(regs);
5532 }
5533
5534 +#ifdef CONFIG_PAX_PAGEEXEC
5535 +#ifdef CONFIG_PAX_DLRESOLVE
5536 +static void pax_emuplt_close(struct vm_area_struct *vma)
5537 +{
5538 + vma->vm_mm->call_dl_resolve = 0UL;
5539 +}
5540 +
5541 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5542 +{
5543 + unsigned int *kaddr;
5544 +
5545 + vmf->page = alloc_page(GFP_HIGHUSER);
5546 + if (!vmf->page)
5547 + return VM_FAULT_OOM;
5548 +
5549 + kaddr = kmap(vmf->page);
5550 + memset(kaddr, 0, PAGE_SIZE);
5551 + kaddr[0] = 0x9DE3BFA8U; /* save */
5552 + flush_dcache_page(vmf->page);
5553 + kunmap(vmf->page);
5554 + return VM_FAULT_MAJOR;
5555 +}
5556 +
5557 +static const struct vm_operations_struct pax_vm_ops = {
5558 + .close = pax_emuplt_close,
5559 + .fault = pax_emuplt_fault
5560 +};
5561 +
5562 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5563 +{
5564 + int ret;
5565 +
5566 + vma->vm_mm = current->mm;
5567 + vma->vm_start = addr;
5568 + vma->vm_end = addr + PAGE_SIZE;
5569 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5570 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5571 + vma->vm_ops = &pax_vm_ops;
5572 +
5573 + ret = insert_vm_struct(current->mm, vma);
5574 + if (ret)
5575 + return ret;
5576 +
5577 + ++current->mm->total_vm;
5578 + return 0;
5579 +}
5580 +#endif
5581 +
5582 +/*
5583 + * PaX: decide what to do with offenders (regs->tpc = fault address)
5584 + *
5585 + * returns 1 when task should be killed
5586 + * 2 when patched PLT trampoline was detected
5587 + * 3 when unpatched PLT trampoline was detected
5588 + */
5589 +static int pax_handle_fetch_fault(struct pt_regs *regs)
5590 +{
5591 +
5592 +#ifdef CONFIG_PAX_EMUPLT
5593 + int err;
5594 +
5595 + do { /* PaX: patched PLT emulation #1 */
5596 + unsigned int sethi1, sethi2, jmpl;
5597 +
5598 + err = get_user(sethi1, (unsigned int *)regs->tpc);
5599 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
5600 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
5601 +
5602 + if (err)
5603 + break;
5604 +
5605 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5606 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
5607 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
5608 + {
5609 + unsigned long addr;
5610 +
5611 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5612 + addr = regs->u_regs[UREG_G1];
5613 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5614 +
5615 + if (test_thread_flag(TIF_32BIT))
5616 + addr &= 0xFFFFFFFFUL;
5617 +
5618 + regs->tpc = addr;
5619 + regs->tnpc = addr+4;
5620 + return 2;
5621 + }
5622 + } while (0);
5623 +
5624 + { /* PaX: patched PLT emulation #2 */
5625 + unsigned int ba;
5626 +
5627 + err = get_user(ba, (unsigned int *)regs->tpc);
5628 +
5629 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5630 + unsigned long addr;
5631 +
5632 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5633 +
5634 + if (test_thread_flag(TIF_32BIT))
5635 + addr &= 0xFFFFFFFFUL;
5636 +
5637 + regs->tpc = addr;
5638 + regs->tnpc = addr+4;
5639 + return 2;
5640 + }
5641 + }
5642 +
5643 + do { /* PaX: patched PLT emulation #3 */
5644 + unsigned int sethi, jmpl, nop;
5645 +
5646 + err = get_user(sethi, (unsigned int *)regs->tpc);
5647 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5648 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5649 +
5650 + if (err)
5651 + break;
5652 +
5653 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5654 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5655 + nop == 0x01000000U)
5656 + {
5657 + unsigned long addr;
5658 +
5659 + addr = (sethi & 0x003FFFFFU) << 10;
5660 + regs->u_regs[UREG_G1] = addr;
5661 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5662 +
5663 + if (test_thread_flag(TIF_32BIT))
5664 + addr &= 0xFFFFFFFFUL;
5665 +
5666 + regs->tpc = addr;
5667 + regs->tnpc = addr+4;
5668 + return 2;
5669 + }
5670 + } while (0);
5671 +
5672 + do { /* PaX: patched PLT emulation #4 */
5673 + unsigned int sethi, mov1, call, mov2;
5674 +
5675 + err = get_user(sethi, (unsigned int *)regs->tpc);
5676 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5677 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
5678 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5679 +
5680 + if (err)
5681 + break;
5682 +
5683 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5684 + mov1 == 0x8210000FU &&
5685 + (call & 0xC0000000U) == 0x40000000U &&
5686 + mov2 == 0x9E100001U)
5687 + {
5688 + unsigned long addr;
5689 +
5690 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5691 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5692 +
5693 + if (test_thread_flag(TIF_32BIT))
5694 + addr &= 0xFFFFFFFFUL;
5695 +
5696 + regs->tpc = addr;
5697 + regs->tnpc = addr+4;
5698 + return 2;
5699 + }
5700 + } while (0);
5701 +
5702 + do { /* PaX: patched PLT emulation #5 */
5703 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5704 +
5705 + err = get_user(sethi, (unsigned int *)regs->tpc);
5706 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5707 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5708 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5709 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5710 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5711 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5712 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5713 +
5714 + if (err)
5715 + break;
5716 +
5717 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5718 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5719 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5720 + (or1 & 0xFFFFE000U) == 0x82106000U &&
5721 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5722 + sllx == 0x83287020U &&
5723 + jmpl == 0x81C04005U &&
5724 + nop == 0x01000000U)
5725 + {
5726 + unsigned long addr;
5727 +
5728 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5729 + regs->u_regs[UREG_G1] <<= 32;
5730 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5731 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5732 + regs->tpc = addr;
5733 + regs->tnpc = addr+4;
5734 + return 2;
5735 + }
5736 + } while (0);
5737 +
5738 + do { /* PaX: patched PLT emulation #6 */
5739 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5740 +
5741 + err = get_user(sethi, (unsigned int *)regs->tpc);
5742 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5743 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5744 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5745 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
5746 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5747 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5748 +
5749 + if (err)
5750 + break;
5751 +
5752 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5753 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5754 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5755 + sllx == 0x83287020U &&
5756 + (or & 0xFFFFE000U) == 0x8A116000U &&
5757 + jmpl == 0x81C04005U &&
5758 + nop == 0x01000000U)
5759 + {
5760 + unsigned long addr;
5761 +
5762 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5763 + regs->u_regs[UREG_G1] <<= 32;
5764 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5765 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5766 + regs->tpc = addr;
5767 + regs->tnpc = addr+4;
5768 + return 2;
5769 + }
5770 + } while (0);
5771 +
5772 + do { /* PaX: unpatched PLT emulation step 1 */
5773 + unsigned int sethi, ba, nop;
5774 +
5775 + err = get_user(sethi, (unsigned int *)regs->tpc);
5776 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5777 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5778 +
5779 + if (err)
5780 + break;
5781 +
5782 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5783 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5784 + nop == 0x01000000U)
5785 + {
5786 + unsigned long addr;
5787 + unsigned int save, call;
5788 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5789 +
5790 + if ((ba & 0xFFC00000U) == 0x30800000U)
5791 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5792 + else
5793 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5794 +
5795 + if (test_thread_flag(TIF_32BIT))
5796 + addr &= 0xFFFFFFFFUL;
5797 +
5798 + err = get_user(save, (unsigned int *)addr);
5799 + err |= get_user(call, (unsigned int *)(addr+4));
5800 + err |= get_user(nop, (unsigned int *)(addr+8));
5801 + if (err)
5802 + break;
5803 +
5804 +#ifdef CONFIG_PAX_DLRESOLVE
5805 + if (save == 0x9DE3BFA8U &&
5806 + (call & 0xC0000000U) == 0x40000000U &&
5807 + nop == 0x01000000U)
5808 + {
5809 + struct vm_area_struct *vma;
5810 + unsigned long call_dl_resolve;
5811 +
5812 + down_read(&current->mm->mmap_sem);
5813 + call_dl_resolve = current->mm->call_dl_resolve;
5814 + up_read(&current->mm->mmap_sem);
5815 + if (likely(call_dl_resolve))
5816 + goto emulate;
5817 +
5818 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5819 +
5820 + down_write(&current->mm->mmap_sem);
5821 + if (current->mm->call_dl_resolve) {
5822 + call_dl_resolve = current->mm->call_dl_resolve;
5823 + up_write(&current->mm->mmap_sem);
5824 + if (vma)
5825 + kmem_cache_free(vm_area_cachep, vma);
5826 + goto emulate;
5827 + }
5828 +
5829 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5830 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5831 + up_write(&current->mm->mmap_sem);
5832 + if (vma)
5833 + kmem_cache_free(vm_area_cachep, vma);
5834 + return 1;
5835 + }
5836 +
5837 + if (pax_insert_vma(vma, call_dl_resolve)) {
5838 + up_write(&current->mm->mmap_sem);
5839 + kmem_cache_free(vm_area_cachep, vma);
5840 + return 1;
5841 + }
5842 +
5843 + current->mm->call_dl_resolve = call_dl_resolve;
5844 + up_write(&current->mm->mmap_sem);
5845 +
5846 +emulate:
5847 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5848 + regs->tpc = call_dl_resolve;
5849 + regs->tnpc = addr+4;
5850 + return 3;
5851 + }
5852 +#endif
5853 +
5854 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5855 + if ((save & 0xFFC00000U) == 0x05000000U &&
5856 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5857 + nop == 0x01000000U)
5858 + {
5859 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5860 + regs->u_regs[UREG_G2] = addr + 4;
5861 + addr = (save & 0x003FFFFFU) << 10;
5862 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5863 +
5864 + if (test_thread_flag(TIF_32BIT))
5865 + addr &= 0xFFFFFFFFUL;
5866 +
5867 + regs->tpc = addr;
5868 + regs->tnpc = addr+4;
5869 + return 3;
5870 + }
5871 +
5872 + /* PaX: 64-bit PLT stub */
5873 + err = get_user(sethi1, (unsigned int *)addr);
5874 + err |= get_user(sethi2, (unsigned int *)(addr+4));
5875 + err |= get_user(or1, (unsigned int *)(addr+8));
5876 + err |= get_user(or2, (unsigned int *)(addr+12));
5877 + err |= get_user(sllx, (unsigned int *)(addr+16));
5878 + err |= get_user(add, (unsigned int *)(addr+20));
5879 + err |= get_user(jmpl, (unsigned int *)(addr+24));
5880 + err |= get_user(nop, (unsigned int *)(addr+28));
5881 + if (err)
5882 + break;
5883 +
5884 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5885 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5886 + (or1 & 0xFFFFE000U) == 0x88112000U &&
5887 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5888 + sllx == 0x89293020U &&
5889 + add == 0x8A010005U &&
5890 + jmpl == 0x89C14000U &&
5891 + nop == 0x01000000U)
5892 + {
5893 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5894 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5895 + regs->u_regs[UREG_G4] <<= 32;
5896 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5897 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5898 + regs->u_regs[UREG_G4] = addr + 24;
5899 + addr = regs->u_regs[UREG_G5];
5900 + regs->tpc = addr;
5901 + regs->tnpc = addr+4;
5902 + return 3;
5903 + }
5904 + }
5905 + } while (0);
5906 +
5907 +#ifdef CONFIG_PAX_DLRESOLVE
5908 + do { /* PaX: unpatched PLT emulation step 2 */
5909 + unsigned int save, call, nop;
5910 +
5911 + err = get_user(save, (unsigned int *)(regs->tpc-4));
5912 + err |= get_user(call, (unsigned int *)regs->tpc);
5913 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5914 + if (err)
5915 + break;
5916 +
5917 + if (save == 0x9DE3BFA8U &&
5918 + (call & 0xC0000000U) == 0x40000000U &&
5919 + nop == 0x01000000U)
5920 + {
5921 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5922 +
5923 + if (test_thread_flag(TIF_32BIT))
5924 + dl_resolve &= 0xFFFFFFFFUL;
5925 +
5926 + regs->u_regs[UREG_RETPC] = regs->tpc;
5927 + regs->tpc = dl_resolve;
5928 + regs->tnpc = dl_resolve+4;
5929 + return 3;
5930 + }
5931 + } while (0);
5932 +#endif
5933 +
5934 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5935 + unsigned int sethi, ba, nop;
5936 +
5937 + err = get_user(sethi, (unsigned int *)regs->tpc);
5938 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5939 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5940 +
5941 + if (err)
5942 + break;
5943 +
5944 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5945 + (ba & 0xFFF00000U) == 0x30600000U &&
5946 + nop == 0x01000000U)
5947 + {
5948 + unsigned long addr;
5949 +
5950 + addr = (sethi & 0x003FFFFFU) << 10;
5951 + regs->u_regs[UREG_G1] = addr;
5952 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5953 +
5954 + if (test_thread_flag(TIF_32BIT))
5955 + addr &= 0xFFFFFFFFUL;
5956 +
5957 + regs->tpc = addr;
5958 + regs->tnpc = addr+4;
5959 + return 2;
5960 + }
5961 + } while (0);
5962 +
5963 +#endif
5964 +
5965 + return 1;
5966 +}
5967 +
5968 +void pax_report_insns(void *pc, void *sp)
5969 +{
5970 + unsigned long i;
5971 +
5972 + printk(KERN_ERR "PAX: bytes at PC: ");
5973 + for (i = 0; i < 8; i++) {
5974 + unsigned int c;
5975 + if (get_user(c, (unsigned int *)pc+i))
5976 + printk(KERN_CONT "???????? ");
5977 + else
5978 + printk(KERN_CONT "%08x ", c);
5979 + }
5980 + printk("\n");
5981 +}
5982 +#endif
5983 +
5984 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5985 {
5986 struct mm_struct *mm = current->mm;
5987 @@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fau
5988 if (!vma)
5989 goto bad_area;
5990
5991 +#ifdef CONFIG_PAX_PAGEEXEC
5992 + /* PaX: detect ITLB misses on non-exec pages */
5993 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5994 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5995 + {
5996 + if (address != regs->tpc)
5997 + goto good_area;
5998 +
5999 + up_read(&mm->mmap_sem);
6000 + switch (pax_handle_fetch_fault(regs)) {
6001 +
6002 +#ifdef CONFIG_PAX_EMUPLT
6003 + case 2:
6004 + case 3:
6005 + return;
6006 +#endif
6007 +
6008 + }
6009 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
6010 + do_group_exit(SIGKILL);
6011 + }
6012 +#endif
6013 +
6014 /* Pure DTLB misses do not tell us whether the fault causing
6015 * load/store/atomic was a write or not, it only says that there
6016 * was no match. So in such a case we (carefully) read the
6017 diff -urNp linux-2.6.32.45/arch/sparc/mm/hugetlbpage.c linux-2.6.32.45/arch/sparc/mm/hugetlbpage.c
6018 --- linux-2.6.32.45/arch/sparc/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
6019 +++ linux-2.6.32.45/arch/sparc/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
6020 @@ -69,7 +69,7 @@ full_search:
6021 }
6022 return -ENOMEM;
6023 }
6024 - if (likely(!vma || addr + len <= vma->vm_start)) {
6025 + if (likely(check_heap_stack_gap(vma, addr, len))) {
6026 /*
6027 * Remember the place where we stopped the search:
6028 */
6029 @@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct
6030 /* make sure it can fit in the remaining address space */
6031 if (likely(addr > len)) {
6032 vma = find_vma(mm, addr-len);
6033 - if (!vma || addr <= vma->vm_start) {
6034 + if (check_heap_stack_gap(vma, addr - len, len)) {
6035 /* remember the address as a hint for next time */
6036 return (mm->free_area_cache = addr-len);
6037 }
6038 @@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct
6039 if (unlikely(mm->mmap_base < len))
6040 goto bottomup;
6041
6042 - addr = (mm->mmap_base-len) & HPAGE_MASK;
6043 + addr = mm->mmap_base - len;
6044
6045 do {
6046 + addr &= HPAGE_MASK;
6047 /*
6048 * Lookup failure means no vma is above this address,
6049 * else if new region fits below vma->vm_start,
6050 * return with success:
6051 */
6052 vma = find_vma(mm, addr);
6053 - if (likely(!vma || addr+len <= vma->vm_start)) {
6054 + if (likely(check_heap_stack_gap(vma, addr, len))) {
6055 /* remember the address as a hint for next time */
6056 return (mm->free_area_cache = addr);
6057 }
6058 @@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct
6059 mm->cached_hole_size = vma->vm_start - addr;
6060
6061 /* try just below the current vma->vm_start */
6062 - addr = (vma->vm_start-len) & HPAGE_MASK;
6063 - } while (likely(len < vma->vm_start));
6064 + addr = skip_heap_stack_gap(vma, len);
6065 + } while (!IS_ERR_VALUE(addr));
6066
6067 bottomup:
6068 /*
6069 @@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *f
6070 if (addr) {
6071 addr = ALIGN(addr, HPAGE_SIZE);
6072 vma = find_vma(mm, addr);
6073 - if (task_size - len >= addr &&
6074 - (!vma || addr + len <= vma->vm_start))
6075 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6076 return addr;
6077 }
6078 if (mm->get_unmapped_area == arch_get_unmapped_area)
6079 diff -urNp linux-2.6.32.45/arch/sparc/mm/init_32.c linux-2.6.32.45/arch/sparc/mm/init_32.c
6080 --- linux-2.6.32.45/arch/sparc/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
6081 +++ linux-2.6.32.45/arch/sparc/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
6082 @@ -317,6 +317,9 @@ extern void device_scan(void);
6083 pgprot_t PAGE_SHARED __read_mostly;
6084 EXPORT_SYMBOL(PAGE_SHARED);
6085
6086 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
6087 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
6088 +
6089 void __init paging_init(void)
6090 {
6091 switch(sparc_cpu_model) {
6092 @@ -345,17 +348,17 @@ void __init paging_init(void)
6093
6094 /* Initialize the protection map with non-constant, MMU dependent values. */
6095 protection_map[0] = PAGE_NONE;
6096 - protection_map[1] = PAGE_READONLY;
6097 - protection_map[2] = PAGE_COPY;
6098 - protection_map[3] = PAGE_COPY;
6099 + protection_map[1] = PAGE_READONLY_NOEXEC;
6100 + protection_map[2] = PAGE_COPY_NOEXEC;
6101 + protection_map[3] = PAGE_COPY_NOEXEC;
6102 protection_map[4] = PAGE_READONLY;
6103 protection_map[5] = PAGE_READONLY;
6104 protection_map[6] = PAGE_COPY;
6105 protection_map[7] = PAGE_COPY;
6106 protection_map[8] = PAGE_NONE;
6107 - protection_map[9] = PAGE_READONLY;
6108 - protection_map[10] = PAGE_SHARED;
6109 - protection_map[11] = PAGE_SHARED;
6110 + protection_map[9] = PAGE_READONLY_NOEXEC;
6111 + protection_map[10] = PAGE_SHARED_NOEXEC;
6112 + protection_map[11] = PAGE_SHARED_NOEXEC;
6113 protection_map[12] = PAGE_READONLY;
6114 protection_map[13] = PAGE_READONLY;
6115 protection_map[14] = PAGE_SHARED;
6116 diff -urNp linux-2.6.32.45/arch/sparc/mm/Makefile linux-2.6.32.45/arch/sparc/mm/Makefile
6117 --- linux-2.6.32.45/arch/sparc/mm/Makefile 2011-03-27 14:31:47.000000000 -0400
6118 +++ linux-2.6.32.45/arch/sparc/mm/Makefile 2011-04-17 15:56:46.000000000 -0400
6119 @@ -2,7 +2,7 @@
6120 #
6121
6122 asflags-y := -ansi
6123 -ccflags-y := -Werror
6124 +#ccflags-y := -Werror
6125
6126 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
6127 obj-y += fault_$(BITS).o
6128 diff -urNp linux-2.6.32.45/arch/sparc/mm/srmmu.c linux-2.6.32.45/arch/sparc/mm/srmmu.c
6129 --- linux-2.6.32.45/arch/sparc/mm/srmmu.c 2011-03-27 14:31:47.000000000 -0400
6130 +++ linux-2.6.32.45/arch/sparc/mm/srmmu.c 2011-04-17 15:56:46.000000000 -0400
6131 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
6132 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
6133 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
6134 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
6135 +
6136 +#ifdef CONFIG_PAX_PAGEEXEC
6137 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
6138 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
6139 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
6140 +#endif
6141 +
6142 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
6143 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
6144
6145 diff -urNp linux-2.6.32.45/arch/um/include/asm/kmap_types.h linux-2.6.32.45/arch/um/include/asm/kmap_types.h
6146 --- linux-2.6.32.45/arch/um/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
6147 +++ linux-2.6.32.45/arch/um/include/asm/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
6148 @@ -23,6 +23,7 @@ enum km_type {
6149 KM_IRQ1,
6150 KM_SOFTIRQ0,
6151 KM_SOFTIRQ1,
6152 + KM_CLEARPAGE,
6153 KM_TYPE_NR
6154 };
6155
6156 diff -urNp linux-2.6.32.45/arch/um/include/asm/page.h linux-2.6.32.45/arch/um/include/asm/page.h
6157 --- linux-2.6.32.45/arch/um/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
6158 +++ linux-2.6.32.45/arch/um/include/asm/page.h 2011-04-17 15:56:46.000000000 -0400
6159 @@ -14,6 +14,9 @@
6160 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6161 #define PAGE_MASK (~(PAGE_SIZE-1))
6162
6163 +#define ktla_ktva(addr) (addr)
6164 +#define ktva_ktla(addr) (addr)
6165 +
6166 #ifndef __ASSEMBLY__
6167
6168 struct page;
6169 diff -urNp linux-2.6.32.45/arch/um/kernel/process.c linux-2.6.32.45/arch/um/kernel/process.c
6170 --- linux-2.6.32.45/arch/um/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
6171 +++ linux-2.6.32.45/arch/um/kernel/process.c 2011-04-17 15:56:46.000000000 -0400
6172 @@ -393,22 +393,6 @@ int singlestepping(void * t)
6173 return 2;
6174 }
6175
6176 -/*
6177 - * Only x86 and x86_64 have an arch_align_stack().
6178 - * All other arches have "#define arch_align_stack(x) (x)"
6179 - * in their asm/system.h
6180 - * As this is included in UML from asm-um/system-generic.h,
6181 - * we can use it to behave as the subarch does.
6182 - */
6183 -#ifndef arch_align_stack
6184 -unsigned long arch_align_stack(unsigned long sp)
6185 -{
6186 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6187 - sp -= get_random_int() % 8192;
6188 - return sp & ~0xf;
6189 -}
6190 -#endif
6191 -
6192 unsigned long get_wchan(struct task_struct *p)
6193 {
6194 unsigned long stack_page, sp, ip;
6195 diff -urNp linux-2.6.32.45/arch/um/sys-i386/syscalls.c linux-2.6.32.45/arch/um/sys-i386/syscalls.c
6196 --- linux-2.6.32.45/arch/um/sys-i386/syscalls.c 2011-03-27 14:31:47.000000000 -0400
6197 +++ linux-2.6.32.45/arch/um/sys-i386/syscalls.c 2011-04-17 15:56:46.000000000 -0400
6198 @@ -11,6 +11,21 @@
6199 #include "asm/uaccess.h"
6200 #include "asm/unistd.h"
6201
6202 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
6203 +{
6204 + unsigned long pax_task_size = TASK_SIZE;
6205 +
6206 +#ifdef CONFIG_PAX_SEGMEXEC
6207 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
6208 + pax_task_size = SEGMEXEC_TASK_SIZE;
6209 +#endif
6210 +
6211 + if (len > pax_task_size || addr > pax_task_size - len)
6212 + return -EINVAL;
6213 +
6214 + return 0;
6215 +}
6216 +
6217 /*
6218 * Perform the select(nd, in, out, ex, tv) and mmap() system
6219 * calls. Linux/i386 didn't use to be able to handle more than
6220 diff -urNp linux-2.6.32.45/arch/x86/boot/bitops.h linux-2.6.32.45/arch/x86/boot/bitops.h
6221 --- linux-2.6.32.45/arch/x86/boot/bitops.h 2011-03-27 14:31:47.000000000 -0400
6222 +++ linux-2.6.32.45/arch/x86/boot/bitops.h 2011-04-17 15:56:46.000000000 -0400
6223 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int
6224 u8 v;
6225 const u32 *p = (const u32 *)addr;
6226
6227 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
6228 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
6229 return v;
6230 }
6231
6232 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int
6233
6234 static inline void set_bit(int nr, void *addr)
6235 {
6236 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
6237 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
6238 }
6239
6240 #endif /* BOOT_BITOPS_H */
6241 diff -urNp linux-2.6.32.45/arch/x86/boot/boot.h linux-2.6.32.45/arch/x86/boot/boot.h
6242 --- linux-2.6.32.45/arch/x86/boot/boot.h 2011-03-27 14:31:47.000000000 -0400
6243 +++ linux-2.6.32.45/arch/x86/boot/boot.h 2011-04-17 15:56:46.000000000 -0400
6244 @@ -82,7 +82,7 @@ static inline void io_delay(void)
6245 static inline u16 ds(void)
6246 {
6247 u16 seg;
6248 - asm("movw %%ds,%0" : "=rm" (seg));
6249 + asm volatile("movw %%ds,%0" : "=rm" (seg));
6250 return seg;
6251 }
6252
6253 @@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t
6254 static inline int memcmp(const void *s1, const void *s2, size_t len)
6255 {
6256 u8 diff;
6257 - asm("repe; cmpsb; setnz %0"
6258 + asm volatile("repe; cmpsb; setnz %0"
6259 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
6260 return diff;
6261 }
6262 diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/head_32.S linux-2.6.32.45/arch/x86/boot/compressed/head_32.S
6263 --- linux-2.6.32.45/arch/x86/boot/compressed/head_32.S 2011-03-27 14:31:47.000000000 -0400
6264 +++ linux-2.6.32.45/arch/x86/boot/compressed/head_32.S 2011-04-17 15:56:46.000000000 -0400
6265 @@ -76,7 +76,7 @@ ENTRY(startup_32)
6266 notl %eax
6267 andl %eax, %ebx
6268 #else
6269 - movl $LOAD_PHYSICAL_ADDR, %ebx
6270 + movl $____LOAD_PHYSICAL_ADDR, %ebx
6271 #endif
6272
6273 /* Target address to relocate to for decompression */
6274 @@ -149,7 +149,7 @@ relocated:
6275 * and where it was actually loaded.
6276 */
6277 movl %ebp, %ebx
6278 - subl $LOAD_PHYSICAL_ADDR, %ebx
6279 + subl $____LOAD_PHYSICAL_ADDR, %ebx
6280 jz 2f /* Nothing to be done if loaded at compiled addr. */
6281 /*
6282 * Process relocations.
6283 @@ -157,8 +157,7 @@ relocated:
6284
6285 1: subl $4, %edi
6286 movl (%edi), %ecx
6287 - testl %ecx, %ecx
6288 - jz 2f
6289 + jecxz 2f
6290 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
6291 jmp 1b
6292 2:
6293 diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/head_64.S linux-2.6.32.45/arch/x86/boot/compressed/head_64.S
6294 --- linux-2.6.32.45/arch/x86/boot/compressed/head_64.S 2011-03-27 14:31:47.000000000 -0400
6295 +++ linux-2.6.32.45/arch/x86/boot/compressed/head_64.S 2011-07-01 18:53:00.000000000 -0400
6296 @@ -91,7 +91,7 @@ ENTRY(startup_32)
6297 notl %eax
6298 andl %eax, %ebx
6299 #else
6300 - movl $LOAD_PHYSICAL_ADDR, %ebx
6301 + movl $____LOAD_PHYSICAL_ADDR, %ebx
6302 #endif
6303
6304 /* Target address to relocate to for decompression */
6305 @@ -183,7 +183,7 @@ no_longmode:
6306 hlt
6307 jmp 1b
6308
6309 -#include "../../kernel/verify_cpu_64.S"
6310 +#include "../../kernel/verify_cpu.S"
6311
6312 /*
6313 * Be careful here startup_64 needs to be at a predictable
6314 @@ -234,7 +234,7 @@ ENTRY(startup_64)
6315 notq %rax
6316 andq %rax, %rbp
6317 #else
6318 - movq $LOAD_PHYSICAL_ADDR, %rbp
6319 + movq $____LOAD_PHYSICAL_ADDR, %rbp
6320 #endif
6321
6322 /* Target address to relocate to for decompression */
6323 diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/Makefile linux-2.6.32.45/arch/x86/boot/compressed/Makefile
6324 --- linux-2.6.32.45/arch/x86/boot/compressed/Makefile 2011-03-27 14:31:47.000000000 -0400
6325 +++ linux-2.6.32.45/arch/x86/boot/compressed/Makefile 2011-08-07 14:38:34.000000000 -0400
6326 @@ -13,6 +13,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
6327 KBUILD_CFLAGS += $(cflags-y)
6328 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
6329 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
6330 +ifdef CONSTIFY_PLUGIN
6331 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6332 +endif
6333
6334 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
6335 GCOV_PROFILE := n
6336 diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/misc.c linux-2.6.32.45/arch/x86/boot/compressed/misc.c
6337 --- linux-2.6.32.45/arch/x86/boot/compressed/misc.c 2011-03-27 14:31:47.000000000 -0400
6338 +++ linux-2.6.32.45/arch/x86/boot/compressed/misc.c 2011-04-17 15:56:46.000000000 -0400
6339 @@ -288,7 +288,7 @@ static void parse_elf(void *output)
6340 case PT_LOAD:
6341 #ifdef CONFIG_RELOCATABLE
6342 dest = output;
6343 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
6344 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
6345 #else
6346 dest = (void *)(phdr->p_paddr);
6347 #endif
6348 @@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *
6349 error("Destination address too large");
6350 #endif
6351 #ifndef CONFIG_RELOCATABLE
6352 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
6353 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
6354 error("Wrong destination address");
6355 #endif
6356
6357 diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/mkpiggy.c linux-2.6.32.45/arch/x86/boot/compressed/mkpiggy.c
6358 --- linux-2.6.32.45/arch/x86/boot/compressed/mkpiggy.c 2011-03-27 14:31:47.000000000 -0400
6359 +++ linux-2.6.32.45/arch/x86/boot/compressed/mkpiggy.c 2011-04-17 15:56:46.000000000 -0400
6360 @@ -74,7 +74,7 @@ int main(int argc, char *argv[])
6361
6362 offs = (olen > ilen) ? olen - ilen : 0;
6363 offs += olen >> 12; /* Add 8 bytes for each 32K block */
6364 - offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
6365 + offs += 64*1024; /* Add 64K bytes slack */
6366 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
6367
6368 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
6369 diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/relocs.c linux-2.6.32.45/arch/x86/boot/compressed/relocs.c
6370 --- linux-2.6.32.45/arch/x86/boot/compressed/relocs.c 2011-03-27 14:31:47.000000000 -0400
6371 +++ linux-2.6.32.45/arch/x86/boot/compressed/relocs.c 2011-04-17 15:56:46.000000000 -0400
6372 @@ -10,8 +10,11 @@
6373 #define USE_BSD
6374 #include <endian.h>
6375
6376 +#include "../../../../include/linux/autoconf.h"
6377 +
6378 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
6379 static Elf32_Ehdr ehdr;
6380 +static Elf32_Phdr *phdr;
6381 static unsigned long reloc_count, reloc_idx;
6382 static unsigned long *relocs;
6383
6384 @@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
6385
6386 static int is_safe_abs_reloc(const char* sym_name)
6387 {
6388 - int i;
6389 + unsigned int i;
6390
6391 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
6392 if (!strcmp(sym_name, safe_abs_relocs[i]))
6393 @@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
6394 }
6395 }
6396
6397 +static void read_phdrs(FILE *fp)
6398 +{
6399 + unsigned int i;
6400 +
6401 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
6402 + if (!phdr) {
6403 + die("Unable to allocate %d program headers\n",
6404 + ehdr.e_phnum);
6405 + }
6406 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
6407 + die("Seek to %d failed: %s\n",
6408 + ehdr.e_phoff, strerror(errno));
6409 + }
6410 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
6411 + die("Cannot read ELF program headers: %s\n",
6412 + strerror(errno));
6413 + }
6414 + for(i = 0; i < ehdr.e_phnum; i++) {
6415 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
6416 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
6417 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
6418 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
6419 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
6420 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
6421 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
6422 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
6423 + }
6424 +
6425 +}
6426 +
6427 static void read_shdrs(FILE *fp)
6428 {
6429 - int i;
6430 + unsigned int i;
6431 Elf32_Shdr shdr;
6432
6433 secs = calloc(ehdr.e_shnum, sizeof(struct section));
6434 @@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
6435
6436 static void read_strtabs(FILE *fp)
6437 {
6438 - int i;
6439 + unsigned int i;
6440 for (i = 0; i < ehdr.e_shnum; i++) {
6441 struct section *sec = &secs[i];
6442 if (sec->shdr.sh_type != SHT_STRTAB) {
6443 @@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
6444
6445 static void read_symtabs(FILE *fp)
6446 {
6447 - int i,j;
6448 + unsigned int i,j;
6449 for (i = 0; i < ehdr.e_shnum; i++) {
6450 struct section *sec = &secs[i];
6451 if (sec->shdr.sh_type != SHT_SYMTAB) {
6452 @@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
6453
6454 static void read_relocs(FILE *fp)
6455 {
6456 - int i,j;
6457 + unsigned int i,j;
6458 + uint32_t base;
6459 +
6460 for (i = 0; i < ehdr.e_shnum; i++) {
6461 struct section *sec = &secs[i];
6462 if (sec->shdr.sh_type != SHT_REL) {
6463 @@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
6464 die("Cannot read symbol table: %s\n",
6465 strerror(errno));
6466 }
6467 + base = 0;
6468 + for (j = 0; j < ehdr.e_phnum; j++) {
6469 + if (phdr[j].p_type != PT_LOAD )
6470 + continue;
6471 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
6472 + continue;
6473 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
6474 + break;
6475 + }
6476 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
6477 Elf32_Rel *rel = &sec->reltab[j];
6478 - rel->r_offset = elf32_to_cpu(rel->r_offset);
6479 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
6480 rel->r_info = elf32_to_cpu(rel->r_info);
6481 }
6482 }
6483 @@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
6484
6485 static void print_absolute_symbols(void)
6486 {
6487 - int i;
6488 + unsigned int i;
6489 printf("Absolute symbols\n");
6490 printf(" Num: Value Size Type Bind Visibility Name\n");
6491 for (i = 0; i < ehdr.e_shnum; i++) {
6492 struct section *sec = &secs[i];
6493 char *sym_strtab;
6494 Elf32_Sym *sh_symtab;
6495 - int j;
6496 + unsigned int j;
6497
6498 if (sec->shdr.sh_type != SHT_SYMTAB) {
6499 continue;
6500 @@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
6501
6502 static void print_absolute_relocs(void)
6503 {
6504 - int i, printed = 0;
6505 + unsigned int i, printed = 0;
6506
6507 for (i = 0; i < ehdr.e_shnum; i++) {
6508 struct section *sec = &secs[i];
6509 struct section *sec_applies, *sec_symtab;
6510 char *sym_strtab;
6511 Elf32_Sym *sh_symtab;
6512 - int j;
6513 + unsigned int j;
6514 if (sec->shdr.sh_type != SHT_REL) {
6515 continue;
6516 }
6517 @@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
6518
6519 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6520 {
6521 - int i;
6522 + unsigned int i;
6523 /* Walk through the relocations */
6524 for (i = 0; i < ehdr.e_shnum; i++) {
6525 char *sym_strtab;
6526 Elf32_Sym *sh_symtab;
6527 struct section *sec_applies, *sec_symtab;
6528 - int j;
6529 + unsigned int j;
6530 struct section *sec = &secs[i];
6531
6532 if (sec->shdr.sh_type != SHT_REL) {
6533 @@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(El
6534 if (sym->st_shndx == SHN_ABS) {
6535 continue;
6536 }
6537 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6538 + if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6539 + continue;
6540 +
6541 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6542 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6543 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6544 + continue;
6545 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6546 + continue;
6547 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6548 + continue;
6549 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6550 + continue;
6551 +#endif
6552 if (r_type == R_386_NONE || r_type == R_386_PC32) {
6553 /*
6554 * NONE can be ignored and and PC relative
6555 @@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, co
6556
6557 static void emit_relocs(int as_text)
6558 {
6559 - int i;
6560 + unsigned int i;
6561 /* Count how many relocations I have and allocate space for them. */
6562 reloc_count = 0;
6563 walk_relocs(count_reloc);
6564 @@ -634,6 +693,7 @@ int main(int argc, char **argv)
6565 fname, strerror(errno));
6566 }
6567 read_ehdr(fp);
6568 + read_phdrs(fp);
6569 read_shdrs(fp);
6570 read_strtabs(fp);
6571 read_symtabs(fp);
6572 diff -urNp linux-2.6.32.45/arch/x86/boot/cpucheck.c linux-2.6.32.45/arch/x86/boot/cpucheck.c
6573 --- linux-2.6.32.45/arch/x86/boot/cpucheck.c 2011-03-27 14:31:47.000000000 -0400
6574 +++ linux-2.6.32.45/arch/x86/boot/cpucheck.c 2011-04-17 15:56:46.000000000 -0400
6575 @@ -74,7 +74,7 @@ static int has_fpu(void)
6576 u16 fcw = -1, fsw = -1;
6577 u32 cr0;
6578
6579 - asm("movl %%cr0,%0" : "=r" (cr0));
6580 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
6581 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6582 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6583 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6584 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6585 {
6586 u32 f0, f1;
6587
6588 - asm("pushfl ; "
6589 + asm volatile("pushfl ; "
6590 "pushfl ; "
6591 "popl %0 ; "
6592 "movl %0,%1 ; "
6593 @@ -115,7 +115,7 @@ static void get_flags(void)
6594 set_bit(X86_FEATURE_FPU, cpu.flags);
6595
6596 if (has_eflag(X86_EFLAGS_ID)) {
6597 - asm("cpuid"
6598 + asm volatile("cpuid"
6599 : "=a" (max_intel_level),
6600 "=b" (cpu_vendor[0]),
6601 "=d" (cpu_vendor[1]),
6602 @@ -124,7 +124,7 @@ static void get_flags(void)
6603
6604 if (max_intel_level >= 0x00000001 &&
6605 max_intel_level <= 0x0000ffff) {
6606 - asm("cpuid"
6607 + asm volatile("cpuid"
6608 : "=a" (tfms),
6609 "=c" (cpu.flags[4]),
6610 "=d" (cpu.flags[0])
6611 @@ -136,7 +136,7 @@ static void get_flags(void)
6612 cpu.model += ((tfms >> 16) & 0xf) << 4;
6613 }
6614
6615 - asm("cpuid"
6616 + asm volatile("cpuid"
6617 : "=a" (max_amd_level)
6618 : "a" (0x80000000)
6619 : "ebx", "ecx", "edx");
6620 @@ -144,7 +144,7 @@ static void get_flags(void)
6621 if (max_amd_level >= 0x80000001 &&
6622 max_amd_level <= 0x8000ffff) {
6623 u32 eax = 0x80000001;
6624 - asm("cpuid"
6625 + asm volatile("cpuid"
6626 : "+a" (eax),
6627 "=c" (cpu.flags[6]),
6628 "=d" (cpu.flags[1])
6629 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6630 u32 ecx = MSR_K7_HWCR;
6631 u32 eax, edx;
6632
6633 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6634 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6635 eax &= ~(1 << 15);
6636 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6637 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6638
6639 get_flags(); /* Make sure it really did something */
6640 err = check_flags();
6641 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6642 u32 ecx = MSR_VIA_FCR;
6643 u32 eax, edx;
6644
6645 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6646 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6647 eax |= (1<<1)|(1<<7);
6648 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6649 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6650
6651 set_bit(X86_FEATURE_CX8, cpu.flags);
6652 err = check_flags();
6653 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
6654 u32 eax, edx;
6655 u32 level = 1;
6656
6657 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6658 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6659 - asm("cpuid"
6660 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6661 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6662 + asm volatile("cpuid"
6663 : "+a" (level), "=d" (cpu.flags[0])
6664 : : "ecx", "ebx");
6665 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6666 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6667
6668 err = check_flags();
6669 }
6670 diff -urNp linux-2.6.32.45/arch/x86/boot/header.S linux-2.6.32.45/arch/x86/boot/header.S
6671 --- linux-2.6.32.45/arch/x86/boot/header.S 2011-03-27 14:31:47.000000000 -0400
6672 +++ linux-2.6.32.45/arch/x86/boot/header.S 2011-04-17 15:56:46.000000000 -0400
6673 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
6674 # single linked list of
6675 # struct setup_data
6676
6677 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6678 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6679
6680 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6681 #define VO_INIT_SIZE (VO__end - VO__text)
6682 diff -urNp linux-2.6.32.45/arch/x86/boot/Makefile linux-2.6.32.45/arch/x86/boot/Makefile
6683 --- linux-2.6.32.45/arch/x86/boot/Makefile 2011-03-27 14:31:47.000000000 -0400
6684 +++ linux-2.6.32.45/arch/x86/boot/Makefile 2011-08-07 14:38:13.000000000 -0400
6685 @@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
6686 $(call cc-option, -fno-stack-protector) \
6687 $(call cc-option, -mpreferred-stack-boundary=2)
6688 KBUILD_CFLAGS += $(call cc-option, -m32)
6689 +ifdef CONSTIFY_PLUGIN
6690 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6691 +endif
6692 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
6693 GCOV_PROFILE := n
6694
6695 diff -urNp linux-2.6.32.45/arch/x86/boot/memory.c linux-2.6.32.45/arch/x86/boot/memory.c
6696 --- linux-2.6.32.45/arch/x86/boot/memory.c 2011-03-27 14:31:47.000000000 -0400
6697 +++ linux-2.6.32.45/arch/x86/boot/memory.c 2011-04-17 15:56:46.000000000 -0400
6698 @@ -19,7 +19,7 @@
6699
6700 static int detect_memory_e820(void)
6701 {
6702 - int count = 0;
6703 + unsigned int count = 0;
6704 struct biosregs ireg, oreg;
6705 struct e820entry *desc = boot_params.e820_map;
6706 static struct e820entry buf; /* static so it is zeroed */
6707 diff -urNp linux-2.6.32.45/arch/x86/boot/video.c linux-2.6.32.45/arch/x86/boot/video.c
6708 --- linux-2.6.32.45/arch/x86/boot/video.c 2011-03-27 14:31:47.000000000 -0400
6709 +++ linux-2.6.32.45/arch/x86/boot/video.c 2011-04-17 15:56:46.000000000 -0400
6710 @@ -90,7 +90,7 @@ static void store_mode_params(void)
6711 static unsigned int get_entry(void)
6712 {
6713 char entry_buf[4];
6714 - int i, len = 0;
6715 + unsigned int i, len = 0;
6716 int key;
6717 unsigned int v;
6718
6719 diff -urNp linux-2.6.32.45/arch/x86/boot/video-vesa.c linux-2.6.32.45/arch/x86/boot/video-vesa.c
6720 --- linux-2.6.32.45/arch/x86/boot/video-vesa.c 2011-03-27 14:31:47.000000000 -0400
6721 +++ linux-2.6.32.45/arch/x86/boot/video-vesa.c 2011-04-17 15:56:46.000000000 -0400
6722 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6723
6724 boot_params.screen_info.vesapm_seg = oreg.es;
6725 boot_params.screen_info.vesapm_off = oreg.di;
6726 + boot_params.screen_info.vesapm_size = oreg.cx;
6727 }
6728
6729 /*
6730 diff -urNp linux-2.6.32.45/arch/x86/ia32/ia32_aout.c linux-2.6.32.45/arch/x86/ia32/ia32_aout.c
6731 --- linux-2.6.32.45/arch/x86/ia32/ia32_aout.c 2011-03-27 14:31:47.000000000 -0400
6732 +++ linux-2.6.32.45/arch/x86/ia32/ia32_aout.c 2011-04-17 15:56:46.000000000 -0400
6733 @@ -169,6 +169,8 @@ static int aout_core_dump(long signr, st
6734 unsigned long dump_start, dump_size;
6735 struct user32 dump;
6736
6737 + memset(&dump, 0, sizeof(dump));
6738 +
6739 fs = get_fs();
6740 set_fs(KERNEL_DS);
6741 has_dumped = 1;
6742 @@ -218,12 +220,6 @@ static int aout_core_dump(long signr, st
6743 dump_size = dump.u_ssize << PAGE_SHIFT;
6744 DUMP_WRITE(dump_start, dump_size);
6745 }
6746 - /*
6747 - * Finally dump the task struct. Not be used by gdb, but
6748 - * could be useful
6749 - */
6750 - set_fs(KERNEL_DS);
6751 - DUMP_WRITE(current, sizeof(*current));
6752 end_coredump:
6753 set_fs(fs);
6754 return has_dumped;
6755 diff -urNp linux-2.6.32.45/arch/x86/ia32/ia32entry.S linux-2.6.32.45/arch/x86/ia32/ia32entry.S
6756 --- linux-2.6.32.45/arch/x86/ia32/ia32entry.S 2011-03-27 14:31:47.000000000 -0400
6757 +++ linux-2.6.32.45/arch/x86/ia32/ia32entry.S 2011-06-04 20:29:52.000000000 -0400
6758 @@ -13,6 +13,7 @@
6759 #include <asm/thread_info.h>
6760 #include <asm/segment.h>
6761 #include <asm/irqflags.h>
6762 +#include <asm/pgtable.h>
6763 #include <linux/linkage.h>
6764
6765 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6766 @@ -93,6 +94,30 @@ ENTRY(native_irq_enable_sysexit)
6767 ENDPROC(native_irq_enable_sysexit)
6768 #endif
6769
6770 + .macro pax_enter_kernel_user
6771 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6772 + call pax_enter_kernel_user
6773 +#endif
6774 + .endm
6775 +
6776 + .macro pax_exit_kernel_user
6777 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6778 + call pax_exit_kernel_user
6779 +#endif
6780 +#ifdef CONFIG_PAX_RANDKSTACK
6781 + pushq %rax
6782 + call pax_randomize_kstack
6783 + popq %rax
6784 +#endif
6785 + pax_erase_kstack
6786 + .endm
6787 +
6788 +.macro pax_erase_kstack
6789 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6790 + call pax_erase_kstack
6791 +#endif
6792 +.endm
6793 +
6794 /*
6795 * 32bit SYSENTER instruction entry.
6796 *
6797 @@ -119,7 +144,7 @@ ENTRY(ia32_sysenter_target)
6798 CFI_REGISTER rsp,rbp
6799 SWAPGS_UNSAFE_STACK
6800 movq PER_CPU_VAR(kernel_stack), %rsp
6801 - addq $(KERNEL_STACK_OFFSET),%rsp
6802 + pax_enter_kernel_user
6803 /*
6804 * No need to follow this irqs on/off section: the syscall
6805 * disabled irqs, here we enable it straight after entry:
6806 @@ -135,7 +160,8 @@ ENTRY(ia32_sysenter_target)
6807 pushfq
6808 CFI_ADJUST_CFA_OFFSET 8
6809 /*CFI_REL_OFFSET rflags,0*/
6810 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6811 + GET_THREAD_INFO(%r10)
6812 + movl TI_sysenter_return(%r10), %r10d
6813 CFI_REGISTER rip,r10
6814 pushq $__USER32_CS
6815 CFI_ADJUST_CFA_OFFSET 8
6816 @@ -150,6 +176,12 @@ ENTRY(ia32_sysenter_target)
6817 SAVE_ARGS 0,0,1
6818 /* no need to do an access_ok check here because rbp has been
6819 32bit zero extended */
6820 +
6821 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6822 + mov $PAX_USER_SHADOW_BASE,%r10
6823 + add %r10,%rbp
6824 +#endif
6825 +
6826 1: movl (%rbp),%ebp
6827 .section __ex_table,"a"
6828 .quad 1b,ia32_badarg
6829 @@ -172,6 +204,7 @@ sysenter_dispatch:
6830 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6831 jnz sysexit_audit
6832 sysexit_from_sys_call:
6833 + pax_exit_kernel_user
6834 andl $~TS_COMPAT,TI_status(%r10)
6835 /* clear IF, that popfq doesn't enable interrupts early */
6836 andl $~0x200,EFLAGS-R11(%rsp)
6837 @@ -200,6 +233,9 @@ sysexit_from_sys_call:
6838 movl %eax,%esi /* 2nd arg: syscall number */
6839 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6840 call audit_syscall_entry
6841 +
6842 + pax_erase_kstack
6843 +
6844 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6845 cmpq $(IA32_NR_syscalls-1),%rax
6846 ja ia32_badsys
6847 @@ -252,6 +288,9 @@ sysenter_tracesys:
6848 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6849 movq %rsp,%rdi /* &pt_regs -> arg1 */
6850 call syscall_trace_enter
6851 +
6852 + pax_erase_kstack
6853 +
6854 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6855 RESTORE_REST
6856 cmpq $(IA32_NR_syscalls-1),%rax
6857 @@ -283,19 +322,24 @@ ENDPROC(ia32_sysenter_target)
6858 ENTRY(ia32_cstar_target)
6859 CFI_STARTPROC32 simple
6860 CFI_SIGNAL_FRAME
6861 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6862 + CFI_DEF_CFA rsp,0
6863 CFI_REGISTER rip,rcx
6864 /*CFI_REGISTER rflags,r11*/
6865 SWAPGS_UNSAFE_STACK
6866 movl %esp,%r8d
6867 CFI_REGISTER rsp,r8
6868 movq PER_CPU_VAR(kernel_stack),%rsp
6869 +
6870 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6871 + pax_enter_kernel_user
6872 +#endif
6873 +
6874 /*
6875 * No need to follow this irqs on/off section: the syscall
6876 * disabled irqs and here we enable it straight after entry:
6877 */
6878 ENABLE_INTERRUPTS(CLBR_NONE)
6879 - SAVE_ARGS 8,1,1
6880 + SAVE_ARGS 8*6,1,1
6881 movl %eax,%eax /* zero extension */
6882 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6883 movq %rcx,RIP-ARGOFFSET(%rsp)
6884 @@ -311,6 +355,12 @@ ENTRY(ia32_cstar_target)
6885 /* no need to do an access_ok check here because r8 has been
6886 32bit zero extended */
6887 /* hardware stack frame is complete now */
6888 +
6889 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6890 + mov $PAX_USER_SHADOW_BASE,%r10
6891 + add %r10,%r8
6892 +#endif
6893 +
6894 1: movl (%r8),%r9d
6895 .section __ex_table,"a"
6896 .quad 1b,ia32_badarg
6897 @@ -333,6 +383,7 @@ cstar_dispatch:
6898 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6899 jnz sysretl_audit
6900 sysretl_from_sys_call:
6901 + pax_exit_kernel_user
6902 andl $~TS_COMPAT,TI_status(%r10)
6903 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
6904 movl RIP-ARGOFFSET(%rsp),%ecx
6905 @@ -370,6 +421,9 @@ cstar_tracesys:
6906 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6907 movq %rsp,%rdi /* &pt_regs -> arg1 */
6908 call syscall_trace_enter
6909 +
6910 + pax_erase_kstack
6911 +
6912 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6913 RESTORE_REST
6914 xchgl %ebp,%r9d
6915 @@ -415,6 +469,7 @@ ENTRY(ia32_syscall)
6916 CFI_REL_OFFSET rip,RIP-RIP
6917 PARAVIRT_ADJUST_EXCEPTION_FRAME
6918 SWAPGS
6919 + pax_enter_kernel_user
6920 /*
6921 * No need to follow this irqs on/off section: the syscall
6922 * disabled irqs and here we enable it straight after entry:
6923 @@ -448,6 +503,9 @@ ia32_tracesys:
6924 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6925 movq %rsp,%rdi /* &pt_regs -> arg1 */
6926 call syscall_trace_enter
6927 +
6928 + pax_erase_kstack
6929 +
6930 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6931 RESTORE_REST
6932 cmpq $(IA32_NR_syscalls-1),%rax
6933 diff -urNp linux-2.6.32.45/arch/x86/ia32/ia32_signal.c linux-2.6.32.45/arch/x86/ia32/ia32_signal.c
6934 --- linux-2.6.32.45/arch/x86/ia32/ia32_signal.c 2011-03-27 14:31:47.000000000 -0400
6935 +++ linux-2.6.32.45/arch/x86/ia32/ia32_signal.c 2011-04-17 15:56:46.000000000 -0400
6936 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
6937 sp -= frame_size;
6938 /* Align the stack pointer according to the i386 ABI,
6939 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6940 - sp = ((sp + 4) & -16ul) - 4;
6941 + sp = ((sp - 12) & -16ul) - 4;
6942 return (void __user *) sp;
6943 }
6944
6945 @@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
6946 * These are actually not used anymore, but left because some
6947 * gdb versions depend on them as a marker.
6948 */
6949 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6950 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6951 } put_user_catch(err);
6952
6953 if (err)
6954 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
6955 0xb8,
6956 __NR_ia32_rt_sigreturn,
6957 0x80cd,
6958 - 0,
6959 + 0
6960 };
6961
6962 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6963 @@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
6964
6965 if (ka->sa.sa_flags & SA_RESTORER)
6966 restorer = ka->sa.sa_restorer;
6967 + else if (current->mm->context.vdso)
6968 + /* Return stub is in 32bit vsyscall page */
6969 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6970 else
6971 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6972 - rt_sigreturn);
6973 + restorer = &frame->retcode;
6974 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6975
6976 /*
6977 * Not actually used anymore, but left because some gdb
6978 * versions need it.
6979 */
6980 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6981 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6982 } put_user_catch(err);
6983
6984 if (err)
6985 diff -urNp linux-2.6.32.45/arch/x86/include/asm/alternative.h linux-2.6.32.45/arch/x86/include/asm/alternative.h
6986 --- linux-2.6.32.45/arch/x86/include/asm/alternative.h 2011-03-27 14:31:47.000000000 -0400
6987 +++ linux-2.6.32.45/arch/x86/include/asm/alternative.h 2011-04-17 15:56:46.000000000 -0400
6988 @@ -85,7 +85,7 @@ static inline void alternatives_smp_swit
6989 " .byte 662b-661b\n" /* sourcelen */ \
6990 " .byte 664f-663f\n" /* replacementlen */ \
6991 ".previous\n" \
6992 - ".section .altinstr_replacement, \"ax\"\n" \
6993 + ".section .altinstr_replacement, \"a\"\n" \
6994 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
6995 ".previous"
6996
6997 diff -urNp linux-2.6.32.45/arch/x86/include/asm/apic.h linux-2.6.32.45/arch/x86/include/asm/apic.h
6998 --- linux-2.6.32.45/arch/x86/include/asm/apic.h 2011-03-27 14:31:47.000000000 -0400
6999 +++ linux-2.6.32.45/arch/x86/include/asm/apic.h 2011-08-17 20:01:15.000000000 -0400
7000 @@ -46,7 +46,7 @@ static inline void generic_apic_probe(vo
7001
7002 #ifdef CONFIG_X86_LOCAL_APIC
7003
7004 -extern unsigned int apic_verbosity;
7005 +extern int apic_verbosity;
7006 extern int local_apic_timer_c2_ok;
7007
7008 extern int disable_apic;
7009 diff -urNp linux-2.6.32.45/arch/x86/include/asm/apm.h linux-2.6.32.45/arch/x86/include/asm/apm.h
7010 --- linux-2.6.32.45/arch/x86/include/asm/apm.h 2011-03-27 14:31:47.000000000 -0400
7011 +++ linux-2.6.32.45/arch/x86/include/asm/apm.h 2011-04-17 15:56:46.000000000 -0400
7012 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
7013 __asm__ __volatile__(APM_DO_ZERO_SEGS
7014 "pushl %%edi\n\t"
7015 "pushl %%ebp\n\t"
7016 - "lcall *%%cs:apm_bios_entry\n\t"
7017 + "lcall *%%ss:apm_bios_entry\n\t"
7018 "setc %%al\n\t"
7019 "popl %%ebp\n\t"
7020 "popl %%edi\n\t"
7021 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
7022 __asm__ __volatile__(APM_DO_ZERO_SEGS
7023 "pushl %%edi\n\t"
7024 "pushl %%ebp\n\t"
7025 - "lcall *%%cs:apm_bios_entry\n\t"
7026 + "lcall *%%ss:apm_bios_entry\n\t"
7027 "setc %%bl\n\t"
7028 "popl %%ebp\n\t"
7029 "popl %%edi\n\t"
7030 diff -urNp linux-2.6.32.45/arch/x86/include/asm/atomic_32.h linux-2.6.32.45/arch/x86/include/asm/atomic_32.h
7031 --- linux-2.6.32.45/arch/x86/include/asm/atomic_32.h 2011-03-27 14:31:47.000000000 -0400
7032 +++ linux-2.6.32.45/arch/x86/include/asm/atomic_32.h 2011-05-04 17:56:20.000000000 -0400
7033 @@ -25,6 +25,17 @@ static inline int atomic_read(const atom
7034 }
7035
7036 /**
7037 + * atomic_read_unchecked - read atomic variable
7038 + * @v: pointer of type atomic_unchecked_t
7039 + *
7040 + * Atomically reads the value of @v.
7041 + */
7042 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7043 +{
7044 + return v->counter;
7045 +}
7046 +
7047 +/**
7048 * atomic_set - set atomic variable
7049 * @v: pointer of type atomic_t
7050 * @i: required value
7051 @@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *
7052 }
7053
7054 /**
7055 + * atomic_set_unchecked - set atomic variable
7056 + * @v: pointer of type atomic_unchecked_t
7057 + * @i: required value
7058 + *
7059 + * Atomically sets the value of @v to @i.
7060 + */
7061 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7062 +{
7063 + v->counter = i;
7064 +}
7065 +
7066 +/**
7067 * atomic_add - add integer to atomic variable
7068 * @i: integer value to add
7069 * @v: pointer of type atomic_t
7070 @@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *
7071 */
7072 static inline void atomic_add(int i, atomic_t *v)
7073 {
7074 - asm volatile(LOCK_PREFIX "addl %1,%0"
7075 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7076 +
7077 +#ifdef CONFIG_PAX_REFCOUNT
7078 + "jno 0f\n"
7079 + LOCK_PREFIX "subl %1,%0\n"
7080 + "int $4\n0:\n"
7081 + _ASM_EXTABLE(0b, 0b)
7082 +#endif
7083 +
7084 + : "+m" (v->counter)
7085 + : "ir" (i));
7086 +}
7087 +
7088 +/**
7089 + * atomic_add_unchecked - add integer to atomic variable
7090 + * @i: integer value to add
7091 + * @v: pointer of type atomic_unchecked_t
7092 + *
7093 + * Atomically adds @i to @v.
7094 + */
7095 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7096 +{
7097 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7098 : "+m" (v->counter)
7099 : "ir" (i));
7100 }
7101 @@ -59,7 +104,29 @@ static inline void atomic_add(int i, ato
7102 */
7103 static inline void atomic_sub(int i, atomic_t *v)
7104 {
7105 - asm volatile(LOCK_PREFIX "subl %1,%0"
7106 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7107 +
7108 +#ifdef CONFIG_PAX_REFCOUNT
7109 + "jno 0f\n"
7110 + LOCK_PREFIX "addl %1,%0\n"
7111 + "int $4\n0:\n"
7112 + _ASM_EXTABLE(0b, 0b)
7113 +#endif
7114 +
7115 + : "+m" (v->counter)
7116 + : "ir" (i));
7117 +}
7118 +
7119 +/**
7120 + * atomic_sub_unchecked - subtract integer from atomic variable
7121 + * @i: integer value to subtract
7122 + * @v: pointer of type atomic_unchecked_t
7123 + *
7124 + * Atomically subtracts @i from @v.
7125 + */
7126 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7127 +{
7128 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7129 : "+m" (v->counter)
7130 : "ir" (i));
7131 }
7132 @@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(in
7133 {
7134 unsigned char c;
7135
7136 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7137 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
7138 +
7139 +#ifdef CONFIG_PAX_REFCOUNT
7140 + "jno 0f\n"
7141 + LOCK_PREFIX "addl %2,%0\n"
7142 + "int $4\n0:\n"
7143 + _ASM_EXTABLE(0b, 0b)
7144 +#endif
7145 +
7146 + "sete %1\n"
7147 : "+m" (v->counter), "=qm" (c)
7148 : "ir" (i) : "memory");
7149 return c;
7150 @@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(in
7151 */
7152 static inline void atomic_inc(atomic_t *v)
7153 {
7154 - asm volatile(LOCK_PREFIX "incl %0"
7155 + asm volatile(LOCK_PREFIX "incl %0\n"
7156 +
7157 +#ifdef CONFIG_PAX_REFCOUNT
7158 + "jno 0f\n"
7159 + LOCK_PREFIX "decl %0\n"
7160 + "int $4\n0:\n"
7161 + _ASM_EXTABLE(0b, 0b)
7162 +#endif
7163 +
7164 + : "+m" (v->counter));
7165 +}
7166 +
7167 +/**
7168 + * atomic_inc_unchecked - increment atomic variable
7169 + * @v: pointer of type atomic_unchecked_t
7170 + *
7171 + * Atomically increments @v by 1.
7172 + */
7173 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7174 +{
7175 + asm volatile(LOCK_PREFIX "incl %0\n"
7176 : "+m" (v->counter));
7177 }
7178
7179 @@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *
7180 */
7181 static inline void atomic_dec(atomic_t *v)
7182 {
7183 - asm volatile(LOCK_PREFIX "decl %0"
7184 + asm volatile(LOCK_PREFIX "decl %0\n"
7185 +
7186 +#ifdef CONFIG_PAX_REFCOUNT
7187 + "jno 0f\n"
7188 + LOCK_PREFIX "incl %0\n"
7189 + "int $4\n0:\n"
7190 + _ASM_EXTABLE(0b, 0b)
7191 +#endif
7192 +
7193 + : "+m" (v->counter));
7194 +}
7195 +
7196 +/**
7197 + * atomic_dec_unchecked - decrement atomic variable
7198 + * @v: pointer of type atomic_unchecked_t
7199 + *
7200 + * Atomically decrements @v by 1.
7201 + */
7202 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7203 +{
7204 + asm volatile(LOCK_PREFIX "decl %0\n"
7205 : "+m" (v->counter));
7206 }
7207
7208 @@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(at
7209 {
7210 unsigned char c;
7211
7212 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
7213 + asm volatile(LOCK_PREFIX "decl %0\n"
7214 +
7215 +#ifdef CONFIG_PAX_REFCOUNT
7216 + "jno 0f\n"
7217 + LOCK_PREFIX "incl %0\n"
7218 + "int $4\n0:\n"
7219 + _ASM_EXTABLE(0b, 0b)
7220 +#endif
7221 +
7222 + "sete %1\n"
7223 : "+m" (v->counter), "=qm" (c)
7224 : : "memory");
7225 return c != 0;
7226 @@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(at
7227 {
7228 unsigned char c;
7229
7230 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
7231 + asm volatile(LOCK_PREFIX "incl %0\n"
7232 +
7233 +#ifdef CONFIG_PAX_REFCOUNT
7234 + "jno 0f\n"
7235 + LOCK_PREFIX "decl %0\n"
7236 + "into\n0:\n"
7237 + _ASM_EXTABLE(0b, 0b)
7238 +#endif
7239 +
7240 + "sete %1\n"
7241 + : "+m" (v->counter), "=qm" (c)
7242 + : : "memory");
7243 + return c != 0;
7244 +}
7245 +
7246 +/**
7247 + * atomic_inc_and_test_unchecked - increment and test
7248 + * @v: pointer of type atomic_unchecked_t
7249 + *
7250 + * Atomically increments @v by 1
7251 + * and returns true if the result is zero, or false for all
7252 + * other cases.
7253 + */
7254 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7255 +{
7256 + unsigned char c;
7257 +
7258 + asm volatile(LOCK_PREFIX "incl %0\n"
7259 + "sete %1\n"
7260 : "+m" (v->counter), "=qm" (c)
7261 : : "memory");
7262 return c != 0;
7263 @@ -156,7 +309,16 @@ static inline int atomic_add_negative(in
7264 {
7265 unsigned char c;
7266
7267 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7268 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
7269 +
7270 +#ifdef CONFIG_PAX_REFCOUNT
7271 + "jno 0f\n"
7272 + LOCK_PREFIX "subl %2,%0\n"
7273 + "int $4\n0:\n"
7274 + _ASM_EXTABLE(0b, 0b)
7275 +#endif
7276 +
7277 + "sets %1\n"
7278 : "+m" (v->counter), "=qm" (c)
7279 : "ir" (i) : "memory");
7280 return c;
7281 @@ -179,6 +341,46 @@ static inline int atomic_add_return(int
7282 #endif
7283 /* Modern 486+ processor */
7284 __i = i;
7285 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7286 +
7287 +#ifdef CONFIG_PAX_REFCOUNT
7288 + "jno 0f\n"
7289 + "movl %0, %1\n"
7290 + "int $4\n0:\n"
7291 + _ASM_EXTABLE(0b, 0b)
7292 +#endif
7293 +
7294 + : "+r" (i), "+m" (v->counter)
7295 + : : "memory");
7296 + return i + __i;
7297 +
7298 +#ifdef CONFIG_M386
7299 +no_xadd: /* Legacy 386 processor */
7300 + local_irq_save(flags);
7301 + __i = atomic_read(v);
7302 + atomic_set(v, i + __i);
7303 + local_irq_restore(flags);
7304 + return i + __i;
7305 +#endif
7306 +}
7307 +
7308 +/**
7309 + * atomic_add_return_unchecked - add integer and return
7310 + * @v: pointer of type atomic_unchecked_t
7311 + * @i: integer value to add
7312 + *
7313 + * Atomically adds @i to @v and returns @i + @v
7314 + */
7315 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7316 +{
7317 + int __i;
7318 +#ifdef CONFIG_M386
7319 + unsigned long flags;
7320 + if (unlikely(boot_cpu_data.x86 <= 3))
7321 + goto no_xadd;
7322 +#endif
7323 + /* Modern 486+ processor */
7324 + __i = i;
7325 asm volatile(LOCK_PREFIX "xaddl %0, %1"
7326 : "+r" (i), "+m" (v->counter)
7327 : : "memory");
7328 @@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_
7329 return cmpxchg(&v->counter, old, new);
7330 }
7331
7332 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7333 +{
7334 + return cmpxchg(&v->counter, old, new);
7335 +}
7336 +
7337 static inline int atomic_xchg(atomic_t *v, int new)
7338 {
7339 return xchg(&v->counter, new);
7340 }
7341
7342 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7343 +{
7344 + return xchg(&v->counter, new);
7345 +}
7346 +
7347 /**
7348 * atomic_add_unless - add unless the number is already a given value
7349 * @v: pointer of type atomic_t
7350 @@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *
7351 */
7352 static inline int atomic_add_unless(atomic_t *v, int a, int u)
7353 {
7354 - int c, old;
7355 + int c, old, new;
7356 c = atomic_read(v);
7357 for (;;) {
7358 - if (unlikely(c == (u)))
7359 + if (unlikely(c == u))
7360 break;
7361 - old = atomic_cmpxchg((v), c, c + (a));
7362 +
7363 + asm volatile("addl %2,%0\n"
7364 +
7365 +#ifdef CONFIG_PAX_REFCOUNT
7366 + "jno 0f\n"
7367 + "subl %2,%0\n"
7368 + "int $4\n0:\n"
7369 + _ASM_EXTABLE(0b, 0b)
7370 +#endif
7371 +
7372 + : "=r" (new)
7373 + : "0" (c), "ir" (a));
7374 +
7375 + old = atomic_cmpxchg(v, c, new);
7376 if (likely(old == c))
7377 break;
7378 c = old;
7379 }
7380 - return c != (u);
7381 + return c != u;
7382 }
7383
7384 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
7385
7386 #define atomic_inc_return(v) (atomic_add_return(1, v))
7387 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7388 +{
7389 + return atomic_add_return_unchecked(1, v);
7390 +}
7391 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7392
7393 /* These are x86-specific, used by some header files */
7394 @@ -266,9 +495,18 @@ typedef struct {
7395 u64 __aligned(8) counter;
7396 } atomic64_t;
7397
7398 +#ifdef CONFIG_PAX_REFCOUNT
7399 +typedef struct {
7400 + u64 __aligned(8) counter;
7401 +} atomic64_unchecked_t;
7402 +#else
7403 +typedef atomic64_t atomic64_unchecked_t;
7404 +#endif
7405 +
7406 #define ATOMIC64_INIT(val) { (val) }
7407
7408 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
7409 +extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
7410
7411 /**
7412 * atomic64_xchg - xchg atomic64 variable
7413 @@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *
7414 * the old value.
7415 */
7416 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
7417 +extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7418
7419 /**
7420 * atomic64_set - set atomic64 variable
7421 @@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr
7422 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
7423
7424 /**
7425 + * atomic64_unchecked_set - set atomic64 variable
7426 + * @ptr: pointer to type atomic64_unchecked_t
7427 + * @new_val: value to assign
7428 + *
7429 + * Atomically sets the value of @ptr to @new_val.
7430 + */
7431 +extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7432 +
7433 +/**
7434 * atomic64_read - read atomic64 variable
7435 * @ptr: pointer to type atomic64_t
7436 *
7437 @@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64
7438 return res;
7439 }
7440
7441 -extern u64 atomic64_read(atomic64_t *ptr);
7442 +/**
7443 + * atomic64_read_unchecked - read atomic64 variable
7444 + * @ptr: pointer to type atomic64_unchecked_t
7445 + *
7446 + * Atomically reads the value of @ptr and returns it.
7447 + */
7448 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
7449 +{
7450 + u64 res;
7451 +
7452 + /*
7453 + * Note, we inline this atomic64_unchecked_t primitive because
7454 + * it only clobbers EAX/EDX and leaves the others
7455 + * untouched. We also (somewhat subtly) rely on the
7456 + * fact that cmpxchg8b returns the current 64-bit value
7457 + * of the memory location we are touching:
7458 + */
7459 + asm volatile(
7460 + "mov %%ebx, %%eax\n\t"
7461 + "mov %%ecx, %%edx\n\t"
7462 + LOCK_PREFIX "cmpxchg8b %1\n"
7463 + : "=&A" (res)
7464 + : "m" (*ptr)
7465 + );
7466 +
7467 + return res;
7468 +}
7469
7470 /**
7471 * atomic64_add_return - add and return
7472 @@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta
7473 * Other variants with different arithmetic operators:
7474 */
7475 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
7476 +extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7477 extern u64 atomic64_inc_return(atomic64_t *ptr);
7478 +extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
7479 extern u64 atomic64_dec_return(atomic64_t *ptr);
7480 +extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
7481
7482 /**
7483 * atomic64_add - add integer to atomic64 variable
7484 @@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_
7485 extern void atomic64_add(u64 delta, atomic64_t *ptr);
7486
7487 /**
7488 + * atomic64_add_unchecked - add integer to atomic64 variable
7489 + * @delta: integer value to add
7490 + * @ptr: pointer to type atomic64_unchecked_t
7491 + *
7492 + * Atomically adds @delta to @ptr.
7493 + */
7494 +extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7495 +
7496 +/**
7497 * atomic64_sub - subtract the atomic64 variable
7498 * @delta: integer value to subtract
7499 * @ptr: pointer to type atomic64_t
7500 @@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atom
7501 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
7502
7503 /**
7504 + * atomic64_sub_unchecked - subtract the atomic64 variable
7505 + * @delta: integer value to subtract
7506 + * @ptr: pointer to type atomic64_unchecked_t
7507 + *
7508 + * Atomically subtracts @delta from @ptr.
7509 + */
7510 +extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7511 +
7512 +/**
7513 * atomic64_sub_and_test - subtract value from variable and test result
7514 * @delta: integer value to subtract
7515 * @ptr: pointer to type atomic64_t
7516 @@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 del
7517 extern void atomic64_inc(atomic64_t *ptr);
7518
7519 /**
7520 + * atomic64_inc_unchecked - increment atomic64 variable
7521 + * @ptr: pointer to type atomic64_unchecked_t
7522 + *
7523 + * Atomically increments @ptr by 1.
7524 + */
7525 +extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
7526 +
7527 +/**
7528 * atomic64_dec - decrement atomic64 variable
7529 * @ptr: pointer to type atomic64_t
7530 *
7531 @@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr
7532 extern void atomic64_dec(atomic64_t *ptr);
7533
7534 /**
7535 + * atomic64_dec_unchecked - decrement atomic64 variable
7536 + * @ptr: pointer to type atomic64_unchecked_t
7537 + *
7538 + * Atomically decrements @ptr by 1.
7539 + */
7540 +extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
7541 +
7542 +/**
7543 * atomic64_dec_and_test - decrement and test
7544 * @ptr: pointer to type atomic64_t
7545 *
7546 diff -urNp linux-2.6.32.45/arch/x86/include/asm/atomic_64.h linux-2.6.32.45/arch/x86/include/asm/atomic_64.h
7547 --- linux-2.6.32.45/arch/x86/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
7548 +++ linux-2.6.32.45/arch/x86/include/asm/atomic_64.h 2011-05-04 18:35:31.000000000 -0400
7549 @@ -24,6 +24,17 @@ static inline int atomic_read(const atom
7550 }
7551
7552 /**
7553 + * atomic_read_unchecked - read atomic variable
7554 + * @v: pointer of type atomic_unchecked_t
7555 + *
7556 + * Atomically reads the value of @v.
7557 + */
7558 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7559 +{
7560 + return v->counter;
7561 +}
7562 +
7563 +/**
7564 * atomic_set - set atomic variable
7565 * @v: pointer of type atomic_t
7566 * @i: required value
7567 @@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *
7568 }
7569
7570 /**
7571 + * atomic_set_unchecked - set atomic variable
7572 + * @v: pointer of type atomic_unchecked_t
7573 + * @i: required value
7574 + *
7575 + * Atomically sets the value of @v to @i.
7576 + */
7577 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7578 +{
7579 + v->counter = i;
7580 +}
7581 +
7582 +/**
7583 * atomic_add - add integer to atomic variable
7584 * @i: integer value to add
7585 * @v: pointer of type atomic_t
7586 @@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *
7587 */
7588 static inline void atomic_add(int i, atomic_t *v)
7589 {
7590 - asm volatile(LOCK_PREFIX "addl %1,%0"
7591 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7592 +
7593 +#ifdef CONFIG_PAX_REFCOUNT
7594 + "jno 0f\n"
7595 + LOCK_PREFIX "subl %1,%0\n"
7596 + "int $4\n0:\n"
7597 + _ASM_EXTABLE(0b, 0b)
7598 +#endif
7599 +
7600 + : "=m" (v->counter)
7601 + : "ir" (i), "m" (v->counter));
7602 +}
7603 +
7604 +/**
7605 + * atomic_add_unchecked - add integer to atomic variable
7606 + * @i: integer value to add
7607 + * @v: pointer of type atomic_unchecked_t
7608 + *
7609 + * Atomically adds @i to @v.
7610 + */
7611 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7612 +{
7613 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7614 : "=m" (v->counter)
7615 : "ir" (i), "m" (v->counter));
7616 }
7617 @@ -58,7 +103,29 @@ static inline void atomic_add(int i, ato
7618 */
7619 static inline void atomic_sub(int i, atomic_t *v)
7620 {
7621 - asm volatile(LOCK_PREFIX "subl %1,%0"
7622 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7623 +
7624 +#ifdef CONFIG_PAX_REFCOUNT
7625 + "jno 0f\n"
7626 + LOCK_PREFIX "addl %1,%0\n"
7627 + "int $4\n0:\n"
7628 + _ASM_EXTABLE(0b, 0b)
7629 +#endif
7630 +
7631 + : "=m" (v->counter)
7632 + : "ir" (i), "m" (v->counter));
7633 +}
7634 +
7635 +/**
7636 + * atomic_sub_unchecked - subtract the atomic variable
7637 + * @i: integer value to subtract
7638 + * @v: pointer of type atomic_unchecked_t
7639 + *
7640 + * Atomically subtracts @i from @v.
7641 + */
7642 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7643 +{
7644 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7645 : "=m" (v->counter)
7646 : "ir" (i), "m" (v->counter));
7647 }
7648 @@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(in
7649 {
7650 unsigned char c;
7651
7652 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7653 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
7654 +
7655 +#ifdef CONFIG_PAX_REFCOUNT
7656 + "jno 0f\n"
7657 + LOCK_PREFIX "addl %2,%0\n"
7658 + "int $4\n0:\n"
7659 + _ASM_EXTABLE(0b, 0b)
7660 +#endif
7661 +
7662 + "sete %1\n"
7663 : "=m" (v->counter), "=qm" (c)
7664 : "ir" (i), "m" (v->counter) : "memory");
7665 return c;
7666 @@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(in
7667 */
7668 static inline void atomic_inc(atomic_t *v)
7669 {
7670 - asm volatile(LOCK_PREFIX "incl %0"
7671 + asm volatile(LOCK_PREFIX "incl %0\n"
7672 +
7673 +#ifdef CONFIG_PAX_REFCOUNT
7674 + "jno 0f\n"
7675 + LOCK_PREFIX "decl %0\n"
7676 + "int $4\n0:\n"
7677 + _ASM_EXTABLE(0b, 0b)
7678 +#endif
7679 +
7680 + : "=m" (v->counter)
7681 + : "m" (v->counter));
7682 +}
7683 +
7684 +/**
7685 + * atomic_inc_unchecked - increment atomic variable
7686 + * @v: pointer of type atomic_unchecked_t
7687 + *
7688 + * Atomically increments @v by 1.
7689 + */
7690 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7691 +{
7692 + asm volatile(LOCK_PREFIX "incl %0\n"
7693 : "=m" (v->counter)
7694 : "m" (v->counter));
7695 }
7696 @@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *
7697 */
7698 static inline void atomic_dec(atomic_t *v)
7699 {
7700 - asm volatile(LOCK_PREFIX "decl %0"
7701 + asm volatile(LOCK_PREFIX "decl %0\n"
7702 +
7703 +#ifdef CONFIG_PAX_REFCOUNT
7704 + "jno 0f\n"
7705 + LOCK_PREFIX "incl %0\n"
7706 + "int $4\n0:\n"
7707 + _ASM_EXTABLE(0b, 0b)
7708 +#endif
7709 +
7710 + : "=m" (v->counter)
7711 + : "m" (v->counter));
7712 +}
7713 +
7714 +/**
7715 + * atomic_dec_unchecked - decrement atomic variable
7716 + * @v: pointer of type atomic_unchecked_t
7717 + *
7718 + * Atomically decrements @v by 1.
7719 + */
7720 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7721 +{
7722 + asm volatile(LOCK_PREFIX "decl %0\n"
7723 : "=m" (v->counter)
7724 : "m" (v->counter));
7725 }
7726 @@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(at
7727 {
7728 unsigned char c;
7729
7730 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
7731 + asm volatile(LOCK_PREFIX "decl %0\n"
7732 +
7733 +#ifdef CONFIG_PAX_REFCOUNT
7734 + "jno 0f\n"
7735 + LOCK_PREFIX "incl %0\n"
7736 + "int $4\n0:\n"
7737 + _ASM_EXTABLE(0b, 0b)
7738 +#endif
7739 +
7740 + "sete %1\n"
7741 : "=m" (v->counter), "=qm" (c)
7742 : "m" (v->counter) : "memory");
7743 return c != 0;
7744 @@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(at
7745 {
7746 unsigned char c;
7747
7748 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
7749 + asm volatile(LOCK_PREFIX "incl %0\n"
7750 +
7751 +#ifdef CONFIG_PAX_REFCOUNT
7752 + "jno 0f\n"
7753 + LOCK_PREFIX "decl %0\n"
7754 + "int $4\n0:\n"
7755 + _ASM_EXTABLE(0b, 0b)
7756 +#endif
7757 +
7758 + "sete %1\n"
7759 + : "=m" (v->counter), "=qm" (c)
7760 + : "m" (v->counter) : "memory");
7761 + return c != 0;
7762 +}
7763 +
7764 +/**
7765 + * atomic_inc_and_test_unchecked - increment and test
7766 + * @v: pointer of type atomic_unchecked_t
7767 + *
7768 + * Atomically increments @v by 1
7769 + * and returns true if the result is zero, or false for all
7770 + * other cases.
7771 + */
7772 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7773 +{
7774 + unsigned char c;
7775 +
7776 + asm volatile(LOCK_PREFIX "incl %0\n"
7777 + "sete %1\n"
7778 : "=m" (v->counter), "=qm" (c)
7779 : "m" (v->counter) : "memory");
7780 return c != 0;
7781 @@ -157,7 +312,16 @@ static inline int atomic_add_negative(in
7782 {
7783 unsigned char c;
7784
7785 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7786 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
7787 +
7788 +#ifdef CONFIG_PAX_REFCOUNT
7789 + "jno 0f\n"
7790 + LOCK_PREFIX "subl %2,%0\n"
7791 + "int $4\n0:\n"
7792 + _ASM_EXTABLE(0b, 0b)
7793 +#endif
7794 +
7795 + "sets %1\n"
7796 : "=m" (v->counter), "=qm" (c)
7797 : "ir" (i), "m" (v->counter) : "memory");
7798 return c;
7799 @@ -173,7 +337,31 @@ static inline int atomic_add_negative(in
7800 static inline int atomic_add_return(int i, atomic_t *v)
7801 {
7802 int __i = i;
7803 - asm volatile(LOCK_PREFIX "xaddl %0, %1"
7804 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7805 +
7806 +#ifdef CONFIG_PAX_REFCOUNT
7807 + "jno 0f\n"
7808 + "movl %0, %1\n"
7809 + "int $4\n0:\n"
7810 + _ASM_EXTABLE(0b, 0b)
7811 +#endif
7812 +
7813 + : "+r" (i), "+m" (v->counter)
7814 + : : "memory");
7815 + return i + __i;
7816 +}
7817 +
7818 +/**
7819 + * atomic_add_return_unchecked - add and return
7820 + * @i: integer value to add
7821 + * @v: pointer of type atomic_unchecked_t
7822 + *
7823 + * Atomically adds @i to @v and returns @i + @v
7824 + */
7825 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7826 +{
7827 + int __i = i;
7828 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7829 : "+r" (i), "+m" (v->counter)
7830 : : "memory");
7831 return i + __i;
7832 @@ -185,6 +373,10 @@ static inline int atomic_sub_return(int
7833 }
7834
7835 #define atomic_inc_return(v) (atomic_add_return(1, v))
7836 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7837 +{
7838 + return atomic_add_return_unchecked(1, v);
7839 +}
7840 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7841
7842 /* The 64-bit atomic type */
7843 @@ -204,6 +396,18 @@ static inline long atomic64_read(const a
7844 }
7845
7846 /**
7847 + * atomic64_read_unchecked - read atomic64 variable
7848 + * @v: pointer of type atomic64_unchecked_t
7849 + *
7850 + * Atomically reads the value of @v.
7851 + * Doesn't imply a read memory barrier.
7852 + */
7853 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7854 +{
7855 + return v->counter;
7856 +}
7857 +
7858 +/**
7859 * atomic64_set - set atomic64 variable
7860 * @v: pointer to type atomic64_t
7861 * @i: required value
7862 @@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64
7863 }
7864
7865 /**
7866 + * atomic64_set_unchecked - set atomic64 variable
7867 + * @v: pointer to type atomic64_unchecked_t
7868 + * @i: required value
7869 + *
7870 + * Atomically sets the value of @v to @i.
7871 + */
7872 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7873 +{
7874 + v->counter = i;
7875 +}
7876 +
7877 +/**
7878 * atomic64_add - add integer to atomic64 variable
7879 * @i: integer value to add
7880 * @v: pointer to type atomic64_t
7881 @@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64
7882 */
7883 static inline void atomic64_add(long i, atomic64_t *v)
7884 {
7885 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
7886 +
7887 +#ifdef CONFIG_PAX_REFCOUNT
7888 + "jno 0f\n"
7889 + LOCK_PREFIX "subq %1,%0\n"
7890 + "int $4\n0:\n"
7891 + _ASM_EXTABLE(0b, 0b)
7892 +#endif
7893 +
7894 + : "=m" (v->counter)
7895 + : "er" (i), "m" (v->counter));
7896 +}
7897 +
7898 +/**
7899 + * atomic64_add_unchecked - add integer to atomic64 variable
7900 + * @i: integer value to add
7901 + * @v: pointer to type atomic64_unchecked_t
7902 + *
7903 + * Atomically adds @i to @v.
7904 + */
7905 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
7906 +{
7907 asm volatile(LOCK_PREFIX "addq %1,%0"
7908 : "=m" (v->counter)
7909 : "er" (i), "m" (v->counter));
7910 @@ -238,7 +476,15 @@ static inline void atomic64_add(long i,
7911 */
7912 static inline void atomic64_sub(long i, atomic64_t *v)
7913 {
7914 - asm volatile(LOCK_PREFIX "subq %1,%0"
7915 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
7916 +
7917 +#ifdef CONFIG_PAX_REFCOUNT
7918 + "jno 0f\n"
7919 + LOCK_PREFIX "addq %1,%0\n"
7920 + "int $4\n0:\n"
7921 + _ASM_EXTABLE(0b, 0b)
7922 +#endif
7923 +
7924 : "=m" (v->counter)
7925 : "er" (i), "m" (v->counter));
7926 }
7927 @@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(
7928 {
7929 unsigned char c;
7930
7931 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
7932 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
7933 +
7934 +#ifdef CONFIG_PAX_REFCOUNT
7935 + "jno 0f\n"
7936 + LOCK_PREFIX "addq %2,%0\n"
7937 + "int $4\n0:\n"
7938 + _ASM_EXTABLE(0b, 0b)
7939 +#endif
7940 +
7941 + "sete %1\n"
7942 : "=m" (v->counter), "=qm" (c)
7943 : "er" (i), "m" (v->counter) : "memory");
7944 return c;
7945 @@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(
7946 */
7947 static inline void atomic64_inc(atomic64_t *v)
7948 {
7949 + asm volatile(LOCK_PREFIX "incq %0\n"
7950 +
7951 +#ifdef CONFIG_PAX_REFCOUNT
7952 + "jno 0f\n"
7953 + LOCK_PREFIX "decq %0\n"
7954 + "int $4\n0:\n"
7955 + _ASM_EXTABLE(0b, 0b)
7956 +#endif
7957 +
7958 + : "=m" (v->counter)
7959 + : "m" (v->counter));
7960 +}
7961 +
7962 +/**
7963 + * atomic64_inc_unchecked - increment atomic64 variable
7964 + * @v: pointer to type atomic64_unchecked_t
7965 + *
7966 + * Atomically increments @v by 1.
7967 + */
7968 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7969 +{
7970 asm volatile(LOCK_PREFIX "incq %0"
7971 : "=m" (v->counter)
7972 : "m" (v->counter));
7973 @@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64
7974 */
7975 static inline void atomic64_dec(atomic64_t *v)
7976 {
7977 - asm volatile(LOCK_PREFIX "decq %0"
7978 + asm volatile(LOCK_PREFIX "decq %0\n"
7979 +
7980 +#ifdef CONFIG_PAX_REFCOUNT
7981 + "jno 0f\n"
7982 + LOCK_PREFIX "incq %0\n"
7983 + "int $4\n0:\n"
7984 + _ASM_EXTABLE(0b, 0b)
7985 +#endif
7986 +
7987 + : "=m" (v->counter)
7988 + : "m" (v->counter));
7989 +}
7990 +
7991 +/**
7992 + * atomic64_dec_unchecked - decrement atomic64 variable
7993 + * @v: pointer to type atomic64_unchecked_t
7994 + *
7995 + * Atomically decrements @v by 1.
7996 + */
7997 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7998 +{
7999 + asm volatile(LOCK_PREFIX "decq %0\n"
8000 : "=m" (v->counter)
8001 : "m" (v->counter));
8002 }
8003 @@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(
8004 {
8005 unsigned char c;
8006
8007 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
8008 + asm volatile(LOCK_PREFIX "decq %0\n"
8009 +
8010 +#ifdef CONFIG_PAX_REFCOUNT
8011 + "jno 0f\n"
8012 + LOCK_PREFIX "incq %0\n"
8013 + "int $4\n0:\n"
8014 + _ASM_EXTABLE(0b, 0b)
8015 +#endif
8016 +
8017 + "sete %1\n"
8018 : "=m" (v->counter), "=qm" (c)
8019 : "m" (v->counter) : "memory");
8020 return c != 0;
8021 @@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(
8022 {
8023 unsigned char c;
8024
8025 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
8026 + asm volatile(LOCK_PREFIX "incq %0\n"
8027 +
8028 +#ifdef CONFIG_PAX_REFCOUNT
8029 + "jno 0f\n"
8030 + LOCK_PREFIX "decq %0\n"
8031 + "int $4\n0:\n"
8032 + _ASM_EXTABLE(0b, 0b)
8033 +#endif
8034 +
8035 + "sete %1\n"
8036 : "=m" (v->counter), "=qm" (c)
8037 : "m" (v->counter) : "memory");
8038 return c != 0;
8039 @@ -337,7 +652,16 @@ static inline int atomic64_add_negative(
8040 {
8041 unsigned char c;
8042
8043 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
8044 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
8045 +
8046 +#ifdef CONFIG_PAX_REFCOUNT
8047 + "jno 0f\n"
8048 + LOCK_PREFIX "subq %2,%0\n"
8049 + "int $4\n0:\n"
8050 + _ASM_EXTABLE(0b, 0b)
8051 +#endif
8052 +
8053 + "sets %1\n"
8054 : "=m" (v->counter), "=qm" (c)
8055 : "er" (i), "m" (v->counter) : "memory");
8056 return c;
8057 @@ -353,7 +677,31 @@ static inline int atomic64_add_negative(
8058 static inline long atomic64_add_return(long i, atomic64_t *v)
8059 {
8060 long __i = i;
8061 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
8062 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
8063 +
8064 +#ifdef CONFIG_PAX_REFCOUNT
8065 + "jno 0f\n"
8066 + "movq %0, %1\n"
8067 + "int $4\n0:\n"
8068 + _ASM_EXTABLE(0b, 0b)
8069 +#endif
8070 +
8071 + : "+r" (i), "+m" (v->counter)
8072 + : : "memory");
8073 + return i + __i;
8074 +}
8075 +
8076 +/**
8077 + * atomic64_add_return_unchecked - add and return
8078 + * @i: integer value to add
8079 + * @v: pointer to type atomic64_unchecked_t
8080 + *
8081 + * Atomically adds @i to @v and returns @i + @v
8082 + */
8083 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
8084 +{
8085 + long __i = i;
8086 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
8087 : "+r" (i), "+m" (v->counter)
8088 : : "memory");
8089 return i + __i;
8090 @@ -365,6 +713,10 @@ static inline long atomic64_sub_return(l
8091 }
8092
8093 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
8094 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8095 +{
8096 + return atomic64_add_return_unchecked(1, v);
8097 +}
8098 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
8099
8100 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
8101 @@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atom
8102 return cmpxchg(&v->counter, old, new);
8103 }
8104
8105 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8106 +{
8107 + return cmpxchg(&v->counter, old, new);
8108 +}
8109 +
8110 static inline long atomic64_xchg(atomic64_t *v, long new)
8111 {
8112 return xchg(&v->counter, new);
8113 }
8114
8115 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
8116 +{
8117 + return xchg(&v->counter, new);
8118 +}
8119 +
8120 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
8121 {
8122 return cmpxchg(&v->counter, old, new);
8123 }
8124
8125 +static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8126 +{
8127 + return cmpxchg(&v->counter, old, new);
8128 +}
8129 +
8130 static inline long atomic_xchg(atomic_t *v, int new)
8131 {
8132 return xchg(&v->counter, new);
8133 }
8134
8135 +static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8136 +{
8137 + return xchg(&v->counter, new);
8138 +}
8139 +
8140 /**
8141 * atomic_add_unless - add unless the number is a given value
8142 * @v: pointer of type atomic_t
8143 @@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t
8144 */
8145 static inline int atomic_add_unless(atomic_t *v, int a, int u)
8146 {
8147 - int c, old;
8148 + int c, old, new;
8149 c = atomic_read(v);
8150 for (;;) {
8151 - if (unlikely(c == (u)))
8152 + if (unlikely(c == u))
8153 break;
8154 - old = atomic_cmpxchg((v), c, c + (a));
8155 +
8156 + asm volatile("addl %2,%0\n"
8157 +
8158 +#ifdef CONFIG_PAX_REFCOUNT
8159 + "jno 0f\n"
8160 + "subl %2,%0\n"
8161 + "int $4\n0:\n"
8162 + _ASM_EXTABLE(0b, 0b)
8163 +#endif
8164 +
8165 + : "=r" (new)
8166 + : "0" (c), "ir" (a));
8167 +
8168 + old = atomic_cmpxchg(v, c, new);
8169 if (likely(old == c))
8170 break;
8171 c = old;
8172 }
8173 - return c != (u);
8174 + return c != u;
8175 }
8176
8177 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
8178 @@ -424,17 +809,30 @@ static inline int atomic_add_unless(atom
8179 */
8180 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
8181 {
8182 - long c, old;
8183 + long c, old, new;
8184 c = atomic64_read(v);
8185 for (;;) {
8186 - if (unlikely(c == (u)))
8187 + if (unlikely(c == u))
8188 break;
8189 - old = atomic64_cmpxchg((v), c, c + (a));
8190 +
8191 + asm volatile("addq %2,%0\n"
8192 +
8193 +#ifdef CONFIG_PAX_REFCOUNT
8194 + "jno 0f\n"
8195 + "subq %2,%0\n"
8196 + "int $4\n0:\n"
8197 + _ASM_EXTABLE(0b, 0b)
8198 +#endif
8199 +
8200 + : "=r" (new)
8201 + : "0" (c), "er" (a));
8202 +
8203 + old = atomic64_cmpxchg(v, c, new);
8204 if (likely(old == c))
8205 break;
8206 c = old;
8207 }
8208 - return c != (u);
8209 + return c != u;
8210 }
8211
8212 /**
8213 diff -urNp linux-2.6.32.45/arch/x86/include/asm/bitops.h linux-2.6.32.45/arch/x86/include/asm/bitops.h
8214 --- linux-2.6.32.45/arch/x86/include/asm/bitops.h 2011-03-27 14:31:47.000000000 -0400
8215 +++ linux-2.6.32.45/arch/x86/include/asm/bitops.h 2011-04-17 15:56:46.000000000 -0400
8216 @@ -38,7 +38,7 @@
8217 * a mask operation on a byte.
8218 */
8219 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
8220 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
8221 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
8222 #define CONST_MASK(nr) (1 << ((nr) & 7))
8223
8224 /**
8225 diff -urNp linux-2.6.32.45/arch/x86/include/asm/boot.h linux-2.6.32.45/arch/x86/include/asm/boot.h
8226 --- linux-2.6.32.45/arch/x86/include/asm/boot.h 2011-03-27 14:31:47.000000000 -0400
8227 +++ linux-2.6.32.45/arch/x86/include/asm/boot.h 2011-04-17 15:56:46.000000000 -0400
8228 @@ -11,10 +11,15 @@
8229 #include <asm/pgtable_types.h>
8230
8231 /* Physical address where kernel should be loaded. */
8232 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8233 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8234 + (CONFIG_PHYSICAL_ALIGN - 1)) \
8235 & ~(CONFIG_PHYSICAL_ALIGN - 1))
8236
8237 +#ifndef __ASSEMBLY__
8238 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
8239 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
8240 +#endif
8241 +
8242 /* Minimum kernel alignment, as a power of two */
8243 #ifdef CONFIG_X86_64
8244 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
8245 diff -urNp linux-2.6.32.45/arch/x86/include/asm/cacheflush.h linux-2.6.32.45/arch/x86/include/asm/cacheflush.h
8246 --- linux-2.6.32.45/arch/x86/include/asm/cacheflush.h 2011-03-27 14:31:47.000000000 -0400
8247 +++ linux-2.6.32.45/arch/x86/include/asm/cacheflush.h 2011-04-17 15:56:46.000000000 -0400
8248 @@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
8249 static inline unsigned long get_page_memtype(struct page *pg)
8250 {
8251 if (!PageUncached(pg) && !PageWC(pg))
8252 - return -1;
8253 + return ~0UL;
8254 else if (!PageUncached(pg) && PageWC(pg))
8255 return _PAGE_CACHE_WC;
8256 else if (PageUncached(pg) && !PageWC(pg))
8257 @@ -85,7 +85,7 @@ static inline void set_page_memtype(stru
8258 SetPageWC(pg);
8259 break;
8260 default:
8261 - case -1:
8262 + case ~0UL:
8263 ClearPageUncached(pg);
8264 ClearPageWC(pg);
8265 break;
8266 diff -urNp linux-2.6.32.45/arch/x86/include/asm/cache.h linux-2.6.32.45/arch/x86/include/asm/cache.h
8267 --- linux-2.6.32.45/arch/x86/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
8268 +++ linux-2.6.32.45/arch/x86/include/asm/cache.h 2011-07-06 19:53:33.000000000 -0400
8269 @@ -5,9 +5,10 @@
8270
8271 /* L1 cache line size */
8272 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8273 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8274 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8275
8276 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
8277 +#define __read_only __attribute__((__section__(".data.read_only")))
8278
8279 #ifdef CONFIG_X86_VSMP
8280 /* vSMP Internode cacheline shift */
8281 diff -urNp linux-2.6.32.45/arch/x86/include/asm/checksum_32.h linux-2.6.32.45/arch/x86/include/asm/checksum_32.h
8282 --- linux-2.6.32.45/arch/x86/include/asm/checksum_32.h 2011-03-27 14:31:47.000000000 -0400
8283 +++ linux-2.6.32.45/arch/x86/include/asm/checksum_32.h 2011-04-17 15:56:46.000000000 -0400
8284 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
8285 int len, __wsum sum,
8286 int *src_err_ptr, int *dst_err_ptr);
8287
8288 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
8289 + int len, __wsum sum,
8290 + int *src_err_ptr, int *dst_err_ptr);
8291 +
8292 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
8293 + int len, __wsum sum,
8294 + int *src_err_ptr, int *dst_err_ptr);
8295 +
8296 /*
8297 * Note: when you get a NULL pointer exception here this means someone
8298 * passed in an incorrect kernel address to one of these functions.
8299 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
8300 int *err_ptr)
8301 {
8302 might_sleep();
8303 - return csum_partial_copy_generic((__force void *)src, dst,
8304 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
8305 len, sum, err_ptr, NULL);
8306 }
8307
8308 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
8309 {
8310 might_sleep();
8311 if (access_ok(VERIFY_WRITE, dst, len))
8312 - return csum_partial_copy_generic(src, (__force void *)dst,
8313 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
8314 len, sum, NULL, err_ptr);
8315
8316 if (len)
8317 diff -urNp linux-2.6.32.45/arch/x86/include/asm/desc_defs.h linux-2.6.32.45/arch/x86/include/asm/desc_defs.h
8318 --- linux-2.6.32.45/arch/x86/include/asm/desc_defs.h 2011-03-27 14:31:47.000000000 -0400
8319 +++ linux-2.6.32.45/arch/x86/include/asm/desc_defs.h 2011-04-17 15:56:46.000000000 -0400
8320 @@ -31,6 +31,12 @@ struct desc_struct {
8321 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
8322 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
8323 };
8324 + struct {
8325 + u16 offset_low;
8326 + u16 seg;
8327 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
8328 + unsigned offset_high: 16;
8329 + } gate;
8330 };
8331 } __attribute__((packed));
8332
8333 diff -urNp linux-2.6.32.45/arch/x86/include/asm/desc.h linux-2.6.32.45/arch/x86/include/asm/desc.h
8334 --- linux-2.6.32.45/arch/x86/include/asm/desc.h 2011-03-27 14:31:47.000000000 -0400
8335 +++ linux-2.6.32.45/arch/x86/include/asm/desc.h 2011-04-23 12:56:10.000000000 -0400
8336 @@ -4,6 +4,7 @@
8337 #include <asm/desc_defs.h>
8338 #include <asm/ldt.h>
8339 #include <asm/mmu.h>
8340 +#include <asm/pgtable.h>
8341 #include <linux/smp.h>
8342
8343 static inline void fill_ldt(struct desc_struct *desc,
8344 @@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_
8345 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
8346 desc->type = (info->read_exec_only ^ 1) << 1;
8347 desc->type |= info->contents << 2;
8348 + desc->type |= info->seg_not_present ^ 1;
8349 desc->s = 1;
8350 desc->dpl = 0x3;
8351 desc->p = info->seg_not_present ^ 1;
8352 @@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_
8353 }
8354
8355 extern struct desc_ptr idt_descr;
8356 -extern gate_desc idt_table[];
8357 -
8358 -struct gdt_page {
8359 - struct desc_struct gdt[GDT_ENTRIES];
8360 -} __attribute__((aligned(PAGE_SIZE)));
8361 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
8362 +extern gate_desc idt_table[256];
8363
8364 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
8365 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
8366 {
8367 - return per_cpu(gdt_page, cpu).gdt;
8368 + return cpu_gdt_table[cpu];
8369 }
8370
8371 #ifdef CONFIG_X86_64
8372 @@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *
8373 unsigned long base, unsigned dpl, unsigned flags,
8374 unsigned short seg)
8375 {
8376 - gate->a = (seg << 16) | (base & 0xffff);
8377 - gate->b = (base & 0xffff0000) |
8378 - (((0x80 | type | (dpl << 5)) & 0xff) << 8);
8379 + gate->gate.offset_low = base;
8380 + gate->gate.seg = seg;
8381 + gate->gate.reserved = 0;
8382 + gate->gate.type = type;
8383 + gate->gate.s = 0;
8384 + gate->gate.dpl = dpl;
8385 + gate->gate.p = 1;
8386 + gate->gate.offset_high = base >> 16;
8387 }
8388
8389 #endif
8390 @@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(str
8391 static inline void native_write_idt_entry(gate_desc *idt, int entry,
8392 const gate_desc *gate)
8393 {
8394 + pax_open_kernel();
8395 memcpy(&idt[entry], gate, sizeof(*gate));
8396 + pax_close_kernel();
8397 }
8398
8399 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
8400 const void *desc)
8401 {
8402 + pax_open_kernel();
8403 memcpy(&ldt[entry], desc, 8);
8404 + pax_close_kernel();
8405 }
8406
8407 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
8408 @@ -139,7 +146,10 @@ static inline void native_write_gdt_entr
8409 size = sizeof(struct desc_struct);
8410 break;
8411 }
8412 +
8413 + pax_open_kernel();
8414 memcpy(&gdt[entry], desc, size);
8415 + pax_close_kernel();
8416 }
8417
8418 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
8419 @@ -211,7 +221,9 @@ static inline void native_set_ldt(const
8420
8421 static inline void native_load_tr_desc(void)
8422 {
8423 + pax_open_kernel();
8424 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
8425 + pax_close_kernel();
8426 }
8427
8428 static inline void native_load_gdt(const struct desc_ptr *dtr)
8429 @@ -246,8 +258,10 @@ static inline void native_load_tls(struc
8430 unsigned int i;
8431 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
8432
8433 + pax_open_kernel();
8434 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8435 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
8436 + pax_close_kernel();
8437 }
8438
8439 #define _LDT_empty(info) \
8440 @@ -309,7 +323,7 @@ static inline void set_desc_limit(struct
8441 desc->limit = (limit >> 16) & 0xf;
8442 }
8443
8444 -static inline void _set_gate(int gate, unsigned type, void *addr,
8445 +static inline void _set_gate(int gate, unsigned type, const void *addr,
8446 unsigned dpl, unsigned ist, unsigned seg)
8447 {
8448 gate_desc s;
8449 @@ -327,7 +341,7 @@ static inline void _set_gate(int gate, u
8450 * Pentium F0 0F bugfix can have resulted in the mapped
8451 * IDT being write-protected.
8452 */
8453 -static inline void set_intr_gate(unsigned int n, void *addr)
8454 +static inline void set_intr_gate(unsigned int n, const void *addr)
8455 {
8456 BUG_ON((unsigned)n > 0xFF);
8457 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
8458 @@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsig
8459 /*
8460 * This routine sets up an interrupt gate at directory privilege level 3.
8461 */
8462 -static inline void set_system_intr_gate(unsigned int n, void *addr)
8463 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
8464 {
8465 BUG_ON((unsigned)n > 0xFF);
8466 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8467 }
8468
8469 -static inline void set_system_trap_gate(unsigned int n, void *addr)
8470 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
8471 {
8472 BUG_ON((unsigned)n > 0xFF);
8473 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8474 }
8475
8476 -static inline void set_trap_gate(unsigned int n, void *addr)
8477 +static inline void set_trap_gate(unsigned int n, const void *addr)
8478 {
8479 BUG_ON((unsigned)n > 0xFF);
8480 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8481 @@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigne
8482 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8483 {
8484 BUG_ON((unsigned)n > 0xFF);
8485 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8486 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8487 }
8488
8489 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8490 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8491 {
8492 BUG_ON((unsigned)n > 0xFF);
8493 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8494 }
8495
8496 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8497 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8498 {
8499 BUG_ON((unsigned)n > 0xFF);
8500 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8501 }
8502
8503 +#ifdef CONFIG_X86_32
8504 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8505 +{
8506 + struct desc_struct d;
8507 +
8508 + if (likely(limit))
8509 + limit = (limit - 1UL) >> PAGE_SHIFT;
8510 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
8511 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8512 +}
8513 +#endif
8514 +
8515 #endif /* _ASM_X86_DESC_H */
8516 diff -urNp linux-2.6.32.45/arch/x86/include/asm/device.h linux-2.6.32.45/arch/x86/include/asm/device.h
8517 --- linux-2.6.32.45/arch/x86/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
8518 +++ linux-2.6.32.45/arch/x86/include/asm/device.h 2011-04-17 15:56:46.000000000 -0400
8519 @@ -6,7 +6,7 @@ struct dev_archdata {
8520 void *acpi_handle;
8521 #endif
8522 #ifdef CONFIG_X86_64
8523 -struct dma_map_ops *dma_ops;
8524 + const struct dma_map_ops *dma_ops;
8525 #endif
8526 #ifdef CONFIG_DMAR
8527 void *iommu; /* hook for IOMMU specific extension */
8528 diff -urNp linux-2.6.32.45/arch/x86/include/asm/dma-mapping.h linux-2.6.32.45/arch/x86/include/asm/dma-mapping.h
8529 --- linux-2.6.32.45/arch/x86/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
8530 +++ linux-2.6.32.45/arch/x86/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
8531 @@ -25,9 +25,9 @@ extern int iommu_merge;
8532 extern struct device x86_dma_fallback_dev;
8533 extern int panic_on_overflow;
8534
8535 -extern struct dma_map_ops *dma_ops;
8536 +extern const struct dma_map_ops *dma_ops;
8537
8538 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
8539 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
8540 {
8541 #ifdef CONFIG_X86_32
8542 return dma_ops;
8543 @@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dm
8544 /* Make sure we keep the same behaviour */
8545 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
8546 {
8547 - struct dma_map_ops *ops = get_dma_ops(dev);
8548 + const struct dma_map_ops *ops = get_dma_ops(dev);
8549 if (ops->mapping_error)
8550 return ops->mapping_error(dev, dma_addr);
8551
8552 @@ -122,7 +122,7 @@ static inline void *
8553 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
8554 gfp_t gfp)
8555 {
8556 - struct dma_map_ops *ops = get_dma_ops(dev);
8557 + const struct dma_map_ops *ops = get_dma_ops(dev);
8558 void *memory;
8559
8560 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
8561 @@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, s
8562 static inline void dma_free_coherent(struct device *dev, size_t size,
8563 void *vaddr, dma_addr_t bus)
8564 {
8565 - struct dma_map_ops *ops = get_dma_ops(dev);
8566 + const struct dma_map_ops *ops = get_dma_ops(dev);
8567
8568 WARN_ON(irqs_disabled()); /* for portability */
8569
8570 diff -urNp linux-2.6.32.45/arch/x86/include/asm/e820.h linux-2.6.32.45/arch/x86/include/asm/e820.h
8571 --- linux-2.6.32.45/arch/x86/include/asm/e820.h 2011-03-27 14:31:47.000000000 -0400
8572 +++ linux-2.6.32.45/arch/x86/include/asm/e820.h 2011-04-17 15:56:46.000000000 -0400
8573 @@ -133,7 +133,7 @@ extern char *default_machine_specific_me
8574 #define ISA_END_ADDRESS 0x100000
8575 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
8576
8577 -#define BIOS_BEGIN 0x000a0000
8578 +#define BIOS_BEGIN 0x000c0000
8579 #define BIOS_END 0x00100000
8580
8581 #ifdef __KERNEL__
8582 diff -urNp linux-2.6.32.45/arch/x86/include/asm/elf.h linux-2.6.32.45/arch/x86/include/asm/elf.h
8583 --- linux-2.6.32.45/arch/x86/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
8584 +++ linux-2.6.32.45/arch/x86/include/asm/elf.h 2011-08-23 20:24:19.000000000 -0400
8585 @@ -257,7 +257,25 @@ extern int force_personality32;
8586 the loader. We need to make sure that it is out of the way of the program
8587 that it will "exec", and that there is sufficient room for the brk. */
8588
8589 +#ifdef CONFIG_PAX_SEGMEXEC
8590 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8591 +#else
8592 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8593 +#endif
8594 +
8595 +#ifdef CONFIG_PAX_ASLR
8596 +#ifdef CONFIG_X86_32
8597 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8598 +
8599 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8600 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8601 +#else
8602 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
8603 +
8604 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8605 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8606 +#endif
8607 +#endif
8608
8609 /* This yields a mask that user programs can use to figure out what
8610 instruction set this CPU supports. This could be done in user space,
8611 @@ -310,9 +328,7 @@ do { \
8612
8613 #define ARCH_DLINFO \
8614 do { \
8615 - if (vdso_enabled) \
8616 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8617 - (unsigned long)current->mm->context.vdso); \
8618 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
8619 } while (0)
8620
8621 #define AT_SYSINFO 32
8622 @@ -323,7 +339,7 @@ do { \
8623
8624 #endif /* !CONFIG_X86_32 */
8625
8626 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8627 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8628
8629 #define VDSO_ENTRY \
8630 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8631 @@ -337,7 +353,4 @@ extern int arch_setup_additional_pages(s
8632 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8633 #define compat_arch_setup_additional_pages syscall32_setup_pages
8634
8635 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8636 -#define arch_randomize_brk arch_randomize_brk
8637 -
8638 #endif /* _ASM_X86_ELF_H */
8639 diff -urNp linux-2.6.32.45/arch/x86/include/asm/emergency-restart.h linux-2.6.32.45/arch/x86/include/asm/emergency-restart.h
8640 --- linux-2.6.32.45/arch/x86/include/asm/emergency-restart.h 2011-03-27 14:31:47.000000000 -0400
8641 +++ linux-2.6.32.45/arch/x86/include/asm/emergency-restart.h 2011-05-22 23:02:06.000000000 -0400
8642 @@ -15,6 +15,6 @@ enum reboot_type {
8643
8644 extern enum reboot_type reboot_type;
8645
8646 -extern void machine_emergency_restart(void);
8647 +extern void machine_emergency_restart(void) __noreturn;
8648
8649 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8650 diff -urNp linux-2.6.32.45/arch/x86/include/asm/futex.h linux-2.6.32.45/arch/x86/include/asm/futex.h
8651 --- linux-2.6.32.45/arch/x86/include/asm/futex.h 2011-03-27 14:31:47.000000000 -0400
8652 +++ linux-2.6.32.45/arch/x86/include/asm/futex.h 2011-04-17 15:56:46.000000000 -0400
8653 @@ -12,16 +12,18 @@
8654 #include <asm/system.h>
8655
8656 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8657 + typecheck(u32 *, uaddr); \
8658 asm volatile("1:\t" insn "\n" \
8659 "2:\t.section .fixup,\"ax\"\n" \
8660 "3:\tmov\t%3, %1\n" \
8661 "\tjmp\t2b\n" \
8662 "\t.previous\n" \
8663 _ASM_EXTABLE(1b, 3b) \
8664 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8665 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
8666 : "i" (-EFAULT), "0" (oparg), "1" (0))
8667
8668 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8669 + typecheck(u32 *, uaddr); \
8670 asm volatile("1:\tmovl %2, %0\n" \
8671 "\tmovl\t%0, %3\n" \
8672 "\t" insn "\n" \
8673 @@ -34,10 +36,10 @@
8674 _ASM_EXTABLE(1b, 4b) \
8675 _ASM_EXTABLE(2b, 4b) \
8676 : "=&a" (oldval), "=&r" (ret), \
8677 - "+m" (*uaddr), "=&r" (tem) \
8678 + "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
8679 : "r" (oparg), "i" (-EFAULT), "1" (0))
8680
8681 -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
8682 +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8683 {
8684 int op = (encoded_op >> 28) & 7;
8685 int cmp = (encoded_op >> 24) & 15;
8686 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
8687
8688 switch (op) {
8689 case FUTEX_OP_SET:
8690 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8691 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8692 break;
8693 case FUTEX_OP_ADD:
8694 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8695 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8696 uaddr, oparg);
8697 break;
8698 case FUTEX_OP_OR:
8699 @@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser
8700 return ret;
8701 }
8702
8703 -static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
8704 +static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
8705 int newval)
8706 {
8707
8708 @@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_i
8709 return -ENOSYS;
8710 #endif
8711
8712 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
8713 + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8714 return -EFAULT;
8715
8716 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
8717 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
8718 "2:\t.section .fixup, \"ax\"\n"
8719 "3:\tmov %2, %0\n"
8720 "\tjmp 2b\n"
8721 "\t.previous\n"
8722 _ASM_EXTABLE(1b, 3b)
8723 - : "=a" (oldval), "+m" (*uaddr)
8724 + : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
8725 : "i" (-EFAULT), "r" (newval), "0" (oldval)
8726 : "memory"
8727 );
8728 diff -urNp linux-2.6.32.45/arch/x86/include/asm/hw_irq.h linux-2.6.32.45/arch/x86/include/asm/hw_irq.h
8729 --- linux-2.6.32.45/arch/x86/include/asm/hw_irq.h 2011-03-27 14:31:47.000000000 -0400
8730 +++ linux-2.6.32.45/arch/x86/include/asm/hw_irq.h 2011-05-04 17:56:28.000000000 -0400
8731 @@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
8732 extern void enable_IO_APIC(void);
8733
8734 /* Statistics */
8735 -extern atomic_t irq_err_count;
8736 -extern atomic_t irq_mis_count;
8737 +extern atomic_unchecked_t irq_err_count;
8738 +extern atomic_unchecked_t irq_mis_count;
8739
8740 /* EISA */
8741 extern void eisa_set_level_irq(unsigned int irq);
8742 diff -urNp linux-2.6.32.45/arch/x86/include/asm/i387.h linux-2.6.32.45/arch/x86/include/asm/i387.h
8743 --- linux-2.6.32.45/arch/x86/include/asm/i387.h 2011-03-27 14:31:47.000000000 -0400
8744 +++ linux-2.6.32.45/arch/x86/include/asm/i387.h 2011-04-17 15:56:46.000000000 -0400
8745 @@ -60,6 +60,11 @@ static inline int fxrstor_checking(struc
8746 {
8747 int err;
8748
8749 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8750 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8751 + fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
8752 +#endif
8753 +
8754 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
8755 "2:\n"
8756 ".section .fixup,\"ax\"\n"
8757 @@ -105,6 +110,11 @@ static inline int fxsave_user(struct i38
8758 {
8759 int err;
8760
8761 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8762 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8763 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8764 +#endif
8765 +
8766 asm volatile("1: rex64/fxsave (%[fx])\n\t"
8767 "2:\n"
8768 ".section .fixup,\"ax\"\n"
8769 @@ -195,13 +205,8 @@ static inline int fxrstor_checking(struc
8770 }
8771
8772 /* We need a safe address that is cheap to find and that is already
8773 - in L1 during context switch. The best choices are unfortunately
8774 - different for UP and SMP */
8775 -#ifdef CONFIG_SMP
8776 -#define safe_address (__per_cpu_offset[0])
8777 -#else
8778 -#define safe_address (kstat_cpu(0).cpustat.user)
8779 -#endif
8780 + in L1 during context switch. */
8781 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
8782
8783 /*
8784 * These must be called with preempt disabled
8785 @@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void
8786 struct thread_info *me = current_thread_info();
8787 preempt_disable();
8788 if (me->status & TS_USEDFPU)
8789 - __save_init_fpu(me->task);
8790 + __save_init_fpu(current);
8791 else
8792 clts();
8793 }
8794 diff -urNp linux-2.6.32.45/arch/x86/include/asm/io_32.h linux-2.6.32.45/arch/x86/include/asm/io_32.h
8795 --- linux-2.6.32.45/arch/x86/include/asm/io_32.h 2011-03-27 14:31:47.000000000 -0400
8796 +++ linux-2.6.32.45/arch/x86/include/asm/io_32.h 2011-04-17 15:56:46.000000000 -0400
8797 @@ -3,6 +3,7 @@
8798
8799 #include <linux/string.h>
8800 #include <linux/compiler.h>
8801 +#include <asm/processor.h>
8802
8803 /*
8804 * This file contains the definitions for the x86 IO instructions
8805 @@ -42,6 +43,17 @@
8806
8807 #ifdef __KERNEL__
8808
8809 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8810 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8811 +{
8812 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8813 +}
8814 +
8815 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8816 +{
8817 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8818 +}
8819 +
8820 #include <asm-generic/iomap.h>
8821
8822 #include <linux/vmalloc.h>
8823 diff -urNp linux-2.6.32.45/arch/x86/include/asm/io_64.h linux-2.6.32.45/arch/x86/include/asm/io_64.h
8824 --- linux-2.6.32.45/arch/x86/include/asm/io_64.h 2011-03-27 14:31:47.000000000 -0400
8825 +++ linux-2.6.32.45/arch/x86/include/asm/io_64.h 2011-04-17 15:56:46.000000000 -0400
8826 @@ -140,6 +140,17 @@ __OUTS(l)
8827
8828 #include <linux/vmalloc.h>
8829
8830 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8831 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8832 +{
8833 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8834 +}
8835 +
8836 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8837 +{
8838 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8839 +}
8840 +
8841 #include <asm-generic/iomap.h>
8842
8843 void __memcpy_fromio(void *, unsigned long, unsigned);
8844 diff -urNp linux-2.6.32.45/arch/x86/include/asm/iommu.h linux-2.6.32.45/arch/x86/include/asm/iommu.h
8845 --- linux-2.6.32.45/arch/x86/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
8846 +++ linux-2.6.32.45/arch/x86/include/asm/iommu.h 2011-04-17 15:56:46.000000000 -0400
8847 @@ -3,7 +3,7 @@
8848
8849 extern void pci_iommu_shutdown(void);
8850 extern void no_iommu_init(void);
8851 -extern struct dma_map_ops nommu_dma_ops;
8852 +extern const struct dma_map_ops nommu_dma_ops;
8853 extern int force_iommu, no_iommu;
8854 extern int iommu_detected;
8855 extern int iommu_pass_through;
8856 diff -urNp linux-2.6.32.45/arch/x86/include/asm/irqflags.h linux-2.6.32.45/arch/x86/include/asm/irqflags.h
8857 --- linux-2.6.32.45/arch/x86/include/asm/irqflags.h 2011-03-27 14:31:47.000000000 -0400
8858 +++ linux-2.6.32.45/arch/x86/include/asm/irqflags.h 2011-04-17 15:56:46.000000000 -0400
8859 @@ -142,6 +142,11 @@ static inline unsigned long __raw_local_
8860 sti; \
8861 sysexit
8862
8863 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
8864 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8865 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
8866 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8867 +
8868 #else
8869 #define INTERRUPT_RETURN iret
8870 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8871 diff -urNp linux-2.6.32.45/arch/x86/include/asm/kprobes.h linux-2.6.32.45/arch/x86/include/asm/kprobes.h
8872 --- linux-2.6.32.45/arch/x86/include/asm/kprobes.h 2011-03-27 14:31:47.000000000 -0400
8873 +++ linux-2.6.32.45/arch/x86/include/asm/kprobes.h 2011-04-23 12:56:12.000000000 -0400
8874 @@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
8875 #define BREAKPOINT_INSTRUCTION 0xcc
8876 #define RELATIVEJUMP_INSTRUCTION 0xe9
8877 #define MAX_INSN_SIZE 16
8878 -#define MAX_STACK_SIZE 64
8879 -#define MIN_STACK_SIZE(ADDR) \
8880 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8881 - THREAD_SIZE - (unsigned long)(ADDR))) \
8882 - ? (MAX_STACK_SIZE) \
8883 - : (((unsigned long)current_thread_info()) + \
8884 - THREAD_SIZE - (unsigned long)(ADDR)))
8885 +#define MAX_STACK_SIZE 64UL
8886 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8887
8888 #define flush_insn_slot(p) do { } while (0)
8889
8890 diff -urNp linux-2.6.32.45/arch/x86/include/asm/kvm_host.h linux-2.6.32.45/arch/x86/include/asm/kvm_host.h
8891 --- linux-2.6.32.45/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:01.000000000 -0400
8892 +++ linux-2.6.32.45/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:26.000000000 -0400
8893 @@ -536,7 +536,7 @@ struct kvm_x86_ops {
8894 const struct trace_print_flags *exit_reasons_str;
8895 };
8896
8897 -extern struct kvm_x86_ops *kvm_x86_ops;
8898 +extern const struct kvm_x86_ops *kvm_x86_ops;
8899
8900 int kvm_mmu_module_init(void);
8901 void kvm_mmu_module_exit(void);
8902 diff -urNp linux-2.6.32.45/arch/x86/include/asm/local.h linux-2.6.32.45/arch/x86/include/asm/local.h
8903 --- linux-2.6.32.45/arch/x86/include/asm/local.h 2011-03-27 14:31:47.000000000 -0400
8904 +++ linux-2.6.32.45/arch/x86/include/asm/local.h 2011-04-17 15:56:46.000000000 -0400
8905 @@ -18,26 +18,58 @@ typedef struct {
8906
8907 static inline void local_inc(local_t *l)
8908 {
8909 - asm volatile(_ASM_INC "%0"
8910 + asm volatile(_ASM_INC "%0\n"
8911 +
8912 +#ifdef CONFIG_PAX_REFCOUNT
8913 + "jno 0f\n"
8914 + _ASM_DEC "%0\n"
8915 + "int $4\n0:\n"
8916 + _ASM_EXTABLE(0b, 0b)
8917 +#endif
8918 +
8919 : "+m" (l->a.counter));
8920 }
8921
8922 static inline void local_dec(local_t *l)
8923 {
8924 - asm volatile(_ASM_DEC "%0"
8925 + asm volatile(_ASM_DEC "%0\n"
8926 +
8927 +#ifdef CONFIG_PAX_REFCOUNT
8928 + "jno 0f\n"
8929 + _ASM_INC "%0\n"
8930 + "int $4\n0:\n"
8931 + _ASM_EXTABLE(0b, 0b)
8932 +#endif
8933 +
8934 : "+m" (l->a.counter));
8935 }
8936
8937 static inline void local_add(long i, local_t *l)
8938 {
8939 - asm volatile(_ASM_ADD "%1,%0"
8940 + asm volatile(_ASM_ADD "%1,%0\n"
8941 +
8942 +#ifdef CONFIG_PAX_REFCOUNT
8943 + "jno 0f\n"
8944 + _ASM_SUB "%1,%0\n"
8945 + "int $4\n0:\n"
8946 + _ASM_EXTABLE(0b, 0b)
8947 +#endif
8948 +
8949 : "+m" (l->a.counter)
8950 : "ir" (i));
8951 }
8952
8953 static inline void local_sub(long i, local_t *l)
8954 {
8955 - asm volatile(_ASM_SUB "%1,%0"
8956 + asm volatile(_ASM_SUB "%1,%0\n"
8957 +
8958 +#ifdef CONFIG_PAX_REFCOUNT
8959 + "jno 0f\n"
8960 + _ASM_ADD "%1,%0\n"
8961 + "int $4\n0:\n"
8962 + _ASM_EXTABLE(0b, 0b)
8963 +#endif
8964 +
8965 : "+m" (l->a.counter)
8966 : "ir" (i));
8967 }
8968 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
8969 {
8970 unsigned char c;
8971
8972 - asm volatile(_ASM_SUB "%2,%0; sete %1"
8973 + asm volatile(_ASM_SUB "%2,%0\n"
8974 +
8975 +#ifdef CONFIG_PAX_REFCOUNT
8976 + "jno 0f\n"
8977 + _ASM_ADD "%2,%0\n"
8978 + "int $4\n0:\n"
8979 + _ASM_EXTABLE(0b, 0b)
8980 +#endif
8981 +
8982 + "sete %1\n"
8983 : "+m" (l->a.counter), "=qm" (c)
8984 : "ir" (i) : "memory");
8985 return c;
8986 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
8987 {
8988 unsigned char c;
8989
8990 - asm volatile(_ASM_DEC "%0; sete %1"
8991 + asm volatile(_ASM_DEC "%0\n"
8992 +
8993 +#ifdef CONFIG_PAX_REFCOUNT
8994 + "jno 0f\n"
8995 + _ASM_INC "%0\n"
8996 + "int $4\n0:\n"
8997 + _ASM_EXTABLE(0b, 0b)
8998 +#endif
8999 +
9000 + "sete %1\n"
9001 : "+m" (l->a.counter), "=qm" (c)
9002 : : "memory");
9003 return c != 0;
9004 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
9005 {
9006 unsigned char c;
9007
9008 - asm volatile(_ASM_INC "%0; sete %1"
9009 + asm volatile(_ASM_INC "%0\n"
9010 +
9011 +#ifdef CONFIG_PAX_REFCOUNT
9012 + "jno 0f\n"
9013 + _ASM_DEC "%0\n"
9014 + "int $4\n0:\n"
9015 + _ASM_EXTABLE(0b, 0b)
9016 +#endif
9017 +
9018 + "sete %1\n"
9019 : "+m" (l->a.counter), "=qm" (c)
9020 : : "memory");
9021 return c != 0;
9022 @@ -110,7 +169,16 @@ static inline int local_add_negative(lon
9023 {
9024 unsigned char c;
9025
9026 - asm volatile(_ASM_ADD "%2,%0; sets %1"
9027 + asm volatile(_ASM_ADD "%2,%0\n"
9028 +
9029 +#ifdef CONFIG_PAX_REFCOUNT
9030 + "jno 0f\n"
9031 + _ASM_SUB "%2,%0\n"
9032 + "int $4\n0:\n"
9033 + _ASM_EXTABLE(0b, 0b)
9034 +#endif
9035 +
9036 + "sets %1\n"
9037 : "+m" (l->a.counter), "=qm" (c)
9038 : "ir" (i) : "memory");
9039 return c;
9040 @@ -133,7 +201,15 @@ static inline long local_add_return(long
9041 #endif
9042 /* Modern 486+ processor */
9043 __i = i;
9044 - asm volatile(_ASM_XADD "%0, %1;"
9045 + asm volatile(_ASM_XADD "%0, %1\n"
9046 +
9047 +#ifdef CONFIG_PAX_REFCOUNT
9048 + "jno 0f\n"
9049 + _ASM_MOV "%0,%1\n"
9050 + "int $4\n0:\n"
9051 + _ASM_EXTABLE(0b, 0b)
9052 +#endif
9053 +
9054 : "+r" (i), "+m" (l->a.counter)
9055 : : "memory");
9056 return i + __i;
9057 diff -urNp linux-2.6.32.45/arch/x86/include/asm/microcode.h linux-2.6.32.45/arch/x86/include/asm/microcode.h
9058 --- linux-2.6.32.45/arch/x86/include/asm/microcode.h 2011-03-27 14:31:47.000000000 -0400
9059 +++ linux-2.6.32.45/arch/x86/include/asm/microcode.h 2011-04-17 15:56:46.000000000 -0400
9060 @@ -12,13 +12,13 @@ struct device;
9061 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
9062
9063 struct microcode_ops {
9064 - enum ucode_state (*request_microcode_user) (int cpu,
9065 + enum ucode_state (* const request_microcode_user) (int cpu,
9066 const void __user *buf, size_t size);
9067
9068 - enum ucode_state (*request_microcode_fw) (int cpu,
9069 + enum ucode_state (* const request_microcode_fw) (int cpu,
9070 struct device *device);
9071
9072 - void (*microcode_fini_cpu) (int cpu);
9073 + void (* const microcode_fini_cpu) (int cpu);
9074
9075 /*
9076 * The generic 'microcode_core' part guarantees that
9077 @@ -38,18 +38,18 @@ struct ucode_cpu_info {
9078 extern struct ucode_cpu_info ucode_cpu_info[];
9079
9080 #ifdef CONFIG_MICROCODE_INTEL
9081 -extern struct microcode_ops * __init init_intel_microcode(void);
9082 +extern const struct microcode_ops * __init init_intel_microcode(void);
9083 #else
9084 -static inline struct microcode_ops * __init init_intel_microcode(void)
9085 +static inline const struct microcode_ops * __init init_intel_microcode(void)
9086 {
9087 return NULL;
9088 }
9089 #endif /* CONFIG_MICROCODE_INTEL */
9090
9091 #ifdef CONFIG_MICROCODE_AMD
9092 -extern struct microcode_ops * __init init_amd_microcode(void);
9093 +extern const struct microcode_ops * __init init_amd_microcode(void);
9094 #else
9095 -static inline struct microcode_ops * __init init_amd_microcode(void)
9096 +static inline const struct microcode_ops * __init init_amd_microcode(void)
9097 {
9098 return NULL;
9099 }
9100 diff -urNp linux-2.6.32.45/arch/x86/include/asm/mman.h linux-2.6.32.45/arch/x86/include/asm/mman.h
9101 --- linux-2.6.32.45/arch/x86/include/asm/mman.h 2011-03-27 14:31:47.000000000 -0400
9102 +++ linux-2.6.32.45/arch/x86/include/asm/mman.h 2011-04-17 15:56:46.000000000 -0400
9103 @@ -5,4 +5,14 @@
9104
9105 #include <asm-generic/mman.h>
9106
9107 +#ifdef __KERNEL__
9108 +#ifndef __ASSEMBLY__
9109 +#ifdef CONFIG_X86_32
9110 +#define arch_mmap_check i386_mmap_check
9111 +int i386_mmap_check(unsigned long addr, unsigned long len,
9112 + unsigned long flags);
9113 +#endif
9114 +#endif
9115 +#endif
9116 +
9117 #endif /* _ASM_X86_MMAN_H */
9118 diff -urNp linux-2.6.32.45/arch/x86/include/asm/mmu_context.h linux-2.6.32.45/arch/x86/include/asm/mmu_context.h
9119 --- linux-2.6.32.45/arch/x86/include/asm/mmu_context.h 2011-03-27 14:31:47.000000000 -0400
9120 +++ linux-2.6.32.45/arch/x86/include/asm/mmu_context.h 2011-08-23 20:24:19.000000000 -0400
9121 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *m
9122
9123 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
9124 {
9125 +
9126 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9127 + unsigned int i;
9128 + pgd_t *pgd;
9129 +
9130 + pax_open_kernel();
9131 + pgd = get_cpu_pgd(smp_processor_id());
9132 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
9133 + set_pgd_batched(pgd+i, native_make_pgd(0));
9134 + pax_close_kernel();
9135 +#endif
9136 +
9137 #ifdef CONFIG_SMP
9138 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
9139 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
9140 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_s
9141 struct task_struct *tsk)
9142 {
9143 unsigned cpu = smp_processor_id();
9144 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) && defined(CONFIG_SMP)
9145 + int tlbstate = TLBSTATE_OK;
9146 +#endif
9147
9148 if (likely(prev != next)) {
9149 #ifdef CONFIG_SMP
9150 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9151 + tlbstate = percpu_read(cpu_tlbstate.state);
9152 +#endif
9153 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9154 percpu_write(cpu_tlbstate.active_mm, next);
9155 #endif
9156 cpumask_set_cpu(cpu, mm_cpumask(next));
9157
9158 /* Re-load page tables */
9159 +#ifdef CONFIG_PAX_PER_CPU_PGD
9160 + pax_open_kernel();
9161 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9162 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9163 + pax_close_kernel();
9164 + load_cr3(get_cpu_pgd(cpu));
9165 +#else
9166 load_cr3(next->pgd);
9167 +#endif
9168
9169 /* stop flush ipis for the previous mm */
9170 cpumask_clear_cpu(cpu, mm_cpumask(prev));
9171 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_s
9172 */
9173 if (unlikely(prev->context.ldt != next->context.ldt))
9174 load_LDT_nolock(&next->context);
9175 - }
9176 +
9177 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9178 + if (!nx_enabled) {
9179 + smp_mb__before_clear_bit();
9180 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
9181 + smp_mb__after_clear_bit();
9182 + cpu_set(cpu, next->context.cpu_user_cs_mask);
9183 + }
9184 +#endif
9185 +
9186 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9187 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
9188 + prev->context.user_cs_limit != next->context.user_cs_limit))
9189 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9190 #ifdef CONFIG_SMP
9191 + else if (unlikely(tlbstate != TLBSTATE_OK))
9192 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9193 +#endif
9194 +#endif
9195 +
9196 + }
9197 else {
9198 +
9199 +#ifdef CONFIG_PAX_PER_CPU_PGD
9200 + pax_open_kernel();
9201 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9202 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9203 + pax_close_kernel();
9204 + load_cr3(get_cpu_pgd(cpu));
9205 +#endif
9206 +
9207 +#ifdef CONFIG_SMP
9208 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9209 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
9210
9211 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_s
9212 * tlb flush IPI delivery. We must reload CR3
9213 * to make sure to use no freed page tables.
9214 */
9215 +
9216 +#ifndef CONFIG_PAX_PER_CPU_PGD
9217 load_cr3(next->pgd);
9218 +#endif
9219 +
9220 load_LDT_nolock(&next->context);
9221 +
9222 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
9223 + if (!nx_enabled)
9224 + cpu_set(cpu, next->context.cpu_user_cs_mask);
9225 +#endif
9226 +
9227 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9228 +#ifdef CONFIG_PAX_PAGEEXEC
9229 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
9230 +#endif
9231 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9232 +#endif
9233 +
9234 }
9235 - }
9236 #endif
9237 + }
9238 }
9239
9240 #define activate_mm(prev, next) \
9241 diff -urNp linux-2.6.32.45/arch/x86/include/asm/mmu.h linux-2.6.32.45/arch/x86/include/asm/mmu.h
9242 --- linux-2.6.32.45/arch/x86/include/asm/mmu.h 2011-03-27 14:31:47.000000000 -0400
9243 +++ linux-2.6.32.45/arch/x86/include/asm/mmu.h 2011-04-17 15:56:46.000000000 -0400
9244 @@ -9,10 +9,23 @@
9245 * we put the segment information here.
9246 */
9247 typedef struct {
9248 - void *ldt;
9249 + struct desc_struct *ldt;
9250 int size;
9251 struct mutex lock;
9252 - void *vdso;
9253 + unsigned long vdso;
9254 +
9255 +#ifdef CONFIG_X86_32
9256 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
9257 + unsigned long user_cs_base;
9258 + unsigned long user_cs_limit;
9259 +
9260 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9261 + cpumask_t cpu_user_cs_mask;
9262 +#endif
9263 +
9264 +#endif
9265 +#endif
9266 +
9267 } mm_context_t;
9268
9269 #ifdef CONFIG_SMP
9270 diff -urNp linux-2.6.32.45/arch/x86/include/asm/module.h linux-2.6.32.45/arch/x86/include/asm/module.h
9271 --- linux-2.6.32.45/arch/x86/include/asm/module.h 2011-03-27 14:31:47.000000000 -0400
9272 +++ linux-2.6.32.45/arch/x86/include/asm/module.h 2011-04-23 13:18:57.000000000 -0400
9273 @@ -5,6 +5,7 @@
9274
9275 #ifdef CONFIG_X86_64
9276 /* X86_64 does not define MODULE_PROC_FAMILY */
9277 +#define MODULE_PROC_FAMILY ""
9278 #elif defined CONFIG_M386
9279 #define MODULE_PROC_FAMILY "386 "
9280 #elif defined CONFIG_M486
9281 @@ -59,13 +60,36 @@
9282 #error unknown processor family
9283 #endif
9284
9285 -#ifdef CONFIG_X86_32
9286 -# ifdef CONFIG_4KSTACKS
9287 -# define MODULE_STACKSIZE "4KSTACKS "
9288 -# else
9289 -# define MODULE_STACKSIZE ""
9290 -# endif
9291 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
9292 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9293 +#define MODULE_PAX_UDEREF "UDEREF "
9294 +#else
9295 +#define MODULE_PAX_UDEREF ""
9296 +#endif
9297 +
9298 +#ifdef CONFIG_PAX_KERNEXEC
9299 +#define MODULE_PAX_KERNEXEC "KERNEXEC "
9300 +#else
9301 +#define MODULE_PAX_KERNEXEC ""
9302 +#endif
9303 +
9304 +#ifdef CONFIG_PAX_REFCOUNT
9305 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
9306 +#else
9307 +#define MODULE_PAX_REFCOUNT ""
9308 #endif
9309
9310 +#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
9311 +#define MODULE_STACKSIZE "4KSTACKS "
9312 +#else
9313 +#define MODULE_STACKSIZE ""
9314 +#endif
9315 +
9316 +#ifdef CONFIG_GRKERNSEC
9317 +#define MODULE_GRSEC "GRSECURITY "
9318 +#else
9319 +#define MODULE_GRSEC ""
9320 +#endif
9321 +
9322 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
9323 +
9324 #endif /* _ASM_X86_MODULE_H */
9325 diff -urNp linux-2.6.32.45/arch/x86/include/asm/page_64_types.h linux-2.6.32.45/arch/x86/include/asm/page_64_types.h
9326 --- linux-2.6.32.45/arch/x86/include/asm/page_64_types.h 2011-03-27 14:31:47.000000000 -0400
9327 +++ linux-2.6.32.45/arch/x86/include/asm/page_64_types.h 2011-04-17 15:56:46.000000000 -0400
9328 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
9329
9330 /* duplicated to the one in bootmem.h */
9331 extern unsigned long max_pfn;
9332 -extern unsigned long phys_base;
9333 +extern const unsigned long phys_base;
9334
9335 extern unsigned long __phys_addr(unsigned long);
9336 #define __phys_reloc_hide(x) (x)
9337 diff -urNp linux-2.6.32.45/arch/x86/include/asm/paravirt.h linux-2.6.32.45/arch/x86/include/asm/paravirt.h
9338 --- linux-2.6.32.45/arch/x86/include/asm/paravirt.h 2011-03-27 14:31:47.000000000 -0400
9339 +++ linux-2.6.32.45/arch/x86/include/asm/paravirt.h 2011-08-23 21:36:48.000000000 -0400
9340 @@ -648,6 +648,18 @@ static inline void set_pgd(pgd_t *pgdp,
9341 val);
9342 }
9343
9344 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9345 +{
9346 + pgdval_t val = native_pgd_val(pgd);
9347 +
9348 + if (sizeof(pgdval_t) > sizeof(long))
9349 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
9350 + val, (u64)val >> 32);
9351 + else
9352 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
9353 + val);
9354 +}
9355 +
9356 static inline void pgd_clear(pgd_t *pgdp)
9357 {
9358 set_pgd(pgdp, __pgd(0));
9359 @@ -729,6 +741,21 @@ static inline void __set_fixmap(unsigned
9360 pv_mmu_ops.set_fixmap(idx, phys, flags);
9361 }
9362
9363 +#ifdef CONFIG_PAX_KERNEXEC
9364 +static inline unsigned long pax_open_kernel(void)
9365 +{
9366 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
9367 +}
9368 +
9369 +static inline unsigned long pax_close_kernel(void)
9370 +{
9371 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
9372 +}
9373 +#else
9374 +static inline unsigned long pax_open_kernel(void) { return 0; }
9375 +static inline unsigned long pax_close_kernel(void) { return 0; }
9376 +#endif
9377 +
9378 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
9379
9380 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
9381 @@ -945,7 +972,7 @@ extern void default_banner(void);
9382
9383 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
9384 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
9385 -#define PARA_INDIRECT(addr) *%cs:addr
9386 +#define PARA_INDIRECT(addr) *%ss:addr
9387 #endif
9388
9389 #define INTERRUPT_RETURN \
9390 @@ -1022,6 +1049,21 @@ extern void default_banner(void);
9391 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
9392 CLBR_NONE, \
9393 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
9394 +
9395 +#define GET_CR0_INTO_RDI \
9396 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
9397 + mov %rax,%rdi
9398 +
9399 +#define SET_RDI_INTO_CR0 \
9400 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
9401 +
9402 +#define GET_CR3_INTO_RDI \
9403 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
9404 + mov %rax,%rdi
9405 +
9406 +#define SET_RDI_INTO_CR3 \
9407 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
9408 +
9409 #endif /* CONFIG_X86_32 */
9410
9411 #endif /* __ASSEMBLY__ */
9412 diff -urNp linux-2.6.32.45/arch/x86/include/asm/paravirt_types.h linux-2.6.32.45/arch/x86/include/asm/paravirt_types.h
9413 --- linux-2.6.32.45/arch/x86/include/asm/paravirt_types.h 2011-03-27 14:31:47.000000000 -0400
9414 +++ linux-2.6.32.45/arch/x86/include/asm/paravirt_types.h 2011-08-23 20:24:19.000000000 -0400
9415 @@ -78,19 +78,19 @@ struct pv_init_ops {
9416 */
9417 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
9418 unsigned long addr, unsigned len);
9419 -};
9420 +} __no_const;
9421
9422
9423 struct pv_lazy_ops {
9424 /* Set deferred update mode, used for batching operations. */
9425 void (*enter)(void);
9426 void (*leave)(void);
9427 -};
9428 +} __no_const;
9429
9430 struct pv_time_ops {
9431 unsigned long long (*sched_clock)(void);
9432 unsigned long (*get_tsc_khz)(void);
9433 -};
9434 +} __no_const;
9435
9436 struct pv_cpu_ops {
9437 /* hooks for various privileged instructions */
9438 @@ -186,7 +186,7 @@ struct pv_cpu_ops {
9439
9440 void (*start_context_switch)(struct task_struct *prev);
9441 void (*end_context_switch)(struct task_struct *next);
9442 -};
9443 +} __no_const;
9444
9445 struct pv_irq_ops {
9446 /*
9447 @@ -217,7 +217,7 @@ struct pv_apic_ops {
9448 unsigned long start_eip,
9449 unsigned long start_esp);
9450 #endif
9451 -};
9452 +} __no_const;
9453
9454 struct pv_mmu_ops {
9455 unsigned long (*read_cr2)(void);
9456 @@ -301,6 +301,7 @@ struct pv_mmu_ops {
9457 struct paravirt_callee_save make_pud;
9458
9459 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
9460 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
9461 #endif /* PAGETABLE_LEVELS == 4 */
9462 #endif /* PAGETABLE_LEVELS >= 3 */
9463
9464 @@ -316,6 +317,12 @@ struct pv_mmu_ops {
9465 an mfn. We can tell which is which from the index. */
9466 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
9467 phys_addr_t phys, pgprot_t flags);
9468 +
9469 +#ifdef CONFIG_PAX_KERNEXEC
9470 + unsigned long (*pax_open_kernel)(void);
9471 + unsigned long (*pax_close_kernel)(void);
9472 +#endif
9473 +
9474 };
9475
9476 struct raw_spinlock;
9477 @@ -326,7 +333,7 @@ struct pv_lock_ops {
9478 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
9479 int (*spin_trylock)(struct raw_spinlock *lock);
9480 void (*spin_unlock)(struct raw_spinlock *lock);
9481 -};
9482 +} __no_const;
9483
9484 /* This contains all the paravirt structures: we get a convenient
9485 * number for each function using the offset which we use to indicate
9486 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pci_x86.h linux-2.6.32.45/arch/x86/include/asm/pci_x86.h
9487 --- linux-2.6.32.45/arch/x86/include/asm/pci_x86.h 2011-03-27 14:31:47.000000000 -0400
9488 +++ linux-2.6.32.45/arch/x86/include/asm/pci_x86.h 2011-04-17 15:56:46.000000000 -0400
9489 @@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct
9490 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
9491
9492 struct pci_raw_ops {
9493 - int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9494 + int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9495 int reg, int len, u32 *val);
9496 - int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9497 + int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9498 int reg, int len, u32 val);
9499 };
9500
9501 -extern struct pci_raw_ops *raw_pci_ops;
9502 -extern struct pci_raw_ops *raw_pci_ext_ops;
9503 +extern const struct pci_raw_ops *raw_pci_ops;
9504 +extern const struct pci_raw_ops *raw_pci_ext_ops;
9505
9506 -extern struct pci_raw_ops pci_direct_conf1;
9507 +extern const struct pci_raw_ops pci_direct_conf1;
9508 extern bool port_cf9_safe;
9509
9510 /* arch_initcall level */
9511 diff -urNp linux-2.6.32.45/arch/x86/include/asm/percpu.h linux-2.6.32.45/arch/x86/include/asm/percpu.h
9512 --- linux-2.6.32.45/arch/x86/include/asm/percpu.h 2011-03-27 14:31:47.000000000 -0400
9513 +++ linux-2.6.32.45/arch/x86/include/asm/percpu.h 2011-08-17 19:33:59.000000000 -0400
9514 @@ -78,6 +78,7 @@ do { \
9515 if (0) { \
9516 T__ tmp__; \
9517 tmp__ = (val); \
9518 + (void)tmp__; \
9519 } \
9520 switch (sizeof(var)) { \
9521 case 1: \
9522 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgalloc.h linux-2.6.32.45/arch/x86/include/asm/pgalloc.h
9523 --- linux-2.6.32.45/arch/x86/include/asm/pgalloc.h 2011-03-27 14:31:47.000000000 -0400
9524 +++ linux-2.6.32.45/arch/x86/include/asm/pgalloc.h 2011-04-17 15:56:46.000000000 -0400
9525 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
9526 pmd_t *pmd, pte_t *pte)
9527 {
9528 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9529 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9530 +}
9531 +
9532 +static inline void pmd_populate_user(struct mm_struct *mm,
9533 + pmd_t *pmd, pte_t *pte)
9534 +{
9535 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9536 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9537 }
9538
9539 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable-2level.h linux-2.6.32.45/arch/x86/include/asm/pgtable-2level.h
9540 --- linux-2.6.32.45/arch/x86/include/asm/pgtable-2level.h 2011-03-27 14:31:47.000000000 -0400
9541 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable-2level.h 2011-04-17 15:56:46.000000000 -0400
9542 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
9543
9544 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9545 {
9546 + pax_open_kernel();
9547 *pmdp = pmd;
9548 + pax_close_kernel();
9549 }
9550
9551 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9552 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_32.h linux-2.6.32.45/arch/x86/include/asm/pgtable_32.h
9553 --- linux-2.6.32.45/arch/x86/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
9554 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
9555 @@ -26,9 +26,6 @@
9556 struct mm_struct;
9557 struct vm_area_struct;
9558
9559 -extern pgd_t swapper_pg_dir[1024];
9560 -extern pgd_t trampoline_pg_dir[1024];
9561 -
9562 static inline void pgtable_cache_init(void) { }
9563 static inline void check_pgt_cache(void) { }
9564 void paging_init(void);
9565 @@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, u
9566 # include <asm/pgtable-2level.h>
9567 #endif
9568
9569 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9570 +extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
9571 +#ifdef CONFIG_X86_PAE
9572 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9573 +#endif
9574 +
9575 #if defined(CONFIG_HIGHPTE)
9576 #define __KM_PTE \
9577 (in_nmi() ? KM_NMI_PTE : \
9578 @@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, u
9579 /* Clear a kernel PTE and flush it from the TLB */
9580 #define kpte_clear_flush(ptep, vaddr) \
9581 do { \
9582 + pax_open_kernel(); \
9583 pte_clear(&init_mm, (vaddr), (ptep)); \
9584 + pax_close_kernel(); \
9585 __flush_tlb_one((vaddr)); \
9586 } while (0)
9587
9588 @@ -85,6 +90,9 @@ do { \
9589
9590 #endif /* !__ASSEMBLY__ */
9591
9592 +#define HAVE_ARCH_UNMAPPED_AREA
9593 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9594 +
9595 /*
9596 * kern_addr_valid() is (1) for FLATMEM and (0) for
9597 * SPARSEMEM and DISCONTIGMEM
9598 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_32_types.h linux-2.6.32.45/arch/x86/include/asm/pgtable_32_types.h
9599 --- linux-2.6.32.45/arch/x86/include/asm/pgtable_32_types.h 2011-03-27 14:31:47.000000000 -0400
9600 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable_32_types.h 2011-04-17 15:56:46.000000000 -0400
9601 @@ -8,7 +8,7 @@
9602 */
9603 #ifdef CONFIG_X86_PAE
9604 # include <asm/pgtable-3level_types.h>
9605 -# define PMD_SIZE (1UL << PMD_SHIFT)
9606 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9607 # define PMD_MASK (~(PMD_SIZE - 1))
9608 #else
9609 # include <asm/pgtable-2level_types.h>
9610 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
9611 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9612 #endif
9613
9614 +#ifdef CONFIG_PAX_KERNEXEC
9615 +#ifndef __ASSEMBLY__
9616 +extern unsigned char MODULES_EXEC_VADDR[];
9617 +extern unsigned char MODULES_EXEC_END[];
9618 +#endif
9619 +#include <asm/boot.h>
9620 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9621 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9622 +#else
9623 +#define ktla_ktva(addr) (addr)
9624 +#define ktva_ktla(addr) (addr)
9625 +#endif
9626 +
9627 #define MODULES_VADDR VMALLOC_START
9628 #define MODULES_END VMALLOC_END
9629 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
9630 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable-3level.h linux-2.6.32.45/arch/x86/include/asm/pgtable-3level.h
9631 --- linux-2.6.32.45/arch/x86/include/asm/pgtable-3level.h 2011-03-27 14:31:47.000000000 -0400
9632 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable-3level.h 2011-04-17 15:56:46.000000000 -0400
9633 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
9634
9635 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9636 {
9637 + pax_open_kernel();
9638 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9639 + pax_close_kernel();
9640 }
9641
9642 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9643 {
9644 + pax_open_kernel();
9645 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9646 + pax_close_kernel();
9647 }
9648
9649 /*
9650 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_64.h linux-2.6.32.45/arch/x86/include/asm/pgtable_64.h
9651 --- linux-2.6.32.45/arch/x86/include/asm/pgtable_64.h 2011-03-27 14:31:47.000000000 -0400
9652 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable_64.h 2011-08-23 20:24:19.000000000 -0400
9653 @@ -16,10 +16,13 @@
9654
9655 extern pud_t level3_kernel_pgt[512];
9656 extern pud_t level3_ident_pgt[512];
9657 +extern pud_t level3_vmalloc_pgt[512];
9658 +extern pud_t level3_vmemmap_pgt[512];
9659 +extern pud_t level2_vmemmap_pgt[512];
9660 extern pmd_t level2_kernel_pgt[512];
9661 extern pmd_t level2_fixmap_pgt[512];
9662 -extern pmd_t level2_ident_pgt[512];
9663 -extern pgd_t init_level4_pgt[];
9664 +extern pmd_t level2_ident_pgt[512*2];
9665 +extern pgd_t init_level4_pgt[512];
9666
9667 #define swapper_pg_dir init_level4_pgt
9668
9669 @@ -74,7 +77,9 @@ static inline pte_t native_ptep_get_and_
9670
9671 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9672 {
9673 + pax_open_kernel();
9674 *pmdp = pmd;
9675 + pax_close_kernel();
9676 }
9677
9678 static inline void native_pmd_clear(pmd_t *pmd)
9679 @@ -94,6 +99,13 @@ static inline void native_pud_clear(pud_
9680
9681 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9682 {
9683 + pax_open_kernel();
9684 + *pgdp = pgd;
9685 + pax_close_kernel();
9686 +}
9687 +
9688 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9689 +{
9690 *pgdp = pgd;
9691 }
9692
9693 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_64_types.h linux-2.6.32.45/arch/x86/include/asm/pgtable_64_types.h
9694 --- linux-2.6.32.45/arch/x86/include/asm/pgtable_64_types.h 2011-03-27 14:31:47.000000000 -0400
9695 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable_64_types.h 2011-04-17 15:56:46.000000000 -0400
9696 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9697 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9698 #define MODULES_END _AC(0xffffffffff000000, UL)
9699 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9700 +#define MODULES_EXEC_VADDR MODULES_VADDR
9701 +#define MODULES_EXEC_END MODULES_END
9702 +
9703 +#define ktla_ktva(addr) (addr)
9704 +#define ktva_ktla(addr) (addr)
9705
9706 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9707 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable.h linux-2.6.32.45/arch/x86/include/asm/pgtable.h
9708 --- linux-2.6.32.45/arch/x86/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
9709 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable.h 2011-08-23 20:24:19.000000000 -0400
9710 @@ -39,6 +39,7 @@ extern struct list_head pgd_list;
9711
9712 #ifndef __PAGETABLE_PUD_FOLDED
9713 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
9714 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
9715 #define pgd_clear(pgd) native_pgd_clear(pgd)
9716 #endif
9717
9718 @@ -74,12 +75,51 @@ extern struct list_head pgd_list;
9719
9720 #define arch_end_context_switch(prev) do {} while(0)
9721
9722 +#define pax_open_kernel() native_pax_open_kernel()
9723 +#define pax_close_kernel() native_pax_close_kernel()
9724 #endif /* CONFIG_PARAVIRT */
9725
9726 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
9727 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9728 +
9729 +#ifdef CONFIG_PAX_KERNEXEC
9730 +static inline unsigned long native_pax_open_kernel(void)
9731 +{
9732 + unsigned long cr0;
9733 +
9734 + preempt_disable();
9735 + barrier();
9736 + cr0 = read_cr0() ^ X86_CR0_WP;
9737 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
9738 + write_cr0(cr0);
9739 + return cr0 ^ X86_CR0_WP;
9740 +}
9741 +
9742 +static inline unsigned long native_pax_close_kernel(void)
9743 +{
9744 + unsigned long cr0;
9745 +
9746 + cr0 = read_cr0() ^ X86_CR0_WP;
9747 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9748 + write_cr0(cr0);
9749 + barrier();
9750 + preempt_enable_no_resched();
9751 + return cr0 ^ X86_CR0_WP;
9752 +}
9753 +#else
9754 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
9755 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
9756 +#endif
9757 +
9758 /*
9759 * The following only work if pte_present() is true.
9760 * Undefined behaviour if not..
9761 */
9762 +static inline int pte_user(pte_t pte)
9763 +{
9764 + return pte_val(pte) & _PAGE_USER;
9765 +}
9766 +
9767 static inline int pte_dirty(pte_t pte)
9768 {
9769 return pte_flags(pte) & _PAGE_DIRTY;
9770 @@ -167,9 +207,29 @@ static inline pte_t pte_wrprotect(pte_t
9771 return pte_clear_flags(pte, _PAGE_RW);
9772 }
9773
9774 +static inline pte_t pte_mkread(pte_t pte)
9775 +{
9776 + return __pte(pte_val(pte) | _PAGE_USER);
9777 +}
9778 +
9779 static inline pte_t pte_mkexec(pte_t pte)
9780 {
9781 - return pte_clear_flags(pte, _PAGE_NX);
9782 +#ifdef CONFIG_X86_PAE
9783 + if (__supported_pte_mask & _PAGE_NX)
9784 + return pte_clear_flags(pte, _PAGE_NX);
9785 + else
9786 +#endif
9787 + return pte_set_flags(pte, _PAGE_USER);
9788 +}
9789 +
9790 +static inline pte_t pte_exprotect(pte_t pte)
9791 +{
9792 +#ifdef CONFIG_X86_PAE
9793 + if (__supported_pte_mask & _PAGE_NX)
9794 + return pte_set_flags(pte, _PAGE_NX);
9795 + else
9796 +#endif
9797 + return pte_clear_flags(pte, _PAGE_USER);
9798 }
9799
9800 static inline pte_t pte_mkdirty(pte_t pte)
9801 @@ -302,6 +362,15 @@ pte_t *populate_extra_pte(unsigned long
9802 #endif
9803
9804 #ifndef __ASSEMBLY__
9805 +
9806 +#ifdef CONFIG_PAX_PER_CPU_PGD
9807 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9808 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9809 +{
9810 + return cpu_pgd[cpu];
9811 +}
9812 +#endif
9813 +
9814 #include <linux/mm_types.h>
9815
9816 static inline int pte_none(pte_t pte)
9817 @@ -472,7 +541,7 @@ static inline pud_t *pud_offset(pgd_t *p
9818
9819 static inline int pgd_bad(pgd_t pgd)
9820 {
9821 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9822 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9823 }
9824
9825 static inline int pgd_none(pgd_t pgd)
9826 @@ -495,7 +564,12 @@ static inline int pgd_none(pgd_t pgd)
9827 * pgd_offset() returns a (pgd_t *)
9828 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9829 */
9830 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9831 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9832 +
9833 +#ifdef CONFIG_PAX_PER_CPU_PGD
9834 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9835 +#endif
9836 +
9837 /*
9838 * a shortcut which implies the use of the kernel's pgd, instead
9839 * of a process's
9840 @@ -506,6 +580,20 @@ static inline int pgd_none(pgd_t pgd)
9841 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9842 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9843
9844 +#ifdef CONFIG_X86_32
9845 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9846 +#else
9847 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9848 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9849 +
9850 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9851 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9852 +#else
9853 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9854 +#endif
9855 +
9856 +#endif
9857 +
9858 #ifndef __ASSEMBLY__
9859
9860 extern int direct_gbpages;
9861 @@ -611,11 +699,23 @@ static inline void ptep_set_wrprotect(st
9862 * dst and src can be on the same page, but the range must not overlap,
9863 * and must not cross a page boundary.
9864 */
9865 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9866 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9867 {
9868 - memcpy(dst, src, count * sizeof(pgd_t));
9869 + pax_open_kernel();
9870 + while (count--)
9871 + *dst++ = *src++;
9872 + pax_close_kernel();
9873 }
9874
9875 +#ifdef CONFIG_PAX_PER_CPU_PGD
9876 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9877 +#endif
9878 +
9879 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9880 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9881 +#else
9882 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9883 +#endif
9884
9885 #include <asm-generic/pgtable.h>
9886 #endif /* __ASSEMBLY__ */
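
Note on the pgtable.h hunk above: under CONFIG_PAX_KERNEXEC the patch adds native_pax_open_kernel() and native_pax_close_kernel(), which disable preemption and flip the CR0.WP bit so that normally read-only kernel pages can be written briefly, then restore it. A minimal sketch of how the patch uses that pair around page-table writes follows; the helper names are the ones the hunk introduces, and the function below is an illustration of the usage pattern, not part of the patch itself.

    /* Sketch: write to a page-table entry that is read-only under KERNEXEC.
     * pax_open_kernel() disables preemption and clears CR0.WP (writes to
     * read-only pages allowed); pax_close_kernel() restores CR0.WP and
     * re-enables preemption.  Both helpers come from the hunk above. */
    static inline void example_set_pmd(pmd_t *pmdp, pmd_t pmd)
    {
            pax_open_kernel();      /* clear CR0.WP */
            *pmdp = pmd;            /* the protected write */
            pax_close_kernel();     /* set CR0.WP again */
    }

This is the same wrapping the patch applies in native_set_pmd()/native_set_pgd() in the pgtable headers further down.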
9887 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_types.h linux-2.6.32.45/arch/x86/include/asm/pgtable_types.h
9888 --- linux-2.6.32.45/arch/x86/include/asm/pgtable_types.h 2011-03-27 14:31:47.000000000 -0400
9889 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable_types.h 2011-04-17 15:56:46.000000000 -0400
9890 @@ -16,12 +16,11 @@
9891 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9892 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9893 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9894 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9895 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9896 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9897 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9898 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9899 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9900 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9901 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9902 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9903
9904 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9905 @@ -39,7 +38,6 @@
9906 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9907 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9908 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9909 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9910 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9911 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9912 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9913 @@ -55,8 +53,10 @@
9914
9915 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9916 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9917 -#else
9918 +#elif defined(CONFIG_KMEMCHECK)
9919 #define _PAGE_NX (_AT(pteval_t, 0))
9920 +#else
9921 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9922 #endif
9923
9924 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9925 @@ -93,6 +93,9 @@
9926 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9927 _PAGE_ACCESSED)
9928
9929 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
9930 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
9931 +
9932 #define __PAGE_KERNEL_EXEC \
9933 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9934 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9935 @@ -103,8 +106,8 @@
9936 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9937 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9938 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9939 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9940 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
9941 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9942 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
9943 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9944 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
9945 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
9946 @@ -163,8 +166,8 @@
9947 * bits are combined, this will alow user to access the high address mapped
9948 * VDSO in the presence of CONFIG_COMPAT_VDSO
9949 */
9950 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9951 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9952 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9953 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9954 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9955 #endif
9956
9957 @@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t p
9958 {
9959 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9960 }
9961 +#endif
9962
9963 +#if PAGETABLE_LEVELS == 3
9964 +#include <asm-generic/pgtable-nopud.h>
9965 +#endif
9966 +
9967 +#if PAGETABLE_LEVELS == 2
9968 +#include <asm-generic/pgtable-nopmd.h>
9969 +#endif
9970 +
9971 +#ifndef __ASSEMBLY__
9972 #if PAGETABLE_LEVELS > 3
9973 typedef struct { pudval_t pud; } pud_t;
9974
9975 @@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pu
9976 return pud.pud;
9977 }
9978 #else
9979 -#include <asm-generic/pgtable-nopud.h>
9980 -
9981 static inline pudval_t native_pud_val(pud_t pud)
9982 {
9983 return native_pgd_val(pud.pgd);
9984 @@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pm
9985 return pmd.pmd;
9986 }
9987 #else
9988 -#include <asm-generic/pgtable-nopmd.h>
9989 -
9990 static inline pmdval_t native_pmd_val(pmd_t pmd)
9991 {
9992 return native_pgd_val(pmd.pud.pgd);
9993 @@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
9994
9995 extern pteval_t __supported_pte_mask;
9996 extern void set_nx(void);
9997 +
9998 +#ifdef CONFIG_X86_32
9999 +#ifdef CONFIG_X86_PAE
10000 extern int nx_enabled;
10001 +#else
10002 +#define nx_enabled (0)
10003 +#endif
10004 +#else
10005 +#define nx_enabled (1)
10006 +#endif
10007
10008 #define pgprot_writecombine pgprot_writecombine
10009 extern pgprot_t pgprot_writecombine(pgprot_t prot);
10010 diff -urNp linux-2.6.32.45/arch/x86/include/asm/processor.h linux-2.6.32.45/arch/x86/include/asm/processor.h
10011 --- linux-2.6.32.45/arch/x86/include/asm/processor.h 2011-04-22 19:16:29.000000000 -0400
10012 +++ linux-2.6.32.45/arch/x86/include/asm/processor.h 2011-05-11 18:25:15.000000000 -0400
10013 @@ -272,7 +272,7 @@ struct tss_struct {
10014
10015 } ____cacheline_aligned;
10016
10017 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
10018 +extern struct tss_struct init_tss[NR_CPUS];
10019
10020 /*
10021 * Save the original ist values for checking stack pointers during debugging
10022 @@ -888,11 +888,18 @@ static inline void spin_lock_prefetch(co
10023 */
10024 #define TASK_SIZE PAGE_OFFSET
10025 #define TASK_SIZE_MAX TASK_SIZE
10026 +
10027 +#ifdef CONFIG_PAX_SEGMEXEC
10028 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
10029 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
10030 +#else
10031 #define STACK_TOP TASK_SIZE
10032 -#define STACK_TOP_MAX STACK_TOP
10033 +#endif
10034 +
10035 +#define STACK_TOP_MAX TASK_SIZE
10036
10037 #define INIT_THREAD { \
10038 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
10039 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
10040 .vm86_info = NULL, \
10041 .sysenter_cs = __KERNEL_CS, \
10042 .io_bitmap_ptr = NULL, \
10043 @@ -906,7 +913,7 @@ static inline void spin_lock_prefetch(co
10044 */
10045 #define INIT_TSS { \
10046 .x86_tss = { \
10047 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
10048 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
10049 .ss0 = __KERNEL_DS, \
10050 .ss1 = __KERNEL_CS, \
10051 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
10052 @@ -917,11 +924,7 @@ static inline void spin_lock_prefetch(co
10053 extern unsigned long thread_saved_pc(struct task_struct *tsk);
10054
10055 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
10056 -#define KSTK_TOP(info) \
10057 -({ \
10058 - unsigned long *__ptr = (unsigned long *)(info); \
10059 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
10060 -})
10061 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
10062
10063 /*
10064 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
10065 @@ -936,7 +939,7 @@ extern unsigned long thread_saved_pc(str
10066 #define task_pt_regs(task) \
10067 ({ \
10068 struct pt_regs *__regs__; \
10069 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
10070 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
10071 __regs__ - 1; \
10072 })
10073
10074 @@ -946,13 +949,13 @@ extern unsigned long thread_saved_pc(str
10075 /*
10076 * User space process size. 47bits minus one guard page.
10077 */
10078 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
10079 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
10080
10081 /* This decides where the kernel will search for a free chunk of vm
10082 * space during mmap's.
10083 */
10084 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
10085 - 0xc0000000 : 0xFFFFe000)
10086 + 0xc0000000 : 0xFFFFf000)
10087
10088 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
10089 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
10090 @@ -963,11 +966,11 @@ extern unsigned long thread_saved_pc(str
10091 #define STACK_TOP_MAX TASK_SIZE_MAX
10092
10093 #define INIT_THREAD { \
10094 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
10095 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
10096 }
10097
10098 #define INIT_TSS { \
10099 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
10100 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
10101 }
10102
10103 /*
10104 @@ -989,6 +992,10 @@ extern void start_thread(struct pt_regs
10105 */
10106 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
10107
10108 +#ifdef CONFIG_PAX_SEGMEXEC
10109 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
10110 +#endif
10111 +
10112 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
10113
10114 /* Get/set a process' ability to use the timestamp counter instruction */
10115 diff -urNp linux-2.6.32.45/arch/x86/include/asm/ptrace.h linux-2.6.32.45/arch/x86/include/asm/ptrace.h
10116 --- linux-2.6.32.45/arch/x86/include/asm/ptrace.h 2011-03-27 14:31:47.000000000 -0400
10117 +++ linux-2.6.32.45/arch/x86/include/asm/ptrace.h 2011-04-17 15:56:46.000000000 -0400
10118 @@ -151,28 +151,29 @@ static inline unsigned long regs_return_
10119 }
10120
10121 /*
10122 - * user_mode_vm(regs) determines whether a register set came from user mode.
10123 + * user_mode(regs) determines whether a register set came from user mode.
10124 * This is true if V8086 mode was enabled OR if the register set was from
10125 * protected mode with RPL-3 CS value. This tricky test checks that with
10126 * one comparison. Many places in the kernel can bypass this full check
10127 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
10128 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
10129 + * be used.
10130 */
10131 -static inline int user_mode(struct pt_regs *regs)
10132 +static inline int user_mode_novm(struct pt_regs *regs)
10133 {
10134 #ifdef CONFIG_X86_32
10135 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
10136 #else
10137 - return !!(regs->cs & 3);
10138 + return !!(regs->cs & SEGMENT_RPL_MASK);
10139 #endif
10140 }
10141
10142 -static inline int user_mode_vm(struct pt_regs *regs)
10143 +static inline int user_mode(struct pt_regs *regs)
10144 {
10145 #ifdef CONFIG_X86_32
10146 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
10147 USER_RPL;
10148 #else
10149 - return user_mode(regs);
10150 + return user_mode_novm(regs);
10151 #endif
10152 }
10153
10154 diff -urNp linux-2.6.32.45/arch/x86/include/asm/reboot.h linux-2.6.32.45/arch/x86/include/asm/reboot.h
10155 --- linux-2.6.32.45/arch/x86/include/asm/reboot.h 2011-03-27 14:31:47.000000000 -0400
10156 +++ linux-2.6.32.45/arch/x86/include/asm/reboot.h 2011-08-05 20:33:55.000000000 -0400
10157 @@ -6,19 +6,19 @@
10158 struct pt_regs;
10159
10160 struct machine_ops {
10161 - void (*restart)(char *cmd);
10162 - void (*halt)(void);
10163 - void (*power_off)(void);
10164 + void (* __noreturn restart)(char *cmd);
10165 + void (* __noreturn halt)(void);
10166 + void (* __noreturn power_off)(void);
10167 void (*shutdown)(void);
10168 void (*crash_shutdown)(struct pt_regs *);
10169 - void (*emergency_restart)(void);
10170 -};
10171 + void (* __noreturn emergency_restart)(void);
10172 +} __no_const;
10173
10174 extern struct machine_ops machine_ops;
10175
10176 void native_machine_crash_shutdown(struct pt_regs *regs);
10177 void native_machine_shutdown(void);
10178 -void machine_real_restart(const unsigned char *code, int length);
10179 +void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
10180
10181 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
10182 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
10183 diff -urNp linux-2.6.32.45/arch/x86/include/asm/rwsem.h linux-2.6.32.45/arch/x86/include/asm/rwsem.h
10184 --- linux-2.6.32.45/arch/x86/include/asm/rwsem.h 2011-03-27 14:31:47.000000000 -0400
10185 +++ linux-2.6.32.45/arch/x86/include/asm/rwsem.h 2011-04-17 15:56:46.000000000 -0400
10186 @@ -118,6 +118,14 @@ static inline void __down_read(struct rw
10187 {
10188 asm volatile("# beginning down_read\n\t"
10189 LOCK_PREFIX _ASM_INC "(%1)\n\t"
10190 +
10191 +#ifdef CONFIG_PAX_REFCOUNT
10192 + "jno 0f\n"
10193 + LOCK_PREFIX _ASM_DEC "(%1)\n\t"
10194 + "int $4\n0:\n"
10195 + _ASM_EXTABLE(0b, 0b)
10196 +#endif
10197 +
10198 /* adds 0x00000001, returns the old value */
10199 " jns 1f\n"
10200 " call call_rwsem_down_read_failed\n"
10201 @@ -139,6 +147,14 @@ static inline int __down_read_trylock(st
10202 "1:\n\t"
10203 " mov %1,%2\n\t"
10204 " add %3,%2\n\t"
10205 +
10206 +#ifdef CONFIG_PAX_REFCOUNT
10207 + "jno 0f\n"
10208 + "sub %3,%2\n"
10209 + "int $4\n0:\n"
10210 + _ASM_EXTABLE(0b, 0b)
10211 +#endif
10212 +
10213 " jle 2f\n\t"
10214 LOCK_PREFIX " cmpxchg %2,%0\n\t"
10215 " jnz 1b\n\t"
10216 @@ -160,6 +176,14 @@ static inline void __down_write_nested(s
10217 tmp = RWSEM_ACTIVE_WRITE_BIAS;
10218 asm volatile("# beginning down_write\n\t"
10219 LOCK_PREFIX " xadd %1,(%2)\n\t"
10220 +
10221 +#ifdef CONFIG_PAX_REFCOUNT
10222 + "jno 0f\n"
10223 + "mov %1,(%2)\n"
10224 + "int $4\n0:\n"
10225 + _ASM_EXTABLE(0b, 0b)
10226 +#endif
10227 +
10228 /* subtract 0x0000ffff, returns the old value */
10229 " test %1,%1\n\t"
10230 /* was the count 0 before? */
10231 @@ -198,6 +222,14 @@ static inline void __up_read(struct rw_s
10232 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
10233 asm volatile("# beginning __up_read\n\t"
10234 LOCK_PREFIX " xadd %1,(%2)\n\t"
10235 +
10236 +#ifdef CONFIG_PAX_REFCOUNT
10237 + "jno 0f\n"
10238 + "mov %1,(%2)\n"
10239 + "int $4\n0:\n"
10240 + _ASM_EXTABLE(0b, 0b)
10241 +#endif
10242 +
10243 /* subtracts 1, returns the old value */
10244 " jns 1f\n\t"
10245 " call call_rwsem_wake\n"
10246 @@ -216,6 +248,14 @@ static inline void __up_write(struct rw_
10247 rwsem_count_t tmp;
10248 asm volatile("# beginning __up_write\n\t"
10249 LOCK_PREFIX " xadd %1,(%2)\n\t"
10250 +
10251 +#ifdef CONFIG_PAX_REFCOUNT
10252 + "jno 0f\n"
10253 + "mov %1,(%2)\n"
10254 + "int $4\n0:\n"
10255 + _ASM_EXTABLE(0b, 0b)
10256 +#endif
10257 +
10258 /* tries to transition
10259 0xffff0001 -> 0x00000000 */
10260 " jz 1f\n"
10261 @@ -234,6 +274,14 @@ static inline void __downgrade_write(str
10262 {
10263 asm volatile("# beginning __downgrade_write\n\t"
10264 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
10265 +
10266 +#ifdef CONFIG_PAX_REFCOUNT
10267 + "jno 0f\n"
10268 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
10269 + "int $4\n0:\n"
10270 + _ASM_EXTABLE(0b, 0b)
10271 +#endif
10272 +
10273 /*
10274 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
10275 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
10276 @@ -253,7 +301,15 @@ static inline void __downgrade_write(str
10277 static inline void rwsem_atomic_add(rwsem_count_t delta,
10278 struct rw_semaphore *sem)
10279 {
10280 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
10281 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
10282 +
10283 +#ifdef CONFIG_PAX_REFCOUNT
10284 + "jno 0f\n"
10285 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
10286 + "int $4\n0:\n"
10287 + _ASM_EXTABLE(0b, 0b)
10288 +#endif
10289 +
10290 : "+m" (sem->count)
10291 : "er" (delta));
10292 }
10293 @@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic
10294 {
10295 rwsem_count_t tmp = delta;
10296
10297 - asm volatile(LOCK_PREFIX "xadd %0,%1"
10298 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
10299 +
10300 +#ifdef CONFIG_PAX_REFCOUNT
10301 + "jno 0f\n"
10302 + "mov %0,%1\n"
10303 + "int $4\n0:\n"
10304 + _ASM_EXTABLE(0b, 0b)
10305 +#endif
10306 +
10307 : "+r" (tmp), "+m" (sem->count)
10308 : : "memory");
10309
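
Note on the rwsem.h hunks above: every CONFIG_PAX_REFCOUNT block has the same shape. After the lock-prefixed add/inc/xadd, "jno 0f" falls through when the operation did not overflow; on signed overflow the operation is undone and "int $4" raises the overflow exception, with an exception-table entry resuming at the local label. Below is a plain C sketch of the same detect, undo, and trap idea, using a compiler builtin instead of the patch's inline assembly; it is illustrative only and, unlike the patch, not atomic.

    #include <stdlib.h>

    /* Sketch of the PAX_REFCOUNT idea: detect a signed overflow on
     * increment, keep the old value, and trap.  abort() stands in for
     * the patch's "int $4"; the real code does this atomically in asm. */
    static int refcount_inc_checked(int *count)
    {
            int new;

            if (__builtin_add_overflow(*count, 1, &new)) {
                    /* overflow: leave the counter unchanged and trap */
                    abort();
            }
            *count = new;
            return new;
    }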
10310 diff -urNp linux-2.6.32.45/arch/x86/include/asm/segment.h linux-2.6.32.45/arch/x86/include/asm/segment.h
10311 --- linux-2.6.32.45/arch/x86/include/asm/segment.h 2011-03-27 14:31:47.000000000 -0400
10312 +++ linux-2.6.32.45/arch/x86/include/asm/segment.h 2011-04-17 15:56:46.000000000 -0400
10313 @@ -62,8 +62,8 @@
10314 * 26 - ESPFIX small SS
10315 * 27 - per-cpu [ offset to per-cpu data area ]
10316 * 28 - stack_canary-20 [ for stack protector ]
10317 - * 29 - unused
10318 - * 30 - unused
10319 + * 29 - PCI BIOS CS
10320 + * 30 - PCI BIOS DS
10321 * 31 - TSS for double fault handler
10322 */
10323 #define GDT_ENTRY_TLS_MIN 6
10324 @@ -77,6 +77,8 @@
10325
10326 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
10327
10328 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
10329 +
10330 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
10331
10332 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
10333 @@ -88,7 +90,7 @@
10334 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
10335 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
10336
10337 -#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
10338 +#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
10339 #ifdef CONFIG_SMP
10340 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
10341 #else
10342 @@ -102,6 +104,12 @@
10343 #define __KERNEL_STACK_CANARY 0
10344 #endif
10345
10346 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
10347 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
10348 +
10349 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
10350 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
10351 +
10352 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
10353
10354 /*
10355 @@ -139,7 +147,7 @@
10356 */
10357
10358 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
10359 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
10360 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
10361
10362
10363 #else
10364 @@ -163,6 +171,8 @@
10365 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
10366 #define __USER32_DS __USER_DS
10367
10368 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
10369 +
10370 #define GDT_ENTRY_TSS 8 /* needs two entries */
10371 #define GDT_ENTRY_LDT 10 /* needs two entries */
10372 #define GDT_ENTRY_TLS_MIN 12
10373 @@ -183,6 +193,7 @@
10374 #endif
10375
10376 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
10377 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
10378 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
10379 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
10380 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
10381 diff -urNp linux-2.6.32.45/arch/x86/include/asm/smp.h linux-2.6.32.45/arch/x86/include/asm/smp.h
10382 --- linux-2.6.32.45/arch/x86/include/asm/smp.h 2011-03-27 14:31:47.000000000 -0400
10383 +++ linux-2.6.32.45/arch/x86/include/asm/smp.h 2011-08-05 20:33:55.000000000 -0400
10384 @@ -24,7 +24,7 @@ extern unsigned int num_processors;
10385 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
10386 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
10387 DECLARE_PER_CPU(u16, cpu_llc_id);
10388 -DECLARE_PER_CPU(int, cpu_number);
10389 +DECLARE_PER_CPU(unsigned int, cpu_number);
10390
10391 static inline struct cpumask *cpu_sibling_mask(int cpu)
10392 {
10393 @@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_ap
10394 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
10395
10396 /* Static state in head.S used to set up a CPU */
10397 -extern struct {
10398 - void *sp;
10399 - unsigned short ss;
10400 -} stack_start;
10401 +extern unsigned long stack_start; /* Initial stack pointer address */
10402
10403 struct smp_ops {
10404 void (*smp_prepare_boot_cpu)(void);
10405 @@ -60,7 +57,7 @@ struct smp_ops {
10406
10407 void (*send_call_func_ipi)(const struct cpumask *mask);
10408 void (*send_call_func_single_ipi)(int cpu);
10409 -};
10410 +} __no_const;
10411
10412 /* Globals due to paravirt */
10413 extern void set_cpu_sibling_map(int cpu);
10414 @@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitd
10415 extern int safe_smp_processor_id(void);
10416
10417 #elif defined(CONFIG_X86_64_SMP)
10418 -#define raw_smp_processor_id() (percpu_read(cpu_number))
10419 -
10420 -#define stack_smp_processor_id() \
10421 -({ \
10422 - struct thread_info *ti; \
10423 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
10424 - ti->cpu; \
10425 -})
10426 +#define raw_smp_processor_id() (percpu_read(cpu_number))
10427 +#define stack_smp_processor_id() raw_smp_processor_id()
10428 #define safe_smp_processor_id() smp_processor_id()
10429
10430 #endif
10431 diff -urNp linux-2.6.32.45/arch/x86/include/asm/spinlock.h linux-2.6.32.45/arch/x86/include/asm/spinlock.h
10432 --- linux-2.6.32.45/arch/x86/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
10433 +++ linux-2.6.32.45/arch/x86/include/asm/spinlock.h 2011-04-17 15:56:46.000000000 -0400
10434 @@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(r
10435 static inline void __raw_read_lock(raw_rwlock_t *rw)
10436 {
10437 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
10438 +
10439 +#ifdef CONFIG_PAX_REFCOUNT
10440 + "jno 0f\n"
10441 + LOCK_PREFIX " addl $1,(%0)\n"
10442 + "int $4\n0:\n"
10443 + _ASM_EXTABLE(0b, 0b)
10444 +#endif
10445 +
10446 "jns 1f\n"
10447 "call __read_lock_failed\n\t"
10448 "1:\n"
10449 @@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_r
10450 static inline void __raw_write_lock(raw_rwlock_t *rw)
10451 {
10452 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
10453 +
10454 +#ifdef CONFIG_PAX_REFCOUNT
10455 + "jno 0f\n"
10456 + LOCK_PREFIX " addl %1,(%0)\n"
10457 + "int $4\n0:\n"
10458 + _ASM_EXTABLE(0b, 0b)
10459 +#endif
10460 +
10461 "jz 1f\n"
10462 "call __write_lock_failed\n\t"
10463 "1:\n"
10464 @@ -286,12 +302,29 @@ static inline int __raw_write_trylock(ra
10465
10466 static inline void __raw_read_unlock(raw_rwlock_t *rw)
10467 {
10468 - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
10469 + asm volatile(LOCK_PREFIX "incl %0\n"
10470 +
10471 +#ifdef CONFIG_PAX_REFCOUNT
10472 + "jno 0f\n"
10473 + LOCK_PREFIX "decl %0\n"
10474 + "int $4\n0:\n"
10475 + _ASM_EXTABLE(0b, 0b)
10476 +#endif
10477 +
10478 + :"+m" (rw->lock) : : "memory");
10479 }
10480
10481 static inline void __raw_write_unlock(raw_rwlock_t *rw)
10482 {
10483 - asm volatile(LOCK_PREFIX "addl %1, %0"
10484 + asm volatile(LOCK_PREFIX "addl %1, %0\n"
10485 +
10486 +#ifdef CONFIG_PAX_REFCOUNT
10487 + "jno 0f\n"
10488 + LOCK_PREFIX "subl %1, %0\n"
10489 + "int $4\n0:\n"
10490 + _ASM_EXTABLE(0b, 0b)
10491 +#endif
10492 +
10493 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
10494 }
10495
10496 diff -urNp linux-2.6.32.45/arch/x86/include/asm/stackprotector.h linux-2.6.32.45/arch/x86/include/asm/stackprotector.h
10497 --- linux-2.6.32.45/arch/x86/include/asm/stackprotector.h 2011-03-27 14:31:47.000000000 -0400
10498 +++ linux-2.6.32.45/arch/x86/include/asm/stackprotector.h 2011-07-06 19:53:33.000000000 -0400
10499 @@ -48,7 +48,7 @@
10500 * head_32 for boot CPU and setup_per_cpu_areas() for others.
10501 */
10502 #define GDT_STACK_CANARY_INIT \
10503 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
10504 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
10505
10506 /*
10507 * Initialize the stackprotector canary value.
10508 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
10509
10510 static inline void load_stack_canary_segment(void)
10511 {
10512 -#ifdef CONFIG_X86_32
10513 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
10514 asm volatile ("mov %0, %%gs" : : "r" (0));
10515 #endif
10516 }
10517 diff -urNp linux-2.6.32.45/arch/x86/include/asm/system.h linux-2.6.32.45/arch/x86/include/asm/system.h
10518 --- linux-2.6.32.45/arch/x86/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
10519 +++ linux-2.6.32.45/arch/x86/include/asm/system.h 2011-05-22 23:02:03.000000000 -0400
10520 @@ -132,7 +132,7 @@ do { \
10521 "thread_return:\n\t" \
10522 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
10523 __switch_canary \
10524 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
10525 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
10526 "movq %%rax,%%rdi\n\t" \
10527 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
10528 "jnz ret_from_fork\n\t" \
10529 @@ -143,7 +143,7 @@ do { \
10530 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
10531 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
10532 [_tif_fork] "i" (_TIF_FORK), \
10533 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
10534 + [thread_info] "m" (per_cpu_var(current_tinfo)), \
10535 [current_task] "m" (per_cpu_var(current_task)) \
10536 __switch_canary_iparam \
10537 : "memory", "cc" __EXTRA_CLOBBER)
10538 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
10539 {
10540 unsigned long __limit;
10541 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
10542 - return __limit + 1;
10543 + return __limit;
10544 }
10545
10546 static inline void native_clts(void)
10547 @@ -340,12 +340,12 @@ void enable_hlt(void);
10548
10549 void cpu_idle_wait(void);
10550
10551 -extern unsigned long arch_align_stack(unsigned long sp);
10552 +#define arch_align_stack(x) ((x) & ~0xfUL)
10553 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10554
10555 void default_idle(void);
10556
10557 -void stop_this_cpu(void *dummy);
10558 +void stop_this_cpu(void *dummy) __noreturn;
10559
10560 /*
10561 * Force strict CPU ordering.
10562 diff -urNp linux-2.6.32.45/arch/x86/include/asm/thread_info.h linux-2.6.32.45/arch/x86/include/asm/thread_info.h
10563 --- linux-2.6.32.45/arch/x86/include/asm/thread_info.h 2011-03-27 14:31:47.000000000 -0400
10564 +++ linux-2.6.32.45/arch/x86/include/asm/thread_info.h 2011-05-17 19:26:34.000000000 -0400
10565 @@ -10,6 +10,7 @@
10566 #include <linux/compiler.h>
10567 #include <asm/page.h>
10568 #include <asm/types.h>
10569 +#include <asm/percpu.h>
10570
10571 /*
10572 * low level task data that entry.S needs immediate access to
10573 @@ -24,7 +25,6 @@ struct exec_domain;
10574 #include <asm/atomic.h>
10575
10576 struct thread_info {
10577 - struct task_struct *task; /* main task structure */
10578 struct exec_domain *exec_domain; /* execution domain */
10579 __u32 flags; /* low level flags */
10580 __u32 status; /* thread synchronous flags */
10581 @@ -34,18 +34,12 @@ struct thread_info {
10582 mm_segment_t addr_limit;
10583 struct restart_block restart_block;
10584 void __user *sysenter_return;
10585 -#ifdef CONFIG_X86_32
10586 - unsigned long previous_esp; /* ESP of the previous stack in
10587 - case of nested (IRQ) stacks
10588 - */
10589 - __u8 supervisor_stack[0];
10590 -#endif
10591 + unsigned long lowest_stack;
10592 int uaccess_err;
10593 };
10594
10595 -#define INIT_THREAD_INFO(tsk) \
10596 +#define INIT_THREAD_INFO \
10597 { \
10598 - .task = &tsk, \
10599 .exec_domain = &default_exec_domain, \
10600 .flags = 0, \
10601 .cpu = 0, \
10602 @@ -56,7 +50,7 @@ struct thread_info {
10603 }, \
10604 }
10605
10606 -#define init_thread_info (init_thread_union.thread_info)
10607 +#define init_thread_info (init_thread_union.stack)
10608 #define init_stack (init_thread_union.stack)
10609
10610 #else /* !__ASSEMBLY__ */
10611 @@ -163,6 +157,23 @@ struct thread_info {
10612 #define alloc_thread_info(tsk) \
10613 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
10614
10615 +#ifdef __ASSEMBLY__
10616 +/* how to get the thread information struct from ASM */
10617 +#define GET_THREAD_INFO(reg) \
10618 + mov PER_CPU_VAR(current_tinfo), reg
10619 +
10620 +/* use this one if reg already contains %esp */
10621 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10622 +#else
10623 +/* how to get the thread information struct from C */
10624 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10625 +
10626 +static __always_inline struct thread_info *current_thread_info(void)
10627 +{
10628 + return percpu_read_stable(current_tinfo);
10629 +}
10630 +#endif
10631 +
10632 #ifdef CONFIG_X86_32
10633
10634 #define STACK_WARN (THREAD_SIZE/8)
10635 @@ -173,35 +184,13 @@ struct thread_info {
10636 */
10637 #ifndef __ASSEMBLY__
10638
10639 -
10640 /* how to get the current stack pointer from C */
10641 register unsigned long current_stack_pointer asm("esp") __used;
10642
10643 -/* how to get the thread information struct from C */
10644 -static inline struct thread_info *current_thread_info(void)
10645 -{
10646 - return (struct thread_info *)
10647 - (current_stack_pointer & ~(THREAD_SIZE - 1));
10648 -}
10649 -
10650 -#else /* !__ASSEMBLY__ */
10651 -
10652 -/* how to get the thread information struct from ASM */
10653 -#define GET_THREAD_INFO(reg) \
10654 - movl $-THREAD_SIZE, reg; \
10655 - andl %esp, reg
10656 -
10657 -/* use this one if reg already contains %esp */
10658 -#define GET_THREAD_INFO_WITH_ESP(reg) \
10659 - andl $-THREAD_SIZE, reg
10660 -
10661 #endif
10662
10663 #else /* X86_32 */
10664
10665 -#include <asm/percpu.h>
10666 -#define KERNEL_STACK_OFFSET (5*8)
10667 -
10668 /*
10669 * macros/functions for gaining access to the thread information structure
10670 * preempt_count needs to be 1 initially, until the scheduler is functional.
10671 @@ -209,21 +198,8 @@ static inline struct thread_info *curren
10672 #ifndef __ASSEMBLY__
10673 DECLARE_PER_CPU(unsigned long, kernel_stack);
10674
10675 -static inline struct thread_info *current_thread_info(void)
10676 -{
10677 - struct thread_info *ti;
10678 - ti = (void *)(percpu_read_stable(kernel_stack) +
10679 - KERNEL_STACK_OFFSET - THREAD_SIZE);
10680 - return ti;
10681 -}
10682 -
10683 -#else /* !__ASSEMBLY__ */
10684 -
10685 -/* how to get the thread information struct from ASM */
10686 -#define GET_THREAD_INFO(reg) \
10687 - movq PER_CPU_VAR(kernel_stack),reg ; \
10688 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10689 -
10690 +/* how to get the current stack pointer from C */
10691 +register unsigned long current_stack_pointer asm("rsp") __used;
10692 #endif
10693
10694 #endif /* !X86_32 */
10695 @@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
10696 extern void free_thread_info(struct thread_info *ti);
10697 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10698 #define arch_task_cache_init arch_task_cache_init
10699 +
10700 +#define __HAVE_THREAD_FUNCTIONS
10701 +#define task_thread_info(task) (&(task)->tinfo)
10702 +#define task_stack_page(task) ((task)->stack)
10703 +#define setup_thread_stack(p, org) do {} while (0)
10704 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10705 +
10706 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10707 +extern struct task_struct *alloc_task_struct(void);
10708 +extern void free_task_struct(struct task_struct *);
10709 +
10710 #endif
10711 #endif /* _ASM_X86_THREAD_INFO_H */
10712 diff -urNp linux-2.6.32.45/arch/x86/include/asm/uaccess_32.h linux-2.6.32.45/arch/x86/include/asm/uaccess_32.h
10713 --- linux-2.6.32.45/arch/x86/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
10714 +++ linux-2.6.32.45/arch/x86/include/asm/uaccess_32.h 2011-05-16 21:46:57.000000000 -0400
10715 @@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_u
10716 static __always_inline unsigned long __must_check
10717 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10718 {
10719 + pax_track_stack();
10720 +
10721 + if ((long)n < 0)
10722 + return n;
10723 +
10724 if (__builtin_constant_p(n)) {
10725 unsigned long ret;
10726
10727 @@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to,
10728 return ret;
10729 }
10730 }
10731 + if (!__builtin_constant_p(n))
10732 + check_object_size(from, n, true);
10733 return __copy_to_user_ll(to, from, n);
10734 }
10735
10736 @@ -83,12 +90,16 @@ static __always_inline unsigned long __m
10737 __copy_to_user(void __user *to, const void *from, unsigned long n)
10738 {
10739 might_fault();
10740 +
10741 return __copy_to_user_inatomic(to, from, n);
10742 }
10743
10744 static __always_inline unsigned long
10745 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
10746 {
10747 + if ((long)n < 0)
10748 + return n;
10749 +
10750 /* Avoid zeroing the tail if the copy fails..
10751 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
10752 * but as the zeroing behaviour is only significant when n is not
10753 @@ -138,6 +149,12 @@ static __always_inline unsigned long
10754 __copy_from_user(void *to, const void __user *from, unsigned long n)
10755 {
10756 might_fault();
10757 +
10758 + pax_track_stack();
10759 +
10760 + if ((long)n < 0)
10761 + return n;
10762 +
10763 if (__builtin_constant_p(n)) {
10764 unsigned long ret;
10765
10766 @@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __
10767 return ret;
10768 }
10769 }
10770 + if (!__builtin_constant_p(n))
10771 + check_object_size(to, n, false);
10772 return __copy_from_user_ll(to, from, n);
10773 }
10774
10775 @@ -160,6 +179,10 @@ static __always_inline unsigned long __c
10776 const void __user *from, unsigned long n)
10777 {
10778 might_fault();
10779 +
10780 + if ((long)n < 0)
10781 + return n;
10782 +
10783 if (__builtin_constant_p(n)) {
10784 unsigned long ret;
10785
10786 @@ -182,14 +205,62 @@ static __always_inline unsigned long
10787 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
10788 unsigned long n)
10789 {
10790 - return __copy_from_user_ll_nocache_nozero(to, from, n);
10791 + if ((long)n < 0)
10792 + return n;
10793 +
10794 + return __copy_from_user_ll_nocache_nozero(to, from, n);
10795 +}
10796 +
10797 +/**
10798 + * copy_to_user: - Copy a block of data into user space.
10799 + * @to: Destination address, in user space.
10800 + * @from: Source address, in kernel space.
10801 + * @n: Number of bytes to copy.
10802 + *
10803 + * Context: User context only. This function may sleep.
10804 + *
10805 + * Copy data from kernel space to user space.
10806 + *
10807 + * Returns number of bytes that could not be copied.
10808 + * On success, this will be zero.
10809 + */
10810 +static __always_inline unsigned long __must_check
10811 +copy_to_user(void __user *to, const void *from, unsigned long n)
10812 +{
10813 + if (access_ok(VERIFY_WRITE, to, n))
10814 + n = __copy_to_user(to, from, n);
10815 + return n;
10816 +}
10817 +
10818 +/**
10819 + * copy_from_user: - Copy a block of data from user space.
10820 + * @to: Destination address, in kernel space.
10821 + * @from: Source address, in user space.
10822 + * @n: Number of bytes to copy.
10823 + *
10824 + * Context: User context only. This function may sleep.
10825 + *
10826 + * Copy data from user space to kernel space.
10827 + *
10828 + * Returns number of bytes that could not be copied.
10829 + * On success, this will be zero.
10830 + *
10831 + * If some data could not be copied, this function will pad the copied
10832 + * data to the requested size using zero bytes.
10833 + */
10834 +static __always_inline unsigned long __must_check
10835 +copy_from_user(void *to, const void __user *from, unsigned long n)
10836 +{
10837 + if (access_ok(VERIFY_READ, from, n))
10838 + n = __copy_from_user(to, from, n);
10839 + else if ((long)n > 0) {
10840 + if (!__builtin_constant_p(n))
10841 + check_object_size(to, n, false);
10842 + memset(to, 0, n);
10843 + }
10844 + return n;
10845 }
10846
10847 -unsigned long __must_check copy_to_user(void __user *to,
10848 - const void *from, unsigned long n);
10849 -unsigned long __must_check copy_from_user(void *to,
10850 - const void __user *from,
10851 - unsigned long n);
10852 long __must_check strncpy_from_user(char *dst, const char __user *src,
10853 long count);
10854 long __must_check __strncpy_from_user(char *dst,
10855 diff -urNp linux-2.6.32.45/arch/x86/include/asm/uaccess_64.h linux-2.6.32.45/arch/x86/include/asm/uaccess_64.h
10856 --- linux-2.6.32.45/arch/x86/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
10857 +++ linux-2.6.32.45/arch/x86/include/asm/uaccess_64.h 2011-05-16 21:46:57.000000000 -0400
10858 @@ -9,6 +9,9 @@
10859 #include <linux/prefetch.h>
10860 #include <linux/lockdep.h>
10861 #include <asm/page.h>
10862 +#include <asm/pgtable.h>
10863 +
10864 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
10865
10866 /*
10867 * Copy To/From Userspace
10868 @@ -19,113 +22,203 @@ __must_check unsigned long
10869 copy_user_generic(void *to, const void *from, unsigned len);
10870
10871 __must_check unsigned long
10872 -copy_to_user(void __user *to, const void *from, unsigned len);
10873 -__must_check unsigned long
10874 -copy_from_user(void *to, const void __user *from, unsigned len);
10875 -__must_check unsigned long
10876 copy_in_user(void __user *to, const void __user *from, unsigned len);
10877
10878 static __always_inline __must_check
10879 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
10880 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
10881 {
10882 - int ret = 0;
10883 + unsigned ret = 0;
10884
10885 might_fault();
10886 - if (!__builtin_constant_p(size))
10887 - return copy_user_generic(dst, (__force void *)src, size);
10888 +
10889 + if ((int)size < 0)
10890 + return size;
10891 +
10892 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10893 + if (!__access_ok(VERIFY_READ, src, size))
10894 + return size;
10895 +#endif
10896 +
10897 + if (!__builtin_constant_p(size)) {
10898 + check_object_size(dst, size, false);
10899 +
10900 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10901 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10902 + src += PAX_USER_SHADOW_BASE;
10903 +#endif
10904 +
10905 + return copy_user_generic(dst, (__force const void *)src, size);
10906 + }
10907 switch (size) {
10908 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
10909 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
10910 ret, "b", "b", "=q", 1);
10911 return ret;
10912 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
10913 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
10914 ret, "w", "w", "=r", 2);
10915 return ret;
10916 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
10917 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
10918 ret, "l", "k", "=r", 4);
10919 return ret;
10920 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
10921 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10922 ret, "q", "", "=r", 8);
10923 return ret;
10924 case 10:
10925 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10926 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10927 ret, "q", "", "=r", 10);
10928 if (unlikely(ret))
10929 return ret;
10930 __get_user_asm(*(u16 *)(8 + (char *)dst),
10931 - (u16 __user *)(8 + (char __user *)src),
10932 + (const u16 __user *)(8 + (const char __user *)src),
10933 ret, "w", "w", "=r", 2);
10934 return ret;
10935 case 16:
10936 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10937 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10938 ret, "q", "", "=r", 16);
10939 if (unlikely(ret))
10940 return ret;
10941 __get_user_asm(*(u64 *)(8 + (char *)dst),
10942 - (u64 __user *)(8 + (char __user *)src),
10943 + (const u64 __user *)(8 + (const char __user *)src),
10944 ret, "q", "", "=r", 8);
10945 return ret;
10946 default:
10947 - return copy_user_generic(dst, (__force void *)src, size);
10948 +
10949 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10950 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10951 + src += PAX_USER_SHADOW_BASE;
10952 +#endif
10953 +
10954 + return copy_user_generic(dst, (__force const void *)src, size);
10955 }
10956 }
10957
10958 static __always_inline __must_check
10959 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
10960 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
10961 {
10962 - int ret = 0;
10963 + unsigned ret = 0;
10964
10965 might_fault();
10966 - if (!__builtin_constant_p(size))
10967 +
10968 + pax_track_stack();
10969 +
10970 + if ((int)size < 0)
10971 + return size;
10972 +
10973 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10974 + if (!__access_ok(VERIFY_WRITE, dst, size))
10975 + return size;
10976 +#endif
10977 +
10978 + if (!__builtin_constant_p(size)) {
10979 + check_object_size(src, size, true);
10980 +
10981 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10982 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10983 + dst += PAX_USER_SHADOW_BASE;
10984 +#endif
10985 +
10986 return copy_user_generic((__force void *)dst, src, size);
10987 + }
10988 switch (size) {
10989 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
10990 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
10991 ret, "b", "b", "iq", 1);
10992 return ret;
10993 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
10994 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
10995 ret, "w", "w", "ir", 2);
10996 return ret;
10997 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
10998 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
10999 ret, "l", "k", "ir", 4);
11000 return ret;
11001 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
11002 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11003 ret, "q", "", "er", 8);
11004 return ret;
11005 case 10:
11006 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11007 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11008 ret, "q", "", "er", 10);
11009 if (unlikely(ret))
11010 return ret;
11011 asm("":::"memory");
11012 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
11013 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
11014 ret, "w", "w", "ir", 2);
11015 return ret;
11016 case 16:
11017 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11018 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11019 ret, "q", "", "er", 16);
11020 if (unlikely(ret))
11021 return ret;
11022 asm("":::"memory");
11023 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
11024 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
11025 ret, "q", "", "er", 8);
11026 return ret;
11027 default:
11028 +
11029 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11030 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11031 + dst += PAX_USER_SHADOW_BASE;
11032 +#endif
11033 +
11034 return copy_user_generic((__force void *)dst, src, size);
11035 }
11036 }
11037
11038 static __always_inline __must_check
11039 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11040 +unsigned long copy_to_user(void __user *to, const void *from, unsigned len)
11041 +{
11042 + if (access_ok(VERIFY_WRITE, to, len))
11043 + len = __copy_to_user(to, from, len);
11044 + return len;
11045 +}
11046 +
11047 +static __always_inline __must_check
11048 +unsigned long copy_from_user(void *to, const void __user *from, unsigned len)
11049 +{
11050 + if ((int)len < 0)
11051 + return len;
11052 +
11053 + if (access_ok(VERIFY_READ, from, len))
11054 + len = __copy_from_user(to, from, len);
11055 + else if ((int)len > 0) {
11056 + if (!__builtin_constant_p(len))
11057 + check_object_size(to, len, false);
11058 + memset(to, 0, len);
11059 + }
11060 + return len;
11061 +}
11062 +
11063 +static __always_inline __must_check
11064 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11065 {
11066 - int ret = 0;
11067 + unsigned ret = 0;
11068
11069 might_fault();
11070 - if (!__builtin_constant_p(size))
11071 +
11072 + pax_track_stack();
11073 +
11074 + if ((int)size < 0)
11075 + return size;
11076 +
11077 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11078 + if (!__access_ok(VERIFY_READ, src, size))
11079 + return size;
11080 + if (!__access_ok(VERIFY_WRITE, dst, size))
11081 + return size;
11082 +#endif
11083 +
11084 + if (!__builtin_constant_p(size)) {
11085 +
11086 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11087 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11088 + src += PAX_USER_SHADOW_BASE;
11089 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11090 + dst += PAX_USER_SHADOW_BASE;
11091 +#endif
11092 +
11093 return copy_user_generic((__force void *)dst,
11094 - (__force void *)src, size);
11095 + (__force const void *)src, size);
11096 + }
11097 switch (size) {
11098 case 1: {
11099 u8 tmp;
11100 - __get_user_asm(tmp, (u8 __user *)src,
11101 + __get_user_asm(tmp, (const u8 __user *)src,
11102 ret, "b", "b", "=q", 1);
11103 if (likely(!ret))
11104 __put_user_asm(tmp, (u8 __user *)dst,
11105 @@ -134,7 +227,7 @@ int __copy_in_user(void __user *dst, con
11106 }
11107 case 2: {
11108 u16 tmp;
11109 - __get_user_asm(tmp, (u16 __user *)src,
11110 + __get_user_asm(tmp, (const u16 __user *)src,
11111 ret, "w", "w", "=r", 2);
11112 if (likely(!ret))
11113 __put_user_asm(tmp, (u16 __user *)dst,
11114 @@ -144,7 +237,7 @@ int __copy_in_user(void __user *dst, con
11115
11116 case 4: {
11117 u32 tmp;
11118 - __get_user_asm(tmp, (u32 __user *)src,
11119 + __get_user_asm(tmp, (const u32 __user *)src,
11120 ret, "l", "k", "=r", 4);
11121 if (likely(!ret))
11122 __put_user_asm(tmp, (u32 __user *)dst,
11123 @@ -153,7 +246,7 @@ int __copy_in_user(void __user *dst, con
11124 }
11125 case 8: {
11126 u64 tmp;
11127 - __get_user_asm(tmp, (u64 __user *)src,
11128 + __get_user_asm(tmp, (const u64 __user *)src,
11129 ret, "q", "", "=r", 8);
11130 if (likely(!ret))
11131 __put_user_asm(tmp, (u64 __user *)dst,
11132 @@ -161,8 +254,16 @@ int __copy_in_user(void __user *dst, con
11133 return ret;
11134 }
11135 default:
11136 +
11137 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11138 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11139 + src += PAX_USER_SHADOW_BASE;
11140 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11141 + dst += PAX_USER_SHADOW_BASE;
11142 +#endif
11143 +
11144 return copy_user_generic((__force void *)dst,
11145 - (__force void *)src, size);
11146 + (__force const void *)src, size);
11147 }
11148 }
11149
11150 @@ -176,33 +277,75 @@ __must_check long strlen_user(const char
11151 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
11152 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
11153
11154 -__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
11155 - unsigned size);
11156 +static __must_check __always_inline unsigned long
11157 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
11158 +{
11159 + pax_track_stack();
11160 +
11161 + if ((int)size < 0)
11162 + return size;
11163
11164 -static __must_check __always_inline int
11165 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11166 + if (!__access_ok(VERIFY_READ, src, size))
11167 + return size;
11168 +
11169 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11170 + src += PAX_USER_SHADOW_BASE;
11171 +#endif
11172 +
11173 + return copy_user_generic(dst, (__force const void *)src, size);
11174 +}
11175 +
11176 +static __must_check __always_inline unsigned long
11177 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
11178 {
11179 + if ((int)size < 0)
11180 + return size;
11181 +
11182 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11183 + if (!__access_ok(VERIFY_WRITE, dst, size))
11184 + return size;
11185 +
11186 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11187 + dst += PAX_USER_SHADOW_BASE;
11188 +#endif
11189 +
11190 return copy_user_generic((__force void *)dst, src, size);
11191 }
11192
11193 -extern long __copy_user_nocache(void *dst, const void __user *src,
11194 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
11195 unsigned size, int zerorest);
11196
11197 -static inline int
11198 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11199 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11200 {
11201 might_sleep();
11202 +
11203 + if ((int)size < 0)
11204 + return size;
11205 +
11206 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11207 + if (!__access_ok(VERIFY_READ, src, size))
11208 + return size;
11209 +#endif
11210 +
11211 return __copy_user_nocache(dst, src, size, 1);
11212 }
11213
11214 -static inline int
11215 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11216 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11217 unsigned size)
11218 {
11219 + if ((int)size < 0)
11220 + return size;
11221 +
11222 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11223 + if (!__access_ok(VERIFY_READ, src, size))
11224 + return size;
11225 +#endif
11226 +
11227 return __copy_user_nocache(dst, src, size, 0);
11228 }
11229
11230 -unsigned long
11231 +extern unsigned long
11232 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
11233
11234 #endif /* _ASM_X86_UACCESS_64_H */
11235 diff -urNp linux-2.6.32.45/arch/x86/include/asm/uaccess.h linux-2.6.32.45/arch/x86/include/asm/uaccess.h
11236 --- linux-2.6.32.45/arch/x86/include/asm/uaccess.h 2011-06-25 12:55:34.000000000 -0400
11237 +++ linux-2.6.32.45/arch/x86/include/asm/uaccess.h 2011-06-25 12:56:37.000000000 -0400
11238 @@ -8,12 +8,15 @@
11239 #include <linux/thread_info.h>
11240 #include <linux/prefetch.h>
11241 #include <linux/string.h>
11242 +#include <linux/sched.h>
11243 #include <asm/asm.h>
11244 #include <asm/page.h>
11245
11246 #define VERIFY_READ 0
11247 #define VERIFY_WRITE 1
11248
11249 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
11250 +
11251 /*
11252 * The fs value determines whether argument validity checking should be
11253 * performed or not. If get_fs() == USER_DS, checking is performed, with
11254 @@ -29,7 +32,12 @@
11255
11256 #define get_ds() (KERNEL_DS)
11257 #define get_fs() (current_thread_info()->addr_limit)
11258 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
11259 +void __set_fs(mm_segment_t x);
11260 +void set_fs(mm_segment_t x);
11261 +#else
11262 #define set_fs(x) (current_thread_info()->addr_limit = (x))
11263 +#endif
11264
11265 #define segment_eq(a, b) ((a).seg == (b).seg)
11266
11267 @@ -77,7 +85,33 @@
11268 * checks that the pointer is in the user space range - after calling
11269 * this function, memory access functions may still return -EFAULT.
11270 */
11271 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
11272 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
11273 +#define access_ok(type, addr, size) \
11274 +({ \
11275 + long __size = size; \
11276 + unsigned long __addr = (unsigned long)addr; \
11277 + unsigned long __addr_ao = __addr & PAGE_MASK; \
11278 + unsigned long __end_ao = __addr + __size - 1; \
11279 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
11280 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
11281 +	while (__addr_ao <= __end_ao) {					\
11282 + char __c_ao; \
11283 + __addr_ao += PAGE_SIZE; \
11284 + if (__size > PAGE_SIZE) \
11285 + cond_resched(); \
11286 + if (__get_user(__c_ao, (char __user *)__addr)) \
11287 + break; \
11288 + if (type != VERIFY_WRITE) { \
11289 + __addr = __addr_ao; \
11290 + continue; \
11291 + } \
11292 + if (__put_user(__c_ao, (char __user *)__addr)) \
11293 + break; \
11294 + __addr = __addr_ao; \
11295 + } \
11296 + } \
11297 + __ret_ao; \
11298 +})
11299
11300 /*
11301 * The exception table consists of pairs of addresses: the first is the
11302 @@ -183,12 +217,20 @@ extern int __get_user_bad(void);
11303 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
11304 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
11305
11306 -
11307 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
11308 +#define __copyuser_seg "gs;"
11309 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
11310 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
11311 +#else
11312 +#define __copyuser_seg
11313 +#define __COPYUSER_SET_ES
11314 +#define __COPYUSER_RESTORE_ES
11315 +#endif
11316
11317 #ifdef CONFIG_X86_32
11318 #define __put_user_asm_u64(x, addr, err, errret) \
11319 - asm volatile("1: movl %%eax,0(%2)\n" \
11320 - "2: movl %%edx,4(%2)\n" \
11321 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
11322 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
11323 "3:\n" \
11324 ".section .fixup,\"ax\"\n" \
11325 "4: movl %3,%0\n" \
11326 @@ -200,8 +242,8 @@ extern int __get_user_bad(void);
11327 : "A" (x), "r" (addr), "i" (errret), "0" (err))
11328
11329 #define __put_user_asm_ex_u64(x, addr) \
11330 - asm volatile("1: movl %%eax,0(%1)\n" \
11331 - "2: movl %%edx,4(%1)\n" \
11332 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
11333 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
11334 "3:\n" \
11335 _ASM_EXTABLE(1b, 2b - 1b) \
11336 _ASM_EXTABLE(2b, 3b - 2b) \
11337 @@ -374,7 +416,7 @@ do { \
11338 } while (0)
11339
11340 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
11341 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
11342 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
11343 "2:\n" \
11344 ".section .fixup,\"ax\"\n" \
11345 "3: mov %3,%0\n" \
11346 @@ -382,7 +424,7 @@ do { \
11347 " jmp 2b\n" \
11348 ".previous\n" \
11349 _ASM_EXTABLE(1b, 3b) \
11350 - : "=r" (err), ltype(x) \
11351 + : "=r" (err), ltype (x) \
11352 : "m" (__m(addr)), "i" (errret), "0" (err))
11353
11354 #define __get_user_size_ex(x, ptr, size) \
11355 @@ -407,7 +449,7 @@ do { \
11356 } while (0)
11357
11358 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
11359 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
11360 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
11361 "2:\n" \
11362 _ASM_EXTABLE(1b, 2b - 1b) \
11363 : ltype(x) : "m" (__m(addr)))
11364 @@ -424,13 +466,24 @@ do { \
11365 int __gu_err; \
11366 unsigned long __gu_val; \
11367 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
11368 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
11369 + (x) = (__typeof__(*(ptr)))__gu_val; \
11370 __gu_err; \
11371 })
11372
11373 /* FIXME: this hack is definitely wrong -AK */
11374 struct __large_struct { unsigned long buf[100]; };
11375 -#define __m(x) (*(struct __large_struct __user *)(x))
11376 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11377 +#define ____m(x) \
11378 +({ \
11379 + unsigned long ____x = (unsigned long)(x); \
11380 + if (____x < PAX_USER_SHADOW_BASE) \
11381 + ____x += PAX_USER_SHADOW_BASE; \
11382 + (void __user *)____x; \
11383 +})
11384 +#else
11385 +#define ____m(x) (x)
11386 +#endif
11387 +#define __m(x) (*(struct __large_struct __user *)____m(x))
11388
11389 /*
11390 * Tell gcc we read from memory instead of writing: this is because
11391 @@ -438,7 +491,7 @@ struct __large_struct { unsigned long bu
11392 * aliasing issues.
11393 */
11394 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
11395 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
11396 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
11397 "2:\n" \
11398 ".section .fixup,\"ax\"\n" \
11399 "3: mov %3,%0\n" \
11400 @@ -446,10 +499,10 @@ struct __large_struct { unsigned long bu
11401 ".previous\n" \
11402 _ASM_EXTABLE(1b, 3b) \
11403 : "=r"(err) \
11404 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
11405 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
11406
11407 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
11408 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
11409 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
11410 "2:\n" \
11411 _ASM_EXTABLE(1b, 2b - 1b) \
11412 : : ltype(x), "m" (__m(addr)))
11413 @@ -488,8 +541,12 @@ struct __large_struct { unsigned long bu
11414 * On error, the variable @x is set to zero.
11415 */
11416
11417 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11418 +#define __get_user(x, ptr) get_user((x), (ptr))
11419 +#else
11420 #define __get_user(x, ptr) \
11421 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
11422 +#endif
11423
11424 /**
11425 * __put_user: - Write a simple value into user space, with less checking.
11426 @@ -511,8 +568,12 @@ struct __large_struct { unsigned long bu
11427 * Returns zero on success, or -EFAULT on error.
11428 */
11429
11430 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11431 +#define __put_user(x, ptr) put_user((x), (ptr))
11432 +#else
11433 #define __put_user(x, ptr) \
11434 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
11435 +#endif
11436
11437 #define __get_user_unaligned __get_user
11438 #define __put_user_unaligned __put_user
11439 @@ -530,7 +591,7 @@ struct __large_struct { unsigned long bu
11440 #define get_user_ex(x, ptr) do { \
11441 unsigned long __gue_val; \
11442 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
11443 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
11444 + (x) = (__typeof__(*(ptr)))__gue_val; \
11445 } while (0)
11446
11447 #ifdef CONFIG_X86_WP_WORKS_OK
11448 @@ -567,6 +628,7 @@ extern struct movsl_mask {
11449
11450 #define ARCH_HAS_NOCACHE_UACCESS 1
11451
11452 +#define ARCH_HAS_SORT_EXTABLE
11453 #ifdef CONFIG_X86_32
11454 # include "uaccess_32.h"
11455 #else
11456 diff -urNp linux-2.6.32.45/arch/x86/include/asm/vgtod.h linux-2.6.32.45/arch/x86/include/asm/vgtod.h
11457 --- linux-2.6.32.45/arch/x86/include/asm/vgtod.h 2011-03-27 14:31:47.000000000 -0400
11458 +++ linux-2.6.32.45/arch/x86/include/asm/vgtod.h 2011-04-17 15:56:46.000000000 -0400
11459 @@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
11460 int sysctl_enabled;
11461 struct timezone sys_tz;
11462 struct { /* extract of a clocksource struct */
11463 + char name[8];
11464 cycle_t (*vread)(void);
11465 cycle_t cycle_last;
11466 cycle_t mask;
11467 diff -urNp linux-2.6.32.45/arch/x86/include/asm/vmi.h linux-2.6.32.45/arch/x86/include/asm/vmi.h
11468 --- linux-2.6.32.45/arch/x86/include/asm/vmi.h 2011-03-27 14:31:47.000000000 -0400
11469 +++ linux-2.6.32.45/arch/x86/include/asm/vmi.h 2011-04-17 15:56:46.000000000 -0400
11470 @@ -191,6 +191,7 @@ struct vrom_header {
11471 u8 reserved[96]; /* Reserved for headers */
11472 char vmi_init[8]; /* VMI_Init jump point */
11473 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
11474 + char rom_data[8048]; /* rest of the option ROM */
11475 } __attribute__((packed));
11476
11477 struct pnp_header {
11478 diff -urNp linux-2.6.32.45/arch/x86/include/asm/vmi_time.h linux-2.6.32.45/arch/x86/include/asm/vmi_time.h
11479 --- linux-2.6.32.45/arch/x86/include/asm/vmi_time.h 2011-03-27 14:31:47.000000000 -0400
11480 +++ linux-2.6.32.45/arch/x86/include/asm/vmi_time.h 2011-08-05 20:33:55.000000000 -0400
11481 @@ -43,7 +43,7 @@ extern struct vmi_timer_ops {
11482 int (*wallclock_updated)(void);
11483 void (*set_alarm)(u32 flags, u64 expiry, u64 period);
11484 void (*cancel_alarm)(u32 flags);
11485 -} vmi_timer_ops;
11486 +} __no_const vmi_timer_ops;
11487
11488 /* Prototypes */
11489 extern void __init vmi_time_init(void);
11490 diff -urNp linux-2.6.32.45/arch/x86/include/asm/vsyscall.h linux-2.6.32.45/arch/x86/include/asm/vsyscall.h
11491 --- linux-2.6.32.45/arch/x86/include/asm/vsyscall.h 2011-03-27 14:31:47.000000000 -0400
11492 +++ linux-2.6.32.45/arch/x86/include/asm/vsyscall.h 2011-04-17 15:56:46.000000000 -0400
11493 @@ -15,9 +15,10 @@ enum vsyscall_num {
11494
11495 #ifdef __KERNEL__
11496 #include <linux/seqlock.h>
11497 +#include <linux/getcpu.h>
11498 +#include <linux/time.h>
11499
11500 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
11501 -#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
11502
11503 /* Definitions for CONFIG_GENERIC_TIME definitions */
11504 #define __section_vsyscall_gtod_data __attribute__ \
11505 @@ -31,7 +32,6 @@ enum vsyscall_num {
11506 #define VGETCPU_LSL 2
11507
11508 extern int __vgetcpu_mode;
11509 -extern volatile unsigned long __jiffies;
11510
11511 /* kernel space (writeable) */
11512 extern int vgetcpu_mode;
11513 @@ -39,6 +39,9 @@ extern struct timezone sys_tz;
11514
11515 extern void map_vsyscall(void);
11516
11517 +extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
11518 +extern time_t vtime(time_t *t);
11519 +extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
11520 #endif /* __KERNEL__ */
11521
11522 #endif /* _ASM_X86_VSYSCALL_H */
11523 diff -urNp linux-2.6.32.45/arch/x86/include/asm/x86_init.h linux-2.6.32.45/arch/x86/include/asm/x86_init.h
11524 --- linux-2.6.32.45/arch/x86/include/asm/x86_init.h 2011-03-27 14:31:47.000000000 -0400
11525 +++ linux-2.6.32.45/arch/x86/include/asm/x86_init.h 2011-08-05 20:33:55.000000000 -0400
11526 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
11527 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
11528 void (*find_smp_config)(unsigned int reserve);
11529 void (*get_smp_config)(unsigned int early);
11530 -};
11531 +} __no_const;
11532
11533 /**
11534 * struct x86_init_resources - platform specific resource related ops
11535 @@ -42,7 +42,7 @@ struct x86_init_resources {
11536 void (*probe_roms)(void);
11537 void (*reserve_resources)(void);
11538 char *(*memory_setup)(void);
11539 -};
11540 +} __no_const;
11541
11542 /**
11543 * struct x86_init_irqs - platform specific interrupt setup
11544 @@ -55,7 +55,7 @@ struct x86_init_irqs {
11545 void (*pre_vector_init)(void);
11546 void (*intr_init)(void);
11547 void (*trap_init)(void);
11548 -};
11549 +} __no_const;
11550
11551 /**
11552 * struct x86_init_oem - oem platform specific customizing functions
11553 @@ -65,7 +65,7 @@ struct x86_init_irqs {
11554 struct x86_init_oem {
11555 void (*arch_setup)(void);
11556 void (*banner)(void);
11557 -};
11558 +} __no_const;
11559
11560 /**
11561 * struct x86_init_paging - platform specific paging functions
11562 @@ -75,7 +75,7 @@ struct x86_init_oem {
11563 struct x86_init_paging {
11564 void (*pagetable_setup_start)(pgd_t *base);
11565 void (*pagetable_setup_done)(pgd_t *base);
11566 -};
11567 +} __no_const;
11568
11569 /**
11570 * struct x86_init_timers - platform specific timer setup
11571 @@ -88,7 +88,7 @@ struct x86_init_timers {
11572 void (*setup_percpu_clockev)(void);
11573 void (*tsc_pre_init)(void);
11574 void (*timer_init)(void);
11575 -};
11576 +} __no_const;
11577
11578 /**
11579 * struct x86_init_ops - functions for platform specific setup
11580 @@ -101,7 +101,7 @@ struct x86_init_ops {
11581 struct x86_init_oem oem;
11582 struct x86_init_paging paging;
11583 struct x86_init_timers timers;
11584 -};
11585 +} __no_const;
11586
11587 /**
11588 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
11589 @@ -109,7 +109,7 @@ struct x86_init_ops {
11590 */
11591 struct x86_cpuinit_ops {
11592 void (*setup_percpu_clockev)(void);
11593 -};
11594 +} __no_const;
11595
11596 /**
11597 * struct x86_platform_ops - platform specific runtime functions
11598 @@ -121,7 +121,7 @@ struct x86_platform_ops {
11599 unsigned long (*calibrate_tsc)(void);
11600 unsigned long (*get_wallclock)(void);
11601 int (*set_wallclock)(unsigned long nowtime);
11602 -};
11603 +} __no_const;
11604
11605 extern struct x86_init_ops x86_init;
11606 extern struct x86_cpuinit_ops x86_cpuinit;
11607 diff -urNp linux-2.6.32.45/arch/x86/include/asm/xsave.h linux-2.6.32.45/arch/x86/include/asm/xsave.h
11608 --- linux-2.6.32.45/arch/x86/include/asm/xsave.h 2011-03-27 14:31:47.000000000 -0400
11609 +++ linux-2.6.32.45/arch/x86/include/asm/xsave.h 2011-04-17 15:56:46.000000000 -0400
11610 @@ -56,6 +56,12 @@ static inline int xrstor_checking(struct
11611 static inline int xsave_user(struct xsave_struct __user *buf)
11612 {
11613 int err;
11614 +
11615 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11616 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
11617 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
11618 +#endif
11619 +
11620 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
11621 "2:\n"
11622 ".section .fixup,\"ax\"\n"
11623 @@ -82,6 +88,11 @@ static inline int xrestore_user(struct x
11624 u32 lmask = mask;
11625 u32 hmask = mask >> 32;
11626
11627 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11628 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
11629 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
11630 +#endif
11631 +
11632 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
11633 "2:\n"
11634 ".section .fixup,\"ax\"\n"
11635 diff -urNp linux-2.6.32.45/arch/x86/Kconfig linux-2.6.32.45/arch/x86/Kconfig
11636 --- linux-2.6.32.45/arch/x86/Kconfig 2011-03-27 14:31:47.000000000 -0400
11637 +++ linux-2.6.32.45/arch/x86/Kconfig 2011-04-17 15:56:46.000000000 -0400
11638 @@ -223,7 +223,7 @@ config X86_TRAMPOLINE
11639
11640 config X86_32_LAZY_GS
11641 def_bool y
11642 - depends on X86_32 && !CC_STACKPROTECTOR
11643 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
11644
11645 config KTIME_SCALAR
11646 def_bool X86_32
11647 @@ -1008,7 +1008,7 @@ choice
11648
11649 config NOHIGHMEM
11650 bool "off"
11651 - depends on !X86_NUMAQ
11652 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11653 ---help---
11654 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
11655 However, the address space of 32-bit x86 processors is only 4
11656 @@ -1045,7 +1045,7 @@ config NOHIGHMEM
11657
11658 config HIGHMEM4G
11659 bool "4GB"
11660 - depends on !X86_NUMAQ
11661 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11662 ---help---
11663 Select this if you have a 32-bit processor and between 1 and 4
11664 gigabytes of physical RAM.
11665 @@ -1099,7 +1099,7 @@ config PAGE_OFFSET
11666 hex
11667 default 0xB0000000 if VMSPLIT_3G_OPT
11668 default 0x80000000 if VMSPLIT_2G
11669 - default 0x78000000 if VMSPLIT_2G_OPT
11670 + default 0x70000000 if VMSPLIT_2G_OPT
11671 default 0x40000000 if VMSPLIT_1G
11672 default 0xC0000000
11673 depends on X86_32
11674 @@ -1430,7 +1430,7 @@ config ARCH_USES_PG_UNCACHED
11675
11676 config EFI
11677 bool "EFI runtime service support"
11678 - depends on ACPI
11679 + depends on ACPI && !PAX_KERNEXEC
11680 ---help---
11681 This enables the kernel to use EFI runtime services that are
11682 available (such as the EFI variable services).
11683 @@ -1460,6 +1460,7 @@ config SECCOMP
11684
11685 config CC_STACKPROTECTOR
11686 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
11687 + depends on X86_64 || !PAX_MEMORY_UDEREF
11688 ---help---
11689 This option turns on the -fstack-protector GCC feature. This
11690 feature puts, at the beginning of functions, a canary value on
11691 @@ -1517,6 +1518,7 @@ config KEXEC_JUMP
11692 config PHYSICAL_START
11693 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
11694 default "0x1000000"
11695 + range 0x400000 0x40000000
11696 ---help---
11697 This gives the physical address where the kernel is loaded.
11698
11699 @@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
11700 hex
11701 prompt "Alignment value to which kernel should be aligned" if X86_32
11702 default "0x1000000"
11703 + range 0x400000 0x1000000 if PAX_KERNEXEC
11704 range 0x2000 0x1000000
11705 ---help---
11706 This value puts the alignment restrictions on physical address
11707 @@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
11708 Say N if you want to disable CPU hotplug.
11709
11710 config COMPAT_VDSO
11711 - def_bool y
11712 + def_bool n
11713 prompt "Compat VDSO support"
11714 depends on X86_32 || IA32_EMULATION
11715 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
11716 ---help---
11717 Map the 32-bit VDSO to the predictable old-style address too.
11718 ---help---
11719 diff -urNp linux-2.6.32.45/arch/x86/Kconfig.cpu linux-2.6.32.45/arch/x86/Kconfig.cpu
11720 --- linux-2.6.32.45/arch/x86/Kconfig.cpu 2011-03-27 14:31:47.000000000 -0400
11721 +++ linux-2.6.32.45/arch/x86/Kconfig.cpu 2011-04-17 15:56:46.000000000 -0400
11722 @@ -340,7 +340,7 @@ config X86_PPRO_FENCE
11723
11724 config X86_F00F_BUG
11725 def_bool y
11726 - depends on M586MMX || M586TSC || M586 || M486 || M386
11727 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
11728
11729 config X86_WP_WORKS_OK
11730 def_bool y
11731 @@ -360,7 +360,7 @@ config X86_POPAD_OK
11732
11733 config X86_ALIGNMENT_16
11734 def_bool y
11735 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11736 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11737
11738 config X86_INTEL_USERCOPY
11739 def_bool y
11740 @@ -406,7 +406,7 @@ config X86_CMPXCHG64
11741 # generates cmov.
11742 config X86_CMOV
11743 def_bool y
11744 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11745 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11746
11747 config X86_MINIMUM_CPU_FAMILY
11748 int
11749 diff -urNp linux-2.6.32.45/arch/x86/Kconfig.debug linux-2.6.32.45/arch/x86/Kconfig.debug
11750 --- linux-2.6.32.45/arch/x86/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
11751 +++ linux-2.6.32.45/arch/x86/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
11752 @@ -99,7 +99,7 @@ config X86_PTDUMP
11753 config DEBUG_RODATA
11754 bool "Write protect kernel read-only data structures"
11755 default y
11756 - depends on DEBUG_KERNEL
11757 + depends on DEBUG_KERNEL && BROKEN
11758 ---help---
11759 Mark the kernel read-only data as write-protected in the pagetables,
11760 in order to catch accidental (and incorrect) writes to such const
11761 diff -urNp linux-2.6.32.45/arch/x86/kernel/acpi/realmode/Makefile linux-2.6.32.45/arch/x86/kernel/acpi/realmode/Makefile
11762 --- linux-2.6.32.45/arch/x86/kernel/acpi/realmode/Makefile 2011-03-27 14:31:47.000000000 -0400
11763 +++ linux-2.6.32.45/arch/x86/kernel/acpi/realmode/Makefile 2011-08-07 14:38:58.000000000 -0400
11764 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
11765 $(call cc-option, -fno-stack-protector) \
11766 $(call cc-option, -mpreferred-stack-boundary=2)
11767 KBUILD_CFLAGS += $(call cc-option, -m32)
11768 +ifdef CONSTIFY_PLUGIN
11769 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
11770 +endif
11771 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11772 GCOV_PROFILE := n
11773
11774 diff -urNp linux-2.6.32.45/arch/x86/kernel/acpi/realmode/wakeup.S linux-2.6.32.45/arch/x86/kernel/acpi/realmode/wakeup.S
11775 --- linux-2.6.32.45/arch/x86/kernel/acpi/realmode/wakeup.S 2011-03-27 14:31:47.000000000 -0400
11776 +++ linux-2.6.32.45/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-01 18:53:40.000000000 -0400
11777 @@ -91,6 +91,9 @@ _start:
11778 /* Do any other stuff... */
11779
11780 #ifndef CONFIG_64BIT
11781 + /* Recheck NX bit overrides (64bit path does this in trampoline) */
11782 + call verify_cpu
11783 +
11784 /* This could also be done in C code... */
11785 movl pmode_cr3, %eax
11786 movl %eax, %cr3
11787 @@ -104,7 +107,7 @@ _start:
11788 movl %eax, %ecx
11789 orl %edx, %ecx
11790 jz 1f
11791 - movl $0xc0000080, %ecx
11792 + mov $MSR_EFER, %ecx
11793 wrmsr
11794 1:
11795
11796 @@ -114,6 +117,7 @@ _start:
11797 movl pmode_cr0, %eax
11798 movl %eax, %cr0
11799 jmp pmode_return
11800 +# include "../../verify_cpu.S"
11801 #else
11802 pushw $0
11803 pushw trampoline_segment
11804 diff -urNp linux-2.6.32.45/arch/x86/kernel/acpi/sleep.c linux-2.6.32.45/arch/x86/kernel/acpi/sleep.c
11805 --- linux-2.6.32.45/arch/x86/kernel/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
11806 +++ linux-2.6.32.45/arch/x86/kernel/acpi/sleep.c 2011-07-01 19:01:34.000000000 -0400
11807 @@ -11,11 +11,12 @@
11808 #include <linux/cpumask.h>
11809 #include <asm/segment.h>
11810 #include <asm/desc.h>
11811 +#include <asm/e820.h>
11812
11813 #include "realmode/wakeup.h"
11814 #include "sleep.h"
11815
11816 -unsigned long acpi_wakeup_address;
11817 +unsigned long acpi_wakeup_address = 0x2000;
11818 unsigned long acpi_realmode_flags;
11819
11820 /* address in low memory of the wakeup routine. */
11821 @@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
11822 #else /* CONFIG_64BIT */
11823 header->trampoline_segment = setup_trampoline() >> 4;
11824 #ifdef CONFIG_SMP
11825 - stack_start.sp = temp_stack + sizeof(temp_stack);
11826 + stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
11827 +
11828 + pax_open_kernel();
11829 early_gdt_descr.address =
11830 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11831 + pax_close_kernel();
11832 +
11833 initial_gs = per_cpu_offset(smp_processor_id());
11834 #endif
11835 initial_code = (unsigned long)wakeup_long64;
11836 @@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
11837 return;
11838 }
11839
11840 - acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
11841 -
11842 - if (!acpi_realmode) {
11843 - printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
11844 - return;
11845 - }
11846 -
11847 - acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
11848 + reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
11849 +	acpi_realmode = (unsigned long)__va(acpi_wakeup_address);
11850 }
11851
11852
11853 diff -urNp linux-2.6.32.45/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.32.45/arch/x86/kernel/acpi/wakeup_32.S
11854 --- linux-2.6.32.45/arch/x86/kernel/acpi/wakeup_32.S 2011-03-27 14:31:47.000000000 -0400
11855 +++ linux-2.6.32.45/arch/x86/kernel/acpi/wakeup_32.S 2011-04-17 15:56:46.000000000 -0400
11856 @@ -30,13 +30,11 @@ wakeup_pmode_return:
11857 # and restore the stack ... but you need gdt for this to work
11858 movl saved_context_esp, %esp
11859
11860 - movl %cs:saved_magic, %eax
11861 - cmpl $0x12345678, %eax
11862 + cmpl $0x12345678, saved_magic
11863 jne bogus_magic
11864
11865 # jump to place where we left off
11866 - movl saved_eip, %eax
11867 - jmp *%eax
11868 + jmp *(saved_eip)
11869
11870 bogus_magic:
11871 jmp bogus_magic
11872 diff -urNp linux-2.6.32.45/arch/x86/kernel/alternative.c linux-2.6.32.45/arch/x86/kernel/alternative.c
11873 --- linux-2.6.32.45/arch/x86/kernel/alternative.c 2011-03-27 14:31:47.000000000 -0400
11874 +++ linux-2.6.32.45/arch/x86/kernel/alternative.c 2011-04-17 15:56:46.000000000 -0400
11875 @@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(str
11876
11877 BUG_ON(p->len > MAX_PATCH_LEN);
11878 /* prep the buffer with the original instructions */
11879 - memcpy(insnbuf, p->instr, p->len);
11880 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11881 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11882 (unsigned long)p->instr, p->len);
11883
11884 @@ -475,7 +475,7 @@ void __init alternative_instructions(voi
11885 if (smp_alt_once)
11886 free_init_pages("SMP alternatives",
11887 (unsigned long)__smp_locks,
11888 - (unsigned long)__smp_locks_end);
11889 + PAGE_ALIGN((unsigned long)__smp_locks_end));
11890
11891 restart_nmi();
11892 }
11893 @@ -492,13 +492,17 @@ void __init alternative_instructions(voi
11894 * instructions. And on the local CPU you need to be protected again NMI or MCE
11895 * handlers seeing an inconsistent instruction while you patch.
11896 */
11897 -static void *__init_or_module text_poke_early(void *addr, const void *opcode,
11898 +static void *__kprobes text_poke_early(void *addr, const void *opcode,
11899 size_t len)
11900 {
11901 unsigned long flags;
11902 local_irq_save(flags);
11903 - memcpy(addr, opcode, len);
11904 +
11905 + pax_open_kernel();
11906 + memcpy(ktla_ktva(addr), opcode, len);
11907 sync_core();
11908 + pax_close_kernel();
11909 +
11910 local_irq_restore(flags);
11911 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11912 that causes hangs on some VIA CPUs. */
11913 @@ -520,35 +524,21 @@ static void *__init_or_module text_poke_
11914 */
11915 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11916 {
11917 - unsigned long flags;
11918 - char *vaddr;
11919 + unsigned char *vaddr = ktla_ktva(addr);
11920 struct page *pages[2];
11921 - int i;
11922 + size_t i;
11923
11924 if (!core_kernel_text((unsigned long)addr)) {
11925 - pages[0] = vmalloc_to_page(addr);
11926 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11927 + pages[0] = vmalloc_to_page(vaddr);
11928 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11929 } else {
11930 - pages[0] = virt_to_page(addr);
11931 + pages[0] = virt_to_page(vaddr);
11932 WARN_ON(!PageReserved(pages[0]));
11933 - pages[1] = virt_to_page(addr + PAGE_SIZE);
11934 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11935 }
11936 BUG_ON(!pages[0]);
11937 - local_irq_save(flags);
11938 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11939 - if (pages[1])
11940 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11941 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11942 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11943 - clear_fixmap(FIX_TEXT_POKE0);
11944 - if (pages[1])
11945 - clear_fixmap(FIX_TEXT_POKE1);
11946 - local_flush_tlb();
11947 - sync_core();
11948 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
11949 - that causes hangs on some VIA CPUs. */
11950 + text_poke_early(addr, opcode, len);
11951 for (i = 0; i < len; i++)
11952 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11953 - local_irq_restore(flags);
11954 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11955 return addr;
11956 }
11957 diff -urNp linux-2.6.32.45/arch/x86/kernel/amd_iommu.c linux-2.6.32.45/arch/x86/kernel/amd_iommu.c
11958 --- linux-2.6.32.45/arch/x86/kernel/amd_iommu.c 2011-03-27 14:31:47.000000000 -0400
11959 +++ linux-2.6.32.45/arch/x86/kernel/amd_iommu.c 2011-04-17 15:56:46.000000000 -0400
11960 @@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(
11961 }
11962 }
11963
11964 -static struct dma_map_ops amd_iommu_dma_ops = {
11965 +static const struct dma_map_ops amd_iommu_dma_ops = {
11966 .alloc_coherent = alloc_coherent,
11967 .free_coherent = free_coherent,
11968 .map_page = map_page,
11969 diff -urNp linux-2.6.32.45/arch/x86/kernel/apic/apic.c linux-2.6.32.45/arch/x86/kernel/apic/apic.c
11970 --- linux-2.6.32.45/arch/x86/kernel/apic/apic.c 2011-03-27 14:31:47.000000000 -0400
11971 +++ linux-2.6.32.45/arch/x86/kernel/apic/apic.c 2011-08-17 20:00:16.000000000 -0400
11972 @@ -170,7 +170,7 @@ int first_system_vector = 0xfe;
11973 /*
11974 * Debug level, exported for io_apic.c
11975 */
11976 -unsigned int apic_verbosity;
11977 +int apic_verbosity;
11978
11979 int pic_mode;
11980
11981 @@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs
11982 apic_write(APIC_ESR, 0);
11983 v1 = apic_read(APIC_ESR);
11984 ack_APIC_irq();
11985 - atomic_inc(&irq_err_count);
11986 + atomic_inc_unchecked(&irq_err_count);
11987
11988 /*
11989 * Here is what the APIC error bits mean:
11990 @@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(vo
11991 u16 *bios_cpu_apicid;
11992 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
11993
11994 + pax_track_stack();
11995 +
11996 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
11997 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
11998
11999 diff -urNp linux-2.6.32.45/arch/x86/kernel/apic/io_apic.c linux-2.6.32.45/arch/x86/kernel/apic/io_apic.c
12000 --- linux-2.6.32.45/arch/x86/kernel/apic/io_apic.c 2011-03-27 14:31:47.000000000 -0400
12001 +++ linux-2.6.32.45/arch/x86/kernel/apic/io_apic.c 2011-05-04 17:56:20.000000000 -0400
12002 @@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapi
12003 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
12004 GFP_ATOMIC);
12005 if (!ioapic_entries)
12006 - return 0;
12007 + return NULL;
12008
12009 for (apic = 0; apic < nr_ioapics; apic++) {
12010 ioapic_entries[apic] =
12011 @@ -733,7 +733,7 @@ nomem:
12012 kfree(ioapic_entries[apic]);
12013 kfree(ioapic_entries);
12014
12015 - return 0;
12016 + return NULL;
12017 }
12018
12019 /*
12020 @@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
12021 }
12022 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
12023
12024 -void lock_vector_lock(void)
12025 +void lock_vector_lock(void) __acquires(vector_lock)
12026 {
12027 /* Used to the online set of cpus does not change
12028 * during assign_irq_vector.
12029 @@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
12030 spin_lock(&vector_lock);
12031 }
12032
12033 -void unlock_vector_lock(void)
12034 +void unlock_vector_lock(void) __releases(vector_lock)
12035 {
12036 spin_unlock(&vector_lock);
12037 }
12038 @@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int i
12039 ack_APIC_irq();
12040 }
12041
12042 -atomic_t irq_mis_count;
12043 +atomic_unchecked_t irq_mis_count;
12044
12045 static void ack_apic_level(unsigned int irq)
12046 {
12047 @@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int
12048
12049 /* Tail end of version 0x11 I/O APIC bug workaround */
12050 if (!(v & (1 << (i & 0x1f)))) {
12051 - atomic_inc(&irq_mis_count);
12052 + atomic_inc_unchecked(&irq_mis_count);
12053 spin_lock(&ioapic_lock);
12054 __mask_and_edge_IO_APIC_irq(cfg);
12055 __unmask_and_level_IO_APIC_irq(cfg);
12056 diff -urNp linux-2.6.32.45/arch/x86/kernel/apm_32.c linux-2.6.32.45/arch/x86/kernel/apm_32.c
12057 --- linux-2.6.32.45/arch/x86/kernel/apm_32.c 2011-03-27 14:31:47.000000000 -0400
12058 +++ linux-2.6.32.45/arch/x86/kernel/apm_32.c 2011-04-23 12:56:10.000000000 -0400
12059 @@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
12060 * This is for buggy BIOS's that refer to (real mode) segment 0x40
12061 * even though they are called in protected mode.
12062 */
12063 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
12064 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
12065 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
12066
12067 static const char driver_version[] = "1.16ac"; /* no spaces */
12068 @@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
12069 BUG_ON(cpu != 0);
12070 gdt = get_cpu_gdt_table(cpu);
12071 save_desc_40 = gdt[0x40 / 8];
12072 +
12073 + pax_open_kernel();
12074 gdt[0x40 / 8] = bad_bios_desc;
12075 + pax_close_kernel();
12076
12077 apm_irq_save(flags);
12078 APM_DO_SAVE_SEGS;
12079 @@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
12080 &call->esi);
12081 APM_DO_RESTORE_SEGS;
12082 apm_irq_restore(flags);
12083 +
12084 + pax_open_kernel();
12085 gdt[0x40 / 8] = save_desc_40;
12086 + pax_close_kernel();
12087 +
12088 put_cpu();
12089
12090 return call->eax & 0xff;
12091 @@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void
12092 BUG_ON(cpu != 0);
12093 gdt = get_cpu_gdt_table(cpu);
12094 save_desc_40 = gdt[0x40 / 8];
12095 +
12096 + pax_open_kernel();
12097 gdt[0x40 / 8] = bad_bios_desc;
12098 + pax_close_kernel();
12099
12100 apm_irq_save(flags);
12101 APM_DO_SAVE_SEGS;
12102 @@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void
12103 &call->eax);
12104 APM_DO_RESTORE_SEGS;
12105 apm_irq_restore(flags);
12106 +
12107 + pax_open_kernel();
12108 gdt[0x40 / 8] = save_desc_40;
12109 + pax_close_kernel();
12110 +
12111 put_cpu();
12112 return error;
12113 }
12114 @@ -975,7 +989,7 @@ recalc:
12115
12116 static void apm_power_off(void)
12117 {
12118 - unsigned char po_bios_call[] = {
12119 + const unsigned char po_bios_call[] = {
12120 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
12121 0x8e, 0xd0, /* movw ax,ss */
12122 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
12123 @@ -2357,12 +2371,15 @@ static int __init apm_init(void)
12124 * code to that CPU.
12125 */
12126 gdt = get_cpu_gdt_table(0);
12127 +
12128 + pax_open_kernel();
12129 set_desc_base(&gdt[APM_CS >> 3],
12130 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
12131 set_desc_base(&gdt[APM_CS_16 >> 3],
12132 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
12133 set_desc_base(&gdt[APM_DS >> 3],
12134 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
12135 + pax_close_kernel();
12136
12137 proc_create("apm", 0, NULL, &apm_file_ops);
12138
12139 diff -urNp linux-2.6.32.45/arch/x86/kernel/asm-offsets_32.c linux-2.6.32.45/arch/x86/kernel/asm-offsets_32.c
12140 --- linux-2.6.32.45/arch/x86/kernel/asm-offsets_32.c 2011-03-27 14:31:47.000000000 -0400
12141 +++ linux-2.6.32.45/arch/x86/kernel/asm-offsets_32.c 2011-05-16 21:46:57.000000000 -0400
12142 @@ -51,7 +51,6 @@ void foo(void)
12143 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
12144 BLANK();
12145
12146 - OFFSET(TI_task, thread_info, task);
12147 OFFSET(TI_exec_domain, thread_info, exec_domain);
12148 OFFSET(TI_flags, thread_info, flags);
12149 OFFSET(TI_status, thread_info, status);
12150 @@ -60,6 +59,8 @@ void foo(void)
12151 OFFSET(TI_restart_block, thread_info, restart_block);
12152 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
12153 OFFSET(TI_cpu, thread_info, cpu);
12154 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
12155 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
12156 BLANK();
12157
12158 OFFSET(GDS_size, desc_ptr, size);
12159 @@ -99,6 +100,7 @@ void foo(void)
12160
12161 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
12162 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
12163 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
12164 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
12165 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
12166 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
12167 @@ -115,6 +117,11 @@ void foo(void)
12168 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
12169 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
12170 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
12171 +
12172 +#ifdef CONFIG_PAX_KERNEXEC
12173 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
12174 +#endif
12175 +
12176 #endif
12177
12178 #ifdef CONFIG_XEN
12179 diff -urNp linux-2.6.32.45/arch/x86/kernel/asm-offsets_64.c linux-2.6.32.45/arch/x86/kernel/asm-offsets_64.c
12180 --- linux-2.6.32.45/arch/x86/kernel/asm-offsets_64.c 2011-03-27 14:31:47.000000000 -0400
12181 +++ linux-2.6.32.45/arch/x86/kernel/asm-offsets_64.c 2011-08-23 20:24:19.000000000 -0400
12182 @@ -44,6 +44,8 @@ int main(void)
12183 ENTRY(addr_limit);
12184 ENTRY(preempt_count);
12185 ENTRY(status);
12186 + ENTRY(lowest_stack);
12187 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
12188 #ifdef CONFIG_IA32_EMULATION
12189 ENTRY(sysenter_return);
12190 #endif
12191 @@ -63,6 +65,18 @@ int main(void)
12192 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
12193 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
12194 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
12195 +
12196 +#ifdef CONFIG_PAX_KERNEXEC
12197 + OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
12198 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
12199 +#endif
12200 +
12201 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12202 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
12203 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
12204 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
12205 +#endif
12206 +
12207 #endif
12208
12209
12210 @@ -115,6 +129,7 @@ int main(void)
12211 ENTRY(cr8);
12212 BLANK();
12213 #undef ENTRY
12214 + DEFINE(TSS_size, sizeof(struct tss_struct));
12215 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
12216 BLANK();
12217 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
12218 @@ -130,6 +145,7 @@ int main(void)
12219
12220 BLANK();
12221 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
12222 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
12223 #ifdef CONFIG_XEN
12224 BLANK();
12225 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
12226 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/amd.c linux-2.6.32.45/arch/x86/kernel/cpu/amd.c
12227 --- linux-2.6.32.45/arch/x86/kernel/cpu/amd.c 2011-06-25 12:55:34.000000000 -0400
12228 +++ linux-2.6.32.45/arch/x86/kernel/cpu/amd.c 2011-06-25 12:56:37.000000000 -0400
12229 @@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_c
12230 unsigned int size)
12231 {
12232 /* AMD errata T13 (order #21922) */
12233 - if ((c->x86 == 6)) {
12234 + if (c->x86 == 6) {
12235 /* Duron Rev A0 */
12236 if (c->x86_model == 3 && c->x86_mask == 0)
12237 size = 64;
12238 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/common.c linux-2.6.32.45/arch/x86/kernel/cpu/common.c
12239 --- linux-2.6.32.45/arch/x86/kernel/cpu/common.c 2011-03-27 14:31:47.000000000 -0400
12240 +++ linux-2.6.32.45/arch/x86/kernel/cpu/common.c 2011-05-11 18:25:15.000000000 -0400
12241 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
12242
12243 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
12244
12245 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
12246 -#ifdef CONFIG_X86_64
12247 - /*
12248 - * We need valid kernel segments for data and code in long mode too
12249 - * IRET will check the segment types kkeil 2000/10/28
12250 - * Also sysret mandates a special GDT layout
12251 - *
12252 - * TLS descriptors are currently at a different place compared to i386.
12253 - * Hopefully nobody expects them at a fixed place (Wine?)
12254 - */
12255 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
12256 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
12257 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
12258 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
12259 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
12260 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
12261 -#else
12262 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
12263 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12264 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
12265 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
12266 - /*
12267 - * Segments used for calling PnP BIOS have byte granularity.
12268 - * They code segments and data segments have fixed 64k limits,
12269 - * the transfer segment sizes are set at run time.
12270 - */
12271 - /* 32-bit code */
12272 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
12273 - /* 16-bit code */
12274 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
12275 - /* 16-bit data */
12276 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
12277 - /* 16-bit data */
12278 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
12279 - /* 16-bit data */
12280 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
12281 - /*
12282 - * The APM segments have byte granularity and their bases
12283 - * are set at run time. All have 64k limits.
12284 - */
12285 - /* 32-bit code */
12286 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
12287 - /* 16-bit code */
12288 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
12289 - /* data */
12290 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
12291 -
12292 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12293 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12294 - GDT_STACK_CANARY_INIT
12295 -#endif
12296 -} };
12297 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
12298 -
12299 static int __init x86_xsave_setup(char *s)
12300 {
12301 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
12302 @@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
12303 {
12304 struct desc_ptr gdt_descr;
12305
12306 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
12307 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
12308 gdt_descr.size = GDT_SIZE - 1;
12309 load_gdt(&gdt_descr);
12310 /* Reload the per-cpu base */
12311 @@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struc
12312 /* Filter out anything that depends on CPUID levels we don't have */
12313 filter_cpuid_features(c, true);
12314
12315 +#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
12316 + setup_clear_cpu_cap(X86_FEATURE_SEP);
12317 +#endif
12318 +
12319 /* If the model name is still unset, do table lookup. */
12320 if (!c->x86_model_id[0]) {
12321 const char *p;
12322 @@ -980,6 +930,9 @@ static __init int setup_disablecpuid(cha
12323 }
12324 __setup("clearcpuid=", setup_disablecpuid);
12325
12326 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
12327 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
12328 +
12329 #ifdef CONFIG_X86_64
12330 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
12331
12332 @@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
12333 EXPORT_PER_CPU_SYMBOL(current_task);
12334
12335 DEFINE_PER_CPU(unsigned long, kernel_stack) =
12336 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
12337 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
12338 EXPORT_PER_CPU_SYMBOL(kernel_stack);
12339
12340 DEFINE_PER_CPU(char *, irq_stack_ptr) =
12341 @@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(str
12342 {
12343 memset(regs, 0, sizeof(struct pt_regs));
12344 regs->fs = __KERNEL_PERCPU;
12345 - regs->gs = __KERNEL_STACK_CANARY;
12346 + savesegment(gs, regs->gs);
12347
12348 return regs;
12349 }
12350 @@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
12351 int i;
12352
12353 cpu = stack_smp_processor_id();
12354 - t = &per_cpu(init_tss, cpu);
12355 + t = init_tss + cpu;
12356 orig_ist = &per_cpu(orig_ist, cpu);
12357
12358 #ifdef CONFIG_NUMA
12359 @@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
12360 switch_to_new_gdt(cpu);
12361 loadsegment(fs, 0);
12362
12363 - load_idt((const struct desc_ptr *)&idt_descr);
12364 + load_idt(&idt_descr);
12365
12366 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
12367 syscall_init();
12368 @@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
12369 wrmsrl(MSR_KERNEL_GS_BASE, 0);
12370 barrier();
12371
12372 - check_efer();
12373 if (cpu != 0)
12374 enable_x2apic();
12375
12376 @@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
12377 {
12378 int cpu = smp_processor_id();
12379 struct task_struct *curr = current;
12380 - struct tss_struct *t = &per_cpu(init_tss, cpu);
12381 + struct tss_struct *t = init_tss + cpu;
12382 struct thread_struct *thread = &curr->thread;
12383
12384 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
12385 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/intel.c linux-2.6.32.45/arch/x86/kernel/cpu/intel.c
12386 --- linux-2.6.32.45/arch/x86/kernel/cpu/intel.c 2011-03-27 14:31:47.000000000 -0400
12387 +++ linux-2.6.32.45/arch/x86/kernel/cpu/intel.c 2011-04-17 15:56:46.000000000 -0400
12388 @@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug
12389 * Update the IDT descriptor and reload the IDT so that
12390 * it uses the read-only mapped virtual address.
12391 */
12392 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
12393 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
12394 load_idt(&idt_descr);
12395 }
12396 #endif
12397 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/intel_cacheinfo.c linux-2.6.32.45/arch/x86/kernel/cpu/intel_cacheinfo.c
12398 --- linux-2.6.32.45/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
12399 +++ linux-2.6.32.45/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-04-17 15:56:46.000000000 -0400
12400 @@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kob
12401 return ret;
12402 }
12403
12404 -static struct sysfs_ops sysfs_ops = {
12405 +static const struct sysfs_ops sysfs_ops = {
12406 .show = show,
12407 .store = store,
12408 };
12409 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/Makefile linux-2.6.32.45/arch/x86/kernel/cpu/Makefile
12410 --- linux-2.6.32.45/arch/x86/kernel/cpu/Makefile 2011-03-27 14:31:47.000000000 -0400
12411 +++ linux-2.6.32.45/arch/x86/kernel/cpu/Makefile 2011-04-17 15:56:46.000000000 -0400
12412 @@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
12413 CFLAGS_REMOVE_common.o = -pg
12414 endif
12415
12416 -# Make sure load_percpu_segment has no stackprotector
12417 -nostackp := $(call cc-option, -fno-stack-protector)
12418 -CFLAGS_common.o := $(nostackp)
12419 -
12420 obj-y := intel_cacheinfo.o addon_cpuid_features.o
12421 obj-y += proc.o capflags.o powerflags.o common.o
12422 obj-y += vmware.o hypervisor.o sched.o
12423 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce_amd.c linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce_amd.c
12424 --- linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:56:59.000000000 -0400
12425 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:57:13.000000000 -0400
12426 @@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kob
12427 return ret;
12428 }
12429
12430 -static struct sysfs_ops threshold_ops = {
12431 +static const struct sysfs_ops threshold_ops = {
12432 .show = show,
12433 .store = store,
12434 };
12435 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce.c
12436 --- linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce.c 2011-03-27 14:31:47.000000000 -0400
12437 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce.c 2011-05-04 17:56:20.000000000 -0400
12438 @@ -43,6 +43,7 @@
12439 #include <asm/ipi.h>
12440 #include <asm/mce.h>
12441 #include <asm/msr.h>
12442 +#include <asm/local.h>
12443
12444 #include "mce-internal.h"
12445
12446 @@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
12447 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
12448 m->cs, m->ip);
12449
12450 - if (m->cs == __KERNEL_CS)
12451 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
12452 print_symbol("{%s}", m->ip);
12453 pr_cont("\n");
12454 }
12455 @@ -221,10 +222,10 @@ static void print_mce_tail(void)
12456
12457 #define PANIC_TIMEOUT 5 /* 5 seconds */
12458
12459 -static atomic_t mce_paniced;
12460 +static atomic_unchecked_t mce_paniced;
12461
12462 static int fake_panic;
12463 -static atomic_t mce_fake_paniced;
12464 +static atomic_unchecked_t mce_fake_paniced;
12465
12466 /* Panic in progress. Enable interrupts and wait for final IPI */
12467 static void wait_for_panic(void)
12468 @@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct
12469 /*
12470 * Make sure only one CPU runs in machine check panic
12471 */
12472 - if (atomic_inc_return(&mce_paniced) > 1)
12473 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
12474 wait_for_panic();
12475 barrier();
12476
12477 @@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct
12478 console_verbose();
12479 } else {
12480 /* Don't log too much for fake panic */
12481 - if (atomic_inc_return(&mce_fake_paniced) > 1)
12482 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
12483 return;
12484 }
12485 print_mce_head();
12486 @@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
12487 * might have been modified by someone else.
12488 */
12489 rmb();
12490 - if (atomic_read(&mce_paniced))
12491 + if (atomic_read_unchecked(&mce_paniced))
12492 wait_for_panic();
12493 if (!monarch_timeout)
12494 goto out;
12495 @@ -1429,14 +1430,14 @@ void __cpuinit mcheck_init(struct cpuinf
12496 */
12497
12498 static DEFINE_SPINLOCK(mce_state_lock);
12499 -static int open_count; /* #times opened */
12500 +static local_t open_count; /* #times opened */
12501 static int open_exclu; /* already open exclusive? */
12502
12503 static int mce_open(struct inode *inode, struct file *file)
12504 {
12505 spin_lock(&mce_state_lock);
12506
12507 - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
12508 + if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
12509 spin_unlock(&mce_state_lock);
12510
12511 return -EBUSY;
12512 @@ -1444,7 +1445,7 @@ static int mce_open(struct inode *inode,
12513
12514 if (file->f_flags & O_EXCL)
12515 open_exclu = 1;
12516 - open_count++;
12517 + local_inc(&open_count);
12518
12519 spin_unlock(&mce_state_lock);
12520
12521 @@ -1455,7 +1456,7 @@ static int mce_release(struct inode *ino
12522 {
12523 spin_lock(&mce_state_lock);
12524
12525 - open_count--;
12526 + local_dec(&open_count);
12527 open_exclu = 0;
12528
12529 spin_unlock(&mce_state_lock);
12530 @@ -2082,7 +2083,7 @@ struct dentry *mce_get_debugfs_dir(void)
12531 static void mce_reset(void)
12532 {
12533 cpu_missing = 0;
12534 - atomic_set(&mce_fake_paniced, 0);
12535 + atomic_set_unchecked(&mce_fake_paniced, 0);
12536 atomic_set(&mce_executing, 0);
12537 atomic_set(&mce_callin, 0);
12538 atomic_set(&global_nwo, 0);
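In the mce.c hunk above, counters that may legitimately wrap or that are already serialized by other means (mce_paniced, mce_fake_paniced, the lock-protected open_count) are switched to the *_unchecked and local_t variants, presumably so PaX-style overflow checking on ordinary atomic_t does not fire on them. A hedged userspace analogue of the checked-versus-unchecked split, using C11 atomics and invented helper names:

/* Userspace sketch only: the "checked" path stands in for a REFCOUNT-style
 * overflow trap, the "unchecked" path for atomic_inc_unchecked(). */
#include <stdatomic.h>
#include <stdio.h>
#include <limits.h>

static atomic_int checked_counter = INT_MAX - 1;
static atomic_int unchecked_counter = INT_MAX - 1;

/* Checked increment: refuse to wrap past INT_MAX. */
static int checked_inc(atomic_int *v)
{
	int old = atomic_load(v);
	do {
		if (old == INT_MAX)
			return -1;              /* would overflow: reject */
	} while (!atomic_compare_exchange_weak(v, &old, old + 1));
	return 0;
}

/* Unchecked increment: plain wraparound, no instrumentation. */
static void unchecked_inc(atomic_int *v)
{
	atomic_fetch_add(v, 1);
}

int main(void)
{
	checked_inc(&checked_counter);
	printf("checked second inc %s\n",
	       checked_inc(&checked_counter) ? "rejected" : "allowed");
	unchecked_inc(&unchecked_counter);
	unchecked_inc(&unchecked_counter);     /* silently wraps to INT_MIN */
	printf("unchecked value: %d\n", atomic_load(&unchecked_counter));
	return 0;
}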
12539 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce-inject.c
12540 --- linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-03-27 14:31:47.000000000 -0400
12541 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-08-05 20:33:55.000000000 -0400
12542 @@ -211,7 +211,9 @@ static ssize_t mce_write(struct file *fi
12543 static int inject_init(void)
12544 {
12545 printk(KERN_INFO "Machine check injector initialized\n");
12546 - mce_chrdev_ops.write = mce_write;
12547 + pax_open_kernel();
12548 + *(void **)&mce_chrdev_ops.write = mce_write;
12549 + pax_close_kernel();
12550 register_die_notifier(&mce_raise_nb);
12551 return 0;
12552 }
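mce_chrdev_ops becomes read-only under this patch's constification, so the one legitimate late assignment of .write above is bracketed by pax_open_kernel()/pax_close_kernel(), which briefly lift write protection for exactly that store. A hedged userspace analogue of the same pattern using mprotect(); the structure and function names are invented for illustration:

/* Sketch: keep an ops table in a read-only page and open a short window
 * of writability only around the one intended update. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

struct ops {
	ssize_t (*write)(const char *buf, size_t len);
};

static ssize_t default_write(const char *buf, size_t len)
{
	(void)buf;
	return (ssize_t)len;
}

static ssize_t injected_write(const char *buf, size_t len)
{
	printf("injected handler saw %zu bytes\n", len);
	(void)buf;
	return (ssize_t)len;
}

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	struct ops *ops;

	if (posix_memalign((void **)&ops, page, page))
		return 1;
	ops->write = default_write;
	mprotect(ops, page, PROT_READ);               /* "const" from now on */

	mprotect(ops, page, PROT_READ | PROT_WRITE);  /* pax_open_kernel()   */
	ops->write = injected_write;                  /* the one legal patch */
	mprotect(ops, page, PROT_READ);               /* pax_close_kernel()  */

	ops->write("hello", 5);
	return 0;
}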
12553 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/amd.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/amd.c
12554 --- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/amd.c 2011-03-27 14:31:47.000000000 -0400
12555 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/amd.c 2011-04-17 15:56:46.000000000 -0400
12556 @@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base
12557 return 0;
12558 }
12559
12560 -static struct mtrr_ops amd_mtrr_ops = {
12561 +static const struct mtrr_ops amd_mtrr_ops = {
12562 .vendor = X86_VENDOR_AMD,
12563 .set = amd_set_mtrr,
12564 .get = amd_get_mtrr,
12565 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/centaur.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/centaur.c
12566 --- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/centaur.c 2011-03-27 14:31:47.000000000 -0400
12567 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/centaur.c 2011-04-17 15:56:46.000000000 -0400
12568 @@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long
12569 return 0;
12570 }
12571
12572 -static struct mtrr_ops centaur_mtrr_ops = {
12573 +static const struct mtrr_ops centaur_mtrr_ops = {
12574 .vendor = X86_VENDOR_CENTAUR,
12575 .set = centaur_set_mcr,
12576 .get = centaur_get_mcr,
12577 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/cyrix.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/cyrix.c
12578 --- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-03-27 14:31:47.000000000 -0400
12579 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-04-17 15:56:46.000000000 -0400
12580 @@ -265,7 +265,7 @@ static void cyrix_set_all(void)
12581 post_set();
12582 }
12583
12584 -static struct mtrr_ops cyrix_mtrr_ops = {
12585 +static const struct mtrr_ops cyrix_mtrr_ops = {
12586 .vendor = X86_VENDOR_CYRIX,
12587 .set_all = cyrix_set_all,
12588 .set = cyrix_set_arr,
12589 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/generic.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/generic.c
12590 --- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/generic.c 2011-03-27 14:31:47.000000000 -0400
12591 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/generic.c 2011-04-23 12:56:10.000000000 -0400
12592 @@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
12593 /*
12594 * Generic structure...
12595 */
12596 -struct mtrr_ops generic_mtrr_ops = {
12597 +const struct mtrr_ops generic_mtrr_ops = {
12598 .use_intel_if = 1,
12599 .set_all = generic_set_all,
12600 .get = generic_get_mtrr,
12601 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/main.c
12602 --- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:00:52.000000000 -0400
12603 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:03:05.000000000 -0400
12604 @@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
12605 u64 size_or_mask, size_and_mask;
12606 static bool mtrr_aps_delayed_init;
12607
12608 -static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
12609 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
12610
12611 -struct mtrr_ops *mtrr_if;
12612 +const struct mtrr_ops *mtrr_if;
12613
12614 static void set_mtrr(unsigned int reg, unsigned long base,
12615 unsigned long size, mtrr_type type);
12616
12617 -void set_mtrr_ops(struct mtrr_ops *ops)
12618 +void set_mtrr_ops(const struct mtrr_ops *ops)
12619 {
12620 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
12621 mtrr_ops[ops->vendor] = ops;
12622 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/mtrr.h
12623 --- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-03-27 14:31:47.000000000 -0400
12624 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-04-17 15:56:46.000000000 -0400
12625 @@ -12,19 +12,19 @@
12626 extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
12627
12628 struct mtrr_ops {
12629 - u32 vendor;
12630 - u32 use_intel_if;
12631 - void (*set)(unsigned int reg, unsigned long base,
12632 + const u32 vendor;
12633 + const u32 use_intel_if;
12634 + void (* const set)(unsigned int reg, unsigned long base,
12635 unsigned long size, mtrr_type type);
12636 - void (*set_all)(void);
12637 + void (* const set_all)(void);
12638
12639 - void (*get)(unsigned int reg, unsigned long *base,
12640 + void (* const get)(unsigned int reg, unsigned long *base,
12641 unsigned long *size, mtrr_type *type);
12642 - int (*get_free_region)(unsigned long base, unsigned long size,
12643 + int (* const get_free_region)(unsigned long base, unsigned long size,
12644 int replace_reg);
12645 - int (*validate_add_page)(unsigned long base, unsigned long size,
12646 + int (* const validate_add_page)(unsigned long base, unsigned long size,
12647 unsigned int type);
12648 - int (*have_wrcomb)(void);
12649 + int (* const have_wrcomb)(void);
12650 };
12651
12652 extern int generic_get_free_region(unsigned long base, unsigned long size,
12653 @@ -32,7 +32,7 @@ extern int generic_get_free_region(unsig
12654 extern int generic_validate_add_page(unsigned long base, unsigned long size,
12655 unsigned int type);
12656
12657 -extern struct mtrr_ops generic_mtrr_ops;
12658 +extern const struct mtrr_ops generic_mtrr_ops;
12659
12660 extern int positive_have_wrcomb(void);
12661
12662 @@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int in
12663 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
12664 void get_mtrr_state(void);
12665
12666 -extern void set_mtrr_ops(struct mtrr_ops *ops);
12667 +extern void set_mtrr_ops(const struct mtrr_ops *ops);
12668
12669 extern u64 size_or_mask, size_and_mask;
12670 -extern struct mtrr_ops *mtrr_if;
12671 +extern const struct mtrr_ops *mtrr_if;
12672
12673 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
12674 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
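The mtrr.h change above makes every function pointer in struct mtrr_ops const and every instance const, so the vendor tables can live in read-only memory and cannot be retargeted by a stray kernel write. A minimal sketch of the same constification idiom, with invented names:

/* Sketch only: all initialization happens at the definition, and later
 * attempts to change a handler fail at compile time. */
#include <stdio.h>

struct vendor_ops {
	const unsigned int vendor;
	void (* const set)(unsigned int reg);
	int  (* const validate)(unsigned long base, unsigned long size);
};

static void example_set(unsigned int reg)
{
	printf("set reg %u\n", reg);
}

static int example_validate(unsigned long base, unsigned long size)
{
	return base + size > base;   /* toy check */
}

static const struct vendor_ops example_ops = {
	.vendor   = 2,
	.set      = example_set,
	.validate = example_validate,
};

int main(void)
{
	/* example_ops.set = other_handler;   <- would not compile */
	if (example_ops.validate(0x1000, 0x1000))
		example_ops.set(example_ops.vendor);
	return 0;
}

A table that genuinely has to be patched at runtime is simply left non-const and annotated, as with intel_arch_wd_ops in the perfctr-watchdog.c hunk below.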
12675 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/perfctr-watchdog.c linux-2.6.32.45/arch/x86/kernel/cpu/perfctr-watchdog.c
12676 --- linux-2.6.32.45/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-03-27 14:31:47.000000000 -0400
12677 +++ linux-2.6.32.45/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-04-17 15:56:46.000000000 -0400
12678 @@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
12679
12680 /* Interface defining a CPU specific perfctr watchdog */
12681 struct wd_ops {
12682 - int (*reserve)(void);
12683 - void (*unreserve)(void);
12684 - int (*setup)(unsigned nmi_hz);
12685 - void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12686 - void (*stop)(void);
12687 + int (* const reserve)(void);
12688 + void (* const unreserve)(void);
12689 + int (* const setup)(unsigned nmi_hz);
12690 + void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12691 + void (* const stop)(void);
12692 unsigned perfctr;
12693 unsigned evntsel;
12694 u64 checkbit;
12695 @@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
12696 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
12697 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
12698
12699 +/* cannot be const */
12700 static struct wd_ops intel_arch_wd_ops;
12701
12702 static int setup_intel_arch_watchdog(unsigned nmi_hz)
12703 @@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(uns
12704 return 1;
12705 }
12706
12707 +/* cannot be const */
12708 static struct wd_ops intel_arch_wd_ops __read_mostly = {
12709 .reserve = single_msr_reserve,
12710 .unreserve = single_msr_unreserve,
12711 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/perf_event.c linux-2.6.32.45/arch/x86/kernel/cpu/perf_event.c
12712 --- linux-2.6.32.45/arch/x86/kernel/cpu/perf_event.c 2011-03-27 14:31:47.000000000 -0400
12713 +++ linux-2.6.32.45/arch/x86/kernel/cpu/perf_event.c 2011-05-04 17:56:20.000000000 -0400
12714 @@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event
12715 * count to the generic event atomically:
12716 */
12717 again:
12718 - prev_raw_count = atomic64_read(&hwc->prev_count);
12719 + prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
12720 rdmsrl(hwc->event_base + idx, new_raw_count);
12721
12722 - if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
12723 + if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
12724 new_raw_count) != prev_raw_count)
12725 goto again;
12726
12727 @@ -741,7 +741,7 @@ again:
12728 delta = (new_raw_count << shift) - (prev_raw_count << shift);
12729 delta >>= shift;
12730
12731 - atomic64_add(delta, &event->count);
12732 + atomic64_add_unchecked(delta, &event->count);
12733 atomic64_sub(delta, &hwc->period_left);
12734
12735 return new_raw_count;
12736 @@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_ev
12737 * The hw event starts counting from this event offset,
12738 * mark it to be able to extra future deltas:
12739 */
12740 - atomic64_set(&hwc->prev_count, (u64)-left);
12741 + atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
12742
12743 err = checking_wrmsrl(hwc->event_base + idx,
12744 (u64)(-left) & x86_pmu.event_mask);
12745 @@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs
12746 break;
12747
12748 callchain_store(entry, frame.return_address);
12749 - fp = frame.next_frame;
12750 + fp = (__force const void __user *)frame.next_frame;
12751 }
12752 }
12753
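The perf_event.c hunk switches hwc->prev_count and event->count to the unchecked atomic64 helpers; the surrounding logic is the usual lock-free update loop that republishes the previous hardware-counter snapshot with a compare-and-swap before accounting the delta. A hedged userspace model of that loop, where read_hw_counter() merely stands in for the rdmsrl() of the real PMU register:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t prev_count;
static _Atomic uint64_t event_count;

static uint64_t read_hw_counter(void)
{
	static _Atomic uint64_t fake_pmu;
	return atomic_fetch_add(&fake_pmu, 123) + 123;   /* monotonic stand-in */
}

static uint64_t update_event(void)
{
	uint64_t prev, new;

	do {
		prev = atomic_load(&prev_count);
		new = read_hw_counter();
		/* If another updater raced us, retry so the delta is taken
		 * against the snapshot we actually installed. */
	} while (!atomic_compare_exchange_weak(&prev_count, &prev, new));

	atomic_fetch_add(&event_count, new - prev);
	return new;
}

int main(void)
{
	update_event();
	update_event();
	printf("accumulated: %llu\n",
	       (unsigned long long)atomic_load(&event_count));
	return 0;
}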
12754 diff -urNp linux-2.6.32.45/arch/x86/kernel/crash.c linux-2.6.32.45/arch/x86/kernel/crash.c
12755 --- linux-2.6.32.45/arch/x86/kernel/crash.c 2011-03-27 14:31:47.000000000 -0400
12756 +++ linux-2.6.32.45/arch/x86/kernel/crash.c 2011-04-17 15:56:46.000000000 -0400
12757 @@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu,
12758 regs = args->regs;
12759
12760 #ifdef CONFIG_X86_32
12761 - if (!user_mode_vm(regs)) {
12762 + if (!user_mode(regs)) {
12763 crash_fixup_ss_esp(&fixed_regs, regs);
12764 regs = &fixed_regs;
12765 }
12766 diff -urNp linux-2.6.32.45/arch/x86/kernel/doublefault_32.c linux-2.6.32.45/arch/x86/kernel/doublefault_32.c
12767 --- linux-2.6.32.45/arch/x86/kernel/doublefault_32.c 2011-03-27 14:31:47.000000000 -0400
12768 +++ linux-2.6.32.45/arch/x86/kernel/doublefault_32.c 2011-04-17 15:56:46.000000000 -0400
12769 @@ -11,7 +11,7 @@
12770
12771 #define DOUBLEFAULT_STACKSIZE (1024)
12772 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12773 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12774 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12775
12776 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12777
12778 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
12779 unsigned long gdt, tss;
12780
12781 store_gdt(&gdt_desc);
12782 - gdt = gdt_desc.address;
12783 + gdt = (unsigned long)gdt_desc.address;
12784
12785 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12786
12787 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
12788 /* 0x2 bit is always set */
12789 .flags = X86_EFLAGS_SF | 0x2,
12790 .sp = STACK_START,
12791 - .es = __USER_DS,
12792 + .es = __KERNEL_DS,
12793 .cs = __KERNEL_CS,
12794 .ss = __KERNEL_DS,
12795 - .ds = __USER_DS,
12796 + .ds = __KERNEL_DS,
12797 .fs = __KERNEL_PERCPU,
12798
12799 .__cr3 = __pa_nodebug(swapper_pg_dir),
12800 diff -urNp linux-2.6.32.45/arch/x86/kernel/dumpstack_32.c linux-2.6.32.45/arch/x86/kernel/dumpstack_32.c
12801 --- linux-2.6.32.45/arch/x86/kernel/dumpstack_32.c 2011-03-27 14:31:47.000000000 -0400
12802 +++ linux-2.6.32.45/arch/x86/kernel/dumpstack_32.c 2011-04-17 15:56:46.000000000 -0400
12803 @@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task
12804 #endif
12805
12806 for (;;) {
12807 - struct thread_info *context;
12808 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12809 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12810
12811 - context = (struct thread_info *)
12812 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12813 - bp = print_context_stack(context, stack, bp, ops,
12814 - data, NULL, &graph);
12815 -
12816 - stack = (unsigned long *)context->previous_esp;
12817 - if (!stack)
12818 + if (stack_start == task_stack_page(task))
12819 break;
12820 + stack = *(unsigned long **)stack_start;
12821 if (ops->stack(data, "IRQ") < 0)
12822 break;
12823 touch_nmi_watchdog();
12824 @@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs
12825 * When in-kernel, we also print out the stack and code at the
12826 * time of the fault..
12827 */
12828 - if (!user_mode_vm(regs)) {
12829 + if (!user_mode(regs)) {
12830 unsigned int code_prologue = code_bytes * 43 / 64;
12831 unsigned int code_len = code_bytes;
12832 unsigned char c;
12833 u8 *ip;
12834 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12835
12836 printk(KERN_EMERG "Stack:\n");
12837 show_stack_log_lvl(NULL, regs, &regs->sp,
12838 @@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs
12839
12840 printk(KERN_EMERG "Code: ");
12841
12842 - ip = (u8 *)regs->ip - code_prologue;
12843 + ip = (u8 *)regs->ip - code_prologue + cs_base;
12844 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12845 /* try starting at IP */
12846 - ip = (u8 *)regs->ip;
12847 + ip = (u8 *)regs->ip + cs_base;
12848 code_len = code_len - code_prologue + 1;
12849 }
12850 for (i = 0; i < code_len; i++, ip++) {
12851 @@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs
12852 printk(" Bad EIP value.");
12853 break;
12854 }
12855 - if (ip == (u8 *)regs->ip)
12856 + if (ip == (u8 *)regs->ip + cs_base)
12857 printk("<%02x> ", c);
12858 else
12859 printk("%02x ", c);
12860 @@ -149,6 +146,7 @@ int is_valid_bugaddr(unsigned long ip)
12861 {
12862 unsigned short ud2;
12863
12864 + ip = ktla_ktva(ip);
12865 if (ip < PAGE_OFFSET)
12866 return 0;
12867 if (probe_kernel_address((unsigned short *)ip, ud2))
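dumpstack_32.c above stops deriving the stack base from thread_info and instead masks the current stack address with ~(THREAD_SIZE-1), hopping to the previous stack via the word stored at that base. The masking works because kernel stacks are THREAD_SIZE-aligned and THREAD_SIZE is a power of two; a small illustration with made-up values:

#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE 8192UL   /* must be a power of two */

static uintptr_t stack_start_of(uintptr_t any_stack_address)
{
	return any_stack_address & ~(THREAD_SIZE - 1);
}

int main(void)
{
	uintptr_t sp = 0xc12f3e54;   /* pretend ESP somewhere inside a stack */

	printf("stack base: %#lx\n", (unsigned long)stack_start_of(sp));
	/* prints 0xc12f2000: the 8 KiB-aligned block containing sp */
	return 0;
}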
12868 diff -urNp linux-2.6.32.45/arch/x86/kernel/dumpstack_64.c linux-2.6.32.45/arch/x86/kernel/dumpstack_64.c
12869 --- linux-2.6.32.45/arch/x86/kernel/dumpstack_64.c 2011-03-27 14:31:47.000000000 -0400
12870 +++ linux-2.6.32.45/arch/x86/kernel/dumpstack_64.c 2011-04-17 15:56:46.000000000 -0400
12871 @@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task
12872 unsigned long *irq_stack_end =
12873 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12874 unsigned used = 0;
12875 - struct thread_info *tinfo;
12876 int graph = 0;
12877 + void *stack_start;
12878
12879 if (!task)
12880 task = current;
12881 @@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task
12882 * current stack address. If the stacks consist of nested
12883 * exceptions
12884 */
12885 - tinfo = task_thread_info(task);
12886 for (;;) {
12887 char *id;
12888 unsigned long *estack_end;
12889 +
12890 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12891 &used, &id);
12892
12893 @@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task
12894 if (ops->stack(data, id) < 0)
12895 break;
12896
12897 - bp = print_context_stack(tinfo, stack, bp, ops,
12898 + bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12899 data, estack_end, &graph);
12900 ops->stack(data, "<EOE>");
12901 /*
12902 @@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task
12903 if (stack >= irq_stack && stack < irq_stack_end) {
12904 if (ops->stack(data, "IRQ") < 0)
12905 break;
12906 - bp = print_context_stack(tinfo, stack, bp,
12907 + bp = print_context_stack(task, irq_stack, stack, bp,
12908 ops, data, irq_stack_end, &graph);
12909 /*
12910 * We link to the next stack (which would be
12911 @@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task
12912 /*
12913 * This handles the process stack:
12914 */
12915 - bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12916 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12917 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12918 put_cpu();
12919 }
12920 EXPORT_SYMBOL(dump_trace);
12921 diff -urNp linux-2.6.32.45/arch/x86/kernel/dumpstack.c linux-2.6.32.45/arch/x86/kernel/dumpstack.c
12922 --- linux-2.6.32.45/arch/x86/kernel/dumpstack.c 2011-03-27 14:31:47.000000000 -0400
12923 +++ linux-2.6.32.45/arch/x86/kernel/dumpstack.c 2011-04-17 15:56:46.000000000 -0400
12924 @@ -2,6 +2,9 @@
12925 * Copyright (C) 1991, 1992 Linus Torvalds
12926 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12927 */
12928 +#ifdef CONFIG_GRKERNSEC_HIDESYM
12929 +#define __INCLUDED_BY_HIDESYM 1
12930 +#endif
12931 #include <linux/kallsyms.h>
12932 #include <linux/kprobes.h>
12933 #include <linux/uaccess.h>
12934 @@ -28,7 +31,7 @@ static int die_counter;
12935
12936 void printk_address(unsigned long address, int reliable)
12937 {
12938 - printk(" [<%p>] %s%pS\n", (void *) address,
12939 + printk(" [<%p>] %s%pA\n", (void *) address,
12940 reliable ? "" : "? ", (void *) address);
12941 }
12942
12943 @@ -36,9 +39,8 @@ void printk_address(unsigned long addres
12944 static void
12945 print_ftrace_graph_addr(unsigned long addr, void *data,
12946 const struct stacktrace_ops *ops,
12947 - struct thread_info *tinfo, int *graph)
12948 + struct task_struct *task, int *graph)
12949 {
12950 - struct task_struct *task = tinfo->task;
12951 unsigned long ret_addr;
12952 int index = task->curr_ret_stack;
12953
12954 @@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long ad
12955 static inline void
12956 print_ftrace_graph_addr(unsigned long addr, void *data,
12957 const struct stacktrace_ops *ops,
12958 - struct thread_info *tinfo, int *graph)
12959 + struct task_struct *task, int *graph)
12960 { }
12961 #endif
12962
12963 @@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long ad
12964 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12965 */
12966
12967 -static inline int valid_stack_ptr(struct thread_info *tinfo,
12968 - void *p, unsigned int size, void *end)
12969 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12970 {
12971 - void *t = tinfo;
12972 if (end) {
12973 if (p < end && p >= (end-THREAD_SIZE))
12974 return 1;
12975 @@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct
12976 }
12977
12978 unsigned long
12979 -print_context_stack(struct thread_info *tinfo,
12980 +print_context_stack(struct task_struct *task, void *stack_start,
12981 unsigned long *stack, unsigned long bp,
12982 const struct stacktrace_ops *ops, void *data,
12983 unsigned long *end, int *graph)
12984 {
12985 struct stack_frame *frame = (struct stack_frame *)bp;
12986
12987 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12988 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12989 unsigned long addr;
12990
12991 addr = *stack;
12992 @@ -103,7 +103,7 @@ print_context_stack(struct thread_info *
12993 } else {
12994 ops->address(data, addr, 0);
12995 }
12996 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12997 + print_ftrace_graph_addr(addr, data, ops, task, graph);
12998 }
12999 stack++;
13000 }
13001 @@ -180,7 +180,7 @@ void dump_stack(void)
13002 #endif
13003
13004 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
13005 - current->pid, current->comm, print_tainted(),
13006 + task_pid_nr(current), current->comm, print_tainted(),
13007 init_utsname()->release,
13008 (int)strcspn(init_utsname()->version, " "),
13009 init_utsname()->version);
13010 @@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
13011 return flags;
13012 }
13013
13014 +extern void gr_handle_kernel_exploit(void);
13015 +
13016 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
13017 {
13018 if (regs && kexec_should_crash(current))
13019 @@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long fl
13020 panic("Fatal exception in interrupt");
13021 if (panic_on_oops)
13022 panic("Fatal exception");
13023 - do_exit(signr);
13024 +
13025 + gr_handle_kernel_exploit();
13026 +
13027 + do_group_exit(signr);
13028 }
13029
13030 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
13031 @@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs
13032 unsigned long flags = oops_begin();
13033 int sig = SIGSEGV;
13034
13035 - if (!user_mode_vm(regs))
13036 + if (!user_mode(regs))
13037 report_bug(regs->ip, regs);
13038
13039 if (__die(str, regs, err))
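After the rework above, valid_stack_ptr() no longer needs a thread_info pointer: it is a pure range check asking whether the candidate word lies inside the THREAD_SIZE block that starts at stack_start, or below an explicit exception-stack end. A hedged C model of that predicate, slightly simplified relative to the original bounds arithmetic:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE 8192UL

static bool valid_stack_ptr(const void *stack_start, const void *p,
			    size_t size, const void *end)
{
	uintptr_t lo, hi, addr = (uintptr_t)p;

	if (end) {                       /* walking an exception stack */
		hi = (uintptr_t)end;
		lo = hi - THREAD_SIZE;
	} else {                         /* walking the process stack */
		lo = (uintptr_t)stack_start;
		hi = lo + THREAD_SIZE;
	}
	return addr >= lo && addr + size <= hi;
}

int main(void)
{
	static char stack[THREAD_SIZE];

	printf("%d\n", valid_stack_ptr(stack, stack + 100, 8, NULL));             /* 1 */
	printf("%d\n", valid_stack_ptr(stack, stack + THREAD_SIZE - 4, 8, NULL)); /* 0 */
	return 0;
}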
13040 diff -urNp linux-2.6.32.45/arch/x86/kernel/dumpstack.h linux-2.6.32.45/arch/x86/kernel/dumpstack.h
13041 --- linux-2.6.32.45/arch/x86/kernel/dumpstack.h 2011-03-27 14:31:47.000000000 -0400
13042 +++ linux-2.6.32.45/arch/x86/kernel/dumpstack.h 2011-04-23 13:25:26.000000000 -0400
13043 @@ -15,7 +15,7 @@
13044 #endif
13045
13046 extern unsigned long
13047 -print_context_stack(struct thread_info *tinfo,
13048 +print_context_stack(struct task_struct *task, void *stack_start,
13049 unsigned long *stack, unsigned long bp,
13050 const struct stacktrace_ops *ops, void *data,
13051 unsigned long *end, int *graph);
13052 diff -urNp linux-2.6.32.45/arch/x86/kernel/e820.c linux-2.6.32.45/arch/x86/kernel/e820.c
13053 --- linux-2.6.32.45/arch/x86/kernel/e820.c 2011-03-27 14:31:47.000000000 -0400
13054 +++ linux-2.6.32.45/arch/x86/kernel/e820.c 2011-04-17 15:56:46.000000000 -0400
13055 @@ -733,7 +733,7 @@ struct early_res {
13056 };
13057 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
13058 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
13059 - {}
13060 + { 0, 0, {0}, 0 }
13061 };
13062
13063 static int __init find_overlapped_early(u64 start, u64 end)
13064 diff -urNp linux-2.6.32.45/arch/x86/kernel/early_printk.c linux-2.6.32.45/arch/x86/kernel/early_printk.c
13065 --- linux-2.6.32.45/arch/x86/kernel/early_printk.c 2011-03-27 14:31:47.000000000 -0400
13066 +++ linux-2.6.32.45/arch/x86/kernel/early_printk.c 2011-05-16 21:46:57.000000000 -0400
13067 @@ -7,6 +7,7 @@
13068 #include <linux/pci_regs.h>
13069 #include <linux/pci_ids.h>
13070 #include <linux/errno.h>
13071 +#include <linux/sched.h>
13072 #include <asm/io.h>
13073 #include <asm/processor.h>
13074 #include <asm/fcntl.h>
13075 @@ -170,6 +171,8 @@ asmlinkage void early_printk(const char
13076 int n;
13077 va_list ap;
13078
13079 + pax_track_stack();
13080 +
13081 va_start(ap, fmt);
13082 n = vscnprintf(buf, sizeof(buf), fmt, ap);
13083 early_console->write(early_console, buf, n);
13084 diff -urNp linux-2.6.32.45/arch/x86/kernel/efi_32.c linux-2.6.32.45/arch/x86/kernel/efi_32.c
13085 --- linux-2.6.32.45/arch/x86/kernel/efi_32.c 2011-03-27 14:31:47.000000000 -0400
13086 +++ linux-2.6.32.45/arch/x86/kernel/efi_32.c 2011-04-17 15:56:46.000000000 -0400
13087 @@ -38,70 +38,38 @@
13088 */
13089
13090 static unsigned long efi_rt_eflags;
13091 -static pgd_t efi_bak_pg_dir_pointer[2];
13092 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
13093
13094 -void efi_call_phys_prelog(void)
13095 +void __init efi_call_phys_prelog(void)
13096 {
13097 - unsigned long cr4;
13098 - unsigned long temp;
13099 struct desc_ptr gdt_descr;
13100
13101 local_irq_save(efi_rt_eflags);
13102
13103 - /*
13104 - * If I don't have PAE, I should just duplicate two entries in page
13105 - * directory. If I have PAE, I just need to duplicate one entry in
13106 - * page directory.
13107 - */
13108 - cr4 = read_cr4_safe();
13109
13110 - if (cr4 & X86_CR4_PAE) {
13111 - efi_bak_pg_dir_pointer[0].pgd =
13112 - swapper_pg_dir[pgd_index(0)].pgd;
13113 - swapper_pg_dir[0].pgd =
13114 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
13115 - } else {
13116 - efi_bak_pg_dir_pointer[0].pgd =
13117 - swapper_pg_dir[pgd_index(0)].pgd;
13118 - efi_bak_pg_dir_pointer[1].pgd =
13119 - swapper_pg_dir[pgd_index(0x400000)].pgd;
13120 - swapper_pg_dir[pgd_index(0)].pgd =
13121 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
13122 - temp = PAGE_OFFSET + 0x400000;
13123 - swapper_pg_dir[pgd_index(0x400000)].pgd =
13124 - swapper_pg_dir[pgd_index(temp)].pgd;
13125 - }
13126 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
13127 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
13128 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
13129
13130 /*
13131 * After the lock is released, the original page table is restored.
13132 */
13133 __flush_tlb_all();
13134
13135 - gdt_descr.address = __pa(get_cpu_gdt_table(0));
13136 + gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
13137 gdt_descr.size = GDT_SIZE - 1;
13138 load_gdt(&gdt_descr);
13139 }
13140
13141 -void efi_call_phys_epilog(void)
13142 +void __init efi_call_phys_epilog(void)
13143 {
13144 - unsigned long cr4;
13145 struct desc_ptr gdt_descr;
13146
13147 - gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
13148 + gdt_descr.address = get_cpu_gdt_table(0);
13149 gdt_descr.size = GDT_SIZE - 1;
13150 load_gdt(&gdt_descr);
13151
13152 - cr4 = read_cr4_safe();
13153 -
13154 - if (cr4 & X86_CR4_PAE) {
13155 - swapper_pg_dir[pgd_index(0)].pgd =
13156 - efi_bak_pg_dir_pointer[0].pgd;
13157 - } else {
13158 - swapper_pg_dir[pgd_index(0)].pgd =
13159 - efi_bak_pg_dir_pointer[0].pgd;
13160 - swapper_pg_dir[pgd_index(0x400000)].pgd =
13161 - efi_bak_pg_dir_pointer[1].pgd;
13162 - }
13163 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
13164
13165 /*
13166 * After the lock is released, the original page table is restored.
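The efi_32.c rewrite above drops the separate PAE and non-PAE cases: the kernel pgd range is snapshotted into efi_bak_pg_dir_pointer, the kernel mappings are cloned down over the low (user) entries for the duration of the physical-mode EFI call, and the snapshot is copied back in the epilog. A hedged userspace model of that save/clone/restore flow; entry counts and names are illustrative:

#include <string.h>
#include <stdio.h>

#define PTRS_PER_PGD        1024
#define KERNEL_PGD_BOUNDARY  768          /* first kernel-space entry */
#define KERNEL_PGD_PTRS     (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

typedef unsigned long pgd_t;

static pgd_t swapper_pg_dir[PTRS_PER_PGD];
static pgd_t efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];

static void clone_pgd_range(pgd_t *dst, const pgd_t *src, unsigned count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}

static void efi_call_phys_prelog_model(void)
{
	/* back up the low entries we are about to overwrite ... */
	clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
	/* ... and mirror the kernel mappings down to virtual address 0 */
	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);
}

static void efi_call_phys_epilog_model(void)
{
	clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
}

int main(void)
{
	swapper_pg_dir[KERNEL_PGD_BOUNDARY] = 0x1234;   /* a "kernel" entry */
	efi_call_phys_prelog_model();
	printf("entry 0 during call: %#lx\n", swapper_pg_dir[0]);   /* 0x1234 */
	efi_call_phys_epilog_model();
	printf("entry 0 after call:  %#lx\n", swapper_pg_dir[0]);   /* 0 */
	return 0;
}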
13167 diff -urNp linux-2.6.32.45/arch/x86/kernel/efi_stub_32.S linux-2.6.32.45/arch/x86/kernel/efi_stub_32.S
13168 --- linux-2.6.32.45/arch/x86/kernel/efi_stub_32.S 2011-03-27 14:31:47.000000000 -0400
13169 +++ linux-2.6.32.45/arch/x86/kernel/efi_stub_32.S 2011-04-17 15:56:46.000000000 -0400
13170 @@ -6,6 +6,7 @@
13171 */
13172
13173 #include <linux/linkage.h>
13174 +#include <linux/init.h>
13175 #include <asm/page_types.h>
13176
13177 /*
13178 @@ -20,7 +21,7 @@
13179 * service functions will comply with gcc calling convention, too.
13180 */
13181
13182 -.text
13183 +__INIT
13184 ENTRY(efi_call_phys)
13185 /*
13186 * 0. The function can only be called in Linux kernel. So CS has been
13187 @@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
13188 * The mapping of lower virtual memory has been created in prelog and
13189 * epilog.
13190 */
13191 - movl $1f, %edx
13192 - subl $__PAGE_OFFSET, %edx
13193 - jmp *%edx
13194 + jmp 1f-__PAGE_OFFSET
13195 1:
13196
13197 /*
13198 @@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
13199 * parameter 2, ..., param n. To make things easy, we save the return
13200 * address of efi_call_phys in a global variable.
13201 */
13202 - popl %edx
13203 - movl %edx, saved_return_addr
13204 - /* get the function pointer into ECX*/
13205 - popl %ecx
13206 - movl %ecx, efi_rt_function_ptr
13207 - movl $2f, %edx
13208 - subl $__PAGE_OFFSET, %edx
13209 - pushl %edx
13210 + popl (saved_return_addr)
13211 + popl (efi_rt_function_ptr)
13212
13213 /*
13214 * 3. Clear PG bit in %CR0.
13215 @@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
13216 /*
13217 * 5. Call the physical function.
13218 */
13219 - jmp *%ecx
13220 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
13221
13222 -2:
13223 /*
13224 * 6. After EFI runtime service returns, control will return to
13225 * following instruction. We'd better readjust stack pointer first.
13226 @@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
13227 movl %cr0, %edx
13228 orl $0x80000000, %edx
13229 movl %edx, %cr0
13230 - jmp 1f
13231 -1:
13232 +
13233 /*
13234 * 8. Now restore the virtual mode from flat mode by
13235 * adding EIP with PAGE_OFFSET.
13236 */
13237 - movl $1f, %edx
13238 - jmp *%edx
13239 + jmp 1f+__PAGE_OFFSET
13240 1:
13241
13242 /*
13243 * 9. Balance the stack. And because EAX contain the return value,
13244 * we'd better not clobber it.
13245 */
13246 - leal efi_rt_function_ptr, %edx
13247 - movl (%edx), %ecx
13248 - pushl %ecx
13249 + pushl (efi_rt_function_ptr)
13250
13251 /*
13252 - * 10. Push the saved return address onto the stack and return.
13253 + * 10. Return to the saved return address.
13254 */
13255 - leal saved_return_addr, %edx
13256 - movl (%edx), %ecx
13257 - pushl %ecx
13258 - ret
13259 + jmpl *(saved_return_addr)
13260 ENDPROC(efi_call_phys)
13261 .previous
13262
13263 -.data
13264 +__INITDATA
13265 saved_return_addr:
13266 .long 0
13267 efi_rt_function_ptr:
13268 diff -urNp linux-2.6.32.45/arch/x86/kernel/entry_32.S linux-2.6.32.45/arch/x86/kernel/entry_32.S
13269 --- linux-2.6.32.45/arch/x86/kernel/entry_32.S 2011-03-27 14:31:47.000000000 -0400
13270 +++ linux-2.6.32.45/arch/x86/kernel/entry_32.S 2011-08-23 20:24:19.000000000 -0400
13271 @@ -185,13 +185,146 @@
13272 /*CFI_REL_OFFSET gs, PT_GS*/
13273 .endm
13274 .macro SET_KERNEL_GS reg
13275 +
13276 +#ifdef CONFIG_CC_STACKPROTECTOR
13277 movl $(__KERNEL_STACK_CANARY), \reg
13278 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
13279 + movl $(__USER_DS), \reg
13280 +#else
13281 + xorl \reg, \reg
13282 +#endif
13283 +
13284 movl \reg, %gs
13285 .endm
13286
13287 #endif /* CONFIG_X86_32_LAZY_GS */
13288
13289 -.macro SAVE_ALL
13290 +.macro pax_enter_kernel
13291 +#ifdef CONFIG_PAX_KERNEXEC
13292 + call pax_enter_kernel
13293 +#endif
13294 +.endm
13295 +
13296 +.macro pax_exit_kernel
13297 +#ifdef CONFIG_PAX_KERNEXEC
13298 + call pax_exit_kernel
13299 +#endif
13300 +.endm
13301 +
13302 +#ifdef CONFIG_PAX_KERNEXEC
13303 +ENTRY(pax_enter_kernel)
13304 +#ifdef CONFIG_PARAVIRT
13305 + pushl %eax
13306 + pushl %ecx
13307 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
13308 + mov %eax, %esi
13309 +#else
13310 + mov %cr0, %esi
13311 +#endif
13312 + bts $16, %esi
13313 + jnc 1f
13314 + mov %cs, %esi
13315 + cmp $__KERNEL_CS, %esi
13316 + jz 3f
13317 + ljmp $__KERNEL_CS, $3f
13318 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
13319 +2:
13320 +#ifdef CONFIG_PARAVIRT
13321 + mov %esi, %eax
13322 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
13323 +#else
13324 + mov %esi, %cr0
13325 +#endif
13326 +3:
13327 +#ifdef CONFIG_PARAVIRT
13328 + popl %ecx
13329 + popl %eax
13330 +#endif
13331 + ret
13332 +ENDPROC(pax_enter_kernel)
13333 +
13334 +ENTRY(pax_exit_kernel)
13335 +#ifdef CONFIG_PARAVIRT
13336 + pushl %eax
13337 + pushl %ecx
13338 +#endif
13339 + mov %cs, %esi
13340 + cmp $__KERNEXEC_KERNEL_CS, %esi
13341 + jnz 2f
13342 +#ifdef CONFIG_PARAVIRT
13343 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
13344 + mov %eax, %esi
13345 +#else
13346 + mov %cr0, %esi
13347 +#endif
13348 + btr $16, %esi
13349 + ljmp $__KERNEL_CS, $1f
13350 +1:
13351 +#ifdef CONFIG_PARAVIRT
13352 + mov %esi, %eax
13353 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
13354 +#else
13355 + mov %esi, %cr0
13356 +#endif
13357 +2:
13358 +#ifdef CONFIG_PARAVIRT
13359 + popl %ecx
13360 + popl %eax
13361 +#endif
13362 + ret
13363 +ENDPROC(pax_exit_kernel)
13364 +#endif
13365 +
13366 +.macro pax_erase_kstack
13367 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13368 + call pax_erase_kstack
13369 +#endif
13370 +.endm
13371 +
13372 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13373 +/*
13374 + * ebp: thread_info
13375 + * ecx, edx: can be clobbered
13376 + */
13377 +ENTRY(pax_erase_kstack)
13378 + pushl %edi
13379 + pushl %eax
13380 +
13381 + mov TI_lowest_stack(%ebp), %edi
13382 + mov $-0xBEEF, %eax
13383 + std
13384 +
13385 +1: mov %edi, %ecx
13386 + and $THREAD_SIZE_asm - 1, %ecx
13387 + shr $2, %ecx
13388 + repne scasl
13389 + jecxz 2f
13390 +
13391 + cmp $2*16, %ecx
13392 + jc 2f
13393 +
13394 + mov $2*16, %ecx
13395 + repe scasl
13396 + jecxz 2f
13397 + jne 1b
13398 +
13399 +2: cld
13400 + mov %esp, %ecx
13401 + sub %edi, %ecx
13402 + shr $2, %ecx
13403 + rep stosl
13404 +
13405 + mov TI_task_thread_sp0(%ebp), %edi
13406 + sub $128, %edi
13407 + mov %edi, TI_lowest_stack(%ebp)
13408 +
13409 + popl %eax
13410 + popl %edi
13411 + ret
13412 +ENDPROC(pax_erase_kstack)
13413 +#endif
13414 +
13415 +.macro __SAVE_ALL _DS
13416 cld
13417 PUSH_GS
13418 pushl %fs
13419 @@ -224,7 +357,7 @@
13420 pushl %ebx
13421 CFI_ADJUST_CFA_OFFSET 4
13422 CFI_REL_OFFSET ebx, 0
13423 - movl $(__USER_DS), %edx
13424 + movl $\_DS, %edx
13425 movl %edx, %ds
13426 movl %edx, %es
13427 movl $(__KERNEL_PERCPU), %edx
13428 @@ -232,6 +365,15 @@
13429 SET_KERNEL_GS %edx
13430 .endm
13431
13432 +.macro SAVE_ALL
13433 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
13434 + __SAVE_ALL __KERNEL_DS
13435 + pax_enter_kernel
13436 +#else
13437 + __SAVE_ALL __USER_DS
13438 +#endif
13439 +.endm
13440 +
13441 .macro RESTORE_INT_REGS
13442 popl %ebx
13443 CFI_ADJUST_CFA_OFFSET -4
13444 @@ -352,7 +494,15 @@ check_userspace:
13445 movb PT_CS(%esp), %al
13446 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
13447 cmpl $USER_RPL, %eax
13448 +
13449 +#ifdef CONFIG_PAX_KERNEXEC
13450 + jae resume_userspace
13451 +
13452 + PAX_EXIT_KERNEL
13453 + jmp resume_kernel
13454 +#else
13455 jb resume_kernel # not returning to v8086 or userspace
13456 +#endif
13457
13458 ENTRY(resume_userspace)
13459 LOCKDEP_SYS_EXIT
13460 @@ -364,7 +514,7 @@ ENTRY(resume_userspace)
13461 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
13462 # int/exception return?
13463 jne work_pending
13464 - jmp restore_all
13465 + jmp restore_all_pax
13466 END(ret_from_exception)
13467
13468 #ifdef CONFIG_PREEMPT
13469 @@ -414,25 +564,36 @@ sysenter_past_esp:
13470 /*CFI_REL_OFFSET cs, 0*/
13471 /*
13472 * Push current_thread_info()->sysenter_return to the stack.
13473 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
13474 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
13475 */
13476 - pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
13477 + pushl $0
13478 CFI_ADJUST_CFA_OFFSET 4
13479 CFI_REL_OFFSET eip, 0
13480
13481 pushl %eax
13482 CFI_ADJUST_CFA_OFFSET 4
13483 SAVE_ALL
13484 + GET_THREAD_INFO(%ebp)
13485 + movl TI_sysenter_return(%ebp),%ebp
13486 + movl %ebp,PT_EIP(%esp)
13487 ENABLE_INTERRUPTS(CLBR_NONE)
13488
13489 /*
13490 * Load the potential sixth argument from user stack.
13491 * Careful about security.
13492 */
13493 + movl PT_OLDESP(%esp),%ebp
13494 +
13495 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13496 + mov PT_OLDSS(%esp),%ds
13497 +1: movl %ds:(%ebp),%ebp
13498 + push %ss
13499 + pop %ds
13500 +#else
13501 cmpl $__PAGE_OFFSET-3,%ebp
13502 jae syscall_fault
13503 1: movl (%ebp),%ebp
13504 +#endif
13505 +
13506 movl %ebp,PT_EBP(%esp)
13507 .section __ex_table,"a"
13508 .align 4
13509 @@ -455,12 +616,23 @@ sysenter_do_call:
13510 testl $_TIF_ALLWORK_MASK, %ecx
13511 jne sysexit_audit
13512 sysenter_exit:
13513 +
13514 +#ifdef CONFIG_PAX_RANDKSTACK
13515 + pushl_cfi %eax
13516 + call pax_randomize_kstack
13517 + popl_cfi %eax
13518 +#endif
13519 +
13520 + pax_erase_kstack
13521 +
13522 /* if something modifies registers it must also disable sysexit */
13523 movl PT_EIP(%esp), %edx
13524 movl PT_OLDESP(%esp), %ecx
13525 xorl %ebp,%ebp
13526 TRACE_IRQS_ON
13527 1: mov PT_FS(%esp), %fs
13528 +2: mov PT_DS(%esp), %ds
13529 +3: mov PT_ES(%esp), %es
13530 PTGS_TO_GS
13531 ENABLE_INTERRUPTS_SYSEXIT
13532
13533 @@ -477,6 +649,9 @@ sysenter_audit:
13534 movl %eax,%edx /* 2nd arg: syscall number */
13535 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
13536 call audit_syscall_entry
13537 +
13538 + pax_erase_kstack
13539 +
13540 pushl %ebx
13541 CFI_ADJUST_CFA_OFFSET 4
13542 movl PT_EAX(%esp),%eax /* reload syscall number */
13543 @@ -504,11 +679,17 @@ sysexit_audit:
13544
13545 CFI_ENDPROC
13546 .pushsection .fixup,"ax"
13547 -2: movl $0,PT_FS(%esp)
13548 +4: movl $0,PT_FS(%esp)
13549 + jmp 1b
13550 +5: movl $0,PT_DS(%esp)
13551 + jmp 1b
13552 +6: movl $0,PT_ES(%esp)
13553 jmp 1b
13554 .section __ex_table,"a"
13555 .align 4
13556 - .long 1b,2b
13557 + .long 1b,4b
13558 + .long 2b,5b
13559 + .long 3b,6b
13560 .popsection
13561 PTGS_TO_GS_EX
13562 ENDPROC(ia32_sysenter_target)
13563 @@ -538,6 +719,14 @@ syscall_exit:
13564 testl $_TIF_ALLWORK_MASK, %ecx # current->work
13565 jne syscall_exit_work
13566
13567 +restore_all_pax:
13568 +
13569 +#ifdef CONFIG_PAX_RANDKSTACK
13570 + call pax_randomize_kstack
13571 +#endif
13572 +
13573 + pax_erase_kstack
13574 +
13575 restore_all:
13576 TRACE_IRQS_IRET
13577 restore_all_notrace:
13578 @@ -602,10 +791,29 @@ ldt_ss:
13579 mov PT_OLDESP(%esp), %eax /* load userspace esp */
13580 mov %dx, %ax /* eax: new kernel esp */
13581 sub %eax, %edx /* offset (low word is 0) */
13582 - PER_CPU(gdt_page, %ebx)
13583 +#ifdef CONFIG_SMP
13584 + movl PER_CPU_VAR(cpu_number), %ebx
13585 + shll $PAGE_SHIFT_asm, %ebx
13586 + addl $cpu_gdt_table, %ebx
13587 +#else
13588 + movl $cpu_gdt_table, %ebx
13589 +#endif
13590 shr $16, %edx
13591 +
13592 +#ifdef CONFIG_PAX_KERNEXEC
13593 + mov %cr0, %esi
13594 + btr $16, %esi
13595 + mov %esi, %cr0
13596 +#endif
13597 +
13598 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
13599 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
13600 +
13601 +#ifdef CONFIG_PAX_KERNEXEC
13602 + bts $16, %esi
13603 + mov %esi, %cr0
13604 +#endif
13605 +
13606 pushl $__ESPFIX_SS
13607 CFI_ADJUST_CFA_OFFSET 4
13608 push %eax /* new kernel esp */
13609 @@ -636,31 +844,25 @@ work_resched:
13610 movl TI_flags(%ebp), %ecx
13611 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
13612 # than syscall tracing?
13613 - jz restore_all
13614 + jz restore_all_pax
13615 testb $_TIF_NEED_RESCHED, %cl
13616 jnz work_resched
13617
13618 work_notifysig: # deal with pending signals and
13619 # notify-resume requests
13620 + movl %esp, %eax
13621 #ifdef CONFIG_VM86
13622 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
13623 - movl %esp, %eax
13624 - jne work_notifysig_v86 # returning to kernel-space or
13625 + jz 1f # returning to kernel-space or
13626 # vm86-space
13627 - xorl %edx, %edx
13628 - call do_notify_resume
13629 - jmp resume_userspace_sig
13630
13631 - ALIGN
13632 -work_notifysig_v86:
13633 pushl %ecx # save ti_flags for do_notify_resume
13634 CFI_ADJUST_CFA_OFFSET 4
13635 call save_v86_state # %eax contains pt_regs pointer
13636 popl %ecx
13637 CFI_ADJUST_CFA_OFFSET -4
13638 movl %eax, %esp
13639 -#else
13640 - movl %esp, %eax
13641 +1:
13642 #endif
13643 xorl %edx, %edx
13644 call do_notify_resume
13645 @@ -673,6 +875,9 @@ syscall_trace_entry:
13646 movl $-ENOSYS,PT_EAX(%esp)
13647 movl %esp, %eax
13648 call syscall_trace_enter
13649 +
13650 + pax_erase_kstack
13651 +
13652 /* What it returned is what we'll actually use. */
13653 cmpl $(nr_syscalls), %eax
13654 jnae syscall_call
13655 @@ -695,6 +900,10 @@ END(syscall_exit_work)
13656
13657 RING0_INT_FRAME # can't unwind into user space anyway
13658 syscall_fault:
13659 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13660 + push %ss
13661 + pop %ds
13662 +#endif
13663 GET_THREAD_INFO(%ebp)
13664 movl $-EFAULT,PT_EAX(%esp)
13665 jmp resume_userspace
13666 @@ -726,6 +935,33 @@ PTREGSCALL(rt_sigreturn)
13667 PTREGSCALL(vm86)
13668 PTREGSCALL(vm86old)
13669
13670 + ALIGN;
13671 +ENTRY(kernel_execve)
13672 + push %ebp
13673 + sub $PT_OLDSS+4,%esp
13674 + push %edi
13675 + push %ecx
13676 + push %eax
13677 + lea 3*4(%esp),%edi
13678 + mov $PT_OLDSS/4+1,%ecx
13679 + xorl %eax,%eax
13680 + rep stosl
13681 + pop %eax
13682 + pop %ecx
13683 + pop %edi
13684 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
13685 + mov %eax,PT_EBX(%esp)
13686 + mov %edx,PT_ECX(%esp)
13687 + mov %ecx,PT_EDX(%esp)
13688 + mov %esp,%eax
13689 + call sys_execve
13690 + GET_THREAD_INFO(%ebp)
13691 + test %eax,%eax
13692 + jz syscall_exit
13693 + add $PT_OLDSS+4,%esp
13694 + pop %ebp
13695 + ret
13696 +
13697 .macro FIXUP_ESPFIX_STACK
13698 /*
13699 * Switch back for ESPFIX stack to the normal zerobased stack
13700 @@ -735,7 +971,13 @@ PTREGSCALL(vm86old)
13701 * normal stack and adjusts ESP with the matching offset.
13702 */
13703 /* fixup the stack */
13704 - PER_CPU(gdt_page, %ebx)
13705 +#ifdef CONFIG_SMP
13706 + movl PER_CPU_VAR(cpu_number), %ebx
13707 + shll $PAGE_SHIFT_asm, %ebx
13708 + addl $cpu_gdt_table, %ebx
13709 +#else
13710 + movl $cpu_gdt_table, %ebx
13711 +#endif
13712 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
13713 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
13714 shl $16, %eax
13715 @@ -1198,7 +1440,6 @@ return_to_handler:
13716 ret
13717 #endif
13718
13719 -.section .rodata,"a"
13720 #include "syscall_table_32.S"
13721
13722 syscall_table_size=(.-sys_call_table)
13723 @@ -1255,9 +1496,12 @@ error_code:
13724 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
13725 REG_TO_PTGS %ecx
13726 SET_KERNEL_GS %ecx
13727 - movl $(__USER_DS), %ecx
13728 + movl $(__KERNEL_DS), %ecx
13729 movl %ecx, %ds
13730 movl %ecx, %es
13731 +
13732 + pax_enter_kernel
13733 +
13734 TRACE_IRQS_OFF
13735 movl %esp,%eax # pt_regs pointer
13736 call *%edi
13737 @@ -1351,6 +1595,9 @@ nmi_stack_correct:
13738 xorl %edx,%edx # zero error code
13739 movl %esp,%eax # pt_regs pointer
13740 call do_nmi
13741 +
13742 + pax_exit_kernel
13743 +
13744 jmp restore_all_notrace
13745 CFI_ENDPROC
13746
13747 @@ -1391,6 +1638,9 @@ nmi_espfix_stack:
13748 FIXUP_ESPFIX_STACK # %eax == %esp
13749 xorl %edx,%edx # zero error code
13750 call do_nmi
13751 +
13752 + pax_exit_kernel
13753 +
13754 RESTORE_REGS
13755 lss 12+4(%esp), %esp # back to espfix stack
13756 CFI_ADJUST_CFA_OFFSET -24
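The largest addition in entry_32.S above is pax_erase_kstack (CONFIG_PAX_MEMORY_STACKLEAK): starting at the recorded lowest_stack it scans downward for a run of the 0xBEEF-derived poison value to find how deep the stack was actually used, then refills everything from that depth up to the current stack pointer with the poison so stale kernel data cannot leak into later syscalls, and finally resets lowest_stack near the top of the stack. A hedged C rendering of that scan-and-fill, written for readability rather than as a drop-in equivalent of the assembly:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define POISON      ((uint32_t)-0xBEEF)   /* same marker the asm uses */
#define POISON_RUN  16                    /* consecutive markers required */

/* stack_bottom: lowest address of the kernel stack
 * lowest:       deepest stack pointer recorded since the last erase
 * current_sp:   stack pointer at the time of the erase */
static void erase_kstack_model(uint32_t *stack_bottom, uint32_t *lowest,
			       uint32_t *current_sp)
{
	uint32_t *p = lowest;

	/* Scan downward until POISON_RUN untouched (still-poisoned) words
	 * in a row are found, or the bottom of the stack is reached. */
	while (p > stack_bottom) {
		size_t run = 0;

		while (p > stack_bottom && run < POISON_RUN &&
		       *(p - 1) == POISON) {
			p--;
			run++;
		}
		if (run >= POISON_RUN)
			break;
		if (p > stack_bottom)
			p--;               /* skip the non-poison word */
	}

	/* Refill everything from that depth up to the current stack pointer;
	 * the real code then resets lowest_stack near the top of the stack. */
	while (p < current_sp)
		*p++ = POISON;
}

int main(void)
{
	uint32_t stack[64];
	size_t i;

	for (i = 0; i < 64; i++)
		stack[i] = POISON;            /* freshly poisoned stack */
	for (i = 20; i < 64; i++)
		stack[i] = (uint32_t)i;       /* pretend these words were used */

	erase_kstack_model(stack, &stack[40], &stack[60]);
	printf("word 30 after erase: %#x\n", stack[30]);   /* poison again */
	return 0;
}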
13757 diff -urNp linux-2.6.32.45/arch/x86/kernel/entry_64.S linux-2.6.32.45/arch/x86/kernel/entry_64.S
13758 --- linux-2.6.32.45/arch/x86/kernel/entry_64.S 2011-03-27 14:31:47.000000000 -0400
13759 +++ linux-2.6.32.45/arch/x86/kernel/entry_64.S 2011-08-23 20:24:19.000000000 -0400
13760 @@ -53,6 +53,7 @@
13761 #include <asm/paravirt.h>
13762 #include <asm/ftrace.h>
13763 #include <asm/percpu.h>
13764 +#include <asm/pgtable.h>
13765
13766 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13767 #include <linux/elf-em.h>
13768 @@ -174,6 +175,257 @@ ENTRY(native_usergs_sysret64)
13769 ENDPROC(native_usergs_sysret64)
13770 #endif /* CONFIG_PARAVIRT */
13771
13772 + .macro ljmpq sel, off
13773 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13774 + .byte 0x48; ljmp *1234f(%rip)
13775 + .pushsection .rodata
13776 + .align 16
13777 + 1234: .quad \off; .word \sel
13778 + .popsection
13779 +#else
13780 + pushq $\sel
13781 + pushq $\off
13782 + lretq
13783 +#endif
13784 + .endm
13785 +
13786 + .macro pax_enter_kernel
13787 +#ifdef CONFIG_PAX_KERNEXEC
13788 + call pax_enter_kernel
13789 +#endif
13790 + .endm
13791 +
13792 + .macro pax_exit_kernel
13793 +#ifdef CONFIG_PAX_KERNEXEC
13794 + call pax_exit_kernel
13795 +#endif
13796 + .endm
13797 +
13798 +#ifdef CONFIG_PAX_KERNEXEC
13799 +ENTRY(pax_enter_kernel)
13800 + pushq %rdi
13801 +
13802 +#ifdef CONFIG_PARAVIRT
13803 + PV_SAVE_REGS(CLBR_RDI)
13804 +#endif
13805 +
13806 + GET_CR0_INTO_RDI
13807 + bts $16,%rdi
13808 + jnc 1f
13809 + mov %cs,%edi
13810 + cmp $__KERNEL_CS,%edi
13811 + jz 3f
13812 + ljmpq __KERNEL_CS,3f
13813 +1: ljmpq __KERNEXEC_KERNEL_CS,2f
13814 +2: SET_RDI_INTO_CR0
13815 +3:
13816 +
13817 +#ifdef CONFIG_PARAVIRT
13818 + PV_RESTORE_REGS(CLBR_RDI)
13819 +#endif
13820 +
13821 + popq %rdi
13822 + retq
13823 +ENDPROC(pax_enter_kernel)
13824 +
13825 +ENTRY(pax_exit_kernel)
13826 + pushq %rdi
13827 +
13828 +#ifdef CONFIG_PARAVIRT
13829 + PV_SAVE_REGS(CLBR_RDI)
13830 +#endif
13831 +
13832 + mov %cs,%rdi
13833 + cmp $__KERNEXEC_KERNEL_CS,%edi
13834 + jnz 2f
13835 + GET_CR0_INTO_RDI
13836 + btr $16,%rdi
13837 + ljmpq __KERNEL_CS,1f
13838 +1: SET_RDI_INTO_CR0
13839 +2:
13840 +
13841 +#ifdef CONFIG_PARAVIRT
13842 + PV_RESTORE_REGS(CLBR_RDI);
13843 +#endif
13844 +
13845 + popq %rdi
13846 + retq
13847 +ENDPROC(pax_exit_kernel)
13848 +#endif
13849 +
13850 + .macro pax_enter_kernel_user
13851 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13852 + call pax_enter_kernel_user
13853 +#endif
13854 + .endm
13855 +
13856 + .macro pax_exit_kernel_user
13857 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13858 + call pax_exit_kernel_user
13859 +#endif
13860 +#ifdef CONFIG_PAX_RANDKSTACK
13861 + push %rax
13862 + call pax_randomize_kstack
13863 + pop %rax
13864 +#endif
13865 + pax_erase_kstack
13866 + .endm
13867 +
13868 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13869 +ENTRY(pax_enter_kernel_user)
13870 + pushq %rdi
13871 + pushq %rbx
13872 +
13873 +#ifdef CONFIG_PARAVIRT
13874 + PV_SAVE_REGS(CLBR_RDI)
13875 +#endif
13876 +
13877 + GET_CR3_INTO_RDI
13878 + mov %rdi,%rbx
13879 + add $__START_KERNEL_map,%rbx
13880 + sub phys_base(%rip),%rbx
13881 +
13882 +#ifdef CONFIG_PARAVIRT
13883 + pushq %rdi
13884 + cmpl $0, pv_info+PARAVIRT_enabled
13885 + jz 1f
13886 + i = 0
13887 + .rept USER_PGD_PTRS
13888 + mov i*8(%rbx),%rsi
13889 + mov $0,%sil
13890 + lea i*8(%rbx),%rdi
13891 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13892 + i = i + 1
13893 + .endr
13894 + jmp 2f
13895 +1:
13896 +#endif
13897 +
13898 + i = 0
13899 + .rept USER_PGD_PTRS
13900 + movb $0,i*8(%rbx)
13901 + i = i + 1
13902 + .endr
13903 +
13904 +#ifdef CONFIG_PARAVIRT
13905 +2: popq %rdi
13906 +#endif
13907 + SET_RDI_INTO_CR3
13908 +
13909 +#ifdef CONFIG_PAX_KERNEXEC
13910 + GET_CR0_INTO_RDI
13911 + bts $16,%rdi
13912 + SET_RDI_INTO_CR0
13913 +#endif
13914 +
13915 +#ifdef CONFIG_PARAVIRT
13916 + PV_RESTORE_REGS(CLBR_RDI)
13917 +#endif
13918 +
13919 + popq %rbx
13920 + popq %rdi
13921 + retq
13922 +ENDPROC(pax_enter_kernel_user)
13923 +
13924 +ENTRY(pax_exit_kernel_user)
13925 + push %rdi
13926 +
13927 +#ifdef CONFIG_PARAVIRT
13928 + pushq %rbx
13929 + PV_SAVE_REGS(CLBR_RDI)
13930 +#endif
13931 +
13932 +#ifdef CONFIG_PAX_KERNEXEC
13933 + GET_CR0_INTO_RDI
13934 + btr $16,%rdi
13935 + SET_RDI_INTO_CR0
13936 +#endif
13937 +
13938 + GET_CR3_INTO_RDI
13939 + add $__START_KERNEL_map,%rdi
13940 + sub phys_base(%rip),%rdi
13941 +
13942 +#ifdef CONFIG_PARAVIRT
13943 + cmpl $0, pv_info+PARAVIRT_enabled
13944 + jz 1f
13945 + mov %rdi,%rbx
13946 + i = 0
13947 + .rept USER_PGD_PTRS
13948 + mov i*8(%rbx),%rsi
13949 + mov $0x67,%sil
13950 + lea i*8(%rbx),%rdi
13951 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13952 + i = i + 1
13953 + .endr
13954 + jmp 2f
13955 +1:
13956 +#endif
13957 +
13958 + i = 0
13959 + .rept USER_PGD_PTRS
13960 + movb $0x67,i*8(%rdi)
13961 + i = i + 1
13962 + .endr
13963 +
13964 +#ifdef CONFIG_PARAVIRT
13965 +2: PV_RESTORE_REGS(CLBR_RDI)
13966 + popq %rbx
13967 +#endif
13968 +
13969 + popq %rdi
13970 + retq
13971 +ENDPROC(pax_exit_kernel_user)
13972 +#endif
13973 +
13974 +.macro pax_erase_kstack
13975 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13976 + call pax_erase_kstack
13977 +#endif
13978 +.endm
13979 +
13980 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13981 +/*
13982 + * r10: thread_info
13983 + * rcx, rdx: can be clobbered
13984 + */
13985 +ENTRY(pax_erase_kstack)
13986 + pushq %rdi
13987 + pushq %rax
13988 +
13989 + GET_THREAD_INFO(%r10)
13990 + mov TI_lowest_stack(%r10), %rdi
13991 + mov $-0xBEEF, %rax
13992 + std
13993 +
13994 +1: mov %edi, %ecx
13995 + and $THREAD_SIZE_asm - 1, %ecx
13996 + shr $3, %ecx
13997 + repne scasq
13998 + jecxz 2f
13999 +
14000 + cmp $2*8, %ecx
14001 + jc 2f
14002 +
14003 + mov $2*8, %ecx
14004 + repe scasq
14005 + jecxz 2f
14006 + jne 1b
14007 +
14008 +2: cld
14009 + mov %esp, %ecx
14010 + sub %edi, %ecx
14011 + shr $3, %ecx
14012 + rep stosq
14013 +
14014 + mov TI_task_thread_sp0(%r10), %rdi
14015 + sub $256, %rdi
14016 + mov %rdi, TI_lowest_stack(%r10)
14017 +
14018 + popq %rax
14019 + popq %rdi
14020 + ret
14021 +ENDPROC(pax_erase_kstack)
14022 +#endif
14023
14024 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
14025 #ifdef CONFIG_TRACE_IRQFLAGS
14026 @@ -317,7 +569,7 @@ ENTRY(save_args)
14027 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
14028 movq_cfi rbp, 8 /* push %rbp */
14029 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
14030 - testl $3, CS(%rdi)
14031 + testb $3, CS(%rdi)
14032 je 1f
14033 SWAPGS
14034 /*
14035 @@ -409,7 +661,7 @@ ENTRY(ret_from_fork)
14036
14037 RESTORE_REST
14038
14039 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
14040 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
14041 je int_ret_from_sys_call
14042
14043 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
14044 @@ -455,7 +707,7 @@ END(ret_from_fork)
14045 ENTRY(system_call)
14046 CFI_STARTPROC simple
14047 CFI_SIGNAL_FRAME
14048 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
14049 + CFI_DEF_CFA rsp,0
14050 CFI_REGISTER rip,rcx
14051 /*CFI_REGISTER rflags,r11*/
14052 SWAPGS_UNSAFE_STACK
14053 @@ -468,12 +720,13 @@ ENTRY(system_call_after_swapgs)
14054
14055 movq %rsp,PER_CPU_VAR(old_rsp)
14056 movq PER_CPU_VAR(kernel_stack),%rsp
14057 + pax_enter_kernel_user
14058 /*
14059 * No need to follow this irqs off/on section - it's straight
14060 * and short:
14061 */
14062 ENABLE_INTERRUPTS(CLBR_NONE)
14063 - SAVE_ARGS 8,1
14064 + SAVE_ARGS 8*6,1
14065 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
14066 movq %rcx,RIP-ARGOFFSET(%rsp)
14067 CFI_REL_OFFSET rip,RIP-ARGOFFSET
14068 @@ -502,6 +755,7 @@ sysret_check:
14069 andl %edi,%edx
14070 jnz sysret_careful
14071 CFI_REMEMBER_STATE
14072 + pax_exit_kernel_user
14073 /*
14074 * sysretq will re-enable interrupts:
14075 */
14076 @@ -562,6 +816,9 @@ auditsys:
14077 movq %rax,%rsi /* 2nd arg: syscall number */
14078 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
14079 call audit_syscall_entry
14080 +
14081 + pax_erase_kstack
14082 +
14083 LOAD_ARGS 0 /* reload call-clobbered registers */
14084 jmp system_call_fastpath
14085
14086 @@ -592,6 +849,9 @@ tracesys:
14087 FIXUP_TOP_OF_STACK %rdi
14088 movq %rsp,%rdi
14089 call syscall_trace_enter
14090 +
14091 + pax_erase_kstack
14092 +
14093 /*
14094 * Reload arg registers from stack in case ptrace changed them.
14095 * We don't reload %rax because syscall_trace_enter() returned
14096 @@ -613,7 +873,7 @@ tracesys:
14097 GLOBAL(int_ret_from_sys_call)
14098 DISABLE_INTERRUPTS(CLBR_NONE)
14099 TRACE_IRQS_OFF
14100 - testl $3,CS-ARGOFFSET(%rsp)
14101 + testb $3,CS-ARGOFFSET(%rsp)
14102 je retint_restore_args
14103 movl $_TIF_ALLWORK_MASK,%edi
14104 /* edi: mask to check */
14105 @@ -800,6 +1060,16 @@ END(interrupt)
14106 CFI_ADJUST_CFA_OFFSET 10*8
14107 call save_args
14108 PARTIAL_FRAME 0
14109 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14110 + testb $3, CS(%rdi)
14111 + jnz 1f
14112 + pax_enter_kernel
14113 + jmp 2f
14114 +1: pax_enter_kernel_user
14115 +2:
14116 +#else
14117 + pax_enter_kernel
14118 +#endif
14119 call \func
14120 .endm
14121
14122 @@ -822,7 +1092,7 @@ ret_from_intr:
14123 CFI_ADJUST_CFA_OFFSET -8
14124 exit_intr:
14125 GET_THREAD_INFO(%rcx)
14126 - testl $3,CS-ARGOFFSET(%rsp)
14127 + testb $3,CS-ARGOFFSET(%rsp)
14128 je retint_kernel
14129
14130 /* Interrupt came from user space */
14131 @@ -844,12 +1114,14 @@ retint_swapgs: /* return to user-space
14132 * The iretq could re-enable interrupts:
14133 */
14134 DISABLE_INTERRUPTS(CLBR_ANY)
14135 + pax_exit_kernel_user
14136 TRACE_IRQS_IRETQ
14137 SWAPGS
14138 jmp restore_args
14139
14140 retint_restore_args: /* return to kernel space */
14141 DISABLE_INTERRUPTS(CLBR_ANY)
14142 + pax_exit_kernel
14143 /*
14144 * The iretq could re-enable interrupts:
14145 */
14146 @@ -1032,6 +1304,16 @@ ENTRY(\sym)
14147 CFI_ADJUST_CFA_OFFSET 15*8
14148 call error_entry
14149 DEFAULT_FRAME 0
14150 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14151 + testb $3, CS(%rsp)
14152 + jnz 1f
14153 + pax_enter_kernel
14154 + jmp 2f
14155 +1: pax_enter_kernel_user
14156 +2:
14157 +#else
14158 + pax_enter_kernel
14159 +#endif
14160 movq %rsp,%rdi /* pt_regs pointer */
14161 xorl %esi,%esi /* no error code */
14162 call \do_sym
14163 @@ -1049,6 +1331,16 @@ ENTRY(\sym)
14164 subq $15*8, %rsp
14165 call save_paranoid
14166 TRACE_IRQS_OFF
14167 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14168 + testb $3, CS(%rsp)
14169 + jnz 1f
14170 + pax_enter_kernel
14171 + jmp 2f
14172 +1: pax_enter_kernel_user
14173 +2:
14174 +#else
14175 + pax_enter_kernel
14176 +#endif
14177 movq %rsp,%rdi /* pt_regs pointer */
14178 xorl %esi,%esi /* no error code */
14179 call \do_sym
14180 @@ -1066,9 +1358,24 @@ ENTRY(\sym)
14181 subq $15*8, %rsp
14182 call save_paranoid
14183 TRACE_IRQS_OFF
14184 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14185 + testb $3, CS(%rsp)
14186 + jnz 1f
14187 + pax_enter_kernel
14188 + jmp 2f
14189 +1: pax_enter_kernel_user
14190 +2:
14191 +#else
14192 + pax_enter_kernel
14193 +#endif
14194 movq %rsp,%rdi /* pt_regs pointer */
14195 xorl %esi,%esi /* no error code */
14196 - PER_CPU(init_tss, %rbp)
14197 +#ifdef CONFIG_SMP
14198 + imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
14199 + lea init_tss(%rbp), %rbp
14200 +#else
14201 + lea init_tss(%rip), %rbp
14202 +#endif
14203 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
14204 call \do_sym
14205 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
14206 @@ -1085,6 +1392,16 @@ ENTRY(\sym)
14207 CFI_ADJUST_CFA_OFFSET 15*8
14208 call error_entry
14209 DEFAULT_FRAME 0
14210 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14211 + testb $3, CS(%rsp)
14212 + jnz 1f
14213 + pax_enter_kernel
14214 + jmp 2f
14215 +1: pax_enter_kernel_user
14216 +2:
14217 +#else
14218 + pax_enter_kernel
14219 +#endif
14220 movq %rsp,%rdi /* pt_regs pointer */
14221 movq ORIG_RAX(%rsp),%rsi /* get error code */
14222 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14223 @@ -1104,6 +1421,16 @@ ENTRY(\sym)
14224 call save_paranoid
14225 DEFAULT_FRAME 0
14226 TRACE_IRQS_OFF
14227 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14228 + testb $3, CS(%rsp)
14229 + jnz 1f
14230 + pax_enter_kernel
14231 + jmp 2f
14232 +1: pax_enter_kernel_user
14233 +2:
14234 +#else
14235 + pax_enter_kernel
14236 +#endif
14237 movq %rsp,%rdi /* pt_regs pointer */
14238 movq ORIG_RAX(%rsp),%rsi /* get error code */
14239 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14240 @@ -1405,14 +1732,27 @@ ENTRY(paranoid_exit)
14241 TRACE_IRQS_OFF
14242 testl %ebx,%ebx /* swapgs needed? */
14243 jnz paranoid_restore
14244 - testl $3,CS(%rsp)
14245 + testb $3,CS(%rsp)
14246 jnz paranoid_userspace
14247 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14248 + pax_exit_kernel
14249 + TRACE_IRQS_IRETQ 0
14250 + SWAPGS_UNSAFE_STACK
14251 + RESTORE_ALL 8
14252 + jmp irq_return
14253 +#endif
14254 paranoid_swapgs:
14255 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14256 + pax_exit_kernel_user
14257 +#else
14258 + pax_exit_kernel
14259 +#endif
14260 TRACE_IRQS_IRETQ 0
14261 SWAPGS_UNSAFE_STACK
14262 RESTORE_ALL 8
14263 jmp irq_return
14264 paranoid_restore:
14265 + pax_exit_kernel
14266 TRACE_IRQS_IRETQ 0
14267 RESTORE_ALL 8
14268 jmp irq_return
14269 @@ -1470,7 +1810,7 @@ ENTRY(error_entry)
14270 movq_cfi r14, R14+8
14271 movq_cfi r15, R15+8
14272 xorl %ebx,%ebx
14273 - testl $3,CS+8(%rsp)
14274 + testb $3,CS+8(%rsp)
14275 je error_kernelspace
14276 error_swapgs:
14277 SWAPGS
14278 @@ -1529,6 +1869,16 @@ ENTRY(nmi)
14279 CFI_ADJUST_CFA_OFFSET 15*8
14280 call save_paranoid
14281 DEFAULT_FRAME 0
14282 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14283 + testb $3, CS(%rsp)
14284 + jnz 1f
14285 + pax_enter_kernel
14286 + jmp 2f
14287 +1: pax_enter_kernel_user
14288 +2:
14289 +#else
14290 + pax_enter_kernel
14291 +#endif
14292 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
14293 movq %rsp,%rdi
14294 movq $-1,%rsi
14295 @@ -1539,11 +1889,25 @@ ENTRY(nmi)
14296 DISABLE_INTERRUPTS(CLBR_NONE)
14297 testl %ebx,%ebx /* swapgs needed? */
14298 jnz nmi_restore
14299 - testl $3,CS(%rsp)
14300 + testb $3,CS(%rsp)
14301 jnz nmi_userspace
14302 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14303 + pax_exit_kernel
14304 + SWAPGS_UNSAFE_STACK
14305 + RESTORE_ALL 8
14306 + jmp irq_return
14307 +#endif
14308 nmi_swapgs:
14309 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14310 + pax_exit_kernel_user
14311 +#else
14312 + pax_exit_kernel
14313 +#endif
14314 SWAPGS_UNSAFE_STACK
14315 + RESTORE_ALL 8
14316 + jmp irq_return
14317 nmi_restore:
14318 + pax_exit_kernel
14319 RESTORE_ALL 8
14320 jmp irq_return
14321 nmi_userspace:
14322 diff -urNp linux-2.6.32.45/arch/x86/kernel/ftrace.c linux-2.6.32.45/arch/x86/kernel/ftrace.c
14323 --- linux-2.6.32.45/arch/x86/kernel/ftrace.c 2011-03-27 14:31:47.000000000 -0400
14324 +++ linux-2.6.32.45/arch/x86/kernel/ftrace.c 2011-05-04 17:56:20.000000000 -0400
14325 @@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the
14326 static void *mod_code_newcode; /* holds the text to write to the IP */
14327
14328 static unsigned nmi_wait_count;
14329 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
14330 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
14331
14332 int ftrace_arch_read_dyn_info(char *buf, int size)
14333 {
14334 @@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf,
14335
14336 r = snprintf(buf, size, "%u %u",
14337 nmi_wait_count,
14338 - atomic_read(&nmi_update_count));
14339 + atomic_read_unchecked(&nmi_update_count));
14340 return r;
14341 }
14342
14343 @@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
14344 {
14345 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
14346 smp_rmb();
14347 + pax_open_kernel();
14348 ftrace_mod_code();
14349 - atomic_inc(&nmi_update_count);
14350 + pax_close_kernel();
14351 + atomic_inc_unchecked(&nmi_update_count);
14352 }
14353 /* Must have previous changes seen before executions */
14354 smp_mb();
14355 @@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, voi
14356
14357
14358
14359 -static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
14360 +static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
14361
14362 static unsigned char *ftrace_nop_replace(void)
14363 {
14364 @@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, uns
14365 {
14366 unsigned char replaced[MCOUNT_INSN_SIZE];
14367
14368 + ip = ktla_ktva(ip);
14369 +
14370 /*
14371 * Note: Due to modules and __init, code can
14372 * disappear and change, we need to protect against faulting
14373 @@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_fun
14374 unsigned char old[MCOUNT_INSN_SIZE], *new;
14375 int ret;
14376
14377 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
14378 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
14379 new = ftrace_call_replace(ip, (unsigned long)func);
14380 ret = ftrace_modify_code(ip, old, new);
14381
14382 @@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *da
14383 switch (faulted) {
14384 case 0:
14385 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
14386 - memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
14387 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
14388 break;
14389 case 1:
14390 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
14391 - memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
14392 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
14393 break;
14394 case 2:
14395 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
14396 - memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
14397 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
14398 break;
14399 }
14400
14401 @@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long
14402 {
14403 unsigned char code[MCOUNT_INSN_SIZE];
14404
14405 + ip = ktla_ktva(ip);
14406 +
14407 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
14408 return -EFAULT;
14409
14410 diff -urNp linux-2.6.32.45/arch/x86/kernel/head32.c linux-2.6.32.45/arch/x86/kernel/head32.c
14411 --- linux-2.6.32.45/arch/x86/kernel/head32.c 2011-03-27 14:31:47.000000000 -0400
14412 +++ linux-2.6.32.45/arch/x86/kernel/head32.c 2011-04-17 15:56:46.000000000 -0400
14413 @@ -16,6 +16,7 @@
14414 #include <asm/apic.h>
14415 #include <asm/io_apic.h>
14416 #include <asm/bios_ebda.h>
14417 +#include <asm/boot.h>
14418
14419 static void __init i386_default_early_setup(void)
14420 {
14421 @@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
14422 {
14423 reserve_trampoline_memory();
14424
14425 - reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14426 + reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14427
14428 #ifdef CONFIG_BLK_DEV_INITRD
14429 /* Reserve INITRD */
14430 diff -urNp linux-2.6.32.45/arch/x86/kernel/head_32.S linux-2.6.32.45/arch/x86/kernel/head_32.S
14431 --- linux-2.6.32.45/arch/x86/kernel/head_32.S 2011-03-27 14:31:47.000000000 -0400
14432 +++ linux-2.6.32.45/arch/x86/kernel/head_32.S 2011-07-06 19:53:33.000000000 -0400
14433 @@ -19,10 +19,17 @@
14434 #include <asm/setup.h>
14435 #include <asm/processor-flags.h>
14436 #include <asm/percpu.h>
14437 +#include <asm/msr-index.h>
14438
14439 /* Physical address */
14440 #define pa(X) ((X) - __PAGE_OFFSET)
14441
14442 +#ifdef CONFIG_PAX_KERNEXEC
14443 +#define ta(X) (X)
14444 +#else
14445 +#define ta(X) ((X) - __PAGE_OFFSET)
14446 +#endif
14447 +
14448 /*
14449 * References to members of the new_cpu_data structure.
14450 */
14451 @@ -52,11 +59,7 @@
14452 * and small than max_low_pfn, otherwise will waste some page table entries
14453 */
14454
14455 -#if PTRS_PER_PMD > 1
14456 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
14457 -#else
14458 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
14459 -#endif
14460 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
14461
14462 /* Enough space to fit pagetables for the low memory linear map */
14463 MAPPING_BEYOND_END = \
14464 @@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
14465 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14466
14467 /*
14468 + * Real beginning of normal "text" segment
14469 + */
14470 +ENTRY(stext)
14471 +ENTRY(_stext)
14472 +
14473 +/*
14474 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
14475 * %esi points to the real-mode code as a 32-bit pointer.
14476 * CS and DS must be 4 GB flat segments, but we don't depend on
14477 @@ -80,7 +89,16 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14478 * can.
14479 */
14480 __HEAD
14481 +
14482 +#ifdef CONFIG_PAX_KERNEXEC
14483 + jmp startup_32
14484 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
14485 +.fill PAGE_SIZE-5,1,0xcc
14486 +#endif
14487 +
14488 ENTRY(startup_32)
14489 + movl pa(stack_start),%ecx
14490 +
14491 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
14492 us to not reload segments */
14493 testb $(1<<6), BP_loadflags(%esi)
14494 @@ -95,7 +113,60 @@ ENTRY(startup_32)
14495 movl %eax,%es
14496 movl %eax,%fs
14497 movl %eax,%gs
14498 + movl %eax,%ss
14499 2:
14500 + leal -__PAGE_OFFSET(%ecx),%esp
14501 +
14502 +#ifdef CONFIG_SMP
14503 + movl $pa(cpu_gdt_table),%edi
14504 + movl $__per_cpu_load,%eax
14505 + movw %ax,__KERNEL_PERCPU + 2(%edi)
14506 + rorl $16,%eax
14507 + movb %al,__KERNEL_PERCPU + 4(%edi)
14508 + movb %ah,__KERNEL_PERCPU + 7(%edi)
14509 + movl $__per_cpu_end - 1,%eax
14510 + subl $__per_cpu_start,%eax
14511 + movw %ax,__KERNEL_PERCPU + 0(%edi)
14512 +#endif
14513 +
14514 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14515 + movl $NR_CPUS,%ecx
14516 + movl $pa(cpu_gdt_table),%edi
14517 +1:
14518 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
14519 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
14520 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
14521 + addl $PAGE_SIZE_asm,%edi
14522 + loop 1b
14523 +#endif
14524 +
14525 +#ifdef CONFIG_PAX_KERNEXEC
14526 + movl $pa(boot_gdt),%edi
14527 + movl $__LOAD_PHYSICAL_ADDR,%eax
14528 + movw %ax,__BOOT_CS + 2(%edi)
14529 + rorl $16,%eax
14530 + movb %al,__BOOT_CS + 4(%edi)
14531 + movb %ah,__BOOT_CS + 7(%edi)
14532 + rorl $16,%eax
14533 +
14534 + ljmp $(__BOOT_CS),$1f
14535 +1:
14536 +
14537 + movl $NR_CPUS,%ecx
14538 + movl $pa(cpu_gdt_table),%edi
14539 + addl $__PAGE_OFFSET,%eax
14540 +1:
14541 + movw %ax,__KERNEL_CS + 2(%edi)
14542 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
14543 + rorl $16,%eax
14544 + movb %al,__KERNEL_CS + 4(%edi)
14545 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
14546 + movb %ah,__KERNEL_CS + 7(%edi)
14547 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
14548 + rorl $16,%eax
14549 + addl $PAGE_SIZE_asm,%edi
14550 + loop 1b
14551 +#endif
14552
14553 /*
14554 * Clear BSS first so that there are no surprises...
14555 @@ -140,9 +211,7 @@ ENTRY(startup_32)
14556 cmpl $num_subarch_entries, %eax
14557 jae bad_subarch
14558
14559 - movl pa(subarch_entries)(,%eax,4), %eax
14560 - subl $__PAGE_OFFSET, %eax
14561 - jmp *%eax
14562 + jmp *pa(subarch_entries)(,%eax,4)
14563
14564 bad_subarch:
14565 WEAK(lguest_entry)
14566 @@ -154,10 +223,10 @@ WEAK(xen_entry)
14567 __INITDATA
14568
14569 subarch_entries:
14570 - .long default_entry /* normal x86/PC */
14571 - .long lguest_entry /* lguest hypervisor */
14572 - .long xen_entry /* Xen hypervisor */
14573 - .long default_entry /* Moorestown MID */
14574 + .long ta(default_entry) /* normal x86/PC */
14575 + .long ta(lguest_entry) /* lguest hypervisor */
14576 + .long ta(xen_entry) /* Xen hypervisor */
14577 + .long ta(default_entry) /* Moorestown MID */
14578 num_subarch_entries = (. - subarch_entries) / 4
14579 .previous
14580 #endif /* CONFIG_PARAVIRT */
14581 @@ -218,8 +287,11 @@ default_entry:
14582 movl %eax, pa(max_pfn_mapped)
14583
14584 /* Do early initialization of the fixmap area */
14585 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
14586 - movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14587 +#ifdef CONFIG_COMPAT_VDSO
14588 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14589 +#else
14590 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14591 +#endif
14592 #else /* Not PAE */
14593
14594 page_pde_offset = (__PAGE_OFFSET >> 20);
14595 @@ -249,8 +321,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14596 movl %eax, pa(max_pfn_mapped)
14597
14598 /* Do early initialization of the fixmap area */
14599 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
14600 - movl %eax,pa(swapper_pg_dir+0xffc)
14601 +#ifdef CONFIG_COMPAT_VDSO
14602 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
14603 +#else
14604 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
14605 +#endif
14606 #endif
14607 jmp 3f
14608 /*
14609 @@ -272,6 +347,9 @@ ENTRY(startup_32_smp)
14610 movl %eax,%es
14611 movl %eax,%fs
14612 movl %eax,%gs
14613 + movl pa(stack_start),%ecx
14614 + movl %eax,%ss
14615 + leal -__PAGE_OFFSET(%ecx),%esp
14616 #endif /* CONFIG_SMP */
14617 3:
14618
14619 @@ -297,6 +375,7 @@ ENTRY(startup_32_smp)
14620 orl %edx,%eax
14621 movl %eax,%cr4
14622
14623 +#ifdef CONFIG_X86_PAE
14624 btl $5, %eax # check if PAE is enabled
14625 jnc 6f
14626
14627 @@ -305,6 +384,10 @@ ENTRY(startup_32_smp)
14628 cpuid
14629 cmpl $0x80000000, %eax
14630 jbe 6f
14631 +
14632 + /* Clear bogus XD_DISABLE bits */
14633 + call verify_cpu
14634 +
14635 mov $0x80000001, %eax
14636 cpuid
14637 /* Execute Disable bit supported? */
14638 @@ -312,13 +395,17 @@ ENTRY(startup_32_smp)
14639 jnc 6f
14640
14641 /* Setup EFER (Extended Feature Enable Register) */
14642 - movl $0xc0000080, %ecx
14643 + movl $MSR_EFER, %ecx
14644 rdmsr
14645
14646 btsl $11, %eax
14647 /* Make changes effective */
14648 wrmsr
14649
14650 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
14651 + movl $1,pa(nx_enabled)
14652 +#endif
14653 +
14654 6:
14655
14656 /*
14657 @@ -331,8 +418,8 @@ ENTRY(startup_32_smp)
14658 movl %eax,%cr0 /* ..and set paging (PG) bit */
14659 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
14660 1:
14661 - /* Set up the stack pointer */
14662 - lss stack_start,%esp
14663 + /* Shift the stack pointer to a virtual address */
14664 + addl $__PAGE_OFFSET, %esp
14665
14666 /*
14667 * Initialize eflags. Some BIOS's leave bits like NT set. This would
14668 @@ -344,9 +431,7 @@ ENTRY(startup_32_smp)
14669
14670 #ifdef CONFIG_SMP
14671 cmpb $0, ready
14672 - jz 1f /* Initial CPU cleans BSS */
14673 - jmp checkCPUtype
14674 -1:
14675 + jnz checkCPUtype
14676 #endif /* CONFIG_SMP */
14677
14678 /*
14679 @@ -424,7 +509,7 @@ is386: movl $2,%ecx # set MP
14680 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
14681 movl %eax,%ss # after changing gdt.
14682
14683 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
14684 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
14685 movl %eax,%ds
14686 movl %eax,%es
14687
14688 @@ -438,15 +523,22 @@ is386: movl $2,%ecx # set MP
14689 */
14690 cmpb $0,ready
14691 jne 1f
14692 - movl $per_cpu__gdt_page,%eax
14693 + movl $cpu_gdt_table,%eax
14694 movl $per_cpu__stack_canary,%ecx
14695 +#ifdef CONFIG_SMP
14696 + addl $__per_cpu_load,%ecx
14697 +#endif
14698 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
14699 shrl $16, %ecx
14700 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
14701 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
14702 1:
14703 -#endif
14704 movl $(__KERNEL_STACK_CANARY),%eax
14705 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14706 + movl $(__USER_DS),%eax
14707 +#else
14708 + xorl %eax,%eax
14709 +#endif
14710 movl %eax,%gs
14711
14712 xorl %eax,%eax # Clear LDT
14713 @@ -454,14 +546,7 @@ is386: movl $2,%ecx # set MP
14714
14715 cld # gcc2 wants the direction flag cleared at all times
14716 pushl $0 # fake return address for unwinder
14717 -#ifdef CONFIG_SMP
14718 - movb ready, %cl
14719 movb $1, ready
14720 - cmpb $0,%cl # the first CPU calls start_kernel
14721 - je 1f
14722 - movl (stack_start), %esp
14723 -1:
14724 -#endif /* CONFIG_SMP */
14725 jmp *(initial_code)
14726
14727 /*
14728 @@ -546,22 +631,22 @@ early_page_fault:
14729 jmp early_fault
14730
14731 early_fault:
14732 - cld
14733 #ifdef CONFIG_PRINTK
14734 + cmpl $1,%ss:early_recursion_flag
14735 + je hlt_loop
14736 + incl %ss:early_recursion_flag
14737 + cld
14738 pusha
14739 movl $(__KERNEL_DS),%eax
14740 movl %eax,%ds
14741 movl %eax,%es
14742 - cmpl $2,early_recursion_flag
14743 - je hlt_loop
14744 - incl early_recursion_flag
14745 movl %cr2,%eax
14746 pushl %eax
14747 pushl %edx /* trapno */
14748 pushl $fault_msg
14749 call printk
14750 +; call dump_stack
14751 #endif
14752 - call dump_stack
14753 hlt_loop:
14754 hlt
14755 jmp hlt_loop
14756 @@ -569,8 +654,11 @@ hlt_loop:
14757 /* This is the default interrupt "handler" :-) */
14758 ALIGN
14759 ignore_int:
14760 - cld
14761 #ifdef CONFIG_PRINTK
14762 + cmpl $2,%ss:early_recursion_flag
14763 + je hlt_loop
14764 + incl %ss:early_recursion_flag
14765 + cld
14766 pushl %eax
14767 pushl %ecx
14768 pushl %edx
14769 @@ -579,9 +667,6 @@ ignore_int:
14770 movl $(__KERNEL_DS),%eax
14771 movl %eax,%ds
14772 movl %eax,%es
14773 - cmpl $2,early_recursion_flag
14774 - je hlt_loop
14775 - incl early_recursion_flag
14776 pushl 16(%esp)
14777 pushl 24(%esp)
14778 pushl 32(%esp)
14779 @@ -600,6 +685,8 @@ ignore_int:
14780 #endif
14781 iret
14782
14783 +#include "verify_cpu.S"
14784 +
14785 __REFDATA
14786 .align 4
14787 ENTRY(initial_code)
14788 @@ -610,31 +697,47 @@ ENTRY(initial_page_table)
14789 /*
14790 * BSS section
14791 */
14792 -__PAGE_ALIGNED_BSS
14793 - .align PAGE_SIZE_asm
14794 #ifdef CONFIG_X86_PAE
14795 +.section .swapper_pg_pmd,"a",@progbits
14796 swapper_pg_pmd:
14797 .fill 1024*KPMDS,4,0
14798 #else
14799 +.section .swapper_pg_dir,"a",@progbits
14800 ENTRY(swapper_pg_dir)
14801 .fill 1024,4,0
14802 #endif
14803 +.section .swapper_pg_fixmap,"a",@progbits
14804 swapper_pg_fixmap:
14805 .fill 1024,4,0
14806 #ifdef CONFIG_X86_TRAMPOLINE
14807 +.section .trampoline_pg_dir,"a",@progbits
14808 ENTRY(trampoline_pg_dir)
14809 +#ifdef CONFIG_X86_PAE
14810 + .fill 4,8,0
14811 +#else
14812 .fill 1024,4,0
14813 #endif
14814 +#endif
14815 +
14816 +.section .empty_zero_page,"a",@progbits
14817 ENTRY(empty_zero_page)
14818 .fill 4096,1,0
14819
14820 /*
14821 + * The IDT has to be page-aligned to simplify the Pentium
14822 + * F0 0F bug workaround.. We have a special link segment
14823 + * for this.
14824 + */
14825 +.section .idt,"a",@progbits
14826 +ENTRY(idt_table)
14827 + .fill 256,8,0
14828 +
14829 +/*
14830 * This starts the data section.
14831 */
14832 #ifdef CONFIG_X86_PAE
14833 -__PAGE_ALIGNED_DATA
14834 - /* Page-aligned for the benefit of paravirt? */
14835 - .align PAGE_SIZE_asm
14836 +.section .swapper_pg_dir,"a",@progbits
14837 +
14838 ENTRY(swapper_pg_dir)
14839 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
14840 # if KPMDS == 3
14841 @@ -653,15 +756,24 @@ ENTRY(swapper_pg_dir)
14842 # error "Kernel PMDs should be 1, 2 or 3"
14843 # endif
14844 .align PAGE_SIZE_asm /* needs to be page-sized too */
14845 +
14846 +#ifdef CONFIG_PAX_PER_CPU_PGD
14847 +ENTRY(cpu_pgd)
14848 + .rept NR_CPUS
14849 + .fill 4,8,0
14850 + .endr
14851 +#endif
14852 +
14853 #endif
14854
14855 .data
14856 +.balign 4
14857 ENTRY(stack_start)
14858 - .long init_thread_union+THREAD_SIZE
14859 - .long __BOOT_DS
14860 + .long init_thread_union+THREAD_SIZE-8
14861
14862 ready: .byte 0
14863
14864 +.section .rodata,"a",@progbits
14865 early_recursion_flag:
14866 .long 0
14867
14868 @@ -697,7 +809,7 @@ fault_msg:
14869 .word 0 # 32 bit align gdt_desc.address
14870 boot_gdt_descr:
14871 .word __BOOT_DS+7
14872 - .long boot_gdt - __PAGE_OFFSET
14873 + .long pa(boot_gdt)
14874
14875 .word 0 # 32-bit align idt_desc.address
14876 idt_descr:
14877 @@ -708,7 +820,7 @@ idt_descr:
14878 .word 0 # 32 bit align gdt_desc.address
14879 ENTRY(early_gdt_descr)
14880 .word GDT_ENTRIES*8-1
14881 - .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
14882 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
14883
14884 /*
14885 * The boot_gdt must mirror the equivalent in setup.S and is
14886 @@ -717,5 +829,65 @@ ENTRY(early_gdt_descr)
14887 .align L1_CACHE_BYTES
14888 ENTRY(boot_gdt)
14889 .fill GDT_ENTRY_BOOT_CS,8,0
14890 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
14891 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
14892 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
14893 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
14894 +
14895 + .align PAGE_SIZE_asm
14896 +ENTRY(cpu_gdt_table)
14897 + .rept NR_CPUS
14898 + .quad 0x0000000000000000 /* NULL descriptor */
14899 + .quad 0x0000000000000000 /* 0x0b reserved */
14900 + .quad 0x0000000000000000 /* 0x13 reserved */
14901 + .quad 0x0000000000000000 /* 0x1b reserved */
14902 +
14903 +#ifdef CONFIG_PAX_KERNEXEC
14904 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
14905 +#else
14906 + .quad 0x0000000000000000 /* 0x20 unused */
14907 +#endif
14908 +
14909 + .quad 0x0000000000000000 /* 0x28 unused */
14910 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
14911 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
14912 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
14913 + .quad 0x0000000000000000 /* 0x4b reserved */
14914 + .quad 0x0000000000000000 /* 0x53 reserved */
14915 + .quad 0x0000000000000000 /* 0x5b reserved */
14916 +
14917 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
14918 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
14919 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
14920 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
14921 +
14922 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
14923 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
14924 +
14925 + /*
14926 + * Segments used for calling PnP BIOS have byte granularity.
14927 + * The code segments and data segments have fixed 64k limits,
14928 + * the transfer segment sizes are set at run time.
14929 + */
14930 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
14931 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
14932 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
14933 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
14934 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
14935 +
14936 + /*
14937 + * The APM segments have byte granularity and their bases
14938 + * are set at run time. All have 64k limits.
14939 + */
14940 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
14941 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
14942 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
14943 +
14944 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
14945 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
14946 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
14947 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
14948 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
14949 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
14950 +
14951 + /* Be sure this is zeroed to avoid false validations in Xen */
14952 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
14953 + .endr
14954 diff -urNp linux-2.6.32.45/arch/x86/kernel/head_64.S linux-2.6.32.45/arch/x86/kernel/head_64.S
14955 --- linux-2.6.32.45/arch/x86/kernel/head_64.S 2011-03-27 14:31:47.000000000 -0400
14956 +++ linux-2.6.32.45/arch/x86/kernel/head_64.S 2011-04-17 15:56:46.000000000 -0400
14957 @@ -19,6 +19,7 @@
14958 #include <asm/cache.h>
14959 #include <asm/processor-flags.h>
14960 #include <asm/percpu.h>
14961 +#include <asm/cpufeature.h>
14962
14963 #ifdef CONFIG_PARAVIRT
14964 #include <asm/asm-offsets.h>
14965 @@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
14966 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
14967 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
14968 L3_START_KERNEL = pud_index(__START_KERNEL_map)
14969 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
14970 +L3_VMALLOC_START = pud_index(VMALLOC_START)
14971 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
14972 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
14973
14974 .text
14975 __HEAD
14976 @@ -85,35 +90,22 @@ startup_64:
14977 */
14978 addq %rbp, init_level4_pgt + 0(%rip)
14979 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
14980 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
14981 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
14982 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
14983
14984 addq %rbp, level3_ident_pgt + 0(%rip)
14985 +#ifndef CONFIG_XEN
14986 + addq %rbp, level3_ident_pgt + 8(%rip)
14987 +#endif
14988
14989 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
14990 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
14991 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
14992
14993 - addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14994 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
14995 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
14996
14997 - /* Add an Identity mapping if I am above 1G */
14998 - leaq _text(%rip), %rdi
14999 - andq $PMD_PAGE_MASK, %rdi
15000 -
15001 - movq %rdi, %rax
15002 - shrq $PUD_SHIFT, %rax
15003 - andq $(PTRS_PER_PUD - 1), %rax
15004 - jz ident_complete
15005 -
15006 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
15007 - leaq level3_ident_pgt(%rip), %rbx
15008 - movq %rdx, 0(%rbx, %rax, 8)
15009 -
15010 - movq %rdi, %rax
15011 - shrq $PMD_SHIFT, %rax
15012 - andq $(PTRS_PER_PMD - 1), %rax
15013 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
15014 - leaq level2_spare_pgt(%rip), %rbx
15015 - movq %rdx, 0(%rbx, %rax, 8)
15016 -ident_complete:
15017 + addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
15018 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
15019
15020 /*
15021 * Fixup the kernel text+data virtual addresses. Note that
15022 @@ -161,8 +153,8 @@ ENTRY(secondary_startup_64)
15023 * after the boot processor executes this code.
15024 */
15025
15026 - /* Enable PAE mode and PGE */
15027 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
15028 + /* Enable PAE mode and PSE/PGE */
15029 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15030 movq %rax, %cr4
15031
15032 /* Setup early boot stage 4 level pagetables. */
15033 @@ -184,9 +176,13 @@ ENTRY(secondary_startup_64)
15034 movl $MSR_EFER, %ecx
15035 rdmsr
15036 btsl $_EFER_SCE, %eax /* Enable System Call */
15037 - btl $20,%edi /* No Execute supported? */
15038 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
15039 jnc 1f
15040 btsl $_EFER_NX, %eax
15041 + leaq init_level4_pgt(%rip), %rdi
15042 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
15043 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
15044 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
15045 1: wrmsr /* Make changes effective */
15046
15047 /* Setup cr0 */
15048 @@ -262,16 +258,16 @@ ENTRY(secondary_startup_64)
15049 .quad x86_64_start_kernel
15050 ENTRY(initial_gs)
15051 .quad INIT_PER_CPU_VAR(irq_stack_union)
15052 - __FINITDATA
15053
15054 ENTRY(stack_start)
15055 .quad init_thread_union+THREAD_SIZE-8
15056 .word 0
15057 + __FINITDATA
15058
15059 bad_address:
15060 jmp bad_address
15061
15062 - .section ".init.text","ax"
15063 + __INIT
15064 #ifdef CONFIG_EARLY_PRINTK
15065 .globl early_idt_handlers
15066 early_idt_handlers:
15067 @@ -316,18 +312,23 @@ ENTRY(early_idt_handler)
15068 #endif /* EARLY_PRINTK */
15069 1: hlt
15070 jmp 1b
15071 + .previous
15072
15073 #ifdef CONFIG_EARLY_PRINTK
15074 + __INITDATA
15075 early_recursion_flag:
15076 .long 0
15077 + .previous
15078
15079 + .section .rodata,"a",@progbits
15080 early_idt_msg:
15081 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
15082 early_idt_ripmsg:
15083 .asciz "RIP %s\n"
15084 -#endif /* CONFIG_EARLY_PRINTK */
15085 .previous
15086 +#endif /* CONFIG_EARLY_PRINTK */
15087
15088 + .section .rodata,"a",@progbits
15089 #define NEXT_PAGE(name) \
15090 .balign PAGE_SIZE; \
15091 ENTRY(name)
15092 @@ -350,13 +351,36 @@ NEXT_PAGE(init_level4_pgt)
15093 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15094 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
15095 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15096 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
15097 + .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
15098 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
15099 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15100 .org init_level4_pgt + L4_START_KERNEL*8, 0
15101 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
15102 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
15103
15104 +#ifdef CONFIG_PAX_PER_CPU_PGD
15105 +NEXT_PAGE(cpu_pgd)
15106 + .rept NR_CPUS
15107 + .fill 512,8,0
15108 + .endr
15109 +#endif
15110 +
15111 NEXT_PAGE(level3_ident_pgt)
15112 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15113 +#ifdef CONFIG_XEN
15114 .fill 511,8,0
15115 +#else
15116 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
15117 + .fill 510,8,0
15118 +#endif
15119 +
15120 +NEXT_PAGE(level3_vmalloc_pgt)
15121 + .fill 512,8,0
15122 +
15123 +NEXT_PAGE(level3_vmemmap_pgt)
15124 + .fill L3_VMEMMAP_START,8,0
15125 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15126
15127 NEXT_PAGE(level3_kernel_pgt)
15128 .fill L3_START_KERNEL,8,0
15129 @@ -364,20 +388,23 @@ NEXT_PAGE(level3_kernel_pgt)
15130 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
15131 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15132
15133 +NEXT_PAGE(level2_vmemmap_pgt)
15134 + .fill 512,8,0
15135 +
15136 NEXT_PAGE(level2_fixmap_pgt)
15137 - .fill 506,8,0
15138 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15139 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
15140 - .fill 5,8,0
15141 + .fill 507,8,0
15142 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
15143 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
15144 + .fill 4,8,0
15145
15146 -NEXT_PAGE(level1_fixmap_pgt)
15147 +NEXT_PAGE(level1_vsyscall_pgt)
15148 .fill 512,8,0
15149
15150 -NEXT_PAGE(level2_ident_pgt)
15151 - /* Since I easily can, map the first 1G.
15152 + /* Since I easily can, map the first 2G.
15153 * Don't set NX because code runs from these pages.
15154 */
15155 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
15156 +NEXT_PAGE(level2_ident_pgt)
15157 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
15158
15159 NEXT_PAGE(level2_kernel_pgt)
15160 /*
15161 @@ -390,33 +417,55 @@ NEXT_PAGE(level2_kernel_pgt)
15162 * If you want to increase this then increase MODULES_VADDR
15163 * too.)
15164 */
15165 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
15166 - KERNEL_IMAGE_SIZE/PMD_SIZE)
15167 -
15168 -NEXT_PAGE(level2_spare_pgt)
15169 - .fill 512, 8, 0
15170 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
15171
15172 #undef PMDS
15173 #undef NEXT_PAGE
15174
15175 - .data
15176 + .align PAGE_SIZE
15177 +ENTRY(cpu_gdt_table)
15178 + .rept NR_CPUS
15179 + .quad 0x0000000000000000 /* NULL descriptor */
15180 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
15181 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
15182 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
15183 + .quad 0x00cffb000000ffff /* __USER32_CS */
15184 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
15185 + .quad 0x00affb000000ffff /* __USER_CS */
15186 +
15187 +#ifdef CONFIG_PAX_KERNEXEC
15188 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
15189 +#else
15190 + .quad 0x0 /* unused */
15191 +#endif
15192 +
15193 + .quad 0,0 /* TSS */
15194 + .quad 0,0 /* LDT */
15195 + .quad 0,0,0 /* three TLS descriptors */
15196 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
15197 + /* asm/segment.h:GDT_ENTRIES must match this */
15198 +
15199 + /* zero the remaining page */
15200 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
15201 + .endr
15202 +
15203 .align 16
15204 .globl early_gdt_descr
15205 early_gdt_descr:
15206 .word GDT_ENTRIES*8-1
15207 early_gdt_descr_base:
15208 - .quad INIT_PER_CPU_VAR(gdt_page)
15209 + .quad cpu_gdt_table
15210
15211 ENTRY(phys_base)
15212 /* This must match the first entry in level2_kernel_pgt */
15213 .quad 0x0000000000000000
15214
15215 #include "../../x86/xen/xen-head.S"
15216 -
15217 - .section .bss, "aw", @nobits
15218 +
15219 + .section .rodata,"a",@progbits
15220 .align L1_CACHE_BYTES
15221 ENTRY(idt_table)
15222 - .skip IDT_ENTRIES * 16
15223 + .fill 512,8,0
15224
15225 __PAGE_ALIGNED_BSS
15226 .align PAGE_SIZE
15227 diff -urNp linux-2.6.32.45/arch/x86/kernel/i386_ksyms_32.c linux-2.6.32.45/arch/x86/kernel/i386_ksyms_32.c
15228 --- linux-2.6.32.45/arch/x86/kernel/i386_ksyms_32.c 2011-03-27 14:31:47.000000000 -0400
15229 +++ linux-2.6.32.45/arch/x86/kernel/i386_ksyms_32.c 2011-04-17 15:56:46.000000000 -0400
15230 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
15231 EXPORT_SYMBOL(cmpxchg8b_emu);
15232 #endif
15233
15234 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
15235 +
15236 /* Networking helper routines. */
15237 EXPORT_SYMBOL(csum_partial_copy_generic);
15238 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
15239 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
15240
15241 EXPORT_SYMBOL(__get_user_1);
15242 EXPORT_SYMBOL(__get_user_2);
15243 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
15244
15245 EXPORT_SYMBOL(csum_partial);
15246 EXPORT_SYMBOL(empty_zero_page);
15247 +
15248 +#ifdef CONFIG_PAX_KERNEXEC
15249 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
15250 +#endif
15251 diff -urNp linux-2.6.32.45/arch/x86/kernel/i8259.c linux-2.6.32.45/arch/x86/kernel/i8259.c
15252 --- linux-2.6.32.45/arch/x86/kernel/i8259.c 2011-03-27 14:31:47.000000000 -0400
15253 +++ linux-2.6.32.45/arch/x86/kernel/i8259.c 2011-05-04 17:56:28.000000000 -0400
15254 @@ -208,7 +208,7 @@ spurious_8259A_irq:
15255 "spurious 8259A interrupt: IRQ%d.\n", irq);
15256 spurious_irq_mask |= irqmask;
15257 }
15258 - atomic_inc(&irq_err_count);
15259 + atomic_inc_unchecked(&irq_err_count);
15260 /*
15261 * Theoretically we do not have to handle this IRQ,
15262 * but in Linux this does not cause problems and is
15263 diff -urNp linux-2.6.32.45/arch/x86/kernel/init_task.c linux-2.6.32.45/arch/x86/kernel/init_task.c
15264 --- linux-2.6.32.45/arch/x86/kernel/init_task.c 2011-03-27 14:31:47.000000000 -0400
15265 +++ linux-2.6.32.45/arch/x86/kernel/init_task.c 2011-04-17 15:56:46.000000000 -0400
15266 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
15267 * way process stacks are handled. This is done by having a special
15268 * "init_task" linker map entry..
15269 */
15270 -union thread_union init_thread_union __init_task_data =
15271 - { INIT_THREAD_INFO(init_task) };
15272 +union thread_union init_thread_union __init_task_data;
15273
15274 /*
15275 * Initial task structure.
15276 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
15277 * section. Since TSS's are completely CPU-local, we want them
15278 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
15279 */
15280 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
15281 -
15282 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
15283 +EXPORT_SYMBOL(init_tss);
15284 diff -urNp linux-2.6.32.45/arch/x86/kernel/ioport.c linux-2.6.32.45/arch/x86/kernel/ioport.c
15285 --- linux-2.6.32.45/arch/x86/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
15286 +++ linux-2.6.32.45/arch/x86/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
15287 @@ -6,6 +6,7 @@
15288 #include <linux/sched.h>
15289 #include <linux/kernel.h>
15290 #include <linux/capability.h>
15291 +#include <linux/security.h>
15292 #include <linux/errno.h>
15293 #include <linux/types.h>
15294 #include <linux/ioport.h>
15295 @@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long
15296
15297 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
15298 return -EINVAL;
15299 +#ifdef CONFIG_GRKERNSEC_IO
15300 + if (turn_on && grsec_disable_privio) {
15301 + gr_handle_ioperm();
15302 + return -EPERM;
15303 + }
15304 +#endif
15305 if (turn_on && !capable(CAP_SYS_RAWIO))
15306 return -EPERM;
15307
15308 @@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long
15309 * because the ->io_bitmap_max value must match the bitmap
15310 * contents:
15311 */
15312 - tss = &per_cpu(init_tss, get_cpu());
15313 + tss = init_tss + get_cpu();
15314
15315 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
15316
15317 @@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, s
15318 return -EINVAL;
15319 /* Trying to gain more privileges? */
15320 if (level > old) {
15321 +#ifdef CONFIG_GRKERNSEC_IO
15322 + if (grsec_disable_privio) {
15323 + gr_handle_iopl();
15324 + return -EPERM;
15325 + }
15326 +#endif
15327 if (!capable(CAP_SYS_RAWIO))
15328 return -EPERM;
15329 }
15330 diff -urNp linux-2.6.32.45/arch/x86/kernel/irq_32.c linux-2.6.32.45/arch/x86/kernel/irq_32.c
15331 --- linux-2.6.32.45/arch/x86/kernel/irq_32.c 2011-03-27 14:31:47.000000000 -0400
15332 +++ linux-2.6.32.45/arch/x86/kernel/irq_32.c 2011-07-06 19:53:33.000000000 -0400
15333 @@ -35,7 +35,7 @@ static int check_stack_overflow(void)
15334 __asm__ __volatile__("andl %%esp,%0" :
15335 "=r" (sp) : "0" (THREAD_SIZE - 1));
15336
15337 - return sp < (sizeof(struct thread_info) + STACK_WARN);
15338 + return sp < STACK_WARN;
15339 }
15340
15341 static void print_stack_overflow(void)
15342 @@ -54,9 +54,9 @@ static inline void print_stack_overflow(
15343 * per-CPU IRQ handling contexts (thread information and stack)
15344 */
15345 union irq_ctx {
15346 - struct thread_info tinfo;
15347 - u32 stack[THREAD_SIZE/sizeof(u32)];
15348 -} __attribute__((aligned(PAGE_SIZE)));
15349 + unsigned long previous_esp;
15350 + u32 stack[THREAD_SIZE/sizeof(u32)];
15351 +} __attribute__((aligned(THREAD_SIZE)));
15352
15353 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
15354 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
15355 @@ -78,10 +78,9 @@ static void call_on_stack(void *func, vo
15356 static inline int
15357 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15358 {
15359 - union irq_ctx *curctx, *irqctx;
15360 + union irq_ctx *irqctx;
15361 u32 *isp, arg1, arg2;
15362
15363 - curctx = (union irq_ctx *) current_thread_info();
15364 irqctx = __get_cpu_var(hardirq_ctx);
15365
15366 /*
15367 @@ -90,21 +89,16 @@ execute_on_irq_stack(int overflow, struc
15368 * handler) we can't do that and just have to keep using the
15369 * current stack (which is the irq stack already after all)
15370 */
15371 - if (unlikely(curctx == irqctx))
15372 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
15373 return 0;
15374
15375 /* build the stack frame on the IRQ stack */
15376 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15377 - irqctx->tinfo.task = curctx->tinfo.task;
15378 - irqctx->tinfo.previous_esp = current_stack_pointer;
15379 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15380 + irqctx->previous_esp = current_stack_pointer;
15381
15382 - /*
15383 - * Copy the softirq bits in preempt_count so that the
15384 - * softirq checks work in the hardirq context.
15385 - */
15386 - irqctx->tinfo.preempt_count =
15387 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
15388 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
15389 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15390 + __set_fs(MAKE_MM_SEG(0));
15391 +#endif
15392
15393 if (unlikely(overflow))
15394 call_on_stack(print_stack_overflow, isp);
15395 @@ -116,6 +110,11 @@ execute_on_irq_stack(int overflow, struc
15396 : "0" (irq), "1" (desc), "2" (isp),
15397 "D" (desc->handle_irq)
15398 : "memory", "cc", "ecx");
15399 +
15400 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15401 + __set_fs(current_thread_info()->addr_limit);
15402 +#endif
15403 +
15404 return 1;
15405 }
15406
15407 @@ -124,28 +123,11 @@ execute_on_irq_stack(int overflow, struc
15408 */
15409 void __cpuinit irq_ctx_init(int cpu)
15410 {
15411 - union irq_ctx *irqctx;
15412 -
15413 if (per_cpu(hardirq_ctx, cpu))
15414 return;
15415
15416 - irqctx = &per_cpu(hardirq_stack, cpu);
15417 - irqctx->tinfo.task = NULL;
15418 - irqctx->tinfo.exec_domain = NULL;
15419 - irqctx->tinfo.cpu = cpu;
15420 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
15421 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15422 -
15423 - per_cpu(hardirq_ctx, cpu) = irqctx;
15424 -
15425 - irqctx = &per_cpu(softirq_stack, cpu);
15426 - irqctx->tinfo.task = NULL;
15427 - irqctx->tinfo.exec_domain = NULL;
15428 - irqctx->tinfo.cpu = cpu;
15429 - irqctx->tinfo.preempt_count = 0;
15430 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15431 -
15432 - per_cpu(softirq_ctx, cpu) = irqctx;
15433 + per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
15434 + per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
15435
15436 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
15437 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
15438 @@ -159,7 +141,6 @@ void irq_ctx_exit(int cpu)
15439 asmlinkage void do_softirq(void)
15440 {
15441 unsigned long flags;
15442 - struct thread_info *curctx;
15443 union irq_ctx *irqctx;
15444 u32 *isp;
15445
15446 @@ -169,15 +150,22 @@ asmlinkage void do_softirq(void)
15447 local_irq_save(flags);
15448
15449 if (local_softirq_pending()) {
15450 - curctx = current_thread_info();
15451 irqctx = __get_cpu_var(softirq_ctx);
15452 - irqctx->tinfo.task = curctx->task;
15453 - irqctx->tinfo.previous_esp = current_stack_pointer;
15454 + irqctx->previous_esp = current_stack_pointer;
15455
15456 /* build the stack frame on the softirq stack */
15457 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15458 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15459 +
15460 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15461 + __set_fs(MAKE_MM_SEG(0));
15462 +#endif
15463
15464 call_on_stack(__do_softirq, isp);
15465 +
15466 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15467 + __set_fs(current_thread_info()->addr_limit);
15468 +#endif
15469 +
15470 /*
15471 * Shouldnt happen, we returned above if in_interrupt():
15472 */
15473 diff -urNp linux-2.6.32.45/arch/x86/kernel/irq.c linux-2.6.32.45/arch/x86/kernel/irq.c
15474 --- linux-2.6.32.45/arch/x86/kernel/irq.c 2011-03-27 14:31:47.000000000 -0400
15475 +++ linux-2.6.32.45/arch/x86/kernel/irq.c 2011-05-04 17:56:28.000000000 -0400
15476 @@ -15,7 +15,7 @@
15477 #include <asm/mce.h>
15478 #include <asm/hw_irq.h>
15479
15480 -atomic_t irq_err_count;
15481 +atomic_unchecked_t irq_err_count;
15482
15483 /* Function pointer for generic interrupt vector handling */
15484 void (*generic_interrupt_extension)(void) = NULL;
15485 @@ -114,9 +114,9 @@ static int show_other_interrupts(struct
15486 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
15487 seq_printf(p, " Machine check polls\n");
15488 #endif
15489 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
15490 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
15491 #if defined(CONFIG_X86_IO_APIC)
15492 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
15493 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
15494 #endif
15495 return 0;
15496 }
15497 @@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
15498
15499 u64 arch_irq_stat(void)
15500 {
15501 - u64 sum = atomic_read(&irq_err_count);
15502 + u64 sum = atomic_read_unchecked(&irq_err_count);
15503
15504 #ifdef CONFIG_X86_IO_APIC
15505 - sum += atomic_read(&irq_mis_count);
15506 + sum += atomic_read_unchecked(&irq_mis_count);
15507 #endif
15508 return sum;
15509 }
15510 diff -urNp linux-2.6.32.45/arch/x86/kernel/kgdb.c linux-2.6.32.45/arch/x86/kernel/kgdb.c
15511 --- linux-2.6.32.45/arch/x86/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
15512 +++ linux-2.6.32.45/arch/x86/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
15513 @@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vec
15514
15515 /* clear the trace bit */
15516 linux_regs->flags &= ~X86_EFLAGS_TF;
15517 - atomic_set(&kgdb_cpu_doing_single_step, -1);
15518 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
15519
15520 /* set the trace bit if we're stepping */
15521 if (remcomInBuffer[0] == 's') {
15522 linux_regs->flags |= X86_EFLAGS_TF;
15523 kgdb_single_step = 1;
15524 - atomic_set(&kgdb_cpu_doing_single_step,
15525 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
15526 raw_smp_processor_id());
15527 }
15528
15529 @@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args
15530 break;
15531
15532 case DIE_DEBUG:
15533 - if (atomic_read(&kgdb_cpu_doing_single_step) ==
15534 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
15535 raw_smp_processor_id()) {
15536 if (user_mode(regs))
15537 return single_step_cont(regs, args);
15538 @@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception
15539 return instruction_pointer(regs);
15540 }
15541
15542 -struct kgdb_arch arch_kgdb_ops = {
15543 +const struct kgdb_arch arch_kgdb_ops = {
15544 /* Breakpoint instruction: */
15545 .gdb_bpt_instr = { 0xcc },
15546 .flags = KGDB_HW_BREAKPOINT,
15547 diff -urNp linux-2.6.32.45/arch/x86/kernel/kprobes.c linux-2.6.32.45/arch/x86/kernel/kprobes.c
15548 --- linux-2.6.32.45/arch/x86/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
15549 +++ linux-2.6.32.45/arch/x86/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
15550 @@ -166,9 +166,13 @@ static void __kprobes set_jmp_op(void *f
15551 char op;
15552 s32 raddr;
15553 } __attribute__((packed)) * jop;
15554 - jop = (struct __arch_jmp_op *)from;
15555 +
15556 + jop = (struct __arch_jmp_op *)(ktla_ktva(from));
15557 +
15558 + pax_open_kernel();
15559 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
15560 jop->op = RELATIVEJUMP_INSTRUCTION;
15561 + pax_close_kernel();
15562 }
15563
15564 /*
15565 @@ -193,7 +197,7 @@ static int __kprobes can_boost(kprobe_op
15566 kprobe_opcode_t opcode;
15567 kprobe_opcode_t *orig_opcodes = opcodes;
15568
15569 - if (search_exception_tables((unsigned long)opcodes))
15570 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
15571 return 0; /* Page fault may occur on this address. */
15572
15573 retry:
15574 @@ -337,7 +341,9 @@ static void __kprobes fix_riprel(struct
15575 disp = (u8 *) p->addr + *((s32 *) insn) -
15576 (u8 *) p->ainsn.insn;
15577 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
15578 + pax_open_kernel();
15579 *(s32 *)insn = (s32) disp;
15580 + pax_close_kernel();
15581 }
15582 }
15583 #endif
15584 @@ -345,16 +351,18 @@ static void __kprobes fix_riprel(struct
15585
15586 static void __kprobes arch_copy_kprobe(struct kprobe *p)
15587 {
15588 - memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
15589 + pax_open_kernel();
15590 + memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
15591 + pax_close_kernel();
15592
15593 fix_riprel(p);
15594
15595 - if (can_boost(p->addr))
15596 + if (can_boost(ktla_ktva(p->addr)))
15597 p->ainsn.boostable = 0;
15598 else
15599 p->ainsn.boostable = -1;
15600
15601 - p->opcode = *p->addr;
15602 + p->opcode = *(ktla_ktva(p->addr));
15603 }
15604
15605 int __kprobes arch_prepare_kprobe(struct kprobe *p)
15606 @@ -432,7 +440,7 @@ static void __kprobes prepare_singlestep
15607 if (p->opcode == BREAKPOINT_INSTRUCTION)
15608 regs->ip = (unsigned long)p->addr;
15609 else
15610 - regs->ip = (unsigned long)p->ainsn.insn;
15611 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15612 }
15613
15614 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
15615 @@ -453,7 +461,7 @@ static void __kprobes setup_singlestep(s
15616 if (p->ainsn.boostable == 1 && !p->post_handler) {
15617 /* Boost up -- we can execute copied instructions directly */
15618 reset_current_kprobe();
15619 - regs->ip = (unsigned long)p->ainsn.insn;
15620 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15621 preempt_enable_no_resched();
15622 return;
15623 }
15624 @@ -523,7 +531,7 @@ static int __kprobes kprobe_handler(stru
15625 struct kprobe_ctlblk *kcb;
15626
15627 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
15628 - if (*addr != BREAKPOINT_INSTRUCTION) {
15629 + if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
15630 /*
15631 * The breakpoint instruction was removed right
15632 * after we hit it. Another cpu has removed
15633 @@ -775,7 +783,7 @@ static void __kprobes resume_execution(s
15634 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
15635 {
15636 unsigned long *tos = stack_addr(regs);
15637 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
15638 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
15639 unsigned long orig_ip = (unsigned long)p->addr;
15640 kprobe_opcode_t *insn = p->ainsn.insn;
15641
15642 @@ -958,7 +966,7 @@ int __kprobes kprobe_exceptions_notify(s
15643 struct die_args *args = data;
15644 int ret = NOTIFY_DONE;
15645
15646 - if (args->regs && user_mode_vm(args->regs))
15647 + if (args->regs && user_mode(args->regs))
15648 return ret;
15649
15650 switch (val) {
15651 diff -urNp linux-2.6.32.45/arch/x86/kernel/ldt.c linux-2.6.32.45/arch/x86/kernel/ldt.c
15652 --- linux-2.6.32.45/arch/x86/kernel/ldt.c 2011-03-27 14:31:47.000000000 -0400
15653 +++ linux-2.6.32.45/arch/x86/kernel/ldt.c 2011-04-17 15:56:46.000000000 -0400
15654 @@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, i
15655 if (reload) {
15656 #ifdef CONFIG_SMP
15657 preempt_disable();
15658 - load_LDT(pc);
15659 + load_LDT_nolock(pc);
15660 if (!cpumask_equal(mm_cpumask(current->mm),
15661 cpumask_of(smp_processor_id())))
15662 smp_call_function(flush_ldt, current->mm, 1);
15663 preempt_enable();
15664 #else
15665 - load_LDT(pc);
15666 + load_LDT_nolock(pc);
15667 #endif
15668 }
15669 if (oldsize) {
15670 @@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t
15671 return err;
15672
15673 for (i = 0; i < old->size; i++)
15674 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
15675 + write_ldt_entry(new->ldt, i, old->ldt + i);
15676 return 0;
15677 }
15678
15679 @@ -115,6 +115,24 @@ int init_new_context(struct task_struct
15680 retval = copy_ldt(&mm->context, &old_mm->context);
15681 mutex_unlock(&old_mm->context.lock);
15682 }
15683 +
15684 + if (tsk == current) {
15685 + mm->context.vdso = 0;
15686 +
15687 +#ifdef CONFIG_X86_32
15688 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
15689 + mm->context.user_cs_base = 0UL;
15690 + mm->context.user_cs_limit = ~0UL;
15691 +
15692 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15693 + cpus_clear(mm->context.cpu_user_cs_mask);
15694 +#endif
15695 +
15696 +#endif
15697 +#endif
15698 +
15699 + }
15700 +
15701 return retval;
15702 }
15703
15704 @@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, u
15705 }
15706 }
15707
15708 +#ifdef CONFIG_PAX_SEGMEXEC
15709 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
15710 + error = -EINVAL;
15711 + goto out_unlock;
15712 + }
15713 +#endif
15714 +
15715 fill_ldt(&ldt, &ldt_info);
15716 if (oldmode)
15717 ldt.avl = 0;
15718 diff -urNp linux-2.6.32.45/arch/x86/kernel/machine_kexec_32.c linux-2.6.32.45/arch/x86/kernel/machine_kexec_32.c
15719 --- linux-2.6.32.45/arch/x86/kernel/machine_kexec_32.c 2011-03-27 14:31:47.000000000 -0400
15720 +++ linux-2.6.32.45/arch/x86/kernel/machine_kexec_32.c 2011-04-17 15:56:46.000000000 -0400
15721 @@ -26,7 +26,7 @@
15722 #include <asm/system.h>
15723 #include <asm/cacheflush.h>
15724
15725 -static void set_idt(void *newidt, __u16 limit)
15726 +static void set_idt(struct desc_struct *newidt, __u16 limit)
15727 {
15728 struct desc_ptr curidt;
15729
15730 @@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16
15731 }
15732
15733
15734 -static void set_gdt(void *newgdt, __u16 limit)
15735 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
15736 {
15737 struct desc_ptr curgdt;
15738
15739 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
15740 }
15741
15742 control_page = page_address(image->control_code_page);
15743 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
15744 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
15745
15746 relocate_kernel_ptr = control_page;
15747 page_list[PA_CONTROL_PAGE] = __pa(control_page);
15748 diff -urNp linux-2.6.32.45/arch/x86/kernel/microcode_amd.c linux-2.6.32.45/arch/x86/kernel/microcode_amd.c
15749 --- linux-2.6.32.45/arch/x86/kernel/microcode_amd.c 2011-04-17 17:00:52.000000000 -0400
15750 +++ linux-2.6.32.45/arch/x86/kernel/microcode_amd.c 2011-04-17 17:03:05.000000000 -0400
15751 @@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int c
15752 uci->mc = NULL;
15753 }
15754
15755 -static struct microcode_ops microcode_amd_ops = {
15756 +static const struct microcode_ops microcode_amd_ops = {
15757 .request_microcode_user = request_microcode_user,
15758 .request_microcode_fw = request_microcode_fw,
15759 .collect_cpu_info = collect_cpu_info_amd,
15760 @@ -372,7 +372,7 @@ static struct microcode_ops microcode_am
15761 .microcode_fini_cpu = microcode_fini_cpu_amd,
15762 };
15763
15764 -struct microcode_ops * __init init_amd_microcode(void)
15765 +const struct microcode_ops * __init init_amd_microcode(void)
15766 {
15767 return &microcode_amd_ops;
15768 }
15769 diff -urNp linux-2.6.32.45/arch/x86/kernel/microcode_core.c linux-2.6.32.45/arch/x86/kernel/microcode_core.c
15770 --- linux-2.6.32.45/arch/x86/kernel/microcode_core.c 2011-03-27 14:31:47.000000000 -0400
15771 +++ linux-2.6.32.45/arch/x86/kernel/microcode_core.c 2011-04-17 15:56:46.000000000 -0400
15772 @@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
15773
15774 #define MICROCODE_VERSION "2.00"
15775
15776 -static struct microcode_ops *microcode_ops;
15777 +static const struct microcode_ops *microcode_ops;
15778
15779 /*
15780 * Synchronization.
15781 diff -urNp linux-2.6.32.45/arch/x86/kernel/microcode_intel.c linux-2.6.32.45/arch/x86/kernel/microcode_intel.c
15782 --- linux-2.6.32.45/arch/x86/kernel/microcode_intel.c 2011-03-27 14:31:47.000000000 -0400
15783 +++ linux-2.6.32.45/arch/x86/kernel/microcode_intel.c 2011-04-17 15:56:46.000000000 -0400
15784 @@ -443,13 +443,13 @@ static enum ucode_state request_microcod
15785
15786 static int get_ucode_user(void *to, const void *from, size_t n)
15787 {
15788 - return copy_from_user(to, from, n);
15789 + return copy_from_user(to, (__force const void __user *)from, n);
15790 }
15791
15792 static enum ucode_state
15793 request_microcode_user(int cpu, const void __user *buf, size_t size)
15794 {
15795 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
15796 + return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
15797 }
15798
15799 static void microcode_fini_cpu(int cpu)
15800 @@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
15801 uci->mc = NULL;
15802 }
15803
15804 -static struct microcode_ops microcode_intel_ops = {
15805 +static const struct microcode_ops microcode_intel_ops = {
15806 .request_microcode_user = request_microcode_user,
15807 .request_microcode_fw = request_microcode_fw,
15808 .collect_cpu_info = collect_cpu_info,
15809 @@ -468,7 +468,7 @@ static struct microcode_ops microcode_in
15810 .microcode_fini_cpu = microcode_fini_cpu,
15811 };
15812
15813 -struct microcode_ops * __init init_intel_microcode(void)
15814 +const struct microcode_ops * __init init_intel_microcode(void)
15815 {
15816 return &microcode_intel_ops;
15817 }
15818 diff -urNp linux-2.6.32.45/arch/x86/kernel/module.c linux-2.6.32.45/arch/x86/kernel/module.c
15819 --- linux-2.6.32.45/arch/x86/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
15820 +++ linux-2.6.32.45/arch/x86/kernel/module.c 2011-04-17 15:56:46.000000000 -0400
15821 @@ -34,7 +34,7 @@
15822 #define DEBUGP(fmt...)
15823 #endif
15824
15825 -void *module_alloc(unsigned long size)
15826 +static void *__module_alloc(unsigned long size, pgprot_t prot)
15827 {
15828 struct vm_struct *area;
15829
15830 @@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
15831 if (!area)
15832 return NULL;
15833
15834 - return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
15835 - PAGE_KERNEL_EXEC);
15836 + return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
15837 +}
15838 +
15839 +void *module_alloc(unsigned long size)
15840 +{
15841 +
15842 +#ifdef CONFIG_PAX_KERNEXEC
15843 + return __module_alloc(size, PAGE_KERNEL);
15844 +#else
15845 + return __module_alloc(size, PAGE_KERNEL_EXEC);
15846 +#endif
15847 +
15848 }
15849
15850 /* Free memory returned from module_alloc */
15851 @@ -58,6 +68,40 @@ void module_free(struct module *mod, voi
15852 vfree(module_region);
15853 }
15854
15855 +#ifdef CONFIG_PAX_KERNEXEC
15856 +#ifdef CONFIG_X86_32
15857 +void *module_alloc_exec(unsigned long size)
15858 +{
15859 + struct vm_struct *area;
15860 +
15861 + if (size == 0)
15862 + return NULL;
15863 +
15864 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
15865 + return area ? area->addr : NULL;
15866 +}
15867 +EXPORT_SYMBOL(module_alloc_exec);
15868 +
15869 +void module_free_exec(struct module *mod, void *module_region)
15870 +{
15871 + vunmap(module_region);
15872 +}
15873 +EXPORT_SYMBOL(module_free_exec);
15874 +#else
15875 +void module_free_exec(struct module *mod, void *module_region)
15876 +{
15877 + module_free(mod, module_region);
15878 +}
15879 +EXPORT_SYMBOL(module_free_exec);
15880 +
15881 +void *module_alloc_exec(unsigned long size)
15882 +{
15883 + return __module_alloc(size, PAGE_KERNEL_RX);
15884 +}
15885 +EXPORT_SYMBOL(module_alloc_exec);
15886 +#endif
15887 +#endif
15888 +
15889 /* We don't need anything special. */
15890 int module_frob_arch_sections(Elf_Ehdr *hdr,
15891 Elf_Shdr *sechdrs,
15892 @@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15893 unsigned int i;
15894 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
15895 Elf32_Sym *sym;
15896 - uint32_t *location;
15897 + uint32_t *plocation, location;
15898
15899 DEBUGP("Applying relocate section %u to %u\n", relsec,
15900 sechdrs[relsec].sh_info);
15901 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
15902 /* This is where to make the change */
15903 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
15904 - + rel[i].r_offset;
15905 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
15906 + location = (uint32_t)plocation;
15907 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
15908 + plocation = ktla_ktva((void *)plocation);
15909 /* This is the symbol it is referring to. Note that all
15910 undefined symbols have been resolved. */
15911 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
15912 @@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15913 switch (ELF32_R_TYPE(rel[i].r_info)) {
15914 case R_386_32:
15915 /* We add the value into the location given */
15916 - *location += sym->st_value;
15917 + pax_open_kernel();
15918 + *plocation += sym->st_value;
15919 + pax_close_kernel();
15920 break;
15921 case R_386_PC32:
15922 /* Add the value, subtract its postition */
15923 - *location += sym->st_value - (uint32_t)location;
15924 + pax_open_kernel();
15925 + *plocation += sym->st_value - location;
15926 + pax_close_kernel();
15927 break;
15928 default:
15929 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
15930 @@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
15931 case R_X86_64_NONE:
15932 break;
15933 case R_X86_64_64:
15934 + pax_open_kernel();
15935 *(u64 *)loc = val;
15936 + pax_close_kernel();
15937 break;
15938 case R_X86_64_32:
15939 + pax_open_kernel();
15940 *(u32 *)loc = val;
15941 + pax_close_kernel();
15942 if (val != *(u32 *)loc)
15943 goto overflow;
15944 break;
15945 case R_X86_64_32S:
15946 + pax_open_kernel();
15947 *(s32 *)loc = val;
15948 + pax_close_kernel();
15949 if ((s64)val != *(s32 *)loc)
15950 goto overflow;
15951 break;
15952 case R_X86_64_PC32:
15953 val -= (u64)loc;
15954 + pax_open_kernel();
15955 *(u32 *)loc = val;
15956 + pax_close_kernel();
15957 +
15958 #if 0
15959 if ((s64)val != *(s32 *)loc)
15960 goto overflow;
15961 diff -urNp linux-2.6.32.45/arch/x86/kernel/paravirt.c linux-2.6.32.45/arch/x86/kernel/paravirt.c
15962 --- linux-2.6.32.45/arch/x86/kernel/paravirt.c 2011-03-27 14:31:47.000000000 -0400
15963 +++ linux-2.6.32.45/arch/x86/kernel/paravirt.c 2011-08-23 20:24:19.000000000 -0400
15964 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
15965 {
15966 return x;
15967 }
15968 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
15969 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
15970 +#endif
15971
15972 void __init default_banner(void)
15973 {
15974 @@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
15975 * corresponding structure. */
15976 static void *get_call_destination(u8 type)
15977 {
15978 - struct paravirt_patch_template tmpl = {
15979 + const struct paravirt_patch_template tmpl = {
15980 .pv_init_ops = pv_init_ops,
15981 .pv_time_ops = pv_time_ops,
15982 .pv_cpu_ops = pv_cpu_ops,
15983 @@ -133,6 +136,8 @@ static void *get_call_destination(u8 typ
15984 .pv_lock_ops = pv_lock_ops,
15985 #endif
15986 };
15987 +
15988 + pax_track_stack();
15989 return *((void **)&tmpl + type);
15990 }
15991
15992 @@ -145,15 +150,19 @@ unsigned paravirt_patch_default(u8 type,
15993 if (opfunc == NULL)
15994 /* If there's no function, patch it with a ud2a (BUG) */
15995 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
15996 - else if (opfunc == _paravirt_nop)
15997 + else if (opfunc == (void *)_paravirt_nop)
15998 /* If the operation is a nop, then nop the callsite */
15999 ret = paravirt_patch_nop();
16000
16001 /* identity functions just return their single argument */
16002 - else if (opfunc == _paravirt_ident_32)
16003 + else if (opfunc == (void *)_paravirt_ident_32)
16004 ret = paravirt_patch_ident_32(insnbuf, len);
16005 - else if (opfunc == _paravirt_ident_64)
16006 + else if (opfunc == (void *)_paravirt_ident_64)
16007 + ret = paravirt_patch_ident_64(insnbuf, len);
16008 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
16009 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
16010 ret = paravirt_patch_ident_64(insnbuf, len);
16011 +#endif
16012
16013 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
16014 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
16015 @@ -178,7 +187,7 @@ unsigned paravirt_patch_insns(void *insn
16016 if (insn_len > len || start == NULL)
16017 insn_len = len;
16018 else
16019 - memcpy(insnbuf, start, insn_len);
16020 + memcpy(insnbuf, ktla_ktva(start), insn_len);
16021
16022 return insn_len;
16023 }
16024 @@ -294,22 +303,22 @@ void arch_flush_lazy_mmu_mode(void)
16025 preempt_enable();
16026 }
16027
16028 -struct pv_info pv_info = {
16029 +struct pv_info pv_info __read_only = {
16030 .name = "bare hardware",
16031 .paravirt_enabled = 0,
16032 .kernel_rpl = 0,
16033 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
16034 };
16035
16036 -struct pv_init_ops pv_init_ops = {
16037 +struct pv_init_ops pv_init_ops __read_only = {
16038 .patch = native_patch,
16039 };
16040
16041 -struct pv_time_ops pv_time_ops = {
16042 +struct pv_time_ops pv_time_ops __read_only = {
16043 .sched_clock = native_sched_clock,
16044 };
16045
16046 -struct pv_irq_ops pv_irq_ops = {
16047 +struct pv_irq_ops pv_irq_ops __read_only = {
16048 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
16049 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
16050 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
16051 @@ -321,7 +330,7 @@ struct pv_irq_ops pv_irq_ops = {
16052 #endif
16053 };
16054
16055 -struct pv_cpu_ops pv_cpu_ops = {
16056 +struct pv_cpu_ops pv_cpu_ops __read_only = {
16057 .cpuid = native_cpuid,
16058 .get_debugreg = native_get_debugreg,
16059 .set_debugreg = native_set_debugreg,
16060 @@ -382,21 +391,26 @@ struct pv_cpu_ops pv_cpu_ops = {
16061 .end_context_switch = paravirt_nop,
16062 };
16063
16064 -struct pv_apic_ops pv_apic_ops = {
16065 +struct pv_apic_ops pv_apic_ops __read_only = {
16066 #ifdef CONFIG_X86_LOCAL_APIC
16067 .startup_ipi_hook = paravirt_nop,
16068 #endif
16069 };
16070
16071 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
16072 +#ifdef CONFIG_X86_32
16073 +#ifdef CONFIG_X86_PAE
16074 +/* 64-bit pagetable entries */
16075 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
16076 +#else
16077 /* 32-bit pagetable entries */
16078 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
16079 +#endif
16080 #else
16081 /* 64-bit pagetable entries */
16082 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
16083 #endif
16084
16085 -struct pv_mmu_ops pv_mmu_ops = {
16086 +struct pv_mmu_ops pv_mmu_ops __read_only = {
16087
16088 .read_cr2 = native_read_cr2,
16089 .write_cr2 = native_write_cr2,
16090 @@ -448,6 +462,7 @@ struct pv_mmu_ops pv_mmu_ops = {
16091 .make_pud = PTE_IDENT,
16092
16093 .set_pgd = native_set_pgd,
16094 + .set_pgd_batched = native_set_pgd_batched,
16095 #endif
16096 #endif /* PAGETABLE_LEVELS >= 3 */
16097
16098 @@ -467,6 +482,12 @@ struct pv_mmu_ops pv_mmu_ops = {
16099 },
16100
16101 .set_fixmap = native_set_fixmap,
16102 +
16103 +#ifdef CONFIG_PAX_KERNEXEC
16104 + .pax_open_kernel = native_pax_open_kernel,
16105 + .pax_close_kernel = native_pax_close_kernel,
16106 +#endif
16107 +
16108 };
16109
16110 EXPORT_SYMBOL_GPL(pv_time_ops);
16111 diff -urNp linux-2.6.32.45/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.32.45/arch/x86/kernel/paravirt-spinlocks.c
16112 --- linux-2.6.32.45/arch/x86/kernel/paravirt-spinlocks.c 2011-03-27 14:31:47.000000000 -0400
16113 +++ linux-2.6.32.45/arch/x86/kernel/paravirt-spinlocks.c 2011-04-17 15:56:46.000000000 -0400
16114 @@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *
16115 __raw_spin_lock(lock);
16116 }
16117
16118 -struct pv_lock_ops pv_lock_ops = {
16119 +struct pv_lock_ops pv_lock_ops __read_only = {
16120 #ifdef CONFIG_SMP
16121 .spin_is_locked = __ticket_spin_is_locked,
16122 .spin_is_contended = __ticket_spin_is_contended,
16123 diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-calgary_64.c linux-2.6.32.45/arch/x86/kernel/pci-calgary_64.c
16124 --- linux-2.6.32.45/arch/x86/kernel/pci-calgary_64.c 2011-03-27 14:31:47.000000000 -0400
16125 +++ linux-2.6.32.45/arch/x86/kernel/pci-calgary_64.c 2011-04-17 15:56:46.000000000 -0400
16126 @@ -477,7 +477,7 @@ static void calgary_free_coherent(struct
16127 free_pages((unsigned long)vaddr, get_order(size));
16128 }
16129
16130 -static struct dma_map_ops calgary_dma_ops = {
16131 +static const struct dma_map_ops calgary_dma_ops = {
16132 .alloc_coherent = calgary_alloc_coherent,
16133 .free_coherent = calgary_free_coherent,
16134 .map_sg = calgary_map_sg,
16135 diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-dma.c linux-2.6.32.45/arch/x86/kernel/pci-dma.c
16136 --- linux-2.6.32.45/arch/x86/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
16137 +++ linux-2.6.32.45/arch/x86/kernel/pci-dma.c 2011-04-17 15:56:46.000000000 -0400
16138 @@ -14,7 +14,7 @@
16139
16140 static int forbid_dac __read_mostly;
16141
16142 -struct dma_map_ops *dma_ops;
16143 +const struct dma_map_ops *dma_ops;
16144 EXPORT_SYMBOL(dma_ops);
16145
16146 static int iommu_sac_force __read_mostly;
16147 @@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
16148
16149 int dma_supported(struct device *dev, u64 mask)
16150 {
16151 - struct dma_map_ops *ops = get_dma_ops(dev);
16152 + const struct dma_map_ops *ops = get_dma_ops(dev);
16153
16154 #ifdef CONFIG_PCI
16155 if (mask > 0xffffffff && forbid_dac > 0) {
16156 diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-gart_64.c linux-2.6.32.45/arch/x86/kernel/pci-gart_64.c
16157 --- linux-2.6.32.45/arch/x86/kernel/pci-gart_64.c 2011-03-27 14:31:47.000000000 -0400
16158 +++ linux-2.6.32.45/arch/x86/kernel/pci-gart_64.c 2011-04-17 15:56:46.000000000 -0400
16159 @@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct ag
16160 return -1;
16161 }
16162
16163 -static struct dma_map_ops gart_dma_ops = {
16164 +static const struct dma_map_ops gart_dma_ops = {
16165 .map_sg = gart_map_sg,
16166 .unmap_sg = gart_unmap_sg,
16167 .map_page = gart_map_page,
16168 diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-nommu.c linux-2.6.32.45/arch/x86/kernel/pci-nommu.c
16169 --- linux-2.6.32.45/arch/x86/kernel/pci-nommu.c 2011-03-27 14:31:47.000000000 -0400
16170 +++ linux-2.6.32.45/arch/x86/kernel/pci-nommu.c 2011-04-17 15:56:46.000000000 -0400
16171 @@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(str
16172 flush_write_buffers();
16173 }
16174
16175 -struct dma_map_ops nommu_dma_ops = {
16176 +const struct dma_map_ops nommu_dma_ops = {
16177 .alloc_coherent = dma_generic_alloc_coherent,
16178 .free_coherent = nommu_free_coherent,
16179 .map_sg = nommu_map_sg,
16180 diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-swiotlb.c linux-2.6.32.45/arch/x86/kernel/pci-swiotlb.c
16181 --- linux-2.6.32.45/arch/x86/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
16182 +++ linux-2.6.32.45/arch/x86/kernel/pci-swiotlb.c 2011-04-17 15:56:46.000000000 -0400
16183 @@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(
16184 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
16185 }
16186
16187 -static struct dma_map_ops swiotlb_dma_ops = {
16188 +static const struct dma_map_ops swiotlb_dma_ops = {
16189 .mapping_error = swiotlb_dma_mapping_error,
16190 .alloc_coherent = x86_swiotlb_alloc_coherent,
16191 .free_coherent = swiotlb_free_coherent,
16192 diff -urNp linux-2.6.32.45/arch/x86/kernel/process_32.c linux-2.6.32.45/arch/x86/kernel/process_32.c
16193 --- linux-2.6.32.45/arch/x86/kernel/process_32.c 2011-06-25 12:55:34.000000000 -0400
16194 +++ linux-2.6.32.45/arch/x86/kernel/process_32.c 2011-06-25 12:56:37.000000000 -0400
16195 @@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __as
16196 unsigned long thread_saved_pc(struct task_struct *tsk)
16197 {
16198 return ((unsigned long *)tsk->thread.sp)[3];
16199 +//XXX return tsk->thread.eip;
16200 }
16201
16202 #ifndef CONFIG_SMP
16203 @@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, i
16204 unsigned short ss, gs;
16205 const char *board;
16206
16207 - if (user_mode_vm(regs)) {
16208 + if (user_mode(regs)) {
16209 sp = regs->sp;
16210 ss = regs->ss & 0xffff;
16211 - gs = get_user_gs(regs);
16212 } else {
16213 sp = (unsigned long) (&regs->sp);
16214 savesegment(ss, ss);
16215 - savesegment(gs, gs);
16216 }
16217 + gs = get_user_gs(regs);
16218
16219 printk("\n");
16220
16221 @@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), voi
16222 regs.bx = (unsigned long) fn;
16223 regs.dx = (unsigned long) arg;
16224
16225 - regs.ds = __USER_DS;
16226 - regs.es = __USER_DS;
16227 + regs.ds = __KERNEL_DS;
16228 + regs.es = __KERNEL_DS;
16229 regs.fs = __KERNEL_PERCPU;
16230 - regs.gs = __KERNEL_STACK_CANARY;
16231 + savesegment(gs, regs.gs);
16232 regs.orig_ax = -1;
16233 regs.ip = (unsigned long) kernel_thread_helper;
16234 regs.cs = __KERNEL_CS | get_kernel_rpl();
16235 @@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flag
16236 struct task_struct *tsk;
16237 int err;
16238
16239 - childregs = task_pt_regs(p);
16240 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
16241 *childregs = *regs;
16242 childregs->ax = 0;
16243 childregs->sp = sp;
16244
16245 p->thread.sp = (unsigned long) childregs;
16246 p->thread.sp0 = (unsigned long) (childregs+1);
16247 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16248
16249 p->thread.ip = (unsigned long) ret_from_fork;
16250
16251 @@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p,
16252 struct thread_struct *prev = &prev_p->thread,
16253 *next = &next_p->thread;
16254 int cpu = smp_processor_id();
16255 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
16256 + struct tss_struct *tss = init_tss + cpu;
16257 bool preload_fpu;
16258
16259 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
16260 @@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p,
16261 */
16262 lazy_save_gs(prev->gs);
16263
16264 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16265 + __set_fs(task_thread_info(next_p)->addr_limit);
16266 +#endif
16267 +
16268 /*
16269 * Load the per-thread Thread-Local Storage descriptor.
16270 */
16271 @@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p,
16272 */
16273 arch_end_context_switch(next_p);
16274
16275 + percpu_write(current_task, next_p);
16276 + percpu_write(current_tinfo, &next_p->tinfo);
16277 +
16278 if (preload_fpu)
16279 __math_state_restore();
16280
16281 @@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p,
16282 if (prev->gs | next->gs)
16283 lazy_load_gs(next->gs);
16284
16285 - percpu_write(current_task, next_p);
16286 -
16287 return prev_p;
16288 }
16289
16290 @@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_stru
16291 } while (count++ < 16);
16292 return 0;
16293 }
16294 -
16295 diff -urNp linux-2.6.32.45/arch/x86/kernel/process_64.c linux-2.6.32.45/arch/x86/kernel/process_64.c
16296 --- linux-2.6.32.45/arch/x86/kernel/process_64.c 2011-06-25 12:55:34.000000000 -0400
16297 +++ linux-2.6.32.45/arch/x86/kernel/process_64.c 2011-06-25 12:56:37.000000000 -0400
16298 @@ -91,7 +91,7 @@ static void __exit_idle(void)
16299 void exit_idle(void)
16300 {
16301 /* idle loop has pid 0 */
16302 - if (current->pid)
16303 + if (task_pid_nr(current))
16304 return;
16305 __exit_idle();
16306 }
16307 @@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, i
16308 if (!board)
16309 board = "";
16310 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
16311 - current->pid, current->comm, print_tainted(),
16312 + task_pid_nr(current), current->comm, print_tainted(),
16313 init_utsname()->release,
16314 (int)strcspn(init_utsname()->version, " "),
16315 init_utsname()->version, board);
16316 @@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flag
16317 struct pt_regs *childregs;
16318 struct task_struct *me = current;
16319
16320 - childregs = ((struct pt_regs *)
16321 - (THREAD_SIZE + task_stack_page(p))) - 1;
16322 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
16323 *childregs = *regs;
16324
16325 childregs->ax = 0;
16326 @@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flag
16327 p->thread.sp = (unsigned long) childregs;
16328 p->thread.sp0 = (unsigned long) (childregs+1);
16329 p->thread.usersp = me->thread.usersp;
16330 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16331
16332 set_tsk_thread_flag(p, TIF_FORK);
16333
16334 @@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p,
16335 struct thread_struct *prev = &prev_p->thread;
16336 struct thread_struct *next = &next_p->thread;
16337 int cpu = smp_processor_id();
16338 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
16339 + struct tss_struct *tss = init_tss + cpu;
16340 unsigned fsindex, gsindex;
16341 bool preload_fpu;
16342
16343 @@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p,
16344 prev->usersp = percpu_read(old_rsp);
16345 percpu_write(old_rsp, next->usersp);
16346 percpu_write(current_task, next_p);
16347 + percpu_write(current_tinfo, &next_p->tinfo);
16348
16349 - percpu_write(kernel_stack,
16350 - (unsigned long)task_stack_page(next_p) +
16351 - THREAD_SIZE - KERNEL_STACK_OFFSET);
16352 + percpu_write(kernel_stack, next->sp0);
16353
16354 /*
16355 * Now maybe reload the debug registers and handle I/O bitmaps
16356 @@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_stru
16357 if (!p || p == current || p->state == TASK_RUNNING)
16358 return 0;
16359 stack = (unsigned long)task_stack_page(p);
16360 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
16361 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
16362 return 0;
16363 fp = *(u64 *)(p->thread.sp);
16364 do {
16365 - if (fp < (unsigned long)stack ||
16366 - fp >= (unsigned long)stack+THREAD_SIZE)
16367 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
16368 return 0;
16369 ip = *(u64 *)(fp+8);
16370 if (!in_sched_functions(ip))
16371 diff -urNp linux-2.6.32.45/arch/x86/kernel/process.c linux-2.6.32.45/arch/x86/kernel/process.c
16372 --- linux-2.6.32.45/arch/x86/kernel/process.c 2011-04-22 19:16:29.000000000 -0400
16373 +++ linux-2.6.32.45/arch/x86/kernel/process.c 2011-05-22 23:02:03.000000000 -0400
16374 @@ -51,16 +51,33 @@ void free_thread_xstate(struct task_stru
16375
16376 void free_thread_info(struct thread_info *ti)
16377 {
16378 - free_thread_xstate(ti->task);
16379 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
16380 }
16381
16382 +static struct kmem_cache *task_struct_cachep;
16383 +
16384 void arch_task_cache_init(void)
16385 {
16386 - task_xstate_cachep =
16387 - kmem_cache_create("task_xstate", xstate_size,
16388 + /* create a slab on which task_structs can be allocated */
16389 + task_struct_cachep =
16390 + kmem_cache_create("task_struct", sizeof(struct task_struct),
16391 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
16392 +
16393 + task_xstate_cachep =
16394 + kmem_cache_create("task_xstate", xstate_size,
16395 __alignof__(union thread_xstate),
16396 - SLAB_PANIC | SLAB_NOTRACK, NULL);
16397 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
16398 +}
16399 +
16400 +struct task_struct *alloc_task_struct(void)
16401 +{
16402 + return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
16403 +}
16404 +
16405 +void free_task_struct(struct task_struct *task)
16406 +{
16407 + free_thread_xstate(task);
16408 + kmem_cache_free(task_struct_cachep, task);
16409 }
16410
16411 /*
16412 @@ -73,7 +90,7 @@ void exit_thread(void)
16413 unsigned long *bp = t->io_bitmap_ptr;
16414
16415 if (bp) {
16416 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
16417 + struct tss_struct *tss = init_tss + get_cpu();
16418
16419 t->io_bitmap_ptr = NULL;
16420 clear_thread_flag(TIF_IO_BITMAP);
16421 @@ -93,6 +110,9 @@ void flush_thread(void)
16422
16423 clear_tsk_thread_flag(tsk, TIF_DEBUG);
16424
16425 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
16426 + loadsegment(gs, 0);
16427 +#endif
16428 tsk->thread.debugreg0 = 0;
16429 tsk->thread.debugreg1 = 0;
16430 tsk->thread.debugreg2 = 0;
16431 @@ -307,7 +327,7 @@ void default_idle(void)
16432 EXPORT_SYMBOL(default_idle);
16433 #endif
16434
16435 -void stop_this_cpu(void *dummy)
16436 +__noreturn void stop_this_cpu(void *dummy)
16437 {
16438 local_irq_disable();
16439 /*
16440 @@ -568,16 +588,35 @@ static int __init idle_setup(char *str)
16441 }
16442 early_param("idle", idle_setup);
16443
16444 -unsigned long arch_align_stack(unsigned long sp)
16445 +#ifdef CONFIG_PAX_RANDKSTACK
16446 +asmlinkage void pax_randomize_kstack(void)
16447 {
16448 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
16449 - sp -= get_random_int() % 8192;
16450 - return sp & ~0xf;
16451 -}
16452 + struct thread_struct *thread = &current->thread;
16453 + unsigned long time;
16454
16455 -unsigned long arch_randomize_brk(struct mm_struct *mm)
16456 -{
16457 - unsigned long range_end = mm->brk + 0x02000000;
16458 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
16459 + if (!randomize_va_space)
16460 + return;
16461 +
16462 + rdtscl(time);
16463 +
16464 + /* P4 seems to return a 0 LSB, ignore it */
16465 +#ifdef CONFIG_MPENTIUM4
16466 + time &= 0x3EUL;
16467 + time <<= 2;
16468 +#elif defined(CONFIG_X86_64)
16469 + time &= 0xFUL;
16470 + time <<= 4;
16471 +#else
16472 + time &= 0x1FUL;
16473 + time <<= 3;
16474 +#endif
16475 +
16476 + thread->sp0 ^= time;
16477 + load_sp0(init_tss + smp_processor_id(), thread);
16478 +
16479 +#ifdef CONFIG_X86_64
16480 + percpu_write(kernel_stack, thread->sp0);
16481 +#endif
16482 }
16483 +#endif
16484
16485 diff -urNp linux-2.6.32.45/arch/x86/kernel/ptrace.c linux-2.6.32.45/arch/x86/kernel/ptrace.c
16486 --- linux-2.6.32.45/arch/x86/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
16487 +++ linux-2.6.32.45/arch/x86/kernel/ptrace.c 2011-04-17 15:56:46.000000000 -0400
16488 @@ -925,7 +925,7 @@ static const struct user_regset_view use
16489 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
16490 {
16491 int ret;
16492 - unsigned long __user *datap = (unsigned long __user *)data;
16493 + unsigned long __user *datap = (__force unsigned long __user *)data;
16494
16495 switch (request) {
16496 /* read the word at location addr in the USER area. */
16497 @@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *chi
16498 if (addr < 0)
16499 return -EIO;
16500 ret = do_get_thread_area(child, addr,
16501 - (struct user_desc __user *) data);
16502 + (__force struct user_desc __user *) data);
16503 break;
16504
16505 case PTRACE_SET_THREAD_AREA:
16506 if (addr < 0)
16507 return -EIO;
16508 ret = do_set_thread_area(child, addr,
16509 - (struct user_desc __user *) data, 0);
16510 + (__force struct user_desc __user *) data, 0);
16511 break;
16512 #endif
16513
16514 @@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *chi
16515 #ifdef CONFIG_X86_PTRACE_BTS
16516 case PTRACE_BTS_CONFIG:
16517 ret = ptrace_bts_config
16518 - (child, data, (struct ptrace_bts_config __user *)addr);
16519 + (child, data, (__force struct ptrace_bts_config __user *)addr);
16520 break;
16521
16522 case PTRACE_BTS_STATUS:
16523 ret = ptrace_bts_status
16524 - (child, data, (struct ptrace_bts_config __user *)addr);
16525 + (child, data, (__force struct ptrace_bts_config __user *)addr);
16526 break;
16527
16528 case PTRACE_BTS_SIZE:
16529 @@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *chi
16530
16531 case PTRACE_BTS_GET:
16532 ret = ptrace_bts_read_record
16533 - (child, data, (struct bts_struct __user *) addr);
16534 + (child, data, (__force struct bts_struct __user *) addr);
16535 break;
16536
16537 case PTRACE_BTS_CLEAR:
16538 @@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *chi
16539
16540 case PTRACE_BTS_DRAIN:
16541 ret = ptrace_bts_drain
16542 - (child, data, (struct bts_struct __user *) addr);
16543 + (child, data, (__force struct bts_struct __user *) addr);
16544 break;
16545 #endif /* CONFIG_X86_PTRACE_BTS */
16546
16547 @@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *ts
16548 info.si_code = si_code;
16549
16550 /* User-mode ip? */
16551 - info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
16552 + info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
16553
16554 /* Send us the fake SIGTRAP */
16555 force_sig_info(SIGTRAP, &info, tsk);
16556 @@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *ts
16557 * We must return the syscall number to actually look up in the table.
16558 * This can be -1L to skip running any syscall at all.
16559 */
16560 -asmregparm long syscall_trace_enter(struct pt_regs *regs)
16561 +long syscall_trace_enter(struct pt_regs *regs)
16562 {
16563 long ret = 0;
16564
16565 @@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(stru
16566 return ret ?: regs->orig_ax;
16567 }
16568
16569 -asmregparm void syscall_trace_leave(struct pt_regs *regs)
16570 +void syscall_trace_leave(struct pt_regs *regs)
16571 {
16572 if (unlikely(current->audit_context))
16573 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
16574 diff -urNp linux-2.6.32.45/arch/x86/kernel/reboot.c linux-2.6.32.45/arch/x86/kernel/reboot.c
16575 --- linux-2.6.32.45/arch/x86/kernel/reboot.c 2011-08-09 18:35:28.000000000 -0400
16576 +++ linux-2.6.32.45/arch/x86/kernel/reboot.c 2011-08-09 18:33:59.000000000 -0400
16577 @@ -33,7 +33,7 @@ void (*pm_power_off)(void);
16578 EXPORT_SYMBOL(pm_power_off);
16579
16580 static const struct desc_ptr no_idt = {};
16581 -static int reboot_mode;
16582 +static unsigned short reboot_mode;
16583 enum reboot_type reboot_type = BOOT_KBD;
16584 int reboot_force;
16585
16586 @@ -292,12 +292,12 @@ core_initcall(reboot_init);
16587 controller to pulse the CPU reset line, which is more thorough, but
16588 doesn't work with at least one type of 486 motherboard. It is easy
16589 to stop this code working; hence the copious comments. */
16590 -static const unsigned long long
16591 -real_mode_gdt_entries [3] =
16592 +static struct desc_struct
16593 +real_mode_gdt_entries [3] __read_only =
16594 {
16595 - 0x0000000000000000ULL, /* Null descriptor */
16596 - 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
16597 - 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
16598 + GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
16599 + GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
16600 + GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
16601 };
16602
16603 static const struct desc_ptr
16604 @@ -346,7 +346,7 @@ static const unsigned char jump_to_bios
16605 * specified by the code and length parameters.
16606 * We assume that length will aways be less that 100!
16607 */
16608 -void machine_real_restart(const unsigned char *code, int length)
16609 +__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
16610 {
16611 local_irq_disable();
16612
16613 @@ -366,8 +366,8 @@ void machine_real_restart(const unsigned
16614 /* Remap the kernel at virtual address zero, as well as offset zero
16615 from the kernel segment. This assumes the kernel segment starts at
16616 virtual address PAGE_OFFSET. */
16617 - memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16618 - sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
16619 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16620 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
16621
16622 /*
16623 * Use `swapper_pg_dir' as our page directory.
16624 @@ -379,16 +379,15 @@ void machine_real_restart(const unsigned
16625 boot)". This seems like a fairly standard thing that gets set by
16626 REBOOT.COM programs, and the previous reset routine did this
16627 too. */
16628 - *((unsigned short *)0x472) = reboot_mode;
16629 + *(unsigned short *)(__va(0x472)) = reboot_mode;
16630
16631 /* For the switch to real mode, copy some code to low memory. It has
16632 to be in the first 64k because it is running in 16-bit mode, and it
16633 has to have the same physical and virtual address, because it turns
16634 off paging. Copy it near the end of the first page, out of the way
16635 of BIOS variables. */
16636 - memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
16637 - real_mode_switch, sizeof (real_mode_switch));
16638 - memcpy((void *)(0x1000 - 100), code, length);
16639 + memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
16640 + memcpy(__va(0x1000 - 100), code, length);
16641
16642 /* Set up the IDT for real mode. */
16643 load_idt(&real_mode_idt);
16644 @@ -416,6 +415,7 @@ void machine_real_restart(const unsigned
16645 __asm__ __volatile__ ("ljmp $0x0008,%0"
16646 :
16647 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
16648 + do { } while (1);
16649 }
16650 #ifdef CONFIG_APM_MODULE
16651 EXPORT_SYMBOL(machine_real_restart);
16652 @@ -544,7 +544,7 @@ void __attribute__((weak)) mach_reboot_f
16653 {
16654 }
16655
16656 -static void native_machine_emergency_restart(void)
16657 +__noreturn static void native_machine_emergency_restart(void)
16658 {
16659 int i;
16660
16661 @@ -659,13 +659,13 @@ void native_machine_shutdown(void)
16662 #endif
16663 }
16664
16665 -static void __machine_emergency_restart(int emergency)
16666 +static __noreturn void __machine_emergency_restart(int emergency)
16667 {
16668 reboot_emergency = emergency;
16669 machine_ops.emergency_restart();
16670 }
16671
16672 -static void native_machine_restart(char *__unused)
16673 +static __noreturn void native_machine_restart(char *__unused)
16674 {
16675 printk("machine restart\n");
16676
16677 @@ -674,7 +674,7 @@ static void native_machine_restart(char
16678 __machine_emergency_restart(0);
16679 }
16680
16681 -static void native_machine_halt(void)
16682 +static __noreturn void native_machine_halt(void)
16683 {
16684 /* stop other cpus and apics */
16685 machine_shutdown();
16686 @@ -685,7 +685,7 @@ static void native_machine_halt(void)
16687 stop_this_cpu(NULL);
16688 }
16689
16690 -static void native_machine_power_off(void)
16691 +__noreturn static void native_machine_power_off(void)
16692 {
16693 if (pm_power_off) {
16694 if (!reboot_force)
16695 @@ -694,6 +694,7 @@ static void native_machine_power_off(voi
16696 }
16697 /* a fallback in case there is no PM info available */
16698 tboot_shutdown(TB_SHUTDOWN_HALT);
16699 + do { } while (1);
16700 }
16701
16702 struct machine_ops machine_ops = {
16703 diff -urNp linux-2.6.32.45/arch/x86/kernel/setup.c linux-2.6.32.45/arch/x86/kernel/setup.c
16704 --- linux-2.6.32.45/arch/x86/kernel/setup.c 2011-04-17 17:00:52.000000000 -0400
16705 +++ linux-2.6.32.45/arch/x86/kernel/setup.c 2011-04-17 17:03:05.000000000 -0400
16706 @@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
16707
16708 if (!boot_params.hdr.root_flags)
16709 root_mountflags &= ~MS_RDONLY;
16710 - init_mm.start_code = (unsigned long) _text;
16711 - init_mm.end_code = (unsigned long) _etext;
16712 + init_mm.start_code = ktla_ktva((unsigned long) _text);
16713 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
16714 init_mm.end_data = (unsigned long) _edata;
16715 init_mm.brk = _brk_end;
16716
16717 - code_resource.start = virt_to_phys(_text);
16718 - code_resource.end = virt_to_phys(_etext)-1;
16719 - data_resource.start = virt_to_phys(_etext);
16720 + code_resource.start = virt_to_phys(ktla_ktva(_text));
16721 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
16722 + data_resource.start = virt_to_phys(_sdata);
16723 data_resource.end = virt_to_phys(_edata)-1;
16724 bss_resource.start = virt_to_phys(&__bss_start);
16725 bss_resource.end = virt_to_phys(&__bss_stop)-1;
16726 diff -urNp linux-2.6.32.45/arch/x86/kernel/setup_percpu.c linux-2.6.32.45/arch/x86/kernel/setup_percpu.c
16727 --- linux-2.6.32.45/arch/x86/kernel/setup_percpu.c 2011-03-27 14:31:47.000000000 -0400
16728 +++ linux-2.6.32.45/arch/x86/kernel/setup_percpu.c 2011-06-04 20:36:29.000000000 -0400
16729 @@ -25,19 +25,17 @@
16730 # define DBG(x...)
16731 #endif
16732
16733 -DEFINE_PER_CPU(int, cpu_number);
16734 +#ifdef CONFIG_SMP
16735 +DEFINE_PER_CPU(unsigned int, cpu_number);
16736 EXPORT_PER_CPU_SYMBOL(cpu_number);
16737 +#endif
16738
16739 -#ifdef CONFIG_X86_64
16740 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
16741 -#else
16742 -#define BOOT_PERCPU_OFFSET 0
16743 -#endif
16744
16745 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
16746 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
16747
16748 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
16749 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
16750 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
16751 };
16752 EXPORT_SYMBOL(__per_cpu_offset);
16753 @@ -159,10 +157,10 @@ static inline void setup_percpu_segment(
16754 {
16755 #ifdef CONFIG_X86_32
16756 struct desc_struct gdt;
16757 + unsigned long base = per_cpu_offset(cpu);
16758
16759 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
16760 - 0x2 | DESCTYPE_S, 0x8);
16761 - gdt.s = 1;
16762 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
16763 + 0x83 | DESCTYPE_S, 0xC);
16764 write_gdt_entry(get_cpu_gdt_table(cpu),
16765 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
16766 #endif
16767 @@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
16768 /* alrighty, percpu areas up and running */
16769 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
16770 for_each_possible_cpu(cpu) {
16771 +#ifdef CONFIG_CC_STACKPROTECTOR
16772 +#ifdef CONFIG_X86_32
16773 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
16774 +#endif
16775 +#endif
16776 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
16777 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
16778 per_cpu(cpu_number, cpu) = cpu;
16779 @@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
16780 early_per_cpu_map(x86_cpu_to_node_map, cpu);
16781 #endif
16782 #endif
16783 +#ifdef CONFIG_CC_STACKPROTECTOR
16784 +#ifdef CONFIG_X86_32
16785 + if (!cpu)
16786 + per_cpu(stack_canary.canary, cpu) = canary;
16787 +#endif
16788 +#endif
16789 /*
16790 * Up to this point, the boot CPU has been using .data.init
16791 * area. Reload any changed state for the boot CPU.
16792 diff -urNp linux-2.6.32.45/arch/x86/kernel/signal.c linux-2.6.32.45/arch/x86/kernel/signal.c
16793 --- linux-2.6.32.45/arch/x86/kernel/signal.c 2011-03-27 14:31:47.000000000 -0400
16794 +++ linux-2.6.32.45/arch/x86/kernel/signal.c 2011-05-22 23:02:03.000000000 -0400
16795 @@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsi
16796 * Align the stack pointer according to the i386 ABI,
16797 * i.e. so that on function entry ((sp + 4) & 15) == 0.
16798 */
16799 - sp = ((sp + 4) & -16ul) - 4;
16800 + sp = ((sp - 12) & -16ul) - 4;
16801 #else /* !CONFIG_X86_32 */
16802 sp = round_down(sp, 16) - 8;
16803 #endif
16804 @@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, str
16805 * Return an always-bogus address instead so we will die with SIGSEGV.
16806 */
16807 if (onsigstack && !likely(on_sig_stack(sp)))
16808 - return (void __user *)-1L;
16809 + return (__force void __user *)-1L;
16810
16811 /* save i387 state */
16812 if (used_math() && save_i387_xstate(*fpstate) < 0)
16813 - return (void __user *)-1L;
16814 + return (__force void __user *)-1L;
16815
16816 return (void __user *)sp;
16817 }
16818 @@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigactio
16819 }
16820
16821 if (current->mm->context.vdso)
16822 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16823 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16824 else
16825 - restorer = &frame->retcode;
16826 + restorer = (void __user *)&frame->retcode;
16827 if (ka->sa.sa_flags & SA_RESTORER)
16828 restorer = ka->sa.sa_restorer;
16829
16830 @@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigactio
16831 * reasons and because gdb uses it as a signature to notice
16832 * signal handler stack frames.
16833 */
16834 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
16835 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
16836
16837 if (err)
16838 return -EFAULT;
16839 @@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, str
16840 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
16841
16842 /* Set up to return from userspace. */
16843 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16844 + if (current->mm->context.vdso)
16845 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16846 + else
16847 + restorer = (void __user *)&frame->retcode;
16848 if (ka->sa.sa_flags & SA_RESTORER)
16849 restorer = ka->sa.sa_restorer;
16850 put_user_ex(restorer, &frame->pretcode);
16851 @@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, str
16852 * reasons and because gdb uses it as a signature to notice
16853 * signal handler stack frames.
16854 */
16855 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
16856 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
16857 } put_user_catch(err);
16858
16859 if (err)
16860 @@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *re
16861 int signr;
16862 sigset_t *oldset;
16863
16864 + pax_track_stack();
16865 +
16866 /*
16867 * We want the common case to go fast, which is why we may in certain
16868 * cases get here from kernel mode. Just return without doing anything
16869 @@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *re
16870 * X86_32: vm86 regs switched out by assembly code before reaching
16871 * here, so testing against kernel CS suffices.
16872 */
16873 - if (!user_mode(regs))
16874 + if (!user_mode_novm(regs))
16875 return;
16876
16877 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
16878 diff -urNp linux-2.6.32.45/arch/x86/kernel/smpboot.c linux-2.6.32.45/arch/x86/kernel/smpboot.c
16879 --- linux-2.6.32.45/arch/x86/kernel/smpboot.c 2011-03-27 14:31:47.000000000 -0400
16880 +++ linux-2.6.32.45/arch/x86/kernel/smpboot.c 2011-07-01 19:10:03.000000000 -0400
16881 @@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct
16882 */
16883 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
16884
16885 -void cpu_hotplug_driver_lock()
16886 +void cpu_hotplug_driver_lock(void)
16887 {
16888 - mutex_lock(&x86_cpu_hotplug_driver_mutex);
16889 + mutex_lock(&x86_cpu_hotplug_driver_mutex);
16890 }
16891
16892 -void cpu_hotplug_driver_unlock()
16893 +void cpu_hotplug_driver_unlock(void)
16894 {
16895 - mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16896 + mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16897 }
16898
16899 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
16900 @@ -625,7 +625,7 @@ wakeup_secondary_cpu_via_init(int phys_a
16901 * target processor state.
16902 */
16903 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
16904 - (unsigned long)stack_start.sp);
16905 + stack_start);
16906
16907 /*
16908 * Run STARTUP IPI loop.
16909 @@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int api
16910 set_idle_for_cpu(cpu, c_idle.idle);
16911 do_rest:
16912 per_cpu(current_task, cpu) = c_idle.idle;
16913 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
16914 #ifdef CONFIG_X86_32
16915 /* Stack for startup_32 can be just as for start_secondary onwards */
16916 irq_ctx_init(cpu);
16917 @@ -750,13 +751,15 @@ do_rest:
16918 #else
16919 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
16920 initial_gs = per_cpu_offset(cpu);
16921 - per_cpu(kernel_stack, cpu) =
16922 - (unsigned long)task_stack_page(c_idle.idle) -
16923 - KERNEL_STACK_OFFSET + THREAD_SIZE;
16924 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
16925 #endif
16926 +
16927 + pax_open_kernel();
16928 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16929 + pax_close_kernel();
16930 +
16931 initial_code = (unsigned long)start_secondary;
16932 - stack_start.sp = (void *) c_idle.idle->thread.sp;
16933 + stack_start = c_idle.idle->thread.sp;
16934
16935 /* start_ip had better be page-aligned! */
16936 start_ip = setup_trampoline();
16937 @@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int
16938
16939 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
16940
16941 +#ifdef CONFIG_PAX_PER_CPU_PGD
16942 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
16943 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16944 + KERNEL_PGD_PTRS);
16945 +#endif
16946 +
16947 err = do_boot_cpu(apicid, cpu);
16948
16949 if (err) {
16950 diff -urNp linux-2.6.32.45/arch/x86/kernel/step.c linux-2.6.32.45/arch/x86/kernel/step.c
16951 --- linux-2.6.32.45/arch/x86/kernel/step.c 2011-03-27 14:31:47.000000000 -0400
16952 +++ linux-2.6.32.45/arch/x86/kernel/step.c 2011-04-17 15:56:46.000000000 -0400
16953 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
16954 struct desc_struct *desc;
16955 unsigned long base;
16956
16957 - seg &= ~7UL;
16958 + seg >>= 3;
16959
16960 mutex_lock(&child->mm->context.lock);
16961 - if (unlikely((seg >> 3) >= child->mm->context.size))
16962 + if (unlikely(seg >= child->mm->context.size))
16963 addr = -1L; /* bogus selector, access would fault */
16964 else {
16965 desc = child->mm->context.ldt + seg;
16966 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
16967 addr += base;
16968 }
16969 mutex_unlock(&child->mm->context.lock);
16970 - }
16971 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
16972 + addr = ktla_ktva(addr);
16973
16974 return addr;
16975 }
16976 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
16977 unsigned char opcode[15];
16978 unsigned long addr = convert_ip_to_linear(child, regs);
16979
16980 + if (addr == -EINVAL)
16981 + return 0;
16982 +
16983 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
16984 for (i = 0; i < copied; i++) {
16985 switch (opcode[i]) {
16986 @@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
16987
16988 #ifdef CONFIG_X86_64
16989 case 0x40 ... 0x4f:
16990 - if (regs->cs != __USER_CS)
16991 + if ((regs->cs & 0xffff) != __USER_CS)
16992 /* 32-bit mode: register increment */
16993 return 0;
16994 /* 64-bit mode: REX prefix */
16995 diff -urNp linux-2.6.32.45/arch/x86/kernel/syscall_table_32.S linux-2.6.32.45/arch/x86/kernel/syscall_table_32.S
16996 --- linux-2.6.32.45/arch/x86/kernel/syscall_table_32.S 2011-03-27 14:31:47.000000000 -0400
16997 +++ linux-2.6.32.45/arch/x86/kernel/syscall_table_32.S 2011-04-17 15:56:46.000000000 -0400
16998 @@ -1,3 +1,4 @@
16999 +.section .rodata,"a",@progbits
17000 ENTRY(sys_call_table)
17001 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
17002 .long sys_exit
17003 diff -urNp linux-2.6.32.45/arch/x86/kernel/sys_i386_32.c linux-2.6.32.45/arch/x86/kernel/sys_i386_32.c
17004 --- linux-2.6.32.45/arch/x86/kernel/sys_i386_32.c 2011-03-27 14:31:47.000000000 -0400
17005 +++ linux-2.6.32.45/arch/x86/kernel/sys_i386_32.c 2011-04-17 15:56:46.000000000 -0400
17006 @@ -24,6 +24,21 @@
17007
17008 #include <asm/syscalls.h>
17009
17010 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
17011 +{
17012 + unsigned long pax_task_size = TASK_SIZE;
17013 +
17014 +#ifdef CONFIG_PAX_SEGMEXEC
17015 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
17016 + pax_task_size = SEGMEXEC_TASK_SIZE;
17017 +#endif
17018 +
17019 + if (len > pax_task_size || addr > pax_task_size - len)
17020 + return -EINVAL;
17021 +
17022 + return 0;
17023 +}
17024 +
17025 /*
17026 * Perform the select(nd, in, out, ex, tv) and mmap() system
17027 * calls. Linux/i386 didn't use to be able to handle more than
17028 @@ -58,6 +73,212 @@ out:
17029 return err;
17030 }
17031
17032 +unsigned long
17033 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
17034 + unsigned long len, unsigned long pgoff, unsigned long flags)
17035 +{
17036 + struct mm_struct *mm = current->mm;
17037 + struct vm_area_struct *vma;
17038 + unsigned long start_addr, pax_task_size = TASK_SIZE;
17039 +
17040 +#ifdef CONFIG_PAX_SEGMEXEC
17041 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17042 + pax_task_size = SEGMEXEC_TASK_SIZE;
17043 +#endif
17044 +
17045 + pax_task_size -= PAGE_SIZE;
17046 +
17047 + if (len > pax_task_size)
17048 + return -ENOMEM;
17049 +
17050 + if (flags & MAP_FIXED)
17051 + return addr;
17052 +
17053 +#ifdef CONFIG_PAX_RANDMMAP
17054 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17055 +#endif
17056 +
17057 + if (addr) {
17058 + addr = PAGE_ALIGN(addr);
17059 + if (pax_task_size - len >= addr) {
17060 + vma = find_vma(mm, addr);
17061 + if (check_heap_stack_gap(vma, addr, len))
17062 + return addr;
17063 + }
17064 + }
17065 + if (len > mm->cached_hole_size) {
17066 + start_addr = addr = mm->free_area_cache;
17067 + } else {
17068 + start_addr = addr = mm->mmap_base;
17069 + mm->cached_hole_size = 0;
17070 + }
17071 +
17072 +#ifdef CONFIG_PAX_PAGEEXEC
17073 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
17074 + start_addr = 0x00110000UL;
17075 +
17076 +#ifdef CONFIG_PAX_RANDMMAP
17077 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17078 + start_addr += mm->delta_mmap & 0x03FFF000UL;
17079 +#endif
17080 +
17081 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
17082 + start_addr = addr = mm->mmap_base;
17083 + else
17084 + addr = start_addr;
17085 + }
17086 +#endif
17087 +
17088 +full_search:
17089 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
17090 + /* At this point: (!vma || addr < vma->vm_end). */
17091 + if (pax_task_size - len < addr) {
17092 + /*
17093 + * Start a new search - just in case we missed
17094 + * some holes.
17095 + */
17096 + if (start_addr != mm->mmap_base) {
17097 + start_addr = addr = mm->mmap_base;
17098 + mm->cached_hole_size = 0;
17099 + goto full_search;
17100 + }
17101 + return -ENOMEM;
17102 + }
17103 + if (check_heap_stack_gap(vma, addr, len))
17104 + break;
17105 + if (addr + mm->cached_hole_size < vma->vm_start)
17106 + mm->cached_hole_size = vma->vm_start - addr;
17107 + addr = vma->vm_end;
17108 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
17109 + start_addr = addr = mm->mmap_base;
17110 + mm->cached_hole_size = 0;
17111 + goto full_search;
17112 + }
17113 + }
17114 +
17115 + /*
17116 + * Remember the place where we stopped the search:
17117 + */
17118 + mm->free_area_cache = addr + len;
17119 + return addr;
17120 +}
17121 +
17122 +unsigned long
17123 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17124 + const unsigned long len, const unsigned long pgoff,
17125 + const unsigned long flags)
17126 +{
17127 + struct vm_area_struct *vma;
17128 + struct mm_struct *mm = current->mm;
17129 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
17130 +
17131 +#ifdef CONFIG_PAX_SEGMEXEC
17132 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17133 + pax_task_size = SEGMEXEC_TASK_SIZE;
17134 +#endif
17135 +
17136 + pax_task_size -= PAGE_SIZE;
17137 +
17138 + /* requested length too big for entire address space */
17139 + if (len > pax_task_size)
17140 + return -ENOMEM;
17141 +
17142 + if (flags & MAP_FIXED)
17143 + return addr;
17144 +
17145 +#ifdef CONFIG_PAX_PAGEEXEC
17146 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
17147 + goto bottomup;
17148 +#endif
17149 +
17150 +#ifdef CONFIG_PAX_RANDMMAP
17151 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17152 +#endif
17153 +
17154 + /* requesting a specific address */
17155 + if (addr) {
17156 + addr = PAGE_ALIGN(addr);
17157 + if (pax_task_size - len >= addr) {
17158 + vma = find_vma(mm, addr);
17159 + if (check_heap_stack_gap(vma, addr, len))
17160 + return addr;
17161 + }
17162 + }
17163 +
17164 + /* check if free_area_cache is useful for us */
17165 + if (len <= mm->cached_hole_size) {
17166 + mm->cached_hole_size = 0;
17167 + mm->free_area_cache = mm->mmap_base;
17168 + }
17169 +
17170 + /* either no address requested or can't fit in requested address hole */
17171 + addr = mm->free_area_cache;
17172 +
17173 + /* make sure it can fit in the remaining address space */
17174 + if (addr > len) {
17175 + vma = find_vma(mm, addr-len);
17176 + if (check_heap_stack_gap(vma, addr - len, len))
17177 + /* remember the address as a hint for next time */
17178 + return (mm->free_area_cache = addr-len);
17179 + }
17180 +
17181 + if (mm->mmap_base < len)
17182 + goto bottomup;
17183 +
17184 + addr = mm->mmap_base-len;
17185 +
17186 + do {
17187 + /*
17188 + * Lookup failure means no vma is above this address,
17189 + * else if new region fits below vma->vm_start,
17190 + * return with success:
17191 + */
17192 + vma = find_vma(mm, addr);
17193 + if (check_heap_stack_gap(vma, addr, len))
17194 + /* remember the address as a hint for next time */
17195 + return (mm->free_area_cache = addr);
17196 +
17197 + /* remember the largest hole we saw so far */
17198 + if (addr + mm->cached_hole_size < vma->vm_start)
17199 + mm->cached_hole_size = vma->vm_start - addr;
17200 +
17201 + /* try just below the current vma->vm_start */
17202 + addr = skip_heap_stack_gap(vma, len);
17203 + } while (!IS_ERR_VALUE(addr));
17204 +
17205 +bottomup:
17206 + /*
17207 + * A failed mmap() very likely causes application failure,
17208 + * so fall back to the bottom-up function here. This scenario
17209 + * can happen with large stack limits and large mmap()
17210 + * allocations.
17211 + */
17212 +
17213 +#ifdef CONFIG_PAX_SEGMEXEC
17214 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17215 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
17216 + else
17217 +#endif
17218 +
17219 + mm->mmap_base = TASK_UNMAPPED_BASE;
17220 +
17221 +#ifdef CONFIG_PAX_RANDMMAP
17222 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17223 + mm->mmap_base += mm->delta_mmap;
17224 +#endif
17225 +
17226 + mm->free_area_cache = mm->mmap_base;
17227 + mm->cached_hole_size = ~0UL;
17228 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17229 + /*
17230 + * Restore the topdown base:
17231 + */
17232 + mm->mmap_base = base;
17233 + mm->free_area_cache = base;
17234 + mm->cached_hole_size = ~0UL;
17235 +
17236 + return addr;
17237 +}
17238
17239 struct sel_arg_struct {
17240 unsigned long n;
17241 @@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int fi
17242 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
17243 case SEMTIMEDOP:
17244 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
17245 - (const struct timespec __user *)fifth);
17246 + (__force const struct timespec __user *)fifth);
17247
17248 case SEMGET:
17249 return sys_semget(first, second, third);
17250 @@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int fi
17251 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
17252 if (ret)
17253 return ret;
17254 - return put_user(raddr, (ulong __user *) third);
17255 + return put_user(raddr, (__force ulong __user *) third);
17256 }
17257 case 1: /* iBCS2 emulator entry point */
17258 if (!segment_eq(get_fs(), get_ds()))
17259 @@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldol
17260
17261 return error;
17262 }
17263 -
17264 -
17265 -/*
17266 - * Do a system call from kernel instead of calling sys_execve so we
17267 - * end up with proper pt_regs.
17268 - */
17269 -int kernel_execve(const char *filename, char *const argv[], char *const envp[])
17270 -{
17271 - long __res;
17272 - asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
17273 - : "=a" (__res)
17274 - : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
17275 - return __res;
17276 -}
17277 diff -urNp linux-2.6.32.45/arch/x86/kernel/sys_x86_64.c linux-2.6.32.45/arch/x86/kernel/sys_x86_64.c
17278 --- linux-2.6.32.45/arch/x86/kernel/sys_x86_64.c 2011-03-27 14:31:47.000000000 -0400
17279 +++ linux-2.6.32.45/arch/x86/kernel/sys_x86_64.c 2011-04-17 15:56:46.000000000 -0400
17280 @@ -32,8 +32,8 @@ out:
17281 return error;
17282 }
17283
17284 -static void find_start_end(unsigned long flags, unsigned long *begin,
17285 - unsigned long *end)
17286 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
17287 + unsigned long *begin, unsigned long *end)
17288 {
17289 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
17290 unsigned long new_begin;
17291 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long
17292 *begin = new_begin;
17293 }
17294 } else {
17295 - *begin = TASK_UNMAPPED_BASE;
17296 + *begin = mm->mmap_base;
17297 *end = TASK_SIZE;
17298 }
17299 }
17300 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
17301 if (flags & MAP_FIXED)
17302 return addr;
17303
17304 - find_start_end(flags, &begin, &end);
17305 + find_start_end(mm, flags, &begin, &end);
17306
17307 if (len > end)
17308 return -ENOMEM;
17309
17310 +#ifdef CONFIG_PAX_RANDMMAP
17311 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17312 +#endif
17313 +
17314 if (addr) {
17315 addr = PAGE_ALIGN(addr);
17316 vma = find_vma(mm, addr);
17317 - if (end - len >= addr &&
17318 - (!vma || addr + len <= vma->vm_start))
17319 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
17320 return addr;
17321 }
17322 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
17323 @@ -106,7 +109,7 @@ full_search:
17324 }
17325 return -ENOMEM;
17326 }
17327 - if (!vma || addr + len <= vma->vm_start) {
17328 + if (check_heap_stack_gap(vma, addr, len)) {
17329 /*
17330 * Remember the place where we stopped the search:
17331 */
17332 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
17333 {
17334 struct vm_area_struct *vma;
17335 struct mm_struct *mm = current->mm;
17336 - unsigned long addr = addr0;
17337 + unsigned long base = mm->mmap_base, addr = addr0;
17338
17339 /* requested length too big for entire address space */
17340 if (len > TASK_SIZE)
17341 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
17342 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
17343 goto bottomup;
17344
17345 +#ifdef CONFIG_PAX_RANDMMAP
17346 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17347 +#endif
17348 +
17349 /* requesting a specific address */
17350 if (addr) {
17351 addr = PAGE_ALIGN(addr);
17352 - vma = find_vma(mm, addr);
17353 - if (TASK_SIZE - len >= addr &&
17354 - (!vma || addr + len <= vma->vm_start))
17355 - return addr;
17356 + if (TASK_SIZE - len >= addr) {
17357 + vma = find_vma(mm, addr);
17358 + if (check_heap_stack_gap(vma, addr, len))
17359 + return addr;
17360 + }
17361 }
17362
17363 /* check if free_area_cache is useful for us */
17364 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
17365 /* make sure it can fit in the remaining address space */
17366 if (addr > len) {
17367 vma = find_vma(mm, addr-len);
17368 - if (!vma || addr <= vma->vm_start)
17369 + if (check_heap_stack_gap(vma, addr - len, len))
17370 /* remember the address as a hint for next time */
17371 return mm->free_area_cache = addr-len;
17372 }
17373 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
17374 * return with success:
17375 */
17376 vma = find_vma(mm, addr);
17377 - if (!vma || addr+len <= vma->vm_start)
17378 + if (check_heap_stack_gap(vma, addr, len))
17379 /* remember the address as a hint for next time */
17380 return mm->free_area_cache = addr;
17381
17382 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
17383 mm->cached_hole_size = vma->vm_start - addr;
17384
17385 /* try just below the current vma->vm_start */
17386 - addr = vma->vm_start-len;
17387 - } while (len < vma->vm_start);
17388 + addr = skip_heap_stack_gap(vma, len);
17389 + } while (!IS_ERR_VALUE(addr));
17390
17391 bottomup:
17392 /*
17393 @@ -198,13 +206,21 @@ bottomup:
17394 * can happen with large stack limits and large mmap()
17395 * allocations.
17396 */
17397 + mm->mmap_base = TASK_UNMAPPED_BASE;
17398 +
17399 +#ifdef CONFIG_PAX_RANDMMAP
17400 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17401 + mm->mmap_base += mm->delta_mmap;
17402 +#endif
17403 +
17404 + mm->free_area_cache = mm->mmap_base;
17405 mm->cached_hole_size = ~0UL;
17406 - mm->free_area_cache = TASK_UNMAPPED_BASE;
17407 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17408 /*
17409 * Restore the topdown base:
17410 */
17411 - mm->free_area_cache = mm->mmap_base;
17412 + mm->mmap_base = base;
17413 + mm->free_area_cache = base;
17414 mm->cached_hole_size = ~0UL;
17415
17416 return addr;
17417 diff -urNp linux-2.6.32.45/arch/x86/kernel/tboot.c linux-2.6.32.45/arch/x86/kernel/tboot.c
17418 --- linux-2.6.32.45/arch/x86/kernel/tboot.c 2011-03-27 14:31:47.000000000 -0400
17419 +++ linux-2.6.32.45/arch/x86/kernel/tboot.c 2011-05-22 23:02:03.000000000 -0400
17420 @@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
17421
17422 void tboot_shutdown(u32 shutdown_type)
17423 {
17424 - void (*shutdown)(void);
17425 + void (* __noreturn shutdown)(void);
17426
17427 if (!tboot_enabled())
17428 return;
17429 @@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
17430
17431 switch_to_tboot_pt();
17432
17433 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
17434 + shutdown = (void *)tboot->shutdown_entry;
17435 shutdown();
17436
17437 /* should not reach here */
17438 @@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
17439 tboot_shutdown(acpi_shutdown_map[sleep_state]);
17440 }
17441
17442 -static atomic_t ap_wfs_count;
17443 +static atomic_unchecked_t ap_wfs_count;
17444
17445 static int tboot_wait_for_aps(int num_aps)
17446 {
17447 @@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(
17448 {
17449 switch (action) {
17450 case CPU_DYING:
17451 - atomic_inc(&ap_wfs_count);
17452 + atomic_inc_unchecked(&ap_wfs_count);
17453 if (num_online_cpus() == 1)
17454 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
17455 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
17456 return NOTIFY_BAD;
17457 break;
17458 }
17459 @@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
17460
17461 tboot_create_trampoline();
17462
17463 - atomic_set(&ap_wfs_count, 0);
17464 + atomic_set_unchecked(&ap_wfs_count, 0);
17465 register_hotcpu_notifier(&tboot_cpu_notifier);
17466 return 0;
17467 }
17468 diff -urNp linux-2.6.32.45/arch/x86/kernel/time.c linux-2.6.32.45/arch/x86/kernel/time.c
17469 --- linux-2.6.32.45/arch/x86/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
17470 +++ linux-2.6.32.45/arch/x86/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
17471 @@ -26,17 +26,13 @@
17472 int timer_ack;
17473 #endif
17474
17475 -#ifdef CONFIG_X86_64
17476 -volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
17477 -#endif
17478 -
17479 unsigned long profile_pc(struct pt_regs *regs)
17480 {
17481 unsigned long pc = instruction_pointer(regs);
17482
17483 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
17484 + if (!user_mode(regs) && in_lock_functions(pc)) {
17485 #ifdef CONFIG_FRAME_POINTER
17486 - return *(unsigned long *)(regs->bp + sizeof(long));
17487 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
17488 #else
17489 unsigned long *sp =
17490 (unsigned long *)kernel_stack_pointer(regs);
17491 @@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
17492 * or above a saved flags. Eflags has bits 22-31 zero,
17493 * kernel addresses don't.
17494 */
17495 +
17496 +#ifdef CONFIG_PAX_KERNEXEC
17497 + return ktla_ktva(sp[0]);
17498 +#else
17499 if (sp[0] >> 22)
17500 return sp[0];
17501 if (sp[1] >> 22)
17502 return sp[1];
17503 #endif
17504 +
17505 +#endif
17506 }
17507 return pc;
17508 }
17509 diff -urNp linux-2.6.32.45/arch/x86/kernel/tls.c linux-2.6.32.45/arch/x86/kernel/tls.c
17510 --- linux-2.6.32.45/arch/x86/kernel/tls.c 2011-03-27 14:31:47.000000000 -0400
17511 +++ linux-2.6.32.45/arch/x86/kernel/tls.c 2011-04-17 15:56:46.000000000 -0400
17512 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
17513 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
17514 return -EINVAL;
17515
17516 +#ifdef CONFIG_PAX_SEGMEXEC
17517 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
17518 + return -EINVAL;
17519 +#endif
17520 +
17521 set_tls_desc(p, idx, &info, 1);
17522
17523 return 0;
17524 diff -urNp linux-2.6.32.45/arch/x86/kernel/trampoline_32.S linux-2.6.32.45/arch/x86/kernel/trampoline_32.S
17525 --- linux-2.6.32.45/arch/x86/kernel/trampoline_32.S 2011-03-27 14:31:47.000000000 -0400
17526 +++ linux-2.6.32.45/arch/x86/kernel/trampoline_32.S 2011-04-17 15:56:46.000000000 -0400
17527 @@ -32,6 +32,12 @@
17528 #include <asm/segment.h>
17529 #include <asm/page_types.h>
17530
17531 +#ifdef CONFIG_PAX_KERNEXEC
17532 +#define ta(X) (X)
17533 +#else
17534 +#define ta(X) ((X) - __PAGE_OFFSET)
17535 +#endif
17536 +
17537 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
17538 __CPUINITRODATA
17539 .code16
17540 @@ -60,7 +66,7 @@ r_base = .
17541 inc %ax # protected mode (PE) bit
17542 lmsw %ax # into protected mode
17543 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
17544 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
17545 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
17546
17547 # These need to be in the same 64K segment as the above;
17548 # hence we don't use the boot_gdt_descr defined in head.S
17549 diff -urNp linux-2.6.32.45/arch/x86/kernel/trampoline_64.S linux-2.6.32.45/arch/x86/kernel/trampoline_64.S
17550 --- linux-2.6.32.45/arch/x86/kernel/trampoline_64.S 2011-03-27 14:31:47.000000000 -0400
17551 +++ linux-2.6.32.45/arch/x86/kernel/trampoline_64.S 2011-07-01 18:53:26.000000000 -0400
17552 @@ -91,7 +91,7 @@ startup_32:
17553 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
17554 movl %eax, %ds
17555
17556 - movl $X86_CR4_PAE, %eax
17557 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17558 movl %eax, %cr4 # Enable PAE mode
17559
17560 # Setup trampoline 4 level pagetables
17561 @@ -127,7 +127,7 @@ startup_64:
17562 no_longmode:
17563 hlt
17564 jmp no_longmode
17565 -#include "verify_cpu_64.S"
17566 +#include "verify_cpu.S"
17567
17568 # Careful these need to be in the same 64K segment as the above;
17569 tidt:
17570 @@ -138,7 +138,7 @@ tidt:
17571 # so the kernel can live anywhere
17572 .balign 4
17573 tgdt:
17574 - .short tgdt_end - tgdt # gdt limit
17575 + .short tgdt_end - tgdt - 1 # gdt limit
17576 .long tgdt - r_base
17577 .short 0
17578 .quad 0x00cf9b000000ffff # __KERNEL32_CS
17579 diff -urNp linux-2.6.32.45/arch/x86/kernel/traps.c linux-2.6.32.45/arch/x86/kernel/traps.c
17580 --- linux-2.6.32.45/arch/x86/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
17581 +++ linux-2.6.32.45/arch/x86/kernel/traps.c 2011-07-06 19:53:33.000000000 -0400
17582 @@ -69,12 +69,6 @@ asmlinkage int system_call(void);
17583
17584 /* Do we ignore FPU interrupts ? */
17585 char ignore_fpu_irq;
17586 -
17587 -/*
17588 - * The IDT has to be page-aligned to simplify the Pentium
17589 - * F0 0F bug workaround.
17590 - */
17591 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
17592 #endif
17593
17594 DECLARE_BITMAP(used_vectors, NR_VECTORS);
17595 @@ -112,19 +106,19 @@ static inline void preempt_conditional_c
17596 static inline void
17597 die_if_kernel(const char *str, struct pt_regs *regs, long err)
17598 {
17599 - if (!user_mode_vm(regs))
17600 + if (!user_mode(regs))
17601 die(str, regs, err);
17602 }
17603 #endif
17604
17605 static void __kprobes
17606 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17607 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
17608 long error_code, siginfo_t *info)
17609 {
17610 struct task_struct *tsk = current;
17611
17612 #ifdef CONFIG_X86_32
17613 - if (regs->flags & X86_VM_MASK) {
17614 + if (v8086_mode(regs)) {
17615 /*
17616 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
17617 * On nmi (interrupt 2), do_trap should not be called.
17618 @@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str
17619 }
17620 #endif
17621
17622 - if (!user_mode(regs))
17623 + if (!user_mode_novm(regs))
17624 goto kernel_trap;
17625
17626 #ifdef CONFIG_X86_32
17627 @@ -158,7 +152,7 @@ trap_signal:
17628 printk_ratelimit()) {
17629 printk(KERN_INFO
17630 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
17631 - tsk->comm, tsk->pid, str,
17632 + tsk->comm, task_pid_nr(tsk), str,
17633 regs->ip, regs->sp, error_code);
17634 print_vma_addr(" in ", regs->ip);
17635 printk("\n");
17636 @@ -175,8 +169,20 @@ kernel_trap:
17637 if (!fixup_exception(regs)) {
17638 tsk->thread.error_code = error_code;
17639 tsk->thread.trap_no = trapnr;
17640 +
17641 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17642 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
17643 + str = "PAX: suspicious stack segment fault";
17644 +#endif
17645 +
17646 die(str, regs, error_code);
17647 }
17648 +
17649 +#ifdef CONFIG_PAX_REFCOUNT
17650 + if (trapnr == 4)
17651 + pax_report_refcount_overflow(regs);
17652 +#endif
17653 +
17654 return;
17655
17656 #ifdef CONFIG_X86_32
17657 @@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *re
17658 conditional_sti(regs);
17659
17660 #ifdef CONFIG_X86_32
17661 - if (regs->flags & X86_VM_MASK)
17662 + if (v8086_mode(regs))
17663 goto gp_in_vm86;
17664 #endif
17665
17666 tsk = current;
17667 - if (!user_mode(regs))
17668 + if (!user_mode_novm(regs))
17669 goto gp_in_kernel;
17670
17671 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17672 + if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
17673 + struct mm_struct *mm = tsk->mm;
17674 + unsigned long limit;
17675 +
17676 + down_write(&mm->mmap_sem);
17677 + limit = mm->context.user_cs_limit;
17678 + if (limit < TASK_SIZE) {
17679 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
17680 + up_write(&mm->mmap_sem);
17681 + return;
17682 + }
17683 + up_write(&mm->mmap_sem);
17684 + }
17685 +#endif
17686 +
17687 tsk->thread.error_code = error_code;
17688 tsk->thread.trap_no = 13;
17689
17690 @@ -305,6 +327,13 @@ gp_in_kernel:
17691 if (notify_die(DIE_GPF, "general protection fault", regs,
17692 error_code, 13, SIGSEGV) == NOTIFY_STOP)
17693 return;
17694 +
17695 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17696 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
17697 + die("PAX: suspicious general protection fault", regs, error_code);
17698 + else
17699 +#endif
17700 +
17701 die("general protection fault", regs, error_code);
17702 }
17703
17704 @@ -435,6 +464,17 @@ static notrace __kprobes void default_do
17705 dotraplinkage notrace __kprobes void
17706 do_nmi(struct pt_regs *regs, long error_code)
17707 {
17708 +
17709 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17710 + if (!user_mode(regs)) {
17711 + unsigned long cs = regs->cs & 0xFFFF;
17712 + unsigned long ip = ktva_ktla(regs->ip);
17713 +
17714 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
17715 + regs->ip = ip;
17716 + }
17717 +#endif
17718 +
17719 nmi_enter();
17720
17721 inc_irq_stat(__nmi_count);
17722 @@ -558,7 +598,7 @@ dotraplinkage void __kprobes do_debug(st
17723 }
17724
17725 #ifdef CONFIG_X86_32
17726 - if (regs->flags & X86_VM_MASK)
17727 + if (v8086_mode(regs))
17728 goto debug_vm86;
17729 #endif
17730
17731 @@ -570,7 +610,7 @@ dotraplinkage void __kprobes do_debug(st
17732 * kernel space (but re-enable TF when returning to user mode).
17733 */
17734 if (condition & DR_STEP) {
17735 - if (!user_mode(regs))
17736 + if (!user_mode_novm(regs))
17737 goto clear_TF_reenable;
17738 }
17739
17740 @@ -757,7 +797,7 @@ do_simd_coprocessor_error(struct pt_regs
17741 * Handle strange cache flush from user space exception
17742 * in all other cases. This is undocumented behaviour.
17743 */
17744 - if (regs->flags & X86_VM_MASK) {
17745 + if (v8086_mode(regs)) {
17746 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
17747 return;
17748 }
17749 @@ -798,7 +838,7 @@ asmlinkage void __attribute__((weak)) sm
17750 void __math_state_restore(void)
17751 {
17752 struct thread_info *thread = current_thread_info();
17753 - struct task_struct *tsk = thread->task;
17754 + struct task_struct *tsk = current;
17755
17756 /*
17757 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
17758 @@ -825,8 +865,7 @@ void __math_state_restore(void)
17759 */
17760 asmlinkage void math_state_restore(void)
17761 {
17762 - struct thread_info *thread = current_thread_info();
17763 - struct task_struct *tsk = thread->task;
17764 + struct task_struct *tsk = current;
17765
17766 if (!tsk_used_math(tsk)) {
17767 local_irq_enable();
17768 diff -urNp linux-2.6.32.45/arch/x86/kernel/verify_cpu_64.S linux-2.6.32.45/arch/x86/kernel/verify_cpu_64.S
17769 --- linux-2.6.32.45/arch/x86/kernel/verify_cpu_64.S 2011-03-27 14:31:47.000000000 -0400
17770 +++ linux-2.6.32.45/arch/x86/kernel/verify_cpu_64.S 1969-12-31 19:00:00.000000000 -0500
17771 @@ -1,105 +0,0 @@
17772 -/*
17773 - *
17774 - * verify_cpu.S - Code for cpu long mode and SSE verification. This
17775 - * code has been borrowed from boot/setup.S and was introduced by
17776 - * Andi Kleen.
17777 - *
17778 - * Copyright (c) 2007 Andi Kleen (ak@suse.de)
17779 - * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
17780 - * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
17781 - *
17782 - * This source code is licensed under the GNU General Public License,
17783 - * Version 2. See the file COPYING for more details.
17784 - *
17785 - * This is a common code for verification whether CPU supports
17786 - * long mode and SSE or not. It is not called directly instead this
17787 - * file is included at various places and compiled in that context.
17788 - * Following are the current usage.
17789 - *
17790 - * This file is included by both 16bit and 32bit code.
17791 - *
17792 - * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
17793 - * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
17794 - * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
17795 - * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
17796 - *
17797 - * verify_cpu, returns the status of cpu check in register %eax.
17798 - * 0: Success 1: Failure
17799 - *
17800 - * The caller needs to check for the error code and take the action
17801 - * appropriately. Either display a message or halt.
17802 - */
17803 -
17804 -#include <asm/cpufeature.h>
17805 -
17806 -verify_cpu:
17807 - pushfl # Save caller passed flags
17808 - pushl $0 # Kill any dangerous flags
17809 - popfl
17810 -
17811 - pushfl # standard way to check for cpuid
17812 - popl %eax
17813 - movl %eax,%ebx
17814 - xorl $0x200000,%eax
17815 - pushl %eax
17816 - popfl
17817 - pushfl
17818 - popl %eax
17819 - cmpl %eax,%ebx
17820 - jz verify_cpu_no_longmode # cpu has no cpuid
17821 -
17822 - movl $0x0,%eax # See if cpuid 1 is implemented
17823 - cpuid
17824 - cmpl $0x1,%eax
17825 - jb verify_cpu_no_longmode # no cpuid 1
17826 -
17827 - xor %di,%di
17828 - cmpl $0x68747541,%ebx # AuthenticAMD
17829 - jnz verify_cpu_noamd
17830 - cmpl $0x69746e65,%edx
17831 - jnz verify_cpu_noamd
17832 - cmpl $0x444d4163,%ecx
17833 - jnz verify_cpu_noamd
17834 - mov $1,%di # cpu is from AMD
17835 -
17836 -verify_cpu_noamd:
17837 - movl $0x1,%eax # Does the cpu have what it takes
17838 - cpuid
17839 - andl $REQUIRED_MASK0,%edx
17840 - xorl $REQUIRED_MASK0,%edx
17841 - jnz verify_cpu_no_longmode
17842 -
17843 - movl $0x80000000,%eax # See if extended cpuid is implemented
17844 - cpuid
17845 - cmpl $0x80000001,%eax
17846 - jb verify_cpu_no_longmode # no extended cpuid
17847 -
17848 - movl $0x80000001,%eax # Does the cpu have what it takes
17849 - cpuid
17850 - andl $REQUIRED_MASK1,%edx
17851 - xorl $REQUIRED_MASK1,%edx
17852 - jnz verify_cpu_no_longmode
17853 -
17854 -verify_cpu_sse_test:
17855 - movl $1,%eax
17856 - cpuid
17857 - andl $SSE_MASK,%edx
17858 - cmpl $SSE_MASK,%edx
17859 - je verify_cpu_sse_ok
17860 - test %di,%di
17861 - jz verify_cpu_no_longmode # only try to force SSE on AMD
17862 - movl $0xc0010015,%ecx # HWCR
17863 - rdmsr
17864 - btr $15,%eax # enable SSE
17865 - wrmsr
17866 - xor %di,%di # don't loop
17867 - jmp verify_cpu_sse_test # try again
17868 -
17869 -verify_cpu_no_longmode:
17870 - popfl # Restore caller passed flags
17871 - movl $1,%eax
17872 - ret
17873 -verify_cpu_sse_ok:
17874 - popfl # Restore caller passed flags
17875 - xorl %eax, %eax
17876 - ret
17877 diff -urNp linux-2.6.32.45/arch/x86/kernel/verify_cpu.S linux-2.6.32.45/arch/x86/kernel/verify_cpu.S
17878 --- linux-2.6.32.45/arch/x86/kernel/verify_cpu.S 1969-12-31 19:00:00.000000000 -0500
17879 +++ linux-2.6.32.45/arch/x86/kernel/verify_cpu.S 2011-07-01 18:28:42.000000000 -0400
17880 @@ -0,0 +1,140 @@
17881 +/*
17882 + *
17883 + * verify_cpu.S - Code for cpu long mode and SSE verification. This
17884 + * code has been borrowed from boot/setup.S and was introduced by
17885 + * Andi Kleen.
17886 + *
17887 + * Copyright (c) 2007 Andi Kleen (ak@suse.de)
17888 + * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
17889 + * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
17890 + * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
17891 + *
17892 + * This source code is licensed under the GNU General Public License,
17893 + * Version 2. See the file COPYING for more details.
17894 + *
17895 + * This is a common code for verification whether CPU supports
17896 + * long mode and SSE or not. It is not called directly instead this
17897 + * file is included at various places and compiled in that context.
17898 + * This file is expected to run in 32bit code. Currently:
17899 + *
17900 + * arch/x86/boot/compressed/head_64.S: Boot cpu verification
17901 + * arch/x86/kernel/trampoline_64.S: secondary processor verification
17902 + * arch/x86/kernel/head_32.S: processor startup
17903 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
17904 + *
17905 + * verify_cpu, returns the status of longmode and SSE in register %eax.
17906 + * 0: Success 1: Failure
17907 + *
17908 + * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
17909 + *
17910 + * The caller needs to check for the error code and take the action
17911 + * appropriately. Either display a message or halt.
17912 + */
17913 +
17914 +#include <asm/cpufeature.h>
17915 +#include <asm/msr-index.h>
17916 +
17917 +verify_cpu:
17918 + pushfl # Save caller passed flags
17919 + pushl $0 # Kill any dangerous flags
17920 + popfl
17921 +
17922 + pushfl # standard way to check for cpuid
17923 + popl %eax
17924 + movl %eax,%ebx
17925 + xorl $0x200000,%eax
17926 + pushl %eax
17927 + popfl
17928 + pushfl
17929 + popl %eax
17930 + cmpl %eax,%ebx
17931 + jz verify_cpu_no_longmode # cpu has no cpuid
17932 +
17933 + movl $0x0,%eax # See if cpuid 1 is implemented
17934 + cpuid
17935 + cmpl $0x1,%eax
17936 + jb verify_cpu_no_longmode # no cpuid 1
17937 +
17938 + xor %di,%di
17939 + cmpl $0x68747541,%ebx # AuthenticAMD
17940 + jnz verify_cpu_noamd
17941 + cmpl $0x69746e65,%edx
17942 + jnz verify_cpu_noamd
17943 + cmpl $0x444d4163,%ecx
17944 + jnz verify_cpu_noamd
17945 + mov $1,%di # cpu is from AMD
17946 + jmp verify_cpu_check
17947 +
17948 +verify_cpu_noamd:
17949 + cmpl $0x756e6547,%ebx # GenuineIntel?
17950 + jnz verify_cpu_check
17951 + cmpl $0x49656e69,%edx
17952 + jnz verify_cpu_check
17953 + cmpl $0x6c65746e,%ecx
17954 + jnz verify_cpu_check
17955 +
17956 + # only call IA32_MISC_ENABLE when:
17957 + # family > 6 || (family == 6 && model >= 0xd)
17958 + movl $0x1, %eax # check CPU family and model
17959 + cpuid
17960 + movl %eax, %ecx
17961 +
17962 + andl $0x0ff00f00, %eax # mask family and extended family
17963 + shrl $8, %eax
17964 + cmpl $6, %eax
17965 + ja verify_cpu_clear_xd # family > 6, ok
17966 + jb verify_cpu_check # family < 6, skip
17967 +
17968 + andl $0x000f00f0, %ecx # mask model and extended model
17969 + shrl $4, %ecx
17970 + cmpl $0xd, %ecx
17971 + jb verify_cpu_check # family == 6, model < 0xd, skip
17972 +
17973 +verify_cpu_clear_xd:
17974 + movl $MSR_IA32_MISC_ENABLE, %ecx
17975 + rdmsr
17976 + btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
17977 + jnc verify_cpu_check # only write MSR if bit was changed
17978 + wrmsr
17979 +
17980 +verify_cpu_check:
17981 + movl $0x1,%eax # Does the cpu have what it takes
17982 + cpuid
17983 + andl $REQUIRED_MASK0,%edx
17984 + xorl $REQUIRED_MASK0,%edx
17985 + jnz verify_cpu_no_longmode
17986 +
17987 + movl $0x80000000,%eax # See if extended cpuid is implemented
17988 + cpuid
17989 + cmpl $0x80000001,%eax
17990 + jb verify_cpu_no_longmode # no extended cpuid
17991 +
17992 + movl $0x80000001,%eax # Does the cpu have what it takes
17993 + cpuid
17994 + andl $REQUIRED_MASK1,%edx
17995 + xorl $REQUIRED_MASK1,%edx
17996 + jnz verify_cpu_no_longmode
17997 +
17998 +verify_cpu_sse_test:
17999 + movl $1,%eax
18000 + cpuid
18001 + andl $SSE_MASK,%edx
18002 + cmpl $SSE_MASK,%edx
18003 + je verify_cpu_sse_ok
18004 + test %di,%di
18005 + jz verify_cpu_no_longmode # only try to force SSE on AMD
18006 + movl $MSR_K7_HWCR,%ecx
18007 + rdmsr
18008 + btr $15,%eax # enable SSE
18009 + wrmsr
18010 + xor %di,%di # don't loop
18011 + jmp verify_cpu_sse_test # try again
18012 +
18013 +verify_cpu_no_longmode:
18014 + popfl # Restore caller passed flags
18015 + movl $1,%eax
18016 + ret
18017 +verify_cpu_sse_ok:
18018 + popfl # Restore caller passed flags
18019 + xorl %eax, %eax
18020 + ret
18021 diff -urNp linux-2.6.32.45/arch/x86/kernel/vm86_32.c linux-2.6.32.45/arch/x86/kernel/vm86_32.c
18022 --- linux-2.6.32.45/arch/x86/kernel/vm86_32.c 2011-03-27 14:31:47.000000000 -0400
18023 +++ linux-2.6.32.45/arch/x86/kernel/vm86_32.c 2011-04-17 15:56:46.000000000 -0400
18024 @@ -41,6 +41,7 @@
18025 #include <linux/ptrace.h>
18026 #include <linux/audit.h>
18027 #include <linux/stddef.h>
18028 +#include <linux/grsecurity.h>
18029
18030 #include <asm/uaccess.h>
18031 #include <asm/io.h>
18032 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
18033 do_exit(SIGSEGV);
18034 }
18035
18036 - tss = &per_cpu(init_tss, get_cpu());
18037 + tss = init_tss + get_cpu();
18038 current->thread.sp0 = current->thread.saved_sp0;
18039 current->thread.sysenter_cs = __KERNEL_CS;
18040 load_sp0(tss, &current->thread);
18041 @@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
18042 struct task_struct *tsk;
18043 int tmp, ret = -EPERM;
18044
18045 +#ifdef CONFIG_GRKERNSEC_VM86
18046 + if (!capable(CAP_SYS_RAWIO)) {
18047 + gr_handle_vm86();
18048 + goto out;
18049 + }
18050 +#endif
18051 +
18052 tsk = current;
18053 if (tsk->thread.saved_sp0)
18054 goto out;
18055 @@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
18056 int tmp, ret;
18057 struct vm86plus_struct __user *v86;
18058
18059 +#ifdef CONFIG_GRKERNSEC_VM86
18060 + if (!capable(CAP_SYS_RAWIO)) {
18061 + gr_handle_vm86();
18062 + ret = -EPERM;
18063 + goto out;
18064 + }
18065 +#endif
18066 +
18067 tsk = current;
18068 switch (regs->bx) {
18069 case VM86_REQUEST_IRQ:
18070 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
18071 tsk->thread.saved_fs = info->regs32->fs;
18072 tsk->thread.saved_gs = get_user_gs(info->regs32);
18073
18074 - tss = &per_cpu(init_tss, get_cpu());
18075 + tss = init_tss + get_cpu();
18076 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
18077 if (cpu_has_sep)
18078 tsk->thread.sysenter_cs = 0;
18079 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
18080 goto cannot_handle;
18081 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
18082 goto cannot_handle;
18083 - intr_ptr = (unsigned long __user *) (i << 2);
18084 + intr_ptr = (__force unsigned long __user *) (i << 2);
18085 if (get_user(segoffs, intr_ptr))
18086 goto cannot_handle;
18087 if ((segoffs >> 16) == BIOSSEG)
18088 diff -urNp linux-2.6.32.45/arch/x86/kernel/vmi_32.c linux-2.6.32.45/arch/x86/kernel/vmi_32.c
18089 --- linux-2.6.32.45/arch/x86/kernel/vmi_32.c 2011-03-27 14:31:47.000000000 -0400
18090 +++ linux-2.6.32.45/arch/x86/kernel/vmi_32.c 2011-08-05 20:33:55.000000000 -0400
18091 @@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1)))
18092 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
18093
18094 #define call_vrom_func(rom,func) \
18095 - (((VROMFUNC *)(rom->func))())
18096 + (((VROMFUNC *)(ktva_ktla(rom.func)))())
18097
18098 #define call_vrom_long_func(rom,func,arg) \
18099 - (((VROMLONGFUNC *)(rom->func)) (arg))
18100 +({\
18101 + u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
18102 + struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
18103 + __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
18104 + __reloc;\
18105 +})
18106
18107 -static struct vrom_header *vmi_rom;
18108 +static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
18109 static int disable_pge;
18110 static int disable_pse;
18111 static int disable_sep;
18112 @@ -76,10 +81,10 @@ static struct {
18113 void (*set_initial_ap_state)(int, int);
18114 void (*halt)(void);
18115 void (*set_lazy_mode)(int mode);
18116 -} vmi_ops;
18117 +} __no_const vmi_ops __read_only;
18118
18119 /* Cached VMI operations */
18120 -struct vmi_timer_ops vmi_timer_ops;
18121 +struct vmi_timer_ops vmi_timer_ops __read_only;
18122
18123 /*
18124 * VMI patching routines.
18125 @@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
18126 static inline void patch_offset(void *insnbuf,
18127 unsigned long ip, unsigned long dest)
18128 {
18129 - *(unsigned long *)(insnbuf+1) = dest-ip-5;
18130 + *(unsigned long *)(insnbuf+1) = dest-ip-5;
18131 }
18132
18133 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
18134 @@ -102,6 +107,7 @@ static unsigned patch_internal(int call,
18135 {
18136 u64 reloc;
18137 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
18138 +
18139 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
18140 switch(rel->type) {
18141 case VMI_RELOCATION_CALL_REL:
18142 @@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud
18143
18144 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
18145 {
18146 - const pte_t pte = { .pte = 0 };
18147 + const pte_t pte = __pte(0ULL);
18148 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
18149 }
18150
18151 static void vmi_pmd_clear(pmd_t *pmd)
18152 {
18153 - const pte_t pte = { .pte = 0 };
18154 + const pte_t pte = __pte(0ULL);
18155 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
18156 }
18157 #endif
18158 @@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, un
18159 ap.ss = __KERNEL_DS;
18160 ap.esp = (unsigned long) start_esp;
18161
18162 - ap.ds = __USER_DS;
18163 - ap.es = __USER_DS;
18164 + ap.ds = __KERNEL_DS;
18165 + ap.es = __KERNEL_DS;
18166 ap.fs = __KERNEL_PERCPU;
18167 - ap.gs = __KERNEL_STACK_CANARY;
18168 + savesegment(gs, ap.gs);
18169
18170 ap.eflags = 0;
18171
18172 @@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
18173 paravirt_leave_lazy_mmu();
18174 }
18175
18176 +#ifdef CONFIG_PAX_KERNEXEC
18177 +static unsigned long vmi_pax_open_kernel(void)
18178 +{
18179 + return 0;
18180 +}
18181 +
18182 +static unsigned long vmi_pax_close_kernel(void)
18183 +{
18184 + return 0;
18185 +}
18186 +#endif
18187 +
18188 static inline int __init check_vmi_rom(struct vrom_header *rom)
18189 {
18190 struct pci_header *pci;
18191 @@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(s
18192 return 0;
18193 if (rom->vrom_signature != VMI_SIGNATURE)
18194 return 0;
18195 + if (rom->rom_length * 512 > sizeof(*rom)) {
18196 + printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
18197 + return 0;
18198 + }
18199 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
18200 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
18201 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
18202 @@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(v
18203 struct vrom_header *romstart;
18204 romstart = (struct vrom_header *)isa_bus_to_virt(base);
18205 if (check_vmi_rom(romstart)) {
18206 - vmi_rom = romstart;
18207 + vmi_rom = *romstart;
18208 return 1;
18209 }
18210 }
18211 @@ -836,6 +858,11 @@ static inline int __init activate_vmi(vo
18212
18213 para_fill(pv_irq_ops.safe_halt, Halt);
18214
18215 +#ifdef CONFIG_PAX_KERNEXEC
18216 + pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
18217 + pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
18218 +#endif
18219 +
18220 /*
18221 * Alternative instruction rewriting doesn't happen soon enough
18222 * to convert VMI_IRET to a call instead of a jump; so we have
18223 @@ -853,16 +880,16 @@ static inline int __init activate_vmi(vo
18224
18225 void __init vmi_init(void)
18226 {
18227 - if (!vmi_rom)
18228 + if (!vmi_rom.rom_signature)
18229 probe_vmi_rom();
18230 else
18231 - check_vmi_rom(vmi_rom);
18232 + check_vmi_rom(&vmi_rom);
18233
18234 /* In case probing for or validating the ROM failed, basil */
18235 - if (!vmi_rom)
18236 + if (!vmi_rom.rom_signature)
18237 return;
18238
18239 - reserve_top_address(-vmi_rom->virtual_top);
18240 + reserve_top_address(-vmi_rom.virtual_top);
18241
18242 #ifdef CONFIG_X86_IO_APIC
18243 /* This is virtual hardware; timer routing is wired correctly */
18244 @@ -874,7 +901,7 @@ void __init vmi_activate(void)
18245 {
18246 unsigned long flags;
18247
18248 - if (!vmi_rom)
18249 + if (!vmi_rom.rom_signature)
18250 return;
18251
18252 local_irq_save(flags);
18253 diff -urNp linux-2.6.32.45/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.45/arch/x86/kernel/vmlinux.lds.S
18254 --- linux-2.6.32.45/arch/x86/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
18255 +++ linux-2.6.32.45/arch/x86/kernel/vmlinux.lds.S 2011-04-17 15:56:46.000000000 -0400
18256 @@ -26,6 +26,13 @@
18257 #include <asm/page_types.h>
18258 #include <asm/cache.h>
18259 #include <asm/boot.h>
18260 +#include <asm/segment.h>
18261 +
18262 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18263 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
18264 +#else
18265 +#define __KERNEL_TEXT_OFFSET 0
18266 +#endif
18267
18268 #undef i386 /* in case the preprocessor is a 32bit one */
18269
18270 @@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
18271 #ifdef CONFIG_X86_32
18272 OUTPUT_ARCH(i386)
18273 ENTRY(phys_startup_32)
18274 -jiffies = jiffies_64;
18275 #else
18276 OUTPUT_ARCH(i386:x86-64)
18277 ENTRY(phys_startup_64)
18278 -jiffies_64 = jiffies;
18279 #endif
18280
18281 PHDRS {
18282 text PT_LOAD FLAGS(5); /* R_E */
18283 - data PT_LOAD FLAGS(7); /* RWE */
18284 +#ifdef CONFIG_X86_32
18285 + module PT_LOAD FLAGS(5); /* R_E */
18286 +#endif
18287 +#ifdef CONFIG_XEN
18288 + rodata PT_LOAD FLAGS(5); /* R_E */
18289 +#else
18290 + rodata PT_LOAD FLAGS(4); /* R__ */
18291 +#endif
18292 + data PT_LOAD FLAGS(6); /* RW_ */
18293 #ifdef CONFIG_X86_64
18294 user PT_LOAD FLAGS(5); /* R_E */
18295 +#endif
18296 + init.begin PT_LOAD FLAGS(6); /* RW_ */
18297 #ifdef CONFIG_SMP
18298 percpu PT_LOAD FLAGS(6); /* RW_ */
18299 #endif
18300 + text.init PT_LOAD FLAGS(5); /* R_E */
18301 + text.exit PT_LOAD FLAGS(5); /* R_E */
18302 init PT_LOAD FLAGS(7); /* RWE */
18303 -#endif
18304 note PT_NOTE FLAGS(0); /* ___ */
18305 }
18306
18307 SECTIONS
18308 {
18309 #ifdef CONFIG_X86_32
18310 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
18311 - phys_startup_32 = startup_32 - LOAD_OFFSET;
18312 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
18313 #else
18314 - . = __START_KERNEL;
18315 - phys_startup_64 = startup_64 - LOAD_OFFSET;
18316 + . = __START_KERNEL;
18317 #endif
18318
18319 /* Text and read-only data */
18320 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
18321 - _text = .;
18322 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18323 /* bootstrapping code */
18324 +#ifdef CONFIG_X86_32
18325 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18326 +#else
18327 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18328 +#endif
18329 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18330 + _text = .;
18331 HEAD_TEXT
18332 #ifdef CONFIG_X86_32
18333 . = ALIGN(PAGE_SIZE);
18334 @@ -82,28 +102,71 @@ SECTIONS
18335 IRQENTRY_TEXT
18336 *(.fixup)
18337 *(.gnu.warning)
18338 - /* End of text section */
18339 - _etext = .;
18340 } :text = 0x9090
18341
18342 - NOTES :text :note
18343 + . += __KERNEL_TEXT_OFFSET;
18344 +
18345 +#ifdef CONFIG_X86_32
18346 + . = ALIGN(PAGE_SIZE);
18347 + .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
18348 + *(.vmi.rom)
18349 + } :module
18350 +
18351 + . = ALIGN(PAGE_SIZE);
18352 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
18353 +
18354 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
18355 + MODULES_EXEC_VADDR = .;
18356 + BYTE(0)
18357 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
18358 + . = ALIGN(HPAGE_SIZE);
18359 + MODULES_EXEC_END = . - 1;
18360 +#endif
18361 +
18362 + } :module
18363 +#endif
18364
18365 - EXCEPTION_TABLE(16) :text = 0x9090
18366 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
18367 + /* End of text section */
18368 + _etext = . - __KERNEL_TEXT_OFFSET;
18369 + }
18370 +
18371 +#ifdef CONFIG_X86_32
18372 + . = ALIGN(PAGE_SIZE);
18373 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
18374 + *(.idt)
18375 + . = ALIGN(PAGE_SIZE);
18376 + *(.empty_zero_page)
18377 + *(.swapper_pg_fixmap)
18378 + *(.swapper_pg_pmd)
18379 + *(.swapper_pg_dir)
18380 + *(.trampoline_pg_dir)
18381 + } :rodata
18382 +#endif
18383 +
18384 + . = ALIGN(PAGE_SIZE);
18385 + NOTES :rodata :note
18386 +
18387 + EXCEPTION_TABLE(16) :rodata
18388
18389 RO_DATA(PAGE_SIZE)
18390
18391 /* Data */
18392 .data : AT(ADDR(.data) - LOAD_OFFSET) {
18393 +
18394 +#ifdef CONFIG_PAX_KERNEXEC
18395 + . = ALIGN(HPAGE_SIZE);
18396 +#else
18397 + . = ALIGN(PAGE_SIZE);
18398 +#endif
18399 +
18400 /* Start of data section */
18401 _sdata = .;
18402
18403 /* init_task */
18404 INIT_TASK_DATA(THREAD_SIZE)
18405
18406 -#ifdef CONFIG_X86_32
18407 - /* 32 bit has nosave before _edata */
18408 NOSAVE_DATA
18409 -#endif
18410
18411 PAGE_ALIGNED_DATA(PAGE_SIZE)
18412
18413 @@ -112,6 +175,8 @@ SECTIONS
18414 DATA_DATA
18415 CONSTRUCTORS
18416
18417 + jiffies = jiffies_64;
18418 +
18419 /* rarely changed data like cpu maps */
18420 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
18421
18422 @@ -166,12 +231,6 @@ SECTIONS
18423 }
18424 vgetcpu_mode = VVIRT(.vgetcpu_mode);
18425
18426 - . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
18427 - .jiffies : AT(VLOAD(.jiffies)) {
18428 - *(.jiffies)
18429 - }
18430 - jiffies = VVIRT(.jiffies);
18431 -
18432 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
18433 *(.vsyscall_3)
18434 }
18435 @@ -187,12 +246,19 @@ SECTIONS
18436 #endif /* CONFIG_X86_64 */
18437
18438 /* Init code and data - will be freed after init */
18439 - . = ALIGN(PAGE_SIZE);
18440 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
18441 + BYTE(0)
18442 +
18443 +#ifdef CONFIG_PAX_KERNEXEC
18444 + . = ALIGN(HPAGE_SIZE);
18445 +#else
18446 + . = ALIGN(PAGE_SIZE);
18447 +#endif
18448 +
18449 __init_begin = .; /* paired with __init_end */
18450 - }
18451 + } :init.begin
18452
18453 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
18454 +#ifdef CONFIG_SMP
18455 /*
18456 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
18457 * output PHDR, so the next output section - .init.text - should
18458 @@ -201,12 +267,27 @@ SECTIONS
18459 PERCPU_VADDR(0, :percpu)
18460 #endif
18461
18462 - INIT_TEXT_SECTION(PAGE_SIZE)
18463 -#ifdef CONFIG_X86_64
18464 - :init
18465 -#endif
18466 + . = ALIGN(PAGE_SIZE);
18467 + init_begin = .;
18468 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
18469 + VMLINUX_SYMBOL(_sinittext) = .;
18470 + INIT_TEXT
18471 + VMLINUX_SYMBOL(_einittext) = .;
18472 + . = ALIGN(PAGE_SIZE);
18473 + } :text.init
18474
18475 - INIT_DATA_SECTION(16)
18476 + /*
18477 + * .exit.text is discard at runtime, not link time, to deal with
18478 + * references from .altinstructions and .eh_frame
18479 + */
18480 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18481 + EXIT_TEXT
18482 + . = ALIGN(16);
18483 + } :text.exit
18484 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
18485 +
18486 + . = ALIGN(PAGE_SIZE);
18487 + INIT_DATA_SECTION(16) :init
18488
18489 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
18490 __x86_cpu_dev_start = .;
18491 @@ -232,19 +313,11 @@ SECTIONS
18492 *(.altinstr_replacement)
18493 }
18494
18495 - /*
18496 - * .exit.text is discard at runtime, not link time, to deal with
18497 - * references from .altinstructions and .eh_frame
18498 - */
18499 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
18500 - EXIT_TEXT
18501 - }
18502 -
18503 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
18504 EXIT_DATA
18505 }
18506
18507 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
18508 +#ifndef CONFIG_SMP
18509 PERCPU(PAGE_SIZE)
18510 #endif
18511
18512 @@ -267,12 +340,6 @@ SECTIONS
18513 . = ALIGN(PAGE_SIZE);
18514 }
18515
18516 -#ifdef CONFIG_X86_64
18517 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
18518 - NOSAVE_DATA
18519 - }
18520 -#endif
18521 -
18522 /* BSS */
18523 . = ALIGN(PAGE_SIZE);
18524 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
18525 @@ -288,6 +355,7 @@ SECTIONS
18526 __brk_base = .;
18527 . += 64 * 1024; /* 64k alignment slop space */
18528 *(.brk_reservation) /* areas brk users have reserved */
18529 + . = ALIGN(HPAGE_SIZE);
18530 __brk_limit = .;
18531 }
18532
18533 @@ -316,13 +384,12 @@ SECTIONS
18534 * for the boot processor.
18535 */
18536 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
18537 -INIT_PER_CPU(gdt_page);
18538 INIT_PER_CPU(irq_stack_union);
18539
18540 /*
18541 * Build-time check on the image size:
18542 */
18543 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
18544 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
18545 "kernel image bigger than KERNEL_IMAGE_SIZE");
18546
18547 #ifdef CONFIG_SMP
18548 diff -urNp linux-2.6.32.45/arch/x86/kernel/vsyscall_64.c linux-2.6.32.45/arch/x86/kernel/vsyscall_64.c
18549 --- linux-2.6.32.45/arch/x86/kernel/vsyscall_64.c 2011-03-27 14:31:47.000000000 -0400
18550 +++ linux-2.6.32.45/arch/x86/kernel/vsyscall_64.c 2011-04-23 12:56:10.000000000 -0400
18551 @@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wa
18552
18553 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
18554 /* copy vsyscall data */
18555 + strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
18556 vsyscall_gtod_data.clock.vread = clock->vread;
18557 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
18558 vsyscall_gtod_data.clock.mask = clock->mask;
18559 @@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s
18560 We do this here because otherwise user space would do it on
18561 its own in a likely inferior way (no access to jiffies).
18562 If you don't like it pass NULL. */
18563 - if (tcache && tcache->blob[0] == (j = __jiffies)) {
18564 + if (tcache && tcache->blob[0] == (j = jiffies)) {
18565 p = tcache->blob[1];
18566 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
18567 /* Load per CPU data from RDTSCP */
18568 diff -urNp linux-2.6.32.45/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.32.45/arch/x86/kernel/x8664_ksyms_64.c
18569 --- linux-2.6.32.45/arch/x86/kernel/x8664_ksyms_64.c 2011-03-27 14:31:47.000000000 -0400
18570 +++ linux-2.6.32.45/arch/x86/kernel/x8664_ksyms_64.c 2011-04-17 15:56:46.000000000 -0400
18571 @@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
18572
18573 EXPORT_SYMBOL(copy_user_generic);
18574 EXPORT_SYMBOL(__copy_user_nocache);
18575 -EXPORT_SYMBOL(copy_from_user);
18576 -EXPORT_SYMBOL(copy_to_user);
18577 EXPORT_SYMBOL(__copy_from_user_inatomic);
18578
18579 EXPORT_SYMBOL(copy_page);
18580 diff -urNp linux-2.6.32.45/arch/x86/kernel/xsave.c linux-2.6.32.45/arch/x86/kernel/xsave.c
18581 --- linux-2.6.32.45/arch/x86/kernel/xsave.c 2011-03-27 14:31:47.000000000 -0400
18582 +++ linux-2.6.32.45/arch/x86/kernel/xsave.c 2011-04-17 15:56:46.000000000 -0400
18583 @@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_
18584 fx_sw_user->xstate_size > fx_sw_user->extended_size)
18585 return -1;
18586
18587 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
18588 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
18589 fx_sw_user->extended_size -
18590 FP_XSTATE_MAGIC2_SIZE));
18591 /*
18592 @@ -196,7 +196,7 @@ fx_only:
18593 * the other extended state.
18594 */
18595 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
18596 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
18597 + return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
18598 }
18599
18600 /*
18601 @@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf
18602 if (task_thread_info(tsk)->status & TS_XSAVE)
18603 err = restore_user_xstate(buf);
18604 else
18605 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
18606 + err = fxrstor_checking((struct i387_fxsave_struct __user *)
18607 buf);
18608 if (unlikely(err)) {
18609 /*
18610 diff -urNp linux-2.6.32.45/arch/x86/kvm/emulate.c linux-2.6.32.45/arch/x86/kvm/emulate.c
18611 --- linux-2.6.32.45/arch/x86/kvm/emulate.c 2011-03-27 14:31:47.000000000 -0400
18612 +++ linux-2.6.32.45/arch/x86/kvm/emulate.c 2011-04-17 15:56:46.000000000 -0400
18613 @@ -81,8 +81,8 @@
18614 #define Src2CL (1<<29)
18615 #define Src2ImmByte (2<<29)
18616 #define Src2One (3<<29)
18617 -#define Src2Imm16 (4<<29)
18618 -#define Src2Mask (7<<29)
18619 +#define Src2Imm16 (4U<<29)
18620 +#define Src2Mask (7U<<29)
18621
18622 enum {
18623 Group1_80, Group1_81, Group1_82, Group1_83,
18624 @@ -411,6 +411,7 @@ static u32 group2_table[] = {
18625
18626 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
18627 do { \
18628 + unsigned long _tmp; \
18629 __asm__ __volatile__ ( \
18630 _PRE_EFLAGS("0", "4", "2") \
18631 _op _suffix " %"_x"3,%1; " \
18632 @@ -424,8 +425,6 @@ static u32 group2_table[] = {
18633 /* Raw emulation: instruction has two explicit operands. */
18634 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
18635 do { \
18636 - unsigned long _tmp; \
18637 - \
18638 switch ((_dst).bytes) { \
18639 case 2: \
18640 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
18641 @@ -441,7 +440,6 @@ static u32 group2_table[] = {
18642
18643 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
18644 do { \
18645 - unsigned long _tmp; \
18646 switch ((_dst).bytes) { \
18647 case 1: \
18648 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
18649 diff -urNp linux-2.6.32.45/arch/x86/kvm/lapic.c linux-2.6.32.45/arch/x86/kvm/lapic.c
18650 --- linux-2.6.32.45/arch/x86/kvm/lapic.c 2011-03-27 14:31:47.000000000 -0400
18651 +++ linux-2.6.32.45/arch/x86/kvm/lapic.c 2011-04-17 15:56:46.000000000 -0400
18652 @@ -52,7 +52,7 @@
18653 #define APIC_BUS_CYCLE_NS 1
18654
18655 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
18656 -#define apic_debug(fmt, arg...)
18657 +#define apic_debug(fmt, arg...) do {} while (0)
18658
18659 #define APIC_LVT_NUM 6
18660 /* 14 is the version for Xeon and Pentium 8.4.8*/
18661 diff -urNp linux-2.6.32.45/arch/x86/kvm/paging_tmpl.h linux-2.6.32.45/arch/x86/kvm/paging_tmpl.h
18662 --- linux-2.6.32.45/arch/x86/kvm/paging_tmpl.h 2011-03-27 14:31:47.000000000 -0400
18663 +++ linux-2.6.32.45/arch/x86/kvm/paging_tmpl.h 2011-05-16 21:46:57.000000000 -0400
18664 @@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_
18665 int level = PT_PAGE_TABLE_LEVEL;
18666 unsigned long mmu_seq;
18667
18668 + pax_track_stack();
18669 +
18670 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
18671 kvm_mmu_audit(vcpu, "pre page fault");
18672
18673 diff -urNp linux-2.6.32.45/arch/x86/kvm/svm.c linux-2.6.32.45/arch/x86/kvm/svm.c
18674 --- linux-2.6.32.45/arch/x86/kvm/svm.c 2011-03-27 14:31:47.000000000 -0400
18675 +++ linux-2.6.32.45/arch/x86/kvm/svm.c 2011-08-05 20:33:55.000000000 -0400
18676 @@ -2485,7 +2485,11 @@ static void reload_tss(struct kvm_vcpu *
18677 int cpu = raw_smp_processor_id();
18678
18679 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
18680 +
18681 + pax_open_kernel();
18682 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
18683 + pax_close_kernel();
18684 +
18685 load_TR_desc();
18686 }
18687
18688 @@ -2946,7 +2950,7 @@ static bool svm_gb_page_enable(void)
18689 return true;
18690 }
18691
18692 -static struct kvm_x86_ops svm_x86_ops = {
18693 +static const struct kvm_x86_ops svm_x86_ops = {
18694 .cpu_has_kvm_support = has_svm,
18695 .disabled_by_bios = is_disabled,
18696 .hardware_setup = svm_hardware_setup,
18697 diff -urNp linux-2.6.32.45/arch/x86/kvm/vmx.c linux-2.6.32.45/arch/x86/kvm/vmx.c
18698 --- linux-2.6.32.45/arch/x86/kvm/vmx.c 2011-03-27 14:31:47.000000000 -0400
18699 +++ linux-2.6.32.45/arch/x86/kvm/vmx.c 2011-05-04 17:56:20.000000000 -0400
18700 @@ -570,7 +570,11 @@ static void reload_tss(void)
18701
18702 kvm_get_gdt(&gdt);
18703 descs = (void *)gdt.base;
18704 +
18705 + pax_open_kernel();
18706 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
18707 + pax_close_kernel();
18708 +
18709 load_TR_desc();
18710 }
18711
18712 @@ -1409,8 +1413,11 @@ static __init int hardware_setup(void)
18713 if (!cpu_has_vmx_flexpriority())
18714 flexpriority_enabled = 0;
18715
18716 - if (!cpu_has_vmx_tpr_shadow())
18717 - kvm_x86_ops->update_cr8_intercept = NULL;
18718 + if (!cpu_has_vmx_tpr_shadow()) {
18719 + pax_open_kernel();
18720 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
18721 + pax_close_kernel();
18722 + }
18723
18724 if (enable_ept && !cpu_has_vmx_ept_2m_page())
18725 kvm_disable_largepages();
18726 @@ -2361,7 +2368,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
18727 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
18728
18729 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
18730 - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
18731 + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
18732 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
18733 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
18734 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
18735 @@ -3717,6 +3724,12 @@ static void vmx_vcpu_run(struct kvm_vcpu
18736 "jmp .Lkvm_vmx_return \n\t"
18737 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
18738 ".Lkvm_vmx_return: "
18739 +
18740 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18741 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
18742 + ".Lkvm_vmx_return2: "
18743 +#endif
18744 +
18745 /* Save guest registers, load host registers, keep flags */
18746 "xchg %0, (%%"R"sp) \n\t"
18747 "mov %%"R"ax, %c[rax](%0) \n\t"
18748 @@ -3763,8 +3776,13 @@ static void vmx_vcpu_run(struct kvm_vcpu
18749 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
18750 #endif
18751 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
18752 +
18753 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18754 + ,[cs]"i"(__KERNEL_CS)
18755 +#endif
18756 +
18757 : "cc", "memory"
18758 - , R"bx", R"di", R"si"
18759 + , R"ax", R"bx", R"di", R"si"
18760 #ifdef CONFIG_X86_64
18761 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
18762 #endif
18763 @@ -3781,7 +3799,16 @@ static void vmx_vcpu_run(struct kvm_vcpu
18764 if (vmx->rmode.irq.pending)
18765 fixup_rmode_irq(vmx);
18766
18767 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
18768 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
18769 +
18770 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18771 + loadsegment(fs, __KERNEL_PERCPU);
18772 +#endif
18773 +
18774 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18775 + __set_fs(current_thread_info()->addr_limit);
18776 +#endif
18777 +
18778 vmx->launched = 1;
18779
18780 vmx_complete_interrupts(vmx);
18781 @@ -3956,7 +3983,7 @@ static bool vmx_gb_page_enable(void)
18782 return false;
18783 }
18784
18785 -static struct kvm_x86_ops vmx_x86_ops = {
18786 +static const struct kvm_x86_ops vmx_x86_ops = {
18787 .cpu_has_kvm_support = cpu_has_kvm_support,
18788 .disabled_by_bios = vmx_disabled_by_bios,
18789 .hardware_setup = hardware_setup,
18790 diff -urNp linux-2.6.32.45/arch/x86/kvm/x86.c linux-2.6.32.45/arch/x86/kvm/x86.c
18791 --- linux-2.6.32.45/arch/x86/kvm/x86.c 2011-05-10 22:12:01.000000000 -0400
18792 +++ linux-2.6.32.45/arch/x86/kvm/x86.c 2011-05-10 22:12:26.000000000 -0400
18793 @@ -82,7 +82,7 @@ static void update_cr8_intercept(struct
18794 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
18795 struct kvm_cpuid_entry2 __user *entries);
18796
18797 -struct kvm_x86_ops *kvm_x86_ops;
18798 +const struct kvm_x86_ops *kvm_x86_ops;
18799 EXPORT_SYMBOL_GPL(kvm_x86_ops);
18800
18801 int ignore_msrs = 0;
18802 @@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
18803 struct kvm_cpuid2 *cpuid,
18804 struct kvm_cpuid_entry2 __user *entries)
18805 {
18806 - int r;
18807 + int r, i;
18808
18809 r = -E2BIG;
18810 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
18811 goto out;
18812 r = -EFAULT;
18813 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
18814 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18815 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18816 goto out;
18817 + for (i = 0; i < cpuid->nent; ++i) {
18818 + struct kvm_cpuid_entry2 cpuid_entry;
18819 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
18820 + goto out;
18821 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
18822 + }
18823 vcpu->arch.cpuid_nent = cpuid->nent;
18824 kvm_apic_set_version(vcpu);
18825 return 0;
18826 @@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
18827 struct kvm_cpuid2 *cpuid,
18828 struct kvm_cpuid_entry2 __user *entries)
18829 {
18830 - int r;
18831 + int r, i;
18832
18833 vcpu_load(vcpu);
18834 r = -E2BIG;
18835 if (cpuid->nent < vcpu->arch.cpuid_nent)
18836 goto out;
18837 r = -EFAULT;
18838 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
18839 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18840 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18841 goto out;
18842 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
18843 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
18844 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
18845 + goto out;
18846 + }
18847 return 0;
18848
18849 out:
18850 @@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
18851 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
18852 struct kvm_interrupt *irq)
18853 {
18854 - if (irq->irq < 0 || irq->irq >= 256)
18855 + if (irq->irq >= 256)
18856 return -EINVAL;
18857 if (irqchip_in_kernel(vcpu->kvm))
18858 return -ENXIO;
18859 @@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cp
18860 .notifier_call = kvmclock_cpufreq_notifier
18861 };
18862
18863 -int kvm_arch_init(void *opaque)
18864 +int kvm_arch_init(const void *opaque)
18865 {
18866 int r, cpu;
18867 - struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
18868 + const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
18869
18870 if (kvm_x86_ops) {
18871 printk(KERN_ERR "kvm: already loaded the other module\n");
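
[Editorial note] The vmx.c and x86.c hunks above constify the KVM ops table: the vmx_x86_ops instance and the kvm_x86_ops pointer it is registered through both become const, so the function-pointer table ends up in read-only memory and cannot be rewritten at run time. A minimal, self-contained C sketch of that pattern follows; the struct and function names are illustrative only, not the kernel's.

	#include <stdio.h>

	struct ops {
		int  (*hardware_setup)(void);
		void (*run)(void);
	};

	static int  demo_setup(void) { puts("setup"); return 0; }
	static void demo_run(void)   { puts("run"); }

	/* const instance: the initialized table is placed in .rodata */
	static const struct ops demo_ops = {
		.hardware_setup = demo_setup,
		.run            = demo_run,
	};

	/* consumers hold a pointer-to-const, mirroring the
	 * 'const struct kvm_x86_ops *kvm_x86_ops' change above */
	static const struct ops *registered_ops;

	int main(void)
	{
		registered_ops = &demo_ops;
		registered_ops->hardware_setup();
		registered_ops->run();
		/* registered_ops->run = NULL;   rejected at compile time */
		return 0;
	}
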
18872 diff -urNp linux-2.6.32.45/arch/x86/lguest/boot.c linux-2.6.32.45/arch/x86/lguest/boot.c
18873 --- linux-2.6.32.45/arch/x86/lguest/boot.c 2011-03-27 14:31:47.000000000 -0400
18874 +++ linux-2.6.32.45/arch/x86/lguest/boot.c 2011-08-05 20:33:55.000000000 -0400
18875 @@ -1172,9 +1172,10 @@ static __init int early_put_chars(u32 vt
18876 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
18877 * Launcher to reboot us.
18878 */
18879 -static void lguest_restart(char *reason)
18880 +static __noreturn void lguest_restart(char *reason)
18881 {
18882 kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
18883 + BUG();
18884 }
18885
18886 /*G:050
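
[Editorial note] The lguest hunk above marks lguest_restart() as __noreturn and adds BUG() after the shutdown hypercall, so the compiler never sees control fall out of a noreturn function even if the hypercall were to return. A minimal runnable sketch of the same pattern, with abort() standing in for BUG() and a stub standing in for the hypercall:

	#include <stdio.h>
	#include <stdlib.h>

	static void hypercall_shutdown(const char *reason)
	{
		/* stand-in for kvm_hypercall2(LHCALL_SHUTDOWN, ...) */
		printf("shutting down: %s\n", reason);
		exit(0);
	}

	static __attribute__((noreturn)) void restart_sketch(const char *reason)
	{
		hypercall_shutdown(reason);
		abort();	/* plays the role of BUG(): unreachable in practice,
				   but keeps the noreturn contract honest */
	}

	int main(void)
	{
		restart_sketch("Restarting system");
	}
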
18887 diff -urNp linux-2.6.32.45/arch/x86/lib/atomic64_32.c linux-2.6.32.45/arch/x86/lib/atomic64_32.c
18888 --- linux-2.6.32.45/arch/x86/lib/atomic64_32.c 2011-03-27 14:31:47.000000000 -0400
18889 +++ linux-2.6.32.45/arch/x86/lib/atomic64_32.c 2011-05-04 17:56:28.000000000 -0400
18890 @@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u6
18891 }
18892 EXPORT_SYMBOL(atomic64_cmpxchg);
18893
18894 +u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
18895 +{
18896 + return cmpxchg8b(&ptr->counter, old_val, new_val);
18897 +}
18898 +EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
18899 +
18900 /**
18901 * atomic64_xchg - xchg atomic64 variable
18902 * @ptr: pointer to type atomic64_t
18903 @@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 n
18904 EXPORT_SYMBOL(atomic64_xchg);
18905
18906 /**
18907 + * atomic64_xchg_unchecked - xchg atomic64 variable
18908 + * @ptr: pointer to type atomic64_unchecked_t
18909 + * @new_val: value to assign
18910 + *
18911 + * Atomically xchgs the value of @ptr to @new_val and returns
18912 + * the old value.
18913 + */
18914 +u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
18915 +{
18916 + /*
18917 + * Try first with a (possibly incorrect) assumption about
18918 + * what we have there. We'll do two loops most likely,
18919 + * but we'll get an ownership MESI transaction straight away
18920 + * instead of a read transaction followed by a
18921 + * flush-for-ownership transaction:
18922 + */
18923 + u64 old_val, real_val = 0;
18924 +
18925 + do {
18926 + old_val = real_val;
18927 +
18928 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
18929 +
18930 + } while (real_val != old_val);
18931 +
18932 + return old_val;
18933 +}
18934 +EXPORT_SYMBOL(atomic64_xchg_unchecked);
18935 +
18936 +/**
18937 * atomic64_set - set atomic64 variable
18938 * @ptr: pointer to type atomic64_t
18939 * @new_val: value to assign
18940 @@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 n
18941 EXPORT_SYMBOL(atomic64_set);
18942
18943 /**
18944 -EXPORT_SYMBOL(atomic64_read);
18945 + * atomic64_unchecked_set - set atomic64 variable
18946 + * @ptr: pointer to type atomic64_unchecked_t
18947 + * @new_val: value to assign
18948 + *
18949 + * Atomically sets the value of @ptr to @new_val.
18950 + */
18951 +void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
18952 +{
18953 + atomic64_xchg_unchecked(ptr, new_val);
18954 +}
18955 +EXPORT_SYMBOL(atomic64_set_unchecked);
18956 +
18957 +/**
18958 * atomic64_add_return - add and return
18959 * @delta: integer value to add
18960 * @ptr: pointer to type atomic64_t
18961 @@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 del
18962 }
18963 EXPORT_SYMBOL(atomic64_add_return);
18964
18965 +/**
18966 + * atomic64_add_return_unchecked - add and return
18967 + * @delta: integer value to add
18968 + * @ptr: pointer to type atomic64_unchecked_t
18969 + *
18970 + * Atomically adds @delta to @ptr and returns @delta + *@ptr
18971 + */
18972 +noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18973 +{
18974 + /*
18975 + * Try first with a (possibly incorrect) assumption about
18976 + * what we have there. We'll do two loops most likely,
18977 + * but we'll get an ownership MESI transaction straight away
18978 + * instead of a read transaction followed by a
18979 + * flush-for-ownership transaction:
18980 + */
18981 + u64 old_val, new_val, real_val = 0;
18982 +
18983 + do {
18984 + old_val = real_val;
18985 + new_val = old_val + delta;
18986 +
18987 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
18988 +
18989 + } while (real_val != old_val);
18990 +
18991 + return new_val;
18992 +}
18993 +EXPORT_SYMBOL(atomic64_add_return_unchecked);
18994 +
18995 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
18996 {
18997 return atomic64_add_return(-delta, ptr);
18998 }
18999 EXPORT_SYMBOL(atomic64_sub_return);
19000
19001 +u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
19002 +{
19003 + return atomic64_add_return_unchecked(-delta, ptr);
19004 +}
19005 +EXPORT_SYMBOL(atomic64_sub_return_unchecked);
19006 +
19007 u64 atomic64_inc_return(atomic64_t *ptr)
19008 {
19009 return atomic64_add_return(1, ptr);
19010 }
19011 EXPORT_SYMBOL(atomic64_inc_return);
19012
19013 +u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
19014 +{
19015 + return atomic64_add_return_unchecked(1, ptr);
19016 +}
19017 +EXPORT_SYMBOL(atomic64_inc_return_unchecked);
19018 +
19019 u64 atomic64_dec_return(atomic64_t *ptr)
19020 {
19021 return atomic64_sub_return(1, ptr);
19022 }
19023 EXPORT_SYMBOL(atomic64_dec_return);
19024
19025 +u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
19026 +{
19027 + return atomic64_sub_return_unchecked(1, ptr);
19028 +}
19029 +EXPORT_SYMBOL(atomic64_dec_return_unchecked);
19030 +
19031 /**
19032 * atomic64_add - add integer to atomic64 variable
19033 * @delta: integer value to add
19034 @@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t
19035 EXPORT_SYMBOL(atomic64_add);
19036
19037 /**
19038 + * atomic64_add_unchecked - add integer to atomic64 variable
19039 + * @delta: integer value to add
19040 + * @ptr: pointer to type atomic64_unchecked_t
19041 + *
19042 + * Atomically adds @delta to @ptr.
19043 + */
19044 +void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
19045 +{
19046 + atomic64_add_return_unchecked(delta, ptr);
19047 +}
19048 +EXPORT_SYMBOL(atomic64_add_unchecked);
19049 +
19050 +/**
19051 * atomic64_sub - subtract the atomic64 variable
19052 * @delta: integer value to subtract
19053 * @ptr: pointer to type atomic64_t
19054 @@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t
19055 EXPORT_SYMBOL(atomic64_sub);
19056
19057 /**
19058 + * atomic64_sub_unchecked - subtract the atomic64 variable
19059 + * @delta: integer value to subtract
19060 + * @ptr: pointer to type atomic64_unchecked_t
19061 + *
19062 + * Atomically subtracts @delta from @ptr.
19063 + */
19064 +void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
19065 +{
19066 + atomic64_add_unchecked(-delta, ptr);
19067 +}
19068 +EXPORT_SYMBOL(atomic64_sub_unchecked);
19069 +
19070 +/**
19071 * atomic64_sub_and_test - subtract value from variable and test result
19072 * @delta: integer value to subtract
19073 * @ptr: pointer to type atomic64_t
19074 @@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
19075 EXPORT_SYMBOL(atomic64_inc);
19076
19077 /**
19078 + * atomic64_inc_unchecked - increment atomic64 variable
19079 + * @ptr: pointer to type atomic64_unchecked_t
19080 + *
19081 + * Atomically increments @ptr by 1.
19082 + */
19083 +void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
19084 +{
19085 + atomic64_add_unchecked(1, ptr);
19086 +}
19087 +EXPORT_SYMBOL(atomic64_inc_unchecked);
19088 +
19089 +/**
19090 * atomic64_dec - decrement atomic64 variable
19091 * @ptr: pointer to type atomic64_t
19092 *
19093 @@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
19094 EXPORT_SYMBOL(atomic64_dec);
19095
19096 /**
19097 + * atomic64_dec_unchecked - decrement atomic64 variable
19098 + * @ptr: pointer to type atomic64_unchecked_t
19099 + *
19100 + * Atomically decrements @ptr by 1.
19101 + */
19102 +void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
19103 +{
19104 + atomic64_sub_unchecked(1, ptr);
19105 +}
19106 +EXPORT_SYMBOL(atomic64_dec_unchecked);
19107 +
19108 +/**
19109 * atomic64_dec_and_test - decrement and test
19110 * @ptr: pointer to type atomic64_t
19111 *
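
[Editorial note] All of the *_unchecked 64-bit primitives added above are built from the single cmpxchg8b-based compare-and-swap in a retry loop; as the in-code comment explains, starting from a guessed old value means the first (almost certainly failing) CAS already takes the cache line for ownership instead of doing a plain read first. A minimal user-space sketch of that loop, using the GCC __sync builtin in place of the kernel's cmpxchg8b wrapper:

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t atomic64_add_return_sketch(uint64_t delta, volatile uint64_t *ptr)
	{
		/* guess an old value; the first CAS likely fails but already
		 * acquires the line for ownership (the MESI point above) */
		uint64_t old_val, new_val, real_val = 0;

		do {
			old_val  = real_val;
			new_val  = old_val + delta;
			real_val = __sync_val_compare_and_swap(ptr, old_val, new_val);
		} while (real_val != old_val);

		return new_val;
	}

	int main(void)
	{
		volatile uint64_t counter = 40;
		printf("%llu\n", (unsigned long long)atomic64_add_return_sketch(2, &counter));
		return 0;
	}
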
19112 diff -urNp linux-2.6.32.45/arch/x86/lib/checksum_32.S linux-2.6.32.45/arch/x86/lib/checksum_32.S
19113 --- linux-2.6.32.45/arch/x86/lib/checksum_32.S 2011-03-27 14:31:47.000000000 -0400
19114 +++ linux-2.6.32.45/arch/x86/lib/checksum_32.S 2011-04-17 15:56:46.000000000 -0400
19115 @@ -28,7 +28,8 @@
19116 #include <linux/linkage.h>
19117 #include <asm/dwarf2.h>
19118 #include <asm/errno.h>
19119 -
19120 +#include <asm/segment.h>
19121 +
19122 /*
19123 * computes a partial checksum, e.g. for TCP/UDP fragments
19124 */
19125 @@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (
19126
19127 #define ARGBASE 16
19128 #define FP 12
19129 -
19130 -ENTRY(csum_partial_copy_generic)
19131 +
19132 +ENTRY(csum_partial_copy_generic_to_user)
19133 CFI_STARTPROC
19134 +
19135 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19136 + pushl %gs
19137 + CFI_ADJUST_CFA_OFFSET 4
19138 + popl %es
19139 + CFI_ADJUST_CFA_OFFSET -4
19140 + jmp csum_partial_copy_generic
19141 +#endif
19142 +
19143 +ENTRY(csum_partial_copy_generic_from_user)
19144 +
19145 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19146 + pushl %gs
19147 + CFI_ADJUST_CFA_OFFSET 4
19148 + popl %ds
19149 + CFI_ADJUST_CFA_OFFSET -4
19150 +#endif
19151 +
19152 +ENTRY(csum_partial_copy_generic)
19153 subl $4,%esp
19154 CFI_ADJUST_CFA_OFFSET 4
19155 pushl %edi
19156 @@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
19157 jmp 4f
19158 SRC(1: movw (%esi), %bx )
19159 addl $2, %esi
19160 -DST( movw %bx, (%edi) )
19161 +DST( movw %bx, %es:(%edi) )
19162 addl $2, %edi
19163 addw %bx, %ax
19164 adcl $0, %eax
19165 @@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
19166 SRC(1: movl (%esi), %ebx )
19167 SRC( movl 4(%esi), %edx )
19168 adcl %ebx, %eax
19169 -DST( movl %ebx, (%edi) )
19170 +DST( movl %ebx, %es:(%edi) )
19171 adcl %edx, %eax
19172 -DST( movl %edx, 4(%edi) )
19173 +DST( movl %edx, %es:4(%edi) )
19174
19175 SRC( movl 8(%esi), %ebx )
19176 SRC( movl 12(%esi), %edx )
19177 adcl %ebx, %eax
19178 -DST( movl %ebx, 8(%edi) )
19179 +DST( movl %ebx, %es:8(%edi) )
19180 adcl %edx, %eax
19181 -DST( movl %edx, 12(%edi) )
19182 +DST( movl %edx, %es:12(%edi) )
19183
19184 SRC( movl 16(%esi), %ebx )
19185 SRC( movl 20(%esi), %edx )
19186 adcl %ebx, %eax
19187 -DST( movl %ebx, 16(%edi) )
19188 +DST( movl %ebx, %es:16(%edi) )
19189 adcl %edx, %eax
19190 -DST( movl %edx, 20(%edi) )
19191 +DST( movl %edx, %es:20(%edi) )
19192
19193 SRC( movl 24(%esi), %ebx )
19194 SRC( movl 28(%esi), %edx )
19195 adcl %ebx, %eax
19196 -DST( movl %ebx, 24(%edi) )
19197 +DST( movl %ebx, %es:24(%edi) )
19198 adcl %edx, %eax
19199 -DST( movl %edx, 28(%edi) )
19200 +DST( movl %edx, %es:28(%edi) )
19201
19202 lea 32(%esi), %esi
19203 lea 32(%edi), %edi
19204 @@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
19205 shrl $2, %edx # This clears CF
19206 SRC(3: movl (%esi), %ebx )
19207 adcl %ebx, %eax
19208 -DST( movl %ebx, (%edi) )
19209 +DST( movl %ebx, %es:(%edi) )
19210 lea 4(%esi), %esi
19211 lea 4(%edi), %edi
19212 dec %edx
19213 @@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
19214 jb 5f
19215 SRC( movw (%esi), %cx )
19216 leal 2(%esi), %esi
19217 -DST( movw %cx, (%edi) )
19218 +DST( movw %cx, %es:(%edi) )
19219 leal 2(%edi), %edi
19220 je 6f
19221 shll $16,%ecx
19222 SRC(5: movb (%esi), %cl )
19223 -DST( movb %cl, (%edi) )
19224 +DST( movb %cl, %es:(%edi) )
19225 6: addl %ecx, %eax
19226 adcl $0, %eax
19227 7:
19228 @@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
19229
19230 6001:
19231 movl ARGBASE+20(%esp), %ebx # src_err_ptr
19232 - movl $-EFAULT, (%ebx)
19233 + movl $-EFAULT, %ss:(%ebx)
19234
19235 # zero the complete destination - computing the rest
19236 # is too much work
19237 @@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
19238
19239 6002:
19240 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19241 - movl $-EFAULT,(%ebx)
19242 + movl $-EFAULT,%ss:(%ebx)
19243 jmp 5000b
19244
19245 .previous
19246
19247 + pushl %ss
19248 + CFI_ADJUST_CFA_OFFSET 4
19249 + popl %ds
19250 + CFI_ADJUST_CFA_OFFSET -4
19251 + pushl %ss
19252 + CFI_ADJUST_CFA_OFFSET 4
19253 + popl %es
19254 + CFI_ADJUST_CFA_OFFSET -4
19255 popl %ebx
19256 CFI_ADJUST_CFA_OFFSET -4
19257 CFI_RESTORE ebx
19258 @@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
19259 CFI_ADJUST_CFA_OFFSET -4
19260 ret
19261 CFI_ENDPROC
19262 -ENDPROC(csum_partial_copy_generic)
19263 +ENDPROC(csum_partial_copy_generic_to_user)
19264
19265 #else
19266
19267 /* Version for PentiumII/PPro */
19268
19269 #define ROUND1(x) \
19270 + nop; nop; nop; \
19271 SRC(movl x(%esi), %ebx ) ; \
19272 addl %ebx, %eax ; \
19273 - DST(movl %ebx, x(%edi) ) ;
19274 + DST(movl %ebx, %es:x(%edi)) ;
19275
19276 #define ROUND(x) \
19277 + nop; nop; nop; \
19278 SRC(movl x(%esi), %ebx ) ; \
19279 adcl %ebx, %eax ; \
19280 - DST(movl %ebx, x(%edi) ) ;
19281 + DST(movl %ebx, %es:x(%edi)) ;
19282
19283 #define ARGBASE 12
19284 -
19285 -ENTRY(csum_partial_copy_generic)
19286 +
19287 +ENTRY(csum_partial_copy_generic_to_user)
19288 CFI_STARTPROC
19289 +
19290 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19291 + pushl %gs
19292 + CFI_ADJUST_CFA_OFFSET 4
19293 + popl %es
19294 + CFI_ADJUST_CFA_OFFSET -4
19295 + jmp csum_partial_copy_generic
19296 +#endif
19297 +
19298 +ENTRY(csum_partial_copy_generic_from_user)
19299 +
19300 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19301 + pushl %gs
19302 + CFI_ADJUST_CFA_OFFSET 4
19303 + popl %ds
19304 + CFI_ADJUST_CFA_OFFSET -4
19305 +#endif
19306 +
19307 +ENTRY(csum_partial_copy_generic)
19308 pushl %ebx
19309 CFI_ADJUST_CFA_OFFSET 4
19310 CFI_REL_OFFSET ebx, 0
19311 @@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
19312 subl %ebx, %edi
19313 lea -1(%esi),%edx
19314 andl $-32,%edx
19315 - lea 3f(%ebx,%ebx), %ebx
19316 + lea 3f(%ebx,%ebx,2), %ebx
19317 testl %esi, %esi
19318 jmp *%ebx
19319 1: addl $64,%esi
19320 @@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
19321 jb 5f
19322 SRC( movw (%esi), %dx )
19323 leal 2(%esi), %esi
19324 -DST( movw %dx, (%edi) )
19325 +DST( movw %dx, %es:(%edi) )
19326 leal 2(%edi), %edi
19327 je 6f
19328 shll $16,%edx
19329 5:
19330 SRC( movb (%esi), %dl )
19331 -DST( movb %dl, (%edi) )
19332 +DST( movb %dl, %es:(%edi) )
19333 6: addl %edx, %eax
19334 adcl $0, %eax
19335 7:
19336 .section .fixup, "ax"
19337 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
19338 - movl $-EFAULT, (%ebx)
19339 + movl $-EFAULT, %ss:(%ebx)
19340 # zero the complete destination (computing the rest is too much work)
19341 movl ARGBASE+8(%esp),%edi # dst
19342 movl ARGBASE+12(%esp),%ecx # len
19343 @@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
19344 rep; stosb
19345 jmp 7b
19346 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19347 - movl $-EFAULT, (%ebx)
19348 + movl $-EFAULT, %ss:(%ebx)
19349 jmp 7b
19350 .previous
19351
19352 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19353 + pushl %ss
19354 + CFI_ADJUST_CFA_OFFSET 4
19355 + popl %ds
19356 + CFI_ADJUST_CFA_OFFSET -4
19357 + pushl %ss
19358 + CFI_ADJUST_CFA_OFFSET 4
19359 + popl %es
19360 + CFI_ADJUST_CFA_OFFSET -4
19361 +#endif
19362 +
19363 popl %esi
19364 CFI_ADJUST_CFA_OFFSET -4
19365 CFI_RESTORE esi
19366 @@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
19367 CFI_RESTORE ebx
19368 ret
19369 CFI_ENDPROC
19370 -ENDPROC(csum_partial_copy_generic)
19371 +ENDPROC(csum_partial_copy_generic_to_user)
19372
19373 #undef ROUND
19374 #undef ROUND1
19375 diff -urNp linux-2.6.32.45/arch/x86/lib/clear_page_64.S linux-2.6.32.45/arch/x86/lib/clear_page_64.S
19376 --- linux-2.6.32.45/arch/x86/lib/clear_page_64.S 2011-03-27 14:31:47.000000000 -0400
19377 +++ linux-2.6.32.45/arch/x86/lib/clear_page_64.S 2011-04-17 15:56:46.000000000 -0400
19378 @@ -43,7 +43,7 @@ ENDPROC(clear_page)
19379
19380 #include <asm/cpufeature.h>
19381
19382 - .section .altinstr_replacement,"ax"
19383 + .section .altinstr_replacement,"a"
19384 1: .byte 0xeb /* jmp <disp8> */
19385 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
19386 2:
19387 diff -urNp linux-2.6.32.45/arch/x86/lib/copy_page_64.S linux-2.6.32.45/arch/x86/lib/copy_page_64.S
19388 --- linux-2.6.32.45/arch/x86/lib/copy_page_64.S 2011-03-27 14:31:47.000000000 -0400
19389 +++ linux-2.6.32.45/arch/x86/lib/copy_page_64.S 2011-04-17 15:56:46.000000000 -0400
19390 @@ -104,7 +104,7 @@ ENDPROC(copy_page)
19391
19392 #include <asm/cpufeature.h>
19393
19394 - .section .altinstr_replacement,"ax"
19395 + .section .altinstr_replacement,"a"
19396 1: .byte 0xeb /* jmp <disp8> */
19397 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
19398 2:
19399 diff -urNp linux-2.6.32.45/arch/x86/lib/copy_user_64.S linux-2.6.32.45/arch/x86/lib/copy_user_64.S
19400 --- linux-2.6.32.45/arch/x86/lib/copy_user_64.S 2011-06-25 12:55:34.000000000 -0400
19401 +++ linux-2.6.32.45/arch/x86/lib/copy_user_64.S 2011-06-25 12:56:37.000000000 -0400
19402 @@ -15,13 +15,14 @@
19403 #include <asm/asm-offsets.h>
19404 #include <asm/thread_info.h>
19405 #include <asm/cpufeature.h>
19406 +#include <asm/pgtable.h>
19407
19408 .macro ALTERNATIVE_JUMP feature,orig,alt
19409 0:
19410 .byte 0xe9 /* 32bit jump */
19411 .long \orig-1f /* by default jump to orig */
19412 1:
19413 - .section .altinstr_replacement,"ax"
19414 + .section .altinstr_replacement,"a"
19415 2: .byte 0xe9 /* near jump with 32bit immediate */
19416 .long \alt-1b /* offset */ /* or alternatively to alt */
19417 .previous
19418 @@ -64,49 +65,19 @@
19419 #endif
19420 .endm
19421
19422 -/* Standard copy_to_user with segment limit checking */
19423 -ENTRY(copy_to_user)
19424 - CFI_STARTPROC
19425 - GET_THREAD_INFO(%rax)
19426 - movq %rdi,%rcx
19427 - addq %rdx,%rcx
19428 - jc bad_to_user
19429 - cmpq TI_addr_limit(%rax),%rcx
19430 - ja bad_to_user
19431 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19432 - CFI_ENDPROC
19433 -ENDPROC(copy_to_user)
19434 -
19435 -/* Standard copy_from_user with segment limit checking */
19436 -ENTRY(copy_from_user)
19437 - CFI_STARTPROC
19438 - GET_THREAD_INFO(%rax)
19439 - movq %rsi,%rcx
19440 - addq %rdx,%rcx
19441 - jc bad_from_user
19442 - cmpq TI_addr_limit(%rax),%rcx
19443 - ja bad_from_user
19444 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19445 - CFI_ENDPROC
19446 -ENDPROC(copy_from_user)
19447 -
19448 ENTRY(copy_user_generic)
19449 CFI_STARTPROC
19450 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19451 CFI_ENDPROC
19452 ENDPROC(copy_user_generic)
19453
19454 -ENTRY(__copy_from_user_inatomic)
19455 - CFI_STARTPROC
19456 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19457 - CFI_ENDPROC
19458 -ENDPROC(__copy_from_user_inatomic)
19459 -
19460 .section .fixup,"ax"
19461 /* must zero dest */
19462 ENTRY(bad_from_user)
19463 bad_from_user:
19464 CFI_STARTPROC
19465 + testl %edx,%edx
19466 + js bad_to_user
19467 movl %edx,%ecx
19468 xorl %eax,%eax
19469 rep
19470 diff -urNp linux-2.6.32.45/arch/x86/lib/copy_user_nocache_64.S linux-2.6.32.45/arch/x86/lib/copy_user_nocache_64.S
19471 --- linux-2.6.32.45/arch/x86/lib/copy_user_nocache_64.S 2011-03-27 14:31:47.000000000 -0400
19472 +++ linux-2.6.32.45/arch/x86/lib/copy_user_nocache_64.S 2011-04-17 15:56:46.000000000 -0400
19473 @@ -14,6 +14,7 @@
19474 #include <asm/current.h>
19475 #include <asm/asm-offsets.h>
19476 #include <asm/thread_info.h>
19477 +#include <asm/pgtable.h>
19478
19479 .macro ALIGN_DESTINATION
19480 #ifdef FIX_ALIGNMENT
19481 @@ -50,6 +51,15 @@
19482 */
19483 ENTRY(__copy_user_nocache)
19484 CFI_STARTPROC
19485 +
19486 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19487 + mov $PAX_USER_SHADOW_BASE,%rcx
19488 + cmp %rcx,%rsi
19489 + jae 1f
19490 + add %rcx,%rsi
19491 +1:
19492 +#endif
19493 +
19494 cmpl $8,%edx
19495 jb 20f /* less then 8 bytes, go to byte copy loop */
19496 ALIGN_DESTINATION
19497 diff -urNp linux-2.6.32.45/arch/x86/lib/csum-wrappers_64.c linux-2.6.32.45/arch/x86/lib/csum-wrappers_64.c
19498 --- linux-2.6.32.45/arch/x86/lib/csum-wrappers_64.c 2011-03-27 14:31:47.000000000 -0400
19499 +++ linux-2.6.32.45/arch/x86/lib/csum-wrappers_64.c 2011-05-04 17:56:20.000000000 -0400
19500 @@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
19501 len -= 2;
19502 }
19503 }
19504 +
19505 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19506 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19507 + src += PAX_USER_SHADOW_BASE;
19508 +#endif
19509 +
19510 isum = csum_partial_copy_generic((__force const void *)src,
19511 dst, len, isum, errp, NULL);
19512 if (unlikely(*errp))
19513 @@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
19514 }
19515
19516 *errp = 0;
19517 +
19518 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19519 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
19520 + dst += PAX_USER_SHADOW_BASE;
19521 +#endif
19522 +
19523 return csum_partial_copy_generic(src, (void __force *)dst,
19524 len, isum, NULL, errp);
19525 }
19526 diff -urNp linux-2.6.32.45/arch/x86/lib/getuser.S linux-2.6.32.45/arch/x86/lib/getuser.S
19527 --- linux-2.6.32.45/arch/x86/lib/getuser.S 2011-03-27 14:31:47.000000000 -0400
19528 +++ linux-2.6.32.45/arch/x86/lib/getuser.S 2011-04-17 15:56:46.000000000 -0400
19529 @@ -33,14 +33,35 @@
19530 #include <asm/asm-offsets.h>
19531 #include <asm/thread_info.h>
19532 #include <asm/asm.h>
19533 +#include <asm/segment.h>
19534 +#include <asm/pgtable.h>
19535 +
19536 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19537 +#define __copyuser_seg gs;
19538 +#else
19539 +#define __copyuser_seg
19540 +#endif
19541
19542 .text
19543 ENTRY(__get_user_1)
19544 CFI_STARTPROC
19545 +
19546 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19547 GET_THREAD_INFO(%_ASM_DX)
19548 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19549 jae bad_get_user
19550 -1: movzb (%_ASM_AX),%edx
19551 +
19552 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19553 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19554 + cmp %_ASM_DX,%_ASM_AX
19555 + jae 1234f
19556 + add %_ASM_DX,%_ASM_AX
19557 +1234:
19558 +#endif
19559 +
19560 +#endif
19561 +
19562 +1: __copyuser_seg movzb (%_ASM_AX),%edx
19563 xor %eax,%eax
19564 ret
19565 CFI_ENDPROC
19566 @@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
19567 ENTRY(__get_user_2)
19568 CFI_STARTPROC
19569 add $1,%_ASM_AX
19570 +
19571 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19572 jc bad_get_user
19573 GET_THREAD_INFO(%_ASM_DX)
19574 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19575 jae bad_get_user
19576 -2: movzwl -1(%_ASM_AX),%edx
19577 +
19578 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19579 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19580 + cmp %_ASM_DX,%_ASM_AX
19581 + jae 1234f
19582 + add %_ASM_DX,%_ASM_AX
19583 +1234:
19584 +#endif
19585 +
19586 +#endif
19587 +
19588 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
19589 xor %eax,%eax
19590 ret
19591 CFI_ENDPROC
19592 @@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
19593 ENTRY(__get_user_4)
19594 CFI_STARTPROC
19595 add $3,%_ASM_AX
19596 +
19597 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19598 jc bad_get_user
19599 GET_THREAD_INFO(%_ASM_DX)
19600 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19601 jae bad_get_user
19602 -3: mov -3(%_ASM_AX),%edx
19603 +
19604 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19605 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19606 + cmp %_ASM_DX,%_ASM_AX
19607 + jae 1234f
19608 + add %_ASM_DX,%_ASM_AX
19609 +1234:
19610 +#endif
19611 +
19612 +#endif
19613 +
19614 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
19615 xor %eax,%eax
19616 ret
19617 CFI_ENDPROC
19618 @@ -80,6 +127,15 @@ ENTRY(__get_user_8)
19619 GET_THREAD_INFO(%_ASM_DX)
19620 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19621 jae bad_get_user
19622 +
19623 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19624 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19625 + cmp %_ASM_DX,%_ASM_AX
19626 + jae 1234f
19627 + add %_ASM_DX,%_ASM_AX
19628 +1234:
19629 +#endif
19630 +
19631 4: movq -7(%_ASM_AX),%_ASM_DX
19632 xor %eax,%eax
19633 ret
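
[Editorial note] Under PAX_MEMORY_UDEREF on amd64, the csum-wrappers_64.c, copy_user_nocache_64.S and getuser.S hunks above all apply the same rewrite before a user pointer is dereferenced from kernel context: an address below PAX_USER_SHADOW_BASE is shifted up into the shadow mapping of user space. A minimal runnable sketch of that check; the base value here is only an illustrative stand-in for the kernel constant.

	#include <stdint.h>
	#include <stdio.h>
	#include <inttypes.h>

	/* illustrative stand-in; the real PAX_USER_SHADOW_BASE is a kernel constant */
	#define SHADOW_BASE_SKETCH 0x100000000ULL

	static uint64_t uderef_adjust(uint64_t addr)
	{
		if (addr < SHADOW_BASE_SKETCH)
			addr += SHADOW_BASE_SKETCH;	/* user pointer: remap into shadow area */
		return addr;				/* already above the base: untouched */
	}

	int main(void)
	{
		printf("%#" PRIx64 "\n", uderef_adjust(0x7fff0000ULL));	/* shifted */
		printf("%#" PRIx64 "\n", uderef_adjust(0x2100000000ULL));	/* as-is */
		return 0;
	}
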
19634 diff -urNp linux-2.6.32.45/arch/x86/lib/memcpy_64.S linux-2.6.32.45/arch/x86/lib/memcpy_64.S
19635 --- linux-2.6.32.45/arch/x86/lib/memcpy_64.S 2011-03-27 14:31:47.000000000 -0400
19636 +++ linux-2.6.32.45/arch/x86/lib/memcpy_64.S 2011-04-17 15:56:46.000000000 -0400
19637 @@ -128,7 +128,7 @@ ENDPROC(__memcpy)
19638 * It is also a lot simpler. Use this when possible:
19639 */
19640
19641 - .section .altinstr_replacement, "ax"
19642 + .section .altinstr_replacement, "a"
19643 1: .byte 0xeb /* jmp <disp8> */
19644 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
19645 2:
19646 diff -urNp linux-2.6.32.45/arch/x86/lib/memset_64.S linux-2.6.32.45/arch/x86/lib/memset_64.S
19647 --- linux-2.6.32.45/arch/x86/lib/memset_64.S 2011-03-27 14:31:47.000000000 -0400
19648 +++ linux-2.6.32.45/arch/x86/lib/memset_64.S 2011-04-17 15:56:46.000000000 -0400
19649 @@ -118,7 +118,7 @@ ENDPROC(__memset)
19650
19651 #include <asm/cpufeature.h>
19652
19653 - .section .altinstr_replacement,"ax"
19654 + .section .altinstr_replacement,"a"
19655 1: .byte 0xeb /* jmp <disp8> */
19656 .byte (memset_c - memset) - (2f - 1b) /* offset */
19657 2:
19658 diff -urNp linux-2.6.32.45/arch/x86/lib/mmx_32.c linux-2.6.32.45/arch/x86/lib/mmx_32.c
19659 --- linux-2.6.32.45/arch/x86/lib/mmx_32.c 2011-03-27 14:31:47.000000000 -0400
19660 +++ linux-2.6.32.45/arch/x86/lib/mmx_32.c 2011-04-17 15:56:46.000000000 -0400
19661 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
19662 {
19663 void *p;
19664 int i;
19665 + unsigned long cr0;
19666
19667 if (unlikely(in_interrupt()))
19668 return __memcpy(to, from, len);
19669 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
19670 kernel_fpu_begin();
19671
19672 __asm__ __volatile__ (
19673 - "1: prefetch (%0)\n" /* This set is 28 bytes */
19674 - " prefetch 64(%0)\n"
19675 - " prefetch 128(%0)\n"
19676 - " prefetch 192(%0)\n"
19677 - " prefetch 256(%0)\n"
19678 + "1: prefetch (%1)\n" /* This set is 28 bytes */
19679 + " prefetch 64(%1)\n"
19680 + " prefetch 128(%1)\n"
19681 + " prefetch 192(%1)\n"
19682 + " prefetch 256(%1)\n"
19683 "2: \n"
19684 ".section .fixup, \"ax\"\n"
19685 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19686 + "3: \n"
19687 +
19688 +#ifdef CONFIG_PAX_KERNEXEC
19689 + " movl %%cr0, %0\n"
19690 + " movl %0, %%eax\n"
19691 + " andl $0xFFFEFFFF, %%eax\n"
19692 + " movl %%eax, %%cr0\n"
19693 +#endif
19694 +
19695 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19696 +
19697 +#ifdef CONFIG_PAX_KERNEXEC
19698 + " movl %0, %%cr0\n"
19699 +#endif
19700 +
19701 " jmp 2b\n"
19702 ".previous\n"
19703 _ASM_EXTABLE(1b, 3b)
19704 - : : "r" (from));
19705 + : "=&r" (cr0) : "r" (from) : "ax");
19706
19707 for ( ; i > 5; i--) {
19708 __asm__ __volatile__ (
19709 - "1: prefetch 320(%0)\n"
19710 - "2: movq (%0), %%mm0\n"
19711 - " movq 8(%0), %%mm1\n"
19712 - " movq 16(%0), %%mm2\n"
19713 - " movq 24(%0), %%mm3\n"
19714 - " movq %%mm0, (%1)\n"
19715 - " movq %%mm1, 8(%1)\n"
19716 - " movq %%mm2, 16(%1)\n"
19717 - " movq %%mm3, 24(%1)\n"
19718 - " movq 32(%0), %%mm0\n"
19719 - " movq 40(%0), %%mm1\n"
19720 - " movq 48(%0), %%mm2\n"
19721 - " movq 56(%0), %%mm3\n"
19722 - " movq %%mm0, 32(%1)\n"
19723 - " movq %%mm1, 40(%1)\n"
19724 - " movq %%mm2, 48(%1)\n"
19725 - " movq %%mm3, 56(%1)\n"
19726 + "1: prefetch 320(%1)\n"
19727 + "2: movq (%1), %%mm0\n"
19728 + " movq 8(%1), %%mm1\n"
19729 + " movq 16(%1), %%mm2\n"
19730 + " movq 24(%1), %%mm3\n"
19731 + " movq %%mm0, (%2)\n"
19732 + " movq %%mm1, 8(%2)\n"
19733 + " movq %%mm2, 16(%2)\n"
19734 + " movq %%mm3, 24(%2)\n"
19735 + " movq 32(%1), %%mm0\n"
19736 + " movq 40(%1), %%mm1\n"
19737 + " movq 48(%1), %%mm2\n"
19738 + " movq 56(%1), %%mm3\n"
19739 + " movq %%mm0, 32(%2)\n"
19740 + " movq %%mm1, 40(%2)\n"
19741 + " movq %%mm2, 48(%2)\n"
19742 + " movq %%mm3, 56(%2)\n"
19743 ".section .fixup, \"ax\"\n"
19744 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19745 + "3:\n"
19746 +
19747 +#ifdef CONFIG_PAX_KERNEXEC
19748 + " movl %%cr0, %0\n"
19749 + " movl %0, %%eax\n"
19750 + " andl $0xFFFEFFFF, %%eax\n"
19751 + " movl %%eax, %%cr0\n"
19752 +#endif
19753 +
19754 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19755 +
19756 +#ifdef CONFIG_PAX_KERNEXEC
19757 + " movl %0, %%cr0\n"
19758 +#endif
19759 +
19760 " jmp 2b\n"
19761 ".previous\n"
19762 _ASM_EXTABLE(1b, 3b)
19763 - : : "r" (from), "r" (to) : "memory");
19764 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19765
19766 from += 64;
19767 to += 64;
19768 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
19769 static void fast_copy_page(void *to, void *from)
19770 {
19771 int i;
19772 + unsigned long cr0;
19773
19774 kernel_fpu_begin();
19775
19776 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
19777 * but that is for later. -AV
19778 */
19779 __asm__ __volatile__(
19780 - "1: prefetch (%0)\n"
19781 - " prefetch 64(%0)\n"
19782 - " prefetch 128(%0)\n"
19783 - " prefetch 192(%0)\n"
19784 - " prefetch 256(%0)\n"
19785 + "1: prefetch (%1)\n"
19786 + " prefetch 64(%1)\n"
19787 + " prefetch 128(%1)\n"
19788 + " prefetch 192(%1)\n"
19789 + " prefetch 256(%1)\n"
19790 "2: \n"
19791 ".section .fixup, \"ax\"\n"
19792 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19793 + "3: \n"
19794 +
19795 +#ifdef CONFIG_PAX_KERNEXEC
19796 + " movl %%cr0, %0\n"
19797 + " movl %0, %%eax\n"
19798 + " andl $0xFFFEFFFF, %%eax\n"
19799 + " movl %%eax, %%cr0\n"
19800 +#endif
19801 +
19802 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19803 +
19804 +#ifdef CONFIG_PAX_KERNEXEC
19805 + " movl %0, %%cr0\n"
19806 +#endif
19807 +
19808 " jmp 2b\n"
19809 ".previous\n"
19810 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
19811 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
19812
19813 for (i = 0; i < (4096-320)/64; i++) {
19814 __asm__ __volatile__ (
19815 - "1: prefetch 320(%0)\n"
19816 - "2: movq (%0), %%mm0\n"
19817 - " movntq %%mm0, (%1)\n"
19818 - " movq 8(%0), %%mm1\n"
19819 - " movntq %%mm1, 8(%1)\n"
19820 - " movq 16(%0), %%mm2\n"
19821 - " movntq %%mm2, 16(%1)\n"
19822 - " movq 24(%0), %%mm3\n"
19823 - " movntq %%mm3, 24(%1)\n"
19824 - " movq 32(%0), %%mm4\n"
19825 - " movntq %%mm4, 32(%1)\n"
19826 - " movq 40(%0), %%mm5\n"
19827 - " movntq %%mm5, 40(%1)\n"
19828 - " movq 48(%0), %%mm6\n"
19829 - " movntq %%mm6, 48(%1)\n"
19830 - " movq 56(%0), %%mm7\n"
19831 - " movntq %%mm7, 56(%1)\n"
19832 + "1: prefetch 320(%1)\n"
19833 + "2: movq (%1), %%mm0\n"
19834 + " movntq %%mm0, (%2)\n"
19835 + " movq 8(%1), %%mm1\n"
19836 + " movntq %%mm1, 8(%2)\n"
19837 + " movq 16(%1), %%mm2\n"
19838 + " movntq %%mm2, 16(%2)\n"
19839 + " movq 24(%1), %%mm3\n"
19840 + " movntq %%mm3, 24(%2)\n"
19841 + " movq 32(%1), %%mm4\n"
19842 + " movntq %%mm4, 32(%2)\n"
19843 + " movq 40(%1), %%mm5\n"
19844 + " movntq %%mm5, 40(%2)\n"
19845 + " movq 48(%1), %%mm6\n"
19846 + " movntq %%mm6, 48(%2)\n"
19847 + " movq 56(%1), %%mm7\n"
19848 + " movntq %%mm7, 56(%2)\n"
19849 ".section .fixup, \"ax\"\n"
19850 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19851 + "3:\n"
19852 +
19853 +#ifdef CONFIG_PAX_KERNEXEC
19854 + " movl %%cr0, %0\n"
19855 + " movl %0, %%eax\n"
19856 + " andl $0xFFFEFFFF, %%eax\n"
19857 + " movl %%eax, %%cr0\n"
19858 +#endif
19859 +
19860 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19861 +
19862 +#ifdef CONFIG_PAX_KERNEXEC
19863 + " movl %0, %%cr0\n"
19864 +#endif
19865 +
19866 " jmp 2b\n"
19867 ".previous\n"
19868 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
19869 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19870
19871 from += 64;
19872 to += 64;
19873 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
19874 static void fast_copy_page(void *to, void *from)
19875 {
19876 int i;
19877 + unsigned long cr0;
19878
19879 kernel_fpu_begin();
19880
19881 __asm__ __volatile__ (
19882 - "1: prefetch (%0)\n"
19883 - " prefetch 64(%0)\n"
19884 - " prefetch 128(%0)\n"
19885 - " prefetch 192(%0)\n"
19886 - " prefetch 256(%0)\n"
19887 + "1: prefetch (%1)\n"
19888 + " prefetch 64(%1)\n"
19889 + " prefetch 128(%1)\n"
19890 + " prefetch 192(%1)\n"
19891 + " prefetch 256(%1)\n"
19892 "2: \n"
19893 ".section .fixup, \"ax\"\n"
19894 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19895 + "3: \n"
19896 +
19897 +#ifdef CONFIG_PAX_KERNEXEC
19898 + " movl %%cr0, %0\n"
19899 + " movl %0, %%eax\n"
19900 + " andl $0xFFFEFFFF, %%eax\n"
19901 + " movl %%eax, %%cr0\n"
19902 +#endif
19903 +
19904 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19905 +
19906 +#ifdef CONFIG_PAX_KERNEXEC
19907 + " movl %0, %%cr0\n"
19908 +#endif
19909 +
19910 " jmp 2b\n"
19911 ".previous\n"
19912 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
19913 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
19914
19915 for (i = 0; i < 4096/64; i++) {
19916 __asm__ __volatile__ (
19917 - "1: prefetch 320(%0)\n"
19918 - "2: movq (%0), %%mm0\n"
19919 - " movq 8(%0), %%mm1\n"
19920 - " movq 16(%0), %%mm2\n"
19921 - " movq 24(%0), %%mm3\n"
19922 - " movq %%mm0, (%1)\n"
19923 - " movq %%mm1, 8(%1)\n"
19924 - " movq %%mm2, 16(%1)\n"
19925 - " movq %%mm3, 24(%1)\n"
19926 - " movq 32(%0), %%mm0\n"
19927 - " movq 40(%0), %%mm1\n"
19928 - " movq 48(%0), %%mm2\n"
19929 - " movq 56(%0), %%mm3\n"
19930 - " movq %%mm0, 32(%1)\n"
19931 - " movq %%mm1, 40(%1)\n"
19932 - " movq %%mm2, 48(%1)\n"
19933 - " movq %%mm3, 56(%1)\n"
19934 + "1: prefetch 320(%1)\n"
19935 + "2: movq (%1), %%mm0\n"
19936 + " movq 8(%1), %%mm1\n"
19937 + " movq 16(%1), %%mm2\n"
19938 + " movq 24(%1), %%mm3\n"
19939 + " movq %%mm0, (%2)\n"
19940 + " movq %%mm1, 8(%2)\n"
19941 + " movq %%mm2, 16(%2)\n"
19942 + " movq %%mm3, 24(%2)\n"
19943 + " movq 32(%1), %%mm0\n"
19944 + " movq 40(%1), %%mm1\n"
19945 + " movq 48(%1), %%mm2\n"
19946 + " movq 56(%1), %%mm3\n"
19947 + " movq %%mm0, 32(%2)\n"
19948 + " movq %%mm1, 40(%2)\n"
19949 + " movq %%mm2, 48(%2)\n"
19950 + " movq %%mm3, 56(%2)\n"
19951 ".section .fixup, \"ax\"\n"
19952 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19953 + "3:\n"
19954 +
19955 +#ifdef CONFIG_PAX_KERNEXEC
19956 + " movl %%cr0, %0\n"
19957 + " movl %0, %%eax\n"
19958 + " andl $0xFFFEFFFF, %%eax\n"
19959 + " movl %%eax, %%cr0\n"
19960 +#endif
19961 +
19962 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19963 +
19964 +#ifdef CONFIG_PAX_KERNEXEC
19965 + " movl %0, %%cr0\n"
19966 +#endif
19967 +
19968 " jmp 2b\n"
19969 ".previous\n"
19970 _ASM_EXTABLE(1b, 3b)
19971 - : : "r" (from), "r" (to) : "memory");
19972 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19973
19974 from += 64;
19975 to += 64;
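
[Editorial note] The KERNEXEC blocks added to the mmx_32.c fixup paths above save CR0, clear its WP bit (bit 16, which is exactly what the 0xFFFEFFFF mask does), patch the faulting prefetch into a short jmp, and then restore CR0: with KERNEXEC the kernel text being patched is otherwise write-protected. The sketch below only simulates CR0 in a variable so it can run in user space; the real sequence uses privileged mov-to/from-CR0 and cannot run outside ring 0.

	#include <stdint.h>
	#include <stdio.h>

	#define X86_CR0_WP (1UL << 16)		/* the bit cleared by the 0xFFFEFFFF mask */

	static unsigned long simulated_cr0 = 0x80050033UL;	/* typical CR0, WP set */

	static unsigned long read_cr0(void)        { return simulated_cr0; }
	static void write_cr0(unsigned long v)     { simulated_cr0 = v; }

	static void patch_kernel_text_sketch(void)
	{
		unsigned long cr0 = read_cr0();		/* save */
		write_cr0(cr0 & ~X86_CR0_WP);		/* drop write protection */
		/* ... overwrite the faulting prefetch with a short jmp here ... */
		write_cr0(cr0);				/* restore */
	}

	int main(void)
	{
		patch_kernel_text_sketch();
		printf("CR0 restored to %#lx\n", simulated_cr0);
		return 0;
	}
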
19976 diff -urNp linux-2.6.32.45/arch/x86/lib/putuser.S linux-2.6.32.45/arch/x86/lib/putuser.S
19977 --- linux-2.6.32.45/arch/x86/lib/putuser.S 2011-03-27 14:31:47.000000000 -0400
19978 +++ linux-2.6.32.45/arch/x86/lib/putuser.S 2011-04-17 15:56:46.000000000 -0400
19979 @@ -15,7 +15,8 @@
19980 #include <asm/thread_info.h>
19981 #include <asm/errno.h>
19982 #include <asm/asm.h>
19983 -
19984 +#include <asm/segment.h>
19985 +#include <asm/pgtable.h>
19986
19987 /*
19988 * __put_user_X
19989 @@ -29,52 +30,119 @@
19990 * as they get called from within inline assembly.
19991 */
19992
19993 -#define ENTER CFI_STARTPROC ; \
19994 - GET_THREAD_INFO(%_ASM_BX)
19995 +#define ENTER CFI_STARTPROC
19996 #define EXIT ret ; \
19997 CFI_ENDPROC
19998
19999 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20000 +#define _DEST %_ASM_CX,%_ASM_BX
20001 +#else
20002 +#define _DEST %_ASM_CX
20003 +#endif
20004 +
20005 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20006 +#define __copyuser_seg gs;
20007 +#else
20008 +#define __copyuser_seg
20009 +#endif
20010 +
20011 .text
20012 ENTRY(__put_user_1)
20013 ENTER
20014 +
20015 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20016 + GET_THREAD_INFO(%_ASM_BX)
20017 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
20018 jae bad_put_user
20019 -1: movb %al,(%_ASM_CX)
20020 +
20021 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20022 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20023 + cmp %_ASM_BX,%_ASM_CX
20024 + jb 1234f
20025 + xor %ebx,%ebx
20026 +1234:
20027 +#endif
20028 +
20029 +#endif
20030 +
20031 +1: __copyuser_seg movb %al,(_DEST)
20032 xor %eax,%eax
20033 EXIT
20034 ENDPROC(__put_user_1)
20035
20036 ENTRY(__put_user_2)
20037 ENTER
20038 +
20039 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20040 + GET_THREAD_INFO(%_ASM_BX)
20041 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20042 sub $1,%_ASM_BX
20043 cmp %_ASM_BX,%_ASM_CX
20044 jae bad_put_user
20045 -2: movw %ax,(%_ASM_CX)
20046 +
20047 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20048 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20049 + cmp %_ASM_BX,%_ASM_CX
20050 + jb 1234f
20051 + xor %ebx,%ebx
20052 +1234:
20053 +#endif
20054 +
20055 +#endif
20056 +
20057 +2: __copyuser_seg movw %ax,(_DEST)
20058 xor %eax,%eax
20059 EXIT
20060 ENDPROC(__put_user_2)
20061
20062 ENTRY(__put_user_4)
20063 ENTER
20064 +
20065 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20066 + GET_THREAD_INFO(%_ASM_BX)
20067 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20068 sub $3,%_ASM_BX
20069 cmp %_ASM_BX,%_ASM_CX
20070 jae bad_put_user
20071 -3: movl %eax,(%_ASM_CX)
20072 +
20073 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20074 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20075 + cmp %_ASM_BX,%_ASM_CX
20076 + jb 1234f
20077 + xor %ebx,%ebx
20078 +1234:
20079 +#endif
20080 +
20081 +#endif
20082 +
20083 +3: __copyuser_seg movl %eax,(_DEST)
20084 xor %eax,%eax
20085 EXIT
20086 ENDPROC(__put_user_4)
20087
20088 ENTRY(__put_user_8)
20089 ENTER
20090 +
20091 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20092 + GET_THREAD_INFO(%_ASM_BX)
20093 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20094 sub $7,%_ASM_BX
20095 cmp %_ASM_BX,%_ASM_CX
20096 jae bad_put_user
20097 -4: mov %_ASM_AX,(%_ASM_CX)
20098 +
20099 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20100 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20101 + cmp %_ASM_BX,%_ASM_CX
20102 + jb 1234f
20103 + xor %ebx,%ebx
20104 +1234:
20105 +#endif
20106 +
20107 +#endif
20108 +
20109 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
20110 #ifdef CONFIG_X86_32
20111 -5: movl %edx,4(%_ASM_CX)
20112 +5: __copyuser_seg movl %edx,4(_DEST)
20113 #endif
20114 xor %eax,%eax
20115 EXIT
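
[Editorial note] On 32-bit with PAX_MEMORY_UDEREF, user space is reached through the %gs segment rather than the flat %ds/%es, so the getuser.S/putuser.S entries above and the usercopy_32.c inline assembly in the next diff get a __copyuser_seg prefix spliced in front of each user-memory access. In the C file the prefix is an ordinary string macro, and adjacent-string concatenation assembles the final instruction template. The sketch below only prints the resulting template (no segment register is touched); the macro definitions are illustrative, since the real ones live in a header not shown in this hunk.

	#include <stdio.h>

	#define UDEREF_SKETCH 1			/* pretend the option is enabled */

	#if UDEREF_SKETCH
	#define __copyuser_seg "gs; "
	#else
	#define __copyuser_seg ""
	#endif

	int main(void)
	{
		/* mirrors "0: rep; "__copyuser_seg"movsl\n" in the usercopy hunks */
		puts("0: rep; " __copyuser_seg "movsl");
		return 0;
	}
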
20116 diff -urNp linux-2.6.32.45/arch/x86/lib/usercopy_32.c linux-2.6.32.45/arch/x86/lib/usercopy_32.c
20117 --- linux-2.6.32.45/arch/x86/lib/usercopy_32.c 2011-03-27 14:31:47.000000000 -0400
20118 +++ linux-2.6.32.45/arch/x86/lib/usercopy_32.c 2011-04-23 21:12:28.000000000 -0400
20119 @@ -43,7 +43,7 @@ do { \
20120 __asm__ __volatile__( \
20121 " testl %1,%1\n" \
20122 " jz 2f\n" \
20123 - "0: lodsb\n" \
20124 + "0: "__copyuser_seg"lodsb\n" \
20125 " stosb\n" \
20126 " testb %%al,%%al\n" \
20127 " jz 1f\n" \
20128 @@ -128,10 +128,12 @@ do { \
20129 int __d0; \
20130 might_fault(); \
20131 __asm__ __volatile__( \
20132 + __COPYUSER_SET_ES \
20133 "0: rep; stosl\n" \
20134 " movl %2,%0\n" \
20135 "1: rep; stosb\n" \
20136 "2:\n" \
20137 + __COPYUSER_RESTORE_ES \
20138 ".section .fixup,\"ax\"\n" \
20139 "3: lea 0(%2,%0,4),%0\n" \
20140 " jmp 2b\n" \
20141 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
20142 might_fault();
20143
20144 __asm__ __volatile__(
20145 + __COPYUSER_SET_ES
20146 " testl %0, %0\n"
20147 " jz 3f\n"
20148 " andl %0,%%ecx\n"
20149 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
20150 " subl %%ecx,%0\n"
20151 " addl %0,%%eax\n"
20152 "1:\n"
20153 + __COPYUSER_RESTORE_ES
20154 ".section .fixup,\"ax\"\n"
20155 "2: xorl %%eax,%%eax\n"
20156 " jmp 1b\n"
20157 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
20158
20159 #ifdef CONFIG_X86_INTEL_USERCOPY
20160 static unsigned long
20161 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
20162 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
20163 {
20164 int d0, d1;
20165 __asm__ __volatile__(
20166 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
20167 " .align 2,0x90\n"
20168 "3: movl 0(%4), %%eax\n"
20169 "4: movl 4(%4), %%edx\n"
20170 - "5: movl %%eax, 0(%3)\n"
20171 - "6: movl %%edx, 4(%3)\n"
20172 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
20173 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
20174 "7: movl 8(%4), %%eax\n"
20175 "8: movl 12(%4),%%edx\n"
20176 - "9: movl %%eax, 8(%3)\n"
20177 - "10: movl %%edx, 12(%3)\n"
20178 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
20179 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
20180 "11: movl 16(%4), %%eax\n"
20181 "12: movl 20(%4), %%edx\n"
20182 - "13: movl %%eax, 16(%3)\n"
20183 - "14: movl %%edx, 20(%3)\n"
20184 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
20185 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
20186 "15: movl 24(%4), %%eax\n"
20187 "16: movl 28(%4), %%edx\n"
20188 - "17: movl %%eax, 24(%3)\n"
20189 - "18: movl %%edx, 28(%3)\n"
20190 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
20191 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
20192 "19: movl 32(%4), %%eax\n"
20193 "20: movl 36(%4), %%edx\n"
20194 - "21: movl %%eax, 32(%3)\n"
20195 - "22: movl %%edx, 36(%3)\n"
20196 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
20197 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
20198 "23: movl 40(%4), %%eax\n"
20199 "24: movl 44(%4), %%edx\n"
20200 - "25: movl %%eax, 40(%3)\n"
20201 - "26: movl %%edx, 44(%3)\n"
20202 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
20203 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
20204 "27: movl 48(%4), %%eax\n"
20205 "28: movl 52(%4), %%edx\n"
20206 - "29: movl %%eax, 48(%3)\n"
20207 - "30: movl %%edx, 52(%3)\n"
20208 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
20209 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
20210 "31: movl 56(%4), %%eax\n"
20211 "32: movl 60(%4), %%edx\n"
20212 - "33: movl %%eax, 56(%3)\n"
20213 - "34: movl %%edx, 60(%3)\n"
20214 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
20215 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
20216 " addl $-64, %0\n"
20217 " addl $64, %4\n"
20218 " addl $64, %3\n"
20219 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
20220 " shrl $2, %0\n"
20221 " andl $3, %%eax\n"
20222 " cld\n"
20223 + __COPYUSER_SET_ES
20224 "99: rep; movsl\n"
20225 "36: movl %%eax, %0\n"
20226 "37: rep; movsb\n"
20227 "100:\n"
20228 + __COPYUSER_RESTORE_ES
20229 + ".section .fixup,\"ax\"\n"
20230 + "101: lea 0(%%eax,%0,4),%0\n"
20231 + " jmp 100b\n"
20232 + ".previous\n"
20233 + ".section __ex_table,\"a\"\n"
20234 + " .align 4\n"
20235 + " .long 1b,100b\n"
20236 + " .long 2b,100b\n"
20237 + " .long 3b,100b\n"
20238 + " .long 4b,100b\n"
20239 + " .long 5b,100b\n"
20240 + " .long 6b,100b\n"
20241 + " .long 7b,100b\n"
20242 + " .long 8b,100b\n"
20243 + " .long 9b,100b\n"
20244 + " .long 10b,100b\n"
20245 + " .long 11b,100b\n"
20246 + " .long 12b,100b\n"
20247 + " .long 13b,100b\n"
20248 + " .long 14b,100b\n"
20249 + " .long 15b,100b\n"
20250 + " .long 16b,100b\n"
20251 + " .long 17b,100b\n"
20252 + " .long 18b,100b\n"
20253 + " .long 19b,100b\n"
20254 + " .long 20b,100b\n"
20255 + " .long 21b,100b\n"
20256 + " .long 22b,100b\n"
20257 + " .long 23b,100b\n"
20258 + " .long 24b,100b\n"
20259 + " .long 25b,100b\n"
20260 + " .long 26b,100b\n"
20261 + " .long 27b,100b\n"
20262 + " .long 28b,100b\n"
20263 + " .long 29b,100b\n"
20264 + " .long 30b,100b\n"
20265 + " .long 31b,100b\n"
20266 + " .long 32b,100b\n"
20267 + " .long 33b,100b\n"
20268 + " .long 34b,100b\n"
20269 + " .long 35b,100b\n"
20270 + " .long 36b,100b\n"
20271 + " .long 37b,100b\n"
20272 + " .long 99b,101b\n"
20273 + ".previous"
20274 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
20275 + : "1"(to), "2"(from), "0"(size)
20276 + : "eax", "edx", "memory");
20277 + return size;
20278 +}
20279 +
20280 +static unsigned long
20281 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
20282 +{
20283 + int d0, d1;
20284 + __asm__ __volatile__(
20285 + " .align 2,0x90\n"
20286 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
20287 + " cmpl $67, %0\n"
20288 + " jbe 3f\n"
20289 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
20290 + " .align 2,0x90\n"
20291 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
20292 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
20293 + "5: movl %%eax, 0(%3)\n"
20294 + "6: movl %%edx, 4(%3)\n"
20295 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
20296 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
20297 + "9: movl %%eax, 8(%3)\n"
20298 + "10: movl %%edx, 12(%3)\n"
20299 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
20300 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
20301 + "13: movl %%eax, 16(%3)\n"
20302 + "14: movl %%edx, 20(%3)\n"
20303 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
20304 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
20305 + "17: movl %%eax, 24(%3)\n"
20306 + "18: movl %%edx, 28(%3)\n"
20307 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
20308 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
20309 + "21: movl %%eax, 32(%3)\n"
20310 + "22: movl %%edx, 36(%3)\n"
20311 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
20312 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
20313 + "25: movl %%eax, 40(%3)\n"
20314 + "26: movl %%edx, 44(%3)\n"
20315 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
20316 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
20317 + "29: movl %%eax, 48(%3)\n"
20318 + "30: movl %%edx, 52(%3)\n"
20319 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
20320 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
20321 + "33: movl %%eax, 56(%3)\n"
20322 + "34: movl %%edx, 60(%3)\n"
20323 + " addl $-64, %0\n"
20324 + " addl $64, %4\n"
20325 + " addl $64, %3\n"
20326 + " cmpl $63, %0\n"
20327 + " ja 1b\n"
20328 + "35: movl %0, %%eax\n"
20329 + " shrl $2, %0\n"
20330 + " andl $3, %%eax\n"
20331 + " cld\n"
20332 + "99: rep; "__copyuser_seg" movsl\n"
20333 + "36: movl %%eax, %0\n"
20334 + "37: rep; "__copyuser_seg" movsb\n"
20335 + "100:\n"
20336 ".section .fixup,\"ax\"\n"
20337 "101: lea 0(%%eax,%0,4),%0\n"
20338 " jmp 100b\n"
20339 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
20340 int d0, d1;
20341 __asm__ __volatile__(
20342 " .align 2,0x90\n"
20343 - "0: movl 32(%4), %%eax\n"
20344 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20345 " cmpl $67, %0\n"
20346 " jbe 2f\n"
20347 - "1: movl 64(%4), %%eax\n"
20348 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20349 " .align 2,0x90\n"
20350 - "2: movl 0(%4), %%eax\n"
20351 - "21: movl 4(%4), %%edx\n"
20352 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20353 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20354 " movl %%eax, 0(%3)\n"
20355 " movl %%edx, 4(%3)\n"
20356 - "3: movl 8(%4), %%eax\n"
20357 - "31: movl 12(%4),%%edx\n"
20358 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20359 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20360 " movl %%eax, 8(%3)\n"
20361 " movl %%edx, 12(%3)\n"
20362 - "4: movl 16(%4), %%eax\n"
20363 - "41: movl 20(%4), %%edx\n"
20364 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20365 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20366 " movl %%eax, 16(%3)\n"
20367 " movl %%edx, 20(%3)\n"
20368 - "10: movl 24(%4), %%eax\n"
20369 - "51: movl 28(%4), %%edx\n"
20370 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20371 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20372 " movl %%eax, 24(%3)\n"
20373 " movl %%edx, 28(%3)\n"
20374 - "11: movl 32(%4), %%eax\n"
20375 - "61: movl 36(%4), %%edx\n"
20376 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20377 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20378 " movl %%eax, 32(%3)\n"
20379 " movl %%edx, 36(%3)\n"
20380 - "12: movl 40(%4), %%eax\n"
20381 - "71: movl 44(%4), %%edx\n"
20382 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20383 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20384 " movl %%eax, 40(%3)\n"
20385 " movl %%edx, 44(%3)\n"
20386 - "13: movl 48(%4), %%eax\n"
20387 - "81: movl 52(%4), %%edx\n"
20388 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20389 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20390 " movl %%eax, 48(%3)\n"
20391 " movl %%edx, 52(%3)\n"
20392 - "14: movl 56(%4), %%eax\n"
20393 - "91: movl 60(%4), %%edx\n"
20394 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20395 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20396 " movl %%eax, 56(%3)\n"
20397 " movl %%edx, 60(%3)\n"
20398 " addl $-64, %0\n"
20399 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
20400 " shrl $2, %0\n"
20401 " andl $3, %%eax\n"
20402 " cld\n"
20403 - "6: rep; movsl\n"
20404 + "6: rep; "__copyuser_seg" movsl\n"
20405 " movl %%eax,%0\n"
20406 - "7: rep; movsb\n"
20407 + "7: rep; "__copyuser_seg" movsb\n"
20408 "8:\n"
20409 ".section .fixup,\"ax\"\n"
20410 "9: lea 0(%%eax,%0,4),%0\n"
20411 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
20412
20413 __asm__ __volatile__(
20414 " .align 2,0x90\n"
20415 - "0: movl 32(%4), %%eax\n"
20416 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20417 " cmpl $67, %0\n"
20418 " jbe 2f\n"
20419 - "1: movl 64(%4), %%eax\n"
20420 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20421 " .align 2,0x90\n"
20422 - "2: movl 0(%4), %%eax\n"
20423 - "21: movl 4(%4), %%edx\n"
20424 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20425 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20426 " movnti %%eax, 0(%3)\n"
20427 " movnti %%edx, 4(%3)\n"
20428 - "3: movl 8(%4), %%eax\n"
20429 - "31: movl 12(%4),%%edx\n"
20430 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20431 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20432 " movnti %%eax, 8(%3)\n"
20433 " movnti %%edx, 12(%3)\n"
20434 - "4: movl 16(%4), %%eax\n"
20435 - "41: movl 20(%4), %%edx\n"
20436 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20437 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20438 " movnti %%eax, 16(%3)\n"
20439 " movnti %%edx, 20(%3)\n"
20440 - "10: movl 24(%4), %%eax\n"
20441 - "51: movl 28(%4), %%edx\n"
20442 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20443 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20444 " movnti %%eax, 24(%3)\n"
20445 " movnti %%edx, 28(%3)\n"
20446 - "11: movl 32(%4), %%eax\n"
20447 - "61: movl 36(%4), %%edx\n"
20448 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20449 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20450 " movnti %%eax, 32(%3)\n"
20451 " movnti %%edx, 36(%3)\n"
20452 - "12: movl 40(%4), %%eax\n"
20453 - "71: movl 44(%4), %%edx\n"
20454 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20455 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20456 " movnti %%eax, 40(%3)\n"
20457 " movnti %%edx, 44(%3)\n"
20458 - "13: movl 48(%4), %%eax\n"
20459 - "81: movl 52(%4), %%edx\n"
20460 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20461 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20462 " movnti %%eax, 48(%3)\n"
20463 " movnti %%edx, 52(%3)\n"
20464 - "14: movl 56(%4), %%eax\n"
20465 - "91: movl 60(%4), %%edx\n"
20466 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20467 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20468 " movnti %%eax, 56(%3)\n"
20469 " movnti %%edx, 60(%3)\n"
20470 " addl $-64, %0\n"
20471 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
20472 " shrl $2, %0\n"
20473 " andl $3, %%eax\n"
20474 " cld\n"
20475 - "6: rep; movsl\n"
20476 + "6: rep; "__copyuser_seg" movsl\n"
20477 " movl %%eax,%0\n"
20478 - "7: rep; movsb\n"
20479 + "7: rep; "__copyuser_seg" movsb\n"
20480 "8:\n"
20481 ".section .fixup,\"ax\"\n"
20482 "9: lea 0(%%eax,%0,4),%0\n"
20483 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
20484
20485 __asm__ __volatile__(
20486 " .align 2,0x90\n"
20487 - "0: movl 32(%4), %%eax\n"
20488 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20489 " cmpl $67, %0\n"
20490 " jbe 2f\n"
20491 - "1: movl 64(%4), %%eax\n"
20492 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20493 " .align 2,0x90\n"
20494 - "2: movl 0(%4), %%eax\n"
20495 - "21: movl 4(%4), %%edx\n"
20496 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20497 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20498 " movnti %%eax, 0(%3)\n"
20499 " movnti %%edx, 4(%3)\n"
20500 - "3: movl 8(%4), %%eax\n"
20501 - "31: movl 12(%4),%%edx\n"
20502 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20503 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20504 " movnti %%eax, 8(%3)\n"
20505 " movnti %%edx, 12(%3)\n"
20506 - "4: movl 16(%4), %%eax\n"
20507 - "41: movl 20(%4), %%edx\n"
20508 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20509 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20510 " movnti %%eax, 16(%3)\n"
20511 " movnti %%edx, 20(%3)\n"
20512 - "10: movl 24(%4), %%eax\n"
20513 - "51: movl 28(%4), %%edx\n"
20514 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20515 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20516 " movnti %%eax, 24(%3)\n"
20517 " movnti %%edx, 28(%3)\n"
20518 - "11: movl 32(%4), %%eax\n"
20519 - "61: movl 36(%4), %%edx\n"
20520 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20521 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20522 " movnti %%eax, 32(%3)\n"
20523 " movnti %%edx, 36(%3)\n"
20524 - "12: movl 40(%4), %%eax\n"
20525 - "71: movl 44(%4), %%edx\n"
20526 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20527 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20528 " movnti %%eax, 40(%3)\n"
20529 " movnti %%edx, 44(%3)\n"
20530 - "13: movl 48(%4), %%eax\n"
20531 - "81: movl 52(%4), %%edx\n"
20532 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20533 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20534 " movnti %%eax, 48(%3)\n"
20535 " movnti %%edx, 52(%3)\n"
20536 - "14: movl 56(%4), %%eax\n"
20537 - "91: movl 60(%4), %%edx\n"
20538 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20539 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20540 " movnti %%eax, 56(%3)\n"
20541 " movnti %%edx, 60(%3)\n"
20542 " addl $-64, %0\n"
20543 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
20544 " shrl $2, %0\n"
20545 " andl $3, %%eax\n"
20546 " cld\n"
20547 - "6: rep; movsl\n"
20548 + "6: rep; "__copyuser_seg" movsl\n"
20549 " movl %%eax,%0\n"
20550 - "7: rep; movsb\n"
20551 + "7: rep; "__copyuser_seg" movsb\n"
20552 "8:\n"
20553 ".section .fixup,\"ax\"\n"
20554 "9: lea 0(%%eax,%0,4),%0\n"
20555 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
20556 */
20557 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
20558 unsigned long size);
20559 -unsigned long __copy_user_intel(void __user *to, const void *from,
20560 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
20561 + unsigned long size);
20562 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
20563 unsigned long size);
20564 unsigned long __copy_user_zeroing_intel_nocache(void *to,
20565 const void __user *from, unsigned long size);
20566 #endif /* CONFIG_X86_INTEL_USERCOPY */
20567
20568 /* Generic arbitrary sized copy. */
20569 -#define __copy_user(to, from, size) \
20570 +#define __copy_user(to, from, size, prefix, set, restore) \
20571 do { \
20572 int __d0, __d1, __d2; \
20573 __asm__ __volatile__( \
20574 + set \
20575 " cmp $7,%0\n" \
20576 " jbe 1f\n" \
20577 " movl %1,%0\n" \
20578 " negl %0\n" \
20579 " andl $7,%0\n" \
20580 " subl %0,%3\n" \
20581 - "4: rep; movsb\n" \
20582 + "4: rep; "prefix"movsb\n" \
20583 " movl %3,%0\n" \
20584 " shrl $2,%0\n" \
20585 " andl $3,%3\n" \
20586 " .align 2,0x90\n" \
20587 - "0: rep; movsl\n" \
20588 + "0: rep; "prefix"movsl\n" \
20589 " movl %3,%0\n" \
20590 - "1: rep; movsb\n" \
20591 + "1: rep; "prefix"movsb\n" \
20592 "2:\n" \
20593 + restore \
20594 ".section .fixup,\"ax\"\n" \
20595 "5: addl %3,%0\n" \
20596 " jmp 2b\n" \
20597 @@ -682,14 +799,14 @@ do { \
20598 " negl %0\n" \
20599 " andl $7,%0\n" \
20600 " subl %0,%3\n" \
20601 - "4: rep; movsb\n" \
20602 + "4: rep; "__copyuser_seg"movsb\n" \
20603 " movl %3,%0\n" \
20604 " shrl $2,%0\n" \
20605 " andl $3,%3\n" \
20606 " .align 2,0x90\n" \
20607 - "0: rep; movsl\n" \
20608 + "0: rep; "__copyuser_seg"movsl\n" \
20609 " movl %3,%0\n" \
20610 - "1: rep; movsb\n" \
20611 + "1: rep; "__copyuser_seg"movsb\n" \
20612 "2:\n" \
20613 ".section .fixup,\"ax\"\n" \
20614 "5: addl %3,%0\n" \
20615 @@ -775,9 +892,9 @@ survive:
20616 }
20617 #endif
20618 if (movsl_is_ok(to, from, n))
20619 - __copy_user(to, from, n);
20620 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
20621 else
20622 - n = __copy_user_intel(to, from, n);
20623 + n = __generic_copy_to_user_intel(to, from, n);
20624 return n;
20625 }
20626 EXPORT_SYMBOL(__copy_to_user_ll);
20627 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
20628 unsigned long n)
20629 {
20630 if (movsl_is_ok(to, from, n))
20631 - __copy_user(to, from, n);
20632 + __copy_user(to, from, n, __copyuser_seg, "", "");
20633 else
20634 - n = __copy_user_intel((void __user *)to,
20635 - (const void *)from, n);
20636 + n = __generic_copy_from_user_intel(to, from, n);
20637 return n;
20638 }
20639 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
20640 @@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocach
20641 if (n > 64 && cpu_has_xmm2)
20642 n = __copy_user_intel_nocache(to, from, n);
20643 else
20644 - __copy_user(to, from, n);
20645 + __copy_user(to, from, n, __copyuser_seg, "", "");
20646 #else
20647 - __copy_user(to, from, n);
20648 + __copy_user(to, from, n, __copyuser_seg, "", "");
20649 #endif
20650 return n;
20651 }
20652 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
20653
20654 -/**
20655 - * copy_to_user: - Copy a block of data into user space.
20656 - * @to: Destination address, in user space.
20657 - * @from: Source address, in kernel space.
20658 - * @n: Number of bytes to copy.
20659 - *
20660 - * Context: User context only. This function may sleep.
20661 - *
20662 - * Copy data from kernel space to user space.
20663 - *
20664 - * Returns number of bytes that could not be copied.
20665 - * On success, this will be zero.
20666 - */
20667 -unsigned long
20668 -copy_to_user(void __user *to, const void *from, unsigned long n)
20669 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20670 +void __set_fs(mm_segment_t x)
20671 {
20672 - if (access_ok(VERIFY_WRITE, to, n))
20673 - n = __copy_to_user(to, from, n);
20674 - return n;
20675 + switch (x.seg) {
20676 + case 0:
20677 + loadsegment(gs, 0);
20678 + break;
20679 + case TASK_SIZE_MAX:
20680 + loadsegment(gs, __USER_DS);
20681 + break;
20682 + case -1UL:
20683 + loadsegment(gs, __KERNEL_DS);
20684 + break;
20685 + default:
20686 + BUG();
20687 + }
20688 + return;
20689 }
20690 -EXPORT_SYMBOL(copy_to_user);
20691 +EXPORT_SYMBOL(__set_fs);
20692
20693 -/**
20694 - * copy_from_user: - Copy a block of data from user space.
20695 - * @to: Destination address, in kernel space.
20696 - * @from: Source address, in user space.
20697 - * @n: Number of bytes to copy.
20698 - *
20699 - * Context: User context only. This function may sleep.
20700 - *
20701 - * Copy data from user space to kernel space.
20702 - *
20703 - * Returns number of bytes that could not be copied.
20704 - * On success, this will be zero.
20705 - *
20706 - * If some data could not be copied, this function will pad the copied
20707 - * data to the requested size using zero bytes.
20708 - */
20709 -unsigned long
20710 -copy_from_user(void *to, const void __user *from, unsigned long n)
20711 +void set_fs(mm_segment_t x)
20712 {
20713 - if (access_ok(VERIFY_READ, from, n))
20714 - n = __copy_from_user(to, from, n);
20715 - else
20716 - memset(to, 0, n);
20717 - return n;
20718 + current_thread_info()->addr_limit = x;
20719 + __set_fs(x);
20720 }
20721 -EXPORT_SYMBOL(copy_from_user);
20722 +EXPORT_SYMBOL(set_fs);
20723 +#endif
20724 diff -urNp linux-2.6.32.45/arch/x86/lib/usercopy_64.c linux-2.6.32.45/arch/x86/lib/usercopy_64.c
20725 --- linux-2.6.32.45/arch/x86/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
20726 +++ linux-2.6.32.45/arch/x86/lib/usercopy_64.c 2011-05-04 17:56:20.000000000 -0400
20727 @@ -42,6 +42,12 @@ long
20728 __strncpy_from_user(char *dst, const char __user *src, long count)
20729 {
20730 long res;
20731 +
20732 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20733 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
20734 + src += PAX_USER_SHADOW_BASE;
20735 +#endif
20736 +
20737 __do_strncpy_from_user(dst, src, count, res);
20738 return res;
20739 }
20740 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
20741 {
20742 long __d0;
20743 might_fault();
20744 +
20745 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20746 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
20747 + addr += PAX_USER_SHADOW_BASE;
20748 +#endif
20749 +
20750 /* no memory constraint because it doesn't change any memory gcc knows
20751 about */
20752 asm volatile(
20753 @@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
20754
20755 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
20756 {
20757 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
20758 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
20759 +
20760 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20761 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
20762 + to += PAX_USER_SHADOW_BASE;
20763 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
20764 + from += PAX_USER_SHADOW_BASE;
20765 +#endif
20766 +
20767 return copy_user_generic((__force void *)to, (__force void *)from, len);
20768 - }
20769 - return len;
20770 + }
20771 + return len;
20772 }
20773 EXPORT_SYMBOL(copy_in_user);
20774
20775 diff -urNp linux-2.6.32.45/arch/x86/Makefile linux-2.6.32.45/arch/x86/Makefile
20776 --- linux-2.6.32.45/arch/x86/Makefile 2011-03-27 14:31:47.000000000 -0400
20777 +++ linux-2.6.32.45/arch/x86/Makefile 2011-07-19 18:16:02.000000000 -0400
20778 @@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
20779 else
20780 BITS := 64
20781 UTS_MACHINE := x86_64
20782 + biarch := $(call cc-option,-m64)
20783 CHECKFLAGS += -D__x86_64__ -m64
20784
20785 KBUILD_AFLAGS += -m64
20786 @@ -189,3 +190,12 @@ define archhelp
20787 echo ' FDARGS="..." arguments for the booted kernel'
20788 echo ' FDINITRD=file initrd for the booted kernel'
20789 endef
20790 +
20791 +define OLD_LD
20792 +
20793 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
20794 +*** Please upgrade your binutils to 2.18 or newer
20795 +endef
20796 +
20797 +archprepare:
20798 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
20799 diff -urNp linux-2.6.32.45/arch/x86/mm/extable.c linux-2.6.32.45/arch/x86/mm/extable.c
20800 --- linux-2.6.32.45/arch/x86/mm/extable.c 2011-03-27 14:31:47.000000000 -0400
20801 +++ linux-2.6.32.45/arch/x86/mm/extable.c 2011-04-17 15:56:46.000000000 -0400
20802 @@ -1,14 +1,71 @@
20803 #include <linux/module.h>
20804 #include <linux/spinlock.h>
20805 +#include <linux/sort.h>
20806 #include <asm/uaccess.h>
20807 +#include <asm/pgtable.h>
20808
20809 +/*
20810 + * The exception table needs to be sorted so that the binary
20811 + * search that we use to find entries in it works properly.
20812 + * This is used both for the kernel exception table and for
20813 + * the exception tables of modules that get loaded.
20814 + */
20815 +static int cmp_ex(const void *a, const void *b)
20816 +{
20817 + const struct exception_table_entry *x = a, *y = b;
20818 +
20819 + /* avoid overflow */
20820 + if (x->insn > y->insn)
20821 + return 1;
20822 + if (x->insn < y->insn)
20823 + return -1;
20824 + return 0;
20825 +}
20826 +
20827 +static void swap_ex(void *a, void *b, int size)
20828 +{
20829 + struct exception_table_entry t, *x = a, *y = b;
20830 +
20831 + t = *x;
20832 +
20833 + pax_open_kernel();
20834 + *x = *y;
20835 + *y = t;
20836 + pax_close_kernel();
20837 +}
20838 +
20839 +void sort_extable(struct exception_table_entry *start,
20840 + struct exception_table_entry *finish)
20841 +{
20842 + sort(start, finish - start, sizeof(struct exception_table_entry),
20843 + cmp_ex, swap_ex);
20844 +}
20845 +
20846 +#ifdef CONFIG_MODULES
20847 +/*
20848 + * If the exception table is sorted, any referring to the module init
20849 + * will be at the beginning or the end.
20850 + */
20851 +void trim_init_extable(struct module *m)
20852 +{
20853 + /*trim the beginning*/
20854 + while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
20855 + m->extable++;
20856 + m->num_exentries--;
20857 + }
20858 + /*trim the end*/
20859 + while (m->num_exentries &&
20860 + within_module_init(m->extable[m->num_exentries-1].insn, m))
20861 + m->num_exentries--;
20862 +}
20863 +#endif /* CONFIG_MODULES */
20864
20865 int fixup_exception(struct pt_regs *regs)
20866 {
20867 const struct exception_table_entry *fixup;
20868
20869 #ifdef CONFIG_PNPBIOS
20870 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
20871 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
20872 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
20873 extern u32 pnp_bios_is_utter_crap;
20874 pnp_bios_is_utter_crap = 1;
20875 diff -urNp linux-2.6.32.45/arch/x86/mm/fault.c linux-2.6.32.45/arch/x86/mm/fault.c
20876 --- linux-2.6.32.45/arch/x86/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
20877 +++ linux-2.6.32.45/arch/x86/mm/fault.c 2011-08-17 20:06:44.000000000 -0400
20878 @@ -11,10 +11,19 @@
20879 #include <linux/kprobes.h> /* __kprobes, ... */
20880 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
20881 #include <linux/perf_event.h> /* perf_sw_event */
20882 +#include <linux/unistd.h>
20883 +#include <linux/compiler.h>
20884
20885 #include <asm/traps.h> /* dotraplinkage, ... */
20886 #include <asm/pgalloc.h> /* pgd_*(), ... */
20887 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
20888 +#include <asm/vsyscall.h>
20889 +#include <asm/tlbflush.h>
20890 +
20891 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20892 +#include <asm/stacktrace.h>
20893 +#include "../kernel/dumpstack.h"
20894 +#endif
20895
20896 /*
20897 * Page fault error code bits:
20898 @@ -51,7 +60,7 @@ static inline int notify_page_fault(stru
20899 int ret = 0;
20900
20901 /* kprobe_running() needs smp_processor_id() */
20902 - if (kprobes_built_in() && !user_mode_vm(regs)) {
20903 + if (kprobes_built_in() && !user_mode(regs)) {
20904 preempt_disable();
20905 if (kprobe_running() && kprobe_fault_handler(regs, 14))
20906 ret = 1;
20907 @@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *re
20908 return !instr_lo || (instr_lo>>1) == 1;
20909 case 0x00:
20910 /* Prefetch instruction is 0x0F0D or 0x0F18 */
20911 - if (probe_kernel_address(instr, opcode))
20912 + if (user_mode(regs)) {
20913 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
20914 + return 0;
20915 + } else if (probe_kernel_address(instr, opcode))
20916 return 0;
20917
20918 *prefetch = (instr_lo == 0xF) &&
20919 @@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsign
20920 while (instr < max_instr) {
20921 unsigned char opcode;
20922
20923 - if (probe_kernel_address(instr, opcode))
20924 + if (user_mode(regs)) {
20925 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
20926 + break;
20927 + } else if (probe_kernel_address(instr, opcode))
20928 break;
20929
20930 instr++;
20931 @@ -172,6 +187,30 @@ force_sig_info_fault(int si_signo, int s
20932 force_sig_info(si_signo, &info, tsk);
20933 }
20934
20935 +#ifdef CONFIG_PAX_EMUTRAMP
20936 +static int pax_handle_fetch_fault(struct pt_regs *regs);
20937 +#endif
20938 +
20939 +#ifdef CONFIG_PAX_PAGEEXEC
20940 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
20941 +{
20942 + pgd_t *pgd;
20943 + pud_t *pud;
20944 + pmd_t *pmd;
20945 +
20946 + pgd = pgd_offset(mm, address);
20947 + if (!pgd_present(*pgd))
20948 + return NULL;
20949 + pud = pud_offset(pgd, address);
20950 + if (!pud_present(*pud))
20951 + return NULL;
20952 + pmd = pmd_offset(pud, address);
20953 + if (!pmd_present(*pmd))
20954 + return NULL;
20955 + return pmd;
20956 +}
20957 +#endif
20958 +
20959 DEFINE_SPINLOCK(pgd_lock);
20960 LIST_HEAD(pgd_list);
20961
20962 @@ -224,11 +263,24 @@ void vmalloc_sync_all(void)
20963 address += PMD_SIZE) {
20964
20965 unsigned long flags;
20966 +
20967 +#ifdef CONFIG_PAX_PER_CPU_PGD
20968 + unsigned long cpu;
20969 +#else
20970 struct page *page;
20971 +#endif
20972
20973 spin_lock_irqsave(&pgd_lock, flags);
20974 +
20975 +#ifdef CONFIG_PAX_PER_CPU_PGD
20976 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20977 + pgd_t *pgd = get_cpu_pgd(cpu);
20978 +#else
20979 list_for_each_entry(page, &pgd_list, lru) {
20980 - if (!vmalloc_sync_one(page_address(page), address))
20981 + pgd_t *pgd = page_address(page);
20982 +#endif
20983 +
20984 + if (!vmalloc_sync_one(pgd, address))
20985 break;
20986 }
20987 spin_unlock_irqrestore(&pgd_lock, flags);
20988 @@ -258,6 +310,11 @@ static noinline int vmalloc_fault(unsign
20989 * an interrupt in the middle of a task switch..
20990 */
20991 pgd_paddr = read_cr3();
20992 +
20993 +#ifdef CONFIG_PAX_PER_CPU_PGD
20994 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
20995 +#endif
20996 +
20997 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
20998 if (!pmd_k)
20999 return -1;
21000 @@ -332,15 +389,27 @@ void vmalloc_sync_all(void)
21001
21002 const pgd_t *pgd_ref = pgd_offset_k(address);
21003 unsigned long flags;
21004 +
21005 +#ifdef CONFIG_PAX_PER_CPU_PGD
21006 + unsigned long cpu;
21007 +#else
21008 struct page *page;
21009 +#endif
21010
21011 if (pgd_none(*pgd_ref))
21012 continue;
21013
21014 spin_lock_irqsave(&pgd_lock, flags);
21015 +
21016 +#ifdef CONFIG_PAX_PER_CPU_PGD
21017 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
21018 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
21019 +#else
21020 list_for_each_entry(page, &pgd_list, lru) {
21021 pgd_t *pgd;
21022 pgd = (pgd_t *)page_address(page) + pgd_index(address);
21023 +#endif
21024 +
21025 if (pgd_none(*pgd))
21026 set_pgd(pgd, *pgd_ref);
21027 else
21028 @@ -373,7 +442,14 @@ static noinline int vmalloc_fault(unsign
21029 * happen within a race in page table update. In the later
21030 * case just flush:
21031 */
21032 +
21033 +#ifdef CONFIG_PAX_PER_CPU_PGD
21034 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
21035 + pgd = pgd_offset_cpu(smp_processor_id(), address);
21036 +#else
21037 pgd = pgd_offset(current->active_mm, address);
21038 +#endif
21039 +
21040 pgd_ref = pgd_offset_k(address);
21041 if (pgd_none(*pgd_ref))
21042 return -1;
21043 @@ -535,7 +611,7 @@ static int is_errata93(struct pt_regs *r
21044 static int is_errata100(struct pt_regs *regs, unsigned long address)
21045 {
21046 #ifdef CONFIG_X86_64
21047 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
21048 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
21049 return 1;
21050 #endif
21051 return 0;
21052 @@ -562,7 +638,7 @@ static int is_f00f_bug(struct pt_regs *r
21053 }
21054
21055 static const char nx_warning[] = KERN_CRIT
21056 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
21057 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
21058
21059 static void
21060 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21061 @@ -571,15 +647,26 @@ show_fault_oops(struct pt_regs *regs, un
21062 if (!oops_may_print())
21063 return;
21064
21065 - if (error_code & PF_INSTR) {
21066 + if (nx_enabled && (error_code & PF_INSTR)) {
21067 unsigned int level;
21068
21069 pte_t *pte = lookup_address(address, &level);
21070
21071 if (pte && pte_present(*pte) && !pte_exec(*pte))
21072 - printk(nx_warning, current_uid());
21073 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
21074 }
21075
21076 +#ifdef CONFIG_PAX_KERNEXEC
21077 + if (init_mm.start_code <= address && address < init_mm.end_code) {
21078 + if (current->signal->curr_ip)
21079 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21080 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
21081 + else
21082 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21083 + current->comm, task_pid_nr(current), current_uid(), current_euid());
21084 + }
21085 +#endif
21086 +
21087 printk(KERN_ALERT "BUG: unable to handle kernel ");
21088 if (address < PAGE_SIZE)
21089 printk(KERN_CONT "NULL pointer dereference");
21090 @@ -704,6 +791,70 @@ __bad_area_nosemaphore(struct pt_regs *r
21091 unsigned long address, int si_code)
21092 {
21093 struct task_struct *tsk = current;
21094 +#if defined(CONFIG_X86_64) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21095 + struct mm_struct *mm = tsk->mm;
21096 +#endif
21097 +
21098 +#ifdef CONFIG_X86_64
21099 + if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
21100 + if (regs->ip == (unsigned long)vgettimeofday) {
21101 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
21102 + return;
21103 + } else if (regs->ip == (unsigned long)vtime) {
21104 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
21105 + return;
21106 + } else if (regs->ip == (unsigned long)vgetcpu) {
21107 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
21108 + return;
21109 + }
21110 + }
21111 +#endif
21112 +
21113 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21114 + if (mm && (error_code & PF_USER)) {
21115 + unsigned long ip = regs->ip;
21116 +
21117 + if (v8086_mode(regs))
21118 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
21119 +
21120 + /*
21121 + * It's possible to have interrupts off here:
21122 + */
21123 + local_irq_enable();
21124 +
21125 +#ifdef CONFIG_PAX_PAGEEXEC
21126 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
21127 + ((nx_enabled && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
21128 +
21129 +#ifdef CONFIG_PAX_EMUTRAMP
21130 + switch (pax_handle_fetch_fault(regs)) {
21131 + case 2:
21132 + return;
21133 + }
21134 +#endif
21135 +
21136 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
21137 + do_group_exit(SIGKILL);
21138 + }
21139 +#endif
21140 +
21141 +#ifdef CONFIG_PAX_SEGMEXEC
21142 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
21143 +
21144 +#ifdef CONFIG_PAX_EMUTRAMP
21145 + switch (pax_handle_fetch_fault(regs)) {
21146 + case 2:
21147 + return;
21148 + }
21149 +#endif
21150 +
21151 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
21152 + do_group_exit(SIGKILL);
21153 + }
21154 +#endif
21155 +
21156 + }
21157 +#endif
21158
21159 /* User mode accesses just cause a SIGSEGV */
21160 if (error_code & PF_USER) {
21161 @@ -857,6 +1008,99 @@ static int spurious_fault_check(unsigned
21162 return 1;
21163 }
21164
21165 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21166 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
21167 +{
21168 + pte_t *pte;
21169 + pmd_t *pmd;
21170 + spinlock_t *ptl;
21171 + unsigned char pte_mask;
21172 +
21173 + if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
21174 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
21175 + return 0;
21176 +
21177 + /* PaX: it's our fault, let's handle it if we can */
21178 +
21179 + /* PaX: take a look at read faults before acquiring any locks */
21180 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
21181 + /* instruction fetch attempt from a protected page in user mode */
21182 + up_read(&mm->mmap_sem);
21183 +
21184 +#ifdef CONFIG_PAX_EMUTRAMP
21185 + switch (pax_handle_fetch_fault(regs)) {
21186 + case 2:
21187 + return 1;
21188 + }
21189 +#endif
21190 +
21191 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21192 + do_group_exit(SIGKILL);
21193 + }
21194 +
21195 + pmd = pax_get_pmd(mm, address);
21196 + if (unlikely(!pmd))
21197 + return 0;
21198 +
21199 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
21200 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
21201 + pte_unmap_unlock(pte, ptl);
21202 + return 0;
21203 + }
21204 +
21205 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
21206 + /* write attempt to a protected page in user mode */
21207 + pte_unmap_unlock(pte, ptl);
21208 + return 0;
21209 + }
21210 +
21211 +#ifdef CONFIG_SMP
21212 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
21213 +#else
21214 + if (likely(address > get_limit(regs->cs)))
21215 +#endif
21216 + {
21217 + set_pte(pte, pte_mkread(*pte));
21218 + __flush_tlb_one(address);
21219 + pte_unmap_unlock(pte, ptl);
21220 + up_read(&mm->mmap_sem);
21221 + return 1;
21222 + }
21223 +
21224 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
21225 +
21226 + /*
21227 + * PaX: fill DTLB with user rights and retry
21228 + */
21229 + __asm__ __volatile__ (
21230 + "orb %2,(%1)\n"
21231 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
21232 +/*
21233 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
21234 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
21235 + * page fault when examined during a TLB load attempt. this is true not only
21236 + * for PTEs holding a non-present entry but also present entries that will
21237 + * raise a page fault (such as those set up by PaX, or the copy-on-write
21238 + * mechanism). in effect it means that we do *not* need to flush the TLBs
21239 + * for our target pages since their PTEs are simply not in the TLBs at all.
21240 +
21241 + * the best thing in omitting it is that we gain around 15-20% speed in the
21242 + * fast path of the page fault handler and can get rid of tracing since we
21243 + * can no longer flush unintended entries.
21244 + */
21245 + "invlpg (%0)\n"
21246 +#endif
21247 + __copyuser_seg"testb $0,(%0)\n"
21248 + "xorb %3,(%1)\n"
21249 + :
21250 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
21251 + : "memory", "cc");
21252 + pte_unmap_unlock(pte, ptl);
21253 + up_read(&mm->mmap_sem);
21254 + return 1;
21255 +}
21256 +#endif
21257 +
21258 /*
21259 * Handle a spurious fault caused by a stale TLB entry.
21260 *
21261 @@ -923,6 +1167,9 @@ int show_unhandled_signals = 1;
21262 static inline int
21263 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
21264 {
21265 + if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
21266 + return 1;
21267 +
21268 if (write) {
21269 /* write, present and write, not present: */
21270 if (unlikely(!(vma->vm_flags & VM_WRITE)))
21271 @@ -956,17 +1203,31 @@ do_page_fault(struct pt_regs *regs, unsi
21272 {
21273 struct vm_area_struct *vma;
21274 struct task_struct *tsk;
21275 - unsigned long address;
21276 struct mm_struct *mm;
21277 int write;
21278 int fault;
21279
21280 + /* Get the faulting address: */
21281 + unsigned long address = read_cr2();
21282 +
21283 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21284 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
21285 + if (!search_exception_tables(regs->ip)) {
21286 + bad_area_nosemaphore(regs, error_code, address);
21287 + return;
21288 + }
21289 + if (address < PAX_USER_SHADOW_BASE) {
21290 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
21291 + printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
21292 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
21293 + } else
21294 + address -= PAX_USER_SHADOW_BASE;
21295 + }
21296 +#endif
21297 +
21298 tsk = current;
21299 mm = tsk->mm;
21300
21301 - /* Get the faulting address: */
21302 - address = read_cr2();
21303 -
21304 /*
21305 * Detect and handle instructions that would cause a page fault for
21306 * both a tracked kernel page and a userspace page.
21307 @@ -1026,7 +1287,7 @@ do_page_fault(struct pt_regs *regs, unsi
21308 * User-mode registers count as a user access even for any
21309 * potential system fault or CPU buglet:
21310 */
21311 - if (user_mode_vm(regs)) {
21312 + if (user_mode(regs)) {
21313 local_irq_enable();
21314 error_code |= PF_USER;
21315 } else {
21316 @@ -1080,6 +1341,11 @@ do_page_fault(struct pt_regs *regs, unsi
21317 might_sleep();
21318 }
21319
21320 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21321 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
21322 + return;
21323 +#endif
21324 +
21325 vma = find_vma(mm, address);
21326 if (unlikely(!vma)) {
21327 bad_area(regs, error_code, address);
21328 @@ -1091,18 +1357,24 @@ do_page_fault(struct pt_regs *regs, unsi
21329 bad_area(regs, error_code, address);
21330 return;
21331 }
21332 - if (error_code & PF_USER) {
21333 - /*
21334 - * Accessing the stack below %sp is always a bug.
21335 - * The large cushion allows instructions like enter
21336 - * and pusha to work. ("enter $65535, $31" pushes
21337 - * 32 pointers and then decrements %sp by 65535.)
21338 - */
21339 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
21340 - bad_area(regs, error_code, address);
21341 - return;
21342 - }
21343 + /*
21344 + * Accessing the stack below %sp is always a bug.
21345 + * The large cushion allows instructions like enter
21346 + * and pusha to work. ("enter $65535, $31" pushes
21347 + * 32 pointers and then decrements %sp by 65535.)
21348 + */
21349 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
21350 + bad_area(regs, error_code, address);
21351 + return;
21352 }
21353 +
21354 +#ifdef CONFIG_PAX_SEGMEXEC
21355 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
21356 + bad_area(regs, error_code, address);
21357 + return;
21358 + }
21359 +#endif
21360 +
21361 if (unlikely(expand_stack(vma, address))) {
21362 bad_area(regs, error_code, address);
21363 return;
21364 @@ -1146,3 +1418,199 @@ good_area:
21365
21366 up_read(&mm->mmap_sem);
21367 }
21368 +
21369 +#ifdef CONFIG_PAX_EMUTRAMP
21370 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
21371 +{
21372 + int err;
21373 +
21374 + do { /* PaX: gcc trampoline emulation #1 */
21375 + unsigned char mov1, mov2;
21376 + unsigned short jmp;
21377 + unsigned int addr1, addr2;
21378 +
21379 +#ifdef CONFIG_X86_64
21380 + if ((regs->ip + 11) >> 32)
21381 + break;
21382 +#endif
21383 +
21384 + err = get_user(mov1, (unsigned char __user *)regs->ip);
21385 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21386 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
21387 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21388 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
21389 +
21390 + if (err)
21391 + break;
21392 +
21393 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
21394 + regs->cx = addr1;
21395 + regs->ax = addr2;
21396 + regs->ip = addr2;
21397 + return 2;
21398 + }
21399 + } while (0);
21400 +
21401 + do { /* PaX: gcc trampoline emulation #2 */
21402 + unsigned char mov, jmp;
21403 + unsigned int addr1, addr2;
21404 +
21405 +#ifdef CONFIG_X86_64
21406 + if ((regs->ip + 9) >> 32)
21407 + break;
21408 +#endif
21409 +
21410 + err = get_user(mov, (unsigned char __user *)regs->ip);
21411 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21412 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
21413 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21414 +
21415 + if (err)
21416 + break;
21417 +
21418 + if (mov == 0xB9 && jmp == 0xE9) {
21419 + regs->cx = addr1;
21420 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
21421 + return 2;
21422 + }
21423 + } while (0);
21424 +
21425 + return 1; /* PaX in action */
21426 +}
21427 +
21428 +#ifdef CONFIG_X86_64
21429 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
21430 +{
21431 + int err;
21432 +
21433 + do { /* PaX: gcc trampoline emulation #1 */
21434 + unsigned short mov1, mov2, jmp1;
21435 + unsigned char jmp2;
21436 + unsigned int addr1;
21437 + unsigned long addr2;
21438 +
21439 + err = get_user(mov1, (unsigned short __user *)regs->ip);
21440 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
21441 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
21442 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
21443 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
21444 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
21445 +
21446 + if (err)
21447 + break;
21448 +
21449 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21450 + regs->r11 = addr1;
21451 + regs->r10 = addr2;
21452 + regs->ip = addr1;
21453 + return 2;
21454 + }
21455 + } while (0);
21456 +
21457 + do { /* PaX: gcc trampoline emulation #2 */
21458 + unsigned short mov1, mov2, jmp1;
21459 + unsigned char jmp2;
21460 + unsigned long addr1, addr2;
21461 +
21462 + err = get_user(mov1, (unsigned short __user *)regs->ip);
21463 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
21464 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
21465 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
21466 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
21467 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
21468 +
21469 + if (err)
21470 + break;
21471 +
21472 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21473 + regs->r11 = addr1;
21474 + regs->r10 = addr2;
21475 + regs->ip = addr1;
21476 + return 2;
21477 + }
21478 + } while (0);
21479 +
21480 + return 1; /* PaX in action */
21481 +}
21482 +#endif
21483 +
21484 +/*
21485 + * PaX: decide what to do with offenders (regs->ip = fault address)
21486 + *
21487 + * returns 1 when task should be killed
21488 + * 2 when gcc trampoline was detected
21489 + */
21490 +static int pax_handle_fetch_fault(struct pt_regs *regs)
21491 +{
21492 + if (v8086_mode(regs))
21493 + return 1;
21494 +
21495 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
21496 + return 1;
21497 +
21498 +#ifdef CONFIG_X86_32
21499 + return pax_handle_fetch_fault_32(regs);
21500 +#else
21501 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
21502 + return pax_handle_fetch_fault_32(regs);
21503 + else
21504 + return pax_handle_fetch_fault_64(regs);
21505 +#endif
21506 +}
21507 +#endif
21508 +
21509 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21510 +void pax_report_insns(void *pc, void *sp)
21511 +{
21512 + long i;
21513 +
21514 + printk(KERN_ERR "PAX: bytes at PC: ");
21515 + for (i = 0; i < 20; i++) {
21516 + unsigned char c;
21517 + if (get_user(c, (__force unsigned char __user *)pc+i))
21518 + printk(KERN_CONT "?? ");
21519 + else
21520 + printk(KERN_CONT "%02x ", c);
21521 + }
21522 + printk("\n");
21523 +
21524 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
21525 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
21526 + unsigned long c;
21527 + if (get_user(c, (__force unsigned long __user *)sp+i))
21528 +#ifdef CONFIG_X86_32
21529 + printk(KERN_CONT "???????? ");
21530 +#else
21531 + printk(KERN_CONT "???????????????? ");
21532 +#endif
21533 + else
21534 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
21535 + }
21536 + printk("\n");
21537 +}
21538 +#endif
21539 +
21540 +/**
21541 + * probe_kernel_write(): safely attempt to write to a location
21542 + * @dst: address to write to
21543 + * @src: pointer to the data that shall be written
21544 + * @size: size of the data chunk
21545 + *
21546 + * Safely write to address @dst from the buffer at @src. If a kernel fault
21547 + * happens, handle that and return -EFAULT.
21548 + */
21549 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
21550 +{
21551 + long ret;
21552 + mm_segment_t old_fs = get_fs();
21553 +
21554 + set_fs(KERNEL_DS);
21555 + pagefault_disable();
21556 + pax_open_kernel();
21557 + ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
21558 + pax_close_kernel();
21559 + pagefault_enable();
21560 + set_fs(old_fs);
21561 +
21562 + return ret ? -EFAULT : 0;
21563 +}
21564 diff -urNp linux-2.6.32.45/arch/x86/mm/gup.c linux-2.6.32.45/arch/x86/mm/gup.c
21565 --- linux-2.6.32.45/arch/x86/mm/gup.c 2011-03-27 14:31:47.000000000 -0400
21566 +++ linux-2.6.32.45/arch/x86/mm/gup.c 2011-04-17 15:56:46.000000000 -0400
21567 @@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long
21568 addr = start;
21569 len = (unsigned long) nr_pages << PAGE_SHIFT;
21570 end = start + len;
21571 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
21572 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
21573 (void __user *)start, len)))
21574 return 0;
21575
21576 diff -urNp linux-2.6.32.45/arch/x86/mm/highmem_32.c linux-2.6.32.45/arch/x86/mm/highmem_32.c
21577 --- linux-2.6.32.45/arch/x86/mm/highmem_32.c 2011-03-27 14:31:47.000000000 -0400
21578 +++ linux-2.6.32.45/arch/x86/mm/highmem_32.c 2011-04-17 15:56:46.000000000 -0400
21579 @@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page
21580 idx = type + KM_TYPE_NR*smp_processor_id();
21581 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
21582 BUG_ON(!pte_none(*(kmap_pte-idx)));
21583 +
21584 + pax_open_kernel();
21585 set_pte(kmap_pte-idx, mk_pte(page, prot));
21586 + pax_close_kernel();
21587
21588 return (void *)vaddr;
21589 }
21590 diff -urNp linux-2.6.32.45/arch/x86/mm/hugetlbpage.c linux-2.6.32.45/arch/x86/mm/hugetlbpage.c
21591 --- linux-2.6.32.45/arch/x86/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
21592 +++ linux-2.6.32.45/arch/x86/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
21593 @@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmappe
21594 struct hstate *h = hstate_file(file);
21595 struct mm_struct *mm = current->mm;
21596 struct vm_area_struct *vma;
21597 - unsigned long start_addr;
21598 + unsigned long start_addr, pax_task_size = TASK_SIZE;
21599 +
21600 +#ifdef CONFIG_PAX_SEGMEXEC
21601 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21602 + pax_task_size = SEGMEXEC_TASK_SIZE;
21603 +#endif
21604 +
21605 + pax_task_size -= PAGE_SIZE;
21606
21607 if (len > mm->cached_hole_size) {
21608 - start_addr = mm->free_area_cache;
21609 + start_addr = mm->free_area_cache;
21610 } else {
21611 - start_addr = TASK_UNMAPPED_BASE;
21612 - mm->cached_hole_size = 0;
21613 + start_addr = mm->mmap_base;
21614 + mm->cached_hole_size = 0;
21615 }
21616
21617 full_search:
21618 @@ -281,26 +288,27 @@ full_search:
21619
21620 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
21621 /* At this point: (!vma || addr < vma->vm_end). */
21622 - if (TASK_SIZE - len < addr) {
21623 + if (pax_task_size - len < addr) {
21624 /*
21625 * Start a new search - just in case we missed
21626 * some holes.
21627 */
21628 - if (start_addr != TASK_UNMAPPED_BASE) {
21629 - start_addr = TASK_UNMAPPED_BASE;
21630 + if (start_addr != mm->mmap_base) {
21631 + start_addr = mm->mmap_base;
21632 mm->cached_hole_size = 0;
21633 goto full_search;
21634 }
21635 return -ENOMEM;
21636 }
21637 - if (!vma || addr + len <= vma->vm_start) {
21638 - mm->free_area_cache = addr + len;
21639 - return addr;
21640 - }
21641 + if (check_heap_stack_gap(vma, addr, len))
21642 + break;
21643 if (addr + mm->cached_hole_size < vma->vm_start)
21644 mm->cached_hole_size = vma->vm_start - addr;
21645 addr = ALIGN(vma->vm_end, huge_page_size(h));
21646 }
21647 +
21648 + mm->free_area_cache = addr + len;
21649 + return addr;
21650 }
21651
21652 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
21653 @@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmappe
21654 {
21655 struct hstate *h = hstate_file(file);
21656 struct mm_struct *mm = current->mm;
21657 - struct vm_area_struct *vma, *prev_vma;
21658 - unsigned long base = mm->mmap_base, addr = addr0;
21659 + struct vm_area_struct *vma;
21660 + unsigned long base = mm->mmap_base, addr;
21661 unsigned long largest_hole = mm->cached_hole_size;
21662 - int first_time = 1;
21663
21664 /* don't allow allocations above current base */
21665 if (mm->free_area_cache > base)
21666 @@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmappe
21667 largest_hole = 0;
21668 mm->free_area_cache = base;
21669 }
21670 -try_again:
21671 +
21672 /* make sure it can fit in the remaining address space */
21673 if (mm->free_area_cache < len)
21674 goto fail;
21675
21676 /* either no address requested or cant fit in requested address hole */
21677 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
21678 + addr = (mm->free_area_cache - len);
21679 do {
21680 + addr &= huge_page_mask(h);
21681 + vma = find_vma(mm, addr);
21682 /*
21683 * Lookup failure means no vma is above this address,
21684 * i.e. return with success:
21685 - */
21686 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
21687 - return addr;
21688 -
21689 - /*
21690 * new region fits between prev_vma->vm_end and
21691 * vma->vm_start, use it:
21692 */
21693 - if (addr + len <= vma->vm_start &&
21694 - (!prev_vma || (addr >= prev_vma->vm_end))) {
21695 + if (check_heap_stack_gap(vma, addr, len)) {
21696 /* remember the address as a hint for next time */
21697 - mm->cached_hole_size = largest_hole;
21698 - return (mm->free_area_cache = addr);
21699 - } else {
21700 - /* pull free_area_cache down to the first hole */
21701 - if (mm->free_area_cache == vma->vm_end) {
21702 - mm->free_area_cache = vma->vm_start;
21703 - mm->cached_hole_size = largest_hole;
21704 - }
21705 + mm->cached_hole_size = largest_hole;
21706 + return (mm->free_area_cache = addr);
21707 + }
21708 + /* pull free_area_cache down to the first hole */
21709 + if (mm->free_area_cache == vma->vm_end) {
21710 + mm->free_area_cache = vma->vm_start;
21711 + mm->cached_hole_size = largest_hole;
21712 }
21713
21714 /* remember the largest hole we saw so far */
21715 if (addr + largest_hole < vma->vm_start)
21716 - largest_hole = vma->vm_start - addr;
21717 + largest_hole = vma->vm_start - addr;
21718
21719 /* try just below the current vma->vm_start */
21720 - addr = (vma->vm_start - len) & huge_page_mask(h);
21721 - } while (len <= vma->vm_start);
21722 + addr = skip_heap_stack_gap(vma, len);
21723 + } while (!IS_ERR_VALUE(addr));
21724
21725 fail:
21726 /*
21727 - * if hint left us with no space for the requested
21728 - * mapping then try again:
21729 - */
21730 - if (first_time) {
21731 - mm->free_area_cache = base;
21732 - largest_hole = 0;
21733 - first_time = 0;
21734 - goto try_again;
21735 - }
21736 - /*
21737 * A failed mmap() very likely causes application failure,
21738 * so fall back to the bottom-up function here. This scenario
21739 * can happen with large stack limits and large mmap()
21740 * allocations.
21741 */
21742 - mm->free_area_cache = TASK_UNMAPPED_BASE;
21743 +
21744 +#ifdef CONFIG_PAX_SEGMEXEC
21745 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21746 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
21747 + else
21748 +#endif
21749 +
21750 + mm->mmap_base = TASK_UNMAPPED_BASE;
21751 +
21752 +#ifdef CONFIG_PAX_RANDMMAP
21753 + if (mm->pax_flags & MF_PAX_RANDMMAP)
21754 + mm->mmap_base += mm->delta_mmap;
21755 +#endif
21756 +
21757 + mm->free_area_cache = mm->mmap_base;
21758 mm->cached_hole_size = ~0UL;
21759 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
21760 len, pgoff, flags);
21761 @@ -387,6 +393,7 @@ fail:
21762 /*
21763 * Restore the topdown base:
21764 */
21765 + mm->mmap_base = base;
21766 mm->free_area_cache = base;
21767 mm->cached_hole_size = ~0UL;
21768
21769 @@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *f
21770 struct hstate *h = hstate_file(file);
21771 struct mm_struct *mm = current->mm;
21772 struct vm_area_struct *vma;
21773 + unsigned long pax_task_size = TASK_SIZE;
21774
21775 if (len & ~huge_page_mask(h))
21776 return -EINVAL;
21777 - if (len > TASK_SIZE)
21778 +
21779 +#ifdef CONFIG_PAX_SEGMEXEC
21780 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21781 + pax_task_size = SEGMEXEC_TASK_SIZE;
21782 +#endif
21783 +
21784 + pax_task_size -= PAGE_SIZE;
21785 +
21786 + if (len > pax_task_size)
21787 return -ENOMEM;
21788
21789 if (flags & MAP_FIXED) {
21790 @@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *f
21791 if (addr) {
21792 addr = ALIGN(addr, huge_page_size(h));
21793 vma = find_vma(mm, addr);
21794 - if (TASK_SIZE - len >= addr &&
21795 - (!vma || addr + len <= vma->vm_start))
21796 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
21797 return addr;
21798 }
21799 if (mm->get_unmapped_area == arch_get_unmapped_area)
21800 diff -urNp linux-2.6.32.45/arch/x86/mm/init_32.c linux-2.6.32.45/arch/x86/mm/init_32.c
21801 --- linux-2.6.32.45/arch/x86/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
21802 +++ linux-2.6.32.45/arch/x86/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
21803 @@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
21804 }
21805
21806 /*
21807 - * Creates a middle page table and puts a pointer to it in the
21808 - * given global directory entry. This only returns the gd entry
21809 - * in non-PAE compilation mode, since the middle layer is folded.
21810 - */
21811 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
21812 -{
21813 - pud_t *pud;
21814 - pmd_t *pmd_table;
21815 -
21816 -#ifdef CONFIG_X86_PAE
21817 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
21818 - if (after_bootmem)
21819 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
21820 - else
21821 - pmd_table = (pmd_t *)alloc_low_page();
21822 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
21823 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
21824 - pud = pud_offset(pgd, 0);
21825 - BUG_ON(pmd_table != pmd_offset(pud, 0));
21826 -
21827 - return pmd_table;
21828 - }
21829 -#endif
21830 - pud = pud_offset(pgd, 0);
21831 - pmd_table = pmd_offset(pud, 0);
21832 -
21833 - return pmd_table;
21834 -}
21835 -
21836 -/*
21837 * Create a page table and place a pointer to it in a middle page
21838 * directory entry:
21839 */
21840 @@ -121,13 +91,28 @@ static pte_t * __init one_page_table_ini
21841 page_table = (pte_t *)alloc_low_page();
21842
21843 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
21844 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21845 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
21846 +#else
21847 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
21848 +#endif
21849 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
21850 }
21851
21852 return pte_offset_kernel(pmd, 0);
21853 }
21854
21855 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
21856 +{
21857 + pud_t *pud;
21858 + pmd_t *pmd_table;
21859 +
21860 + pud = pud_offset(pgd, 0);
21861 + pmd_table = pmd_offset(pud, 0);
21862 +
21863 + return pmd_table;
21864 +}
21865 +
21866 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
21867 {
21868 int pgd_idx = pgd_index(vaddr);
21869 @@ -201,6 +186,7 @@ page_table_range_init(unsigned long star
21870 int pgd_idx, pmd_idx;
21871 unsigned long vaddr;
21872 pgd_t *pgd;
21873 + pud_t *pud;
21874 pmd_t *pmd;
21875 pte_t *pte = NULL;
21876
21877 @@ -210,8 +196,13 @@ page_table_range_init(unsigned long star
21878 pgd = pgd_base + pgd_idx;
21879
21880 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
21881 - pmd = one_md_table_init(pgd);
21882 - pmd = pmd + pmd_index(vaddr);
21883 + pud = pud_offset(pgd, vaddr);
21884 + pmd = pmd_offset(pud, vaddr);
21885 +
21886 +#ifdef CONFIG_X86_PAE
21887 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
21888 +#endif
21889 +
21890 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
21891 pmd++, pmd_idx++) {
21892 pte = page_table_kmap_check(one_page_table_init(pmd),
21893 @@ -223,11 +214,20 @@ page_table_range_init(unsigned long star
21894 }
21895 }
21896
21897 -static inline int is_kernel_text(unsigned long addr)
21898 +static inline int is_kernel_text(unsigned long start, unsigned long end)
21899 {
21900 - if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
21901 - return 1;
21902 - return 0;
21903 + if ((start > ktla_ktva((unsigned long)_etext) ||
21904 + end <= ktla_ktva((unsigned long)_stext)) &&
21905 + (start > ktla_ktva((unsigned long)_einittext) ||
21906 + end <= ktla_ktva((unsigned long)_sinittext)) &&
21907 +
21908 +#ifdef CONFIG_ACPI_SLEEP
21909 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
21910 +#endif
21911 +
21912 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
21913 + return 0;
21914 + return 1;
21915 }
21916
21917 /*
21918 @@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned lo
21919 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
21920 unsigned long start_pfn, end_pfn;
21921 pgd_t *pgd_base = swapper_pg_dir;
21922 - int pgd_idx, pmd_idx, pte_ofs;
21923 + unsigned int pgd_idx, pmd_idx, pte_ofs;
21924 unsigned long pfn;
21925 pgd_t *pgd;
21926 + pud_t *pud;
21927 pmd_t *pmd;
21928 pte_t *pte;
21929 unsigned pages_2m, pages_4k;
21930 @@ -278,8 +279,13 @@ repeat:
21931 pfn = start_pfn;
21932 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
21933 pgd = pgd_base + pgd_idx;
21934 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
21935 - pmd = one_md_table_init(pgd);
21936 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
21937 + pud = pud_offset(pgd, 0);
21938 + pmd = pmd_offset(pud, 0);
21939 +
21940 +#ifdef CONFIG_X86_PAE
21941 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
21942 +#endif
21943
21944 if (pfn >= end_pfn)
21945 continue;
21946 @@ -291,14 +297,13 @@ repeat:
21947 #endif
21948 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
21949 pmd++, pmd_idx++) {
21950 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
21951 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
21952
21953 /*
21954 * Map with big pages if possible, otherwise
21955 * create normal page tables:
21956 */
21957 if (use_pse) {
21958 - unsigned int addr2;
21959 pgprot_t prot = PAGE_KERNEL_LARGE;
21960 /*
21961 * first pass will use the same initial
21962 @@ -308,11 +313,7 @@ repeat:
21963 __pgprot(PTE_IDENT_ATTR |
21964 _PAGE_PSE);
21965
21966 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
21967 - PAGE_OFFSET + PAGE_SIZE-1;
21968 -
21969 - if (is_kernel_text(addr) ||
21970 - is_kernel_text(addr2))
21971 + if (is_kernel_text(address, address + PMD_SIZE))
21972 prot = PAGE_KERNEL_LARGE_EXEC;
21973
21974 pages_2m++;
21975 @@ -329,7 +330,7 @@ repeat:
21976 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
21977 pte += pte_ofs;
21978 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
21979 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
21980 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
21981 pgprot_t prot = PAGE_KERNEL;
21982 /*
21983 * first pass will use the same initial
21984 @@ -337,7 +338,7 @@ repeat:
21985 */
21986 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
21987
21988 - if (is_kernel_text(addr))
21989 + if (is_kernel_text(address, address + PAGE_SIZE))
21990 prot = PAGE_KERNEL_EXEC;
21991
21992 pages_4k++;
21993 @@ -489,7 +490,7 @@ void __init native_pagetable_setup_start
21994
21995 pud = pud_offset(pgd, va);
21996 pmd = pmd_offset(pud, va);
21997 - if (!pmd_present(*pmd))
21998 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
21999 break;
22000
22001 pte = pte_offset_kernel(pmd, va);
22002 @@ -541,9 +542,7 @@ void __init early_ioremap_page_table_ran
22003
22004 static void __init pagetable_init(void)
22005 {
22006 - pgd_t *pgd_base = swapper_pg_dir;
22007 -
22008 - permanent_kmaps_init(pgd_base);
22009 + permanent_kmaps_init(swapper_pg_dir);
22010 }
22011
22012 #ifdef CONFIG_ACPI_SLEEP
22013 @@ -551,12 +550,12 @@ static void __init pagetable_init(void)
22014 * ACPI suspend needs this for resume, because things like the intel-agp
22015 * driver might have split up a kernel 4MB mapping.
22016 */
22017 -char swsusp_pg_dir[PAGE_SIZE]
22018 +pgd_t swsusp_pg_dir[PTRS_PER_PGD]
22019 __attribute__ ((aligned(PAGE_SIZE)));
22020
22021 static inline void save_pg_dir(void)
22022 {
22023 - memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
22024 + clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
22025 }
22026 #else /* !CONFIG_ACPI_SLEEP */
22027 static inline void save_pg_dir(void)
22028 @@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
22029 flush_tlb_all();
22030 }
22031
22032 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22033 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22034 EXPORT_SYMBOL_GPL(__supported_pte_mask);
22035
22036 /* user-defined highmem size */
22037 @@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void
22038 * Initialize the boot-time allocator (with low memory only):
22039 */
22040 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
22041 - bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
22042 + bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
22043 PAGE_SIZE);
22044 if (bootmap == -1L)
22045 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
22046 @@ -864,6 +863,12 @@ void __init mem_init(void)
22047
22048 pci_iommu_alloc();
22049
22050 +#ifdef CONFIG_PAX_PER_CPU_PGD
22051 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22052 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22053 + KERNEL_PGD_PTRS);
22054 +#endif
22055 +
22056 #ifdef CONFIG_FLATMEM
22057 BUG_ON(!mem_map);
22058 #endif
22059 @@ -881,7 +886,7 @@ void __init mem_init(void)
22060 set_highmem_pages_init();
22061
22062 codesize = (unsigned long) &_etext - (unsigned long) &_text;
22063 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
22064 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
22065 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
22066
22067 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
22068 @@ -923,10 +928,10 @@ void __init mem_init(void)
22069 ((unsigned long)&__init_end -
22070 (unsigned long)&__init_begin) >> 10,
22071
22072 - (unsigned long)&_etext, (unsigned long)&_edata,
22073 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
22074 + (unsigned long)&_sdata, (unsigned long)&_edata,
22075 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
22076
22077 - (unsigned long)&_text, (unsigned long)&_etext,
22078 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
22079 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
22080
22081 /*
22082 @@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
22083 if (!kernel_set_to_readonly)
22084 return;
22085
22086 + start = ktla_ktva(start);
22087 pr_debug("Set kernel text: %lx - %lx for read write\n",
22088 start, start+size);
22089
22090 @@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
22091 if (!kernel_set_to_readonly)
22092 return;
22093
22094 + start = ktla_ktva(start);
22095 pr_debug("Set kernel text: %lx - %lx for read only\n",
22096 start, start+size);
22097
22098 @@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
22099 unsigned long start = PFN_ALIGN(_text);
22100 unsigned long size = PFN_ALIGN(_etext) - start;
22101
22102 + start = ktla_ktva(start);
22103 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
22104 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
22105 size >> 10);
22106 diff -urNp linux-2.6.32.45/arch/x86/mm/init_64.c linux-2.6.32.45/arch/x86/mm/init_64.c
22107 --- linux-2.6.32.45/arch/x86/mm/init_64.c 2011-04-17 17:00:52.000000000 -0400
22108 +++ linux-2.6.32.45/arch/x86/mm/init_64.c 2011-04-17 17:03:05.000000000 -0400
22109 @@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
22110 pmd = fill_pmd(pud, vaddr);
22111 pte = fill_pte(pmd, vaddr);
22112
22113 + pax_open_kernel();
22114 set_pte(pte, new_pte);
22115 + pax_close_kernel();
22116
22117 /*
22118 * It's enough to flush this one mapping.
22119 @@ -223,14 +225,12 @@ static void __init __init_extra_mapping(
22120 pgd = pgd_offset_k((unsigned long)__va(phys));
22121 if (pgd_none(*pgd)) {
22122 pud = (pud_t *) spp_getpage();
22123 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
22124 - _PAGE_USER));
22125 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
22126 }
22127 pud = pud_offset(pgd, (unsigned long)__va(phys));
22128 if (pud_none(*pud)) {
22129 pmd = (pmd_t *) spp_getpage();
22130 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
22131 - _PAGE_USER));
22132 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
22133 }
22134 pmd = pmd_offset(pud, phys);
22135 BUG_ON(!pmd_none(*pmd));
22136 @@ -675,6 +675,12 @@ void __init mem_init(void)
22137
22138 pci_iommu_alloc();
22139
22140 +#ifdef CONFIG_PAX_PER_CPU_PGD
22141 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22142 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22143 + KERNEL_PGD_PTRS);
22144 +#endif
22145 +
22146 /* clear_bss() already clear the empty_zero_page */
22147
22148 reservedpages = 0;
22149 @@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
22150 static struct vm_area_struct gate_vma = {
22151 .vm_start = VSYSCALL_START,
22152 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
22153 - .vm_page_prot = PAGE_READONLY_EXEC,
22154 - .vm_flags = VM_READ | VM_EXEC
22155 + .vm_page_prot = PAGE_READONLY,
22156 + .vm_flags = VM_READ
22157 };
22158
22159 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
22160 @@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long a
22161
22162 const char *arch_vma_name(struct vm_area_struct *vma)
22163 {
22164 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
22165 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
22166 return "[vdso]";
22167 if (vma == &gate_vma)
22168 return "[vsyscall]";
22169 diff -urNp linux-2.6.32.45/arch/x86/mm/init.c linux-2.6.32.45/arch/x86/mm/init.c
22170 --- linux-2.6.32.45/arch/x86/mm/init.c 2011-04-17 17:00:52.000000000 -0400
22171 +++ linux-2.6.32.45/arch/x86/mm/init.c 2011-06-07 19:06:09.000000000 -0400
22172 @@ -69,11 +69,7 @@ static void __init find_early_table_spac
22173 * cause a hotspot and fill up ZONE_DMA. The page tables
22174 * need roughly 0.5KB per GB.
22175 */
22176 -#ifdef CONFIG_X86_32
22177 - start = 0x7000;
22178 -#else
22179 - start = 0x8000;
22180 -#endif
22181 + start = 0x100000;
22182 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
22183 tables, PAGE_SIZE);
22184 if (e820_table_start == -1UL)
22185 @@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_m
22186 #endif
22187
22188 set_nx();
22189 - if (nx_enabled)
22190 + if (nx_enabled && cpu_has_nx)
22191 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
22192
22193 /* Enable PSE if available */
22194 @@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_m
22195 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
22196 * mmio resources as well as potential bios/acpi data regions.
22197 */
22198 +
22199 int devmem_is_allowed(unsigned long pagenr)
22200 {
22201 +#ifdef CONFIG_GRKERNSEC_KMEM
22202 + /* allow BDA */
22203 + if (!pagenr)
22204 + return 1;
22205 + /* allow EBDA */
22206 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
22207 + return 1;
22208 + /* allow ISA/video mem */
22209 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22210 + return 1;
22211 + /* throw out everything else below 1MB */
22212 + if (pagenr <= 256)
22213 + return 0;
22214 +#else
22215 if (pagenr <= 256)
22216 return 1;
22217 +#endif
22218 +
22219 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
22220 return 0;
22221 if (!page_is_ram(pagenr))
22222 @@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigne
22223
22224 void free_initmem(void)
22225 {
22226 +
22227 +#ifdef CONFIG_PAX_KERNEXEC
22228 +#ifdef CONFIG_X86_32
22229 + /* PaX: limit KERNEL_CS to actual size */
22230 + unsigned long addr, limit;
22231 + struct desc_struct d;
22232 + int cpu;
22233 +
22234 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
22235 + limit = (limit - 1UL) >> PAGE_SHIFT;
22236 +
22237 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
22238 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
22239 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
22240 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
22241 + }
22242 +
22243 + /* PaX: make KERNEL_CS read-only */
22244 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
22245 + if (!paravirt_enabled())
22246 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
22247 +/*
22248 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
22249 + pgd = pgd_offset_k(addr);
22250 + pud = pud_offset(pgd, addr);
22251 + pmd = pmd_offset(pud, addr);
22252 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22253 + }
22254 +*/
22255 +#ifdef CONFIG_X86_PAE
22256 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
22257 +/*
22258 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
22259 + pgd = pgd_offset_k(addr);
22260 + pud = pud_offset(pgd, addr);
22261 + pmd = pmd_offset(pud, addr);
22262 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22263 + }
22264 +*/
22265 +#endif
22266 +
22267 +#ifdef CONFIG_MODULES
22268 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
22269 +#endif
22270 +
22271 +#else
22272 + pgd_t *pgd;
22273 + pud_t *pud;
22274 + pmd_t *pmd;
22275 + unsigned long addr, end;
22276 +
22277 + /* PaX: make kernel code/rodata read-only, rest non-executable */
22278 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
22279 + pgd = pgd_offset_k(addr);
22280 + pud = pud_offset(pgd, addr);
22281 + pmd = pmd_offset(pud, addr);
22282 + if (!pmd_present(*pmd))
22283 + continue;
22284 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
22285 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22286 + else
22287 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22288 + }
22289 +
22290 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
22291 + end = addr + KERNEL_IMAGE_SIZE;
22292 + for (; addr < end; addr += PMD_SIZE) {
22293 + pgd = pgd_offset_k(addr);
22294 + pud = pud_offset(pgd, addr);
22295 + pmd = pmd_offset(pud, addr);
22296 + if (!pmd_present(*pmd))
22297 + continue;
22298 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
22299 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22300 + }
22301 +#endif
22302 +
22303 + flush_tlb_all();
22304 +#endif
22305 +
22306 free_init_pages("unused kernel memory",
22307 (unsigned long)(&__init_begin),
22308 (unsigned long)(&__init_end));
22309 diff -urNp linux-2.6.32.45/arch/x86/mm/iomap_32.c linux-2.6.32.45/arch/x86/mm/iomap_32.c
22310 --- linux-2.6.32.45/arch/x86/mm/iomap_32.c 2011-03-27 14:31:47.000000000 -0400
22311 +++ linux-2.6.32.45/arch/x86/mm/iomap_32.c 2011-04-17 15:56:46.000000000 -0400
22312 @@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long
22313 debug_kmap_atomic(type);
22314 idx = type + KM_TYPE_NR * smp_processor_id();
22315 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22316 +
22317 + pax_open_kernel();
22318 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
22319 + pax_close_kernel();
22320 +
22321 arch_flush_lazy_mmu_mode();
22322
22323 return (void *)vaddr;
22324 diff -urNp linux-2.6.32.45/arch/x86/mm/ioremap.c linux-2.6.32.45/arch/x86/mm/ioremap.c
22325 --- linux-2.6.32.45/arch/x86/mm/ioremap.c 2011-03-27 14:31:47.000000000 -0400
22326 +++ linux-2.6.32.45/arch/x86/mm/ioremap.c 2011-04-17 15:56:46.000000000 -0400
22327 @@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
22328 * Second special case: Some BIOSen report the PC BIOS
22329 * area (640->1Mb) as ram even though it is not.
22330 */
22331 - if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
22332 - pagenr < (BIOS_END >> PAGE_SHIFT))
22333 + if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
22334 + pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22335 return 0;
22336
22337 for (i = 0; i < e820.nr_map; i++) {
22338 @@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(re
22339 /*
22340 * Don't allow anybody to remap normal RAM that we're using..
22341 */
22342 - for (pfn = phys_addr >> PAGE_SHIFT;
22343 - (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
22344 - pfn++) {
22345 -
22346 + for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
22347 int is_ram = page_is_ram(pfn);
22348
22349 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
22350 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
22351 return NULL;
22352 WARN_ON_ONCE(is_ram);
22353 }
22354 @@ -407,7 +404,7 @@ static int __init early_ioremap_debug_se
22355 early_param("early_ioremap_debug", early_ioremap_debug_setup);
22356
22357 static __initdata int after_paging_init;
22358 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
22359 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
22360
22361 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
22362 {
22363 @@ -439,8 +436,7 @@ void __init early_ioremap_init(void)
22364 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
22365
22366 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
22367 - memset(bm_pte, 0, sizeof(bm_pte));
22368 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
22369 + pmd_populate_user(&init_mm, pmd, bm_pte);
22370
22371 /*
22372 * The boot-ioremap range spans multiple pmds, for which
22373 diff -urNp linux-2.6.32.45/arch/x86/mm/kmemcheck/kmemcheck.c linux-2.6.32.45/arch/x86/mm/kmemcheck/kmemcheck.c
22374 --- linux-2.6.32.45/arch/x86/mm/kmemcheck/kmemcheck.c 2011-03-27 14:31:47.000000000 -0400
22375 +++ linux-2.6.32.45/arch/x86/mm/kmemcheck/kmemcheck.c 2011-04-17 15:56:46.000000000 -0400
22376 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
22377 * memory (e.g. tracked pages)? For now, we need this to avoid
22378 * invoking kmemcheck for PnP BIOS calls.
22379 */
22380 - if (regs->flags & X86_VM_MASK)
22381 + if (v8086_mode(regs))
22382 return false;
22383 - if (regs->cs != __KERNEL_CS)
22384 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
22385 return false;
22386
22387 pte = kmemcheck_pte_lookup(address);
22388 diff -urNp linux-2.6.32.45/arch/x86/mm/mmap.c linux-2.6.32.45/arch/x86/mm/mmap.c
22389 --- linux-2.6.32.45/arch/x86/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
22390 +++ linux-2.6.32.45/arch/x86/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
22391 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
22392 * Leave an at least ~128 MB hole with possible stack randomization.
22393 */
22394 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
22395 -#define MAX_GAP (TASK_SIZE/6*5)
22396 +#define MAX_GAP (pax_task_size/6*5)
22397
22398 /*
22399 * True on X86_32 or when emulating IA32 on X86_64
22400 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
22401 return rnd << PAGE_SHIFT;
22402 }
22403
22404 -static unsigned long mmap_base(void)
22405 +static unsigned long mmap_base(struct mm_struct *mm)
22406 {
22407 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
22408 + unsigned long pax_task_size = TASK_SIZE;
22409 +
22410 +#ifdef CONFIG_PAX_SEGMEXEC
22411 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22412 + pax_task_size = SEGMEXEC_TASK_SIZE;
22413 +#endif
22414
22415 if (gap < MIN_GAP)
22416 gap = MIN_GAP;
22417 else if (gap > MAX_GAP)
22418 gap = MAX_GAP;
22419
22420 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
22421 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
22422 }
22423
22424 /*
22425 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
22426 * does, but not when emulating X86_32
22427 */
22428 -static unsigned long mmap_legacy_base(void)
22429 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
22430 {
22431 - if (mmap_is_ia32())
22432 + if (mmap_is_ia32()) {
22433 +
22434 +#ifdef CONFIG_PAX_SEGMEXEC
22435 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22436 + return SEGMEXEC_TASK_UNMAPPED_BASE;
22437 + else
22438 +#endif
22439 +
22440 return TASK_UNMAPPED_BASE;
22441 - else
22442 + } else
22443 return TASK_UNMAPPED_BASE + mmap_rnd();
22444 }
22445
22446 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
22447 void arch_pick_mmap_layout(struct mm_struct *mm)
22448 {
22449 if (mmap_is_legacy()) {
22450 - mm->mmap_base = mmap_legacy_base();
22451 + mm->mmap_base = mmap_legacy_base(mm);
22452 +
22453 +#ifdef CONFIG_PAX_RANDMMAP
22454 + if (mm->pax_flags & MF_PAX_RANDMMAP)
22455 + mm->mmap_base += mm->delta_mmap;
22456 +#endif
22457 +
22458 mm->get_unmapped_area = arch_get_unmapped_area;
22459 mm->unmap_area = arch_unmap_area;
22460 } else {
22461 - mm->mmap_base = mmap_base();
22462 + mm->mmap_base = mmap_base(mm);
22463 +
22464 +#ifdef CONFIG_PAX_RANDMMAP
22465 + if (mm->pax_flags & MF_PAX_RANDMMAP)
22466 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
22467 +#endif
22468 +
22469 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
22470 mm->unmap_area = arch_unmap_area_topdown;
22471 }
22472 diff -urNp linux-2.6.32.45/arch/x86/mm/mmio-mod.c linux-2.6.32.45/arch/x86/mm/mmio-mod.c
22473 --- linux-2.6.32.45/arch/x86/mm/mmio-mod.c 2011-03-27 14:31:47.000000000 -0400
22474 +++ linux-2.6.32.45/arch/x86/mm/mmio-mod.c 2011-07-06 19:53:33.000000000 -0400
22475 @@ -193,7 +193,7 @@ static void pre(struct kmmio_probe *p, s
22476 break;
22477 default:
22478 {
22479 - unsigned char *ip = (unsigned char *)instptr;
22480 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
22481 my_trace->opcode = MMIO_UNKNOWN_OP;
22482 my_trace->width = 0;
22483 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
22484 @@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p,
22485 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
22486 void __iomem *addr)
22487 {
22488 - static atomic_t next_id;
22489 + static atomic_unchecked_t next_id;
22490 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
22491 /* These are page-unaligned. */
22492 struct mmiotrace_map map = {
22493 @@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_
22494 .private = trace
22495 },
22496 .phys = offset,
22497 - .id = atomic_inc_return(&next_id)
22498 + .id = atomic_inc_return_unchecked(&next_id)
22499 };
22500 map.map_id = trace->id;
22501
22502 diff -urNp linux-2.6.32.45/arch/x86/mm/numa_32.c linux-2.6.32.45/arch/x86/mm/numa_32.c
22503 --- linux-2.6.32.45/arch/x86/mm/numa_32.c 2011-03-27 14:31:47.000000000 -0400
22504 +++ linux-2.6.32.45/arch/x86/mm/numa_32.c 2011-04-17 15:56:46.000000000 -0400
22505 @@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int
22506 }
22507 #endif
22508
22509 -extern unsigned long find_max_low_pfn(void);
22510 extern unsigned long highend_pfn, highstart_pfn;
22511
22512 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
22513 diff -urNp linux-2.6.32.45/arch/x86/mm/pageattr.c linux-2.6.32.45/arch/x86/mm/pageattr.c
22514 --- linux-2.6.32.45/arch/x86/mm/pageattr.c 2011-03-27 14:31:47.000000000 -0400
22515 +++ linux-2.6.32.45/arch/x86/mm/pageattr.c 2011-04-17 15:56:46.000000000 -0400
22516 @@ -261,16 +261,17 @@ static inline pgprot_t static_protection
22517 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
22518 */
22519 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
22520 - pgprot_val(forbidden) |= _PAGE_NX;
22521 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22522
22523 /*
22524 * The kernel text needs to be executable for obvious reasons
22525 * Does not cover __inittext since that is gone later on. On
22526 * 64bit we do not enforce !NX on the low mapping
22527 */
22528 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
22529 - pgprot_val(forbidden) |= _PAGE_NX;
22530 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
22531 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22532
22533 +#ifdef CONFIG_DEBUG_RODATA
22534 /*
22535 * The .rodata section needs to be read-only. Using the pfn
22536 * catches all aliases.
22537 @@ -278,6 +279,14 @@ static inline pgprot_t static_protection
22538 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
22539 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
22540 pgprot_val(forbidden) |= _PAGE_RW;
22541 +#endif
22542 +
22543 +#ifdef CONFIG_PAX_KERNEXEC
22544 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
22545 + pgprot_val(forbidden) |= _PAGE_RW;
22546 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22547 + }
22548 +#endif
22549
22550 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
22551
22552 @@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
22553 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
22554 {
22555 /* change init_mm */
22556 + pax_open_kernel();
22557 set_pte_atomic(kpte, pte);
22558 +
22559 #ifdef CONFIG_X86_32
22560 if (!SHARED_KERNEL_PMD) {
22561 +
22562 +#ifdef CONFIG_PAX_PER_CPU_PGD
22563 + unsigned long cpu;
22564 +#else
22565 struct page *page;
22566 +#endif
22567
22568 +#ifdef CONFIG_PAX_PER_CPU_PGD
22569 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
22570 + pgd_t *pgd = get_cpu_pgd(cpu);
22571 +#else
22572 list_for_each_entry(page, &pgd_list, lru) {
22573 - pgd_t *pgd;
22574 + pgd_t *pgd = (pgd_t *)page_address(page);
22575 +#endif
22576 +
22577 pud_t *pud;
22578 pmd_t *pmd;
22579
22580 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
22581 + pgd += pgd_index(address);
22582 pud = pud_offset(pgd, address);
22583 pmd = pmd_offset(pud, address);
22584 set_pte_atomic((pte_t *)pmd, pte);
22585 }
22586 }
22587 #endif
22588 + pax_close_kernel();
22589 }
22590
22591 static int
22592 diff -urNp linux-2.6.32.45/arch/x86/mm/pageattr-test.c linux-2.6.32.45/arch/x86/mm/pageattr-test.c
22593 --- linux-2.6.32.45/arch/x86/mm/pageattr-test.c 2011-03-27 14:31:47.000000000 -0400
22594 +++ linux-2.6.32.45/arch/x86/mm/pageattr-test.c 2011-04-17 15:56:46.000000000 -0400
22595 @@ -36,7 +36,7 @@ enum {
22596
22597 static int pte_testbit(pte_t pte)
22598 {
22599 - return pte_flags(pte) & _PAGE_UNUSED1;
22600 + return pte_flags(pte) & _PAGE_CPA_TEST;
22601 }
22602
22603 struct split_state {
22604 diff -urNp linux-2.6.32.45/arch/x86/mm/pat.c linux-2.6.32.45/arch/x86/mm/pat.c
22605 --- linux-2.6.32.45/arch/x86/mm/pat.c 2011-03-27 14:31:47.000000000 -0400
22606 +++ linux-2.6.32.45/arch/x86/mm/pat.c 2011-04-17 15:56:46.000000000 -0400
22607 @@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct
22608
22609 conflict:
22610 printk(KERN_INFO "%s:%d conflicting memory types "
22611 - "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
22612 + "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
22613 new->end, cattr_name(new->type), cattr_name(entry->type));
22614 return -EBUSY;
22615 }
22616 @@ -559,7 +559,7 @@ unlock_ret:
22617
22618 if (err) {
22619 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
22620 - current->comm, current->pid, start, end);
22621 + current->comm, task_pid_nr(current), start, end);
22622 }
22623
22624 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
22625 @@ -689,8 +689,8 @@ static inline int range_is_allowed(unsig
22626 while (cursor < to) {
22627 if (!devmem_is_allowed(pfn)) {
22628 printk(KERN_INFO
22629 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
22630 - current->comm, from, to);
22631 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
22632 + current->comm, from, to, cursor);
22633 return 0;
22634 }
22635 cursor += PAGE_SIZE;
22636 @@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, un
22637 printk(KERN_INFO
22638 "%s:%d ioremap_change_attr failed %s "
22639 "for %Lx-%Lx\n",
22640 - current->comm, current->pid,
22641 + current->comm, task_pid_nr(current),
22642 cattr_name(flags),
22643 base, (unsigned long long)(base + size));
22644 return -EINVAL;
22645 @@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr,
22646 free_memtype(paddr, paddr + size);
22647 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
22648 " for %Lx-%Lx, got %s\n",
22649 - current->comm, current->pid,
22650 + current->comm, task_pid_nr(current),
22651 cattr_name(want_flags),
22652 (unsigned long long)paddr,
22653 (unsigned long long)(paddr + size),
22654 diff -urNp linux-2.6.32.45/arch/x86/mm/pf_in.c linux-2.6.32.45/arch/x86/mm/pf_in.c
22655 --- linux-2.6.32.45/arch/x86/mm/pf_in.c 2011-03-27 14:31:47.000000000 -0400
22656 +++ linux-2.6.32.45/arch/x86/mm/pf_in.c 2011-07-06 19:53:33.000000000 -0400
22657 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
22658 int i;
22659 enum reason_type rv = OTHERS;
22660
22661 - p = (unsigned char *)ins_addr;
22662 + p = (unsigned char *)ktla_ktva(ins_addr);
22663 p += skip_prefix(p, &prf);
22664 p += get_opcode(p, &opcode);
22665
22666 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
22667 struct prefix_bits prf;
22668 int i;
22669
22670 - p = (unsigned char *)ins_addr;
22671 + p = (unsigned char *)ktla_ktva(ins_addr);
22672 p += skip_prefix(p, &prf);
22673 p += get_opcode(p, &opcode);
22674
22675 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
22676 struct prefix_bits prf;
22677 int i;
22678
22679 - p = (unsigned char *)ins_addr;
22680 + p = (unsigned char *)ktla_ktva(ins_addr);
22681 p += skip_prefix(p, &prf);
22682 p += get_opcode(p, &opcode);
22683
22684 @@ -417,7 +417,7 @@ unsigned long get_ins_reg_val(unsigned l
22685 int i;
22686 unsigned long rv;
22687
22688 - p = (unsigned char *)ins_addr;
22689 + p = (unsigned char *)ktla_ktva(ins_addr);
22690 p += skip_prefix(p, &prf);
22691 p += get_opcode(p, &opcode);
22692 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
22693 @@ -472,7 +472,7 @@ unsigned long get_ins_imm_val(unsigned l
22694 int i;
22695 unsigned long rv;
22696
22697 - p = (unsigned char *)ins_addr;
22698 + p = (unsigned char *)ktla_ktva(ins_addr);
22699 p += skip_prefix(p, &prf);
22700 p += get_opcode(p, &opcode);
22701 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
22702 diff -urNp linux-2.6.32.45/arch/x86/mm/pgtable_32.c linux-2.6.32.45/arch/x86/mm/pgtable_32.c
22703 --- linux-2.6.32.45/arch/x86/mm/pgtable_32.c 2011-03-27 14:31:47.000000000 -0400
22704 +++ linux-2.6.32.45/arch/x86/mm/pgtable_32.c 2011-04-17 15:56:46.000000000 -0400
22705 @@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr,
22706 return;
22707 }
22708 pte = pte_offset_kernel(pmd, vaddr);
22709 +
22710 + pax_open_kernel();
22711 if (pte_val(pteval))
22712 set_pte_at(&init_mm, vaddr, pte, pteval);
22713 else
22714 pte_clear(&init_mm, vaddr, pte);
22715 + pax_close_kernel();
22716
22717 /*
22718 * It's enough to flush this one mapping.
22719 diff -urNp linux-2.6.32.45/arch/x86/mm/pgtable.c linux-2.6.32.45/arch/x86/mm/pgtable.c
22720 --- linux-2.6.32.45/arch/x86/mm/pgtable.c 2011-03-27 14:31:47.000000000 -0400
22721 +++ linux-2.6.32.45/arch/x86/mm/pgtable.c 2011-05-11 18:25:15.000000000 -0400
22722 @@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *p
22723 list_del(&page->lru);
22724 }
22725
22726 -#define UNSHARED_PTRS_PER_PGD \
22727 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
22728 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22729 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
22730
22731 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
22732 +{
22733 + while (count--)
22734 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
22735 +}
22736 +#endif
22737 +
22738 +#ifdef CONFIG_PAX_PER_CPU_PGD
22739 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
22740 +{
22741 + while (count--)
22742 +
22743 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22744 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
22745 +#else
22746 + *dst++ = *src++;
22747 +#endif
22748 +
22749 +}
22750 +#endif
22751 +
22752 +#ifdef CONFIG_X86_64
22753 +#define pxd_t pud_t
22754 +#define pyd_t pgd_t
22755 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
22756 +#define pxd_free(mm, pud) pud_free((mm), (pud))
22757 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
22758 +#define pyd_offset(mm ,address) pgd_offset((mm), (address))
22759 +#define PYD_SIZE PGDIR_SIZE
22760 +#else
22761 +#define pxd_t pmd_t
22762 +#define pyd_t pud_t
22763 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
22764 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
22765 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
22766 +#define pyd_offset(mm ,address) pud_offset((mm), (address))
22767 +#define PYD_SIZE PUD_SIZE
22768 +#endif
22769 +
22770 +#ifdef CONFIG_PAX_PER_CPU_PGD
22771 +static inline void pgd_ctor(pgd_t *pgd) {}
22772 +static inline void pgd_dtor(pgd_t *pgd) {}
22773 +#else
22774 static void pgd_ctor(pgd_t *pgd)
22775 {
22776 /* If the pgd points to a shared pagetable level (either the
22777 @@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
22778 pgd_list_del(pgd);
22779 spin_unlock_irqrestore(&pgd_lock, flags);
22780 }
22781 +#endif
22782
22783 /*
22784 * List of all pgd's needed for non-PAE so it can invalidate entries
22785 @@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
22786 * -- wli
22787 */
22788
22789 -#ifdef CONFIG_X86_PAE
22790 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22791 /*
22792 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
22793 * updating the top-level pagetable entries to guarantee the
22794 @@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
22795 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
22796 * and initialize the kernel pmds here.
22797 */
22798 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
22799 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
22800
22801 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
22802 {
22803 @@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm,
22804 */
22805 flush_tlb_mm(mm);
22806 }
22807 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
22808 +#define PREALLOCATED_PXDS USER_PGD_PTRS
22809 #else /* !CONFIG_X86_PAE */
22810
22811 /* No need to prepopulate any pagetable entries in non-PAE modes. */
22812 -#define PREALLOCATED_PMDS 0
22813 +#define PREALLOCATED_PXDS 0
22814
22815 #endif /* CONFIG_X86_PAE */
22816
22817 -static void free_pmds(pmd_t *pmds[])
22818 +static void free_pxds(pxd_t *pxds[])
22819 {
22820 int i;
22821
22822 - for(i = 0; i < PREALLOCATED_PMDS; i++)
22823 - if (pmds[i])
22824 - free_page((unsigned long)pmds[i]);
22825 + for(i = 0; i < PREALLOCATED_PXDS; i++)
22826 + if (pxds[i])
22827 + free_page((unsigned long)pxds[i]);
22828 }
22829
22830 -static int preallocate_pmds(pmd_t *pmds[])
22831 +static int preallocate_pxds(pxd_t *pxds[])
22832 {
22833 int i;
22834 bool failed = false;
22835
22836 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
22837 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
22838 - if (pmd == NULL)
22839 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
22840 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
22841 + if (pxd == NULL)
22842 failed = true;
22843 - pmds[i] = pmd;
22844 + pxds[i] = pxd;
22845 }
22846
22847 if (failed) {
22848 - free_pmds(pmds);
22849 + free_pxds(pxds);
22850 return -ENOMEM;
22851 }
22852
22853 @@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[
22854 * preallocate which never got a corresponding vma will need to be
22855 * freed manually.
22856 */
22857 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
22858 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
22859 {
22860 int i;
22861
22862 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
22863 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
22864 pgd_t pgd = pgdp[i];
22865
22866 if (pgd_val(pgd) != 0) {
22867 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
22868 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
22869
22870 - pgdp[i] = native_make_pgd(0);
22871 + set_pgd(pgdp + i, native_make_pgd(0));
22872
22873 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
22874 - pmd_free(mm, pmd);
22875 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
22876 + pxd_free(mm, pxd);
22877 }
22878 }
22879 }
22880
22881 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
22882 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
22883 {
22884 - pud_t *pud;
22885 + pyd_t *pyd;
22886 unsigned long addr;
22887 int i;
22888
22889 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
22890 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
22891 return;
22892
22893 - pud = pud_offset(pgd, 0);
22894 +#ifdef CONFIG_X86_64
22895 + pyd = pyd_offset(mm, 0L);
22896 +#else
22897 + pyd = pyd_offset(pgd, 0L);
22898 +#endif
22899
22900 - for (addr = i = 0; i < PREALLOCATED_PMDS;
22901 - i++, pud++, addr += PUD_SIZE) {
22902 - pmd_t *pmd = pmds[i];
22903 + for (addr = i = 0; i < PREALLOCATED_PXDS;
22904 + i++, pyd++, addr += PYD_SIZE) {
22905 + pxd_t *pxd = pxds[i];
22906
22907 if (i >= KERNEL_PGD_BOUNDARY)
22908 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
22909 - sizeof(pmd_t) * PTRS_PER_PMD);
22910 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
22911 + sizeof(pxd_t) * PTRS_PER_PMD);
22912
22913 - pud_populate(mm, pud, pmd);
22914 + pyd_populate(mm, pyd, pxd);
22915 }
22916 }
22917
22918 pgd_t *pgd_alloc(struct mm_struct *mm)
22919 {
22920 pgd_t *pgd;
22921 - pmd_t *pmds[PREALLOCATED_PMDS];
22922 + pxd_t *pxds[PREALLOCATED_PXDS];
22923 +
22924 unsigned long flags;
22925
22926 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
22927 @@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
22928
22929 mm->pgd = pgd;
22930
22931 - if (preallocate_pmds(pmds) != 0)
22932 + if (preallocate_pxds(pxds) != 0)
22933 goto out_free_pgd;
22934
22935 if (paravirt_pgd_alloc(mm) != 0)
22936 - goto out_free_pmds;
22937 + goto out_free_pxds;
22938
22939 /*
22940 * Make sure that pre-populating the pmds is atomic with
22941 @@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
22942 spin_lock_irqsave(&pgd_lock, flags);
22943
22944 pgd_ctor(pgd);
22945 - pgd_prepopulate_pmd(mm, pgd, pmds);
22946 + pgd_prepopulate_pxd(mm, pgd, pxds);
22947
22948 spin_unlock_irqrestore(&pgd_lock, flags);
22949
22950 return pgd;
22951
22952 -out_free_pmds:
22953 - free_pmds(pmds);
22954 +out_free_pxds:
22955 + free_pxds(pxds);
22956 out_free_pgd:
22957 free_page((unsigned long)pgd);
22958 out:
22959 @@ -287,7 +338,7 @@ out:
22960
22961 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
22962 {
22963 - pgd_mop_up_pmds(mm, pgd);
22964 + pgd_mop_up_pxds(mm, pgd);
22965 pgd_dtor(pgd);
22966 paravirt_pgd_free(mm, pgd);
22967 free_page((unsigned long)pgd);
22968 diff -urNp linux-2.6.32.45/arch/x86/mm/setup_nx.c linux-2.6.32.45/arch/x86/mm/setup_nx.c
22969 --- linux-2.6.32.45/arch/x86/mm/setup_nx.c 2011-03-27 14:31:47.000000000 -0400
22970 +++ linux-2.6.32.45/arch/x86/mm/setup_nx.c 2011-04-17 15:56:46.000000000 -0400
22971 @@ -4,11 +4,10 @@
22972
22973 #include <asm/pgtable.h>
22974
22975 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22976 int nx_enabled;
22977
22978 -#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
22979 -static int disable_nx __cpuinitdata;
22980 -
22981 +#ifndef CONFIG_PAX_PAGEEXEC
22982 /*
22983 * noexec = on|off
22984 *
22985 @@ -22,32 +21,26 @@ static int __init noexec_setup(char *str
22986 if (!str)
22987 return -EINVAL;
22988 if (!strncmp(str, "on", 2)) {
22989 - __supported_pte_mask |= _PAGE_NX;
22990 - disable_nx = 0;
22991 + nx_enabled = 1;
22992 } else if (!strncmp(str, "off", 3)) {
22993 - disable_nx = 1;
22994 - __supported_pte_mask &= ~_PAGE_NX;
22995 + nx_enabled = 0;
22996 }
22997 return 0;
22998 }
22999 early_param("noexec", noexec_setup);
23000 #endif
23001 +#endif
23002
23003 #ifdef CONFIG_X86_PAE
23004 void __init set_nx(void)
23005 {
23006 - unsigned int v[4], l, h;
23007 + if (!nx_enabled && cpu_has_nx) {
23008 + unsigned l, h;
23009
23010 - if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
23011 - cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
23012 -
23013 - if ((v[3] & (1 << 20)) && !disable_nx) {
23014 - rdmsr(MSR_EFER, l, h);
23015 - l |= EFER_NX;
23016 - wrmsr(MSR_EFER, l, h);
23017 - nx_enabled = 1;
23018 - __supported_pte_mask |= _PAGE_NX;
23019 - }
23020 + __supported_pte_mask &= ~_PAGE_NX;
23021 + rdmsr(MSR_EFER, l, h);
23022 + l &= ~EFER_NX;
23023 + wrmsr(MSR_EFER, l, h);
23024 }
23025 }
23026 #else
23027 @@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
23028 unsigned long efer;
23029
23030 rdmsrl(MSR_EFER, efer);
23031 - if (!(efer & EFER_NX) || disable_nx)
23032 + if (!(efer & EFER_NX) || !nx_enabled)
23033 __supported_pte_mask &= ~_PAGE_NX;
23034 }
23035 #endif
23036 diff -urNp linux-2.6.32.45/arch/x86/mm/tlb.c linux-2.6.32.45/arch/x86/mm/tlb.c
23037 --- linux-2.6.32.45/arch/x86/mm/tlb.c 2011-03-27 14:31:47.000000000 -0400
23038 +++ linux-2.6.32.45/arch/x86/mm/tlb.c 2011-04-23 12:56:10.000000000 -0400
23039 @@ -61,7 +61,11 @@ void leave_mm(int cpu)
23040 BUG();
23041 cpumask_clear_cpu(cpu,
23042 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
23043 +
23044 +#ifndef CONFIG_PAX_PER_CPU_PGD
23045 load_cr3(swapper_pg_dir);
23046 +#endif
23047 +
23048 }
23049 EXPORT_SYMBOL_GPL(leave_mm);
23050
23051 diff -urNp linux-2.6.32.45/arch/x86/oprofile/backtrace.c linux-2.6.32.45/arch/x86/oprofile/backtrace.c
23052 --- linux-2.6.32.45/arch/x86/oprofile/backtrace.c 2011-03-27 14:31:47.000000000 -0400
23053 +++ linux-2.6.32.45/arch/x86/oprofile/backtrace.c 2011-04-17 15:56:46.000000000 -0400
23054 @@ -57,7 +57,7 @@ static struct frame_head *dump_user_back
23055 struct frame_head bufhead[2];
23056
23057 /* Also check accessibility of one struct frame_head beyond */
23058 - if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
23059 + if (!__access_ok(VERIFY_READ, head, sizeof(bufhead)))
23060 return NULL;
23061 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
23062 return NULL;
23063 @@ -77,7 +77,7 @@ x86_backtrace(struct pt_regs * const reg
23064 {
23065 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
23066
23067 - if (!user_mode_vm(regs)) {
23068 + if (!user_mode(regs)) {
23069 unsigned long stack = kernel_stack_pointer(regs);
23070 if (depth)
23071 dump_trace(NULL, regs, (unsigned long *)stack, 0,
23072 diff -urNp linux-2.6.32.45/arch/x86/oprofile/op_model_p4.c linux-2.6.32.45/arch/x86/oprofile/op_model_p4.c
23073 --- linux-2.6.32.45/arch/x86/oprofile/op_model_p4.c 2011-03-27 14:31:47.000000000 -0400
23074 +++ linux-2.6.32.45/arch/x86/oprofile/op_model_p4.c 2011-04-17 15:56:46.000000000 -0400
23075 @@ -50,7 +50,7 @@ static inline void setup_num_counters(vo
23076 #endif
23077 }
23078
23079 -static int inline addr_increment(void)
23080 +static inline int addr_increment(void)
23081 {
23082 #ifdef CONFIG_SMP
23083 return smp_num_siblings == 2 ? 2 : 1;
23084 diff -urNp linux-2.6.32.45/arch/x86/pci/common.c linux-2.6.32.45/arch/x86/pci/common.c
23085 --- linux-2.6.32.45/arch/x86/pci/common.c 2011-03-27 14:31:47.000000000 -0400
23086 +++ linux-2.6.32.45/arch/x86/pci/common.c 2011-04-23 12:56:10.000000000 -0400
23087 @@ -31,8 +31,8 @@ int noioapicreroute = 1;
23088 int pcibios_last_bus = -1;
23089 unsigned long pirq_table_addr;
23090 struct pci_bus *pci_root_bus;
23091 -struct pci_raw_ops *raw_pci_ops;
23092 -struct pci_raw_ops *raw_pci_ext_ops;
23093 +const struct pci_raw_ops *raw_pci_ops;
23094 +const struct pci_raw_ops *raw_pci_ext_ops;
23095
23096 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
23097 int reg, int len, u32 *val)
23098 diff -urNp linux-2.6.32.45/arch/x86/pci/direct.c linux-2.6.32.45/arch/x86/pci/direct.c
23099 --- linux-2.6.32.45/arch/x86/pci/direct.c 2011-03-27 14:31:47.000000000 -0400
23100 +++ linux-2.6.32.45/arch/x86/pci/direct.c 2011-04-17 15:56:46.000000000 -0400
23101 @@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int
23102
23103 #undef PCI_CONF1_ADDRESS
23104
23105 -struct pci_raw_ops pci_direct_conf1 = {
23106 +const struct pci_raw_ops pci_direct_conf1 = {
23107 .read = pci_conf1_read,
23108 .write = pci_conf1_write,
23109 };
23110 @@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int
23111
23112 #undef PCI_CONF2_ADDRESS
23113
23114 -struct pci_raw_ops pci_direct_conf2 = {
23115 +const struct pci_raw_ops pci_direct_conf2 = {
23116 .read = pci_conf2_read,
23117 .write = pci_conf2_write,
23118 };
23119 @@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
23120 * This should be close to trivial, but it isn't, because there are buggy
23121 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
23122 */
23123 -static int __init pci_sanity_check(struct pci_raw_ops *o)
23124 +static int __init pci_sanity_check(const struct pci_raw_ops *o)
23125 {
23126 u32 x = 0;
23127 int year, devfn;
23128 diff -urNp linux-2.6.32.45/arch/x86/pci/mmconfig_32.c linux-2.6.32.45/arch/x86/pci/mmconfig_32.c
23129 --- linux-2.6.32.45/arch/x86/pci/mmconfig_32.c 2011-03-27 14:31:47.000000000 -0400
23130 +++ linux-2.6.32.45/arch/x86/pci/mmconfig_32.c 2011-04-17 15:56:46.000000000 -0400
23131 @@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int
23132 return 0;
23133 }
23134
23135 -static struct pci_raw_ops pci_mmcfg = {
23136 +static const struct pci_raw_ops pci_mmcfg = {
23137 .read = pci_mmcfg_read,
23138 .write = pci_mmcfg_write,
23139 };
23140 diff -urNp linux-2.6.32.45/arch/x86/pci/mmconfig_64.c linux-2.6.32.45/arch/x86/pci/mmconfig_64.c
23141 --- linux-2.6.32.45/arch/x86/pci/mmconfig_64.c 2011-03-27 14:31:47.000000000 -0400
23142 +++ linux-2.6.32.45/arch/x86/pci/mmconfig_64.c 2011-04-17 15:56:46.000000000 -0400
23143 @@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int
23144 return 0;
23145 }
23146
23147 -static struct pci_raw_ops pci_mmcfg = {
23148 +static const struct pci_raw_ops pci_mmcfg = {
23149 .read = pci_mmcfg_read,
23150 .write = pci_mmcfg_write,
23151 };
23152 diff -urNp linux-2.6.32.45/arch/x86/pci/numaq_32.c linux-2.6.32.45/arch/x86/pci/numaq_32.c
23153 --- linux-2.6.32.45/arch/x86/pci/numaq_32.c 2011-03-27 14:31:47.000000000 -0400
23154 +++ linux-2.6.32.45/arch/x86/pci/numaq_32.c 2011-04-17 15:56:46.000000000 -0400
23155 @@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned i
23156
23157 #undef PCI_CONF1_MQ_ADDRESS
23158
23159 -static struct pci_raw_ops pci_direct_conf1_mq = {
23160 +static const struct pci_raw_ops pci_direct_conf1_mq = {
23161 .read = pci_conf1_mq_read,
23162 .write = pci_conf1_mq_write
23163 };
23164 diff -urNp linux-2.6.32.45/arch/x86/pci/olpc.c linux-2.6.32.45/arch/x86/pci/olpc.c
23165 --- linux-2.6.32.45/arch/x86/pci/olpc.c 2011-03-27 14:31:47.000000000 -0400
23166 +++ linux-2.6.32.45/arch/x86/pci/olpc.c 2011-04-17 15:56:46.000000000 -0400
23167 @@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int s
23168 return 0;
23169 }
23170
23171 -static struct pci_raw_ops pci_olpc_conf = {
23172 +static const struct pci_raw_ops pci_olpc_conf = {
23173 .read = pci_olpc_read,
23174 .write = pci_olpc_write,
23175 };
23176 diff -urNp linux-2.6.32.45/arch/x86/pci/pcbios.c linux-2.6.32.45/arch/x86/pci/pcbios.c
23177 --- linux-2.6.32.45/arch/x86/pci/pcbios.c 2011-03-27 14:31:47.000000000 -0400
23178 +++ linux-2.6.32.45/arch/x86/pci/pcbios.c 2011-04-17 15:56:46.000000000 -0400
23179 @@ -56,50 +56,93 @@ union bios32 {
23180 static struct {
23181 unsigned long address;
23182 unsigned short segment;
23183 -} bios32_indirect = { 0, __KERNEL_CS };
23184 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
23185
23186 /*
23187 * Returns the entry point for the given service, NULL on error
23188 */
23189
23190 -static unsigned long bios32_service(unsigned long service)
23191 +static unsigned long __devinit bios32_service(unsigned long service)
23192 {
23193 unsigned char return_code; /* %al */
23194 unsigned long address; /* %ebx */
23195 unsigned long length; /* %ecx */
23196 unsigned long entry; /* %edx */
23197 unsigned long flags;
23198 + struct desc_struct d, *gdt;
23199
23200 local_irq_save(flags);
23201 - __asm__("lcall *(%%edi); cld"
23202 +
23203 + gdt = get_cpu_gdt_table(smp_processor_id());
23204 +
23205 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
23206 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23207 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
23208 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23209 +
23210 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
23211 : "=a" (return_code),
23212 "=b" (address),
23213 "=c" (length),
23214 "=d" (entry)
23215 : "0" (service),
23216 "1" (0),
23217 - "D" (&bios32_indirect));
23218 + "D" (&bios32_indirect),
23219 + "r"(__PCIBIOS_DS)
23220 + : "memory");
23221 +
23222 + pax_open_kernel();
23223 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
23224 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
23225 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
23226 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
23227 + pax_close_kernel();
23228 +
23229 local_irq_restore(flags);
23230
23231 switch (return_code) {
23232 - case 0:
23233 - return address + entry;
23234 - case 0x80: /* Not present */
23235 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23236 - return 0;
23237 - default: /* Shouldn't happen */
23238 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23239 - service, return_code);
23240 + case 0: {
23241 + int cpu;
23242 + unsigned char flags;
23243 +
23244 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
23245 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
23246 + printk(KERN_WARNING "bios32_service: not valid\n");
23247 return 0;
23248 + }
23249 + address = address + PAGE_OFFSET;
23250 + length += 16UL; /* some BIOSs underreport this... */
23251 + flags = 4;
23252 + if (length >= 64*1024*1024) {
23253 + length >>= PAGE_SHIFT;
23254 + flags |= 8;
23255 + }
23256 +
23257 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
23258 + gdt = get_cpu_gdt_table(cpu);
23259 + pack_descriptor(&d, address, length, 0x9b, flags);
23260 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23261 + pack_descriptor(&d, address, length, 0x93, flags);
23262 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23263 + }
23264 + return entry;
23265 + }
23266 + case 0x80: /* Not present */
23267 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23268 + return 0;
23269 + default: /* Shouldn't happen */
23270 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23271 + service, return_code);
23272 + return 0;
23273 }
23274 }
23275
23276 static struct {
23277 unsigned long address;
23278 unsigned short segment;
23279 -} pci_indirect = { 0, __KERNEL_CS };
23280 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
23281
23282 -static int pci_bios_present;
23283 +static int pci_bios_present __read_only;
23284
23285 static int __devinit check_pcibios(void)
23286 {
23287 @@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
23288 unsigned long flags, pcibios_entry;
23289
23290 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
23291 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
23292 + pci_indirect.address = pcibios_entry;
23293
23294 local_irq_save(flags);
23295 - __asm__(
23296 - "lcall *(%%edi); cld\n\t"
23297 + __asm__("movw %w6, %%ds\n\t"
23298 + "lcall *%%ss:(%%edi); cld\n\t"
23299 + "push %%ss\n\t"
23300 + "pop %%ds\n\t"
23301 "jc 1f\n\t"
23302 "xor %%ah, %%ah\n"
23303 "1:"
23304 @@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
23305 "=b" (ebx),
23306 "=c" (ecx)
23307 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
23308 - "D" (&pci_indirect)
23309 + "D" (&pci_indirect),
23310 + "r" (__PCIBIOS_DS)
23311 : "memory");
23312 local_irq_restore(flags);
23313
23314 @@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int se
23315
23316 switch (len) {
23317 case 1:
23318 - __asm__("lcall *(%%esi); cld\n\t"
23319 + __asm__("movw %w6, %%ds\n\t"
23320 + "lcall *%%ss:(%%esi); cld\n\t"
23321 + "push %%ss\n\t"
23322 + "pop %%ds\n\t"
23323 "jc 1f\n\t"
23324 "xor %%ah, %%ah\n"
23325 "1:"
23326 @@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int se
23327 : "1" (PCIBIOS_READ_CONFIG_BYTE),
23328 "b" (bx),
23329 "D" ((long)reg),
23330 - "S" (&pci_indirect));
23331 + "S" (&pci_indirect),
23332 + "r" (__PCIBIOS_DS));
23333 /*
23334 * Zero-extend the result beyond 8 bits, do not trust the
23335 * BIOS having done it:
23336 @@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int se
23337 *value &= 0xff;
23338 break;
23339 case 2:
23340 - __asm__("lcall *(%%esi); cld\n\t"
23341 + __asm__("movw %w6, %%ds\n\t"
23342 + "lcall *%%ss:(%%esi); cld\n\t"
23343 + "push %%ss\n\t"
23344 + "pop %%ds\n\t"
23345 "jc 1f\n\t"
23346 "xor %%ah, %%ah\n"
23347 "1:"
23348 @@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int se
23349 : "1" (PCIBIOS_READ_CONFIG_WORD),
23350 "b" (bx),
23351 "D" ((long)reg),
23352 - "S" (&pci_indirect));
23353 + "S" (&pci_indirect),
23354 + "r" (__PCIBIOS_DS));
23355 /*
23356 * Zero-extend the result beyond 16 bits, do not trust the
23357 * BIOS having done it:
23358 @@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int se
23359 *value &= 0xffff;
23360 break;
23361 case 4:
23362 - __asm__("lcall *(%%esi); cld\n\t"
23363 + __asm__("movw %w6, %%ds\n\t"
23364 + "lcall *%%ss:(%%esi); cld\n\t"
23365 + "push %%ss\n\t"
23366 + "pop %%ds\n\t"
23367 "jc 1f\n\t"
23368 "xor %%ah, %%ah\n"
23369 "1:"
23370 @@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int se
23371 : "1" (PCIBIOS_READ_CONFIG_DWORD),
23372 "b" (bx),
23373 "D" ((long)reg),
23374 - "S" (&pci_indirect));
23375 + "S" (&pci_indirect),
23376 + "r" (__PCIBIOS_DS));
23377 break;
23378 }
23379
23380 @@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int s
23381
23382 switch (len) {
23383 case 1:
23384 - __asm__("lcall *(%%esi); cld\n\t"
23385 + __asm__("movw %w6, %%ds\n\t"
23386 + "lcall *%%ss:(%%esi); cld\n\t"
23387 + "push %%ss\n\t"
23388 + "pop %%ds\n\t"
23389 "jc 1f\n\t"
23390 "xor %%ah, %%ah\n"
23391 "1:"
23392 @@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int s
23393 "c" (value),
23394 "b" (bx),
23395 "D" ((long)reg),
23396 - "S" (&pci_indirect));
23397 + "S" (&pci_indirect),
23398 + "r" (__PCIBIOS_DS));
23399 break;
23400 case 2:
23401 - __asm__("lcall *(%%esi); cld\n\t"
23402 + __asm__("movw %w6, %%ds\n\t"
23403 + "lcall *%%ss:(%%esi); cld\n\t"
23404 + "push %%ss\n\t"
23405 + "pop %%ds\n\t"
23406 "jc 1f\n\t"
23407 "xor %%ah, %%ah\n"
23408 "1:"
23409 @@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int s
23410 "c" (value),
23411 "b" (bx),
23412 "D" ((long)reg),
23413 - "S" (&pci_indirect));
23414 + "S" (&pci_indirect),
23415 + "r" (__PCIBIOS_DS));
23416 break;
23417 case 4:
23418 - __asm__("lcall *(%%esi); cld\n\t"
23419 + __asm__("movw %w6, %%ds\n\t"
23420 + "lcall *%%ss:(%%esi); cld\n\t"
23421 + "push %%ss\n\t"
23422 + "pop %%ds\n\t"
23423 "jc 1f\n\t"
23424 "xor %%ah, %%ah\n"
23425 "1:"
23426 @@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int s
23427 "c" (value),
23428 "b" (bx),
23429 "D" ((long)reg),
23430 - "S" (&pci_indirect));
23431 + "S" (&pci_indirect),
23432 + "r" (__PCIBIOS_DS));
23433 break;
23434 }
23435
23436 @@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int s
23437 * Function table for BIOS32 access
23438 */
23439
23440 -static struct pci_raw_ops pci_bios_access = {
23441 +static const struct pci_raw_ops pci_bios_access = {
23442 .read = pci_bios_read,
23443 .write = pci_bios_write
23444 };
23445 @@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_acces
23446 * Try to find PCI BIOS.
23447 */
23448
23449 -static struct pci_raw_ops * __devinit pci_find_bios(void)
23450 +static const struct pci_raw_ops * __devinit pci_find_bios(void)
23451 {
23452 union bios32 *check;
23453 unsigned char sum;
23454 @@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_i
23455
23456 DBG("PCI: Fetching IRQ routing table... ");
23457 __asm__("push %%es\n\t"
23458 + "movw %w8, %%ds\n\t"
23459 "push %%ds\n\t"
23460 "pop %%es\n\t"
23461 - "lcall *(%%esi); cld\n\t"
23462 + "lcall *%%ss:(%%esi); cld\n\t"
23463 "pop %%es\n\t"
23464 + "push %%ss\n\t"
23465 + "pop %%ds\n"
23466 "jc 1f\n\t"
23467 "xor %%ah, %%ah\n"
23468 "1:"
23469 @@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_i
23470 "1" (0),
23471 "D" ((long) &opt),
23472 "S" (&pci_indirect),
23473 - "m" (opt)
23474 + "m" (opt),
23475 + "r" (__PCIBIOS_DS)
23476 : "memory");
23477 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
23478 if (ret & 0xff00)
23479 @@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_d
23480 {
23481 int ret;
23482
23483 - __asm__("lcall *(%%esi); cld\n\t"
23484 + __asm__("movw %w5, %%ds\n\t"
23485 + "lcall *%%ss:(%%esi); cld\n\t"
23486 + "push %%ss\n\t"
23487 + "pop %%ds\n"
23488 "jc 1f\n\t"
23489 "xor %%ah, %%ah\n"
23490 "1:"
23491 @@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_d
23492 : "0" (PCIBIOS_SET_PCI_HW_INT),
23493 "b" ((dev->bus->number << 8) | dev->devfn),
23494 "c" ((irq << 8) | (pin + 10)),
23495 - "S" (&pci_indirect));
23496 + "S" (&pci_indirect),
23497 + "r" (__PCIBIOS_DS));
23498 return !(ret & 0xff00);
23499 }
23500 EXPORT_SYMBOL(pcibios_set_irq_routing);
23501 diff -urNp linux-2.6.32.45/arch/x86/power/cpu.c linux-2.6.32.45/arch/x86/power/cpu.c
23502 --- linux-2.6.32.45/arch/x86/power/cpu.c 2011-03-27 14:31:47.000000000 -0400
23503 +++ linux-2.6.32.45/arch/x86/power/cpu.c 2011-04-17 15:56:46.000000000 -0400
23504 @@ -129,7 +129,7 @@ static void do_fpu_end(void)
23505 static void fix_processor_context(void)
23506 {
23507 int cpu = smp_processor_id();
23508 - struct tss_struct *t = &per_cpu(init_tss, cpu);
23509 + struct tss_struct *t = init_tss + cpu;
23510
23511 set_tss_desc(cpu, t); /*
23512 * This just modifies memory; should not be
23513 @@ -139,7 +139,9 @@ static void fix_processor_context(void)
23514 */
23515
23516 #ifdef CONFIG_X86_64
23517 + pax_open_kernel();
23518 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
23519 + pax_close_kernel();
23520
23521 syscall_init(); /* This sets MSR_*STAR and related */
23522 #endif
23523 diff -urNp linux-2.6.32.45/arch/x86/vdso/Makefile linux-2.6.32.45/arch/x86/vdso/Makefile
23524 --- linux-2.6.32.45/arch/x86/vdso/Makefile 2011-03-27 14:31:47.000000000 -0400
23525 +++ linux-2.6.32.45/arch/x86/vdso/Makefile 2011-04-17 15:56:46.000000000 -0400
23526 @@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
23527 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
23528 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
23529
23530 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
23531 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
23532 GCOV_PROFILE := n
23533
23534 #
23535 diff -urNp linux-2.6.32.45/arch/x86/vdso/vclock_gettime.c linux-2.6.32.45/arch/x86/vdso/vclock_gettime.c
23536 --- linux-2.6.32.45/arch/x86/vdso/vclock_gettime.c 2011-03-27 14:31:47.000000000 -0400
23537 +++ linux-2.6.32.45/arch/x86/vdso/vclock_gettime.c 2011-04-17 15:56:46.000000000 -0400
23538 @@ -22,24 +22,48 @@
23539 #include <asm/hpet.h>
23540 #include <asm/unistd.h>
23541 #include <asm/io.h>
23542 +#include <asm/fixmap.h>
23543 #include "vextern.h"
23544
23545 #define gtod vdso_vsyscall_gtod_data
23546
23547 +notrace noinline long __vdso_fallback_time(long *t)
23548 +{
23549 + long secs;
23550 + asm volatile("syscall"
23551 + : "=a" (secs)
23552 + : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
23553 + return secs;
23554 +}
23555 +
23556 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
23557 {
23558 long ret;
23559 asm("syscall" : "=a" (ret) :
23560 - "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
23561 + "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
23562 return ret;
23563 }
23564
23565 +notrace static inline cycle_t __vdso_vread_hpet(void)
23566 +{
23567 + return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
23568 +}
23569 +
23570 +notrace static inline cycle_t __vdso_vread_tsc(void)
23571 +{
23572 + cycle_t ret = (cycle_t)vget_cycles();
23573 +
23574 + return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
23575 +}
23576 +
23577 notrace static inline long vgetns(void)
23578 {
23579 long v;
23580 - cycles_t (*vread)(void);
23581 - vread = gtod->clock.vread;
23582 - v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
23583 + if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
23584 + v = __vdso_vread_tsc();
23585 + else
23586 + v = __vdso_vread_hpet();
23587 + v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
23588 return (v * gtod->clock.mult) >> gtod->clock.shift;
23589 }
23590
23591 @@ -113,7 +137,9 @@ notrace static noinline int do_monotonic
23592
23593 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
23594 {
23595 - if (likely(gtod->sysctl_enabled))
23596 + if (likely(gtod->sysctl_enabled &&
23597 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
23598 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
23599 switch (clock) {
23600 case CLOCK_REALTIME:
23601 if (likely(gtod->clock.vread))
23602 @@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid
23603 int clock_gettime(clockid_t, struct timespec *)
23604 __attribute__((weak, alias("__vdso_clock_gettime")));
23605
23606 -notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
23607 +notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
23608 {
23609 long ret;
23610 - if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
23611 + asm("syscall" : "=a" (ret) :
23612 + "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
23613 + return ret;
23614 +}
23615 +
23616 +notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
23617 +{
23618 + if (likely(gtod->sysctl_enabled &&
23619 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
23620 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
23621 + {
23622 if (likely(tv != NULL)) {
23623 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
23624 offsetof(struct timespec, tv_nsec) ||
23625 @@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct t
23626 }
23627 return 0;
23628 }
23629 - asm("syscall" : "=a" (ret) :
23630 - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
23631 - return ret;
23632 + return __vdso_fallback_gettimeofday(tv, tz);
23633 }
23634 int gettimeofday(struct timeval *, struct timezone *)
23635 __attribute__((weak, alias("__vdso_gettimeofday")));
23636 diff -urNp linux-2.6.32.45/arch/x86/vdso/vdso32-setup.c linux-2.6.32.45/arch/x86/vdso/vdso32-setup.c
23637 --- linux-2.6.32.45/arch/x86/vdso/vdso32-setup.c 2011-03-27 14:31:47.000000000 -0400
23638 +++ linux-2.6.32.45/arch/x86/vdso/vdso32-setup.c 2011-04-23 12:56:10.000000000 -0400
23639 @@ -25,6 +25,7 @@
23640 #include <asm/tlbflush.h>
23641 #include <asm/vdso.h>
23642 #include <asm/proto.h>
23643 +#include <asm/mman.h>
23644
23645 enum {
23646 VDSO_DISABLED = 0,
23647 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
23648 void enable_sep_cpu(void)
23649 {
23650 int cpu = get_cpu();
23651 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
23652 + struct tss_struct *tss = init_tss + cpu;
23653
23654 if (!boot_cpu_has(X86_FEATURE_SEP)) {
23655 put_cpu();
23656 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
23657 gate_vma.vm_start = FIXADDR_USER_START;
23658 gate_vma.vm_end = FIXADDR_USER_END;
23659 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
23660 - gate_vma.vm_page_prot = __P101;
23661 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
23662 /*
23663 * Make sure the vDSO gets into every core dump.
23664 * Dumping its contents makes post-mortem fully interpretable later
23665 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
23666 if (compat)
23667 addr = VDSO_HIGH_BASE;
23668 else {
23669 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
23670 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
23671 if (IS_ERR_VALUE(addr)) {
23672 ret = addr;
23673 goto up_fail;
23674 }
23675 }
23676
23677 - current->mm->context.vdso = (void *)addr;
23678 + current->mm->context.vdso = addr;
23679
23680 if (compat_uses_vma || !compat) {
23681 /*
23682 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
23683 }
23684
23685 current_thread_info()->sysenter_return =
23686 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
23687 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
23688
23689 up_fail:
23690 if (ret)
23691 - current->mm->context.vdso = NULL;
23692 + current->mm->context.vdso = 0;
23693
23694 up_write(&mm->mmap_sem);
23695
23696 @@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
23697
23698 const char *arch_vma_name(struct vm_area_struct *vma)
23699 {
23700 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
23701 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
23702 return "[vdso]";
23703 +
23704 +#ifdef CONFIG_PAX_SEGMEXEC
23705 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
23706 + return "[vdso]";
23707 +#endif
23708 +
23709 return NULL;
23710 }
23711
23712 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
23713 struct mm_struct *mm = tsk->mm;
23714
23715 /* Check to see if this task was created in compat vdso mode */
23716 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
23717 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
23718 return &gate_vma;
23719 return NULL;
23720 }
23721 diff -urNp linux-2.6.32.45/arch/x86/vdso/vdso.lds.S linux-2.6.32.45/arch/x86/vdso/vdso.lds.S
23722 --- linux-2.6.32.45/arch/x86/vdso/vdso.lds.S 2011-03-27 14:31:47.000000000 -0400
23723 +++ linux-2.6.32.45/arch/x86/vdso/vdso.lds.S 2011-06-06 17:35:35.000000000 -0400
23724 @@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
23725 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
23726 #include "vextern.h"
23727 #undef VEXTERN
23728 +
23729 +#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
23730 +VEXTERN(fallback_gettimeofday)
23731 +VEXTERN(fallback_time)
23732 +VEXTERN(getcpu)
23733 +#undef VEXTERN
23734 diff -urNp linux-2.6.32.45/arch/x86/vdso/vextern.h linux-2.6.32.45/arch/x86/vdso/vextern.h
23735 --- linux-2.6.32.45/arch/x86/vdso/vextern.h 2011-03-27 14:31:47.000000000 -0400
23736 +++ linux-2.6.32.45/arch/x86/vdso/vextern.h 2011-04-17 15:56:46.000000000 -0400
23737 @@ -11,6 +11,5 @@
23738 put into vextern.h and be referenced as a pointer with vdso prefix.
23739 The main kernel later fills in the values. */
23740
23741 -VEXTERN(jiffies)
23742 VEXTERN(vgetcpu_mode)
23743 VEXTERN(vsyscall_gtod_data)
23744 diff -urNp linux-2.6.32.45/arch/x86/vdso/vma.c linux-2.6.32.45/arch/x86/vdso/vma.c
23745 --- linux-2.6.32.45/arch/x86/vdso/vma.c 2011-03-27 14:31:47.000000000 -0400
23746 +++ linux-2.6.32.45/arch/x86/vdso/vma.c 2011-08-23 20:24:19.000000000 -0400
23747 @@ -17,8 +17,6 @@
23748 #include "vextern.h" /* Just for VMAGIC. */
23749 #undef VEXTERN
23750
23751 -unsigned int __read_mostly vdso_enabled = 1;
23752 -
23753 extern char vdso_start[], vdso_end[];
23754 extern unsigned short vdso_sync_cpuid;
23755
23756 @@ -27,10 +25,8 @@ static unsigned vdso_size;
23757
23758 static inline void *var_ref(void *p, char *name)
23759 {
23760 - if (*(void **)p != (void *)VMAGIC) {
23761 - printk("VDSO: variable %s broken\n", name);
23762 - vdso_enabled = 0;
23763 - }
23764 + if (*(void **)p != (void *)VMAGIC)
23765 + panic("VDSO: variable %s broken\n", name);
23766 return p;
23767 }
23768
23769 @@ -57,21 +53,18 @@ static int __init init_vdso_vars(void)
23770 if (!vbase)
23771 goto oom;
23772
23773 - if (memcmp(vbase, "\177ELF", 4)) {
23774 - printk("VDSO: I'm broken; not ELF\n");
23775 - vdso_enabled = 0;
23776 - }
23777 + if (memcmp(vbase, ELFMAG, SELFMAG))
23778 + panic("VDSO: I'm broken; not ELF\n");
23779
23780 #define VEXTERN(x) \
23781 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
23782 #include "vextern.h"
23783 #undef VEXTERN
23784 + vunmap(vbase);
23785 return 0;
23786
23787 oom:
23788 - printk("Cannot allocate vdso\n");
23789 - vdso_enabled = 0;
23790 - return -ENOMEM;
23791 + panic("Cannot allocate vdso\n");
23792 }
23793 __initcall(init_vdso_vars);
23794
23795 @@ -105,9 +98,6 @@ int arch_setup_additional_pages(struct l
23796 unsigned long addr;
23797 int ret;
23798
23799 - if (!vdso_enabled)
23800 - return 0;
23801 -
23802 down_write(&mm->mmap_sem);
23803 addr = vdso_addr(mm->start_stack, vdso_size);
23804 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
23805 @@ -116,7 +106,7 @@ int arch_setup_additional_pages(struct l
23806 goto up_fail;
23807 }
23808
23809 - current->mm->context.vdso = (void *)addr;
23810 + current->mm->context.vdso = addr;
23811
23812 ret = install_special_mapping(mm, addr, vdso_size,
23813 VM_READ|VM_EXEC|
23814 @@ -124,7 +114,7 @@ int arch_setup_additional_pages(struct l
23815 VM_ALWAYSDUMP,
23816 vdso_pages);
23817 if (ret) {
23818 - current->mm->context.vdso = NULL;
23819 + current->mm->context.vdso = 0;
23820 goto up_fail;
23821 }
23822
23823 @@ -132,10 +122,3 @@ up_fail:
23824 up_write(&mm->mmap_sem);
23825 return ret;
23826 }
23827 -
23828 -static __init int vdso_setup(char *s)
23829 -{
23830 - vdso_enabled = simple_strtoul(s, NULL, 0);
23831 - return 0;
23832 -}
23833 -__setup("vdso=", vdso_setup);
23834 diff -urNp linux-2.6.32.45/arch/x86/xen/enlighten.c linux-2.6.32.45/arch/x86/xen/enlighten.c
23835 --- linux-2.6.32.45/arch/x86/xen/enlighten.c 2011-03-27 14:31:47.000000000 -0400
23836 +++ linux-2.6.32.45/arch/x86/xen/enlighten.c 2011-05-22 23:02:03.000000000 -0400
23837 @@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
23838
23839 struct shared_info xen_dummy_shared_info;
23840
23841 -void *xen_initial_gdt;
23842 -
23843 /*
23844 * Point at some empty memory to start with. We map the real shared_info
23845 * page as soon as fixmap is up and running.
23846 @@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_des
23847
23848 preempt_disable();
23849
23850 - start = __get_cpu_var(idt_desc).address;
23851 + start = (unsigned long)__get_cpu_var(idt_desc).address;
23852 end = start + __get_cpu_var(idt_desc).size + 1;
23853
23854 xen_mc_flush();
23855 @@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic
23856 #endif
23857 };
23858
23859 -static void xen_reboot(int reason)
23860 +static __noreturn void xen_reboot(int reason)
23861 {
23862 struct sched_shutdown r = { .reason = reason };
23863
23864 @@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
23865 BUG();
23866 }
23867
23868 -static void xen_restart(char *msg)
23869 +static __noreturn void xen_restart(char *msg)
23870 {
23871 xen_reboot(SHUTDOWN_reboot);
23872 }
23873
23874 -static void xen_emergency_restart(void)
23875 +static __noreturn void xen_emergency_restart(void)
23876 {
23877 xen_reboot(SHUTDOWN_reboot);
23878 }
23879
23880 -static void xen_machine_halt(void)
23881 +static __noreturn void xen_machine_halt(void)
23882 {
23883 xen_reboot(SHUTDOWN_poweroff);
23884 }
23885 @@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(
23886 */
23887 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
23888
23889 -#ifdef CONFIG_X86_64
23890 /* Work out if we support NX */
23891 - check_efer();
23892 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23893 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
23894 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
23895 + unsigned l, h;
23896 +
23897 +#ifdef CONFIG_X86_PAE
23898 + nx_enabled = 1;
23899 +#endif
23900 + __supported_pte_mask |= _PAGE_NX;
23901 + rdmsr(MSR_EFER, l, h);
23902 + l |= EFER_NX;
23903 + wrmsr(MSR_EFER, l, h);
23904 + }
23905 #endif
23906
23907 xen_setup_features();
23908 @@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(
23909
23910 machine_ops = xen_machine_ops;
23911
23912 - /*
23913 - * The only reliable way to retain the initial address of the
23914 - * percpu gdt_page is to remember it here, so we can go and
23915 - * mark it RW later, when the initial percpu area is freed.
23916 - */
23917 - xen_initial_gdt = &per_cpu(gdt_page, 0);
23918 -
23919 xen_smp_init();
23920
23921 pgd = (pgd_t *)xen_start_info->pt_base;
23922 diff -urNp linux-2.6.32.45/arch/x86/xen/mmu.c linux-2.6.32.45/arch/x86/xen/mmu.c
23923 --- linux-2.6.32.45/arch/x86/xen/mmu.c 2011-07-13 17:23:04.000000000 -0400
23924 +++ linux-2.6.32.45/arch/x86/xen/mmu.c 2011-07-13 17:23:18.000000000 -0400
23925 @@ -1719,6 +1719,8 @@ __init pgd_t *xen_setup_kernel_pagetable
23926 convert_pfn_mfn(init_level4_pgt);
23927 convert_pfn_mfn(level3_ident_pgt);
23928 convert_pfn_mfn(level3_kernel_pgt);
23929 + convert_pfn_mfn(level3_vmalloc_pgt);
23930 + convert_pfn_mfn(level3_vmemmap_pgt);
23931
23932 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
23933 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
23934 @@ -1737,7 +1739,10 @@ __init pgd_t *xen_setup_kernel_pagetable
23935 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
23936 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
23937 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
23938 + set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
23939 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
23940 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
23941 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
23942 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
23943 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
23944
23945 diff -urNp linux-2.6.32.45/arch/x86/xen/smp.c linux-2.6.32.45/arch/x86/xen/smp.c
23946 --- linux-2.6.32.45/arch/x86/xen/smp.c 2011-03-27 14:31:47.000000000 -0400
23947 +++ linux-2.6.32.45/arch/x86/xen/smp.c 2011-05-11 18:25:15.000000000 -0400
23948 @@ -167,11 +167,6 @@ static void __init xen_smp_prepare_boot_
23949 {
23950 BUG_ON(smp_processor_id() != 0);
23951 native_smp_prepare_boot_cpu();
23952 -
23953 - /* We've switched to the "real" per-cpu gdt, so make sure the
23954 - old memory can be recycled */
23955 - make_lowmem_page_readwrite(xen_initial_gdt);
23956 -
23957 xen_setup_vcpu_info_placement();
23958 }
23959
23960 @@ -231,12 +226,12 @@ cpu_initialize_context(unsigned int cpu,
23961 gdt = get_cpu_gdt_table(cpu);
23962
23963 ctxt->flags = VGCF_IN_KERNEL;
23964 - ctxt->user_regs.ds = __USER_DS;
23965 - ctxt->user_regs.es = __USER_DS;
23966 + ctxt->user_regs.ds = __KERNEL_DS;
23967 + ctxt->user_regs.es = __KERNEL_DS;
23968 ctxt->user_regs.ss = __KERNEL_DS;
23969 #ifdef CONFIG_X86_32
23970 ctxt->user_regs.fs = __KERNEL_PERCPU;
23971 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
23972 + savesegment(gs, ctxt->user_regs.gs);
23973 #else
23974 ctxt->gs_base_kernel = per_cpu_offset(cpu);
23975 #endif
23976 @@ -287,13 +282,12 @@ static int __cpuinit xen_cpu_up(unsigned
23977 int rc;
23978
23979 per_cpu(current_task, cpu) = idle;
23980 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
23981 #ifdef CONFIG_X86_32
23982 irq_ctx_init(cpu);
23983 #else
23984 clear_tsk_thread_flag(idle, TIF_FORK);
23985 - per_cpu(kernel_stack, cpu) =
23986 - (unsigned long)task_stack_page(idle) -
23987 - KERNEL_STACK_OFFSET + THREAD_SIZE;
23988 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
23989 #endif
23990 xen_setup_runstate_info(cpu);
23991 xen_setup_timer(cpu);
23992 diff -urNp linux-2.6.32.45/arch/x86/xen/xen-asm_32.S linux-2.6.32.45/arch/x86/xen/xen-asm_32.S
23993 --- linux-2.6.32.45/arch/x86/xen/xen-asm_32.S 2011-03-27 14:31:47.000000000 -0400
23994 +++ linux-2.6.32.45/arch/x86/xen/xen-asm_32.S 2011-04-22 19:13:13.000000000 -0400
23995 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
23996 ESP_OFFSET=4 # bytes pushed onto stack
23997
23998 /*
23999 - * Store vcpu_info pointer for easy access. Do it this way to
24000 - * avoid having to reload %fs
24001 + * Store vcpu_info pointer for easy access.
24002 */
24003 #ifdef CONFIG_SMP
24004 - GET_THREAD_INFO(%eax)
24005 - movl TI_cpu(%eax), %eax
24006 - movl __per_cpu_offset(,%eax,4), %eax
24007 - mov per_cpu__xen_vcpu(%eax), %eax
24008 + push %fs
24009 + mov $(__KERNEL_PERCPU), %eax
24010 + mov %eax, %fs
24011 + mov PER_CPU_VAR(xen_vcpu), %eax
24012 + pop %fs
24013 #else
24014 movl per_cpu__xen_vcpu, %eax
24015 #endif
24016 diff -urNp linux-2.6.32.45/arch/x86/xen/xen-head.S linux-2.6.32.45/arch/x86/xen/xen-head.S
24017 --- linux-2.6.32.45/arch/x86/xen/xen-head.S 2011-03-27 14:31:47.000000000 -0400
24018 +++ linux-2.6.32.45/arch/x86/xen/xen-head.S 2011-04-17 15:56:46.000000000 -0400
24019 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
24020 #ifdef CONFIG_X86_32
24021 mov %esi,xen_start_info
24022 mov $init_thread_union+THREAD_SIZE,%esp
24023 +#ifdef CONFIG_SMP
24024 + movl $cpu_gdt_table,%edi
24025 + movl $__per_cpu_load,%eax
24026 + movw %ax,__KERNEL_PERCPU + 2(%edi)
24027 + rorl $16,%eax
24028 + movb %al,__KERNEL_PERCPU + 4(%edi)
24029 + movb %ah,__KERNEL_PERCPU + 7(%edi)
24030 + movl $__per_cpu_end - 1,%eax
24031 + subl $__per_cpu_start,%eax
24032 + movw %ax,__KERNEL_PERCPU + 0(%edi)
24033 +#endif
24034 #else
24035 mov %rsi,xen_start_info
24036 mov $init_thread_union+THREAD_SIZE,%rsp
24037 diff -urNp linux-2.6.32.45/arch/x86/xen/xen-ops.h linux-2.6.32.45/arch/x86/xen/xen-ops.h
24038 --- linux-2.6.32.45/arch/x86/xen/xen-ops.h 2011-03-27 14:31:47.000000000 -0400
24039 +++ linux-2.6.32.45/arch/x86/xen/xen-ops.h 2011-04-17 15:56:46.000000000 -0400
24040 @@ -10,8 +10,6 @@
24041 extern const char xen_hypervisor_callback[];
24042 extern const char xen_failsafe_callback[];
24043
24044 -extern void *xen_initial_gdt;
24045 -
24046 struct trap_info;
24047 void xen_copy_trap_info(struct trap_info *traps);
24048
24049 diff -urNp linux-2.6.32.45/block/blk-integrity.c linux-2.6.32.45/block/blk-integrity.c
24050 --- linux-2.6.32.45/block/blk-integrity.c 2011-03-27 14:31:47.000000000 -0400
24051 +++ linux-2.6.32.45/block/blk-integrity.c 2011-04-17 15:56:46.000000000 -0400
24052 @@ -278,7 +278,7 @@ static struct attribute *integrity_attrs
24053 NULL,
24054 };
24055
24056 -static struct sysfs_ops integrity_ops = {
24057 +static const struct sysfs_ops integrity_ops = {
24058 .show = &integrity_attr_show,
24059 .store = &integrity_attr_store,
24060 };
24061 diff -urNp linux-2.6.32.45/block/blk-iopoll.c linux-2.6.32.45/block/blk-iopoll.c
24062 --- linux-2.6.32.45/block/blk-iopoll.c 2011-03-27 14:31:47.000000000 -0400
24063 +++ linux-2.6.32.45/block/blk-iopoll.c 2011-04-17 15:56:46.000000000 -0400
24064 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
24065 }
24066 EXPORT_SYMBOL(blk_iopoll_complete);
24067
24068 -static void blk_iopoll_softirq(struct softirq_action *h)
24069 +static void blk_iopoll_softirq(void)
24070 {
24071 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
24072 int rearm = 0, budget = blk_iopoll_budget;
24073 diff -urNp linux-2.6.32.45/block/blk-map.c linux-2.6.32.45/block/blk-map.c
24074 --- linux-2.6.32.45/block/blk-map.c 2011-03-27 14:31:47.000000000 -0400
24075 +++ linux-2.6.32.45/block/blk-map.c 2011-04-18 16:57:33.000000000 -0400
24076 @@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct requ
24077 * direct dma. else, set up kernel bounce buffers
24078 */
24079 uaddr = (unsigned long) ubuf;
24080 - if (blk_rq_aligned(q, ubuf, len) && !map_data)
24081 + if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
24082 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
24083 else
24084 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
24085 @@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_q
24086 for (i = 0; i < iov_count; i++) {
24087 unsigned long uaddr = (unsigned long)iov[i].iov_base;
24088
24089 + if (!iov[i].iov_len)
24090 + return -EINVAL;
24091 +
24092 if (uaddr & queue_dma_alignment(q)) {
24093 unaligned = 1;
24094 break;
24095 }
24096 - if (!iov[i].iov_len)
24097 - return -EINVAL;
24098 }
24099
24100 if (unaligned || (q->dma_pad_mask & len) || map_data)
24101 @@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue
24102 if (!len || !kbuf)
24103 return -EINVAL;
24104
24105 - do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
24106 + do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
24107 if (do_copy)
24108 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
24109 else
24110 diff -urNp linux-2.6.32.45/block/blk-softirq.c linux-2.6.32.45/block/blk-softirq.c
24111 --- linux-2.6.32.45/block/blk-softirq.c 2011-03-27 14:31:47.000000000 -0400
24112 +++ linux-2.6.32.45/block/blk-softirq.c 2011-04-17 15:56:46.000000000 -0400
24113 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
24114 * Softirq action handler - move entries to local list and loop over them
24115 * while passing them to the queue registered handler.
24116 */
24117 -static void blk_done_softirq(struct softirq_action *h)
24118 +static void blk_done_softirq(void)
24119 {
24120 struct list_head *cpu_list, local_list;
24121
24122 diff -urNp linux-2.6.32.45/block/blk-sysfs.c linux-2.6.32.45/block/blk-sysfs.c
24123 --- linux-2.6.32.45/block/blk-sysfs.c 2011-05-10 22:12:01.000000000 -0400
24124 +++ linux-2.6.32.45/block/blk-sysfs.c 2011-05-10 22:12:26.000000000 -0400
24125 @@ -414,7 +414,7 @@ static void blk_release_queue(struct kob
24126 kmem_cache_free(blk_requestq_cachep, q);
24127 }
24128
24129 -static struct sysfs_ops queue_sysfs_ops = {
24130 +static const struct sysfs_ops queue_sysfs_ops = {
24131 .show = queue_attr_show,
24132 .store = queue_attr_store,
24133 };
24134 diff -urNp linux-2.6.32.45/block/bsg.c linux-2.6.32.45/block/bsg.c
24135 --- linux-2.6.32.45/block/bsg.c 2011-03-27 14:31:47.000000000 -0400
24136 +++ linux-2.6.32.45/block/bsg.c 2011-04-17 15:56:46.000000000 -0400
24137 @@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
24138 struct sg_io_v4 *hdr, struct bsg_device *bd,
24139 fmode_t has_write_perm)
24140 {
24141 + unsigned char tmpcmd[sizeof(rq->__cmd)];
24142 + unsigned char *cmdptr;
24143 +
24144 if (hdr->request_len > BLK_MAX_CDB) {
24145 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
24146 if (!rq->cmd)
24147 return -ENOMEM;
24148 - }
24149 + cmdptr = rq->cmd;
24150 + } else
24151 + cmdptr = tmpcmd;
24152
24153 - if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
24154 + if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
24155 hdr->request_len))
24156 return -EFAULT;
24157
24158 + if (cmdptr != rq->cmd)
24159 + memcpy(rq->cmd, cmdptr, hdr->request_len);
24160 +
24161 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
24162 if (blk_verify_command(rq->cmd, has_write_perm))
24163 return -EPERM;
24164 diff -urNp linux-2.6.32.45/block/elevator.c linux-2.6.32.45/block/elevator.c
24165 --- linux-2.6.32.45/block/elevator.c 2011-03-27 14:31:47.000000000 -0400
24166 +++ linux-2.6.32.45/block/elevator.c 2011-04-17 15:56:46.000000000 -0400
24167 @@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, str
24168 return error;
24169 }
24170
24171 -static struct sysfs_ops elv_sysfs_ops = {
24172 +static const struct sysfs_ops elv_sysfs_ops = {
24173 .show = elv_attr_show,
24174 .store = elv_attr_store,
24175 };
24176 diff -urNp linux-2.6.32.45/block/scsi_ioctl.c linux-2.6.32.45/block/scsi_ioctl.c
24177 --- linux-2.6.32.45/block/scsi_ioctl.c 2011-03-27 14:31:47.000000000 -0400
24178 +++ linux-2.6.32.45/block/scsi_ioctl.c 2011-04-23 13:28:22.000000000 -0400
24179 @@ -220,8 +220,20 @@ EXPORT_SYMBOL(blk_verify_command);
24180 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
24181 struct sg_io_hdr *hdr, fmode_t mode)
24182 {
24183 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
24184 + unsigned char tmpcmd[sizeof(rq->__cmd)];
24185 + unsigned char *cmdptr;
24186 +
24187 + if (rq->cmd != rq->__cmd)
24188 + cmdptr = rq->cmd;
24189 + else
24190 + cmdptr = tmpcmd;
24191 +
24192 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
24193 return -EFAULT;
24194 +
24195 + if (cmdptr != rq->cmd)
24196 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
24197 +
24198 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
24199 return -EPERM;
24200
24201 @@ -430,6 +442,8 @@ int sg_scsi_ioctl(struct request_queue *
24202 int err;
24203 unsigned int in_len, out_len, bytes, opcode, cmdlen;
24204 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
24205 + unsigned char tmpcmd[sizeof(rq->__cmd)];
24206 + unsigned char *cmdptr;
24207
24208 if (!sic)
24209 return -EINVAL;
24210 @@ -463,9 +477,18 @@ int sg_scsi_ioctl(struct request_queue *
24211 */
24212 err = -EFAULT;
24213 rq->cmd_len = cmdlen;
24214 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
24215 +
24216 + if (rq->cmd != rq->__cmd)
24217 + cmdptr = rq->cmd;
24218 + else
24219 + cmdptr = tmpcmd;
24220 +
24221 + if (copy_from_user(cmdptr, sic->data, cmdlen))
24222 goto error;
24223
24224 + if (rq->cmd != cmdptr)
24225 + memcpy(rq->cmd, cmdptr, cmdlen);
24226 +
24227 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
24228 goto error;
24229
24230 diff -urNp linux-2.6.32.45/crypto/cryptd.c linux-2.6.32.45/crypto/cryptd.c
24231 --- linux-2.6.32.45/crypto/cryptd.c 2011-03-27 14:31:47.000000000 -0400
24232 +++ linux-2.6.32.45/crypto/cryptd.c 2011-08-23 21:22:32.000000000 -0400
24233 @@ -50,7 +50,7 @@ struct cryptd_blkcipher_ctx {
24234
24235 struct cryptd_blkcipher_request_ctx {
24236 crypto_completion_t complete;
24237 -};
24238 +} __no_const;
24239
24240 struct cryptd_hash_ctx {
24241 struct crypto_shash *child;
24242 diff -urNp linux-2.6.32.45/crypto/gf128mul.c linux-2.6.32.45/crypto/gf128mul.c
24243 --- linux-2.6.32.45/crypto/gf128mul.c 2011-03-27 14:31:47.000000000 -0400
24244 +++ linux-2.6.32.45/crypto/gf128mul.c 2011-07-06 19:53:33.000000000 -0400
24245 @@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
24246 for (i = 0; i < 7; ++i)
24247 gf128mul_x_lle(&p[i + 1], &p[i]);
24248
24249 - memset(r, 0, sizeof(r));
24250 + memset(r, 0, sizeof(*r));
24251 for (i = 0;;) {
24252 u8 ch = ((u8 *)b)[15 - i];
24253
24254 @@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
24255 for (i = 0; i < 7; ++i)
24256 gf128mul_x_bbe(&p[i + 1], &p[i]);
24257
24258 - memset(r, 0, sizeof(r));
24259 + memset(r, 0, sizeof(*r));
24260 for (i = 0;;) {
24261 u8 ch = ((u8 *)b)[i];
24262
24263 diff -urNp linux-2.6.32.45/crypto/serpent.c linux-2.6.32.45/crypto/serpent.c
24264 --- linux-2.6.32.45/crypto/serpent.c 2011-03-27 14:31:47.000000000 -0400
24265 +++ linux-2.6.32.45/crypto/serpent.c 2011-08-18 23:59:56.000000000 -0400
24266 @@ -21,6 +21,7 @@
24267 #include <asm/byteorder.h>
24268 #include <linux/crypto.h>
24269 #include <linux/types.h>
24270 +#include <linux/sched.h>
24271
24272 /* Key is padded to the maximum of 256 bits before round key generation.
24273 * Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
24274 @@ -224,6 +225,8 @@ static int serpent_setkey(struct crypto_
24275 u32 r0,r1,r2,r3,r4;
24276 int i;
24277
24278 + pax_track_stack();
24279 +
24280 /* Copy key, add padding */
24281
24282 for (i = 0; i < keylen; ++i)
24283 diff -urNp linux-2.6.32.45/Documentation/dontdiff linux-2.6.32.45/Documentation/dontdiff
24284 --- linux-2.6.32.45/Documentation/dontdiff 2011-03-27 14:31:47.000000000 -0400
24285 +++ linux-2.6.32.45/Documentation/dontdiff 2011-08-21 18:59:02.000000000 -0400
24286 @@ -1,13 +1,16 @@
24287 *.a
24288 *.aux
24289 *.bin
24290 +*.cis
24291 *.cpio
24292 *.csp
24293 +*.dbg
24294 *.dsp
24295 *.dvi
24296 *.elf
24297 *.eps
24298 *.fw
24299 +*.gcno
24300 *.gen.S
24301 *.gif
24302 *.grep
24303 @@ -38,8 +41,10 @@
24304 *.tab.h
24305 *.tex
24306 *.ver
24307 +*.vim
24308 *.xml
24309 *_MODULES
24310 +*_reg_safe.h
24311 *_vga16.c
24312 *~
24313 *.9
24314 @@ -49,11 +54,16 @@
24315 53c700_d.h
24316 CVS
24317 ChangeSet
24318 +GPATH
24319 +GRTAGS
24320 +GSYMS
24321 +GTAGS
24322 Image
24323 Kerntypes
24324 Module.markers
24325 Module.symvers
24326 PENDING
24327 +PERF*
24328 SCCS
24329 System.map*
24330 TAGS
24331 @@ -76,7 +86,11 @@ btfixupprep
24332 build
24333 bvmlinux
24334 bzImage*
24335 +capability_names.h
24336 +capflags.c
24337 classlist.h*
24338 +clut_vga16.c
24339 +common-cmds.h
24340 comp*.log
24341 compile.h*
24342 conf
24343 @@ -97,19 +111,21 @@ elfconfig.h*
24344 fixdep
24345 fore200e_mkfirm
24346 fore200e_pca_fw.c*
24347 +gate.lds
24348 gconf
24349 gen-devlist
24350 gen_crc32table
24351 gen_init_cpio
24352 genksyms
24353 *_gray256.c
24354 +hash
24355 ihex2fw
24356 ikconfig.h*
24357 initramfs_data.cpio
24358 +initramfs_data.cpio.bz2
24359 initramfs_data.cpio.gz
24360 initramfs_list
24361 kallsyms
24362 -kconfig
24363 keywords.c
24364 ksym.c*
24365 ksym.h*
24366 @@ -133,7 +149,9 @@ mkboot
24367 mkbugboot
24368 mkcpustr
24369 mkdep
24370 +mkpiggy
24371 mkprep
24372 +mkregtable
24373 mktables
24374 mktree
24375 modpost
24376 @@ -149,6 +167,7 @@ patches*
24377 pca200e.bin
24378 pca200e_ecd.bin2
24379 piggy.gz
24380 +piggy.S
24381 piggyback
24382 pnmtologo
24383 ppc_defs.h*
24384 @@ -157,12 +176,15 @@ qconf
24385 raid6altivec*.c
24386 raid6int*.c
24387 raid6tables.c
24388 +regdb.c
24389 relocs
24390 +rlim_names.h
24391 series
24392 setup
24393 setup.bin
24394 setup.elf
24395 sImage
24396 +slabinfo
24397 sm_tbl*
24398 split-include
24399 syscalltab.h
24400 @@ -186,14 +208,20 @@ version.h*
24401 vmlinux
24402 vmlinux-*
24403 vmlinux.aout
24404 +vmlinux.bin.all
24405 +vmlinux.bin.bz2
24406 vmlinux.lds
24407 +vmlinux.relocs
24408 +voffset.h
24409 vsyscall.lds
24410 vsyscall_32.lds
24411 wanxlfw.inc
24412 uImage
24413 unifdef
24414 +utsrelease.h
24415 wakeup.bin
24416 wakeup.elf
24417 wakeup.lds
24418 zImage*
24419 zconf.hash.c
24420 +zoffset.h
24421 diff -urNp linux-2.6.32.45/Documentation/kernel-parameters.txt linux-2.6.32.45/Documentation/kernel-parameters.txt
24422 --- linux-2.6.32.45/Documentation/kernel-parameters.txt 2011-03-27 14:31:47.000000000 -0400
24423 +++ linux-2.6.32.45/Documentation/kernel-parameters.txt 2011-04-17 15:56:45.000000000 -0400
24424 @@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters.
24425 the specified number of seconds. This is to be used if
24426 your oopses keep scrolling off the screen.
24427
24428 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
24429 + virtualization environments that don't cope well with the
24430 + expand down segment used by UDEREF on X86-32 or the frequent
24431 + page table updates on X86-64.
24432 +
24433 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
24434 +
24435 pcbit= [HW,ISDN]
24436
24437 pcd. [PARIDE]
24438 diff -urNp linux-2.6.32.45/drivers/acpi/acpi_pad.c linux-2.6.32.45/drivers/acpi/acpi_pad.c
24439 --- linux-2.6.32.45/drivers/acpi/acpi_pad.c 2011-03-27 14:31:47.000000000 -0400
24440 +++ linux-2.6.32.45/drivers/acpi/acpi_pad.c 2011-04-17 15:56:46.000000000 -0400
24441 @@ -30,7 +30,7 @@
24442 #include <acpi/acpi_bus.h>
24443 #include <acpi/acpi_drivers.h>
24444
24445 -#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
24446 +#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
24447 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
24448 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
24449 static DEFINE_MUTEX(isolated_cpus_lock);
24450 diff -urNp linux-2.6.32.45/drivers/acpi/battery.c linux-2.6.32.45/drivers/acpi/battery.c
24451 --- linux-2.6.32.45/drivers/acpi/battery.c 2011-03-27 14:31:47.000000000 -0400
24452 +++ linux-2.6.32.45/drivers/acpi/battery.c 2011-04-17 15:56:46.000000000 -0400
24453 @@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
24454 }
24455
24456 static struct battery_file {
24457 - struct file_operations ops;
24458 + const struct file_operations ops;
24459 mode_t mode;
24460 const char *name;
24461 } acpi_battery_file[] = {
24462 diff -urNp linux-2.6.32.45/drivers/acpi/dock.c linux-2.6.32.45/drivers/acpi/dock.c
24463 --- linux-2.6.32.45/drivers/acpi/dock.c 2011-03-27 14:31:47.000000000 -0400
24464 +++ linux-2.6.32.45/drivers/acpi/dock.c 2011-04-17 15:56:46.000000000 -0400
24465 @@ -77,7 +77,7 @@ struct dock_dependent_device {
24466 struct list_head list;
24467 struct list_head hotplug_list;
24468 acpi_handle handle;
24469 - struct acpi_dock_ops *ops;
24470 + const struct acpi_dock_ops *ops;
24471 void *context;
24472 };
24473
24474 @@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifi
24475 * the dock driver after _DCK is executed.
24476 */
24477 int
24478 -register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
24479 +register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
24480 void *context)
24481 {
24482 struct dock_dependent_device *dd;
24483 diff -urNp linux-2.6.32.45/drivers/acpi/osl.c linux-2.6.32.45/drivers/acpi/osl.c
24484 --- linux-2.6.32.45/drivers/acpi/osl.c 2011-03-27 14:31:47.000000000 -0400
24485 +++ linux-2.6.32.45/drivers/acpi/osl.c 2011-04-17 15:56:46.000000000 -0400
24486 @@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_addres
24487 void __iomem *virt_addr;
24488
24489 virt_addr = ioremap(phys_addr, width);
24490 + if (!virt_addr)
24491 + return AE_NO_MEMORY;
24492 if (!value)
24493 value = &dummy;
24494
24495 @@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_addre
24496 void __iomem *virt_addr;
24497
24498 virt_addr = ioremap(phys_addr, width);
24499 + if (!virt_addr)
24500 + return AE_NO_MEMORY;
24501
24502 switch (width) {
24503 case 8:
24504 diff -urNp linux-2.6.32.45/drivers/acpi/power_meter.c linux-2.6.32.45/drivers/acpi/power_meter.c
24505 --- linux-2.6.32.45/drivers/acpi/power_meter.c 2011-03-27 14:31:47.000000000 -0400
24506 +++ linux-2.6.32.45/drivers/acpi/power_meter.c 2011-04-17 15:56:46.000000000 -0400
24507 @@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *d
24508 return res;
24509
24510 temp /= 1000;
24511 - if (temp < 0)
24512 - return -EINVAL;
24513
24514 mutex_lock(&resource->lock);
24515 resource->trip[attr->index - 7] = temp;
24516 diff -urNp linux-2.6.32.45/drivers/acpi/proc.c linux-2.6.32.45/drivers/acpi/proc.c
24517 --- linux-2.6.32.45/drivers/acpi/proc.c 2011-03-27 14:31:47.000000000 -0400
24518 +++ linux-2.6.32.45/drivers/acpi/proc.c 2011-04-17 15:56:46.000000000 -0400
24519 @@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct f
24520 size_t count, loff_t * ppos)
24521 {
24522 struct list_head *node, *next;
24523 - char strbuf[5];
24524 - char str[5] = "";
24525 - unsigned int len = count;
24526 + char strbuf[5] = {0};
24527 struct acpi_device *found_dev = NULL;
24528
24529 - if (len > 4)
24530 - len = 4;
24531 - if (len < 0)
24532 - return -EFAULT;
24533 + if (count > 4)
24534 + count = 4;
24535
24536 - if (copy_from_user(strbuf, buffer, len))
24537 + if (copy_from_user(strbuf, buffer, count))
24538 return -EFAULT;
24539 - strbuf[len] = '\0';
24540 - sscanf(strbuf, "%s", str);
24541 + strbuf[count] = '\0';
24542
24543 mutex_lock(&acpi_device_lock);
24544 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
24545 @@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct f
24546 if (!dev->wakeup.flags.valid)
24547 continue;
24548
24549 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
24550 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
24551 dev->wakeup.state.enabled =
24552 dev->wakeup.state.enabled ? 0 : 1;
24553 found_dev = dev;
24554 diff -urNp linux-2.6.32.45/drivers/acpi/processor_core.c linux-2.6.32.45/drivers/acpi/processor_core.c
24555 --- linux-2.6.32.45/drivers/acpi/processor_core.c 2011-03-27 14:31:47.000000000 -0400
24556 +++ linux-2.6.32.45/drivers/acpi/processor_core.c 2011-04-17 15:56:46.000000000 -0400
24557 @@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(
24558 return 0;
24559 }
24560
24561 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
24562 + BUG_ON(pr->id >= nr_cpu_ids);
24563
24564 /*
24565 * Buggy BIOS check
24566 diff -urNp linux-2.6.32.45/drivers/acpi/sbshc.c linux-2.6.32.45/drivers/acpi/sbshc.c
24567 --- linux-2.6.32.45/drivers/acpi/sbshc.c 2011-03-27 14:31:47.000000000 -0400
24568 +++ linux-2.6.32.45/drivers/acpi/sbshc.c 2011-04-17 15:56:46.000000000 -0400
24569 @@ -17,7 +17,7 @@
24570
24571 #define PREFIX "ACPI: "
24572
24573 -#define ACPI_SMB_HC_CLASS "smbus_host_controller"
24574 +#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
24575 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
24576
24577 struct acpi_smb_hc {
24578 diff -urNp linux-2.6.32.45/drivers/acpi/sleep.c linux-2.6.32.45/drivers/acpi/sleep.c
24579 --- linux-2.6.32.45/drivers/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
24580 +++ linux-2.6.32.45/drivers/acpi/sleep.c 2011-04-17 15:56:46.000000000 -0400
24581 @@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(susp
24582 }
24583 }
24584
24585 -static struct platform_suspend_ops acpi_suspend_ops = {
24586 +static const struct platform_suspend_ops acpi_suspend_ops = {
24587 .valid = acpi_suspend_state_valid,
24588 .begin = acpi_suspend_begin,
24589 .prepare_late = acpi_pm_prepare,
24590 @@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspen
24591 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
24592 * been requested.
24593 */
24594 -static struct platform_suspend_ops acpi_suspend_ops_old = {
24595 +static const struct platform_suspend_ops acpi_suspend_ops_old = {
24596 .valid = acpi_suspend_state_valid,
24597 .begin = acpi_suspend_begin_old,
24598 .prepare_late = acpi_pm_disable_gpes,
24599 @@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
24600 acpi_enable_all_runtime_gpes();
24601 }
24602
24603 -static struct platform_hibernation_ops acpi_hibernation_ops = {
24604 +static const struct platform_hibernation_ops acpi_hibernation_ops = {
24605 .begin = acpi_hibernation_begin,
24606 .end = acpi_pm_end,
24607 .pre_snapshot = acpi_hibernation_pre_snapshot,
24608 @@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot
24609 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
24610 * been requested.
24611 */
24612 -static struct platform_hibernation_ops acpi_hibernation_ops_old = {
24613 +static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
24614 .begin = acpi_hibernation_begin_old,
24615 .end = acpi_pm_end,
24616 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
24617 diff -urNp linux-2.6.32.45/drivers/acpi/video.c linux-2.6.32.45/drivers/acpi/video.c
24618 --- linux-2.6.32.45/drivers/acpi/video.c 2011-03-27 14:31:47.000000000 -0400
24619 +++ linux-2.6.32.45/drivers/acpi/video.c 2011-04-17 15:56:46.000000000 -0400
24620 @@ -359,7 +359,7 @@ static int acpi_video_set_brightness(str
24621 vd->brightness->levels[request_level]);
24622 }
24623
24624 -static struct backlight_ops acpi_backlight_ops = {
24625 +static const struct backlight_ops acpi_backlight_ops = {
24626 .get_brightness = acpi_video_get_brightness,
24627 .update_status = acpi_video_set_brightness,
24628 };
24629 diff -urNp linux-2.6.32.45/drivers/ata/ahci.c linux-2.6.32.45/drivers/ata/ahci.c
24630 --- linux-2.6.32.45/drivers/ata/ahci.c 2011-03-27 14:31:47.000000000 -0400
24631 +++ linux-2.6.32.45/drivers/ata/ahci.c 2011-04-23 12:56:10.000000000 -0400
24632 @@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sh
24633 .sdev_attrs = ahci_sdev_attrs,
24634 };
24635
24636 -static struct ata_port_operations ahci_ops = {
24637 +static const struct ata_port_operations ahci_ops = {
24638 .inherits = &sata_pmp_port_ops,
24639
24640 .qc_defer = sata_pmp_qc_defer_cmd_switch,
24641 @@ -424,17 +424,17 @@ static struct ata_port_operations ahci_o
24642 .port_stop = ahci_port_stop,
24643 };
24644
24645 -static struct ata_port_operations ahci_vt8251_ops = {
24646 +static const struct ata_port_operations ahci_vt8251_ops = {
24647 .inherits = &ahci_ops,
24648 .hardreset = ahci_vt8251_hardreset,
24649 };
24650
24651 -static struct ata_port_operations ahci_p5wdh_ops = {
24652 +static const struct ata_port_operations ahci_p5wdh_ops = {
24653 .inherits = &ahci_ops,
24654 .hardreset = ahci_p5wdh_hardreset,
24655 };
24656
24657 -static struct ata_port_operations ahci_sb600_ops = {
24658 +static const struct ata_port_operations ahci_sb600_ops = {
24659 .inherits = &ahci_ops,
24660 .softreset = ahci_sb600_softreset,
24661 .pmp_softreset = ahci_sb600_softreset,
24662 diff -urNp linux-2.6.32.45/drivers/ata/ata_generic.c linux-2.6.32.45/drivers/ata/ata_generic.c
24663 --- linux-2.6.32.45/drivers/ata/ata_generic.c 2011-03-27 14:31:47.000000000 -0400
24664 +++ linux-2.6.32.45/drivers/ata/ata_generic.c 2011-04-17 15:56:46.000000000 -0400
24665 @@ -104,7 +104,7 @@ static struct scsi_host_template generic
24666 ATA_BMDMA_SHT(DRV_NAME),
24667 };
24668
24669 -static struct ata_port_operations generic_port_ops = {
24670 +static const struct ata_port_operations generic_port_ops = {
24671 .inherits = &ata_bmdma_port_ops,
24672 .cable_detect = ata_cable_unknown,
24673 .set_mode = generic_set_mode,
24674 diff -urNp linux-2.6.32.45/drivers/ata/ata_piix.c linux-2.6.32.45/drivers/ata/ata_piix.c
24675 --- linux-2.6.32.45/drivers/ata/ata_piix.c 2011-03-27 14:31:47.000000000 -0400
24676 +++ linux-2.6.32.45/drivers/ata/ata_piix.c 2011-04-23 12:56:10.000000000 -0400
24677 @@ -318,7 +318,7 @@ static struct scsi_host_template piix_sh
24678 ATA_BMDMA_SHT(DRV_NAME),
24679 };
24680
24681 -static struct ata_port_operations piix_pata_ops = {
24682 +static const struct ata_port_operations piix_pata_ops = {
24683 .inherits = &ata_bmdma32_port_ops,
24684 .cable_detect = ata_cable_40wire,
24685 .set_piomode = piix_set_piomode,
24686 @@ -326,22 +326,22 @@ static struct ata_port_operations piix_p
24687 .prereset = piix_pata_prereset,
24688 };
24689
24690 -static struct ata_port_operations piix_vmw_ops = {
24691 +static const struct ata_port_operations piix_vmw_ops = {
24692 .inherits = &piix_pata_ops,
24693 .bmdma_status = piix_vmw_bmdma_status,
24694 };
24695
24696 -static struct ata_port_operations ich_pata_ops = {
24697 +static const struct ata_port_operations ich_pata_ops = {
24698 .inherits = &piix_pata_ops,
24699 .cable_detect = ich_pata_cable_detect,
24700 .set_dmamode = ich_set_dmamode,
24701 };
24702
24703 -static struct ata_port_operations piix_sata_ops = {
24704 +static const struct ata_port_operations piix_sata_ops = {
24705 .inherits = &ata_bmdma_port_ops,
24706 };
24707
24708 -static struct ata_port_operations piix_sidpr_sata_ops = {
24709 +static const struct ata_port_operations piix_sidpr_sata_ops = {
24710 .inherits = &piix_sata_ops,
24711 .hardreset = sata_std_hardreset,
24712 .scr_read = piix_sidpr_scr_read,
24713 diff -urNp linux-2.6.32.45/drivers/ata/libata-acpi.c linux-2.6.32.45/drivers/ata/libata-acpi.c
24714 --- linux-2.6.32.45/drivers/ata/libata-acpi.c 2011-03-27 14:31:47.000000000 -0400
24715 +++ linux-2.6.32.45/drivers/ata/libata-acpi.c 2011-04-17 15:56:46.000000000 -0400
24716 @@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_han
24717 ata_acpi_uevent(dev->link->ap, dev, event);
24718 }
24719
24720 -static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
24721 +static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
24722 .handler = ata_acpi_dev_notify_dock,
24723 .uevent = ata_acpi_dev_uevent,
24724 };
24725
24726 -static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
24727 +static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
24728 .handler = ata_acpi_ap_notify_dock,
24729 .uevent = ata_acpi_ap_uevent,
24730 };
24731 diff -urNp linux-2.6.32.45/drivers/ata/libata-core.c linux-2.6.32.45/drivers/ata/libata-core.c
24732 --- linux-2.6.32.45/drivers/ata/libata-core.c 2011-03-27 14:31:47.000000000 -0400
24733 +++ linux-2.6.32.45/drivers/ata/libata-core.c 2011-08-05 20:33:55.000000000 -0400
24734 @@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *
24735 struct ata_port *ap;
24736 unsigned int tag;
24737
24738 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24739 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24740 ap = qc->ap;
24741
24742 qc->flags = 0;
24743 @@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued
24744 struct ata_port *ap;
24745 struct ata_link *link;
24746
24747 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24748 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24749 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
24750 ap = qc->ap;
24751 link = qc->dev->link;
24752 @@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device
24753 * LOCKING:
24754 * None.
24755 */
24756 -static void ata_finalize_port_ops(struct ata_port_operations *ops)
24757 +static void ata_finalize_port_ops(const struct ata_port_operations *ops)
24758 {
24759 static DEFINE_SPINLOCK(lock);
24760 const struct ata_port_operations *cur;
24761 @@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct
24762 return;
24763
24764 spin_lock(&lock);
24765 + pax_open_kernel();
24766
24767 for (cur = ops->inherits; cur; cur = cur->inherits) {
24768 void **inherit = (void **)cur;
24769 @@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct
24770 if (IS_ERR(*pp))
24771 *pp = NULL;
24772
24773 - ops->inherits = NULL;
24774 + *(struct ata_port_operations **)&ops->inherits = NULL;
24775
24776 + pax_close_kernel();
24777 spin_unlock(&lock);
24778 }
24779
24780 @@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host
24781 */
24782 /* KILLME - the only user left is ipr */
24783 void ata_host_init(struct ata_host *host, struct device *dev,
24784 - unsigned long flags, struct ata_port_operations *ops)
24785 + unsigned long flags, const struct ata_port_operations *ops)
24786 {
24787 spin_lock_init(&host->lock);
24788 host->dev = dev;
24789 @@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(stru
24790 /* truly dummy */
24791 }
24792
24793 -struct ata_port_operations ata_dummy_port_ops = {
24794 +const struct ata_port_operations ata_dummy_port_ops = {
24795 .qc_prep = ata_noop_qc_prep,
24796 .qc_issue = ata_dummy_qc_issue,
24797 .error_handler = ata_dummy_error_handler,
24798 diff -urNp linux-2.6.32.45/drivers/ata/libata-eh.c linux-2.6.32.45/drivers/ata/libata-eh.c
24799 --- linux-2.6.32.45/drivers/ata/libata-eh.c 2011-08-09 18:35:28.000000000 -0400
24800 +++ linux-2.6.32.45/drivers/ata/libata-eh.c 2011-08-09 18:33:59.000000000 -0400
24801 @@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
24802 {
24803 struct ata_link *link;
24804
24805 + pax_track_stack();
24806 +
24807 ata_for_each_link(link, ap, HOST_FIRST)
24808 ata_eh_link_report(link);
24809 }
24810 @@ -3594,7 +3596,7 @@ void ata_do_eh(struct ata_port *ap, ata_
24811 */
24812 void ata_std_error_handler(struct ata_port *ap)
24813 {
24814 - struct ata_port_operations *ops = ap->ops;
24815 + const struct ata_port_operations *ops = ap->ops;
24816 ata_reset_fn_t hardreset = ops->hardreset;
24817
24818 /* ignore built-in hardreset if SCR access is not available */
24819 diff -urNp linux-2.6.32.45/drivers/ata/libata-pmp.c linux-2.6.32.45/drivers/ata/libata-pmp.c
24820 --- linux-2.6.32.45/drivers/ata/libata-pmp.c 2011-03-27 14:31:47.000000000 -0400
24821 +++ linux-2.6.32.45/drivers/ata/libata-pmp.c 2011-04-17 15:56:46.000000000 -0400
24822 @@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(str
24823 */
24824 static int sata_pmp_eh_recover(struct ata_port *ap)
24825 {
24826 - struct ata_port_operations *ops = ap->ops;
24827 + const struct ata_port_operations *ops = ap->ops;
24828 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
24829 struct ata_link *pmp_link = &ap->link;
24830 struct ata_device *pmp_dev = pmp_link->device;
24831 diff -urNp linux-2.6.32.45/drivers/ata/pata_acpi.c linux-2.6.32.45/drivers/ata/pata_acpi.c
24832 --- linux-2.6.32.45/drivers/ata/pata_acpi.c 2011-03-27 14:31:47.000000000 -0400
24833 +++ linux-2.6.32.45/drivers/ata/pata_acpi.c 2011-04-17 15:56:46.000000000 -0400
24834 @@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_s
24835 ATA_BMDMA_SHT(DRV_NAME),
24836 };
24837
24838 -static struct ata_port_operations pacpi_ops = {
24839 +static const struct ata_port_operations pacpi_ops = {
24840 .inherits = &ata_bmdma_port_ops,
24841 .qc_issue = pacpi_qc_issue,
24842 .cable_detect = pacpi_cable_detect,
24843 diff -urNp linux-2.6.32.45/drivers/ata/pata_ali.c linux-2.6.32.45/drivers/ata/pata_ali.c
24844 --- linux-2.6.32.45/drivers/ata/pata_ali.c 2011-03-27 14:31:47.000000000 -0400
24845 +++ linux-2.6.32.45/drivers/ata/pata_ali.c 2011-04-17 15:56:46.000000000 -0400
24846 @@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht
24847 * Port operations for PIO only ALi
24848 */
24849
24850 -static struct ata_port_operations ali_early_port_ops = {
24851 +static const struct ata_port_operations ali_early_port_ops = {
24852 .inherits = &ata_sff_port_ops,
24853 .cable_detect = ata_cable_40wire,
24854 .set_piomode = ali_set_piomode,
24855 @@ -382,7 +382,7 @@ static const struct ata_port_operations
24856 * Port operations for DMA capable ALi without cable
24857 * detect
24858 */
24859 -static struct ata_port_operations ali_20_port_ops = {
24860 +static const struct ata_port_operations ali_20_port_ops = {
24861 .inherits = &ali_dma_base_ops,
24862 .cable_detect = ata_cable_40wire,
24863 .mode_filter = ali_20_filter,
24864 @@ -393,7 +393,7 @@ static struct ata_port_operations ali_20
24865 /*
24866 * Port operations for DMA capable ALi with cable detect
24867 */
24868 -static struct ata_port_operations ali_c2_port_ops = {
24869 +static const struct ata_port_operations ali_c2_port_ops = {
24870 .inherits = &ali_dma_base_ops,
24871 .check_atapi_dma = ali_check_atapi_dma,
24872 .cable_detect = ali_c2_cable_detect,
24873 @@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2
24874 /*
24875 * Port operations for DMA capable ALi with cable detect
24876 */
24877 -static struct ata_port_operations ali_c4_port_ops = {
24878 +static const struct ata_port_operations ali_c4_port_ops = {
24879 .inherits = &ali_dma_base_ops,
24880 .check_atapi_dma = ali_check_atapi_dma,
24881 .cable_detect = ali_c2_cable_detect,
24882 @@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4
24883 /*
24884 * Port operations for DMA capable ALi with cable detect and LBA48
24885 */
24886 -static struct ata_port_operations ali_c5_port_ops = {
24887 +static const struct ata_port_operations ali_c5_port_ops = {
24888 .inherits = &ali_dma_base_ops,
24889 .check_atapi_dma = ali_check_atapi_dma,
24890 .dev_config = ali_warn_atapi_dma,
24891 diff -urNp linux-2.6.32.45/drivers/ata/pata_amd.c linux-2.6.32.45/drivers/ata/pata_amd.c
24892 --- linux-2.6.32.45/drivers/ata/pata_amd.c 2011-03-27 14:31:47.000000000 -0400
24893 +++ linux-2.6.32.45/drivers/ata/pata_amd.c 2011-04-17 15:56:46.000000000 -0400
24894 @@ -397,28 +397,28 @@ static const struct ata_port_operations
24895 .prereset = amd_pre_reset,
24896 };
24897
24898 -static struct ata_port_operations amd33_port_ops = {
24899 +static const struct ata_port_operations amd33_port_ops = {
24900 .inherits = &amd_base_port_ops,
24901 .cable_detect = ata_cable_40wire,
24902 .set_piomode = amd33_set_piomode,
24903 .set_dmamode = amd33_set_dmamode,
24904 };
24905
24906 -static struct ata_port_operations amd66_port_ops = {
24907 +static const struct ata_port_operations amd66_port_ops = {
24908 .inherits = &amd_base_port_ops,
24909 .cable_detect = ata_cable_unknown,
24910 .set_piomode = amd66_set_piomode,
24911 .set_dmamode = amd66_set_dmamode,
24912 };
24913
24914 -static struct ata_port_operations amd100_port_ops = {
24915 +static const struct ata_port_operations amd100_port_ops = {
24916 .inherits = &amd_base_port_ops,
24917 .cable_detect = ata_cable_unknown,
24918 .set_piomode = amd100_set_piomode,
24919 .set_dmamode = amd100_set_dmamode,
24920 };
24921
24922 -static struct ata_port_operations amd133_port_ops = {
24923 +static const struct ata_port_operations amd133_port_ops = {
24924 .inherits = &amd_base_port_ops,
24925 .cable_detect = amd_cable_detect,
24926 .set_piomode = amd133_set_piomode,
24927 @@ -433,13 +433,13 @@ static const struct ata_port_operations
24928 .host_stop = nv_host_stop,
24929 };
24930
24931 -static struct ata_port_operations nv100_port_ops = {
24932 +static const struct ata_port_operations nv100_port_ops = {
24933 .inherits = &nv_base_port_ops,
24934 .set_piomode = nv100_set_piomode,
24935 .set_dmamode = nv100_set_dmamode,
24936 };
24937
24938 -static struct ata_port_operations nv133_port_ops = {
24939 +static const struct ata_port_operations nv133_port_ops = {
24940 .inherits = &nv_base_port_ops,
24941 .set_piomode = nv133_set_piomode,
24942 .set_dmamode = nv133_set_dmamode,
24943 diff -urNp linux-2.6.32.45/drivers/ata/pata_artop.c linux-2.6.32.45/drivers/ata/pata_artop.c
24944 --- linux-2.6.32.45/drivers/ata/pata_artop.c 2011-03-27 14:31:47.000000000 -0400
24945 +++ linux-2.6.32.45/drivers/ata/pata_artop.c 2011-04-17 15:56:46.000000000 -0400
24946 @@ -311,7 +311,7 @@ static struct scsi_host_template artop_s
24947 ATA_BMDMA_SHT(DRV_NAME),
24948 };
24949
24950 -static struct ata_port_operations artop6210_ops = {
24951 +static const struct ata_port_operations artop6210_ops = {
24952 .inherits = &ata_bmdma_port_ops,
24953 .cable_detect = ata_cable_40wire,
24954 .set_piomode = artop6210_set_piomode,
24955 @@ -320,7 +320,7 @@ static struct ata_port_operations artop6
24956 .qc_defer = artop6210_qc_defer,
24957 };
24958
24959 -static struct ata_port_operations artop6260_ops = {
24960 +static const struct ata_port_operations artop6260_ops = {
24961 .inherits = &ata_bmdma_port_ops,
24962 .cable_detect = artop6260_cable_detect,
24963 .set_piomode = artop6260_set_piomode,
24964 diff -urNp linux-2.6.32.45/drivers/ata/pata_at32.c linux-2.6.32.45/drivers/ata/pata_at32.c
24965 --- linux-2.6.32.45/drivers/ata/pata_at32.c 2011-03-27 14:31:47.000000000 -0400
24966 +++ linux-2.6.32.45/drivers/ata/pata_at32.c 2011-04-17 15:56:46.000000000 -0400
24967 @@ -172,7 +172,7 @@ static struct scsi_host_template at32_sh
24968 ATA_PIO_SHT(DRV_NAME),
24969 };
24970
24971 -static struct ata_port_operations at32_port_ops = {
24972 +static const struct ata_port_operations at32_port_ops = {
24973 .inherits = &ata_sff_port_ops,
24974 .cable_detect = ata_cable_40wire,
24975 .set_piomode = pata_at32_set_piomode,
24976 diff -urNp linux-2.6.32.45/drivers/ata/pata_at91.c linux-2.6.32.45/drivers/ata/pata_at91.c
24977 --- linux-2.6.32.45/drivers/ata/pata_at91.c 2011-03-27 14:31:47.000000000 -0400
24978 +++ linux-2.6.32.45/drivers/ata/pata_at91.c 2011-04-17 15:56:46.000000000 -0400
24979 @@ -195,7 +195,7 @@ static struct scsi_host_template pata_at
24980 ATA_PIO_SHT(DRV_NAME),
24981 };
24982
24983 -static struct ata_port_operations pata_at91_port_ops = {
24984 +static const struct ata_port_operations pata_at91_port_ops = {
24985 .inherits = &ata_sff_port_ops,
24986
24987 .sff_data_xfer = pata_at91_data_xfer_noirq,
24988 diff -urNp linux-2.6.32.45/drivers/ata/pata_atiixp.c linux-2.6.32.45/drivers/ata/pata_atiixp.c
24989 --- linux-2.6.32.45/drivers/ata/pata_atiixp.c 2011-03-27 14:31:47.000000000 -0400
24990 +++ linux-2.6.32.45/drivers/ata/pata_atiixp.c 2011-04-17 15:56:46.000000000 -0400
24991 @@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_
24992 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24993 };
24994
24995 -static struct ata_port_operations atiixp_port_ops = {
24996 +static const struct ata_port_operations atiixp_port_ops = {
24997 .inherits = &ata_bmdma_port_ops,
24998
24999 .qc_prep = ata_sff_dumb_qc_prep,
25000 diff -urNp linux-2.6.32.45/drivers/ata/pata_atp867x.c linux-2.6.32.45/drivers/ata/pata_atp867x.c
25001 --- linux-2.6.32.45/drivers/ata/pata_atp867x.c 2011-03-27 14:31:47.000000000 -0400
25002 +++ linux-2.6.32.45/drivers/ata/pata_atp867x.c 2011-04-17 15:56:46.000000000 -0400
25003 @@ -274,7 +274,7 @@ static struct scsi_host_template atp867x
25004 ATA_BMDMA_SHT(DRV_NAME),
25005 };
25006
25007 -static struct ata_port_operations atp867x_ops = {
25008 +static const struct ata_port_operations atp867x_ops = {
25009 .inherits = &ata_bmdma_port_ops,
25010 .cable_detect = atp867x_cable_detect,
25011 .set_piomode = atp867x_set_piomode,
25012 diff -urNp linux-2.6.32.45/drivers/ata/pata_bf54x.c linux-2.6.32.45/drivers/ata/pata_bf54x.c
25013 --- linux-2.6.32.45/drivers/ata/pata_bf54x.c 2011-03-27 14:31:47.000000000 -0400
25014 +++ linux-2.6.32.45/drivers/ata/pata_bf54x.c 2011-04-17 15:56:46.000000000 -0400
25015 @@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sh
25016 .dma_boundary = ATA_DMA_BOUNDARY,
25017 };
25018
25019 -static struct ata_port_operations bfin_pata_ops = {
25020 +static const struct ata_port_operations bfin_pata_ops = {
25021 .inherits = &ata_sff_port_ops,
25022
25023 .set_piomode = bfin_set_piomode,
25024 diff -urNp linux-2.6.32.45/drivers/ata/pata_cmd640.c linux-2.6.32.45/drivers/ata/pata_cmd640.c
25025 --- linux-2.6.32.45/drivers/ata/pata_cmd640.c 2011-03-27 14:31:47.000000000 -0400
25026 +++ linux-2.6.32.45/drivers/ata/pata_cmd640.c 2011-04-17 15:56:46.000000000 -0400
25027 @@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_
25028 ATA_BMDMA_SHT(DRV_NAME),
25029 };
25030
25031 -static struct ata_port_operations cmd640_port_ops = {
25032 +static const struct ata_port_operations cmd640_port_ops = {
25033 .inherits = &ata_bmdma_port_ops,
25034 /* In theory xfer_noirq is not needed once we kill the prefetcher */
25035 .sff_data_xfer = ata_sff_data_xfer_noirq,
25036 diff -urNp linux-2.6.32.45/drivers/ata/pata_cmd64x.c linux-2.6.32.45/drivers/ata/pata_cmd64x.c
25037 --- linux-2.6.32.45/drivers/ata/pata_cmd64x.c 2011-06-25 12:55:34.000000000 -0400
25038 +++ linux-2.6.32.45/drivers/ata/pata_cmd64x.c 2011-06-25 12:56:37.000000000 -0400
25039 @@ -271,18 +271,18 @@ static const struct ata_port_operations
25040 .set_dmamode = cmd64x_set_dmamode,
25041 };
25042
25043 -static struct ata_port_operations cmd64x_port_ops = {
25044 +static const struct ata_port_operations cmd64x_port_ops = {
25045 .inherits = &cmd64x_base_ops,
25046 .cable_detect = ata_cable_40wire,
25047 };
25048
25049 -static struct ata_port_operations cmd646r1_port_ops = {
25050 +static const struct ata_port_operations cmd646r1_port_ops = {
25051 .inherits = &cmd64x_base_ops,
25052 .bmdma_stop = cmd646r1_bmdma_stop,
25053 .cable_detect = ata_cable_40wire,
25054 };
25055
25056 -static struct ata_port_operations cmd648_port_ops = {
25057 +static const struct ata_port_operations cmd648_port_ops = {
25058 .inherits = &cmd64x_base_ops,
25059 .bmdma_stop = cmd648_bmdma_stop,
25060 .cable_detect = cmd648_cable_detect,
25061 diff -urNp linux-2.6.32.45/drivers/ata/pata_cs5520.c linux-2.6.32.45/drivers/ata/pata_cs5520.c
25062 --- linux-2.6.32.45/drivers/ata/pata_cs5520.c 2011-03-27 14:31:47.000000000 -0400
25063 +++ linux-2.6.32.45/drivers/ata/pata_cs5520.c 2011-04-17 15:56:46.000000000 -0400
25064 @@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_
25065 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
25066 };
25067
25068 -static struct ata_port_operations cs5520_port_ops = {
25069 +static const struct ata_port_operations cs5520_port_ops = {
25070 .inherits = &ata_bmdma_port_ops,
25071 .qc_prep = ata_sff_dumb_qc_prep,
25072 .cable_detect = ata_cable_40wire,
25073 diff -urNp linux-2.6.32.45/drivers/ata/pata_cs5530.c linux-2.6.32.45/drivers/ata/pata_cs5530.c
25074 --- linux-2.6.32.45/drivers/ata/pata_cs5530.c 2011-03-27 14:31:47.000000000 -0400
25075 +++ linux-2.6.32.45/drivers/ata/pata_cs5530.c 2011-04-17 15:56:46.000000000 -0400
25076 @@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_
25077 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
25078 };
25079
25080 -static struct ata_port_operations cs5530_port_ops = {
25081 +static const struct ata_port_operations cs5530_port_ops = {
25082 .inherits = &ata_bmdma_port_ops,
25083
25084 .qc_prep = ata_sff_dumb_qc_prep,
25085 diff -urNp linux-2.6.32.45/drivers/ata/pata_cs5535.c linux-2.6.32.45/drivers/ata/pata_cs5535.c
25086 --- linux-2.6.32.45/drivers/ata/pata_cs5535.c 2011-03-27 14:31:47.000000000 -0400
25087 +++ linux-2.6.32.45/drivers/ata/pata_cs5535.c 2011-04-17 15:56:46.000000000 -0400
25088 @@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_
25089 ATA_BMDMA_SHT(DRV_NAME),
25090 };
25091
25092 -static struct ata_port_operations cs5535_port_ops = {
25093 +static const struct ata_port_operations cs5535_port_ops = {
25094 .inherits = &ata_bmdma_port_ops,
25095 .cable_detect = cs5535_cable_detect,
25096 .set_piomode = cs5535_set_piomode,
25097 diff -urNp linux-2.6.32.45/drivers/ata/pata_cs5536.c linux-2.6.32.45/drivers/ata/pata_cs5536.c
25098 --- linux-2.6.32.45/drivers/ata/pata_cs5536.c 2011-03-27 14:31:47.000000000 -0400
25099 +++ linux-2.6.32.45/drivers/ata/pata_cs5536.c 2011-04-17 15:56:46.000000000 -0400
25100 @@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_
25101 ATA_BMDMA_SHT(DRV_NAME),
25102 };
25103
25104 -static struct ata_port_operations cs5536_port_ops = {
25105 +static const struct ata_port_operations cs5536_port_ops = {
25106 .inherits = &ata_bmdma_port_ops,
25107 .cable_detect = cs5536_cable_detect,
25108 .set_piomode = cs5536_set_piomode,
25109 diff -urNp linux-2.6.32.45/drivers/ata/pata_cypress.c linux-2.6.32.45/drivers/ata/pata_cypress.c
25110 --- linux-2.6.32.45/drivers/ata/pata_cypress.c 2011-03-27 14:31:47.000000000 -0400
25111 +++ linux-2.6.32.45/drivers/ata/pata_cypress.c 2011-04-17 15:56:46.000000000 -0400
25112 @@ -113,7 +113,7 @@ static struct scsi_host_template cy82c69
25113 ATA_BMDMA_SHT(DRV_NAME),
25114 };
25115
25116 -static struct ata_port_operations cy82c693_port_ops = {
25117 +static const struct ata_port_operations cy82c693_port_ops = {
25118 .inherits = &ata_bmdma_port_ops,
25119 .cable_detect = ata_cable_40wire,
25120 .set_piomode = cy82c693_set_piomode,
25121 diff -urNp linux-2.6.32.45/drivers/ata/pata_efar.c linux-2.6.32.45/drivers/ata/pata_efar.c
25122 --- linux-2.6.32.45/drivers/ata/pata_efar.c 2011-03-27 14:31:47.000000000 -0400
25123 +++ linux-2.6.32.45/drivers/ata/pata_efar.c 2011-04-17 15:56:46.000000000 -0400
25124 @@ -222,7 +222,7 @@ static struct scsi_host_template efar_sh
25125 ATA_BMDMA_SHT(DRV_NAME),
25126 };
25127
25128 -static struct ata_port_operations efar_ops = {
25129 +static const struct ata_port_operations efar_ops = {
25130 .inherits = &ata_bmdma_port_ops,
25131 .cable_detect = efar_cable_detect,
25132 .set_piomode = efar_set_piomode,
25133 diff -urNp linux-2.6.32.45/drivers/ata/pata_hpt366.c linux-2.6.32.45/drivers/ata/pata_hpt366.c
25134 --- linux-2.6.32.45/drivers/ata/pata_hpt366.c 2011-06-25 12:55:34.000000000 -0400
25135 +++ linux-2.6.32.45/drivers/ata/pata_hpt366.c 2011-06-25 12:56:37.000000000 -0400
25136 @@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_
25137 * Configuration for HPT366/68
25138 */
25139
25140 -static struct ata_port_operations hpt366_port_ops = {
25141 +static const struct ata_port_operations hpt366_port_ops = {
25142 .inherits = &ata_bmdma_port_ops,
25143 .cable_detect = hpt36x_cable_detect,
25144 .mode_filter = hpt366_filter,
25145 diff -urNp linux-2.6.32.45/drivers/ata/pata_hpt37x.c linux-2.6.32.45/drivers/ata/pata_hpt37x.c
25146 --- linux-2.6.32.45/drivers/ata/pata_hpt37x.c 2011-06-25 12:55:34.000000000 -0400
25147 +++ linux-2.6.32.45/drivers/ata/pata_hpt37x.c 2011-06-25 12:56:37.000000000 -0400
25148 @@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_
25149 * Configuration for HPT370
25150 */
25151
25152 -static struct ata_port_operations hpt370_port_ops = {
25153 +static const struct ata_port_operations hpt370_port_ops = {
25154 .inherits = &ata_bmdma_port_ops,
25155
25156 .bmdma_stop = hpt370_bmdma_stop,
25157 @@ -591,7 +591,7 @@ static struct ata_port_operations hpt370
25158 * Configuration for HPT370A. Close to 370 but less filters
25159 */
25160
25161 -static struct ata_port_operations hpt370a_port_ops = {
25162 +static const struct ata_port_operations hpt370a_port_ops = {
25163 .inherits = &hpt370_port_ops,
25164 .mode_filter = hpt370a_filter,
25165 };
25166 @@ -601,7 +601,7 @@ static struct ata_port_operations hpt370
25167 * and DMA mode setting functionality.
25168 */
25169
25170 -static struct ata_port_operations hpt372_port_ops = {
25171 +static const struct ata_port_operations hpt372_port_ops = {
25172 .inherits = &ata_bmdma_port_ops,
25173
25174 .bmdma_stop = hpt37x_bmdma_stop,
25175 @@ -616,7 +616,7 @@ static struct ata_port_operations hpt372
25176 * but we have a different cable detection procedure for function 1.
25177 */
25178
25179 -static struct ata_port_operations hpt374_fn1_port_ops = {
25180 +static const struct ata_port_operations hpt374_fn1_port_ops = {
25181 .inherits = &hpt372_port_ops,
25182 .prereset = hpt374_fn1_pre_reset,
25183 };
25184 diff -urNp linux-2.6.32.45/drivers/ata/pata_hpt3x2n.c linux-2.6.32.45/drivers/ata/pata_hpt3x2n.c
25185 --- linux-2.6.32.45/drivers/ata/pata_hpt3x2n.c 2011-06-25 12:55:34.000000000 -0400
25186 +++ linux-2.6.32.45/drivers/ata/pata_hpt3x2n.c 2011-06-25 12:56:37.000000000 -0400
25187 @@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n
25188 * Configuration for HPT3x2n.
25189 */
25190
25191 -static struct ata_port_operations hpt3x2n_port_ops = {
25192 +static const struct ata_port_operations hpt3x2n_port_ops = {
25193 .inherits = &ata_bmdma_port_ops,
25194
25195 .bmdma_stop = hpt3x2n_bmdma_stop,
25196 diff -urNp linux-2.6.32.45/drivers/ata/pata_hpt3x3.c linux-2.6.32.45/drivers/ata/pata_hpt3x3.c
25197 --- linux-2.6.32.45/drivers/ata/pata_hpt3x3.c 2011-03-27 14:31:47.000000000 -0400
25198 +++ linux-2.6.32.45/drivers/ata/pata_hpt3x3.c 2011-04-17 15:56:46.000000000 -0400
25199 @@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_
25200 ATA_BMDMA_SHT(DRV_NAME),
25201 };
25202
25203 -static struct ata_port_operations hpt3x3_port_ops = {
25204 +static const struct ata_port_operations hpt3x3_port_ops = {
25205 .inherits = &ata_bmdma_port_ops,
25206 .cable_detect = ata_cable_40wire,
25207 .set_piomode = hpt3x3_set_piomode,
25208 diff -urNp linux-2.6.32.45/drivers/ata/pata_icside.c linux-2.6.32.45/drivers/ata/pata_icside.c
25209 --- linux-2.6.32.45/drivers/ata/pata_icside.c 2011-03-27 14:31:47.000000000 -0400
25210 +++ linux-2.6.32.45/drivers/ata/pata_icside.c 2011-04-17 15:56:46.000000000 -0400
25211 @@ -319,7 +319,7 @@ static void pata_icside_postreset(struct
25212 }
25213 }
25214
25215 -static struct ata_port_operations pata_icside_port_ops = {
25216 +static const struct ata_port_operations pata_icside_port_ops = {
25217 .inherits = &ata_sff_port_ops,
25218 /* no need to build any PRD tables for DMA */
25219 .qc_prep = ata_noop_qc_prep,
25220 diff -urNp linux-2.6.32.45/drivers/ata/pata_isapnp.c linux-2.6.32.45/drivers/ata/pata_isapnp.c
25221 --- linux-2.6.32.45/drivers/ata/pata_isapnp.c 2011-03-27 14:31:47.000000000 -0400
25222 +++ linux-2.6.32.45/drivers/ata/pata_isapnp.c 2011-04-17 15:56:46.000000000 -0400
25223 @@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_
25224 ATA_PIO_SHT(DRV_NAME),
25225 };
25226
25227 -static struct ata_port_operations isapnp_port_ops = {
25228 +static const struct ata_port_operations isapnp_port_ops = {
25229 .inherits = &ata_sff_port_ops,
25230 .cable_detect = ata_cable_40wire,
25231 };
25232
25233 -static struct ata_port_operations isapnp_noalt_port_ops = {
25234 +static const struct ata_port_operations isapnp_noalt_port_ops = {
25235 .inherits = &ata_sff_port_ops,
25236 .cable_detect = ata_cable_40wire,
25237 /* No altstatus so we don't want to use the lost interrupt poll */
25238 diff -urNp linux-2.6.32.45/drivers/ata/pata_it8213.c linux-2.6.32.45/drivers/ata/pata_it8213.c
25239 --- linux-2.6.32.45/drivers/ata/pata_it8213.c 2011-03-27 14:31:47.000000000 -0400
25240 +++ linux-2.6.32.45/drivers/ata/pata_it8213.c 2011-04-17 15:56:46.000000000 -0400
25241 @@ -234,7 +234,7 @@ static struct scsi_host_template it8213_
25242 };
25243
25244
25245 -static struct ata_port_operations it8213_ops = {
25246 +static const struct ata_port_operations it8213_ops = {
25247 .inherits = &ata_bmdma_port_ops,
25248 .cable_detect = it8213_cable_detect,
25249 .set_piomode = it8213_set_piomode,
25250 diff -urNp linux-2.6.32.45/drivers/ata/pata_it821x.c linux-2.6.32.45/drivers/ata/pata_it821x.c
25251 --- linux-2.6.32.45/drivers/ata/pata_it821x.c 2011-03-27 14:31:47.000000000 -0400
25252 +++ linux-2.6.32.45/drivers/ata/pata_it821x.c 2011-04-17 15:56:46.000000000 -0400
25253 @@ -800,7 +800,7 @@ static struct scsi_host_template it821x_
25254 ATA_BMDMA_SHT(DRV_NAME),
25255 };
25256
25257 -static struct ata_port_operations it821x_smart_port_ops = {
25258 +static const struct ata_port_operations it821x_smart_port_ops = {
25259 .inherits = &ata_bmdma_port_ops,
25260
25261 .check_atapi_dma= it821x_check_atapi_dma,
25262 @@ -814,7 +814,7 @@ static struct ata_port_operations it821x
25263 .port_start = it821x_port_start,
25264 };
25265
25266 -static struct ata_port_operations it821x_passthru_port_ops = {
25267 +static const struct ata_port_operations it821x_passthru_port_ops = {
25268 .inherits = &ata_bmdma_port_ops,
25269
25270 .check_atapi_dma= it821x_check_atapi_dma,
25271 @@ -830,7 +830,7 @@ static struct ata_port_operations it821x
25272 .port_start = it821x_port_start,
25273 };
25274
25275 -static struct ata_port_operations it821x_rdc_port_ops = {
25276 +static const struct ata_port_operations it821x_rdc_port_ops = {
25277 .inherits = &ata_bmdma_port_ops,
25278
25279 .check_atapi_dma= it821x_check_atapi_dma,
25280 diff -urNp linux-2.6.32.45/drivers/ata/pata_ixp4xx_cf.c linux-2.6.32.45/drivers/ata/pata_ixp4xx_cf.c
25281 --- linux-2.6.32.45/drivers/ata/pata_ixp4xx_cf.c 2011-03-27 14:31:47.000000000 -0400
25282 +++ linux-2.6.32.45/drivers/ata/pata_ixp4xx_cf.c 2011-04-17 15:56:46.000000000 -0400
25283 @@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_
25284 ATA_PIO_SHT(DRV_NAME),
25285 };
25286
25287 -static struct ata_port_operations ixp4xx_port_ops = {
25288 +static const struct ata_port_operations ixp4xx_port_ops = {
25289 .inherits = &ata_sff_port_ops,
25290 .sff_data_xfer = ixp4xx_mmio_data_xfer,
25291 .cable_detect = ata_cable_40wire,
25292 diff -urNp linux-2.6.32.45/drivers/ata/pata_jmicron.c linux-2.6.32.45/drivers/ata/pata_jmicron.c
25293 --- linux-2.6.32.45/drivers/ata/pata_jmicron.c 2011-03-27 14:31:47.000000000 -0400
25294 +++ linux-2.6.32.45/drivers/ata/pata_jmicron.c 2011-04-17 15:56:46.000000000 -0400
25295 @@ -111,7 +111,7 @@ static struct scsi_host_template jmicron
25296 ATA_BMDMA_SHT(DRV_NAME),
25297 };
25298
25299 -static struct ata_port_operations jmicron_ops = {
25300 +static const struct ata_port_operations jmicron_ops = {
25301 .inherits = &ata_bmdma_port_ops,
25302 .prereset = jmicron_pre_reset,
25303 };
25304 diff -urNp linux-2.6.32.45/drivers/ata/pata_legacy.c linux-2.6.32.45/drivers/ata/pata_legacy.c
25305 --- linux-2.6.32.45/drivers/ata/pata_legacy.c 2011-03-27 14:31:47.000000000 -0400
25306 +++ linux-2.6.32.45/drivers/ata/pata_legacy.c 2011-04-17 15:56:46.000000000 -0400
25307 @@ -106,7 +106,7 @@ struct legacy_probe {
25308
25309 struct legacy_controller {
25310 const char *name;
25311 - struct ata_port_operations *ops;
25312 + const struct ata_port_operations *ops;
25313 unsigned int pio_mask;
25314 unsigned int flags;
25315 unsigned int pflags;
25316 @@ -223,12 +223,12 @@ static const struct ata_port_operations
25317 * pio_mask as well.
25318 */
25319
25320 -static struct ata_port_operations simple_port_ops = {
25321 +static const struct ata_port_operations simple_port_ops = {
25322 .inherits = &legacy_base_port_ops,
25323 .sff_data_xfer = ata_sff_data_xfer_noirq,
25324 };
25325
25326 -static struct ata_port_operations legacy_port_ops = {
25327 +static const struct ata_port_operations legacy_port_ops = {
25328 .inherits = &legacy_base_port_ops,
25329 .sff_data_xfer = ata_sff_data_xfer_noirq,
25330 .set_mode = legacy_set_mode,
25331 @@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(st
25332 return buflen;
25333 }
25334
25335 -static struct ata_port_operations pdc20230_port_ops = {
25336 +static const struct ata_port_operations pdc20230_port_ops = {
25337 .inherits = &legacy_base_port_ops,
25338 .set_piomode = pdc20230_set_piomode,
25339 .sff_data_xfer = pdc_data_xfer_vlb,
25340 @@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct a
25341 ioread8(ap->ioaddr.status_addr);
25342 }
25343
25344 -static struct ata_port_operations ht6560a_port_ops = {
25345 +static const struct ata_port_operations ht6560a_port_ops = {
25346 .inherits = &legacy_base_port_ops,
25347 .set_piomode = ht6560a_set_piomode,
25348 };
25349 @@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct a
25350 ioread8(ap->ioaddr.status_addr);
25351 }
25352
25353 -static struct ata_port_operations ht6560b_port_ops = {
25354 +static const struct ata_port_operations ht6560b_port_ops = {
25355 .inherits = &legacy_base_port_ops,
25356 .set_piomode = ht6560b_set_piomode,
25357 };
25358 @@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(stru
25359 }
25360
25361
25362 -static struct ata_port_operations opti82c611a_port_ops = {
25363 +static const struct ata_port_operations opti82c611a_port_ops = {
25364 .inherits = &legacy_base_port_ops,
25365 .set_piomode = opti82c611a_set_piomode,
25366 };
25367 @@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(
25368 return ata_sff_qc_issue(qc);
25369 }
25370
25371 -static struct ata_port_operations opti82c46x_port_ops = {
25372 +static const struct ata_port_operations opti82c46x_port_ops = {
25373 .inherits = &legacy_base_port_ops,
25374 .set_piomode = opti82c46x_set_piomode,
25375 .qc_issue = opti82c46x_qc_issue,
25376 @@ -771,20 +771,20 @@ static int qdi_port(struct platform_devi
25377 return 0;
25378 }
25379
25380 -static struct ata_port_operations qdi6500_port_ops = {
25381 +static const struct ata_port_operations qdi6500_port_ops = {
25382 .inherits = &legacy_base_port_ops,
25383 .set_piomode = qdi6500_set_piomode,
25384 .qc_issue = qdi_qc_issue,
25385 .sff_data_xfer = vlb32_data_xfer,
25386 };
25387
25388 -static struct ata_port_operations qdi6580_port_ops = {
25389 +static const struct ata_port_operations qdi6580_port_ops = {
25390 .inherits = &legacy_base_port_ops,
25391 .set_piomode = qdi6580_set_piomode,
25392 .sff_data_xfer = vlb32_data_xfer,
25393 };
25394
25395 -static struct ata_port_operations qdi6580dp_port_ops = {
25396 +static const struct ata_port_operations qdi6580dp_port_ops = {
25397 .inherits = &legacy_base_port_ops,
25398 .set_piomode = qdi6580dp_set_piomode,
25399 .sff_data_xfer = vlb32_data_xfer,
25400 @@ -855,7 +855,7 @@ static int winbond_port(struct platform_
25401 return 0;
25402 }
25403
25404 -static struct ata_port_operations winbond_port_ops = {
25405 +static const struct ata_port_operations winbond_port_ops = {
25406 .inherits = &legacy_base_port_ops,
25407 .set_piomode = winbond_set_piomode,
25408 .sff_data_xfer = vlb32_data_xfer,
25409 @@ -978,7 +978,7 @@ static __init int legacy_init_one(struct
25410 int pio_modes = controller->pio_mask;
25411 unsigned long io = probe->port;
25412 u32 mask = (1 << probe->slot);
25413 - struct ata_port_operations *ops = controller->ops;
25414 + const struct ata_port_operations *ops = controller->ops;
25415 struct legacy_data *ld = &legacy_data[probe->slot];
25416 struct ata_host *host = NULL;
25417 struct ata_port *ap;
25418 diff -urNp linux-2.6.32.45/drivers/ata/pata_marvell.c linux-2.6.32.45/drivers/ata/pata_marvell.c
25419 --- linux-2.6.32.45/drivers/ata/pata_marvell.c 2011-03-27 14:31:47.000000000 -0400
25420 +++ linux-2.6.32.45/drivers/ata/pata_marvell.c 2011-04-17 15:56:46.000000000 -0400
25421 @@ -100,7 +100,7 @@ static struct scsi_host_template marvell
25422 ATA_BMDMA_SHT(DRV_NAME),
25423 };
25424
25425 -static struct ata_port_operations marvell_ops = {
25426 +static const struct ata_port_operations marvell_ops = {
25427 .inherits = &ata_bmdma_port_ops,
25428 .cable_detect = marvell_cable_detect,
25429 .prereset = marvell_pre_reset,
25430 diff -urNp linux-2.6.32.45/drivers/ata/pata_mpc52xx.c linux-2.6.32.45/drivers/ata/pata_mpc52xx.c
25431 --- linux-2.6.32.45/drivers/ata/pata_mpc52xx.c 2011-03-27 14:31:47.000000000 -0400
25432 +++ linux-2.6.32.45/drivers/ata/pata_mpc52xx.c 2011-04-17 15:56:46.000000000 -0400
25433 @@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx
25434 ATA_PIO_SHT(DRV_NAME),
25435 };
25436
25437 -static struct ata_port_operations mpc52xx_ata_port_ops = {
25438 +static const struct ata_port_operations mpc52xx_ata_port_ops = {
25439 .inherits = &ata_bmdma_port_ops,
25440 .sff_dev_select = mpc52xx_ata_dev_select,
25441 .set_piomode = mpc52xx_ata_set_piomode,
25442 diff -urNp linux-2.6.32.45/drivers/ata/pata_mpiix.c linux-2.6.32.45/drivers/ata/pata_mpiix.c
25443 --- linux-2.6.32.45/drivers/ata/pata_mpiix.c 2011-03-27 14:31:47.000000000 -0400
25444 +++ linux-2.6.32.45/drivers/ata/pata_mpiix.c 2011-04-17 15:56:46.000000000 -0400
25445 @@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_s
25446 ATA_PIO_SHT(DRV_NAME),
25447 };
25448
25449 -static struct ata_port_operations mpiix_port_ops = {
25450 +static const struct ata_port_operations mpiix_port_ops = {
25451 .inherits = &ata_sff_port_ops,
25452 .qc_issue = mpiix_qc_issue,
25453 .cable_detect = ata_cable_40wire,
25454 diff -urNp linux-2.6.32.45/drivers/ata/pata_netcell.c linux-2.6.32.45/drivers/ata/pata_netcell.c
25455 --- linux-2.6.32.45/drivers/ata/pata_netcell.c 2011-03-27 14:31:47.000000000 -0400
25456 +++ linux-2.6.32.45/drivers/ata/pata_netcell.c 2011-04-17 15:56:46.000000000 -0400
25457 @@ -34,7 +34,7 @@ static struct scsi_host_template netcell
25458 ATA_BMDMA_SHT(DRV_NAME),
25459 };
25460
25461 -static struct ata_port_operations netcell_ops = {
25462 +static const struct ata_port_operations netcell_ops = {
25463 .inherits = &ata_bmdma_port_ops,
25464 .cable_detect = ata_cable_80wire,
25465 .read_id = netcell_read_id,
25466 diff -urNp linux-2.6.32.45/drivers/ata/pata_ninja32.c linux-2.6.32.45/drivers/ata/pata_ninja32.c
25467 --- linux-2.6.32.45/drivers/ata/pata_ninja32.c 2011-03-27 14:31:47.000000000 -0400
25468 +++ linux-2.6.32.45/drivers/ata/pata_ninja32.c 2011-04-17 15:56:46.000000000 -0400
25469 @@ -81,7 +81,7 @@ static struct scsi_host_template ninja32
25470 ATA_BMDMA_SHT(DRV_NAME),
25471 };
25472
25473 -static struct ata_port_operations ninja32_port_ops = {
25474 +static const struct ata_port_operations ninja32_port_ops = {
25475 .inherits = &ata_bmdma_port_ops,
25476 .sff_dev_select = ninja32_dev_select,
25477 .cable_detect = ata_cable_40wire,
25478 diff -urNp linux-2.6.32.45/drivers/ata/pata_ns87410.c linux-2.6.32.45/drivers/ata/pata_ns87410.c
25479 --- linux-2.6.32.45/drivers/ata/pata_ns87410.c 2011-03-27 14:31:47.000000000 -0400
25480 +++ linux-2.6.32.45/drivers/ata/pata_ns87410.c 2011-04-17 15:56:46.000000000 -0400
25481 @@ -132,7 +132,7 @@ static struct scsi_host_template ns87410
25482 ATA_PIO_SHT(DRV_NAME),
25483 };
25484
25485 -static struct ata_port_operations ns87410_port_ops = {
25486 +static const struct ata_port_operations ns87410_port_ops = {
25487 .inherits = &ata_sff_port_ops,
25488 .qc_issue = ns87410_qc_issue,
25489 .cable_detect = ata_cable_40wire,
25490 diff -urNp linux-2.6.32.45/drivers/ata/pata_ns87415.c linux-2.6.32.45/drivers/ata/pata_ns87415.c
25491 --- linux-2.6.32.45/drivers/ata/pata_ns87415.c 2011-03-27 14:31:47.000000000 -0400
25492 +++ linux-2.6.32.45/drivers/ata/pata_ns87415.c 2011-04-17 15:56:46.000000000 -0400
25493 @@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct at
25494 }
25495 #endif /* 87560 SuperIO Support */
25496
25497 -static struct ata_port_operations ns87415_pata_ops = {
25498 +static const struct ata_port_operations ns87415_pata_ops = {
25499 .inherits = &ata_bmdma_port_ops,
25500
25501 .check_atapi_dma = ns87415_check_atapi_dma,
25502 @@ -313,7 +313,7 @@ static struct ata_port_operations ns8741
25503 };
25504
25505 #if defined(CONFIG_SUPERIO)
25506 -static struct ata_port_operations ns87560_pata_ops = {
25507 +static const struct ata_port_operations ns87560_pata_ops = {
25508 .inherits = &ns87415_pata_ops,
25509 .sff_tf_read = ns87560_tf_read,
25510 .sff_check_status = ns87560_check_status,
25511 diff -urNp linux-2.6.32.45/drivers/ata/pata_octeon_cf.c linux-2.6.32.45/drivers/ata/pata_octeon_cf.c
25512 --- linux-2.6.32.45/drivers/ata/pata_octeon_cf.c 2011-03-27 14:31:47.000000000 -0400
25513 +++ linux-2.6.32.45/drivers/ata/pata_octeon_cf.c 2011-04-17 15:56:46.000000000 -0400
25514 @@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(s
25515 return 0;
25516 }
25517
25518 +/* cannot be const */
25519 static struct ata_port_operations octeon_cf_ops = {
25520 .inherits = &ata_sff_port_ops,
25521 .check_atapi_dma = octeon_cf_check_atapi_dma,
25522 diff -urNp linux-2.6.32.45/drivers/ata/pata_oldpiix.c linux-2.6.32.45/drivers/ata/pata_oldpiix.c
25523 --- linux-2.6.32.45/drivers/ata/pata_oldpiix.c 2011-03-27 14:31:47.000000000 -0400
25524 +++ linux-2.6.32.45/drivers/ata/pata_oldpiix.c 2011-04-17 15:56:46.000000000 -0400
25525 @@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix
25526 ATA_BMDMA_SHT(DRV_NAME),
25527 };
25528
25529 -static struct ata_port_operations oldpiix_pata_ops = {
25530 +static const struct ata_port_operations oldpiix_pata_ops = {
25531 .inherits = &ata_bmdma_port_ops,
25532 .qc_issue = oldpiix_qc_issue,
25533 .cable_detect = ata_cable_40wire,
25534 diff -urNp linux-2.6.32.45/drivers/ata/pata_opti.c linux-2.6.32.45/drivers/ata/pata_opti.c
25535 --- linux-2.6.32.45/drivers/ata/pata_opti.c 2011-03-27 14:31:47.000000000 -0400
25536 +++ linux-2.6.32.45/drivers/ata/pata_opti.c 2011-04-17 15:56:46.000000000 -0400
25537 @@ -152,7 +152,7 @@ static struct scsi_host_template opti_sh
25538 ATA_PIO_SHT(DRV_NAME),
25539 };
25540
25541 -static struct ata_port_operations opti_port_ops = {
25542 +static const struct ata_port_operations opti_port_ops = {
25543 .inherits = &ata_sff_port_ops,
25544 .cable_detect = ata_cable_40wire,
25545 .set_piomode = opti_set_piomode,
25546 diff -urNp linux-2.6.32.45/drivers/ata/pata_optidma.c linux-2.6.32.45/drivers/ata/pata_optidma.c
25547 --- linux-2.6.32.45/drivers/ata/pata_optidma.c 2011-03-27 14:31:47.000000000 -0400
25548 +++ linux-2.6.32.45/drivers/ata/pata_optidma.c 2011-04-17 15:56:46.000000000 -0400
25549 @@ -337,7 +337,7 @@ static struct scsi_host_template optidma
25550 ATA_BMDMA_SHT(DRV_NAME),
25551 };
25552
25553 -static struct ata_port_operations optidma_port_ops = {
25554 +static const struct ata_port_operations optidma_port_ops = {
25555 .inherits = &ata_bmdma_port_ops,
25556 .cable_detect = ata_cable_40wire,
25557 .set_piomode = optidma_set_pio_mode,
25558 @@ -346,7 +346,7 @@ static struct ata_port_operations optidm
25559 .prereset = optidma_pre_reset,
25560 };
25561
25562 -static struct ata_port_operations optiplus_port_ops = {
25563 +static const struct ata_port_operations optiplus_port_ops = {
25564 .inherits = &optidma_port_ops,
25565 .set_piomode = optiplus_set_pio_mode,
25566 .set_dmamode = optiplus_set_dma_mode,
25567 diff -urNp linux-2.6.32.45/drivers/ata/pata_palmld.c linux-2.6.32.45/drivers/ata/pata_palmld.c
25568 --- linux-2.6.32.45/drivers/ata/pata_palmld.c 2011-03-27 14:31:47.000000000 -0400
25569 +++ linux-2.6.32.45/drivers/ata/pata_palmld.c 2011-04-17 15:56:46.000000000 -0400
25570 @@ -37,7 +37,7 @@ static struct scsi_host_template palmld_
25571 ATA_PIO_SHT(DRV_NAME),
25572 };
25573
25574 -static struct ata_port_operations palmld_port_ops = {
25575 +static const struct ata_port_operations palmld_port_ops = {
25576 .inherits = &ata_sff_port_ops,
25577 .sff_data_xfer = ata_sff_data_xfer_noirq,
25578 .cable_detect = ata_cable_40wire,
25579 diff -urNp linux-2.6.32.45/drivers/ata/pata_pcmcia.c linux-2.6.32.45/drivers/ata/pata_pcmcia.c
25580 --- linux-2.6.32.45/drivers/ata/pata_pcmcia.c 2011-03-27 14:31:47.000000000 -0400
25581 +++ linux-2.6.32.45/drivers/ata/pata_pcmcia.c 2011-04-17 15:56:46.000000000 -0400
25582 @@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_
25583 ATA_PIO_SHT(DRV_NAME),
25584 };
25585
25586 -static struct ata_port_operations pcmcia_port_ops = {
25587 +static const struct ata_port_operations pcmcia_port_ops = {
25588 .inherits = &ata_sff_port_ops,
25589 .sff_data_xfer = ata_sff_data_xfer_noirq,
25590 .cable_detect = ata_cable_40wire,
25591 .set_mode = pcmcia_set_mode,
25592 };
25593
25594 -static struct ata_port_operations pcmcia_8bit_port_ops = {
25595 +static const struct ata_port_operations pcmcia_8bit_port_ops = {
25596 .inherits = &ata_sff_port_ops,
25597 .sff_data_xfer = ata_data_xfer_8bit,
25598 .cable_detect = ata_cable_40wire,
25599 @@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia
25600 unsigned long io_base, ctl_base;
25601 void __iomem *io_addr, *ctl_addr;
25602 int n_ports = 1;
25603 - struct ata_port_operations *ops = &pcmcia_port_ops;
25604 + const struct ata_port_operations *ops = &pcmcia_port_ops;
25605
25606 info = kzalloc(sizeof(*info), GFP_KERNEL);
25607 if (info == NULL)
25608 diff -urNp linux-2.6.32.45/drivers/ata/pata_pdc2027x.c linux-2.6.32.45/drivers/ata/pata_pdc2027x.c
25609 --- linux-2.6.32.45/drivers/ata/pata_pdc2027x.c 2011-03-27 14:31:47.000000000 -0400
25610 +++ linux-2.6.32.45/drivers/ata/pata_pdc2027x.c 2011-04-17 15:56:46.000000000 -0400
25611 @@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027
25612 ATA_BMDMA_SHT(DRV_NAME),
25613 };
25614
25615 -static struct ata_port_operations pdc2027x_pata100_ops = {
25616 +static const struct ata_port_operations pdc2027x_pata100_ops = {
25617 .inherits = &ata_bmdma_port_ops,
25618 .check_atapi_dma = pdc2027x_check_atapi_dma,
25619 .cable_detect = pdc2027x_cable_detect,
25620 .prereset = pdc2027x_prereset,
25621 };
25622
25623 -static struct ata_port_operations pdc2027x_pata133_ops = {
25624 +static const struct ata_port_operations pdc2027x_pata133_ops = {
25625 .inherits = &pdc2027x_pata100_ops,
25626 .mode_filter = pdc2027x_mode_filter,
25627 .set_piomode = pdc2027x_set_piomode,
25628 diff -urNp linux-2.6.32.45/drivers/ata/pata_pdc202xx_old.c linux-2.6.32.45/drivers/ata/pata_pdc202xx_old.c
25629 --- linux-2.6.32.45/drivers/ata/pata_pdc202xx_old.c 2011-03-27 14:31:47.000000000 -0400
25630 +++ linux-2.6.32.45/drivers/ata/pata_pdc202xx_old.c 2011-04-17 15:56:46.000000000 -0400
25631 @@ -274,7 +274,7 @@ static struct scsi_host_template pdc202x
25632 ATA_BMDMA_SHT(DRV_NAME),
25633 };
25634
25635 -static struct ata_port_operations pdc2024x_port_ops = {
25636 +static const struct ata_port_operations pdc2024x_port_ops = {
25637 .inherits = &ata_bmdma_port_ops,
25638
25639 .cable_detect = ata_cable_40wire,
25640 @@ -284,7 +284,7 @@ static struct ata_port_operations pdc202
25641 .sff_exec_command = pdc202xx_exec_command,
25642 };
25643
25644 -static struct ata_port_operations pdc2026x_port_ops = {
25645 +static const struct ata_port_operations pdc2026x_port_ops = {
25646 .inherits = &pdc2024x_port_ops,
25647
25648 .check_atapi_dma = pdc2026x_check_atapi_dma,
25649 diff -urNp linux-2.6.32.45/drivers/ata/pata_platform.c linux-2.6.32.45/drivers/ata/pata_platform.c
25650 --- linux-2.6.32.45/drivers/ata/pata_platform.c 2011-03-27 14:31:47.000000000 -0400
25651 +++ linux-2.6.32.45/drivers/ata/pata_platform.c 2011-04-17 15:56:46.000000000 -0400
25652 @@ -48,7 +48,7 @@ static struct scsi_host_template pata_pl
25653 ATA_PIO_SHT(DRV_NAME),
25654 };
25655
25656 -static struct ata_port_operations pata_platform_port_ops = {
25657 +static const struct ata_port_operations pata_platform_port_ops = {
25658 .inherits = &ata_sff_port_ops,
25659 .sff_data_xfer = ata_sff_data_xfer_noirq,
25660 .cable_detect = ata_cable_unknown,
25661 diff -urNp linux-2.6.32.45/drivers/ata/pata_qdi.c linux-2.6.32.45/drivers/ata/pata_qdi.c
25662 --- linux-2.6.32.45/drivers/ata/pata_qdi.c 2011-03-27 14:31:47.000000000 -0400
25663 +++ linux-2.6.32.45/drivers/ata/pata_qdi.c 2011-04-17 15:56:46.000000000 -0400
25664 @@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht
25665 ATA_PIO_SHT(DRV_NAME),
25666 };
25667
25668 -static struct ata_port_operations qdi6500_port_ops = {
25669 +static const struct ata_port_operations qdi6500_port_ops = {
25670 .inherits = &ata_sff_port_ops,
25671 .qc_issue = qdi_qc_issue,
25672 .sff_data_xfer = qdi_data_xfer,
25673 @@ -165,7 +165,7 @@ static struct ata_port_operations qdi650
25674 .set_piomode = qdi6500_set_piomode,
25675 };
25676
25677 -static struct ata_port_operations qdi6580_port_ops = {
25678 +static const struct ata_port_operations qdi6580_port_ops = {
25679 .inherits = &qdi6500_port_ops,
25680 .set_piomode = qdi6580_set_piomode,
25681 };
25682 diff -urNp linux-2.6.32.45/drivers/ata/pata_radisys.c linux-2.6.32.45/drivers/ata/pata_radisys.c
25683 --- linux-2.6.32.45/drivers/ata/pata_radisys.c 2011-03-27 14:31:47.000000000 -0400
25684 +++ linux-2.6.32.45/drivers/ata/pata_radisys.c 2011-04-17 15:56:46.000000000 -0400
25685 @@ -187,7 +187,7 @@ static struct scsi_host_template radisys
25686 ATA_BMDMA_SHT(DRV_NAME),
25687 };
25688
25689 -static struct ata_port_operations radisys_pata_ops = {
25690 +static const struct ata_port_operations radisys_pata_ops = {
25691 .inherits = &ata_bmdma_port_ops,
25692 .qc_issue = radisys_qc_issue,
25693 .cable_detect = ata_cable_unknown,
25694 diff -urNp linux-2.6.32.45/drivers/ata/pata_rb532_cf.c linux-2.6.32.45/drivers/ata/pata_rb532_cf.c
25695 --- linux-2.6.32.45/drivers/ata/pata_rb532_cf.c 2011-03-27 14:31:47.000000000 -0400
25696 +++ linux-2.6.32.45/drivers/ata/pata_rb532_cf.c 2011-04-17 15:56:46.000000000 -0400
25697 @@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handle
25698 return IRQ_HANDLED;
25699 }
25700
25701 -static struct ata_port_operations rb532_pata_port_ops = {
25702 +static const struct ata_port_operations rb532_pata_port_ops = {
25703 .inherits = &ata_sff_port_ops,
25704 .sff_data_xfer = ata_sff_data_xfer32,
25705 };
25706 diff -urNp linux-2.6.32.45/drivers/ata/pata_rdc.c linux-2.6.32.45/drivers/ata/pata_rdc.c
25707 --- linux-2.6.32.45/drivers/ata/pata_rdc.c 2011-03-27 14:31:47.000000000 -0400
25708 +++ linux-2.6.32.45/drivers/ata/pata_rdc.c 2011-04-17 15:56:46.000000000 -0400
25709 @@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_p
25710 pci_write_config_byte(dev, 0x48, udma_enable);
25711 }
25712
25713 -static struct ata_port_operations rdc_pata_ops = {
25714 +static const struct ata_port_operations rdc_pata_ops = {
25715 .inherits = &ata_bmdma32_port_ops,
25716 .cable_detect = rdc_pata_cable_detect,
25717 .set_piomode = rdc_set_piomode,
25718 diff -urNp linux-2.6.32.45/drivers/ata/pata_rz1000.c linux-2.6.32.45/drivers/ata/pata_rz1000.c
25719 --- linux-2.6.32.45/drivers/ata/pata_rz1000.c 2011-03-27 14:31:47.000000000 -0400
25720 +++ linux-2.6.32.45/drivers/ata/pata_rz1000.c 2011-04-17 15:56:46.000000000 -0400
25721 @@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_
25722 ATA_PIO_SHT(DRV_NAME),
25723 };
25724
25725 -static struct ata_port_operations rz1000_port_ops = {
25726 +static const struct ata_port_operations rz1000_port_ops = {
25727 .inherits = &ata_sff_port_ops,
25728 .cable_detect = ata_cable_40wire,
25729 .set_mode = rz1000_set_mode,
25730 diff -urNp linux-2.6.32.45/drivers/ata/pata_sc1200.c linux-2.6.32.45/drivers/ata/pata_sc1200.c
25731 --- linux-2.6.32.45/drivers/ata/pata_sc1200.c 2011-03-27 14:31:47.000000000 -0400
25732 +++ linux-2.6.32.45/drivers/ata/pata_sc1200.c 2011-04-17 15:56:46.000000000 -0400
25733 @@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_
25734 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
25735 };
25736
25737 -static struct ata_port_operations sc1200_port_ops = {
25738 +static const struct ata_port_operations sc1200_port_ops = {
25739 .inherits = &ata_bmdma_port_ops,
25740 .qc_prep = ata_sff_dumb_qc_prep,
25741 .qc_issue = sc1200_qc_issue,
25742 diff -urNp linux-2.6.32.45/drivers/ata/pata_scc.c linux-2.6.32.45/drivers/ata/pata_scc.c
25743 --- linux-2.6.32.45/drivers/ata/pata_scc.c 2011-03-27 14:31:47.000000000 -0400
25744 +++ linux-2.6.32.45/drivers/ata/pata_scc.c 2011-04-17 15:56:46.000000000 -0400
25745 @@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht
25746 ATA_BMDMA_SHT(DRV_NAME),
25747 };
25748
25749 -static struct ata_port_operations scc_pata_ops = {
25750 +static const struct ata_port_operations scc_pata_ops = {
25751 .inherits = &ata_bmdma_port_ops,
25752
25753 .set_piomode = scc_set_piomode,
25754 diff -urNp linux-2.6.32.45/drivers/ata/pata_sch.c linux-2.6.32.45/drivers/ata/pata_sch.c
25755 --- linux-2.6.32.45/drivers/ata/pata_sch.c 2011-03-27 14:31:47.000000000 -0400
25756 +++ linux-2.6.32.45/drivers/ata/pata_sch.c 2011-04-17 15:56:46.000000000 -0400
25757 @@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht
25758 ATA_BMDMA_SHT(DRV_NAME),
25759 };
25760
25761 -static struct ata_port_operations sch_pata_ops = {
25762 +static const struct ata_port_operations sch_pata_ops = {
25763 .inherits = &ata_bmdma_port_ops,
25764 .cable_detect = ata_cable_unknown,
25765 .set_piomode = sch_set_piomode,
25766 diff -urNp linux-2.6.32.45/drivers/ata/pata_serverworks.c linux-2.6.32.45/drivers/ata/pata_serverworks.c
25767 --- linux-2.6.32.45/drivers/ata/pata_serverworks.c 2011-03-27 14:31:47.000000000 -0400
25768 +++ linux-2.6.32.45/drivers/ata/pata_serverworks.c 2011-04-17 15:56:46.000000000 -0400
25769 @@ -299,7 +299,7 @@ static struct scsi_host_template serverw
25770 ATA_BMDMA_SHT(DRV_NAME),
25771 };
25772
25773 -static struct ata_port_operations serverworks_osb4_port_ops = {
25774 +static const struct ata_port_operations serverworks_osb4_port_ops = {
25775 .inherits = &ata_bmdma_port_ops,
25776 .cable_detect = serverworks_cable_detect,
25777 .mode_filter = serverworks_osb4_filter,
25778 @@ -307,7 +307,7 @@ static struct ata_port_operations server
25779 .set_dmamode = serverworks_set_dmamode,
25780 };
25781
25782 -static struct ata_port_operations serverworks_csb_port_ops = {
25783 +static const struct ata_port_operations serverworks_csb_port_ops = {
25784 .inherits = &serverworks_osb4_port_ops,
25785 .mode_filter = serverworks_csb_filter,
25786 };
25787 diff -urNp linux-2.6.32.45/drivers/ata/pata_sil680.c linux-2.6.32.45/drivers/ata/pata_sil680.c
25788 --- linux-2.6.32.45/drivers/ata/pata_sil680.c 2011-06-25 12:55:34.000000000 -0400
25789 +++ linux-2.6.32.45/drivers/ata/pata_sil680.c 2011-06-25 12:56:37.000000000 -0400
25790 @@ -194,7 +194,7 @@ static struct scsi_host_template sil680_
25791 ATA_BMDMA_SHT(DRV_NAME),
25792 };
25793
25794 -static struct ata_port_operations sil680_port_ops = {
25795 +static const struct ata_port_operations sil680_port_ops = {
25796 .inherits = &ata_bmdma32_port_ops,
25797 .cable_detect = sil680_cable_detect,
25798 .set_piomode = sil680_set_piomode,
25799 diff -urNp linux-2.6.32.45/drivers/ata/pata_sis.c linux-2.6.32.45/drivers/ata/pata_sis.c
25800 --- linux-2.6.32.45/drivers/ata/pata_sis.c 2011-03-27 14:31:47.000000000 -0400
25801 +++ linux-2.6.32.45/drivers/ata/pata_sis.c 2011-04-17 15:56:46.000000000 -0400
25802 @@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht
25803 ATA_BMDMA_SHT(DRV_NAME),
25804 };
25805
25806 -static struct ata_port_operations sis_133_for_sata_ops = {
25807 +static const struct ata_port_operations sis_133_for_sata_ops = {
25808 .inherits = &ata_bmdma_port_ops,
25809 .set_piomode = sis_133_set_piomode,
25810 .set_dmamode = sis_133_set_dmamode,
25811 .cable_detect = sis_133_cable_detect,
25812 };
25813
25814 -static struct ata_port_operations sis_base_ops = {
25815 +static const struct ata_port_operations sis_base_ops = {
25816 .inherits = &ata_bmdma_port_ops,
25817 .prereset = sis_pre_reset,
25818 };
25819
25820 -static struct ata_port_operations sis_133_ops = {
25821 +static const struct ata_port_operations sis_133_ops = {
25822 .inherits = &sis_base_ops,
25823 .set_piomode = sis_133_set_piomode,
25824 .set_dmamode = sis_133_set_dmamode,
25825 .cable_detect = sis_133_cable_detect,
25826 };
25827
25828 -static struct ata_port_operations sis_133_early_ops = {
25829 +static const struct ata_port_operations sis_133_early_ops = {
25830 .inherits = &sis_base_ops,
25831 .set_piomode = sis_100_set_piomode,
25832 .set_dmamode = sis_133_early_set_dmamode,
25833 .cable_detect = sis_66_cable_detect,
25834 };
25835
25836 -static struct ata_port_operations sis_100_ops = {
25837 +static const struct ata_port_operations sis_100_ops = {
25838 .inherits = &sis_base_ops,
25839 .set_piomode = sis_100_set_piomode,
25840 .set_dmamode = sis_100_set_dmamode,
25841 .cable_detect = sis_66_cable_detect,
25842 };
25843
25844 -static struct ata_port_operations sis_66_ops = {
25845 +static const struct ata_port_operations sis_66_ops = {
25846 .inherits = &sis_base_ops,
25847 .set_piomode = sis_old_set_piomode,
25848 .set_dmamode = sis_66_set_dmamode,
25849 .cable_detect = sis_66_cable_detect,
25850 };
25851
25852 -static struct ata_port_operations sis_old_ops = {
25853 +static const struct ata_port_operations sis_old_ops = {
25854 .inherits = &sis_base_ops,
25855 .set_piomode = sis_old_set_piomode,
25856 .set_dmamode = sis_old_set_dmamode,
25857 diff -urNp linux-2.6.32.45/drivers/ata/pata_sl82c105.c linux-2.6.32.45/drivers/ata/pata_sl82c105.c
25858 --- linux-2.6.32.45/drivers/ata/pata_sl82c105.c 2011-03-27 14:31:47.000000000 -0400
25859 +++ linux-2.6.32.45/drivers/ata/pata_sl82c105.c 2011-04-17 15:56:46.000000000 -0400
25860 @@ -231,7 +231,7 @@ static struct scsi_host_template sl82c10
25861 ATA_BMDMA_SHT(DRV_NAME),
25862 };
25863
25864 -static struct ata_port_operations sl82c105_port_ops = {
25865 +static const struct ata_port_operations sl82c105_port_ops = {
25866 .inherits = &ata_bmdma_port_ops,
25867 .qc_defer = sl82c105_qc_defer,
25868 .bmdma_start = sl82c105_bmdma_start,
25869 diff -urNp linux-2.6.32.45/drivers/ata/pata_triflex.c linux-2.6.32.45/drivers/ata/pata_triflex.c
25870 --- linux-2.6.32.45/drivers/ata/pata_triflex.c 2011-03-27 14:31:47.000000000 -0400
25871 +++ linux-2.6.32.45/drivers/ata/pata_triflex.c 2011-04-17 15:56:46.000000000 -0400
25872 @@ -178,7 +178,7 @@ static struct scsi_host_template triflex
25873 ATA_BMDMA_SHT(DRV_NAME),
25874 };
25875
25876 -static struct ata_port_operations triflex_port_ops = {
25877 +static const struct ata_port_operations triflex_port_ops = {
25878 .inherits = &ata_bmdma_port_ops,
25879 .bmdma_start = triflex_bmdma_start,
25880 .bmdma_stop = triflex_bmdma_stop,
25881 diff -urNp linux-2.6.32.45/drivers/ata/pata_via.c linux-2.6.32.45/drivers/ata/pata_via.c
25882 --- linux-2.6.32.45/drivers/ata/pata_via.c 2011-03-27 14:31:47.000000000 -0400
25883 +++ linux-2.6.32.45/drivers/ata/pata_via.c 2011-04-17 15:56:46.000000000 -0400
25884 @@ -419,7 +419,7 @@ static struct scsi_host_template via_sht
25885 ATA_BMDMA_SHT(DRV_NAME),
25886 };
25887
25888 -static struct ata_port_operations via_port_ops = {
25889 +static const struct ata_port_operations via_port_ops = {
25890 .inherits = &ata_bmdma_port_ops,
25891 .cable_detect = via_cable_detect,
25892 .set_piomode = via_set_piomode,
25893 @@ -429,7 +429,7 @@ static struct ata_port_operations via_po
25894 .port_start = via_port_start,
25895 };
25896
25897 -static struct ata_port_operations via_port_ops_noirq = {
25898 +static const struct ata_port_operations via_port_ops_noirq = {
25899 .inherits = &via_port_ops,
25900 .sff_data_xfer = ata_sff_data_xfer_noirq,
25901 };
25902 diff -urNp linux-2.6.32.45/drivers/ata/pata_winbond.c linux-2.6.32.45/drivers/ata/pata_winbond.c
25903 --- linux-2.6.32.45/drivers/ata/pata_winbond.c 2011-03-27 14:31:47.000000000 -0400
25904 +++ linux-2.6.32.45/drivers/ata/pata_winbond.c 2011-04-17 15:56:46.000000000 -0400
25905 @@ -125,7 +125,7 @@ static struct scsi_host_template winbond
25906 ATA_PIO_SHT(DRV_NAME),
25907 };
25908
25909 -static struct ata_port_operations winbond_port_ops = {
25910 +static const struct ata_port_operations winbond_port_ops = {
25911 .inherits = &ata_sff_port_ops,
25912 .sff_data_xfer = winbond_data_xfer,
25913 .cable_detect = ata_cable_40wire,
25914 diff -urNp linux-2.6.32.45/drivers/ata/pdc_adma.c linux-2.6.32.45/drivers/ata/pdc_adma.c
25915 --- linux-2.6.32.45/drivers/ata/pdc_adma.c 2011-03-27 14:31:47.000000000 -0400
25916 +++ linux-2.6.32.45/drivers/ata/pdc_adma.c 2011-04-17 15:56:46.000000000 -0400
25917 @@ -145,7 +145,7 @@ static struct scsi_host_template adma_at
25918 .dma_boundary = ADMA_DMA_BOUNDARY,
25919 };
25920
25921 -static struct ata_port_operations adma_ata_ops = {
25922 +static const struct ata_port_operations adma_ata_ops = {
25923 .inherits = &ata_sff_port_ops,
25924
25925 .lost_interrupt = ATA_OP_NULL,
25926 diff -urNp linux-2.6.32.45/drivers/ata/sata_fsl.c linux-2.6.32.45/drivers/ata/sata_fsl.c
25927 --- linux-2.6.32.45/drivers/ata/sata_fsl.c 2011-03-27 14:31:47.000000000 -0400
25928 +++ linux-2.6.32.45/drivers/ata/sata_fsl.c 2011-04-17 15:56:46.000000000 -0400
25929 @@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fs
25930 .dma_boundary = ATA_DMA_BOUNDARY,
25931 };
25932
25933 -static struct ata_port_operations sata_fsl_ops = {
25934 +static const struct ata_port_operations sata_fsl_ops = {
25935 .inherits = &sata_pmp_port_ops,
25936
25937 .qc_defer = ata_std_qc_defer,
25938 diff -urNp linux-2.6.32.45/drivers/ata/sata_inic162x.c linux-2.6.32.45/drivers/ata/sata_inic162x.c
25939 --- linux-2.6.32.45/drivers/ata/sata_inic162x.c 2011-03-27 14:31:47.000000000 -0400
25940 +++ linux-2.6.32.45/drivers/ata/sata_inic162x.c 2011-04-17 15:56:46.000000000 -0400
25941 @@ -721,7 +721,7 @@ static int inic_port_start(struct ata_po
25942 return 0;
25943 }
25944
25945 -static struct ata_port_operations inic_port_ops = {
25946 +static const struct ata_port_operations inic_port_ops = {
25947 .inherits = &sata_port_ops,
25948
25949 .check_atapi_dma = inic_check_atapi_dma,
25950 diff -urNp linux-2.6.32.45/drivers/ata/sata_mv.c linux-2.6.32.45/drivers/ata/sata_mv.c
25951 --- linux-2.6.32.45/drivers/ata/sata_mv.c 2011-03-27 14:31:47.000000000 -0400
25952 +++ linux-2.6.32.45/drivers/ata/sata_mv.c 2011-04-17 15:56:46.000000000 -0400
25953 @@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht
25954 .dma_boundary = MV_DMA_BOUNDARY,
25955 };
25956
25957 -static struct ata_port_operations mv5_ops = {
25958 +static const struct ata_port_operations mv5_ops = {
25959 .inherits = &ata_sff_port_ops,
25960
25961 .lost_interrupt = ATA_OP_NULL,
25962 @@ -678,7 +678,7 @@ static struct ata_port_operations mv5_op
25963 .port_stop = mv_port_stop,
25964 };
25965
25966 -static struct ata_port_operations mv6_ops = {
25967 +static const struct ata_port_operations mv6_ops = {
25968 .inherits = &mv5_ops,
25969 .dev_config = mv6_dev_config,
25970 .scr_read = mv_scr_read,
25971 @@ -698,7 +698,7 @@ static struct ata_port_operations mv6_op
25972 .bmdma_status = mv_bmdma_status,
25973 };
25974
25975 -static struct ata_port_operations mv_iie_ops = {
25976 +static const struct ata_port_operations mv_iie_ops = {
25977 .inherits = &mv6_ops,
25978 .dev_config = ATA_OP_NULL,
25979 .qc_prep = mv_qc_prep_iie,
25980 diff -urNp linux-2.6.32.45/drivers/ata/sata_nv.c linux-2.6.32.45/drivers/ata/sata_nv.c
25981 --- linux-2.6.32.45/drivers/ata/sata_nv.c 2011-03-27 14:31:47.000000000 -0400
25982 +++ linux-2.6.32.45/drivers/ata/sata_nv.c 2011-04-17 15:56:46.000000000 -0400
25983 @@ -464,7 +464,7 @@ static struct scsi_host_template nv_swnc
25984 * cases. Define nv_hardreset() which only kicks in for post-boot
25985 * probing and use it for all variants.
25986 */
25987 -static struct ata_port_operations nv_generic_ops = {
25988 +static const struct ata_port_operations nv_generic_ops = {
25989 .inherits = &ata_bmdma_port_ops,
25990 .lost_interrupt = ATA_OP_NULL,
25991 .scr_read = nv_scr_read,
25992 @@ -472,20 +472,20 @@ static struct ata_port_operations nv_gen
25993 .hardreset = nv_hardreset,
25994 };
25995
25996 -static struct ata_port_operations nv_nf2_ops = {
25997 +static const struct ata_port_operations nv_nf2_ops = {
25998 .inherits = &nv_generic_ops,
25999 .freeze = nv_nf2_freeze,
26000 .thaw = nv_nf2_thaw,
26001 };
26002
26003 -static struct ata_port_operations nv_ck804_ops = {
26004 +static const struct ata_port_operations nv_ck804_ops = {
26005 .inherits = &nv_generic_ops,
26006 .freeze = nv_ck804_freeze,
26007 .thaw = nv_ck804_thaw,
26008 .host_stop = nv_ck804_host_stop,
26009 };
26010
26011 -static struct ata_port_operations nv_adma_ops = {
26012 +static const struct ata_port_operations nv_adma_ops = {
26013 .inherits = &nv_ck804_ops,
26014
26015 .check_atapi_dma = nv_adma_check_atapi_dma,
26016 @@ -509,7 +509,7 @@ static struct ata_port_operations nv_adm
26017 .host_stop = nv_adma_host_stop,
26018 };
26019
26020 -static struct ata_port_operations nv_swncq_ops = {
26021 +static const struct ata_port_operations nv_swncq_ops = {
26022 .inherits = &nv_generic_ops,
26023
26024 .qc_defer = ata_std_qc_defer,
26025 diff -urNp linux-2.6.32.45/drivers/ata/sata_promise.c linux-2.6.32.45/drivers/ata/sata_promise.c
26026 --- linux-2.6.32.45/drivers/ata/sata_promise.c 2011-03-27 14:31:47.000000000 -0400
26027 +++ linux-2.6.32.45/drivers/ata/sata_promise.c 2011-04-17 15:56:46.000000000 -0400
26028 @@ -195,7 +195,7 @@ static const struct ata_port_operations
26029 .error_handler = pdc_error_handler,
26030 };
26031
26032 -static struct ata_port_operations pdc_sata_ops = {
26033 +static const struct ata_port_operations pdc_sata_ops = {
26034 .inherits = &pdc_common_ops,
26035 .cable_detect = pdc_sata_cable_detect,
26036 .freeze = pdc_sata_freeze,
26037 @@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sa
26038
26039 /* First-generation chips need a more restrictive ->check_atapi_dma op,
26040 and ->freeze/thaw that ignore the hotplug controls. */
26041 -static struct ata_port_operations pdc_old_sata_ops = {
26042 +static const struct ata_port_operations pdc_old_sata_ops = {
26043 .inherits = &pdc_sata_ops,
26044 .freeze = pdc_freeze,
26045 .thaw = pdc_thaw,
26046 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
26047 };
26048
26049 -static struct ata_port_operations pdc_pata_ops = {
26050 +static const struct ata_port_operations pdc_pata_ops = {
26051 .inherits = &pdc_common_ops,
26052 .cable_detect = pdc_pata_cable_detect,
26053 .freeze = pdc_freeze,
26054 diff -urNp linux-2.6.32.45/drivers/ata/sata_qstor.c linux-2.6.32.45/drivers/ata/sata_qstor.c
26055 --- linux-2.6.32.45/drivers/ata/sata_qstor.c 2011-03-27 14:31:47.000000000 -0400
26056 +++ linux-2.6.32.45/drivers/ata/sata_qstor.c 2011-04-17 15:56:46.000000000 -0400
26057 @@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_
26058 .dma_boundary = QS_DMA_BOUNDARY,
26059 };
26060
26061 -static struct ata_port_operations qs_ata_ops = {
26062 +static const struct ata_port_operations qs_ata_ops = {
26063 .inherits = &ata_sff_port_ops,
26064
26065 .check_atapi_dma = qs_check_atapi_dma,
26066 diff -urNp linux-2.6.32.45/drivers/ata/sata_sil24.c linux-2.6.32.45/drivers/ata/sata_sil24.c
26067 --- linux-2.6.32.45/drivers/ata/sata_sil24.c 2011-03-27 14:31:47.000000000 -0400
26068 +++ linux-2.6.32.45/drivers/ata/sata_sil24.c 2011-04-17 15:56:46.000000000 -0400
26069 @@ -388,7 +388,7 @@ static struct scsi_host_template sil24_s
26070 .dma_boundary = ATA_DMA_BOUNDARY,
26071 };
26072
26073 -static struct ata_port_operations sil24_ops = {
26074 +static const struct ata_port_operations sil24_ops = {
26075 .inherits = &sata_pmp_port_ops,
26076
26077 .qc_defer = sil24_qc_defer,
26078 diff -urNp linux-2.6.32.45/drivers/ata/sata_sil.c linux-2.6.32.45/drivers/ata/sata_sil.c
26079 --- linux-2.6.32.45/drivers/ata/sata_sil.c 2011-03-27 14:31:47.000000000 -0400
26080 +++ linux-2.6.32.45/drivers/ata/sata_sil.c 2011-04-17 15:56:46.000000000 -0400
26081 @@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht
26082 .sg_tablesize = ATA_MAX_PRD
26083 };
26084
26085 -static struct ata_port_operations sil_ops = {
26086 +static const struct ata_port_operations sil_ops = {
26087 .inherits = &ata_bmdma32_port_ops,
26088 .dev_config = sil_dev_config,
26089 .set_mode = sil_set_mode,
26090 diff -urNp linux-2.6.32.45/drivers/ata/sata_sis.c linux-2.6.32.45/drivers/ata/sata_sis.c
26091 --- linux-2.6.32.45/drivers/ata/sata_sis.c 2011-03-27 14:31:47.000000000 -0400
26092 +++ linux-2.6.32.45/drivers/ata/sata_sis.c 2011-04-17 15:56:46.000000000 -0400
26093 @@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht
26094 ATA_BMDMA_SHT(DRV_NAME),
26095 };
26096
26097 -static struct ata_port_operations sis_ops = {
26098 +static const struct ata_port_operations sis_ops = {
26099 .inherits = &ata_bmdma_port_ops,
26100 .scr_read = sis_scr_read,
26101 .scr_write = sis_scr_write,
26102 diff -urNp linux-2.6.32.45/drivers/ata/sata_svw.c linux-2.6.32.45/drivers/ata/sata_svw.c
26103 --- linux-2.6.32.45/drivers/ata/sata_svw.c 2011-03-27 14:31:47.000000000 -0400
26104 +++ linux-2.6.32.45/drivers/ata/sata_svw.c 2011-04-17 15:56:46.000000000 -0400
26105 @@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata
26106 };
26107
26108
26109 -static struct ata_port_operations k2_sata_ops = {
26110 +static const struct ata_port_operations k2_sata_ops = {
26111 .inherits = &ata_bmdma_port_ops,
26112 .sff_tf_load = k2_sata_tf_load,
26113 .sff_tf_read = k2_sata_tf_read,
26114 diff -urNp linux-2.6.32.45/drivers/ata/sata_sx4.c linux-2.6.32.45/drivers/ata/sata_sx4.c
26115 --- linux-2.6.32.45/drivers/ata/sata_sx4.c 2011-03-27 14:31:47.000000000 -0400
26116 +++ linux-2.6.32.45/drivers/ata/sata_sx4.c 2011-04-17 15:56:46.000000000 -0400
26117 @@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sat
26118 };
26119
26120 /* TODO: inherit from base port_ops after converting to new EH */
26121 -static struct ata_port_operations pdc_20621_ops = {
26122 +static const struct ata_port_operations pdc_20621_ops = {
26123 .inherits = &ata_sff_port_ops,
26124
26125 .check_atapi_dma = pdc_check_atapi_dma,
26126 diff -urNp linux-2.6.32.45/drivers/ata/sata_uli.c linux-2.6.32.45/drivers/ata/sata_uli.c
26127 --- linux-2.6.32.45/drivers/ata/sata_uli.c 2011-03-27 14:31:47.000000000 -0400
26128 +++ linux-2.6.32.45/drivers/ata/sata_uli.c 2011-04-17 15:56:46.000000000 -0400
26129 @@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht
26130 ATA_BMDMA_SHT(DRV_NAME),
26131 };
26132
26133 -static struct ata_port_operations uli_ops = {
26134 +static const struct ata_port_operations uli_ops = {
26135 .inherits = &ata_bmdma_port_ops,
26136 .scr_read = uli_scr_read,
26137 .scr_write = uli_scr_write,
26138 diff -urNp linux-2.6.32.45/drivers/ata/sata_via.c linux-2.6.32.45/drivers/ata/sata_via.c
26139 --- linux-2.6.32.45/drivers/ata/sata_via.c 2011-05-10 22:12:01.000000000 -0400
26140 +++ linux-2.6.32.45/drivers/ata/sata_via.c 2011-05-10 22:15:08.000000000 -0400
26141 @@ -115,32 +115,32 @@ static struct scsi_host_template svia_sh
26142 ATA_BMDMA_SHT(DRV_NAME),
26143 };
26144
26145 -static struct ata_port_operations svia_base_ops = {
26146 +static const struct ata_port_operations svia_base_ops = {
26147 .inherits = &ata_bmdma_port_ops,
26148 .sff_tf_load = svia_tf_load,
26149 };
26150
26151 -static struct ata_port_operations vt6420_sata_ops = {
26152 +static const struct ata_port_operations vt6420_sata_ops = {
26153 .inherits = &svia_base_ops,
26154 .freeze = svia_noop_freeze,
26155 .prereset = vt6420_prereset,
26156 .bmdma_start = vt6420_bmdma_start,
26157 };
26158
26159 -static struct ata_port_operations vt6421_pata_ops = {
26160 +static const struct ata_port_operations vt6421_pata_ops = {
26161 .inherits = &svia_base_ops,
26162 .cable_detect = vt6421_pata_cable_detect,
26163 .set_piomode = vt6421_set_pio_mode,
26164 .set_dmamode = vt6421_set_dma_mode,
26165 };
26166
26167 -static struct ata_port_operations vt6421_sata_ops = {
26168 +static const struct ata_port_operations vt6421_sata_ops = {
26169 .inherits = &svia_base_ops,
26170 .scr_read = svia_scr_read,
26171 .scr_write = svia_scr_write,
26172 };
26173
26174 -static struct ata_port_operations vt8251_ops = {
26175 +static const struct ata_port_operations vt8251_ops = {
26176 .inherits = &svia_base_ops,
26177 .hardreset = sata_std_hardreset,
26178 .scr_read = vt8251_scr_read,
26179 diff -urNp linux-2.6.32.45/drivers/ata/sata_vsc.c linux-2.6.32.45/drivers/ata/sata_vsc.c
26180 --- linux-2.6.32.45/drivers/ata/sata_vsc.c 2011-03-27 14:31:47.000000000 -0400
26181 +++ linux-2.6.32.45/drivers/ata/sata_vsc.c 2011-04-17 15:56:46.000000000 -0400
26182 @@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sat
26183 };
26184
26185
26186 -static struct ata_port_operations vsc_sata_ops = {
26187 +static const struct ata_port_operations vsc_sata_ops = {
26188 .inherits = &ata_bmdma_port_ops,
26189 /* The IRQ handling is not quite standard SFF behaviour so we
26190 cannot use the default lost interrupt handler */
26191 diff -urNp linux-2.6.32.45/drivers/atm/adummy.c linux-2.6.32.45/drivers/atm/adummy.c
26192 --- linux-2.6.32.45/drivers/atm/adummy.c 2011-03-27 14:31:47.000000000 -0400
26193 +++ linux-2.6.32.45/drivers/atm/adummy.c 2011-04-17 15:56:46.000000000 -0400
26194 @@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct
26195 vcc->pop(vcc, skb);
26196 else
26197 dev_kfree_skb_any(skb);
26198 - atomic_inc(&vcc->stats->tx);
26199 + atomic_inc_unchecked(&vcc->stats->tx);
26200
26201 return 0;
26202 }
26203 diff -urNp linux-2.6.32.45/drivers/atm/ambassador.c linux-2.6.32.45/drivers/atm/ambassador.c
26204 --- linux-2.6.32.45/drivers/atm/ambassador.c 2011-03-27 14:31:47.000000000 -0400
26205 +++ linux-2.6.32.45/drivers/atm/ambassador.c 2011-04-17 15:56:46.000000000 -0400
26206 @@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev,
26207 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
26208
26209 // VC layer stats
26210 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26211 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26212
26213 // free the descriptor
26214 kfree (tx_descr);
26215 @@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev,
26216 dump_skb ("<<<", vc, skb);
26217
26218 // VC layer stats
26219 - atomic_inc(&atm_vcc->stats->rx);
26220 + atomic_inc_unchecked(&atm_vcc->stats->rx);
26221 __net_timestamp(skb);
26222 // end of our responsability
26223 atm_vcc->push (atm_vcc, skb);
26224 @@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev,
26225 } else {
26226 PRINTK (KERN_INFO, "dropped over-size frame");
26227 // should we count this?
26228 - atomic_inc(&atm_vcc->stats->rx_drop);
26229 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26230 }
26231
26232 } else {
26233 @@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * at
26234 }
26235
26236 if (check_area (skb->data, skb->len)) {
26237 - atomic_inc(&atm_vcc->stats->tx_err);
26238 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
26239 return -ENOMEM; // ?
26240 }
26241
26242 diff -urNp linux-2.6.32.45/drivers/atm/atmtcp.c linux-2.6.32.45/drivers/atm/atmtcp.c
26243 --- linux-2.6.32.45/drivers/atm/atmtcp.c 2011-03-27 14:31:47.000000000 -0400
26244 +++ linux-2.6.32.45/drivers/atm/atmtcp.c 2011-04-17 15:56:46.000000000 -0400
26245 @@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc
26246 if (vcc->pop) vcc->pop(vcc,skb);
26247 else dev_kfree_skb(skb);
26248 if (dev_data) return 0;
26249 - atomic_inc(&vcc->stats->tx_err);
26250 + atomic_inc_unchecked(&vcc->stats->tx_err);
26251 return -ENOLINK;
26252 }
26253 size = skb->len+sizeof(struct atmtcp_hdr);
26254 @@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc
26255 if (!new_skb) {
26256 if (vcc->pop) vcc->pop(vcc,skb);
26257 else dev_kfree_skb(skb);
26258 - atomic_inc(&vcc->stats->tx_err);
26259 + atomic_inc_unchecked(&vcc->stats->tx_err);
26260 return -ENOBUFS;
26261 }
26262 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
26263 @@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc
26264 if (vcc->pop) vcc->pop(vcc,skb);
26265 else dev_kfree_skb(skb);
26266 out_vcc->push(out_vcc,new_skb);
26267 - atomic_inc(&vcc->stats->tx);
26268 - atomic_inc(&out_vcc->stats->rx);
26269 + atomic_inc_unchecked(&vcc->stats->tx);
26270 + atomic_inc_unchecked(&out_vcc->stats->rx);
26271 return 0;
26272 }
26273
26274 @@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc
26275 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
26276 read_unlock(&vcc_sklist_lock);
26277 if (!out_vcc) {
26278 - atomic_inc(&vcc->stats->tx_err);
26279 + atomic_inc_unchecked(&vcc->stats->tx_err);
26280 goto done;
26281 }
26282 skb_pull(skb,sizeof(struct atmtcp_hdr));
26283 @@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc
26284 __net_timestamp(new_skb);
26285 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
26286 out_vcc->push(out_vcc,new_skb);
26287 - atomic_inc(&vcc->stats->tx);
26288 - atomic_inc(&out_vcc->stats->rx);
26289 + atomic_inc_unchecked(&vcc->stats->tx);
26290 + atomic_inc_unchecked(&out_vcc->stats->rx);
26291 done:
26292 if (vcc->pop) vcc->pop(vcc,skb);
26293 else dev_kfree_skb(skb);
26294 diff -urNp linux-2.6.32.45/drivers/atm/eni.c linux-2.6.32.45/drivers/atm/eni.c
26295 --- linux-2.6.32.45/drivers/atm/eni.c 2011-03-27 14:31:47.000000000 -0400
26296 +++ linux-2.6.32.45/drivers/atm/eni.c 2011-04-17 15:56:46.000000000 -0400
26297 @@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
26298 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
26299 vcc->dev->number);
26300 length = 0;
26301 - atomic_inc(&vcc->stats->rx_err);
26302 + atomic_inc_unchecked(&vcc->stats->rx_err);
26303 }
26304 else {
26305 length = ATM_CELL_SIZE-1; /* no HEC */
26306 @@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
26307 size);
26308 }
26309 eff = length = 0;
26310 - atomic_inc(&vcc->stats->rx_err);
26311 + atomic_inc_unchecked(&vcc->stats->rx_err);
26312 }
26313 else {
26314 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
26315 @@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
26316 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
26317 vcc->dev->number,vcc->vci,length,size << 2,descr);
26318 length = eff = 0;
26319 - atomic_inc(&vcc->stats->rx_err);
26320 + atomic_inc_unchecked(&vcc->stats->rx_err);
26321 }
26322 }
26323 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
26324 @@ -770,7 +770,7 @@ rx_dequeued++;
26325 vcc->push(vcc,skb);
26326 pushed++;
26327 }
26328 - atomic_inc(&vcc->stats->rx);
26329 + atomic_inc_unchecked(&vcc->stats->rx);
26330 }
26331 wake_up(&eni_dev->rx_wait);
26332 }
26333 @@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *d
26334 PCI_DMA_TODEVICE);
26335 if (vcc->pop) vcc->pop(vcc,skb);
26336 else dev_kfree_skb_irq(skb);
26337 - atomic_inc(&vcc->stats->tx);
26338 + atomic_inc_unchecked(&vcc->stats->tx);
26339 wake_up(&eni_dev->tx_wait);
26340 dma_complete++;
26341 }
26342 diff -urNp linux-2.6.32.45/drivers/atm/firestream.c linux-2.6.32.45/drivers/atm/firestream.c
26343 --- linux-2.6.32.45/drivers/atm/firestream.c 2011-03-27 14:31:47.000000000 -0400
26344 +++ linux-2.6.32.45/drivers/atm/firestream.c 2011-04-17 15:56:46.000000000 -0400
26345 @@ -748,7 +748,7 @@ static void process_txdone_queue (struct
26346 }
26347 }
26348
26349 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26350 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26351
26352 fs_dprintk (FS_DEBUG_TXMEM, "i");
26353 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
26354 @@ -815,7 +815,7 @@ static void process_incoming (struct fs_
26355 #endif
26356 skb_put (skb, qe->p1 & 0xffff);
26357 ATM_SKB(skb)->vcc = atm_vcc;
26358 - atomic_inc(&atm_vcc->stats->rx);
26359 + atomic_inc_unchecked(&atm_vcc->stats->rx);
26360 __net_timestamp(skb);
26361 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
26362 atm_vcc->push (atm_vcc, skb);
26363 @@ -836,12 +836,12 @@ static void process_incoming (struct fs_
26364 kfree (pe);
26365 }
26366 if (atm_vcc)
26367 - atomic_inc(&atm_vcc->stats->rx_drop);
26368 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26369 break;
26370 case 0x1f: /* Reassembly abort: no buffers. */
26371 /* Silently increment error counter. */
26372 if (atm_vcc)
26373 - atomic_inc(&atm_vcc->stats->rx_drop);
26374 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26375 break;
26376 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
26377 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
26378 diff -urNp linux-2.6.32.45/drivers/atm/fore200e.c linux-2.6.32.45/drivers/atm/fore200e.c
26379 --- linux-2.6.32.45/drivers/atm/fore200e.c 2011-03-27 14:31:47.000000000 -0400
26380 +++ linux-2.6.32.45/drivers/atm/fore200e.c 2011-04-17 15:56:46.000000000 -0400
26381 @@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200
26382 #endif
26383 /* check error condition */
26384 if (*entry->status & STATUS_ERROR)
26385 - atomic_inc(&vcc->stats->tx_err);
26386 + atomic_inc_unchecked(&vcc->stats->tx_err);
26387 else
26388 - atomic_inc(&vcc->stats->tx);
26389 + atomic_inc_unchecked(&vcc->stats->tx);
26390 }
26391 }
26392
26393 @@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore2
26394 if (skb == NULL) {
26395 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
26396
26397 - atomic_inc(&vcc->stats->rx_drop);
26398 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26399 return -ENOMEM;
26400 }
26401
26402 @@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore2
26403
26404 dev_kfree_skb_any(skb);
26405
26406 - atomic_inc(&vcc->stats->rx_drop);
26407 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26408 return -ENOMEM;
26409 }
26410
26411 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
26412
26413 vcc->push(vcc, skb);
26414 - atomic_inc(&vcc->stats->rx);
26415 + atomic_inc_unchecked(&vcc->stats->rx);
26416
26417 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
26418
26419 @@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200
26420 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
26421 fore200e->atm_dev->number,
26422 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
26423 - atomic_inc(&vcc->stats->rx_err);
26424 + atomic_inc_unchecked(&vcc->stats->rx_err);
26425 }
26426 }
26427
26428 @@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struc
26429 goto retry_here;
26430 }
26431
26432 - atomic_inc(&vcc->stats->tx_err);
26433 + atomic_inc_unchecked(&vcc->stats->tx_err);
26434
26435 fore200e->tx_sat++;
26436 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
26437 diff -urNp linux-2.6.32.45/drivers/atm/he.c linux-2.6.32.45/drivers/atm/he.c
26438 --- linux-2.6.32.45/drivers/atm/he.c 2011-03-27 14:31:47.000000000 -0400
26439 +++ linux-2.6.32.45/drivers/atm/he.c 2011-04-17 15:56:46.000000000 -0400
26440 @@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, i
26441
26442 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
26443 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
26444 - atomic_inc(&vcc->stats->rx_drop);
26445 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26446 goto return_host_buffers;
26447 }
26448
26449 @@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, i
26450 RBRQ_LEN_ERR(he_dev->rbrq_head)
26451 ? "LEN_ERR" : "",
26452 vcc->vpi, vcc->vci);
26453 - atomic_inc(&vcc->stats->rx_err);
26454 + atomic_inc_unchecked(&vcc->stats->rx_err);
26455 goto return_host_buffers;
26456 }
26457
26458 @@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, i
26459 vcc->push(vcc, skb);
26460 spin_lock(&he_dev->global_lock);
26461
26462 - atomic_inc(&vcc->stats->rx);
26463 + atomic_inc_unchecked(&vcc->stats->rx);
26464
26465 return_host_buffers:
26466 ++pdus_assembled;
26467 @@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
26468 tpd->vcc->pop(tpd->vcc, tpd->skb);
26469 else
26470 dev_kfree_skb_any(tpd->skb);
26471 - atomic_inc(&tpd->vcc->stats->tx_err);
26472 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
26473 }
26474 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
26475 return;
26476 @@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26477 vcc->pop(vcc, skb);
26478 else
26479 dev_kfree_skb_any(skb);
26480 - atomic_inc(&vcc->stats->tx_err);
26481 + atomic_inc_unchecked(&vcc->stats->tx_err);
26482 return -EINVAL;
26483 }
26484
26485 @@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26486 vcc->pop(vcc, skb);
26487 else
26488 dev_kfree_skb_any(skb);
26489 - atomic_inc(&vcc->stats->tx_err);
26490 + atomic_inc_unchecked(&vcc->stats->tx_err);
26491 return -EINVAL;
26492 }
26493 #endif
26494 @@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26495 vcc->pop(vcc, skb);
26496 else
26497 dev_kfree_skb_any(skb);
26498 - atomic_inc(&vcc->stats->tx_err);
26499 + atomic_inc_unchecked(&vcc->stats->tx_err);
26500 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26501 return -ENOMEM;
26502 }
26503 @@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26504 vcc->pop(vcc, skb);
26505 else
26506 dev_kfree_skb_any(skb);
26507 - atomic_inc(&vcc->stats->tx_err);
26508 + atomic_inc_unchecked(&vcc->stats->tx_err);
26509 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26510 return -ENOMEM;
26511 }
26512 @@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26513 __enqueue_tpd(he_dev, tpd, cid);
26514 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26515
26516 - atomic_inc(&vcc->stats->tx);
26517 + atomic_inc_unchecked(&vcc->stats->tx);
26518
26519 return 0;
26520 }
26521 diff -urNp linux-2.6.32.45/drivers/atm/horizon.c linux-2.6.32.45/drivers/atm/horizon.c
26522 --- linux-2.6.32.45/drivers/atm/horizon.c 2011-03-27 14:31:47.000000000 -0400
26523 +++ linux-2.6.32.45/drivers/atm/horizon.c 2011-04-17 15:56:46.000000000 -0400
26524 @@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev,
26525 {
26526 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
26527 // VC layer stats
26528 - atomic_inc(&vcc->stats->rx);
26529 + atomic_inc_unchecked(&vcc->stats->rx);
26530 __net_timestamp(skb);
26531 // end of our responsability
26532 vcc->push (vcc, skb);
26533 @@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const
26534 dev->tx_iovec = NULL;
26535
26536 // VC layer stats
26537 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26538 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26539
26540 // free the skb
26541 hrz_kfree_skb (skb);
26542 diff -urNp linux-2.6.32.45/drivers/atm/idt77252.c linux-2.6.32.45/drivers/atm/idt77252.c
26543 --- linux-2.6.32.45/drivers/atm/idt77252.c 2011-03-27 14:31:47.000000000 -0400
26544 +++ linux-2.6.32.45/drivers/atm/idt77252.c 2011-04-17 15:56:46.000000000 -0400
26545 @@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, str
26546 else
26547 dev_kfree_skb(skb);
26548
26549 - atomic_inc(&vcc->stats->tx);
26550 + atomic_inc_unchecked(&vcc->stats->tx);
26551 }
26552
26553 atomic_dec(&scq->used);
26554 @@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, st
26555 if ((sb = dev_alloc_skb(64)) == NULL) {
26556 printk("%s: Can't allocate buffers for aal0.\n",
26557 card->name);
26558 - atomic_add(i, &vcc->stats->rx_drop);
26559 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
26560 break;
26561 }
26562 if (!atm_charge(vcc, sb->truesize)) {
26563 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
26564 card->name);
26565 - atomic_add(i - 1, &vcc->stats->rx_drop);
26566 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
26567 dev_kfree_skb(sb);
26568 break;
26569 }
26570 @@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, st
26571 ATM_SKB(sb)->vcc = vcc;
26572 __net_timestamp(sb);
26573 vcc->push(vcc, sb);
26574 - atomic_inc(&vcc->stats->rx);
26575 + atomic_inc_unchecked(&vcc->stats->rx);
26576
26577 cell += ATM_CELL_PAYLOAD;
26578 }
26579 @@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, st
26580 "(CDC: %08x)\n",
26581 card->name, len, rpp->len, readl(SAR_REG_CDC));
26582 recycle_rx_pool_skb(card, rpp);
26583 - atomic_inc(&vcc->stats->rx_err);
26584 + atomic_inc_unchecked(&vcc->stats->rx_err);
26585 return;
26586 }
26587 if (stat & SAR_RSQE_CRC) {
26588 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
26589 recycle_rx_pool_skb(card, rpp);
26590 - atomic_inc(&vcc->stats->rx_err);
26591 + atomic_inc_unchecked(&vcc->stats->rx_err);
26592 return;
26593 }
26594 if (skb_queue_len(&rpp->queue) > 1) {
26595 @@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, st
26596 RXPRINTK("%s: Can't alloc RX skb.\n",
26597 card->name);
26598 recycle_rx_pool_skb(card, rpp);
26599 - atomic_inc(&vcc->stats->rx_err);
26600 + atomic_inc_unchecked(&vcc->stats->rx_err);
26601 return;
26602 }
26603 if (!atm_charge(vcc, skb->truesize)) {
26604 @@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, st
26605 __net_timestamp(skb);
26606
26607 vcc->push(vcc, skb);
26608 - atomic_inc(&vcc->stats->rx);
26609 + atomic_inc_unchecked(&vcc->stats->rx);
26610
26611 return;
26612 }
26613 @@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, st
26614 __net_timestamp(skb);
26615
26616 vcc->push(vcc, skb);
26617 - atomic_inc(&vcc->stats->rx);
26618 + atomic_inc_unchecked(&vcc->stats->rx);
26619
26620 if (skb->truesize > SAR_FB_SIZE_3)
26621 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
26622 @@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
26623 if (vcc->qos.aal != ATM_AAL0) {
26624 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
26625 card->name, vpi, vci);
26626 - atomic_inc(&vcc->stats->rx_drop);
26627 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26628 goto drop;
26629 }
26630
26631 if ((sb = dev_alloc_skb(64)) == NULL) {
26632 printk("%s: Can't allocate buffers for AAL0.\n",
26633 card->name);
26634 - atomic_inc(&vcc->stats->rx_err);
26635 + atomic_inc_unchecked(&vcc->stats->rx_err);
26636 goto drop;
26637 }
26638
26639 @@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
26640 ATM_SKB(sb)->vcc = vcc;
26641 __net_timestamp(sb);
26642 vcc->push(vcc, sb);
26643 - atomic_inc(&vcc->stats->rx);
26644 + atomic_inc_unchecked(&vcc->stats->rx);
26645
26646 drop:
26647 skb_pull(queue, 64);
26648 @@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26649
26650 if (vc == NULL) {
26651 printk("%s: NULL connection in send().\n", card->name);
26652 - atomic_inc(&vcc->stats->tx_err);
26653 + atomic_inc_unchecked(&vcc->stats->tx_err);
26654 dev_kfree_skb(skb);
26655 return -EINVAL;
26656 }
26657 if (!test_bit(VCF_TX, &vc->flags)) {
26658 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
26659 - atomic_inc(&vcc->stats->tx_err);
26660 + atomic_inc_unchecked(&vcc->stats->tx_err);
26661 dev_kfree_skb(skb);
26662 return -EINVAL;
26663 }
26664 @@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26665 break;
26666 default:
26667 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
26668 - atomic_inc(&vcc->stats->tx_err);
26669 + atomic_inc_unchecked(&vcc->stats->tx_err);
26670 dev_kfree_skb(skb);
26671 return -EINVAL;
26672 }
26673
26674 if (skb_shinfo(skb)->nr_frags != 0) {
26675 printk("%s: No scatter-gather yet.\n", card->name);
26676 - atomic_inc(&vcc->stats->tx_err);
26677 + atomic_inc_unchecked(&vcc->stats->tx_err);
26678 dev_kfree_skb(skb);
26679 return -EINVAL;
26680 }
26681 @@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26682
26683 err = queue_skb(card, vc, skb, oam);
26684 if (err) {
26685 - atomic_inc(&vcc->stats->tx_err);
26686 + atomic_inc_unchecked(&vcc->stats->tx_err);
26687 dev_kfree_skb(skb);
26688 return err;
26689 }
26690 @@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
26691 skb = dev_alloc_skb(64);
26692 if (!skb) {
26693 printk("%s: Out of memory in send_oam().\n", card->name);
26694 - atomic_inc(&vcc->stats->tx_err);
26695 + atomic_inc_unchecked(&vcc->stats->tx_err);
26696 return -ENOMEM;
26697 }
26698 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
26699 diff -urNp linux-2.6.32.45/drivers/atm/iphase.c linux-2.6.32.45/drivers/atm/iphase.c
26700 --- linux-2.6.32.45/drivers/atm/iphase.c 2011-03-27 14:31:47.000000000 -0400
26701 +++ linux-2.6.32.45/drivers/atm/iphase.c 2011-04-17 15:56:46.000000000 -0400
26702 @@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
26703 status = (u_short) (buf_desc_ptr->desc_mode);
26704 if (status & (RX_CER | RX_PTE | RX_OFL))
26705 {
26706 - atomic_inc(&vcc->stats->rx_err);
26707 + atomic_inc_unchecked(&vcc->stats->rx_err);
26708 IF_ERR(printk("IA: bad packet, dropping it");)
26709 if (status & RX_CER) {
26710 IF_ERR(printk(" cause: packet CRC error\n");)
26711 @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
26712 len = dma_addr - buf_addr;
26713 if (len > iadev->rx_buf_sz) {
26714 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
26715 - atomic_inc(&vcc->stats->rx_err);
26716 + atomic_inc_unchecked(&vcc->stats->rx_err);
26717 goto out_free_desc;
26718 }
26719
26720 @@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *
26721 ia_vcc = INPH_IA_VCC(vcc);
26722 if (ia_vcc == NULL)
26723 {
26724 - atomic_inc(&vcc->stats->rx_err);
26725 + atomic_inc_unchecked(&vcc->stats->rx_err);
26726 dev_kfree_skb_any(skb);
26727 atm_return(vcc, atm_guess_pdu2truesize(len));
26728 goto INCR_DLE;
26729 @@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *
26730 if ((length > iadev->rx_buf_sz) || (length >
26731 (skb->len - sizeof(struct cpcs_trailer))))
26732 {
26733 - atomic_inc(&vcc->stats->rx_err);
26734 + atomic_inc_unchecked(&vcc->stats->rx_err);
26735 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
26736 length, skb->len);)
26737 dev_kfree_skb_any(skb);
26738 @@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *
26739
26740 IF_RX(printk("rx_dle_intr: skb push");)
26741 vcc->push(vcc,skb);
26742 - atomic_inc(&vcc->stats->rx);
26743 + atomic_inc_unchecked(&vcc->stats->rx);
26744 iadev->rx_pkt_cnt++;
26745 }
26746 INCR_DLE:
26747 @@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev,
26748 {
26749 struct k_sonet_stats *stats;
26750 stats = &PRIV(_ia_dev[board])->sonet_stats;
26751 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
26752 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
26753 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
26754 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
26755 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
26756 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
26757 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
26758 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
26759 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
26760 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
26761 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
26762 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
26763 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
26764 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
26765 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
26766 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
26767 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
26768 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
26769 }
26770 ia_cmds.status = 0;
26771 break;
26772 @@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
26773 if ((desc == 0) || (desc > iadev->num_tx_desc))
26774 {
26775 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
26776 - atomic_inc(&vcc->stats->tx);
26777 + atomic_inc_unchecked(&vcc->stats->tx);
26778 if (vcc->pop)
26779 vcc->pop(vcc, skb);
26780 else
26781 @@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
26782 ATM_DESC(skb) = vcc->vci;
26783 skb_queue_tail(&iadev->tx_dma_q, skb);
26784
26785 - atomic_inc(&vcc->stats->tx);
26786 + atomic_inc_unchecked(&vcc->stats->tx);
26787 iadev->tx_pkt_cnt++;
26788 /* Increment transaction counter */
26789 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
26790
26791 #if 0
26792 /* add flow control logic */
26793 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
26794 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
26795 if (iavcc->vc_desc_cnt > 10) {
26796 vcc->tx_quota = vcc->tx_quota * 3 / 4;
26797 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
26798 diff -urNp linux-2.6.32.45/drivers/atm/lanai.c linux-2.6.32.45/drivers/atm/lanai.c
26799 --- linux-2.6.32.45/drivers/atm/lanai.c 2011-03-27 14:31:47.000000000 -0400
26800 +++ linux-2.6.32.45/drivers/atm/lanai.c 2011-04-17 15:56:46.000000000 -0400
26801 @@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct l
26802 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
26803 lanai_endtx(lanai, lvcc);
26804 lanai_free_skb(lvcc->tx.atmvcc, skb);
26805 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
26806 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
26807 }
26808
26809 /* Try to fill the buffer - don't call unless there is backlog */
26810 @@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc
26811 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
26812 __net_timestamp(skb);
26813 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
26814 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
26815 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
26816 out:
26817 lvcc->rx.buf.ptr = end;
26818 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
26819 @@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_d
26820 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
26821 "vcc %d\n", lanai->number, (unsigned int) s, vci);
26822 lanai->stats.service_rxnotaal5++;
26823 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26824 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26825 return 0;
26826 }
26827 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
26828 @@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_d
26829 int bytes;
26830 read_unlock(&vcc_sklist_lock);
26831 DPRINTK("got trashed rx pdu on vci %d\n", vci);
26832 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26833 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26834 lvcc->stats.x.aal5.service_trash++;
26835 bytes = (SERVICE_GET_END(s) * 16) -
26836 (((unsigned long) lvcc->rx.buf.ptr) -
26837 @@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_d
26838 }
26839 if (s & SERVICE_STREAM) {
26840 read_unlock(&vcc_sklist_lock);
26841 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26842 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26843 lvcc->stats.x.aal5.service_stream++;
26844 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
26845 "PDU on VCI %d!\n", lanai->number, vci);
26846 @@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_d
26847 return 0;
26848 }
26849 DPRINTK("got rx crc error on vci %d\n", vci);
26850 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26851 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26852 lvcc->stats.x.aal5.service_rxcrc++;
26853 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
26854 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
26855 diff -urNp linux-2.6.32.45/drivers/atm/nicstar.c linux-2.6.32.45/drivers/atm/nicstar.c
26856 --- linux-2.6.32.45/drivers/atm/nicstar.c 2011-03-27 14:31:47.000000000 -0400
26857 +++ linux-2.6.32.45/drivers/atm/nicstar.c 2011-04-17 15:56:46.000000000 -0400
26858 @@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc,
26859 if ((vc = (vc_map *) vcc->dev_data) == NULL)
26860 {
26861 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
26862 - atomic_inc(&vcc->stats->tx_err);
26863 + atomic_inc_unchecked(&vcc->stats->tx_err);
26864 dev_kfree_skb_any(skb);
26865 return -EINVAL;
26866 }
26867 @@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc,
26868 if (!vc->tx)
26869 {
26870 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
26871 - atomic_inc(&vcc->stats->tx_err);
26872 + atomic_inc_unchecked(&vcc->stats->tx_err);
26873 dev_kfree_skb_any(skb);
26874 return -EINVAL;
26875 }
26876 @@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc,
26877 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
26878 {
26879 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
26880 - atomic_inc(&vcc->stats->tx_err);
26881 + atomic_inc_unchecked(&vcc->stats->tx_err);
26882 dev_kfree_skb_any(skb);
26883 return -EINVAL;
26884 }
26885 @@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc,
26886 if (skb_shinfo(skb)->nr_frags != 0)
26887 {
26888 printk("nicstar%d: No scatter-gather yet.\n", card->index);
26889 - atomic_inc(&vcc->stats->tx_err);
26890 + atomic_inc_unchecked(&vcc->stats->tx_err);
26891 dev_kfree_skb_any(skb);
26892 return -EINVAL;
26893 }
26894 @@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc,
26895
26896 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
26897 {
26898 - atomic_inc(&vcc->stats->tx_err);
26899 + atomic_inc_unchecked(&vcc->stats->tx_err);
26900 dev_kfree_skb_any(skb);
26901 return -EIO;
26902 }
26903 - atomic_inc(&vcc->stats->tx);
26904 + atomic_inc_unchecked(&vcc->stats->tx);
26905
26906 return 0;
26907 }
26908 @@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_
26909 {
26910 printk("nicstar%d: Can't allocate buffers for aal0.\n",
26911 card->index);
26912 - atomic_add(i,&vcc->stats->rx_drop);
26913 + atomic_add_unchecked(i,&vcc->stats->rx_drop);
26914 break;
26915 }
26916 if (!atm_charge(vcc, sb->truesize))
26917 {
26918 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
26919 card->index);
26920 - atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
26921 + atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
26922 dev_kfree_skb_any(sb);
26923 break;
26924 }
26925 @@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_
26926 ATM_SKB(sb)->vcc = vcc;
26927 __net_timestamp(sb);
26928 vcc->push(vcc, sb);
26929 - atomic_inc(&vcc->stats->rx);
26930 + atomic_inc_unchecked(&vcc->stats->rx);
26931 cell += ATM_CELL_PAYLOAD;
26932 }
26933
26934 @@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_
26935 if (iovb == NULL)
26936 {
26937 printk("nicstar%d: Out of iovec buffers.\n", card->index);
26938 - atomic_inc(&vcc->stats->rx_drop);
26939 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26940 recycle_rx_buf(card, skb);
26941 return;
26942 }
26943 @@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_
26944 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
26945 {
26946 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
26947 - atomic_inc(&vcc->stats->rx_err);
26948 + atomic_inc_unchecked(&vcc->stats->rx_err);
26949 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
26950 NS_SKB(iovb)->iovcnt = 0;
26951 iovb->len = 0;
26952 @@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_
26953 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
26954 card->index);
26955 which_list(card, skb);
26956 - atomic_inc(&vcc->stats->rx_err);
26957 + atomic_inc_unchecked(&vcc->stats->rx_err);
26958 recycle_rx_buf(card, skb);
26959 vc->rx_iov = NULL;
26960 recycle_iov_buf(card, iovb);
26961 @@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_
26962 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
26963 card->index);
26964 which_list(card, skb);
26965 - atomic_inc(&vcc->stats->rx_err);
26966 + atomic_inc_unchecked(&vcc->stats->rx_err);
26967 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26968 NS_SKB(iovb)->iovcnt);
26969 vc->rx_iov = NULL;
26970 @@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_
26971 printk(" - PDU size mismatch.\n");
26972 else
26973 printk(".\n");
26974 - atomic_inc(&vcc->stats->rx_err);
26975 + atomic_inc_unchecked(&vcc->stats->rx_err);
26976 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26977 NS_SKB(iovb)->iovcnt);
26978 vc->rx_iov = NULL;
26979 @@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_
26980 if (!atm_charge(vcc, skb->truesize))
26981 {
26982 push_rxbufs(card, skb);
26983 - atomic_inc(&vcc->stats->rx_drop);
26984 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26985 }
26986 else
26987 {
26988 @@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_
26989 ATM_SKB(skb)->vcc = vcc;
26990 __net_timestamp(skb);
26991 vcc->push(vcc, skb);
26992 - atomic_inc(&vcc->stats->rx);
26993 + atomic_inc_unchecked(&vcc->stats->rx);
26994 }
26995 }
26996 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
26997 @@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_
26998 if (!atm_charge(vcc, sb->truesize))
26999 {
27000 push_rxbufs(card, sb);
27001 - atomic_inc(&vcc->stats->rx_drop);
27002 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27003 }
27004 else
27005 {
27006 @@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_
27007 ATM_SKB(sb)->vcc = vcc;
27008 __net_timestamp(sb);
27009 vcc->push(vcc, sb);
27010 - atomic_inc(&vcc->stats->rx);
27011 + atomic_inc_unchecked(&vcc->stats->rx);
27012 }
27013
27014 push_rxbufs(card, skb);
27015 @@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_
27016 if (!atm_charge(vcc, skb->truesize))
27017 {
27018 push_rxbufs(card, skb);
27019 - atomic_inc(&vcc->stats->rx_drop);
27020 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27021 }
27022 else
27023 {
27024 @@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_
27025 ATM_SKB(skb)->vcc = vcc;
27026 __net_timestamp(skb);
27027 vcc->push(vcc, skb);
27028 - atomic_inc(&vcc->stats->rx);
27029 + atomic_inc_unchecked(&vcc->stats->rx);
27030 }
27031
27032 push_rxbufs(card, sb);
27033 @@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_
27034 if (hb == NULL)
27035 {
27036 printk("nicstar%d: Out of huge buffers.\n", card->index);
27037 - atomic_inc(&vcc->stats->rx_drop);
27038 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27039 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
27040 NS_SKB(iovb)->iovcnt);
27041 vc->rx_iov = NULL;
27042 @@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_
27043 }
27044 else
27045 dev_kfree_skb_any(hb);
27046 - atomic_inc(&vcc->stats->rx_drop);
27047 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27048 }
27049 else
27050 {
27051 @@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_
27052 #endif /* NS_USE_DESTRUCTORS */
27053 __net_timestamp(hb);
27054 vcc->push(vcc, hb);
27055 - atomic_inc(&vcc->stats->rx);
27056 + atomic_inc_unchecked(&vcc->stats->rx);
27057 }
27058 }
27059
27060 diff -urNp linux-2.6.32.45/drivers/atm/solos-pci.c linux-2.6.32.45/drivers/atm/solos-pci.c
27061 --- linux-2.6.32.45/drivers/atm/solos-pci.c 2011-04-17 17:00:52.000000000 -0400
27062 +++ linux-2.6.32.45/drivers/atm/solos-pci.c 2011-05-16 21:46:57.000000000 -0400
27063 @@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
27064 }
27065 atm_charge(vcc, skb->truesize);
27066 vcc->push(vcc, skb);
27067 - atomic_inc(&vcc->stats->rx);
27068 + atomic_inc_unchecked(&vcc->stats->rx);
27069 break;
27070
27071 case PKT_STATUS:
27072 @@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *
27073 char msg[500];
27074 char item[10];
27075
27076 + pax_track_stack();
27077 +
27078 len = buf->len;
27079 for (i = 0; i < len; i++){
27080 if(i % 8 == 0)
27081 @@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_car
27082 vcc = SKB_CB(oldskb)->vcc;
27083
27084 if (vcc) {
27085 - atomic_inc(&vcc->stats->tx);
27086 + atomic_inc_unchecked(&vcc->stats->tx);
27087 solos_pop(vcc, oldskb);
27088 } else
27089 dev_kfree_skb_irq(oldskb);
27090 diff -urNp linux-2.6.32.45/drivers/atm/suni.c linux-2.6.32.45/drivers/atm/suni.c
27091 --- linux-2.6.32.45/drivers/atm/suni.c 2011-03-27 14:31:47.000000000 -0400
27092 +++ linux-2.6.32.45/drivers/atm/suni.c 2011-04-17 15:56:46.000000000 -0400
27093 @@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
27094
27095
27096 #define ADD_LIMITED(s,v) \
27097 - atomic_add((v),&stats->s); \
27098 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
27099 + atomic_add_unchecked((v),&stats->s); \
27100 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
27101
27102
27103 static void suni_hz(unsigned long from_timer)
27104 diff -urNp linux-2.6.32.45/drivers/atm/uPD98402.c linux-2.6.32.45/drivers/atm/uPD98402.c
27105 --- linux-2.6.32.45/drivers/atm/uPD98402.c 2011-03-27 14:31:47.000000000 -0400
27106 +++ linux-2.6.32.45/drivers/atm/uPD98402.c 2011-04-17 15:56:46.000000000 -0400
27107 @@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *d
27108 struct sonet_stats tmp;
27109 int error = 0;
27110
27111 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
27112 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
27113 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
27114 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
27115 if (zero && !error) {
27116 @@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev
27117
27118
27119 #define ADD_LIMITED(s,v) \
27120 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
27121 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
27122 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
27123 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
27124 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
27125 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
27126
27127
27128 static void stat_event(struct atm_dev *dev)
27129 @@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev
27130 if (reason & uPD98402_INT_PFM) stat_event(dev);
27131 if (reason & uPD98402_INT_PCO) {
27132 (void) GET(PCOCR); /* clear interrupt cause */
27133 - atomic_add(GET(HECCT),
27134 + atomic_add_unchecked(GET(HECCT),
27135 &PRIV(dev)->sonet_stats.uncorr_hcs);
27136 }
27137 if ((reason & uPD98402_INT_RFO) &&
27138 @@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev
27139 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
27140 uPD98402_INT_LOS),PIMR); /* enable them */
27141 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
27142 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
27143 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
27144 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
27145 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
27146 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
27147 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
27148 return 0;
27149 }
27150
27151 diff -urNp linux-2.6.32.45/drivers/atm/zatm.c linux-2.6.32.45/drivers/atm/zatm.c
27152 --- linux-2.6.32.45/drivers/atm/zatm.c 2011-03-27 14:31:47.000000000 -0400
27153 +++ linux-2.6.32.45/drivers/atm/zatm.c 2011-04-17 15:56:46.000000000 -0400
27154 @@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
27155 }
27156 if (!size) {
27157 dev_kfree_skb_irq(skb);
27158 - if (vcc) atomic_inc(&vcc->stats->rx_err);
27159 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
27160 continue;
27161 }
27162 if (!atm_charge(vcc,skb->truesize)) {
27163 @@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
27164 skb->len = size;
27165 ATM_SKB(skb)->vcc = vcc;
27166 vcc->push(vcc,skb);
27167 - atomic_inc(&vcc->stats->rx);
27168 + atomic_inc_unchecked(&vcc->stats->rx);
27169 }
27170 zout(pos & 0xffff,MTA(mbx));
27171 #if 0 /* probably a stupid idea */
27172 @@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
27173 skb_queue_head(&zatm_vcc->backlog,skb);
27174 break;
27175 }
27176 - atomic_inc(&vcc->stats->tx);
27177 + atomic_inc_unchecked(&vcc->stats->tx);
27178 wake_up(&zatm_vcc->tx_wait);
27179 }
27180
27181 diff -urNp linux-2.6.32.45/drivers/base/bus.c linux-2.6.32.45/drivers/base/bus.c
27182 --- linux-2.6.32.45/drivers/base/bus.c 2011-03-27 14:31:47.000000000 -0400
27183 +++ linux-2.6.32.45/drivers/base/bus.c 2011-04-17 15:56:46.000000000 -0400
27184 @@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kob
27185 return ret;
27186 }
27187
27188 -static struct sysfs_ops driver_sysfs_ops = {
27189 +static const struct sysfs_ops driver_sysfs_ops = {
27190 .show = drv_attr_show,
27191 .store = drv_attr_store,
27192 };
27193 @@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kob
27194 return ret;
27195 }
27196
27197 -static struct sysfs_ops bus_sysfs_ops = {
27198 +static const struct sysfs_ops bus_sysfs_ops = {
27199 .show = bus_attr_show,
27200 .store = bus_attr_store,
27201 };
27202 @@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset
27203 return 0;
27204 }
27205
27206 -static struct kset_uevent_ops bus_uevent_ops = {
27207 +static const struct kset_uevent_ops bus_uevent_ops = {
27208 .filter = bus_uevent_filter,
27209 };
27210
27211 diff -urNp linux-2.6.32.45/drivers/base/class.c linux-2.6.32.45/drivers/base/class.c
27212 --- linux-2.6.32.45/drivers/base/class.c 2011-03-27 14:31:47.000000000 -0400
27213 +++ linux-2.6.32.45/drivers/base/class.c 2011-04-17 15:56:46.000000000 -0400
27214 @@ -63,7 +63,7 @@ static void class_release(struct kobject
27215 kfree(cp);
27216 }
27217
27218 -static struct sysfs_ops class_sysfs_ops = {
27219 +static const struct sysfs_ops class_sysfs_ops = {
27220 .show = class_attr_show,
27221 .store = class_attr_store,
27222 };
27223 diff -urNp linux-2.6.32.45/drivers/base/core.c linux-2.6.32.45/drivers/base/core.c
27224 --- linux-2.6.32.45/drivers/base/core.c 2011-03-27 14:31:47.000000000 -0400
27225 +++ linux-2.6.32.45/drivers/base/core.c 2011-04-17 15:56:46.000000000 -0400
27226 @@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kob
27227 return ret;
27228 }
27229
27230 -static struct sysfs_ops dev_sysfs_ops = {
27231 +static const struct sysfs_ops dev_sysfs_ops = {
27232 .show = dev_attr_show,
27233 .store = dev_attr_store,
27234 };
27235 @@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset,
27236 return retval;
27237 }
27238
27239 -static struct kset_uevent_ops device_uevent_ops = {
27240 +static const struct kset_uevent_ops device_uevent_ops = {
27241 .filter = dev_uevent_filter,
27242 .name = dev_uevent_name,
27243 .uevent = dev_uevent,
27244 diff -urNp linux-2.6.32.45/drivers/base/memory.c linux-2.6.32.45/drivers/base/memory.c
27245 --- linux-2.6.32.45/drivers/base/memory.c 2011-03-27 14:31:47.000000000 -0400
27246 +++ linux-2.6.32.45/drivers/base/memory.c 2011-04-17 15:56:46.000000000 -0400
27247 @@ -44,7 +44,7 @@ static int memory_uevent(struct kset *ks
27248 return retval;
27249 }
27250
27251 -static struct kset_uevent_ops memory_uevent_ops = {
27252 +static const struct kset_uevent_ops memory_uevent_ops = {
27253 .name = memory_uevent_name,
27254 .uevent = memory_uevent,
27255 };
27256 diff -urNp linux-2.6.32.45/drivers/base/sys.c linux-2.6.32.45/drivers/base/sys.c
27257 --- linux-2.6.32.45/drivers/base/sys.c 2011-03-27 14:31:47.000000000 -0400
27258 +++ linux-2.6.32.45/drivers/base/sys.c 2011-04-17 15:56:46.000000000 -0400
27259 @@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struc
27260 return -EIO;
27261 }
27262
27263 -static struct sysfs_ops sysfs_ops = {
27264 +static const struct sysfs_ops sysfs_ops = {
27265 .show = sysdev_show,
27266 .store = sysdev_store,
27267 };
27268 @@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct
27269 return -EIO;
27270 }
27271
27272 -static struct sysfs_ops sysfs_class_ops = {
27273 +static const struct sysfs_ops sysfs_class_ops = {
27274 .show = sysdev_class_show,
27275 .store = sysdev_class_store,
27276 };
27277 diff -urNp linux-2.6.32.45/drivers/block/cciss.c linux-2.6.32.45/drivers/block/cciss.c
27278 --- linux-2.6.32.45/drivers/block/cciss.c 2011-03-27 14:31:47.000000000 -0400
27279 +++ linux-2.6.32.45/drivers/block/cciss.c 2011-08-05 20:33:55.000000000 -0400
27280 @@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct
27281 int err;
27282 u32 cp;
27283
27284 + memset(&arg64, 0, sizeof(arg64));
27285 +
27286 err = 0;
27287 err |=
27288 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
27289 @@ -2852,7 +2854,7 @@ static unsigned long pollcomplete(int ct
27290 /* Wait (up to 20 seconds) for a command to complete */
27291
27292 for (i = 20 * HZ; i > 0; i--) {
27293 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
27294 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
27295 if (done == FIFO_EMPTY)
27296 schedule_timeout_uninterruptible(1);
27297 else
27298 @@ -2876,7 +2878,7 @@ static int sendcmd_core(ctlr_info_t *h,
27299 resend_cmd1:
27300
27301 /* Disable interrupt on the board. */
27302 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
27303 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
27304
27305 /* Make sure there is room in the command FIFO */
27306 /* Actually it should be completely empty at this time */
27307 @@ -2884,13 +2886,13 @@ resend_cmd1:
27308 /* tape side of the driver. */
27309 for (i = 200000; i > 0; i--) {
27310 /* if fifo isn't full go */
27311 - if (!(h->access.fifo_full(h)))
27312 + if (!(h->access->fifo_full(h)))
27313 break;
27314 udelay(10);
27315 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
27316 " waiting!\n", h->ctlr);
27317 }
27318 - h->access.submit_command(h, c); /* Send the cmd */
27319 + h->access->submit_command(h, c); /* Send the cmd */
27320 do {
27321 complete = pollcomplete(h->ctlr);
27322
27323 @@ -3023,7 +3025,7 @@ static void start_io(ctlr_info_t *h)
27324 while (!hlist_empty(&h->reqQ)) {
27325 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
27326 /* can't do anything if fifo is full */
27327 - if ((h->access.fifo_full(h))) {
27328 + if ((h->access->fifo_full(h))) {
27329 printk(KERN_WARNING "cciss: fifo full\n");
27330 break;
27331 }
27332 @@ -3033,7 +3035,7 @@ static void start_io(ctlr_info_t *h)
27333 h->Qdepth--;
27334
27335 /* Tell the controller execute command */
27336 - h->access.submit_command(h, c);
27337 + h->access->submit_command(h, c);
27338
27339 /* Put job onto the completed Q */
27340 addQ(&h->cmpQ, c);
27341 @@ -3393,17 +3395,17 @@ startio:
27342
27343 static inline unsigned long get_next_completion(ctlr_info_t *h)
27344 {
27345 - return h->access.command_completed(h);
27346 + return h->access->command_completed(h);
27347 }
27348
27349 static inline int interrupt_pending(ctlr_info_t *h)
27350 {
27351 - return h->access.intr_pending(h);
27352 + return h->access->intr_pending(h);
27353 }
27354
27355 static inline long interrupt_not_for_us(ctlr_info_t *h)
27356 {
27357 - return (((h->access.intr_pending(h) == 0) ||
27358 + return (((h->access->intr_pending(h) == 0) ||
27359 (h->interrupts_enabled == 0)));
27360 }
27361
27362 @@ -3892,7 +3894,7 @@ static int __devinit cciss_pci_init(ctlr
27363 */
27364 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
27365 c->product_name = products[prod_index].product_name;
27366 - c->access = *(products[prod_index].access);
27367 + c->access = products[prod_index].access;
27368 c->nr_cmds = c->max_commands - 4;
27369 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
27370 (readb(&c->cfgtable->Signature[1]) != 'I') ||
27371 @@ -4291,7 +4293,7 @@ static int __devinit cciss_init_one(stru
27372 }
27373
27374 /* make sure the board interrupts are off */
27375 - hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
27376 + hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_OFF);
27377 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
27378 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
27379 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
27380 @@ -4341,7 +4343,7 @@ static int __devinit cciss_init_one(stru
27381 cciss_scsi_setup(i);
27382
27383 /* Turn the interrupts on so we can service requests */
27384 - hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
27385 + hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_ON);
27386
27387 /* Get the firmware version */
27388 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
27389 diff -urNp linux-2.6.32.45/drivers/block/cciss.h linux-2.6.32.45/drivers/block/cciss.h
27390 --- linux-2.6.32.45/drivers/block/cciss.h 2011-08-09 18:35:28.000000000 -0400
27391 +++ linux-2.6.32.45/drivers/block/cciss.h 2011-08-09 18:33:59.000000000 -0400
27392 @@ -90,7 +90,7 @@ struct ctlr_info
27393 // information about each logical volume
27394 drive_info_struct *drv[CISS_MAX_LUN];
27395
27396 - struct access_method access;
27397 + struct access_method *access;
27398
27399 /* queue and queue Info */
27400 struct hlist_head reqQ;
27401 diff -urNp linux-2.6.32.45/drivers/block/cpqarray.c linux-2.6.32.45/drivers/block/cpqarray.c
27402 --- linux-2.6.32.45/drivers/block/cpqarray.c 2011-03-27 14:31:47.000000000 -0400
27403 +++ linux-2.6.32.45/drivers/block/cpqarray.c 2011-08-05 20:33:55.000000000 -0400
27404 @@ -402,7 +402,7 @@ static int __init cpqarray_register_ctlr
27405 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
27406 goto Enomem4;
27407 }
27408 - hba[i]->access.set_intr_mask(hba[i], 0);
27409 + hba[i]->access->set_intr_mask(hba[i], 0);
27410 if (request_irq(hba[i]->intr, do_ida_intr,
27411 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
27412 {
27413 @@ -460,7 +460,7 @@ static int __init cpqarray_register_ctlr
27414 add_timer(&hba[i]->timer);
27415
27416 /* Enable IRQ now that spinlock and rate limit timer are set up */
27417 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
27418 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
27419
27420 for(j=0; j<NWD; j++) {
27421 struct gendisk *disk = ida_gendisk[i][j];
27422 @@ -695,7 +695,7 @@ DBGINFO(
27423 for(i=0; i<NR_PRODUCTS; i++) {
27424 if (board_id == products[i].board_id) {
27425 c->product_name = products[i].product_name;
27426 - c->access = *(products[i].access);
27427 + c->access = products[i].access;
27428 break;
27429 }
27430 }
27431 @@ -793,7 +793,7 @@ static int __init cpqarray_eisa_detect(v
27432 hba[ctlr]->intr = intr;
27433 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
27434 hba[ctlr]->product_name = products[j].product_name;
27435 - hba[ctlr]->access = *(products[j].access);
27436 + hba[ctlr]->access = products[j].access;
27437 hba[ctlr]->ctlr = ctlr;
27438 hba[ctlr]->board_id = board_id;
27439 hba[ctlr]->pci_dev = NULL; /* not PCI */
27440 @@ -896,6 +896,8 @@ static void do_ida_request(struct reques
27441 struct scatterlist tmp_sg[SG_MAX];
27442 int i, dir, seg;
27443
27444 + pax_track_stack();
27445 +
27446 if (blk_queue_plugged(q))
27447 goto startio;
27448
27449 @@ -968,7 +970,7 @@ static void start_io(ctlr_info_t *h)
27450
27451 while((c = h->reqQ) != NULL) {
27452 /* Can't do anything if we're busy */
27453 - if (h->access.fifo_full(h) == 0)
27454 + if (h->access->fifo_full(h) == 0)
27455 return;
27456
27457 /* Get the first entry from the request Q */
27458 @@ -976,7 +978,7 @@ static void start_io(ctlr_info_t *h)
27459 h->Qdepth--;
27460
27461 /* Tell the controller to do our bidding */
27462 - h->access.submit_command(h, c);
27463 + h->access->submit_command(h, c);
27464
27465 /* Get onto the completion Q */
27466 addQ(&h->cmpQ, c);
27467 @@ -1038,7 +1040,7 @@ static irqreturn_t do_ida_intr(int irq,
27468 unsigned long flags;
27469 __u32 a,a1;
27470
27471 - istat = h->access.intr_pending(h);
27472 + istat = h->access->intr_pending(h);
27473 /* Is this interrupt for us? */
27474 if (istat == 0)
27475 return IRQ_NONE;
27476 @@ -1049,7 +1051,7 @@ static irqreturn_t do_ida_intr(int irq,
27477 */
27478 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
27479 if (istat & FIFO_NOT_EMPTY) {
27480 - while((a = h->access.command_completed(h))) {
27481 + while((a = h->access->command_completed(h))) {
27482 a1 = a; a &= ~3;
27483 if ((c = h->cmpQ) == NULL)
27484 {
27485 @@ -1434,11 +1436,11 @@ static int sendcmd(
27486 /*
27487 * Disable interrupt
27488 */
27489 - info_p->access.set_intr_mask(info_p, 0);
27490 + info_p->access->set_intr_mask(info_p, 0);
27491 /* Make sure there is room in the command FIFO */
27492 /* Actually it should be completely empty at this time. */
27493 for (i = 200000; i > 0; i--) {
27494 - temp = info_p->access.fifo_full(info_p);
27495 + temp = info_p->access->fifo_full(info_p);
27496 if (temp != 0) {
27497 break;
27498 }
27499 @@ -1451,7 +1453,7 @@ DBG(
27500 /*
27501 * Send the cmd
27502 */
27503 - info_p->access.submit_command(info_p, c);
27504 + info_p->access->submit_command(info_p, c);
27505 complete = pollcomplete(ctlr);
27506
27507 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
27508 @@ -1534,9 +1536,9 @@ static int revalidate_allvol(ctlr_info_t
27509 * we check the new geometry. Then turn interrupts back on when
27510 * we're done.
27511 */
27512 - host->access.set_intr_mask(host, 0);
27513 + host->access->set_intr_mask(host, 0);
27514 getgeometry(ctlr);
27515 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
27516 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
27517
27518 for(i=0; i<NWD; i++) {
27519 struct gendisk *disk = ida_gendisk[ctlr][i];
27520 @@ -1576,7 +1578,7 @@ static int pollcomplete(int ctlr)
27521 /* Wait (up to 2 seconds) for a command to complete */
27522
27523 for (i = 200000; i > 0; i--) {
27524 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
27525 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
27526 if (done == 0) {
27527 udelay(10); /* a short fixed delay */
27528 } else
27529 diff -urNp linux-2.6.32.45/drivers/block/cpqarray.h linux-2.6.32.45/drivers/block/cpqarray.h
27530 --- linux-2.6.32.45/drivers/block/cpqarray.h 2011-03-27 14:31:47.000000000 -0400
27531 +++ linux-2.6.32.45/drivers/block/cpqarray.h 2011-08-05 20:33:55.000000000 -0400
27532 @@ -99,7 +99,7 @@ struct ctlr_info {
27533 drv_info_t drv[NWD];
27534 struct proc_dir_entry *proc;
27535
27536 - struct access_method access;
27537 + struct access_method *access;
27538
27539 cmdlist_t *reqQ;
27540 cmdlist_t *cmpQ;
27541 diff -urNp linux-2.6.32.45/drivers/block/DAC960.c linux-2.6.32.45/drivers/block/DAC960.c
27542 --- linux-2.6.32.45/drivers/block/DAC960.c 2011-03-27 14:31:47.000000000 -0400
27543 +++ linux-2.6.32.45/drivers/block/DAC960.c 2011-05-16 21:46:57.000000000 -0400
27544 @@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfigur
27545 unsigned long flags;
27546 int Channel, TargetID;
27547
27548 + pax_track_stack();
27549 +
27550 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
27551 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
27552 sizeof(DAC960_SCSI_Inquiry_T) +
27553 diff -urNp linux-2.6.32.45/drivers/block/nbd.c linux-2.6.32.45/drivers/block/nbd.c
27554 --- linux-2.6.32.45/drivers/block/nbd.c 2011-06-25 12:55:34.000000000 -0400
27555 +++ linux-2.6.32.45/drivers/block/nbd.c 2011-06-25 12:56:37.000000000 -0400
27556 @@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *
27557 struct kvec iov;
27558 sigset_t blocked, oldset;
27559
27560 + pax_track_stack();
27561 +
27562 if (unlikely(!sock)) {
27563 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
27564 lo->disk->disk_name, (send ? "send" : "recv"));
27565 @@ -569,6 +571,8 @@ static void do_nbd_request(struct reques
27566 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
27567 unsigned int cmd, unsigned long arg)
27568 {
27569 + pax_track_stack();
27570 +
27571 switch (cmd) {
27572 case NBD_DISCONNECT: {
27573 struct request sreq;
27574 diff -urNp linux-2.6.32.45/drivers/block/pktcdvd.c linux-2.6.32.45/drivers/block/pktcdvd.c
27575 --- linux-2.6.32.45/drivers/block/pktcdvd.c 2011-03-27 14:31:47.000000000 -0400
27576 +++ linux-2.6.32.45/drivers/block/pktcdvd.c 2011-04-17 15:56:46.000000000 -0400
27577 @@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kob
27578 return len;
27579 }
27580
27581 -static struct sysfs_ops kobj_pkt_ops = {
27582 +static const struct sysfs_ops kobj_pkt_ops = {
27583 .show = kobj_pkt_show,
27584 .store = kobj_pkt_store
27585 };
27586 diff -urNp linux-2.6.32.45/drivers/char/agp/frontend.c linux-2.6.32.45/drivers/char/agp/frontend.c
27587 --- linux-2.6.32.45/drivers/char/agp/frontend.c 2011-03-27 14:31:47.000000000 -0400
27588 +++ linux-2.6.32.45/drivers/char/agp/frontend.c 2011-04-17 15:56:46.000000000 -0400
27589 @@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct ag
27590 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
27591 return -EFAULT;
27592
27593 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
27594 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
27595 return -EFAULT;
27596
27597 client = agp_find_client_by_pid(reserve.pid);
27598 diff -urNp linux-2.6.32.45/drivers/char/briq_panel.c linux-2.6.32.45/drivers/char/briq_panel.c
27599 --- linux-2.6.32.45/drivers/char/briq_panel.c 2011-03-27 14:31:47.000000000 -0400
27600 +++ linux-2.6.32.45/drivers/char/briq_panel.c 2011-04-18 19:48:57.000000000 -0400
27601 @@ -10,6 +10,7 @@
27602 #include <linux/types.h>
27603 #include <linux/errno.h>
27604 #include <linux/tty.h>
27605 +#include <linux/mutex.h>
27606 #include <linux/timer.h>
27607 #include <linux/kernel.h>
27608 #include <linux/wait.h>
27609 @@ -36,6 +37,7 @@ static int vfd_is_open;
27610 static unsigned char vfd[40];
27611 static int vfd_cursor;
27612 static unsigned char ledpb, led;
27613 +static DEFINE_MUTEX(vfd_mutex);
27614
27615 static void update_vfd(void)
27616 {
27617 @@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct f
27618 if (!vfd_is_open)
27619 return -EBUSY;
27620
27621 + mutex_lock(&vfd_mutex);
27622 for (;;) {
27623 char c;
27624 if (!indx)
27625 break;
27626 - if (get_user(c, buf))
27627 + if (get_user(c, buf)) {
27628 + mutex_unlock(&vfd_mutex);
27629 return -EFAULT;
27630 + }
27631 if (esc) {
27632 set_led(c);
27633 esc = 0;
27634 @@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct f
27635 buf++;
27636 }
27637 update_vfd();
27638 + mutex_unlock(&vfd_mutex);
27639
27640 return len;
27641 }
27642 diff -urNp linux-2.6.32.45/drivers/char/genrtc.c linux-2.6.32.45/drivers/char/genrtc.c
27643 --- linux-2.6.32.45/drivers/char/genrtc.c 2011-03-27 14:31:47.000000000 -0400
27644 +++ linux-2.6.32.45/drivers/char/genrtc.c 2011-04-18 19:45:42.000000000 -0400
27645 @@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *i
27646 switch (cmd) {
27647
27648 case RTC_PLL_GET:
27649 + memset(&pll, 0, sizeof(pll));
27650 if (get_rtc_pll(&pll))
27651 return -EINVAL;
27652 else
27653 diff -urNp linux-2.6.32.45/drivers/char/hpet.c linux-2.6.32.45/drivers/char/hpet.c
27654 --- linux-2.6.32.45/drivers/char/hpet.c 2011-03-27 14:31:47.000000000 -0400
27655 +++ linux-2.6.32.45/drivers/char/hpet.c 2011-04-23 12:56:11.000000000 -0400
27656 @@ -430,7 +430,7 @@ static int hpet_release(struct inode *in
27657 return 0;
27658 }
27659
27660 -static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
27661 +static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
27662
27663 static int
27664 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
27665 @@ -565,7 +565,7 @@ static inline unsigned long hpet_time_di
27666 }
27667
27668 static int
27669 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
27670 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
27671 {
27672 struct hpet_timer __iomem *timer;
27673 struct hpet __iomem *hpet;
27674 @@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp,
27675 {
27676 struct hpet_info info;
27677
27678 + memset(&info, 0, sizeof(info));
27679 +
27680 if (devp->hd_ireqfreq)
27681 info.hi_ireqfreq =
27682 hpet_time_div(hpetp, devp->hd_ireqfreq);
27683 - else
27684 - info.hi_ireqfreq = 0;
27685 info.hi_flags =
27686 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
27687 info.hi_hpet = hpetp->hp_which;
27688 diff -urNp linux-2.6.32.45/drivers/char/hvc_beat.c linux-2.6.32.45/drivers/char/hvc_beat.c
27689 --- linux-2.6.32.45/drivers/char/hvc_beat.c 2011-03-27 14:31:47.000000000 -0400
27690 +++ linux-2.6.32.45/drivers/char/hvc_beat.c 2011-04-17 15:56:46.000000000 -0400
27691 @@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t v
27692 return cnt;
27693 }
27694
27695 -static struct hv_ops hvc_beat_get_put_ops = {
27696 +static const struct hv_ops hvc_beat_get_put_ops = {
27697 .get_chars = hvc_beat_get_chars,
27698 .put_chars = hvc_beat_put_chars,
27699 };
27700 diff -urNp linux-2.6.32.45/drivers/char/hvc_console.c linux-2.6.32.45/drivers/char/hvc_console.c
27701 --- linux-2.6.32.45/drivers/char/hvc_console.c 2011-03-27 14:31:47.000000000 -0400
27702 +++ linux-2.6.32.45/drivers/char/hvc_console.c 2011-04-17 15:56:46.000000000 -0400
27703 @@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_ind
27704 * console interfaces but can still be used as a tty device. This has to be
27705 * static because kmalloc will not work during early console init.
27706 */
27707 -static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
27708 +static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
27709 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
27710 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
27711
27712 @@ -247,7 +247,7 @@ static void destroy_hvc_struct(struct kr
27713 * vty adapters do NOT get an hvc_instantiate() callback since they
27714 * appear after early console init.
27715 */
27716 -int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
27717 +int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
27718 {
27719 struct hvc_struct *hp;
27720
27721 @@ -756,7 +756,7 @@ static const struct tty_operations hvc_o
27722 };
27723
27724 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
27725 - struct hv_ops *ops, int outbuf_size)
27726 + const struct hv_ops *ops, int outbuf_size)
27727 {
27728 struct hvc_struct *hp;
27729 int i;
27730 diff -urNp linux-2.6.32.45/drivers/char/hvc_console.h linux-2.6.32.45/drivers/char/hvc_console.h
27731 --- linux-2.6.32.45/drivers/char/hvc_console.h 2011-03-27 14:31:47.000000000 -0400
27732 +++ linux-2.6.32.45/drivers/char/hvc_console.h 2011-04-17 15:56:46.000000000 -0400
27733 @@ -55,7 +55,7 @@ struct hvc_struct {
27734 int outbuf_size;
27735 int n_outbuf;
27736 uint32_t vtermno;
27737 - struct hv_ops *ops;
27738 + const struct hv_ops *ops;
27739 int irq_requested;
27740 int data;
27741 struct winsize ws;
27742 @@ -76,11 +76,11 @@ struct hv_ops {
27743 };
27744
27745 /* Register a vterm and a slot index for use as a console (console_init) */
27746 -extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
27747 +extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
27748
27749 /* register a vterm for hvc tty operation (module_init or hotplug add) */
27750 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
27751 - struct hv_ops *ops, int outbuf_size);
27752 + const struct hv_ops *ops, int outbuf_size);
27753 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
27754 extern int hvc_remove(struct hvc_struct *hp);
27755
27756 diff -urNp linux-2.6.32.45/drivers/char/hvc_iseries.c linux-2.6.32.45/drivers/char/hvc_iseries.c
27757 --- linux-2.6.32.45/drivers/char/hvc_iseries.c 2011-03-27 14:31:47.000000000 -0400
27758 +++ linux-2.6.32.45/drivers/char/hvc_iseries.c 2011-04-17 15:56:46.000000000 -0400
27759 @@ -197,7 +197,7 @@ done:
27760 return sent;
27761 }
27762
27763 -static struct hv_ops hvc_get_put_ops = {
27764 +static const struct hv_ops hvc_get_put_ops = {
27765 .get_chars = get_chars,
27766 .put_chars = put_chars,
27767 .notifier_add = notifier_add_irq,
27768 diff -urNp linux-2.6.32.45/drivers/char/hvc_iucv.c linux-2.6.32.45/drivers/char/hvc_iucv.c
27769 --- linux-2.6.32.45/drivers/char/hvc_iucv.c 2011-03-27 14:31:47.000000000 -0400
27770 +++ linux-2.6.32.45/drivers/char/hvc_iucv.c 2011-04-17 15:56:46.000000000 -0400
27771 @@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(stru
27772
27773
27774 /* HVC operations */
27775 -static struct hv_ops hvc_iucv_ops = {
27776 +static const struct hv_ops hvc_iucv_ops = {
27777 .get_chars = hvc_iucv_get_chars,
27778 .put_chars = hvc_iucv_put_chars,
27779 .notifier_add = hvc_iucv_notifier_add,
27780 diff -urNp linux-2.6.32.45/drivers/char/hvc_rtas.c linux-2.6.32.45/drivers/char/hvc_rtas.c
27781 --- linux-2.6.32.45/drivers/char/hvc_rtas.c 2011-03-27 14:31:47.000000000 -0400
27782 +++ linux-2.6.32.45/drivers/char/hvc_rtas.c 2011-04-17 15:56:46.000000000 -0400
27783 @@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_
27784 return i;
27785 }
27786
27787 -static struct hv_ops hvc_rtas_get_put_ops = {
27788 +static const struct hv_ops hvc_rtas_get_put_ops = {
27789 .get_chars = hvc_rtas_read_console,
27790 .put_chars = hvc_rtas_write_console,
27791 };
27792 diff -urNp linux-2.6.32.45/drivers/char/hvcs.c linux-2.6.32.45/drivers/char/hvcs.c
27793 --- linux-2.6.32.45/drivers/char/hvcs.c 2011-03-27 14:31:47.000000000 -0400
27794 +++ linux-2.6.32.45/drivers/char/hvcs.c 2011-04-17 15:56:46.000000000 -0400
27795 @@ -82,6 +82,7 @@
27796 #include <asm/hvcserver.h>
27797 #include <asm/uaccess.h>
27798 #include <asm/vio.h>
27799 +#include <asm/local.h>
27800
27801 /*
27802 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
27803 @@ -269,7 +270,7 @@ struct hvcs_struct {
27804 unsigned int index;
27805
27806 struct tty_struct *tty;
27807 - int open_count;
27808 + local_t open_count;
27809
27810 /*
27811 * Used to tell the driver kernel_thread what operations need to take
27812 @@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(st
27813
27814 spin_lock_irqsave(&hvcsd->lock, flags);
27815
27816 - if (hvcsd->open_count > 0) {
27817 + if (local_read(&hvcsd->open_count) > 0) {
27818 spin_unlock_irqrestore(&hvcsd->lock, flags);
27819 printk(KERN_INFO "HVCS: vterm state unchanged. "
27820 "The hvcs device node is still in use.\n");
27821 @@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *
27822 if ((retval = hvcs_partner_connect(hvcsd)))
27823 goto error_release;
27824
27825 - hvcsd->open_count = 1;
27826 + local_set(&hvcsd->open_count, 1);
27827 hvcsd->tty = tty;
27828 tty->driver_data = hvcsd;
27829
27830 @@ -1169,7 +1170,7 @@ fast_open:
27831
27832 spin_lock_irqsave(&hvcsd->lock, flags);
27833 kref_get(&hvcsd->kref);
27834 - hvcsd->open_count++;
27835 + local_inc(&hvcsd->open_count);
27836 hvcsd->todo_mask |= HVCS_SCHED_READ;
27837 spin_unlock_irqrestore(&hvcsd->lock, flags);
27838
27839 @@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct
27840 hvcsd = tty->driver_data;
27841
27842 spin_lock_irqsave(&hvcsd->lock, flags);
27843 - if (--hvcsd->open_count == 0) {
27844 + if (local_dec_and_test(&hvcsd->open_count)) {
27845
27846 vio_disable_interrupts(hvcsd->vdev);
27847
27848 @@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct
27849 free_irq(irq, hvcsd);
27850 kref_put(&hvcsd->kref, destroy_hvcs_struct);
27851 return;
27852 - } else if (hvcsd->open_count < 0) {
27853 + } else if (local_read(&hvcsd->open_count) < 0) {
27854 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
27855 " is missmanaged.\n",
27856 - hvcsd->vdev->unit_address, hvcsd->open_count);
27857 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
27858 }
27859
27860 spin_unlock_irqrestore(&hvcsd->lock, flags);
27861 @@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struc
27862
27863 spin_lock_irqsave(&hvcsd->lock, flags);
27864 /* Preserve this so that we know how many kref refs to put */
27865 - temp_open_count = hvcsd->open_count;
27866 + temp_open_count = local_read(&hvcsd->open_count);
27867
27868 /*
27869 * Don't kref put inside the spinlock because the destruction
27870 @@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struc
27871 hvcsd->tty->driver_data = NULL;
27872 hvcsd->tty = NULL;
27873
27874 - hvcsd->open_count = 0;
27875 + local_set(&hvcsd->open_count, 0);
27876
27877 /* This will drop any buffered data on the floor which is OK in a hangup
27878 * scenario. */
27879 @@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct
27880 * the middle of a write operation? This is a crummy place to do this
27881 * but we want to keep it all in the spinlock.
27882 */
27883 - if (hvcsd->open_count <= 0) {
27884 + if (local_read(&hvcsd->open_count) <= 0) {
27885 spin_unlock_irqrestore(&hvcsd->lock, flags);
27886 return -ENODEV;
27887 }
27888 @@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_st
27889 {
27890 struct hvcs_struct *hvcsd = tty->driver_data;
27891
27892 - if (!hvcsd || hvcsd->open_count <= 0)
27893 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
27894 return 0;
27895
27896 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
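
The hvcs hunks above convert a plain int open_count into a local_t updated through local_read()/local_inc()/local_dec_and_test(), so each reference-count update is a single atomic operation rather than a racy read-modify-write. A minimal userspace analogue of that open/close counting, using C11 atomics instead of the kernel's local_t API; all names below are invented for illustration:

    #include <stdatomic.h>
    #include <stdio.h>

    struct dev {
        atomic_int open_count;   /* stands in for local_t open_count */
    };

    /* open: first opener initializes, later openers just take a reference */
    static void dev_open(struct dev *d)
    {
        if (atomic_fetch_add(&d->open_count, 1) == 0)
            puts("first open: initialize hardware");
    }

    /* close: mirrors local_dec_and_test(), tearing down only when the last
     * reference goes away, never on a stale read of the counter */
    static void dev_close(struct dev *d)
    {
        if (atomic_fetch_sub(&d->open_count, 1) == 1)
            puts("last close: tear down hardware");
    }

    int main(void)
    {
        struct dev d;
        atomic_init(&d.open_count, 0);
        dev_open(&d);
        dev_open(&d);
        dev_close(&d);
        dev_close(&d);
        return 0;
    }
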
27897 diff -urNp linux-2.6.32.45/drivers/char/hvc_udbg.c linux-2.6.32.45/drivers/char/hvc_udbg.c
27898 --- linux-2.6.32.45/drivers/char/hvc_udbg.c 2011-03-27 14:31:47.000000000 -0400
27899 +++ linux-2.6.32.45/drivers/char/hvc_udbg.c 2011-04-17 15:56:46.000000000 -0400
27900 @@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno
27901 return i;
27902 }
27903
27904 -static struct hv_ops hvc_udbg_ops = {
27905 +static const struct hv_ops hvc_udbg_ops = {
27906 .get_chars = hvc_udbg_get,
27907 .put_chars = hvc_udbg_put,
27908 };
27909 diff -urNp linux-2.6.32.45/drivers/char/hvc_vio.c linux-2.6.32.45/drivers/char/hvc_vio.c
27910 --- linux-2.6.32.45/drivers/char/hvc_vio.c 2011-03-27 14:31:47.000000000 -0400
27911 +++ linux-2.6.32.45/drivers/char/hvc_vio.c 2011-04-17 15:56:46.000000000 -0400
27912 @@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t v
27913 return got;
27914 }
27915
27916 -static struct hv_ops hvc_get_put_ops = {
27917 +static const struct hv_ops hvc_get_put_ops = {
27918 .get_chars = filtered_get_chars,
27919 .put_chars = hvc_put_chars,
27920 .notifier_add = notifier_add_irq,
27921 diff -urNp linux-2.6.32.45/drivers/char/hvc_xen.c linux-2.6.32.45/drivers/char/hvc_xen.c
27922 --- linux-2.6.32.45/drivers/char/hvc_xen.c 2011-03-27 14:31:47.000000000 -0400
27923 +++ linux-2.6.32.45/drivers/char/hvc_xen.c 2011-04-17 15:56:46.000000000 -0400
27924 @@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno
27925 return recv;
27926 }
27927
27928 -static struct hv_ops hvc_ops = {
27929 +static const struct hv_ops hvc_ops = {
27930 .get_chars = read_console,
27931 .put_chars = write_console,
27932 .notifier_add = notifier_add_irq,
27933 diff -urNp linux-2.6.32.45/drivers/char/ipmi/ipmi_msghandler.c linux-2.6.32.45/drivers/char/ipmi/ipmi_msghandler.c
27934 --- linux-2.6.32.45/drivers/char/ipmi/ipmi_msghandler.c 2011-03-27 14:31:47.000000000 -0400
27935 +++ linux-2.6.32.45/drivers/char/ipmi/ipmi_msghandler.c 2011-05-16 21:46:57.000000000 -0400
27936 @@ -414,7 +414,7 @@ struct ipmi_smi {
27937 struct proc_dir_entry *proc_dir;
27938 char proc_dir_name[10];
27939
27940 - atomic_t stats[IPMI_NUM_STATS];
27941 + atomic_unchecked_t stats[IPMI_NUM_STATS];
27942
27943 /*
27944 * run_to_completion duplicate of smb_info, smi_info
27945 @@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
27946
27947
27948 #define ipmi_inc_stat(intf, stat) \
27949 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
27950 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
27951 #define ipmi_get_stat(intf, stat) \
27952 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
27953 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
27954
27955 static int is_lan_addr(struct ipmi_addr *addr)
27956 {
27957 @@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
27958 INIT_LIST_HEAD(&intf->cmd_rcvrs);
27959 init_waitqueue_head(&intf->waitq);
27960 for (i = 0; i < IPMI_NUM_STATS; i++)
27961 - atomic_set(&intf->stats[i], 0);
27962 + atomic_set_unchecked(&intf->stats[i], 0);
27963
27964 intf->proc_dir = NULL;
27965
27966 @@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
27967 struct ipmi_smi_msg smi_msg;
27968 struct ipmi_recv_msg recv_msg;
27969
27970 + pax_track_stack();
27971 +
27972 si = (struct ipmi_system_interface_addr *) &addr;
27973 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
27974 si->channel = IPMI_BMC_CHANNEL;
27975 diff -urNp linux-2.6.32.45/drivers/char/ipmi/ipmi_si_intf.c linux-2.6.32.45/drivers/char/ipmi/ipmi_si_intf.c
27976 --- linux-2.6.32.45/drivers/char/ipmi/ipmi_si_intf.c 2011-03-27 14:31:47.000000000 -0400
27977 +++ linux-2.6.32.45/drivers/char/ipmi/ipmi_si_intf.c 2011-04-17 15:56:46.000000000 -0400
27978 @@ -277,7 +277,7 @@ struct smi_info {
27979 unsigned char slave_addr;
27980
27981 /* Counters and things for the proc filesystem. */
27982 - atomic_t stats[SI_NUM_STATS];
27983 + atomic_unchecked_t stats[SI_NUM_STATS];
27984
27985 struct task_struct *thread;
27986
27987 @@ -285,9 +285,9 @@ struct smi_info {
27988 };
27989
27990 #define smi_inc_stat(smi, stat) \
27991 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
27992 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
27993 #define smi_get_stat(smi, stat) \
27994 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
27995 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
27996
27997 #define SI_MAX_PARMS 4
27998
27999 @@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info
28000 atomic_set(&new_smi->req_events, 0);
28001 new_smi->run_to_completion = 0;
28002 for (i = 0; i < SI_NUM_STATS; i++)
28003 - atomic_set(&new_smi->stats[i], 0);
28004 + atomic_set_unchecked(&new_smi->stats[i], 0);
28005
28006 new_smi->interrupt_disabled = 0;
28007 atomic_set(&new_smi->stop_operation, 0);
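
The ipmi hunks switch pure statistics arrays from atomic_t to atomic_unchecked_t: under the PaX REFCOUNT hardening, ordinary atomic_t increments are overflow-checked, and counters that are mere statistics, where wrapping is harmless, are opted out through the _unchecked variants. A rough userspace sketch of that checked/unchecked distinction, assuming GCC/Clang's __builtin_add_overflow; the kernel types and macros themselves are not reproduced here:

    #include <limits.h>
    #include <stdio.h>

    /* "checked" increment: refuse to wrap, as a reference count must never do */
    static int inc_checked(int *v)
    {
        int next;
        if (__builtin_add_overflow(*v, 1, &next))
            return -1;          /* would overflow: report instead of wrapping */
        *v = next;
        return 0;
    }

    /* "unchecked" increment: wrapping is acceptable for a pure statistic */
    static void inc_unchecked(unsigned int *v)
    {
        (*v)++;
    }

    int main(void)
    {
        int refs = INT_MAX;
        unsigned int stat = 0;
        if (inc_checked(&refs) < 0)
            puts("refcount increment rejected: would overflow");
        inc_unchecked(&stat);
        printf("stat = %u\n", stat);
        return 0;
    }
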
28008 diff -urNp linux-2.6.32.45/drivers/char/istallion.c linux-2.6.32.45/drivers/char/istallion.c
28009 --- linux-2.6.32.45/drivers/char/istallion.c 2011-03-27 14:31:47.000000000 -0400
28010 +++ linux-2.6.32.45/drivers/char/istallion.c 2011-05-16 21:46:57.000000000 -0400
28011 @@ -187,7 +187,6 @@ static struct ktermios stli_deftermios
28012 * re-used for each stats call.
28013 */
28014 static comstats_t stli_comstats;
28015 -static combrd_t stli_brdstats;
28016 static struct asystats stli_cdkstats;
28017
28018 /*****************************************************************************/
28019 @@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __u
28020 {
28021 struct stlibrd *brdp;
28022 unsigned int i;
28023 + combrd_t stli_brdstats;
28024
28025 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
28026 return -EFAULT;
28027 @@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stl
28028 struct stliport stli_dummyport;
28029 struct stliport *portp;
28030
28031 + pax_track_stack();
28032 +
28033 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
28034 return -EFAULT;
28035 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
28036 @@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stli
28037 struct stlibrd stli_dummybrd;
28038 struct stlibrd *brdp;
28039
28040 + pax_track_stack();
28041 +
28042 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
28043 return -EFAULT;
28044 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
28045 diff -urNp linux-2.6.32.45/drivers/char/Kconfig linux-2.6.32.45/drivers/char/Kconfig
28046 --- linux-2.6.32.45/drivers/char/Kconfig 2011-03-27 14:31:47.000000000 -0400
28047 +++ linux-2.6.32.45/drivers/char/Kconfig 2011-04-18 19:20:15.000000000 -0400
28048 @@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
28049
28050 config DEVKMEM
28051 bool "/dev/kmem virtual device support"
28052 - default y
28053 + default n
28054 + depends on !GRKERNSEC_KMEM
28055 help
28056 Say Y here if you want to support the /dev/kmem device. The
28057 /dev/kmem device is rarely used, but can be used for certain
28058 @@ -1114,6 +1115,7 @@ config DEVPORT
28059 bool
28060 depends on !M68K
28061 depends on ISA || PCI
28062 + depends on !GRKERNSEC_KMEM
28063 default y
28064
28065 source "drivers/s390/char/Kconfig"
28066 diff -urNp linux-2.6.32.45/drivers/char/keyboard.c linux-2.6.32.45/drivers/char/keyboard.c
28067 --- linux-2.6.32.45/drivers/char/keyboard.c 2011-03-27 14:31:47.000000000 -0400
28068 +++ linux-2.6.32.45/drivers/char/keyboard.c 2011-04-17 15:56:46.000000000 -0400
28069 @@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, u
28070 kbd->kbdmode == VC_MEDIUMRAW) &&
28071 value != KVAL(K_SAK))
28072 return; /* SAK is allowed even in raw mode */
28073 +
28074 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
28075 + {
28076 + void *func = fn_handler[value];
28077 + if (func == fn_show_state || func == fn_show_ptregs ||
28078 + func == fn_show_mem)
28079 + return;
28080 + }
28081 +#endif
28082 +
28083 fn_handler[value](vc);
28084 }
28085
28086 @@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_
28087 .evbit = { BIT_MASK(EV_SND) },
28088 },
28089
28090 - { }, /* Terminating entry */
28091 + { 0 }, /* Terminating entry */
28092 };
28093
28094 MODULE_DEVICE_TABLE(input, kbd_ids);
28095 diff -urNp linux-2.6.32.45/drivers/char/mem.c linux-2.6.32.45/drivers/char/mem.c
28096 --- linux-2.6.32.45/drivers/char/mem.c 2011-03-27 14:31:47.000000000 -0400
28097 +++ linux-2.6.32.45/drivers/char/mem.c 2011-04-17 15:56:46.000000000 -0400
28098 @@ -18,6 +18,7 @@
28099 #include <linux/raw.h>
28100 #include <linux/tty.h>
28101 #include <linux/capability.h>
28102 +#include <linux/security.h>
28103 #include <linux/ptrace.h>
28104 #include <linux/device.h>
28105 #include <linux/highmem.h>
28106 @@ -35,6 +36,10 @@
28107 # include <linux/efi.h>
28108 #endif
28109
28110 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
28111 +extern struct file_operations grsec_fops;
28112 +#endif
28113 +
28114 static inline unsigned long size_inside_page(unsigned long start,
28115 unsigned long size)
28116 {
28117 @@ -102,9 +107,13 @@ static inline int range_is_allowed(unsig
28118
28119 while (cursor < to) {
28120 if (!devmem_is_allowed(pfn)) {
28121 +#ifdef CONFIG_GRKERNSEC_KMEM
28122 + gr_handle_mem_readwrite(from, to);
28123 +#else
28124 printk(KERN_INFO
28125 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
28126 current->comm, from, to);
28127 +#endif
28128 return 0;
28129 }
28130 cursor += PAGE_SIZE;
28131 @@ -112,6 +121,11 @@ static inline int range_is_allowed(unsig
28132 }
28133 return 1;
28134 }
28135 +#elif defined(CONFIG_GRKERNSEC_KMEM)
28136 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
28137 +{
28138 + return 0;
28139 +}
28140 #else
28141 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
28142 {
28143 @@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * fi
28144 #endif
28145
28146 while (count > 0) {
28147 + char *temp;
28148 +
28149 /*
28150 * Handle first page in case it's not aligned
28151 */
28152 @@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * fi
28153 if (!ptr)
28154 return -EFAULT;
28155
28156 - if (copy_to_user(buf, ptr, sz)) {
28157 +#ifdef CONFIG_PAX_USERCOPY
28158 + temp = kmalloc(sz, GFP_KERNEL);
28159 + if (!temp) {
28160 + unxlate_dev_mem_ptr(p, ptr);
28161 + return -ENOMEM;
28162 + }
28163 + memcpy(temp, ptr, sz);
28164 +#else
28165 + temp = ptr;
28166 +#endif
28167 +
28168 + if (copy_to_user(buf, temp, sz)) {
28169 +
28170 +#ifdef CONFIG_PAX_USERCOPY
28171 + kfree(temp);
28172 +#endif
28173 +
28174 unxlate_dev_mem_ptr(p, ptr);
28175 return -EFAULT;
28176 }
28177
28178 +#ifdef CONFIG_PAX_USERCOPY
28179 + kfree(temp);
28180 +#endif
28181 +
28182 unxlate_dev_mem_ptr(p, ptr);
28183
28184 buf += sz;
28185 @@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *fi
28186 size_t count, loff_t *ppos)
28187 {
28188 unsigned long p = *ppos;
28189 - ssize_t low_count, read, sz;
28190 + ssize_t low_count, read, sz, err = 0;
28191 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
28192 - int err = 0;
28193
28194 read = 0;
28195 if (p < (unsigned long) high_memory) {
28196 @@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *fi
28197 }
28198 #endif
28199 while (low_count > 0) {
28200 + char *temp;
28201 +
28202 sz = size_inside_page(p, low_count);
28203
28204 /*
28205 @@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *fi
28206 */
28207 kbuf = xlate_dev_kmem_ptr((char *)p);
28208
28209 - if (copy_to_user(buf, kbuf, sz))
28210 +#ifdef CONFIG_PAX_USERCOPY
28211 + temp = kmalloc(sz, GFP_KERNEL);
28212 + if (!temp)
28213 + return -ENOMEM;
28214 + memcpy(temp, kbuf, sz);
28215 +#else
28216 + temp = kbuf;
28217 +#endif
28218 +
28219 + err = copy_to_user(buf, temp, sz);
28220 +
28221 +#ifdef CONFIG_PAX_USERCOPY
28222 + kfree(temp);
28223 +#endif
28224 +
28225 + if (err)
28226 return -EFAULT;
28227 buf += sz;
28228 p += sz;
28229 @@ -889,6 +941,9 @@ static const struct memdev {
28230 #ifdef CONFIG_CRASH_DUMP
28231 [12] = { "oldmem", 0, &oldmem_fops, NULL },
28232 #endif
28233 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
28234 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
28235 +#endif
28236 };
28237
28238 static int memory_open(struct inode *inode, struct file *filp)
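
In the mem.c hunks, the CONFIG_PAX_USERCOPY branches interpose a bounce buffer: the data is first copied into a kmalloc'd buffer of exactly sz bytes and only that buffer is passed to copy_to_user(), so the usercopy checker always sees a heap object whose size matches the copy length. A hedged userspace sketch of the same bounce-buffer idea, with malloc/memcpy standing in for kmalloc/copy_to_user:

    #include <stdlib.h>
    #include <string.h>

    /* Copy sz bytes from an arbitrary source region to dst via an exact-size
     * temporary, mirroring the CONFIG_PAX_USERCOPY branch above: the consumer
     * only ever sees a heap object whose length equals the copy. */
    static int bounce_copy(void *dst, const void *src, size_t sz)
    {
        char *temp = malloc(sz);
        if (!temp)
            return -1;                 /* -ENOMEM in the kernel version */
        memcpy(temp, src, sz);
        memcpy(dst, temp, sz);         /* copy_to_user() in the kernel version */
        free(temp);
        return 0;
    }

    int main(void)
    {
        char src[8] = "abcdefg", dst[8];
        return bounce_copy(dst, src, sizeof(src));
    }
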
28239 diff -urNp linux-2.6.32.45/drivers/char/pcmcia/ipwireless/tty.c linux-2.6.32.45/drivers/char/pcmcia/ipwireless/tty.c
28240 --- linux-2.6.32.45/drivers/char/pcmcia/ipwireless/tty.c 2011-03-27 14:31:47.000000000 -0400
28241 +++ linux-2.6.32.45/drivers/char/pcmcia/ipwireless/tty.c 2011-04-17 15:56:46.000000000 -0400
28242 @@ -29,6 +29,7 @@
28243 #include <linux/tty_driver.h>
28244 #include <linux/tty_flip.h>
28245 #include <linux/uaccess.h>
28246 +#include <asm/local.h>
28247
28248 #include "tty.h"
28249 #include "network.h"
28250 @@ -51,7 +52,7 @@ struct ipw_tty {
28251 int tty_type;
28252 struct ipw_network *network;
28253 struct tty_struct *linux_tty;
28254 - int open_count;
28255 + local_t open_count;
28256 unsigned int control_lines;
28257 struct mutex ipw_tty_mutex;
28258 int tx_bytes_queued;
28259 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
28260 mutex_unlock(&tty->ipw_tty_mutex);
28261 return -ENODEV;
28262 }
28263 - if (tty->open_count == 0)
28264 + if (local_read(&tty->open_count) == 0)
28265 tty->tx_bytes_queued = 0;
28266
28267 - tty->open_count++;
28268 + local_inc(&tty->open_count);
28269
28270 tty->linux_tty = linux_tty;
28271 linux_tty->driver_data = tty;
28272 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
28273
28274 static void do_ipw_close(struct ipw_tty *tty)
28275 {
28276 - tty->open_count--;
28277 -
28278 - if (tty->open_count == 0) {
28279 + if (local_dec_return(&tty->open_count) == 0) {
28280 struct tty_struct *linux_tty = tty->linux_tty;
28281
28282 if (linux_tty != NULL) {
28283 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
28284 return;
28285
28286 mutex_lock(&tty->ipw_tty_mutex);
28287 - if (tty->open_count == 0) {
28288 + if (local_read(&tty->open_count) == 0) {
28289 mutex_unlock(&tty->ipw_tty_mutex);
28290 return;
28291 }
28292 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
28293 return;
28294 }
28295
28296 - if (!tty->open_count) {
28297 + if (!local_read(&tty->open_count)) {
28298 mutex_unlock(&tty->ipw_tty_mutex);
28299 return;
28300 }
28301 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
28302 return -ENODEV;
28303
28304 mutex_lock(&tty->ipw_tty_mutex);
28305 - if (!tty->open_count) {
28306 + if (!local_read(&tty->open_count)) {
28307 mutex_unlock(&tty->ipw_tty_mutex);
28308 return -EINVAL;
28309 }
28310 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
28311 if (!tty)
28312 return -ENODEV;
28313
28314 - if (!tty->open_count)
28315 + if (!local_read(&tty->open_count))
28316 return -EINVAL;
28317
28318 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
28319 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
28320 if (!tty)
28321 return 0;
28322
28323 - if (!tty->open_count)
28324 + if (!local_read(&tty->open_count))
28325 return 0;
28326
28327 return tty->tx_bytes_queued;
28328 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
28329 if (!tty)
28330 return -ENODEV;
28331
28332 - if (!tty->open_count)
28333 + if (!local_read(&tty->open_count))
28334 return -EINVAL;
28335
28336 return get_control_lines(tty);
28337 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
28338 if (!tty)
28339 return -ENODEV;
28340
28341 - if (!tty->open_count)
28342 + if (!local_read(&tty->open_count))
28343 return -EINVAL;
28344
28345 return set_control_lines(tty, set, clear);
28346 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
28347 if (!tty)
28348 return -ENODEV;
28349
28350 - if (!tty->open_count)
28351 + if (!local_read(&tty->open_count))
28352 return -EINVAL;
28353
28354 /* FIXME: Exactly how is the tty object locked here .. */
28355 @@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty
28356 against a parallel ioctl etc */
28357 mutex_lock(&ttyj->ipw_tty_mutex);
28358 }
28359 - while (ttyj->open_count)
28360 + while (local_read(&ttyj->open_count))
28361 do_ipw_close(ttyj);
28362 ipwireless_disassociate_network_ttys(network,
28363 ttyj->channel_idx);
28364 diff -urNp linux-2.6.32.45/drivers/char/pty.c linux-2.6.32.45/drivers/char/pty.c
28365 --- linux-2.6.32.45/drivers/char/pty.c 2011-03-27 14:31:47.000000000 -0400
28366 +++ linux-2.6.32.45/drivers/char/pty.c 2011-08-05 20:33:55.000000000 -0400
28367 @@ -736,8 +736,10 @@ static void __init unix98_pty_init(void)
28368 register_sysctl_table(pty_root_table);
28369
28370 /* Now create the /dev/ptmx special device */
28371 + pax_open_kernel();
28372 tty_default_fops(&ptmx_fops);
28373 - ptmx_fops.open = ptmx_open;
28374 + *(void **)&ptmx_fops.open = ptmx_open;
28375 + pax_close_kernel();
28376
28377 cdev_init(&ptmx_cdev, &ptmx_fops);
28378 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
28379 diff -urNp linux-2.6.32.45/drivers/char/random.c linux-2.6.32.45/drivers/char/random.c
28380 --- linux-2.6.32.45/drivers/char/random.c 2011-08-16 20:37:25.000000000 -0400
28381 +++ linux-2.6.32.45/drivers/char/random.c 2011-08-16 20:43:23.000000000 -0400
28382 @@ -254,8 +254,13 @@
28383 /*
28384 * Configuration information
28385 */
28386 +#ifdef CONFIG_GRKERNSEC_RANDNET
28387 +#define INPUT_POOL_WORDS 512
28388 +#define OUTPUT_POOL_WORDS 128
28389 +#else
28390 #define INPUT_POOL_WORDS 128
28391 #define OUTPUT_POOL_WORDS 32
28392 +#endif
28393 #define SEC_XFER_SIZE 512
28394
28395 /*
28396 @@ -292,10 +297,17 @@ static struct poolinfo {
28397 int poolwords;
28398 int tap1, tap2, tap3, tap4, tap5;
28399 } poolinfo_table[] = {
28400 +#ifdef CONFIG_GRKERNSEC_RANDNET
28401 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
28402 + { 512, 411, 308, 208, 104, 1 },
28403 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
28404 + { 128, 103, 76, 51, 25, 1 },
28405 +#else
28406 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
28407 { 128, 103, 76, 51, 25, 1 },
28408 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
28409 { 32, 26, 20, 14, 7, 1 },
28410 +#endif
28411 #if 0
28412 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
28413 { 2048, 1638, 1231, 819, 411, 1 },
28414 @@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
28415 #include <linux/sysctl.h>
28416
28417 static int min_read_thresh = 8, min_write_thresh;
28418 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
28419 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
28420 static int max_write_thresh = INPUT_POOL_WORDS * 32;
28421 static char sysctl_bootid[16];
28422
28423 diff -urNp linux-2.6.32.45/drivers/char/rocket.c linux-2.6.32.45/drivers/char/rocket.c
28424 --- linux-2.6.32.45/drivers/char/rocket.c 2011-03-27 14:31:47.000000000 -0400
28425 +++ linux-2.6.32.45/drivers/char/rocket.c 2011-05-16 21:46:57.000000000 -0400
28426 @@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info
28427 struct rocket_ports tmp;
28428 int board;
28429
28430 + pax_track_stack();
28431 +
28432 if (!retports)
28433 return -EFAULT;
28434 memset(&tmp, 0, sizeof (tmp));
28435 diff -urNp linux-2.6.32.45/drivers/char/sonypi.c linux-2.6.32.45/drivers/char/sonypi.c
28436 --- linux-2.6.32.45/drivers/char/sonypi.c 2011-03-27 14:31:47.000000000 -0400
28437 +++ linux-2.6.32.45/drivers/char/sonypi.c 2011-04-17 15:56:46.000000000 -0400
28438 @@ -55,6 +55,7 @@
28439 #include <asm/uaccess.h>
28440 #include <asm/io.h>
28441 #include <asm/system.h>
28442 +#include <asm/local.h>
28443
28444 #include <linux/sonypi.h>
28445
28446 @@ -491,7 +492,7 @@ static struct sonypi_device {
28447 spinlock_t fifo_lock;
28448 wait_queue_head_t fifo_proc_list;
28449 struct fasync_struct *fifo_async;
28450 - int open_count;
28451 + local_t open_count;
28452 int model;
28453 struct input_dev *input_jog_dev;
28454 struct input_dev *input_key_dev;
28455 @@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, st
28456 static int sonypi_misc_release(struct inode *inode, struct file *file)
28457 {
28458 mutex_lock(&sonypi_device.lock);
28459 - sonypi_device.open_count--;
28460 + local_dec(&sonypi_device.open_count);
28461 mutex_unlock(&sonypi_device.lock);
28462 return 0;
28463 }
28464 @@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode
28465 lock_kernel();
28466 mutex_lock(&sonypi_device.lock);
28467 /* Flush input queue on first open */
28468 - if (!sonypi_device.open_count)
28469 + if (!local_read(&sonypi_device.open_count))
28470 kfifo_reset(sonypi_device.fifo);
28471 - sonypi_device.open_count++;
28472 + local_inc(&sonypi_device.open_count);
28473 mutex_unlock(&sonypi_device.lock);
28474 unlock_kernel();
28475 return 0;
28476 diff -urNp linux-2.6.32.45/drivers/char/stallion.c linux-2.6.32.45/drivers/char/stallion.c
28477 --- linux-2.6.32.45/drivers/char/stallion.c 2011-03-27 14:31:47.000000000 -0400
28478 +++ linux-2.6.32.45/drivers/char/stallion.c 2011-05-16 21:46:57.000000000 -0400
28479 @@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlp
28480 struct stlport stl_dummyport;
28481 struct stlport *portp;
28482
28483 + pax_track_stack();
28484 +
28485 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
28486 return -EFAULT;
28487 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
28488 diff -urNp linux-2.6.32.45/drivers/char/tpm/tpm_bios.c linux-2.6.32.45/drivers/char/tpm/tpm_bios.c
28489 --- linux-2.6.32.45/drivers/char/tpm/tpm_bios.c 2011-03-27 14:31:47.000000000 -0400
28490 +++ linux-2.6.32.45/drivers/char/tpm/tpm_bios.c 2011-04-17 15:56:46.000000000 -0400
28491 @@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start
28492 event = addr;
28493
28494 if ((event->event_type == 0 && event->event_size == 0) ||
28495 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
28496 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
28497 return NULL;
28498
28499 return addr;
28500 @@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(
28501 return NULL;
28502
28503 if ((event->event_type == 0 && event->event_size == 0) ||
28504 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
28505 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
28506 return NULL;
28507
28508 (*pos)++;
28509 @@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_
28510 int i;
28511
28512 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
28513 - seq_putc(m, data[i]);
28514 + if (!seq_putc(m, data[i]))
28515 + return -EFAULT;
28516
28517 return 0;
28518 }
28519 @@ -409,6 +410,11 @@ static int read_log(struct tpm_bios_log
28520 log->bios_event_log_end = log->bios_event_log + len;
28521
28522 virt = acpi_os_map_memory(start, len);
28523 + if (!virt) {
28524 + kfree(log->bios_event_log);
28525 + log->bios_event_log = NULL;
28526 + return -EFAULT;
28527 + }
28528
28529 memcpy(log->bios_event_log, virt, len);
28530
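
The tpm_bios hunks rewrite the end-of-log tests so that the untrusted event_size is compared against the room remaining before limit, instead of being added to the cursor first; the original form can wrap on a huge event_size and still pass the comparison. The sketch below reproduces that difference in userspace; HDR_SIZE and the addresses are made-up stand-ins for sizeof(struct tcpa_event) and the real pointers:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define HDR_SIZE 32u   /* stands in for sizeof(struct tcpa_event) */

    /* Unsafe: cursor + HDR_SIZE + len may wrap and compare below limit. */
    static bool fits_unsafe(uintptr_t cursor, uintptr_t limit, size_t len)
    {
        return cursor + HDR_SIZE + len < limit;
    }

    /* Safe: compare the untrusted length against the remaining room, which
     * cannot wrap as long as cursor + HDR_SIZE <= limit. */
    static bool fits_safe(uintptr_t cursor, uintptr_t limit, size_t len)
    {
        if (limit - cursor < HDR_SIZE)
            return false;
        return len < limit - cursor - HDR_SIZE;
    }

    int main(void)
    {
        uintptr_t cursor = 0x1000, limit = 0x2000;
        size_t evil = SIZE_MAX - 16;   /* attacker-controlled event size */
        printf("unsafe says %d, safe says %d\n",
               fits_unsafe(cursor, limit, evil), fits_safe(cursor, limit, evil));
        return 0;
    }
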
28531 diff -urNp linux-2.6.32.45/drivers/char/tpm/tpm.c linux-2.6.32.45/drivers/char/tpm/tpm.c
28532 --- linux-2.6.32.45/drivers/char/tpm/tpm.c 2011-04-17 17:00:52.000000000 -0400
28533 +++ linux-2.6.32.45/drivers/char/tpm/tpm.c 2011-05-16 21:46:57.000000000 -0400
28534 @@ -402,7 +402,7 @@ static ssize_t tpm_transmit(struct tpm_c
28535 chip->vendor.req_complete_val)
28536 goto out_recv;
28537
28538 - if ((status == chip->vendor.req_canceled)) {
28539 + if (status == chip->vendor.req_canceled) {
28540 dev_err(chip->dev, "Operation Canceled\n");
28541 rc = -ECANCELED;
28542 goto out;
28543 @@ -821,6 +821,8 @@ ssize_t tpm_show_pubek(struct device *de
28544
28545 struct tpm_chip *chip = dev_get_drvdata(dev);
28546
28547 + pax_track_stack();
28548 +
28549 tpm_cmd.header.in = tpm_readpubek_header;
28550 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
28551 "attempting to read the PUBEK");
28552 diff -urNp linux-2.6.32.45/drivers/char/tty_io.c linux-2.6.32.45/drivers/char/tty_io.c
28553 --- linux-2.6.32.45/drivers/char/tty_io.c 2011-03-27 14:31:47.000000000 -0400
28554 +++ linux-2.6.32.45/drivers/char/tty_io.c 2011-08-05 20:33:55.000000000 -0400
28555 @@ -2582,8 +2582,10 @@ long tty_ioctl(struct file *file, unsign
28556 return retval;
28557 }
28558
28559 +EXPORT_SYMBOL(tty_ioctl);
28560 +
28561 #ifdef CONFIG_COMPAT
28562 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
28563 +long tty_compat_ioctl(struct file *file, unsigned int cmd,
28564 unsigned long arg)
28565 {
28566 struct inode *inode = file->f_dentry->d_inode;
28567 @@ -2607,6 +2609,8 @@ static long tty_compat_ioctl(struct file
28568
28569 return retval;
28570 }
28571 +
28572 +EXPORT_SYMBOL(tty_compat_ioctl);
28573 #endif
28574
28575 /*
28576 @@ -3052,7 +3056,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
28577
28578 void tty_default_fops(struct file_operations *fops)
28579 {
28580 - *fops = tty_fops;
28581 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
28582 }
28583
28584 /*
28585 diff -urNp linux-2.6.32.45/drivers/char/tty_ldisc.c linux-2.6.32.45/drivers/char/tty_ldisc.c
28586 --- linux-2.6.32.45/drivers/char/tty_ldisc.c 2011-07-13 17:23:04.000000000 -0400
28587 +++ linux-2.6.32.45/drivers/char/tty_ldisc.c 2011-07-13 17:23:18.000000000 -0400
28588 @@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
28589 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
28590 struct tty_ldisc_ops *ldo = ld->ops;
28591
28592 - ldo->refcount--;
28593 + atomic_dec(&ldo->refcount);
28594 module_put(ldo->owner);
28595 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28596
28597 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
28598 spin_lock_irqsave(&tty_ldisc_lock, flags);
28599 tty_ldiscs[disc] = new_ldisc;
28600 new_ldisc->num = disc;
28601 - new_ldisc->refcount = 0;
28602 + atomic_set(&new_ldisc->refcount, 0);
28603 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28604
28605 return ret;
28606 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
28607 return -EINVAL;
28608
28609 spin_lock_irqsave(&tty_ldisc_lock, flags);
28610 - if (tty_ldiscs[disc]->refcount)
28611 + if (atomic_read(&tty_ldiscs[disc]->refcount))
28612 ret = -EBUSY;
28613 else
28614 tty_ldiscs[disc] = NULL;
28615 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
28616 if (ldops) {
28617 ret = ERR_PTR(-EAGAIN);
28618 if (try_module_get(ldops->owner)) {
28619 - ldops->refcount++;
28620 + atomic_inc(&ldops->refcount);
28621 ret = ldops;
28622 }
28623 }
28624 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
28625 unsigned long flags;
28626
28627 spin_lock_irqsave(&tty_ldisc_lock, flags);
28628 - ldops->refcount--;
28629 + atomic_dec(&ldops->refcount);
28630 module_put(ldops->owner);
28631 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28632 }
28633 diff -urNp linux-2.6.32.45/drivers/char/virtio_console.c linux-2.6.32.45/drivers/char/virtio_console.c
28634 --- linux-2.6.32.45/drivers/char/virtio_console.c 2011-03-27 14:31:47.000000000 -0400
28635 +++ linux-2.6.32.45/drivers/char/virtio_console.c 2011-08-05 20:33:55.000000000 -0400
28636 @@ -133,7 +133,9 @@ static int get_chars(u32 vtermno, char *
28637 * virtqueue, so we let the drivers do some boutique early-output thing. */
28638 int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
28639 {
28640 - virtio_cons.put_chars = put_chars;
28641 + pax_open_kernel();
28642 + *(void **)&virtio_cons.put_chars = put_chars;
28643 + pax_close_kernel();
28644 return hvc_instantiate(0, 0, &virtio_cons);
28645 }
28646
28647 @@ -213,11 +215,13 @@ static int __devinit virtcons_probe(stru
28648 out_vq = vqs[1];
28649
28650 /* Start using the new console output. */
28651 - virtio_cons.get_chars = get_chars;
28652 - virtio_cons.put_chars = put_chars;
28653 - virtio_cons.notifier_add = notifier_add_vio;
28654 - virtio_cons.notifier_del = notifier_del_vio;
28655 - virtio_cons.notifier_hangup = notifier_del_vio;
28656 + pax_open_kernel();
28657 + *(void **)&virtio_cons.get_chars = get_chars;
28658 + *(void **)&virtio_cons.put_chars = put_chars;
28659 + *(void **)&virtio_cons.notifier_add = notifier_add_vio;
28660 + *(void **)&virtio_cons.notifier_del = notifier_del_vio;
28661 + *(void **)&virtio_cons.notifier_hangup = notifier_del_vio;
28662 + pax_close_kernel();
28663
28664 /* The first argument of hvc_alloc() is the virtual console number, so
28665 * we use zero. The second argument is the parameter for the
28666 diff -urNp linux-2.6.32.45/drivers/char/vt.c linux-2.6.32.45/drivers/char/vt.c
28667 --- linux-2.6.32.45/drivers/char/vt.c 2011-03-27 14:31:47.000000000 -0400
28668 +++ linux-2.6.32.45/drivers/char/vt.c 2011-04-17 15:56:46.000000000 -0400
28669 @@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
28670
28671 static void notify_write(struct vc_data *vc, unsigned int unicode)
28672 {
28673 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
28674 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
28675 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
28676 }
28677
28678 diff -urNp linux-2.6.32.45/drivers/char/vt_ioctl.c linux-2.6.32.45/drivers/char/vt_ioctl.c
28679 --- linux-2.6.32.45/drivers/char/vt_ioctl.c 2011-03-27 14:31:47.000000000 -0400
28680 +++ linux-2.6.32.45/drivers/char/vt_ioctl.c 2011-04-17 15:56:46.000000000 -0400
28681 @@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
28682 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
28683 return -EFAULT;
28684
28685 - if (!capable(CAP_SYS_TTY_CONFIG))
28686 - perm = 0;
28687 -
28688 switch (cmd) {
28689 case KDGKBENT:
28690 key_map = key_maps[s];
28691 @@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __
28692 val = (i ? K_HOLE : K_NOSUCHMAP);
28693 return put_user(val, &user_kbe->kb_value);
28694 case KDSKBENT:
28695 + if (!capable(CAP_SYS_TTY_CONFIG))
28696 + perm = 0;
28697 +
28698 if (!perm)
28699 return -EPERM;
28700 +
28701 if (!i && v == K_NOSUCHMAP) {
28702 /* deallocate map */
28703 key_map = key_maps[s];
28704 @@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
28705 int i, j, k;
28706 int ret;
28707
28708 - if (!capable(CAP_SYS_TTY_CONFIG))
28709 - perm = 0;
28710 -
28711 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
28712 if (!kbs) {
28713 ret = -ENOMEM;
28714 @@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
28715 kfree(kbs);
28716 return ((p && *p) ? -EOVERFLOW : 0);
28717 case KDSKBSENT:
28718 + if (!capable(CAP_SYS_TTY_CONFIG))
28719 + perm = 0;
28720 +
28721 if (!perm) {
28722 ret = -EPERM;
28723 goto reterr;
28724 diff -urNp linux-2.6.32.45/drivers/cpufreq/cpufreq.c linux-2.6.32.45/drivers/cpufreq/cpufreq.c
28725 --- linux-2.6.32.45/drivers/cpufreq/cpufreq.c 2011-06-25 12:55:34.000000000 -0400
28726 +++ linux-2.6.32.45/drivers/cpufreq/cpufreq.c 2011-06-25 12:56:37.000000000 -0400
28727 @@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct
28728 complete(&policy->kobj_unregister);
28729 }
28730
28731 -static struct sysfs_ops sysfs_ops = {
28732 +static const struct sysfs_ops sysfs_ops = {
28733 .show = show,
28734 .store = store,
28735 };
28736 diff -urNp linux-2.6.32.45/drivers/cpuidle/sysfs.c linux-2.6.32.45/drivers/cpuidle/sysfs.c
28737 --- linux-2.6.32.45/drivers/cpuidle/sysfs.c 2011-03-27 14:31:47.000000000 -0400
28738 +++ linux-2.6.32.45/drivers/cpuidle/sysfs.c 2011-04-17 15:56:46.000000000 -0400
28739 @@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobj
28740 return ret;
28741 }
28742
28743 -static struct sysfs_ops cpuidle_sysfs_ops = {
28744 +static const struct sysfs_ops cpuidle_sysfs_ops = {
28745 .show = cpuidle_show,
28746 .store = cpuidle_store,
28747 };
28748 @@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct
28749 return ret;
28750 }
28751
28752 -static struct sysfs_ops cpuidle_state_sysfs_ops = {
28753 +static const struct sysfs_ops cpuidle_state_sysfs_ops = {
28754 .show = cpuidle_state_show,
28755 };
28756
28757 @@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpui
28758 .release = cpuidle_state_sysfs_release,
28759 };
28760
28761 -static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
28762 +static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
28763 {
28764 kobject_put(&device->kobjs[i]->kobj);
28765 wait_for_completion(&device->kobjs[i]->kobj_unregister);
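
Many hunks in this stretch only add const to sysfs_ops, hv_ops and similar callback tables so they end up in read-only data, and a stray or malicious write to the function pointers faults instead of redirecting the callbacks. A tiny userspace illustration of the same constification; the struct and callback are invented for the example:

    #include <stdio.h>

    struct demo_ops {
        void (*show)(const char *what);
    };

    static void demo_show(const char *what)
    {
        printf("show: %s\n", what);
    }

    /* const places the table in .rodata; the function pointer can no longer
     * be swapped after initialization. */
    static const struct demo_ops demo_ops = {
        .show = demo_show,
    };

    int main(void)
    {
        demo_ops.show("hello");
        /* demo_ops.show = NULL;  <- would now be a compile-time error */
        return 0;
    }
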
28766 diff -urNp linux-2.6.32.45/drivers/crypto/hifn_795x.c linux-2.6.32.45/drivers/crypto/hifn_795x.c
28767 --- linux-2.6.32.45/drivers/crypto/hifn_795x.c 2011-03-27 14:31:47.000000000 -0400
28768 +++ linux-2.6.32.45/drivers/crypto/hifn_795x.c 2011-05-16 21:46:57.000000000 -0400
28769 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
28770 0xCA, 0x34, 0x2B, 0x2E};
28771 struct scatterlist sg;
28772
28773 + pax_track_stack();
28774 +
28775 memset(src, 0, sizeof(src));
28776 memset(ctx.key, 0, sizeof(ctx.key));
28777
28778 diff -urNp linux-2.6.32.45/drivers/crypto/padlock-aes.c linux-2.6.32.45/drivers/crypto/padlock-aes.c
28779 --- linux-2.6.32.45/drivers/crypto/padlock-aes.c 2011-03-27 14:31:47.000000000 -0400
28780 +++ linux-2.6.32.45/drivers/crypto/padlock-aes.c 2011-05-16 21:46:57.000000000 -0400
28781 @@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm
28782 struct crypto_aes_ctx gen_aes;
28783 int cpu;
28784
28785 + pax_track_stack();
28786 +
28787 if (key_len % 8) {
28788 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
28789 return -EINVAL;
28790 diff -urNp linux-2.6.32.45/drivers/dma/ioat/dma.c linux-2.6.32.45/drivers/dma/ioat/dma.c
28791 --- linux-2.6.32.45/drivers/dma/ioat/dma.c 2011-03-27 14:31:47.000000000 -0400
28792 +++ linux-2.6.32.45/drivers/dma/ioat/dma.c 2011-04-17 15:56:46.000000000 -0400
28793 @@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, str
28794 return entry->show(&chan->common, page);
28795 }
28796
28797 -struct sysfs_ops ioat_sysfs_ops = {
28798 +const struct sysfs_ops ioat_sysfs_ops = {
28799 .show = ioat_attr_show,
28800 };
28801
28802 diff -urNp linux-2.6.32.45/drivers/dma/ioat/dma.h linux-2.6.32.45/drivers/dma/ioat/dma.h
28803 --- linux-2.6.32.45/drivers/dma/ioat/dma.h 2011-03-27 14:31:47.000000000 -0400
28804 +++ linux-2.6.32.45/drivers/dma/ioat/dma.h 2011-04-17 15:56:46.000000000 -0400
28805 @@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_c
28806 unsigned long *phys_complete);
28807 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
28808 void ioat_kobject_del(struct ioatdma_device *device);
28809 -extern struct sysfs_ops ioat_sysfs_ops;
28810 +extern const struct sysfs_ops ioat_sysfs_ops;
28811 extern struct ioat_sysfs_entry ioat_version_attr;
28812 extern struct ioat_sysfs_entry ioat_cap_attr;
28813 #endif /* IOATDMA_H */
28814 diff -urNp linux-2.6.32.45/drivers/edac/edac_device_sysfs.c linux-2.6.32.45/drivers/edac/edac_device_sysfs.c
28815 --- linux-2.6.32.45/drivers/edac/edac_device_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28816 +++ linux-2.6.32.45/drivers/edac/edac_device_sysfs.c 2011-04-17 15:56:46.000000000 -0400
28817 @@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(s
28818 }
28819
28820 /* edac_dev file operations for an 'ctl_info' */
28821 -static struct sysfs_ops device_ctl_info_ops = {
28822 +static const struct sysfs_ops device_ctl_info_ops = {
28823 .show = edac_dev_ctl_info_show,
28824 .store = edac_dev_ctl_info_store
28825 };
28826 @@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(s
28827 }
28828
28829 /* edac_dev file operations for an 'instance' */
28830 -static struct sysfs_ops device_instance_ops = {
28831 +static const struct sysfs_ops device_instance_ops = {
28832 .show = edac_dev_instance_show,
28833 .store = edac_dev_instance_store
28834 };
28835 @@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(stru
28836 }
28837
28838 /* edac_dev file operations for a 'block' */
28839 -static struct sysfs_ops device_block_ops = {
28840 +static const struct sysfs_ops device_block_ops = {
28841 .show = edac_dev_block_show,
28842 .store = edac_dev_block_store
28843 };
28844 diff -urNp linux-2.6.32.45/drivers/edac/edac_mc_sysfs.c linux-2.6.32.45/drivers/edac/edac_mc_sysfs.c
28845 --- linux-2.6.32.45/drivers/edac/edac_mc_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28846 +++ linux-2.6.32.45/drivers/edac/edac_mc_sysfs.c 2011-04-17 15:56:46.000000000 -0400
28847 @@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kob
28848 return -EIO;
28849 }
28850
28851 -static struct sysfs_ops csrowfs_ops = {
28852 +static const struct sysfs_ops csrowfs_ops = {
28853 .show = csrowdev_show,
28854 .store = csrowdev_store
28855 };
28856 @@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobje
28857 }
28858
28859 /* Intermediate show/store table */
28860 -static struct sysfs_ops mci_ops = {
28861 +static const struct sysfs_ops mci_ops = {
28862 .show = mcidev_show,
28863 .store = mcidev_store
28864 };
28865 diff -urNp linux-2.6.32.45/drivers/edac/edac_pci_sysfs.c linux-2.6.32.45/drivers/edac/edac_pci_sysfs.c
28866 --- linux-2.6.32.45/drivers/edac/edac_pci_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28867 +++ linux-2.6.32.45/drivers/edac/edac_pci_sysfs.c 2011-05-04 17:56:20.000000000 -0400
28868 @@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log
28869 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
28870 static int edac_pci_poll_msec = 1000; /* one second workq period */
28871
28872 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
28873 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
28874 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
28875 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
28876
28877 static struct kobject *edac_pci_top_main_kobj;
28878 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
28879 @@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(s
28880 }
28881
28882 /* fs_ops table */
28883 -static struct sysfs_ops pci_instance_ops = {
28884 +static const struct sysfs_ops pci_instance_ops = {
28885 .show = edac_pci_instance_show,
28886 .store = edac_pci_instance_store
28887 };
28888 @@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct
28889 return -EIO;
28890 }
28891
28892 -static struct sysfs_ops edac_pci_sysfs_ops = {
28893 +static const struct sysfs_ops edac_pci_sysfs_ops = {
28894 .show = edac_pci_dev_show,
28895 .store = edac_pci_dev_store
28896 };
28897 @@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(str
28898 edac_printk(KERN_CRIT, EDAC_PCI,
28899 "Signaled System Error on %s\n",
28900 pci_name(dev));
28901 - atomic_inc(&pci_nonparity_count);
28902 + atomic_inc_unchecked(&pci_nonparity_count);
28903 }
28904
28905 if (status & (PCI_STATUS_PARITY)) {
28906 @@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(str
28907 "Master Data Parity Error on %s\n",
28908 pci_name(dev));
28909
28910 - atomic_inc(&pci_parity_count);
28911 + atomic_inc_unchecked(&pci_parity_count);
28912 }
28913
28914 if (status & (PCI_STATUS_DETECTED_PARITY)) {
28915 @@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(str
28916 "Detected Parity Error on %s\n",
28917 pci_name(dev));
28918
28919 - atomic_inc(&pci_parity_count);
28920 + atomic_inc_unchecked(&pci_parity_count);
28921 }
28922 }
28923
28924 @@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(str
28925 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
28926 "Signaled System Error on %s\n",
28927 pci_name(dev));
28928 - atomic_inc(&pci_nonparity_count);
28929 + atomic_inc_unchecked(&pci_nonparity_count);
28930 }
28931
28932 if (status & (PCI_STATUS_PARITY)) {
28933 @@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(str
28934 "Master Data Parity Error on "
28935 "%s\n", pci_name(dev));
28936
28937 - atomic_inc(&pci_parity_count);
28938 + atomic_inc_unchecked(&pci_parity_count);
28939 }
28940
28941 if (status & (PCI_STATUS_DETECTED_PARITY)) {
28942 @@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(str
28943 "Detected Parity Error on %s\n",
28944 pci_name(dev));
28945
28946 - atomic_inc(&pci_parity_count);
28947 + atomic_inc_unchecked(&pci_parity_count);
28948 }
28949 }
28950 }
28951 @@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
28952 if (!check_pci_errors)
28953 return;
28954
28955 - before_count = atomic_read(&pci_parity_count);
28956 + before_count = atomic_read_unchecked(&pci_parity_count);
28957
28958 /* scan all PCI devices looking for a Parity Error on devices and
28959 * bridges.
28960 @@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
28961 /* Only if operator has selected panic on PCI Error */
28962 if (edac_pci_get_panic_on_pe()) {
28963 /* If the count is different 'after' from 'before' */
28964 - if (before_count != atomic_read(&pci_parity_count))
28965 + if (before_count != atomic_read_unchecked(&pci_parity_count))
28966 panic("EDAC: PCI Parity Error");
28967 }
28968 }
28969 diff -urNp linux-2.6.32.45/drivers/firewire/core-card.c linux-2.6.32.45/drivers/firewire/core-card.c
28970 --- linux-2.6.32.45/drivers/firewire/core-card.c 2011-03-27 14:31:47.000000000 -0400
28971 +++ linux-2.6.32.45/drivers/firewire/core-card.c 2011-08-23 21:22:32.000000000 -0400
28972 @@ -558,7 +558,7 @@ void fw_card_release(struct kref *kref)
28973
28974 void fw_core_remove_card(struct fw_card *card)
28975 {
28976 - struct fw_card_driver dummy_driver = dummy_driver_template;
28977 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
28978
28979 card->driver->update_phy_reg(card, 4,
28980 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
28981 diff -urNp linux-2.6.32.45/drivers/firewire/core-cdev.c linux-2.6.32.45/drivers/firewire/core-cdev.c
28982 --- linux-2.6.32.45/drivers/firewire/core-cdev.c 2011-03-27 14:31:47.000000000 -0400
28983 +++ linux-2.6.32.45/drivers/firewire/core-cdev.c 2011-04-17 15:56:46.000000000 -0400
28984 @@ -1141,8 +1141,7 @@ static int init_iso_resource(struct clie
28985 int ret;
28986
28987 if ((request->channels == 0 && request->bandwidth == 0) ||
28988 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
28989 - request->bandwidth < 0)
28990 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
28991 return -EINVAL;
28992
28993 r = kmalloc(sizeof(*r), GFP_KERNEL);
28994 diff -urNp linux-2.6.32.45/drivers/firewire/core.h linux-2.6.32.45/drivers/firewire/core.h
28995 --- linux-2.6.32.45/drivers/firewire/core.h 2011-03-27 14:31:47.000000000 -0400
28996 +++ linux-2.6.32.45/drivers/firewire/core.h 2011-08-23 20:24:26.000000000 -0400
28997 @@ -86,6 +86,7 @@ struct fw_card_driver {
28998
28999 int (*stop_iso)(struct fw_iso_context *ctx);
29000 };
29001 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
29002
29003 void fw_card_initialize(struct fw_card *card,
29004 const struct fw_card_driver *driver, struct device *device);
29005 diff -urNp linux-2.6.32.45/drivers/firewire/core-transaction.c linux-2.6.32.45/drivers/firewire/core-transaction.c
29006 --- linux-2.6.32.45/drivers/firewire/core-transaction.c 2011-03-27 14:31:47.000000000 -0400
29007 +++ linux-2.6.32.45/drivers/firewire/core-transaction.c 2011-05-16 21:46:57.000000000 -0400
29008 @@ -36,6 +36,7 @@
29009 #include <linux/string.h>
29010 #include <linux/timer.h>
29011 #include <linux/types.h>
29012 +#include <linux/sched.h>
29013
29014 #include <asm/byteorder.h>
29015
29016 @@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *c
29017 struct transaction_callback_data d;
29018 struct fw_transaction t;
29019
29020 + pax_track_stack();
29021 +
29022 init_completion(&d.done);
29023 d.payload = payload;
29024 fw_send_request(card, &t, tcode, destination_id, generation, speed,
29025 diff -urNp linux-2.6.32.45/drivers/firmware/dmi_scan.c linux-2.6.32.45/drivers/firmware/dmi_scan.c
29026 --- linux-2.6.32.45/drivers/firmware/dmi_scan.c 2011-03-27 14:31:47.000000000 -0400
29027 +++ linux-2.6.32.45/drivers/firmware/dmi_scan.c 2011-04-17 15:56:46.000000000 -0400
29028 @@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
29029 }
29030 }
29031 else {
29032 - /*
29033 - * no iounmap() for that ioremap(); it would be a no-op, but
29034 - * it's so early in setup that sucker gets confused into doing
29035 - * what it shouldn't if we actually call it.
29036 - */
29037 p = dmi_ioremap(0xF0000, 0x10000);
29038 if (p == NULL)
29039 goto error;
29040 diff -urNp linux-2.6.32.45/drivers/firmware/edd.c linux-2.6.32.45/drivers/firmware/edd.c
29041 --- linux-2.6.32.45/drivers/firmware/edd.c 2011-03-27 14:31:47.000000000 -0400
29042 +++ linux-2.6.32.45/drivers/firmware/edd.c 2011-04-17 15:56:46.000000000 -0400
29043 @@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, str
29044 return ret;
29045 }
29046
29047 -static struct sysfs_ops edd_attr_ops = {
29048 +static const struct sysfs_ops edd_attr_ops = {
29049 .show = edd_attr_show,
29050 };
29051
29052 diff -urNp linux-2.6.32.45/drivers/firmware/efivars.c linux-2.6.32.45/drivers/firmware/efivars.c
29053 --- linux-2.6.32.45/drivers/firmware/efivars.c 2011-03-27 14:31:47.000000000 -0400
29054 +++ linux-2.6.32.45/drivers/firmware/efivars.c 2011-04-17 15:56:46.000000000 -0400
29055 @@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct
29056 return ret;
29057 }
29058
29059 -static struct sysfs_ops efivar_attr_ops = {
29060 +static const struct sysfs_ops efivar_attr_ops = {
29061 .show = efivar_attr_show,
29062 .store = efivar_attr_store,
29063 };
29064 diff -urNp linux-2.6.32.45/drivers/firmware/iscsi_ibft.c linux-2.6.32.45/drivers/firmware/iscsi_ibft.c
29065 --- linux-2.6.32.45/drivers/firmware/iscsi_ibft.c 2011-03-27 14:31:47.000000000 -0400
29066 +++ linux-2.6.32.45/drivers/firmware/iscsi_ibft.c 2011-04-17 15:56:46.000000000 -0400
29067 @@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struc
29068 return ret;
29069 }
29070
29071 -static struct sysfs_ops ibft_attr_ops = {
29072 +static const struct sysfs_ops ibft_attr_ops = {
29073 .show = ibft_show_attribute,
29074 };
29075
29076 diff -urNp linux-2.6.32.45/drivers/firmware/memmap.c linux-2.6.32.45/drivers/firmware/memmap.c
29077 --- linux-2.6.32.45/drivers/firmware/memmap.c 2011-03-27 14:31:47.000000000 -0400
29078 +++ linux-2.6.32.45/drivers/firmware/memmap.c 2011-04-17 15:56:46.000000000 -0400
29079 @@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
29080 NULL
29081 };
29082
29083 -static struct sysfs_ops memmap_attr_ops = {
29084 +static const struct sysfs_ops memmap_attr_ops = {
29085 .show = memmap_attr_show,
29086 };
29087
29088 diff -urNp linux-2.6.32.45/drivers/gpio/vr41xx_giu.c linux-2.6.32.45/drivers/gpio/vr41xx_giu.c
29089 --- linux-2.6.32.45/drivers/gpio/vr41xx_giu.c 2011-03-27 14:31:47.000000000 -0400
29090 +++ linux-2.6.32.45/drivers/gpio/vr41xx_giu.c 2011-05-04 17:56:28.000000000 -0400
29091 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
29092 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
29093 maskl, pendl, maskh, pendh);
29094
29095 - atomic_inc(&irq_err_count);
29096 + atomic_inc_unchecked(&irq_err_count);
29097
29098 return -EINVAL;
29099 }
29100 diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_crtc_helper.c linux-2.6.32.45/drivers/gpu/drm/drm_crtc_helper.c
29101 --- linux-2.6.32.45/drivers/gpu/drm/drm_crtc_helper.c 2011-03-27 14:31:47.000000000 -0400
29102 +++ linux-2.6.32.45/drivers/gpu/drm/drm_crtc_helper.c 2011-05-16 21:46:57.000000000 -0400
29103 @@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct d
29104 struct drm_crtc *tmp;
29105 int crtc_mask = 1;
29106
29107 - WARN(!crtc, "checking null crtc?");
29108 + BUG_ON(!crtc);
29109
29110 dev = crtc->dev;
29111
29112 @@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm
29113
29114 adjusted_mode = drm_mode_duplicate(dev, mode);
29115
29116 + pax_track_stack();
29117 +
29118 crtc->enabled = drm_helper_crtc_in_use(crtc);
29119
29120 if (!crtc->enabled)
29121 diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_drv.c linux-2.6.32.45/drivers/gpu/drm/drm_drv.c
29122 --- linux-2.6.32.45/drivers/gpu/drm/drm_drv.c 2011-03-27 14:31:47.000000000 -0400
29123 +++ linux-2.6.32.45/drivers/gpu/drm/drm_drv.c 2011-04-17 15:56:46.000000000 -0400
29124 @@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struc
29125 char *kdata = NULL;
29126
29127 atomic_inc(&dev->ioctl_count);
29128 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
29129 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
29130 ++file_priv->ioctl_count;
29131
29132 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
29133 diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_fops.c linux-2.6.32.45/drivers/gpu/drm/drm_fops.c
29134 --- linux-2.6.32.45/drivers/gpu/drm/drm_fops.c 2011-03-27 14:31:47.000000000 -0400
29135 +++ linux-2.6.32.45/drivers/gpu/drm/drm_fops.c 2011-04-17 15:56:46.000000000 -0400
29136 @@ -66,7 +66,7 @@ static int drm_setup(struct drm_device *
29137 }
29138
29139 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
29140 - atomic_set(&dev->counts[i], 0);
29141 + atomic_set_unchecked(&dev->counts[i], 0);
29142
29143 dev->sigdata.lock = NULL;
29144
29145 @@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct
29146
29147 retcode = drm_open_helper(inode, filp, dev);
29148 if (!retcode) {
29149 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
29150 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
29151 spin_lock(&dev->count_lock);
29152 - if (!dev->open_count++) {
29153 + if (local_inc_return(&dev->open_count) == 1) {
29154 spin_unlock(&dev->count_lock);
29155 retcode = drm_setup(dev);
29156 goto out;
29157 @@ -435,7 +435,7 @@ int drm_release(struct inode *inode, str
29158
29159 lock_kernel();
29160
29161 - DRM_DEBUG("open_count = %d\n", dev->open_count);
29162 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
29163
29164 if (dev->driver->preclose)
29165 dev->driver->preclose(dev, file_priv);
29166 @@ -447,7 +447,7 @@ int drm_release(struct inode *inode, str
29167 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
29168 task_pid_nr(current),
29169 (long)old_encode_dev(file_priv->minor->device),
29170 - dev->open_count);
29171 + local_read(&dev->open_count));
29172
29173 /* if the master has gone away we can't do anything with the lock */
29174 if (file_priv->minor->master)
29175 @@ -524,9 +524,9 @@ int drm_release(struct inode *inode, str
29176 * End inline drm_release
29177 */
29178
29179 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
29180 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
29181 spin_lock(&dev->count_lock);
29182 - if (!--dev->open_count) {
29183 + if (local_dec_and_test(&dev->open_count)) {
29184 if (atomic_read(&dev->ioctl_count)) {
29185 DRM_ERROR("Device busy: %d\n",
29186 atomic_read(&dev->ioctl_count));
29187 diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_gem.c linux-2.6.32.45/drivers/gpu/drm/drm_gem.c
29188 --- linux-2.6.32.45/drivers/gpu/drm/drm_gem.c 2011-03-27 14:31:47.000000000 -0400
29189 +++ linux-2.6.32.45/drivers/gpu/drm/drm_gem.c 2011-04-17 15:56:46.000000000 -0400
29190 @@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
29191 spin_lock_init(&dev->object_name_lock);
29192 idr_init(&dev->object_name_idr);
29193 atomic_set(&dev->object_count, 0);
29194 - atomic_set(&dev->object_memory, 0);
29195 + atomic_set_unchecked(&dev->object_memory, 0);
29196 atomic_set(&dev->pin_count, 0);
29197 - atomic_set(&dev->pin_memory, 0);
29198 + atomic_set_unchecked(&dev->pin_memory, 0);
29199 atomic_set(&dev->gtt_count, 0);
29200 - atomic_set(&dev->gtt_memory, 0);
29201 + atomic_set_unchecked(&dev->gtt_memory, 0);
29202
29203 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
29204 if (!mm) {
29205 @@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *
29206 goto fput;
29207 }
29208 atomic_inc(&dev->object_count);
29209 - atomic_add(obj->size, &dev->object_memory);
29210 + atomic_add_unchecked(obj->size, &dev->object_memory);
29211 return obj;
29212 fput:
29213 fput(obj->filp);
29214 @@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
29215
29216 fput(obj->filp);
29217 atomic_dec(&dev->object_count);
29218 - atomic_sub(obj->size, &dev->object_memory);
29219 + atomic_sub_unchecked(obj->size, &dev->object_memory);
29220 kfree(obj);
29221 }
29222 EXPORT_SYMBOL(drm_gem_object_free);
29223 diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_info.c linux-2.6.32.45/drivers/gpu/drm/drm_info.c
29224 --- linux-2.6.32.45/drivers/gpu/drm/drm_info.c 2011-03-27 14:31:47.000000000 -0400
29225 +++ linux-2.6.32.45/drivers/gpu/drm/drm_info.c 2011-04-17 15:56:46.000000000 -0400
29226 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
29227 struct drm_local_map *map;
29228 struct drm_map_list *r_list;
29229
29230 - /* Hardcoded from _DRM_FRAME_BUFFER,
29231 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
29232 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
29233 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
29234 + static const char * const types[] = {
29235 + [_DRM_FRAME_BUFFER] = "FB",
29236 + [_DRM_REGISTERS] = "REG",
29237 + [_DRM_SHM] = "SHM",
29238 + [_DRM_AGP] = "AGP",
29239 + [_DRM_SCATTER_GATHER] = "SG",
29240 + [_DRM_CONSISTENT] = "PCI",
29241 + [_DRM_GEM] = "GEM" };
29242 const char *type;
29243 int i;
29244
29245 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
29246 map = r_list->map;
29247 if (!map)
29248 continue;
29249 - if (map->type < 0 || map->type > 5)
29250 + if (map->type >= ARRAY_SIZE(types))
29251 type = "??";
29252 else
29253 type = types[map->type];
29254 @@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file
29255 struct drm_device *dev = node->minor->dev;
29256
29257 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
29258 - seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
29259 + seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
29260 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
29261 - seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
29262 - seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
29263 + seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
29264 + seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
29265 seq_printf(m, "%d gtt total\n", dev->gtt_total);
29266 return 0;
29267 }
29268 @@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, voi
29269 mutex_lock(&dev->struct_mutex);
29270 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
29271 atomic_read(&dev->vma_count),
29272 +#ifdef CONFIG_GRKERNSEC_HIDESYM
29273 + NULL, 0);
29274 +#else
29275 high_memory, (u64)virt_to_phys(high_memory));
29276 +#endif
29277
29278 list_for_each_entry(pt, &dev->vmalist, head) {
29279 vma = pt->vma;
29280 @@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, voi
29281 continue;
29282 seq_printf(m,
29283 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
29284 - pt->pid, vma->vm_start, vma->vm_end,
29285 + pt->pid,
29286 +#ifdef CONFIG_GRKERNSEC_HIDESYM
29287 + 0, 0,
29288 +#else
29289 + vma->vm_start, vma->vm_end,
29290 +#endif
29291 vma->vm_flags & VM_READ ? 'r' : '-',
29292 vma->vm_flags & VM_WRITE ? 'w' : '-',
29293 vma->vm_flags & VM_EXEC ? 'x' : '-',
29294 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
29295 vma->vm_flags & VM_LOCKED ? 'l' : '-',
29296 vma->vm_flags & VM_IO ? 'i' : '-',
29297 +#ifdef CONFIG_GRKERNSEC_HIDESYM
29298 + 0);
29299 +#else
29300 vma->vm_pgoff);
29301 +#endif
29302
29303 #if defined(__i386__)
29304 pgprot = pgprot_val(vma->vm_page_prot);
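
The drm_info hunk replaces the positional types[] table and the hardcoded "type > 5" bound with designated initializers keyed by the _DRM_* map type and an ARRAY_SIZE() check, so a new map type cannot silently index past the table. A condensed userspace version of that pattern, with invented enum values:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    enum map_type { MAP_FB, MAP_REG, MAP_SHM, MAP_GEM };   /* illustrative */

    static const char *const type_names[] = {
        [MAP_FB]  = "FB",
        [MAP_REG] = "REG",
        [MAP_SHM] = "SHM",
        [MAP_GEM] = "GEM",
    };

    static const char *type_name(unsigned int type)
    {
        /* bound the lookup by the table size, not by a hardcoded constant */
        if (type >= ARRAY_SIZE(type_names) || !type_names[type])
            return "??";
        return type_names[type];
    }

    int main(void)
    {
        printf("%s %s\n", type_name(MAP_REG), type_name(42));
        return 0;
    }
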
29305 diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_ioctl.c linux-2.6.32.45/drivers/gpu/drm/drm_ioctl.c
29306 --- linux-2.6.32.45/drivers/gpu/drm/drm_ioctl.c 2011-03-27 14:31:47.000000000 -0400
29307 +++ linux-2.6.32.45/drivers/gpu/drm/drm_ioctl.c 2011-04-17 15:56:46.000000000 -0400
29308 @@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev,
29309 stats->data[i].value =
29310 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
29311 else
29312 - stats->data[i].value = atomic_read(&dev->counts[i]);
29313 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
29314 stats->data[i].type = dev->types[i];
29315 }
29316
29317 diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_lock.c linux-2.6.32.45/drivers/gpu/drm/drm_lock.c
29318 --- linux-2.6.32.45/drivers/gpu/drm/drm_lock.c 2011-03-27 14:31:47.000000000 -0400
29319 +++ linux-2.6.32.45/drivers/gpu/drm/drm_lock.c 2011-04-17 15:56:46.000000000 -0400
29320 @@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, voi
29321 if (drm_lock_take(&master->lock, lock->context)) {
29322 master->lock.file_priv = file_priv;
29323 master->lock.lock_time = jiffies;
29324 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
29325 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
29326 break; /* Got lock */
29327 }
29328
29329 @@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, v
29330 return -EINVAL;
29331 }
29332
29333 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
29334 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
29335
29336 /* kernel_context_switch isn't used by any of the x86 drm
29337 * modules but is required by the Sparc driver.
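Editorial note (not part of the patch): throughout these DRM hunks, counters that are pure statistics (lock counts, IRQ counts, byte totals) are switched from atomic_t to atomic_unchecked_t. As I read the PaX/grsecurity REFCOUNT feature, atomic_t operations become overflow-checked, while the _unchecked variants deliberately keep plain wrapping semantics for counters where wrap-around is harmless. A simplified userspace model of that split; the real primitives are per-architecture and every name below is illustrative only:

    #include <limits.h>
    #include <stdio.h>

    struct atomic_model           { int counter; };          /* overflow trapped */
    struct atomic_unchecked_model { unsigned int counter; }; /* wraps by design  */

    /* Checked increment: refuse to wrap past INT_MAX (models the overflow trap). */
    static void atomic_inc_model(struct atomic_model *v)
    {
        if (v->counter == INT_MAX) {
            fprintf(stderr, "refcount overflow detected\n");
            return;   /* the real feature would terminate the offending task */
        }
        v->counter++;
    }

    /* Unchecked increment: plain modular arithmetic, fine for statistics. */
    static void atomic_inc_unchecked_model(struct atomic_unchecked_model *v)
    {
        v->counter++;   /* may wrap to 0, acceptable for a statistics counter */
    }

    int main(void)
    {
        struct atomic_model           ref  = { INT_MAX };
        struct atomic_unchecked_model stat = { UINT_MAX };

        atomic_inc_model(&ref);             /* caught, value stays at INT_MAX */
        atomic_inc_unchecked_model(&stat);  /* silently wraps to 0            */
        printf("ref=%d stat=%u\n", ref.counter, stat.counter);
        return 0;
    }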
29338 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i810/i810_dma.c linux-2.6.32.45/drivers/gpu/drm/i810/i810_dma.c
29339 --- linux-2.6.32.45/drivers/gpu/drm/i810/i810_dma.c 2011-03-27 14:31:47.000000000 -0400
29340 +++ linux-2.6.32.45/drivers/gpu/drm/i810/i810_dma.c 2011-04-17 15:56:46.000000000 -0400
29341 @@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_de
29342 dma->buflist[vertex->idx],
29343 vertex->discard, vertex->used);
29344
29345 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
29346 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
29347 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
29348 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
29349 sarea_priv->last_enqueue = dev_priv->counter - 1;
29350 sarea_priv->last_dispatch = (int)hw_status[5];
29351
29352 @@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device
29353 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
29354 mc->last_render);
29355
29356 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
29357 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
29358 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
29359 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
29360 sarea_priv->last_enqueue = dev_priv->counter - 1;
29361 sarea_priv->last_dispatch = (int)hw_status[5];
29362
29363 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i810/i810_drv.h linux-2.6.32.45/drivers/gpu/drm/i810/i810_drv.h
29364 --- linux-2.6.32.45/drivers/gpu/drm/i810/i810_drv.h 2011-03-27 14:31:47.000000000 -0400
29365 +++ linux-2.6.32.45/drivers/gpu/drm/i810/i810_drv.h 2011-05-04 17:56:28.000000000 -0400
29366 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
29367 int page_flipping;
29368
29369 wait_queue_head_t irq_queue;
29370 - atomic_t irq_received;
29371 - atomic_t irq_emitted;
29372 + atomic_unchecked_t irq_received;
29373 + atomic_unchecked_t irq_emitted;
29374
29375 int front_offset;
29376 } drm_i810_private_t;
29377 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i830/i830_drv.h linux-2.6.32.45/drivers/gpu/drm/i830/i830_drv.h
29378 --- linux-2.6.32.45/drivers/gpu/drm/i830/i830_drv.h 2011-03-27 14:31:47.000000000 -0400
29379 +++ linux-2.6.32.45/drivers/gpu/drm/i830/i830_drv.h 2011-05-04 17:56:28.000000000 -0400
29380 @@ -115,8 +115,8 @@ typedef struct drm_i830_private {
29381 int page_flipping;
29382
29383 wait_queue_head_t irq_queue;
29384 - atomic_t irq_received;
29385 - atomic_t irq_emitted;
29386 + atomic_unchecked_t irq_received;
29387 + atomic_unchecked_t irq_emitted;
29388
29389 int use_mi_batchbuffer_start;
29390
29391 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i830/i830_irq.c linux-2.6.32.45/drivers/gpu/drm/i830/i830_irq.c
29392 --- linux-2.6.32.45/drivers/gpu/drm/i830/i830_irq.c 2011-03-27 14:31:47.000000000 -0400
29393 +++ linux-2.6.32.45/drivers/gpu/drm/i830/i830_irq.c 2011-05-04 17:56:28.000000000 -0400
29394 @@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_
29395
29396 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
29397
29398 - atomic_inc(&dev_priv->irq_received);
29399 + atomic_inc_unchecked(&dev_priv->irq_received);
29400 wake_up_interruptible(&dev_priv->irq_queue);
29401
29402 return IRQ_HANDLED;
29403 @@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_devi
29404
29405 DRM_DEBUG("%s\n", __func__);
29406
29407 - atomic_inc(&dev_priv->irq_emitted);
29408 + atomic_inc_unchecked(&dev_priv->irq_emitted);
29409
29410 BEGIN_LP_RING(2);
29411 OUT_RING(0);
29412 OUT_RING(GFX_OP_USER_INTERRUPT);
29413 ADVANCE_LP_RING();
29414
29415 - return atomic_read(&dev_priv->irq_emitted);
29416 + return atomic_read_unchecked(&dev_priv->irq_emitted);
29417 }
29418
29419 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
29420 @@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_devi
29421
29422 DRM_DEBUG("%s\n", __func__);
29423
29424 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
29425 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
29426 return 0;
29427
29428 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
29429 @@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_devi
29430
29431 for (;;) {
29432 __set_current_state(TASK_INTERRUPTIBLE);
29433 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
29434 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
29435 break;
29436 if ((signed)(end - jiffies) <= 0) {
29437 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
29438 @@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct d
29439 I830_WRITE16(I830REG_HWSTAM, 0xffff);
29440 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
29441 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
29442 - atomic_set(&dev_priv->irq_received, 0);
29443 - atomic_set(&dev_priv->irq_emitted, 0);
29444 + atomic_set_unchecked(&dev_priv->irq_received, 0);
29445 + atomic_set_unchecked(&dev_priv->irq_emitted, 0);
29446 init_waitqueue_head(&dev_priv->irq_queue);
29447 }
29448
29449 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7017.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7017.c
29450 --- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7017.c 2011-03-27 14:31:47.000000000 -0400
29451 +++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7017.c 2011-04-17 15:56:46.000000000 -0400
29452 @@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_
29453 }
29454 }
29455
29456 -struct intel_dvo_dev_ops ch7017_ops = {
29457 +const struct intel_dvo_dev_ops ch7017_ops = {
29458 .init = ch7017_init,
29459 .detect = ch7017_detect,
29460 .mode_valid = ch7017_mode_valid,
29461 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7xxx.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7xxx.c
29462 --- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-03-27 14:31:47.000000000 -0400
29463 +++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-04-17 15:56:46.000000000 -0400
29464 @@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_
29465 }
29466 }
29467
29468 -struct intel_dvo_dev_ops ch7xxx_ops = {
29469 +const struct intel_dvo_dev_ops ch7xxx_ops = {
29470 .init = ch7xxx_init,
29471 .detect = ch7xxx_detect,
29472 .mode_valid = ch7xxx_mode_valid,
29473 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo.h linux-2.6.32.45/drivers/gpu/drm/i915/dvo.h
29474 --- linux-2.6.32.45/drivers/gpu/drm/i915/dvo.h 2011-03-27 14:31:47.000000000 -0400
29475 +++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo.h 2011-04-17 15:56:46.000000000 -0400
29476 @@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
29477 *
29478 * \return singly-linked list of modes or NULL if no modes found.
29479 */
29480 - struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
29481 + struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
29482
29483 /**
29484 * Clean up driver-specific bits of the output
29485 */
29486 - void (*destroy) (struct intel_dvo_device *dvo);
29487 + void (* const destroy) (struct intel_dvo_device *dvo);
29488
29489 /**
29490 * Debugging hook to dump device registers to log file
29491 */
29492 - void (*dump_regs)(struct intel_dvo_device *dvo);
29493 + void (* const dump_regs)(struct intel_dvo_device *dvo);
29494 };
29495
29496 -extern struct intel_dvo_dev_ops sil164_ops;
29497 -extern struct intel_dvo_dev_ops ch7xxx_ops;
29498 -extern struct intel_dvo_dev_ops ivch_ops;
29499 -extern struct intel_dvo_dev_ops tfp410_ops;
29500 -extern struct intel_dvo_dev_ops ch7017_ops;
29501 +extern const struct intel_dvo_dev_ops sil164_ops;
29502 +extern const struct intel_dvo_dev_ops ch7xxx_ops;
29503 +extern const struct intel_dvo_dev_ops ivch_ops;
29504 +extern const struct intel_dvo_dev_ops tfp410_ops;
29505 +extern const struct intel_dvo_dev_ops ch7017_ops;
29506
29507 #endif /* _INTEL_DVO_H */
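Editorial note (not part of the patch): the dvo changes above, like many similar hunks in this patch, add const to structures that are nothing but tables of function pointers. A const-qualified table is emitted into a read-only section, so the callback targets cannot be rewritten at runtime, and accidental writes fail at compile time. A small self-contained sketch of the pattern, with invented names:

    #include <stdio.h>

    struct example_ops {
        int  (*init)(void);
        void (*destroy)(void);
    };

    static int  example_init(void)    { puts("init");    return 0; }
    static void example_destroy(void) { puts("destroy"); }

    /* Lives in .rodata; an assignment such as example_ops.init = ... is rejected. */
    static const struct example_ops example_ops = {
        .init    = example_init,
        .destroy = example_destroy,
    };

    int main(void)
    {
        example_ops.init();
        example_ops.destroy();
        return 0;
    }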
29508 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ivch.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ivch.c
29509 --- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ivch.c 2011-03-27 14:31:47.000000000 -0400
29510 +++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ivch.c 2011-04-17 15:56:46.000000000 -0400
29511 @@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dv
29512 }
29513 }
29514
29515 -struct intel_dvo_dev_ops ivch_ops= {
29516 +const struct intel_dvo_dev_ops ivch_ops= {
29517 .init = ivch_init,
29518 .dpms = ivch_dpms,
29519 .save = ivch_save,
29520 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_sil164.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_sil164.c
29521 --- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_sil164.c 2011-03-27 14:31:47.000000000 -0400
29522 +++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_sil164.c 2011-04-17 15:56:46.000000000 -0400
29523 @@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_
29524 }
29525 }
29526
29527 -struct intel_dvo_dev_ops sil164_ops = {
29528 +const struct intel_dvo_dev_ops sil164_ops = {
29529 .init = sil164_init,
29530 .detect = sil164_detect,
29531 .mode_valid = sil164_mode_valid,
29532 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_tfp410.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_tfp410.c
29533 --- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_tfp410.c 2011-03-27 14:31:47.000000000 -0400
29534 +++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_tfp410.c 2011-04-17 15:56:46.000000000 -0400
29535 @@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_
29536 }
29537 }
29538
29539 -struct intel_dvo_dev_ops tfp410_ops = {
29540 +const struct intel_dvo_dev_ops tfp410_ops = {
29541 .init = tfp410_init,
29542 .detect = tfp410_detect,
29543 .mode_valid = tfp410_mode_valid,
29544 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_debugfs.c linux-2.6.32.45/drivers/gpu/drm/i915/i915_debugfs.c
29545 --- linux-2.6.32.45/drivers/gpu/drm/i915/i915_debugfs.c 2011-03-27 14:31:47.000000000 -0400
29546 +++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_debugfs.c 2011-05-04 17:56:28.000000000 -0400
29547 @@ -192,7 +192,7 @@ static int i915_interrupt_info(struct se
29548 I915_READ(GTIMR));
29549 }
29550 seq_printf(m, "Interrupts received: %d\n",
29551 - atomic_read(&dev_priv->irq_received));
29552 + atomic_read_unchecked(&dev_priv->irq_received));
29553 if (dev_priv->hw_status_page != NULL) {
29554 seq_printf(m, "Current sequence: %d\n",
29555 i915_get_gem_seqno(dev));
29556 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.c linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.c
29557 --- linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.c 2011-03-27 14:31:47.000000000 -0400
29558 +++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.c 2011-04-17 15:56:46.000000000 -0400
29559 @@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
29560 return i915_resume(dev);
29561 }
29562
29563 -static struct vm_operations_struct i915_gem_vm_ops = {
29564 +static const struct vm_operations_struct i915_gem_vm_ops = {
29565 .fault = i915_gem_fault,
29566 .open = drm_gem_vm_open,
29567 .close = drm_gem_vm_close,
29568 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.h linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.h
29569 --- linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.h 2011-03-27 14:31:47.000000000 -0400
29570 +++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.h 2011-08-05 20:33:55.000000000 -0400
29571 @@ -168,7 +168,7 @@ struct drm_i915_display_funcs {
29572 /* display clock increase/decrease */
29573 /* pll clock increase/decrease */
29574 /* clock gating init */
29575 -};
29576 +} __no_const;
29577
29578 typedef struct drm_i915_private {
29579 struct drm_device *dev;
29580 @@ -197,7 +197,7 @@ typedef struct drm_i915_private {
29581 int page_flipping;
29582
29583 wait_queue_head_t irq_queue;
29584 - atomic_t irq_received;
29585 + atomic_unchecked_t irq_received;
29586 /** Protects user_irq_refcount and irq_mask_reg */
29587 spinlock_t user_irq_lock;
29588 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
29589 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_gem.c linux-2.6.32.45/drivers/gpu/drm/i915/i915_gem.c
29590 --- linux-2.6.32.45/drivers/gpu/drm/i915/i915_gem.c 2011-03-27 14:31:47.000000000 -0400
29591 +++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_gem.c 2011-04-17 15:56:46.000000000 -0400
29592 @@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_d
29593
29594 args->aper_size = dev->gtt_total;
29595 args->aper_available_size = (args->aper_size -
29596 - atomic_read(&dev->pin_memory));
29597 + atomic_read_unchecked(&dev->pin_memory));
29598
29599 return 0;
29600 }
29601 @@ -492,6 +492,11 @@ i915_gem_pread_ioctl(struct drm_device *
29602 return -EINVAL;
29603 }
29604
29605 + if (!access_ok(VERIFY_WRITE, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
29606 + drm_gem_object_unreference(obj);
29607 + return -EFAULT;
29608 + }
29609 +
29610 if (i915_gem_object_needs_bit17_swizzle(obj)) {
29611 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
29612 } else {
29613 @@ -965,6 +970,11 @@ i915_gem_pwrite_ioctl(struct drm_device
29614 return -EINVAL;
29615 }
29616
29617 + if (!access_ok(VERIFY_READ, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
29618 + drm_gem_object_unreference(obj);
29619 + return -EFAULT;
29620 + }
29621 +
29622 /* We can only do the GTT pwrite on untiled buffers, as otherwise
29623 * it would end up going through the fenced access, and we'll get
29624 * different detiling behavior between reading and writing.
29625 @@ -2054,7 +2064,7 @@ i915_gem_object_unbind(struct drm_gem_ob
29626
29627 if (obj_priv->gtt_space) {
29628 atomic_dec(&dev->gtt_count);
29629 - atomic_sub(obj->size, &dev->gtt_memory);
29630 + atomic_sub_unchecked(obj->size, &dev->gtt_memory);
29631
29632 drm_mm_put_block(obj_priv->gtt_space);
29633 obj_priv->gtt_space = NULL;
29634 @@ -2697,7 +2707,7 @@ i915_gem_object_bind_to_gtt(struct drm_g
29635 goto search_free;
29636 }
29637 atomic_inc(&dev->gtt_count);
29638 - atomic_add(obj->size, &dev->gtt_memory);
29639 + atomic_add_unchecked(obj->size, &dev->gtt_memory);
29640
29641 /* Assert that the object is not currently in any GPU domain. As it
29642 * wasn't in the GTT, there shouldn't be any way it could have been in
29643 @@ -3751,9 +3761,9 @@ i915_gem_execbuffer(struct drm_device *d
29644 "%d/%d gtt bytes\n",
29645 atomic_read(&dev->object_count),
29646 atomic_read(&dev->pin_count),
29647 - atomic_read(&dev->object_memory),
29648 - atomic_read(&dev->pin_memory),
29649 - atomic_read(&dev->gtt_memory),
29650 + atomic_read_unchecked(&dev->object_memory),
29651 + atomic_read_unchecked(&dev->pin_memory),
29652 + atomic_read_unchecked(&dev->gtt_memory),
29653 dev->gtt_total);
29654 }
29655 goto err;
29656 @@ -3985,7 +3995,7 @@ i915_gem_object_pin(struct drm_gem_objec
29657 */
29658 if (obj_priv->pin_count == 1) {
29659 atomic_inc(&dev->pin_count);
29660 - atomic_add(obj->size, &dev->pin_memory);
29661 + atomic_add_unchecked(obj->size, &dev->pin_memory);
29662 if (!obj_priv->active &&
29663 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
29664 !list_empty(&obj_priv->list))
29665 @@ -4018,7 +4028,7 @@ i915_gem_object_unpin(struct drm_gem_obj
29666 list_move_tail(&obj_priv->list,
29667 &dev_priv->mm.inactive_list);
29668 atomic_dec(&dev->pin_count);
29669 - atomic_sub(obj->size, &dev->pin_memory);
29670 + atomic_sub_unchecked(obj->size, &dev->pin_memory);
29671 }
29672 i915_verify_inactive(dev, __FILE__, __LINE__);
29673 }
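Editorial note (not part of the patch): the i915_gem_pread_ioctl/i915_gem_pwrite_ioctl hunks above add an access_ok() pass over the user-supplied pointer and length before any copy work starts, returning -EFAULT instead of operating on an unvalidated range. The userspace sketch below shows the same shape of up-front (offset, length) validation; the helper and its callers are invented for illustration:

    #include <errno.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    static int copy_from_client(char *dst, size_t dst_size,
                                const char *client_buf, size_t client_len,
                                size_t offset)
    {
        /* Reject overruns and offset+len overflow before touching anything. */
        if (client_len > dst_size || offset > dst_size - client_len)
            return -EFAULT;
        memcpy(dst + offset, client_buf, client_len);
        return 0;
    }

    int main(void)
    {
        char obj[16] = { 0 };
        const char payload[] = "abcd";

        printf("ok   = %d\n", copy_from_client(obj, sizeof(obj), payload, 4, 0));
        printf("fail = %d\n", copy_from_client(obj, sizeof(obj), payload, 4, 15));
        return 0;
    }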
29674 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_irq.c linux-2.6.32.45/drivers/gpu/drm/i915/i915_irq.c
29675 --- linux-2.6.32.45/drivers/gpu/drm/i915/i915_irq.c 2011-03-27 14:31:47.000000000 -0400
29676 +++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_irq.c 2011-05-04 17:56:28.000000000 -0400
29677 @@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_
29678 int irq_received;
29679 int ret = IRQ_NONE;
29680
29681 - atomic_inc(&dev_priv->irq_received);
29682 + atomic_inc_unchecked(&dev_priv->irq_received);
29683
29684 if (IS_IGDNG(dev))
29685 return igdng_irq_handler(dev);
29686 @@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct d
29687 {
29688 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
29689
29690 - atomic_set(&dev_priv->irq_received, 0);
29691 + atomic_set_unchecked(&dev_priv->irq_received, 0);
29692
29693 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
29694 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
29695 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/intel_sdvo.c linux-2.6.32.45/drivers/gpu/drm/i915/intel_sdvo.c
29696 --- linux-2.6.32.45/drivers/gpu/drm/i915/intel_sdvo.c 2011-03-27 14:31:47.000000000 -0400
29697 +++ linux-2.6.32.45/drivers/gpu/drm/i915/intel_sdvo.c 2011-08-05 20:33:55.000000000 -0400
29698 @@ -2795,7 +2795,9 @@ bool intel_sdvo_init(struct drm_device *
29699 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
29700
29701 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
29702 - intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
29703 + pax_open_kernel();
29704 + *(void **)&intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
29705 + pax_close_kernel();
29706
29707 /* Read the regs to test if we can talk to the device */
29708 for (i = 0; i < 0x40; i++) {
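Editorial note (not part of the patch): the one-time assignment of intel_sdvo_i2c_bit_algo.functionality above is bracketed by pax_open_kernel()/pax_close_kernel(), which, as I read the PaX convention, temporarily lift write protection on otherwise read-only kernel data for exactly this kind of one-shot update. A rough userspace analogue using mprotect() to toggle a page between read-only and writable around a single patch-up:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long page = sysconf(_SC_PAGESIZE);
        if (page <= 0)
            return 1;

        /* A "mostly read-only" table living on its own page. */
        void *tbl = mmap(NULL, page, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (tbl == MAP_FAILED)
            return 1;
        strcpy(tbl, "initial");
        mprotect(tbl, page, PROT_READ);              /* normal state: read-only */

        mprotect(tbl, page, PROT_READ | PROT_WRITE); /* "open": allow the write  */
        strcpy(tbl, "patched");
        mprotect(tbl, page, PROT_READ);              /* "close": read-only again */

        printf("%s\n", (char *)tbl);
        munmap(tbl, page);
        return 0;
    }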
29709 diff -urNp linux-2.6.32.45/drivers/gpu/drm/mga/mga_drv.h linux-2.6.32.45/drivers/gpu/drm/mga/mga_drv.h
29710 --- linux-2.6.32.45/drivers/gpu/drm/mga/mga_drv.h 2011-03-27 14:31:47.000000000 -0400
29711 +++ linux-2.6.32.45/drivers/gpu/drm/mga/mga_drv.h 2011-05-04 17:56:28.000000000 -0400
29712 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
29713 u32 clear_cmd;
29714 u32 maccess;
29715
29716 - atomic_t vbl_received; /**< Number of vblanks received. */
29717 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
29718 wait_queue_head_t fence_queue;
29719 - atomic_t last_fence_retired;
29720 + atomic_unchecked_t last_fence_retired;
29721 u32 next_fence_to_post;
29722
29723 unsigned int fb_cpp;
29724 diff -urNp linux-2.6.32.45/drivers/gpu/drm/mga/mga_irq.c linux-2.6.32.45/drivers/gpu/drm/mga/mga_irq.c
29725 --- linux-2.6.32.45/drivers/gpu/drm/mga/mga_irq.c 2011-03-27 14:31:47.000000000 -0400
29726 +++ linux-2.6.32.45/drivers/gpu/drm/mga/mga_irq.c 2011-05-04 17:56:28.000000000 -0400
29727 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
29728 if (crtc != 0)
29729 return 0;
29730
29731 - return atomic_read(&dev_priv->vbl_received);
29732 + return atomic_read_unchecked(&dev_priv->vbl_received);
29733 }
29734
29735
29736 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
29737 /* VBLANK interrupt */
29738 if (status & MGA_VLINEPEN) {
29739 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
29740 - atomic_inc(&dev_priv->vbl_received);
29741 + atomic_inc_unchecked(&dev_priv->vbl_received);
29742 drm_handle_vblank(dev, 0);
29743 handled = 1;
29744 }
29745 @@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
29746 MGA_WRITE(MGA_PRIMEND, prim_end);
29747 }
29748
29749 - atomic_inc(&dev_priv->last_fence_retired);
29750 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
29751 DRM_WAKEUP(&dev_priv->fence_queue);
29752 handled = 1;
29753 }
29754 @@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_dev
29755 * using fences.
29756 */
29757 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
29758 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
29759 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
29760 - *sequence) <= (1 << 23)));
29761
29762 *sequence = cur_fence;
29763 diff -urNp linux-2.6.32.45/drivers/gpu/drm/r128/r128_cce.c linux-2.6.32.45/drivers/gpu/drm/r128/r128_cce.c
29764 --- linux-2.6.32.45/drivers/gpu/drm/r128/r128_cce.c 2011-03-27 14:31:47.000000000 -0400
29765 +++ linux-2.6.32.45/drivers/gpu/drm/r128/r128_cce.c 2011-05-04 17:56:28.000000000 -0400
29766 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
29767
29768 /* GH: Simple idle check.
29769 */
29770 - atomic_set(&dev_priv->idle_count, 0);
29771 + atomic_set_unchecked(&dev_priv->idle_count, 0);
29772
29773 /* We don't support anything other than bus-mastering ring mode,
29774 * but the ring can be in either AGP or PCI space for the ring
29775 diff -urNp linux-2.6.32.45/drivers/gpu/drm/r128/r128_drv.h linux-2.6.32.45/drivers/gpu/drm/r128/r128_drv.h
29776 --- linux-2.6.32.45/drivers/gpu/drm/r128/r128_drv.h 2011-03-27 14:31:47.000000000 -0400
29777 +++ linux-2.6.32.45/drivers/gpu/drm/r128/r128_drv.h 2011-05-04 17:56:28.000000000 -0400
29778 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
29779 int is_pci;
29780 unsigned long cce_buffers_offset;
29781
29782 - atomic_t idle_count;
29783 + atomic_unchecked_t idle_count;
29784
29785 int page_flipping;
29786 int current_page;
29787 u32 crtc_offset;
29788 u32 crtc_offset_cntl;
29789
29790 - atomic_t vbl_received;
29791 + atomic_unchecked_t vbl_received;
29792
29793 u32 color_fmt;
29794 unsigned int front_offset;
29795 diff -urNp linux-2.6.32.45/drivers/gpu/drm/r128/r128_irq.c linux-2.6.32.45/drivers/gpu/drm/r128/r128_irq.c
29796 --- linux-2.6.32.45/drivers/gpu/drm/r128/r128_irq.c 2011-03-27 14:31:47.000000000 -0400
29797 +++ linux-2.6.32.45/drivers/gpu/drm/r128/r128_irq.c 2011-05-04 17:56:28.000000000 -0400
29798 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
29799 if (crtc != 0)
29800 return 0;
29801
29802 - return atomic_read(&dev_priv->vbl_received);
29803 + return atomic_read_unchecked(&dev_priv->vbl_received);
29804 }
29805
29806 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
29807 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
29808 /* VBLANK interrupt */
29809 if (status & R128_CRTC_VBLANK_INT) {
29810 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
29811 - atomic_inc(&dev_priv->vbl_received);
29812 + atomic_inc_unchecked(&dev_priv->vbl_received);
29813 drm_handle_vblank(dev, 0);
29814 return IRQ_HANDLED;
29815 }
29816 diff -urNp linux-2.6.32.45/drivers/gpu/drm/r128/r128_state.c linux-2.6.32.45/drivers/gpu/drm/r128/r128_state.c
29817 --- linux-2.6.32.45/drivers/gpu/drm/r128/r128_state.c 2011-03-27 14:31:47.000000000 -0400
29818 +++ linux-2.6.32.45/drivers/gpu/drm/r128/r128_state.c 2011-05-04 17:56:28.000000000 -0400
29819 @@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_priv
29820
29821 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
29822 {
29823 - if (atomic_read(&dev_priv->idle_count) == 0) {
29824 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
29825 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
29826 } else {
29827 - atomic_set(&dev_priv->idle_count, 0);
29828 + atomic_set_unchecked(&dev_priv->idle_count, 0);
29829 }
29830 }
29831
29832 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/atom.c linux-2.6.32.45/drivers/gpu/drm/radeon/atom.c
29833 --- linux-2.6.32.45/drivers/gpu/drm/radeon/atom.c 2011-05-10 22:12:01.000000000 -0400
29834 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/atom.c 2011-05-16 21:46:57.000000000 -0400
29835 @@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct c
29836 char name[512];
29837 int i;
29838
29839 + pax_track_stack();
29840 +
29841 ctx->card = card;
29842 ctx->bios = bios;
29843
29844 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/mkregtable.c linux-2.6.32.45/drivers/gpu/drm/radeon/mkregtable.c
29845 --- linux-2.6.32.45/drivers/gpu/drm/radeon/mkregtable.c 2011-03-27 14:31:47.000000000 -0400
29846 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/mkregtable.c 2011-04-17 15:56:46.000000000 -0400
29847 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
29848 regex_t mask_rex;
29849 regmatch_t match[4];
29850 char buf[1024];
29851 - size_t end;
29852 + long end;
29853 int len;
29854 int done = 0;
29855 int r;
29856 unsigned o;
29857 struct offset *offset;
29858 char last_reg_s[10];
29859 - int last_reg;
29860 + unsigned long last_reg;
29861
29862 if (regcomp
29863 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
29864 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_atombios.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_atombios.c
29865 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_atombios.c 2011-03-27 14:31:47.000000000 -0400
29866 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_atombios.c 2011-05-16 21:46:57.000000000 -0400
29867 @@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from
29868 bool linkb;
29869 struct radeon_i2c_bus_rec ddc_bus;
29870
29871 + pax_track_stack();
29872 +
29873 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
29874
29875 if (data_offset == 0)
29876 @@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_o
29877 }
29878 }
29879
29880 -struct bios_connector {
29881 +static struct bios_connector {
29882 bool valid;
29883 uint16_t line_mux;
29884 uint16_t devices;
29885 int connector_type;
29886 struct radeon_i2c_bus_rec ddc_bus;
29887 -};
29888 +} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
29889
29890 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
29891 drm_device
29892 @@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from
29893 uint8_t dac;
29894 union atom_supported_devices *supported_devices;
29895 int i, j;
29896 - struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
29897
29898 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
29899
29900 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_display.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_display.c
29901 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_display.c 2011-03-27 14:31:47.000000000 -0400
29902 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_display.c 2011-04-17 15:56:46.000000000 -0400
29903 @@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pl
29904
29905 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
29906 error = freq - current_freq;
29907 - error = error < 0 ? 0xffffffff : error;
29908 + error = (int32_t)error < 0 ? 0xffffffff : error;
29909 } else
29910 error = abs(current_freq - freq);
29911 vco_diff = abs(vco - best_vco);
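Editorial note (not part of the patch): in the radeon_compute_pll hunk above, `error` is evidently an unsigned quantity (otherwise the added cast would be pointless), so the original `error < 0` test could never fire; the subtraction wraps to a huge positive value instead. Casting to int32_t before the comparison recovers the intended sign test. A minimal reproduction with made-up frequencies:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t freq = 100, current_freq = 150;
        uint32_t error = freq - current_freq;   /* wraps to 0xffffffce */

        printf("unsigned test: %s\n", error < 0 ? "negative" : "never negative");
        printf("casted test:   %s\n",
               (int32_t)error < 0 ? "negative" : "non-negative");
        return 0;
    }

On the usual two's-complement targets the cast recovers the intended negative difference, which is what the fixed comparison relies on.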
29912 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_drv.h linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_drv.h
29913 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_drv.h 2011-03-27 14:31:47.000000000 -0400
29914 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_drv.h 2011-05-04 17:56:28.000000000 -0400
29915 @@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
29916
29917 /* SW interrupt */
29918 wait_queue_head_t swi_queue;
29919 - atomic_t swi_emitted;
29920 + atomic_unchecked_t swi_emitted;
29921 int vblank_crtc;
29922 uint32_t irq_enable_reg;
29923 uint32_t r500_disp_irq_reg;
29924 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_fence.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_fence.c
29925 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_fence.c 2011-03-27 14:31:47.000000000 -0400
29926 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_fence.c 2011-05-04 17:56:28.000000000 -0400
29927 @@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_devi
29928 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
29929 return 0;
29930 }
29931 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
29932 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
29933 if (!rdev->cp.ready) {
29934 /* FIXME: cp is not running assume everythings is done right
29935 * away
29936 @@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct rade
29937 return r;
29938 }
29939 WREG32(rdev->fence_drv.scratch_reg, 0);
29940 - atomic_set(&rdev->fence_drv.seq, 0);
29941 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
29942 INIT_LIST_HEAD(&rdev->fence_drv.created);
29943 INIT_LIST_HEAD(&rdev->fence_drv.emited);
29944 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
29945 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon.h linux-2.6.32.45/drivers/gpu/drm/radeon/radeon.h
29946 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon.h 2011-03-27 14:31:47.000000000 -0400
29947 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon.h 2011-08-05 20:33:55.000000000 -0400
29948 @@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device
29949 */
29950 struct radeon_fence_driver {
29951 uint32_t scratch_reg;
29952 - atomic_t seq;
29953 + atomic_unchecked_t seq;
29954 uint32_t last_seq;
29955 unsigned long count_timeout;
29956 wait_queue_head_t queue;
29957 @@ -640,7 +640,7 @@ struct radeon_asic {
29958 uint32_t offset, uint32_t obj_size);
29959 int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
29960 void (*bandwidth_update)(struct radeon_device *rdev);
29961 -};
29962 +} __no_const;
29963
29964 /*
29965 * Asic structures
29966 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ioc32.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ioc32.c
29967 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-03-27 14:31:47.000000000 -0400
29968 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-04-23 13:57:24.000000000 -0400
29969 @@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(str
29970 request = compat_alloc_user_space(sizeof(*request));
29971 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
29972 || __put_user(req32.param, &request->param)
29973 - || __put_user((void __user *)(unsigned long)req32.value,
29974 + || __put_user((unsigned long)req32.value,
29975 &request->value))
29976 return -EFAULT;
29977
29978 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_irq.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_irq.c
29979 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_irq.c 2011-03-27 14:31:47.000000000 -0400
29980 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_irq.c 2011-05-04 17:56:28.000000000 -0400
29981 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
29982 unsigned int ret;
29983 RING_LOCALS;
29984
29985 - atomic_inc(&dev_priv->swi_emitted);
29986 - ret = atomic_read(&dev_priv->swi_emitted);
29987 + atomic_inc_unchecked(&dev_priv->swi_emitted);
29988 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
29989
29990 BEGIN_RING(4);
29991 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
29992 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
29993 drm_radeon_private_t *dev_priv =
29994 (drm_radeon_private_t *) dev->dev_private;
29995
29996 - atomic_set(&dev_priv->swi_emitted, 0);
29997 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
29998 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
29999
30000 dev->max_vblank_count = 0x001fffff;
30001 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_state.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_state.c
30002 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_state.c 2011-03-27 14:31:47.000000000 -0400
30003 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_state.c 2011-04-17 15:56:46.000000000 -0400
30004 @@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm
30005 {
30006 drm_radeon_private_t *dev_priv = dev->dev_private;
30007 drm_radeon_getparam_t *param = data;
30008 - int value;
30009 + int value = 0;
30010
30011 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
30012
30013 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ttm.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ttm.c
30014 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ttm.c 2011-03-27 14:31:47.000000000 -0400
30015 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ttm.c 2011-04-17 15:56:46.000000000 -0400
30016 @@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_devic
30017 DRM_INFO("radeon: ttm finalized\n");
30018 }
30019
30020 -static struct vm_operations_struct radeon_ttm_vm_ops;
30021 -static const struct vm_operations_struct *ttm_vm_ops = NULL;
30022 -
30023 -static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
30024 -{
30025 - struct ttm_buffer_object *bo;
30026 - int r;
30027 -
30028 - bo = (struct ttm_buffer_object *)vma->vm_private_data;
30029 - if (bo == NULL) {
30030 - return VM_FAULT_NOPAGE;
30031 - }
30032 - r = ttm_vm_ops->fault(vma, vmf);
30033 - return r;
30034 -}
30035 -
30036 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
30037 {
30038 struct drm_file *file_priv;
30039 struct radeon_device *rdev;
30040 - int r;
30041
30042 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
30043 return drm_mmap(filp, vma);
30044 @@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struc
30045
30046 file_priv = (struct drm_file *)filp->private_data;
30047 rdev = file_priv->minor->dev->dev_private;
30048 - if (rdev == NULL) {
30049 + if (!rdev)
30050 return -EINVAL;
30051 - }
30052 - r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
30053 - if (unlikely(r != 0)) {
30054 - return r;
30055 - }
30056 - if (unlikely(ttm_vm_ops == NULL)) {
30057 - ttm_vm_ops = vma->vm_ops;
30058 - radeon_ttm_vm_ops = *ttm_vm_ops;
30059 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
30060 - }
30061 - vma->vm_ops = &radeon_ttm_vm_ops;
30062 - return 0;
30063 + return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
30064 }
30065
30066
30067 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/rs690.c linux-2.6.32.45/drivers/gpu/drm/radeon/rs690.c
30068 --- linux-2.6.32.45/drivers/gpu/drm/radeon/rs690.c 2011-03-27 14:31:47.000000000 -0400
30069 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/rs690.c 2011-04-17 15:56:46.000000000 -0400
30070 @@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct
30071 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
30072 rdev->pm.sideport_bandwidth.full)
30073 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
30074 - read_delay_latency.full = rfixed_const(370 * 800 * 1000);
30075 + read_delay_latency.full = rfixed_const(800 * 1000);
30076 read_delay_latency.full = rfixed_div(read_delay_latency,
30077 rdev->pm.igp_sideport_mclk);
30078 + a.full = rfixed_const(370);
30079 + read_delay_latency.full = rfixed_mul(read_delay_latency, a);
30080 } else {
30081 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
30082 rdev->pm.k8_bandwidth.full)
30083 diff -urNp linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo.c linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo.c
30084 --- linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo.c 2011-03-27 14:31:47.000000000 -0400
30085 +++ linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo.c 2011-04-23 12:56:11.000000000 -0400
30086 @@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_a
30087 NULL
30088 };
30089
30090 -static struct sysfs_ops ttm_bo_global_ops = {
30091 +static const struct sysfs_ops ttm_bo_global_ops = {
30092 .show = &ttm_bo_global_show
30093 };
30094
30095 diff -urNp linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo_vm.c linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo_vm.c
30096 --- linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-03-27 14:31:47.000000000 -0400
30097 +++ linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-04-17 15:56:46.000000000 -0400
30098 @@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_are
30099 {
30100 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
30101 vma->vm_private_data;
30102 - struct ttm_bo_device *bdev = bo->bdev;
30103 + struct ttm_bo_device *bdev;
30104 unsigned long bus_base;
30105 unsigned long bus_offset;
30106 unsigned long bus_size;
30107 @@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_are
30108 unsigned long address = (unsigned long)vmf->virtual_address;
30109 int retval = VM_FAULT_NOPAGE;
30110
30111 + if (!bo)
30112 + return VM_FAULT_NOPAGE;
30113 + bdev = bo->bdev;
30114 +
30115 /*
30116 * Work around locking order reversal in fault / nopfn
30117 * between mmap_sem and bo_reserve: Perform a trylock operation
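Editorial note (not part of the patch): with the radeon-specific fault wrapper removed earlier, ttm_bo_vm_fault itself now checks vm_private_data for NULL, and the hunk above defers the bo->bdev dereference until after that check instead of performing it in the declaration. The hazard in miniature, with invented types:

    #include <stdio.h>
    #include <stddef.h>

    struct device { const char *name; };
    struct object { struct device *dev; };

    /* Buggy shape: the initializer dereferences obj before the guard runs. */
    static const char *describe_buggy(struct object *obj)
    {
        struct device *dev = obj->dev;   /* crashes if obj == NULL */
        if (!obj)
            return "no object";
        return dev->name;
    }

    /* Fixed shape: test first, dereference afterwards. */
    static const char *describe_fixed(struct object *obj)
    {
        struct device *dev;

        if (!obj)
            return "no object";
        dev = obj->dev;
        return dev->name;
    }

    int main(void)
    {
        struct device d = { "gpu0" };
        struct object o = { &d };

        printf("%s\n", describe_fixed(&o));
        printf("%s\n", describe_fixed(NULL));
        (void)describe_buggy;   /* describe_buggy(NULL) would fault */
        return 0;
    }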
30118 diff -urNp linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_global.c linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_global.c
30119 --- linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_global.c 2011-03-27 14:31:47.000000000 -0400
30120 +++ linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_global.c 2011-04-17 15:56:46.000000000 -0400
30121 @@ -36,7 +36,7 @@
30122 struct ttm_global_item {
30123 struct mutex mutex;
30124 void *object;
30125 - int refcount;
30126 + atomic_t refcount;
30127 };
30128
30129 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
30130 @@ -49,7 +49,7 @@ void ttm_global_init(void)
30131 struct ttm_global_item *item = &glob[i];
30132 mutex_init(&item->mutex);
30133 item->object = NULL;
30134 - item->refcount = 0;
30135 + atomic_set(&item->refcount, 0);
30136 }
30137 }
30138
30139 @@ -59,7 +59,7 @@ void ttm_global_release(void)
30140 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
30141 struct ttm_global_item *item = &glob[i];
30142 BUG_ON(item->object != NULL);
30143 - BUG_ON(item->refcount != 0);
30144 + BUG_ON(atomic_read(&item->refcount) != 0);
30145 }
30146 }
30147
30148 @@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_globa
30149 void *object;
30150
30151 mutex_lock(&item->mutex);
30152 - if (item->refcount == 0) {
30153 + if (atomic_read(&item->refcount) == 0) {
30154 item->object = kzalloc(ref->size, GFP_KERNEL);
30155 if (unlikely(item->object == NULL)) {
30156 ret = -ENOMEM;
30157 @@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_globa
30158 goto out_err;
30159
30160 }
30161 - ++item->refcount;
30162 + atomic_inc(&item->refcount);
30163 ref->object = item->object;
30164 object = item->object;
30165 mutex_unlock(&item->mutex);
30166 @@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_gl
30167 struct ttm_global_item *item = &glob[ref->global_type];
30168
30169 mutex_lock(&item->mutex);
30170 - BUG_ON(item->refcount == 0);
30171 + BUG_ON(atomic_read(&item->refcount) == 0);
30172 BUG_ON(ref->object != item->object);
30173 - if (--item->refcount == 0) {
30174 + if (atomic_dec_and_test(&item->refcount)) {
30175 ref->release(ref);
30176 item->object = NULL;
30177 }
30178 diff -urNp linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_memory.c linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_memory.c
30179 --- linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_memory.c 2011-03-27 14:31:47.000000000 -0400
30180 +++ linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_memory.c 2011-04-17 15:56:46.000000000 -0400
30181 @@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_at
30182 NULL
30183 };
30184
30185 -static struct sysfs_ops ttm_mem_zone_ops = {
30186 +static const struct sysfs_ops ttm_mem_zone_ops = {
30187 .show = &ttm_mem_zone_show,
30188 .store = &ttm_mem_zone_store
30189 };
30190 diff -urNp linux-2.6.32.45/drivers/gpu/drm/via/via_drv.h linux-2.6.32.45/drivers/gpu/drm/via/via_drv.h
30191 --- linux-2.6.32.45/drivers/gpu/drm/via/via_drv.h 2011-03-27 14:31:47.000000000 -0400
30192 +++ linux-2.6.32.45/drivers/gpu/drm/via/via_drv.h 2011-05-04 17:56:28.000000000 -0400
30193 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
30194 typedef uint32_t maskarray_t[5];
30195
30196 typedef struct drm_via_irq {
30197 - atomic_t irq_received;
30198 + atomic_unchecked_t irq_received;
30199 uint32_t pending_mask;
30200 uint32_t enable_mask;
30201 wait_queue_head_t irq_queue;
30202 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
30203 struct timeval last_vblank;
30204 int last_vblank_valid;
30205 unsigned usec_per_vblank;
30206 - atomic_t vbl_received;
30207 + atomic_unchecked_t vbl_received;
30208 drm_via_state_t hc_state;
30209 char pci_buf[VIA_PCI_BUF_SIZE];
30210 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
30211 diff -urNp linux-2.6.32.45/drivers/gpu/drm/via/via_irq.c linux-2.6.32.45/drivers/gpu/drm/via/via_irq.c
30212 --- linux-2.6.32.45/drivers/gpu/drm/via/via_irq.c 2011-03-27 14:31:47.000000000 -0400
30213 +++ linux-2.6.32.45/drivers/gpu/drm/via/via_irq.c 2011-05-04 17:56:28.000000000 -0400
30214 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
30215 if (crtc != 0)
30216 return 0;
30217
30218 - return atomic_read(&dev_priv->vbl_received);
30219 + return atomic_read_unchecked(&dev_priv->vbl_received);
30220 }
30221
30222 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30223 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
30224
30225 status = VIA_READ(VIA_REG_INTERRUPT);
30226 if (status & VIA_IRQ_VBLANK_PENDING) {
30227 - atomic_inc(&dev_priv->vbl_received);
30228 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
30229 + atomic_inc_unchecked(&dev_priv->vbl_received);
30230 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
30231 do_gettimeofday(&cur_vblank);
30232 if (dev_priv->last_vblank_valid) {
30233 dev_priv->usec_per_vblank =
30234 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
30235 dev_priv->last_vblank = cur_vblank;
30236 dev_priv->last_vblank_valid = 1;
30237 }
30238 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
30239 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
30240 DRM_DEBUG("US per vblank is: %u\n",
30241 dev_priv->usec_per_vblank);
30242 }
30243 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
30244
30245 for (i = 0; i < dev_priv->num_irqs; ++i) {
30246 if (status & cur_irq->pending_mask) {
30247 - atomic_inc(&cur_irq->irq_received);
30248 + atomic_inc_unchecked(&cur_irq->irq_received);
30249 DRM_WAKEUP(&cur_irq->irq_queue);
30250 handled = 1;
30251 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
30252 @@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device *
30253 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
30254 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
30255 masks[irq][4]));
30256 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
30257 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
30258 } else {
30259 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
30260 (((cur_irq_sequence =
30261 - atomic_read(&cur_irq->irq_received)) -
30262 + atomic_read_unchecked(&cur_irq->irq_received)) -
30263 *sequence) <= (1 << 23)));
30264 }
30265 *sequence = cur_irq_sequence;
30266 @@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct dr
30267 }
30268
30269 for (i = 0; i < dev_priv->num_irqs; ++i) {
30270 - atomic_set(&cur_irq->irq_received, 0);
30271 + atomic_set_unchecked(&cur_irq->irq_received, 0);
30272 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
30273 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
30274 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
30275 @@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev,
30276 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
30277 case VIA_IRQ_RELATIVE:
30278 irqwait->request.sequence +=
30279 - atomic_read(&cur_irq->irq_received);
30280 + atomic_read_unchecked(&cur_irq->irq_received);
30281 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
30282 case VIA_IRQ_ABSOLUTE:
30283 break;
30284 diff -urNp linux-2.6.32.45/drivers/hid/hid-core.c linux-2.6.32.45/drivers/hid/hid-core.c
30285 --- linux-2.6.32.45/drivers/hid/hid-core.c 2011-05-10 22:12:01.000000000 -0400
30286 +++ linux-2.6.32.45/drivers/hid/hid-core.c 2011-05-10 22:12:32.000000000 -0400
30287 @@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device
30288
30289 int hid_add_device(struct hid_device *hdev)
30290 {
30291 - static atomic_t id = ATOMIC_INIT(0);
30292 + static atomic_unchecked_t id = ATOMIC_INIT(0);
30293 int ret;
30294
30295 if (WARN_ON(hdev->status & HID_STAT_ADDED))
30296 @@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hd
30297 /* XXX hack, any other cleaner solution after the driver core
30298 * is converted to allow more than 20 bytes as the device name? */
30299 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
30300 - hdev->vendor, hdev->product, atomic_inc_return(&id));
30301 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
30302
30303 ret = device_add(&hdev->dev);
30304 if (!ret)
30305 diff -urNp linux-2.6.32.45/drivers/hid/usbhid/hiddev.c linux-2.6.32.45/drivers/hid/usbhid/hiddev.c
30306 --- linux-2.6.32.45/drivers/hid/usbhid/hiddev.c 2011-03-27 14:31:47.000000000 -0400
30307 +++ linux-2.6.32.45/drivers/hid/usbhid/hiddev.c 2011-04-17 15:56:46.000000000 -0400
30308 @@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *fi
30309 return put_user(HID_VERSION, (int __user *)arg);
30310
30311 case HIDIOCAPPLICATION:
30312 - if (arg < 0 || arg >= hid->maxapplication)
30313 + if (arg >= hid->maxapplication)
30314 return -EINVAL;
30315
30316 for (i = 0; i < hid->maxcollection; i++)
30317 diff -urNp linux-2.6.32.45/drivers/hwmon/lis3lv02d.c linux-2.6.32.45/drivers/hwmon/lis3lv02d.c
30318 --- linux-2.6.32.45/drivers/hwmon/lis3lv02d.c 2011-03-27 14:31:47.000000000 -0400
30319 +++ linux-2.6.32.45/drivers/hwmon/lis3lv02d.c 2011-05-04 17:56:28.000000000 -0400
30320 @@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(in
30321 * the lid is closed. This leads to interrupts as soon as a little move
30322 * is done.
30323 */
30324 - atomic_inc(&lis3_dev.count);
30325 + atomic_inc_unchecked(&lis3_dev.count);
30326
30327 wake_up_interruptible(&lis3_dev.misc_wait);
30328 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
30329 @@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct in
30330 if (test_and_set_bit(0, &lis3_dev.misc_opened))
30331 return -EBUSY; /* already open */
30332
30333 - atomic_set(&lis3_dev.count, 0);
30334 + atomic_set_unchecked(&lis3_dev.count, 0);
30335
30336 /*
30337 * The sensor can generate interrupts for free-fall and direction
30338 @@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struc
30339 add_wait_queue(&lis3_dev.misc_wait, &wait);
30340 while (true) {
30341 set_current_state(TASK_INTERRUPTIBLE);
30342 - data = atomic_xchg(&lis3_dev.count, 0);
30343 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
30344 if (data)
30345 break;
30346
30347 @@ -244,7 +244,7 @@ out:
30348 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
30349 {
30350 poll_wait(file, &lis3_dev.misc_wait, wait);
30351 - if (atomic_read(&lis3_dev.count))
30352 + if (atomic_read_unchecked(&lis3_dev.count))
30353 return POLLIN | POLLRDNORM;
30354 return 0;
30355 }
30356 diff -urNp linux-2.6.32.45/drivers/hwmon/lis3lv02d.h linux-2.6.32.45/drivers/hwmon/lis3lv02d.h
30357 --- linux-2.6.32.45/drivers/hwmon/lis3lv02d.h 2011-03-27 14:31:47.000000000 -0400
30358 +++ linux-2.6.32.45/drivers/hwmon/lis3lv02d.h 2011-05-04 17:56:28.000000000 -0400
30359 @@ -201,7 +201,7 @@ struct lis3lv02d {
30360
30361 struct input_polled_dev *idev; /* input device */
30362 struct platform_device *pdev; /* platform device */
30363 - atomic_t count; /* interrupt count after last read */
30364 + atomic_unchecked_t count; /* interrupt count after last read */
30365 int xcalib; /* calibrated null value for x */
30366 int ycalib; /* calibrated null value for y */
30367 int zcalib; /* calibrated null value for z */
30368 diff -urNp linux-2.6.32.45/drivers/hwmon/sht15.c linux-2.6.32.45/drivers/hwmon/sht15.c
30369 --- linux-2.6.32.45/drivers/hwmon/sht15.c 2011-03-27 14:31:47.000000000 -0400
30370 +++ linux-2.6.32.45/drivers/hwmon/sht15.c 2011-05-04 17:56:28.000000000 -0400
30371 @@ -112,7 +112,7 @@ struct sht15_data {
30372 int supply_uV;
30373 int supply_uV_valid;
30374 struct work_struct update_supply_work;
30375 - atomic_t interrupt_handled;
30376 + atomic_unchecked_t interrupt_handled;
30377 };
30378
30379 /**
30380 @@ -245,13 +245,13 @@ static inline int sht15_update_single_va
30381 return ret;
30382
30383 gpio_direction_input(data->pdata->gpio_data);
30384 - atomic_set(&data->interrupt_handled, 0);
30385 + atomic_set_unchecked(&data->interrupt_handled, 0);
30386
30387 enable_irq(gpio_to_irq(data->pdata->gpio_data));
30388 if (gpio_get_value(data->pdata->gpio_data) == 0) {
30389 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
30390 /* Only relevant if the interrupt hasn't occured. */
30391 - if (!atomic_read(&data->interrupt_handled))
30392 + if (!atomic_read_unchecked(&data->interrupt_handled))
30393 schedule_work(&data->read_work);
30394 }
30395 ret = wait_event_timeout(data->wait_queue,
30396 @@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired
30397 struct sht15_data *data = d;
30398 /* First disable the interrupt */
30399 disable_irq_nosync(irq);
30400 - atomic_inc(&data->interrupt_handled);
30401 + atomic_inc_unchecked(&data->interrupt_handled);
30402 /* Then schedule a reading work struct */
30403 if (data->flag != SHT15_READING_NOTHING)
30404 schedule_work(&data->read_work);
30405 @@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct wo
30406 here as could have gone low in meantime so verify
30407 it hasn't!
30408 */
30409 - atomic_set(&data->interrupt_handled, 0);
30410 + atomic_set_unchecked(&data->interrupt_handled, 0);
30411 enable_irq(gpio_to_irq(data->pdata->gpio_data));
30412 /* If still not occured or another handler has been scheduled */
30413 if (gpio_get_value(data->pdata->gpio_data)
30414 - || atomic_read(&data->interrupt_handled))
30415 + || atomic_read_unchecked(&data->interrupt_handled))
30416 return;
30417 }
30418 /* Read the data back from the device */
30419 diff -urNp linux-2.6.32.45/drivers/hwmon/w83791d.c linux-2.6.32.45/drivers/hwmon/w83791d.c
30420 --- linux-2.6.32.45/drivers/hwmon/w83791d.c 2011-03-27 14:31:47.000000000 -0400
30421 +++ linux-2.6.32.45/drivers/hwmon/w83791d.c 2011-04-17 15:56:46.000000000 -0400
30422 @@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_cli
30423 struct i2c_board_info *info);
30424 static int w83791d_remove(struct i2c_client *client);
30425
30426 -static int w83791d_read(struct i2c_client *client, u8 register);
30427 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
30428 +static int w83791d_read(struct i2c_client *client, u8 reg);
30429 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
30430 static struct w83791d_data *w83791d_update_device(struct device *dev);
30431
30432 #ifdef DEBUG
30433 diff -urNp linux-2.6.32.45/drivers/i2c/busses/i2c-amd756-s4882.c linux-2.6.32.45/drivers/i2c/busses/i2c-amd756-s4882.c
30434 --- linux-2.6.32.45/drivers/i2c/busses/i2c-amd756-s4882.c 2011-03-27 14:31:47.000000000 -0400
30435 +++ linux-2.6.32.45/drivers/i2c/busses/i2c-amd756-s4882.c 2011-08-23 21:22:32.000000000 -0400
30436 @@ -43,7 +43,7 @@
30437 extern struct i2c_adapter amd756_smbus;
30438
30439 static struct i2c_adapter *s4882_adapter;
30440 -static struct i2c_algorithm *s4882_algo;
30441 +static i2c_algorithm_no_const *s4882_algo;
30442
30443 /* Wrapper access functions for multiplexed SMBus */
30444 static DEFINE_MUTEX(amd756_lock);
30445 diff -urNp linux-2.6.32.45/drivers/i2c/busses/i2c-nforce2-s4985.c linux-2.6.32.45/drivers/i2c/busses/i2c-nforce2-s4985.c
30446 --- linux-2.6.32.45/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-03-27 14:31:47.000000000 -0400
30447 +++ linux-2.6.32.45/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-08-23 21:22:32.000000000 -0400
30448 @@ -41,7 +41,7 @@
30449 extern struct i2c_adapter *nforce2_smbus;
30450
30451 static struct i2c_adapter *s4985_adapter;
30452 -static struct i2c_algorithm *s4985_algo;
30453 +static i2c_algorithm_no_const *s4985_algo;
30454
30455 /* Wrapper access functions for multiplexed SMBus */
30456 static DEFINE_MUTEX(nforce2_lock);
30457 diff -urNp linux-2.6.32.45/drivers/ide/ide-cd.c linux-2.6.32.45/drivers/ide/ide-cd.c
30458 --- linux-2.6.32.45/drivers/ide/ide-cd.c 2011-03-27 14:31:47.000000000 -0400
30459 +++ linux-2.6.32.45/drivers/ide/ide-cd.c 2011-04-17 15:56:46.000000000 -0400
30460 @@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_
30461 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
30462 if ((unsigned long)buf & alignment
30463 || blk_rq_bytes(rq) & q->dma_pad_mask
30464 - || object_is_on_stack(buf))
30465 + || object_starts_on_stack(buf))
30466 drive->dma = 0;
30467 }
30468 }
30469 diff -urNp linux-2.6.32.45/drivers/ide/ide-floppy.c linux-2.6.32.45/drivers/ide/ide-floppy.c
30470 --- linux-2.6.32.45/drivers/ide/ide-floppy.c 2011-03-27 14:31:47.000000000 -0400
30471 +++ linux-2.6.32.45/drivers/ide/ide-floppy.c 2011-05-16 21:46:57.000000000 -0400
30472 @@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_d
30473 u8 pc_buf[256], header_len, desc_cnt;
30474 int i, rc = 1, blocks, length;
30475
30476 + pax_track_stack();
30477 +
30478 ide_debug_log(IDE_DBG_FUNC, "enter");
30479
30480 drive->bios_cyl = 0;
30481 diff -urNp linux-2.6.32.45/drivers/ide/setup-pci.c linux-2.6.32.45/drivers/ide/setup-pci.c
30482 --- linux-2.6.32.45/drivers/ide/setup-pci.c 2011-03-27 14:31:47.000000000 -0400
30483 +++ linux-2.6.32.45/drivers/ide/setup-pci.c 2011-05-16 21:46:57.000000000 -0400
30484 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
30485 int ret, i, n_ports = dev2 ? 4 : 2;
30486 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
30487
30488 + pax_track_stack();
30489 +
30490 for (i = 0; i < n_ports / 2; i++) {
30491 ret = ide_setup_pci_controller(pdev[i], d, !i);
30492 if (ret < 0)
30493 diff -urNp linux-2.6.32.45/drivers/ieee1394/dv1394.c linux-2.6.32.45/drivers/ieee1394/dv1394.c
30494 --- linux-2.6.32.45/drivers/ieee1394/dv1394.c 2011-03-27 14:31:47.000000000 -0400
30495 +++ linux-2.6.32.45/drivers/ieee1394/dv1394.c 2011-04-23 12:56:11.000000000 -0400
30496 @@ -739,7 +739,7 @@ static void frame_prepare(struct video_c
30497 based upon DIF section and sequence
30498 */
30499
30500 -static void inline
30501 +static inline void
30502 frame_put_packet (struct frame *f, struct packet *p)
30503 {
30504 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
30505 diff -urNp linux-2.6.32.45/drivers/ieee1394/hosts.c linux-2.6.32.45/drivers/ieee1394/hosts.c
30506 --- linux-2.6.32.45/drivers/ieee1394/hosts.c 2011-03-27 14:31:47.000000000 -0400
30507 +++ linux-2.6.32.45/drivers/ieee1394/hosts.c 2011-04-17 15:56:46.000000000 -0400
30508 @@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso
30509 }
30510
30511 static struct hpsb_host_driver dummy_driver = {
30512 + .name = "dummy",
30513 .transmit_packet = dummy_transmit_packet,
30514 .devctl = dummy_devctl,
30515 .isoctl = dummy_isoctl
30516 diff -urNp linux-2.6.32.45/drivers/ieee1394/init_ohci1394_dma.c linux-2.6.32.45/drivers/ieee1394/init_ohci1394_dma.c
30517 --- linux-2.6.32.45/drivers/ieee1394/init_ohci1394_dma.c 2011-03-27 14:31:47.000000000 -0400
30518 +++ linux-2.6.32.45/drivers/ieee1394/init_ohci1394_dma.c 2011-04-17 15:56:46.000000000 -0400
30519 @@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_con
30520 for (func = 0; func < 8; func++) {
30521 u32 class = read_pci_config(num,slot,func,
30522 PCI_CLASS_REVISION);
30523 - if ((class == 0xffffffff))
30524 + if (class == 0xffffffff)
30525 continue; /* No device at this func */
30526
30527 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
30528 diff -urNp linux-2.6.32.45/drivers/ieee1394/ohci1394.c linux-2.6.32.45/drivers/ieee1394/ohci1394.c
30529 --- linux-2.6.32.45/drivers/ieee1394/ohci1394.c 2011-03-27 14:31:47.000000000 -0400
30530 +++ linux-2.6.32.45/drivers/ieee1394/ohci1394.c 2011-04-23 12:56:11.000000000 -0400
30531 @@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_
30532 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
30533
30534 /* Module Parameters */
30535 -static int phys_dma = 1;
30536 +static int phys_dma;
30537 module_param(phys_dma, int, 0444);
30538 -MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
30539 +MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
30540
30541 static void dma_trm_tasklet(unsigned long data);
30542 static void dma_trm_reset(struct dma_trm_ctx *d);
30543 diff -urNp linux-2.6.32.45/drivers/ieee1394/sbp2.c linux-2.6.32.45/drivers/ieee1394/sbp2.c
30544 --- linux-2.6.32.45/drivers/ieee1394/sbp2.c 2011-03-27 14:31:47.000000000 -0400
30545 +++ linux-2.6.32.45/drivers/ieee1394/sbp2.c 2011-04-23 12:56:11.000000000 -0400
30546 @@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 prot
30547 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
30548 MODULE_LICENSE("GPL");
30549
30550 -static int sbp2_module_init(void)
30551 +static int __init sbp2_module_init(void)
30552 {
30553 int ret;
30554
30555 diff -urNp linux-2.6.32.45/drivers/infiniband/core/cm.c linux-2.6.32.45/drivers/infiniband/core/cm.c
30556 --- linux-2.6.32.45/drivers/infiniband/core/cm.c 2011-03-27 14:31:47.000000000 -0400
30557 +++ linux-2.6.32.45/drivers/infiniband/core/cm.c 2011-04-17 15:56:46.000000000 -0400
30558 @@ -112,7 +112,7 @@ static char const counter_group_names[CM
30559
30560 struct cm_counter_group {
30561 struct kobject obj;
30562 - atomic_long_t counter[CM_ATTR_COUNT];
30563 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
30564 };
30565
30566 struct cm_counter_attribute {
30567 @@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm
30568 struct ib_mad_send_buf *msg = NULL;
30569 int ret;
30570
30571 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30572 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30573 counter[CM_REQ_COUNTER]);
30574
30575 /* Quick state check to discard duplicate REQs. */
30576 @@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm
30577 if (!cm_id_priv)
30578 return;
30579
30580 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30581 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30582 counter[CM_REP_COUNTER]);
30583 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
30584 if (ret)
30585 @@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work
30586 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
30587 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
30588 spin_unlock_irq(&cm_id_priv->lock);
30589 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30590 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30591 counter[CM_RTU_COUNTER]);
30592 goto out;
30593 }
30594 @@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_wor
30595 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
30596 dreq_msg->local_comm_id);
30597 if (!cm_id_priv) {
30598 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30599 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30600 counter[CM_DREQ_COUNTER]);
30601 cm_issue_drep(work->port, work->mad_recv_wc);
30602 return -EINVAL;
30603 @@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_wor
30604 case IB_CM_MRA_REP_RCVD:
30605 break;
30606 case IB_CM_TIMEWAIT:
30607 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30608 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30609 counter[CM_DREQ_COUNTER]);
30610 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
30611 goto unlock;
30612 @@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_wor
30613 cm_free_msg(msg);
30614 goto deref;
30615 case IB_CM_DREQ_RCVD:
30616 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30617 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30618 counter[CM_DREQ_COUNTER]);
30619 goto unlock;
30620 default:
30621 @@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work
30622 ib_modify_mad(cm_id_priv->av.port->mad_agent,
30623 cm_id_priv->msg, timeout)) {
30624 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
30625 - atomic_long_inc(&work->port->
30626 + atomic_long_inc_unchecked(&work->port->
30627 counter_group[CM_RECV_DUPLICATES].
30628 counter[CM_MRA_COUNTER]);
30629 goto out;
30630 @@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work
30631 break;
30632 case IB_CM_MRA_REQ_RCVD:
30633 case IB_CM_MRA_REP_RCVD:
30634 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30635 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30636 counter[CM_MRA_COUNTER]);
30637 /* fall through */
30638 default:
30639 @@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work
30640 case IB_CM_LAP_IDLE:
30641 break;
30642 case IB_CM_MRA_LAP_SENT:
30643 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30644 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30645 counter[CM_LAP_COUNTER]);
30646 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
30647 goto unlock;
30648 @@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work
30649 cm_free_msg(msg);
30650 goto deref;
30651 case IB_CM_LAP_RCVD:
30652 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30653 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30654 counter[CM_LAP_COUNTER]);
30655 goto unlock;
30656 default:
30657 @@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm
30658 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
30659 if (cur_cm_id_priv) {
30660 spin_unlock_irq(&cm.lock);
30661 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30662 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30663 counter[CM_SIDR_REQ_COUNTER]);
30664 goto out; /* Duplicate message. */
30665 }
30666 @@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_ma
30667 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
30668 msg->retries = 1;
30669
30670 - atomic_long_add(1 + msg->retries,
30671 + atomic_long_add_unchecked(1 + msg->retries,
30672 &port->counter_group[CM_XMIT].counter[attr_index]);
30673 if (msg->retries)
30674 - atomic_long_add(msg->retries,
30675 + atomic_long_add_unchecked(msg->retries,
30676 &port->counter_group[CM_XMIT_RETRIES].
30677 counter[attr_index]);
30678
30679 @@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_ma
30680 }
30681
30682 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
30683 - atomic_long_inc(&port->counter_group[CM_RECV].
30684 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
30685 counter[attr_id - CM_ATTR_ID_OFFSET]);
30686
30687 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
30688 @@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct ko
30689 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
30690
30691 return sprintf(buf, "%ld\n",
30692 - atomic_long_read(&group->counter[cm_attr->index]));
30693 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
30694 }
30695
30696 -static struct sysfs_ops cm_counter_ops = {
30697 +static const struct sysfs_ops cm_counter_ops = {
30698 .show = cm_show_counter
30699 };
30700
30701 diff -urNp linux-2.6.32.45/drivers/infiniband/core/fmr_pool.c linux-2.6.32.45/drivers/infiniband/core/fmr_pool.c
30702 --- linux-2.6.32.45/drivers/infiniband/core/fmr_pool.c 2011-03-27 14:31:47.000000000 -0400
30703 +++ linux-2.6.32.45/drivers/infiniband/core/fmr_pool.c 2011-05-04 17:56:28.000000000 -0400
30704 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
30705
30706 struct task_struct *thread;
30707
30708 - atomic_t req_ser;
30709 - atomic_t flush_ser;
30710 + atomic_unchecked_t req_ser;
30711 + atomic_unchecked_t flush_ser;
30712
30713 wait_queue_head_t force_wait;
30714 };
30715 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
30716 struct ib_fmr_pool *pool = pool_ptr;
30717
30718 do {
30719 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
30720 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
30721 ib_fmr_batch_release(pool);
30722
30723 - atomic_inc(&pool->flush_ser);
30724 + atomic_inc_unchecked(&pool->flush_ser);
30725 wake_up_interruptible(&pool->force_wait);
30726
30727 if (pool->flush_function)
30728 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
30729 }
30730
30731 set_current_state(TASK_INTERRUPTIBLE);
30732 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
30733 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
30734 !kthread_should_stop())
30735 schedule();
30736 __set_current_state(TASK_RUNNING);
30737 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
30738 pool->dirty_watermark = params->dirty_watermark;
30739 pool->dirty_len = 0;
30740 spin_lock_init(&pool->pool_lock);
30741 - atomic_set(&pool->req_ser, 0);
30742 - atomic_set(&pool->flush_ser, 0);
30743 + atomic_set_unchecked(&pool->req_ser, 0);
30744 + atomic_set_unchecked(&pool->flush_ser, 0);
30745 init_waitqueue_head(&pool->force_wait);
30746
30747 pool->thread = kthread_run(ib_fmr_cleanup_thread,
30748 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
30749 }
30750 spin_unlock_irq(&pool->pool_lock);
30751
30752 - serial = atomic_inc_return(&pool->req_ser);
30753 + serial = atomic_inc_return_unchecked(&pool->req_ser);
30754 wake_up_process(pool->thread);
30755
30756 if (wait_event_interruptible(pool->force_wait,
30757 - atomic_read(&pool->flush_ser) - serial >= 0))
30758 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
30759 return -EINTR;
30760
30761 return 0;
30762 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
30763 } else {
30764 list_add_tail(&fmr->list, &pool->dirty_list);
30765 if (++pool->dirty_len >= pool->dirty_watermark) {
30766 - atomic_inc(&pool->req_ser);
30767 + atomic_inc_unchecked(&pool->req_ser);
30768 wake_up_process(pool->thread);
30769 }
30770 }
30771 diff -urNp linux-2.6.32.45/drivers/infiniband/core/sysfs.c linux-2.6.32.45/drivers/infiniband/core/sysfs.c
30772 --- linux-2.6.32.45/drivers/infiniband/core/sysfs.c 2011-03-27 14:31:47.000000000 -0400
30773 +++ linux-2.6.32.45/drivers/infiniband/core/sysfs.c 2011-04-17 15:56:46.000000000 -0400
30774 @@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kob
30775 return port_attr->show(p, port_attr, buf);
30776 }
30777
30778 -static struct sysfs_ops port_sysfs_ops = {
30779 +static const struct sysfs_ops port_sysfs_ops = {
30780 .show = port_attr_show
30781 };
30782
30783 diff -urNp linux-2.6.32.45/drivers/infiniband/core/uverbs_marshall.c linux-2.6.32.45/drivers/infiniband/core/uverbs_marshall.c
30784 --- linux-2.6.32.45/drivers/infiniband/core/uverbs_marshall.c 2011-03-27 14:31:47.000000000 -0400
30785 +++ linux-2.6.32.45/drivers/infiniband/core/uverbs_marshall.c 2011-04-17 15:56:46.000000000 -0400
30786 @@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_u
30787 dst->grh.sgid_index = src->grh.sgid_index;
30788 dst->grh.hop_limit = src->grh.hop_limit;
30789 dst->grh.traffic_class = src->grh.traffic_class;
30790 + memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
30791 dst->dlid = src->dlid;
30792 dst->sl = src->sl;
30793 dst->src_path_bits = src->src_path_bits;
30794 dst->static_rate = src->static_rate;
30795 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
30796 dst->port_num = src->port_num;
30797 + dst->reserved = 0;
30798 }
30799 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
30800
30801 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
30802 struct ib_qp_attr *src)
30803 {
30804 + dst->qp_state = src->qp_state;
30805 dst->cur_qp_state = src->cur_qp_state;
30806 dst->path_mtu = src->path_mtu;
30807 dst->path_mig_state = src->path_mig_state;
30808 @@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_u
30809 dst->rnr_retry = src->rnr_retry;
30810 dst->alt_port_num = src->alt_port_num;
30811 dst->alt_timeout = src->alt_timeout;
30812 + memset(dst->reserved, 0, sizeof(dst->reserved));
30813 }
30814 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
30815
30816 diff -urNp linux-2.6.32.45/drivers/infiniband/hw/ipath/ipath_fs.c linux-2.6.32.45/drivers/infiniband/hw/ipath/ipath_fs.c
30817 --- linux-2.6.32.45/drivers/infiniband/hw/ipath/ipath_fs.c 2011-03-27 14:31:47.000000000 -0400
30818 +++ linux-2.6.32.45/drivers/infiniband/hw/ipath/ipath_fs.c 2011-05-16 21:46:57.000000000 -0400
30819 @@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(stru
30820 struct infinipath_counters counters;
30821 struct ipath_devdata *dd;
30822
30823 + pax_track_stack();
30824 +
30825 dd = file->f_path.dentry->d_inode->i_private;
30826 dd->ipath_f_read_counters(dd, &counters);
30827
30828 diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes.c linux-2.6.32.45/drivers/infiniband/hw/nes/nes.c
30829 --- linux-2.6.32.45/drivers/infiniband/hw/nes/nes.c 2011-03-27 14:31:47.000000000 -0400
30830 +++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes.c 2011-05-04 17:56:28.000000000 -0400
30831 @@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
30832 LIST_HEAD(nes_adapter_list);
30833 static LIST_HEAD(nes_dev_list);
30834
30835 -atomic_t qps_destroyed;
30836 +atomic_unchecked_t qps_destroyed;
30837
30838 static unsigned int ee_flsh_adapter;
30839 static unsigned int sysfs_nonidx_addr;
30840 @@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(str
30841 struct nes_adapter *nesadapter = nesdev->nesadapter;
30842 u32 qp_id;
30843
30844 - atomic_inc(&qps_destroyed);
30845 + atomic_inc_unchecked(&qps_destroyed);
30846
30847 /* Free the control structures */
30848
30849 diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes_cm.c linux-2.6.32.45/drivers/infiniband/hw/nes/nes_cm.c
30850 --- linux-2.6.32.45/drivers/infiniband/hw/nes/nes_cm.c 2011-03-27 14:31:47.000000000 -0400
30851 +++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes_cm.c 2011-05-04 17:56:28.000000000 -0400
30852 @@ -69,11 +69,11 @@ u32 cm_packets_received;
30853 u32 cm_listens_created;
30854 u32 cm_listens_destroyed;
30855 u32 cm_backlog_drops;
30856 -atomic_t cm_loopbacks;
30857 -atomic_t cm_nodes_created;
30858 -atomic_t cm_nodes_destroyed;
30859 -atomic_t cm_accel_dropped_pkts;
30860 -atomic_t cm_resets_recvd;
30861 +atomic_unchecked_t cm_loopbacks;
30862 +atomic_unchecked_t cm_nodes_created;
30863 +atomic_unchecked_t cm_nodes_destroyed;
30864 +atomic_unchecked_t cm_accel_dropped_pkts;
30865 +atomic_unchecked_t cm_resets_recvd;
30866
30867 static inline int mini_cm_accelerated(struct nes_cm_core *,
30868 struct nes_cm_node *);
30869 @@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
30870
30871 static struct nes_cm_core *g_cm_core;
30872
30873 -atomic_t cm_connects;
30874 -atomic_t cm_accepts;
30875 -atomic_t cm_disconnects;
30876 -atomic_t cm_closes;
30877 -atomic_t cm_connecteds;
30878 -atomic_t cm_connect_reqs;
30879 -atomic_t cm_rejects;
30880 +atomic_unchecked_t cm_connects;
30881 +atomic_unchecked_t cm_accepts;
30882 +atomic_unchecked_t cm_disconnects;
30883 +atomic_unchecked_t cm_closes;
30884 +atomic_unchecked_t cm_connecteds;
30885 +atomic_unchecked_t cm_connect_reqs;
30886 +atomic_unchecked_t cm_rejects;
30887
30888
30889 /**
30890 @@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(
30891 cm_node->rem_mac);
30892
30893 add_hte_node(cm_core, cm_node);
30894 - atomic_inc(&cm_nodes_created);
30895 + atomic_inc_unchecked(&cm_nodes_created);
30896
30897 return cm_node;
30898 }
30899 @@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm
30900 }
30901
30902 atomic_dec(&cm_core->node_cnt);
30903 - atomic_inc(&cm_nodes_destroyed);
30904 + atomic_inc_unchecked(&cm_nodes_destroyed);
30905 nesqp = cm_node->nesqp;
30906 if (nesqp) {
30907 nesqp->cm_node = NULL;
30908 @@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm
30909
30910 static void drop_packet(struct sk_buff *skb)
30911 {
30912 - atomic_inc(&cm_accel_dropped_pkts);
30913 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
30914 dev_kfree_skb_any(skb);
30915 }
30916
30917 @@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm
30918
30919 int reset = 0; /* whether to send reset in case of err.. */
30920 int passive_state;
30921 - atomic_inc(&cm_resets_recvd);
30922 + atomic_inc_unchecked(&cm_resets_recvd);
30923 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
30924 " refcnt=%d\n", cm_node, cm_node->state,
30925 atomic_read(&cm_node->ref_count));
30926 @@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_conne
30927 rem_ref_cm_node(cm_node->cm_core, cm_node);
30928 return NULL;
30929 }
30930 - atomic_inc(&cm_loopbacks);
30931 + atomic_inc_unchecked(&cm_loopbacks);
30932 loopbackremotenode->loopbackpartner = cm_node;
30933 loopbackremotenode->tcp_cntxt.rcv_wscale =
30934 NES_CM_DEFAULT_RCV_WND_SCALE;
30935 @@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_c
30936 add_ref_cm_node(cm_node);
30937 } else if (cm_node->state == NES_CM_STATE_TSA) {
30938 rem_ref_cm_node(cm_core, cm_node);
30939 - atomic_inc(&cm_accel_dropped_pkts);
30940 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
30941 dev_kfree_skb_any(skb);
30942 break;
30943 }
30944 @@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct ne
30945
30946 if ((cm_id) && (cm_id->event_handler)) {
30947 if (issue_disconn) {
30948 - atomic_inc(&cm_disconnects);
30949 + atomic_inc_unchecked(&cm_disconnects);
30950 cm_event.event = IW_CM_EVENT_DISCONNECT;
30951 cm_event.status = disconn_status;
30952 cm_event.local_addr = cm_id->local_addr;
30953 @@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct ne
30954 }
30955
30956 if (issue_close) {
30957 - atomic_inc(&cm_closes);
30958 + atomic_inc_unchecked(&cm_closes);
30959 nes_disconnect(nesqp, 1);
30960
30961 cm_id->provider_data = nesqp;
30962 @@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
30963
30964 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
30965 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
30966 - atomic_inc(&cm_accepts);
30967 + atomic_inc_unchecked(&cm_accepts);
30968
30969 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
30970 atomic_read(&nesvnic->netdev->refcnt));
30971 @@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
30972
30973 struct nes_cm_core *cm_core;
30974
30975 - atomic_inc(&cm_rejects);
30976 + atomic_inc_unchecked(&cm_rejects);
30977 cm_node = (struct nes_cm_node *) cm_id->provider_data;
30978 loopback = cm_node->loopbackpartner;
30979 cm_core = cm_node->cm_core;
30980 @@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id,
30981 ntohl(cm_id->local_addr.sin_addr.s_addr),
30982 ntohs(cm_id->local_addr.sin_port));
30983
30984 - atomic_inc(&cm_connects);
30985 + atomic_inc_unchecked(&cm_connects);
30986 nesqp->active_conn = 1;
30987
30988 /* cache the cm_id in the qp */
30989 @@ -3195,7 +3195,7 @@ static void cm_event_connected(struct ne
30990 if (nesqp->destroyed) {
30991 return;
30992 }
30993 - atomic_inc(&cm_connecteds);
30994 + atomic_inc_unchecked(&cm_connecteds);
30995 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
30996 " local port 0x%04X. jiffies = %lu.\n",
30997 nesqp->hwqp.qp_id,
30998 @@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm
30999
31000 ret = cm_id->event_handler(cm_id, &cm_event);
31001 cm_id->add_ref(cm_id);
31002 - atomic_inc(&cm_closes);
31003 + atomic_inc_unchecked(&cm_closes);
31004 cm_event.event = IW_CM_EVENT_CLOSE;
31005 cm_event.status = IW_CM_EVENT_STATUS_OK;
31006 cm_event.provider_data = cm_id->provider_data;
31007 @@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_
31008 return;
31009 cm_id = cm_node->cm_id;
31010
31011 - atomic_inc(&cm_connect_reqs);
31012 + atomic_inc_unchecked(&cm_connect_reqs);
31013 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
31014 cm_node, cm_id, jiffies);
31015
31016 @@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct n
31017 return;
31018 cm_id = cm_node->cm_id;
31019
31020 - atomic_inc(&cm_connect_reqs);
31021 + atomic_inc_unchecked(&cm_connect_reqs);
31022 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
31023 cm_node, cm_id, jiffies);
31024
31025 diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes.h linux-2.6.32.45/drivers/infiniband/hw/nes/nes.h
31026 --- linux-2.6.32.45/drivers/infiniband/hw/nes/nes.h 2011-03-27 14:31:47.000000000 -0400
31027 +++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes.h 2011-05-04 17:56:28.000000000 -0400
31028 @@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
31029 extern unsigned int wqm_quanta;
31030 extern struct list_head nes_adapter_list;
31031
31032 -extern atomic_t cm_connects;
31033 -extern atomic_t cm_accepts;
31034 -extern atomic_t cm_disconnects;
31035 -extern atomic_t cm_closes;
31036 -extern atomic_t cm_connecteds;
31037 -extern atomic_t cm_connect_reqs;
31038 -extern atomic_t cm_rejects;
31039 -extern atomic_t mod_qp_timouts;
31040 -extern atomic_t qps_created;
31041 -extern atomic_t qps_destroyed;
31042 -extern atomic_t sw_qps_destroyed;
31043 +extern atomic_unchecked_t cm_connects;
31044 +extern atomic_unchecked_t cm_accepts;
31045 +extern atomic_unchecked_t cm_disconnects;
31046 +extern atomic_unchecked_t cm_closes;
31047 +extern atomic_unchecked_t cm_connecteds;
31048 +extern atomic_unchecked_t cm_connect_reqs;
31049 +extern atomic_unchecked_t cm_rejects;
31050 +extern atomic_unchecked_t mod_qp_timouts;
31051 +extern atomic_unchecked_t qps_created;
31052 +extern atomic_unchecked_t qps_destroyed;
31053 +extern atomic_unchecked_t sw_qps_destroyed;
31054 extern u32 mh_detected;
31055 extern u32 mh_pauses_sent;
31056 extern u32 cm_packets_sent;
31057 @@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
31058 extern u32 cm_listens_created;
31059 extern u32 cm_listens_destroyed;
31060 extern u32 cm_backlog_drops;
31061 -extern atomic_t cm_loopbacks;
31062 -extern atomic_t cm_nodes_created;
31063 -extern atomic_t cm_nodes_destroyed;
31064 -extern atomic_t cm_accel_dropped_pkts;
31065 -extern atomic_t cm_resets_recvd;
31066 +extern atomic_unchecked_t cm_loopbacks;
31067 +extern atomic_unchecked_t cm_nodes_created;
31068 +extern atomic_unchecked_t cm_nodes_destroyed;
31069 +extern atomic_unchecked_t cm_accel_dropped_pkts;
31070 +extern atomic_unchecked_t cm_resets_recvd;
31071
31072 extern u32 int_mod_timer_init;
31073 extern u32 int_mod_cq_depth_256;
31074 diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes_nic.c linux-2.6.32.45/drivers/infiniband/hw/nes/nes_nic.c
31075 --- linux-2.6.32.45/drivers/infiniband/hw/nes/nes_nic.c 2011-03-27 14:31:47.000000000 -0400
31076 +++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes_nic.c 2011-05-04 17:56:28.000000000 -0400
31077 @@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats
31078 target_stat_values[++index] = mh_detected;
31079 target_stat_values[++index] = mh_pauses_sent;
31080 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
31081 - target_stat_values[++index] = atomic_read(&cm_connects);
31082 - target_stat_values[++index] = atomic_read(&cm_accepts);
31083 - target_stat_values[++index] = atomic_read(&cm_disconnects);
31084 - target_stat_values[++index] = atomic_read(&cm_connecteds);
31085 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
31086 - target_stat_values[++index] = atomic_read(&cm_rejects);
31087 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
31088 - target_stat_values[++index] = atomic_read(&qps_created);
31089 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
31090 - target_stat_values[++index] = atomic_read(&qps_destroyed);
31091 - target_stat_values[++index] = atomic_read(&cm_closes);
31092 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
31093 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
31094 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
31095 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
31096 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
31097 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
31098 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
31099 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
31100 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
31101 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
31102 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
31103 target_stat_values[++index] = cm_packets_sent;
31104 target_stat_values[++index] = cm_packets_bounced;
31105 target_stat_values[++index] = cm_packets_created;
31106 @@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats
31107 target_stat_values[++index] = cm_listens_created;
31108 target_stat_values[++index] = cm_listens_destroyed;
31109 target_stat_values[++index] = cm_backlog_drops;
31110 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
31111 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
31112 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
31113 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
31114 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
31115 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
31116 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
31117 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
31118 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
31119 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
31120 target_stat_values[++index] = int_mod_timer_init;
31121 target_stat_values[++index] = int_mod_cq_depth_1;
31122 target_stat_values[++index] = int_mod_cq_depth_4;
31123 diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes_verbs.c linux-2.6.32.45/drivers/infiniband/hw/nes/nes_verbs.c
31124 --- linux-2.6.32.45/drivers/infiniband/hw/nes/nes_verbs.c 2011-03-27 14:31:47.000000000 -0400
31125 +++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes_verbs.c 2011-05-04 17:56:28.000000000 -0400
31126 @@ -45,9 +45,9 @@
31127
31128 #include <rdma/ib_umem.h>
31129
31130 -atomic_t mod_qp_timouts;
31131 -atomic_t qps_created;
31132 -atomic_t sw_qps_destroyed;
31133 +atomic_unchecked_t mod_qp_timouts;
31134 +atomic_unchecked_t qps_created;
31135 +atomic_unchecked_t sw_qps_destroyed;
31136
31137 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
31138
31139 @@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struc
31140 if (init_attr->create_flags)
31141 return ERR_PTR(-EINVAL);
31142
31143 - atomic_inc(&qps_created);
31144 + atomic_inc_unchecked(&qps_created);
31145 switch (init_attr->qp_type) {
31146 case IB_QPT_RC:
31147 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
31148 @@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *
31149 struct iw_cm_event cm_event;
31150 int ret;
31151
31152 - atomic_inc(&sw_qps_destroyed);
31153 + atomic_inc_unchecked(&sw_qps_destroyed);
31154 nesqp->destroyed = 1;
31155
31156 /* Blow away the connection if it exists. */
31157 diff -urNp linux-2.6.32.45/drivers/input/gameport/gameport.c linux-2.6.32.45/drivers/input/gameport/gameport.c
31158 --- linux-2.6.32.45/drivers/input/gameport/gameport.c 2011-03-27 14:31:47.000000000 -0400
31159 +++ linux-2.6.32.45/drivers/input/gameport/gameport.c 2011-05-04 17:56:28.000000000 -0400
31160 @@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
31161 */
31162 static void gameport_init_port(struct gameport *gameport)
31163 {
31164 - static atomic_t gameport_no = ATOMIC_INIT(0);
31165 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
31166
31167 __module_get(THIS_MODULE);
31168
31169 mutex_init(&gameport->drv_mutex);
31170 device_initialize(&gameport->dev);
31171 - dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
31172 + dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
31173 gameport->dev.bus = &gameport_bus;
31174 gameport->dev.release = gameport_release_port;
31175 if (gameport->parent)
31176 diff -urNp linux-2.6.32.45/drivers/input/input.c linux-2.6.32.45/drivers/input/input.c
31177 --- linux-2.6.32.45/drivers/input/input.c 2011-03-27 14:31:47.000000000 -0400
31178 +++ linux-2.6.32.45/drivers/input/input.c 2011-05-04 17:56:28.000000000 -0400
31179 @@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
31180 */
31181 int input_register_device(struct input_dev *dev)
31182 {
31183 - static atomic_t input_no = ATOMIC_INIT(0);
31184 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
31185 struct input_handler *handler;
31186 const char *path;
31187 int error;
31188 @@ -1585,7 +1585,7 @@ int input_register_device(struct input_d
31189 dev->setkeycode = input_default_setkeycode;
31190
31191 dev_set_name(&dev->dev, "input%ld",
31192 - (unsigned long) atomic_inc_return(&input_no) - 1);
31193 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
31194
31195 error = device_add(&dev->dev);
31196 if (error)
31197 diff -urNp linux-2.6.32.45/drivers/input/joystick/sidewinder.c linux-2.6.32.45/drivers/input/joystick/sidewinder.c
31198 --- linux-2.6.32.45/drivers/input/joystick/sidewinder.c 2011-03-27 14:31:47.000000000 -0400
31199 +++ linux-2.6.32.45/drivers/input/joystick/sidewinder.c 2011-05-18 20:09:36.000000000 -0400
31200 @@ -30,6 +30,7 @@
31201 #include <linux/kernel.h>
31202 #include <linux/module.h>
31203 #include <linux/slab.h>
31204 +#include <linux/sched.h>
31205 #include <linux/init.h>
31206 #include <linux/input.h>
31207 #include <linux/gameport.h>
31208 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
31209 unsigned char buf[SW_LENGTH];
31210 int i;
31211
31212 + pax_track_stack();
31213 +
31214 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
31215
31216 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
31217 diff -urNp linux-2.6.32.45/drivers/input/joystick/xpad.c linux-2.6.32.45/drivers/input/joystick/xpad.c
31218 --- linux-2.6.32.45/drivers/input/joystick/xpad.c 2011-03-27 14:31:47.000000000 -0400
31219 +++ linux-2.6.32.45/drivers/input/joystick/xpad.c 2011-05-04 17:56:28.000000000 -0400
31220 @@ -621,7 +621,7 @@ static void xpad_led_set(struct led_clas
31221
31222 static int xpad_led_probe(struct usb_xpad *xpad)
31223 {
31224 - static atomic_t led_seq = ATOMIC_INIT(0);
31225 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
31226 long led_no;
31227 struct xpad_led *led;
31228 struct led_classdev *led_cdev;
31229 @@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpa
31230 if (!led)
31231 return -ENOMEM;
31232
31233 - led_no = (long)atomic_inc_return(&led_seq) - 1;
31234 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
31235
31236 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
31237 led->xpad = xpad;
31238 diff -urNp linux-2.6.32.45/drivers/input/serio/serio.c linux-2.6.32.45/drivers/input/serio/serio.c
31239 --- linux-2.6.32.45/drivers/input/serio/serio.c 2011-03-27 14:31:47.000000000 -0400
31240 +++ linux-2.6.32.45/drivers/input/serio/serio.c 2011-05-04 17:56:28.000000000 -0400
31241 @@ -527,7 +527,7 @@ static void serio_release_port(struct de
31242 */
31243 static void serio_init_port(struct serio *serio)
31244 {
31245 - static atomic_t serio_no = ATOMIC_INIT(0);
31246 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
31247
31248 __module_get(THIS_MODULE);
31249
31250 @@ -536,7 +536,7 @@ static void serio_init_port(struct serio
31251 mutex_init(&serio->drv_mutex);
31252 device_initialize(&serio->dev);
31253 dev_set_name(&serio->dev, "serio%ld",
31254 - (long)atomic_inc_return(&serio_no) - 1);
31255 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
31256 serio->dev.bus = &serio_bus;
31257 serio->dev.release = serio_release_port;
31258 if (serio->parent) {
31259 diff -urNp linux-2.6.32.45/drivers/isdn/gigaset/common.c linux-2.6.32.45/drivers/isdn/gigaset/common.c
31260 --- linux-2.6.32.45/drivers/isdn/gigaset/common.c 2011-03-27 14:31:47.000000000 -0400
31261 +++ linux-2.6.32.45/drivers/isdn/gigaset/common.c 2011-04-17 15:56:46.000000000 -0400
31262 @@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct
31263 cs->commands_pending = 0;
31264 cs->cur_at_seq = 0;
31265 cs->gotfwver = -1;
31266 - cs->open_count = 0;
31267 + local_set(&cs->open_count, 0);
31268 cs->dev = NULL;
31269 cs->tty = NULL;
31270 cs->tty_dev = NULL;
31271 diff -urNp linux-2.6.32.45/drivers/isdn/gigaset/gigaset.h linux-2.6.32.45/drivers/isdn/gigaset/gigaset.h
31272 --- linux-2.6.32.45/drivers/isdn/gigaset/gigaset.h 2011-03-27 14:31:47.000000000 -0400
31273 +++ linux-2.6.32.45/drivers/isdn/gigaset/gigaset.h 2011-04-17 15:56:46.000000000 -0400
31274 @@ -34,6 +34,7 @@
31275 #include <linux/tty_driver.h>
31276 #include <linux/list.h>
31277 #include <asm/atomic.h>
31278 +#include <asm/local.h>
31279
31280 #define GIG_VERSION {0,5,0,0}
31281 #define GIG_COMPAT {0,4,0,0}
31282 @@ -446,7 +447,7 @@ struct cardstate {
31283 spinlock_t cmdlock;
31284 unsigned curlen, cmdbytes;
31285
31286 - unsigned open_count;
31287 + local_t open_count;
31288 struct tty_struct *tty;
31289 struct tasklet_struct if_wake_tasklet;
31290 unsigned control_state;
31291 diff -urNp linux-2.6.32.45/drivers/isdn/gigaset/interface.c linux-2.6.32.45/drivers/isdn/gigaset/interface.c
31292 --- linux-2.6.32.45/drivers/isdn/gigaset/interface.c 2011-03-27 14:31:47.000000000 -0400
31293 +++ linux-2.6.32.45/drivers/isdn/gigaset/interface.c 2011-04-17 15:56:46.000000000 -0400
31294 @@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tt
31295 return -ERESTARTSYS; // FIXME -EINTR?
31296 tty->driver_data = cs;
31297
31298 - ++cs->open_count;
31299 -
31300 - if (cs->open_count == 1) {
31301 + if (local_inc_return(&cs->open_count) == 1) {
31302 spin_lock_irqsave(&cs->lock, flags);
31303 cs->tty = tty;
31304 spin_unlock_irqrestore(&cs->lock, flags);
31305 @@ -195,10 +193,10 @@ static void if_close(struct tty_struct *
31306
31307 if (!cs->connected)
31308 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
31309 - else if (!cs->open_count)
31310 + else if (!local_read(&cs->open_count))
31311 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31312 else {
31313 - if (!--cs->open_count) {
31314 + if (!local_dec_return(&cs->open_count)) {
31315 spin_lock_irqsave(&cs->lock, flags);
31316 cs->tty = NULL;
31317 spin_unlock_irqrestore(&cs->lock, flags);
31318 @@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *t
31319 if (!cs->connected) {
31320 gig_dbg(DEBUG_IF, "not connected");
31321 retval = -ENODEV;
31322 - } else if (!cs->open_count)
31323 + } else if (!local_read(&cs->open_count))
31324 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31325 else {
31326 retval = 0;
31327 @@ -361,7 +359,7 @@ static int if_write(struct tty_struct *t
31328 if (!cs->connected) {
31329 gig_dbg(DEBUG_IF, "not connected");
31330 retval = -ENODEV;
31331 - } else if (!cs->open_count)
31332 + } else if (!local_read(&cs->open_count))
31333 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31334 else if (cs->mstate != MS_LOCKED) {
31335 dev_warn(cs->dev, "can't write to unlocked device\n");
31336 @@ -395,7 +393,7 @@ static int if_write_room(struct tty_stru
31337 if (!cs->connected) {
31338 gig_dbg(DEBUG_IF, "not connected");
31339 retval = -ENODEV;
31340 - } else if (!cs->open_count)
31341 + } else if (!local_read(&cs->open_count))
31342 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31343 else if (cs->mstate != MS_LOCKED) {
31344 dev_warn(cs->dev, "can't write to unlocked device\n");
31345 @@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty
31346
31347 if (!cs->connected)
31348 gig_dbg(DEBUG_IF, "not connected");
31349 - else if (!cs->open_count)
31350 + else if (!local_read(&cs->open_count))
31351 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31352 else if (cs->mstate != MS_LOCKED)
31353 dev_warn(cs->dev, "can't write to unlocked device\n");
31354 @@ -453,7 +451,7 @@ static void if_throttle(struct tty_struc
31355
31356 if (!cs->connected)
31357 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
31358 - else if (!cs->open_count)
31359 + else if (!local_read(&cs->open_count))
31360 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31361 else {
31362 //FIXME
31363 @@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_str
31364
31365 if (!cs->connected)
31366 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
31367 - else if (!cs->open_count)
31368 + else if (!local_read(&cs->open_count))
31369 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31370 else {
31371 //FIXME
31372 @@ -510,7 +508,7 @@ static void if_set_termios(struct tty_st
31373 goto out;
31374 }
31375
31376 - if (!cs->open_count) {
31377 + if (!local_read(&cs->open_count)) {
31378 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31379 goto out;
31380 }
31381 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/avm/b1.c linux-2.6.32.45/drivers/isdn/hardware/avm/b1.c
31382 --- linux-2.6.32.45/drivers/isdn/hardware/avm/b1.c 2011-03-27 14:31:47.000000000 -0400
31383 +++ linux-2.6.32.45/drivers/isdn/hardware/avm/b1.c 2011-04-17 15:56:46.000000000 -0400
31384 @@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capilo
31385 }
31386 if (left) {
31387 if (t4file->user) {
31388 - if (copy_from_user(buf, dp, left))
31389 + if (left > sizeof buf || copy_from_user(buf, dp, left))
31390 return -EFAULT;
31391 } else {
31392 memcpy(buf, dp, left);
31393 @@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capilo
31394 }
31395 if (left) {
31396 if (config->user) {
31397 - if (copy_from_user(buf, dp, left))
31398 + if (left > sizeof buf || copy_from_user(buf, dp, left))
31399 return -EFAULT;
31400 } else {
31401 memcpy(buf, dp, left);
31402 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/capidtmf.c linux-2.6.32.45/drivers/isdn/hardware/eicon/capidtmf.c
31403 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/capidtmf.c 2011-03-27 14:31:47.000000000 -0400
31404 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/capidtmf.c 2011-05-16 21:46:57.000000000 -0400
31405 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
31406 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
31407 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
31408
31409 + pax_track_stack();
31410
31411 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
31412 {
31413 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/capifunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/capifunc.c
31414 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/capifunc.c 2011-03-27 14:31:47.000000000 -0400
31415 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/capifunc.c 2011-05-16 21:46:57.000000000 -0400
31416 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
31417 IDI_SYNC_REQ req;
31418 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31419
31420 + pax_track_stack();
31421 +
31422 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31423
31424 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31425 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/diddfunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/diddfunc.c
31426 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/diddfunc.c 2011-03-27 14:31:47.000000000 -0400
31427 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/diddfunc.c 2011-05-16 21:46:57.000000000 -0400
31428 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
31429 IDI_SYNC_REQ req;
31430 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31431
31432 + pax_track_stack();
31433 +
31434 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31435
31436 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31437 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/divasfunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/divasfunc.c
31438 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/divasfunc.c 2011-03-27 14:31:47.000000000 -0400
31439 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/divasfunc.c 2011-05-16 21:46:57.000000000 -0400
31440 @@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_di
31441 IDI_SYNC_REQ req;
31442 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31443
31444 + pax_track_stack();
31445 +
31446 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31447
31448 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31449 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/divasync.h linux-2.6.32.45/drivers/isdn/hardware/eicon/divasync.h
31450 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/divasync.h 2011-03-27 14:31:47.000000000 -0400
31451 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/divasync.h 2011-08-05 20:33:55.000000000 -0400
31452 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
31453 } diva_didd_add_adapter_t;
31454 typedef struct _diva_didd_remove_adapter {
31455 IDI_CALL p_request;
31456 -} diva_didd_remove_adapter_t;
31457 +} __no_const diva_didd_remove_adapter_t;
31458 typedef struct _diva_didd_read_adapter_array {
31459 void * buffer;
31460 dword length;
31461 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/idifunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/idifunc.c
31462 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/idifunc.c 2011-03-27 14:31:47.000000000 -0400
31463 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/idifunc.c 2011-05-16 21:46:57.000000000 -0400
31464 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
31465 IDI_SYNC_REQ req;
31466 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31467
31468 + pax_track_stack();
31469 +
31470 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31471
31472 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31473 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/message.c linux-2.6.32.45/drivers/isdn/hardware/eicon/message.c
31474 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/message.c 2011-03-27 14:31:47.000000000 -0400
31475 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/message.c 2011-05-16 21:46:57.000000000 -0400
31476 @@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
31477 dword d;
31478 word w;
31479
31480 + pax_track_stack();
31481 +
31482 a = plci->adapter;
31483 Id = ((word)plci->Id<<8)|a->Id;
31484 PUT_WORD(&SS_Ind[4],0x0000);
31485 @@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE
31486 word j, n, w;
31487 dword d;
31488
31489 + pax_track_stack();
31490 +
31491
31492 for(i=0;i<8;i++) bp_parms[i].length = 0;
31493 for(i=0;i<2;i++) global_config[i].length = 0;
31494 @@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARS
31495 const byte llc3[] = {4,3,2,2,6,6,0};
31496 const byte header[] = {0,2,3,3,0,0,0};
31497
31498 + pax_track_stack();
31499 +
31500 for(i=0;i<8;i++) bp_parms[i].length = 0;
31501 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
31502 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
31503 @@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI
31504 word appl_number_group_type[MAX_APPL];
31505 PLCI *auxplci;
31506
31507 + pax_track_stack();
31508 +
31509 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
31510
31511 if(!a->group_optimization_enabled)
31512 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/mntfunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/mntfunc.c
31513 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/mntfunc.c 2011-03-27 14:31:47.000000000 -0400
31514 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/mntfunc.c 2011-05-16 21:46:57.000000000 -0400
31515 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
31516 IDI_SYNC_REQ req;
31517 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31518
31519 + pax_track_stack();
31520 +
31521 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31522
31523 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31524 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/xdi_adapter.h linux-2.6.32.45/drivers/isdn/hardware/eicon/xdi_adapter.h
31525 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-03-27 14:31:47.000000000 -0400
31526 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-08-05 20:33:55.000000000 -0400
31527 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
31528 typedef struct _diva_os_idi_adapter_interface {
31529 diva_init_card_proc_t cleanup_adapter_proc;
31530 diva_cmd_card_proc_t cmd_proc;
31531 -} diva_os_idi_adapter_interface_t;
31532 +} __no_const diva_os_idi_adapter_interface_t;
31533
31534 typedef struct _diva_os_xdi_adapter {
31535 struct list_head link;
31536 diff -urNp linux-2.6.32.45/drivers/isdn/i4l/isdn_common.c linux-2.6.32.45/drivers/isdn/i4l/isdn_common.c
31537 --- linux-2.6.32.45/drivers/isdn/i4l/isdn_common.c 2011-03-27 14:31:47.000000000 -0400
31538 +++ linux-2.6.32.45/drivers/isdn/i4l/isdn_common.c 2011-05-16 21:46:57.000000000 -0400
31539 @@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct f
31540 } iocpar;
31541 void __user *argp = (void __user *)arg;
31542
31543 + pax_track_stack();
31544 +
31545 #define name iocpar.name
31546 #define bname iocpar.bname
31547 #define iocts iocpar.iocts
31548 diff -urNp linux-2.6.32.45/drivers/isdn/icn/icn.c linux-2.6.32.45/drivers/isdn/icn/icn.c
31549 --- linux-2.6.32.45/drivers/isdn/icn/icn.c 2011-03-27 14:31:47.000000000 -0400
31550 +++ linux-2.6.32.45/drivers/isdn/icn/icn.c 2011-04-17 15:56:46.000000000 -0400
31551 @@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len
31552 if (count > len)
31553 count = len;
31554 if (user) {
31555 - if (copy_from_user(msg, buf, count))
31556 + if (count > sizeof msg || copy_from_user(msg, buf, count))
31557 return -EFAULT;
31558 } else
31559 memcpy(msg, buf, count);
31560 diff -urNp linux-2.6.32.45/drivers/isdn/mISDN/socket.c linux-2.6.32.45/drivers/isdn/mISDN/socket.c
31561 --- linux-2.6.32.45/drivers/isdn/mISDN/socket.c 2011-03-27 14:31:47.000000000 -0400
31562 +++ linux-2.6.32.45/drivers/isdn/mISDN/socket.c 2011-04-17 15:56:46.000000000 -0400
31563 @@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, uns
31564 if (dev) {
31565 struct mISDN_devinfo di;
31566
31567 + memset(&di, 0, sizeof(di));
31568 di.id = dev->id;
31569 di.Dprotocols = dev->Dprotocols;
31570 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
31571 @@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, uns
31572 if (dev) {
31573 struct mISDN_devinfo di;
31574
31575 + memset(&di, 0, sizeof(di));
31576 di.id = dev->id;
31577 di.Dprotocols = dev->Dprotocols;
31578 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
31579 diff -urNp linux-2.6.32.45/drivers/isdn/sc/interrupt.c linux-2.6.32.45/drivers/isdn/sc/interrupt.c
31580 --- linux-2.6.32.45/drivers/isdn/sc/interrupt.c 2011-03-27 14:31:47.000000000 -0400
31581 +++ linux-2.6.32.45/drivers/isdn/sc/interrupt.c 2011-04-17 15:56:46.000000000 -0400
31582 @@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy,
31583 }
31584 else if(callid>=0x0000 && callid<=0x7FFF)
31585 {
31586 + int len;
31587 +
31588 pr_debug("%s: Got Incoming Call\n",
31589 sc_adapter[card]->devicename);
31590 - strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
31591 - strcpy(setup.eazmsn,
31592 - sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
31593 + len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
31594 + sizeof(setup.phone));
31595 + if (len >= sizeof(setup.phone))
31596 + continue;
31597 + len = strlcpy(setup.eazmsn,
31598 + sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
31599 + sizeof(setup.eazmsn));
31600 + if (len >= sizeof(setup.eazmsn))
31601 + continue;
31602 setup.si1 = 7;
31603 setup.si2 = 0;
31604 setup.plan = 0;
31605 @@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy,
31606 * Handle a GetMyNumber Rsp
31607 */
31608 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
31609 - strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
31610 + strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
31611 + rcvmsg.msg_data.byte_array,
31612 + sizeof(rcvmsg.msg_data.byte_array));
31613 continue;
31614 }
31615
31616 diff -urNp linux-2.6.32.45/drivers/lguest/core.c linux-2.6.32.45/drivers/lguest/core.c
31617 --- linux-2.6.32.45/drivers/lguest/core.c 2011-03-27 14:31:47.000000000 -0400
31618 +++ linux-2.6.32.45/drivers/lguest/core.c 2011-04-17 15:56:46.000000000 -0400
31619 @@ -91,9 +91,17 @@ static __init int map_switcher(void)
31620 * it's worked so far. The end address needs +1 because __get_vm_area
31621 * allocates an extra guard page, so we need space for that.
31622 */
31623 +
31624 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31625 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
31626 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
31627 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
31628 +#else
31629 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
31630 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
31631 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
31632 +#endif
31633 +
31634 if (!switcher_vma) {
31635 err = -ENOMEM;
31636 printk("lguest: could not map switcher pages high\n");
31637 @@ -118,7 +126,7 @@ static __init int map_switcher(void)
31638 * Now the Switcher is mapped at the right address, we can't fail!
31639 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
31640 */
31641 - memcpy(switcher_vma->addr, start_switcher_text,
31642 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
31643 end_switcher_text - start_switcher_text);
31644
31645 printk(KERN_INFO "lguest: mapped switcher at %p\n",
31646 diff -urNp linux-2.6.32.45/drivers/lguest/x86/core.c linux-2.6.32.45/drivers/lguest/x86/core.c
31647 --- linux-2.6.32.45/drivers/lguest/x86/core.c 2011-03-27 14:31:47.000000000 -0400
31648 +++ linux-2.6.32.45/drivers/lguest/x86/core.c 2011-04-17 15:56:46.000000000 -0400
31649 @@ -59,7 +59,7 @@ static struct {
31650 /* Offset from where switcher.S was compiled to where we've copied it */
31651 static unsigned long switcher_offset(void)
31652 {
31653 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
31654 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
31655 }
31656
31657 /* This cpu's struct lguest_pages. */
31658 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
31659 * These copies are pretty cheap, so we do them unconditionally: */
31660 /* Save the current Host top-level page directory.
31661 */
31662 +
31663 +#ifdef CONFIG_PAX_PER_CPU_PGD
31664 + pages->state.host_cr3 = read_cr3();
31665 +#else
31666 pages->state.host_cr3 = __pa(current->mm->pgd);
31667 +#endif
31668 +
31669 /*
31670 * Set up the Guest's page tables to see this CPU's pages (and no
31671 * other CPU's pages).
31672 @@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
31673 * compiled-in switcher code and the high-mapped copy we just made.
31674 */
31675 for (i = 0; i < IDT_ENTRIES; i++)
31676 - default_idt_entries[i] += switcher_offset();
31677 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
31678
31679 /*
31680 * Set up the Switcher's per-cpu areas.
31681 @@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
31682 * it will be undisturbed when we switch. To change %cs and jump we
31683 * need this structure to feed to Intel's "lcall" instruction.
31684 */
31685 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
31686 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
31687 lguest_entry.segment = LGUEST_CS;
31688
31689 /*
31690 diff -urNp linux-2.6.32.45/drivers/lguest/x86/switcher_32.S linux-2.6.32.45/drivers/lguest/x86/switcher_32.S
31691 --- linux-2.6.32.45/drivers/lguest/x86/switcher_32.S 2011-03-27 14:31:47.000000000 -0400
31692 +++ linux-2.6.32.45/drivers/lguest/x86/switcher_32.S 2011-04-17 15:56:46.000000000 -0400
31693 @@ -87,6 +87,7 @@
31694 #include <asm/page.h>
31695 #include <asm/segment.h>
31696 #include <asm/lguest.h>
31697 +#include <asm/processor-flags.h>
31698
31699 // We mark the start of the code to copy
31700 // It's placed in .text tho it's never run here
31701 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
31702 // Changes type when we load it: damn Intel!
31703 // For after we switch over our page tables
31704 // That entry will be read-only: we'd crash.
31705 +
31706 +#ifdef CONFIG_PAX_KERNEXEC
31707 + mov %cr0, %edx
31708 + xor $X86_CR0_WP, %edx
31709 + mov %edx, %cr0
31710 +#endif
31711 +
31712 movl $(GDT_ENTRY_TSS*8), %edx
31713 ltr %dx
31714
31715 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
31716 // Let's clear it again for our return.
31717 // The GDT descriptor of the Host
31718 // Points to the table after two "size" bytes
31719 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
31720 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
31721 // Clear "used" from type field (byte 5, bit 2)
31722 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
31723 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
31724 +
31725 +#ifdef CONFIG_PAX_KERNEXEC
31726 + mov %cr0, %eax
31727 + xor $X86_CR0_WP, %eax
31728 + mov %eax, %cr0
31729 +#endif
31730
31731 // Once our page table's switched, the Guest is live!
31732 // The Host fades as we run this final step.
31733 @@ -295,13 +309,12 @@ deliver_to_host:
31734 // I consulted gcc, and it gave
31735 // These instructions, which I gladly credit:
31736 leal (%edx,%ebx,8), %eax
31737 - movzwl (%eax),%edx
31738 - movl 4(%eax), %eax
31739 - xorw %ax, %ax
31740 - orl %eax, %edx
31741 + movl 4(%eax), %edx
31742 + movw (%eax), %dx
31743 // Now the address of the handler's in %edx
31744 // We call it now: its "iret" drops us home.
31745 - jmp *%edx
31746 + ljmp $__KERNEL_CS, $1f
31747 +1: jmp *%edx
31748
31749 // Every interrupt can come to us here
31750 // But we must truly tell each apart.
31751 diff -urNp linux-2.6.32.45/drivers/macintosh/via-pmu-backlight.c linux-2.6.32.45/drivers/macintosh/via-pmu-backlight.c
31752 --- linux-2.6.32.45/drivers/macintosh/via-pmu-backlight.c 2011-03-27 14:31:47.000000000 -0400
31753 +++ linux-2.6.32.45/drivers/macintosh/via-pmu-backlight.c 2011-04-17 15:56:46.000000000 -0400
31754 @@ -15,7 +15,7 @@
31755
31756 #define MAX_PMU_LEVEL 0xFF
31757
31758 -static struct backlight_ops pmu_backlight_data;
31759 +static const struct backlight_ops pmu_backlight_data;
31760 static DEFINE_SPINLOCK(pmu_backlight_lock);
31761 static int sleeping, uses_pmu_bl;
31762 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
31763 @@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(
31764 return bd->props.brightness;
31765 }
31766
31767 -static struct backlight_ops pmu_backlight_data = {
31768 +static const struct backlight_ops pmu_backlight_data = {
31769 .get_brightness = pmu_backlight_get_brightness,
31770 .update_status = pmu_backlight_update_status,
31771
31772 diff -urNp linux-2.6.32.45/drivers/macintosh/via-pmu.c linux-2.6.32.45/drivers/macintosh/via-pmu.c
31773 --- linux-2.6.32.45/drivers/macintosh/via-pmu.c 2011-03-27 14:31:47.000000000 -0400
31774 +++ linux-2.6.32.45/drivers/macintosh/via-pmu.c 2011-04-17 15:56:46.000000000 -0400
31775 @@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state
31776 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
31777 }
31778
31779 -static struct platform_suspend_ops pmu_pm_ops = {
31780 +static const struct platform_suspend_ops pmu_pm_ops = {
31781 .enter = powerbook_sleep,
31782 .valid = pmu_sleep_valid,
31783 };
31784 diff -urNp linux-2.6.32.45/drivers/md/dm.c linux-2.6.32.45/drivers/md/dm.c
31785 --- linux-2.6.32.45/drivers/md/dm.c 2011-08-09 18:35:29.000000000 -0400
31786 +++ linux-2.6.32.45/drivers/md/dm.c 2011-08-09 18:33:59.000000000 -0400
31787 @@ -165,9 +165,9 @@ struct mapped_device {
31788 /*
31789 * Event handling.
31790 */
31791 - atomic_t event_nr;
31792 + atomic_unchecked_t event_nr;
31793 wait_queue_head_t eventq;
31794 - atomic_t uevent_seq;
31795 + atomic_unchecked_t uevent_seq;
31796 struct list_head uevent_list;
31797 spinlock_t uevent_lock; /* Protect access to uevent_list */
31798
31799 @@ -1776,8 +1776,8 @@ static struct mapped_device *alloc_dev(i
31800 rwlock_init(&md->map_lock);
31801 atomic_set(&md->holders, 1);
31802 atomic_set(&md->open_count, 0);
31803 - atomic_set(&md->event_nr, 0);
31804 - atomic_set(&md->uevent_seq, 0);
31805 + atomic_set_unchecked(&md->event_nr, 0);
31806 + atomic_set_unchecked(&md->uevent_seq, 0);
31807 INIT_LIST_HEAD(&md->uevent_list);
31808 spin_lock_init(&md->uevent_lock);
31809
31810 @@ -1927,7 +1927,7 @@ static void event_callback(void *context
31811
31812 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
31813
31814 - atomic_inc(&md->event_nr);
31815 + atomic_inc_unchecked(&md->event_nr);
31816 wake_up(&md->eventq);
31817 }
31818
31819 @@ -2562,18 +2562,18 @@ void dm_kobject_uevent(struct mapped_dev
31820
31821 uint32_t dm_next_uevent_seq(struct mapped_device *md)
31822 {
31823 - return atomic_add_return(1, &md->uevent_seq);
31824 + return atomic_add_return_unchecked(1, &md->uevent_seq);
31825 }
31826
31827 uint32_t dm_get_event_nr(struct mapped_device *md)
31828 {
31829 - return atomic_read(&md->event_nr);
31830 + return atomic_read_unchecked(&md->event_nr);
31831 }
31832
31833 int dm_wait_event(struct mapped_device *md, int event_nr)
31834 {
31835 return wait_event_interruptible(md->eventq,
31836 - (event_nr != atomic_read(&md->event_nr)));
31837 + (event_nr != atomic_read_unchecked(&md->event_nr)));
31838 }
31839
31840 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
31841 diff -urNp linux-2.6.32.45/drivers/md/dm-ioctl.c linux-2.6.32.45/drivers/md/dm-ioctl.c
31842 --- linux-2.6.32.45/drivers/md/dm-ioctl.c 2011-03-27 14:31:47.000000000 -0400
31843 +++ linux-2.6.32.45/drivers/md/dm-ioctl.c 2011-04-17 15:56:46.000000000 -0400
31844 @@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, str
31845 cmd == DM_LIST_VERSIONS_CMD)
31846 return 0;
31847
31848 - if ((cmd == DM_DEV_CREATE_CMD)) {
31849 + if (cmd == DM_DEV_CREATE_CMD) {
31850 if (!*param->name) {
31851 DMWARN("name not supplied when creating device");
31852 return -EINVAL;
31853 diff -urNp linux-2.6.32.45/drivers/md/dm-raid1.c linux-2.6.32.45/drivers/md/dm-raid1.c
31854 --- linux-2.6.32.45/drivers/md/dm-raid1.c 2011-03-27 14:31:47.000000000 -0400
31855 +++ linux-2.6.32.45/drivers/md/dm-raid1.c 2011-05-04 17:56:28.000000000 -0400
31856 @@ -41,7 +41,7 @@ enum dm_raid1_error {
31857
31858 struct mirror {
31859 struct mirror_set *ms;
31860 - atomic_t error_count;
31861 + atomic_unchecked_t error_count;
31862 unsigned long error_type;
31863 struct dm_dev *dev;
31864 sector_t offset;
31865 @@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m
31866 * simple way to tell if a device has encountered
31867 * errors.
31868 */
31869 - atomic_inc(&m->error_count);
31870 + atomic_inc_unchecked(&m->error_count);
31871
31872 if (test_and_set_bit(error_type, &m->error_type))
31873 return;
31874 @@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m
31875 }
31876
31877 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
31878 - if (!atomic_read(&new->error_count)) {
31879 + if (!atomic_read_unchecked(&new->error_count)) {
31880 set_default_mirror(new);
31881 break;
31882 }
31883 @@ -363,7 +363,7 @@ static struct mirror *choose_mirror(stru
31884 struct mirror *m = get_default_mirror(ms);
31885
31886 do {
31887 - if (likely(!atomic_read(&m->error_count)))
31888 + if (likely(!atomic_read_unchecked(&m->error_count)))
31889 return m;
31890
31891 if (m-- == ms->mirror)
31892 @@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
31893 {
31894 struct mirror *default_mirror = get_default_mirror(m->ms);
31895
31896 - return !atomic_read(&default_mirror->error_count);
31897 + return !atomic_read_unchecked(&default_mirror->error_count);
31898 }
31899
31900 static int mirror_available(struct mirror_set *ms, struct bio *bio)
31901 @@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *
31902 */
31903 if (likely(region_in_sync(ms, region, 1)))
31904 m = choose_mirror(ms, bio->bi_sector);
31905 - else if (m && atomic_read(&m->error_count))
31906 + else if (m && atomic_read_unchecked(&m->error_count))
31907 m = NULL;
31908
31909 if (likely(m))
31910 @@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set
31911 }
31912
31913 ms->mirror[mirror].ms = ms;
31914 - atomic_set(&(ms->mirror[mirror].error_count), 0);
31915 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
31916 ms->mirror[mirror].error_type = 0;
31917 ms->mirror[mirror].offset = offset;
31918
31919 @@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_targ
31920 */
31921 static char device_status_char(struct mirror *m)
31922 {
31923 - if (!atomic_read(&(m->error_count)))
31924 + if (!atomic_read_unchecked(&(m->error_count)))
31925 return 'A';
31926
31927 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
31928 diff -urNp linux-2.6.32.45/drivers/md/dm-stripe.c linux-2.6.32.45/drivers/md/dm-stripe.c
31929 --- linux-2.6.32.45/drivers/md/dm-stripe.c 2011-03-27 14:31:47.000000000 -0400
31930 +++ linux-2.6.32.45/drivers/md/dm-stripe.c 2011-05-04 17:56:28.000000000 -0400
31931 @@ -20,7 +20,7 @@ struct stripe {
31932 struct dm_dev *dev;
31933 sector_t physical_start;
31934
31935 - atomic_t error_count;
31936 + atomic_unchecked_t error_count;
31937 };
31938
31939 struct stripe_c {
31940 @@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *
31941 kfree(sc);
31942 return r;
31943 }
31944 - atomic_set(&(sc->stripe[i].error_count), 0);
31945 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
31946 }
31947
31948 ti->private = sc;
31949 @@ -257,7 +257,7 @@ static int stripe_status(struct dm_targe
31950 DMEMIT("%d ", sc->stripes);
31951 for (i = 0; i < sc->stripes; i++) {
31952 DMEMIT("%s ", sc->stripe[i].dev->name);
31953 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
31954 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
31955 'D' : 'A';
31956 }
31957 buffer[i] = '\0';
31958 @@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_targe
31959 */
31960 for (i = 0; i < sc->stripes; i++)
31961 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
31962 - atomic_inc(&(sc->stripe[i].error_count));
31963 - if (atomic_read(&(sc->stripe[i].error_count)) <
31964 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
31965 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
31966 DM_IO_ERROR_THRESHOLD)
31967 queue_work(kstriped, &sc->kstriped_ws);
31968 }
31969 diff -urNp linux-2.6.32.45/drivers/md/dm-sysfs.c linux-2.6.32.45/drivers/md/dm-sysfs.c
31970 --- linux-2.6.32.45/drivers/md/dm-sysfs.c 2011-03-27 14:31:47.000000000 -0400
31971 +++ linux-2.6.32.45/drivers/md/dm-sysfs.c 2011-04-17 15:56:46.000000000 -0400
31972 @@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
31973 NULL,
31974 };
31975
31976 -static struct sysfs_ops dm_sysfs_ops = {
31977 +static const struct sysfs_ops dm_sysfs_ops = {
31978 .show = dm_attr_show,
31979 };
31980
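Const-qualifying dm_sysfs_ops here (and rdev_sysfs_ops and md_sysfs_ops further down) follows the patch's broader constification of function-pointer tables: a const ops struct can be placed in read-only memory, so its show/store pointers cannot be redirected by a stray or malicious write at run time. A rough illustration of the idea, with made-up names:

        /* Made-up example types; only the const placement matters here. */
        struct demo_ops {
                long (*show)(const void *obj, char *page);
        };

        static long demo_show(const void *obj, char *page)
        {
                (void)obj;
                (void)page;
                return 0;
        }

        /* const => the table can live in .rodata, so the pointer cannot be
         * overwritten after initialisation, unlike a writable static struct. */
        static const struct demo_ops demo_sysfs_ops = {
                .show = demo_show,
        };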
31981 diff -urNp linux-2.6.32.45/drivers/md/dm-table.c linux-2.6.32.45/drivers/md/dm-table.c
31982 --- linux-2.6.32.45/drivers/md/dm-table.c 2011-06-25 12:55:34.000000000 -0400
31983 +++ linux-2.6.32.45/drivers/md/dm-table.c 2011-06-25 12:56:37.000000000 -0400
31984 @@ -376,7 +376,7 @@ static int device_area_is_invalid(struct
31985 if (!dev_size)
31986 return 0;
31987
31988 - if ((start >= dev_size) || (start + len > dev_size)) {
31989 + if ((start >= dev_size) || (len > dev_size - start)) {
31990 DMWARN("%s: %s too small for target: "
31991 "start=%llu, len=%llu, dev_size=%llu",
31992 dm_device_name(ti->table->md), bdevname(bdev, b),
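The rewritten device_area_is_invalid() test is the overflow-safe form of "start + len exceeds dev_size": once start < dev_size is known, len > dev_size - start cannot wrap, whereas start + len can wrap around to a small value and slip past the old comparison. A standalone illustration (names are mine, not the driver's):

        #include <stdint.h>
        #include <stdio.h>

        /* Returns 1 when [start, start + len) does not fit in dev_size sectors,
         * written so the arithmetic cannot wrap around. */
        static int range_is_invalid(uint64_t start, uint64_t len, uint64_t dev_size)
        {
                return start >= dev_size || len > dev_size - start;
        }

        int main(void)
        {
                uint64_t start = 50, dev_size = 100;
                uint64_t len = UINT64_MAX - 10;  /* start + len wraps to 39 */

                /* the old "(start >= dev_size) || (start + len > dev_size)" test
                 * accepts this range because 50 + len wraps below dev_size */
                printf("invalid=%d\n", range_is_invalid(start, len, dev_size)); /* 1 */
                return 0;
        }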
31993 diff -urNp linux-2.6.32.45/drivers/md/md.c linux-2.6.32.45/drivers/md/md.c
31994 --- linux-2.6.32.45/drivers/md/md.c 2011-07-13 17:23:04.000000000 -0400
31995 +++ linux-2.6.32.45/drivers/md/md.c 2011-07-13 17:23:18.000000000 -0400
31996 @@ -153,10 +153,10 @@ static int start_readonly;
31997 * start build, activate spare
31998 */
31999 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
32000 -static atomic_t md_event_count;
32001 +static atomic_unchecked_t md_event_count;
32002 void md_new_event(mddev_t *mddev)
32003 {
32004 - atomic_inc(&md_event_count);
32005 + atomic_inc_unchecked(&md_event_count);
32006 wake_up(&md_event_waiters);
32007 }
32008 EXPORT_SYMBOL_GPL(md_new_event);
32009 @@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
32010 */
32011 static void md_new_event_inintr(mddev_t *mddev)
32012 {
32013 - atomic_inc(&md_event_count);
32014 + atomic_inc_unchecked(&md_event_count);
32015 wake_up(&md_event_waiters);
32016 }
32017
32018 @@ -1218,7 +1218,7 @@ static int super_1_load(mdk_rdev_t *rdev
32019
32020 rdev->preferred_minor = 0xffff;
32021 rdev->data_offset = le64_to_cpu(sb->data_offset);
32022 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
32023 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
32024
32025 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
32026 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
32027 @@ -1392,7 +1392,7 @@ static void super_1_sync(mddev_t *mddev,
32028 else
32029 sb->resync_offset = cpu_to_le64(0);
32030
32031 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
32032 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
32033
32034 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
32035 sb->size = cpu_to_le64(mddev->dev_sectors);
32036 @@ -2214,7 +2214,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
32037 static ssize_t
32038 errors_show(mdk_rdev_t *rdev, char *page)
32039 {
32040 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
32041 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
32042 }
32043
32044 static ssize_t
32045 @@ -2223,7 +2223,7 @@ errors_store(mdk_rdev_t *rdev, const cha
32046 char *e;
32047 unsigned long n = simple_strtoul(buf, &e, 10);
32048 if (*buf && (*e == 0 || *e == '\n')) {
32049 - atomic_set(&rdev->corrected_errors, n);
32050 + atomic_set_unchecked(&rdev->corrected_errors, n);
32051 return len;
32052 }
32053 return -EINVAL;
32054 @@ -2517,7 +2517,7 @@ static void rdev_free(struct kobject *ko
32055 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
32056 kfree(rdev);
32057 }
32058 -static struct sysfs_ops rdev_sysfs_ops = {
32059 +static const struct sysfs_ops rdev_sysfs_ops = {
32060 .show = rdev_attr_show,
32061 .store = rdev_attr_store,
32062 };
32063 @@ -2566,8 +2566,8 @@ static mdk_rdev_t *md_import_device(dev_
32064 rdev->data_offset = 0;
32065 rdev->sb_events = 0;
32066 atomic_set(&rdev->nr_pending, 0);
32067 - atomic_set(&rdev->read_errors, 0);
32068 - atomic_set(&rdev->corrected_errors, 0);
32069 + atomic_set_unchecked(&rdev->read_errors, 0);
32070 + atomic_set_unchecked(&rdev->corrected_errors, 0);
32071
32072 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
32073 if (!size) {
32074 @@ -3887,7 +3887,7 @@ static void md_free(struct kobject *ko)
32075 kfree(mddev);
32076 }
32077
32078 -static struct sysfs_ops md_sysfs_ops = {
32079 +static const struct sysfs_ops md_sysfs_ops = {
32080 .show = md_attr_show,
32081 .store = md_attr_store,
32082 };
32083 @@ -4474,7 +4474,8 @@ out:
32084 err = 0;
32085 blk_integrity_unregister(disk);
32086 md_new_event(mddev);
32087 - sysfs_notify_dirent(mddev->sysfs_state);
32088 + if (mddev->sysfs_state)
32089 + sysfs_notify_dirent(mddev->sysfs_state);
32090 return err;
32091 }
32092
32093 @@ -5954,7 +5955,7 @@ static int md_seq_show(struct seq_file *
32094
32095 spin_unlock(&pers_lock);
32096 seq_printf(seq, "\n");
32097 - mi->event = atomic_read(&md_event_count);
32098 + mi->event = atomic_read_unchecked(&md_event_count);
32099 return 0;
32100 }
32101 if (v == (void*)2) {
32102 @@ -6043,7 +6044,7 @@ static int md_seq_show(struct seq_file *
32103 chunk_kb ? "KB" : "B");
32104 if (bitmap->file) {
32105 seq_printf(seq, ", file: ");
32106 - seq_path(seq, &bitmap->file->f_path, " \t\n");
32107 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
32108 }
32109
32110 seq_printf(seq, "\n");
32111 @@ -6077,7 +6078,7 @@ static int md_seq_open(struct inode *ino
32112 else {
32113 struct seq_file *p = file->private_data;
32114 p->private = mi;
32115 - mi->event = atomic_read(&md_event_count);
32116 + mi->event = atomic_read_unchecked(&md_event_count);
32117 }
32118 return error;
32119 }
32120 @@ -6093,7 +6094,7 @@ static unsigned int mdstat_poll(struct f
32121 /* always allow read */
32122 mask = POLLIN | POLLRDNORM;
32123
32124 - if (mi->event != atomic_read(&md_event_count))
32125 + if (mi->event != atomic_read_unchecked(&md_event_count))
32126 mask |= POLLERR | POLLPRI;
32127 return mask;
32128 }
32129 @@ -6137,7 +6138,7 @@ static int is_mddev_idle(mddev_t *mddev,
32130 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
32131 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
32132 (int)part_stat_read(&disk->part0, sectors[1]) -
32133 - atomic_read(&disk->sync_io);
32134 + atomic_read_unchecked(&disk->sync_io);
32135 /* sync IO will cause sync_io to increase before the disk_stats
32136 * as sync_io is counted when a request starts, and
32137 * disk_stats is counted when it completes.
32138 diff -urNp linux-2.6.32.45/drivers/md/md.h linux-2.6.32.45/drivers/md/md.h
32139 --- linux-2.6.32.45/drivers/md/md.h 2011-03-27 14:31:47.000000000 -0400
32140 +++ linux-2.6.32.45/drivers/md/md.h 2011-05-04 17:56:20.000000000 -0400
32141 @@ -94,10 +94,10 @@ struct mdk_rdev_s
32142 * only maintained for arrays that
32143 * support hot removal
32144 */
32145 - atomic_t read_errors; /* number of consecutive read errors that
32146 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
32147 * we have tried to ignore.
32148 */
32149 - atomic_t corrected_errors; /* number of corrected read errors,
32150 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
32151 * for reporting to userspace and storing
32152 * in superblock.
32153 */
32154 @@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_
32155
32156 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
32157 {
32158 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
32159 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
32160 }
32161
32162 struct mdk_personality
32163 diff -urNp linux-2.6.32.45/drivers/md/raid10.c linux-2.6.32.45/drivers/md/raid10.c
32164 --- linux-2.6.32.45/drivers/md/raid10.c 2011-03-27 14:31:47.000000000 -0400
32165 +++ linux-2.6.32.45/drivers/md/raid10.c 2011-05-04 17:56:28.000000000 -0400
32166 @@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bi
32167 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
32168 set_bit(R10BIO_Uptodate, &r10_bio->state);
32169 else {
32170 - atomic_add(r10_bio->sectors,
32171 + atomic_add_unchecked(r10_bio->sectors,
32172 &conf->mirrors[d].rdev->corrected_errors);
32173 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
32174 md_error(r10_bio->mddev,
32175 @@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
32176 test_bit(In_sync, &rdev->flags)) {
32177 atomic_inc(&rdev->nr_pending);
32178 rcu_read_unlock();
32179 - atomic_add(s, &rdev->corrected_errors);
32180 + atomic_add_unchecked(s, &rdev->corrected_errors);
32181 if (sync_page_io(rdev->bdev,
32182 r10_bio->devs[sl].addr +
32183 sect + rdev->data_offset,
32184 diff -urNp linux-2.6.32.45/drivers/md/raid1.c linux-2.6.32.45/drivers/md/raid1.c
32185 --- linux-2.6.32.45/drivers/md/raid1.c 2011-03-27 14:31:47.000000000 -0400
32186 +++ linux-2.6.32.45/drivers/md/raid1.c 2011-05-04 17:56:28.000000000 -0400
32187 @@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *
32188 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
32189 continue;
32190 rdev = conf->mirrors[d].rdev;
32191 - atomic_add(s, &rdev->corrected_errors);
32192 + atomic_add_unchecked(s, &rdev->corrected_errors);
32193 if (sync_page_io(rdev->bdev,
32194 sect + rdev->data_offset,
32195 s<<9,
32196 @@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf,
32197 /* Well, this device is dead */
32198 md_error(mddev, rdev);
32199 else {
32200 - atomic_add(s, &rdev->corrected_errors);
32201 + atomic_add_unchecked(s, &rdev->corrected_errors);
32202 printk(KERN_INFO
32203 "raid1:%s: read error corrected "
32204 "(%d sectors at %llu on %s)\n",
32205 diff -urNp linux-2.6.32.45/drivers/md/raid5.c linux-2.6.32.45/drivers/md/raid5.c
32206 --- linux-2.6.32.45/drivers/md/raid5.c 2011-06-25 12:55:34.000000000 -0400
32207 +++ linux-2.6.32.45/drivers/md/raid5.c 2011-06-25 12:58:39.000000000 -0400
32208 @@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_hea
32209 bi->bi_next = NULL;
32210 if ((rw & WRITE) &&
32211 test_bit(R5_ReWrite, &sh->dev[i].flags))
32212 - atomic_add(STRIPE_SECTORS,
32213 + atomic_add_unchecked(STRIPE_SECTORS,
32214 &rdev->corrected_errors);
32215 generic_make_request(bi);
32216 } else {
32217 @@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struc
32218 clear_bit(R5_ReadError, &sh->dev[i].flags);
32219 clear_bit(R5_ReWrite, &sh->dev[i].flags);
32220 }
32221 - if (atomic_read(&conf->disks[i].rdev->read_errors))
32222 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
32223 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
32224 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
32225 } else {
32226 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
32227 int retry = 0;
32228 rdev = conf->disks[i].rdev;
32229
32230 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
32231 - atomic_inc(&rdev->read_errors);
32232 + atomic_inc_unchecked(&rdev->read_errors);
32233 if (conf->mddev->degraded >= conf->max_degraded)
32234 printk_rl(KERN_WARNING
32235 "raid5:%s: read error not correctable "
32236 @@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struc
32237 (unsigned long long)(sh->sector
32238 + rdev->data_offset),
32239 bdn);
32240 - else if (atomic_read(&rdev->read_errors)
32241 + else if (atomic_read_unchecked(&rdev->read_errors)
32242 > conf->max_nr_stripes)
32243 printk(KERN_WARNING
32244 "raid5:%s: Too many read errors, failing device %s.\n",
32245 @@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct s
32246 sector_t r_sector;
32247 struct stripe_head sh2;
32248
32249 + pax_track_stack();
32250
32251 chunk_offset = sector_div(new_sector, sectors_per_chunk);
32252 stripe = new_sector;
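The pax_track_stack() calls this patch adds to functions like compute_blocknr() above (and to many of the media drivers that follow) belong to PaX's stack-tracking/STACKLEAK hardening: functions with large local frames record how deep into the kernel stack they reach so the used portion can be sanitised later, limiting leaks of stale stack contents. A conceptual, compile-only sketch with a no-op stand-in for the hook:

        /* No-op stand-in; the real hook is supplied by the PaX patch itself. */
        static void pax_track_stack(void)
        {
        }

        static void parse_big_frame(const unsigned char *src, unsigned long n)
        {
                unsigned char scratch[4096];  /* large frame, hence the tracking call */
                unsigned long i;

                pax_track_stack();

                for (i = 0; i < n && i < sizeof(scratch); i++)
                        scratch[i] = src[i] ^ 0xff;
                (void)scratch;
        }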
32253 diff -urNp linux-2.6.32.45/drivers/media/common/saa7146_hlp.c linux-2.6.32.45/drivers/media/common/saa7146_hlp.c
32254 --- linux-2.6.32.45/drivers/media/common/saa7146_hlp.c 2011-03-27 14:31:47.000000000 -0400
32255 +++ linux-2.6.32.45/drivers/media/common/saa7146_hlp.c 2011-05-16 21:46:57.000000000 -0400
32256 @@ -353,6 +353,8 @@ static void calculate_clipping_registers
32257
32258 int x[32], y[32], w[32], h[32];
32259
32260 + pax_track_stack();
32261 +
32262 /* clear out memory */
32263 memset(&line_list[0], 0x00, sizeof(u32)*32);
32264 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
32265 diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
32266 --- linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-03-27 14:31:47.000000000 -0400
32267 +++ linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-05-16 21:46:57.000000000 -0400
32268 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
32269 u8 buf[HOST_LINK_BUF_SIZE];
32270 int i;
32271
32272 + pax_track_stack();
32273 +
32274 dprintk("%s\n", __func__);
32275
32276 /* check if we have space for a link buf in the rx_buffer */
32277 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
32278 unsigned long timeout;
32279 int written;
32280
32281 + pax_track_stack();
32282 +
32283 dprintk("%s\n", __func__);
32284
32285 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
32286 diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_demux.h linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_demux.h
32287 --- linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_demux.h 2011-03-27 14:31:47.000000000 -0400
32288 +++ linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_demux.h 2011-08-05 20:33:55.000000000 -0400
32289 @@ -71,7 +71,7 @@ struct dvb_demux_feed {
32290 union {
32291 dmx_ts_cb ts;
32292 dmx_section_cb sec;
32293 - } cb;
32294 + } __no_const cb;
32295
32296 struct dvb_demux *demux;
32297 void *priv;
32298 diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-core/dvbdev.c linux-2.6.32.45/drivers/media/dvb/dvb-core/dvbdev.c
32299 --- linux-2.6.32.45/drivers/media/dvb/dvb-core/dvbdev.c 2011-03-27 14:31:47.000000000 -0400
32300 +++ linux-2.6.32.45/drivers/media/dvb/dvb-core/dvbdev.c 2011-08-23 21:22:32.000000000 -0400
32301 @@ -191,7 +191,7 @@ int dvb_register_device(struct dvb_adapt
32302 const struct dvb_device *template, void *priv, int type)
32303 {
32304 struct dvb_device *dvbdev;
32305 - struct file_operations *dvbdevfops;
32306 + file_operations_no_const *dvbdevfops;
32307 struct device *clsdev;
32308 int minor;
32309 int id;
32310 diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-usb/cxusb.c linux-2.6.32.45/drivers/media/dvb/dvb-usb/cxusb.c
32311 --- linux-2.6.32.45/drivers/media/dvb/dvb-usb/cxusb.c 2011-03-27 14:31:47.000000000 -0400
32312 +++ linux-2.6.32.45/drivers/media/dvb/dvb-usb/cxusb.c 2011-08-05 20:33:55.000000000 -0400
32313 @@ -1040,7 +1040,7 @@ static struct dib0070_config dib7070p_di
32314 struct dib0700_adapter_state {
32315 int (*set_param_save) (struct dvb_frontend *,
32316 struct dvb_frontend_parameters *);
32317 -};
32318 +} __no_const;
32319
32320 static int dib7070_set_param_override(struct dvb_frontend *fe,
32321 struct dvb_frontend_parameters *fep)
32322 diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_core.c linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_core.c
32323 --- linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-03-27 14:31:47.000000000 -0400
32324 +++ linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-05-16 21:46:57.000000000 -0400
32325 @@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb
32326
32327 u8 buf[260];
32328
32329 + pax_track_stack();
32330 +
32331 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
32332 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
32333
32334 diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_devices.c linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_devices.c
32335 --- linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_devices.c 2011-05-10 22:12:01.000000000 -0400
32336 +++ linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_devices.c 2011-08-05 20:33:55.000000000 -0400
32337 @@ -28,7 +28,7 @@ MODULE_PARM_DESC(force_lna_activation, "
32338
32339 struct dib0700_adapter_state {
32340 int (*set_param_save) (struct dvb_frontend *, struct dvb_frontend_parameters *);
32341 -};
32342 +} __no_const;
32343
32344 /* Hauppauge Nova-T 500 (aka Bristol)
32345 * has a LNA on GPIO0 which is enabled by setting 1 */
32346 diff -urNp linux-2.6.32.45/drivers/media/dvb/frontends/dib3000.h linux-2.6.32.45/drivers/media/dvb/frontends/dib3000.h
32347 --- linux-2.6.32.45/drivers/media/dvb/frontends/dib3000.h 2011-03-27 14:31:47.000000000 -0400
32348 +++ linux-2.6.32.45/drivers/media/dvb/frontends/dib3000.h 2011-08-05 20:33:55.000000000 -0400
32349 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
32350 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
32351 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
32352 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
32353 -};
32354 +} __no_const;
32355
32356 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
32357 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
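The __no_const annotation added to dib_fe_xfer_ops (and to the dib0700 adapter-state and dvb_demux_feed callback holders above) is the opt-out side of the same constification effort: these structures are filled in at run time by attach/probe code, so they must stay writable instead of being forced const. A sketch of the convention; the macro definitions here are assumptions, not the patch's exact ones:

        /* Assumed shape of the macro: with the constify plugin the attribute is
         * honoured, without it the annotation compiles away to nothing. */
        #ifdef CONSTIFY_PLUGIN
        # define __no_const __attribute__((no_const))
        #else
        # define __no_const
        #endif

        struct xfer_ops {
                int (*fifo_ctrl)(void *fe, int onoff);
                int (*pid_ctrl)(void *fe, int index, int pid, int onoff);
        } __no_const;           /* assigned at attach time, so keep it writable */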
32358 diff -urNp linux-2.6.32.45/drivers/media/dvb/frontends/or51211.c linux-2.6.32.45/drivers/media/dvb/frontends/or51211.c
32359 --- linux-2.6.32.45/drivers/media/dvb/frontends/or51211.c 2011-03-27 14:31:47.000000000 -0400
32360 +++ linux-2.6.32.45/drivers/media/dvb/frontends/or51211.c 2011-05-16 21:46:57.000000000 -0400
32361 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
32362 u8 tudata[585];
32363 int i;
32364
32365 + pax_track_stack();
32366 +
32367 dprintk("Firmware is %zd bytes\n",fw->size);
32368
32369 /* Get eprom data */
32370 diff -urNp linux-2.6.32.45/drivers/media/radio/radio-cadet.c linux-2.6.32.45/drivers/media/radio/radio-cadet.c
32371 --- linux-2.6.32.45/drivers/media/radio/radio-cadet.c 2011-03-27 14:31:47.000000000 -0400
32372 +++ linux-2.6.32.45/drivers/media/radio/radio-cadet.c 2011-04-17 15:56:46.000000000 -0400
32373 @@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *f
32374 while (i < count && dev->rdsin != dev->rdsout)
32375 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
32376
32377 - if (copy_to_user(data, readbuf, i))
32378 + if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
32379 return -EFAULT;
32380 return i;
32381 }
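The added "i > sizeof readbuf" test in cadet_read() bounds the copy_to_user() length against the on-stack RDS buffer: if the ring-buffer indices ever let i grow past the buffer, the read now fails with -EFAULT rather than copying adjacent stack memory out to userspace. A userspace-style sketch of the same guard (names and sizes are illustrative):

        #include <stdio.h>
        #include <string.h>

        #define EFAULT 14

        /* Copy at most `fill` bytes out through a fixed staging buffer,
         * refusing any length that exceeds the buffer itself. */
        static long bounded_copy_out(char *dst, const char *src, size_t fill)
        {
                char readbuf[96];

                if (fill > sizeof(readbuf))   /* mirrors the patch's extra check */
                        return -EFAULT;

                memcpy(readbuf, src, fill);
                memcpy(dst, readbuf, fill);
                return (long)fill;
        }

        int main(void)
        {
                char src[256] = "rds stream bytes", dst[256];

                printf("%ld\n", bounded_copy_out(dst, src, 16));  /* 16 */
                printf("%ld\n", bounded_copy_out(dst, src, 200)); /* -14 */
                return 0;
        }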
32382 diff -urNp linux-2.6.32.45/drivers/media/video/cx18/cx18-driver.c linux-2.6.32.45/drivers/media/video/cx18/cx18-driver.c
32383 --- linux-2.6.32.45/drivers/media/video/cx18/cx18-driver.c 2011-03-27 14:31:47.000000000 -0400
32384 +++ linux-2.6.32.45/drivers/media/video/cx18/cx18-driver.c 2011-05-16 21:46:57.000000000 -0400
32385 @@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl
32386
32387 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
32388
32389 -static atomic_t cx18_instance = ATOMIC_INIT(0);
32390 +static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
32391
32392 /* Parameter declarations */
32393 static int cardtype[CX18_MAX_CARDS];
32394 @@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
32395 struct i2c_client c;
32396 u8 eedata[256];
32397
32398 + pax_track_stack();
32399 +
32400 memset(&c, 0, sizeof(c));
32401 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
32402 c.adapter = &cx->i2c_adap[0];
32403 @@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct p
32404 struct cx18 *cx;
32405
32406 /* FIXME - module parameter arrays constrain max instances */
32407 - i = atomic_inc_return(&cx18_instance) - 1;
32408 + i = atomic_inc_return_unchecked(&cx18_instance) - 1;
32409 if (i >= CX18_MAX_CARDS) {
32410 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
32411 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
32412 diff -urNp linux-2.6.32.45/drivers/media/video/ivtv/ivtv-driver.c linux-2.6.32.45/drivers/media/video/ivtv/ivtv-driver.c
32413 --- linux-2.6.32.45/drivers/media/video/ivtv/ivtv-driver.c 2011-03-27 14:31:47.000000000 -0400
32414 +++ linux-2.6.32.45/drivers/media/video/ivtv/ivtv-driver.c 2011-05-04 17:56:28.000000000 -0400
32415 @@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl
32416 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
32417
32418 /* ivtv instance counter */
32419 -static atomic_t ivtv_instance = ATOMIC_INIT(0);
32420 +static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
32421
32422 /* Parameter declarations */
32423 static int cardtype[IVTV_MAX_CARDS];
32424 diff -urNp linux-2.6.32.45/drivers/media/video/omap24xxcam.c linux-2.6.32.45/drivers/media/video/omap24xxcam.c
32425 --- linux-2.6.32.45/drivers/media/video/omap24xxcam.c 2011-03-27 14:31:47.000000000 -0400
32426 +++ linux-2.6.32.45/drivers/media/video/omap24xxcam.c 2011-05-04 17:56:28.000000000 -0400
32427 @@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(str
32428 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
32429
32430 do_gettimeofday(&vb->ts);
32431 - vb->field_count = atomic_add_return(2, &fh->field_count);
32432 + vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
32433 if (csr & csr_error) {
32434 vb->state = VIDEOBUF_ERROR;
32435 if (!atomic_read(&fh->cam->in_reset)) {
32436 diff -urNp linux-2.6.32.45/drivers/media/video/omap24xxcam.h linux-2.6.32.45/drivers/media/video/omap24xxcam.h
32437 --- linux-2.6.32.45/drivers/media/video/omap24xxcam.h 2011-03-27 14:31:47.000000000 -0400
32438 +++ linux-2.6.32.45/drivers/media/video/omap24xxcam.h 2011-05-04 17:56:28.000000000 -0400
32439 @@ -533,7 +533,7 @@ struct omap24xxcam_fh {
32440 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
32441 struct videobuf_queue vbq;
32442 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
32443 - atomic_t field_count; /* field counter for videobuf_buffer */
32444 + atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
32445 /* accessing cam here doesn't need serialisation: it's constant */
32446 struct omap24xxcam_device *cam;
32447 };
32448 diff -urNp linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
32449 --- linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-03-27 14:31:47.000000000 -0400
32450 +++ linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-05-16 21:46:57.000000000 -0400
32451 @@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
32452 u8 *eeprom;
32453 struct tveeprom tvdata;
32454
32455 + pax_track_stack();
32456 +
32457 memset(&tvdata,0,sizeof(tvdata));
32458
32459 eeprom = pvr2_eeprom_fetch(hdw);
32460 diff -urNp linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
32461 --- linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h 2011-03-27 14:31:47.000000000 -0400
32462 +++ linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h 2011-08-23 21:22:38.000000000 -0400
32463 @@ -195,7 +195,7 @@ struct pvr2_hdw {
32464
32465 /* I2C stuff */
32466 struct i2c_adapter i2c_adap;
32467 - struct i2c_algorithm i2c_algo;
32468 + i2c_algorithm_no_const i2c_algo;
32469 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
32470 int i2c_cx25840_hack_state;
32471 int i2c_linked;
32472 diff -urNp linux-2.6.32.45/drivers/media/video/saa7134/saa6752hs.c linux-2.6.32.45/drivers/media/video/saa7134/saa6752hs.c
32473 --- linux-2.6.32.45/drivers/media/video/saa7134/saa6752hs.c 2011-03-27 14:31:47.000000000 -0400
32474 +++ linux-2.6.32.45/drivers/media/video/saa7134/saa6752hs.c 2011-05-16 21:46:57.000000000 -0400
32475 @@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_su
32476 unsigned char localPAT[256];
32477 unsigned char localPMT[256];
32478
32479 + pax_track_stack();
32480 +
32481 /* Set video format - must be done first as it resets other settings */
32482 set_reg8(client, 0x41, h->video_format);
32483
32484 diff -urNp linux-2.6.32.45/drivers/media/video/saa7164/saa7164-cmd.c linux-2.6.32.45/drivers/media/video/saa7164/saa7164-cmd.c
32485 --- linux-2.6.32.45/drivers/media/video/saa7164/saa7164-cmd.c 2011-03-27 14:31:47.000000000 -0400
32486 +++ linux-2.6.32.45/drivers/media/video/saa7164/saa7164-cmd.c 2011-05-16 21:46:57.000000000 -0400
32487 @@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_d
32488 wait_queue_head_t *q = 0;
32489 dprintk(DBGLVL_CMD, "%s()\n", __func__);
32490
32491 + pax_track_stack();
32492 +
32493 /* While any outstand message on the bus exists... */
32494 do {
32495
32496 @@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
32497 u8 tmp[512];
32498 dprintk(DBGLVL_CMD, "%s()\n", __func__);
32499
32500 + pax_track_stack();
32501 +
32502 while (loop) {
32503
32504 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
32505 diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/ibmcam.c linux-2.6.32.45/drivers/media/video/usbvideo/ibmcam.c
32506 --- linux-2.6.32.45/drivers/media/video/usbvideo/ibmcam.c 2011-03-27 14:31:47.000000000 -0400
32507 +++ linux-2.6.32.45/drivers/media/video/usbvideo/ibmcam.c 2011-08-05 20:33:55.000000000 -0400
32508 @@ -3947,15 +3947,15 @@ static struct usb_device_id id_table[] =
32509 static int __init ibmcam_init(void)
32510 {
32511 struct usbvideo_cb cbTbl;
32512 - memset(&cbTbl, 0, sizeof(cbTbl));
32513 - cbTbl.probe = ibmcam_probe;
32514 - cbTbl.setupOnOpen = ibmcam_setup_on_open;
32515 - cbTbl.videoStart = ibmcam_video_start;
32516 - cbTbl.videoStop = ibmcam_video_stop;
32517 - cbTbl.processData = ibmcam_ProcessIsocData;
32518 - cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32519 - cbTbl.adjustPicture = ibmcam_adjust_picture;
32520 - cbTbl.getFPS = ibmcam_calculate_fps;
32521 + memset((void *)&cbTbl, 0, sizeof(cbTbl));
32522 + *(void **)&cbTbl.probe = ibmcam_probe;
32523 + *(void **)&cbTbl.setupOnOpen = ibmcam_setup_on_open;
32524 + *(void **)&cbTbl.videoStart = ibmcam_video_start;
32525 + *(void **)&cbTbl.videoStop = ibmcam_video_stop;
32526 + *(void **)&cbTbl.processData = ibmcam_ProcessIsocData;
32527 + *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32528 + *(void **)&cbTbl.adjustPicture = ibmcam_adjust_picture;
32529 + *(void **)&cbTbl.getFPS = ibmcam_calculate_fps;
32530 return usbvideo_register(
32531 &cams,
32532 MAX_IBMCAM,
32533 diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/konicawc.c linux-2.6.32.45/drivers/media/video/usbvideo/konicawc.c
32534 --- linux-2.6.32.45/drivers/media/video/usbvideo/konicawc.c 2011-03-27 14:31:47.000000000 -0400
32535 +++ linux-2.6.32.45/drivers/media/video/usbvideo/konicawc.c 2011-08-05 20:33:55.000000000 -0400
32536 @@ -225,7 +225,7 @@ static void konicawc_register_input(stru
32537 int error;
32538
32539 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
32540 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32541 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32542
32543 cam->input = input_dev = input_allocate_device();
32544 if (!input_dev) {
32545 @@ -935,16 +935,16 @@ static int __init konicawc_init(void)
32546 struct usbvideo_cb cbTbl;
32547 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
32548 DRIVER_DESC "\n");
32549 - memset(&cbTbl, 0, sizeof(cbTbl));
32550 - cbTbl.probe = konicawc_probe;
32551 - cbTbl.setupOnOpen = konicawc_setup_on_open;
32552 - cbTbl.processData = konicawc_process_isoc;
32553 - cbTbl.getFPS = konicawc_calculate_fps;
32554 - cbTbl.setVideoMode = konicawc_set_video_mode;
32555 - cbTbl.startDataPump = konicawc_start_data;
32556 - cbTbl.stopDataPump = konicawc_stop_data;
32557 - cbTbl.adjustPicture = konicawc_adjust_picture;
32558 - cbTbl.userFree = konicawc_free_uvd;
32559 + memset((void * )&cbTbl, 0, sizeof(cbTbl));
32560 + *(void **)&cbTbl.probe = konicawc_probe;
32561 + *(void **)&cbTbl.setupOnOpen = konicawc_setup_on_open;
32562 + *(void **)&cbTbl.processData = konicawc_process_isoc;
32563 + *(void **)&cbTbl.getFPS = konicawc_calculate_fps;
32564 + *(void **)&cbTbl.setVideoMode = konicawc_set_video_mode;
32565 + *(void **)&cbTbl.startDataPump = konicawc_start_data;
32566 + *(void **)&cbTbl.stopDataPump = konicawc_stop_data;
32567 + *(void **)&cbTbl.adjustPicture = konicawc_adjust_picture;
32568 + *(void **)&cbTbl.userFree = konicawc_free_uvd;
32569 return usbvideo_register(
32570 &cams,
32571 MAX_CAMERAS,
32572 diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/quickcam_messenger.c linux-2.6.32.45/drivers/media/video/usbvideo/quickcam_messenger.c
32573 --- linux-2.6.32.45/drivers/media/video/usbvideo/quickcam_messenger.c 2011-03-27 14:31:47.000000000 -0400
32574 +++ linux-2.6.32.45/drivers/media/video/usbvideo/quickcam_messenger.c 2011-04-17 15:56:46.000000000 -0400
32575 @@ -89,7 +89,7 @@ static void qcm_register_input(struct qc
32576 int error;
32577
32578 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
32579 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32580 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32581
32582 cam->input = input_dev = input_allocate_device();
32583 if (!input_dev) {
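Both webcam drivers passed sizeof(cam->input_physname) as strncat()'s third argument, but that argument limits how many bytes get appended, not the destination's total size, so an almost-full physname could still be overrun; strlcat() takes the full buffer size and truncates instead. A standalone demonstration with a minimal strlcat written only for this example (the kernel provides its own):

        #include <stdio.h>
        #include <string.h>

        /* Minimal strlcat for illustration; returns the length it tried to create. */
        static size_t demo_strlcat(char *dst, const char *src, size_t size)
        {
                size_t dlen = strnlen(dst, size);
                size_t slen = strlen(src);
                size_t copy = slen;

                if (dlen == size)
                        return size + slen;          /* dst not terminated within size */
                if (copy >= size - dlen)
                        copy = size - dlen - 1;      /* truncate, keep room for the NUL */
                memcpy(dst + dlen, src, copy);
                dst[dlen + copy] = '\0';
                return dlen + slen;
        }

        int main(void)
        {
                char physname[16] = "usb-0000:00:1d"; /* 14 of 16 bytes already used */

                /* strncat(physname, "/input0", sizeof(physname)) could append up to
                 * 7 bytes plus a NUL here and overflow; strlcat cannot. */
                demo_strlcat(physname, "/input0", sizeof(physname));
                printf("%s\n", physname);             /* truncated: "usb-0000:00:1d/" */
                return 0;
        }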
32584 diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/ultracam.c linux-2.6.32.45/drivers/media/video/usbvideo/ultracam.c
32585 --- linux-2.6.32.45/drivers/media/video/usbvideo/ultracam.c 2011-03-27 14:31:47.000000000 -0400
32586 +++ linux-2.6.32.45/drivers/media/video/usbvideo/ultracam.c 2011-08-05 20:33:55.000000000 -0400
32587 @@ -655,14 +655,14 @@ static int __init ultracam_init(void)
32588 {
32589 struct usbvideo_cb cbTbl;
32590 memset(&cbTbl, 0, sizeof(cbTbl));
32591 - cbTbl.probe = ultracam_probe;
32592 - cbTbl.setupOnOpen = ultracam_setup_on_open;
32593 - cbTbl.videoStart = ultracam_video_start;
32594 - cbTbl.videoStop = ultracam_video_stop;
32595 - cbTbl.processData = ultracam_ProcessIsocData;
32596 - cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32597 - cbTbl.adjustPicture = ultracam_adjust_picture;
32598 - cbTbl.getFPS = ultracam_calculate_fps;
32599 + *(void **)&cbTbl.probe = ultracam_probe;
32600 + *(void **)&cbTbl.setupOnOpen = ultracam_setup_on_open;
32601 + *(void **)&cbTbl.videoStart = ultracam_video_start;
32602 + *(void **)&cbTbl.videoStop = ultracam_video_stop;
32603 + *(void **)&cbTbl.processData = ultracam_ProcessIsocData;
32604 + *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32605 + *(void **)&cbTbl.adjustPicture = ultracam_adjust_picture;
32606 + *(void **)&cbTbl.getFPS = ultracam_calculate_fps;
32607 return usbvideo_register(
32608 &cams,
32609 MAX_CAMERAS,
32610 diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/usbvideo.c linux-2.6.32.45/drivers/media/video/usbvideo/usbvideo.c
32611 --- linux-2.6.32.45/drivers/media/video/usbvideo/usbvideo.c 2011-03-27 14:31:47.000000000 -0400
32612 +++ linux-2.6.32.45/drivers/media/video/usbvideo/usbvideo.c 2011-08-05 20:33:55.000000000 -0400
32613 @@ -697,15 +697,15 @@ int usbvideo_register(
32614 __func__, cams, base_size, num_cams);
32615
32616 /* Copy callbacks, apply defaults for those that are not set */
32617 - memmove(&cams->cb, cbTbl, sizeof(cams->cb));
32618 + memmove((void *)&cams->cb, cbTbl, sizeof(cams->cb));
32619 if (cams->cb.getFrame == NULL)
32620 - cams->cb.getFrame = usbvideo_GetFrame;
32621 + *(void **)&cams->cb.getFrame = usbvideo_GetFrame;
32622 if (cams->cb.disconnect == NULL)
32623 - cams->cb.disconnect = usbvideo_Disconnect;
32624 + *(void **)&cams->cb.disconnect = usbvideo_Disconnect;
32625 if (cams->cb.startDataPump == NULL)
32626 - cams->cb.startDataPump = usbvideo_StartDataPump;
32627 + *(void **)&cams->cb.startDataPump = usbvideo_StartDataPump;
32628 if (cams->cb.stopDataPump == NULL)
32629 - cams->cb.stopDataPump = usbvideo_StopDataPump;
32630 + *(void **)&cams->cb.stopDataPump = usbvideo_StopDataPump;
32631
32632 cams->num_cameras = num_cams;
32633 cams->cam = (struct uvd *) &cams[1];
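The memset/memmove casts and the "*(void **)&cbTbl.member = fn" assignments in ibmcam, konicawc, ultracam and usbvideo_register() above appear to be how the patch initialises a callback table whose members become const under constification: the pointers are written through a void * alias at the single initialisation site and treated as read-only everywhere else. A sketch of the pattern; the struct and function names are stand-ins:

        /* Members are const-qualified, standing in for what the constify
         * plugin would do to an all-function-pointer struct. */
        struct cb_table {
                int (* const probe)(void *dev);
                void (* const disconnect)(void *dev);
        };

        static int demo_probe(void *dev)
        {
                (void)dev;
                return 0;
        }

        static void init_callbacks(struct cb_table *cb)
        {
                /* "cb->probe = demo_probe" would not compile against the const
                 * member; write through a void * alias at this one spot instead
                 * (this relies on the object itself being in writable memory). */
                *(void **)&cb->probe = (void *)demo_probe;
        }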
32634 diff -urNp linux-2.6.32.45/drivers/media/video/usbvision/usbvision-core.c linux-2.6.32.45/drivers/media/video/usbvision/usbvision-core.c
32635 --- linux-2.6.32.45/drivers/media/video/usbvision/usbvision-core.c 2011-03-27 14:31:47.000000000 -0400
32636 +++ linux-2.6.32.45/drivers/media/video/usbvision/usbvision-core.c 2011-05-16 21:46:57.000000000 -0400
32637 @@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_c
32638 unsigned char rv, gv, bv;
32639 static unsigned char *Y, *U, *V;
32640
32641 + pax_track_stack();
32642 +
32643 frame = usbvision->curFrame;
32644 imageSize = frame->frmwidth * frame->frmheight;
32645 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
32646 diff -urNp linux-2.6.32.45/drivers/media/video/v4l2-device.c linux-2.6.32.45/drivers/media/video/v4l2-device.c
32647 --- linux-2.6.32.45/drivers/media/video/v4l2-device.c 2011-03-27 14:31:47.000000000 -0400
32648 +++ linux-2.6.32.45/drivers/media/video/v4l2-device.c 2011-05-04 17:56:28.000000000 -0400
32649 @@ -50,9 +50,9 @@ int v4l2_device_register(struct device *
32650 EXPORT_SYMBOL_GPL(v4l2_device_register);
32651
32652 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
32653 - atomic_t *instance)
32654 + atomic_unchecked_t *instance)
32655 {
32656 - int num = atomic_inc_return(instance) - 1;
32657 + int num = atomic_inc_return_unchecked(instance) - 1;
32658 int len = strlen(basename);
32659
32660 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
32661 diff -urNp linux-2.6.32.45/drivers/media/video/videobuf-dma-sg.c linux-2.6.32.45/drivers/media/video/videobuf-dma-sg.c
32662 --- linux-2.6.32.45/drivers/media/video/videobuf-dma-sg.c 2011-03-27 14:31:47.000000000 -0400
32663 +++ linux-2.6.32.45/drivers/media/video/videobuf-dma-sg.c 2011-05-16 21:46:57.000000000 -0400
32664 @@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
32665 {
32666 struct videobuf_queue q;
32667
32668 + pax_track_stack();
32669 +
32670 /* Required to make generic handler to call __videobuf_alloc */
32671 q.int_ops = &sg_ops;
32672
32673 diff -urNp linux-2.6.32.45/drivers/message/fusion/mptbase.c linux-2.6.32.45/drivers/message/fusion/mptbase.c
32674 --- linux-2.6.32.45/drivers/message/fusion/mptbase.c 2011-03-27 14:31:47.000000000 -0400
32675 +++ linux-2.6.32.45/drivers/message/fusion/mptbase.c 2011-04-17 15:56:46.000000000 -0400
32676 @@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **s
32677 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
32678 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
32679
32680 +#ifdef CONFIG_GRKERNSEC_HIDESYM
32681 + len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
32682 + NULL, NULL);
32683 +#else
32684 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
32685 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
32686 +#endif
32687 +
32688 /*
32689 * Rounding UP to nearest 4-kB boundary here...
32690 */
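The CONFIG_GRKERNSEC_HIDESYM branch added to procmpt_iocinfo_read() prints NULL in place of the request-frame kernel and DMA addresses, since exposing kernel virtual addresses through /proc helps attackers defeat address-space randomisation; the same pointer-hiding idea recurs throughout the patch. A small sketch of the pattern (function and buffer names are illustrative):

        #include <stdio.h>

        /* Uncomment to see the hardened output, standing in for Kconfig. */
        /* #define CONFIG_GRKERNSEC_HIDESYM 1 */

        static int show_request_frames(char *buf, void *frames, unsigned long dma)
        {
        #ifdef CONFIG_GRKERNSEC_HIDESYM
                /* hide real kernel addresses from unprivileged /proc readers */
                return sprintf(buf, "RequestFrames @ 0x%p (Dma @ 0x%p)\n",
                               (void *)0, (void *)0);
        #else
                return sprintf(buf, "RequestFrames @ 0x%p (Dma @ 0x%p)\n",
                               frames, (void *)dma);
        #endif
        }

        int main(void)
        {
                char buf[128];
                int n = show_request_frames(buf, (void *)buf, 0x1000);

                fwrite(buf, 1, (size_t)n, stdout);
                return 0;
        }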
32691 diff -urNp linux-2.6.32.45/drivers/message/fusion/mptsas.c linux-2.6.32.45/drivers/message/fusion/mptsas.c
32692 --- linux-2.6.32.45/drivers/message/fusion/mptsas.c 2011-03-27 14:31:47.000000000 -0400
32693 +++ linux-2.6.32.45/drivers/message/fusion/mptsas.c 2011-04-17 15:56:46.000000000 -0400
32694 @@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devin
32695 return 0;
32696 }
32697
32698 +static inline void
32699 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
32700 +{
32701 + if (phy_info->port_details) {
32702 + phy_info->port_details->rphy = rphy;
32703 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
32704 + ioc->name, rphy));
32705 + }
32706 +
32707 + if (rphy) {
32708 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
32709 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
32710 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
32711 + ioc->name, rphy, rphy->dev.release));
32712 + }
32713 +}
32714 +
32715 /* no mutex */
32716 static void
32717 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
32718 @@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
32719 return NULL;
32720 }
32721
32722 -static inline void
32723 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
32724 -{
32725 - if (phy_info->port_details) {
32726 - phy_info->port_details->rphy = rphy;
32727 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
32728 - ioc->name, rphy));
32729 - }
32730 -
32731 - if (rphy) {
32732 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
32733 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
32734 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
32735 - ioc->name, rphy, rphy->dev.release));
32736 - }
32737 -}
32738 -
32739 static inline struct sas_port *
32740 mptsas_get_port(struct mptsas_phyinfo *phy_info)
32741 {
32742 diff -urNp linux-2.6.32.45/drivers/message/fusion/mptscsih.c linux-2.6.32.45/drivers/message/fusion/mptscsih.c
32743 --- linux-2.6.32.45/drivers/message/fusion/mptscsih.c 2011-03-27 14:31:47.000000000 -0400
32744 +++ linux-2.6.32.45/drivers/message/fusion/mptscsih.c 2011-04-17 15:56:46.000000000 -0400
32745 @@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
32746
32747 h = shost_priv(SChost);
32748
32749 - if (h) {
32750 - if (h->info_kbuf == NULL)
32751 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
32752 - return h->info_kbuf;
32753 - h->info_kbuf[0] = '\0';
32754 + if (!h)
32755 + return NULL;
32756
32757 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
32758 - h->info_kbuf[size-1] = '\0';
32759 - }
32760 + if (h->info_kbuf == NULL)
32761 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
32762 + return h->info_kbuf;
32763 + h->info_kbuf[0] = '\0';
32764 +
32765 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
32766 + h->info_kbuf[size-1] = '\0';
32767
32768 return h->info_kbuf;
32769 }
32770 diff -urNp linux-2.6.32.45/drivers/message/i2o/i2o_config.c linux-2.6.32.45/drivers/message/i2o/i2o_config.c
32771 --- linux-2.6.32.45/drivers/message/i2o/i2o_config.c 2011-03-27 14:31:47.000000000 -0400
32772 +++ linux-2.6.32.45/drivers/message/i2o/i2o_config.c 2011-05-16 21:46:57.000000000 -0400
32773 @@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned lon
32774 struct i2o_message *msg;
32775 unsigned int iop;
32776
32777 + pax_track_stack();
32778 +
32779 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
32780 return -EFAULT;
32781
32782 diff -urNp linux-2.6.32.45/drivers/message/i2o/i2o_proc.c linux-2.6.32.45/drivers/message/i2o/i2o_proc.c
32783 --- linux-2.6.32.45/drivers/message/i2o/i2o_proc.c 2011-03-27 14:31:47.000000000 -0400
32784 +++ linux-2.6.32.45/drivers/message/i2o/i2o_proc.c 2011-04-17 15:56:46.000000000 -0400
32785 @@ -259,13 +259,6 @@ static char *scsi_devices[] = {
32786 "Array Controller Device"
32787 };
32788
32789 -static char *chtostr(u8 * chars, int n)
32790 -{
32791 - char tmp[256];
32792 - tmp[0] = 0;
32793 - return strncat(tmp, (char *)chars, n);
32794 -}
32795 -
32796 static int i2o_report_query_status(struct seq_file *seq, int block_status,
32797 char *group)
32798 {
32799 @@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct
32800
32801 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
32802 seq_printf(seq, "%-#8x", ddm_table.module_id);
32803 - seq_printf(seq, "%-29s",
32804 - chtostr(ddm_table.module_name_version, 28));
32805 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
32806 seq_printf(seq, "%9d ", ddm_table.data_size);
32807 seq_printf(seq, "%8d", ddm_table.code_size);
32808
32809 @@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(s
32810
32811 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
32812 seq_printf(seq, "%-#8x", dst->module_id);
32813 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
32814 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
32815 + seq_printf(seq, "%-.28s", dst->module_name_version);
32816 + seq_printf(seq, "%-.8s", dst->date);
32817 seq_printf(seq, "%8d ", dst->module_size);
32818 seq_printf(seq, "%8d ", dst->mpb_size);
32819 seq_printf(seq, "0x%04x", dst->module_flags);
32820 @@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(str
32821 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
32822 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
32823 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
32824 - seq_printf(seq, "Vendor info : %s\n",
32825 - chtostr((u8 *) (work32 + 2), 16));
32826 - seq_printf(seq, "Product info : %s\n",
32827 - chtostr((u8 *) (work32 + 6), 16));
32828 - seq_printf(seq, "Description : %s\n",
32829 - chtostr((u8 *) (work32 + 10), 16));
32830 - seq_printf(seq, "Product rev. : %s\n",
32831 - chtostr((u8 *) (work32 + 14), 8));
32832 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
32833 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
32834 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
32835 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
32836
32837 seq_printf(seq, "Serial number : ");
32838 print_serial_number(seq, (u8 *) (work32 + 16),
32839 @@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(str
32840 }
32841
32842 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
32843 - seq_printf(seq, "Module name : %s\n",
32844 - chtostr(result.module_name, 24));
32845 - seq_printf(seq, "Module revision : %s\n",
32846 - chtostr(result.module_rev, 8));
32847 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
32848 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
32849
32850 seq_printf(seq, "Serial number : ");
32851 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
32852 @@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq
32853 return 0;
32854 }
32855
32856 - seq_printf(seq, "Device name : %s\n",
32857 - chtostr(result.device_name, 64));
32858 - seq_printf(seq, "Service name : %s\n",
32859 - chtostr(result.service_name, 64));
32860 - seq_printf(seq, "Physical name : %s\n",
32861 - chtostr(result.physical_location, 64));
32862 - seq_printf(seq, "Instance number : %s\n",
32863 - chtostr(result.instance_number, 4));
32864 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
32865 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
32866 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
32867 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
32868
32869 return 0;
32870 }
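Dropping chtostr() fixes two problems at once: the helper returned a pointer to its own 256-byte stack buffer, which is invalid after the function returns, and it strncat-ed up to n bytes into that buffer without guaranteeing termination. The replacement bounds each fixed-width, possibly unterminated I2O field directly with a printf precision such as "%.28s", which stops at the given byte count or the first NUL. A standalone demonstration:

        #include <stdio.h>
        #include <string.h>

        int main(void)
        {
                /* fixed-width field with no NUL terminator, as in the I2O tables */
                char module_name_version[28];

                memset(module_name_version, 'x', sizeof(module_name_version));

                /* "%.28s" reads at most 28 bytes, so no terminator and no
                 * temporary stack buffer are needed, unlike chtostr() */
                printf("%-.28s\n", module_name_version);
                return 0;
        }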
32871 diff -urNp linux-2.6.32.45/drivers/message/i2o/iop.c linux-2.6.32.45/drivers/message/i2o/iop.c
32872 --- linux-2.6.32.45/drivers/message/i2o/iop.c 2011-03-27 14:31:47.000000000 -0400
32873 +++ linux-2.6.32.45/drivers/message/i2o/iop.c 2011-05-04 17:56:28.000000000 -0400
32874 @@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
32875
32876 spin_lock_irqsave(&c->context_list_lock, flags);
32877
32878 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
32879 - atomic_inc(&c->context_list_counter);
32880 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
32881 + atomic_inc_unchecked(&c->context_list_counter);
32882
32883 - entry->context = atomic_read(&c->context_list_counter);
32884 + entry->context = atomic_read_unchecked(&c->context_list_counter);
32885
32886 list_add(&entry->list, &c->context_list);
32887
32888 @@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(voi
32889
32890 #if BITS_PER_LONG == 64
32891 spin_lock_init(&c->context_list_lock);
32892 - atomic_set(&c->context_list_counter, 0);
32893 + atomic_set_unchecked(&c->context_list_counter, 0);
32894 INIT_LIST_HEAD(&c->context_list);
32895 #endif
32896
32897 diff -urNp linux-2.6.32.45/drivers/mfd/wm8350-i2c.c linux-2.6.32.45/drivers/mfd/wm8350-i2c.c
32898 --- linux-2.6.32.45/drivers/mfd/wm8350-i2c.c 2011-03-27 14:31:47.000000000 -0400
32899 +++ linux-2.6.32.45/drivers/mfd/wm8350-i2c.c 2011-05-16 21:46:57.000000000 -0400
32900 @@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struc
32901 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
32902 int ret;
32903
32904 + pax_track_stack();
32905 +
32906 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
32907 return -EINVAL;
32908
32909 diff -urNp linux-2.6.32.45/drivers/misc/kgdbts.c linux-2.6.32.45/drivers/misc/kgdbts.c
32910 --- linux-2.6.32.45/drivers/misc/kgdbts.c 2011-03-27 14:31:47.000000000 -0400
32911 +++ linux-2.6.32.45/drivers/misc/kgdbts.c 2011-04-17 15:56:46.000000000 -0400
32912 @@ -118,7 +118,7 @@
32913 } while (0)
32914 #define MAX_CONFIG_LEN 40
32915
32916 -static struct kgdb_io kgdbts_io_ops;
32917 +static const struct kgdb_io kgdbts_io_ops;
32918 static char get_buf[BUFMAX];
32919 static int get_buf_cnt;
32920 static char put_buf[BUFMAX];
32921 @@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void
32922 module_put(THIS_MODULE);
32923 }
32924
32925 -static struct kgdb_io kgdbts_io_ops = {
32926 +static const struct kgdb_io kgdbts_io_ops = {
32927 .name = "kgdbts",
32928 .read_char = kgdbts_get_char,
32929 .write_char = kgdbts_put_char,
32930 diff -urNp linux-2.6.32.45/drivers/misc/sgi-gru/gruhandles.c linux-2.6.32.45/drivers/misc/sgi-gru/gruhandles.c
32931 --- linux-2.6.32.45/drivers/misc/sgi-gru/gruhandles.c 2011-03-27 14:31:47.000000000 -0400
32932 +++ linux-2.6.32.45/drivers/misc/sgi-gru/gruhandles.c 2011-04-17 15:56:46.000000000 -0400
32933 @@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistic
32934
32935 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
32936 {
32937 - atomic_long_inc(&mcs_op_statistics[op].count);
32938 - atomic_long_add(clks, &mcs_op_statistics[op].total);
32939 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
32940 + atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
32941 if (mcs_op_statistics[op].max < clks)
32942 mcs_op_statistics[op].max = clks;
32943 }
32944 diff -urNp linux-2.6.32.45/drivers/misc/sgi-gru/gruprocfs.c linux-2.6.32.45/drivers/misc/sgi-gru/gruprocfs.c
32945 --- linux-2.6.32.45/drivers/misc/sgi-gru/gruprocfs.c 2011-03-27 14:31:47.000000000 -0400
32946 +++ linux-2.6.32.45/drivers/misc/sgi-gru/gruprocfs.c 2011-04-17 15:56:46.000000000 -0400
32947 @@ -32,9 +32,9 @@
32948
32949 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
32950
32951 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
32952 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
32953 {
32954 - unsigned long val = atomic_long_read(v);
32955 + unsigned long val = atomic_long_read_unchecked(v);
32956
32957 if (val)
32958 seq_printf(s, "%16lu %s\n", val, id);
32959 @@ -136,8 +136,8 @@ static int mcs_statistics_show(struct se
32960 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
32961
32962 for (op = 0; op < mcsop_last; op++) {
32963 - count = atomic_long_read(&mcs_op_statistics[op].count);
32964 - total = atomic_long_read(&mcs_op_statistics[op].total);
32965 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
32966 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
32967 max = mcs_op_statistics[op].max;
32968 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
32969 count ? total / count : 0, max);
32970 diff -urNp linux-2.6.32.45/drivers/misc/sgi-gru/grutables.h linux-2.6.32.45/drivers/misc/sgi-gru/grutables.h
32971 --- linux-2.6.32.45/drivers/misc/sgi-gru/grutables.h 2011-03-27 14:31:47.000000000 -0400
32972 +++ linux-2.6.32.45/drivers/misc/sgi-gru/grutables.h 2011-04-17 15:56:46.000000000 -0400
32973 @@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
32974 * GRU statistics.
32975 */
32976 struct gru_stats_s {
32977 - atomic_long_t vdata_alloc;
32978 - atomic_long_t vdata_free;
32979 - atomic_long_t gts_alloc;
32980 - atomic_long_t gts_free;
32981 - atomic_long_t vdata_double_alloc;
32982 - atomic_long_t gts_double_allocate;
32983 - atomic_long_t assign_context;
32984 - atomic_long_t assign_context_failed;
32985 - atomic_long_t free_context;
32986 - atomic_long_t load_user_context;
32987 - atomic_long_t load_kernel_context;
32988 - atomic_long_t lock_kernel_context;
32989 - atomic_long_t unlock_kernel_context;
32990 - atomic_long_t steal_user_context;
32991 - atomic_long_t steal_kernel_context;
32992 - atomic_long_t steal_context_failed;
32993 - atomic_long_t nopfn;
32994 - atomic_long_t break_cow;
32995 - atomic_long_t asid_new;
32996 - atomic_long_t asid_next;
32997 - atomic_long_t asid_wrap;
32998 - atomic_long_t asid_reuse;
32999 - atomic_long_t intr;
33000 - atomic_long_t intr_mm_lock_failed;
33001 - atomic_long_t call_os;
33002 - atomic_long_t call_os_offnode_reference;
33003 - atomic_long_t call_os_check_for_bug;
33004 - atomic_long_t call_os_wait_queue;
33005 - atomic_long_t user_flush_tlb;
33006 - atomic_long_t user_unload_context;
33007 - atomic_long_t user_exception;
33008 - atomic_long_t set_context_option;
33009 - atomic_long_t migrate_check;
33010 - atomic_long_t migrated_retarget;
33011 - atomic_long_t migrated_unload;
33012 - atomic_long_t migrated_unload_delay;
33013 - atomic_long_t migrated_nopfn_retarget;
33014 - atomic_long_t migrated_nopfn_unload;
33015 - atomic_long_t tlb_dropin;
33016 - atomic_long_t tlb_dropin_fail_no_asid;
33017 - atomic_long_t tlb_dropin_fail_upm;
33018 - atomic_long_t tlb_dropin_fail_invalid;
33019 - atomic_long_t tlb_dropin_fail_range_active;
33020 - atomic_long_t tlb_dropin_fail_idle;
33021 - atomic_long_t tlb_dropin_fail_fmm;
33022 - atomic_long_t tlb_dropin_fail_no_exception;
33023 - atomic_long_t tlb_dropin_fail_no_exception_war;
33024 - atomic_long_t tfh_stale_on_fault;
33025 - atomic_long_t mmu_invalidate_range;
33026 - atomic_long_t mmu_invalidate_page;
33027 - atomic_long_t mmu_clear_flush_young;
33028 - atomic_long_t flush_tlb;
33029 - atomic_long_t flush_tlb_gru;
33030 - atomic_long_t flush_tlb_gru_tgh;
33031 - atomic_long_t flush_tlb_gru_zero_asid;
33032 -
33033 - atomic_long_t copy_gpa;
33034 -
33035 - atomic_long_t mesq_receive;
33036 - atomic_long_t mesq_receive_none;
33037 - atomic_long_t mesq_send;
33038 - atomic_long_t mesq_send_failed;
33039 - atomic_long_t mesq_noop;
33040 - atomic_long_t mesq_send_unexpected_error;
33041 - atomic_long_t mesq_send_lb_overflow;
33042 - atomic_long_t mesq_send_qlimit_reached;
33043 - atomic_long_t mesq_send_amo_nacked;
33044 - atomic_long_t mesq_send_put_nacked;
33045 - atomic_long_t mesq_qf_not_full;
33046 - atomic_long_t mesq_qf_locked;
33047 - atomic_long_t mesq_qf_noop_not_full;
33048 - atomic_long_t mesq_qf_switch_head_failed;
33049 - atomic_long_t mesq_qf_unexpected_error;
33050 - atomic_long_t mesq_noop_unexpected_error;
33051 - atomic_long_t mesq_noop_lb_overflow;
33052 - atomic_long_t mesq_noop_qlimit_reached;
33053 - atomic_long_t mesq_noop_amo_nacked;
33054 - atomic_long_t mesq_noop_put_nacked;
33055 + atomic_long_unchecked_t vdata_alloc;
33056 + atomic_long_unchecked_t vdata_free;
33057 + atomic_long_unchecked_t gts_alloc;
33058 + atomic_long_unchecked_t gts_free;
33059 + atomic_long_unchecked_t vdata_double_alloc;
33060 + atomic_long_unchecked_t gts_double_allocate;
33061 + atomic_long_unchecked_t assign_context;
33062 + atomic_long_unchecked_t assign_context_failed;
33063 + atomic_long_unchecked_t free_context;
33064 + atomic_long_unchecked_t load_user_context;
33065 + atomic_long_unchecked_t load_kernel_context;
33066 + atomic_long_unchecked_t lock_kernel_context;
33067 + atomic_long_unchecked_t unlock_kernel_context;
33068 + atomic_long_unchecked_t steal_user_context;
33069 + atomic_long_unchecked_t steal_kernel_context;
33070 + atomic_long_unchecked_t steal_context_failed;
33071 + atomic_long_unchecked_t nopfn;
33072 + atomic_long_unchecked_t break_cow;
33073 + atomic_long_unchecked_t asid_new;
33074 + atomic_long_unchecked_t asid_next;
33075 + atomic_long_unchecked_t asid_wrap;
33076 + atomic_long_unchecked_t asid_reuse;
33077 + atomic_long_unchecked_t intr;
33078 + atomic_long_unchecked_t intr_mm_lock_failed;
33079 + atomic_long_unchecked_t call_os;
33080 + atomic_long_unchecked_t call_os_offnode_reference;
33081 + atomic_long_unchecked_t call_os_check_for_bug;
33082 + atomic_long_unchecked_t call_os_wait_queue;
33083 + atomic_long_unchecked_t user_flush_tlb;
33084 + atomic_long_unchecked_t user_unload_context;
33085 + atomic_long_unchecked_t user_exception;
33086 + atomic_long_unchecked_t set_context_option;
33087 + atomic_long_unchecked_t migrate_check;
33088 + atomic_long_unchecked_t migrated_retarget;
33089 + atomic_long_unchecked_t migrated_unload;
33090 + atomic_long_unchecked_t migrated_unload_delay;
33091 + atomic_long_unchecked_t migrated_nopfn_retarget;
33092 + atomic_long_unchecked_t migrated_nopfn_unload;
33093 + atomic_long_unchecked_t tlb_dropin;
33094 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
33095 + atomic_long_unchecked_t tlb_dropin_fail_upm;
33096 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
33097 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
33098 + atomic_long_unchecked_t tlb_dropin_fail_idle;
33099 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
33100 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
33101 + atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
33102 + atomic_long_unchecked_t tfh_stale_on_fault;
33103 + atomic_long_unchecked_t mmu_invalidate_range;
33104 + atomic_long_unchecked_t mmu_invalidate_page;
33105 + atomic_long_unchecked_t mmu_clear_flush_young;
33106 + atomic_long_unchecked_t flush_tlb;
33107 + atomic_long_unchecked_t flush_tlb_gru;
33108 + atomic_long_unchecked_t flush_tlb_gru_tgh;
33109 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
33110 +
33111 + atomic_long_unchecked_t copy_gpa;
33112 +
33113 + atomic_long_unchecked_t mesq_receive;
33114 + atomic_long_unchecked_t mesq_receive_none;
33115 + atomic_long_unchecked_t mesq_send;
33116 + atomic_long_unchecked_t mesq_send_failed;
33117 + atomic_long_unchecked_t mesq_noop;
33118 + atomic_long_unchecked_t mesq_send_unexpected_error;
33119 + atomic_long_unchecked_t mesq_send_lb_overflow;
33120 + atomic_long_unchecked_t mesq_send_qlimit_reached;
33121 + atomic_long_unchecked_t mesq_send_amo_nacked;
33122 + atomic_long_unchecked_t mesq_send_put_nacked;
33123 + atomic_long_unchecked_t mesq_qf_not_full;
33124 + atomic_long_unchecked_t mesq_qf_locked;
33125 + atomic_long_unchecked_t mesq_qf_noop_not_full;
33126 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
33127 + atomic_long_unchecked_t mesq_qf_unexpected_error;
33128 + atomic_long_unchecked_t mesq_noop_unexpected_error;
33129 + atomic_long_unchecked_t mesq_noop_lb_overflow;
33130 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
33131 + atomic_long_unchecked_t mesq_noop_amo_nacked;
33132 + atomic_long_unchecked_t mesq_noop_put_nacked;
33133
33134 };
33135
33136 @@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start
33137 cchop_deallocate, tghop_invalidate, mcsop_last};
33138
33139 struct mcs_op_statistic {
33140 - atomic_long_t count;
33141 - atomic_long_t total;
33142 + atomic_long_unchecked_t count;
33143 + atomic_long_unchecked_t total;
33144 unsigned long max;
33145 };
33146
33147 @@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_st
33148
33149 #define STAT(id) do { \
33150 if (gru_options & OPT_STATS) \
33151 - atomic_long_inc(&gru_stats.id); \
33152 + atomic_long_inc_unchecked(&gru_stats.id); \
33153 } while (0)
33154
33155 #ifdef CONFIG_SGI_GRU_DEBUG
33156 diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xpc.h linux-2.6.32.45/drivers/misc/sgi-xp/xpc.h
33157 --- linux-2.6.32.45/drivers/misc/sgi-xp/xpc.h 2011-03-27 14:31:47.000000000 -0400
33158 +++ linux-2.6.32.45/drivers/misc/sgi-xp/xpc.h 2011-08-05 20:33:55.000000000 -0400
33159 @@ -876,7 +876,7 @@ extern struct xpc_registration xpc_regis
33160 /* found in xpc_main.c */
33161 extern struct device *xpc_part;
33162 extern struct device *xpc_chan;
33163 -extern struct xpc_arch_operations xpc_arch_ops;
33164 +extern const struct xpc_arch_operations xpc_arch_ops;
33165 extern int xpc_disengage_timelimit;
33166 extern int xpc_disengage_timedout;
33167 extern int xpc_activate_IRQ_rcvd;
33168 diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xpc_main.c linux-2.6.32.45/drivers/misc/sgi-xp/xpc_main.c
33169 --- linux-2.6.32.45/drivers/misc/sgi-xp/xpc_main.c 2011-03-27 14:31:47.000000000 -0400
33170 +++ linux-2.6.32.45/drivers/misc/sgi-xp/xpc_main.c 2011-08-05 20:33:55.000000000 -0400
33171 @@ -169,7 +169,7 @@ static struct notifier_block xpc_die_not
33172 .notifier_call = xpc_system_die,
33173 };
33174
33175 -struct xpc_arch_operations xpc_arch_ops;
33176 +const struct xpc_arch_operations xpc_arch_ops;
33177
33178 /*
33179 * Timer function to enforce the timelimit on the partition disengage.
33180 diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xpc_sn2.c linux-2.6.32.45/drivers/misc/sgi-xp/xpc_sn2.c
33181 --- linux-2.6.32.45/drivers/misc/sgi-xp/xpc_sn2.c 2011-03-27 14:31:47.000000000 -0400
33182 +++ linux-2.6.32.45/drivers/misc/sgi-xp/xpc_sn2.c 2011-08-05 20:33:55.000000000 -0400
33183 @@ -2350,7 +2350,7 @@ xpc_received_payload_sn2(struct xpc_chan
33184 xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
33185 }
33186
33187 -static struct xpc_arch_operations xpc_arch_ops_sn2 = {
33188 +static const struct xpc_arch_operations xpc_arch_ops_sn2 = {
33189 .setup_partitions = xpc_setup_partitions_sn2,
33190 .teardown_partitions = xpc_teardown_partitions_sn2,
33191 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
33192 @@ -2413,7 +2413,9 @@ xpc_init_sn2(void)
33193 int ret;
33194 size_t buf_size;
33195
33196 - xpc_arch_ops = xpc_arch_ops_sn2;
33197 + pax_open_kernel();
33198 + memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_sn2, sizeof(xpc_arch_ops_sn2));
33199 + pax_close_kernel();
33200
33201 if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
33202 dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
33203 diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xpc_uv.c linux-2.6.32.45/drivers/misc/sgi-xp/xpc_uv.c
33204 --- linux-2.6.32.45/drivers/misc/sgi-xp/xpc_uv.c 2011-03-27 14:31:47.000000000 -0400
33205 +++ linux-2.6.32.45/drivers/misc/sgi-xp/xpc_uv.c 2011-08-05 20:33:55.000000000 -0400
33206 @@ -1669,7 +1669,7 @@ xpc_received_payload_uv(struct xpc_chann
33207 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
33208 }
33209
33210 -static struct xpc_arch_operations xpc_arch_ops_uv = {
33211 +static const struct xpc_arch_operations xpc_arch_ops_uv = {
33212 .setup_partitions = xpc_setup_partitions_uv,
33213 .teardown_partitions = xpc_teardown_partitions_uv,
33214 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
33215 @@ -1729,7 +1729,9 @@ static struct xpc_arch_operations xpc_ar
33216 int
33217 xpc_init_uv(void)
33218 {
33219 - xpc_arch_ops = xpc_arch_ops_uv;
33220 + pax_open_kernel();
33221 + memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_uv, sizeof(xpc_arch_ops_uv));
33222 + pax_close_kernel();
33223
33224 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
33225 dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
33226 diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xp.h linux-2.6.32.45/drivers/misc/sgi-xp/xp.h
33227 --- linux-2.6.32.45/drivers/misc/sgi-xp/xp.h 2011-03-27 14:31:47.000000000 -0400
33228 +++ linux-2.6.32.45/drivers/misc/sgi-xp/xp.h 2011-08-05 20:33:55.000000000 -0400
33229 @@ -289,7 +289,7 @@ struct xpc_interface {
33230 xpc_notify_func, void *);
33231 void (*received) (short, int, void *);
33232 enum xp_retval (*partid_to_nasids) (short, void *);
33233 -};
33234 +} __no_const;
33235
33236 extern struct xpc_interface xpc_interface;
33237
33238 diff -urNp linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0001.c linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0001.c
33239 --- linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0001.c 2011-03-27 14:31:47.000000000 -0400
33240 +++ linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0001.c 2011-05-16 21:46:57.000000000 -0400
33241 @@ -743,6 +743,8 @@ static int chip_ready (struct map_info *
33242 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
33243 unsigned long timeo = jiffies + HZ;
33244
33245 + pax_track_stack();
33246 +
33247 /* Prevent setting state FL_SYNCING for chip in suspended state. */
33248 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
33249 goto sleep;
33250 @@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(stru
33251 unsigned long initial_adr;
33252 int initial_len = len;
33253
33254 + pax_track_stack();
33255 +
33256 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
33257 adr += chip->start;
33258 initial_adr = adr;
33259 @@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(st
33260 int retries = 3;
33261 int ret;
33262
33263 + pax_track_stack();
33264 +
33265 adr += chip->start;
33266
33267 retry:
33268 diff -urNp linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0020.c linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0020.c
33269 --- linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0020.c 2011-03-27 14:31:47.000000000 -0400
33270 +++ linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0020.c 2011-05-16 21:46:57.000000000 -0400
33271 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
33272 unsigned long cmd_addr;
33273 struct cfi_private *cfi = map->fldrv_priv;
33274
33275 + pax_track_stack();
33276 +
33277 adr += chip->start;
33278
33279 /* Ensure cmd read/writes are aligned. */
33280 @@ -428,6 +430,8 @@ static inline int do_write_buffer(struct
33281 DECLARE_WAITQUEUE(wait, current);
33282 int wbufsize, z;
33283
33284 + pax_track_stack();
33285 +
33286 /* M58LW064A requires bus alignment for buffer wriets -- saw */
33287 if (adr & (map_bankwidth(map)-1))
33288 return -EINVAL;
33289 @@ -742,6 +746,8 @@ static inline int do_erase_oneblock(stru
33290 DECLARE_WAITQUEUE(wait, current);
33291 int ret = 0;
33292
33293 + pax_track_stack();
33294 +
33295 adr += chip->start;
33296
33297 /* Let's determine this according to the interleave only once */
33298 @@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struc
33299 unsigned long timeo = jiffies + HZ;
33300 DECLARE_WAITQUEUE(wait, current);
33301
33302 + pax_track_stack();
33303 +
33304 adr += chip->start;
33305
33306 /* Let's determine this according to the interleave only once */
33307 @@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(str
33308 unsigned long timeo = jiffies + HZ;
33309 DECLARE_WAITQUEUE(wait, current);
33310
33311 + pax_track_stack();
33312 +
33313 adr += chip->start;
33314
33315 /* Let's determine this according to the interleave only once */
33316 diff -urNp linux-2.6.32.45/drivers/mtd/devices/doc2000.c linux-2.6.32.45/drivers/mtd/devices/doc2000.c
33317 --- linux-2.6.32.45/drivers/mtd/devices/doc2000.c 2011-03-27 14:31:47.000000000 -0400
33318 +++ linux-2.6.32.45/drivers/mtd/devices/doc2000.c 2011-04-17 15:56:46.000000000 -0400
33319 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
33320
33321 /* The ECC will not be calculated correctly if less than 512 is written */
33322 /* DBB-
33323 - if (len != 0x200 && eccbuf)
33324 + if (len != 0x200)
33325 printk(KERN_WARNING
33326 "ECC needs a full sector write (adr: %lx size %lx)\n",
33327 (long) to, (long) len);
33328 diff -urNp linux-2.6.32.45/drivers/mtd/devices/doc2001.c linux-2.6.32.45/drivers/mtd/devices/doc2001.c
33329 --- linux-2.6.32.45/drivers/mtd/devices/doc2001.c 2011-03-27 14:31:47.000000000 -0400
33330 +++ linux-2.6.32.45/drivers/mtd/devices/doc2001.c 2011-04-17 15:56:46.000000000 -0400
33331 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
33332 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
33333
33334 /* Don't allow read past end of device */
33335 - if (from >= this->totlen)
33336 + if (from >= this->totlen || !len)
33337 return -EINVAL;
33338
33339 /* Don't allow a single read to cross a 512-byte block boundary */
33340 diff -urNp linux-2.6.32.45/drivers/mtd/ftl.c linux-2.6.32.45/drivers/mtd/ftl.c
33341 --- linux-2.6.32.45/drivers/mtd/ftl.c 2011-03-27 14:31:47.000000000 -0400
33342 +++ linux-2.6.32.45/drivers/mtd/ftl.c 2011-05-16 21:46:57.000000000 -0400
33343 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
33344 loff_t offset;
33345 uint16_t srcunitswap = cpu_to_le16(srcunit);
33346
33347 + pax_track_stack();
33348 +
33349 eun = &part->EUNInfo[srcunit];
33350 xfer = &part->XferInfo[xferunit];
33351 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
33352 diff -urNp linux-2.6.32.45/drivers/mtd/inftlcore.c linux-2.6.32.45/drivers/mtd/inftlcore.c
33353 --- linux-2.6.32.45/drivers/mtd/inftlcore.c 2011-03-27 14:31:47.000000000 -0400
33354 +++ linux-2.6.32.45/drivers/mtd/inftlcore.c 2011-05-16 21:46:57.000000000 -0400
33355 @@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLr
33356 struct inftl_oob oob;
33357 size_t retlen;
33358
33359 + pax_track_stack();
33360 +
33361 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
33362 "pending=%d)\n", inftl, thisVUC, pendingblock);
33363
33364 diff -urNp linux-2.6.32.45/drivers/mtd/inftlmount.c linux-2.6.32.45/drivers/mtd/inftlmount.c
33365 --- linux-2.6.32.45/drivers/mtd/inftlmount.c 2011-03-27 14:31:47.000000000 -0400
33366 +++ linux-2.6.32.45/drivers/mtd/inftlmount.c 2011-05-16 21:46:57.000000000 -0400
33367 @@ -54,6 +54,8 @@ static int find_boot_record(struct INFTL
33368 struct INFTLPartition *ip;
33369 size_t retlen;
33370
33371 + pax_track_stack();
33372 +
33373 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
33374
33375 /*
33376 diff -urNp linux-2.6.32.45/drivers/mtd/lpddr/qinfo_probe.c linux-2.6.32.45/drivers/mtd/lpddr/qinfo_probe.c
33377 --- linux-2.6.32.45/drivers/mtd/lpddr/qinfo_probe.c 2011-03-27 14:31:47.000000000 -0400
33378 +++ linux-2.6.32.45/drivers/mtd/lpddr/qinfo_probe.c 2011-05-16 21:46:57.000000000 -0400
33379 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
33380 {
33381 map_word pfow_val[4];
33382
33383 + pax_track_stack();
33384 +
33385 /* Check identification string */
33386 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
33387 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
33388 diff -urNp linux-2.6.32.45/drivers/mtd/mtdchar.c linux-2.6.32.45/drivers/mtd/mtdchar.c
33389 --- linux-2.6.32.45/drivers/mtd/mtdchar.c 2011-03-27 14:31:47.000000000 -0400
33390 +++ linux-2.6.32.45/drivers/mtd/mtdchar.c 2011-05-16 21:46:57.000000000 -0400
33391 @@ -460,6 +460,8 @@ static int mtd_ioctl(struct inode *inode
33392 u_long size;
33393 struct mtd_info_user info;
33394
33395 + pax_track_stack();
33396 +
33397 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
33398
33399 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
33400 diff -urNp linux-2.6.32.45/drivers/mtd/nftlcore.c linux-2.6.32.45/drivers/mtd/nftlcore.c
33401 --- linux-2.6.32.45/drivers/mtd/nftlcore.c 2011-03-27 14:31:47.000000000 -0400
33402 +++ linux-2.6.32.45/drivers/mtd/nftlcore.c 2011-05-16 21:46:57.000000000 -0400
33403 @@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLre
33404 int inplace = 1;
33405 size_t retlen;
33406
33407 + pax_track_stack();
33408 +
33409 memset(BlockMap, 0xff, sizeof(BlockMap));
33410 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
33411
33412 diff -urNp linux-2.6.32.45/drivers/mtd/nftlmount.c linux-2.6.32.45/drivers/mtd/nftlmount.c
33413 --- linux-2.6.32.45/drivers/mtd/nftlmount.c 2011-03-27 14:31:47.000000000 -0400
33414 +++ linux-2.6.32.45/drivers/mtd/nftlmount.c 2011-05-18 20:09:37.000000000 -0400
33415 @@ -23,6 +23,7 @@
33416 #include <asm/errno.h>
33417 #include <linux/delay.h>
33418 #include <linux/slab.h>
33419 +#include <linux/sched.h>
33420 #include <linux/mtd/mtd.h>
33421 #include <linux/mtd/nand.h>
33422 #include <linux/mtd/nftl.h>
33423 @@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLr
33424 struct mtd_info *mtd = nftl->mbd.mtd;
33425 unsigned int i;
33426
33427 + pax_track_stack();
33428 +
33429 /* Assume logical EraseSize == physical erasesize for starting the scan.
33430 We'll sort it out later if we find a MediaHeader which says otherwise */
33431 /* Actually, we won't. The new DiskOnChip driver has already scanned
33432 diff -urNp linux-2.6.32.45/drivers/mtd/ubi/build.c linux-2.6.32.45/drivers/mtd/ubi/build.c
33433 --- linux-2.6.32.45/drivers/mtd/ubi/build.c 2011-03-27 14:31:47.000000000 -0400
33434 +++ linux-2.6.32.45/drivers/mtd/ubi/build.c 2011-04-17 15:56:46.000000000 -0400
33435 @@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
33436 static int __init bytes_str_to_int(const char *str)
33437 {
33438 char *endp;
33439 - unsigned long result;
33440 + unsigned long result, scale = 1;
33441
33442 result = simple_strtoul(str, &endp, 0);
33443 if (str == endp || result >= INT_MAX) {
33444 @@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const
33445
33446 switch (*endp) {
33447 case 'G':
33448 - result *= 1024;
33449 + scale *= 1024;
33450 case 'M':
33451 - result *= 1024;
33452 + scale *= 1024;
33453 case 'K':
33454 - result *= 1024;
33455 + scale *= 1024;
33456 if (endp[1] == 'i' && endp[2] == 'B')
33457 endp += 2;
33458 case '\0':
33459 @@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const
33460 return -EINVAL;
33461 }
33462
33463 - return result;
33464 + if ((intoverflow_t)result*scale >= INT_MAX) {
33465 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
33466 + str);
33467 + return -EINVAL;
33468 + }
33469 +
33470 + return result*scale;
33471 }
33472
33473 /**
33474 diff -urNp linux-2.6.32.45/drivers/net/bnx2.c linux-2.6.32.45/drivers/net/bnx2.c
33475 --- linux-2.6.32.45/drivers/net/bnx2.c 2011-03-27 14:31:47.000000000 -0400
33476 +++ linux-2.6.32.45/drivers/net/bnx2.c 2011-05-16 21:46:57.000000000 -0400
33477 @@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
33478 int rc = 0;
33479 u32 magic, csum;
33480
33481 + pax_track_stack();
33482 +
33483 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
33484 goto test_nvram_done;
33485
33486 diff -urNp linux-2.6.32.45/drivers/net/cxgb3/l2t.h linux-2.6.32.45/drivers/net/cxgb3/l2t.h
33487 --- linux-2.6.32.45/drivers/net/cxgb3/l2t.h 2011-03-27 14:31:47.000000000 -0400
33488 +++ linux-2.6.32.45/drivers/net/cxgb3/l2t.h 2011-08-05 20:33:55.000000000 -0400
33489 @@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)
33490 */
33491 struct l2t_skb_cb {
33492 arp_failure_handler_func arp_failure_handler;
33493 -};
33494 +} __no_const;
33495
33496 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
33497
33498 diff -urNp linux-2.6.32.45/drivers/net/cxgb3/t3_hw.c linux-2.6.32.45/drivers/net/cxgb3/t3_hw.c
33499 --- linux-2.6.32.45/drivers/net/cxgb3/t3_hw.c 2011-03-27 14:31:47.000000000 -0400
33500 +++ linux-2.6.32.45/drivers/net/cxgb3/t3_hw.c 2011-05-16 21:46:57.000000000 -0400
33501 @@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter
33502 int i, addr, ret;
33503 struct t3_vpd vpd;
33504
33505 + pax_track_stack();
33506 +
33507 /*
33508 * Card information is normally at VPD_BASE but some early cards had
33509 * it at 0.
33510 diff -urNp linux-2.6.32.45/drivers/net/e1000e/82571.c linux-2.6.32.45/drivers/net/e1000e/82571.c
33511 --- linux-2.6.32.45/drivers/net/e1000e/82571.c 2011-03-27 14:31:47.000000000 -0400
33512 +++ linux-2.6.32.45/drivers/net/e1000e/82571.c 2011-08-23 21:22:32.000000000 -0400
33513 @@ -212,7 +212,7 @@ static s32 e1000_init_mac_params_82571(s
33514 {
33515 struct e1000_hw *hw = &adapter->hw;
33516 struct e1000_mac_info *mac = &hw->mac;
33517 - struct e1000_mac_operations *func = &mac->ops;
33518 + e1000_mac_operations_no_const *func = &mac->ops;
33519 u32 swsm = 0;
33520 u32 swsm2 = 0;
33521 bool force_clear_smbi = false;
33522 @@ -1656,7 +1656,7 @@ static void e1000_clear_hw_cntrs_82571(s
33523 temp = er32(ICRXDMTC);
33524 }
33525
33526 -static struct e1000_mac_operations e82571_mac_ops = {
33527 +static const struct e1000_mac_operations e82571_mac_ops = {
33528 /* .check_mng_mode: mac type dependent */
33529 /* .check_for_link: media type dependent */
33530 .id_led_init = e1000e_id_led_init,
33531 @@ -1674,7 +1674,7 @@ static struct e1000_mac_operations e8257
33532 .setup_led = e1000e_setup_led_generic,
33533 };
33534
33535 -static struct e1000_phy_operations e82_phy_ops_igp = {
33536 +static const struct e1000_phy_operations e82_phy_ops_igp = {
33537 .acquire_phy = e1000_get_hw_semaphore_82571,
33538 .check_reset_block = e1000e_check_reset_block_generic,
33539 .commit_phy = NULL,
33540 @@ -1691,7 +1691,7 @@ static struct e1000_phy_operations e82_p
33541 .cfg_on_link_up = NULL,
33542 };
33543
33544 -static struct e1000_phy_operations e82_phy_ops_m88 = {
33545 +static const struct e1000_phy_operations e82_phy_ops_m88 = {
33546 .acquire_phy = e1000_get_hw_semaphore_82571,
33547 .check_reset_block = e1000e_check_reset_block_generic,
33548 .commit_phy = e1000e_phy_sw_reset,
33549 @@ -1708,7 +1708,7 @@ static struct e1000_phy_operations e82_p
33550 .cfg_on_link_up = NULL,
33551 };
33552
33553 -static struct e1000_phy_operations e82_phy_ops_bm = {
33554 +static const struct e1000_phy_operations e82_phy_ops_bm = {
33555 .acquire_phy = e1000_get_hw_semaphore_82571,
33556 .check_reset_block = e1000e_check_reset_block_generic,
33557 .commit_phy = e1000e_phy_sw_reset,
33558 @@ -1725,7 +1725,7 @@ static struct e1000_phy_operations e82_p
33559 .cfg_on_link_up = NULL,
33560 };
33561
33562 -static struct e1000_nvm_operations e82571_nvm_ops = {
33563 +static const struct e1000_nvm_operations e82571_nvm_ops = {
33564 .acquire_nvm = e1000_acquire_nvm_82571,
33565 .read_nvm = e1000e_read_nvm_eerd,
33566 .release_nvm = e1000_release_nvm_82571,
33567 diff -urNp linux-2.6.32.45/drivers/net/e1000e/e1000.h linux-2.6.32.45/drivers/net/e1000e/e1000.h
33568 --- linux-2.6.32.45/drivers/net/e1000e/e1000.h 2011-03-27 14:31:47.000000000 -0400
33569 +++ linux-2.6.32.45/drivers/net/e1000e/e1000.h 2011-04-17 15:56:46.000000000 -0400
33570 @@ -375,9 +375,9 @@ struct e1000_info {
33571 u32 pba;
33572 u32 max_hw_frame_size;
33573 s32 (*get_variants)(struct e1000_adapter *);
33574 - struct e1000_mac_operations *mac_ops;
33575 - struct e1000_phy_operations *phy_ops;
33576 - struct e1000_nvm_operations *nvm_ops;
33577 + const struct e1000_mac_operations *mac_ops;
33578 + const struct e1000_phy_operations *phy_ops;
33579 + const struct e1000_nvm_operations *nvm_ops;
33580 };
33581
33582 /* hardware capability, feature, and workaround flags */
33583 diff -urNp linux-2.6.32.45/drivers/net/e1000e/es2lan.c linux-2.6.32.45/drivers/net/e1000e/es2lan.c
33584 --- linux-2.6.32.45/drivers/net/e1000e/es2lan.c 2011-03-27 14:31:47.000000000 -0400
33585 +++ linux-2.6.32.45/drivers/net/e1000e/es2lan.c 2011-08-23 21:22:32.000000000 -0400
33586 @@ -207,7 +207,7 @@ static s32 e1000_init_mac_params_80003es
33587 {
33588 struct e1000_hw *hw = &adapter->hw;
33589 struct e1000_mac_info *mac = &hw->mac;
33590 - struct e1000_mac_operations *func = &mac->ops;
33591 + e1000_mac_operations_no_const *func = &mac->ops;
33592
33593 /* Set media type */
33594 switch (adapter->pdev->device) {
33595 @@ -1365,7 +1365,7 @@ static void e1000_clear_hw_cntrs_80003es
33596 temp = er32(ICRXDMTC);
33597 }
33598
33599 -static struct e1000_mac_operations es2_mac_ops = {
33600 +static const struct e1000_mac_operations es2_mac_ops = {
33601 .id_led_init = e1000e_id_led_init,
33602 .check_mng_mode = e1000e_check_mng_mode_generic,
33603 /* check_for_link dependent on media type */
33604 @@ -1383,7 +1383,7 @@ static struct e1000_mac_operations es2_m
33605 .setup_led = e1000e_setup_led_generic,
33606 };
33607
33608 -static struct e1000_phy_operations es2_phy_ops = {
33609 +static const struct e1000_phy_operations es2_phy_ops = {
33610 .acquire_phy = e1000_acquire_phy_80003es2lan,
33611 .check_reset_block = e1000e_check_reset_block_generic,
33612 .commit_phy = e1000e_phy_sw_reset,
33613 @@ -1400,7 +1400,7 @@ static struct e1000_phy_operations es2_p
33614 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
33615 };
33616
33617 -static struct e1000_nvm_operations es2_nvm_ops = {
33618 +static const struct e1000_nvm_operations es2_nvm_ops = {
33619 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
33620 .read_nvm = e1000e_read_nvm_eerd,
33621 .release_nvm = e1000_release_nvm_80003es2lan,
33622 diff -urNp linux-2.6.32.45/drivers/net/e1000e/hw.h linux-2.6.32.45/drivers/net/e1000e/hw.h
33623 --- linux-2.6.32.45/drivers/net/e1000e/hw.h 2011-03-27 14:31:47.000000000 -0400
33624 +++ linux-2.6.32.45/drivers/net/e1000e/hw.h 2011-08-23 21:27:38.000000000 -0400
33625 @@ -753,6 +753,7 @@ struct e1000_mac_operations {
33626 s32 (*setup_physical_interface)(struct e1000_hw *);
33627 s32 (*setup_led)(struct e1000_hw *);
33628 };
33629 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
33630
33631 /* Function pointers for the PHY. */
33632 struct e1000_phy_operations {
33633 @@ -774,6 +775,7 @@ struct e1000_phy_operations {
33634 s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
33635 s32 (*cfg_on_link_up)(struct e1000_hw *);
33636 };
33637 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
33638
33639 /* Function pointers for the NVM. */
33640 struct e1000_nvm_operations {
33641 @@ -785,9 +787,10 @@ struct e1000_nvm_operations {
33642 s32 (*validate_nvm)(struct e1000_hw *);
33643 s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
33644 };
33645 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
33646
33647 struct e1000_mac_info {
33648 - struct e1000_mac_operations ops;
33649 + e1000_mac_operations_no_const ops;
33650
33651 u8 addr[6];
33652 u8 perm_addr[6];
33653 @@ -823,7 +826,7 @@ struct e1000_mac_info {
33654 };
33655
33656 struct e1000_phy_info {
33657 - struct e1000_phy_operations ops;
33658 + e1000_phy_operations_no_const ops;
33659
33660 enum e1000_phy_type type;
33661
33662 @@ -857,7 +860,7 @@ struct e1000_phy_info {
33663 };
33664
33665 struct e1000_nvm_info {
33666 - struct e1000_nvm_operations ops;
33667 + e1000_nvm_operations_no_const ops;
33668
33669 enum e1000_nvm_type type;
33670 enum e1000_nvm_override override;
33671 diff -urNp linux-2.6.32.45/drivers/net/e1000e/ich8lan.c linux-2.6.32.45/drivers/net/e1000e/ich8lan.c
33672 --- linux-2.6.32.45/drivers/net/e1000e/ich8lan.c 2011-05-10 22:12:01.000000000 -0400
33673 +++ linux-2.6.32.45/drivers/net/e1000e/ich8lan.c 2011-08-23 21:22:32.000000000 -0400
33674 @@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan
33675 }
33676 }
33677
33678 -static struct e1000_mac_operations ich8_mac_ops = {
33679 +static const struct e1000_mac_operations ich8_mac_ops = {
33680 .id_led_init = e1000e_id_led_init,
33681 .check_mng_mode = e1000_check_mng_mode_ich8lan,
33682 .check_for_link = e1000_check_for_copper_link_ich8lan,
33683 @@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_
33684 /* id_led_init dependent on mac type */
33685 };
33686
33687 -static struct e1000_phy_operations ich8_phy_ops = {
33688 +static const struct e1000_phy_operations ich8_phy_ops = {
33689 .acquire_phy = e1000_acquire_swflag_ich8lan,
33690 .check_reset_block = e1000_check_reset_block_ich8lan,
33691 .commit_phy = NULL,
33692 @@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_
33693 .write_phy_reg = e1000e_write_phy_reg_igp,
33694 };
33695
33696 -static struct e1000_nvm_operations ich8_nvm_ops = {
33697 +static const struct e1000_nvm_operations ich8_nvm_ops = {
33698 .acquire_nvm = e1000_acquire_nvm_ich8lan,
33699 .read_nvm = e1000_read_nvm_ich8lan,
33700 .release_nvm = e1000_release_nvm_ich8lan,
33701 diff -urNp linux-2.6.32.45/drivers/net/hamradio/6pack.c linux-2.6.32.45/drivers/net/hamradio/6pack.c
33702 --- linux-2.6.32.45/drivers/net/hamradio/6pack.c 2011-07-13 17:23:04.000000000 -0400
33703 +++ linux-2.6.32.45/drivers/net/hamradio/6pack.c 2011-07-13 17:23:18.000000000 -0400
33704 @@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct t
33705 unsigned char buf[512];
33706 int count1;
33707
33708 + pax_track_stack();
33709 +
33710 if (!count)
33711 return;
33712
33713 diff -urNp linux-2.6.32.45/drivers/net/ibmveth.c linux-2.6.32.45/drivers/net/ibmveth.c
33714 --- linux-2.6.32.45/drivers/net/ibmveth.c 2011-03-27 14:31:47.000000000 -0400
33715 +++ linux-2.6.32.45/drivers/net/ibmveth.c 2011-04-17 15:56:46.000000000 -0400
33716 @@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attr
33717 NULL,
33718 };
33719
33720 -static struct sysfs_ops veth_pool_ops = {
33721 +static const struct sysfs_ops veth_pool_ops = {
33722 .show = veth_pool_show,
33723 .store = veth_pool_store,
33724 };
33725 diff -urNp linux-2.6.32.45/drivers/net/igb/e1000_82575.c linux-2.6.32.45/drivers/net/igb/e1000_82575.c
33726 --- linux-2.6.32.45/drivers/net/igb/e1000_82575.c 2011-03-27 14:31:47.000000000 -0400
33727 +++ linux-2.6.32.45/drivers/net/igb/e1000_82575.c 2011-08-23 21:22:32.000000000 -0400
33728 @@ -1410,7 +1410,7 @@ void igb_vmdq_set_replication_pf(struct
33729 wr32(E1000_VT_CTL, vt_ctl);
33730 }
33731
33732 -static struct e1000_mac_operations e1000_mac_ops_82575 = {
33733 +static const struct e1000_mac_operations e1000_mac_ops_82575 = {
33734 .reset_hw = igb_reset_hw_82575,
33735 .init_hw = igb_init_hw_82575,
33736 .check_for_link = igb_check_for_link_82575,
33737 @@ -1419,13 +1419,13 @@ static struct e1000_mac_operations e1000
33738 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
33739 };
33740
33741 -static struct e1000_phy_operations e1000_phy_ops_82575 = {
33742 +static const struct e1000_phy_operations e1000_phy_ops_82575 = {
33743 .acquire = igb_acquire_phy_82575,
33744 .get_cfg_done = igb_get_cfg_done_82575,
33745 .release = igb_release_phy_82575,
33746 };
33747
33748 -static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
33749 +static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
33750 .acquire = igb_acquire_nvm_82575,
33751 .read = igb_read_nvm_eerd,
33752 .release = igb_release_nvm_82575,
33753 diff -urNp linux-2.6.32.45/drivers/net/igb/e1000_hw.h linux-2.6.32.45/drivers/net/igb/e1000_hw.h
33754 --- linux-2.6.32.45/drivers/net/igb/e1000_hw.h 2011-03-27 14:31:47.000000000 -0400
33755 +++ linux-2.6.32.45/drivers/net/igb/e1000_hw.h 2011-08-23 21:28:01.000000000 -0400
33756 @@ -288,6 +288,7 @@ struct e1000_mac_operations {
33757 s32 (*read_mac_addr)(struct e1000_hw *);
33758 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
33759 };
33760 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
33761
33762 struct e1000_phy_operations {
33763 s32 (*acquire)(struct e1000_hw *);
33764 @@ -303,6 +304,7 @@ struct e1000_phy_operations {
33765 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
33766 s32 (*write_reg)(struct e1000_hw *, u32, u16);
33767 };
33768 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
33769
33770 struct e1000_nvm_operations {
33771 s32 (*acquire)(struct e1000_hw *);
33772 @@ -310,6 +312,7 @@ struct e1000_nvm_operations {
33773 void (*release)(struct e1000_hw *);
33774 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
33775 };
33776 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
33777
33778 struct e1000_info {
33779 s32 (*get_invariants)(struct e1000_hw *);
33780 @@ -321,7 +324,7 @@ struct e1000_info {
33781 extern const struct e1000_info e1000_82575_info;
33782
33783 struct e1000_mac_info {
33784 - struct e1000_mac_operations ops;
33785 + e1000_mac_operations_no_const ops;
33786
33787 u8 addr[6];
33788 u8 perm_addr[6];
33789 @@ -365,7 +368,7 @@ struct e1000_mac_info {
33790 };
33791
33792 struct e1000_phy_info {
33793 - struct e1000_phy_operations ops;
33794 + e1000_phy_operations_no_const ops;
33795
33796 enum e1000_phy_type type;
33797
33798 @@ -400,7 +403,7 @@ struct e1000_phy_info {
33799 };
33800
33801 struct e1000_nvm_info {
33802 - struct e1000_nvm_operations ops;
33803 + e1000_nvm_operations_no_const ops;
33804
33805 enum e1000_nvm_type type;
33806 enum e1000_nvm_override override;
33807 @@ -446,6 +449,7 @@ struct e1000_mbx_operations {
33808 s32 (*check_for_ack)(struct e1000_hw *, u16);
33809 s32 (*check_for_rst)(struct e1000_hw *, u16);
33810 };
33811 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
33812
33813 struct e1000_mbx_stats {
33814 u32 msgs_tx;
33815 @@ -457,7 +461,7 @@ struct e1000_mbx_stats {
33816 };
33817
33818 struct e1000_mbx_info {
33819 - struct e1000_mbx_operations ops;
33820 + e1000_mbx_operations_no_const ops;
33821 struct e1000_mbx_stats stats;
33822 u32 timeout;
33823 u32 usec_delay;
33824 diff -urNp linux-2.6.32.45/drivers/net/igbvf/vf.h linux-2.6.32.45/drivers/net/igbvf/vf.h
33825 --- linux-2.6.32.45/drivers/net/igbvf/vf.h 2011-03-27 14:31:47.000000000 -0400
33826 +++ linux-2.6.32.45/drivers/net/igbvf/vf.h 2011-08-23 21:22:38.000000000 -0400
33827 @@ -187,9 +187,10 @@ struct e1000_mac_operations {
33828 s32 (*read_mac_addr)(struct e1000_hw *);
33829 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
33830 };
33831 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
33832
33833 struct e1000_mac_info {
33834 - struct e1000_mac_operations ops;
33835 + e1000_mac_operations_no_const ops;
33836 u8 addr[6];
33837 u8 perm_addr[6];
33838
33839 @@ -211,6 +212,7 @@ struct e1000_mbx_operations {
33840 s32 (*check_for_ack)(struct e1000_hw *);
33841 s32 (*check_for_rst)(struct e1000_hw *);
33842 };
33843 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
33844
33845 struct e1000_mbx_stats {
33846 u32 msgs_tx;
33847 @@ -222,7 +224,7 @@ struct e1000_mbx_stats {
33848 };
33849
33850 struct e1000_mbx_info {
33851 - struct e1000_mbx_operations ops;
33852 + e1000_mbx_operations_no_const ops;
33853 struct e1000_mbx_stats stats;
33854 u32 timeout;
33855 u32 usec_delay;
33856 diff -urNp linux-2.6.32.45/drivers/net/iseries_veth.c linux-2.6.32.45/drivers/net/iseries_veth.c
33857 --- linux-2.6.32.45/drivers/net/iseries_veth.c 2011-03-27 14:31:47.000000000 -0400
33858 +++ linux-2.6.32.45/drivers/net/iseries_veth.c 2011-04-17 15:56:46.000000000 -0400
33859 @@ -384,7 +384,7 @@ static struct attribute *veth_cnx_defaul
33860 NULL
33861 };
33862
33863 -static struct sysfs_ops veth_cnx_sysfs_ops = {
33864 +static const struct sysfs_ops veth_cnx_sysfs_ops = {
33865 .show = veth_cnx_attribute_show
33866 };
33867
33868 @@ -441,7 +441,7 @@ static struct attribute *veth_port_defau
33869 NULL
33870 };
33871
33872 -static struct sysfs_ops veth_port_sysfs_ops = {
33873 +static const struct sysfs_ops veth_port_sysfs_ops = {
33874 .show = veth_port_attribute_show
33875 };
33876
33877 diff -urNp linux-2.6.32.45/drivers/net/ixgb/ixgb_main.c linux-2.6.32.45/drivers/net/ixgb/ixgb_main.c
33878 --- linux-2.6.32.45/drivers/net/ixgb/ixgb_main.c 2011-03-27 14:31:47.000000000 -0400
33879 +++ linux-2.6.32.45/drivers/net/ixgb/ixgb_main.c 2011-05-16 21:46:57.000000000 -0400
33880 @@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev
33881 u32 rctl;
33882 int i;
33883
33884 + pax_track_stack();
33885 +
33886 /* Check for Promiscuous and All Multicast modes */
33887
33888 rctl = IXGB_READ_REG(hw, RCTL);
33889 diff -urNp linux-2.6.32.45/drivers/net/ixgb/ixgb_param.c linux-2.6.32.45/drivers/net/ixgb/ixgb_param.c
33890 --- linux-2.6.32.45/drivers/net/ixgb/ixgb_param.c 2011-03-27 14:31:47.000000000 -0400
33891 +++ linux-2.6.32.45/drivers/net/ixgb/ixgb_param.c 2011-05-16 21:46:57.000000000 -0400
33892 @@ -260,6 +260,9 @@ void __devinit
33893 ixgb_check_options(struct ixgb_adapter *adapter)
33894 {
33895 int bd = adapter->bd_number;
33896 +
33897 + pax_track_stack();
33898 +
33899 if (bd >= IXGB_MAX_NIC) {
33900 printk(KERN_NOTICE
33901 "Warning: no configuration for board #%i\n", bd);
33902 diff -urNp linux-2.6.32.45/drivers/net/ixgbe/ixgbe_type.h linux-2.6.32.45/drivers/net/ixgbe/ixgbe_type.h
33903 --- linux-2.6.32.45/drivers/net/ixgbe/ixgbe_type.h 2011-03-27 14:31:47.000000000 -0400
33904 +++ linux-2.6.32.45/drivers/net/ixgbe/ixgbe_type.h 2011-08-23 21:22:38.000000000 -0400
33905 @@ -2327,6 +2327,7 @@ struct ixgbe_eeprom_operations {
33906 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
33907 s32 (*update_checksum)(struct ixgbe_hw *);
33908 };
33909 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
33910
33911 struct ixgbe_mac_operations {
33912 s32 (*init_hw)(struct ixgbe_hw *);
33913 @@ -2376,6 +2377,7 @@ struct ixgbe_mac_operations {
33914 /* Flow Control */
33915 s32 (*fc_enable)(struct ixgbe_hw *, s32);
33916 };
33917 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
33918
33919 struct ixgbe_phy_operations {
33920 s32 (*identify)(struct ixgbe_hw *);
33921 @@ -2394,9 +2396,10 @@ struct ixgbe_phy_operations {
33922 s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
33923 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
33924 };
33925 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
33926
33927 struct ixgbe_eeprom_info {
33928 - struct ixgbe_eeprom_operations ops;
33929 + ixgbe_eeprom_operations_no_const ops;
33930 enum ixgbe_eeprom_type type;
33931 u32 semaphore_delay;
33932 u16 word_size;
33933 @@ -2404,7 +2407,7 @@ struct ixgbe_eeprom_info {
33934 };
33935
33936 struct ixgbe_mac_info {
33937 - struct ixgbe_mac_operations ops;
33938 + ixgbe_mac_operations_no_const ops;
33939 enum ixgbe_mac_type type;
33940 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
33941 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
33942 @@ -2423,7 +2426,7 @@ struct ixgbe_mac_info {
33943 };
33944
33945 struct ixgbe_phy_info {
33946 - struct ixgbe_phy_operations ops;
33947 + ixgbe_phy_operations_no_const ops;
33948 struct mdio_if_info mdio;
33949 enum ixgbe_phy_type type;
33950 u32 id;
33951 diff -urNp linux-2.6.32.45/drivers/net/mlx4/main.c linux-2.6.32.45/drivers/net/mlx4/main.c
33952 --- linux-2.6.32.45/drivers/net/mlx4/main.c 2011-03-27 14:31:47.000000000 -0400
33953 +++ linux-2.6.32.45/drivers/net/mlx4/main.c 2011-05-18 20:09:37.000000000 -0400
33954 @@ -38,6 +38,7 @@
33955 #include <linux/errno.h>
33956 #include <linux/pci.h>
33957 #include <linux/dma-mapping.h>
33958 +#include <linux/sched.h>
33959
33960 #include <linux/mlx4/device.h>
33961 #include <linux/mlx4/doorbell.h>
33962 @@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev
33963 u64 icm_size;
33964 int err;
33965
33966 + pax_track_stack();
33967 +
33968 err = mlx4_QUERY_FW(dev);
33969 if (err) {
33970 if (err == -EACCES)
33971 diff -urNp linux-2.6.32.45/drivers/net/niu.c linux-2.6.32.45/drivers/net/niu.c
33972 --- linux-2.6.32.45/drivers/net/niu.c 2011-05-10 22:12:01.000000000 -0400
33973 +++ linux-2.6.32.45/drivers/net/niu.c 2011-05-16 21:46:57.000000000 -0400
33974 @@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struc
33975 int i, num_irqs, err;
33976 u8 first_ldg;
33977
33978 + pax_track_stack();
33979 +
33980 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
33981 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
33982 ldg_num_map[i] = first_ldg + i;
33983 diff -urNp linux-2.6.32.45/drivers/net/pcnet32.c linux-2.6.32.45/drivers/net/pcnet32.c
33984 --- linux-2.6.32.45/drivers/net/pcnet32.c 2011-03-27 14:31:47.000000000 -0400
33985 +++ linux-2.6.32.45/drivers/net/pcnet32.c 2011-08-05 20:33:55.000000000 -0400
33986 @@ -79,7 +79,7 @@ static int cards_found;
33987 /*
33988 * VLB I/O addresses
33989 */
33990 -static unsigned int pcnet32_portlist[] __initdata =
33991 +static unsigned int pcnet32_portlist[] __devinitdata =
33992 { 0x300, 0x320, 0x340, 0x360, 0 };
33993
33994 static int pcnet32_debug = 0;
33995 @@ -267,7 +267,7 @@ struct pcnet32_private {
33996 struct sk_buff **rx_skbuff;
33997 dma_addr_t *tx_dma_addr;
33998 dma_addr_t *rx_dma_addr;
33999 - struct pcnet32_access a;
34000 + struct pcnet32_access *a;
34001 spinlock_t lock; /* Guard lock */
34002 unsigned int cur_rx, cur_tx; /* The next free ring entry */
34003 unsigned int rx_ring_size; /* current rx ring size */
34004 @@ -457,9 +457,9 @@ static void pcnet32_netif_start(struct n
34005 u16 val;
34006
34007 netif_wake_queue(dev);
34008 - val = lp->a.read_csr(ioaddr, CSR3);
34009 + val = lp->a->read_csr(ioaddr, CSR3);
34010 val &= 0x00ff;
34011 - lp->a.write_csr(ioaddr, CSR3, val);
34012 + lp->a->write_csr(ioaddr, CSR3, val);
34013 napi_enable(&lp->napi);
34014 }
34015
34016 @@ -744,7 +744,7 @@ static u32 pcnet32_get_link(struct net_d
34017 r = mii_link_ok(&lp->mii_if);
34018 } else if (lp->chip_version >= PCNET32_79C970A) {
34019 ulong ioaddr = dev->base_addr; /* card base I/O address */
34020 - r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
34021 + r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
34022 } else { /* can not detect link on really old chips */
34023 r = 1;
34024 }
34025 @@ -806,7 +806,7 @@ static int pcnet32_set_ringparam(struct
34026 pcnet32_netif_stop(dev);
34027
34028 spin_lock_irqsave(&lp->lock, flags);
34029 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34030 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34031
34032 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
34033
34034 @@ -886,7 +886,7 @@ static void pcnet32_ethtool_test(struct
34035 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
34036 {
34037 struct pcnet32_private *lp = netdev_priv(dev);
34038 - struct pcnet32_access *a = &lp->a; /* access to registers */
34039 + struct pcnet32_access *a = lp->a; /* access to registers */
34040 ulong ioaddr = dev->base_addr; /* card base I/O address */
34041 struct sk_buff *skb; /* sk buff */
34042 int x, i; /* counters */
34043 @@ -906,21 +906,21 @@ static int pcnet32_loopback_test(struct
34044 pcnet32_netif_stop(dev);
34045
34046 spin_lock_irqsave(&lp->lock, flags);
34047 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34048 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34049
34050 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
34051
34052 /* Reset the PCNET32 */
34053 - lp->a.reset(ioaddr);
34054 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34055 + lp->a->reset(ioaddr);
34056 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34057
34058 /* switch pcnet32 to 32bit mode */
34059 - lp->a.write_bcr(ioaddr, 20, 2);
34060 + lp->a->write_bcr(ioaddr, 20, 2);
34061
34062 /* purge & init rings but don't actually restart */
34063 pcnet32_restart(dev, 0x0000);
34064
34065 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34066 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34067
34068 /* Initialize Transmit buffers. */
34069 size = data_len + 15;
34070 @@ -966,10 +966,10 @@ static int pcnet32_loopback_test(struct
34071
34072 /* set int loopback in CSR15 */
34073 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
34074 - lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
34075 + lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
34076
34077 teststatus = cpu_to_le16(0x8000);
34078 - lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
34079 + lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
34080
34081 /* Check status of descriptors */
34082 for (x = 0; x < numbuffs; x++) {
34083 @@ -990,7 +990,7 @@ static int pcnet32_loopback_test(struct
34084 }
34085 }
34086
34087 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34088 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34089 wmb();
34090 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
34091 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
34092 @@ -1039,7 +1039,7 @@ static int pcnet32_loopback_test(struct
34093 pcnet32_restart(dev, CSR0_NORMAL);
34094 } else {
34095 pcnet32_purge_rx_ring(dev);
34096 - lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
34097 + lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
34098 }
34099 spin_unlock_irqrestore(&lp->lock, flags);
34100
34101 @@ -1049,7 +1049,7 @@ static int pcnet32_loopback_test(struct
34102 static void pcnet32_led_blink_callback(struct net_device *dev)
34103 {
34104 struct pcnet32_private *lp = netdev_priv(dev);
34105 - struct pcnet32_access *a = &lp->a;
34106 + struct pcnet32_access *a = lp->a;
34107 ulong ioaddr = dev->base_addr;
34108 unsigned long flags;
34109 int i;
34110 @@ -1066,7 +1066,7 @@ static void pcnet32_led_blink_callback(s
34111 static int pcnet32_phys_id(struct net_device *dev, u32 data)
34112 {
34113 struct pcnet32_private *lp = netdev_priv(dev);
34114 - struct pcnet32_access *a = &lp->a;
34115 + struct pcnet32_access *a = lp->a;
34116 ulong ioaddr = dev->base_addr;
34117 unsigned long flags;
34118 int i, regs[4];
34119 @@ -1112,7 +1112,7 @@ static int pcnet32_suspend(struct net_de
34120 {
34121 int csr5;
34122 struct pcnet32_private *lp = netdev_priv(dev);
34123 - struct pcnet32_access *a = &lp->a;
34124 + struct pcnet32_access *a = lp->a;
34125 ulong ioaddr = dev->base_addr;
34126 int ticks;
34127
34128 @@ -1388,8 +1388,8 @@ static int pcnet32_poll(struct napi_stru
34129 spin_lock_irqsave(&lp->lock, flags);
34130 if (pcnet32_tx(dev)) {
34131 /* reset the chip to clear the error condition, then restart */
34132 - lp->a.reset(ioaddr);
34133 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34134 + lp->a->reset(ioaddr);
34135 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34136 pcnet32_restart(dev, CSR0_START);
34137 netif_wake_queue(dev);
34138 }
34139 @@ -1401,12 +1401,12 @@ static int pcnet32_poll(struct napi_stru
34140 __napi_complete(napi);
34141
34142 /* clear interrupt masks */
34143 - val = lp->a.read_csr(ioaddr, CSR3);
34144 + val = lp->a->read_csr(ioaddr, CSR3);
34145 val &= 0x00ff;
34146 - lp->a.write_csr(ioaddr, CSR3, val);
34147 + lp->a->write_csr(ioaddr, CSR3, val);
34148
34149 /* Set interrupt enable. */
34150 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
34151 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
34152
34153 spin_unlock_irqrestore(&lp->lock, flags);
34154 }
34155 @@ -1429,7 +1429,7 @@ static void pcnet32_get_regs(struct net_
34156 int i, csr0;
34157 u16 *buff = ptr;
34158 struct pcnet32_private *lp = netdev_priv(dev);
34159 - struct pcnet32_access *a = &lp->a;
34160 + struct pcnet32_access *a = lp->a;
34161 ulong ioaddr = dev->base_addr;
34162 unsigned long flags;
34163
34164 @@ -1466,9 +1466,9 @@ static void pcnet32_get_regs(struct net_
34165 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
34166 if (lp->phymask & (1 << j)) {
34167 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
34168 - lp->a.write_bcr(ioaddr, 33,
34169 + lp->a->write_bcr(ioaddr, 33,
34170 (j << 5) | i);
34171 - *buff++ = lp->a.read_bcr(ioaddr, 34);
34172 + *buff++ = lp->a->read_bcr(ioaddr, 34);
34173 }
34174 }
34175 }
34176 @@ -1858,7 +1858,7 @@ pcnet32_probe1(unsigned long ioaddr, int
34177 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
34178 lp->options |= PCNET32_PORT_FD;
34179
34180 - lp->a = *a;
34181 + lp->a = a;
34182
34183 /* prior to register_netdev, dev->name is not yet correct */
34184 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
34185 @@ -1917,7 +1917,7 @@ pcnet32_probe1(unsigned long ioaddr, int
34186 if (lp->mii) {
34187 /* lp->phycount and lp->phymask are set to 0 by memset above */
34188
34189 - lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
34190 + lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
34191 /* scan for PHYs */
34192 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
34193 unsigned short id1, id2;
34194 @@ -1938,7 +1938,7 @@ pcnet32_probe1(unsigned long ioaddr, int
34195 "Found PHY %04x:%04x at address %d.\n",
34196 id1, id2, i);
34197 }
34198 - lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
34199 + lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
34200 if (lp->phycount > 1) {
34201 lp->options |= PCNET32_PORT_MII;
34202 }
34203 @@ -2109,10 +2109,10 @@ static int pcnet32_open(struct net_devic
34204 }
34205
34206 /* Reset the PCNET32 */
34207 - lp->a.reset(ioaddr);
34208 + lp->a->reset(ioaddr);
34209
34210 /* switch pcnet32 to 32bit mode */
34211 - lp->a.write_bcr(ioaddr, 20, 2);
34212 + lp->a->write_bcr(ioaddr, 20, 2);
34213
34214 if (netif_msg_ifup(lp))
34215 printk(KERN_DEBUG
34216 @@ -2122,14 +2122,14 @@ static int pcnet32_open(struct net_devic
34217 (u32) (lp->init_dma_addr));
34218
34219 /* set/reset autoselect bit */
34220 - val = lp->a.read_bcr(ioaddr, 2) & ~2;
34221 + val = lp->a->read_bcr(ioaddr, 2) & ~2;
34222 if (lp->options & PCNET32_PORT_ASEL)
34223 val |= 2;
34224 - lp->a.write_bcr(ioaddr, 2, val);
34225 + lp->a->write_bcr(ioaddr, 2, val);
34226
34227 /* handle full duplex setting */
34228 if (lp->mii_if.full_duplex) {
34229 - val = lp->a.read_bcr(ioaddr, 9) & ~3;
34230 + val = lp->a->read_bcr(ioaddr, 9) & ~3;
34231 if (lp->options & PCNET32_PORT_FD) {
34232 val |= 1;
34233 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
34234 @@ -2139,14 +2139,14 @@ static int pcnet32_open(struct net_devic
34235 if (lp->chip_version == 0x2627)
34236 val |= 3;
34237 }
34238 - lp->a.write_bcr(ioaddr, 9, val);
34239 + lp->a->write_bcr(ioaddr, 9, val);
34240 }
34241
34242 /* set/reset GPSI bit in test register */
34243 - val = lp->a.read_csr(ioaddr, 124) & ~0x10;
34244 + val = lp->a->read_csr(ioaddr, 124) & ~0x10;
34245 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
34246 val |= 0x10;
34247 - lp->a.write_csr(ioaddr, 124, val);
34248 + lp->a->write_csr(ioaddr, 124, val);
34249
34250 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
34251 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
34252 @@ -2167,24 +2167,24 @@ static int pcnet32_open(struct net_devic
34253 * duplex, and/or enable auto negotiation, and clear DANAS
34254 */
34255 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
34256 - lp->a.write_bcr(ioaddr, 32,
34257 - lp->a.read_bcr(ioaddr, 32) | 0x0080);
34258 + lp->a->write_bcr(ioaddr, 32,
34259 + lp->a->read_bcr(ioaddr, 32) | 0x0080);
34260 /* disable Auto Negotiation, set 10Mpbs, HD */
34261 - val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
34262 + val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
34263 if (lp->options & PCNET32_PORT_FD)
34264 val |= 0x10;
34265 if (lp->options & PCNET32_PORT_100)
34266 val |= 0x08;
34267 - lp->a.write_bcr(ioaddr, 32, val);
34268 + lp->a->write_bcr(ioaddr, 32, val);
34269 } else {
34270 if (lp->options & PCNET32_PORT_ASEL) {
34271 - lp->a.write_bcr(ioaddr, 32,
34272 - lp->a.read_bcr(ioaddr,
34273 + lp->a->write_bcr(ioaddr, 32,
34274 + lp->a->read_bcr(ioaddr,
34275 32) | 0x0080);
34276 /* enable auto negotiate, setup, disable fd */
34277 - val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
34278 + val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
34279 val |= 0x20;
34280 - lp->a.write_bcr(ioaddr, 32, val);
34281 + lp->a->write_bcr(ioaddr, 32, val);
34282 }
34283 }
34284 } else {
34285 @@ -2197,10 +2197,10 @@ static int pcnet32_open(struct net_devic
34286 * There is really no good other way to handle multiple PHYs
34287 * other than turning off all automatics
34288 */
34289 - val = lp->a.read_bcr(ioaddr, 2);
34290 - lp->a.write_bcr(ioaddr, 2, val & ~2);
34291 - val = lp->a.read_bcr(ioaddr, 32);
34292 - lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
34293 + val = lp->a->read_bcr(ioaddr, 2);
34294 + lp->a->write_bcr(ioaddr, 2, val & ~2);
34295 + val = lp->a->read_bcr(ioaddr, 32);
34296 + lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
34297
34298 if (!(lp->options & PCNET32_PORT_ASEL)) {
34299 /* setup ecmd */
34300 @@ -2210,7 +2210,7 @@ static int pcnet32_open(struct net_devic
34301 ecmd.speed =
34302 lp->
34303 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
34304 - bcr9 = lp->a.read_bcr(ioaddr, 9);
34305 + bcr9 = lp->a->read_bcr(ioaddr, 9);
34306
34307 if (lp->options & PCNET32_PORT_FD) {
34308 ecmd.duplex = DUPLEX_FULL;
34309 @@ -2219,7 +2219,7 @@ static int pcnet32_open(struct net_devic
34310 ecmd.duplex = DUPLEX_HALF;
34311 bcr9 |= ~(1 << 0);
34312 }
34313 - lp->a.write_bcr(ioaddr, 9, bcr9);
34314 + lp->a->write_bcr(ioaddr, 9, bcr9);
34315 }
34316
34317 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
34318 @@ -2252,9 +2252,9 @@ static int pcnet32_open(struct net_devic
34319
34320 #ifdef DO_DXSUFLO
34321 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
34322 - val = lp->a.read_csr(ioaddr, CSR3);
34323 + val = lp->a->read_csr(ioaddr, CSR3);
34324 val |= 0x40;
34325 - lp->a.write_csr(ioaddr, CSR3, val);
34326 + lp->a->write_csr(ioaddr, CSR3, val);
34327 }
34328 #endif
34329
34330 @@ -2270,11 +2270,11 @@ static int pcnet32_open(struct net_devic
34331 napi_enable(&lp->napi);
34332
34333 /* Re-initialize the PCNET32, and start it when done. */
34334 - lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
34335 - lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
34336 + lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
34337 + lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
34338
34339 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34340 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
34341 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34342 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
34343
34344 netif_start_queue(dev);
34345
34346 @@ -2286,20 +2286,20 @@ static int pcnet32_open(struct net_devic
34347
34348 i = 0;
34349 while (i++ < 100)
34350 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
34351 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
34352 break;
34353 /*
34354 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
34355 * reports that doing so triggers a bug in the '974.
34356 */
34357 - lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
34358 + lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
34359
34360 if (netif_msg_ifup(lp))
34361 printk(KERN_DEBUG
34362 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
34363 dev->name, i,
34364 (u32) (lp->init_dma_addr),
34365 - lp->a.read_csr(ioaddr, CSR0));
34366 + lp->a->read_csr(ioaddr, CSR0));
34367
34368 spin_unlock_irqrestore(&lp->lock, flags);
34369
34370 @@ -2313,7 +2313,7 @@ static int pcnet32_open(struct net_devic
34371 * Switch back to 16bit mode to avoid problems with dumb
34372 * DOS packet driver after a warm reboot
34373 */
34374 - lp->a.write_bcr(ioaddr, 20, 4);
34375 + lp->a->write_bcr(ioaddr, 20, 4);
34376
34377 err_free_irq:
34378 spin_unlock_irqrestore(&lp->lock, flags);
34379 @@ -2420,7 +2420,7 @@ static void pcnet32_restart(struct net_d
34380
34381 /* wait for stop */
34382 for (i = 0; i < 100; i++)
34383 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
34384 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
34385 break;
34386
34387 if (i >= 100 && netif_msg_drv(lp))
34388 @@ -2433,13 +2433,13 @@ static void pcnet32_restart(struct net_d
34389 return;
34390
34391 /* ReInit Ring */
34392 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
34393 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
34394 i = 0;
34395 while (i++ < 1000)
34396 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
34397 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
34398 break;
34399
34400 - lp->a.write_csr(ioaddr, CSR0, csr0_bits);
34401 + lp->a->write_csr(ioaddr, CSR0, csr0_bits);
34402 }
34403
34404 static void pcnet32_tx_timeout(struct net_device *dev)
34405 @@ -2452,8 +2452,8 @@ static void pcnet32_tx_timeout(struct ne
34406 if (pcnet32_debug & NETIF_MSG_DRV)
34407 printk(KERN_ERR
34408 "%s: transmit timed out, status %4.4x, resetting.\n",
34409 - dev->name, lp->a.read_csr(ioaddr, CSR0));
34410 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
34411 + dev->name, lp->a->read_csr(ioaddr, CSR0));
34412 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
34413 dev->stats.tx_errors++;
34414 if (netif_msg_tx_err(lp)) {
34415 int i;
34416 @@ -2497,7 +2497,7 @@ static netdev_tx_t pcnet32_start_xmit(st
34417 if (netif_msg_tx_queued(lp)) {
34418 printk(KERN_DEBUG
34419 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
34420 - dev->name, lp->a.read_csr(ioaddr, CSR0));
34421 + dev->name, lp->a->read_csr(ioaddr, CSR0));
34422 }
34423
34424 /* Default status -- will not enable Successful-TxDone
34425 @@ -2528,7 +2528,7 @@ static netdev_tx_t pcnet32_start_xmit(st
34426 dev->stats.tx_bytes += skb->len;
34427
34428 /* Trigger an immediate send poll. */
34429 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
34430 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
34431
34432 dev->trans_start = jiffies;
34433
34434 @@ -2555,18 +2555,18 @@ pcnet32_interrupt(int irq, void *dev_id)
34435
34436 spin_lock(&lp->lock);
34437
34438 - csr0 = lp->a.read_csr(ioaddr, CSR0);
34439 + csr0 = lp->a->read_csr(ioaddr, CSR0);
34440 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
34441 if (csr0 == 0xffff) {
34442 break; /* PCMCIA remove happened */
34443 }
34444 /* Acknowledge all of the current interrupt sources ASAP. */
34445 - lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
34446 + lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
34447
34448 if (netif_msg_intr(lp))
34449 printk(KERN_DEBUG
34450 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
34451 - dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
34452 + dev->name, csr0, lp->a->read_csr(ioaddr, CSR0));
34453
34454 /* Log misc errors. */
34455 if (csr0 & 0x4000)
34456 @@ -2595,19 +2595,19 @@ pcnet32_interrupt(int irq, void *dev_id)
34457 if (napi_schedule_prep(&lp->napi)) {
34458 u16 val;
34459 /* set interrupt masks */
34460 - val = lp->a.read_csr(ioaddr, CSR3);
34461 + val = lp->a->read_csr(ioaddr, CSR3);
34462 val |= 0x5f00;
34463 - lp->a.write_csr(ioaddr, CSR3, val);
34464 + lp->a->write_csr(ioaddr, CSR3, val);
34465
34466 __napi_schedule(&lp->napi);
34467 break;
34468 }
34469 - csr0 = lp->a.read_csr(ioaddr, CSR0);
34470 + csr0 = lp->a->read_csr(ioaddr, CSR0);
34471 }
34472
34473 if (netif_msg_intr(lp))
34474 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
34475 - dev->name, lp->a.read_csr(ioaddr, CSR0));
34476 + dev->name, lp->a->read_csr(ioaddr, CSR0));
34477
34478 spin_unlock(&lp->lock);
34479
34480 @@ -2627,21 +2627,21 @@ static int pcnet32_close(struct net_devi
34481
34482 spin_lock_irqsave(&lp->lock, flags);
34483
34484 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
34485 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
34486
34487 if (netif_msg_ifdown(lp))
34488 printk(KERN_DEBUG
34489 "%s: Shutting down ethercard, status was %2.2x.\n",
34490 - dev->name, lp->a.read_csr(ioaddr, CSR0));
34491 + dev->name, lp->a->read_csr(ioaddr, CSR0));
34492
34493 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
34494 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
34495 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
34496
34497 /*
34498 * Switch back to 16bit mode to avoid problems with dumb
34499 * DOS packet driver after a warm reboot
34500 */
34501 - lp->a.write_bcr(ioaddr, 20, 4);
34502 + lp->a->write_bcr(ioaddr, 20, 4);
34503
34504 spin_unlock_irqrestore(&lp->lock, flags);
34505
34506 @@ -2664,7 +2664,7 @@ static struct net_device_stats *pcnet32_
34507 unsigned long flags;
34508
34509 spin_lock_irqsave(&lp->lock, flags);
34510 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
34511 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
34512 spin_unlock_irqrestore(&lp->lock, flags);
34513
34514 return &dev->stats;
34515 @@ -2686,10 +2686,10 @@ static void pcnet32_load_multicast(struc
34516 if (dev->flags & IFF_ALLMULTI) {
34517 ib->filter[0] = cpu_to_le32(~0U);
34518 ib->filter[1] = cpu_to_le32(~0U);
34519 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
34520 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
34521 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
34522 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
34523 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
34524 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
34525 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
34526 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
34527 return;
34528 }
34529 /* clear the multicast filter */
34530 @@ -2710,7 +2710,7 @@ static void pcnet32_load_multicast(struc
34531 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
34532 }
34533 for (i = 0; i < 4; i++)
34534 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
34535 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
34536 le16_to_cpu(mcast_table[i]));
34537 return;
34538 }
34539 @@ -2726,7 +2726,7 @@ static void pcnet32_set_multicast_list(s
34540
34541 spin_lock_irqsave(&lp->lock, flags);
34542 suspended = pcnet32_suspend(dev, &flags, 0);
34543 - csr15 = lp->a.read_csr(ioaddr, CSR15);
34544 + csr15 = lp->a->read_csr(ioaddr, CSR15);
34545 if (dev->flags & IFF_PROMISC) {
34546 /* Log any net taps. */
34547 if (netif_msg_hw(lp))
34548 @@ -2735,21 +2735,21 @@ static void pcnet32_set_multicast_list(s
34549 lp->init_block->mode =
34550 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
34551 7);
34552 - lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
34553 + lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
34554 } else {
34555 lp->init_block->mode =
34556 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
34557 - lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
34558 + lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
34559 pcnet32_load_multicast(dev);
34560 }
34561
34562 if (suspended) {
34563 int csr5;
34564 /* clear SUSPEND (SPND) - CSR5 bit 0 */
34565 - csr5 = lp->a.read_csr(ioaddr, CSR5);
34566 - lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
34567 + csr5 = lp->a->read_csr(ioaddr, CSR5);
34568 + lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
34569 } else {
34570 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
34571 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
34572 pcnet32_restart(dev, CSR0_NORMAL);
34573 netif_wake_queue(dev);
34574 }
34575 @@ -2767,8 +2767,8 @@ static int mdio_read(struct net_device *
34576 if (!lp->mii)
34577 return 0;
34578
34579 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34580 - val_out = lp->a.read_bcr(ioaddr, 34);
34581 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34582 + val_out = lp->a->read_bcr(ioaddr, 34);
34583
34584 return val_out;
34585 }
34586 @@ -2782,8 +2782,8 @@ static void mdio_write(struct net_device
34587 if (!lp->mii)
34588 return;
34589
34590 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34591 - lp->a.write_bcr(ioaddr, 34, val);
34592 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34593 + lp->a->write_bcr(ioaddr, 34, val);
34594 }
34595
34596 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34597 @@ -2862,7 +2862,7 @@ static void pcnet32_check_media(struct n
34598 curr_link = mii_link_ok(&lp->mii_if);
34599 } else {
34600 ulong ioaddr = dev->base_addr; /* card base I/O address */
34601 - curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
34602 + curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
34603 }
34604 if (!curr_link) {
34605 if (prev_link || verbose) {
34606 @@ -2887,13 +2887,13 @@ static void pcnet32_check_media(struct n
34607 (ecmd.duplex ==
34608 DUPLEX_FULL) ? "full" : "half");
34609 }
34610 - bcr9 = lp->a.read_bcr(dev->base_addr, 9);
34611 + bcr9 = lp->a->read_bcr(dev->base_addr, 9);
34612 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
34613 if (lp->mii_if.full_duplex)
34614 bcr9 |= (1 << 0);
34615 else
34616 bcr9 &= ~(1 << 0);
34617 - lp->a.write_bcr(dev->base_addr, 9, bcr9);
34618 + lp->a->write_bcr(dev->base_addr, 9, bcr9);
34619 }
34620 } else {
34621 if (netif_msg_link(lp))
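
The pcnet32 hunks above are all the same mechanical change: the driver's private data used to hold its own writable copy of the register-access table (hence lp->a.read_csr, lp->a.write_csr), and the patch turns that member into a pointer to one shared table, so every call site becomes lp->a->read_csr and friends. A minimal standalone sketch of the resulting layout, where hw_access, hw_private and csr0_status are simplified stand-ins for the driver's real identifiers:

#include <stdint.h>

/* Illustrative stand-ins for the driver's access table and private data. */
struct hw_access {
    uint16_t (*read_csr)(unsigned long ioaddr, int index);
    void     (*write_csr)(unsigned long ioaddr, int index, uint16_t val);
};

struct hw_private {
    const struct hw_access *a;   /* was: struct hw_access a;  (a writable per-device copy) */
};

static uint16_t csr0_status(struct hw_private *lp, unsigned long ioaddr)
{
    /* call sites change from lp->a.read_csr(...) to lp->a->read_csr(...) */
    return lp->a->read_csr(ioaddr, 0);
}

Keeping a single shared table behind a const pointer lets the function pointers sit in read-only memory instead of being duplicated into each device's writable private area.
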
34622 diff -urNp linux-2.6.32.45/drivers/net/tg3.h linux-2.6.32.45/drivers/net/tg3.h
34623 --- linux-2.6.32.45/drivers/net/tg3.h 2011-03-27 14:31:47.000000000 -0400
34624 +++ linux-2.6.32.45/drivers/net/tg3.h 2011-04-17 15:56:46.000000000 -0400
34625 @@ -95,6 +95,7 @@
34626 #define CHIPREV_ID_5750_A0 0x4000
34627 #define CHIPREV_ID_5750_A1 0x4001
34628 #define CHIPREV_ID_5750_A3 0x4003
34629 +#define CHIPREV_ID_5750_C1 0x4201
34630 #define CHIPREV_ID_5750_C2 0x4202
34631 #define CHIPREV_ID_5752_A0_HW 0x5000
34632 #define CHIPREV_ID_5752_A0 0x6000
34633 diff -urNp linux-2.6.32.45/drivers/net/tokenring/abyss.c linux-2.6.32.45/drivers/net/tokenring/abyss.c
34634 --- linux-2.6.32.45/drivers/net/tokenring/abyss.c 2011-03-27 14:31:47.000000000 -0400
34635 +++ linux-2.6.32.45/drivers/net/tokenring/abyss.c 2011-08-05 20:33:55.000000000 -0400
34636 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
34637
34638 static int __init abyss_init (void)
34639 {
34640 - abyss_netdev_ops = tms380tr_netdev_ops;
34641 + pax_open_kernel();
34642 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34643
34644 - abyss_netdev_ops.ndo_open = abyss_open;
34645 - abyss_netdev_ops.ndo_stop = abyss_close;
34646 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
34647 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
34648 + pax_close_kernel();
34649
34650 return pci_register_driver(&abyss_driver);
34651 }
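
The abyss_init() change above (repeated below for madgemc.c, proteon.c and skisa.c) follows one pattern: the per-driver net_device_ops table is left read-only at runtime under this patch's constification, so its one-time setup is bracketed by pax_open_kernel()/pax_close_kernel() and the members are written through a void ** cast. A standalone sketch of that pattern, with the PaX helpers stubbed as no-ops so it compiles on its own and with simplified struct and function names:

#include <string.h>

struct netdev_ops_sketch {
    int (*ndo_open)(void);
    int (*ndo_stop)(void);
};

/* In the real patch these briefly lift and then restore kernel write protection. */
static void pax_open_kernel(void)  { }
static void pax_close_kernel(void) { }

static const struct netdev_ops_sketch generic_ops;  /* stands in for tms380tr_netdev_ops */
static struct netdev_ops_sketch driver_ops;         /* read-only at runtime under the patch */

static int drv_open(void) { return 0; }
static int drv_stop(void) { return 0; }

static void drv_init_ops(void)
{
    pax_open_kernel();
    memcpy(&driver_ops, &generic_ops, sizeof(driver_ops));
    driver_ops.ndo_open = drv_open;   /* the patch writes via *(void **)&ops.ndo_open */
    driver_ops.ndo_stop = drv_stop;
    pax_close_kernel();
}
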
34652 diff -urNp linux-2.6.32.45/drivers/net/tokenring/madgemc.c linux-2.6.32.45/drivers/net/tokenring/madgemc.c
34653 --- linux-2.6.32.45/drivers/net/tokenring/madgemc.c 2011-03-27 14:31:47.000000000 -0400
34654 +++ linux-2.6.32.45/drivers/net/tokenring/madgemc.c 2011-08-05 20:33:55.000000000 -0400
34655 @@ -755,9 +755,11 @@ static struct mca_driver madgemc_driver
34656
34657 static int __init madgemc_init (void)
34658 {
34659 - madgemc_netdev_ops = tms380tr_netdev_ops;
34660 - madgemc_netdev_ops.ndo_open = madgemc_open;
34661 - madgemc_netdev_ops.ndo_stop = madgemc_close;
34662 + pax_open_kernel();
34663 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34664 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
34665 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
34666 + pax_close_kernel();
34667
34668 return mca_register_driver (&madgemc_driver);
34669 }
34670 diff -urNp linux-2.6.32.45/drivers/net/tokenring/proteon.c linux-2.6.32.45/drivers/net/tokenring/proteon.c
34671 --- linux-2.6.32.45/drivers/net/tokenring/proteon.c 2011-03-27 14:31:47.000000000 -0400
34672 +++ linux-2.6.32.45/drivers/net/tokenring/proteon.c 2011-08-05 20:33:55.000000000 -0400
34673 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
34674 struct platform_device *pdev;
34675 int i, num = 0, err = 0;
34676
34677 - proteon_netdev_ops = tms380tr_netdev_ops;
34678 - proteon_netdev_ops.ndo_open = proteon_open;
34679 - proteon_netdev_ops.ndo_stop = tms380tr_close;
34680 + pax_open_kernel();
34681 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34682 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
34683 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
34684 + pax_close_kernel();
34685
34686 err = platform_driver_register(&proteon_driver);
34687 if (err)
34688 diff -urNp linux-2.6.32.45/drivers/net/tokenring/skisa.c linux-2.6.32.45/drivers/net/tokenring/skisa.c
34689 --- linux-2.6.32.45/drivers/net/tokenring/skisa.c 2011-03-27 14:31:47.000000000 -0400
34690 +++ linux-2.6.32.45/drivers/net/tokenring/skisa.c 2011-08-05 20:33:55.000000000 -0400
34691 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
34692 struct platform_device *pdev;
34693 int i, num = 0, err = 0;
34694
34695 - sk_isa_netdev_ops = tms380tr_netdev_ops;
34696 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
34697 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34698 + pax_open_kernel();
34699 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34700 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
34701 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34702 + pax_close_kernel();
34703
34704 err = platform_driver_register(&sk_isa_driver);
34705 if (err)
34706 diff -urNp linux-2.6.32.45/drivers/net/tulip/de2104x.c linux-2.6.32.45/drivers/net/tulip/de2104x.c
34707 --- linux-2.6.32.45/drivers/net/tulip/de2104x.c 2011-03-27 14:31:47.000000000 -0400
34708 +++ linux-2.6.32.45/drivers/net/tulip/de2104x.c 2011-05-16 21:46:57.000000000 -0400
34709 @@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_i
34710 struct de_srom_info_leaf *il;
34711 void *bufp;
34712
34713 + pax_track_stack();
34714 +
34715 /* download entire eeprom */
34716 for (i = 0; i < DE_EEPROM_WORDS; i++)
34717 ((__le16 *)ee_data)[i] =
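
The pax_track_stack() call added above recurs throughout this patch: it is inserted near the top of functions that declare unusually large on-stack buffers, and under PaX's stack-sanitising feature it records how deeply the kernel stack has been used so that region can be cleared before returning to userspace. A standalone sketch with the helper stubbed as a no-op (read_whole_eeprom and the 256-byte buffer are illustrative):

#include <string.h>

/* In the PaX patch this updates the task's deepest-stack-use mark; here it is a stub. */
static void pax_track_stack(void) { }

static unsigned int read_whole_eeprom(unsigned char *out, unsigned int len)
{
    unsigned char ee_data[256];   /* large on-stack scratch buffer */

    pax_track_stack();            /* added right after the local declarations */

    memset(ee_data, 0, sizeof(ee_data));
    if (len > sizeof(ee_data))
        len = sizeof(ee_data);
    memcpy(out, ee_data, len);
    return len;
}
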
34718 diff -urNp linux-2.6.32.45/drivers/net/tulip/de4x5.c linux-2.6.32.45/drivers/net/tulip/de4x5.c
34719 --- linux-2.6.32.45/drivers/net/tulip/de4x5.c 2011-03-27 14:31:47.000000000 -0400
34720 +++ linux-2.6.32.45/drivers/net/tulip/de4x5.c 2011-04-17 15:56:46.000000000 -0400
34721 @@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, stru
34722 for (i=0; i<ETH_ALEN; i++) {
34723 tmp.addr[i] = dev->dev_addr[i];
34724 }
34725 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34726 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34727 break;
34728
34729 case DE4X5_SET_HWADDR: /* Set the hardware address */
34730 @@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, stru
34731 spin_lock_irqsave(&lp->lock, flags);
34732 memcpy(&statbuf, &lp->pktStats, ioc->len);
34733 spin_unlock_irqrestore(&lp->lock, flags);
34734 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
34735 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
34736 return -EFAULT;
34737 break;
34738 }
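
Both de4x5 ioctl hunks add the same guard: the caller-supplied ioc->len is checked against the size of the kernel-side buffer before copy_to_user() runs, so an oversized request can no longer copy adjacent stack memory out to userspace. A standalone sketch of the guarded copy, with copy_to_user() stubbed via memcpy so it builds outside the kernel (export_hwaddr and the 6-byte buffer are illustrative, and EFAULT is spelled out as a literal):

#include <stddef.h>
#include <string.h>

/* Stand-in for the kernel helper; the real one also validates the user pointer. */
static unsigned long copy_to_user(void *to, const void *from, unsigned long n)
{
    memcpy(to, from, n);
    return 0;   /* 0 means every byte was copied */
}

static int export_hwaddr(void *user_buf, size_t user_len)
{
    unsigned char addr[6];   /* ETH_ALEN-sized kernel buffer */

    memset(addr, 0, sizeof(addr));
    /* without the length test, user_len > sizeof(addr) would leak stack bytes */
    if (user_len > sizeof(addr) || copy_to_user(user_buf, addr, user_len))
        return -14;   /* -EFAULT */
    return 0;
}
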
34739 diff -urNp linux-2.6.32.45/drivers/net/usb/hso.c linux-2.6.32.45/drivers/net/usb/hso.c
34740 --- linux-2.6.32.45/drivers/net/usb/hso.c 2011-03-27 14:31:47.000000000 -0400
34741 +++ linux-2.6.32.45/drivers/net/usb/hso.c 2011-04-17 15:56:46.000000000 -0400
34742 @@ -71,7 +71,7 @@
34743 #include <asm/byteorder.h>
34744 #include <linux/serial_core.h>
34745 #include <linux/serial.h>
34746 -
34747 +#include <asm/local.h>
34748
34749 #define DRIVER_VERSION "1.2"
34750 #define MOD_AUTHOR "Option Wireless"
34751 @@ -258,7 +258,7 @@ struct hso_serial {
34752
34753 /* from usb_serial_port */
34754 struct tty_struct *tty;
34755 - int open_count;
34756 + local_t open_count;
34757 spinlock_t serial_lock;
34758
34759 int (*write_data) (struct hso_serial *serial);
34760 @@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_
34761 struct urb *urb;
34762
34763 urb = serial->rx_urb[0];
34764 - if (serial->open_count > 0) {
34765 + if (local_read(&serial->open_count) > 0) {
34766 count = put_rxbuf_data(urb, serial);
34767 if (count == -1)
34768 return;
34769 @@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_cal
34770 DUMP1(urb->transfer_buffer, urb->actual_length);
34771
34772 /* Anyone listening? */
34773 - if (serial->open_count == 0)
34774 + if (local_read(&serial->open_count) == 0)
34775 return;
34776
34777 if (status == 0) {
34778 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
34779 spin_unlock_irq(&serial->serial_lock);
34780
34781 /* check for port already opened, if not set the termios */
34782 - serial->open_count++;
34783 - if (serial->open_count == 1) {
34784 + if (local_inc_return(&serial->open_count) == 1) {
34785 tty->low_latency = 1;
34786 serial->rx_state = RX_IDLE;
34787 /* Force default termio settings */
34788 @@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_st
34789 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
34790 if (result) {
34791 hso_stop_serial_device(serial->parent);
34792 - serial->open_count--;
34793 + local_dec(&serial->open_count);
34794 kref_put(&serial->parent->ref, hso_serial_ref_free);
34795 }
34796 } else {
34797 @@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_
34798
34799 /* reset the rts and dtr */
34800 /* do the actual close */
34801 - serial->open_count--;
34802 + local_dec(&serial->open_count);
34803
34804 - if (serial->open_count <= 0) {
34805 - serial->open_count = 0;
34806 + if (local_read(&serial->open_count) <= 0) {
34807 + local_set(&serial->open_count, 0);
34808 spin_lock_irq(&serial->serial_lock);
34809 if (serial->tty == tty) {
34810 serial->tty->driver_data = NULL;
34811 @@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struc
34812
34813 /* the actual setup */
34814 spin_lock_irqsave(&serial->serial_lock, flags);
34815 - if (serial->open_count)
34816 + if (local_read(&serial->open_count))
34817 _hso_serial_set_termios(tty, old);
34818 else
34819 tty->termios = old;
34820 @@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interfa
34821 /* Start all serial ports */
34822 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
34823 if (serial_table[i] && (serial_table[i]->interface == iface)) {
34824 - if (dev2ser(serial_table[i])->open_count) {
34825 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
34826 result =
34827 hso_start_serial_device(serial_table[i], GFP_NOIO);
34828 hso_kick_transmit(dev2ser(serial_table[i]));
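
The hso changes above turn the tty open_count from a plain int into a local_t and replace the bare ++/-- with local_inc_return()/local_dec(), so the open/close bookkeeping is done with atomic read-modify-write operations instead of plain, potentially racy loads and stores. A standalone analogue using C11 atomics in place of local_t (serial_sketch and the commented setup/teardown steps are illustrative):

#include <stdatomic.h>

struct serial_sketch {
    atomic_long open_count;
};

static void serial_open(struct serial_sketch *s)
{
    /* local_inc_return() in the patch: only the first opener sets the port up */
    if (atomic_fetch_add(&s->open_count, 1) + 1 == 1) {
        /* start the device, apply default termios, ... */
    }
}

static void serial_close(struct serial_sketch *s)
{
    atomic_fetch_sub(&s->open_count, 1);      /* local_dec() */
    if (atomic_load(&s->open_count) <= 0) {
        atomic_store(&s->open_count, 0);      /* local_set(..., 0) in the patch */
        /* detach the tty and stop the device */
    }
}
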
34829 diff -urNp linux-2.6.32.45/drivers/net/vxge/vxge-config.h linux-2.6.32.45/drivers/net/vxge/vxge-config.h
34830 --- linux-2.6.32.45/drivers/net/vxge/vxge-config.h 2011-03-27 14:31:47.000000000 -0400
34831 +++ linux-2.6.32.45/drivers/net/vxge/vxge-config.h 2011-08-05 20:33:55.000000000 -0400
34832 @@ -474,7 +474,7 @@ struct vxge_hw_uld_cbs {
34833 void (*link_down)(struct __vxge_hw_device *devh);
34834 void (*crit_err)(struct __vxge_hw_device *devh,
34835 enum vxge_hw_event type, u64 ext_data);
34836 -};
34837 +} __no_const;
34838
34839 /*
34840 * struct __vxge_hw_blockpool_entry - Block private data structure
34841 diff -urNp linux-2.6.32.45/drivers/net/vxge/vxge-main.c linux-2.6.32.45/drivers/net/vxge/vxge-main.c
34842 --- linux-2.6.32.45/drivers/net/vxge/vxge-main.c 2011-03-27 14:31:47.000000000 -0400
34843 +++ linux-2.6.32.45/drivers/net/vxge/vxge-main.c 2011-05-16 21:46:57.000000000 -0400
34844 @@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_T
34845 struct sk_buff *completed[NR_SKB_COMPLETED];
34846 int more;
34847
34848 + pax_track_stack();
34849 +
34850 do {
34851 more = 0;
34852 skb_ptr = completed;
34853 @@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_conf
34854 u8 mtable[256] = {0}; /* CPU to vpath mapping */
34855 int index;
34856
34857 + pax_track_stack();
34858 +
34859 /*
34860 * Filling
34861 * - itable with bucket numbers
34862 diff -urNp linux-2.6.32.45/drivers/net/vxge/vxge-traffic.h linux-2.6.32.45/drivers/net/vxge/vxge-traffic.h
34863 --- linux-2.6.32.45/drivers/net/vxge/vxge-traffic.h 2011-03-27 14:31:47.000000000 -0400
34864 +++ linux-2.6.32.45/drivers/net/vxge/vxge-traffic.h 2011-08-05 20:33:55.000000000 -0400
34865 @@ -2123,7 +2123,7 @@ struct vxge_hw_mempool_cbs {
34866 struct vxge_hw_mempool_dma *dma_object,
34867 u32 index,
34868 u32 is_last);
34869 -};
34870 +} __no_const;
34871
34872 void
34873 __vxge_hw_mempool_destroy(
34874 diff -urNp linux-2.6.32.45/drivers/net/wan/cycx_x25.c linux-2.6.32.45/drivers/net/wan/cycx_x25.c
34875 --- linux-2.6.32.45/drivers/net/wan/cycx_x25.c 2011-03-27 14:31:47.000000000 -0400
34876 +++ linux-2.6.32.45/drivers/net/wan/cycx_x25.c 2011-05-16 21:46:57.000000000 -0400
34877 @@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned
34878 unsigned char hex[1024],
34879 * phex = hex;
34880
34881 + pax_track_stack();
34882 +
34883 if (len >= (sizeof(hex) / 2))
34884 len = (sizeof(hex) / 2) - 1;
34885
34886 diff -urNp linux-2.6.32.45/drivers/net/wan/hdlc_x25.c linux-2.6.32.45/drivers/net/wan/hdlc_x25.c
34887 --- linux-2.6.32.45/drivers/net/wan/hdlc_x25.c 2011-03-27 14:31:47.000000000 -0400
34888 +++ linux-2.6.32.45/drivers/net/wan/hdlc_x25.c 2011-08-05 20:33:55.000000000 -0400
34889 @@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
34890
34891 static int x25_open(struct net_device *dev)
34892 {
34893 - struct lapb_register_struct cb;
34894 + static struct lapb_register_struct cb = {
34895 + .connect_confirmation = x25_connected,
34896 + .connect_indication = x25_connected,
34897 + .disconnect_confirmation = x25_disconnected,
34898 + .disconnect_indication = x25_disconnected,
34899 + .data_indication = x25_data_indication,
34900 + .data_transmit = x25_data_transmit
34901 + };
34902 int result;
34903
34904 - cb.connect_confirmation = x25_connected;
34905 - cb.connect_indication = x25_connected;
34906 - cb.disconnect_confirmation = x25_disconnected;
34907 - cb.disconnect_indication = x25_disconnected;
34908 - cb.data_indication = x25_data_indication;
34909 - cb.data_transmit = x25_data_transmit;
34910 -
34911 result = lapb_register(dev, &cb);
34912 if (result != LAPB_OK)
34913 return result;
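
x25_open() used to fill a lapb_register_struct on the stack with the same six callbacks on every open; the patch replaces that with a function-local static table built once with designated initializers, dropping the per-call stores. A standalone sketch of the shape of that change (the two-callback struct and the register_callbacks() stub are illustrative; the real table keeps all six lapb callbacks):

struct callbacks_sketch {
    void (*connect_indication)(int reason);
    void (*disconnect_indication)(int reason);
};

static void on_connect(int reason)    { (void)reason; }
static void on_disconnect(int reason) { (void)reason; }

/* stand-in for lapb_register(); returns 0 on success */
static int register_callbacks(struct callbacks_sketch *cb)
{
    (void)cb;
    return 0;
}

static int proto_open(void)
{
    static struct callbacks_sketch cb = {
        .connect_indication    = on_connect,
        .disconnect_indication = on_disconnect,
    };
    return register_callbacks(&cb);
}
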
34914 diff -urNp linux-2.6.32.45/drivers/net/wimax/i2400m/usb-fw.c linux-2.6.32.45/drivers/net/wimax/i2400m/usb-fw.c
34915 --- linux-2.6.32.45/drivers/net/wimax/i2400m/usb-fw.c 2011-03-27 14:31:47.000000000 -0400
34916 +++ linux-2.6.32.45/drivers/net/wimax/i2400m/usb-fw.c 2011-05-16 21:46:57.000000000 -0400
34917 @@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
34918 int do_autopm = 1;
34919 DECLARE_COMPLETION_ONSTACK(notif_completion);
34920
34921 + pax_track_stack();
34922 +
34923 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
34924 i2400m, ack, ack_size);
34925 BUG_ON(_ack == i2400m->bm_ack_buf);
34926 diff -urNp linux-2.6.32.45/drivers/net/wireless/airo.c linux-2.6.32.45/drivers/net/wireless/airo.c
34927 --- linux-2.6.32.45/drivers/net/wireless/airo.c 2011-03-27 14:31:47.000000000 -0400
34928 +++ linux-2.6.32.45/drivers/net/wireless/airo.c 2011-05-16 21:46:57.000000000 -0400
34929 @@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
34930 BSSListElement * loop_net;
34931 BSSListElement * tmp_net;
34932
34933 + pax_track_stack();
34934 +
34935 /* Blow away current list of scan results */
34936 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
34937 list_move_tail (&loop_net->list, &ai->network_free_list);
34938 @@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *
34939 WepKeyRid wkr;
34940 int rc;
34941
34942 + pax_track_stack();
34943 +
34944 memset( &mySsid, 0, sizeof( mySsid ) );
34945 kfree (ai->flash);
34946 ai->flash = NULL;
34947 @@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct i
34948 __le32 *vals = stats.vals;
34949 int len;
34950
34951 + pax_track_stack();
34952 +
34953 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
34954 return -ENOMEM;
34955 data = (struct proc_data *)file->private_data;
34956 @@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct ino
34957 /* If doLoseSync is not 1, we won't do a Lose Sync */
34958 int doLoseSync = -1;
34959
34960 + pax_track_stack();
34961 +
34962 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
34963 return -ENOMEM;
34964 data = (struct proc_data *)file->private_data;
34965 @@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_de
34966 int i;
34967 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
34968
34969 + pax_track_stack();
34970 +
34971 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
34972 if (!qual)
34973 return -ENOMEM;
34974 @@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(str
34975 CapabilityRid cap_rid;
34976 __le32 *vals = stats_rid.vals;
34977
34978 + pax_track_stack();
34979 +
34980 /* Get stats out of the card */
34981 clear_bit(JOB_WSTATS, &local->jobs);
34982 if (local->power.event) {
34983 diff -urNp linux-2.6.32.45/drivers/net/wireless/ath/ath5k/debug.c linux-2.6.32.45/drivers/net/wireless/ath/ath5k/debug.c
34984 --- linux-2.6.32.45/drivers/net/wireless/ath/ath5k/debug.c 2011-03-27 14:31:47.000000000 -0400
34985 +++ linux-2.6.32.45/drivers/net/wireless/ath/ath5k/debug.c 2011-05-16 21:46:57.000000000 -0400
34986 @@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct f
34987 unsigned int v;
34988 u64 tsf;
34989
34990 + pax_track_stack();
34991 +
34992 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
34993 len += snprintf(buf+len, sizeof(buf)-len,
34994 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
34995 @@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct fi
34996 unsigned int len = 0;
34997 unsigned int i;
34998
34999 + pax_track_stack();
35000 +
35001 len += snprintf(buf+len, sizeof(buf)-len,
35002 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
35003
35004 diff -urNp linux-2.6.32.45/drivers/net/wireless/ath/ath9k/debug.c linux-2.6.32.45/drivers/net/wireless/ath/ath9k/debug.c
35005 --- linux-2.6.32.45/drivers/net/wireless/ath/ath9k/debug.c 2011-03-27 14:31:47.000000000 -0400
35006 +++ linux-2.6.32.45/drivers/net/wireless/ath/ath9k/debug.c 2011-05-16 21:46:57.000000000 -0400
35007 @@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struc
35008 char buf[512];
35009 unsigned int len = 0;
35010
35011 + pax_track_stack();
35012 +
35013 len += snprintf(buf + len, sizeof(buf) - len,
35014 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
35015 len += snprintf(buf + len, sizeof(buf) - len,
35016 @@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct fi
35017 int i;
35018 u8 addr[ETH_ALEN];
35019
35020 + pax_track_stack();
35021 +
35022 len += snprintf(buf + len, sizeof(buf) - len,
35023 "primary: %s (%s chan=%d ht=%d)\n",
35024 wiphy_name(sc->pri_wiphy->hw->wiphy),
35025 diff -urNp linux-2.6.32.45/drivers/net/wireless/b43/debugfs.c linux-2.6.32.45/drivers/net/wireless/b43/debugfs.c
35026 --- linux-2.6.32.45/drivers/net/wireless/b43/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35027 +++ linux-2.6.32.45/drivers/net/wireless/b43/debugfs.c 2011-04-17 15:56:46.000000000 -0400
35028 @@ -43,7 +43,7 @@ static struct dentry *rootdir;
35029 struct b43_debugfs_fops {
35030 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
35031 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
35032 - struct file_operations fops;
35033 + const struct file_operations fops;
35034 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
35035 size_t file_struct_offset;
35036 };
35037 diff -urNp linux-2.6.32.45/drivers/net/wireless/b43legacy/debugfs.c linux-2.6.32.45/drivers/net/wireless/b43legacy/debugfs.c
35038 --- linux-2.6.32.45/drivers/net/wireless/b43legacy/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35039 +++ linux-2.6.32.45/drivers/net/wireless/b43legacy/debugfs.c 2011-04-17 15:56:46.000000000 -0400
35040 @@ -44,7 +44,7 @@ static struct dentry *rootdir;
35041 struct b43legacy_debugfs_fops {
35042 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
35043 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
35044 - struct file_operations fops;
35045 + const struct file_operations fops;
35046 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
35047 size_t file_struct_offset;
35048 /* Take wl->irq_lock before calling read/write? */
35049 diff -urNp linux-2.6.32.45/drivers/net/wireless/ipw2x00/ipw2100.c linux-2.6.32.45/drivers/net/wireless/ipw2x00/ipw2100.c
35050 --- linux-2.6.32.45/drivers/net/wireless/ipw2x00/ipw2100.c 2011-03-27 14:31:47.000000000 -0400
35051 +++ linux-2.6.32.45/drivers/net/wireless/ipw2x00/ipw2100.c 2011-05-16 21:46:57.000000000 -0400
35052 @@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2
35053 int err;
35054 DECLARE_SSID_BUF(ssid);
35055
35056 + pax_track_stack();
35057 +
35058 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
35059
35060 if (ssid_len)
35061 @@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw210
35062 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
35063 int err;
35064
35065 + pax_track_stack();
35066 +
35067 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
35068 idx, keylen, len);
35069
35070 diff -urNp linux-2.6.32.45/drivers/net/wireless/ipw2x00/libipw_rx.c linux-2.6.32.45/drivers/net/wireless/ipw2x00/libipw_rx.c
35071 --- linux-2.6.32.45/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-03-27 14:31:47.000000000 -0400
35072 +++ linux-2.6.32.45/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-05-16 21:46:57.000000000 -0400
35073 @@ -1566,6 +1566,8 @@ static void libipw_process_probe_respons
35074 unsigned long flags;
35075 DECLARE_SSID_BUF(ssid);
35076
35077 + pax_track_stack();
35078 +
35079 LIBIPW_DEBUG_SCAN("'%s' (%pM"
35080 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
35081 print_ssid(ssid, info_element->data, info_element->len),
35082 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-1000.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-1000.c
35083 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-03-27 14:31:47.000000000 -0400
35084 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-04-17 15:56:46.000000000 -0400
35085 @@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib =
35086 },
35087 };
35088
35089 -static struct iwl_ops iwl1000_ops = {
35090 +static const struct iwl_ops iwl1000_ops = {
35091 .ucode = &iwl5000_ucode,
35092 .lib = &iwl1000_lib,
35093 .hcmd = &iwl5000_hcmd,
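
From here on, many hunks are one-word changes adding const to driver ops tables (iwl_ops above, and backlight_ops, sysfs_ops, dma_map_ops and embedded file_operations further down). The intent is the same each time: a table of function pointers that is only ever initialized statically is moved into read-only memory, so a later memory-corruption bug cannot redirect its entries. A minimal standalone sketch of the before/after (ops_sketch and the two callbacks are illustrative names):

struct ops_sketch {
    int  (*start)(void);
    void (*stop)(void);
};

static int  drv_start(void) { return 0; }
static void drv_stop(void)  { }

/* was: static struct ops_sketch drv_ops = { ... };   -- writable, lives in .data   */
static const struct ops_sketch drv_ops = {           /* read-only, lives in .rodata */
    .start = drv_start,
    .stop  = drv_stop,
};

static int drv_probe(void)
{
    return drv_ops.start();
}
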
35094 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl3945-base.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl3945-base.c
35095 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl3945-base.c 2011-03-27 14:31:47.000000000 -0400
35096 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl3945-base.c 2011-08-05 20:33:55.000000000 -0400
35097 @@ -3927,7 +3927,9 @@ static int iwl3945_pci_probe(struct pci_
35098 */
35099 if (iwl3945_mod_params.disable_hw_scan) {
35100 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
35101 - iwl3945_hw_ops.hw_scan = NULL;
35102 + pax_open_kernel();
35103 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
35104 + pax_close_kernel();
35105 }
35106
35107
35108 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-3945.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-3945.c
35109 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-03-27 14:31:47.000000000 -0400
35110 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-04-17 15:56:46.000000000 -0400
35111 @@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945
35112 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
35113 };
35114
35115 -static struct iwl_ops iwl3945_ops = {
35116 +static const struct iwl_ops iwl3945_ops = {
35117 .ucode = &iwl3945_ucode,
35118 .lib = &iwl3945_lib,
35119 .hcmd = &iwl3945_hcmd,
35120 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-4965.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-4965.c
35121 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-03-27 14:31:47.000000000 -0400
35122 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-04-17 15:56:46.000000000 -0400
35123 @@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib =
35124 },
35125 };
35126
35127 -static struct iwl_ops iwl4965_ops = {
35128 +static const struct iwl_ops iwl4965_ops = {
35129 .ucode = &iwl4965_ucode,
35130 .lib = &iwl4965_lib,
35131 .hcmd = &iwl4965_hcmd,
35132 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-5000.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-5000.c
35133 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-06-25 12:55:34.000000000 -0400
35134 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-06-25 12:56:37.000000000 -0400
35135 @@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib =
35136 },
35137 };
35138
35139 -struct iwl_ops iwl5000_ops = {
35140 +const struct iwl_ops iwl5000_ops = {
35141 .ucode = &iwl5000_ucode,
35142 .lib = &iwl5000_lib,
35143 .hcmd = &iwl5000_hcmd,
35144 .utils = &iwl5000_hcmd_utils,
35145 };
35146
35147 -static struct iwl_ops iwl5150_ops = {
35148 +static const struct iwl_ops iwl5150_ops = {
35149 .ucode = &iwl5000_ucode,
35150 .lib = &iwl5150_lib,
35151 .hcmd = &iwl5000_hcmd,
35152 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-6000.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-6000.c
35153 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-03-27 14:31:47.000000000 -0400
35154 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-04-17 15:56:46.000000000 -0400
35155 @@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000
35156 .calc_rssi = iwl5000_calc_rssi,
35157 };
35158
35159 -static struct iwl_ops iwl6000_ops = {
35160 +static const struct iwl_ops iwl6000_ops = {
35161 .ucode = &iwl5000_ucode,
35162 .lib = &iwl6000_lib,
35163 .hcmd = &iwl5000_hcmd,
35164 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn.c
35165 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-03-27 14:31:47.000000000 -0400
35166 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-08-05 20:33:55.000000000 -0400
35167 @@ -2911,7 +2911,9 @@ static int iwl_pci_probe(struct pci_dev
35168 if (iwl_debug_level & IWL_DL_INFO)
35169 dev_printk(KERN_DEBUG, &(pdev->dev),
35170 "Disabling hw_scan\n");
35171 - iwl_hw_ops.hw_scan = NULL;
35172 + pax_open_kernel();
35173 + *(void **)&iwl_hw_ops.hw_scan = NULL;
35174 + pax_close_kernel();
35175 }
35176
35177 hw = iwl_alloc_all(cfg, &iwl_hw_ops);
35178 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
35179 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-03-27 14:31:47.000000000 -0400
35180 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-05-16 21:46:57.000000000 -0400
35181 @@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, s
35182 u8 active_index = 0;
35183 s32 tpt = 0;
35184
35185 + pax_track_stack();
35186 +
35187 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
35188
35189 if (!ieee80211_is_data(hdr->frame_control) ||
35190 @@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_
35191 u8 valid_tx_ant = 0;
35192 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
35193
35194 + pax_track_stack();
35195 +
35196 /* Override starting rate (index 0) if needed for debug purposes */
35197 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
35198
35199 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debugfs.c
35200 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-03-27 14:31:47.000000000 -0400
35201 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-05-16 21:46:57.000000000 -0400
35202 @@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(str
35203 int pos = 0;
35204 const size_t bufsz = sizeof(buf);
35205
35206 + pax_track_stack();
35207 +
35208 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
35209 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
35210 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
35211 @@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
35212 const size_t bufsz = sizeof(buf);
35213 ssize_t ret;
35214
35215 + pax_track_stack();
35216 +
35217 for (i = 0; i < AC_NUM; i++) {
35218 pos += scnprintf(buf + pos, bufsz - pos,
35219 "\tcw_min\tcw_max\taifsn\ttxop\n");
35220 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debug.h linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debug.h
35221 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-03-27 14:31:47.000000000 -0400
35222 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-04-17 15:56:46.000000000 -0400
35223 @@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_pri
35224 #endif
35225
35226 #else
35227 -#define IWL_DEBUG(__priv, level, fmt, args...)
35228 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
35229 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
35230 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
35231 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
35232 void *p, u32 len)
35233 {}
35234 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-dev.h linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-dev.h
35235 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-03-27 14:31:47.000000000 -0400
35236 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-04-17 15:56:46.000000000 -0400
35237 @@ -68,7 +68,7 @@ struct iwl_tx_queue;
35238
35239 /* shared structures from iwl-5000.c */
35240 extern struct iwl_mod_params iwl50_mod_params;
35241 -extern struct iwl_ops iwl5000_ops;
35242 +extern const struct iwl_ops iwl5000_ops;
35243 extern struct iwl_ucode_ops iwl5000_ucode;
35244 extern struct iwl_lib_ops iwl5000_lib;
35245 extern struct iwl_hcmd_ops iwl5000_hcmd;
35246 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-2.6.32.45/drivers/net/wireless/iwmc3200wifi/debugfs.c
35247 --- linux-2.6.32.45/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35248 +++ linux-2.6.32.45/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-05-16 21:46:57.000000000 -0400
35249 @@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
35250 int buf_len = 512;
35251 size_t len = 0;
35252
35253 + pax_track_stack();
35254 +
35255 if (*ppos != 0)
35256 return 0;
35257 if (count < sizeof(buf))
35258 diff -urNp linux-2.6.32.45/drivers/net/wireless/libertas/debugfs.c linux-2.6.32.45/drivers/net/wireless/libertas/debugfs.c
35259 --- linux-2.6.32.45/drivers/net/wireless/libertas/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35260 +++ linux-2.6.32.45/drivers/net/wireless/libertas/debugfs.c 2011-04-17 15:56:46.000000000 -0400
35261 @@ -708,7 +708,7 @@ out_unlock:
35262 struct lbs_debugfs_files {
35263 const char *name;
35264 int perm;
35265 - struct file_operations fops;
35266 + const struct file_operations fops;
35267 };
35268
35269 static const struct lbs_debugfs_files debugfs_files[] = {
35270 diff -urNp linux-2.6.32.45/drivers/net/wireless/rndis_wlan.c linux-2.6.32.45/drivers/net/wireless/rndis_wlan.c
35271 --- linux-2.6.32.45/drivers/net/wireless/rndis_wlan.c 2011-03-27 14:31:47.000000000 -0400
35272 +++ linux-2.6.32.45/drivers/net/wireless/rndis_wlan.c 2011-04-17 15:56:46.000000000 -0400
35273 @@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbn
35274
35275 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
35276
35277 - if (rts_threshold < 0 || rts_threshold > 2347)
35278 + if (rts_threshold > 2347)
35279 rts_threshold = 2347;
35280
35281 tmp = cpu_to_le32(rts_threshold);
35282 diff -urNp linux-2.6.32.45/drivers/oprofile/buffer_sync.c linux-2.6.32.45/drivers/oprofile/buffer_sync.c
35283 --- linux-2.6.32.45/drivers/oprofile/buffer_sync.c 2011-03-27 14:31:47.000000000 -0400
35284 +++ linux-2.6.32.45/drivers/oprofile/buffer_sync.c 2011-04-17 15:56:46.000000000 -0400
35285 @@ -341,7 +341,7 @@ static void add_data(struct op_entry *en
35286 if (cookie == NO_COOKIE)
35287 offset = pc;
35288 if (cookie == INVALID_COOKIE) {
35289 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35290 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35291 offset = pc;
35292 }
35293 if (cookie != last_cookie) {
35294 @@ -385,14 +385,14 @@ add_sample(struct mm_struct *mm, struct
35295 /* add userspace sample */
35296
35297 if (!mm) {
35298 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
35299 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
35300 return 0;
35301 }
35302
35303 cookie = lookup_dcookie(mm, s->eip, &offset);
35304
35305 if (cookie == INVALID_COOKIE) {
35306 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35307 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35308 return 0;
35309 }
35310
35311 @@ -561,7 +561,7 @@ void sync_buffer(int cpu)
35312 /* ignore backtraces if failed to add a sample */
35313 if (state == sb_bt_start) {
35314 state = sb_bt_ignore;
35315 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
35316 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
35317 }
35318 }
35319 release_mm(mm);
35320 diff -urNp linux-2.6.32.45/drivers/oprofile/event_buffer.c linux-2.6.32.45/drivers/oprofile/event_buffer.c
35321 --- linux-2.6.32.45/drivers/oprofile/event_buffer.c 2011-03-27 14:31:47.000000000 -0400
35322 +++ linux-2.6.32.45/drivers/oprofile/event_buffer.c 2011-04-17 15:56:46.000000000 -0400
35323 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
35324 }
35325
35326 if (buffer_pos == buffer_size) {
35327 - atomic_inc(&oprofile_stats.event_lost_overflow);
35328 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
35329 return;
35330 }
35331
35332 diff -urNp linux-2.6.32.45/drivers/oprofile/oprof.c linux-2.6.32.45/drivers/oprofile/oprof.c
35333 --- linux-2.6.32.45/drivers/oprofile/oprof.c 2011-03-27 14:31:47.000000000 -0400
35334 +++ linux-2.6.32.45/drivers/oprofile/oprof.c 2011-04-17 15:56:46.000000000 -0400
35335 @@ -110,7 +110,7 @@ static void switch_worker(struct work_st
35336 if (oprofile_ops.switch_events())
35337 return;
35338
35339 - atomic_inc(&oprofile_stats.multiplex_counter);
35340 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
35341 start_switch_worker();
35342 }
35343
35344 diff -urNp linux-2.6.32.45/drivers/oprofile/oprofilefs.c linux-2.6.32.45/drivers/oprofile/oprofilefs.c
35345 --- linux-2.6.32.45/drivers/oprofile/oprofilefs.c 2011-03-27 14:31:47.000000000 -0400
35346 +++ linux-2.6.32.45/drivers/oprofile/oprofilefs.c 2011-04-17 15:56:46.000000000 -0400
35347 @@ -187,7 +187,7 @@ static const struct file_operations atom
35348
35349
35350 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
35351 - char const *name, atomic_t *val)
35352 + char const *name, atomic_unchecked_t *val)
35353 {
35354 struct dentry *d = __oprofilefs_create_file(sb, root, name,
35355 &atomic_ro_fops, 0444);
35356 diff -urNp linux-2.6.32.45/drivers/oprofile/oprofile_stats.c linux-2.6.32.45/drivers/oprofile/oprofile_stats.c
35357 --- linux-2.6.32.45/drivers/oprofile/oprofile_stats.c 2011-03-27 14:31:47.000000000 -0400
35358 +++ linux-2.6.32.45/drivers/oprofile/oprofile_stats.c 2011-04-17 15:56:46.000000000 -0400
35359 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
35360 cpu_buf->sample_invalid_eip = 0;
35361 }
35362
35363 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
35364 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
35365 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
35366 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
35367 - atomic_set(&oprofile_stats.multiplex_counter, 0);
35368 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
35369 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
35370 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
35371 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
35372 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
35373 }
35374
35375
35376 diff -urNp linux-2.6.32.45/drivers/oprofile/oprofile_stats.h linux-2.6.32.45/drivers/oprofile/oprofile_stats.h
35377 --- linux-2.6.32.45/drivers/oprofile/oprofile_stats.h 2011-03-27 14:31:47.000000000 -0400
35378 +++ linux-2.6.32.45/drivers/oprofile/oprofile_stats.h 2011-04-17 15:56:46.000000000 -0400
35379 @@ -13,11 +13,11 @@
35380 #include <asm/atomic.h>
35381
35382 struct oprofile_stat_struct {
35383 - atomic_t sample_lost_no_mm;
35384 - atomic_t sample_lost_no_mapping;
35385 - atomic_t bt_lost_no_mapping;
35386 - atomic_t event_lost_overflow;
35387 - atomic_t multiplex_counter;
35388 + atomic_unchecked_t sample_lost_no_mm;
35389 + atomic_unchecked_t sample_lost_no_mapping;
35390 + atomic_unchecked_t bt_lost_no_mapping;
35391 + atomic_unchecked_t event_lost_overflow;
35392 + atomic_unchecked_t multiplex_counter;
35393 };
35394
35395 extern struct oprofile_stat_struct oprofile_stats;
35396 diff -urNp linux-2.6.32.45/drivers/parisc/pdc_stable.c linux-2.6.32.45/drivers/parisc/pdc_stable.c
35397 --- linux-2.6.32.45/drivers/parisc/pdc_stable.c 2011-03-27 14:31:47.000000000 -0400
35398 +++ linux-2.6.32.45/drivers/parisc/pdc_stable.c 2011-04-17 15:56:46.000000000 -0400
35399 @@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj
35400 return ret;
35401 }
35402
35403 -static struct sysfs_ops pdcspath_attr_ops = {
35404 +static const struct sysfs_ops pdcspath_attr_ops = {
35405 .show = pdcspath_attr_show,
35406 .store = pdcspath_attr_store,
35407 };
35408 diff -urNp linux-2.6.32.45/drivers/parport/procfs.c linux-2.6.32.45/drivers/parport/procfs.c
35409 --- linux-2.6.32.45/drivers/parport/procfs.c 2011-03-27 14:31:47.000000000 -0400
35410 +++ linux-2.6.32.45/drivers/parport/procfs.c 2011-04-17 15:56:46.000000000 -0400
35411 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
35412
35413 *ppos += len;
35414
35415 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
35416 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
35417 }
35418
35419 #ifdef CONFIG_PARPORT_1284
35420 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
35421
35422 *ppos += len;
35423
35424 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
35425 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
35426 }
35427 #endif /* IEEE1284.3 support. */
35428
35429 diff -urNp linux-2.6.32.45/drivers/pci/hotplug/acpiphp_glue.c linux-2.6.32.45/drivers/pci/hotplug/acpiphp_glue.c
35430 --- linux-2.6.32.45/drivers/pci/hotplug/acpiphp_glue.c 2011-03-27 14:31:47.000000000 -0400
35431 +++ linux-2.6.32.45/drivers/pci/hotplug/acpiphp_glue.c 2011-04-17 15:56:46.000000000 -0400
35432 @@ -111,7 +111,7 @@ static int post_dock_fixups(struct notif
35433 }
35434
35435
35436 -static struct acpi_dock_ops acpiphp_dock_ops = {
35437 +static const struct acpi_dock_ops acpiphp_dock_ops = {
35438 .handler = handle_hotplug_event_func,
35439 };
35440
35441 diff -urNp linux-2.6.32.45/drivers/pci/hotplug/cpci_hotplug.h linux-2.6.32.45/drivers/pci/hotplug/cpci_hotplug.h
35442 --- linux-2.6.32.45/drivers/pci/hotplug/cpci_hotplug.h 2011-03-27 14:31:47.000000000 -0400
35443 +++ linux-2.6.32.45/drivers/pci/hotplug/cpci_hotplug.h 2011-08-05 20:33:55.000000000 -0400
35444 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
35445 int (*hardware_test) (struct slot* slot, u32 value);
35446 u8 (*get_power) (struct slot* slot);
35447 int (*set_power) (struct slot* slot, int value);
35448 -};
35449 +} __no_const;
35450
35451 struct cpci_hp_controller {
35452 unsigned int irq;
35453 diff -urNp linux-2.6.32.45/drivers/pci/hotplug/cpqphp_nvram.c linux-2.6.32.45/drivers/pci/hotplug/cpqphp_nvram.c
35454 --- linux-2.6.32.45/drivers/pci/hotplug/cpqphp_nvram.c 2011-03-27 14:31:47.000000000 -0400
35455 +++ linux-2.6.32.45/drivers/pci/hotplug/cpqphp_nvram.c 2011-04-17 15:56:46.000000000 -0400
35456 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
35457
35458 void compaq_nvram_init (void __iomem *rom_start)
35459 {
35460 +
35461 +#ifndef CONFIG_PAX_KERNEXEC
35462 if (rom_start) {
35463 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
35464 }
35465 +#endif
35466 +
35467 dbg("int15 entry = %p\n", compaq_int15_entry_point);
35468
35469 /* initialize our int15 lock */
35470 diff -urNp linux-2.6.32.45/drivers/pci/hotplug/fakephp.c linux-2.6.32.45/drivers/pci/hotplug/fakephp.c
35471 --- linux-2.6.32.45/drivers/pci/hotplug/fakephp.c 2011-03-27 14:31:47.000000000 -0400
35472 +++ linux-2.6.32.45/drivers/pci/hotplug/fakephp.c 2011-04-17 15:56:46.000000000 -0400
35473 @@ -73,7 +73,7 @@ static void legacy_release(struct kobjec
35474 }
35475
35476 static struct kobj_type legacy_ktype = {
35477 - .sysfs_ops = &(struct sysfs_ops){
35478 + .sysfs_ops = &(const struct sysfs_ops){
35479 .store = legacy_store, .show = legacy_show
35480 },
35481 .release = &legacy_release,
35482 diff -urNp linux-2.6.32.45/drivers/pci/intel-iommu.c linux-2.6.32.45/drivers/pci/intel-iommu.c
35483 --- linux-2.6.32.45/drivers/pci/intel-iommu.c 2011-05-10 22:12:01.000000000 -0400
35484 +++ linux-2.6.32.45/drivers/pci/intel-iommu.c 2011-05-10 22:12:33.000000000 -0400
35485 @@ -2643,7 +2643,7 @@ error:
35486 return 0;
35487 }
35488
35489 -static dma_addr_t intel_map_page(struct device *dev, struct page *page,
35490 +dma_addr_t intel_map_page(struct device *dev, struct page *page,
35491 unsigned long offset, size_t size,
35492 enum dma_data_direction dir,
35493 struct dma_attrs *attrs)
35494 @@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain
35495 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
35496 }
35497
35498 -static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
35499 +void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
35500 size_t size, enum dma_data_direction dir,
35501 struct dma_attrs *attrs)
35502 {
35503 @@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct devi
35504 }
35505 }
35506
35507 -static void *intel_alloc_coherent(struct device *hwdev, size_t size,
35508 +void *intel_alloc_coherent(struct device *hwdev, size_t size,
35509 dma_addr_t *dma_handle, gfp_t flags)
35510 {
35511 void *vaddr;
35512 @@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct
35513 return NULL;
35514 }
35515
35516 -static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
35517 +void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
35518 dma_addr_t dma_handle)
35519 {
35520 int order;
35521 @@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct d
35522 free_pages((unsigned long)vaddr, order);
35523 }
35524
35525 -static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
35526 +void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
35527 int nelems, enum dma_data_direction dir,
35528 struct dma_attrs *attrs)
35529 {
35530 @@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(str
35531 return nelems;
35532 }
35533
35534 -static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
35535 +int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
35536 enum dma_data_direction dir, struct dma_attrs *attrs)
35537 {
35538 int i;
35539 @@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *h
35540 return nelems;
35541 }
35542
35543 -static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
35544 +int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
35545 {
35546 return !dma_addr;
35547 }
35548
35549 -struct dma_map_ops intel_dma_ops = {
35550 +const struct dma_map_ops intel_dma_ops = {
35551 .alloc_coherent = intel_alloc_coherent,
35552 .free_coherent = intel_free_coherent,
35553 .map_sg = intel_map_sg,
35554 diff -urNp linux-2.6.32.45/drivers/pci/pcie/aspm.c linux-2.6.32.45/drivers/pci/pcie/aspm.c
35555 --- linux-2.6.32.45/drivers/pci/pcie/aspm.c 2011-03-27 14:31:47.000000000 -0400
35556 +++ linux-2.6.32.45/drivers/pci/pcie/aspm.c 2011-04-17 15:56:46.000000000 -0400
35557 @@ -27,9 +27,9 @@
35558 #define MODULE_PARAM_PREFIX "pcie_aspm."
35559
35560 /* Note: those are not register definitions */
35561 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
35562 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
35563 -#define ASPM_STATE_L1 (4) /* L1 state */
35564 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
35565 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
35566 +#define ASPM_STATE_L1 (4U) /* L1 state */
35567 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
35568 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
35569
35570 diff -urNp linux-2.6.32.45/drivers/pci/probe.c linux-2.6.32.45/drivers/pci/probe.c
35571 --- linux-2.6.32.45/drivers/pci/probe.c 2011-03-27 14:31:47.000000000 -0400
35572 +++ linux-2.6.32.45/drivers/pci/probe.c 2011-04-17 15:56:46.000000000 -0400
35573 @@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(
35574 return ret;
35575 }
35576
35577 -static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
35578 +static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
35579 struct device_attribute *attr,
35580 char *buf)
35581 {
35582 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
35583 }
35584
35585 -static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
35586 +static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
35587 struct device_attribute *attr,
35588 char *buf)
35589 {
35590 diff -urNp linux-2.6.32.45/drivers/pci/proc.c linux-2.6.32.45/drivers/pci/proc.c
35591 --- linux-2.6.32.45/drivers/pci/proc.c 2011-03-27 14:31:47.000000000 -0400
35592 +++ linux-2.6.32.45/drivers/pci/proc.c 2011-04-17 15:56:46.000000000 -0400
35593 @@ -480,7 +480,16 @@ static const struct file_operations proc
35594 static int __init pci_proc_init(void)
35595 {
35596 struct pci_dev *dev = NULL;
35597 +
35598 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
35599 +#ifdef CONFIG_GRKERNSEC_PROC_USER
35600 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
35601 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
35602 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
35603 +#endif
35604 +#else
35605 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
35606 +#endif
35607 proc_create("devices", 0, proc_bus_pci_dir,
35608 &proc_bus_pci_dev_operations);
35609 proc_initialized = 1;
35610 diff -urNp linux-2.6.32.45/drivers/pci/slot.c linux-2.6.32.45/drivers/pci/slot.c
35611 --- linux-2.6.32.45/drivers/pci/slot.c 2011-03-27 14:31:47.000000000 -0400
35612 +++ linux-2.6.32.45/drivers/pci/slot.c 2011-04-17 15:56:46.000000000 -0400
35613 @@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struc
35614 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
35615 }
35616
35617 -static struct sysfs_ops pci_slot_sysfs_ops = {
35618 +static const struct sysfs_ops pci_slot_sysfs_ops = {
35619 .show = pci_slot_attr_show,
35620 .store = pci_slot_attr_store,
35621 };
35622 diff -urNp linux-2.6.32.45/drivers/pcmcia/pcmcia_ioctl.c linux-2.6.32.45/drivers/pcmcia/pcmcia_ioctl.c
35623 --- linux-2.6.32.45/drivers/pcmcia/pcmcia_ioctl.c 2011-03-27 14:31:47.000000000 -0400
35624 +++ linux-2.6.32.45/drivers/pcmcia/pcmcia_ioctl.c 2011-04-17 15:56:46.000000000 -0400
35625 @@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode
35626 return -EFAULT;
35627 }
35628 }
35629 - buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
35630 + buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
35631 if (!buf)
35632 return -ENOMEM;
35633
35634 diff -urNp linux-2.6.32.45/drivers/platform/x86/acer-wmi.c linux-2.6.32.45/drivers/platform/x86/acer-wmi.c
35635 --- linux-2.6.32.45/drivers/platform/x86/acer-wmi.c 2011-03-27 14:31:47.000000000 -0400
35636 +++ linux-2.6.32.45/drivers/platform/x86/acer-wmi.c 2011-04-17 15:56:46.000000000 -0400
35637 @@ -918,7 +918,7 @@ static int update_bl_status(struct backl
35638 return 0;
35639 }
35640
35641 -static struct backlight_ops acer_bl_ops = {
35642 +static const struct backlight_ops acer_bl_ops = {
35643 .get_brightness = read_brightness,
35644 .update_status = update_bl_status,
35645 };
35646 diff -urNp linux-2.6.32.45/drivers/platform/x86/asus_acpi.c linux-2.6.32.45/drivers/platform/x86/asus_acpi.c
35647 --- linux-2.6.32.45/drivers/platform/x86/asus_acpi.c 2011-03-27 14:31:47.000000000 -0400
35648 +++ linux-2.6.32.45/drivers/platform/x86/asus_acpi.c 2011-04-17 15:56:46.000000000 -0400
35649 @@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_
35650 return 0;
35651 }
35652
35653 -static struct backlight_ops asus_backlight_data = {
35654 +static const struct backlight_ops asus_backlight_data = {
35655 .get_brightness = read_brightness,
35656 .update_status = set_brightness_status,
35657 };
35658 diff -urNp linux-2.6.32.45/drivers/platform/x86/asus-laptop.c linux-2.6.32.45/drivers/platform/x86/asus-laptop.c
35659 --- linux-2.6.32.45/drivers/platform/x86/asus-laptop.c 2011-03-27 14:31:47.000000000 -0400
35660 +++ linux-2.6.32.45/drivers/platform/x86/asus-laptop.c 2011-04-17 15:56:46.000000000 -0400
35661 @@ -250,7 +250,7 @@ static struct backlight_device *asus_bac
35662 */
35663 static int read_brightness(struct backlight_device *bd);
35664 static int update_bl_status(struct backlight_device *bd);
35665 -static struct backlight_ops asusbl_ops = {
35666 +static const struct backlight_ops asusbl_ops = {
35667 .get_brightness = read_brightness,
35668 .update_status = update_bl_status,
35669 };
35670 diff -urNp linux-2.6.32.45/drivers/platform/x86/compal-laptop.c linux-2.6.32.45/drivers/platform/x86/compal-laptop.c
35671 --- linux-2.6.32.45/drivers/platform/x86/compal-laptop.c 2011-03-27 14:31:47.000000000 -0400
35672 +++ linux-2.6.32.45/drivers/platform/x86/compal-laptop.c 2011-04-17 15:56:46.000000000 -0400
35673 @@ -163,7 +163,7 @@ static int bl_update_status(struct backl
35674 return set_lcd_level(b->props.brightness);
35675 }
35676
35677 -static struct backlight_ops compalbl_ops = {
35678 +static const struct backlight_ops compalbl_ops = {
35679 .get_brightness = bl_get_brightness,
35680 .update_status = bl_update_status,
35681 };
35682 diff -urNp linux-2.6.32.45/drivers/platform/x86/dell-laptop.c linux-2.6.32.45/drivers/platform/x86/dell-laptop.c
35683 --- linux-2.6.32.45/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:01.000000000 -0400
35684 +++ linux-2.6.32.45/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:33.000000000 -0400
35685 @@ -318,7 +318,7 @@ static int dell_get_intensity(struct bac
35686 return buffer.output[1];
35687 }
35688
35689 -static struct backlight_ops dell_ops = {
35690 +static const struct backlight_ops dell_ops = {
35691 .get_brightness = dell_get_intensity,
35692 .update_status = dell_send_intensity,
35693 };
35694 diff -urNp linux-2.6.32.45/drivers/platform/x86/eeepc-laptop.c linux-2.6.32.45/drivers/platform/x86/eeepc-laptop.c
35695 --- linux-2.6.32.45/drivers/platform/x86/eeepc-laptop.c 2011-03-27 14:31:47.000000000 -0400
35696 +++ linux-2.6.32.45/drivers/platform/x86/eeepc-laptop.c 2011-04-17 15:56:46.000000000 -0400
35697 @@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device
35698 */
35699 static int read_brightness(struct backlight_device *bd);
35700 static int update_bl_status(struct backlight_device *bd);
35701 -static struct backlight_ops eeepcbl_ops = {
35702 +static const struct backlight_ops eeepcbl_ops = {
35703 .get_brightness = read_brightness,
35704 .update_status = update_bl_status,
35705 };
35706 diff -urNp linux-2.6.32.45/drivers/platform/x86/fujitsu-laptop.c linux-2.6.32.45/drivers/platform/x86/fujitsu-laptop.c
35707 --- linux-2.6.32.45/drivers/platform/x86/fujitsu-laptop.c 2011-03-27 14:31:47.000000000 -0400
35708 +++ linux-2.6.32.45/drivers/platform/x86/fujitsu-laptop.c 2011-04-17 15:56:46.000000000 -0400
35709 @@ -436,7 +436,7 @@ static int bl_update_status(struct backl
35710 return ret;
35711 }
35712
35713 -static struct backlight_ops fujitsubl_ops = {
35714 +static const struct backlight_ops fujitsubl_ops = {
35715 .get_brightness = bl_get_brightness,
35716 .update_status = bl_update_status,
35717 };
35718 diff -urNp linux-2.6.32.45/drivers/platform/x86/msi-laptop.c linux-2.6.32.45/drivers/platform/x86/msi-laptop.c
35719 --- linux-2.6.32.45/drivers/platform/x86/msi-laptop.c 2011-03-27 14:31:47.000000000 -0400
35720 +++ linux-2.6.32.45/drivers/platform/x86/msi-laptop.c 2011-04-17 15:56:46.000000000 -0400
35721 @@ -161,7 +161,7 @@ static int bl_update_status(struct backl
35722 return set_lcd_level(b->props.brightness);
35723 }
35724
35725 -static struct backlight_ops msibl_ops = {
35726 +static const struct backlight_ops msibl_ops = {
35727 .get_brightness = bl_get_brightness,
35728 .update_status = bl_update_status,
35729 };
35730 diff -urNp linux-2.6.32.45/drivers/platform/x86/panasonic-laptop.c linux-2.6.32.45/drivers/platform/x86/panasonic-laptop.c
35731 --- linux-2.6.32.45/drivers/platform/x86/panasonic-laptop.c 2011-03-27 14:31:47.000000000 -0400
35732 +++ linux-2.6.32.45/drivers/platform/x86/panasonic-laptop.c 2011-04-17 15:56:46.000000000 -0400
35733 @@ -352,7 +352,7 @@ static int bl_set_status(struct backligh
35734 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
35735 }
35736
35737 -static struct backlight_ops pcc_backlight_ops = {
35738 +static const struct backlight_ops pcc_backlight_ops = {
35739 .get_brightness = bl_get,
35740 .update_status = bl_set_status,
35741 };
35742 diff -urNp linux-2.6.32.45/drivers/platform/x86/sony-laptop.c linux-2.6.32.45/drivers/platform/x86/sony-laptop.c
35743 --- linux-2.6.32.45/drivers/platform/x86/sony-laptop.c 2011-03-27 14:31:47.000000000 -0400
35744 +++ linux-2.6.32.45/drivers/platform/x86/sony-laptop.c 2011-04-17 15:56:46.000000000 -0400
35745 @@ -850,7 +850,7 @@ static int sony_backlight_get_brightness
35746 }
35747
35748 static struct backlight_device *sony_backlight_device;
35749 -static struct backlight_ops sony_backlight_ops = {
35750 +static const struct backlight_ops sony_backlight_ops = {
35751 .update_status = sony_backlight_update_status,
35752 .get_brightness = sony_backlight_get_brightness,
35753 };
35754 diff -urNp linux-2.6.32.45/drivers/platform/x86/thinkpad_acpi.c linux-2.6.32.45/drivers/platform/x86/thinkpad_acpi.c
35755 --- linux-2.6.32.45/drivers/platform/x86/thinkpad_acpi.c 2011-03-27 14:31:47.000000000 -0400
35756 +++ linux-2.6.32.45/drivers/platform/x86/thinkpad_acpi.c 2011-08-05 20:33:55.000000000 -0400
35757 @@ -2137,7 +2137,7 @@ static int hotkey_mask_get(void)
35758 return 0;
35759 }
35760
35761 -void static hotkey_mask_warn_incomplete_mask(void)
35762 +static void hotkey_mask_warn_incomplete_mask(void)
35763 {
35764 /* log only what the user can fix... */
35765 const u32 wantedmask = hotkey_driver_mask &
35766 @@ -6122,7 +6122,7 @@ static void tpacpi_brightness_notify_cha
35767 BACKLIGHT_UPDATE_HOTKEY);
35768 }
35769
35770 -static struct backlight_ops ibm_backlight_data = {
35771 +static const struct backlight_ops ibm_backlight_data = {
35772 .get_brightness = brightness_get,
35773 .update_status = brightness_update_status,
35774 };
35775 diff -urNp linux-2.6.32.45/drivers/platform/x86/toshiba_acpi.c linux-2.6.32.45/drivers/platform/x86/toshiba_acpi.c
35776 --- linux-2.6.32.45/drivers/platform/x86/toshiba_acpi.c 2011-03-27 14:31:47.000000000 -0400
35777 +++ linux-2.6.32.45/drivers/platform/x86/toshiba_acpi.c 2011-04-17 15:56:46.000000000 -0400
35778 @@ -671,7 +671,7 @@ static acpi_status remove_device(void)
35779 return AE_OK;
35780 }
35781
35782 -static struct backlight_ops toshiba_backlight_data = {
35783 +static const struct backlight_ops toshiba_backlight_data = {
35784 .get_brightness = get_lcd,
35785 .update_status = set_lcd_status,
35786 };
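
The hunks above all apply one recurring hardening pattern: method tables such as struct backlight_ops are only ever filled in statically, so they are marked const and land in read-only memory, where a stray or attacker-controlled kernel write can no longer redirect their function pointers. A minimal standalone sketch of the idea (hypothetical names, plain userspace C, not the kernel API):

    /* Minimal sketch (hypothetical names, not the kernel API): a method
     * table that is only initialized at compile time can be const, which
     * places it in .rodata and makes later retargeting impossible. */
    #include <stdio.h>

    struct demo_ops {
            int (*get_brightness)(void);
            int (*update_status)(int level);
    };

    static int demo_get_brightness(void) { return 7; }
    static int demo_update_status(int level) { printf("level=%d\n", level); return 0; }

    /* const: the pointers can never be reassigned after initialization */
    static const struct demo_ops demo_backlight_ops = {
            .get_brightness = demo_get_brightness,
            .update_status  = demo_update_status,
    };

    int main(void)
    {
            /* demo_backlight_ops.update_status = NULL;  <- would not compile */
            return demo_backlight_ops.update_status(demo_backlight_ops.get_brightness());
    }

The same constification is applied further down to ata_port_operations, kgdb_io, vm_operations_struct and file_operations instances in the SCSI, serial and staging drivers.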
35787 diff -urNp linux-2.6.32.45/drivers/pnp/pnpbios/bioscalls.c linux-2.6.32.45/drivers/pnp/pnpbios/bioscalls.c
35788 --- linux-2.6.32.45/drivers/pnp/pnpbios/bioscalls.c 2011-03-27 14:31:47.000000000 -0400
35789 +++ linux-2.6.32.45/drivers/pnp/pnpbios/bioscalls.c 2011-04-17 15:56:46.000000000 -0400
35790 @@ -60,7 +60,7 @@ do { \
35791 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
35792 } while(0)
35793
35794 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
35795 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
35796 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
35797
35798 /*
35799 @@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func
35800
35801 cpu = get_cpu();
35802 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
35803 +
35804 + pax_open_kernel();
35805 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
35806 + pax_close_kernel();
35807
35808 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
35809 spin_lock_irqsave(&pnp_bios_lock, flags);
35810 @@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func
35811 :"memory");
35812 spin_unlock_irqrestore(&pnp_bios_lock, flags);
35813
35814 + pax_open_kernel();
35815 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
35816 + pax_close_kernel();
35817 +
35818 put_cpu();
35819
35820 /* If we get here and this is set then the PnP BIOS faulted on us. */
35821 @@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 n
35822 return status;
35823 }
35824
35825 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
35826 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
35827 {
35828 int i;
35829
35830 @@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_i
35831 pnp_bios_callpoint.offset = header->fields.pm16offset;
35832 pnp_bios_callpoint.segment = PNP_CS16;
35833
35834 + pax_open_kernel();
35835 +
35836 for_each_possible_cpu(i) {
35837 struct desc_struct *gdt = get_cpu_gdt_table(i);
35838 if (!gdt)
35839 @@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_i
35840 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
35841 (unsigned long)__va(header->fields.pm16dseg));
35842 }
35843 +
35844 + pax_close_kernel();
35845 }
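
In the bioscalls.c hunks the GDT slot used for the PnP BIOS becomes read-only data, so every temporary descriptor swap is now bracketed by pax_open_kernel()/pax_close_kernel(), and the access byte of bad_bios_desc changes from 0x4092 to 0x4093 so that the descriptor's "accessed" bit is pre-set and the CPU never needs to write the read-only entry itself. As a rough userspace analogy only (POSIX mprotect(), not the PaX mechanism), the pattern is a table kept read-only except across an explicit open/close window:

    /* Userspace analogy only: keep a table read-only and open a short
     * write window around updates, similar in spirit to the
     * pax_open_kernel()/pax_close_kernel() bracketing above. */
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static void *table;
    static size_t pagesz;

    static void open_table(void)  { mprotect(table, pagesz, PROT_READ | PROT_WRITE); }
    static void close_table(void) { mprotect(table, pagesz, PROT_READ); }

    int main(void)
    {
            pagesz = (size_t)sysconf(_SC_PAGESIZE);
            table = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (table == MAP_FAILED)
                    return 1;

            strcpy(table, "initial descriptor");
            close_table();                  /* normal state: read-only */

            open_table();                   /* analogous to pax_open_kernel() */
            strcpy(table, "patched descriptor");
            close_table();                  /* analogous to pax_close_kernel() */

            puts((const char *)table);
            return 0;
    }

The same open/close bracketing appears again below around the per-CPU GDT setup loop in pnpbios_calls_init() and around the lpfc transport-template assignments.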
35846 diff -urNp linux-2.6.32.45/drivers/pnp/resource.c linux-2.6.32.45/drivers/pnp/resource.c
35847 --- linux-2.6.32.45/drivers/pnp/resource.c 2011-03-27 14:31:47.000000000 -0400
35848 +++ linux-2.6.32.45/drivers/pnp/resource.c 2011-04-17 15:56:46.000000000 -0400
35849 @@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
35850 return 1;
35851
35852 /* check if the resource is valid */
35853 - if (*irq < 0 || *irq > 15)
35854 + if (*irq > 15)
35855 return 0;
35856
35857 /* check if the resource is reserved */
35858 @@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
35859 return 1;
35860
35861 /* check if the resource is valid */
35862 - if (*dma < 0 || *dma == 4 || *dma > 7)
35863 + if (*dma == 4 || *dma > 7)
35864 return 0;
35865
35866 /* check if the resource is reserved */
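
The two pnp/resource.c hunks drop the "*irq < 0" and "*dma < 0" halves of the range checks: the values are unsigned resource values, so those comparisons can never be true and compilers warn that the test is dead code; the remaining upper-bound checks are unchanged. A two-line standalone reminder of why an unsigned "< 0" test is always false:

    /* Standalone reminder: an unsigned value compared with '< 0' can
     * never be true, so the branch is dead code (gcc -Wextra warns). */
    #include <stdio.h>

    int main(void)
    {
            unsigned long irq = 0;

            if (irq < 0)            /* always false for an unsigned type */
                    puts("unreachable");
            else
                    puts("only the upper-bound check (irq > 15) can reject");
            return 0;
    }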
35867 diff -urNp linux-2.6.32.45/drivers/power/bq27x00_battery.c linux-2.6.32.45/drivers/power/bq27x00_battery.c
35868 --- linux-2.6.32.45/drivers/power/bq27x00_battery.c 2011-03-27 14:31:47.000000000 -0400
35869 +++ linux-2.6.32.45/drivers/power/bq27x00_battery.c 2011-08-05 20:33:55.000000000 -0400
35870 @@ -44,7 +44,7 @@ struct bq27x00_device_info;
35871 struct bq27x00_access_methods {
35872 int (*read)(u8 reg, int *rt_value, int b_single,
35873 struct bq27x00_device_info *di);
35874 -};
35875 +} __no_const;
35876
35877 struct bq27x00_device_info {
35878 struct device *dev;
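
struct bq27x00_access_methods carries a function pointer that the driver fills in at probe time, so it is tagged __no_const: the grsecurity/PaX constify plugin would otherwise treat a structure consisting only of function pointers as implicitly const, and the runtime assignment would then no longer compile. The same opt-out annotation recurs below for the aacraid adapter_ops, bfa_hwif_s, bfa_ioc_cbfn_s, ips_hw_func_t and qla2xxx isp_operations structures. A small standalone sketch (hypothetical names) of why a runtime-assigned method table cannot be const:

    /* Standalone sketch (hypothetical names): a method table whose
     * backend is chosen at runtime must stay writable, which is what
     * __no_const opts a structure out of. */
    #include <stdio.h>

    struct access_methods {
            int (*read)(unsigned char reg, int *value);
    };

    static int backend_a_read(unsigned char reg, int *value) { *value = reg + 1; return 0; }
    static int backend_b_read(unsigned char reg, int *value) { *value = reg + 2; return 0; }

    int main(int argc, char **argv)
    {
            struct access_methods bus;      /* must stay writable ... */
            int v;

            (void)argv;
            /* ... because the backend is only known at runtime */
            bus.read = (argc > 1) ? backend_b_read : backend_a_read;
            bus.read(0x10, &v);
            printf("value=%d\n", v);
            return 0;
    }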
35879 diff -urNp linux-2.6.32.45/drivers/rtc/rtc-dev.c linux-2.6.32.45/drivers/rtc/rtc-dev.c
35880 --- linux-2.6.32.45/drivers/rtc/rtc-dev.c 2011-03-27 14:31:47.000000000 -0400
35881 +++ linux-2.6.32.45/drivers/rtc/rtc-dev.c 2011-04-17 15:56:46.000000000 -0400
35882 @@ -14,6 +14,7 @@
35883 #include <linux/module.h>
35884 #include <linux/rtc.h>
35885 #include <linux/sched.h>
35886 +#include <linux/grsecurity.h>
35887 #include "rtc-core.h"
35888
35889 static dev_t rtc_devt;
35890 @@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *f
35891 if (copy_from_user(&tm, uarg, sizeof(tm)))
35892 return -EFAULT;
35893
35894 + gr_log_timechange();
35895 +
35896 return rtc_set_time(rtc, &tm);
35897
35898 case RTC_PIE_ON:
35899 diff -urNp linux-2.6.32.45/drivers/s390/cio/qdio_perf.c linux-2.6.32.45/drivers/s390/cio/qdio_perf.c
35900 --- linux-2.6.32.45/drivers/s390/cio/qdio_perf.c 2011-03-27 14:31:47.000000000 -0400
35901 +++ linux-2.6.32.45/drivers/s390/cio/qdio_perf.c 2011-04-17 15:56:46.000000000 -0400
35902 @@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_
35903 static int qdio_perf_proc_show(struct seq_file *m, void *v)
35904 {
35905 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
35906 - (long)atomic_long_read(&perf_stats.qdio_int));
35907 + (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
35908 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
35909 - (long)atomic_long_read(&perf_stats.pci_int));
35910 + (long)atomic_long_read_unchecked(&perf_stats.pci_int));
35911 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
35912 - (long)atomic_long_read(&perf_stats.thin_int));
35913 + (long)atomic_long_read_unchecked(&perf_stats.thin_int));
35914 seq_printf(m, "\n");
35915 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
35916 - (long)atomic_long_read(&perf_stats.tasklet_inbound));
35917 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
35918 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
35919 - (long)atomic_long_read(&perf_stats.tasklet_outbound));
35920 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
35921 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
35922 - (long)atomic_long_read(&perf_stats.tasklet_thinint),
35923 - (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
35924 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
35925 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
35926 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
35927 - (long)atomic_long_read(&perf_stats.thinint_inbound),
35928 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
35929 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
35930 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
35931 seq_printf(m, "\n");
35932 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
35933 - (long)atomic_long_read(&perf_stats.siga_in));
35934 + (long)atomic_long_read_unchecked(&perf_stats.siga_in));
35935 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
35936 - (long)atomic_long_read(&perf_stats.siga_out));
35937 + (long)atomic_long_read_unchecked(&perf_stats.siga_out));
35938 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
35939 - (long)atomic_long_read(&perf_stats.siga_sync));
35940 + (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
35941 seq_printf(m, "\n");
35942 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
35943 - (long)atomic_long_read(&perf_stats.inbound_handler));
35944 + (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
35945 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
35946 - (long)atomic_long_read(&perf_stats.outbound_handler));
35947 + (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
35948 seq_printf(m, "\n");
35949 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
35950 - (long)atomic_long_read(&perf_stats.fast_requeue));
35951 + (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
35952 seq_printf(m, "Number of outbound target full condition\t: %li\n",
35953 - (long)atomic_long_read(&perf_stats.outbound_target_full));
35954 + (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
35955 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
35956 - (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
35957 + (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
35958 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
35959 - (long)atomic_long_read(&perf_stats.debug_stop_polling));
35960 + (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
35961 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
35962 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
35963 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
35964 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
35965 - (long)atomic_long_read(&perf_stats.debug_eqbs_all),
35966 - (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
35967 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
35968 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
35969 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
35970 - (long)atomic_long_read(&perf_stats.debug_sqbs_all),
35971 - (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
35972 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
35973 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
35974 seq_printf(m, "\n");
35975 return 0;
35976 }
35977 diff -urNp linux-2.6.32.45/drivers/s390/cio/qdio_perf.h linux-2.6.32.45/drivers/s390/cio/qdio_perf.h
35978 --- linux-2.6.32.45/drivers/s390/cio/qdio_perf.h 2011-03-27 14:31:47.000000000 -0400
35979 +++ linux-2.6.32.45/drivers/s390/cio/qdio_perf.h 2011-04-17 15:56:46.000000000 -0400
35980 @@ -13,46 +13,46 @@
35981
35982 struct qdio_perf_stats {
35983 /* interrupt handler calls */
35984 - atomic_long_t qdio_int;
35985 - atomic_long_t pci_int;
35986 - atomic_long_t thin_int;
35987 + atomic_long_unchecked_t qdio_int;
35988 + atomic_long_unchecked_t pci_int;
35989 + atomic_long_unchecked_t thin_int;
35990
35991 /* tasklet runs */
35992 - atomic_long_t tasklet_inbound;
35993 - atomic_long_t tasklet_outbound;
35994 - atomic_long_t tasklet_thinint;
35995 - atomic_long_t tasklet_thinint_loop;
35996 - atomic_long_t thinint_inbound;
35997 - atomic_long_t thinint_inbound_loop;
35998 - atomic_long_t thinint_inbound_loop2;
35999 + atomic_long_unchecked_t tasklet_inbound;
36000 + atomic_long_unchecked_t tasklet_outbound;
36001 + atomic_long_unchecked_t tasklet_thinint;
36002 + atomic_long_unchecked_t tasklet_thinint_loop;
36003 + atomic_long_unchecked_t thinint_inbound;
36004 + atomic_long_unchecked_t thinint_inbound_loop;
36005 + atomic_long_unchecked_t thinint_inbound_loop2;
36006
36007 /* signal adapter calls */
36008 - atomic_long_t siga_out;
36009 - atomic_long_t siga_in;
36010 - atomic_long_t siga_sync;
36011 + atomic_long_unchecked_t siga_out;
36012 + atomic_long_unchecked_t siga_in;
36013 + atomic_long_unchecked_t siga_sync;
36014
36015 /* misc */
36016 - atomic_long_t inbound_handler;
36017 - atomic_long_t outbound_handler;
36018 - atomic_long_t fast_requeue;
36019 - atomic_long_t outbound_target_full;
36020 + atomic_long_unchecked_t inbound_handler;
36021 + atomic_long_unchecked_t outbound_handler;
36022 + atomic_long_unchecked_t fast_requeue;
36023 + atomic_long_unchecked_t outbound_target_full;
36024
36025 /* for debugging */
36026 - atomic_long_t debug_tl_out_timer;
36027 - atomic_long_t debug_stop_polling;
36028 - atomic_long_t debug_eqbs_all;
36029 - atomic_long_t debug_eqbs_incomplete;
36030 - atomic_long_t debug_sqbs_all;
36031 - atomic_long_t debug_sqbs_incomplete;
36032 + atomic_long_unchecked_t debug_tl_out_timer;
36033 + atomic_long_unchecked_t debug_stop_polling;
36034 + atomic_long_unchecked_t debug_eqbs_all;
36035 + atomic_long_unchecked_t debug_eqbs_incomplete;
36036 + atomic_long_unchecked_t debug_sqbs_all;
36037 + atomic_long_unchecked_t debug_sqbs_incomplete;
36038 };
36039
36040 extern struct qdio_perf_stats perf_stats;
36041 extern int qdio_performance_stats;
36042
36043 -static inline void qdio_perf_stat_inc(atomic_long_t *count)
36044 +static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
36045 {
36046 if (qdio_performance_stats)
36047 - atomic_long_inc(count);
36048 + atomic_long_inc_unchecked(count);
36049 }
36050
36051 int qdio_setup_perf_stats(void);
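
The qdio performance counters switch from atomic_long_t to atomic_long_unchecked_t. PaX's reference-counter overflow protection makes the regular atomic types detect wraparound; counters that are pure statistics and never guard an object's lifetime are moved to the _unchecked variants so a harmless wrap does not trip that protection. The same conversion is applied below to scsi_host_next_hn, the libfc exchange-manager stats, the lpfc trace counters, the pmcraid failure counters and the fc/iscsi/srp transport sequence numbers. As an analogy only (not the kernel implementation), a checked counter can be thought of as one that refuses to wrap, here built on the GCC/Clang __builtin_add_overflow builtin:

    /* Analogy only: a "checked" counter saturates on overflow, an
     * "unchecked" one is allowed to wrap (fine for pure statistics). */
    #include <limits.h>
    #include <stdio.h>

    static long checked_inc(long v)
    {
            long next;

            if (__builtin_add_overflow(v, 1L, &next))
                    return LONG_MAX;        /* saturate instead of wrapping */
            return next;
    }

    static long unchecked_inc(long v)
    {
            return (long)((unsigned long)v + 1UL);  /* wraparound is acceptable */
    }

    int main(void)
    {
            long c = LONG_MAX;

            printf("checked:   %ld\n", checked_inc(c));
            printf("unchecked: %ld\n", unchecked_inc(c));
            return 0;
    }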
36052 diff -urNp linux-2.6.32.45/drivers/scsi/aacraid/aacraid.h linux-2.6.32.45/drivers/scsi/aacraid/aacraid.h
36053 --- linux-2.6.32.45/drivers/scsi/aacraid/aacraid.h 2011-03-27 14:31:47.000000000 -0400
36054 +++ linux-2.6.32.45/drivers/scsi/aacraid/aacraid.h 2011-08-05 20:33:55.000000000 -0400
36055 @@ -471,7 +471,7 @@ struct adapter_ops
36056 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
36057 /* Administrative operations */
36058 int (*adapter_comm)(struct aac_dev * dev, int comm);
36059 -};
36060 +} __no_const;
36061
36062 /*
36063 * Define which interrupt handler needs to be installed
36064 diff -urNp linux-2.6.32.45/drivers/scsi/aacraid/commctrl.c linux-2.6.32.45/drivers/scsi/aacraid/commctrl.c
36065 --- linux-2.6.32.45/drivers/scsi/aacraid/commctrl.c 2011-03-27 14:31:47.000000000 -0400
36066 +++ linux-2.6.32.45/drivers/scsi/aacraid/commctrl.c 2011-05-16 21:46:57.000000000 -0400
36067 @@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_d
36068 u32 actual_fibsize64, actual_fibsize = 0;
36069 int i;
36070
36071 + pax_track_stack();
36072
36073 if (dev->in_reset) {
36074 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
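
aac_send_raw_srb() gains a pax_track_stack() call, and the same annotation shows up below in other ioctl and probe paths with unusually large stack frames (BusLogic, dpt_i2o, eata, gdth, lpfc debugfs, megaraid, osd, scsi_debug, sym53c8xx). The call lets PaX's stack-depth instrumentation register how far down such a frame reaches so the used portion of the kernel stack can be tracked by the corresponding stack-sanitizing feature. A rough standalone illustration of recording the deepest stack address touched (userspace sketch, not the PaX implementation, and assuming the usual downward-growing stack):

    /* Illustration only: remember the lowest stack address observed,
     * which is the general idea behind annotating large-frame functions. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uintptr_t lowest_sp = UINTPTR_MAX;

    static void track_stack(void)
    {
            char marker;                    /* its address approximates the stack depth */
            uintptr_t sp = (uintptr_t)&marker;

            if (sp < lowest_sp)
                    lowest_sp = sp;
    }

    static void big_frame(void)
    {
            char cmnd[512];                 /* large on-stack buffer, like the annotated drivers */

            track_stack();
            memset(cmnd, 0xff, sizeof(cmnd));
    }

    int main(void)
    {
            uintptr_t top;
            char marker;

            top = (uintptr_t)&marker;
            big_frame();
            printf("observed stack span: %zu bytes\n", (size_t)(top - lowest_sp));
            return 0;
    }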
36075 diff -urNp linux-2.6.32.45/drivers/scsi/aic94xx/aic94xx_init.c linux-2.6.32.45/drivers/scsi/aic94xx/aic94xx_init.c
36076 --- linux-2.6.32.45/drivers/scsi/aic94xx/aic94xx_init.c 2011-03-27 14:31:47.000000000 -0400
36077 +++ linux-2.6.32.45/drivers/scsi/aic94xx/aic94xx_init.c 2011-04-17 15:56:46.000000000 -0400
36078 @@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(stru
36079 flash_error_table[i].reason);
36080 }
36081
36082 -static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
36083 +static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
36084 asd_show_update_bios, asd_store_update_bios);
36085
36086 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
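
The aic94xx hunk is a plain permission fix: the update_bios sysfs attribute loses its group/other write bits (S_IWUGO becomes S_IWUSR), so only root can trigger a BIOS flash through sysfs. A standalone illustration of what the mode change amounts to in octal (S_IRUGO/S_IWUGO are kernel-side shorthands, rebuilt here from the POSIX per-class bits purely for printing):

    /* Illustration: the sysfs attribute mode before and after the change. */
    #include <stdio.h>
    #include <sys/stat.h>

    #define S_IRUGO (S_IRUSR | S_IRGRP | S_IROTH)
    #define S_IWUGO (S_IWUSR | S_IWGRP | S_IWOTH)

    int main(void)
    {
            printf("before: 0%o (world-writable attribute)\n",
                   (unsigned)(S_IRUGO | S_IWUGO));
            printf("after:  0%o (only the owner, i.e. root, may write)\n",
                   (unsigned)(S_IRUGO | S_IWUSR));
            return 0;
    }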
36087 diff -urNp linux-2.6.32.45/drivers/scsi/bfa/bfa_iocfc.h linux-2.6.32.45/drivers/scsi/bfa/bfa_iocfc.h
36088 --- linux-2.6.32.45/drivers/scsi/bfa/bfa_iocfc.h 2011-03-27 14:31:47.000000000 -0400
36089 +++ linux-2.6.32.45/drivers/scsi/bfa/bfa_iocfc.h 2011-08-05 20:33:55.000000000 -0400
36090 @@ -61,7 +61,7 @@ struct bfa_hwif_s {
36091 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
36092 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
36093 u32 *nvecs, u32 *maxvec);
36094 -};
36095 +} __no_const;
36096 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
36097
36098 struct bfa_iocfc_s {
36099 diff -urNp linux-2.6.32.45/drivers/scsi/bfa/bfa_ioc.h linux-2.6.32.45/drivers/scsi/bfa/bfa_ioc.h
36100 --- linux-2.6.32.45/drivers/scsi/bfa/bfa_ioc.h 2011-03-27 14:31:47.000000000 -0400
36101 +++ linux-2.6.32.45/drivers/scsi/bfa/bfa_ioc.h 2011-08-05 20:33:55.000000000 -0400
36102 @@ -127,7 +127,7 @@ struct bfa_ioc_cbfn_s {
36103 bfa_ioc_disable_cbfn_t disable_cbfn;
36104 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
36105 bfa_ioc_reset_cbfn_t reset_cbfn;
36106 -};
36107 +} __no_const;
36108
36109 /**
36110 * Heartbeat failure notification queue element.
36111 diff -urNp linux-2.6.32.45/drivers/scsi/BusLogic.c linux-2.6.32.45/drivers/scsi/BusLogic.c
36112 --- linux-2.6.32.45/drivers/scsi/BusLogic.c 2011-03-27 14:31:47.000000000 -0400
36113 +++ linux-2.6.32.45/drivers/scsi/BusLogic.c 2011-05-16 21:46:57.000000000 -0400
36114 @@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFla
36115 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
36116 *PrototypeHostAdapter)
36117 {
36118 + pax_track_stack();
36119 +
36120 /*
36121 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
36122 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
36123 diff -urNp linux-2.6.32.45/drivers/scsi/dpt_i2o.c linux-2.6.32.45/drivers/scsi/dpt_i2o.c
36124 --- linux-2.6.32.45/drivers/scsi/dpt_i2o.c 2011-03-27 14:31:47.000000000 -0400
36125 +++ linux-2.6.32.45/drivers/scsi/dpt_i2o.c 2011-05-16 21:46:57.000000000 -0400
36126 @@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* p
36127 dma_addr_t addr;
36128 ulong flags = 0;
36129
36130 + pax_track_stack();
36131 +
36132 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
36133 // get user msg size in u32s
36134 if(get_user(size, &user_msg[0])){
36135 @@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
36136 s32 rcode;
36137 dma_addr_t addr;
36138
36139 + pax_track_stack();
36140 +
36141 memset(msg, 0 , sizeof(msg));
36142 len = scsi_bufflen(cmd);
36143 direction = 0x00000000;
36144 diff -urNp linux-2.6.32.45/drivers/scsi/eata.c linux-2.6.32.45/drivers/scsi/eata.c
36145 --- linux-2.6.32.45/drivers/scsi/eata.c 2011-03-27 14:31:47.000000000 -0400
36146 +++ linux-2.6.32.45/drivers/scsi/eata.c 2011-05-16 21:46:57.000000000 -0400
36147 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
36148 struct hostdata *ha;
36149 char name[16];
36150
36151 + pax_track_stack();
36152 +
36153 sprintf(name, "%s%d", driver_name, j);
36154
36155 if (!request_region(port_base, REGION_SIZE, driver_name)) {
36156 diff -urNp linux-2.6.32.45/drivers/scsi/fcoe/libfcoe.c linux-2.6.32.45/drivers/scsi/fcoe/libfcoe.c
36157 --- linux-2.6.32.45/drivers/scsi/fcoe/libfcoe.c 2011-03-27 14:31:47.000000000 -0400
36158 +++ linux-2.6.32.45/drivers/scsi/fcoe/libfcoe.c 2011-05-16 21:46:57.000000000 -0400
36159 @@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fc
36160 size_t rlen;
36161 size_t dlen;
36162
36163 + pax_track_stack();
36164 +
36165 fiph = (struct fip_header *)skb->data;
36166 sub = fiph->fip_subcode;
36167 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
36168 diff -urNp linux-2.6.32.45/drivers/scsi/fnic/fnic_main.c linux-2.6.32.45/drivers/scsi/fnic/fnic_main.c
36169 --- linux-2.6.32.45/drivers/scsi/fnic/fnic_main.c 2011-03-27 14:31:47.000000000 -0400
36170 +++ linux-2.6.32.45/drivers/scsi/fnic/fnic_main.c 2011-08-05 20:33:55.000000000 -0400
36171 @@ -669,7 +669,7 @@ static int __devinit fnic_probe(struct p
36172 /* Start local port initiatialization */
36173
36174 lp->link_up = 0;
36175 - lp->tt = fnic_transport_template;
36176 + memcpy((void *)&lp->tt, &fnic_transport_template, sizeof(fnic_transport_template));
36177
36178 lp->max_retry_count = fnic->config.flogi_retries;
36179 lp->max_rport_retry_count = fnic->config.plogi_retries;
36180 diff -urNp linux-2.6.32.45/drivers/scsi/gdth.c linux-2.6.32.45/drivers/scsi/gdth.c
36181 --- linux-2.6.32.45/drivers/scsi/gdth.c 2011-03-27 14:31:47.000000000 -0400
36182 +++ linux-2.6.32.45/drivers/scsi/gdth.c 2011-05-16 21:46:57.000000000 -0400
36183 @@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
36184 ulong flags;
36185 gdth_ha_str *ha;
36186
36187 + pax_track_stack();
36188 +
36189 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
36190 return -EFAULT;
36191 ha = gdth_find_ha(ldrv.ionode);
36192 @@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg
36193 gdth_ha_str *ha;
36194 int rval;
36195
36196 + pax_track_stack();
36197 +
36198 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
36199 res.number >= MAX_HDRIVES)
36200 return -EFAULT;
36201 @@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg,
36202 gdth_ha_str *ha;
36203 int rval;
36204
36205 + pax_track_stack();
36206 +
36207 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
36208 return -EFAULT;
36209 ha = gdth_find_ha(gen.ionode);
36210 @@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
36211 int i;
36212 gdth_cmd_str gdtcmd;
36213 char cmnd[MAX_COMMAND_SIZE];
36214 +
36215 + pax_track_stack();
36216 +
36217 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
36218
36219 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
36220 diff -urNp linux-2.6.32.45/drivers/scsi/gdth_proc.c linux-2.6.32.45/drivers/scsi/gdth_proc.c
36221 --- linux-2.6.32.45/drivers/scsi/gdth_proc.c 2011-03-27 14:31:47.000000000 -0400
36222 +++ linux-2.6.32.45/drivers/scsi/gdth_proc.c 2011-05-16 21:46:57.000000000 -0400
36223 @@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi
36224 ulong64 paddr;
36225
36226 char cmnd[MAX_COMMAND_SIZE];
36227 +
36228 + pax_track_stack();
36229 +
36230 memset(cmnd, 0xff, 12);
36231 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
36232
36233 @@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,ch
36234 gdth_hget_str *phg;
36235 char cmnd[MAX_COMMAND_SIZE];
36236
36237 + pax_track_stack();
36238 +
36239 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
36240 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
36241 if (!gdtcmd || !estr)
36242 diff -urNp linux-2.6.32.45/drivers/scsi/hosts.c linux-2.6.32.45/drivers/scsi/hosts.c
36243 --- linux-2.6.32.45/drivers/scsi/hosts.c 2011-03-27 14:31:47.000000000 -0400
36244 +++ linux-2.6.32.45/drivers/scsi/hosts.c 2011-05-04 17:56:28.000000000 -0400
36245 @@ -40,7 +40,7 @@
36246 #include "scsi_logging.h"
36247
36248
36249 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
36250 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
36251
36252
36253 static void scsi_host_cls_release(struct device *dev)
36254 @@ -344,7 +344,7 @@ struct Scsi_Host *scsi_host_alloc(struct
36255 * subtract one because we increment first then return, but we need to
36256 * know what the next host number was before increment
36257 */
36258 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
36259 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
36260 shost->dma_channel = 0xff;
36261
36262 /* These three are default values which can be overridden */
36263 diff -urNp linux-2.6.32.45/drivers/scsi/ipr.c linux-2.6.32.45/drivers/scsi/ipr.c
36264 --- linux-2.6.32.45/drivers/scsi/ipr.c 2011-03-27 14:31:47.000000000 -0400
36265 +++ linux-2.6.32.45/drivers/scsi/ipr.c 2011-04-17 15:56:46.000000000 -0400
36266 @@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_q
36267 return true;
36268 }
36269
36270 -static struct ata_port_operations ipr_sata_ops = {
36271 +static const struct ata_port_operations ipr_sata_ops = {
36272 .phy_reset = ipr_ata_phy_reset,
36273 .hardreset = ipr_sata_reset,
36274 .post_internal_cmd = ipr_ata_post_internal,
36275 diff -urNp linux-2.6.32.45/drivers/scsi/ips.h linux-2.6.32.45/drivers/scsi/ips.h
36276 --- linux-2.6.32.45/drivers/scsi/ips.h 2011-03-27 14:31:47.000000000 -0400
36277 +++ linux-2.6.32.45/drivers/scsi/ips.h 2011-08-05 20:33:55.000000000 -0400
36278 @@ -1027,7 +1027,7 @@ typedef struct {
36279 int (*intr)(struct ips_ha *);
36280 void (*enableint)(struct ips_ha *);
36281 uint32_t (*statupd)(struct ips_ha *);
36282 -} ips_hw_func_t;
36283 +} __no_const ips_hw_func_t;
36284
36285 typedef struct ips_ha {
36286 uint8_t ha_id[IPS_MAX_CHANNELS+1];
36287 diff -urNp linux-2.6.32.45/drivers/scsi/libfc/fc_exch.c linux-2.6.32.45/drivers/scsi/libfc/fc_exch.c
36288 --- linux-2.6.32.45/drivers/scsi/libfc/fc_exch.c 2011-03-27 14:31:47.000000000 -0400
36289 +++ linux-2.6.32.45/drivers/scsi/libfc/fc_exch.c 2011-08-23 21:22:32.000000000 -0400
36290 @@ -86,12 +86,12 @@ struct fc_exch_mgr {
36291 * all together if not used XXX
36292 */
36293 struct {
36294 - atomic_t no_free_exch;
36295 - atomic_t no_free_exch_xid;
36296 - atomic_t xid_not_found;
36297 - atomic_t xid_busy;
36298 - atomic_t seq_not_found;
36299 - atomic_t non_bls_resp;
36300 + atomic_unchecked_t no_free_exch;
36301 + atomic_unchecked_t no_free_exch_xid;
36302 + atomic_unchecked_t xid_not_found;
36303 + atomic_unchecked_t xid_busy;
36304 + atomic_unchecked_t seq_not_found;
36305 + atomic_unchecked_t non_bls_resp;
36306 } stats;
36307 };
36308 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
36309 @@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(
36310 /* allocate memory for exchange */
36311 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
36312 if (!ep) {
36313 - atomic_inc(&mp->stats.no_free_exch);
36314 + atomic_inc_unchecked(&mp->stats.no_free_exch);
36315 goto out;
36316 }
36317 memset(ep, 0, sizeof(*ep));
36318 @@ -557,7 +557,7 @@ out:
36319 return ep;
36320 err:
36321 spin_unlock_bh(&pool->lock);
36322 - atomic_inc(&mp->stats.no_free_exch_xid);
36323 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
36324 mempool_free(ep, mp->ep_pool);
36325 return NULL;
36326 }
36327 @@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36328 xid = ntohs(fh->fh_ox_id); /* we originated exch */
36329 ep = fc_exch_find(mp, xid);
36330 if (!ep) {
36331 - atomic_inc(&mp->stats.xid_not_found);
36332 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36333 reject = FC_RJT_OX_ID;
36334 goto out;
36335 }
36336 @@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36337 ep = fc_exch_find(mp, xid);
36338 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
36339 if (ep) {
36340 - atomic_inc(&mp->stats.xid_busy);
36341 + atomic_inc_unchecked(&mp->stats.xid_busy);
36342 reject = FC_RJT_RX_ID;
36343 goto rel;
36344 }
36345 @@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36346 }
36347 xid = ep->xid; /* get our XID */
36348 } else if (!ep) {
36349 - atomic_inc(&mp->stats.xid_not_found);
36350 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36351 reject = FC_RJT_RX_ID; /* XID not found */
36352 goto out;
36353 }
36354 @@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36355 } else {
36356 sp = &ep->seq;
36357 if (sp->id != fh->fh_seq_id) {
36358 - atomic_inc(&mp->stats.seq_not_found);
36359 + atomic_inc_unchecked(&mp->stats.seq_not_found);
36360 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
36361 goto rel;
36362 }
36363 @@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct
36364
36365 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
36366 if (!ep) {
36367 - atomic_inc(&mp->stats.xid_not_found);
36368 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36369 goto out;
36370 }
36371 if (ep->esb_stat & ESB_ST_COMPLETE) {
36372 - atomic_inc(&mp->stats.xid_not_found);
36373 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36374 goto out;
36375 }
36376 if (ep->rxid == FC_XID_UNKNOWN)
36377 ep->rxid = ntohs(fh->fh_rx_id);
36378 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
36379 - atomic_inc(&mp->stats.xid_not_found);
36380 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36381 goto rel;
36382 }
36383 if (ep->did != ntoh24(fh->fh_s_id) &&
36384 ep->did != FC_FID_FLOGI) {
36385 - atomic_inc(&mp->stats.xid_not_found);
36386 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36387 goto rel;
36388 }
36389 sof = fr_sof(fp);
36390 @@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct
36391 } else {
36392 sp = &ep->seq;
36393 if (sp->id != fh->fh_seq_id) {
36394 - atomic_inc(&mp->stats.seq_not_found);
36395 + atomic_inc_unchecked(&mp->stats.seq_not_found);
36396 goto rel;
36397 }
36398 }
36399 @@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_
36400 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
36401
36402 if (!sp)
36403 - atomic_inc(&mp->stats.xid_not_found);
36404 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36405 else
36406 - atomic_inc(&mp->stats.non_bls_resp);
36407 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
36408
36409 fc_frame_free(fp);
36410 }
36411 diff -urNp linux-2.6.32.45/drivers/scsi/libsas/sas_ata.c linux-2.6.32.45/drivers/scsi/libsas/sas_ata.c
36412 --- linux-2.6.32.45/drivers/scsi/libsas/sas_ata.c 2011-03-27 14:31:47.000000000 -0400
36413 +++ linux-2.6.32.45/drivers/scsi/libsas/sas_ata.c 2011-04-23 12:56:11.000000000 -0400
36414 @@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_l
36415 }
36416 }
36417
36418 -static struct ata_port_operations sas_sata_ops = {
36419 +static const struct ata_port_operations sas_sata_ops = {
36420 .phy_reset = sas_ata_phy_reset,
36421 .post_internal_cmd = sas_ata_post_internal,
36422 .qc_defer = ata_std_qc_defer,
36423 diff -urNp linux-2.6.32.45/drivers/scsi/lpfc/lpfc_debugfs.c linux-2.6.32.45/drivers/scsi/lpfc/lpfc_debugfs.c
36424 --- linux-2.6.32.45/drivers/scsi/lpfc/lpfc_debugfs.c 2011-03-27 14:31:47.000000000 -0400
36425 +++ linux-2.6.32.45/drivers/scsi/lpfc/lpfc_debugfs.c 2011-05-16 21:46:57.000000000 -0400
36426 @@ -124,7 +124,7 @@ struct lpfc_debug {
36427 int len;
36428 };
36429
36430 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36431 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36432 static unsigned long lpfc_debugfs_start_time = 0L;
36433
36434 /**
36435 @@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
36436 lpfc_debugfs_enable = 0;
36437
36438 len = 0;
36439 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
36440 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
36441 (lpfc_debugfs_max_disc_trc - 1);
36442 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
36443 dtp = vport->disc_trc + i;
36444 @@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
36445 lpfc_debugfs_enable = 0;
36446
36447 len = 0;
36448 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
36449 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
36450 (lpfc_debugfs_max_slow_ring_trc - 1);
36451 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
36452 dtp = phba->slow_ring_trc + i;
36453 @@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
36454 uint32_t *ptr;
36455 char buffer[1024];
36456
36457 + pax_track_stack();
36458 +
36459 off = 0;
36460 spin_lock_irq(&phba->hbalock);
36461
36462 @@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
36463 !vport || !vport->disc_trc)
36464 return;
36465
36466 - index = atomic_inc_return(&vport->disc_trc_cnt) &
36467 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
36468 (lpfc_debugfs_max_disc_trc - 1);
36469 dtp = vport->disc_trc + index;
36470 dtp->fmt = fmt;
36471 dtp->data1 = data1;
36472 dtp->data2 = data2;
36473 dtp->data3 = data3;
36474 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36475 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36476 dtp->jif = jiffies;
36477 #endif
36478 return;
36479 @@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
36480 !phba || !phba->slow_ring_trc)
36481 return;
36482
36483 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
36484 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
36485 (lpfc_debugfs_max_slow_ring_trc - 1);
36486 dtp = phba->slow_ring_trc + index;
36487 dtp->fmt = fmt;
36488 dtp->data1 = data1;
36489 dtp->data2 = data2;
36490 dtp->data3 = data3;
36491 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36492 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36493 dtp->jif = jiffies;
36494 #endif
36495 return;
36496 @@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
36497 "slow_ring buffer\n");
36498 goto debug_failed;
36499 }
36500 - atomic_set(&phba->slow_ring_trc_cnt, 0);
36501 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
36502 memset(phba->slow_ring_trc, 0,
36503 (sizeof(struct lpfc_debugfs_trc) *
36504 lpfc_debugfs_max_slow_ring_trc));
36505 @@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
36506 "buffer\n");
36507 goto debug_failed;
36508 }
36509 - atomic_set(&vport->disc_trc_cnt, 0);
36510 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
36511
36512 snprintf(name, sizeof(name), "discovery_trace");
36513 vport->debug_disc_trc =
36514 diff -urNp linux-2.6.32.45/drivers/scsi/lpfc/lpfc.h linux-2.6.32.45/drivers/scsi/lpfc/lpfc.h
36515 --- linux-2.6.32.45/drivers/scsi/lpfc/lpfc.h 2011-03-27 14:31:47.000000000 -0400
36516 +++ linux-2.6.32.45/drivers/scsi/lpfc/lpfc.h 2011-05-04 17:56:28.000000000 -0400
36517 @@ -400,7 +400,7 @@ struct lpfc_vport {
36518 struct dentry *debug_nodelist;
36519 struct dentry *vport_debugfs_root;
36520 struct lpfc_debugfs_trc *disc_trc;
36521 - atomic_t disc_trc_cnt;
36522 + atomic_unchecked_t disc_trc_cnt;
36523 #endif
36524 uint8_t stat_data_enabled;
36525 uint8_t stat_data_blocked;
36526 @@ -725,8 +725,8 @@ struct lpfc_hba {
36527 struct timer_list fabric_block_timer;
36528 unsigned long bit_flags;
36529 #define FABRIC_COMANDS_BLOCKED 0
36530 - atomic_t num_rsrc_err;
36531 - atomic_t num_cmd_success;
36532 + atomic_unchecked_t num_rsrc_err;
36533 + atomic_unchecked_t num_cmd_success;
36534 unsigned long last_rsrc_error_time;
36535 unsigned long last_ramp_down_time;
36536 unsigned long last_ramp_up_time;
36537 @@ -740,7 +740,7 @@ struct lpfc_hba {
36538 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
36539 struct dentry *debug_slow_ring_trc;
36540 struct lpfc_debugfs_trc *slow_ring_trc;
36541 - atomic_t slow_ring_trc_cnt;
36542 + atomic_unchecked_t slow_ring_trc_cnt;
36543 #endif
36544
36545 /* Used for deferred freeing of ELS data buffers */
36546 diff -urNp linux-2.6.32.45/drivers/scsi/lpfc/lpfc_init.c linux-2.6.32.45/drivers/scsi/lpfc/lpfc_init.c
36547 --- linux-2.6.32.45/drivers/scsi/lpfc/lpfc_init.c 2011-03-27 14:31:47.000000000 -0400
36548 +++ linux-2.6.32.45/drivers/scsi/lpfc/lpfc_init.c 2011-08-05 20:33:55.000000000 -0400
36549 @@ -8021,8 +8021,10 @@ lpfc_init(void)
36550 printk(LPFC_COPYRIGHT "\n");
36551
36552 if (lpfc_enable_npiv) {
36553 - lpfc_transport_functions.vport_create = lpfc_vport_create;
36554 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36555 + pax_open_kernel();
36556 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
36557 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36558 + pax_close_kernel();
36559 }
36560 lpfc_transport_template =
36561 fc_attach_transport(&lpfc_transport_functions);
36562 diff -urNp linux-2.6.32.45/drivers/scsi/lpfc/lpfc_scsi.c linux-2.6.32.45/drivers/scsi/lpfc/lpfc_scsi.c
36563 --- linux-2.6.32.45/drivers/scsi/lpfc/lpfc_scsi.c 2011-03-27 14:31:47.000000000 -0400
36564 +++ linux-2.6.32.45/drivers/scsi/lpfc/lpfc_scsi.c 2011-05-04 17:56:28.000000000 -0400
36565 @@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
36566 uint32_t evt_posted;
36567
36568 spin_lock_irqsave(&phba->hbalock, flags);
36569 - atomic_inc(&phba->num_rsrc_err);
36570 + atomic_inc_unchecked(&phba->num_rsrc_err);
36571 phba->last_rsrc_error_time = jiffies;
36572
36573 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
36574 @@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
36575 unsigned long flags;
36576 struct lpfc_hba *phba = vport->phba;
36577 uint32_t evt_posted;
36578 - atomic_inc(&phba->num_cmd_success);
36579 + atomic_inc_unchecked(&phba->num_cmd_success);
36580
36581 if (vport->cfg_lun_queue_depth <= queue_depth)
36582 return;
36583 @@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
36584 int i;
36585 struct lpfc_rport_data *rdata;
36586
36587 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
36588 - num_cmd_success = atomic_read(&phba->num_cmd_success);
36589 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
36590 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
36591
36592 vports = lpfc_create_vport_work_array(phba);
36593 if (vports != NULL)
36594 @@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
36595 }
36596 }
36597 lpfc_destroy_vport_work_array(phba, vports);
36598 - atomic_set(&phba->num_rsrc_err, 0);
36599 - atomic_set(&phba->num_cmd_success, 0);
36600 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
36601 + atomic_set_unchecked(&phba->num_cmd_success, 0);
36602 }
36603
36604 /**
36605 @@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
36606 }
36607 }
36608 lpfc_destroy_vport_work_array(phba, vports);
36609 - atomic_set(&phba->num_rsrc_err, 0);
36610 - atomic_set(&phba->num_cmd_success, 0);
36611 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
36612 + atomic_set_unchecked(&phba->num_cmd_success, 0);
36613 }
36614
36615 /**
36616 diff -urNp linux-2.6.32.45/drivers/scsi/megaraid/megaraid_mbox.c linux-2.6.32.45/drivers/scsi/megaraid/megaraid_mbox.c
36617 --- linux-2.6.32.45/drivers/scsi/megaraid/megaraid_mbox.c 2011-03-27 14:31:47.000000000 -0400
36618 +++ linux-2.6.32.45/drivers/scsi/megaraid/megaraid_mbox.c 2011-05-16 21:46:57.000000000 -0400
36619 @@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
36620 int rval;
36621 int i;
36622
36623 + pax_track_stack();
36624 +
36625 // Allocate memory for the base list of scb for management module.
36626 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
36627
36628 diff -urNp linux-2.6.32.45/drivers/scsi/osd/osd_initiator.c linux-2.6.32.45/drivers/scsi/osd/osd_initiator.c
36629 --- linux-2.6.32.45/drivers/scsi/osd/osd_initiator.c 2011-03-27 14:31:47.000000000 -0400
36630 +++ linux-2.6.32.45/drivers/scsi/osd/osd_initiator.c 2011-05-16 21:46:57.000000000 -0400
36631 @@ -94,6 +94,8 @@ static int _osd_print_system_info(struct
36632 int nelem = ARRAY_SIZE(get_attrs), a = 0;
36633 int ret;
36634
36635 + pax_track_stack();
36636 +
36637 or = osd_start_request(od, GFP_KERNEL);
36638 if (!or)
36639 return -ENOMEM;
36640 diff -urNp linux-2.6.32.45/drivers/scsi/pmcraid.c linux-2.6.32.45/drivers/scsi/pmcraid.c
36641 --- linux-2.6.32.45/drivers/scsi/pmcraid.c 2011-08-09 18:35:29.000000000 -0400
36642 +++ linux-2.6.32.45/drivers/scsi/pmcraid.c 2011-08-09 18:33:59.000000000 -0400
36643 @@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct sc
36644 res->scsi_dev = scsi_dev;
36645 scsi_dev->hostdata = res;
36646 res->change_detected = 0;
36647 - atomic_set(&res->read_failures, 0);
36648 - atomic_set(&res->write_failures, 0);
36649 + atomic_set_unchecked(&res->read_failures, 0);
36650 + atomic_set_unchecked(&res->write_failures, 0);
36651 rc = 0;
36652 }
36653 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
36654 @@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct
36655
36656 /* If this was a SCSI read/write command keep count of errors */
36657 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
36658 - atomic_inc(&res->read_failures);
36659 + atomic_inc_unchecked(&res->read_failures);
36660 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
36661 - atomic_inc(&res->write_failures);
36662 + atomic_inc_unchecked(&res->write_failures);
36663
36664 if (!RES_IS_GSCSI(res->cfg_entry) &&
36665 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
36666 @@ -4116,7 +4116,7 @@ static void pmcraid_worker_function(stru
36667
36668 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
36669 /* add resources only after host is added into system */
36670 - if (!atomic_read(&pinstance->expose_resources))
36671 + if (!atomic_read_unchecked(&pinstance->expose_resources))
36672 return;
36673
36674 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
36675 @@ -4850,7 +4850,7 @@ static int __devinit pmcraid_init_instan
36676 init_waitqueue_head(&pinstance->reset_wait_q);
36677
36678 atomic_set(&pinstance->outstanding_cmds, 0);
36679 - atomic_set(&pinstance->expose_resources, 0);
36680 + atomic_set_unchecked(&pinstance->expose_resources, 0);
36681
36682 INIT_LIST_HEAD(&pinstance->free_res_q);
36683 INIT_LIST_HEAD(&pinstance->used_res_q);
36684 @@ -5502,7 +5502,7 @@ static int __devinit pmcraid_probe(
36685 /* Schedule worker thread to handle CCN and take care of adding and
36686 * removing devices to OS
36687 */
36688 - atomic_set(&pinstance->expose_resources, 1);
36689 + atomic_set_unchecked(&pinstance->expose_resources, 1);
36690 schedule_work(&pinstance->worker_q);
36691 return rc;
36692
36693 diff -urNp linux-2.6.32.45/drivers/scsi/pmcraid.h linux-2.6.32.45/drivers/scsi/pmcraid.h
36694 --- linux-2.6.32.45/drivers/scsi/pmcraid.h 2011-03-27 14:31:47.000000000 -0400
36695 +++ linux-2.6.32.45/drivers/scsi/pmcraid.h 2011-05-04 17:56:28.000000000 -0400
36696 @@ -690,7 +690,7 @@ struct pmcraid_instance {
36697 atomic_t outstanding_cmds;
36698
36699 /* should add/delete resources to mid-layer now ?*/
36700 - atomic_t expose_resources;
36701 + atomic_unchecked_t expose_resources;
36702
36703 /* Tasklet to handle deferred processing */
36704 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
36705 @@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
36706 struct list_head queue; /* link to "to be exposed" resources */
36707 struct pmcraid_config_table_entry cfg_entry;
36708 struct scsi_device *scsi_dev; /* Link scsi_device structure */
36709 - atomic_t read_failures; /* count of failed READ commands */
36710 - atomic_t write_failures; /* count of failed WRITE commands */
36711 + atomic_unchecked_t read_failures; /* count of failed READ commands */
36712 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
36713
36714 /* To indicate add/delete/modify during CCN */
36715 u8 change_detected;
36716 diff -urNp linux-2.6.32.45/drivers/scsi/qla2xxx/qla_def.h linux-2.6.32.45/drivers/scsi/qla2xxx/qla_def.h
36717 --- linux-2.6.32.45/drivers/scsi/qla2xxx/qla_def.h 2011-03-27 14:31:47.000000000 -0400
36718 +++ linux-2.6.32.45/drivers/scsi/qla2xxx/qla_def.h 2011-08-05 20:33:55.000000000 -0400
36719 @@ -2089,7 +2089,7 @@ struct isp_operations {
36720
36721 int (*get_flash_version) (struct scsi_qla_host *, void *);
36722 int (*start_scsi) (srb_t *);
36723 -};
36724 +} __no_const;
36725
36726 /* MSI-X Support *************************************************************/
36727
36728 diff -urNp linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_def.h linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_def.h
36729 --- linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_def.h 2011-03-27 14:31:47.000000000 -0400
36730 +++ linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_def.h 2011-05-04 17:56:28.000000000 -0400
36731 @@ -240,7 +240,7 @@ struct ddb_entry {
36732 atomic_t retry_relogin_timer; /* Min Time between relogins
36733 * (4000 only) */
36734 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
36735 - atomic_t relogin_retry_count; /* Num of times relogin has been
36736 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
36737 * retried */
36738
36739 uint16_t port;
36740 diff -urNp linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_init.c linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_init.c
36741 --- linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_init.c 2011-03-27 14:31:47.000000000 -0400
36742 +++ linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_init.c 2011-05-04 17:56:28.000000000 -0400
36743 @@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_
36744 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
36745 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
36746 atomic_set(&ddb_entry->relogin_timer, 0);
36747 - atomic_set(&ddb_entry->relogin_retry_count, 0);
36748 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
36749 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
36750 list_add_tail(&ddb_entry->list, &ha->ddb_list);
36751 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
36752 @@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct s
36753 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
36754 atomic_set(&ddb_entry->port_down_timer,
36755 ha->port_down_retry_count);
36756 - atomic_set(&ddb_entry->relogin_retry_count, 0);
36757 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
36758 atomic_set(&ddb_entry->relogin_timer, 0);
36759 clear_bit(DF_RELOGIN, &ddb_entry->flags);
36760 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
36761 diff -urNp linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_os.c linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_os.c
36762 --- linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_os.c 2011-03-27 14:31:47.000000000 -0400
36763 +++ linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_os.c 2011-05-04 17:56:28.000000000 -0400
36764 @@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_ql
36765 ddb_entry->fw_ddb_device_state ==
36766 DDB_DS_SESSION_FAILED) {
36767 /* Reset retry relogin timer */
36768 - atomic_inc(&ddb_entry->relogin_retry_count);
36769 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
36770 DEBUG2(printk("scsi%ld: index[%d] relogin"
36771 " timed out-retrying"
36772 " relogin (%d)\n",
36773 ha->host_no,
36774 ddb_entry->fw_ddb_index,
36775 - atomic_read(&ddb_entry->
36776 + atomic_read_unchecked(&ddb_entry->
36777 relogin_retry_count))
36778 );
36779 start_dpc++;
36780 diff -urNp linux-2.6.32.45/drivers/scsi/scsi.c linux-2.6.32.45/drivers/scsi/scsi.c
36781 --- linux-2.6.32.45/drivers/scsi/scsi.c 2011-03-27 14:31:47.000000000 -0400
36782 +++ linux-2.6.32.45/drivers/scsi/scsi.c 2011-05-04 17:56:28.000000000 -0400
36783 @@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
36784 unsigned long timeout;
36785 int rtn = 0;
36786
36787 - atomic_inc(&cmd->device->iorequest_cnt);
36788 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36789
36790 /* check if the device is still usable */
36791 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
36792 diff -urNp linux-2.6.32.45/drivers/scsi/scsi_debug.c linux-2.6.32.45/drivers/scsi/scsi_debug.c
36793 --- linux-2.6.32.45/drivers/scsi/scsi_debug.c 2011-03-27 14:31:47.000000000 -0400
36794 +++ linux-2.6.32.45/drivers/scsi/scsi_debug.c 2011-05-16 21:46:57.000000000 -0400
36795 @@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_
36796 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
36797 unsigned char *cmd = (unsigned char *)scp->cmnd;
36798
36799 + pax_track_stack();
36800 +
36801 if ((errsts = check_readiness(scp, 1, devip)))
36802 return errsts;
36803 memset(arr, 0, sizeof(arr));
36804 @@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cm
36805 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
36806 unsigned char *cmd = (unsigned char *)scp->cmnd;
36807
36808 + pax_track_stack();
36809 +
36810 if ((errsts = check_readiness(scp, 1, devip)))
36811 return errsts;
36812 memset(arr, 0, sizeof(arr));
36813 diff -urNp linux-2.6.32.45/drivers/scsi/scsi_lib.c linux-2.6.32.45/drivers/scsi/scsi_lib.c
36814 --- linux-2.6.32.45/drivers/scsi/scsi_lib.c 2011-05-10 22:12:01.000000000 -0400
36815 +++ linux-2.6.32.45/drivers/scsi/scsi_lib.c 2011-05-10 22:12:33.000000000 -0400
36816 @@ -1384,7 +1384,7 @@ static void scsi_kill_request(struct req
36817
36818 scsi_init_cmd_errh(cmd);
36819 cmd->result = DID_NO_CONNECT << 16;
36820 - atomic_inc(&cmd->device->iorequest_cnt);
36821 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36822
36823 /*
36824 * SCSI request completion path will do scsi_device_unbusy(),
36825 @@ -1415,9 +1415,9 @@ static void scsi_softirq_done(struct req
36826 */
36827 cmd->serial_number = 0;
36828
36829 - atomic_inc(&cmd->device->iodone_cnt);
36830 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
36831 if (cmd->result)
36832 - atomic_inc(&cmd->device->ioerr_cnt);
36833 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
36834
36835 disposition = scsi_decide_disposition(cmd);
36836 if (disposition != SUCCESS &&
36837 diff -urNp linux-2.6.32.45/drivers/scsi/scsi_sysfs.c linux-2.6.32.45/drivers/scsi/scsi_sysfs.c
36838 --- linux-2.6.32.45/drivers/scsi/scsi_sysfs.c 2011-06-25 12:55:34.000000000 -0400
36839 +++ linux-2.6.32.45/drivers/scsi/scsi_sysfs.c 2011-06-25 12:56:37.000000000 -0400
36840 @@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev,
36841 char *buf) \
36842 { \
36843 struct scsi_device *sdev = to_scsi_device(dev); \
36844 - unsigned long long count = atomic_read(&sdev->field); \
36845 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
36846 return snprintf(buf, 20, "0x%llx\n", count); \
36847 } \
36848 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
36849 diff -urNp linux-2.6.32.45/drivers/scsi/scsi_transport_fc.c linux-2.6.32.45/drivers/scsi/scsi_transport_fc.c
36850 --- linux-2.6.32.45/drivers/scsi/scsi_transport_fc.c 2011-03-27 14:31:47.000000000 -0400
36851 +++ linux-2.6.32.45/drivers/scsi/scsi_transport_fc.c 2011-05-04 17:56:28.000000000 -0400
36852 @@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
36853 * Netlink Infrastructure
36854 */
36855
36856 -static atomic_t fc_event_seq;
36857 +static atomic_unchecked_t fc_event_seq;
36858
36859 /**
36860 * fc_get_event_number - Obtain the next sequential FC event number
36861 @@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
36862 u32
36863 fc_get_event_number(void)
36864 {
36865 - return atomic_add_return(1, &fc_event_seq);
36866 + return atomic_add_return_unchecked(1, &fc_event_seq);
36867 }
36868 EXPORT_SYMBOL(fc_get_event_number);
36869
36870 @@ -641,7 +641,7 @@ static __init int fc_transport_init(void
36871 {
36872 int error;
36873
36874 - atomic_set(&fc_event_seq, 0);
36875 + atomic_set_unchecked(&fc_event_seq, 0);
36876
36877 error = transport_class_register(&fc_host_class);
36878 if (error)
36879 diff -urNp linux-2.6.32.45/drivers/scsi/scsi_transport_iscsi.c linux-2.6.32.45/drivers/scsi/scsi_transport_iscsi.c
36880 --- linux-2.6.32.45/drivers/scsi/scsi_transport_iscsi.c 2011-03-27 14:31:47.000000000 -0400
36881 +++ linux-2.6.32.45/drivers/scsi/scsi_transport_iscsi.c 2011-05-04 17:56:28.000000000 -0400
36882 @@ -81,7 +81,7 @@ struct iscsi_internal {
36883 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
36884 };
36885
36886 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
36887 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
36888 static struct workqueue_struct *iscsi_eh_timer_workq;
36889
36890 /*
36891 @@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_s
36892 int err;
36893
36894 ihost = shost->shost_data;
36895 - session->sid = atomic_add_return(1, &iscsi_session_nr);
36896 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
36897
36898 if (id == ISCSI_MAX_TARGET) {
36899 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
36900 @@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(v
36901 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
36902 ISCSI_TRANSPORT_VERSION);
36903
36904 - atomic_set(&iscsi_session_nr, 0);
36905 + atomic_set_unchecked(&iscsi_session_nr, 0);
36906
36907 err = class_register(&iscsi_transport_class);
36908 if (err)
36909 diff -urNp linux-2.6.32.45/drivers/scsi/scsi_transport_srp.c linux-2.6.32.45/drivers/scsi/scsi_transport_srp.c
36910 --- linux-2.6.32.45/drivers/scsi/scsi_transport_srp.c 2011-03-27 14:31:47.000000000 -0400
36911 +++ linux-2.6.32.45/drivers/scsi/scsi_transport_srp.c 2011-05-04 17:56:28.000000000 -0400
36912 @@ -33,7 +33,7 @@
36913 #include "scsi_transport_srp_internal.h"
36914
36915 struct srp_host_attrs {
36916 - atomic_t next_port_id;
36917 + atomic_unchecked_t next_port_id;
36918 };
36919 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
36920
36921 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
36922 struct Scsi_Host *shost = dev_to_shost(dev);
36923 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
36924
36925 - atomic_set(&srp_host->next_port_id, 0);
36926 + atomic_set_unchecked(&srp_host->next_port_id, 0);
36927 return 0;
36928 }
36929
36930 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
36931 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
36932 rport->roles = ids->roles;
36933
36934 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
36935 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
36936 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
36937
36938 transport_setup_device(&rport->dev);
36939 diff -urNp linux-2.6.32.45/drivers/scsi/sg.c linux-2.6.32.45/drivers/scsi/sg.c
36940 --- linux-2.6.32.45/drivers/scsi/sg.c 2011-03-27 14:31:47.000000000 -0400
36941 +++ linux-2.6.32.45/drivers/scsi/sg.c 2011-04-17 15:56:46.000000000 -0400
36942 @@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
36943 const struct file_operations * fops;
36944 };
36945
36946 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
36947 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
36948 {"allow_dio", &adio_fops},
36949 {"debug", &debug_fops},
36950 {"def_reserved_size", &dressz_fops},
36951 @@ -2307,7 +2307,7 @@ sg_proc_init(void)
36952 {
36953 int k, mask;
36954 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
36955 - struct sg_proc_leaf * leaf;
36956 + const struct sg_proc_leaf * leaf;
36957
36958 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
36959 if (!sg_proc_sgp)
36960 diff -urNp linux-2.6.32.45/drivers/scsi/sym53c8xx_2/sym_glue.c linux-2.6.32.45/drivers/scsi/sym53c8xx_2/sym_glue.c
36961 --- linux-2.6.32.45/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-03-27 14:31:47.000000000 -0400
36962 +++ linux-2.6.32.45/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-05-16 21:46:57.000000000 -0400
36963 @@ -1754,6 +1754,8 @@ static int __devinit sym2_probe(struct p
36964 int do_iounmap = 0;
36965 int do_disable_device = 1;
36966
36967 + pax_track_stack();
36968 +
36969 memset(&sym_dev, 0, sizeof(sym_dev));
36970 memset(&nvram, 0, sizeof(nvram));
36971 sym_dev.pdev = pdev;
36972 diff -urNp linux-2.6.32.45/drivers/serial/kgdboc.c linux-2.6.32.45/drivers/serial/kgdboc.c
36973 --- linux-2.6.32.45/drivers/serial/kgdboc.c 2011-03-27 14:31:47.000000000 -0400
36974 +++ linux-2.6.32.45/drivers/serial/kgdboc.c 2011-04-17 15:56:46.000000000 -0400
36975 @@ -18,7 +18,7 @@
36976
36977 #define MAX_CONFIG_LEN 40
36978
36979 -static struct kgdb_io kgdboc_io_ops;
36980 +static const struct kgdb_io kgdboc_io_ops;
36981
36982 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
36983 static int configured = -1;
36984 @@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void
36985 module_put(THIS_MODULE);
36986 }
36987
36988 -static struct kgdb_io kgdboc_io_ops = {
36989 +static const struct kgdb_io kgdboc_io_ops = {
36990 .name = "kgdboc",
36991 .read_char = kgdboc_get_char,
36992 .write_char = kgdboc_put_char,
36993 diff -urNp linux-2.6.32.45/drivers/spi/spi.c linux-2.6.32.45/drivers/spi/spi.c
36994 --- linux-2.6.32.45/drivers/spi/spi.c 2011-03-27 14:31:47.000000000 -0400
36995 +++ linux-2.6.32.45/drivers/spi/spi.c 2011-05-04 17:56:28.000000000 -0400
36996 @@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, str
36997 EXPORT_SYMBOL_GPL(spi_sync);
36998
36999 /* portable code must never pass more than 32 bytes */
37000 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
37001 +#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
37002
37003 static u8 *buf;
37004
37005 diff -urNp linux-2.6.32.45/drivers/staging/android/binder.c linux-2.6.32.45/drivers/staging/android/binder.c
37006 --- linux-2.6.32.45/drivers/staging/android/binder.c 2011-03-27 14:31:47.000000000 -0400
37007 +++ linux-2.6.32.45/drivers/staging/android/binder.c 2011-04-17 15:56:46.000000000 -0400
37008 @@ -2756,7 +2756,7 @@ static void binder_vma_close(struct vm_a
37009 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
37010 }
37011
37012 -static struct vm_operations_struct binder_vm_ops = {
37013 +static const struct vm_operations_struct binder_vm_ops = {
37014 .open = binder_vma_open,
37015 .close = binder_vma_close,
37016 };
37017 diff -urNp linux-2.6.32.45/drivers/staging/b3dfg/b3dfg.c linux-2.6.32.45/drivers/staging/b3dfg/b3dfg.c
37018 --- linux-2.6.32.45/drivers/staging/b3dfg/b3dfg.c 2011-03-27 14:31:47.000000000 -0400
37019 +++ linux-2.6.32.45/drivers/staging/b3dfg/b3dfg.c 2011-04-17 15:56:46.000000000 -0400
37020 @@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_are
37021 return VM_FAULT_NOPAGE;
37022 }
37023
37024 -static struct vm_operations_struct b3dfg_vm_ops = {
37025 +static const struct vm_operations_struct b3dfg_vm_ops = {
37026 .fault = b3dfg_vma_fault,
37027 };
37028
37029 @@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp,
37030 return r;
37031 }
37032
37033 -static struct file_operations b3dfg_fops = {
37034 +static const struct file_operations b3dfg_fops = {
37035 .owner = THIS_MODULE,
37036 .open = b3dfg_open,
37037 .release = b3dfg_release,
37038 diff -urNp linux-2.6.32.45/drivers/staging/comedi/comedi_fops.c linux-2.6.32.45/drivers/staging/comedi/comedi_fops.c
37039 --- linux-2.6.32.45/drivers/staging/comedi/comedi_fops.c 2011-08-09 18:35:29.000000000 -0400
37040 +++ linux-2.6.32.45/drivers/staging/comedi/comedi_fops.c 2011-08-09 18:34:00.000000000 -0400
37041 @@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct
37042 mutex_unlock(&dev->mutex);
37043 }
37044
37045 -static struct vm_operations_struct comedi_vm_ops = {
37046 +static const struct vm_operations_struct comedi_vm_ops = {
37047 .close = comedi_unmap,
37048 };
37049
37050 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/adsp_driver.c linux-2.6.32.45/drivers/staging/dream/qdsp5/adsp_driver.c
37051 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/adsp_driver.c 2011-03-27 14:31:47.000000000 -0400
37052 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/adsp_driver.c 2011-04-17 15:56:46.000000000 -0400
37053 @@ -576,7 +576,7 @@ static struct adsp_device *inode_to_devi
37054 static dev_t adsp_devno;
37055 static struct class *adsp_class;
37056
37057 -static struct file_operations adsp_fops = {
37058 +static const struct file_operations adsp_fops = {
37059 .owner = THIS_MODULE,
37060 .open = adsp_open,
37061 .unlocked_ioctl = adsp_ioctl,
37062 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_aac.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_aac.c
37063 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_aac.c 2011-03-27 14:31:47.000000000 -0400
37064 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_aac.c 2011-04-17 15:56:46.000000000 -0400
37065 @@ -1022,7 +1022,7 @@ done:
37066 return rc;
37067 }
37068
37069 -static struct file_operations audio_aac_fops = {
37070 +static const struct file_operations audio_aac_fops = {
37071 .owner = THIS_MODULE,
37072 .open = audio_open,
37073 .release = audio_release,
37074 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_amrnb.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_amrnb.c
37075 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-03-27 14:31:47.000000000 -0400
37076 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-04-17 15:56:46.000000000 -0400
37077 @@ -833,7 +833,7 @@ done:
37078 return rc;
37079 }
37080
37081 -static struct file_operations audio_amrnb_fops = {
37082 +static const struct file_operations audio_amrnb_fops = {
37083 .owner = THIS_MODULE,
37084 .open = audamrnb_open,
37085 .release = audamrnb_release,
37086 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_evrc.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_evrc.c
37087 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_evrc.c 2011-03-27 14:31:47.000000000 -0400
37088 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_evrc.c 2011-04-17 15:56:46.000000000 -0400
37089 @@ -805,7 +805,7 @@ dma_fail:
37090 return rc;
37091 }
37092
37093 -static struct file_operations audio_evrc_fops = {
37094 +static const struct file_operations audio_evrc_fops = {
37095 .owner = THIS_MODULE,
37096 .open = audevrc_open,
37097 .release = audevrc_release,
37098 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_in.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_in.c
37099 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_in.c 2011-03-27 14:31:47.000000000 -0400
37100 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_in.c 2011-04-17 15:56:46.000000000 -0400
37101 @@ -913,7 +913,7 @@ static int audpre_open(struct inode *ino
37102 return 0;
37103 }
37104
37105 -static struct file_operations audio_fops = {
37106 +static const struct file_operations audio_fops = {
37107 .owner = THIS_MODULE,
37108 .open = audio_in_open,
37109 .release = audio_in_release,
37110 @@ -922,7 +922,7 @@ static struct file_operations audio_fops
37111 .unlocked_ioctl = audio_in_ioctl,
37112 };
37113
37114 -static struct file_operations audpre_fops = {
37115 +static const struct file_operations audpre_fops = {
37116 .owner = THIS_MODULE,
37117 .open = audpre_open,
37118 .unlocked_ioctl = audpre_ioctl,
37119 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_mp3.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_mp3.c
37120 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_mp3.c 2011-03-27 14:31:47.000000000 -0400
37121 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_mp3.c 2011-04-17 15:56:46.000000000 -0400
37122 @@ -941,7 +941,7 @@ done:
37123 return rc;
37124 }
37125
37126 -static struct file_operations audio_mp3_fops = {
37127 +static const struct file_operations audio_mp3_fops = {
37128 .owner = THIS_MODULE,
37129 .open = audio_open,
37130 .release = audio_release,
37131 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_out.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_out.c
37132 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_out.c 2011-03-27 14:31:47.000000000 -0400
37133 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_out.c 2011-04-17 15:56:46.000000000 -0400
37134 @@ -810,7 +810,7 @@ static int audpp_open(struct inode *inod
37135 return 0;
37136 }
37137
37138 -static struct file_operations audio_fops = {
37139 +static const struct file_operations audio_fops = {
37140 .owner = THIS_MODULE,
37141 .open = audio_open,
37142 .release = audio_release,
37143 @@ -819,7 +819,7 @@ static struct file_operations audio_fops
37144 .unlocked_ioctl = audio_ioctl,
37145 };
37146
37147 -static struct file_operations audpp_fops = {
37148 +static const struct file_operations audpp_fops = {
37149 .owner = THIS_MODULE,
37150 .open = audpp_open,
37151 .unlocked_ioctl = audpp_ioctl,
37152 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_qcelp.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_qcelp.c
37153 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-03-27 14:31:47.000000000 -0400
37154 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-04-17 15:56:46.000000000 -0400
37155 @@ -816,7 +816,7 @@ err:
37156 return rc;
37157 }
37158
37159 -static struct file_operations audio_qcelp_fops = {
37160 +static const struct file_operations audio_qcelp_fops = {
37161 .owner = THIS_MODULE,
37162 .open = audqcelp_open,
37163 .release = audqcelp_release,
37164 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/snd.c linux-2.6.32.45/drivers/staging/dream/qdsp5/snd.c
37165 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/snd.c 2011-03-27 14:31:47.000000000 -0400
37166 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/snd.c 2011-04-17 15:56:46.000000000 -0400
37167 @@ -242,7 +242,7 @@ err:
37168 return rc;
37169 }
37170
37171 -static struct file_operations snd_fops = {
37172 +static const struct file_operations snd_fops = {
37173 .owner = THIS_MODULE,
37174 .open = snd_open,
37175 .release = snd_release,
37176 diff -urNp linux-2.6.32.45/drivers/staging/dream/smd/smd_qmi.c linux-2.6.32.45/drivers/staging/dream/smd/smd_qmi.c
37177 --- linux-2.6.32.45/drivers/staging/dream/smd/smd_qmi.c 2011-03-27 14:31:47.000000000 -0400
37178 +++ linux-2.6.32.45/drivers/staging/dream/smd/smd_qmi.c 2011-04-17 15:56:46.000000000 -0400
37179 @@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip,
37180 return 0;
37181 }
37182
37183 -static struct file_operations qmi_fops = {
37184 +static const struct file_operations qmi_fops = {
37185 .owner = THIS_MODULE,
37186 .read = qmi_read,
37187 .write = qmi_write,
37188 diff -urNp linux-2.6.32.45/drivers/staging/dream/smd/smd_rpcrouter_device.c linux-2.6.32.45/drivers/staging/dream/smd/smd_rpcrouter_device.c
37189 --- linux-2.6.32.45/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-03-27 14:31:47.000000000 -0400
37190 +++ linux-2.6.32.45/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-04-17 15:56:46.000000000 -0400
37191 @@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file
37192 return rc;
37193 }
37194
37195 -static struct file_operations rpcrouter_server_fops = {
37196 +static const struct file_operations rpcrouter_server_fops = {
37197 .owner = THIS_MODULE,
37198 .open = rpcrouter_open,
37199 .release = rpcrouter_release,
37200 @@ -224,7 +224,7 @@ static struct file_operations rpcrouter_
37201 .unlocked_ioctl = rpcrouter_ioctl,
37202 };
37203
37204 -static struct file_operations rpcrouter_router_fops = {
37205 +static const struct file_operations rpcrouter_router_fops = {
37206 .owner = THIS_MODULE,
37207 .open = rpcrouter_open,
37208 .release = rpcrouter_release,
37209 diff -urNp linux-2.6.32.45/drivers/staging/dst/dcore.c linux-2.6.32.45/drivers/staging/dst/dcore.c
37210 --- linux-2.6.32.45/drivers/staging/dst/dcore.c 2011-03-27 14:31:47.000000000 -0400
37211 +++ linux-2.6.32.45/drivers/staging/dst/dcore.c 2011-04-17 15:56:46.000000000 -0400
37212 @@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendi
37213 return 0;
37214 }
37215
37216 -static struct block_device_operations dst_blk_ops = {
37217 +static const struct block_device_operations dst_blk_ops = {
37218 .open = dst_bdev_open,
37219 .release = dst_bdev_release,
37220 .owner = THIS_MODULE,
37221 @@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(s
37222 n->size = ctl->size;
37223
37224 atomic_set(&n->refcnt, 1);
37225 - atomic_long_set(&n->gen, 0);
37226 + atomic_long_set_unchecked(&n->gen, 0);
37227 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
37228
37229 err = dst_node_sysfs_init(n);
37230 diff -urNp linux-2.6.32.45/drivers/staging/dst/trans.c linux-2.6.32.45/drivers/staging/dst/trans.c
37231 --- linux-2.6.32.45/drivers/staging/dst/trans.c 2011-03-27 14:31:47.000000000 -0400
37232 +++ linux-2.6.32.45/drivers/staging/dst/trans.c 2011-04-17 15:56:46.000000000 -0400
37233 @@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n,
37234 t->error = 0;
37235 t->retries = 0;
37236 atomic_set(&t->refcnt, 1);
37237 - t->gen = atomic_long_inc_return(&n->gen);
37238 + t->gen = atomic_long_inc_return_unchecked(&n->gen);
37239
37240 t->enc = bio_data_dir(bio);
37241 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
37242 diff -urNp linux-2.6.32.45/drivers/staging/et131x/et1310_tx.c linux-2.6.32.45/drivers/staging/et131x/et1310_tx.c
37243 --- linux-2.6.32.45/drivers/staging/et131x/et1310_tx.c 2011-03-27 14:31:47.000000000 -0400
37244 +++ linux-2.6.32.45/drivers/staging/et131x/et1310_tx.c 2011-05-04 17:56:28.000000000 -0400
37245 @@ -710,11 +710,11 @@ inline void et131x_free_send_packet(stru
37246 struct net_device_stats *stats = &etdev->net_stats;
37247
37248 if (pMpTcb->Flags & fMP_DEST_BROAD)
37249 - atomic_inc(&etdev->Stats.brdcstxmt);
37250 + atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
37251 else if (pMpTcb->Flags & fMP_DEST_MULTI)
37252 - atomic_inc(&etdev->Stats.multixmt);
37253 + atomic_inc_unchecked(&etdev->Stats.multixmt);
37254 else
37255 - atomic_inc(&etdev->Stats.unixmt);
37256 + atomic_inc_unchecked(&etdev->Stats.unixmt);
37257
37258 if (pMpTcb->Packet) {
37259 stats->tx_bytes += pMpTcb->Packet->len;
37260 diff -urNp linux-2.6.32.45/drivers/staging/et131x/et131x_adapter.h linux-2.6.32.45/drivers/staging/et131x/et131x_adapter.h
37261 --- linux-2.6.32.45/drivers/staging/et131x/et131x_adapter.h 2011-03-27 14:31:47.000000000 -0400
37262 +++ linux-2.6.32.45/drivers/staging/et131x/et131x_adapter.h 2011-05-04 17:56:28.000000000 -0400
37263 @@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
37264 * operations
37265 */
37266 u32 unircv; /* # multicast packets received */
37267 - atomic_t unixmt; /* # multicast packets for Tx */
37268 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
37269 u32 multircv; /* # multicast packets received */
37270 - atomic_t multixmt; /* # multicast packets for Tx */
37271 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
37272 u32 brdcstrcv; /* # broadcast packets received */
37273 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
37274 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
37275 u32 norcvbuf; /* # Rx packets discarded */
37276 u32 noxmtbuf; /* # Tx packets discarded */
37277
37278 diff -urNp linux-2.6.32.45/drivers/staging/go7007/go7007-v4l2.c linux-2.6.32.45/drivers/staging/go7007/go7007-v4l2.c
37279 --- linux-2.6.32.45/drivers/staging/go7007/go7007-v4l2.c 2011-03-27 14:31:47.000000000 -0400
37280 +++ linux-2.6.32.45/drivers/staging/go7007/go7007-v4l2.c 2011-04-17 15:56:46.000000000 -0400
37281 @@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_are
37282 return 0;
37283 }
37284
37285 -static struct vm_operations_struct go7007_vm_ops = {
37286 +static const struct vm_operations_struct go7007_vm_ops = {
37287 .open = go7007_vm_open,
37288 .close = go7007_vm_close,
37289 .fault = go7007_vm_fault,
37290 diff -urNp linux-2.6.32.45/drivers/staging/hv/blkvsc_drv.c linux-2.6.32.45/drivers/staging/hv/blkvsc_drv.c
37291 --- linux-2.6.32.45/drivers/staging/hv/blkvsc_drv.c 2011-03-27 14:31:47.000000000 -0400
37292 +++ linux-2.6.32.45/drivers/staging/hv/blkvsc_drv.c 2011-04-17 15:56:46.000000000 -0400
37293 @@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKV
37294 /* The one and only one */
37295 static struct blkvsc_driver_context g_blkvsc_drv;
37296
37297 -static struct block_device_operations block_ops = {
37298 +static const struct block_device_operations block_ops = {
37299 .owner = THIS_MODULE,
37300 .open = blkvsc_open,
37301 .release = blkvsc_release,
37302 diff -urNp linux-2.6.32.45/drivers/staging/hv/Channel.c linux-2.6.32.45/drivers/staging/hv/Channel.c
37303 --- linux-2.6.32.45/drivers/staging/hv/Channel.c 2011-04-17 17:00:52.000000000 -0400
37304 +++ linux-2.6.32.45/drivers/staging/hv/Channel.c 2011-05-04 17:56:28.000000000 -0400
37305 @@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vm
37306
37307 DPRINT_ENTER(VMBUS);
37308
37309 - nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
37310 - atomic_inc(&gVmbusConnection.NextGpadlHandle);
37311 + nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
37312 + atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
37313
37314 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
37315 ASSERT(msgInfo != NULL);
37316 diff -urNp linux-2.6.32.45/drivers/staging/hv/Hv.c linux-2.6.32.45/drivers/staging/hv/Hv.c
37317 --- linux-2.6.32.45/drivers/staging/hv/Hv.c 2011-03-27 14:31:47.000000000 -0400
37318 +++ linux-2.6.32.45/drivers/staging/hv/Hv.c 2011-04-17 15:56:46.000000000 -0400
37319 @@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, vo
37320 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
37321 u32 outputAddressHi = outputAddress >> 32;
37322 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
37323 - volatile void *hypercallPage = gHvContext.HypercallPage;
37324 + volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
37325
37326 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
37327 Control, Input, Output);
37328 diff -urNp linux-2.6.32.45/drivers/staging/hv/vmbus_drv.c linux-2.6.32.45/drivers/staging/hv/vmbus_drv.c
37329 --- linux-2.6.32.45/drivers/staging/hv/vmbus_drv.c 2011-03-27 14:31:47.000000000 -0400
37330 +++ linux-2.6.32.45/drivers/staging/hv/vmbus_drv.c 2011-05-04 17:56:28.000000000 -0400
37331 @@ -532,7 +532,7 @@ static int vmbus_child_device_register(s
37332 to_device_context(root_device_obj);
37333 struct device_context *child_device_ctx =
37334 to_device_context(child_device_obj);
37335 - static atomic_t device_num = ATOMIC_INIT(0);
37336 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
37337
37338 DPRINT_ENTER(VMBUS_DRV);
37339
37340 @@ -541,7 +541,7 @@ static int vmbus_child_device_register(s
37341
37342 /* Set the device name. Otherwise, device_register() will fail. */
37343 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
37344 - atomic_inc_return(&device_num));
37345 + atomic_inc_return_unchecked(&device_num));
37346
37347 /* The new device belongs to this bus */
37348 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
37349 diff -urNp linux-2.6.32.45/drivers/staging/hv/VmbusPrivate.h linux-2.6.32.45/drivers/staging/hv/VmbusPrivate.h
37350 --- linux-2.6.32.45/drivers/staging/hv/VmbusPrivate.h 2011-04-17 17:00:52.000000000 -0400
37351 +++ linux-2.6.32.45/drivers/staging/hv/VmbusPrivate.h 2011-05-04 17:56:28.000000000 -0400
37352 @@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
37353 struct VMBUS_CONNECTION {
37354 enum VMBUS_CONNECT_STATE ConnectState;
37355
37356 - atomic_t NextGpadlHandle;
37357 + atomic_unchecked_t NextGpadlHandle;
37358
37359 /*
37360 * Represents channel interrupts. Each bit position represents a
37361 diff -urNp linux-2.6.32.45/drivers/staging/iio/ring_generic.h linux-2.6.32.45/drivers/staging/iio/ring_generic.h
37362 --- linux-2.6.32.45/drivers/staging/iio/ring_generic.h 2011-03-27 14:31:47.000000000 -0400
37363 +++ linux-2.6.32.45/drivers/staging/iio/ring_generic.h 2011-08-23 20:24:26.000000000 -0400
37364 @@ -87,7 +87,7 @@ struct iio_ring_access_funcs {
37365
37366 int (*is_enabled)(struct iio_ring_buffer *ring);
37367 int (*enable)(struct iio_ring_buffer *ring);
37368 -};
37369 +} __no_const;
37370
37371 /**
37372 * struct iio_ring_buffer - general ring buffer structure
37373 diff -urNp linux-2.6.32.45/drivers/staging/octeon/ethernet.c linux-2.6.32.45/drivers/staging/octeon/ethernet.c
37374 --- linux-2.6.32.45/drivers/staging/octeon/ethernet.c 2011-03-27 14:31:47.000000000 -0400
37375 +++ linux-2.6.32.45/drivers/staging/octeon/ethernet.c 2011-05-04 17:56:28.000000000 -0400
37376 @@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_
37377 * since the RX tasklet also increments it.
37378 */
37379 #ifdef CONFIG_64BIT
37380 - atomic64_add(rx_status.dropped_packets,
37381 - (atomic64_t *)&priv->stats.rx_dropped);
37382 + atomic64_add_unchecked(rx_status.dropped_packets,
37383 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37384 #else
37385 - atomic_add(rx_status.dropped_packets,
37386 - (atomic_t *)&priv->stats.rx_dropped);
37387 + atomic_add_unchecked(rx_status.dropped_packets,
37388 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
37389 #endif
37390 }
37391
37392 diff -urNp linux-2.6.32.45/drivers/staging/octeon/ethernet-rx.c linux-2.6.32.45/drivers/staging/octeon/ethernet-rx.c
37393 --- linux-2.6.32.45/drivers/staging/octeon/ethernet-rx.c 2011-03-27 14:31:47.000000000 -0400
37394 +++ linux-2.6.32.45/drivers/staging/octeon/ethernet-rx.c 2011-05-04 17:56:28.000000000 -0400
37395 @@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long un
37396 /* Increment RX stats for virtual ports */
37397 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
37398 #ifdef CONFIG_64BIT
37399 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
37400 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
37401 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
37402 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
37403 #else
37404 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
37405 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
37406 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
37407 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
37408 #endif
37409 }
37410 netif_receive_skb(skb);
37411 @@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long un
37412 dev->name);
37413 */
37414 #ifdef CONFIG_64BIT
37415 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
37416 + atomic64_add_unchecked(1, (atomic64_t *)&priv->stats.rx_dropped);
37417 #else
37418 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
37419 + atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped);
37420 #endif
37421 dev_kfree_skb_irq(skb);
37422 }
37423 diff -urNp linux-2.6.32.45/drivers/staging/panel/panel.c linux-2.6.32.45/drivers/staging/panel/panel.c
37424 --- linux-2.6.32.45/drivers/staging/panel/panel.c 2011-03-27 14:31:47.000000000 -0400
37425 +++ linux-2.6.32.45/drivers/staging/panel/panel.c 2011-04-17 15:56:46.000000000 -0400
37426 @@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *ino
37427 return 0;
37428 }
37429
37430 -static struct file_operations lcd_fops = {
37431 +static const struct file_operations lcd_fops = {
37432 .write = lcd_write,
37433 .open = lcd_open,
37434 .release = lcd_release,
37435 @@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *
37436 return 0;
37437 }
37438
37439 -static struct file_operations keypad_fops = {
37440 +static const struct file_operations keypad_fops = {
37441 .read = keypad_read, /* read */
37442 .open = keypad_open, /* open */
37443 .release = keypad_release, /* close */
37444 diff -urNp linux-2.6.32.45/drivers/staging/phison/phison.c linux-2.6.32.45/drivers/staging/phison/phison.c
37445 --- linux-2.6.32.45/drivers/staging/phison/phison.c 2011-03-27 14:31:47.000000000 -0400
37446 +++ linux-2.6.32.45/drivers/staging/phison/phison.c 2011-04-17 15:56:46.000000000 -0400
37447 @@ -43,7 +43,7 @@ static struct scsi_host_template phison_
37448 ATA_BMDMA_SHT(DRV_NAME),
37449 };
37450
37451 -static struct ata_port_operations phison_ops = {
37452 +static const struct ata_port_operations phison_ops = {
37453 .inherits = &ata_bmdma_port_ops,
37454 .prereset = phison_pre_reset,
37455 };
37456 diff -urNp linux-2.6.32.45/drivers/staging/poch/poch.c linux-2.6.32.45/drivers/staging/poch/poch.c
37457 --- linux-2.6.32.45/drivers/staging/poch/poch.c 2011-03-27 14:31:47.000000000 -0400
37458 +++ linux-2.6.32.45/drivers/staging/poch/poch.c 2011-04-17 15:56:46.000000000 -0400
37459 @@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inod
37460 return 0;
37461 }
37462
37463 -static struct file_operations poch_fops = {
37464 +static const struct file_operations poch_fops = {
37465 .owner = THIS_MODULE,
37466 .open = poch_open,
37467 .release = poch_release,
37468 diff -urNp linux-2.6.32.45/drivers/staging/pohmelfs/inode.c linux-2.6.32.45/drivers/staging/pohmelfs/inode.c
37469 --- linux-2.6.32.45/drivers/staging/pohmelfs/inode.c 2011-03-27 14:31:47.000000000 -0400
37470 +++ linux-2.6.32.45/drivers/staging/pohmelfs/inode.c 2011-05-04 17:56:20.000000000 -0400
37471 @@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct su
37472 mutex_init(&psb->mcache_lock);
37473 psb->mcache_root = RB_ROOT;
37474 psb->mcache_timeout = msecs_to_jiffies(5000);
37475 - atomic_long_set(&psb->mcache_gen, 0);
37476 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
37477
37478 psb->trans_max_pages = 100;
37479
37480 @@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct su
37481 INIT_LIST_HEAD(&psb->crypto_ready_list);
37482 INIT_LIST_HEAD(&psb->crypto_active_list);
37483
37484 - atomic_set(&psb->trans_gen, 1);
37485 + atomic_set_unchecked(&psb->trans_gen, 1);
37486 atomic_long_set(&psb->total_inodes, 0);
37487
37488 mutex_init(&psb->state_lock);
37489 diff -urNp linux-2.6.32.45/drivers/staging/pohmelfs/mcache.c linux-2.6.32.45/drivers/staging/pohmelfs/mcache.c
37490 --- linux-2.6.32.45/drivers/staging/pohmelfs/mcache.c 2011-03-27 14:31:47.000000000 -0400
37491 +++ linux-2.6.32.45/drivers/staging/pohmelfs/mcache.c 2011-04-17 15:56:46.000000000 -0400
37492 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
37493 m->data = data;
37494 m->start = start;
37495 m->size = size;
37496 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
37497 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
37498
37499 mutex_lock(&psb->mcache_lock);
37500 err = pohmelfs_mcache_insert(psb, m);
37501 diff -urNp linux-2.6.32.45/drivers/staging/pohmelfs/netfs.h linux-2.6.32.45/drivers/staging/pohmelfs/netfs.h
37502 --- linux-2.6.32.45/drivers/staging/pohmelfs/netfs.h 2011-03-27 14:31:47.000000000 -0400
37503 +++ linux-2.6.32.45/drivers/staging/pohmelfs/netfs.h 2011-05-04 17:56:20.000000000 -0400
37504 @@ -570,14 +570,14 @@ struct pohmelfs_config;
37505 struct pohmelfs_sb {
37506 struct rb_root mcache_root;
37507 struct mutex mcache_lock;
37508 - atomic_long_t mcache_gen;
37509 + atomic_long_unchecked_t mcache_gen;
37510 unsigned long mcache_timeout;
37511
37512 unsigned int idx;
37513
37514 unsigned int trans_retries;
37515
37516 - atomic_t trans_gen;
37517 + atomic_unchecked_t trans_gen;
37518
37519 unsigned int crypto_attached_size;
37520 unsigned int crypto_align_size;
37521 diff -urNp linux-2.6.32.45/drivers/staging/pohmelfs/trans.c linux-2.6.32.45/drivers/staging/pohmelfs/trans.c
37522 --- linux-2.6.32.45/drivers/staging/pohmelfs/trans.c 2011-03-27 14:31:47.000000000 -0400
37523 +++ linux-2.6.32.45/drivers/staging/pohmelfs/trans.c 2011-05-04 17:56:28.000000000 -0400
37524 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
37525 int err;
37526 struct netfs_cmd *cmd = t->iovec.iov_base;
37527
37528 - t->gen = atomic_inc_return(&psb->trans_gen);
37529 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
37530
37531 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
37532 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
37533 diff -urNp linux-2.6.32.45/drivers/staging/sep/sep_driver.c linux-2.6.32.45/drivers/staging/sep/sep_driver.c
37534 --- linux-2.6.32.45/drivers/staging/sep/sep_driver.c 2011-03-27 14:31:47.000000000 -0400
37535 +++ linux-2.6.32.45/drivers/staging/sep/sep_driver.c 2011-04-17 15:56:46.000000000 -0400
37536 @@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver
37537 static dev_t sep_devno;
37538
37539 /* the files operations structure of the driver */
37540 -static struct file_operations sep_file_operations = {
37541 +static const struct file_operations sep_file_operations = {
37542 .owner = THIS_MODULE,
37543 .ioctl = sep_ioctl,
37544 .poll = sep_poll,
37545 diff -urNp linux-2.6.32.45/drivers/staging/usbip/usbip_common.h linux-2.6.32.45/drivers/staging/usbip/usbip_common.h
37546 --- linux-2.6.32.45/drivers/staging/usbip/usbip_common.h 2011-04-17 17:00:52.000000000 -0400
37547 +++ linux-2.6.32.45/drivers/staging/usbip/usbip_common.h 2011-08-23 20:24:26.000000000 -0400
37548 @@ -374,7 +374,7 @@ struct usbip_device {
37549 void (*shutdown)(struct usbip_device *);
37550 void (*reset)(struct usbip_device *);
37551 void (*unusable)(struct usbip_device *);
37552 - } eh_ops;
37553 + } __no_const eh_ops;
37554 };
37555
37556
37557 diff -urNp linux-2.6.32.45/drivers/staging/usbip/vhci.h linux-2.6.32.45/drivers/staging/usbip/vhci.h
37558 --- linux-2.6.32.45/drivers/staging/usbip/vhci.h 2011-03-27 14:31:47.000000000 -0400
37559 +++ linux-2.6.32.45/drivers/staging/usbip/vhci.h 2011-05-04 17:56:28.000000000 -0400
37560 @@ -92,7 +92,7 @@ struct vhci_hcd {
37561 unsigned resuming:1;
37562 unsigned long re_timeout;
37563
37564 - atomic_t seqnum;
37565 + atomic_unchecked_t seqnum;
37566
37567 /*
37568 * NOTE:
37569 diff -urNp linux-2.6.32.45/drivers/staging/usbip/vhci_hcd.c linux-2.6.32.45/drivers/staging/usbip/vhci_hcd.c
37570 --- linux-2.6.32.45/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:01.000000000 -0400
37571 +++ linux-2.6.32.45/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:33.000000000 -0400
37572 @@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
37573 return;
37574 }
37575
37576 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
37577 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37578 if (priv->seqnum == 0xffff)
37579 usbip_uinfo("seqnum max\n");
37580
37581 @@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_h
37582 return -ENOMEM;
37583 }
37584
37585 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
37586 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37587 if (unlink->seqnum == 0xffff)
37588 usbip_uinfo("seqnum max\n");
37589
37590 @@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hc
37591 vdev->rhport = rhport;
37592 }
37593
37594 - atomic_set(&vhci->seqnum, 0);
37595 + atomic_set_unchecked(&vhci->seqnum, 0);
37596 spin_lock_init(&vhci->lock);
37597
37598
37599 diff -urNp linux-2.6.32.45/drivers/staging/usbip/vhci_rx.c linux-2.6.32.45/drivers/staging/usbip/vhci_rx.c
37600 --- linux-2.6.32.45/drivers/staging/usbip/vhci_rx.c 2011-04-17 17:00:52.000000000 -0400
37601 +++ linux-2.6.32.45/drivers/staging/usbip/vhci_rx.c 2011-05-04 17:56:28.000000000 -0400
37602 @@ -78,7 +78,7 @@ static void vhci_recv_ret_submit(struct
37603 usbip_uerr("cannot find a urb of seqnum %u\n",
37604 pdu->base.seqnum);
37605 usbip_uinfo("max seqnum %d\n",
37606 - atomic_read(&the_controller->seqnum));
37607 + atomic_read_unchecked(&the_controller->seqnum));
37608 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
37609 return;
37610 }
37611 diff -urNp linux-2.6.32.45/drivers/staging/vme/devices/vme_user.c linux-2.6.32.45/drivers/staging/vme/devices/vme_user.c
37612 --- linux-2.6.32.45/drivers/staging/vme/devices/vme_user.c 2011-03-27 14:31:47.000000000 -0400
37613 +++ linux-2.6.32.45/drivers/staging/vme/devices/vme_user.c 2011-04-17 15:56:46.000000000 -0400
37614 @@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *
37615 static int __init vme_user_probe(struct device *, int, int);
37616 static int __exit vme_user_remove(struct device *, int, int);
37617
37618 -static struct file_operations vme_user_fops = {
37619 +static const struct file_operations vme_user_fops = {
37620 .open = vme_user_open,
37621 .release = vme_user_release,
37622 .read = vme_user_read,
37623 diff -urNp linux-2.6.32.45/drivers/staging/wlan-ng/hfa384x_usb.c linux-2.6.32.45/drivers/staging/wlan-ng/hfa384x_usb.c
37624 --- linux-2.6.32.45/drivers/staging/wlan-ng/hfa384x_usb.c 2011-03-27 14:31:47.000000000 -0400
37625 +++ linux-2.6.32.45/drivers/staging/wlan-ng/hfa384x_usb.c 2011-08-23 20:24:26.000000000 -0400
37626 @@ -205,7 +205,7 @@ static void unlocked_usbctlx_complete(hf
37627
37628 struct usbctlx_completor {
37629 int (*complete) (struct usbctlx_completor *);
37630 -};
37631 +} __no_const;
37632 typedef struct usbctlx_completor usbctlx_completor_t;
37633
37634 static int
37635 diff -urNp linux-2.6.32.45/drivers/telephony/ixj.c linux-2.6.32.45/drivers/telephony/ixj.c
37636 --- linux-2.6.32.45/drivers/telephony/ixj.c 2011-03-27 14:31:47.000000000 -0400
37637 +++ linux-2.6.32.45/drivers/telephony/ixj.c 2011-05-16 21:46:57.000000000 -0400
37638 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
37639 bool mContinue;
37640 char *pIn, *pOut;
37641
37642 + pax_track_stack();
37643 +
37644 if (!SCI_Prepare(j))
37645 return 0;
37646
37647 diff -urNp linux-2.6.32.45/drivers/uio/uio.c linux-2.6.32.45/drivers/uio/uio.c
37648 --- linux-2.6.32.45/drivers/uio/uio.c 2011-03-27 14:31:47.000000000 -0400
37649 +++ linux-2.6.32.45/drivers/uio/uio.c 2011-05-04 17:56:20.000000000 -0400
37650 @@ -23,6 +23,7 @@
37651 #include <linux/string.h>
37652 #include <linux/kobject.h>
37653 #include <linux/uio_driver.h>
37654 +#include <asm/local.h>
37655
37656 #define UIO_MAX_DEVICES 255
37657
37658 @@ -30,10 +31,10 @@ struct uio_device {
37659 struct module *owner;
37660 struct device *dev;
37661 int minor;
37662 - atomic_t event;
37663 + atomic_unchecked_t event;
37664 struct fasync_struct *async_queue;
37665 wait_queue_head_t wait;
37666 - int vma_count;
37667 + local_t vma_count;
37668 struct uio_info *info;
37669 struct kobject *map_dir;
37670 struct kobject *portio_dir;
37671 @@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobj
37672 return entry->show(mem, buf);
37673 }
37674
37675 -static struct sysfs_ops map_sysfs_ops = {
37676 +static const struct sysfs_ops map_sysfs_ops = {
37677 .show = map_type_show,
37678 };
37679
37680 @@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct k
37681 return entry->show(port, buf);
37682 }
37683
37684 -static struct sysfs_ops portio_sysfs_ops = {
37685 +static const struct sysfs_ops portio_sysfs_ops = {
37686 .show = portio_type_show,
37687 };
37688
37689 @@ -255,7 +256,7 @@ static ssize_t show_event(struct device
37690 struct uio_device *idev = dev_get_drvdata(dev);
37691 if (idev)
37692 return sprintf(buf, "%u\n",
37693 - (unsigned int)atomic_read(&idev->event));
37694 + (unsigned int)atomic_read_unchecked(&idev->event));
37695 else
37696 return -ENODEV;
37697 }
37698 @@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *i
37699 {
37700 struct uio_device *idev = info->uio_dev;
37701
37702 - atomic_inc(&idev->event);
37703 + atomic_inc_unchecked(&idev->event);
37704 wake_up_interruptible(&idev->wait);
37705 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
37706 }
37707 @@ -477,7 +478,7 @@ static int uio_open(struct inode *inode,
37708 }
37709
37710 listener->dev = idev;
37711 - listener->event_count = atomic_read(&idev->event);
37712 + listener->event_count = atomic_read_unchecked(&idev->event);
37713 filep->private_data = listener;
37714
37715 if (idev->info->open) {
37716 @@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file
37717 return -EIO;
37718
37719 poll_wait(filep, &idev->wait, wait);
37720 - if (listener->event_count != atomic_read(&idev->event))
37721 + if (listener->event_count != atomic_read_unchecked(&idev->event))
37722 return POLLIN | POLLRDNORM;
37723 return 0;
37724 }
37725 @@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *fil
37726 do {
37727 set_current_state(TASK_INTERRUPTIBLE);
37728
37729 - event_count = atomic_read(&idev->event);
37730 + event_count = atomic_read_unchecked(&idev->event);
37731 if (event_count != listener->event_count) {
37732 if (copy_to_user(buf, &event_count, count))
37733 retval = -EFAULT;
37734 @@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_
37735 static void uio_vma_open(struct vm_area_struct *vma)
37736 {
37737 struct uio_device *idev = vma->vm_private_data;
37738 - idev->vma_count++;
37739 + local_inc(&idev->vma_count);
37740 }
37741
37742 static void uio_vma_close(struct vm_area_struct *vma)
37743 {
37744 struct uio_device *idev = vma->vm_private_data;
37745 - idev->vma_count--;
37746 + local_dec(&idev->vma_count);
37747 }
37748
37749 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
37750 @@ -840,7 +841,7 @@ int __uio_register_device(struct module
37751 idev->owner = owner;
37752 idev->info = info;
37753 init_waitqueue_head(&idev->wait);
37754 - atomic_set(&idev->event, 0);
37755 + atomic_set_unchecked(&idev->event, 0);
37756
37757 ret = uio_get_minor(idev);
37758 if (ret)
37759 diff -urNp linux-2.6.32.45/drivers/usb/atm/usbatm.c linux-2.6.32.45/drivers/usb/atm/usbatm.c
37760 --- linux-2.6.32.45/drivers/usb/atm/usbatm.c 2011-03-27 14:31:47.000000000 -0400
37761 +++ linux-2.6.32.45/drivers/usb/atm/usbatm.c 2011-04-17 15:56:46.000000000 -0400
37762 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(stru
37763 if (printk_ratelimit())
37764 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
37765 __func__, vpi, vci);
37766 - atomic_inc(&vcc->stats->rx_err);
37767 + atomic_inc_unchecked(&vcc->stats->rx_err);
37768 return;
37769 }
37770
37771 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(stru
37772 if (length > ATM_MAX_AAL5_PDU) {
37773 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
37774 __func__, length, vcc);
37775 - atomic_inc(&vcc->stats->rx_err);
37776 + atomic_inc_unchecked(&vcc->stats->rx_err);
37777 goto out;
37778 }
37779
37780 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(stru
37781 if (sarb->len < pdu_length) {
37782 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
37783 __func__, pdu_length, sarb->len, vcc);
37784 - atomic_inc(&vcc->stats->rx_err);
37785 + atomic_inc_unchecked(&vcc->stats->rx_err);
37786 goto out;
37787 }
37788
37789 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
37790 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
37791 __func__, vcc);
37792 - atomic_inc(&vcc->stats->rx_err);
37793 + atomic_inc_unchecked(&vcc->stats->rx_err);
37794 goto out;
37795 }
37796
37797 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(stru
37798 if (printk_ratelimit())
37799 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
37800 __func__, length);
37801 - atomic_inc(&vcc->stats->rx_drop);
37802 + atomic_inc_unchecked(&vcc->stats->rx_drop);
37803 goto out;
37804 }
37805
37806 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(stru
37807
37808 vcc->push(vcc, skb);
37809
37810 - atomic_inc(&vcc->stats->rx);
37811 + atomic_inc_unchecked(&vcc->stats->rx);
37812 out:
37813 skb_trim(sarb, 0);
37814 }
37815 @@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned l
37816 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
37817
37818 usbatm_pop(vcc, skb);
37819 - atomic_inc(&vcc->stats->tx);
37820 + atomic_inc_unchecked(&vcc->stats->tx);
37821
37822 skb = skb_dequeue(&instance->sndqueue);
37823 }
37824 @@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct a
37825 if (!left--)
37826 return sprintf(page,
37827 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
37828 - atomic_read(&atm_dev->stats.aal5.tx),
37829 - atomic_read(&atm_dev->stats.aal5.tx_err),
37830 - atomic_read(&atm_dev->stats.aal5.rx),
37831 - atomic_read(&atm_dev->stats.aal5.rx_err),
37832 - atomic_read(&atm_dev->stats.aal5.rx_drop));
37833 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
37834 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
37835 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
37836 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
37837 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
37838
37839 if (!left--) {
37840 if (instance->disconnected)
37841 diff -urNp linux-2.6.32.45/drivers/usb/class/cdc-wdm.c linux-2.6.32.45/drivers/usb/class/cdc-wdm.c
37842 --- linux-2.6.32.45/drivers/usb/class/cdc-wdm.c 2011-03-27 14:31:47.000000000 -0400
37843 +++ linux-2.6.32.45/drivers/usb/class/cdc-wdm.c 2011-04-17 15:56:46.000000000 -0400
37844 @@ -314,7 +314,7 @@ static ssize_t wdm_write
37845 if (r < 0)
37846 goto outnp;
37847
37848 - if (!file->f_flags && O_NONBLOCK)
37849 + if (!(file->f_flags & O_NONBLOCK))
37850 r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
37851 &desc->flags));
37852 else
37853 diff -urNp linux-2.6.32.45/drivers/usb/core/hcd.c linux-2.6.32.45/drivers/usb/core/hcd.c
37854 --- linux-2.6.32.45/drivers/usb/core/hcd.c 2011-03-27 14:31:47.000000000 -0400
37855 +++ linux-2.6.32.45/drivers/usb/core/hcd.c 2011-04-17 15:56:46.000000000 -0400
37856 @@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutd
37857
37858 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
37859
37860 -struct usb_mon_operations *mon_ops;
37861 +const struct usb_mon_operations *mon_ops;
37862
37863 /*
37864 * The registration is unlocked.
37865 @@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
37866 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
37867 */
37868
37869 -int usb_mon_register (struct usb_mon_operations *ops)
37870 +int usb_mon_register (const struct usb_mon_operations *ops)
37871 {
37872
37873 if (mon_ops)
37874 diff -urNp linux-2.6.32.45/drivers/usb/core/hcd.h linux-2.6.32.45/drivers/usb/core/hcd.h
37875 --- linux-2.6.32.45/drivers/usb/core/hcd.h 2011-03-27 14:31:47.000000000 -0400
37876 +++ linux-2.6.32.45/drivers/usb/core/hcd.h 2011-04-17 15:56:46.000000000 -0400
37877 @@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) {
37878 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
37879
37880 struct usb_mon_operations {
37881 - void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
37882 - void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
37883 - void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
37884 + void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
37885 + void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
37886 + void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
37887 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
37888 };
37889
37890 -extern struct usb_mon_operations *mon_ops;
37891 +extern const struct usb_mon_operations *mon_ops;
37892
37893 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
37894 {
37895 @@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(s
37896 (*mon_ops->urb_complete)(bus, urb, status);
37897 }
37898
37899 -int usb_mon_register(struct usb_mon_operations *ops);
37900 +int usb_mon_register(const struct usb_mon_operations *ops);
37901 void usb_mon_deregister(void);
37902
37903 #else
37904 diff -urNp linux-2.6.32.45/drivers/usb/core/message.c linux-2.6.32.45/drivers/usb/core/message.c
37905 --- linux-2.6.32.45/drivers/usb/core/message.c 2011-03-27 14:31:47.000000000 -0400
37906 +++ linux-2.6.32.45/drivers/usb/core/message.c 2011-04-17 15:56:46.000000000 -0400
37907 @@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device
37908 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
37909 if (buf) {
37910 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
37911 - if (len > 0) {
37912 - smallbuf = kmalloc(++len, GFP_NOIO);
37913 + if (len++ > 0) {
37914 + smallbuf = kmalloc(len, GFP_NOIO);
37915 if (!smallbuf)
37916 return buf;
37917 memcpy(smallbuf, buf, len);
37918 diff -urNp linux-2.6.32.45/drivers/usb/misc/appledisplay.c linux-2.6.32.45/drivers/usb/misc/appledisplay.c
37919 --- linux-2.6.32.45/drivers/usb/misc/appledisplay.c 2011-03-27 14:31:47.000000000 -0400
37920 +++ linux-2.6.32.45/drivers/usb/misc/appledisplay.c 2011-04-17 15:56:46.000000000 -0400
37921 @@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightnes
37922 return pdata->msgdata[1];
37923 }
37924
37925 -static struct backlight_ops appledisplay_bl_data = {
37926 +static const struct backlight_ops appledisplay_bl_data = {
37927 .get_brightness = appledisplay_bl_get_brightness,
37928 .update_status = appledisplay_bl_update_status,
37929 };
37930 diff -urNp linux-2.6.32.45/drivers/usb/mon/mon_main.c linux-2.6.32.45/drivers/usb/mon/mon_main.c
37931 --- linux-2.6.32.45/drivers/usb/mon/mon_main.c 2011-03-27 14:31:47.000000000 -0400
37932 +++ linux-2.6.32.45/drivers/usb/mon/mon_main.c 2011-04-17 15:56:46.000000000 -0400
37933 @@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
37934 /*
37935 * Ops
37936 */
37937 -static struct usb_mon_operations mon_ops_0 = {
37938 +static const struct usb_mon_operations mon_ops_0 = {
37939 .urb_submit = mon_submit,
37940 .urb_submit_error = mon_submit_error,
37941 .urb_complete = mon_complete,
37942 diff -urNp linux-2.6.32.45/drivers/usb/wusbcore/wa-hc.h linux-2.6.32.45/drivers/usb/wusbcore/wa-hc.h
37943 --- linux-2.6.32.45/drivers/usb/wusbcore/wa-hc.h 2011-03-27 14:31:47.000000000 -0400
37944 +++ linux-2.6.32.45/drivers/usb/wusbcore/wa-hc.h 2011-05-04 17:56:28.000000000 -0400
37945 @@ -192,7 +192,7 @@ struct wahc {
37946 struct list_head xfer_delayed_list;
37947 spinlock_t xfer_list_lock;
37948 struct work_struct xfer_work;
37949 - atomic_t xfer_id_count;
37950 + atomic_unchecked_t xfer_id_count;
37951 };
37952
37953
37954 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
37955 INIT_LIST_HEAD(&wa->xfer_delayed_list);
37956 spin_lock_init(&wa->xfer_list_lock);
37957 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
37958 - atomic_set(&wa->xfer_id_count, 1);
37959 + atomic_set_unchecked(&wa->xfer_id_count, 1);
37960 }
37961
37962 /**
37963 diff -urNp linux-2.6.32.45/drivers/usb/wusbcore/wa-xfer.c linux-2.6.32.45/drivers/usb/wusbcore/wa-xfer.c
37964 --- linux-2.6.32.45/drivers/usb/wusbcore/wa-xfer.c 2011-03-27 14:31:47.000000000 -0400
37965 +++ linux-2.6.32.45/drivers/usb/wusbcore/wa-xfer.c 2011-05-04 17:56:28.000000000 -0400
37966 @@ -293,7 +293,7 @@ out:
37967 */
37968 static void wa_xfer_id_init(struct wa_xfer *xfer)
37969 {
37970 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
37971 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
37972 }
37973
37974 /*
37975 diff -urNp linux-2.6.32.45/drivers/uwb/wlp/messages.c linux-2.6.32.45/drivers/uwb/wlp/messages.c
37976 --- linux-2.6.32.45/drivers/uwb/wlp/messages.c 2011-03-27 14:31:47.000000000 -0400
37977 +++ linux-2.6.32.45/drivers/uwb/wlp/messages.c 2011-04-17 15:56:46.000000000 -0400
37978 @@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct
37979 size_t len = skb->len;
37980 size_t used;
37981 ssize_t result;
37982 - struct wlp_nonce enonce, rnonce;
37983 + struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
37984 enum wlp_assc_error assc_err;
37985 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
37986 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
37987 diff -urNp linux-2.6.32.45/drivers/uwb/wlp/sysfs.c linux-2.6.32.45/drivers/uwb/wlp/sysfs.c
37988 --- linux-2.6.32.45/drivers/uwb/wlp/sysfs.c 2011-03-27 14:31:47.000000000 -0400
37989 +++ linux-2.6.32.45/drivers/uwb/wlp/sysfs.c 2011-04-17 15:56:46.000000000 -0400
37990 @@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobjec
37991 return ret;
37992 }
37993
37994 -static
37995 -struct sysfs_ops wss_sysfs_ops = {
37996 +static const struct sysfs_ops wss_sysfs_ops = {
37997 .show = wlp_wss_attr_show,
37998 .store = wlp_wss_attr_store,
37999 };
38000 diff -urNp linux-2.6.32.45/drivers/video/atmel_lcdfb.c linux-2.6.32.45/drivers/video/atmel_lcdfb.c
38001 --- linux-2.6.32.45/drivers/video/atmel_lcdfb.c 2011-03-27 14:31:47.000000000 -0400
38002 +++ linux-2.6.32.45/drivers/video/atmel_lcdfb.c 2011-04-17 15:56:46.000000000 -0400
38003 @@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struc
38004 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
38005 }
38006
38007 -static struct backlight_ops atmel_lcdc_bl_ops = {
38008 +static const struct backlight_ops atmel_lcdc_bl_ops = {
38009 .update_status = atmel_bl_update_status,
38010 .get_brightness = atmel_bl_get_brightness,
38011 };
38012 diff -urNp linux-2.6.32.45/drivers/video/aty/aty128fb.c linux-2.6.32.45/drivers/video/aty/aty128fb.c
38013 --- linux-2.6.32.45/drivers/video/aty/aty128fb.c 2011-03-27 14:31:47.000000000 -0400
38014 +++ linux-2.6.32.45/drivers/video/aty/aty128fb.c 2011-04-17 15:56:46.000000000 -0400
38015 @@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(stru
38016 return bd->props.brightness;
38017 }
38018
38019 -static struct backlight_ops aty128_bl_data = {
38020 +static const struct backlight_ops aty128_bl_data = {
38021 .get_brightness = aty128_bl_get_brightness,
38022 .update_status = aty128_bl_update_status,
38023 };
38024 diff -urNp linux-2.6.32.45/drivers/video/aty/atyfb_base.c linux-2.6.32.45/drivers/video/aty/atyfb_base.c
38025 --- linux-2.6.32.45/drivers/video/aty/atyfb_base.c 2011-03-27 14:31:47.000000000 -0400
38026 +++ linux-2.6.32.45/drivers/video/aty/atyfb_base.c 2011-04-17 15:56:46.000000000 -0400
38027 @@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct
38028 return bd->props.brightness;
38029 }
38030
38031 -static struct backlight_ops aty_bl_data = {
38032 +static const struct backlight_ops aty_bl_data = {
38033 .get_brightness = aty_bl_get_brightness,
38034 .update_status = aty_bl_update_status,
38035 };
38036 diff -urNp linux-2.6.32.45/drivers/video/aty/radeon_backlight.c linux-2.6.32.45/drivers/video/aty/radeon_backlight.c
38037 --- linux-2.6.32.45/drivers/video/aty/radeon_backlight.c 2011-03-27 14:31:47.000000000 -0400
38038 +++ linux-2.6.32.45/drivers/video/aty/radeon_backlight.c 2011-04-17 15:56:46.000000000 -0400
38039 @@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(stru
38040 return bd->props.brightness;
38041 }
38042
38043 -static struct backlight_ops radeon_bl_data = {
38044 +static const struct backlight_ops radeon_bl_data = {
38045 .get_brightness = radeon_bl_get_brightness,
38046 .update_status = radeon_bl_update_status,
38047 };
38048 diff -urNp linux-2.6.32.45/drivers/video/backlight/adp5520_bl.c linux-2.6.32.45/drivers/video/backlight/adp5520_bl.c
38049 --- linux-2.6.32.45/drivers/video/backlight/adp5520_bl.c 2011-03-27 14:31:47.000000000 -0400
38050 +++ linux-2.6.32.45/drivers/video/backlight/adp5520_bl.c 2011-04-17 15:56:46.000000000 -0400
38051 @@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(str
38052 return error ? data->current_brightness : reg_val;
38053 }
38054
38055 -static struct backlight_ops adp5520_bl_ops = {
38056 +static const struct backlight_ops adp5520_bl_ops = {
38057 .update_status = adp5520_bl_update_status,
38058 .get_brightness = adp5520_bl_get_brightness,
38059 };
38060 diff -urNp linux-2.6.32.45/drivers/video/backlight/adx_bl.c linux-2.6.32.45/drivers/video/backlight/adx_bl.c
38061 --- linux-2.6.32.45/drivers/video/backlight/adx_bl.c 2011-03-27 14:31:47.000000000 -0400
38062 +++ linux-2.6.32.45/drivers/video/backlight/adx_bl.c 2011-04-17 15:56:46.000000000 -0400
38063 @@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct
38064 return 1;
38065 }
38066
38067 -static struct backlight_ops adx_backlight_ops = {
38068 +static const struct backlight_ops adx_backlight_ops = {
38069 .options = 0,
38070 .update_status = adx_backlight_update_status,
38071 .get_brightness = adx_backlight_get_brightness,
38072 diff -urNp linux-2.6.32.45/drivers/video/backlight/atmel-pwm-bl.c linux-2.6.32.45/drivers/video/backlight/atmel-pwm-bl.c
38073 --- linux-2.6.32.45/drivers/video/backlight/atmel-pwm-bl.c 2011-03-27 14:31:47.000000000 -0400
38074 +++ linux-2.6.32.45/drivers/video/backlight/atmel-pwm-bl.c 2011-04-17 15:56:46.000000000 -0400
38075 @@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct
38076 return pwm_channel_enable(&pwmbl->pwmc);
38077 }
38078
38079 -static struct backlight_ops atmel_pwm_bl_ops = {
38080 +static const struct backlight_ops atmel_pwm_bl_ops = {
38081 .get_brightness = atmel_pwm_bl_get_intensity,
38082 .update_status = atmel_pwm_bl_set_intensity,
38083 };
38084 diff -urNp linux-2.6.32.45/drivers/video/backlight/backlight.c linux-2.6.32.45/drivers/video/backlight/backlight.c
38085 --- linux-2.6.32.45/drivers/video/backlight/backlight.c 2011-03-27 14:31:47.000000000 -0400
38086 +++ linux-2.6.32.45/drivers/video/backlight/backlight.c 2011-04-17 15:56:46.000000000 -0400
38087 @@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
38088 * ERR_PTR() or a pointer to the newly allocated device.
38089 */
38090 struct backlight_device *backlight_device_register(const char *name,
38091 - struct device *parent, void *devdata, struct backlight_ops *ops)
38092 + struct device *parent, void *devdata, const struct backlight_ops *ops)
38093 {
38094 struct backlight_device *new_bd;
38095 int rc;
38096 diff -urNp linux-2.6.32.45/drivers/video/backlight/corgi_lcd.c linux-2.6.32.45/drivers/video/backlight/corgi_lcd.c
38097 --- linux-2.6.32.45/drivers/video/backlight/corgi_lcd.c 2011-03-27 14:31:47.000000000 -0400
38098 +++ linux-2.6.32.45/drivers/video/backlight/corgi_lcd.c 2011-04-17 15:56:46.000000000 -0400
38099 @@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit
38100 }
38101 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
38102
38103 -static struct backlight_ops corgi_bl_ops = {
38104 +static const struct backlight_ops corgi_bl_ops = {
38105 .get_brightness = corgi_bl_get_intensity,
38106 .update_status = corgi_bl_update_status,
38107 };
38108 diff -urNp linux-2.6.32.45/drivers/video/backlight/cr_bllcd.c linux-2.6.32.45/drivers/video/backlight/cr_bllcd.c
38109 --- linux-2.6.32.45/drivers/video/backlight/cr_bllcd.c 2011-03-27 14:31:47.000000000 -0400
38110 +++ linux-2.6.32.45/drivers/video/backlight/cr_bllcd.c 2011-04-17 15:56:46.000000000 -0400
38111 @@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(st
38112 return intensity;
38113 }
38114
38115 -static struct backlight_ops cr_backlight_ops = {
38116 +static const struct backlight_ops cr_backlight_ops = {
38117 .get_brightness = cr_backlight_get_intensity,
38118 .update_status = cr_backlight_set_intensity,
38119 };
38120 diff -urNp linux-2.6.32.45/drivers/video/backlight/da903x_bl.c linux-2.6.32.45/drivers/video/backlight/da903x_bl.c
38121 --- linux-2.6.32.45/drivers/video/backlight/da903x_bl.c 2011-03-27 14:31:47.000000000 -0400
38122 +++ linux-2.6.32.45/drivers/video/backlight/da903x_bl.c 2011-04-17 15:56:46.000000000 -0400
38123 @@ -94,7 +94,7 @@ static int da903x_backlight_get_brightne
38124 return data->current_brightness;
38125 }
38126
38127 -static struct backlight_ops da903x_backlight_ops = {
38128 +static const struct backlight_ops da903x_backlight_ops = {
38129 .update_status = da903x_backlight_update_status,
38130 .get_brightness = da903x_backlight_get_brightness,
38131 };
38132 diff -urNp linux-2.6.32.45/drivers/video/backlight/generic_bl.c linux-2.6.32.45/drivers/video/backlight/generic_bl.c
38133 --- linux-2.6.32.45/drivers/video/backlight/generic_bl.c 2011-03-27 14:31:47.000000000 -0400
38134 +++ linux-2.6.32.45/drivers/video/backlight/generic_bl.c 2011-04-17 15:56:46.000000000 -0400
38135 @@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
38136 }
38137 EXPORT_SYMBOL(corgibl_limit_intensity);
38138
38139 -static struct backlight_ops genericbl_ops = {
38140 +static const struct backlight_ops genericbl_ops = {
38141 .options = BL_CORE_SUSPENDRESUME,
38142 .get_brightness = genericbl_get_intensity,
38143 .update_status = genericbl_send_intensity,
38144 diff -urNp linux-2.6.32.45/drivers/video/backlight/hp680_bl.c linux-2.6.32.45/drivers/video/backlight/hp680_bl.c
38145 --- linux-2.6.32.45/drivers/video/backlight/hp680_bl.c 2011-03-27 14:31:47.000000000 -0400
38146 +++ linux-2.6.32.45/drivers/video/backlight/hp680_bl.c 2011-04-17 15:56:46.000000000 -0400
38147 @@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct
38148 return current_intensity;
38149 }
38150
38151 -static struct backlight_ops hp680bl_ops = {
38152 +static const struct backlight_ops hp680bl_ops = {
38153 .get_brightness = hp680bl_get_intensity,
38154 .update_status = hp680bl_set_intensity,
38155 };
38156 diff -urNp linux-2.6.32.45/drivers/video/backlight/jornada720_bl.c linux-2.6.32.45/drivers/video/backlight/jornada720_bl.c
38157 --- linux-2.6.32.45/drivers/video/backlight/jornada720_bl.c 2011-03-27 14:31:47.000000000 -0400
38158 +++ linux-2.6.32.45/drivers/video/backlight/jornada720_bl.c 2011-04-17 15:56:46.000000000 -0400
38159 @@ -93,7 +93,7 @@ out:
38160 return ret;
38161 }
38162
38163 -static struct backlight_ops jornada_bl_ops = {
38164 +static const struct backlight_ops jornada_bl_ops = {
38165 .get_brightness = jornada_bl_get_brightness,
38166 .update_status = jornada_bl_update_status,
38167 .options = BL_CORE_SUSPENDRESUME,
38168 diff -urNp linux-2.6.32.45/drivers/video/backlight/kb3886_bl.c linux-2.6.32.45/drivers/video/backlight/kb3886_bl.c
38169 --- linux-2.6.32.45/drivers/video/backlight/kb3886_bl.c 2011-03-27 14:31:47.000000000 -0400
38170 +++ linux-2.6.32.45/drivers/video/backlight/kb3886_bl.c 2011-04-17 15:56:46.000000000 -0400
38171 @@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct
38172 return kb3886bl_intensity;
38173 }
38174
38175 -static struct backlight_ops kb3886bl_ops = {
38176 +static const struct backlight_ops kb3886bl_ops = {
38177 .get_brightness = kb3886bl_get_intensity,
38178 .update_status = kb3886bl_send_intensity,
38179 };
38180 diff -urNp linux-2.6.32.45/drivers/video/backlight/locomolcd.c linux-2.6.32.45/drivers/video/backlight/locomolcd.c
38181 --- linux-2.6.32.45/drivers/video/backlight/locomolcd.c 2011-03-27 14:31:47.000000000 -0400
38182 +++ linux-2.6.32.45/drivers/video/backlight/locomolcd.c 2011-04-17 15:56:46.000000000 -0400
38183 @@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struc
38184 return current_intensity;
38185 }
38186
38187 -static struct backlight_ops locomobl_data = {
38188 +static const struct backlight_ops locomobl_data = {
38189 .get_brightness = locomolcd_get_intensity,
38190 .update_status = locomolcd_set_intensity,
38191 };
38192 diff -urNp linux-2.6.32.45/drivers/video/backlight/mbp_nvidia_bl.c linux-2.6.32.45/drivers/video/backlight/mbp_nvidia_bl.c
38193 --- linux-2.6.32.45/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:01.000000000 -0400
38194 +++ linux-2.6.32.45/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:33.000000000 -0400
38195 @@ -33,7 +33,7 @@ struct dmi_match_data {
38196 unsigned long iostart;
38197 unsigned long iolen;
38198 /* Backlight operations structure. */
38199 - struct backlight_ops backlight_ops;
38200 + const struct backlight_ops backlight_ops;
38201 };
38202
38203 /* Module parameters. */
38204 diff -urNp linux-2.6.32.45/drivers/video/backlight/omap1_bl.c linux-2.6.32.45/drivers/video/backlight/omap1_bl.c
38205 --- linux-2.6.32.45/drivers/video/backlight/omap1_bl.c 2011-03-27 14:31:47.000000000 -0400
38206 +++ linux-2.6.32.45/drivers/video/backlight/omap1_bl.c 2011-04-17 15:56:46.000000000 -0400
38207 @@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct b
38208 return bl->current_intensity;
38209 }
38210
38211 -static struct backlight_ops omapbl_ops = {
38212 +static const struct backlight_ops omapbl_ops = {
38213 .get_brightness = omapbl_get_intensity,
38214 .update_status = omapbl_update_status,
38215 };
38216 diff -urNp linux-2.6.32.45/drivers/video/backlight/progear_bl.c linux-2.6.32.45/drivers/video/backlight/progear_bl.c
38217 --- linux-2.6.32.45/drivers/video/backlight/progear_bl.c 2011-03-27 14:31:47.000000000 -0400
38218 +++ linux-2.6.32.45/drivers/video/backlight/progear_bl.c 2011-04-17 15:56:46.000000000 -0400
38219 @@ -54,7 +54,7 @@ static int progearbl_get_intensity(struc
38220 return intensity - HW_LEVEL_MIN;
38221 }
38222
38223 -static struct backlight_ops progearbl_ops = {
38224 +static const struct backlight_ops progearbl_ops = {
38225 .get_brightness = progearbl_get_intensity,
38226 .update_status = progearbl_set_intensity,
38227 };
38228 diff -urNp linux-2.6.32.45/drivers/video/backlight/pwm_bl.c linux-2.6.32.45/drivers/video/backlight/pwm_bl.c
38229 --- linux-2.6.32.45/drivers/video/backlight/pwm_bl.c 2011-03-27 14:31:47.000000000 -0400
38230 +++ linux-2.6.32.45/drivers/video/backlight/pwm_bl.c 2011-04-17 15:56:46.000000000 -0400
38231 @@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(
38232 return bl->props.brightness;
38233 }
38234
38235 -static struct backlight_ops pwm_backlight_ops = {
38236 +static const struct backlight_ops pwm_backlight_ops = {
38237 .update_status = pwm_backlight_update_status,
38238 .get_brightness = pwm_backlight_get_brightness,
38239 };
38240 diff -urNp linux-2.6.32.45/drivers/video/backlight/tosa_bl.c linux-2.6.32.45/drivers/video/backlight/tosa_bl.c
38241 --- linux-2.6.32.45/drivers/video/backlight/tosa_bl.c 2011-03-27 14:31:47.000000000 -0400
38242 +++ linux-2.6.32.45/drivers/video/backlight/tosa_bl.c 2011-04-17 15:56:46.000000000 -0400
38243 @@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct
38244 return props->brightness;
38245 }
38246
38247 -static struct backlight_ops bl_ops = {
38248 +static const struct backlight_ops bl_ops = {
38249 .get_brightness = tosa_bl_get_brightness,
38250 .update_status = tosa_bl_update_status,
38251 };
38252 diff -urNp linux-2.6.32.45/drivers/video/backlight/wm831x_bl.c linux-2.6.32.45/drivers/video/backlight/wm831x_bl.c
38253 --- linux-2.6.32.45/drivers/video/backlight/wm831x_bl.c 2011-03-27 14:31:47.000000000 -0400
38254 +++ linux-2.6.32.45/drivers/video/backlight/wm831x_bl.c 2011-04-17 15:56:46.000000000 -0400
38255 @@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightne
38256 return data->current_brightness;
38257 }
38258
38259 -static struct backlight_ops wm831x_backlight_ops = {
38260 +static const struct backlight_ops wm831x_backlight_ops = {
38261 .options = BL_CORE_SUSPENDRESUME,
38262 .update_status = wm831x_backlight_update_status,
38263 .get_brightness = wm831x_backlight_get_brightness,
38264 diff -urNp linux-2.6.32.45/drivers/video/bf54x-lq043fb.c linux-2.6.32.45/drivers/video/bf54x-lq043fb.c
38265 --- linux-2.6.32.45/drivers/video/bf54x-lq043fb.c 2011-03-27 14:31:47.000000000 -0400
38266 +++ linux-2.6.32.45/drivers/video/bf54x-lq043fb.c 2011-04-17 15:56:46.000000000 -0400
38267 @@ -463,7 +463,7 @@ static int bl_get_brightness(struct back
38268 return 0;
38269 }
38270
38271 -static struct backlight_ops bfin_lq043fb_bl_ops = {
38272 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
38273 .get_brightness = bl_get_brightness,
38274 };
38275
38276 diff -urNp linux-2.6.32.45/drivers/video/bfin-t350mcqb-fb.c linux-2.6.32.45/drivers/video/bfin-t350mcqb-fb.c
38277 --- linux-2.6.32.45/drivers/video/bfin-t350mcqb-fb.c 2011-03-27 14:31:47.000000000 -0400
38278 +++ linux-2.6.32.45/drivers/video/bfin-t350mcqb-fb.c 2011-04-17 15:56:46.000000000 -0400
38279 @@ -381,7 +381,7 @@ static int bl_get_brightness(struct back
38280 return 0;
38281 }
38282
38283 -static struct backlight_ops bfin_lq043fb_bl_ops = {
38284 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
38285 .get_brightness = bl_get_brightness,
38286 };
38287
38288 diff -urNp linux-2.6.32.45/drivers/video/fbcmap.c linux-2.6.32.45/drivers/video/fbcmap.c
38289 --- linux-2.6.32.45/drivers/video/fbcmap.c 2011-03-27 14:31:47.000000000 -0400
38290 +++ linux-2.6.32.45/drivers/video/fbcmap.c 2011-04-17 15:56:46.000000000 -0400
38291 @@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user
38292 rc = -ENODEV;
38293 goto out;
38294 }
38295 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
38296 - !info->fbops->fb_setcmap)) {
38297 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
38298 rc = -EINVAL;
38299 goto out1;
38300 }
38301 diff -urNp linux-2.6.32.45/drivers/video/fbmem.c linux-2.6.32.45/drivers/video/fbmem.c
38302 --- linux-2.6.32.45/drivers/video/fbmem.c 2011-03-27 14:31:47.000000000 -0400
38303 +++ linux-2.6.32.45/drivers/video/fbmem.c 2011-05-16 21:46:57.000000000 -0400
38304 @@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_in
38305 image->dx += image->width + 8;
38306 }
38307 } else if (rotate == FB_ROTATE_UD) {
38308 - for (x = 0; x < num && image->dx >= 0; x++) {
38309 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
38310 info->fbops->fb_imageblit(info, image);
38311 image->dx -= image->width + 8;
38312 }
38313 @@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_in
38314 image->dy += image->height + 8;
38315 }
38316 } else if (rotate == FB_ROTATE_CCW) {
38317 - for (x = 0; x < num && image->dy >= 0; x++) {
38318 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
38319 info->fbops->fb_imageblit(info, image);
38320 image->dy -= image->height + 8;
38321 }
38322 @@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct
38323 int flags = info->flags;
38324 int ret = 0;
38325
38326 + pax_track_stack();
38327 +
38328 if (var->activate & FB_ACTIVATE_INV_MODE) {
38329 struct fb_videomode mode1, mode2;
38330
38331 @@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *
38332 void __user *argp = (void __user *)arg;
38333 long ret = 0;
38334
38335 + pax_track_stack();
38336 +
38337 switch (cmd) {
38338 case FBIOGET_VSCREENINFO:
38339 if (!lock_fb_info(info))
38340 @@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *
38341 return -EFAULT;
38342 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
38343 return -EINVAL;
38344 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
38345 + if (con2fb.framebuffer >= FB_MAX)
38346 return -EINVAL;
38347 if (!registered_fb[con2fb.framebuffer])
38348 request_module("fb%d", con2fb.framebuffer);
38349 diff -urNp linux-2.6.32.45/drivers/video/i810/i810_accel.c linux-2.6.32.45/drivers/video/i810/i810_accel.c
38350 --- linux-2.6.32.45/drivers/video/i810/i810_accel.c 2011-03-27 14:31:47.000000000 -0400
38351 +++ linux-2.6.32.45/drivers/video/i810/i810_accel.c 2011-04-17 15:56:46.000000000 -0400
38352 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct
38353 }
38354 }
38355 printk("ringbuffer lockup!!!\n");
38356 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
38357 i810_report_error(mmio);
38358 par->dev_flags |= LOCKUP;
38359 info->pixmap.scan_align = 1;
38360 diff -urNp linux-2.6.32.45/drivers/video/nvidia/nv_backlight.c linux-2.6.32.45/drivers/video/nvidia/nv_backlight.c
38361 --- linux-2.6.32.45/drivers/video/nvidia/nv_backlight.c 2011-03-27 14:31:47.000000000 -0400
38362 +++ linux-2.6.32.45/drivers/video/nvidia/nv_backlight.c 2011-04-17 15:56:46.000000000 -0400
38363 @@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(stru
38364 return bd->props.brightness;
38365 }
38366
38367 -static struct backlight_ops nvidia_bl_ops = {
38368 +static const struct backlight_ops nvidia_bl_ops = {
38369 .get_brightness = nvidia_bl_get_brightness,
38370 .update_status = nvidia_bl_update_status,
38371 };
38372 diff -urNp linux-2.6.32.45/drivers/video/riva/fbdev.c linux-2.6.32.45/drivers/video/riva/fbdev.c
38373 --- linux-2.6.32.45/drivers/video/riva/fbdev.c 2011-03-27 14:31:47.000000000 -0400
38374 +++ linux-2.6.32.45/drivers/video/riva/fbdev.c 2011-04-17 15:56:46.000000000 -0400
38375 @@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct
38376 return bd->props.brightness;
38377 }
38378
38379 -static struct backlight_ops riva_bl_ops = {
38380 +static const struct backlight_ops riva_bl_ops = {
38381 .get_brightness = riva_bl_get_brightness,
38382 .update_status = riva_bl_update_status,
38383 };
38384 diff -urNp linux-2.6.32.45/drivers/video/uvesafb.c linux-2.6.32.45/drivers/video/uvesafb.c
38385 --- linux-2.6.32.45/drivers/video/uvesafb.c 2011-03-27 14:31:47.000000000 -0400
38386 +++ linux-2.6.32.45/drivers/video/uvesafb.c 2011-04-17 15:56:46.000000000 -0400
38387 @@ -18,6 +18,7 @@
38388 #include <linux/fb.h>
38389 #include <linux/io.h>
38390 #include <linux/mutex.h>
38391 +#include <linux/moduleloader.h>
38392 #include <video/edid.h>
38393 #include <video/uvesafb.h>
38394 #ifdef CONFIG_X86
38395 @@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
38396 NULL,
38397 };
38398
38399 - return call_usermodehelper(v86d_path, argv, envp, 1);
38400 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
38401 }
38402
38403 /*
38404 @@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(
38405 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
38406 par->pmi_setpal = par->ypan = 0;
38407 } else {
38408 +
38409 +#ifdef CONFIG_PAX_KERNEXEC
38410 +#ifdef CONFIG_MODULES
38411 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
38412 +#endif
38413 + if (!par->pmi_code) {
38414 + par->pmi_setpal = par->ypan = 0;
38415 + return 0;
38416 + }
38417 +#endif
38418 +
38419 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
38420 + task->t.regs.edi);
38421 +
38422 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38423 + pax_open_kernel();
38424 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
38425 + pax_close_kernel();
38426 +
38427 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
38428 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
38429 +#else
38430 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
38431 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
38432 +#endif
38433 +
38434 printk(KERN_INFO "uvesafb: protected mode interface info at "
38435 "%04x:%04x\n",
38436 (u16)task->t.regs.es, (u16)task->t.regs.edi);
38437 @@ -1799,6 +1822,11 @@ out:
38438 if (par->vbe_modes)
38439 kfree(par->vbe_modes);
38440
38441 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38442 + if (par->pmi_code)
38443 + module_free_exec(NULL, par->pmi_code);
38444 +#endif
38445 +
38446 framebuffer_release(info);
38447 return err;
38448 }
38449 @@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platfor
38450 kfree(par->vbe_state_orig);
38451 if (par->vbe_state_saved)
38452 kfree(par->vbe_state_saved);
38453 +
38454 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38455 + if (par->pmi_code)
38456 + module_free_exec(NULL, par->pmi_code);
38457 +#endif
38458 +
38459 }
38460
38461 framebuffer_release(info);
38462 diff -urNp linux-2.6.32.45/drivers/video/vesafb.c linux-2.6.32.45/drivers/video/vesafb.c
38463 --- linux-2.6.32.45/drivers/video/vesafb.c 2011-03-27 14:31:47.000000000 -0400
38464 +++ linux-2.6.32.45/drivers/video/vesafb.c 2011-08-05 20:33:55.000000000 -0400
38465 @@ -9,6 +9,7 @@
38466 */
38467
38468 #include <linux/module.h>
38469 +#include <linux/moduleloader.h>
38470 #include <linux/kernel.h>
38471 #include <linux/errno.h>
38472 #include <linux/string.h>
38473 @@ -53,8 +54,8 @@ static int vram_remap __initdata; /*
38474 static int vram_total __initdata; /* Set total amount of memory */
38475 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
38476 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
38477 -static void (*pmi_start)(void) __read_mostly;
38478 -static void (*pmi_pal) (void) __read_mostly;
38479 +static void (*pmi_start)(void) __read_only;
38480 +static void (*pmi_pal) (void) __read_only;
38481 static int depth __read_mostly;
38482 static int vga_compat __read_mostly;
38483 /* --------------------------------------------------------------------- */
38484 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
38485 unsigned int size_vmode;
38486 unsigned int size_remap;
38487 unsigned int size_total;
38488 + void *pmi_code = NULL;
38489
38490 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
38491 return -ENODEV;
38492 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
38493 size_remap = size_total;
38494 vesafb_fix.smem_len = size_remap;
38495
38496 -#ifndef __i386__
38497 - screen_info.vesapm_seg = 0;
38498 -#endif
38499 -
38500 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
38501 printk(KERN_WARNING
38502 "vesafb: cannot reserve video memory at 0x%lx\n",
38503 @@ -315,9 +313,21 @@ static int __init vesafb_probe(struct pl
38504 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
38505 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
38506
38507 +#ifdef __i386__
38508 +
38509 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38510 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
38511 + if (!pmi_code)
38512 +#elif !defined(CONFIG_PAX_KERNEXEC)
38513 + if (0)
38514 +#endif
38515 +
38516 +#endif
38517 + screen_info.vesapm_seg = 0;
38518 +
38519 if (screen_info.vesapm_seg) {
38520 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
38521 - screen_info.vesapm_seg,screen_info.vesapm_off);
38522 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
38523 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
38524 }
38525
38526 if (screen_info.vesapm_seg < 0xc000)
38527 @@ -325,9 +335,25 @@ static int __init vesafb_probe(struct pl
38528
38529 if (ypan || pmi_setpal) {
38530 unsigned short *pmi_base;
38531 +
38532 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
38533 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
38534 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
38535 +
38536 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38537 + pax_open_kernel();
38538 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
38539 +#else
38540 + pmi_code = pmi_base;
38541 +#endif
38542 +
38543 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
38544 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
38545 +
38546 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38547 + pmi_start = ktva_ktla(pmi_start);
38548 + pmi_pal = ktva_ktla(pmi_pal);
38549 + pax_close_kernel();
38550 +#endif
38551 +
38552 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
38553 if (pmi_base[3]) {
38554 printk(KERN_INFO "vesafb: pmi: ports = ");
38555 @@ -469,6 +495,11 @@ static int __init vesafb_probe(struct pl
38556 info->node, info->fix.id);
38557 return 0;
38558 err:
38559 +
38560 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38561 + module_free_exec(NULL, pmi_code);
38562 +#endif
38563 +
38564 if (info->screen_base)
38565 iounmap(info->screen_base);
38566 framebuffer_release(info);
38567 diff -urNp linux-2.6.32.45/drivers/xen/sys-hypervisor.c linux-2.6.32.45/drivers/xen/sys-hypervisor.c
38568 --- linux-2.6.32.45/drivers/xen/sys-hypervisor.c 2011-03-27 14:31:47.000000000 -0400
38569 +++ linux-2.6.32.45/drivers/xen/sys-hypervisor.c 2011-04-17 15:56:46.000000000 -0400
38570 @@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct ko
38571 return 0;
38572 }
38573
38574 -static struct sysfs_ops hyp_sysfs_ops = {
38575 +static const struct sysfs_ops hyp_sysfs_ops = {
38576 .show = hyp_sysfs_show,
38577 .store = hyp_sysfs_store,
38578 };
38579 diff -urNp linux-2.6.32.45/fs/9p/vfs_inode.c linux-2.6.32.45/fs/9p/vfs_inode.c
38580 --- linux-2.6.32.45/fs/9p/vfs_inode.c 2011-03-27 14:31:47.000000000 -0400
38581 +++ linux-2.6.32.45/fs/9p/vfs_inode.c 2011-04-17 15:56:46.000000000 -0400
38582 @@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct
38583 static void
38584 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
38585 {
38586 - char *s = nd_get_link(nd);
38587 + const char *s = nd_get_link(nd);
38588
38589 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
38590 IS_ERR(s) ? "<error>" : s);
38591 diff -urNp linux-2.6.32.45/fs/aio.c linux-2.6.32.45/fs/aio.c
38592 --- linux-2.6.32.45/fs/aio.c 2011-03-27 14:31:47.000000000 -0400
38593 +++ linux-2.6.32.45/fs/aio.c 2011-06-04 20:40:21.000000000 -0400
38594 @@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx
38595 size += sizeof(struct io_event) * nr_events;
38596 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
38597
38598 - if (nr_pages < 0)
38599 + if (nr_pages <= 0)
38600 return -EINVAL;
38601
38602 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
38603 @@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ct
38604 struct aio_timeout to;
38605 int retry = 0;
38606
38607 + pax_track_stack();
38608 +
38609 /* needed to zero any padding within an entry (there shouldn't be
38610 * any, but C is fun!
38611 */
38612 @@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *i
38613 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
38614 {
38615 ssize_t ret;
38616 + struct iovec iovstack;
38617
38618 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
38619 kiocb->ki_nbytes, 1,
38620 - &kiocb->ki_inline_vec, &kiocb->ki_iovec);
38621 + &iovstack, &kiocb->ki_iovec);
38622 if (ret < 0)
38623 goto out;
38624
38625 + if (kiocb->ki_iovec == &iovstack) {
38626 + kiocb->ki_inline_vec = iovstack;
38627 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
38628 + }
38629 kiocb->ki_nr_segs = kiocb->ki_nbytes;
38630 kiocb->ki_cur_seg = 0;
38631 /* ki_nbytes/left now reflect bytes instead of segs */
38632 diff -urNp linux-2.6.32.45/fs/attr.c linux-2.6.32.45/fs/attr.c
38633 --- linux-2.6.32.45/fs/attr.c 2011-03-27 14:31:47.000000000 -0400
38634 +++ linux-2.6.32.45/fs/attr.c 2011-04-17 15:56:46.000000000 -0400
38635 @@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode
38636 unsigned long limit;
38637
38638 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
38639 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
38640 if (limit != RLIM_INFINITY && offset > limit)
38641 goto out_sig;
38642 if (offset > inode->i_sb->s_maxbytes)
38643 diff -urNp linux-2.6.32.45/fs/autofs/root.c linux-2.6.32.45/fs/autofs/root.c
38644 --- linux-2.6.32.45/fs/autofs/root.c 2011-03-27 14:31:47.000000000 -0400
38645 +++ linux-2.6.32.45/fs/autofs/root.c 2011-04-17 15:56:46.000000000 -0400
38646 @@ -299,7 +299,8 @@ static int autofs_root_symlink(struct in
38647 set_bit(n,sbi->symlink_bitmap);
38648 sl = &sbi->symlink[n];
38649 sl->len = strlen(symname);
38650 - sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
38651 + slsize = sl->len+1;
38652 + sl->data = kmalloc(slsize, GFP_KERNEL);
38653 if (!sl->data) {
38654 clear_bit(n,sbi->symlink_bitmap);
38655 unlock_kernel();
38656 diff -urNp linux-2.6.32.45/fs/autofs4/symlink.c linux-2.6.32.45/fs/autofs4/symlink.c
38657 --- linux-2.6.32.45/fs/autofs4/symlink.c 2011-03-27 14:31:47.000000000 -0400
38658 +++ linux-2.6.32.45/fs/autofs4/symlink.c 2011-04-17 15:56:46.000000000 -0400
38659 @@ -15,7 +15,7 @@
38660 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
38661 {
38662 struct autofs_info *ino = autofs4_dentry_ino(dentry);
38663 - nd_set_link(nd, (char *)ino->u.symlink);
38664 + nd_set_link(nd, ino->u.symlink);
38665 return NULL;
38666 }
38667
38668 diff -urNp linux-2.6.32.45/fs/befs/linuxvfs.c linux-2.6.32.45/fs/befs/linuxvfs.c
38669 --- linux-2.6.32.45/fs/befs/linuxvfs.c 2011-03-27 14:31:47.000000000 -0400
38670 +++ linux-2.6.32.45/fs/befs/linuxvfs.c 2011-04-17 15:56:46.000000000 -0400
38671 @@ -493,7 +493,7 @@ static void befs_put_link(struct dentry
38672 {
38673 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
38674 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
38675 - char *link = nd_get_link(nd);
38676 + const char *link = nd_get_link(nd);
38677 if (!IS_ERR(link))
38678 kfree(link);
38679 }
38680 diff -urNp linux-2.6.32.45/fs/binfmt_aout.c linux-2.6.32.45/fs/binfmt_aout.c
38681 --- linux-2.6.32.45/fs/binfmt_aout.c 2011-03-27 14:31:47.000000000 -0400
38682 +++ linux-2.6.32.45/fs/binfmt_aout.c 2011-04-17 15:56:46.000000000 -0400
38683 @@ -16,6 +16,7 @@
38684 #include <linux/string.h>
38685 #include <linux/fs.h>
38686 #include <linux/file.h>
38687 +#include <linux/security.h>
38688 #include <linux/stat.h>
38689 #include <linux/fcntl.h>
38690 #include <linux/ptrace.h>
38691 @@ -102,6 +103,8 @@ static int aout_core_dump(long signr, st
38692 #endif
38693 # define START_STACK(u) (u.start_stack)
38694
38695 + memset(&dump, 0, sizeof(dump));
38696 +
38697 fs = get_fs();
38698 set_fs(KERNEL_DS);
38699 has_dumped = 1;
38700 @@ -113,10 +116,12 @@ static int aout_core_dump(long signr, st
38701
38702 /* If the size of the dump file exceeds the rlimit, then see what would happen
38703 if we wrote the stack, but not the data area. */
38704 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
38705 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
38706 dump.u_dsize = 0;
38707
38708 /* Make sure we have enough room to write the stack and data areas. */
38709 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
38710 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
38711 dump.u_ssize = 0;
38712
38713 @@ -146,9 +151,7 @@ static int aout_core_dump(long signr, st
38714 dump_size = dump.u_ssize << PAGE_SHIFT;
38715 DUMP_WRITE(dump_start,dump_size);
38716 }
38717 -/* Finally dump the task struct. Not be used by gdb, but could be useful */
38718 - set_fs(KERNEL_DS);
38719 - DUMP_WRITE(current,sizeof(*current));
38720 +/* Finally, let's not dump the task struct. It would not be used by gdb, but could be useful to an attacker */
38721 end_coredump:
38722 set_fs(fs);
38723 return has_dumped;
38724 @@ -249,6 +252,8 @@ static int load_aout_binary(struct linux
38725 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
38726 if (rlim >= RLIM_INFINITY)
38727 rlim = ~0;
38728 +
38729 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
38730 if (ex.a_data + ex.a_bss > rlim)
38731 return -ENOMEM;
38732
38733 @@ -277,6 +282,27 @@ static int load_aout_binary(struct linux
38734 install_exec_creds(bprm);
38735 current->flags &= ~PF_FORKNOEXEC;
38736
38737 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
38738 + current->mm->pax_flags = 0UL;
38739 +#endif
38740 +
38741 +#ifdef CONFIG_PAX_PAGEEXEC
38742 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
38743 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
38744 +
38745 +#ifdef CONFIG_PAX_EMUTRAMP
38746 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
38747 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
38748 +#endif
38749 +
38750 +#ifdef CONFIG_PAX_MPROTECT
38751 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
38752 + current->mm->pax_flags |= MF_PAX_MPROTECT;
38753 +#endif
38754 +
38755 + }
38756 +#endif
38757 +
38758 if (N_MAGIC(ex) == OMAGIC) {
38759 unsigned long text_addr, map_size;
38760 loff_t pos;
38761 @@ -349,7 +375,7 @@ static int load_aout_binary(struct linux
38762
38763 down_write(&current->mm->mmap_sem);
38764 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
38765 - PROT_READ | PROT_WRITE | PROT_EXEC,
38766 + PROT_READ | PROT_WRITE,
38767 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
38768 fd_offset + ex.a_text);
38769 up_write(&current->mm->mmap_sem);
38770 diff -urNp linux-2.6.32.45/fs/binfmt_elf.c linux-2.6.32.45/fs/binfmt_elf.c
38771 --- linux-2.6.32.45/fs/binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
38772 +++ linux-2.6.32.45/fs/binfmt_elf.c 2011-05-16 21:46:57.000000000 -0400
38773 @@ -50,6 +50,10 @@ static int elf_core_dump(long signr, str
38774 #define elf_core_dump NULL
38775 #endif
38776
38777 +#ifdef CONFIG_PAX_MPROTECT
38778 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
38779 +#endif
38780 +
38781 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
38782 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
38783 #else
38784 @@ -69,6 +73,11 @@ static struct linux_binfmt elf_format =
38785 .load_binary = load_elf_binary,
38786 .load_shlib = load_elf_library,
38787 .core_dump = elf_core_dump,
38788 +
38789 +#ifdef CONFIG_PAX_MPROTECT
38790 + .handle_mprotect= elf_handle_mprotect,
38791 +#endif
38792 +
38793 .min_coredump = ELF_EXEC_PAGESIZE,
38794 .hasvdso = 1
38795 };
38796 @@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
38797
38798 static int set_brk(unsigned long start, unsigned long end)
38799 {
38800 + unsigned long e = end;
38801 +
38802 start = ELF_PAGEALIGN(start);
38803 end = ELF_PAGEALIGN(end);
38804 if (end > start) {
38805 @@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
38806 if (BAD_ADDR(addr))
38807 return addr;
38808 }
38809 - current->mm->start_brk = current->mm->brk = end;
38810 + current->mm->start_brk = current->mm->brk = e;
38811 return 0;
38812 }
38813
38814 @@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
38815 elf_addr_t __user *u_rand_bytes;
38816 const char *k_platform = ELF_PLATFORM;
38817 const char *k_base_platform = ELF_BASE_PLATFORM;
38818 - unsigned char k_rand_bytes[16];
38819 + u32 k_rand_bytes[4];
38820 int items;
38821 elf_addr_t *elf_info;
38822 int ei_index = 0;
38823 const struct cred *cred = current_cred();
38824 struct vm_area_struct *vma;
38825 + unsigned long saved_auxv[AT_VECTOR_SIZE];
38826 +
38827 + pax_track_stack();
38828
38829 /*
38830 * In some cases (e.g. Hyper-Threading), we want to avoid L1
38831 @@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
38832 * Generate 16 random bytes for userspace PRNG seeding.
38833 */
38834 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
38835 - u_rand_bytes = (elf_addr_t __user *)
38836 - STACK_ALLOC(p, sizeof(k_rand_bytes));
38837 + srandom32(k_rand_bytes[0] ^ random32());
38838 + srandom32(k_rand_bytes[1] ^ random32());
38839 + srandom32(k_rand_bytes[2] ^ random32());
38840 + srandom32(k_rand_bytes[3] ^ random32());
38841 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
38842 + u_rand_bytes = (elf_addr_t __user *) p;
38843 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
38844 return -EFAULT;
38845
38846 @@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
38847 return -EFAULT;
38848 current->mm->env_end = p;
38849
38850 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
38851 +
38852 /* Put the elf_info on the stack in the right place. */
38853 sp = (elf_addr_t __user *)envp + 1;
38854 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
38855 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
38856 return -EFAULT;
38857 return 0;
38858 }
38859 @@ -385,10 +405,10 @@ static unsigned long load_elf_interp(str
38860 {
38861 struct elf_phdr *elf_phdata;
38862 struct elf_phdr *eppnt;
38863 - unsigned long load_addr = 0;
38864 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
38865 int load_addr_set = 0;
38866 unsigned long last_bss = 0, elf_bss = 0;
38867 - unsigned long error = ~0UL;
38868 + unsigned long error = -EINVAL;
38869 unsigned long total_size;
38870 int retval, i, size;
38871
38872 @@ -434,6 +454,11 @@ static unsigned long load_elf_interp(str
38873 goto out_close;
38874 }
38875
38876 +#ifdef CONFIG_PAX_SEGMEXEC
38877 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
38878 + pax_task_size = SEGMEXEC_TASK_SIZE;
38879 +#endif
38880 +
38881 eppnt = elf_phdata;
38882 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
38883 if (eppnt->p_type == PT_LOAD) {
38884 @@ -477,8 +502,8 @@ static unsigned long load_elf_interp(str
38885 k = load_addr + eppnt->p_vaddr;
38886 if (BAD_ADDR(k) ||
38887 eppnt->p_filesz > eppnt->p_memsz ||
38888 - eppnt->p_memsz > TASK_SIZE ||
38889 - TASK_SIZE - eppnt->p_memsz < k) {
38890 + eppnt->p_memsz > pax_task_size ||
38891 + pax_task_size - eppnt->p_memsz < k) {
38892 error = -ENOMEM;
38893 goto out_close;
38894 }
38895 @@ -532,6 +557,194 @@ out:
38896 return error;
38897 }
38898
38899 +#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
38900 +static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
38901 +{
38902 + unsigned long pax_flags = 0UL;
38903 +
38904 +#ifdef CONFIG_PAX_PAGEEXEC
38905 + if (elf_phdata->p_flags & PF_PAGEEXEC)
38906 + pax_flags |= MF_PAX_PAGEEXEC;
38907 +#endif
38908 +
38909 +#ifdef CONFIG_PAX_SEGMEXEC
38910 + if (elf_phdata->p_flags & PF_SEGMEXEC)
38911 + pax_flags |= MF_PAX_SEGMEXEC;
38912 +#endif
38913 +
38914 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
38915 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
38916 + if (nx_enabled)
38917 + pax_flags &= ~MF_PAX_SEGMEXEC;
38918 + else
38919 + pax_flags &= ~MF_PAX_PAGEEXEC;
38920 + }
38921 +#endif
38922 +
38923 +#ifdef CONFIG_PAX_EMUTRAMP
38924 + if (elf_phdata->p_flags & PF_EMUTRAMP)
38925 + pax_flags |= MF_PAX_EMUTRAMP;
38926 +#endif
38927 +
38928 +#ifdef CONFIG_PAX_MPROTECT
38929 + if (elf_phdata->p_flags & PF_MPROTECT)
38930 + pax_flags |= MF_PAX_MPROTECT;
38931 +#endif
38932 +
38933 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
38934 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
38935 + pax_flags |= MF_PAX_RANDMMAP;
38936 +#endif
38937 +
38938 + return pax_flags;
38939 +}
38940 +#endif
38941 +
38942 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
38943 +static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
38944 +{
38945 + unsigned long pax_flags = 0UL;
38946 +
38947 +#ifdef CONFIG_PAX_PAGEEXEC
38948 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
38949 + pax_flags |= MF_PAX_PAGEEXEC;
38950 +#endif
38951 +
38952 +#ifdef CONFIG_PAX_SEGMEXEC
38953 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
38954 + pax_flags |= MF_PAX_SEGMEXEC;
38955 +#endif
38956 +
38957 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
38958 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
38959 + if (nx_enabled)
38960 + pax_flags &= ~MF_PAX_SEGMEXEC;
38961 + else
38962 + pax_flags &= ~MF_PAX_PAGEEXEC;
38963 + }
38964 +#endif
38965 +
38966 +#ifdef CONFIG_PAX_EMUTRAMP
38967 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
38968 + pax_flags |= MF_PAX_EMUTRAMP;
38969 +#endif
38970 +
38971 +#ifdef CONFIG_PAX_MPROTECT
38972 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
38973 + pax_flags |= MF_PAX_MPROTECT;
38974 +#endif
38975 +
38976 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
38977 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
38978 + pax_flags |= MF_PAX_RANDMMAP;
38979 +#endif
38980 +
38981 + return pax_flags;
38982 +}
38983 +#endif
38984 +
38985 +#ifdef CONFIG_PAX_EI_PAX
38986 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
38987 +{
38988 + unsigned long pax_flags = 0UL;
38989 +
38990 +#ifdef CONFIG_PAX_PAGEEXEC
38991 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
38992 + pax_flags |= MF_PAX_PAGEEXEC;
38993 +#endif
38994 +
38995 +#ifdef CONFIG_PAX_SEGMEXEC
38996 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
38997 + pax_flags |= MF_PAX_SEGMEXEC;
38998 +#endif
38999 +
39000 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39001 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39002 + if (nx_enabled)
39003 + pax_flags &= ~MF_PAX_SEGMEXEC;
39004 + else
39005 + pax_flags &= ~MF_PAX_PAGEEXEC;
39006 + }
39007 +#endif
39008 +
39009 +#ifdef CONFIG_PAX_EMUTRAMP
39010 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
39011 + pax_flags |= MF_PAX_EMUTRAMP;
39012 +#endif
39013 +
39014 +#ifdef CONFIG_PAX_MPROTECT
39015 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
39016 + pax_flags |= MF_PAX_MPROTECT;
39017 +#endif
39018 +
39019 +#ifdef CONFIG_PAX_ASLR
39020 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
39021 + pax_flags |= MF_PAX_RANDMMAP;
39022 +#endif
39023 +
39024 + return pax_flags;
39025 +}
39026 +#endif
39027 +
39028 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
39029 +static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
39030 +{
39031 + unsigned long pax_flags = 0UL;
39032 +
39033 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
39034 + unsigned long i;
39035 + int found_flags = 0;
39036 +#endif
39037 +
39038 +#ifdef CONFIG_PAX_EI_PAX
39039 + pax_flags = pax_parse_ei_pax(elf_ex);
39040 +#endif
39041 +
39042 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
39043 + for (i = 0UL; i < elf_ex->e_phnum; i++)
39044 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
39045 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
39046 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
39047 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
39048 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
39049 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
39050 + return -EINVAL;
39051 +
39052 +#ifdef CONFIG_PAX_SOFTMODE
39053 + if (pax_softmode)
39054 + pax_flags = pax_parse_softmode(&elf_phdata[i]);
39055 + else
39056 +#endif
39057 +
39058 + pax_flags = pax_parse_hardmode(&elf_phdata[i]);
39059 + found_flags = 1;
39060 + break;
39061 + }
39062 +#endif
39063 +
39064 +#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
39065 + if (found_flags == 0) {
39066 + struct elf_phdr phdr;
39067 + memset(&phdr, 0, sizeof(phdr));
39068 + phdr.p_flags = PF_NOEMUTRAMP;
39069 +#ifdef CONFIG_PAX_SOFTMODE
39070 + if (pax_softmode)
39071 + pax_flags = pax_parse_softmode(&phdr);
39072 + else
39073 +#endif
39074 + pax_flags = pax_parse_hardmode(&phdr);
39075 + }
39076 +#endif
39077 +
39078 +
39079 + if (0 > pax_check_flags(&pax_flags))
39080 + return -EINVAL;
39081 +
39082 + current->mm->pax_flags = pax_flags;
39083 + return 0;
39084 +}
39085 +#endif
39086 +
39087 /*
39088 * These are the functions used to load ELF style executables and shared
39089 * libraries. There is no binary dependent code anywhere else.
39090 @@ -548,6 +761,11 @@ static unsigned long randomize_stack_top
39091 {
39092 unsigned int random_variable = 0;
39093
39094 +#ifdef CONFIG_PAX_RANDUSTACK
39095 + if (randomize_va_space)
39096 + return stack_top - current->mm->delta_stack;
39097 +#endif
39098 +
39099 if ((current->flags & PF_RANDOMIZE) &&
39100 !(current->personality & ADDR_NO_RANDOMIZE)) {
39101 random_variable = get_random_int() & STACK_RND_MASK;
39102 @@ -566,7 +784,7 @@ static int load_elf_binary(struct linux_
39103 unsigned long load_addr = 0, load_bias = 0;
39104 int load_addr_set = 0;
39105 char * elf_interpreter = NULL;
39106 - unsigned long error;
39107 + unsigned long error = 0;
39108 struct elf_phdr *elf_ppnt, *elf_phdata;
39109 unsigned long elf_bss, elf_brk;
39110 int retval, i;
39111 @@ -576,11 +794,11 @@ static int load_elf_binary(struct linux_
39112 unsigned long start_code, end_code, start_data, end_data;
39113 unsigned long reloc_func_desc = 0;
39114 int executable_stack = EXSTACK_DEFAULT;
39115 - unsigned long def_flags = 0;
39116 struct {
39117 struct elfhdr elf_ex;
39118 struct elfhdr interp_elf_ex;
39119 } *loc;
39120 + unsigned long pax_task_size = TASK_SIZE;
39121
39122 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
39123 if (!loc) {
39124 @@ -718,11 +936,80 @@ static int load_elf_binary(struct linux_
39125
39126 /* OK, This is the point of no return */
39127 current->flags &= ~PF_FORKNOEXEC;
39128 - current->mm->def_flags = def_flags;
39129 +
39130 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39131 + current->mm->pax_flags = 0UL;
39132 +#endif
39133 +
39134 +#ifdef CONFIG_PAX_DLRESOLVE
39135 + current->mm->call_dl_resolve = 0UL;
39136 +#endif
39137 +
39138 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
39139 + current->mm->call_syscall = 0UL;
39140 +#endif
39141 +
39142 +#ifdef CONFIG_PAX_ASLR
39143 + current->mm->delta_mmap = 0UL;
39144 + current->mm->delta_stack = 0UL;
39145 +#endif
39146 +
39147 + current->mm->def_flags = 0;
39148 +
39149 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
39150 + if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
39151 + send_sig(SIGKILL, current, 0);
39152 + goto out_free_dentry;
39153 + }
39154 +#endif
39155 +
39156 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
39157 + pax_set_initial_flags(bprm);
39158 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
39159 + if (pax_set_initial_flags_func)
39160 + (pax_set_initial_flags_func)(bprm);
39161 +#endif
39162 +
39163 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
39164 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
39165 + current->mm->context.user_cs_limit = PAGE_SIZE;
39166 + current->mm->def_flags |= VM_PAGEEXEC;
39167 + }
39168 +#endif
39169 +
39170 +#ifdef CONFIG_PAX_SEGMEXEC
39171 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
39172 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
39173 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
39174 + pax_task_size = SEGMEXEC_TASK_SIZE;
39175 + }
39176 +#endif
39177 +
39178 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
39179 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39180 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
39181 + put_cpu();
39182 + }
39183 +#endif
39184
39185 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
39186 may depend on the personality. */
39187 SET_PERSONALITY(loc->elf_ex);
39188 +
39189 +#ifdef CONFIG_PAX_ASLR
39190 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
39191 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
39192 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
39193 + }
39194 +#endif
39195 +
39196 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
39197 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39198 + executable_stack = EXSTACK_DISABLE_X;
39199 + current->personality &= ~READ_IMPLIES_EXEC;
39200 + } else
39201 +#endif
39202 +
39203 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
39204 current->personality |= READ_IMPLIES_EXEC;
39205
39206 @@ -804,6 +1091,20 @@ static int load_elf_binary(struct linux_
39207 #else
39208 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
39209 #endif
39210 +
39211 +#ifdef CONFIG_PAX_RANDMMAP
39212 + /* PaX: randomize base address at the default exe base if requested */
39213 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
39214 +#ifdef CONFIG_SPARC64
39215 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
39216 +#else
39217 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
39218 +#endif
39219 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
39220 + elf_flags |= MAP_FIXED;
39221 + }
39222 +#endif
39223 +
39224 }
39225
39226 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
39227 @@ -836,9 +1137,9 @@ static int load_elf_binary(struct linux_
39228 * allowed task size. Note that p_filesz must always be
39229 * <= p_memsz so it is only necessary to check p_memsz.
39230 */
39231 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
39232 - elf_ppnt->p_memsz > TASK_SIZE ||
39233 - TASK_SIZE - elf_ppnt->p_memsz < k) {
39234 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
39235 + elf_ppnt->p_memsz > pax_task_size ||
39236 + pax_task_size - elf_ppnt->p_memsz < k) {
39237 /* set_brk can never work. Avoid overflows. */
39238 send_sig(SIGKILL, current, 0);
39239 retval = -EINVAL;
39240 @@ -866,6 +1167,11 @@ static int load_elf_binary(struct linux_
39241 start_data += load_bias;
39242 end_data += load_bias;
39243
39244 +#ifdef CONFIG_PAX_RANDMMAP
39245 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
39246 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
39247 +#endif
39248 +
39249 /* Calling set_brk effectively mmaps the pages that we need
39250 * for the bss and break sections. We must do this before
39251 * mapping in the interpreter, to make sure it doesn't wind
39252 @@ -877,9 +1183,11 @@ static int load_elf_binary(struct linux_
39253 goto out_free_dentry;
39254 }
39255 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
39256 - send_sig(SIGSEGV, current, 0);
39257 - retval = -EFAULT; /* Nobody gets to see this, but.. */
39258 - goto out_free_dentry;
39259 + /*
39260 + * This bss-zeroing can fail if the ELF
39261 + * file specifies odd protections. So
39262 + * we don't check the return value
39263 + */
39264 }
39265
39266 if (elf_interpreter) {
39267 @@ -1112,8 +1420,10 @@ static int dump_seek(struct file *file,
39268 unsigned long n = off;
39269 if (n > PAGE_SIZE)
39270 n = PAGE_SIZE;
39271 - if (!dump_write(file, buf, n))
39272 + if (!dump_write(file, buf, n)) {
39273 + free_page((unsigned long)buf);
39274 return 0;
39275 + }
39276 off -= n;
39277 }
39278 free_page((unsigned long)buf);
39279 @@ -1125,7 +1435,7 @@ static int dump_seek(struct file *file,
39280 * Decide what to dump of a segment, part, all or none.
39281 */
39282 static unsigned long vma_dump_size(struct vm_area_struct *vma,
39283 - unsigned long mm_flags)
39284 + unsigned long mm_flags, long signr)
39285 {
39286 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
39287
39288 @@ -1159,7 +1469,7 @@ static unsigned long vma_dump_size(struc
39289 if (vma->vm_file == NULL)
39290 return 0;
39291
39292 - if (FILTER(MAPPED_PRIVATE))
39293 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
39294 goto whole;
39295
39296 /*
39297 @@ -1255,8 +1565,11 @@ static int writenote(struct memelfnote *
39298 #undef DUMP_WRITE
39299
39300 #define DUMP_WRITE(addr, nr) \
39301 + do { \
39302 + gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
39303 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
39304 - goto end_coredump;
39305 + goto end_coredump; \
39306 + } while (0);
39307
39308 static void fill_elf_header(struct elfhdr *elf, int segs,
39309 u16 machine, u32 flags, u8 osabi)
39310 @@ -1385,9 +1698,9 @@ static void fill_auxv_note(struct memelf
39311 {
39312 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
39313 int i = 0;
39314 - do
39315 + do {
39316 i += 2;
39317 - while (auxv[i - 2] != AT_NULL);
39318 + } while (auxv[i - 2] != AT_NULL);
39319 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
39320 }
39321
39322 @@ -1973,7 +2286,7 @@ static int elf_core_dump(long signr, str
39323 phdr.p_offset = offset;
39324 phdr.p_vaddr = vma->vm_start;
39325 phdr.p_paddr = 0;
39326 - phdr.p_filesz = vma_dump_size(vma, mm_flags);
39327 + phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
39328 phdr.p_memsz = vma->vm_end - vma->vm_start;
39329 offset += phdr.p_filesz;
39330 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
39331 @@ -2006,7 +2319,7 @@ static int elf_core_dump(long signr, str
39332 unsigned long addr;
39333 unsigned long end;
39334
39335 - end = vma->vm_start + vma_dump_size(vma, mm_flags);
39336 + end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
39337
39338 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
39339 struct page *page;
39340 @@ -2015,6 +2328,7 @@ static int elf_core_dump(long signr, str
39341 page = get_dump_page(addr);
39342 if (page) {
39343 void *kaddr = kmap(page);
39344 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
39345 stop = ((size += PAGE_SIZE) > limit) ||
39346 !dump_write(file, kaddr, PAGE_SIZE);
39347 kunmap(page);
39348 @@ -2042,6 +2356,97 @@ out:
39349
39350 #endif /* USE_ELF_CORE_DUMP */
39351
39352 +#ifdef CONFIG_PAX_MPROTECT
39353 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
39354 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
39355 + * we'll remove VM_MAYWRITE for good on RELRO segments.
39356 + *
39357 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
39358 + * basis because we want to allow the common case and not the special ones.
39359 + */
39360 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
39361 +{
39362 + struct elfhdr elf_h;
39363 + struct elf_phdr elf_p;
39364 + unsigned long i;
39365 + unsigned long oldflags;
39366 + bool is_textrel_rw, is_textrel_rx, is_relro;
39367 +
39368 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
39369 + return;
39370 +
39371 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
39372 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
39373 +
39374 +#ifdef CONFIG_PAX_ELFRELOCS
39375 + /* possible TEXTREL */
39376 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
39377 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
39378 +#else
39379 + is_textrel_rw = false;
39380 + is_textrel_rx = false;
39381 +#endif
39382 +
39383 + /* possible RELRO */
39384 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
39385 +
39386 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
39387 + return;
39388 +
39389 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
39390 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
39391 +
39392 +#ifdef CONFIG_PAX_ETEXECRELOCS
39393 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
39394 +#else
39395 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
39396 +#endif
39397 +
39398 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
39399 + !elf_check_arch(&elf_h) ||
39400 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
39401 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
39402 + return;
39403 +
39404 + for (i = 0UL; i < elf_h.e_phnum; i++) {
39405 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
39406 + return;
39407 + switch (elf_p.p_type) {
39408 + case PT_DYNAMIC:
39409 + if (!is_textrel_rw && !is_textrel_rx)
39410 + continue;
39411 + i = 0UL;
39412 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
39413 + elf_dyn dyn;
39414 +
39415 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
39416 + return;
39417 + if (dyn.d_tag == DT_NULL)
39418 + return;
39419 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
39420 + gr_log_textrel(vma);
39421 + if (is_textrel_rw)
39422 + vma->vm_flags |= VM_MAYWRITE;
39423 + else
39424 +				/* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
39425 + vma->vm_flags &= ~VM_MAYWRITE;
39426 + return;
39427 + }
39428 + i++;
39429 + }
39430 + return;
39431 +
39432 + case PT_GNU_RELRO:
39433 + if (!is_relro)
39434 + continue;
39435 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
39436 + vma->vm_flags &= ~VM_MAYWRITE;
39437 + return;
39438 + }
39439 + }
39440 +}
39441 +#endif
39442 +
39443 static int __init init_elf_binfmt(void)
39444 {
39445 return register_binfmt(&elf_format);
39446 diff -urNp linux-2.6.32.45/fs/binfmt_flat.c linux-2.6.32.45/fs/binfmt_flat.c
39447 --- linux-2.6.32.45/fs/binfmt_flat.c 2011-03-27 14:31:47.000000000 -0400
39448 +++ linux-2.6.32.45/fs/binfmt_flat.c 2011-04-17 15:56:46.000000000 -0400
39449 @@ -564,7 +564,9 @@ static int load_flat_file(struct linux_b
39450 realdatastart = (unsigned long) -ENOMEM;
39451 printk("Unable to allocate RAM for process data, errno %d\n",
39452 (int)-realdatastart);
39453 + down_write(&current->mm->mmap_sem);
39454 do_munmap(current->mm, textpos, text_len);
39455 + up_write(&current->mm->mmap_sem);
39456 ret = realdatastart;
39457 goto err;
39458 }
39459 @@ -588,8 +590,10 @@ static int load_flat_file(struct linux_b
39460 }
39461 if (IS_ERR_VALUE(result)) {
39462 printk("Unable to read data+bss, errno %d\n", (int)-result);
39463 + down_write(&current->mm->mmap_sem);
39464 do_munmap(current->mm, textpos, text_len);
39465 do_munmap(current->mm, realdatastart, data_len + extra);
39466 + up_write(&current->mm->mmap_sem);
39467 ret = result;
39468 goto err;
39469 }
39470 @@ -658,8 +662,10 @@ static int load_flat_file(struct linux_b
39471 }
39472 if (IS_ERR_VALUE(result)) {
39473 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
39474 + down_write(&current->mm->mmap_sem);
39475 do_munmap(current->mm, textpos, text_len + data_len + extra +
39476 MAX_SHARED_LIBS * sizeof(unsigned long));
39477 + up_write(&current->mm->mmap_sem);
39478 ret = result;
39479 goto err;
39480 }
39481 diff -urNp linux-2.6.32.45/fs/bio.c linux-2.6.32.45/fs/bio.c
39482 --- linux-2.6.32.45/fs/bio.c 2011-03-27 14:31:47.000000000 -0400
39483 +++ linux-2.6.32.45/fs/bio.c 2011-04-17 15:56:46.000000000 -0400
39484 @@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_cr
39485
39486 i = 0;
39487 while (i < bio_slab_nr) {
39488 - struct bio_slab *bslab = &bio_slabs[i];
39489 + bslab = &bio_slabs[i];
39490
39491 if (!bslab->slab && entry == -1)
39492 entry = i;
39493 @@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct b
39494 const int read = bio_data_dir(bio) == READ;
39495 struct bio_map_data *bmd = bio->bi_private;
39496 int i;
39497 - char *p = bmd->sgvecs[0].iov_base;
39498 + char *p = (__force char *)bmd->sgvecs[0].iov_base;
39499
39500 __bio_for_each_segment(bvec, bio, i, 0) {
39501 char *addr = page_address(bvec->bv_page);
39502 diff -urNp linux-2.6.32.45/fs/block_dev.c linux-2.6.32.45/fs/block_dev.c
39503 --- linux-2.6.32.45/fs/block_dev.c 2011-08-09 18:35:29.000000000 -0400
39504 +++ linux-2.6.32.45/fs/block_dev.c 2011-08-09 18:34:00.000000000 -0400
39505 @@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev,
39506 else if (bdev->bd_contains == bdev)
39507 res = 0; /* is a whole device which isn't held */
39508
39509 - else if (bdev->bd_contains->bd_holder == bd_claim)
39510 + else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
39511 res = 0; /* is a partition of a device that is being partitioned */
39512 else if (bdev->bd_contains->bd_holder != NULL)
39513 res = -EBUSY; /* is a partition of a held device */
39514 diff -urNp linux-2.6.32.45/fs/btrfs/ctree.c linux-2.6.32.45/fs/btrfs/ctree.c
39515 --- linux-2.6.32.45/fs/btrfs/ctree.c 2011-03-27 14:31:47.000000000 -0400
39516 +++ linux-2.6.32.45/fs/btrfs/ctree.c 2011-04-17 15:56:46.000000000 -0400
39517 @@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(st
39518 free_extent_buffer(buf);
39519 add_root_to_dirty_list(root);
39520 } else {
39521 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
39522 - parent_start = parent->start;
39523 - else
39524 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
39525 + if (parent)
39526 + parent_start = parent->start;
39527 + else
39528 + parent_start = 0;
39529 + } else
39530 parent_start = 0;
39531
39532 WARN_ON(trans->transid != btrfs_header_generation(parent));
39533 @@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_tran
39534
39535 ret = 0;
39536 if (slot == 0) {
39537 - struct btrfs_disk_key disk_key;
39538 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
39539 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
39540 }
39541 diff -urNp linux-2.6.32.45/fs/btrfs/disk-io.c linux-2.6.32.45/fs/btrfs/disk-io.c
39542 --- linux-2.6.32.45/fs/btrfs/disk-io.c 2011-04-17 17:00:52.000000000 -0400
39543 +++ linux-2.6.32.45/fs/btrfs/disk-io.c 2011-04-17 17:03:11.000000000 -0400
39544 @@ -39,7 +39,7 @@
39545 #include "tree-log.h"
39546 #include "free-space-cache.h"
39547
39548 -static struct extent_io_ops btree_extent_io_ops;
39549 +static const struct extent_io_ops btree_extent_io_ops;
39550 static void end_workqueue_fn(struct btrfs_work *work);
39551 static void free_fs_root(struct btrfs_root *root);
39552
39553 @@ -2607,7 +2607,7 @@ out:
39554 return 0;
39555 }
39556
39557 -static struct extent_io_ops btree_extent_io_ops = {
39558 +static const struct extent_io_ops btree_extent_io_ops = {
39559 .write_cache_pages_lock_hook = btree_lock_page_hook,
39560 .readpage_end_io_hook = btree_readpage_end_io_hook,
39561 .submit_bio_hook = btree_submit_bio_hook,
39562 diff -urNp linux-2.6.32.45/fs/btrfs/extent_io.h linux-2.6.32.45/fs/btrfs/extent_io.h
39563 --- linux-2.6.32.45/fs/btrfs/extent_io.h 2011-03-27 14:31:47.000000000 -0400
39564 +++ linux-2.6.32.45/fs/btrfs/extent_io.h 2011-04-17 15:56:46.000000000 -0400
39565 @@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(s
39566 struct bio *bio, int mirror_num,
39567 unsigned long bio_flags);
39568 struct extent_io_ops {
39569 - int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
39570 + int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
39571 u64 start, u64 end, int *page_started,
39572 unsigned long *nr_written);
39573 - int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
39574 - int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
39575 + int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
39576 + int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
39577 extent_submit_bio_hook_t *submit_bio_hook;
39578 - int (*merge_bio_hook)(struct page *page, unsigned long offset,
39579 + int (* const merge_bio_hook)(struct page *page, unsigned long offset,
39580 size_t size, struct bio *bio,
39581 unsigned long bio_flags);
39582 - int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
39583 - int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
39584 + int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
39585 + int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
39586 u64 start, u64 end,
39587 struct extent_state *state);
39588 - int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
39589 + int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
39590 u64 start, u64 end,
39591 struct extent_state *state);
39592 - int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
39593 + int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
39594 struct extent_state *state);
39595 - int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
39596 + int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
39597 struct extent_state *state, int uptodate);
39598 - int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
39599 + int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
39600 unsigned long old, unsigned long bits);
39601 - int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
39602 + int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
39603 unsigned long bits);
39604 - int (*merge_extent_hook)(struct inode *inode,
39605 + int (* const merge_extent_hook)(struct inode *inode,
39606 struct extent_state *new,
39607 struct extent_state *other);
39608 - int (*split_extent_hook)(struct inode *inode,
39609 + int (* const split_extent_hook)(struct inode *inode,
39610 struct extent_state *orig, u64 split);
39611 - int (*write_cache_pages_lock_hook)(struct page *page);
39612 + int (* const write_cache_pages_lock_hook)(struct page *page);
39613 };
39614
39615 struct extent_io_tree {
39616 @@ -88,7 +88,7 @@ struct extent_io_tree {
39617 u64 dirty_bytes;
39618 spinlock_t lock;
39619 spinlock_t buffer_lock;
39620 - struct extent_io_ops *ops;
39621 + const struct extent_io_ops *ops;
39622 };
39623
39624 struct extent_state {
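The extent_io_ops changes above follow the constification pattern used throughout this patch: once every function-pointer member is declared const and each table is defined as a const object, the whole ops table can be placed in read-only memory, so a stray or attacker-controlled kernel write can no longer redirect the hooks. A minimal sketch of the pattern, with illustrative names rather than the btrfs ones:

	struct demo_ops {
		/* the pointers themselves are immutable after initialization */
		int (* const read_hook)(int arg);
		int (* const write_hook)(int arg);
	};

	static int demo_read(int arg)  { return arg; }
	static int demo_write(int arg) { return -arg; }

	/* a const definition lets the table live in .rodata */
	static const struct demo_ops demo_table = {
		.read_hook  = demo_read,
		.write_hook = demo_write,
	};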
39625 diff -urNp linux-2.6.32.45/fs/btrfs/extent-tree.c linux-2.6.32.45/fs/btrfs/extent-tree.c
39626 --- linux-2.6.32.45/fs/btrfs/extent-tree.c 2011-03-27 14:31:47.000000000 -0400
39627 +++ linux-2.6.32.45/fs/btrfs/extent-tree.c 2011-06-12 06:39:08.000000000 -0400
39628 @@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(
39629 u64 group_start = group->key.objectid;
39630 new_extents = kmalloc(sizeof(*new_extents),
39631 GFP_NOFS);
39632 + if (!new_extents) {
39633 + ret = -ENOMEM;
39634 + goto out;
39635 + }
39636 nr_extents = 1;
39637 ret = get_new_locations(reloc_inode,
39638 extent_key,
39639 diff -urNp linux-2.6.32.45/fs/btrfs/free-space-cache.c linux-2.6.32.45/fs/btrfs/free-space-cache.c
39640 --- linux-2.6.32.45/fs/btrfs/free-space-cache.c 2011-03-27 14:31:47.000000000 -0400
39641 +++ linux-2.6.32.45/fs/btrfs/free-space-cache.c 2011-04-17 15:56:46.000000000 -0400
39642 @@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
39643
39644 while(1) {
39645 if (entry->bytes < bytes || entry->offset < min_start) {
39646 - struct rb_node *node;
39647 -
39648 node = rb_next(&entry->offset_index);
39649 if (!node)
39650 break;
39651 @@ -1226,7 +1224,7 @@ again:
39652 */
39653 while (entry->bitmap || found_bitmap ||
39654 (!entry->bitmap && entry->bytes < min_bytes)) {
39655 - struct rb_node *node = rb_next(&entry->offset_index);
39656 + node = rb_next(&entry->offset_index);
39657
39658 if (entry->bitmap && entry->bytes > bytes + empty_size) {
39659 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
39660 diff -urNp linux-2.6.32.45/fs/btrfs/inode.c linux-2.6.32.45/fs/btrfs/inode.c
39661 --- linux-2.6.32.45/fs/btrfs/inode.c 2011-03-27 14:31:47.000000000 -0400
39662 +++ linux-2.6.32.45/fs/btrfs/inode.c 2011-06-12 06:39:58.000000000 -0400
39663 @@ -63,7 +63,7 @@ static const struct inode_operations btr
39664 static const struct address_space_operations btrfs_aops;
39665 static const struct address_space_operations btrfs_symlink_aops;
39666 static const struct file_operations btrfs_dir_file_operations;
39667 -static struct extent_io_ops btrfs_extent_io_ops;
39668 +static const struct extent_io_ops btrfs_extent_io_ops;
39669
39670 static struct kmem_cache *btrfs_inode_cachep;
39671 struct kmem_cache *btrfs_trans_handle_cachep;
39672 @@ -925,6 +925,7 @@ static int cow_file_range_async(struct i
39673 1, 0, NULL, GFP_NOFS);
39674 while (start < end) {
39675 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
39676 + BUG_ON(!async_cow);
39677 async_cow->inode = inode;
39678 async_cow->root = root;
39679 async_cow->locked_page = locked_page;
39680 @@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(st
39681 inline_size = btrfs_file_extent_inline_item_len(leaf,
39682 btrfs_item_nr(leaf, path->slots[0]));
39683 tmp = kmalloc(inline_size, GFP_NOFS);
39684 + if (!tmp)
39685 + return -ENOMEM;
39686 ptr = btrfs_file_extent_inline_start(item);
39687
39688 read_extent_buffer(leaf, tmp, ptr, inline_size);
39689 @@ -5410,7 +5413,7 @@ fail:
39690 return -ENOMEM;
39691 }
39692
39693 -static int btrfs_getattr(struct vfsmount *mnt,
39694 +int btrfs_getattr(struct vfsmount *mnt,
39695 struct dentry *dentry, struct kstat *stat)
39696 {
39697 struct inode *inode = dentry->d_inode;
39698 @@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount
39699 return 0;
39700 }
39701
39702 +EXPORT_SYMBOL(btrfs_getattr);
39703 +
39704 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
39705 +{
39706 + return BTRFS_I(inode)->root->anon_super.s_dev;
39707 +}
39708 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
39709 +
39710 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
39711 struct inode *new_dir, struct dentry *new_dentry)
39712 {
39713 @@ -5972,7 +5983,7 @@ static const struct file_operations btrf
39714 .fsync = btrfs_sync_file,
39715 };
39716
39717 -static struct extent_io_ops btrfs_extent_io_ops = {
39718 +static const struct extent_io_ops btrfs_extent_io_ops = {
39719 .fill_delalloc = run_delalloc_range,
39720 .submit_bio_hook = btrfs_submit_bio_hook,
39721 .merge_bio_hook = btrfs_merge_bio_hook,
39722 diff -urNp linux-2.6.32.45/fs/btrfs/relocation.c linux-2.6.32.45/fs/btrfs/relocation.c
39723 --- linux-2.6.32.45/fs/btrfs/relocation.c 2011-03-27 14:31:47.000000000 -0400
39724 +++ linux-2.6.32.45/fs/btrfs/relocation.c 2011-04-17 15:56:46.000000000 -0400
39725 @@ -884,7 +884,7 @@ static int __update_reloc_root(struct bt
39726 }
39727 spin_unlock(&rc->reloc_root_tree.lock);
39728
39729 - BUG_ON((struct btrfs_root *)node->data != root);
39730 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
39731
39732 if (!del) {
39733 spin_lock(&rc->reloc_root_tree.lock);
39734 diff -urNp linux-2.6.32.45/fs/btrfs/sysfs.c linux-2.6.32.45/fs/btrfs/sysfs.c
39735 --- linux-2.6.32.45/fs/btrfs/sysfs.c 2011-03-27 14:31:47.000000000 -0400
39736 +++ linux-2.6.32.45/fs/btrfs/sysfs.c 2011-04-17 15:56:46.000000000 -0400
39737 @@ -164,12 +164,12 @@ static void btrfs_root_release(struct ko
39738 complete(&root->kobj_unregister);
39739 }
39740
39741 -static struct sysfs_ops btrfs_super_attr_ops = {
39742 +static const struct sysfs_ops btrfs_super_attr_ops = {
39743 .show = btrfs_super_attr_show,
39744 .store = btrfs_super_attr_store,
39745 };
39746
39747 -static struct sysfs_ops btrfs_root_attr_ops = {
39748 +static const struct sysfs_ops btrfs_root_attr_ops = {
39749 .show = btrfs_root_attr_show,
39750 .store = btrfs_root_attr_store,
39751 };
39752 diff -urNp linux-2.6.32.45/fs/buffer.c linux-2.6.32.45/fs/buffer.c
39753 --- linux-2.6.32.45/fs/buffer.c 2011-03-27 14:31:47.000000000 -0400
39754 +++ linux-2.6.32.45/fs/buffer.c 2011-04-17 15:56:46.000000000 -0400
39755 @@ -25,6 +25,7 @@
39756 #include <linux/percpu.h>
39757 #include <linux/slab.h>
39758 #include <linux/capability.h>
39759 +#include <linux/security.h>
39760 #include <linux/blkdev.h>
39761 #include <linux/file.h>
39762 #include <linux/quotaops.h>
39763 diff -urNp linux-2.6.32.45/fs/cachefiles/bind.c linux-2.6.32.45/fs/cachefiles/bind.c
39764 --- linux-2.6.32.45/fs/cachefiles/bind.c 2011-03-27 14:31:47.000000000 -0400
39765 +++ linux-2.6.32.45/fs/cachefiles/bind.c 2011-04-17 15:56:46.000000000 -0400
39766 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
39767 args);
39768
39769 /* start by checking things over */
39770 - ASSERT(cache->fstop_percent >= 0 &&
39771 - cache->fstop_percent < cache->fcull_percent &&
39772 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
39773 cache->fcull_percent < cache->frun_percent &&
39774 cache->frun_percent < 100);
39775
39776 - ASSERT(cache->bstop_percent >= 0 &&
39777 - cache->bstop_percent < cache->bcull_percent &&
39778 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
39779 cache->bcull_percent < cache->brun_percent &&
39780 cache->brun_percent < 100);
39781
39782 diff -urNp linux-2.6.32.45/fs/cachefiles/daemon.c linux-2.6.32.45/fs/cachefiles/daemon.c
39783 --- linux-2.6.32.45/fs/cachefiles/daemon.c 2011-03-27 14:31:47.000000000 -0400
39784 +++ linux-2.6.32.45/fs/cachefiles/daemon.c 2011-04-17 15:56:46.000000000 -0400
39785 @@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(s
39786 if (test_bit(CACHEFILES_DEAD, &cache->flags))
39787 return -EIO;
39788
39789 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
39790 + if (datalen > PAGE_SIZE - 1)
39791 return -EOPNOTSUPP;
39792
39793 /* drag the command string into the kernel so we can parse it */
39794 @@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struc
39795 if (args[0] != '%' || args[1] != '\0')
39796 return -EINVAL;
39797
39798 - if (fstop < 0 || fstop >= cache->fcull_percent)
39799 + if (fstop >= cache->fcull_percent)
39800 return cachefiles_daemon_range_error(cache, args);
39801
39802 cache->fstop_percent = fstop;
39803 @@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struc
39804 if (args[0] != '%' || args[1] != '\0')
39805 return -EINVAL;
39806
39807 - if (bstop < 0 || bstop >= cache->bcull_percent)
39808 + if (bstop >= cache->bcull_percent)
39809 return cachefiles_daemon_range_error(cache, args);
39810
39811 cache->bstop_percent = bstop;
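In cachefiles_daemon_write(), cachefiles_daemon_fstop() and cachefiles_daemon_bstop() the tested values cannot be negative (datalen is a size_t, the percentage inputs are unsigned), so the dropped "< 0" arms were dead comparisons and removing them does not change behaviour. Only the upper bound can ever fire, roughly:

	size_t datalen;			/* unsigned, so "datalen < 0" is always false */

	if (datalen > PAGE_SIZE - 1)	/* the surviving, meaningful check */
		return -EOPNOTSUPP;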
39812 diff -urNp linux-2.6.32.45/fs/cachefiles/internal.h linux-2.6.32.45/fs/cachefiles/internal.h
39813 --- linux-2.6.32.45/fs/cachefiles/internal.h 2011-03-27 14:31:47.000000000 -0400
39814 +++ linux-2.6.32.45/fs/cachefiles/internal.h 2011-05-04 17:56:28.000000000 -0400
39815 @@ -56,7 +56,7 @@ struct cachefiles_cache {
39816 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
39817 struct rb_root active_nodes; /* active nodes (can't be culled) */
39818 rwlock_t active_lock; /* lock for active_nodes */
39819 - atomic_t gravecounter; /* graveyard uniquifier */
39820 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
39821 unsigned frun_percent; /* when to stop culling (% files) */
39822 unsigned fcull_percent; /* when to start culling (% files) */
39823 unsigned fstop_percent; /* when to stop allocating (% files) */
39824 @@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struc
39825 * proc.c
39826 */
39827 #ifdef CONFIG_CACHEFILES_HISTOGRAM
39828 -extern atomic_t cachefiles_lookup_histogram[HZ];
39829 -extern atomic_t cachefiles_mkdir_histogram[HZ];
39830 -extern atomic_t cachefiles_create_histogram[HZ];
39831 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
39832 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
39833 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
39834
39835 extern int __init cachefiles_proc_init(void);
39836 extern void cachefiles_proc_cleanup(void);
39837 static inline
39838 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
39839 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
39840 {
39841 unsigned long jif = jiffies - start_jif;
39842 if (jif >= HZ)
39843 jif = HZ - 1;
39844 - atomic_inc(&histogram[jif]);
39845 + atomic_inc_unchecked(&histogram[jif]);
39846 }
39847
39848 #else
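The gravecounter and histogram fields switch to atomic_unchecked_t, the PaX REFCOUNT escape hatch: plain statistics counters, where wraparound is harmless, are marked unchecked so that only reference counts proper receive overflow detection. One way to model the unchecked variants when the rest of the REFCOUNT machinery is absent is simply to alias them to the normal atomics; an illustrative fallback (an assumption, not the patch's actual definitions in include/):

	#ifndef CONFIG_PAX_REFCOUNT
	typedef atomic_t atomic_unchecked_t;
	#define atomic_read_unchecked(v)	atomic_read(v)
	#define atomic_set_unchecked(v, i)	atomic_set((v), (i))
	#define atomic_inc_unchecked(v)		atomic_inc(v)
	#define atomic_inc_return_unchecked(v)	atomic_inc_return(v)
	#endif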
39849 diff -urNp linux-2.6.32.45/fs/cachefiles/namei.c linux-2.6.32.45/fs/cachefiles/namei.c
39850 --- linux-2.6.32.45/fs/cachefiles/namei.c 2011-03-27 14:31:47.000000000 -0400
39851 +++ linux-2.6.32.45/fs/cachefiles/namei.c 2011-05-04 17:56:28.000000000 -0400
39852 @@ -250,7 +250,7 @@ try_again:
39853 /* first step is to make up a grave dentry in the graveyard */
39854 sprintf(nbuffer, "%08x%08x",
39855 (uint32_t) get_seconds(),
39856 - (uint32_t) atomic_inc_return(&cache->gravecounter));
39857 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
39858
39859 /* do the multiway lock magic */
39860 trap = lock_rename(cache->graveyard, dir);
39861 diff -urNp linux-2.6.32.45/fs/cachefiles/proc.c linux-2.6.32.45/fs/cachefiles/proc.c
39862 --- linux-2.6.32.45/fs/cachefiles/proc.c 2011-03-27 14:31:47.000000000 -0400
39863 +++ linux-2.6.32.45/fs/cachefiles/proc.c 2011-05-04 17:56:28.000000000 -0400
39864 @@ -14,9 +14,9 @@
39865 #include <linux/seq_file.h>
39866 #include "internal.h"
39867
39868 -atomic_t cachefiles_lookup_histogram[HZ];
39869 -atomic_t cachefiles_mkdir_histogram[HZ];
39870 -atomic_t cachefiles_create_histogram[HZ];
39871 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
39872 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
39873 +atomic_unchecked_t cachefiles_create_histogram[HZ];
39874
39875 /*
39876 * display the latency histogram
39877 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
39878 return 0;
39879 default:
39880 index = (unsigned long) v - 3;
39881 - x = atomic_read(&cachefiles_lookup_histogram[index]);
39882 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
39883 - z = atomic_read(&cachefiles_create_histogram[index]);
39884 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
39885 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
39886 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
39887 if (x == 0 && y == 0 && z == 0)
39888 return 0;
39889
39890 diff -urNp linux-2.6.32.45/fs/cachefiles/rdwr.c linux-2.6.32.45/fs/cachefiles/rdwr.c
39891 --- linux-2.6.32.45/fs/cachefiles/rdwr.c 2011-03-27 14:31:47.000000000 -0400
39892 +++ linux-2.6.32.45/fs/cachefiles/rdwr.c 2011-04-17 15:56:46.000000000 -0400
39893 @@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache
39894 old_fs = get_fs();
39895 set_fs(KERNEL_DS);
39896 ret = file->f_op->write(
39897 - file, (const void __user *) data, len, &pos);
39898 + file, (__force const void __user *) data, len, &pos);
39899 set_fs(old_fs);
39900 kunmap(page);
39901 if (ret != len)
39902 diff -urNp linux-2.6.32.45/fs/cifs/cifs_debug.c linux-2.6.32.45/fs/cifs/cifs_debug.c
39903 --- linux-2.6.32.45/fs/cifs/cifs_debug.c 2011-03-27 14:31:47.000000000 -0400
39904 +++ linux-2.6.32.45/fs/cifs/cifs_debug.c 2011-05-04 17:56:28.000000000 -0400
39905 @@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(str
39906 tcon = list_entry(tmp3,
39907 struct cifsTconInfo,
39908 tcon_list);
39909 - atomic_set(&tcon->num_smbs_sent, 0);
39910 - atomic_set(&tcon->num_writes, 0);
39911 - atomic_set(&tcon->num_reads, 0);
39912 - atomic_set(&tcon->num_oplock_brks, 0);
39913 - atomic_set(&tcon->num_opens, 0);
39914 - atomic_set(&tcon->num_posixopens, 0);
39915 - atomic_set(&tcon->num_posixmkdirs, 0);
39916 - atomic_set(&tcon->num_closes, 0);
39917 - atomic_set(&tcon->num_deletes, 0);
39918 - atomic_set(&tcon->num_mkdirs, 0);
39919 - atomic_set(&tcon->num_rmdirs, 0);
39920 - atomic_set(&tcon->num_renames, 0);
39921 - atomic_set(&tcon->num_t2renames, 0);
39922 - atomic_set(&tcon->num_ffirst, 0);
39923 - atomic_set(&tcon->num_fnext, 0);
39924 - atomic_set(&tcon->num_fclose, 0);
39925 - atomic_set(&tcon->num_hardlinks, 0);
39926 - atomic_set(&tcon->num_symlinks, 0);
39927 - atomic_set(&tcon->num_locks, 0);
39928 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
39929 + atomic_set_unchecked(&tcon->num_writes, 0);
39930 + atomic_set_unchecked(&tcon->num_reads, 0);
39931 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
39932 + atomic_set_unchecked(&tcon->num_opens, 0);
39933 + atomic_set_unchecked(&tcon->num_posixopens, 0);
39934 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
39935 + atomic_set_unchecked(&tcon->num_closes, 0);
39936 + atomic_set_unchecked(&tcon->num_deletes, 0);
39937 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
39938 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
39939 + atomic_set_unchecked(&tcon->num_renames, 0);
39940 + atomic_set_unchecked(&tcon->num_t2renames, 0);
39941 + atomic_set_unchecked(&tcon->num_ffirst, 0);
39942 + atomic_set_unchecked(&tcon->num_fnext, 0);
39943 + atomic_set_unchecked(&tcon->num_fclose, 0);
39944 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
39945 + atomic_set_unchecked(&tcon->num_symlinks, 0);
39946 + atomic_set_unchecked(&tcon->num_locks, 0);
39947 }
39948 }
39949 }
39950 @@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct s
39951 if (tcon->need_reconnect)
39952 seq_puts(m, "\tDISCONNECTED ");
39953 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
39954 - atomic_read(&tcon->num_smbs_sent),
39955 - atomic_read(&tcon->num_oplock_brks));
39956 + atomic_read_unchecked(&tcon->num_smbs_sent),
39957 + atomic_read_unchecked(&tcon->num_oplock_brks));
39958 seq_printf(m, "\nReads: %d Bytes: %lld",
39959 - atomic_read(&tcon->num_reads),
39960 + atomic_read_unchecked(&tcon->num_reads),
39961 (long long)(tcon->bytes_read));
39962 seq_printf(m, "\nWrites: %d Bytes: %lld",
39963 - atomic_read(&tcon->num_writes),
39964 + atomic_read_unchecked(&tcon->num_writes),
39965 (long long)(tcon->bytes_written));
39966 seq_printf(m, "\nFlushes: %d",
39967 - atomic_read(&tcon->num_flushes));
39968 + atomic_read_unchecked(&tcon->num_flushes));
39969 seq_printf(m, "\nLocks: %d HardLinks: %d "
39970 "Symlinks: %d",
39971 - atomic_read(&tcon->num_locks),
39972 - atomic_read(&tcon->num_hardlinks),
39973 - atomic_read(&tcon->num_symlinks));
39974 + atomic_read_unchecked(&tcon->num_locks),
39975 + atomic_read_unchecked(&tcon->num_hardlinks),
39976 + atomic_read_unchecked(&tcon->num_symlinks));
39977 seq_printf(m, "\nOpens: %d Closes: %d "
39978 "Deletes: %d",
39979 - atomic_read(&tcon->num_opens),
39980 - atomic_read(&tcon->num_closes),
39981 - atomic_read(&tcon->num_deletes));
39982 + atomic_read_unchecked(&tcon->num_opens),
39983 + atomic_read_unchecked(&tcon->num_closes),
39984 + atomic_read_unchecked(&tcon->num_deletes));
39985 seq_printf(m, "\nPosix Opens: %d "
39986 "Posix Mkdirs: %d",
39987 - atomic_read(&tcon->num_posixopens),
39988 - atomic_read(&tcon->num_posixmkdirs));
39989 + atomic_read_unchecked(&tcon->num_posixopens),
39990 + atomic_read_unchecked(&tcon->num_posixmkdirs));
39991 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
39992 - atomic_read(&tcon->num_mkdirs),
39993 - atomic_read(&tcon->num_rmdirs));
39994 + atomic_read_unchecked(&tcon->num_mkdirs),
39995 + atomic_read_unchecked(&tcon->num_rmdirs));
39996 seq_printf(m, "\nRenames: %d T2 Renames %d",
39997 - atomic_read(&tcon->num_renames),
39998 - atomic_read(&tcon->num_t2renames));
39999 + atomic_read_unchecked(&tcon->num_renames),
40000 + atomic_read_unchecked(&tcon->num_t2renames));
40001 seq_printf(m, "\nFindFirst: %d FNext %d "
40002 "FClose %d",
40003 - atomic_read(&tcon->num_ffirst),
40004 - atomic_read(&tcon->num_fnext),
40005 - atomic_read(&tcon->num_fclose));
40006 + atomic_read_unchecked(&tcon->num_ffirst),
40007 + atomic_read_unchecked(&tcon->num_fnext),
40008 + atomic_read_unchecked(&tcon->num_fclose));
40009 }
40010 }
40011 }
40012 diff -urNp linux-2.6.32.45/fs/cifs/cifsglob.h linux-2.6.32.45/fs/cifs/cifsglob.h
40013 --- linux-2.6.32.45/fs/cifs/cifsglob.h 2011-08-09 18:35:29.000000000 -0400
40014 +++ linux-2.6.32.45/fs/cifs/cifsglob.h 2011-08-09 18:34:00.000000000 -0400
40015 @@ -252,28 +252,28 @@ struct cifsTconInfo {
40016 __u16 Flags; /* optional support bits */
40017 enum statusEnum tidStatus;
40018 #ifdef CONFIG_CIFS_STATS
40019 - atomic_t num_smbs_sent;
40020 - atomic_t num_writes;
40021 - atomic_t num_reads;
40022 - atomic_t num_flushes;
40023 - atomic_t num_oplock_brks;
40024 - atomic_t num_opens;
40025 - atomic_t num_closes;
40026 - atomic_t num_deletes;
40027 - atomic_t num_mkdirs;
40028 - atomic_t num_posixopens;
40029 - atomic_t num_posixmkdirs;
40030 - atomic_t num_rmdirs;
40031 - atomic_t num_renames;
40032 - atomic_t num_t2renames;
40033 - atomic_t num_ffirst;
40034 - atomic_t num_fnext;
40035 - atomic_t num_fclose;
40036 - atomic_t num_hardlinks;
40037 - atomic_t num_symlinks;
40038 - atomic_t num_locks;
40039 - atomic_t num_acl_get;
40040 - atomic_t num_acl_set;
40041 + atomic_unchecked_t num_smbs_sent;
40042 + atomic_unchecked_t num_writes;
40043 + atomic_unchecked_t num_reads;
40044 + atomic_unchecked_t num_flushes;
40045 + atomic_unchecked_t num_oplock_brks;
40046 + atomic_unchecked_t num_opens;
40047 + atomic_unchecked_t num_closes;
40048 + atomic_unchecked_t num_deletes;
40049 + atomic_unchecked_t num_mkdirs;
40050 + atomic_unchecked_t num_posixopens;
40051 + atomic_unchecked_t num_posixmkdirs;
40052 + atomic_unchecked_t num_rmdirs;
40053 + atomic_unchecked_t num_renames;
40054 + atomic_unchecked_t num_t2renames;
40055 + atomic_unchecked_t num_ffirst;
40056 + atomic_unchecked_t num_fnext;
40057 + atomic_unchecked_t num_fclose;
40058 + atomic_unchecked_t num_hardlinks;
40059 + atomic_unchecked_t num_symlinks;
40060 + atomic_unchecked_t num_locks;
40061 + atomic_unchecked_t num_acl_get;
40062 + atomic_unchecked_t num_acl_set;
40063 #ifdef CONFIG_CIFS_STATS2
40064 unsigned long long time_writes;
40065 unsigned long long time_reads;
40066 @@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const st
40067 }
40068
40069 #ifdef CONFIG_CIFS_STATS
40070 -#define cifs_stats_inc atomic_inc
40071 +#define cifs_stats_inc atomic_inc_unchecked
40072
40073 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
40074 unsigned int bytes)
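Because the statistics bumps in fs/cifs go through the cifs_stats_inc() macro, redefining that one macro to atomic_inc_unchecked converts all of those call sites at once; only the struct fields and the handful of direct atomic_set()/atomic_read() uses shown earlier need individual edits. A call site keeps compiling unchanged against either definition, for example (illustrative):

	cifs_stats_inc(&tcon->num_reads);	/* expands to atomic_inc_unchecked(...) here */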
40075 diff -urNp linux-2.6.32.45/fs/cifs/link.c linux-2.6.32.45/fs/cifs/link.c
40076 --- linux-2.6.32.45/fs/cifs/link.c 2011-03-27 14:31:47.000000000 -0400
40077 +++ linux-2.6.32.45/fs/cifs/link.c 2011-04-17 15:56:46.000000000 -0400
40078 @@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct
40079
40080 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
40081 {
40082 - char *p = nd_get_link(nd);
40083 + const char *p = nd_get_link(nd);
40084 if (!IS_ERR(p))
40085 kfree(p);
40086 }
40087 diff -urNp linux-2.6.32.45/fs/coda/cache.c linux-2.6.32.45/fs/coda/cache.c
40088 --- linux-2.6.32.45/fs/coda/cache.c 2011-03-27 14:31:47.000000000 -0400
40089 +++ linux-2.6.32.45/fs/coda/cache.c 2011-05-04 17:56:28.000000000 -0400
40090 @@ -24,14 +24,14 @@
40091 #include <linux/coda_fs_i.h>
40092 #include <linux/coda_cache.h>
40093
40094 -static atomic_t permission_epoch = ATOMIC_INIT(0);
40095 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
40096
40097 /* replace or extend an acl cache hit */
40098 void coda_cache_enter(struct inode *inode, int mask)
40099 {
40100 struct coda_inode_info *cii = ITOC(inode);
40101
40102 - cii->c_cached_epoch = atomic_read(&permission_epoch);
40103 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
40104 if (cii->c_uid != current_fsuid()) {
40105 cii->c_uid = current_fsuid();
40106 cii->c_cached_perm = mask;
40107 @@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inod
40108 void coda_cache_clear_inode(struct inode *inode)
40109 {
40110 struct coda_inode_info *cii = ITOC(inode);
40111 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
40112 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
40113 }
40114
40115 /* remove all acl caches */
40116 void coda_cache_clear_all(struct super_block *sb)
40117 {
40118 - atomic_inc(&permission_epoch);
40119 + atomic_inc_unchecked(&permission_epoch);
40120 }
40121
40122
40123 @@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode
40124
40125 hit = (mask & cii->c_cached_perm) == mask &&
40126 cii->c_uid == current_fsuid() &&
40127 - cii->c_cached_epoch == atomic_read(&permission_epoch);
40128 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
40129
40130 return hit;
40131 }
40132 diff -urNp linux-2.6.32.45/fs/compat_binfmt_elf.c linux-2.6.32.45/fs/compat_binfmt_elf.c
40133 --- linux-2.6.32.45/fs/compat_binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
40134 +++ linux-2.6.32.45/fs/compat_binfmt_elf.c 2011-04-17 15:56:46.000000000 -0400
40135 @@ -29,10 +29,12 @@
40136 #undef elfhdr
40137 #undef elf_phdr
40138 #undef elf_note
40139 +#undef elf_dyn
40140 #undef elf_addr_t
40141 #define elfhdr elf32_hdr
40142 #define elf_phdr elf32_phdr
40143 #define elf_note elf32_note
40144 +#define elf_dyn Elf32_Dyn
40145 #define elf_addr_t Elf32_Addr
40146
40147 /*
40148 diff -urNp linux-2.6.32.45/fs/compat.c linux-2.6.32.45/fs/compat.c
40149 --- linux-2.6.32.45/fs/compat.c 2011-04-17 17:00:52.000000000 -0400
40150 +++ linux-2.6.32.45/fs/compat.c 2011-08-11 19:56:56.000000000 -0400
40151 @@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
40152
40153 struct compat_readdir_callback {
40154 struct compat_old_linux_dirent __user *dirent;
40155 + struct file * file;
40156 int result;
40157 };
40158
40159 @@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf
40160 buf->result = -EOVERFLOW;
40161 return -EOVERFLOW;
40162 }
40163 +
40164 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40165 + return 0;
40166 +
40167 buf->result++;
40168 dirent = buf->dirent;
40169 if (!access_ok(VERIFY_WRITE, dirent,
40170 @@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(u
40171
40172 buf.result = 0;
40173 buf.dirent = dirent;
40174 + buf.file = file;
40175
40176 error = vfs_readdir(file, compat_fillonedir, &buf);
40177 if (buf.result)
40178 @@ -899,6 +905,7 @@ struct compat_linux_dirent {
40179 struct compat_getdents_callback {
40180 struct compat_linux_dirent __user *current_dir;
40181 struct compat_linux_dirent __user *previous;
40182 + struct file * file;
40183 int count;
40184 int error;
40185 };
40186 @@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, c
40187 buf->error = -EOVERFLOW;
40188 return -EOVERFLOW;
40189 }
40190 +
40191 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40192 + return 0;
40193 +
40194 dirent = buf->previous;
40195 if (dirent) {
40196 if (__put_user(offset, &dirent->d_off))
40197 @@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsi
40198 buf.previous = NULL;
40199 buf.count = count;
40200 buf.error = 0;
40201 + buf.file = file;
40202
40203 error = vfs_readdir(file, compat_filldir, &buf);
40204 if (error >= 0)
40205 @@ -987,6 +999,7 @@ out:
40206 struct compat_getdents_callback64 {
40207 struct linux_dirent64 __user *current_dir;
40208 struct linux_dirent64 __user *previous;
40209 + struct file * file;
40210 int count;
40211 int error;
40212 };
40213 @@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf
40214 buf->error = -EINVAL; /* only used if we fail.. */
40215 if (reclen > buf->count)
40216 return -EINVAL;
40217 +
40218 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40219 + return 0;
40220 +
40221 dirent = buf->previous;
40222
40223 if (dirent) {
40224 @@ -1054,6 +1071,7 @@ asmlinkage long compat_sys_getdents64(un
40225 buf.previous = NULL;
40226 buf.count = count;
40227 buf.error = 0;
40228 + buf.file = file;
40229
40230 error = vfs_readdir(file, compat_filldir64, &buf);
40231 if (error >= 0)
40232 @@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(in
40233 * verify all the pointers
40234 */
40235 ret = -EINVAL;
40236 - if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
40237 + if (nr_segs > UIO_MAXIOV)
40238 goto out;
40239 if (!file->f_op)
40240 goto out;
40241 @@ -1463,11 +1481,35 @@ int compat_do_execve(char * filename,
40242 compat_uptr_t __user *envp,
40243 struct pt_regs * regs)
40244 {
40245 +#ifdef CONFIG_GRKERNSEC
40246 + struct file *old_exec_file;
40247 + struct acl_subject_label *old_acl;
40248 + struct rlimit old_rlim[RLIM_NLIMITS];
40249 +#endif
40250 struct linux_binprm *bprm;
40251 struct file *file;
40252 struct files_struct *displaced;
40253 bool clear_in_exec;
40254 int retval;
40255 + const struct cred *cred = current_cred();
40256 +
40257 + /*
40258 + * We move the actual failure in case of RLIMIT_NPROC excess from
40259 + * set*uid() to execve() because too many poorly written programs
40260 + * don't check setuid() return code. Here we additionally recheck
40261 + * whether NPROC limit is still exceeded.
40262 + */
40263 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
40264 +
40265 + if ((current->flags & PF_NPROC_EXCEEDED) &&
40266 + atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
40267 + retval = -EAGAIN;
40268 + goto out_ret;
40269 + }
40270 +
40271 + /* We're below the limit (still or again), so we don't want to make
40272 + * further execve() calls fail. */
40273 + current->flags &= ~PF_NPROC_EXCEEDED;
40274
40275 retval = unshare_files(&displaced);
40276 if (retval)
40277 @@ -1499,6 +1541,15 @@ int compat_do_execve(char * filename,
40278 bprm->filename = filename;
40279 bprm->interp = filename;
40280
40281 + if (gr_process_user_ban()) {
40282 + retval = -EPERM;
40283 + goto out_file;
40284 + }
40285 +
40286 + retval = -EACCES;
40287 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
40288 + goto out_file;
40289 +
40290 retval = bprm_mm_init(bprm);
40291 if (retval)
40292 goto out_file;
40293 @@ -1528,9 +1579,40 @@ int compat_do_execve(char * filename,
40294 if (retval < 0)
40295 goto out;
40296
40297 + if (!gr_tpe_allow(file)) {
40298 + retval = -EACCES;
40299 + goto out;
40300 + }
40301 +
40302 + if (gr_check_crash_exec(file)) {
40303 + retval = -EACCES;
40304 + goto out;
40305 + }
40306 +
40307 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
40308 +
40309 + gr_handle_exec_args_compat(bprm, argv);
40310 +
40311 +#ifdef CONFIG_GRKERNSEC
40312 + old_acl = current->acl;
40313 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
40314 + old_exec_file = current->exec_file;
40315 + get_file(file);
40316 + current->exec_file = file;
40317 +#endif
40318 +
40319 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
40320 + bprm->unsafe & LSM_UNSAFE_SHARE);
40321 + if (retval < 0)
40322 + goto out_fail;
40323 +
40324 retval = search_binary_handler(bprm, regs);
40325 if (retval < 0)
40326 - goto out;
40327 + goto out_fail;
40328 +#ifdef CONFIG_GRKERNSEC
40329 + if (old_exec_file)
40330 + fput(old_exec_file);
40331 +#endif
40332
40333 /* execve succeeded */
40334 current->fs->in_exec = 0;
40335 @@ -1541,6 +1623,14 @@ int compat_do_execve(char * filename,
40336 put_files_struct(displaced);
40337 return retval;
40338
40339 +out_fail:
40340 +#ifdef CONFIG_GRKERNSEC
40341 + current->acl = old_acl;
40342 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
40343 + fput(current->exec_file);
40344 + current->exec_file = old_exec_file;
40345 +#endif
40346 +
40347 out:
40348 if (bprm->mm) {
40349 acct_arg_size(bprm, 0);
40350 @@ -1711,6 +1801,8 @@ int compat_core_sys_select(int n, compat
40351 struct fdtable *fdt;
40352 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
40353
40354 + pax_track_stack();
40355 +
40356 if (n < 0)
40357 goto out_nofds;
40358
40359 diff -urNp linux-2.6.32.45/fs/compat_ioctl.c linux-2.6.32.45/fs/compat_ioctl.c
40360 --- linux-2.6.32.45/fs/compat_ioctl.c 2011-03-27 14:31:47.000000000 -0400
40361 +++ linux-2.6.32.45/fs/compat_ioctl.c 2011-04-23 12:56:11.000000000 -0400
40362 @@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsi
40363 up = (struct compat_video_spu_palette __user *) arg;
40364 err = get_user(palp, &up->palette);
40365 err |= get_user(length, &up->length);
40366 + if (err)
40367 + return -EFAULT;
40368
40369 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
40370 err = put_user(compat_ptr(palp), &up_native->palette);
40371 diff -urNp linux-2.6.32.45/fs/configfs/dir.c linux-2.6.32.45/fs/configfs/dir.c
40372 --- linux-2.6.32.45/fs/configfs/dir.c 2011-03-27 14:31:47.000000000 -0400
40373 +++ linux-2.6.32.45/fs/configfs/dir.c 2011-05-11 18:25:15.000000000 -0400
40374 @@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file
40375 }
40376 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
40377 struct configfs_dirent *next;
40378 - const char * name;
40379 + const unsigned char * name;
40380 + char d_name[sizeof(next->s_dentry->d_iname)];
40381 int len;
40382
40383 next = list_entry(p, struct configfs_dirent,
40384 @@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file
40385 continue;
40386
40387 name = configfs_get_name(next);
40388 - len = strlen(name);
40389 + if (next->s_dentry && name == next->s_dentry->d_iname) {
40390 + len = next->s_dentry->d_name.len;
40391 + memcpy(d_name, name, len);
40392 + name = d_name;
40393 + } else
40394 + len = strlen(name);
40395 if (next->s_dentry)
40396 ino = next->s_dentry->d_inode->i_ino;
40397 else
40398 diff -urNp linux-2.6.32.45/fs/dcache.c linux-2.6.32.45/fs/dcache.c
40399 --- linux-2.6.32.45/fs/dcache.c 2011-03-27 14:31:47.000000000 -0400
40400 +++ linux-2.6.32.45/fs/dcache.c 2011-04-23 13:32:21.000000000 -0400
40401 @@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
40402
40403 static struct kmem_cache *dentry_cache __read_mostly;
40404
40405 -#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
40406 -
40407 /*
40408 * This is the single most critical data structure when it comes
40409 * to the dcache: the hashtable for lookups. Somebody should try
40410 @@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned lon
40411 mempages -= reserve;
40412
40413 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
40414 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
40415 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
40416
40417 dcache_init();
40418 inode_init();
40419 diff -urNp linux-2.6.32.45/fs/dlm/lockspace.c linux-2.6.32.45/fs/dlm/lockspace.c
40420 --- linux-2.6.32.45/fs/dlm/lockspace.c 2011-03-27 14:31:47.000000000 -0400
40421 +++ linux-2.6.32.45/fs/dlm/lockspace.c 2011-04-17 15:56:46.000000000 -0400
40422 @@ -148,7 +148,7 @@ static void lockspace_kobj_release(struc
40423 kfree(ls);
40424 }
40425
40426 -static struct sysfs_ops dlm_attr_ops = {
40427 +static const struct sysfs_ops dlm_attr_ops = {
40428 .show = dlm_attr_show,
40429 .store = dlm_attr_store,
40430 };
40431 diff -urNp linux-2.6.32.45/fs/ecryptfs/inode.c linux-2.6.32.45/fs/ecryptfs/inode.c
40432 --- linux-2.6.32.45/fs/ecryptfs/inode.c 2011-03-27 14:31:47.000000000 -0400
40433 +++ linux-2.6.32.45/fs/ecryptfs/inode.c 2011-04-17 15:56:46.000000000 -0400
40434 @@ -660,7 +660,7 @@ static int ecryptfs_readlink_lower(struc
40435 old_fs = get_fs();
40436 set_fs(get_ds());
40437 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
40438 - (char __user *)lower_buf,
40439 + (__force char __user *)lower_buf,
40440 lower_bufsiz);
40441 set_fs(old_fs);
40442 if (rc < 0)
40443 @@ -706,7 +706,7 @@ static void *ecryptfs_follow_link(struct
40444 }
40445 old_fs = get_fs();
40446 set_fs(get_ds());
40447 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
40448 + rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
40449 set_fs(old_fs);
40450 if (rc < 0)
40451 goto out_free;
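The __force annotations here and in cachefiles/rdwr.c above mark the same idiom: the code temporarily widens the usable address range with set_fs() and then passes a kernel buffer through an interface declared __user, so the cast is intentional and the annotation keeps sparse from flagging it as an address-space violation. The shape of the idiom, with a hypothetical callee name:

	mm_segment_t old_fs = get_fs();

	set_fs(get_ds());			/* allow kernel pointers through __user interfaces */
	rc = some_user_api(file, (__force char __user *)kernel_buf, len, &pos);
	set_fs(old_fs);				/* always restore the previous limit */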
40452 diff -urNp linux-2.6.32.45/fs/exec.c linux-2.6.32.45/fs/exec.c
40453 --- linux-2.6.32.45/fs/exec.c 2011-06-25 12:55:34.000000000 -0400
40454 +++ linux-2.6.32.45/fs/exec.c 2011-08-11 19:56:19.000000000 -0400
40455 @@ -56,12 +56,24 @@
40456 #include <linux/fsnotify.h>
40457 #include <linux/fs_struct.h>
40458 #include <linux/pipe_fs_i.h>
40459 +#include <linux/random.h>
40460 +#include <linux/seq_file.h>
40461 +
40462 +#ifdef CONFIG_PAX_REFCOUNT
40463 +#include <linux/kallsyms.h>
40464 +#include <linux/kdebug.h>
40465 +#endif
40466
40467 #include <asm/uaccess.h>
40468 #include <asm/mmu_context.h>
40469 #include <asm/tlb.h>
40470 #include "internal.h"
40471
40472 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
40473 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
40474 +EXPORT_SYMBOL(pax_set_initial_flags_func);
40475 +#endif
40476 +
40477 int core_uses_pid;
40478 char core_pattern[CORENAME_MAX_SIZE] = "core";
40479 unsigned int core_pipe_limit;
40480 @@ -115,7 +127,7 @@ SYSCALL_DEFINE1(uselib, const char __use
40481 goto out;
40482
40483 file = do_filp_open(AT_FDCWD, tmp,
40484 - O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
40485 + O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
40486 MAY_READ | MAY_EXEC | MAY_OPEN);
40487 putname(tmp);
40488 error = PTR_ERR(file);
40489 @@ -178,18 +190,10 @@ struct page *get_arg_page(struct linux_b
40490 int write)
40491 {
40492 struct page *page;
40493 - int ret;
40494
40495 -#ifdef CONFIG_STACK_GROWSUP
40496 - if (write) {
40497 - ret = expand_stack_downwards(bprm->vma, pos);
40498 - if (ret < 0)
40499 - return NULL;
40500 - }
40501 -#endif
40502 - ret = get_user_pages(current, bprm->mm, pos,
40503 - 1, write, 1, &page, NULL);
40504 - if (ret <= 0)
40505 + if (0 > expand_stack_downwards(bprm->vma, pos))
40506 + return NULL;
40507 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
40508 return NULL;
40509
40510 if (write) {
40511 @@ -263,6 +267,11 @@ static int __bprm_mm_init(struct linux_b
40512 vma->vm_end = STACK_TOP_MAX;
40513 vma->vm_start = vma->vm_end - PAGE_SIZE;
40514 vma->vm_flags = VM_STACK_FLAGS;
40515 +
40516 +#ifdef CONFIG_PAX_SEGMEXEC
40517 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
40518 +#endif
40519 +
40520 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
40521
40522 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
40523 @@ -276,6 +285,12 @@ static int __bprm_mm_init(struct linux_b
40524 mm->stack_vm = mm->total_vm = 1;
40525 up_write(&mm->mmap_sem);
40526 bprm->p = vma->vm_end - sizeof(void *);
40527 +
40528 +#ifdef CONFIG_PAX_RANDUSTACK
40529 + if (randomize_va_space)
40530 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
40531 +#endif
40532 +
40533 return 0;
40534 err:
40535 up_write(&mm->mmap_sem);
40536 @@ -510,7 +525,7 @@ int copy_strings_kernel(int argc,char **
40537 int r;
40538 mm_segment_t oldfs = get_fs();
40539 set_fs(KERNEL_DS);
40540 - r = copy_strings(argc, (char __user * __user *)argv, bprm);
40541 + r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
40542 set_fs(oldfs);
40543 return r;
40544 }
40545 @@ -540,7 +555,8 @@ static int shift_arg_pages(struct vm_are
40546 unsigned long new_end = old_end - shift;
40547 struct mmu_gather *tlb;
40548
40549 - BUG_ON(new_start > new_end);
40550 + if (new_start >= new_end || new_start < mmap_min_addr)
40551 + return -ENOMEM;
40552
40553 /*
40554 * ensure there are no vmas between where we want to go
40555 @@ -549,6 +565,10 @@ static int shift_arg_pages(struct vm_are
40556 if (vma != find_vma(mm, new_start))
40557 return -EFAULT;
40558
40559 +#ifdef CONFIG_PAX_SEGMEXEC
40560 + BUG_ON(pax_find_mirror_vma(vma));
40561 +#endif
40562 +
40563 /*
40564 * cover the whole range: [new_start, old_end)
40565 */
40566 @@ -630,10 +650,6 @@ int setup_arg_pages(struct linux_binprm
40567 stack_top = arch_align_stack(stack_top);
40568 stack_top = PAGE_ALIGN(stack_top);
40569
40570 - if (unlikely(stack_top < mmap_min_addr) ||
40571 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
40572 - return -ENOMEM;
40573 -
40574 stack_shift = vma->vm_end - stack_top;
40575
40576 bprm->p -= stack_shift;
40577 @@ -645,6 +661,14 @@ int setup_arg_pages(struct linux_binprm
40578 bprm->exec -= stack_shift;
40579
40580 down_write(&mm->mmap_sem);
40581 +
40582 + /* Move stack pages down in memory. */
40583 + if (stack_shift) {
40584 + ret = shift_arg_pages(vma, stack_shift);
40585 + if (ret)
40586 + goto out_unlock;
40587 + }
40588 +
40589 vm_flags = VM_STACK_FLAGS;
40590
40591 /*
40592 @@ -658,19 +682,24 @@ int setup_arg_pages(struct linux_binprm
40593 vm_flags &= ~VM_EXEC;
40594 vm_flags |= mm->def_flags;
40595
40596 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
40597 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40598 + vm_flags &= ~VM_EXEC;
40599 +
40600 +#ifdef CONFIG_PAX_MPROTECT
40601 + if (mm->pax_flags & MF_PAX_MPROTECT)
40602 + vm_flags &= ~VM_MAYEXEC;
40603 +#endif
40604 +
40605 + }
40606 +#endif
40607 +
40608 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
40609 vm_flags);
40610 if (ret)
40611 goto out_unlock;
40612 BUG_ON(prev != vma);
40613
40614 - /* Move stack pages down in memory. */
40615 - if (stack_shift) {
40616 - ret = shift_arg_pages(vma, stack_shift);
40617 - if (ret)
40618 - goto out_unlock;
40619 - }
40620 -
40621 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
40622 stack_size = vma->vm_end - vma->vm_start;
40623 /*
40624 @@ -707,7 +736,7 @@ struct file *open_exec(const char *name)
40625 int err;
40626
40627 file = do_filp_open(AT_FDCWD, name,
40628 - O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
40629 + O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
40630 MAY_EXEC | MAY_OPEN);
40631 if (IS_ERR(file))
40632 goto out;
40633 @@ -744,7 +773,7 @@ int kernel_read(struct file *file, loff_
40634 old_fs = get_fs();
40635 set_fs(get_ds());
40636 /* The cast to a user pointer is valid due to the set_fs() */
40637 - result = vfs_read(file, (void __user *)addr, count, &pos);
40638 + result = vfs_read(file, (__force void __user *)addr, count, &pos);
40639 set_fs(old_fs);
40640 return result;
40641 }
40642 @@ -1152,7 +1181,7 @@ int check_unsafe_exec(struct linux_binpr
40643 }
40644 rcu_read_unlock();
40645
40646 - if (p->fs->users > n_fs) {
40647 + if (atomic_read(&p->fs->users) > n_fs) {
40648 bprm->unsafe |= LSM_UNSAFE_SHARE;
40649 } else {
40650 res = -EAGAIN;
40651 @@ -1347,11 +1376,35 @@ int do_execve(char * filename,
40652 char __user *__user *envp,
40653 struct pt_regs * regs)
40654 {
40655 +#ifdef CONFIG_GRKERNSEC
40656 + struct file *old_exec_file;
40657 + struct acl_subject_label *old_acl;
40658 + struct rlimit old_rlim[RLIM_NLIMITS];
40659 +#endif
40660 struct linux_binprm *bprm;
40661 struct file *file;
40662 struct files_struct *displaced;
40663 bool clear_in_exec;
40664 int retval;
40665 + const struct cred *cred = current_cred();
40666 +
40667 + /*
40668 + * We move the actual failure in case of RLIMIT_NPROC excess from
40669 + * set*uid() to execve() because too many poorly written programs
40670 + * don't check setuid() return code. Here we additionally recheck
40671 + * whether NPROC limit is still exceeded.
40672 + */
40673 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
40674 +
40675 + if ((current->flags & PF_NPROC_EXCEEDED) &&
40676 + atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
40677 + retval = -EAGAIN;
40678 + goto out_ret;
40679 + }
40680 +
40681 + /* We're below the limit (still or again), so we don't want to make
40682 + * further execve() calls fail. */
40683 + current->flags &= ~PF_NPROC_EXCEEDED;
40684
40685 retval = unshare_files(&displaced);
40686 if (retval)
40687 @@ -1383,6 +1436,16 @@ int do_execve(char * filename,
40688 bprm->filename = filename;
40689 bprm->interp = filename;
40690
40691 + if (gr_process_user_ban()) {
40692 + retval = -EPERM;
40693 + goto out_file;
40694 + }
40695 +
40696 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
40697 + retval = -EACCES;
40698 + goto out_file;
40699 + }
40700 +
40701 retval = bprm_mm_init(bprm);
40702 if (retval)
40703 goto out_file;
40704 @@ -1412,10 +1475,41 @@ int do_execve(char * filename,
40705 if (retval < 0)
40706 goto out;
40707
40708 + if (!gr_tpe_allow(file)) {
40709 + retval = -EACCES;
40710 + goto out;
40711 + }
40712 +
40713 + if (gr_check_crash_exec(file)) {
40714 + retval = -EACCES;
40715 + goto out;
40716 + }
40717 +
40718 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
40719 +
40720 + gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
40721 +
40722 +#ifdef CONFIG_GRKERNSEC
40723 + old_acl = current->acl;
40724 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
40725 + old_exec_file = current->exec_file;
40726 + get_file(file);
40727 + current->exec_file = file;
40728 +#endif
40729 +
40730 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
40731 + bprm->unsafe & LSM_UNSAFE_SHARE);
40732 + if (retval < 0)
40733 + goto out_fail;
40734 +
40735 current->flags &= ~PF_KTHREAD;
40736 retval = search_binary_handler(bprm,regs);
40737 if (retval < 0)
40738 - goto out;
40739 + goto out_fail;
40740 +#ifdef CONFIG_GRKERNSEC
40741 + if (old_exec_file)
40742 + fput(old_exec_file);
40743 +#endif
40744
40745 /* execve succeeded */
40746 current->fs->in_exec = 0;
40747 @@ -1426,6 +1520,14 @@ int do_execve(char * filename,
40748 put_files_struct(displaced);
40749 return retval;
40750
40751 +out_fail:
40752 +#ifdef CONFIG_GRKERNSEC
40753 + current->acl = old_acl;
40754 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
40755 + fput(current->exec_file);
40756 + current->exec_file = old_exec_file;
40757 +#endif
40758 +
40759 out:
40760 if (bprm->mm) {
40761 acct_arg_size(bprm, 0);
40762 @@ -1591,6 +1693,220 @@ out:
40763 return ispipe;
40764 }
40765
40766 +int pax_check_flags(unsigned long *flags)
40767 +{
40768 + int retval = 0;
40769 +
40770 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
40771 + if (*flags & MF_PAX_SEGMEXEC)
40772 + {
40773 + *flags &= ~MF_PAX_SEGMEXEC;
40774 + retval = -EINVAL;
40775 + }
40776 +#endif
40777 +
40778 + if ((*flags & MF_PAX_PAGEEXEC)
40779 +
40780 +#ifdef CONFIG_PAX_PAGEEXEC
40781 + && (*flags & MF_PAX_SEGMEXEC)
40782 +#endif
40783 +
40784 + )
40785 + {
40786 + *flags &= ~MF_PAX_PAGEEXEC;
40787 + retval = -EINVAL;
40788 + }
40789 +
40790 + if ((*flags & MF_PAX_MPROTECT)
40791 +
40792 +#ifdef CONFIG_PAX_MPROTECT
40793 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
40794 +#endif
40795 +
40796 + )
40797 + {
40798 + *flags &= ~MF_PAX_MPROTECT;
40799 + retval = -EINVAL;
40800 + }
40801 +
40802 + if ((*flags & MF_PAX_EMUTRAMP)
40803 +
40804 +#ifdef CONFIG_PAX_EMUTRAMP
40805 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
40806 +#endif
40807 +
40808 + )
40809 + {
40810 + *flags &= ~MF_PAX_EMUTRAMP;
40811 + retval = -EINVAL;
40812 + }
40813 +
40814 + return retval;
40815 +}
40816 +
40817 +EXPORT_SYMBOL(pax_check_flags);
40818 +
40819 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
40820 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
40821 +{
40822 + struct task_struct *tsk = current;
40823 + struct mm_struct *mm = current->mm;
40824 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
40825 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
40826 + char *path_exec = NULL;
40827 + char *path_fault = NULL;
40828 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
40829 +
40830 + if (buffer_exec && buffer_fault) {
40831 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
40832 +
40833 + down_read(&mm->mmap_sem);
40834 + vma = mm->mmap;
40835 + while (vma && (!vma_exec || !vma_fault)) {
40836 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
40837 + vma_exec = vma;
40838 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
40839 + vma_fault = vma;
40840 + vma = vma->vm_next;
40841 + }
40842 + if (vma_exec) {
40843 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
40844 + if (IS_ERR(path_exec))
40845 + path_exec = "<path too long>";
40846 + else {
40847 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
40848 + if (path_exec) {
40849 + *path_exec = 0;
40850 + path_exec = buffer_exec;
40851 + } else
40852 + path_exec = "<path too long>";
40853 + }
40854 + }
40855 + if (vma_fault) {
40856 + start = vma_fault->vm_start;
40857 + end = vma_fault->vm_end;
40858 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
40859 + if (vma_fault->vm_file) {
40860 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
40861 + if (IS_ERR(path_fault))
40862 + path_fault = "<path too long>";
40863 + else {
40864 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
40865 + if (path_fault) {
40866 + *path_fault = 0;
40867 + path_fault = buffer_fault;
40868 + } else
40869 + path_fault = "<path too long>";
40870 + }
40871 + } else
40872 + path_fault = "<anonymous mapping>";
40873 + }
40874 + up_read(&mm->mmap_sem);
40875 + }
40876 + if (tsk->signal->curr_ip)
40877 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
40878 + else
40879 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
40880 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
40881 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
40882 + task_uid(tsk), task_euid(tsk), pc, sp);
40883 + free_page((unsigned long)buffer_exec);
40884 + free_page((unsigned long)buffer_fault);
40885 + pax_report_insns(pc, sp);
40886 + do_coredump(SIGKILL, SIGKILL, regs);
40887 +}
40888 +#endif
40889 +
40890 +#ifdef CONFIG_PAX_REFCOUNT
40891 +void pax_report_refcount_overflow(struct pt_regs *regs)
40892 +{
40893 + if (current->signal->curr_ip)
40894 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
40895 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
40896 + else
40897 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
40898 + current->comm, task_pid_nr(current), current_uid(), current_euid());
40899 + print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));

40900 + show_regs(regs);
40901 + force_sig_specific(SIGKILL, current);
40902 +}
40903 +#endif
40904 +
40905 +#ifdef CONFIG_PAX_USERCOPY
40906 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
40907 +int object_is_on_stack(const void *obj, unsigned long len)
40908 +{
40909 + const void * const stack = task_stack_page(current);
40910 + const void * const stackend = stack + THREAD_SIZE;
40911 +
40912 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
40913 + const void *frame = NULL;
40914 + const void *oldframe;
40915 +#endif
40916 +
40917 + if (obj + len < obj)
40918 + return -1;
40919 +
40920 + if (obj + len <= stack || stackend <= obj)
40921 + return 0;
40922 +
40923 + if (obj < stack || stackend < obj + len)
40924 + return -1;
40925 +
40926 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
40927 + oldframe = __builtin_frame_address(1);
40928 + if (oldframe)
40929 + frame = __builtin_frame_address(2);
40930 + /*
40931 + low ----------------------------------------------> high
40932 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
40933 + ^----------------^
40934 + allow copies only within here
40935 + */
40936 + while (stack <= frame && frame < stackend) {
40937 + /* if obj + len extends past the last frame, this
40938 + check won't pass and the next frame will be 0,
40939 + causing us to bail out and correctly report
40940 + the copy as invalid
40941 + */
40942 + if (obj + len <= frame)
40943 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
40944 + oldframe = frame;
40945 + frame = *(const void * const *)frame;
40946 + }
40947 + return -1;
40948 +#else
40949 + return 1;
40950 +#endif
40951 +}
40952 +
40953 +
40954 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
40955 +{
40956 + if (current->signal->curr_ip)
40957 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
40958 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
40959 + else
40960 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
40961 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
40962 +
40963 + dump_stack();
40964 + gr_handle_kernel_exploit();
40965 + do_group_exit(SIGKILL);
40966 +}
40967 +#endif
40968 +
40969 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
40970 +void pax_track_stack(void)
40971 +{
40972 + unsigned long sp = (unsigned long)&sp;
40973 + if (sp < current_thread_info()->lowest_stack &&
40974 + sp > (unsigned long)task_stack_page(current))
40975 + current_thread_info()->lowest_stack = sp;
40976 +}
40977 +EXPORT_SYMBOL(pax_track_stack);
40978 +#endif
40979 +
40980 static int zap_process(struct task_struct *start)
40981 {
40982 struct task_struct *t;
40983 @@ -1793,17 +2109,17 @@ static void wait_for_dump_helpers(struct
40984 pipe = file->f_path.dentry->d_inode->i_pipe;
40985
40986 pipe_lock(pipe);
40987 - pipe->readers++;
40988 - pipe->writers--;
40989 + atomic_inc(&pipe->readers);
40990 + atomic_dec(&pipe->writers);
40991
40992 - while ((pipe->readers > 1) && (!signal_pending(current))) {
40993 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
40994 wake_up_interruptible_sync(&pipe->wait);
40995 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
40996 pipe_wait(pipe);
40997 }
40998
40999 - pipe->readers--;
41000 - pipe->writers++;
41001 + atomic_dec(&pipe->readers);
41002 + atomic_inc(&pipe->writers);
41003 pipe_unlock(pipe);
41004
41005 }
41006 @@ -1826,10 +2142,13 @@ void do_coredump(long signr, int exit_co
41007 char **helper_argv = NULL;
41008 int helper_argc = 0;
41009 int dump_count = 0;
41010 - static atomic_t core_dump_count = ATOMIC_INIT(0);
41011 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
41012
41013 audit_core_dumps(signr);
41014
41015 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
41016 + gr_handle_brute_attach(current, mm->flags);
41017 +
41018 binfmt = mm->binfmt;
41019 if (!binfmt || !binfmt->core_dump)
41020 goto fail;
41021 @@ -1874,6 +2193,8 @@ void do_coredump(long signr, int exit_co
41022 */
41023 clear_thread_flag(TIF_SIGPENDING);
41024
41025 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
41026 +
41027 /*
41028 * lock_kernel() because format_corename() is controlled by sysctl, which
41029 * uses lock_kernel()
41030 @@ -1908,7 +2229,7 @@ void do_coredump(long signr, int exit_co
41031 goto fail_unlock;
41032 }
41033
41034 - dump_count = atomic_inc_return(&core_dump_count);
41035 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
41036 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
41037 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
41038 task_tgid_vnr(current), current->comm);
41039 @@ -1972,7 +2293,7 @@ close_fail:
41040 filp_close(file, NULL);
41041 fail_dropcount:
41042 if (dump_count)
41043 - atomic_dec(&core_dump_count);
41044 + atomic_dec_unchecked(&core_dump_count);
41045 fail_unlock:
41046 if (helper_argv)
41047 argv_free(helper_argv);
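The PAX_USERCOPY helpers added to fs/exec.c are the reporting and stack-checking half of the feature: object_is_on_stack() classifies a candidate buffer against the current kernel stack (0 = not a stack object, 1 = fully on the stack with no frame-pointer walk available, 2 = wholly inside the permitted frame window, -1 = partial overlap, i.e. an error), and pax_report_usercopy() logs and kills the offender. A sketch of how a copy-validation path might consume them (illustrative only; the real callers live in the arch uaccess and slab hunks, which are outside this excerpt):

	static void illustrative_check_stack_object(const void *ptr, unsigned long n,
						    bool to_user, const char *type)
	{
		switch (object_is_on_stack(ptr, n)) {
		case 0:		/* not a stack object, nothing to decide here */
		case 1:		/* on the stack, frame walking unavailable: allowed */
		case 2:		/* fully contained in the caller's frame window */
			return;
		default:	/* -1: the copy straddles the stack or a frame boundary */
			pax_report_usercopy(ptr, n, to_user, type);
		}
	}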
41048 diff -urNp linux-2.6.32.45/fs/ext2/balloc.c linux-2.6.32.45/fs/ext2/balloc.c
41049 --- linux-2.6.32.45/fs/ext2/balloc.c 2011-03-27 14:31:47.000000000 -0400
41050 +++ linux-2.6.32.45/fs/ext2/balloc.c 2011-04-17 15:56:46.000000000 -0400
41051 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
41052
41053 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
41054 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
41055 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
41056 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
41057 sbi->s_resuid != current_fsuid() &&
41058 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
41059 return 0;
41060 diff -urNp linux-2.6.32.45/fs/ext3/balloc.c linux-2.6.32.45/fs/ext3/balloc.c
41061 --- linux-2.6.32.45/fs/ext3/balloc.c 2011-03-27 14:31:47.000000000 -0400
41062 +++ linux-2.6.32.45/fs/ext3/balloc.c 2011-04-17 15:56:46.000000000 -0400
41063 @@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct e
41064
41065 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
41066 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
41067 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
41068 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
41069 sbi->s_resuid != current_fsuid() &&
41070 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
41071 return 0;
41072 diff -urNp linux-2.6.32.45/fs/ext4/balloc.c linux-2.6.32.45/fs/ext4/balloc.c
41073 --- linux-2.6.32.45/fs/ext4/balloc.c 2011-03-27 14:31:47.000000000 -0400
41074 +++ linux-2.6.32.45/fs/ext4/balloc.c 2011-04-17 15:56:46.000000000 -0400
41075 @@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_
41076 /* Hm, nope. Are (enough) root reserved blocks available? */
41077 if (sbi->s_resuid == current_fsuid() ||
41078 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
41079 - capable(CAP_SYS_RESOURCE)) {
41080 + capable_nolog(CAP_SYS_RESOURCE)) {
41081 if (free_blocks >= (nblocks + dirty_blocks))
41082 return 1;
41083 }
41084 diff -urNp linux-2.6.32.45/fs/ext4/ext4.h linux-2.6.32.45/fs/ext4/ext4.h
41085 --- linux-2.6.32.45/fs/ext4/ext4.h 2011-03-27 14:31:47.000000000 -0400
41086 +++ linux-2.6.32.45/fs/ext4/ext4.h 2011-04-17 15:56:46.000000000 -0400
41087 @@ -1078,19 +1078,19 @@ struct ext4_sb_info {
41088
41089 /* stats for buddy allocator */
41090 spinlock_t s_mb_pa_lock;
41091 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
41092 - atomic_t s_bal_success; /* we found long enough chunks */
41093 - atomic_t s_bal_allocated; /* in blocks */
41094 - atomic_t s_bal_ex_scanned; /* total extents scanned */
41095 - atomic_t s_bal_goals; /* goal hits */
41096 - atomic_t s_bal_breaks; /* too long searches */
41097 - atomic_t s_bal_2orders; /* 2^order hits */
41098 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
41099 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
41100 + atomic_unchecked_t s_bal_allocated; /* in blocks */
41101 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
41102 + atomic_unchecked_t s_bal_goals; /* goal hits */
41103 + atomic_unchecked_t s_bal_breaks; /* too long searches */
41104 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
41105 spinlock_t s_bal_lock;
41106 unsigned long s_mb_buddies_generated;
41107 unsigned long long s_mb_generation_time;
41108 - atomic_t s_mb_lost_chunks;
41109 - atomic_t s_mb_preallocated;
41110 - atomic_t s_mb_discarded;
41111 + atomic_unchecked_t s_mb_lost_chunks;
41112 + atomic_unchecked_t s_mb_preallocated;
41113 + atomic_unchecked_t s_mb_discarded;
41114 atomic_t s_lock_busy;
41115
41116 /* locality groups */
41117 diff -urNp linux-2.6.32.45/fs/ext4/mballoc.c linux-2.6.32.45/fs/ext4/mballoc.c
41118 --- linux-2.6.32.45/fs/ext4/mballoc.c 2011-06-25 12:55:34.000000000 -0400
41119 +++ linux-2.6.32.45/fs/ext4/mballoc.c 2011-06-25 12:56:37.000000000 -0400
41120 @@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ex
41121 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
41122
41123 if (EXT4_SB(sb)->s_mb_stats)
41124 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
41125 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
41126
41127 break;
41128 }
41129 @@ -2131,7 +2131,7 @@ repeat:
41130 ac->ac_status = AC_STATUS_CONTINUE;
41131 ac->ac_flags |= EXT4_MB_HINT_FIRST;
41132 cr = 3;
41133 - atomic_inc(&sbi->s_mb_lost_chunks);
41134 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
41135 goto repeat;
41136 }
41137 }
41138 @@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struc
41139 ext4_grpblk_t counters[16];
41140 } sg;
41141
41142 + pax_track_stack();
41143 +
41144 group--;
41145 if (group == 0)
41146 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
41147 @@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *
41148 if (sbi->s_mb_stats) {
41149 printk(KERN_INFO
41150 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
41151 - atomic_read(&sbi->s_bal_allocated),
41152 - atomic_read(&sbi->s_bal_reqs),
41153 - atomic_read(&sbi->s_bal_success));
41154 + atomic_read_unchecked(&sbi->s_bal_allocated),
41155 + atomic_read_unchecked(&sbi->s_bal_reqs),
41156 + atomic_read_unchecked(&sbi->s_bal_success));
41157 printk(KERN_INFO
41158 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
41159 "%u 2^N hits, %u breaks, %u lost\n",
41160 - atomic_read(&sbi->s_bal_ex_scanned),
41161 - atomic_read(&sbi->s_bal_goals),
41162 - atomic_read(&sbi->s_bal_2orders),
41163 - atomic_read(&sbi->s_bal_breaks),
41164 - atomic_read(&sbi->s_mb_lost_chunks));
41165 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
41166 + atomic_read_unchecked(&sbi->s_bal_goals),
41167 + atomic_read_unchecked(&sbi->s_bal_2orders),
41168 + atomic_read_unchecked(&sbi->s_bal_breaks),
41169 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
41170 printk(KERN_INFO
41171 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
41172 sbi->s_mb_buddies_generated++,
41173 sbi->s_mb_generation_time);
41174 printk(KERN_INFO
41175 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
41176 - atomic_read(&sbi->s_mb_preallocated),
41177 - atomic_read(&sbi->s_mb_discarded));
41178 + atomic_read_unchecked(&sbi->s_mb_preallocated),
41179 + atomic_read_unchecked(&sbi->s_mb_discarded));
41180 }
41181
41182 free_percpu(sbi->s_locality_groups);
41183 @@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct
41184 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
41185
41186 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
41187 - atomic_inc(&sbi->s_bal_reqs);
41188 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
41189 + atomic_inc_unchecked(&sbi->s_bal_reqs);
41190 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
41191 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
41192 - atomic_inc(&sbi->s_bal_success);
41193 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
41194 + atomic_inc_unchecked(&sbi->s_bal_success);
41195 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
41196 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
41197 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
41198 - atomic_inc(&sbi->s_bal_goals);
41199 + atomic_inc_unchecked(&sbi->s_bal_goals);
41200 if (ac->ac_found > sbi->s_mb_max_to_scan)
41201 - atomic_inc(&sbi->s_bal_breaks);
41202 + atomic_inc_unchecked(&sbi->s_bal_breaks);
41203 }
41204
41205 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
41206 @@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
41207 trace_ext4_mb_new_inode_pa(ac, pa);
41208
41209 ext4_mb_use_inode_pa(ac, pa);
41210 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41211 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41212
41213 ei = EXT4_I(ac->ac_inode);
41214 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
41215 @@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
41216 trace_ext4_mb_new_group_pa(ac, pa);
41217
41218 ext4_mb_use_group_pa(ac, pa);
41219 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41220 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41221
41222 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
41223 lg = ac->ac_lg;
41224 @@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
41225 * from the bitmap and continue.
41226 */
41227 }
41228 - atomic_add(free, &sbi->s_mb_discarded);
41229 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
41230
41231 return err;
41232 }
41233 @@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_bud
41234 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
41235 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
41236 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
41237 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
41238 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
41239
41240 if (ac) {
41241 ac->ac_sb = sb;
41242 diff -urNp linux-2.6.32.45/fs/ext4/super.c linux-2.6.32.45/fs/ext4/super.c
41243 --- linux-2.6.32.45/fs/ext4/super.c 2011-03-27 14:31:47.000000000 -0400
41244 +++ linux-2.6.32.45/fs/ext4/super.c 2011-04-17 15:56:46.000000000 -0400
41245 @@ -2287,7 +2287,7 @@ static void ext4_sb_release(struct kobje
41246 }
41247
41248
41249 -static struct sysfs_ops ext4_attr_ops = {
41250 +static const struct sysfs_ops ext4_attr_ops = {
41251 .show = ext4_attr_show,
41252 .store = ext4_attr_store,
41253 };
41254 diff -urNp linux-2.6.32.45/fs/fcntl.c linux-2.6.32.45/fs/fcntl.c
41255 --- linux-2.6.32.45/fs/fcntl.c 2011-03-27 14:31:47.000000000 -0400
41256 +++ linux-2.6.32.45/fs/fcntl.c 2011-04-17 15:56:46.000000000 -0400
41257 @@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct
41258 if (err)
41259 return err;
41260
41261 + if (gr_handle_chroot_fowner(pid, type))
41262 + return -ENOENT;
41263 + if (gr_check_protected_task_fowner(pid, type))
41264 + return -EACCES;
41265 +
41266 f_modown(filp, pid, type, force);
41267 return 0;
41268 }
41269 @@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned in
41270 switch (cmd) {
41271 case F_DUPFD:
41272 case F_DUPFD_CLOEXEC:
41273 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
41274 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
41275 break;
41276 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
41277 diff -urNp linux-2.6.32.45/fs/fifo.c linux-2.6.32.45/fs/fifo.c
41278 --- linux-2.6.32.45/fs/fifo.c 2011-03-27 14:31:47.000000000 -0400
41279 +++ linux-2.6.32.45/fs/fifo.c 2011-04-17 15:56:46.000000000 -0400
41280 @@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode
41281 */
41282 filp->f_op = &read_pipefifo_fops;
41283 pipe->r_counter++;
41284 - if (pipe->readers++ == 0)
41285 + if (atomic_inc_return(&pipe->readers) == 1)
41286 wake_up_partner(inode);
41287
41288 - if (!pipe->writers) {
41289 + if (!atomic_read(&pipe->writers)) {
41290 if ((filp->f_flags & O_NONBLOCK)) {
41291 /* suppress POLLHUP until we have
41292 * seen a writer */
41293 @@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode
41294 * errno=ENXIO when there is no process reading the FIFO.
41295 */
41296 ret = -ENXIO;
41297 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
41298 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
41299 goto err;
41300
41301 filp->f_op = &write_pipefifo_fops;
41302 pipe->w_counter++;
41303 - if (!pipe->writers++)
41304 + if (atomic_inc_return(&pipe->writers) == 1)
41305 wake_up_partner(inode);
41306
41307 - if (!pipe->readers) {
41308 + if (!atomic_read(&pipe->readers)) {
41309 wait_for_partner(inode, &pipe->r_counter);
41310 if (signal_pending(current))
41311 goto err_wr;
41312 @@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode
41313 */
41314 filp->f_op = &rdwr_pipefifo_fops;
41315
41316 - pipe->readers++;
41317 - pipe->writers++;
41318 + atomic_inc(&pipe->readers);
41319 + atomic_inc(&pipe->writers);
41320 pipe->r_counter++;
41321 pipe->w_counter++;
41322 - if (pipe->readers == 1 || pipe->writers == 1)
41323 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
41324 wake_up_partner(inode);
41325 break;
41326
41327 @@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode
41328 return 0;
41329
41330 err_rd:
41331 - if (!--pipe->readers)
41332 + if (atomic_dec_and_test(&pipe->readers))
41333 wake_up_interruptible(&pipe->wait);
41334 ret = -ERESTARTSYS;
41335 goto err;
41336
41337 err_wr:
41338 - if (!--pipe->writers)
41339 + if (atomic_dec_and_test(&pipe->writers))
41340 wake_up_interruptible(&pipe->wait);
41341 ret = -ERESTARTSYS;
41342 goto err;
41343
41344 err:
41345 - if (!pipe->readers && !pipe->writers)
41346 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
41347 free_pipe_info(inode);
41348
41349 err_nocleanup:
41350 diff -urNp linux-2.6.32.45/fs/file.c linux-2.6.32.45/fs/file.c
41351 --- linux-2.6.32.45/fs/file.c 2011-03-27 14:31:47.000000000 -0400
41352 +++ linux-2.6.32.45/fs/file.c 2011-04-17 15:56:46.000000000 -0400
41353 @@ -14,6 +14,7 @@
41354 #include <linux/slab.h>
41355 #include <linux/vmalloc.h>
41356 #include <linux/file.h>
41357 +#include <linux/security.h>
41358 #include <linux/fdtable.h>
41359 #include <linux/bitops.h>
41360 #include <linux/interrupt.h>
41361 @@ -257,6 +258,8 @@ int expand_files(struct files_struct *fi
41362 * N.B. For clone tasks sharing a files structure, this test
41363 * will limit the total number of files that can be opened.
41364 */
41365 +
41366 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
41367 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
41368 return -EMFILE;
41369
41370 diff -urNp linux-2.6.32.45/fs/filesystems.c linux-2.6.32.45/fs/filesystems.c
41371 --- linux-2.6.32.45/fs/filesystems.c 2011-03-27 14:31:47.000000000 -0400
41372 +++ linux-2.6.32.45/fs/filesystems.c 2011-04-17 15:56:46.000000000 -0400
41373 @@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(con
41374 int len = dot ? dot - name : strlen(name);
41375
41376 fs = __get_fs_type(name, len);
41377 +
41378 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
41379 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
41380 +#else
41381 if (!fs && (request_module("%.*s", len, name) == 0))
41382 +#endif
41383 fs = __get_fs_type(name, len);
41384
41385 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
41386 diff -urNp linux-2.6.32.45/fs/fscache/cookie.c linux-2.6.32.45/fs/fscache/cookie.c
41387 --- linux-2.6.32.45/fs/fscache/cookie.c 2011-03-27 14:31:47.000000000 -0400
41388 +++ linux-2.6.32.45/fs/fscache/cookie.c 2011-05-04 17:56:28.000000000 -0400
41389 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
41390 parent ? (char *) parent->def->name : "<no-parent>",
41391 def->name, netfs_data);
41392
41393 - fscache_stat(&fscache_n_acquires);
41394 + fscache_stat_unchecked(&fscache_n_acquires);
41395
41396 /* if there's no parent cookie, then we don't create one here either */
41397 if (!parent) {
41398 - fscache_stat(&fscache_n_acquires_null);
41399 + fscache_stat_unchecked(&fscache_n_acquires_null);
41400 _leave(" [no parent]");
41401 return NULL;
41402 }
41403 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
41404 /* allocate and initialise a cookie */
41405 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
41406 if (!cookie) {
41407 - fscache_stat(&fscache_n_acquires_oom);
41408 + fscache_stat_unchecked(&fscache_n_acquires_oom);
41409 _leave(" [ENOMEM]");
41410 return NULL;
41411 }
41412 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
41413
41414 switch (cookie->def->type) {
41415 case FSCACHE_COOKIE_TYPE_INDEX:
41416 - fscache_stat(&fscache_n_cookie_index);
41417 + fscache_stat_unchecked(&fscache_n_cookie_index);
41418 break;
41419 case FSCACHE_COOKIE_TYPE_DATAFILE:
41420 - fscache_stat(&fscache_n_cookie_data);
41421 + fscache_stat_unchecked(&fscache_n_cookie_data);
41422 break;
41423 default:
41424 - fscache_stat(&fscache_n_cookie_special);
41425 + fscache_stat_unchecked(&fscache_n_cookie_special);
41426 break;
41427 }
41428
41429 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
41430 if (fscache_acquire_non_index_cookie(cookie) < 0) {
41431 atomic_dec(&parent->n_children);
41432 __fscache_cookie_put(cookie);
41433 - fscache_stat(&fscache_n_acquires_nobufs);
41434 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
41435 _leave(" = NULL");
41436 return NULL;
41437 }
41438 }
41439
41440 - fscache_stat(&fscache_n_acquires_ok);
41441 + fscache_stat_unchecked(&fscache_n_acquires_ok);
41442 _leave(" = %p", cookie);
41443 return cookie;
41444 }
41445 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
41446 cache = fscache_select_cache_for_object(cookie->parent);
41447 if (!cache) {
41448 up_read(&fscache_addremove_sem);
41449 - fscache_stat(&fscache_n_acquires_no_cache);
41450 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
41451 _leave(" = -ENOMEDIUM [no cache]");
41452 return -ENOMEDIUM;
41453 }
41454 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
41455 object = cache->ops->alloc_object(cache, cookie);
41456 fscache_stat_d(&fscache_n_cop_alloc_object);
41457 if (IS_ERR(object)) {
41458 - fscache_stat(&fscache_n_object_no_alloc);
41459 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
41460 ret = PTR_ERR(object);
41461 goto error;
41462 }
41463
41464 - fscache_stat(&fscache_n_object_alloc);
41465 + fscache_stat_unchecked(&fscache_n_object_alloc);
41466
41467 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
41468
41469 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
41470 struct fscache_object *object;
41471 struct hlist_node *_p;
41472
41473 - fscache_stat(&fscache_n_updates);
41474 + fscache_stat_unchecked(&fscache_n_updates);
41475
41476 if (!cookie) {
41477 - fscache_stat(&fscache_n_updates_null);
41478 + fscache_stat_unchecked(&fscache_n_updates_null);
41479 _leave(" [no cookie]");
41480 return;
41481 }
41482 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
41483 struct fscache_object *object;
41484 unsigned long event;
41485
41486 - fscache_stat(&fscache_n_relinquishes);
41487 + fscache_stat_unchecked(&fscache_n_relinquishes);
41488 if (retire)
41489 - fscache_stat(&fscache_n_relinquishes_retire);
41490 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
41491
41492 if (!cookie) {
41493 - fscache_stat(&fscache_n_relinquishes_null);
41494 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
41495 _leave(" [no cookie]");
41496 return;
41497 }
41498 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
41499
41500 /* wait for the cookie to finish being instantiated (or to fail) */
41501 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
41502 - fscache_stat(&fscache_n_relinquishes_waitcrt);
41503 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
41504 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
41505 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
41506 }
41507 diff -urNp linux-2.6.32.45/fs/fscache/internal.h linux-2.6.32.45/fs/fscache/internal.h
41508 --- linux-2.6.32.45/fs/fscache/internal.h 2011-03-27 14:31:47.000000000 -0400
41509 +++ linux-2.6.32.45/fs/fscache/internal.h 2011-05-04 17:56:28.000000000 -0400
41510 @@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
41511 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
41512 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
41513
41514 -extern atomic_t fscache_n_op_pend;
41515 -extern atomic_t fscache_n_op_run;
41516 -extern atomic_t fscache_n_op_enqueue;
41517 -extern atomic_t fscache_n_op_deferred_release;
41518 -extern atomic_t fscache_n_op_release;
41519 -extern atomic_t fscache_n_op_gc;
41520 -extern atomic_t fscache_n_op_cancelled;
41521 -extern atomic_t fscache_n_op_rejected;
41522 -
41523 -extern atomic_t fscache_n_attr_changed;
41524 -extern atomic_t fscache_n_attr_changed_ok;
41525 -extern atomic_t fscache_n_attr_changed_nobufs;
41526 -extern atomic_t fscache_n_attr_changed_nomem;
41527 -extern atomic_t fscache_n_attr_changed_calls;
41528 -
41529 -extern atomic_t fscache_n_allocs;
41530 -extern atomic_t fscache_n_allocs_ok;
41531 -extern atomic_t fscache_n_allocs_wait;
41532 -extern atomic_t fscache_n_allocs_nobufs;
41533 -extern atomic_t fscache_n_allocs_intr;
41534 -extern atomic_t fscache_n_allocs_object_dead;
41535 -extern atomic_t fscache_n_alloc_ops;
41536 -extern atomic_t fscache_n_alloc_op_waits;
41537 -
41538 -extern atomic_t fscache_n_retrievals;
41539 -extern atomic_t fscache_n_retrievals_ok;
41540 -extern atomic_t fscache_n_retrievals_wait;
41541 -extern atomic_t fscache_n_retrievals_nodata;
41542 -extern atomic_t fscache_n_retrievals_nobufs;
41543 -extern atomic_t fscache_n_retrievals_intr;
41544 -extern atomic_t fscache_n_retrievals_nomem;
41545 -extern atomic_t fscache_n_retrievals_object_dead;
41546 -extern atomic_t fscache_n_retrieval_ops;
41547 -extern atomic_t fscache_n_retrieval_op_waits;
41548 -
41549 -extern atomic_t fscache_n_stores;
41550 -extern atomic_t fscache_n_stores_ok;
41551 -extern atomic_t fscache_n_stores_again;
41552 -extern atomic_t fscache_n_stores_nobufs;
41553 -extern atomic_t fscache_n_stores_oom;
41554 -extern atomic_t fscache_n_store_ops;
41555 -extern atomic_t fscache_n_store_calls;
41556 -extern atomic_t fscache_n_store_pages;
41557 -extern atomic_t fscache_n_store_radix_deletes;
41558 -extern atomic_t fscache_n_store_pages_over_limit;
41559 -
41560 -extern atomic_t fscache_n_store_vmscan_not_storing;
41561 -extern atomic_t fscache_n_store_vmscan_gone;
41562 -extern atomic_t fscache_n_store_vmscan_busy;
41563 -extern atomic_t fscache_n_store_vmscan_cancelled;
41564 -
41565 -extern atomic_t fscache_n_marks;
41566 -extern atomic_t fscache_n_uncaches;
41567 -
41568 -extern atomic_t fscache_n_acquires;
41569 -extern atomic_t fscache_n_acquires_null;
41570 -extern atomic_t fscache_n_acquires_no_cache;
41571 -extern atomic_t fscache_n_acquires_ok;
41572 -extern atomic_t fscache_n_acquires_nobufs;
41573 -extern atomic_t fscache_n_acquires_oom;
41574 -
41575 -extern atomic_t fscache_n_updates;
41576 -extern atomic_t fscache_n_updates_null;
41577 -extern atomic_t fscache_n_updates_run;
41578 -
41579 -extern atomic_t fscache_n_relinquishes;
41580 -extern atomic_t fscache_n_relinquishes_null;
41581 -extern atomic_t fscache_n_relinquishes_waitcrt;
41582 -extern atomic_t fscache_n_relinquishes_retire;
41583 -
41584 -extern atomic_t fscache_n_cookie_index;
41585 -extern atomic_t fscache_n_cookie_data;
41586 -extern atomic_t fscache_n_cookie_special;
41587 -
41588 -extern atomic_t fscache_n_object_alloc;
41589 -extern atomic_t fscache_n_object_no_alloc;
41590 -extern atomic_t fscache_n_object_lookups;
41591 -extern atomic_t fscache_n_object_lookups_negative;
41592 -extern atomic_t fscache_n_object_lookups_positive;
41593 -extern atomic_t fscache_n_object_lookups_timed_out;
41594 -extern atomic_t fscache_n_object_created;
41595 -extern atomic_t fscache_n_object_avail;
41596 -extern atomic_t fscache_n_object_dead;
41597 -
41598 -extern atomic_t fscache_n_checkaux_none;
41599 -extern atomic_t fscache_n_checkaux_okay;
41600 -extern atomic_t fscache_n_checkaux_update;
41601 -extern atomic_t fscache_n_checkaux_obsolete;
41602 +extern atomic_unchecked_t fscache_n_op_pend;
41603 +extern atomic_unchecked_t fscache_n_op_run;
41604 +extern atomic_unchecked_t fscache_n_op_enqueue;
41605 +extern atomic_unchecked_t fscache_n_op_deferred_release;
41606 +extern atomic_unchecked_t fscache_n_op_release;
41607 +extern atomic_unchecked_t fscache_n_op_gc;
41608 +extern atomic_unchecked_t fscache_n_op_cancelled;
41609 +extern atomic_unchecked_t fscache_n_op_rejected;
41610 +
41611 +extern atomic_unchecked_t fscache_n_attr_changed;
41612 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
41613 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
41614 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
41615 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
41616 +
41617 +extern atomic_unchecked_t fscache_n_allocs;
41618 +extern atomic_unchecked_t fscache_n_allocs_ok;
41619 +extern atomic_unchecked_t fscache_n_allocs_wait;
41620 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
41621 +extern atomic_unchecked_t fscache_n_allocs_intr;
41622 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
41623 +extern atomic_unchecked_t fscache_n_alloc_ops;
41624 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
41625 +
41626 +extern atomic_unchecked_t fscache_n_retrievals;
41627 +extern atomic_unchecked_t fscache_n_retrievals_ok;
41628 +extern atomic_unchecked_t fscache_n_retrievals_wait;
41629 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
41630 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
41631 +extern atomic_unchecked_t fscache_n_retrievals_intr;
41632 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
41633 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
41634 +extern atomic_unchecked_t fscache_n_retrieval_ops;
41635 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
41636 +
41637 +extern atomic_unchecked_t fscache_n_stores;
41638 +extern atomic_unchecked_t fscache_n_stores_ok;
41639 +extern atomic_unchecked_t fscache_n_stores_again;
41640 +extern atomic_unchecked_t fscache_n_stores_nobufs;
41641 +extern atomic_unchecked_t fscache_n_stores_oom;
41642 +extern atomic_unchecked_t fscache_n_store_ops;
41643 +extern atomic_unchecked_t fscache_n_store_calls;
41644 +extern atomic_unchecked_t fscache_n_store_pages;
41645 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
41646 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
41647 +
41648 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
41649 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
41650 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
41651 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
41652 +
41653 +extern atomic_unchecked_t fscache_n_marks;
41654 +extern atomic_unchecked_t fscache_n_uncaches;
41655 +
41656 +extern atomic_unchecked_t fscache_n_acquires;
41657 +extern atomic_unchecked_t fscache_n_acquires_null;
41658 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
41659 +extern atomic_unchecked_t fscache_n_acquires_ok;
41660 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
41661 +extern atomic_unchecked_t fscache_n_acquires_oom;
41662 +
41663 +extern atomic_unchecked_t fscache_n_updates;
41664 +extern atomic_unchecked_t fscache_n_updates_null;
41665 +extern atomic_unchecked_t fscache_n_updates_run;
41666 +
41667 +extern atomic_unchecked_t fscache_n_relinquishes;
41668 +extern atomic_unchecked_t fscache_n_relinquishes_null;
41669 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
41670 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
41671 +
41672 +extern atomic_unchecked_t fscache_n_cookie_index;
41673 +extern atomic_unchecked_t fscache_n_cookie_data;
41674 +extern atomic_unchecked_t fscache_n_cookie_special;
41675 +
41676 +extern atomic_unchecked_t fscache_n_object_alloc;
41677 +extern atomic_unchecked_t fscache_n_object_no_alloc;
41678 +extern atomic_unchecked_t fscache_n_object_lookups;
41679 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
41680 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
41681 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
41682 +extern atomic_unchecked_t fscache_n_object_created;
41683 +extern atomic_unchecked_t fscache_n_object_avail;
41684 +extern atomic_unchecked_t fscache_n_object_dead;
41685 +
41686 +extern atomic_unchecked_t fscache_n_checkaux_none;
41687 +extern atomic_unchecked_t fscache_n_checkaux_okay;
41688 +extern atomic_unchecked_t fscache_n_checkaux_update;
41689 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
41690
41691 extern atomic_t fscache_n_cop_alloc_object;
41692 extern atomic_t fscache_n_cop_lookup_object;
41693 @@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t
41694 atomic_inc(stat);
41695 }
41696
41697 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
41698 +{
41699 + atomic_inc_unchecked(stat);
41700 +}
41701 +
41702 static inline void fscache_stat_d(atomic_t *stat)
41703 {
41704 atomic_dec(stat);
41705 @@ -259,6 +264,7 @@ extern const struct file_operations fsca
41706
41707 #define __fscache_stat(stat) (NULL)
41708 #define fscache_stat(stat) do {} while (0)
41709 +#define fscache_stat_unchecked(stat) do {} while (0)
41710 #define fscache_stat_d(stat) do {} while (0)
41711 #endif
41712
41713 diff -urNp linux-2.6.32.45/fs/fscache/object.c linux-2.6.32.45/fs/fscache/object.c
41714 --- linux-2.6.32.45/fs/fscache/object.c 2011-03-27 14:31:47.000000000 -0400
41715 +++ linux-2.6.32.45/fs/fscache/object.c 2011-05-04 17:56:28.000000000 -0400
41716 @@ -144,7 +144,7 @@ static void fscache_object_state_machine
41717 /* update the object metadata on disk */
41718 case FSCACHE_OBJECT_UPDATING:
41719 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
41720 - fscache_stat(&fscache_n_updates_run);
41721 + fscache_stat_unchecked(&fscache_n_updates_run);
41722 fscache_stat(&fscache_n_cop_update_object);
41723 object->cache->ops->update_object(object);
41724 fscache_stat_d(&fscache_n_cop_update_object);
41725 @@ -233,7 +233,7 @@ static void fscache_object_state_machine
41726 spin_lock(&object->lock);
41727 object->state = FSCACHE_OBJECT_DEAD;
41728 spin_unlock(&object->lock);
41729 - fscache_stat(&fscache_n_object_dead);
41730 + fscache_stat_unchecked(&fscache_n_object_dead);
41731 goto terminal_transit;
41732
41733 /* handle the parent cache of this object being withdrawn from
41734 @@ -248,7 +248,7 @@ static void fscache_object_state_machine
41735 spin_lock(&object->lock);
41736 object->state = FSCACHE_OBJECT_DEAD;
41737 spin_unlock(&object->lock);
41738 - fscache_stat(&fscache_n_object_dead);
41739 + fscache_stat_unchecked(&fscache_n_object_dead);
41740 goto terminal_transit;
41741
41742 /* complain about the object being woken up once it is
41743 @@ -492,7 +492,7 @@ static void fscache_lookup_object(struct
41744 parent->cookie->def->name, cookie->def->name,
41745 object->cache->tag->name);
41746
41747 - fscache_stat(&fscache_n_object_lookups);
41748 + fscache_stat_unchecked(&fscache_n_object_lookups);
41749 fscache_stat(&fscache_n_cop_lookup_object);
41750 ret = object->cache->ops->lookup_object(object);
41751 fscache_stat_d(&fscache_n_cop_lookup_object);
41752 @@ -503,7 +503,7 @@ static void fscache_lookup_object(struct
41753 if (ret == -ETIMEDOUT) {
41754 /* probably stuck behind another object, so move this one to
41755 * the back of the queue */
41756 - fscache_stat(&fscache_n_object_lookups_timed_out);
41757 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
41758 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
41759 }
41760
41761 @@ -526,7 +526,7 @@ void fscache_object_lookup_negative(stru
41762
41763 spin_lock(&object->lock);
41764 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
41765 - fscache_stat(&fscache_n_object_lookups_negative);
41766 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
41767
41768 /* transit here to allow write requests to begin stacking up
41769 * and read requests to begin returning ENODATA */
41770 @@ -572,7 +572,7 @@ void fscache_obtained_object(struct fsca
41771 * result, in which case there may be data available */
41772 spin_lock(&object->lock);
41773 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
41774 - fscache_stat(&fscache_n_object_lookups_positive);
41775 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
41776
41777 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
41778
41779 @@ -586,7 +586,7 @@ void fscache_obtained_object(struct fsca
41780 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
41781 } else {
41782 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
41783 - fscache_stat(&fscache_n_object_created);
41784 + fscache_stat_unchecked(&fscache_n_object_created);
41785
41786 object->state = FSCACHE_OBJECT_AVAILABLE;
41787 spin_unlock(&object->lock);
41788 @@ -633,7 +633,7 @@ static void fscache_object_available(str
41789 fscache_enqueue_dependents(object);
41790
41791 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
41792 - fscache_stat(&fscache_n_object_avail);
41793 + fscache_stat_unchecked(&fscache_n_object_avail);
41794
41795 _leave("");
41796 }
41797 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
41798 enum fscache_checkaux result;
41799
41800 if (!object->cookie->def->check_aux) {
41801 - fscache_stat(&fscache_n_checkaux_none);
41802 + fscache_stat_unchecked(&fscache_n_checkaux_none);
41803 return FSCACHE_CHECKAUX_OKAY;
41804 }
41805
41806 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
41807 switch (result) {
41808 /* entry okay as is */
41809 case FSCACHE_CHECKAUX_OKAY:
41810 - fscache_stat(&fscache_n_checkaux_okay);
41811 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
41812 break;
41813
41814 /* entry requires update */
41815 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
41816 - fscache_stat(&fscache_n_checkaux_update);
41817 + fscache_stat_unchecked(&fscache_n_checkaux_update);
41818 break;
41819
41820 /* entry requires deletion */
41821 case FSCACHE_CHECKAUX_OBSOLETE:
41822 - fscache_stat(&fscache_n_checkaux_obsolete);
41823 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
41824 break;
41825
41826 default:
41827 diff -urNp linux-2.6.32.45/fs/fscache/operation.c linux-2.6.32.45/fs/fscache/operation.c
41828 --- linux-2.6.32.45/fs/fscache/operation.c 2011-03-27 14:31:47.000000000 -0400
41829 +++ linux-2.6.32.45/fs/fscache/operation.c 2011-05-04 17:56:28.000000000 -0400
41830 @@ -16,7 +16,7 @@
41831 #include <linux/seq_file.h>
41832 #include "internal.h"
41833
41834 -atomic_t fscache_op_debug_id;
41835 +atomic_unchecked_t fscache_op_debug_id;
41836 EXPORT_SYMBOL(fscache_op_debug_id);
41837
41838 /**
41839 @@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fs
41840 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
41841 ASSERTCMP(atomic_read(&op->usage), >, 0);
41842
41843 - fscache_stat(&fscache_n_op_enqueue);
41844 + fscache_stat_unchecked(&fscache_n_op_enqueue);
41845 switch (op->flags & FSCACHE_OP_TYPE) {
41846 case FSCACHE_OP_FAST:
41847 _debug("queue fast");
41848 @@ -76,7 +76,7 @@ static void fscache_run_op(struct fscach
41849 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
41850 if (op->processor)
41851 fscache_enqueue_operation(op);
41852 - fscache_stat(&fscache_n_op_run);
41853 + fscache_stat_unchecked(&fscache_n_op_run);
41854 }
41855
41856 /*
41857 @@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct f
41858 if (object->n_ops > 0) {
41859 atomic_inc(&op->usage);
41860 list_add_tail(&op->pend_link, &object->pending_ops);
41861 - fscache_stat(&fscache_n_op_pend);
41862 + fscache_stat_unchecked(&fscache_n_op_pend);
41863 } else if (!list_empty(&object->pending_ops)) {
41864 atomic_inc(&op->usage);
41865 list_add_tail(&op->pend_link, &object->pending_ops);
41866 - fscache_stat(&fscache_n_op_pend);
41867 + fscache_stat_unchecked(&fscache_n_op_pend);
41868 fscache_start_operations(object);
41869 } else {
41870 ASSERTCMP(object->n_in_progress, ==, 0);
41871 @@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct f
41872 object->n_exclusive++; /* reads and writes must wait */
41873 atomic_inc(&op->usage);
41874 list_add_tail(&op->pend_link, &object->pending_ops);
41875 - fscache_stat(&fscache_n_op_pend);
41876 + fscache_stat_unchecked(&fscache_n_op_pend);
41877 ret = 0;
41878 } else {
41879 /* not allowed to submit ops in any other state */
41880 @@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_obj
41881 if (object->n_exclusive > 0) {
41882 atomic_inc(&op->usage);
41883 list_add_tail(&op->pend_link, &object->pending_ops);
41884 - fscache_stat(&fscache_n_op_pend);
41885 + fscache_stat_unchecked(&fscache_n_op_pend);
41886 } else if (!list_empty(&object->pending_ops)) {
41887 atomic_inc(&op->usage);
41888 list_add_tail(&op->pend_link, &object->pending_ops);
41889 - fscache_stat(&fscache_n_op_pend);
41890 + fscache_stat_unchecked(&fscache_n_op_pend);
41891 fscache_start_operations(object);
41892 } else {
41893 ASSERTCMP(object->n_exclusive, ==, 0);
41894 @@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_obj
41895 object->n_ops++;
41896 atomic_inc(&op->usage);
41897 list_add_tail(&op->pend_link, &object->pending_ops);
41898 - fscache_stat(&fscache_n_op_pend);
41899 + fscache_stat_unchecked(&fscache_n_op_pend);
41900 ret = 0;
41901 } else if (object->state == FSCACHE_OBJECT_DYING ||
41902 object->state == FSCACHE_OBJECT_LC_DYING ||
41903 object->state == FSCACHE_OBJECT_WITHDRAWING) {
41904 - fscache_stat(&fscache_n_op_rejected);
41905 + fscache_stat_unchecked(&fscache_n_op_rejected);
41906 ret = -ENOBUFS;
41907 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
41908 fscache_report_unexpected_submission(object, op, ostate);
41909 @@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_ope
41910
41911 ret = -EBUSY;
41912 if (!list_empty(&op->pend_link)) {
41913 - fscache_stat(&fscache_n_op_cancelled);
41914 + fscache_stat_unchecked(&fscache_n_op_cancelled);
41915 list_del_init(&op->pend_link);
41916 object->n_ops--;
41917 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
41918 @@ -344,7 +344,7 @@ void fscache_put_operation(struct fscach
41919 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
41920 BUG();
41921
41922 - fscache_stat(&fscache_n_op_release);
41923 + fscache_stat_unchecked(&fscache_n_op_release);
41924
41925 if (op->release) {
41926 op->release(op);
41927 @@ -361,7 +361,7 @@ void fscache_put_operation(struct fscach
41928 * lock, and defer it otherwise */
41929 if (!spin_trylock(&object->lock)) {
41930 _debug("defer put");
41931 - fscache_stat(&fscache_n_op_deferred_release);
41932 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
41933
41934 cache = object->cache;
41935 spin_lock(&cache->op_gc_list_lock);
41936 @@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_st
41937
41938 _debug("GC DEFERRED REL OBJ%x OP%x",
41939 object->debug_id, op->debug_id);
41940 - fscache_stat(&fscache_n_op_gc);
41941 + fscache_stat_unchecked(&fscache_n_op_gc);
41942
41943 ASSERTCMP(atomic_read(&op->usage), ==, 0);
41944
41945 diff -urNp linux-2.6.32.45/fs/fscache/page.c linux-2.6.32.45/fs/fscache/page.c
41946 --- linux-2.6.32.45/fs/fscache/page.c 2011-03-27 14:31:47.000000000 -0400
41947 +++ linux-2.6.32.45/fs/fscache/page.c 2011-05-04 17:56:28.000000000 -0400
41948 @@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct
41949 val = radix_tree_lookup(&cookie->stores, page->index);
41950 if (!val) {
41951 rcu_read_unlock();
41952 - fscache_stat(&fscache_n_store_vmscan_not_storing);
41953 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
41954 __fscache_uncache_page(cookie, page);
41955 return true;
41956 }
41957 @@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct
41958 spin_unlock(&cookie->stores_lock);
41959
41960 if (xpage) {
41961 - fscache_stat(&fscache_n_store_vmscan_cancelled);
41962 - fscache_stat(&fscache_n_store_radix_deletes);
41963 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
41964 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
41965 ASSERTCMP(xpage, ==, page);
41966 } else {
41967 - fscache_stat(&fscache_n_store_vmscan_gone);
41968 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
41969 }
41970
41971 wake_up_bit(&cookie->flags, 0);
41972 @@ -106,7 +106,7 @@ page_busy:
41973 /* we might want to wait here, but that could deadlock the allocator as
41974 * the slow-work threads writing to the cache may all end up sleeping
41975 * on memory allocation */
41976 - fscache_stat(&fscache_n_store_vmscan_busy);
41977 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
41978 return false;
41979 }
41980 EXPORT_SYMBOL(__fscache_maybe_release_page);
41981 @@ -130,7 +130,7 @@ static void fscache_end_page_write(struc
41982 FSCACHE_COOKIE_STORING_TAG);
41983 if (!radix_tree_tag_get(&cookie->stores, page->index,
41984 FSCACHE_COOKIE_PENDING_TAG)) {
41985 - fscache_stat(&fscache_n_store_radix_deletes);
41986 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
41987 xpage = radix_tree_delete(&cookie->stores, page->index);
41988 }
41989 spin_unlock(&cookie->stores_lock);
41990 @@ -151,7 +151,7 @@ static void fscache_attr_changed_op(stru
41991
41992 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
41993
41994 - fscache_stat(&fscache_n_attr_changed_calls);
41995 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
41996
41997 if (fscache_object_is_active(object)) {
41998 fscache_set_op_state(op, "CallFS");
41999 @@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscach
42000
42001 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42002
42003 - fscache_stat(&fscache_n_attr_changed);
42004 + fscache_stat_unchecked(&fscache_n_attr_changed);
42005
42006 op = kzalloc(sizeof(*op), GFP_KERNEL);
42007 if (!op) {
42008 - fscache_stat(&fscache_n_attr_changed_nomem);
42009 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
42010 _leave(" = -ENOMEM");
42011 return -ENOMEM;
42012 }
42013 @@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscach
42014 if (fscache_submit_exclusive_op(object, op) < 0)
42015 goto nobufs;
42016 spin_unlock(&cookie->lock);
42017 - fscache_stat(&fscache_n_attr_changed_ok);
42018 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
42019 fscache_put_operation(op);
42020 _leave(" = 0");
42021 return 0;
42022 @@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscach
42023 nobufs:
42024 spin_unlock(&cookie->lock);
42025 kfree(op);
42026 - fscache_stat(&fscache_n_attr_changed_nobufs);
42027 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
42028 _leave(" = %d", -ENOBUFS);
42029 return -ENOBUFS;
42030 }
42031 @@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache
42032 /* allocate a retrieval operation and attempt to submit it */
42033 op = kzalloc(sizeof(*op), GFP_NOIO);
42034 if (!op) {
42035 - fscache_stat(&fscache_n_retrievals_nomem);
42036 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42037 return NULL;
42038 }
42039
42040 @@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_loo
42041 return 0;
42042 }
42043
42044 - fscache_stat(&fscache_n_retrievals_wait);
42045 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
42046
42047 jif = jiffies;
42048 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
42049 fscache_wait_bit_interruptible,
42050 TASK_INTERRUPTIBLE) != 0) {
42051 - fscache_stat(&fscache_n_retrievals_intr);
42052 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
42053 _leave(" = -ERESTARTSYS");
42054 return -ERESTARTSYS;
42055 }
42056 @@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_loo
42057 */
42058 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
42059 struct fscache_retrieval *op,
42060 - atomic_t *stat_op_waits,
42061 - atomic_t *stat_object_dead)
42062 + atomic_unchecked_t *stat_op_waits,
42063 + atomic_unchecked_t *stat_object_dead)
42064 {
42065 int ret;
42066
42067 @@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_ac
42068 goto check_if_dead;
42069
42070 _debug(">>> WT");
42071 - fscache_stat(stat_op_waits);
42072 + fscache_stat_unchecked(stat_op_waits);
42073 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
42074 fscache_wait_bit_interruptible,
42075 TASK_INTERRUPTIBLE) < 0) {
42076 @@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_ac
42077
42078 check_if_dead:
42079 if (unlikely(fscache_object_is_dead(object))) {
42080 - fscache_stat(stat_object_dead);
42081 + fscache_stat_unchecked(stat_object_dead);
42082 return -ENOBUFS;
42083 }
42084 return 0;
42085 @@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct
42086
42087 _enter("%p,%p,,,", cookie, page);
42088
42089 - fscache_stat(&fscache_n_retrievals);
42090 + fscache_stat_unchecked(&fscache_n_retrievals);
42091
42092 if (hlist_empty(&cookie->backing_objects))
42093 goto nobufs;
42094 @@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct
42095 goto nobufs_unlock;
42096 spin_unlock(&cookie->lock);
42097
42098 - fscache_stat(&fscache_n_retrieval_ops);
42099 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
42100
42101 /* pin the netfs read context in case we need to do the actual netfs
42102 * read because we've encountered a cache read failure */
42103 @@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct
42104
42105 error:
42106 if (ret == -ENOMEM)
42107 - fscache_stat(&fscache_n_retrievals_nomem);
42108 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42109 else if (ret == -ERESTARTSYS)
42110 - fscache_stat(&fscache_n_retrievals_intr);
42111 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
42112 else if (ret == -ENODATA)
42113 - fscache_stat(&fscache_n_retrievals_nodata);
42114 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
42115 else if (ret < 0)
42116 - fscache_stat(&fscache_n_retrievals_nobufs);
42117 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42118 else
42119 - fscache_stat(&fscache_n_retrievals_ok);
42120 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
42121
42122 fscache_put_retrieval(op);
42123 _leave(" = %d", ret);
42124 @@ -453,7 +453,7 @@ nobufs_unlock:
42125 spin_unlock(&cookie->lock);
42126 kfree(op);
42127 nobufs:
42128 - fscache_stat(&fscache_n_retrievals_nobufs);
42129 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42130 _leave(" = -ENOBUFS");
42131 return -ENOBUFS;
42132 }
42133 @@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct
42134
42135 _enter("%p,,%d,,,", cookie, *nr_pages);
42136
42137 - fscache_stat(&fscache_n_retrievals);
42138 + fscache_stat_unchecked(&fscache_n_retrievals);
42139
42140 if (hlist_empty(&cookie->backing_objects))
42141 goto nobufs;
42142 @@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct
42143 goto nobufs_unlock;
42144 spin_unlock(&cookie->lock);
42145
42146 - fscache_stat(&fscache_n_retrieval_ops);
42147 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
42148
42149 /* pin the netfs read context in case we need to do the actual netfs
42150 * read because we've encountered a cache read failure */
42151 @@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct
42152
42153 error:
42154 if (ret == -ENOMEM)
42155 - fscache_stat(&fscache_n_retrievals_nomem);
42156 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42157 else if (ret == -ERESTARTSYS)
42158 - fscache_stat(&fscache_n_retrievals_intr);
42159 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
42160 else if (ret == -ENODATA)
42161 - fscache_stat(&fscache_n_retrievals_nodata);
42162 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
42163 else if (ret < 0)
42164 - fscache_stat(&fscache_n_retrievals_nobufs);
42165 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42166 else
42167 - fscache_stat(&fscache_n_retrievals_ok);
42168 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
42169
42170 fscache_put_retrieval(op);
42171 _leave(" = %d", ret);
42172 @@ -570,7 +570,7 @@ nobufs_unlock:
42173 spin_unlock(&cookie->lock);
42174 kfree(op);
42175 nobufs:
42176 - fscache_stat(&fscache_n_retrievals_nobufs);
42177 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42178 _leave(" = -ENOBUFS");
42179 return -ENOBUFS;
42180 }
42181 @@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_
42182
42183 _enter("%p,%p,,,", cookie, page);
42184
42185 - fscache_stat(&fscache_n_allocs);
42186 + fscache_stat_unchecked(&fscache_n_allocs);
42187
42188 if (hlist_empty(&cookie->backing_objects))
42189 goto nobufs;
42190 @@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_
42191 goto nobufs_unlock;
42192 spin_unlock(&cookie->lock);
42193
42194 - fscache_stat(&fscache_n_alloc_ops);
42195 + fscache_stat_unchecked(&fscache_n_alloc_ops);
42196
42197 ret = fscache_wait_for_retrieval_activation(
42198 object, op,
42199 @@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_
42200
42201 error:
42202 if (ret == -ERESTARTSYS)
42203 - fscache_stat(&fscache_n_allocs_intr);
42204 + fscache_stat_unchecked(&fscache_n_allocs_intr);
42205 else if (ret < 0)
42206 - fscache_stat(&fscache_n_allocs_nobufs);
42207 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
42208 else
42209 - fscache_stat(&fscache_n_allocs_ok);
42210 + fscache_stat_unchecked(&fscache_n_allocs_ok);
42211
42212 fscache_put_retrieval(op);
42213 _leave(" = %d", ret);
42214 @@ -651,7 +651,7 @@ nobufs_unlock:
42215 spin_unlock(&cookie->lock);
42216 kfree(op);
42217 nobufs:
42218 - fscache_stat(&fscache_n_allocs_nobufs);
42219 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
42220 _leave(" = -ENOBUFS");
42221 return -ENOBUFS;
42222 }
42223 @@ -694,7 +694,7 @@ static void fscache_write_op(struct fsca
42224
42225 spin_lock(&cookie->stores_lock);
42226
42227 - fscache_stat(&fscache_n_store_calls);
42228 + fscache_stat_unchecked(&fscache_n_store_calls);
42229
42230 /* find a page to store */
42231 page = NULL;
42232 @@ -705,7 +705,7 @@ static void fscache_write_op(struct fsca
42233 page = results[0];
42234 _debug("gang %d [%lx]", n, page->index);
42235 if (page->index > op->store_limit) {
42236 - fscache_stat(&fscache_n_store_pages_over_limit);
42237 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
42238 goto superseded;
42239 }
42240
42241 @@ -721,7 +721,7 @@ static void fscache_write_op(struct fsca
42242
42243 if (page) {
42244 fscache_set_op_state(&op->op, "Store");
42245 - fscache_stat(&fscache_n_store_pages);
42246 + fscache_stat_unchecked(&fscache_n_store_pages);
42247 fscache_stat(&fscache_n_cop_write_page);
42248 ret = object->cache->ops->write_page(op, page);
42249 fscache_stat_d(&fscache_n_cop_write_page);
42250 @@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_
42251 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42252 ASSERT(PageFsCache(page));
42253
42254 - fscache_stat(&fscache_n_stores);
42255 + fscache_stat_unchecked(&fscache_n_stores);
42256
42257 op = kzalloc(sizeof(*op), GFP_NOIO);
42258 if (!op)
42259 @@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_
42260 spin_unlock(&cookie->stores_lock);
42261 spin_unlock(&object->lock);
42262
42263 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
42264 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
42265 op->store_limit = object->store_limit;
42266
42267 if (fscache_submit_op(object, &op->op) < 0)
42268 @@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_
42269
42270 spin_unlock(&cookie->lock);
42271 radix_tree_preload_end();
42272 - fscache_stat(&fscache_n_store_ops);
42273 - fscache_stat(&fscache_n_stores_ok);
42274 + fscache_stat_unchecked(&fscache_n_store_ops);
42275 + fscache_stat_unchecked(&fscache_n_stores_ok);
42276
42277 /* the slow work queue now carries its own ref on the object */
42278 fscache_put_operation(&op->op);
42279 @@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_
42280 return 0;
42281
42282 already_queued:
42283 - fscache_stat(&fscache_n_stores_again);
42284 + fscache_stat_unchecked(&fscache_n_stores_again);
42285 already_pending:
42286 spin_unlock(&cookie->stores_lock);
42287 spin_unlock(&object->lock);
42288 spin_unlock(&cookie->lock);
42289 radix_tree_preload_end();
42290 kfree(op);
42291 - fscache_stat(&fscache_n_stores_ok);
42292 + fscache_stat_unchecked(&fscache_n_stores_ok);
42293 _leave(" = 0");
42294 return 0;
42295
42296 @@ -886,14 +886,14 @@ nobufs:
42297 spin_unlock(&cookie->lock);
42298 radix_tree_preload_end();
42299 kfree(op);
42300 - fscache_stat(&fscache_n_stores_nobufs);
42301 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
42302 _leave(" = -ENOBUFS");
42303 return -ENOBUFS;
42304
42305 nomem_free:
42306 kfree(op);
42307 nomem:
42308 - fscache_stat(&fscache_n_stores_oom);
42309 + fscache_stat_unchecked(&fscache_n_stores_oom);
42310 _leave(" = -ENOMEM");
42311 return -ENOMEM;
42312 }
42313 @@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscac
42314 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42315 ASSERTCMP(page, !=, NULL);
42316
42317 - fscache_stat(&fscache_n_uncaches);
42318 + fscache_stat_unchecked(&fscache_n_uncaches);
42319
42320 /* cache withdrawal may beat us to it */
42321 if (!PageFsCache(page))
42322 @@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fs
42323 unsigned long loop;
42324
42325 #ifdef CONFIG_FSCACHE_STATS
42326 - atomic_add(pagevec->nr, &fscache_n_marks);
42327 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
42328 #endif
42329
42330 for (loop = 0; loop < pagevec->nr; loop++) {
42331 diff -urNp linux-2.6.32.45/fs/fscache/stats.c linux-2.6.32.45/fs/fscache/stats.c
42332 --- linux-2.6.32.45/fs/fscache/stats.c 2011-03-27 14:31:47.000000000 -0400
42333 +++ linux-2.6.32.45/fs/fscache/stats.c 2011-05-04 17:56:28.000000000 -0400
42334 @@ -18,95 +18,95 @@
42335 /*
42336 * operation counters
42337 */
42338 -atomic_t fscache_n_op_pend;
42339 -atomic_t fscache_n_op_run;
42340 -atomic_t fscache_n_op_enqueue;
42341 -atomic_t fscache_n_op_requeue;
42342 -atomic_t fscache_n_op_deferred_release;
42343 -atomic_t fscache_n_op_release;
42344 -atomic_t fscache_n_op_gc;
42345 -atomic_t fscache_n_op_cancelled;
42346 -atomic_t fscache_n_op_rejected;
42347 -
42348 -atomic_t fscache_n_attr_changed;
42349 -atomic_t fscache_n_attr_changed_ok;
42350 -atomic_t fscache_n_attr_changed_nobufs;
42351 -atomic_t fscache_n_attr_changed_nomem;
42352 -atomic_t fscache_n_attr_changed_calls;
42353 -
42354 -atomic_t fscache_n_allocs;
42355 -atomic_t fscache_n_allocs_ok;
42356 -atomic_t fscache_n_allocs_wait;
42357 -atomic_t fscache_n_allocs_nobufs;
42358 -atomic_t fscache_n_allocs_intr;
42359 -atomic_t fscache_n_allocs_object_dead;
42360 -atomic_t fscache_n_alloc_ops;
42361 -atomic_t fscache_n_alloc_op_waits;
42362 -
42363 -atomic_t fscache_n_retrievals;
42364 -atomic_t fscache_n_retrievals_ok;
42365 -atomic_t fscache_n_retrievals_wait;
42366 -atomic_t fscache_n_retrievals_nodata;
42367 -atomic_t fscache_n_retrievals_nobufs;
42368 -atomic_t fscache_n_retrievals_intr;
42369 -atomic_t fscache_n_retrievals_nomem;
42370 -atomic_t fscache_n_retrievals_object_dead;
42371 -atomic_t fscache_n_retrieval_ops;
42372 -atomic_t fscache_n_retrieval_op_waits;
42373 -
42374 -atomic_t fscache_n_stores;
42375 -atomic_t fscache_n_stores_ok;
42376 -atomic_t fscache_n_stores_again;
42377 -atomic_t fscache_n_stores_nobufs;
42378 -atomic_t fscache_n_stores_oom;
42379 -atomic_t fscache_n_store_ops;
42380 -atomic_t fscache_n_store_calls;
42381 -atomic_t fscache_n_store_pages;
42382 -atomic_t fscache_n_store_radix_deletes;
42383 -atomic_t fscache_n_store_pages_over_limit;
42384 -
42385 -atomic_t fscache_n_store_vmscan_not_storing;
42386 -atomic_t fscache_n_store_vmscan_gone;
42387 -atomic_t fscache_n_store_vmscan_busy;
42388 -atomic_t fscache_n_store_vmscan_cancelled;
42389 -
42390 -atomic_t fscache_n_marks;
42391 -atomic_t fscache_n_uncaches;
42392 -
42393 -atomic_t fscache_n_acquires;
42394 -atomic_t fscache_n_acquires_null;
42395 -atomic_t fscache_n_acquires_no_cache;
42396 -atomic_t fscache_n_acquires_ok;
42397 -atomic_t fscache_n_acquires_nobufs;
42398 -atomic_t fscache_n_acquires_oom;
42399 -
42400 -atomic_t fscache_n_updates;
42401 -atomic_t fscache_n_updates_null;
42402 -atomic_t fscache_n_updates_run;
42403 -
42404 -atomic_t fscache_n_relinquishes;
42405 -atomic_t fscache_n_relinquishes_null;
42406 -atomic_t fscache_n_relinquishes_waitcrt;
42407 -atomic_t fscache_n_relinquishes_retire;
42408 -
42409 -atomic_t fscache_n_cookie_index;
42410 -atomic_t fscache_n_cookie_data;
42411 -atomic_t fscache_n_cookie_special;
42412 -
42413 -atomic_t fscache_n_object_alloc;
42414 -atomic_t fscache_n_object_no_alloc;
42415 -atomic_t fscache_n_object_lookups;
42416 -atomic_t fscache_n_object_lookups_negative;
42417 -atomic_t fscache_n_object_lookups_positive;
42418 -atomic_t fscache_n_object_lookups_timed_out;
42419 -atomic_t fscache_n_object_created;
42420 -atomic_t fscache_n_object_avail;
42421 -atomic_t fscache_n_object_dead;
42422 -
42423 -atomic_t fscache_n_checkaux_none;
42424 -atomic_t fscache_n_checkaux_okay;
42425 -atomic_t fscache_n_checkaux_update;
42426 -atomic_t fscache_n_checkaux_obsolete;
42427 +atomic_unchecked_t fscache_n_op_pend;
42428 +atomic_unchecked_t fscache_n_op_run;
42429 +atomic_unchecked_t fscache_n_op_enqueue;
42430 +atomic_unchecked_t fscache_n_op_requeue;
42431 +atomic_unchecked_t fscache_n_op_deferred_release;
42432 +atomic_unchecked_t fscache_n_op_release;
42433 +atomic_unchecked_t fscache_n_op_gc;
42434 +atomic_unchecked_t fscache_n_op_cancelled;
42435 +atomic_unchecked_t fscache_n_op_rejected;
42436 +
42437 +atomic_unchecked_t fscache_n_attr_changed;
42438 +atomic_unchecked_t fscache_n_attr_changed_ok;
42439 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
42440 +atomic_unchecked_t fscache_n_attr_changed_nomem;
42441 +atomic_unchecked_t fscache_n_attr_changed_calls;
42442 +
42443 +atomic_unchecked_t fscache_n_allocs;
42444 +atomic_unchecked_t fscache_n_allocs_ok;
42445 +atomic_unchecked_t fscache_n_allocs_wait;
42446 +atomic_unchecked_t fscache_n_allocs_nobufs;
42447 +atomic_unchecked_t fscache_n_allocs_intr;
42448 +atomic_unchecked_t fscache_n_allocs_object_dead;
42449 +atomic_unchecked_t fscache_n_alloc_ops;
42450 +atomic_unchecked_t fscache_n_alloc_op_waits;
42451 +
42452 +atomic_unchecked_t fscache_n_retrievals;
42453 +atomic_unchecked_t fscache_n_retrievals_ok;
42454 +atomic_unchecked_t fscache_n_retrievals_wait;
42455 +atomic_unchecked_t fscache_n_retrievals_nodata;
42456 +atomic_unchecked_t fscache_n_retrievals_nobufs;
42457 +atomic_unchecked_t fscache_n_retrievals_intr;
42458 +atomic_unchecked_t fscache_n_retrievals_nomem;
42459 +atomic_unchecked_t fscache_n_retrievals_object_dead;
42460 +atomic_unchecked_t fscache_n_retrieval_ops;
42461 +atomic_unchecked_t fscache_n_retrieval_op_waits;
42462 +
42463 +atomic_unchecked_t fscache_n_stores;
42464 +atomic_unchecked_t fscache_n_stores_ok;
42465 +atomic_unchecked_t fscache_n_stores_again;
42466 +atomic_unchecked_t fscache_n_stores_nobufs;
42467 +atomic_unchecked_t fscache_n_stores_oom;
42468 +atomic_unchecked_t fscache_n_store_ops;
42469 +atomic_unchecked_t fscache_n_store_calls;
42470 +atomic_unchecked_t fscache_n_store_pages;
42471 +atomic_unchecked_t fscache_n_store_radix_deletes;
42472 +atomic_unchecked_t fscache_n_store_pages_over_limit;
42473 +
42474 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
42475 +atomic_unchecked_t fscache_n_store_vmscan_gone;
42476 +atomic_unchecked_t fscache_n_store_vmscan_busy;
42477 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
42478 +
42479 +atomic_unchecked_t fscache_n_marks;
42480 +atomic_unchecked_t fscache_n_uncaches;
42481 +
42482 +atomic_unchecked_t fscache_n_acquires;
42483 +atomic_unchecked_t fscache_n_acquires_null;
42484 +atomic_unchecked_t fscache_n_acquires_no_cache;
42485 +atomic_unchecked_t fscache_n_acquires_ok;
42486 +atomic_unchecked_t fscache_n_acquires_nobufs;
42487 +atomic_unchecked_t fscache_n_acquires_oom;
42488 +
42489 +atomic_unchecked_t fscache_n_updates;
42490 +atomic_unchecked_t fscache_n_updates_null;
42491 +atomic_unchecked_t fscache_n_updates_run;
42492 +
42493 +atomic_unchecked_t fscache_n_relinquishes;
42494 +atomic_unchecked_t fscache_n_relinquishes_null;
42495 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
42496 +atomic_unchecked_t fscache_n_relinquishes_retire;
42497 +
42498 +atomic_unchecked_t fscache_n_cookie_index;
42499 +atomic_unchecked_t fscache_n_cookie_data;
42500 +atomic_unchecked_t fscache_n_cookie_special;
42501 +
42502 +atomic_unchecked_t fscache_n_object_alloc;
42503 +atomic_unchecked_t fscache_n_object_no_alloc;
42504 +atomic_unchecked_t fscache_n_object_lookups;
42505 +atomic_unchecked_t fscache_n_object_lookups_negative;
42506 +atomic_unchecked_t fscache_n_object_lookups_positive;
42507 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
42508 +atomic_unchecked_t fscache_n_object_created;
42509 +atomic_unchecked_t fscache_n_object_avail;
42510 +atomic_unchecked_t fscache_n_object_dead;
42511 +
42512 +atomic_unchecked_t fscache_n_checkaux_none;
42513 +atomic_unchecked_t fscache_n_checkaux_okay;
42514 +atomic_unchecked_t fscache_n_checkaux_update;
42515 +atomic_unchecked_t fscache_n_checkaux_obsolete;
42516
42517 atomic_t fscache_n_cop_alloc_object;
42518 atomic_t fscache_n_cop_lookup_object;
42519 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
42520 seq_puts(m, "FS-Cache statistics\n");
42521
42522 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
42523 - atomic_read(&fscache_n_cookie_index),
42524 - atomic_read(&fscache_n_cookie_data),
42525 - atomic_read(&fscache_n_cookie_special));
42526 + atomic_read_unchecked(&fscache_n_cookie_index),
42527 + atomic_read_unchecked(&fscache_n_cookie_data),
42528 + atomic_read_unchecked(&fscache_n_cookie_special));
42529
42530 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
42531 - atomic_read(&fscache_n_object_alloc),
42532 - atomic_read(&fscache_n_object_no_alloc),
42533 - atomic_read(&fscache_n_object_avail),
42534 - atomic_read(&fscache_n_object_dead));
42535 + atomic_read_unchecked(&fscache_n_object_alloc),
42536 + atomic_read_unchecked(&fscache_n_object_no_alloc),
42537 + atomic_read_unchecked(&fscache_n_object_avail),
42538 + atomic_read_unchecked(&fscache_n_object_dead));
42539 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
42540 - atomic_read(&fscache_n_checkaux_none),
42541 - atomic_read(&fscache_n_checkaux_okay),
42542 - atomic_read(&fscache_n_checkaux_update),
42543 - atomic_read(&fscache_n_checkaux_obsolete));
42544 + atomic_read_unchecked(&fscache_n_checkaux_none),
42545 + atomic_read_unchecked(&fscache_n_checkaux_okay),
42546 + atomic_read_unchecked(&fscache_n_checkaux_update),
42547 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
42548
42549 seq_printf(m, "Pages : mrk=%u unc=%u\n",
42550 - atomic_read(&fscache_n_marks),
42551 - atomic_read(&fscache_n_uncaches));
42552 + atomic_read_unchecked(&fscache_n_marks),
42553 + atomic_read_unchecked(&fscache_n_uncaches));
42554
42555 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
42556 " oom=%u\n",
42557 - atomic_read(&fscache_n_acquires),
42558 - atomic_read(&fscache_n_acquires_null),
42559 - atomic_read(&fscache_n_acquires_no_cache),
42560 - atomic_read(&fscache_n_acquires_ok),
42561 - atomic_read(&fscache_n_acquires_nobufs),
42562 - atomic_read(&fscache_n_acquires_oom));
42563 + atomic_read_unchecked(&fscache_n_acquires),
42564 + atomic_read_unchecked(&fscache_n_acquires_null),
42565 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
42566 + atomic_read_unchecked(&fscache_n_acquires_ok),
42567 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
42568 + atomic_read_unchecked(&fscache_n_acquires_oom));
42569
42570 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
42571 - atomic_read(&fscache_n_object_lookups),
42572 - atomic_read(&fscache_n_object_lookups_negative),
42573 - atomic_read(&fscache_n_object_lookups_positive),
42574 - atomic_read(&fscache_n_object_lookups_timed_out),
42575 - atomic_read(&fscache_n_object_created));
42576 + atomic_read_unchecked(&fscache_n_object_lookups),
42577 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
42578 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
42579 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
42580 + atomic_read_unchecked(&fscache_n_object_created));
42581
42582 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
42583 - atomic_read(&fscache_n_updates),
42584 - atomic_read(&fscache_n_updates_null),
42585 - atomic_read(&fscache_n_updates_run));
42586 + atomic_read_unchecked(&fscache_n_updates),
42587 + atomic_read_unchecked(&fscache_n_updates_null),
42588 + atomic_read_unchecked(&fscache_n_updates_run));
42589
42590 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
42591 - atomic_read(&fscache_n_relinquishes),
42592 - atomic_read(&fscache_n_relinquishes_null),
42593 - atomic_read(&fscache_n_relinquishes_waitcrt),
42594 - atomic_read(&fscache_n_relinquishes_retire));
42595 + atomic_read_unchecked(&fscache_n_relinquishes),
42596 + atomic_read_unchecked(&fscache_n_relinquishes_null),
42597 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
42598 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
42599
42600 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
42601 - atomic_read(&fscache_n_attr_changed),
42602 - atomic_read(&fscache_n_attr_changed_ok),
42603 - atomic_read(&fscache_n_attr_changed_nobufs),
42604 - atomic_read(&fscache_n_attr_changed_nomem),
42605 - atomic_read(&fscache_n_attr_changed_calls));
42606 + atomic_read_unchecked(&fscache_n_attr_changed),
42607 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
42608 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
42609 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
42610 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
42611
42612 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
42613 - atomic_read(&fscache_n_allocs),
42614 - atomic_read(&fscache_n_allocs_ok),
42615 - atomic_read(&fscache_n_allocs_wait),
42616 - atomic_read(&fscache_n_allocs_nobufs),
42617 - atomic_read(&fscache_n_allocs_intr));
42618 + atomic_read_unchecked(&fscache_n_allocs),
42619 + atomic_read_unchecked(&fscache_n_allocs_ok),
42620 + atomic_read_unchecked(&fscache_n_allocs_wait),
42621 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
42622 + atomic_read_unchecked(&fscache_n_allocs_intr));
42623 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
42624 - atomic_read(&fscache_n_alloc_ops),
42625 - atomic_read(&fscache_n_alloc_op_waits),
42626 - atomic_read(&fscache_n_allocs_object_dead));
42627 + atomic_read_unchecked(&fscache_n_alloc_ops),
42628 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
42629 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
42630
42631 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
42632 " int=%u oom=%u\n",
42633 - atomic_read(&fscache_n_retrievals),
42634 - atomic_read(&fscache_n_retrievals_ok),
42635 - atomic_read(&fscache_n_retrievals_wait),
42636 - atomic_read(&fscache_n_retrievals_nodata),
42637 - atomic_read(&fscache_n_retrievals_nobufs),
42638 - atomic_read(&fscache_n_retrievals_intr),
42639 - atomic_read(&fscache_n_retrievals_nomem));
42640 + atomic_read_unchecked(&fscache_n_retrievals),
42641 + atomic_read_unchecked(&fscache_n_retrievals_ok),
42642 + atomic_read_unchecked(&fscache_n_retrievals_wait),
42643 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
42644 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
42645 + atomic_read_unchecked(&fscache_n_retrievals_intr),
42646 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
42647 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
42648 - atomic_read(&fscache_n_retrieval_ops),
42649 - atomic_read(&fscache_n_retrieval_op_waits),
42650 - atomic_read(&fscache_n_retrievals_object_dead));
42651 + atomic_read_unchecked(&fscache_n_retrieval_ops),
42652 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
42653 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
42654
42655 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
42656 - atomic_read(&fscache_n_stores),
42657 - atomic_read(&fscache_n_stores_ok),
42658 - atomic_read(&fscache_n_stores_again),
42659 - atomic_read(&fscache_n_stores_nobufs),
42660 - atomic_read(&fscache_n_stores_oom));
42661 + atomic_read_unchecked(&fscache_n_stores),
42662 + atomic_read_unchecked(&fscache_n_stores_ok),
42663 + atomic_read_unchecked(&fscache_n_stores_again),
42664 + atomic_read_unchecked(&fscache_n_stores_nobufs),
42665 + atomic_read_unchecked(&fscache_n_stores_oom));
42666 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
42667 - atomic_read(&fscache_n_store_ops),
42668 - atomic_read(&fscache_n_store_calls),
42669 - atomic_read(&fscache_n_store_pages),
42670 - atomic_read(&fscache_n_store_radix_deletes),
42671 - atomic_read(&fscache_n_store_pages_over_limit));
42672 + atomic_read_unchecked(&fscache_n_store_ops),
42673 + atomic_read_unchecked(&fscache_n_store_calls),
42674 + atomic_read_unchecked(&fscache_n_store_pages),
42675 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
42676 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
42677
42678 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
42679 - atomic_read(&fscache_n_store_vmscan_not_storing),
42680 - atomic_read(&fscache_n_store_vmscan_gone),
42681 - atomic_read(&fscache_n_store_vmscan_busy),
42682 - atomic_read(&fscache_n_store_vmscan_cancelled));
42683 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
42684 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
42685 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
42686 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
42687
42688 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
42689 - atomic_read(&fscache_n_op_pend),
42690 - atomic_read(&fscache_n_op_run),
42691 - atomic_read(&fscache_n_op_enqueue),
42692 - atomic_read(&fscache_n_op_cancelled),
42693 - atomic_read(&fscache_n_op_rejected));
42694 + atomic_read_unchecked(&fscache_n_op_pend),
42695 + atomic_read_unchecked(&fscache_n_op_run),
42696 + atomic_read_unchecked(&fscache_n_op_enqueue),
42697 + atomic_read_unchecked(&fscache_n_op_cancelled),
42698 + atomic_read_unchecked(&fscache_n_op_rejected));
42699 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
42700 - atomic_read(&fscache_n_op_deferred_release),
42701 - atomic_read(&fscache_n_op_release),
42702 - atomic_read(&fscache_n_op_gc));
42703 + atomic_read_unchecked(&fscache_n_op_deferred_release),
42704 + atomic_read_unchecked(&fscache_n_op_release),
42705 + atomic_read_unchecked(&fscache_n_op_gc));
42706
42707 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
42708 atomic_read(&fscache_n_cop_alloc_object),
42709 diff -urNp linux-2.6.32.45/fs/fs_struct.c linux-2.6.32.45/fs/fs_struct.c
42710 --- linux-2.6.32.45/fs/fs_struct.c 2011-03-27 14:31:47.000000000 -0400
42711 +++ linux-2.6.32.45/fs/fs_struct.c 2011-04-17 15:56:46.000000000 -0400
42712 @@ -4,6 +4,7 @@
42713 #include <linux/path.h>
42714 #include <linux/slab.h>
42715 #include <linux/fs_struct.h>
42716 +#include <linux/grsecurity.h>
42717
42718 /*
42719 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
42720 @@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, s
42721 old_root = fs->root;
42722 fs->root = *path;
42723 path_get(path);
42724 + gr_set_chroot_entries(current, path);
42725 write_unlock(&fs->lock);
42726 if (old_root.dentry)
42727 path_put(&old_root);
42728 @@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_roo
42729 && fs->root.mnt == old_root->mnt) {
42730 path_get(new_root);
42731 fs->root = *new_root;
42732 + gr_set_chroot_entries(p, new_root);
42733 count++;
42734 }
42735 if (fs->pwd.dentry == old_root->dentry
42736 @@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
42737 task_lock(tsk);
42738 write_lock(&fs->lock);
42739 tsk->fs = NULL;
42740 - kill = !--fs->users;
42741 + gr_clear_chroot_entries(tsk);
42742 + kill = !atomic_dec_return(&fs->users);
42743 write_unlock(&fs->lock);
42744 task_unlock(tsk);
42745 if (kill)
42746 @@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct
42747 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
42748 /* We don't need to lock fs - think why ;-) */
42749 if (fs) {
42750 - fs->users = 1;
42751 + atomic_set(&fs->users, 1);
42752 fs->in_exec = 0;
42753 rwlock_init(&fs->lock);
42754 fs->umask = old->umask;
42755 @@ -127,8 +131,9 @@ int unshare_fs_struct(void)
42756
42757 task_lock(current);
42758 write_lock(&fs->lock);
42759 - kill = !--fs->users;
42760 + kill = !atomic_dec_return(&fs->users);
42761 current->fs = new_fs;
42762 + gr_set_chroot_entries(current, &new_fs->root);
42763 write_unlock(&fs->lock);
42764 task_unlock(current);
42765
42766 @@ -147,7 +152,7 @@ EXPORT_SYMBOL(current_umask);
42767
42768 /* to be mentioned only in INIT_TASK */
42769 struct fs_struct init_fs = {
42770 - .users = 1,
42771 + .users = ATOMIC_INIT(1),
42772 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
42773 .umask = 0022,
42774 };
42775 @@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
42776 task_lock(current);
42777
42778 write_lock(&init_fs.lock);
42779 - init_fs.users++;
42780 + atomic_inc(&init_fs.users);
42781 write_unlock(&init_fs.lock);
42782
42783 write_lock(&fs->lock);
42784 current->fs = &init_fs;
42785 - kill = !--fs->users;
42786 + gr_set_chroot_entries(current, &current->fs->root);
42787 + kill = !atomic_dec_return(&fs->users);
42788 write_unlock(&fs->lock);
42789
42790 task_unlock(current);
42791 diff -urNp linux-2.6.32.45/fs/fuse/cuse.c linux-2.6.32.45/fs/fuse/cuse.c
42792 --- linux-2.6.32.45/fs/fuse/cuse.c 2011-03-27 14:31:47.000000000 -0400
42793 +++ linux-2.6.32.45/fs/fuse/cuse.c 2011-08-05 20:33:55.000000000 -0400
42794 @@ -576,10 +576,12 @@ static int __init cuse_init(void)
42795 INIT_LIST_HEAD(&cuse_conntbl[i]);
42796
42797 /* inherit and extend fuse_dev_operations */
42798 - cuse_channel_fops = fuse_dev_operations;
42799 - cuse_channel_fops.owner = THIS_MODULE;
42800 - cuse_channel_fops.open = cuse_channel_open;
42801 - cuse_channel_fops.release = cuse_channel_release;
42802 + pax_open_kernel();
42803 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
42804 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
42805 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
42806 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
42807 + pax_close_kernel();
42808
42809 cuse_class = class_create(THIS_MODULE, "cuse");
42810 if (IS_ERR(cuse_class))
42811 diff -urNp linux-2.6.32.45/fs/fuse/dev.c linux-2.6.32.45/fs/fuse/dev.c
42812 --- linux-2.6.32.45/fs/fuse/dev.c 2011-03-27 14:31:47.000000000 -0400
42813 +++ linux-2.6.32.45/fs/fuse/dev.c 2011-08-05 20:33:55.000000000 -0400
42814 @@ -885,7 +885,7 @@ static int fuse_notify_inval_entry(struc
42815 {
42816 struct fuse_notify_inval_entry_out outarg;
42817 int err = -EINVAL;
42818 - char buf[FUSE_NAME_MAX+1];
42819 + char *buf = NULL;
42820 struct qstr name;
42821
42822 if (size < sizeof(outarg))
42823 @@ -899,6 +899,11 @@ static int fuse_notify_inval_entry(struc
42824 if (outarg.namelen > FUSE_NAME_MAX)
42825 goto err;
42826
42827 + err = -ENOMEM;
42828 + buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
42829 + if (!buf)
42830 + goto err;
42831 +
42832 name.name = buf;
42833 name.len = outarg.namelen;
42834 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
42835 @@ -910,17 +915,15 @@ static int fuse_notify_inval_entry(struc
42836
42837 down_read(&fc->killsb);
42838 err = -ENOENT;
42839 - if (!fc->sb)
42840 - goto err_unlock;
42841 -
42842 - err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
42843 -
42844 -err_unlock:
42845 + if (fc->sb)
42846 + err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
42847 up_read(&fc->killsb);
42848 + kfree(buf);
42849 return err;
42850
42851 err:
42852 fuse_copy_finish(cs);
42853 + kfree(buf);
42854 return err;
42855 }
42856
42857 diff -urNp linux-2.6.32.45/fs/fuse/dir.c linux-2.6.32.45/fs/fuse/dir.c
42858 --- linux-2.6.32.45/fs/fuse/dir.c 2011-03-27 14:31:47.000000000 -0400
42859 +++ linux-2.6.32.45/fs/fuse/dir.c 2011-04-17 15:56:46.000000000 -0400
42860 @@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *de
42861 return link;
42862 }
42863
42864 -static void free_link(char *link)
42865 +static void free_link(const char *link)
42866 {
42867 if (!IS_ERR(link))
42868 free_page((unsigned long) link);
42869 diff -urNp linux-2.6.32.45/fs/gfs2/ops_inode.c linux-2.6.32.45/fs/gfs2/ops_inode.c
42870 --- linux-2.6.32.45/fs/gfs2/ops_inode.c 2011-03-27 14:31:47.000000000 -0400
42871 +++ linux-2.6.32.45/fs/gfs2/ops_inode.c 2011-05-16 21:46:57.000000000 -0400
42872 @@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odi
42873 unsigned int x;
42874 int error;
42875
42876 + pax_track_stack();
42877 +
42878 if (ndentry->d_inode) {
42879 nip = GFS2_I(ndentry->d_inode);
42880 if (ip == nip)
42881 diff -urNp linux-2.6.32.45/fs/gfs2/sys.c linux-2.6.32.45/fs/gfs2/sys.c
42882 --- linux-2.6.32.45/fs/gfs2/sys.c 2011-03-27 14:31:47.000000000 -0400
42883 +++ linux-2.6.32.45/fs/gfs2/sys.c 2011-04-17 15:56:46.000000000 -0400
42884 @@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct ko
42885 return a->store ? a->store(sdp, buf, len) : len;
42886 }
42887
42888 -static struct sysfs_ops gfs2_attr_ops = {
42889 +static const struct sysfs_ops gfs2_attr_ops = {
42890 .show = gfs2_attr_show,
42891 .store = gfs2_attr_store,
42892 };
42893 @@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset
42894 return 0;
42895 }
42896
42897 -static struct kset_uevent_ops gfs2_uevent_ops = {
42898 +static const struct kset_uevent_ops gfs2_uevent_ops = {
42899 .uevent = gfs2_uevent,
42900 };
42901
42902 diff -urNp linux-2.6.32.45/fs/hfsplus/catalog.c linux-2.6.32.45/fs/hfsplus/catalog.c
42903 --- linux-2.6.32.45/fs/hfsplus/catalog.c 2011-03-27 14:31:47.000000000 -0400
42904 +++ linux-2.6.32.45/fs/hfsplus/catalog.c 2011-05-16 21:46:57.000000000 -0400
42905 @@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block
42906 int err;
42907 u16 type;
42908
42909 + pax_track_stack();
42910 +
42911 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
42912 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
42913 if (err)
42914 @@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct
42915 int entry_size;
42916 int err;
42917
42918 + pax_track_stack();
42919 +
42920 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
42921 sb = dir->i_sb;
42922 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
42923 @@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
42924 int entry_size, type;
42925 int err = 0;
42926
42927 + pax_track_stack();
42928 +
42929 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
42930 dst_dir->i_ino, dst_name->name);
42931 sb = src_dir->i_sb;
42932 diff -urNp linux-2.6.32.45/fs/hfsplus/dir.c linux-2.6.32.45/fs/hfsplus/dir.c
42933 --- linux-2.6.32.45/fs/hfsplus/dir.c 2011-03-27 14:31:47.000000000 -0400
42934 +++ linux-2.6.32.45/fs/hfsplus/dir.c 2011-05-16 21:46:57.000000000 -0400
42935 @@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *
42936 struct hfsplus_readdir_data *rd;
42937 u16 type;
42938
42939 + pax_track_stack();
42940 +
42941 if (filp->f_pos >= inode->i_size)
42942 return 0;
42943
42944 diff -urNp linux-2.6.32.45/fs/hfsplus/inode.c linux-2.6.32.45/fs/hfsplus/inode.c
42945 --- linux-2.6.32.45/fs/hfsplus/inode.c 2011-03-27 14:31:47.000000000 -0400
42946 +++ linux-2.6.32.45/fs/hfsplus/inode.c 2011-05-16 21:46:57.000000000 -0400
42947 @@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode
42948 int res = 0;
42949 u16 type;
42950
42951 + pax_track_stack();
42952 +
42953 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
42954
42955 HFSPLUS_I(inode).dev = 0;
42956 @@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode
42957 struct hfs_find_data fd;
42958 hfsplus_cat_entry entry;
42959
42960 + pax_track_stack();
42961 +
42962 if (HFSPLUS_IS_RSRC(inode))
42963 main_inode = HFSPLUS_I(inode).rsrc_inode;
42964
42965 diff -urNp linux-2.6.32.45/fs/hfsplus/ioctl.c linux-2.6.32.45/fs/hfsplus/ioctl.c
42966 --- linux-2.6.32.45/fs/hfsplus/ioctl.c 2011-03-27 14:31:47.000000000 -0400
42967 +++ linux-2.6.32.45/fs/hfsplus/ioctl.c 2011-05-16 21:46:57.000000000 -0400
42968 @@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dent
42969 struct hfsplus_cat_file *file;
42970 int res;
42971
42972 + pax_track_stack();
42973 +
42974 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
42975 return -EOPNOTSUPP;
42976
42977 @@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *
42978 struct hfsplus_cat_file *file;
42979 ssize_t res = 0;
42980
42981 + pax_track_stack();
42982 +
42983 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
42984 return -EOPNOTSUPP;
42985
42986 diff -urNp linux-2.6.32.45/fs/hfsplus/super.c linux-2.6.32.45/fs/hfsplus/super.c
42987 --- linux-2.6.32.45/fs/hfsplus/super.c 2011-03-27 14:31:47.000000000 -0400
42988 +++ linux-2.6.32.45/fs/hfsplus/super.c 2011-05-16 21:46:57.000000000 -0400
42989 @@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct sup
42990 struct nls_table *nls = NULL;
42991 int err = -EINVAL;
42992
42993 + pax_track_stack();
42994 +
42995 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
42996 if (!sbi)
42997 return -ENOMEM;
42998 diff -urNp linux-2.6.32.45/fs/hugetlbfs/inode.c linux-2.6.32.45/fs/hugetlbfs/inode.c
42999 --- linux-2.6.32.45/fs/hugetlbfs/inode.c 2011-03-27 14:31:47.000000000 -0400
43000 +++ linux-2.6.32.45/fs/hugetlbfs/inode.c 2011-04-17 15:56:46.000000000 -0400
43001 @@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs
43002 .kill_sb = kill_litter_super,
43003 };
43004
43005 -static struct vfsmount *hugetlbfs_vfsmount;
43006 +struct vfsmount *hugetlbfs_vfsmount;
43007
43008 static int can_do_hugetlb_shm(void)
43009 {
43010 diff -urNp linux-2.6.32.45/fs/ioctl.c linux-2.6.32.45/fs/ioctl.c
43011 --- linux-2.6.32.45/fs/ioctl.c 2011-03-27 14:31:47.000000000 -0400
43012 +++ linux-2.6.32.45/fs/ioctl.c 2011-04-17 15:56:46.000000000 -0400
43013 @@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiema
43014 u64 phys, u64 len, u32 flags)
43015 {
43016 struct fiemap_extent extent;
43017 - struct fiemap_extent *dest = fieinfo->fi_extents_start;
43018 + struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
43019
43020 /* only count the extents */
43021 if (fieinfo->fi_extents_max == 0) {
43022 @@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *fil
43023
43024 fieinfo.fi_flags = fiemap.fm_flags;
43025 fieinfo.fi_extents_max = fiemap.fm_extent_count;
43026 - fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
43027 + fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
43028
43029 if (fiemap.fm_extent_count != 0 &&
43030 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
43031 @@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *fil
43032 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
43033 fiemap.fm_flags = fieinfo.fi_flags;
43034 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
43035 - if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
43036 + if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
43037 error = -EFAULT;
43038
43039 return error;
43040 diff -urNp linux-2.6.32.45/fs/jbd/checkpoint.c linux-2.6.32.45/fs/jbd/checkpoint.c
43041 --- linux-2.6.32.45/fs/jbd/checkpoint.c 2011-03-27 14:31:47.000000000 -0400
43042 +++ linux-2.6.32.45/fs/jbd/checkpoint.c 2011-05-16 21:46:57.000000000 -0400
43043 @@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal
43044 tid_t this_tid;
43045 int result;
43046
43047 + pax_track_stack();
43048 +
43049 jbd_debug(1, "Start checkpoint\n");
43050
43051 /*
43052 diff -urNp linux-2.6.32.45/fs/jffs2/compr_rtime.c linux-2.6.32.45/fs/jffs2/compr_rtime.c
43053 --- linux-2.6.32.45/fs/jffs2/compr_rtime.c 2011-03-27 14:31:47.000000000 -0400
43054 +++ linux-2.6.32.45/fs/jffs2/compr_rtime.c 2011-05-16 21:46:57.000000000 -0400
43055 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
43056 int outpos = 0;
43057 int pos=0;
43058
43059 + pax_track_stack();
43060 +
43061 memset(positions,0,sizeof(positions));
43062
43063 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
43064 @@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsign
43065 int outpos = 0;
43066 int pos=0;
43067
43068 + pax_track_stack();
43069 +
43070 memset(positions,0,sizeof(positions));
43071
43072 while (outpos<destlen) {
43073 diff -urNp linux-2.6.32.45/fs/jffs2/compr_rubin.c linux-2.6.32.45/fs/jffs2/compr_rubin.c
43074 --- linux-2.6.32.45/fs/jffs2/compr_rubin.c 2011-03-27 14:31:47.000000000 -0400
43075 +++ linux-2.6.32.45/fs/jffs2/compr_rubin.c 2011-05-16 21:46:57.000000000 -0400
43076 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
43077 int ret;
43078 uint32_t mysrclen, mydstlen;
43079
43080 + pax_track_stack();
43081 +
43082 mysrclen = *sourcelen;
43083 mydstlen = *dstlen - 8;
43084
43085 diff -urNp linux-2.6.32.45/fs/jffs2/erase.c linux-2.6.32.45/fs/jffs2/erase.c
43086 --- linux-2.6.32.45/fs/jffs2/erase.c 2011-03-27 14:31:47.000000000 -0400
43087 +++ linux-2.6.32.45/fs/jffs2/erase.c 2011-04-17 15:56:46.000000000 -0400
43088 @@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(stru
43089 struct jffs2_unknown_node marker = {
43090 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
43091 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
43092 - .totlen = cpu_to_je32(c->cleanmarker_size)
43093 + .totlen = cpu_to_je32(c->cleanmarker_size),
43094 + .hdr_crc = cpu_to_je32(0)
43095 };
43096
43097 jffs2_prealloc_raw_node_refs(c, jeb, 1);
43098 diff -urNp linux-2.6.32.45/fs/jffs2/wbuf.c linux-2.6.32.45/fs/jffs2/wbuf.c
43099 --- linux-2.6.32.45/fs/jffs2/wbuf.c 2011-03-27 14:31:47.000000000 -0400
43100 +++ linux-2.6.32.45/fs/jffs2/wbuf.c 2011-04-17 15:56:46.000000000 -0400
43101 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
43102 {
43103 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
43104 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
43105 - .totlen = constant_cpu_to_je32(8)
43106 + .totlen = constant_cpu_to_je32(8),
43107 + .hdr_crc = constant_cpu_to_je32(0)
43108 };
43109
43110 /*
43111 diff -urNp linux-2.6.32.45/fs/jffs2/xattr.c linux-2.6.32.45/fs/jffs2/xattr.c
43112 --- linux-2.6.32.45/fs/jffs2/xattr.c 2011-03-27 14:31:47.000000000 -0400
43113 +++ linux-2.6.32.45/fs/jffs2/xattr.c 2011-05-16 21:46:57.000000000 -0400
43114 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
43115
43116 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
43117
43118 + pax_track_stack();
43119 +
43120 /* Phase.1 : Merge same xref */
43121 for (i=0; i < XREF_TMPHASH_SIZE; i++)
43122 xref_tmphash[i] = NULL;
43123 diff -urNp linux-2.6.32.45/fs/jfs/super.c linux-2.6.32.45/fs/jfs/super.c
43124 --- linux-2.6.32.45/fs/jfs/super.c 2011-03-27 14:31:47.000000000 -0400
43125 +++ linux-2.6.32.45/fs/jfs/super.c 2011-06-07 18:06:04.000000000 -0400
43126 @@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
43127
43128 jfs_inode_cachep =
43129 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
43130 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
43131 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
43132 init_once);
43133 if (jfs_inode_cachep == NULL)
43134 return -ENOMEM;
43135 diff -urNp linux-2.6.32.45/fs/Kconfig.binfmt linux-2.6.32.45/fs/Kconfig.binfmt
43136 --- linux-2.6.32.45/fs/Kconfig.binfmt 2011-03-27 14:31:47.000000000 -0400
43137 +++ linux-2.6.32.45/fs/Kconfig.binfmt 2011-04-17 15:56:46.000000000 -0400
43138 @@ -86,7 +86,7 @@ config HAVE_AOUT
43139
43140 config BINFMT_AOUT
43141 tristate "Kernel support for a.out and ECOFF binaries"
43142 - depends on HAVE_AOUT
43143 + depends on HAVE_AOUT && BROKEN
43144 ---help---
43145 A.out (Assembler.OUTput) is a set of formats for libraries and
43146 executables used in the earliest versions of UNIX. Linux used
43147 diff -urNp linux-2.6.32.45/fs/libfs.c linux-2.6.32.45/fs/libfs.c
43148 --- linux-2.6.32.45/fs/libfs.c 2011-03-27 14:31:47.000000000 -0400
43149 +++ linux-2.6.32.45/fs/libfs.c 2011-05-11 18:25:15.000000000 -0400
43150 @@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, v
43151
43152 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
43153 struct dentry *next;
43154 + char d_name[sizeof(next->d_iname)];
43155 + const unsigned char *name;
43156 +
43157 next = list_entry(p, struct dentry, d_u.d_child);
43158 if (d_unhashed(next) || !next->d_inode)
43159 continue;
43160
43161 spin_unlock(&dcache_lock);
43162 - if (filldir(dirent, next->d_name.name,
43163 + name = next->d_name.name;
43164 + if (name == next->d_iname) {
43165 + memcpy(d_name, name, next->d_name.len);
43166 + name = d_name;
43167 + }
43168 + if (filldir(dirent, name,
43169 next->d_name.len, filp->f_pos,
43170 next->d_inode->i_ino,
43171 dt_type(next->d_inode)) < 0)
43172 diff -urNp linux-2.6.32.45/fs/lockd/clntproc.c linux-2.6.32.45/fs/lockd/clntproc.c
43173 --- linux-2.6.32.45/fs/lockd/clntproc.c 2011-03-27 14:31:47.000000000 -0400
43174 +++ linux-2.6.32.45/fs/lockd/clntproc.c 2011-05-16 21:46:57.000000000 -0400
43175 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
43176 /*
43177 * Cookie counter for NLM requests
43178 */
43179 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
43180 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
43181
43182 void nlmclnt_next_cookie(struct nlm_cookie *c)
43183 {
43184 - u32 cookie = atomic_inc_return(&nlm_cookie);
43185 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
43186
43187 memcpy(c->data, &cookie, 4);
43188 c->len=4;
43189 @@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
43190 struct nlm_rqst reqst, *req;
43191 int status;
43192
43193 + pax_track_stack();
43194 +
43195 req = &reqst;
43196 memset(req, 0, sizeof(*req));
43197 locks_init_lock(&req->a_args.lock.fl);
43198 diff -urNp linux-2.6.32.45/fs/lockd/svc.c linux-2.6.32.45/fs/lockd/svc.c
43199 --- linux-2.6.32.45/fs/lockd/svc.c 2011-03-27 14:31:47.000000000 -0400
43200 +++ linux-2.6.32.45/fs/lockd/svc.c 2011-04-17 15:56:46.000000000 -0400
43201 @@ -43,7 +43,7 @@
43202
43203 static struct svc_program nlmsvc_program;
43204
43205 -struct nlmsvc_binding * nlmsvc_ops;
43206 +const struct nlmsvc_binding * nlmsvc_ops;
43207 EXPORT_SYMBOL_GPL(nlmsvc_ops);
43208
43209 static DEFINE_MUTEX(nlmsvc_mutex);
43210 diff -urNp linux-2.6.32.45/fs/locks.c linux-2.6.32.45/fs/locks.c
43211 --- linux-2.6.32.45/fs/locks.c 2011-03-27 14:31:47.000000000 -0400
43212 +++ linux-2.6.32.45/fs/locks.c 2011-07-06 19:47:11.000000000 -0400
43213 @@ -145,10 +145,28 @@ static LIST_HEAD(blocked_list);
43214
43215 static struct kmem_cache *filelock_cache __read_mostly;
43216
43217 +static void locks_init_lock_always(struct file_lock *fl)
43218 +{
43219 + fl->fl_next = NULL;
43220 + fl->fl_fasync = NULL;
43221 + fl->fl_owner = NULL;
43222 + fl->fl_pid = 0;
43223 + fl->fl_nspid = NULL;
43224 + fl->fl_file = NULL;
43225 + fl->fl_flags = 0;
43226 + fl->fl_type = 0;
43227 + fl->fl_start = fl->fl_end = 0;
43228 +}
43229 +
43230 /* Allocate an empty lock structure. */
43231 static struct file_lock *locks_alloc_lock(void)
43232 {
43233 - return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
43234 + struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
43235 +
43236 + if (fl)
43237 + locks_init_lock_always(fl);
43238 +
43239 + return fl;
43240 }
43241
43242 void locks_release_private(struct file_lock *fl)
43243 @@ -183,17 +201,9 @@ void locks_init_lock(struct file_lock *f
43244 INIT_LIST_HEAD(&fl->fl_link);
43245 INIT_LIST_HEAD(&fl->fl_block);
43246 init_waitqueue_head(&fl->fl_wait);
43247 - fl->fl_next = NULL;
43248 - fl->fl_fasync = NULL;
43249 - fl->fl_owner = NULL;
43250 - fl->fl_pid = 0;
43251 - fl->fl_nspid = NULL;
43252 - fl->fl_file = NULL;
43253 - fl->fl_flags = 0;
43254 - fl->fl_type = 0;
43255 - fl->fl_start = fl->fl_end = 0;
43256 fl->fl_ops = NULL;
43257 fl->fl_lmops = NULL;
43258 + locks_init_lock_always(fl);
43259 }
43260
43261 EXPORT_SYMBOL(locks_init_lock);
43262 @@ -2007,16 +2017,16 @@ void locks_remove_flock(struct file *fil
43263 return;
43264
43265 if (filp->f_op && filp->f_op->flock) {
43266 - struct file_lock fl = {
43267 + struct file_lock flock = {
43268 .fl_pid = current->tgid,
43269 .fl_file = filp,
43270 .fl_flags = FL_FLOCK,
43271 .fl_type = F_UNLCK,
43272 .fl_end = OFFSET_MAX,
43273 };
43274 - filp->f_op->flock(filp, F_SETLKW, &fl);
43275 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
43276 - fl.fl_ops->fl_release_private(&fl);
43277 + filp->f_op->flock(filp, F_SETLKW, &flock);
43278 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
43279 + flock.fl_ops->fl_release_private(&flock);
43280 }
43281
43282 lock_kernel();
43283 diff -urNp linux-2.6.32.45/fs/mbcache.c linux-2.6.32.45/fs/mbcache.c
43284 --- linux-2.6.32.45/fs/mbcache.c 2011-03-27 14:31:47.000000000 -0400
43285 +++ linux-2.6.32.45/fs/mbcache.c 2011-08-05 20:33:55.000000000 -0400
43286 @@ -266,9 +266,9 @@ mb_cache_create(const char *name, struct
43287 if (!cache)
43288 goto fail;
43289 cache->c_name = name;
43290 - cache->c_op.free = NULL;
43291 + *(void **)&cache->c_op.free = NULL;
43292 if (cache_op)
43293 - cache->c_op.free = cache_op->free;
43294 + *(void **)&cache->c_op.free = cache_op->free;
43295 atomic_set(&cache->c_entry_count, 0);
43296 cache->c_bucket_bits = bucket_bits;
43297 #ifdef MB_CACHE_INDEXES_COUNT
43298 diff -urNp linux-2.6.32.45/fs/namei.c linux-2.6.32.45/fs/namei.c
43299 --- linux-2.6.32.45/fs/namei.c 2011-03-27 14:31:47.000000000 -0400
43300 +++ linux-2.6.32.45/fs/namei.c 2011-05-16 21:46:57.000000000 -0400
43301 @@ -224,14 +224,6 @@ int generic_permission(struct inode *ino
43302 return ret;
43303
43304 /*
43305 - * Read/write DACs are always overridable.
43306 - * Executable DACs are overridable if at least one exec bit is set.
43307 - */
43308 - if (!(mask & MAY_EXEC) || execute_ok(inode))
43309 - if (capable(CAP_DAC_OVERRIDE))
43310 - return 0;
43311 -
43312 - /*
43313 * Searching includes executable on directories, else just read.
43314 */
43315 mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
43316 @@ -239,6 +231,14 @@ int generic_permission(struct inode *ino
43317 if (capable(CAP_DAC_READ_SEARCH))
43318 return 0;
43319
43320 + /*
43321 + * Read/write DACs are always overridable.
43322 + * Executable DACs are overridable if at least one exec bit is set.
43323 + */
43324 + if (!(mask & MAY_EXEC) || execute_ok(inode))
43325 + if (capable(CAP_DAC_OVERRIDE))
43326 + return 0;
43327 +
43328 return -EACCES;
43329 }
43330
43331 @@ -458,7 +458,8 @@ static int exec_permission_lite(struct i
43332 if (!ret)
43333 goto ok;
43334
43335 - if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
43336 + if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
43337 + capable(CAP_DAC_OVERRIDE))
43338 goto ok;
43339
43340 return ret;
43341 @@ -638,7 +639,7 @@ static __always_inline int __do_follow_l
43342 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
43343 error = PTR_ERR(cookie);
43344 if (!IS_ERR(cookie)) {
43345 - char *s = nd_get_link(nd);
43346 + const char *s = nd_get_link(nd);
43347 error = 0;
43348 if (s)
43349 error = __vfs_follow_link(nd, s);
43350 @@ -669,6 +670,13 @@ static inline int do_follow_link(struct
43351 err = security_inode_follow_link(path->dentry, nd);
43352 if (err)
43353 goto loop;
43354 +
43355 + if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
43356 + path->dentry->d_inode, path->dentry, nd->path.mnt)) {
43357 + err = -EACCES;
43358 + goto loop;
43359 + }
43360 +
43361 current->link_count++;
43362 current->total_link_count++;
43363 nd->depth++;
43364 @@ -1016,11 +1024,18 @@ return_reval:
43365 break;
43366 }
43367 return_base:
43368 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
43369 + path_put(&nd->path);
43370 + return -ENOENT;
43371 + }
43372 return 0;
43373 out_dput:
43374 path_put_conditional(&next, nd);
43375 break;
43376 }
43377 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
43378 + err = -ENOENT;
43379 +
43380 path_put(&nd->path);
43381 return_err:
43382 return err;
43383 @@ -1091,13 +1106,20 @@ static int do_path_lookup(int dfd, const
43384 int retval = path_init(dfd, name, flags, nd);
43385 if (!retval)
43386 retval = path_walk(name, nd);
43387 - if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
43388 - nd->path.dentry->d_inode))
43389 - audit_inode(name, nd->path.dentry);
43390 +
43391 + if (likely(!retval)) {
43392 + if (nd->path.dentry && nd->path.dentry->d_inode) {
43393 + if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
43394 + retval = -ENOENT;
43395 + if (!audit_dummy_context())
43396 + audit_inode(name, nd->path.dentry);
43397 + }
43398 + }
43399 if (nd->root.mnt) {
43400 path_put(&nd->root);
43401 nd->root.mnt = NULL;
43402 }
43403 +
43404 return retval;
43405 }
43406
43407 @@ -1576,6 +1598,20 @@ int may_open(struct path *path, int acc_
43408 if (error)
43409 goto err_out;
43410
43411 +
43412 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
43413 + error = -EPERM;
43414 + goto err_out;
43415 + }
43416 + if (gr_handle_rawio(inode)) {
43417 + error = -EPERM;
43418 + goto err_out;
43419 + }
43420 + if (!gr_acl_handle_open(dentry, path->mnt, flag)) {
43421 + error = -EACCES;
43422 + goto err_out;
43423 + }
43424 +
43425 if (flag & O_TRUNC) {
43426 error = get_write_access(inode);
43427 if (error)
43428 @@ -1621,12 +1657,19 @@ static int __open_namei_create(struct na
43429 int error;
43430 struct dentry *dir = nd->path.dentry;
43431
43432 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, nd->path.mnt, flag, mode)) {
43433 + error = -EACCES;
43434 + goto out_unlock;
43435 + }
43436 +
43437 if (!IS_POSIXACL(dir->d_inode))
43438 mode &= ~current_umask();
43439 error = security_path_mknod(&nd->path, path->dentry, mode, 0);
43440 if (error)
43441 goto out_unlock;
43442 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
43443 + if (!error)
43444 + gr_handle_create(path->dentry, nd->path.mnt);
43445 out_unlock:
43446 mutex_unlock(&dir->d_inode->i_mutex);
43447 dput(nd->path.dentry);
43448 @@ -1709,6 +1752,22 @@ struct file *do_filp_open(int dfd, const
43449 &nd, flag);
43450 if (error)
43451 return ERR_PTR(error);
43452 +
43453 + if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
43454 + error = -EPERM;
43455 + goto exit;
43456 + }
43457 +
43458 + if (gr_handle_rawio(nd.path.dentry->d_inode)) {
43459 + error = -EPERM;
43460 + goto exit;
43461 + }
43462 +
43463 + if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, flag)) {
43464 + error = -EACCES;
43465 + goto exit;
43466 + }
43467 +
43468 goto ok;
43469 }
43470
43471 @@ -1795,6 +1854,14 @@ do_last:
43472 /*
43473 * It already exists.
43474 */
43475 +
43476 + /* only check if O_CREAT is specified, all other checks need
43477 + to go into may_open */
43478 + if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
43479 + error = -EACCES;
43480 + goto exit_mutex_unlock;
43481 + }
43482 +
43483 mutex_unlock(&dir->d_inode->i_mutex);
43484 audit_inode(pathname, path.dentry);
43485
43486 @@ -1887,6 +1954,13 @@ do_link:
43487 error = security_inode_follow_link(path.dentry, &nd);
43488 if (error)
43489 goto exit_dput;
43490 +
43491 + if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
43492 + path.dentry, nd.path.mnt)) {
43493 + error = -EACCES;
43494 + goto exit_dput;
43495 + }
43496 +
43497 error = __do_follow_link(&path, &nd);
43498 if (error) {
43499 /* Does someone understand code flow here? Or it is only
43500 @@ -2061,6 +2135,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
43501 error = may_mknod(mode);
43502 if (error)
43503 goto out_dput;
43504 +
43505 + if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
43506 + error = -EPERM;
43507 + goto out_dput;
43508 + }
43509 +
43510 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
43511 + error = -EACCES;
43512 + goto out_dput;
43513 + }
43514 +
43515 error = mnt_want_write(nd.path.mnt);
43516 if (error)
43517 goto out_dput;
43518 @@ -2081,6 +2166,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
43519 }
43520 out_drop_write:
43521 mnt_drop_write(nd.path.mnt);
43522 +
43523 + if (!error)
43524 + gr_handle_create(dentry, nd.path.mnt);
43525 out_dput:
43526 dput(dentry);
43527 out_unlock:
43528 @@ -2134,6 +2222,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
43529 if (IS_ERR(dentry))
43530 goto out_unlock;
43531
43532 + if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
43533 + error = -EACCES;
43534 + goto out_dput;
43535 + }
43536 +
43537 if (!IS_POSIXACL(nd.path.dentry->d_inode))
43538 mode &= ~current_umask();
43539 error = mnt_want_write(nd.path.mnt);
43540 @@ -2145,6 +2238,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
43541 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
43542 out_drop_write:
43543 mnt_drop_write(nd.path.mnt);
43544 +
43545 + if (!error)
43546 + gr_handle_create(dentry, nd.path.mnt);
43547 +
43548 out_dput:
43549 dput(dentry);
43550 out_unlock:
43551 @@ -2226,6 +2323,8 @@ static long do_rmdir(int dfd, const char
43552 char * name;
43553 struct dentry *dentry;
43554 struct nameidata nd;
43555 + ino_t saved_ino = 0;
43556 + dev_t saved_dev = 0;
43557
43558 error = user_path_parent(dfd, pathname, &nd, &name);
43559 if (error)
43560 @@ -2250,6 +2349,19 @@ static long do_rmdir(int dfd, const char
43561 error = PTR_ERR(dentry);
43562 if (IS_ERR(dentry))
43563 goto exit2;
43564 +
43565 + if (dentry->d_inode != NULL) {
43566 + if (dentry->d_inode->i_nlink <= 1) {
43567 + saved_ino = dentry->d_inode->i_ino;
43568 + saved_dev = gr_get_dev_from_dentry(dentry);
43569 + }
43570 +
43571 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
43572 + error = -EACCES;
43573 + goto exit3;
43574 + }
43575 + }
43576 +
43577 error = mnt_want_write(nd.path.mnt);
43578 if (error)
43579 goto exit3;
43580 @@ -2257,6 +2369,8 @@ static long do_rmdir(int dfd, const char
43581 if (error)
43582 goto exit4;
43583 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
43584 + if (!error && (saved_dev || saved_ino))
43585 + gr_handle_delete(saved_ino, saved_dev);
43586 exit4:
43587 mnt_drop_write(nd.path.mnt);
43588 exit3:
43589 @@ -2318,6 +2432,8 @@ static long do_unlinkat(int dfd, const c
43590 struct dentry *dentry;
43591 struct nameidata nd;
43592 struct inode *inode = NULL;
43593 + ino_t saved_ino = 0;
43594 + dev_t saved_dev = 0;
43595
43596 error = user_path_parent(dfd, pathname, &nd, &name);
43597 if (error)
43598 @@ -2337,8 +2453,19 @@ static long do_unlinkat(int dfd, const c
43599 if (nd.last.name[nd.last.len])
43600 goto slashes;
43601 inode = dentry->d_inode;
43602 - if (inode)
43603 + if (inode) {
43604 + if (inode->i_nlink <= 1) {
43605 + saved_ino = inode->i_ino;
43606 + saved_dev = gr_get_dev_from_dentry(dentry);
43607 + }
43608 +
43609 atomic_inc(&inode->i_count);
43610 +
43611 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
43612 + error = -EACCES;
43613 + goto exit2;
43614 + }
43615 + }
43616 error = mnt_want_write(nd.path.mnt);
43617 if (error)
43618 goto exit2;
43619 @@ -2346,6 +2473,8 @@ static long do_unlinkat(int dfd, const c
43620 if (error)
43621 goto exit3;
43622 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
43623 + if (!error && (saved_ino || saved_dev))
43624 + gr_handle_delete(saved_ino, saved_dev);
43625 exit3:
43626 mnt_drop_write(nd.path.mnt);
43627 exit2:
43628 @@ -2424,6 +2553,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
43629 if (IS_ERR(dentry))
43630 goto out_unlock;
43631
43632 + if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
43633 + error = -EACCES;
43634 + goto out_dput;
43635 + }
43636 +
43637 error = mnt_want_write(nd.path.mnt);
43638 if (error)
43639 goto out_dput;
43640 @@ -2431,6 +2565,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
43641 if (error)
43642 goto out_drop_write;
43643 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
43644 + if (!error)
43645 + gr_handle_create(dentry, nd.path.mnt);
43646 out_drop_write:
43647 mnt_drop_write(nd.path.mnt);
43648 out_dput:
43649 @@ -2524,6 +2660,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
43650 error = PTR_ERR(new_dentry);
43651 if (IS_ERR(new_dentry))
43652 goto out_unlock;
43653 +
43654 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
43655 + old_path.dentry->d_inode,
43656 + old_path.dentry->d_inode->i_mode, to)) {
43657 + error = -EACCES;
43658 + goto out_dput;
43659 + }
43660 +
43661 + if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
43662 + old_path.dentry, old_path.mnt, to)) {
43663 + error = -EACCES;
43664 + goto out_dput;
43665 + }
43666 +
43667 error = mnt_want_write(nd.path.mnt);
43668 if (error)
43669 goto out_dput;
43670 @@ -2531,6 +2681,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
43671 if (error)
43672 goto out_drop_write;
43673 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
43674 + if (!error)
43675 + gr_handle_create(new_dentry, nd.path.mnt);
43676 out_drop_write:
43677 mnt_drop_write(nd.path.mnt);
43678 out_dput:
43679 @@ -2708,6 +2860,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
43680 char *to;
43681 int error;
43682
43683 + pax_track_stack();
43684 +
43685 error = user_path_parent(olddfd, oldname, &oldnd, &from);
43686 if (error)
43687 goto exit;
43688 @@ -2764,6 +2918,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
43689 if (new_dentry == trap)
43690 goto exit5;
43691
43692 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
43693 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
43694 + to);
43695 + if (error)
43696 + goto exit5;
43697 +
43698 error = mnt_want_write(oldnd.path.mnt);
43699 if (error)
43700 goto exit5;
43701 @@ -2773,6 +2933,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
43702 goto exit6;
43703 error = vfs_rename(old_dir->d_inode, old_dentry,
43704 new_dir->d_inode, new_dentry);
43705 + if (!error)
43706 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
43707 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
43708 exit6:
43709 mnt_drop_write(oldnd.path.mnt);
43710 exit5:
43711 @@ -2798,6 +2961,8 @@ SYSCALL_DEFINE2(rename, const char __use
43712
43713 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
43714 {
43715 + char tmpbuf[64];
43716 + const char *newlink;
43717 int len;
43718
43719 len = PTR_ERR(link);
43720 @@ -2807,7 +2972,14 @@ int vfs_readlink(struct dentry *dentry,
43721 len = strlen(link);
43722 if (len > (unsigned) buflen)
43723 len = buflen;
43724 - if (copy_to_user(buffer, link, len))
43725 +
43726 + if (len < sizeof(tmpbuf)) {
43727 + memcpy(tmpbuf, link, len);
43728 + newlink = tmpbuf;
43729 + } else
43730 + newlink = link;
43731 +
43732 + if (copy_to_user(buffer, newlink, len))
43733 len = -EFAULT;
43734 out:
43735 return len;
43736 diff -urNp linux-2.6.32.45/fs/namespace.c linux-2.6.32.45/fs/namespace.c
43737 --- linux-2.6.32.45/fs/namespace.c 2011-03-27 14:31:47.000000000 -0400
43738 +++ linux-2.6.32.45/fs/namespace.c 2011-04-17 15:56:46.000000000 -0400
43739 @@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mn
43740 if (!(sb->s_flags & MS_RDONLY))
43741 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
43742 up_write(&sb->s_umount);
43743 +
43744 + gr_log_remount(mnt->mnt_devname, retval);
43745 +
43746 return retval;
43747 }
43748
43749 @@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mn
43750 security_sb_umount_busy(mnt);
43751 up_write(&namespace_sem);
43752 release_mounts(&umount_list);
43753 +
43754 + gr_log_unmount(mnt->mnt_devname, retval);
43755 +
43756 return retval;
43757 }
43758
43759 @@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_
43760 if (retval)
43761 goto dput_out;
43762
43763 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
43764 + retval = -EPERM;
43765 + goto dput_out;
43766 + }
43767 +
43768 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
43769 + retval = -EPERM;
43770 + goto dput_out;
43771 + }
43772 +
43773 if (flags & MS_REMOUNT)
43774 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
43775 data_page);
43776 @@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_
43777 dev_name, data_page);
43778 dput_out:
43779 path_put(&path);
43780 +
43781 + gr_log_mount(dev_name, dir_name, retval);
43782 +
43783 return retval;
43784 }
43785
43786 @@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char _
43787 goto out1;
43788 }
43789
43790 + if (gr_handle_chroot_pivot()) {
43791 + error = -EPERM;
43792 + path_put(&old);
43793 + goto out1;
43794 + }
43795 +
43796 read_lock(&current->fs->lock);
43797 root = current->fs->root;
43798 path_get(&current->fs->root);
43799 diff -urNp linux-2.6.32.45/fs/ncpfs/dir.c linux-2.6.32.45/fs/ncpfs/dir.c
43800 --- linux-2.6.32.45/fs/ncpfs/dir.c 2011-03-27 14:31:47.000000000 -0400
43801 +++ linux-2.6.32.45/fs/ncpfs/dir.c 2011-05-16 21:46:57.000000000 -0400
43802 @@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *den
43803 int res, val = 0, len;
43804 __u8 __name[NCP_MAXPATHLEN + 1];
43805
43806 + pax_track_stack();
43807 +
43808 parent = dget_parent(dentry);
43809 dir = parent->d_inode;
43810
43811 @@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct
43812 int error, res, len;
43813 __u8 __name[NCP_MAXPATHLEN + 1];
43814
43815 + pax_track_stack();
43816 +
43817 lock_kernel();
43818 error = -EIO;
43819 if (!ncp_conn_valid(server))
43820 @@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, st
43821 int error, result, len;
43822 int opmode;
43823 __u8 __name[NCP_MAXPATHLEN + 1];
43824 -
43825 +
43826 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
43827 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
43828
43829 + pax_track_stack();
43830 +
43831 error = -EIO;
43832 lock_kernel();
43833 if (!ncp_conn_valid(server))
43834 @@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir,
43835 int error, len;
43836 __u8 __name[NCP_MAXPATHLEN + 1];
43837
43838 + pax_track_stack();
43839 +
43840 DPRINTK("ncp_mkdir: making %s/%s\n",
43841 dentry->d_parent->d_name.name, dentry->d_name.name);
43842
43843 @@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir,
43844 if (!ncp_conn_valid(server))
43845 goto out;
43846
43847 + pax_track_stack();
43848 +
43849 ncp_age_dentry(server, dentry);
43850 len = sizeof(__name);
43851 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
43852 @@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_
43853 int old_len, new_len;
43854 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
43855
43856 + pax_track_stack();
43857 +
43858 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
43859 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
43860 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
43861 diff -urNp linux-2.6.32.45/fs/ncpfs/inode.c linux-2.6.32.45/fs/ncpfs/inode.c
43862 --- linux-2.6.32.45/fs/ncpfs/inode.c 2011-03-27 14:31:47.000000000 -0400
43863 +++ linux-2.6.32.45/fs/ncpfs/inode.c 2011-05-16 21:46:57.000000000 -0400
43864 @@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_b
43865 #endif
43866 struct ncp_entry_info finfo;
43867
43868 + pax_track_stack();
43869 +
43870 data.wdog_pid = NULL;
43871 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
43872 if (!server)
43873 diff -urNp linux-2.6.32.45/fs/nfs/inode.c linux-2.6.32.45/fs/nfs/inode.c
43874 --- linux-2.6.32.45/fs/nfs/inode.c 2011-05-10 22:12:01.000000000 -0400
43875 +++ linux-2.6.32.45/fs/nfs/inode.c 2011-07-06 19:53:33.000000000 -0400
43876 @@ -156,7 +156,7 @@ static void nfs_zap_caches_locked(struct
43877 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
43878 nfsi->attrtimeo_timestamp = jiffies;
43879
43880 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
43881 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
43882 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
43883 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
43884 else
43885 @@ -973,16 +973,16 @@ static int nfs_size_need_update(const st
43886 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
43887 }
43888
43889 -static atomic_long_t nfs_attr_generation_counter;
43890 +static atomic_long_unchecked_t nfs_attr_generation_counter;
43891
43892 static unsigned long nfs_read_attr_generation_counter(void)
43893 {
43894 - return atomic_long_read(&nfs_attr_generation_counter);
43895 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
43896 }
43897
43898 unsigned long nfs_inc_attr_generation_counter(void)
43899 {
43900 - return atomic_long_inc_return(&nfs_attr_generation_counter);
43901 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
43902 }
43903
43904 void nfs_fattr_init(struct nfs_fattr *fattr)
43905 diff -urNp linux-2.6.32.45/fs/nfsd/lockd.c linux-2.6.32.45/fs/nfsd/lockd.c
43906 --- linux-2.6.32.45/fs/nfsd/lockd.c 2011-04-17 17:00:52.000000000 -0400
43907 +++ linux-2.6.32.45/fs/nfsd/lockd.c 2011-04-17 17:03:15.000000000 -0400
43908 @@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
43909 fput(filp);
43910 }
43911
43912 -static struct nlmsvc_binding nfsd_nlm_ops = {
43913 +static const struct nlmsvc_binding nfsd_nlm_ops = {
43914 .fopen = nlm_fopen, /* open file for locking */
43915 .fclose = nlm_fclose, /* close file */
43916 };
43917 diff -urNp linux-2.6.32.45/fs/nfsd/nfs4state.c linux-2.6.32.45/fs/nfsd/nfs4state.c
43918 --- linux-2.6.32.45/fs/nfsd/nfs4state.c 2011-03-27 14:31:47.000000000 -0400
43919 +++ linux-2.6.32.45/fs/nfsd/nfs4state.c 2011-05-16 21:46:57.000000000 -0400
43920 @@ -3457,6 +3457,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
43921 unsigned int cmd;
43922 int err;
43923
43924 + pax_track_stack();
43925 +
43926 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
43927 (long long) lock->lk_offset,
43928 (long long) lock->lk_length);
43929 diff -urNp linux-2.6.32.45/fs/nfsd/nfs4xdr.c linux-2.6.32.45/fs/nfsd/nfs4xdr.c
43930 --- linux-2.6.32.45/fs/nfsd/nfs4xdr.c 2011-03-27 14:31:47.000000000 -0400
43931 +++ linux-2.6.32.45/fs/nfsd/nfs4xdr.c 2011-05-16 21:46:57.000000000 -0400
43932 @@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
43933 struct nfsd4_compoundres *resp = rqstp->rq_resp;
43934 u32 minorversion = resp->cstate.minorversion;
43935
43936 + pax_track_stack();
43937 +
43938 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
43939 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
43940 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
43941 diff -urNp linux-2.6.32.45/fs/nfsd/vfs.c linux-2.6.32.45/fs/nfsd/vfs.c
43942 --- linux-2.6.32.45/fs/nfsd/vfs.c 2011-05-10 22:12:01.000000000 -0400
43943 +++ linux-2.6.32.45/fs/nfsd/vfs.c 2011-05-10 22:12:33.000000000 -0400
43944 @@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
43945 } else {
43946 oldfs = get_fs();
43947 set_fs(KERNEL_DS);
43948 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
43949 + host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
43950 set_fs(oldfs);
43951 }
43952
43953 @@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
43954
43955 /* Write the data. */
43956 oldfs = get_fs(); set_fs(KERNEL_DS);
43957 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
43958 + host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
43959 set_fs(oldfs);
43960 if (host_err < 0)
43961 goto out_nfserr;
43962 @@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
43963 */
43964
43965 oldfs = get_fs(); set_fs(KERNEL_DS);
43966 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
43967 + host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
43968 set_fs(oldfs);
43969
43970 if (host_err < 0)
43971 diff -urNp linux-2.6.32.45/fs/nilfs2/ioctl.c linux-2.6.32.45/fs/nilfs2/ioctl.c
43972 --- linux-2.6.32.45/fs/nilfs2/ioctl.c 2011-03-27 14:31:47.000000000 -0400
43973 +++ linux-2.6.32.45/fs/nilfs2/ioctl.c 2011-05-04 17:56:28.000000000 -0400
43974 @@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(st
43975 unsigned int cmd, void __user *argp)
43976 {
43977 struct nilfs_argv argv[5];
43978 - const static size_t argsz[5] = {
43979 + static const size_t argsz[5] = {
43980 sizeof(struct nilfs_vdesc),
43981 sizeof(struct nilfs_period),
43982 sizeof(__u64),
43983 diff -urNp linux-2.6.32.45/fs/notify/dnotify/dnotify.c linux-2.6.32.45/fs/notify/dnotify/dnotify.c
43984 --- linux-2.6.32.45/fs/notify/dnotify/dnotify.c 2011-03-27 14:31:47.000000000 -0400
43985 +++ linux-2.6.32.45/fs/notify/dnotify/dnotify.c 2011-04-17 15:56:46.000000000 -0400
43986 @@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsn
43987 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
43988 }
43989
43990 -static struct fsnotify_ops dnotify_fsnotify_ops = {
43991 +static const struct fsnotify_ops dnotify_fsnotify_ops = {
43992 .handle_event = dnotify_handle_event,
43993 .should_send_event = dnotify_should_send_event,
43994 .free_group_priv = NULL,
43995 diff -urNp linux-2.6.32.45/fs/notify/notification.c linux-2.6.32.45/fs/notify/notification.c
43996 --- linux-2.6.32.45/fs/notify/notification.c 2011-03-27 14:31:47.000000000 -0400
43997 +++ linux-2.6.32.45/fs/notify/notification.c 2011-05-04 17:56:28.000000000 -0400
43998 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
43999 * get set to 0 so it will never get 'freed'
44000 */
44001 static struct fsnotify_event q_overflow_event;
44002 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44003 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44004
44005 /**
44006 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
44007 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
44008 */
44009 u32 fsnotify_get_cookie(void)
44010 {
44011 - return atomic_inc_return(&fsnotify_sync_cookie);
44012 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
44013 }
44014 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
44015
44016 diff -urNp linux-2.6.32.45/fs/ntfs/dir.c linux-2.6.32.45/fs/ntfs/dir.c
44017 --- linux-2.6.32.45/fs/ntfs/dir.c 2011-03-27 14:31:47.000000000 -0400
44018 +++ linux-2.6.32.45/fs/ntfs/dir.c 2011-04-17 15:56:46.000000000 -0400
44019 @@ -1328,7 +1328,7 @@ find_next_index_buffer:
44020 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
44021 ~(s64)(ndir->itype.index.block_size - 1)));
44022 /* Bounds checks. */
44023 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44024 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44025 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
44026 "inode 0x%lx or driver bug.", vdir->i_ino);
44027 goto err_out;
44028 diff -urNp linux-2.6.32.45/fs/ntfs/file.c linux-2.6.32.45/fs/ntfs/file.c
44029 --- linux-2.6.32.45/fs/ntfs/file.c 2011-03-27 14:31:47.000000000 -0400
44030 +++ linux-2.6.32.45/fs/ntfs/file.c 2011-04-17 15:56:46.000000000 -0400
44031 @@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_
44032 #endif /* NTFS_RW */
44033 };
44034
44035 -const struct file_operations ntfs_empty_file_ops = {};
44036 +const struct file_operations ntfs_empty_file_ops __read_only;
44037
44038 -const struct inode_operations ntfs_empty_inode_ops = {};
44039 +const struct inode_operations ntfs_empty_inode_ops __read_only;
44040 diff -urNp linux-2.6.32.45/fs/ocfs2/cluster/masklog.c linux-2.6.32.45/fs/ocfs2/cluster/masklog.c
44041 --- linux-2.6.32.45/fs/ocfs2/cluster/masklog.c 2011-03-27 14:31:47.000000000 -0400
44042 +++ linux-2.6.32.45/fs/ocfs2/cluster/masklog.c 2011-04-17 15:56:46.000000000 -0400
44043 @@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject
44044 return mlog_mask_store(mlog_attr->mask, buf, count);
44045 }
44046
44047 -static struct sysfs_ops mlog_attr_ops = {
44048 +static const struct sysfs_ops mlog_attr_ops = {
44049 .show = mlog_show,
44050 .store = mlog_store,
44051 };
44052 diff -urNp linux-2.6.32.45/fs/ocfs2/localalloc.c linux-2.6.32.45/fs/ocfs2/localalloc.c
44053 --- linux-2.6.32.45/fs/ocfs2/localalloc.c 2011-03-27 14:31:47.000000000 -0400
44054 +++ linux-2.6.32.45/fs/ocfs2/localalloc.c 2011-04-17 15:56:46.000000000 -0400
44055 @@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_windo
44056 goto bail;
44057 }
44058
44059 - atomic_inc(&osb->alloc_stats.moves);
44060 + atomic_inc_unchecked(&osb->alloc_stats.moves);
44061
44062 status = 0;
44063 bail:
44064 diff -urNp linux-2.6.32.45/fs/ocfs2/namei.c linux-2.6.32.45/fs/ocfs2/namei.c
44065 --- linux-2.6.32.45/fs/ocfs2/namei.c 2011-03-27 14:31:47.000000000 -0400
44066 +++ linux-2.6.32.45/fs/ocfs2/namei.c 2011-05-16 21:46:57.000000000 -0400
44067 @@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *ol
44068 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
44069 struct ocfs2_dir_lookup_result target_insert = { NULL, };
44070
44071 + pax_track_stack();
44072 +
44073 /* At some point it might be nice to break this function up a
44074 * bit. */
44075
44076 diff -urNp linux-2.6.32.45/fs/ocfs2/ocfs2.h linux-2.6.32.45/fs/ocfs2/ocfs2.h
44077 --- linux-2.6.32.45/fs/ocfs2/ocfs2.h 2011-03-27 14:31:47.000000000 -0400
44078 +++ linux-2.6.32.45/fs/ocfs2/ocfs2.h 2011-04-17 15:56:46.000000000 -0400
44079 @@ -217,11 +217,11 @@ enum ocfs2_vol_state
44080
44081 struct ocfs2_alloc_stats
44082 {
44083 - atomic_t moves;
44084 - atomic_t local_data;
44085 - atomic_t bitmap_data;
44086 - atomic_t bg_allocs;
44087 - atomic_t bg_extends;
44088 + atomic_unchecked_t moves;
44089 + atomic_unchecked_t local_data;
44090 + atomic_unchecked_t bitmap_data;
44091 + atomic_unchecked_t bg_allocs;
44092 + atomic_unchecked_t bg_extends;
44093 };
44094
44095 enum ocfs2_local_alloc_state
44096 diff -urNp linux-2.6.32.45/fs/ocfs2/suballoc.c linux-2.6.32.45/fs/ocfs2/suballoc.c
44097 --- linux-2.6.32.45/fs/ocfs2/suballoc.c 2011-03-27 14:31:47.000000000 -0400
44098 +++ linux-2.6.32.45/fs/ocfs2/suballoc.c 2011-04-17 15:56:46.000000000 -0400
44099 @@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(s
44100 mlog_errno(status);
44101 goto bail;
44102 }
44103 - atomic_inc(&osb->alloc_stats.bg_extends);
44104 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
44105
44106 /* You should never ask for this much metadata */
44107 BUG_ON(bits_wanted >
44108 @@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_su
44109 mlog_errno(status);
44110 goto bail;
44111 }
44112 - atomic_inc(&osb->alloc_stats.bg_allocs);
44113 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
44114
44115 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
44116 ac->ac_bits_given += (*num_bits);
44117 @@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_s
44118 mlog_errno(status);
44119 goto bail;
44120 }
44121 - atomic_inc(&osb->alloc_stats.bg_allocs);
44122 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
44123
44124 BUG_ON(num_bits != 1);
44125
44126 @@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
44127 cluster_start,
44128 num_clusters);
44129 if (!status)
44130 - atomic_inc(&osb->alloc_stats.local_data);
44131 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
44132 } else {
44133 if (min_clusters > (osb->bitmap_cpg - 1)) {
44134 /* The only paths asking for contiguousness
44135 @@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
44136 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
44137 bg_blkno,
44138 bg_bit_off);
44139 - atomic_inc(&osb->alloc_stats.bitmap_data);
44140 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
44141 }
44142 }
44143 if (status < 0) {
44144 diff -urNp linux-2.6.32.45/fs/ocfs2/super.c linux-2.6.32.45/fs/ocfs2/super.c
44145 --- linux-2.6.32.45/fs/ocfs2/super.c 2011-03-27 14:31:47.000000000 -0400
44146 +++ linux-2.6.32.45/fs/ocfs2/super.c 2011-04-17 15:56:46.000000000 -0400
44147 @@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
44148 "%10s => GlobalAllocs: %d LocalAllocs: %d "
44149 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
44150 "Stats",
44151 - atomic_read(&osb->alloc_stats.bitmap_data),
44152 - atomic_read(&osb->alloc_stats.local_data),
44153 - atomic_read(&osb->alloc_stats.bg_allocs),
44154 - atomic_read(&osb->alloc_stats.moves),
44155 - atomic_read(&osb->alloc_stats.bg_extends));
44156 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
44157 + atomic_read_unchecked(&osb->alloc_stats.local_data),
44158 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
44159 + atomic_read_unchecked(&osb->alloc_stats.moves),
44160 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
44161
44162 out += snprintf(buf + out, len - out,
44163 "%10s => State: %u Descriptor: %llu Size: %u bits "
44164 @@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct
44165 spin_lock_init(&osb->osb_xattr_lock);
44166 ocfs2_init_inode_steal_slot(osb);
44167
44168 - atomic_set(&osb->alloc_stats.moves, 0);
44169 - atomic_set(&osb->alloc_stats.local_data, 0);
44170 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
44171 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
44172 - atomic_set(&osb->alloc_stats.bg_extends, 0);
44173 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
44174 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
44175 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
44176 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
44177 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
44178
44179 /* Copy the blockcheck stats from the superblock probe */
44180 osb->osb_ecc_stats = *stats;
44181 diff -urNp linux-2.6.32.45/fs/open.c linux-2.6.32.45/fs/open.c
44182 --- linux-2.6.32.45/fs/open.c 2011-03-27 14:31:47.000000000 -0400
44183 +++ linux-2.6.32.45/fs/open.c 2011-04-17 15:56:46.000000000 -0400
44184 @@ -275,6 +275,10 @@ static long do_sys_truncate(const char _
44185 error = locks_verify_truncate(inode, NULL, length);
44186 if (!error)
44187 error = security_path_truncate(&path, length, 0);
44188 +
44189 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
44190 + error = -EACCES;
44191 +
44192 if (!error) {
44193 vfs_dq_init(inode);
44194 error = do_truncate(path.dentry, length, 0, NULL);
44195 @@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
44196 if (__mnt_is_readonly(path.mnt))
44197 res = -EROFS;
44198
44199 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
44200 + res = -EACCES;
44201 +
44202 out_path_release:
44203 path_put(&path);
44204 out:
44205 @@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user
44206 if (error)
44207 goto dput_and_out;
44208
44209 + gr_log_chdir(path.dentry, path.mnt);
44210 +
44211 set_fs_pwd(current->fs, &path);
44212
44213 dput_and_out:
44214 @@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
44215 goto out_putf;
44216
44217 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
44218 +
44219 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
44220 + error = -EPERM;
44221 +
44222 + if (!error)
44223 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
44224 +
44225 if (!error)
44226 set_fs_pwd(current->fs, &file->f_path);
44227 out_putf:
44228 @@ -588,7 +604,18 @@ SYSCALL_DEFINE1(chroot, const char __use
44229 if (!capable(CAP_SYS_CHROOT))
44230 goto dput_and_out;
44231
44232 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
44233 + goto dput_and_out;
44234 +
44235 + if (gr_handle_chroot_caps(&path)) {
44236 + error = -ENOMEM;
44237 + goto dput_and_out;
44238 + }
44239 +
44240 set_fs_root(current->fs, &path);
44241 +
44242 + gr_handle_chroot_chdir(&path);
44243 +
44244 error = 0;
44245 dput_and_out:
44246 path_put(&path);
44247 @@ -616,12 +643,27 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
44248 err = mnt_want_write_file(file);
44249 if (err)
44250 goto out_putf;
44251 +
44252 mutex_lock(&inode->i_mutex);
44253 +
44254 + if (!gr_acl_handle_fchmod(dentry, file->f_path.mnt, mode)) {
44255 + err = -EACCES;
44256 + goto out_unlock;
44257 + }
44258 +
44259 if (mode == (mode_t) -1)
44260 mode = inode->i_mode;
44261 +
44262 + if (gr_handle_chroot_chmod(dentry, file->f_path.mnt, mode)) {
44263 + err = -EPERM;
44264 + goto out_unlock;
44265 + }
44266 +
44267 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
44268 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
44269 err = notify_change(dentry, &newattrs);
44270 +
44271 +out_unlock:
44272 mutex_unlock(&inode->i_mutex);
44273 mnt_drop_write(file->f_path.mnt);
44274 out_putf:
44275 @@ -645,12 +687,27 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
44276 error = mnt_want_write(path.mnt);
44277 if (error)
44278 goto dput_and_out;
44279 +
44280 mutex_lock(&inode->i_mutex);
44281 +
44282 + if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
44283 + error = -EACCES;
44284 + goto out_unlock;
44285 + }
44286 +
44287 if (mode == (mode_t) -1)
44288 mode = inode->i_mode;
44289 +
44290 + if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
44291 + error = -EACCES;
44292 + goto out_unlock;
44293 + }
44294 +
44295 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
44296 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
44297 error = notify_change(path.dentry, &newattrs);
44298 +
44299 +out_unlock:
44300 mutex_unlock(&inode->i_mutex);
44301 mnt_drop_write(path.mnt);
44302 dput_and_out:
44303 @@ -664,12 +721,15 @@ SYSCALL_DEFINE2(chmod, const char __user
44304 return sys_fchmodat(AT_FDCWD, filename, mode);
44305 }
44306
44307 -static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
44308 +static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
44309 {
44310 struct inode *inode = dentry->d_inode;
44311 int error;
44312 struct iattr newattrs;
44313
44314 + if (!gr_acl_handle_chown(dentry, mnt))
44315 + return -EACCES;
44316 +
44317 newattrs.ia_valid = ATTR_CTIME;
44318 if (user != (uid_t) -1) {
44319 newattrs.ia_valid |= ATTR_UID;
44320 @@ -700,7 +760,7 @@ SYSCALL_DEFINE3(chown, const char __user
44321 error = mnt_want_write(path.mnt);
44322 if (error)
44323 goto out_release;
44324 - error = chown_common(path.dentry, user, group);
44325 + error = chown_common(path.dentry, user, group, path.mnt);
44326 mnt_drop_write(path.mnt);
44327 out_release:
44328 path_put(&path);
44329 @@ -725,7 +785,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, cons
44330 error = mnt_want_write(path.mnt);
44331 if (error)
44332 goto out_release;
44333 - error = chown_common(path.dentry, user, group);
44334 + error = chown_common(path.dentry, user, group, path.mnt);
44335 mnt_drop_write(path.mnt);
44336 out_release:
44337 path_put(&path);
44338 @@ -744,7 +804,7 @@ SYSCALL_DEFINE3(lchown, const char __use
44339 error = mnt_want_write(path.mnt);
44340 if (error)
44341 goto out_release;
44342 - error = chown_common(path.dentry, user, group);
44343 + error = chown_common(path.dentry, user, group, path.mnt);
44344 mnt_drop_write(path.mnt);
44345 out_release:
44346 path_put(&path);
44347 @@ -767,7 +827,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd
44348 goto out_fput;
44349 dentry = file->f_path.dentry;
44350 audit_inode(NULL, dentry);
44351 - error = chown_common(dentry, user, group);
44352 + error = chown_common(dentry, user, group, file->f_path.mnt);
44353 mnt_drop_write(file->f_path.mnt);
44354 out_fput:
44355 fput(file);
44356 @@ -1036,7 +1096,10 @@ long do_sys_open(int dfd, const char __u
44357 if (!IS_ERR(tmp)) {
44358 fd = get_unused_fd_flags(flags);
44359 if (fd >= 0) {
44360 - struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
44361 + struct file *f;
44362 + /* don't allow FMODE_GREXEC to be set by userland */
44363 + flags &= ~FMODE_GREXEC;
44364 + f = do_filp_open(dfd, tmp, flags, mode, 0);
44365 if (IS_ERR(f)) {
44366 put_unused_fd(fd);
44367 fd = PTR_ERR(f);
44368 diff -urNp linux-2.6.32.45/fs/partitions/ldm.c linux-2.6.32.45/fs/partitions/ldm.c
44369 --- linux-2.6.32.45/fs/partitions/ldm.c 2011-06-25 12:55:34.000000000 -0400
44370 +++ linux-2.6.32.45/fs/partitions/ldm.c 2011-06-25 12:56:37.000000000 -0400
44371 @@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
44372 ldm_error ("A VBLK claims to have %d parts.", num);
44373 return false;
44374 }
44375 +
44376 if (rec >= num) {
44377 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
44378 return false;
44379 @@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
44380 goto found;
44381 }
44382
44383 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
44384 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
44385 if (!f) {
44386 ldm_crit ("Out of memory.");
44387 return false;
44388 diff -urNp linux-2.6.32.45/fs/partitions/mac.c linux-2.6.32.45/fs/partitions/mac.c
44389 --- linux-2.6.32.45/fs/partitions/mac.c 2011-03-27 14:31:47.000000000 -0400
44390 +++ linux-2.6.32.45/fs/partitions/mac.c 2011-04-17 15:56:46.000000000 -0400
44391 @@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitio
44392 return 0; /* not a MacOS disk */
44393 }
44394 blocks_in_map = be32_to_cpu(part->map_count);
44395 + printk(" [mac]");
44396 if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
44397 put_dev_sector(sect);
44398 return 0;
44399 }
44400 - printk(" [mac]");
44401 for (slot = 1; slot <= blocks_in_map; ++slot) {
44402 int pos = slot * secsize;
44403 put_dev_sector(sect);
44404 diff -urNp linux-2.6.32.45/fs/pipe.c linux-2.6.32.45/fs/pipe.c
44405 --- linux-2.6.32.45/fs/pipe.c 2011-03-27 14:31:47.000000000 -0400
44406 +++ linux-2.6.32.45/fs/pipe.c 2011-04-23 13:37:17.000000000 -0400
44407 @@ -401,9 +401,9 @@ redo:
44408 }
44409 if (bufs) /* More to do? */
44410 continue;
44411 - if (!pipe->writers)
44412 + if (!atomic_read(&pipe->writers))
44413 break;
44414 - if (!pipe->waiting_writers) {
44415 + if (!atomic_read(&pipe->waiting_writers)) {
44416 /* syscall merging: Usually we must not sleep
44417 * if O_NONBLOCK is set, or if we got some data.
44418 * But if a writer sleeps in kernel space, then
44419 @@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const str
44420 mutex_lock(&inode->i_mutex);
44421 pipe = inode->i_pipe;
44422
44423 - if (!pipe->readers) {
44424 + if (!atomic_read(&pipe->readers)) {
44425 send_sig(SIGPIPE, current, 0);
44426 ret = -EPIPE;
44427 goto out;
44428 @@ -511,7 +511,7 @@ redo1:
44429 for (;;) {
44430 int bufs;
44431
44432 - if (!pipe->readers) {
44433 + if (!atomic_read(&pipe->readers)) {
44434 send_sig(SIGPIPE, current, 0);
44435 if (!ret)
44436 ret = -EPIPE;
44437 @@ -597,9 +597,9 @@ redo2:
44438 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
44439 do_wakeup = 0;
44440 }
44441 - pipe->waiting_writers++;
44442 + atomic_inc(&pipe->waiting_writers);
44443 pipe_wait(pipe);
44444 - pipe->waiting_writers--;
44445 + atomic_dec(&pipe->waiting_writers);
44446 }
44447 out:
44448 mutex_unlock(&inode->i_mutex);
44449 @@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table
44450 mask = 0;
44451 if (filp->f_mode & FMODE_READ) {
44452 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
44453 - if (!pipe->writers && filp->f_version != pipe->w_counter)
44454 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
44455 mask |= POLLHUP;
44456 }
44457
44458 @@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table
44459 * Most Unices do not set POLLERR for FIFOs but on Linux they
44460 * behave exactly like pipes for poll().
44461 */
44462 - if (!pipe->readers)
44463 + if (!atomic_read(&pipe->readers))
44464 mask |= POLLERR;
44465 }
44466
44467 @@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int de
44468
44469 mutex_lock(&inode->i_mutex);
44470 pipe = inode->i_pipe;
44471 - pipe->readers -= decr;
44472 - pipe->writers -= decw;
44473 + atomic_sub(decr, &pipe->readers);
44474 + atomic_sub(decw, &pipe->writers);
44475
44476 - if (!pipe->readers && !pipe->writers) {
44477 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
44478 free_pipe_info(inode);
44479 } else {
44480 wake_up_interruptible_sync(&pipe->wait);
44481 @@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, stru
44482
44483 if (inode->i_pipe) {
44484 ret = 0;
44485 - inode->i_pipe->readers++;
44486 + atomic_inc(&inode->i_pipe->readers);
44487 }
44488
44489 mutex_unlock(&inode->i_mutex);
44490 @@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, str
44491
44492 if (inode->i_pipe) {
44493 ret = 0;
44494 - inode->i_pipe->writers++;
44495 + atomic_inc(&inode->i_pipe->writers);
44496 }
44497
44498 mutex_unlock(&inode->i_mutex);
44499 @@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, stru
44500 if (inode->i_pipe) {
44501 ret = 0;
44502 if (filp->f_mode & FMODE_READ)
44503 - inode->i_pipe->readers++;
44504 + atomic_inc(&inode->i_pipe->readers);
44505 if (filp->f_mode & FMODE_WRITE)
44506 - inode->i_pipe->writers++;
44507 + atomic_inc(&inode->i_pipe->writers);
44508 }
44509
44510 mutex_unlock(&inode->i_mutex);
44511 @@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
44512 inode->i_pipe = NULL;
44513 }
44514
44515 -static struct vfsmount *pipe_mnt __read_mostly;
44516 +struct vfsmount *pipe_mnt __read_mostly;
44517 static int pipefs_delete_dentry(struct dentry *dentry)
44518 {
44519 /*
44520 @@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(voi
44521 goto fail_iput;
44522 inode->i_pipe = pipe;
44523
44524 - pipe->readers = pipe->writers = 1;
44525 + atomic_set(&pipe->readers, 1);
44526 + atomic_set(&pipe->writers, 1);
44527 inode->i_fop = &rdwr_pipefifo_fops;
44528
44529 /*
44530 diff -urNp linux-2.6.32.45/fs/proc/array.c linux-2.6.32.45/fs/proc/array.c
44531 --- linux-2.6.32.45/fs/proc/array.c 2011-03-27 14:31:47.000000000 -0400
44532 +++ linux-2.6.32.45/fs/proc/array.c 2011-05-16 21:46:57.000000000 -0400
44533 @@ -60,6 +60,7 @@
44534 #include <linux/tty.h>
44535 #include <linux/string.h>
44536 #include <linux/mman.h>
44537 +#include <linux/grsecurity.h>
44538 #include <linux/proc_fs.h>
44539 #include <linux/ioport.h>
44540 #include <linux/uaccess.h>
44541 @@ -321,6 +322,21 @@ static inline void task_context_switch_c
44542 p->nivcsw);
44543 }
44544
44545 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44546 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
44547 +{
44548 + if (p->mm)
44549 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
44550 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
44551 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
44552 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
44553 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
44554 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
44555 + else
44556 + seq_printf(m, "PaX:\t-----\n");
44557 +}
44558 +#endif
44559 +
44560 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
44561 struct pid *pid, struct task_struct *task)
44562 {
44563 @@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m,
44564 task_cap(m, task);
44565 cpuset_task_status_allowed(m, task);
44566 task_context_switch_counts(m, task);
44567 +
44568 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44569 + task_pax(m, task);
44570 +#endif
44571 +
44572 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
44573 + task_grsec_rbac(m, task);
44574 +#endif
44575 +
44576 return 0;
44577 }
44578
44579 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44580 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
44581 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
44582 + _mm->pax_flags & MF_PAX_SEGMEXEC))
44583 +#endif
44584 +
44585 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
44586 struct pid *pid, struct task_struct *task, int whole)
44587 {
44588 @@ -358,9 +389,11 @@ static int do_task_stat(struct seq_file
44589 cputime_t cutime, cstime, utime, stime;
44590 cputime_t cgtime, gtime;
44591 unsigned long rsslim = 0;
44592 - char tcomm[sizeof(task->comm)];
44593 + char tcomm[sizeof(task->comm)] = { 0 };
44594 unsigned long flags;
44595
44596 + pax_track_stack();
44597 +
44598 state = *get_task_state(task);
44599 vsize = eip = esp = 0;
44600 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
44601 @@ -433,6 +466,19 @@ static int do_task_stat(struct seq_file
44602 gtime = task_gtime(task);
44603 }
44604
44605 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44606 + if (PAX_RAND_FLAGS(mm)) {
44607 + eip = 0;
44608 + esp = 0;
44609 + wchan = 0;
44610 + }
44611 +#endif
44612 +#ifdef CONFIG_GRKERNSEC_HIDESYM
44613 + wchan = 0;
44614 + eip = 0;
44615 + esp = 0;
44616 +#endif
44617 +
44618 /* scale priority and nice values from timeslices to -20..20 */
44619 /* to make it look like a "normal" Unix priority/nice value */
44620 priority = task_prio(task);
44621 @@ -473,9 +519,15 @@ static int do_task_stat(struct seq_file
44622 vsize,
44623 mm ? get_mm_rss(mm) : 0,
44624 rsslim,
44625 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44626 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
44627 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
44628 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
44629 +#else
44630 mm ? (permitted ? mm->start_code : 1) : 0,
44631 mm ? (permitted ? mm->end_code : 1) : 0,
44632 (permitted && mm) ? mm->start_stack : 0,
44633 +#endif
44634 esp,
44635 eip,
44636 /* The signal information here is obsolete.
44637 @@ -528,3 +580,18 @@ int proc_pid_statm(struct seq_file *m, s
44638
44639 return 0;
44640 }
44641 +
44642 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
44643 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
44644 +{
44645 + u32 curr_ip = 0;
44646 + unsigned long flags;
44647 +
44648 + if (lock_task_sighand(task, &flags)) {
44649 + curr_ip = task->signal->curr_ip;
44650 + unlock_task_sighand(task, &flags);
44651 + }
44652 +
44653 + return sprintf(buffer, "%pI4\n", &curr_ip);
44654 +}
44655 +#endif
44656 diff -urNp linux-2.6.32.45/fs/proc/base.c linux-2.6.32.45/fs/proc/base.c
44657 --- linux-2.6.32.45/fs/proc/base.c 2011-08-09 18:35:30.000000000 -0400
44658 +++ linux-2.6.32.45/fs/proc/base.c 2011-08-09 18:34:33.000000000 -0400
44659 @@ -102,6 +102,22 @@ struct pid_entry {
44660 union proc_op op;
44661 };
44662
44663 +struct getdents_callback {
44664 + struct linux_dirent __user * current_dir;
44665 + struct linux_dirent __user * previous;
44666 + struct file * file;
44667 + int count;
44668 + int error;
44669 +};
44670 +
44671 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
44672 + loff_t offset, u64 ino, unsigned int d_type)
44673 +{
44674 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
44675 + buf->error = -EINVAL;
44676 + return 0;
44677 +}
44678 +
44679 #define NOD(NAME, MODE, IOP, FOP, OP) { \
44680 .name = (NAME), \
44681 .len = sizeof(NAME) - 1, \
44682 @@ -213,6 +229,9 @@ static int check_mem_permission(struct t
44683 if (task == current)
44684 return 0;
44685
44686 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
44687 + return -EPERM;
44688 +
44689 /*
44690 * If current is actively ptrace'ing, and would also be
44691 * permitted to freshly attach with ptrace now, permit it.
44692 @@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_
44693 if (!mm->arg_end)
44694 goto out_mm; /* Shh! No looking before we're done */
44695
44696 + if (gr_acl_handle_procpidmem(task))
44697 + goto out_mm;
44698 +
44699 len = mm->arg_end - mm->arg_start;
44700
44701 if (len > PAGE_SIZE)
44702 @@ -287,12 +309,28 @@ out:
44703 return res;
44704 }
44705
44706 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44707 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
44708 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
44709 + _mm->pax_flags & MF_PAX_SEGMEXEC))
44710 +#endif
44711 +
44712 static int proc_pid_auxv(struct task_struct *task, char *buffer)
44713 {
44714 int res = 0;
44715 struct mm_struct *mm = get_task_mm(task);
44716 if (mm) {
44717 unsigned int nwords = 0;
44718 +
44719 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44720 + /* allow if we're currently ptracing this task */
44721 + if (PAX_RAND_FLAGS(mm) &&
44722 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
44723 + mmput(mm);
44724 + return res;
44725 + }
44726 +#endif
44727 +
44728 do {
44729 nwords += 2;
44730 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
44731 @@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_str
44732 }
44733
44734
44735 -#ifdef CONFIG_KALLSYMS
44736 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
44737 /*
44738 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
44739 * Returns the resolved symbol. If that fails, simply return the address.
44740 @@ -328,7 +366,7 @@ static int proc_pid_wchan(struct task_st
44741 }
44742 #endif /* CONFIG_KALLSYMS */
44743
44744 -#ifdef CONFIG_STACKTRACE
44745 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
44746
44747 #define MAX_STACK_TRACE_DEPTH 64
44748
44749 @@ -522,7 +560,7 @@ static int proc_pid_limits(struct task_s
44750 return count;
44751 }
44752
44753 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
44754 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
44755 static int proc_pid_syscall(struct task_struct *task, char *buffer)
44756 {
44757 long nr;
44758 @@ -547,7 +585,7 @@ static int proc_pid_syscall(struct task_
44759 /************************************************************************/
44760
44761 /* permission checks */
44762 -static int proc_fd_access_allowed(struct inode *inode)
44763 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
44764 {
44765 struct task_struct *task;
44766 int allowed = 0;
44767 @@ -557,7 +595,10 @@ static int proc_fd_access_allowed(struct
44768 */
44769 task = get_proc_task(inode);
44770 if (task) {
44771 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
44772 + if (log)
44773 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
44774 + else
44775 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
44776 put_task_struct(task);
44777 }
44778 return allowed;
44779 @@ -936,6 +977,9 @@ static ssize_t environ_read(struct file
44780 if (!task)
44781 goto out_no_task;
44782
44783 + if (gr_acl_handle_procpidmem(task))
44784 + goto out;
44785 +
44786 if (!ptrace_may_access(task, PTRACE_MODE_READ))
44787 goto out;
44788
44789 @@ -1350,7 +1394,7 @@ static void *proc_pid_follow_link(struct
44790 path_put(&nd->path);
44791
44792 /* Are we allowed to snoop on the tasks file descriptors? */
44793 - if (!proc_fd_access_allowed(inode))
44794 + if (!proc_fd_access_allowed(inode,0))
44795 goto out;
44796
44797 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
44798 @@ -1390,8 +1434,18 @@ static int proc_pid_readlink(struct dent
44799 struct path path;
44800
44801 /* Are we allowed to snoop on the tasks file descriptors? */
44802 - if (!proc_fd_access_allowed(inode))
44803 - goto out;
44804 + /* logging this is needed for learning on chromium to work properly,
44805 + but we don't want to flood the logs from 'ps' which does a readlink
44806 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
44807 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
44808 + */
44809 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
44810 + if (!proc_fd_access_allowed(inode,0))
44811 + goto out;
44812 + } else {
44813 + if (!proc_fd_access_allowed(inode,1))
44814 + goto out;
44815 + }
44816
44817 error = PROC_I(inode)->op.proc_get_link(inode, &path);
44818 if (error)
44819 @@ -1456,7 +1510,11 @@ static struct inode *proc_pid_make_inode
44820 rcu_read_lock();
44821 cred = __task_cred(task);
44822 inode->i_uid = cred->euid;
44823 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
44824 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
44825 +#else
44826 inode->i_gid = cred->egid;
44827 +#endif
44828 rcu_read_unlock();
44829 }
44830 security_task_to_inode(task, inode);
44831 @@ -1474,6 +1532,9 @@ static int pid_getattr(struct vfsmount *
44832 struct inode *inode = dentry->d_inode;
44833 struct task_struct *task;
44834 const struct cred *cred;
44835 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44836 + const struct cred *tmpcred = current_cred();
44837 +#endif
44838
44839 generic_fillattr(inode, stat);
44840
44841 @@ -1481,13 +1542,41 @@ static int pid_getattr(struct vfsmount *
44842 stat->uid = 0;
44843 stat->gid = 0;
44844 task = pid_task(proc_pid(inode), PIDTYPE_PID);
44845 +
44846 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
44847 + rcu_read_unlock();
44848 + return -ENOENT;
44849 + }
44850 +
44851 if (task) {
44852 + cred = __task_cred(task);
44853 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44854 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
44855 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
44856 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
44857 +#endif
44858 + ) {
44859 +#endif
44860 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
44861 +#ifdef CONFIG_GRKERNSEC_PROC_USER
44862 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
44863 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44864 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
44865 +#endif
44866 task_dumpable(task)) {
44867 - cred = __task_cred(task);
44868 stat->uid = cred->euid;
44869 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
44870 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
44871 +#else
44872 stat->gid = cred->egid;
44873 +#endif
44874 }
44875 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44876 + } else {
44877 + rcu_read_unlock();
44878 + return -ENOENT;
44879 + }
44880 +#endif
44881 }
44882 rcu_read_unlock();
44883 return 0;
44884 @@ -1518,11 +1607,20 @@ static int pid_revalidate(struct dentry
44885
44886 if (task) {
44887 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
44888 +#ifdef CONFIG_GRKERNSEC_PROC_USER
44889 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
44890 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44891 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
44892 +#endif
44893 task_dumpable(task)) {
44894 rcu_read_lock();
44895 cred = __task_cred(task);
44896 inode->i_uid = cred->euid;
44897 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
44898 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
44899 +#else
44900 inode->i_gid = cred->egid;
44901 +#endif
44902 rcu_read_unlock();
44903 } else {
44904 inode->i_uid = 0;
44905 @@ -1643,7 +1741,8 @@ static int proc_fd_info(struct inode *in
44906 int fd = proc_fd(inode);
44907
44908 if (task) {
44909 - files = get_files_struct(task);
44910 + if (!gr_acl_handle_procpidmem(task))
44911 + files = get_files_struct(task);
44912 put_task_struct(task);
44913 }
44914 if (files) {
44915 @@ -1895,12 +1994,22 @@ static const struct file_operations proc
44916 static int proc_fd_permission(struct inode *inode, int mask)
44917 {
44918 int rv;
44919 + struct task_struct *task;
44920
44921 rv = generic_permission(inode, mask, NULL);
44922 - if (rv == 0)
44923 - return 0;
44924 +
44925 if (task_pid(current) == proc_pid(inode))
44926 rv = 0;
44927 +
44928 + task = get_proc_task(inode);
44929 + if (task == NULL)
44930 + return rv;
44931 +
44932 + if (gr_acl_handle_procpidmem(task))
44933 + rv = -EACCES;
44934 +
44935 + put_task_struct(task);
44936 +
44937 return rv;
44938 }
44939
44940 @@ -2009,6 +2118,9 @@ static struct dentry *proc_pident_lookup
44941 if (!task)
44942 goto out_no_task;
44943
44944 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
44945 + goto out;
44946 +
44947 /*
44948 * Yes, it does not scale. And it should not. Don't add
44949 * new entries into /proc/<tgid>/ without very good reasons.
44950 @@ -2053,6 +2165,9 @@ static int proc_pident_readdir(struct fi
44951 if (!task)
44952 goto out_no_task;
44953
44954 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
44955 + goto out;
44956 +
44957 ret = 0;
44958 i = filp->f_pos;
44959 switch (i) {
44960 @@ -2320,7 +2435,7 @@ static void *proc_self_follow_link(struc
44961 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
44962 void *cookie)
44963 {
44964 - char *s = nd_get_link(nd);
44965 + const char *s = nd_get_link(nd);
44966 if (!IS_ERR(s))
44967 __putname(s);
44968 }
44969 @@ -2522,7 +2637,7 @@ static const struct pid_entry tgid_base_
44970 #ifdef CONFIG_SCHED_DEBUG
44971 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
44972 #endif
44973 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
44974 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
44975 INF("syscall", S_IRUSR, proc_pid_syscall),
44976 #endif
44977 INF("cmdline", S_IRUGO, proc_pid_cmdline),
44978 @@ -2547,10 +2662,10 @@ static const struct pid_entry tgid_base_
44979 #ifdef CONFIG_SECURITY
44980 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
44981 #endif
44982 -#ifdef CONFIG_KALLSYMS
44983 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
44984 INF("wchan", S_IRUGO, proc_pid_wchan),
44985 #endif
44986 -#ifdef CONFIG_STACKTRACE
44987 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
44988 ONE("stack", S_IRUSR, proc_pid_stack),
44989 #endif
44990 #ifdef CONFIG_SCHEDSTATS
44991 @@ -2580,6 +2695,9 @@ static const struct pid_entry tgid_base_
44992 #ifdef CONFIG_TASK_IO_ACCOUNTING
44993 INF("io", S_IRUSR, proc_tgid_io_accounting),
44994 #endif
44995 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
44996 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
44997 +#endif
44998 };
44999
45000 static int proc_tgid_base_readdir(struct file * filp,
45001 @@ -2704,7 +2822,14 @@ static struct dentry *proc_pid_instantia
45002 if (!inode)
45003 goto out;
45004
45005 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45006 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
45007 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45008 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45009 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
45010 +#else
45011 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
45012 +#endif
45013 inode->i_op = &proc_tgid_base_inode_operations;
45014 inode->i_fop = &proc_tgid_base_operations;
45015 inode->i_flags|=S_IMMUTABLE;
45016 @@ -2746,7 +2871,11 @@ struct dentry *proc_pid_lookup(struct in
45017 if (!task)
45018 goto out;
45019
45020 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45021 + goto out_put_task;
45022 +
45023 result = proc_pid_instantiate(dir, dentry, task, NULL);
45024 +out_put_task:
45025 put_task_struct(task);
45026 out:
45027 return result;
45028 @@ -2811,6 +2940,11 @@ int proc_pid_readdir(struct file * filp,
45029 {
45030 unsigned int nr;
45031 struct task_struct *reaper;
45032 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45033 + const struct cred *tmpcred = current_cred();
45034 + const struct cred *itercred;
45035 +#endif
45036 + filldir_t __filldir = filldir;
45037 struct tgid_iter iter;
45038 struct pid_namespace *ns;
45039
45040 @@ -2834,8 +2968,27 @@ int proc_pid_readdir(struct file * filp,
45041 for (iter = next_tgid(ns, iter);
45042 iter.task;
45043 iter.tgid += 1, iter = next_tgid(ns, iter)) {
45044 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45045 + rcu_read_lock();
45046 + itercred = __task_cred(iter.task);
45047 +#endif
45048 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
45049 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45050 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
45051 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45052 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45053 +#endif
45054 + )
45055 +#endif
45056 + )
45057 + __filldir = &gr_fake_filldir;
45058 + else
45059 + __filldir = filldir;
45060 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45061 + rcu_read_unlock();
45062 +#endif
45063 filp->f_pos = iter.tgid + TGID_OFFSET;
45064 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
45065 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
45066 put_task_struct(iter.task);
45067 goto out;
45068 }
45069 @@ -2861,7 +3014,7 @@ static const struct pid_entry tid_base_s
45070 #ifdef CONFIG_SCHED_DEBUG
45071 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
45072 #endif
45073 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45074 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45075 INF("syscall", S_IRUSR, proc_pid_syscall),
45076 #endif
45077 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45078 @@ -2885,10 +3038,10 @@ static const struct pid_entry tid_base_s
45079 #ifdef CONFIG_SECURITY
45080 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45081 #endif
45082 -#ifdef CONFIG_KALLSYMS
45083 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45084 INF("wchan", S_IRUGO, proc_pid_wchan),
45085 #endif
45086 -#ifdef CONFIG_STACKTRACE
45087 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45088 ONE("stack", S_IRUSR, proc_pid_stack),
45089 #endif
45090 #ifdef CONFIG_SCHEDSTATS
45091 diff -urNp linux-2.6.32.45/fs/proc/cmdline.c linux-2.6.32.45/fs/proc/cmdline.c
45092 --- linux-2.6.32.45/fs/proc/cmdline.c 2011-03-27 14:31:47.000000000 -0400
45093 +++ linux-2.6.32.45/fs/proc/cmdline.c 2011-04-17 15:56:46.000000000 -0400
45094 @@ -23,7 +23,11 @@ static const struct file_operations cmdl
45095
45096 static int __init proc_cmdline_init(void)
45097 {
45098 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
45099 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
45100 +#else
45101 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
45102 +#endif
45103 return 0;
45104 }
45105 module_init(proc_cmdline_init);
45106 diff -urNp linux-2.6.32.45/fs/proc/devices.c linux-2.6.32.45/fs/proc/devices.c
45107 --- linux-2.6.32.45/fs/proc/devices.c 2011-03-27 14:31:47.000000000 -0400
45108 +++ linux-2.6.32.45/fs/proc/devices.c 2011-04-17 15:56:46.000000000 -0400
45109 @@ -64,7 +64,11 @@ static const struct file_operations proc
45110
45111 static int __init proc_devices_init(void)
45112 {
45113 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
45114 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
45115 +#else
45116 proc_create("devices", 0, NULL, &proc_devinfo_operations);
45117 +#endif
45118 return 0;
45119 }
45120 module_init(proc_devices_init);
45121 diff -urNp linux-2.6.32.45/fs/proc/inode.c linux-2.6.32.45/fs/proc/inode.c
45122 --- linux-2.6.32.45/fs/proc/inode.c 2011-03-27 14:31:47.000000000 -0400
45123 +++ linux-2.6.32.45/fs/proc/inode.c 2011-04-17 15:56:46.000000000 -0400
45124 @@ -457,7 +457,11 @@ struct inode *proc_get_inode(struct supe
45125 if (de->mode) {
45126 inode->i_mode = de->mode;
45127 inode->i_uid = de->uid;
45128 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45129 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45130 +#else
45131 inode->i_gid = de->gid;
45132 +#endif
45133 }
45134 if (de->size)
45135 inode->i_size = de->size;
45136 diff -urNp linux-2.6.32.45/fs/proc/internal.h linux-2.6.32.45/fs/proc/internal.h
45137 --- linux-2.6.32.45/fs/proc/internal.h 2011-03-27 14:31:47.000000000 -0400
45138 +++ linux-2.6.32.45/fs/proc/internal.h 2011-04-17 15:56:46.000000000 -0400
45139 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
45140 struct pid *pid, struct task_struct *task);
45141 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45142 struct pid *pid, struct task_struct *task);
45143 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45144 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
45145 +#endif
45146 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
45147
45148 extern const struct file_operations proc_maps_operations;
45149 diff -urNp linux-2.6.32.45/fs/proc/Kconfig linux-2.6.32.45/fs/proc/Kconfig
45150 --- linux-2.6.32.45/fs/proc/Kconfig 2011-03-27 14:31:47.000000000 -0400
45151 +++ linux-2.6.32.45/fs/proc/Kconfig 2011-04-17 15:56:46.000000000 -0400
45152 @@ -30,12 +30,12 @@ config PROC_FS
45153
45154 config PROC_KCORE
45155 bool "/proc/kcore support" if !ARM
45156 - depends on PROC_FS && MMU
45157 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
45158
45159 config PROC_VMCORE
45160 bool "/proc/vmcore support (EXPERIMENTAL)"
45161 - depends on PROC_FS && CRASH_DUMP
45162 - default y
45163 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
45164 + default n
45165 help
45166 Exports the dump image of crashed kernel in ELF format.
45167
45168 @@ -59,8 +59,8 @@ config PROC_SYSCTL
45169 limited in memory.
45170
45171 config PROC_PAGE_MONITOR
45172 - default y
45173 - depends on PROC_FS && MMU
45174 + default n
45175 + depends on PROC_FS && MMU && !GRKERNSEC
45176 bool "Enable /proc page monitoring" if EMBEDDED
45177 help
45178 Various /proc files exist to monitor process memory utilization:
45179 diff -urNp linux-2.6.32.45/fs/proc/kcore.c linux-2.6.32.45/fs/proc/kcore.c
45180 --- linux-2.6.32.45/fs/proc/kcore.c 2011-03-27 14:31:47.000000000 -0400
45181 +++ linux-2.6.32.45/fs/proc/kcore.c 2011-05-16 21:46:57.000000000 -0400
45182 @@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bu
45183 off_t offset = 0;
45184 struct kcore_list *m;
45185
45186 + pax_track_stack();
45187 +
45188 /* setup ELF header */
45189 elf = (struct elfhdr *) bufp;
45190 bufp += sizeof(struct elfhdr);
45191 @@ -477,9 +479,10 @@ read_kcore(struct file *file, char __use
45192 * the addresses in the elf_phdr on our list.
45193 */
45194 start = kc_offset_to_vaddr(*fpos - elf_buflen);
45195 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
45196 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
45197 + if (tsz > buflen)
45198 tsz = buflen;
45199 -
45200 +
45201 while (buflen) {
45202 struct kcore_list *m;
45203
45204 @@ -508,20 +511,23 @@ read_kcore(struct file *file, char __use
45205 kfree(elf_buf);
45206 } else {
45207 if (kern_addr_valid(start)) {
45208 - unsigned long n;
45209 + char *elf_buf;
45210 + mm_segment_t oldfs;
45211
45212 - n = copy_to_user(buffer, (char *)start, tsz);
45213 - /*
45214 - * We cannot distingush between fault on source
45215 - * and fault on destination. When this happens
45216 - * we clear too and hope it will trigger the
45217 - * EFAULT again.
45218 - */
45219 - if (n) {
45220 - if (clear_user(buffer + tsz - n,
45221 - n))
45222 + elf_buf = kmalloc(tsz, GFP_KERNEL);
45223 + if (!elf_buf)
45224 + return -ENOMEM;
45225 + oldfs = get_fs();
45226 + set_fs(KERNEL_DS);
45227 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
45228 + set_fs(oldfs);
45229 + if (copy_to_user(buffer, elf_buf, tsz)) {
45230 + kfree(elf_buf);
45231 return -EFAULT;
45232 + }
45233 }
45234 + set_fs(oldfs);
45235 + kfree(elf_buf);
45236 } else {
45237 if (clear_user(buffer, tsz))
45238 return -EFAULT;
45239 @@ -541,6 +547,9 @@ read_kcore(struct file *file, char __use
45240
45241 static int open_kcore(struct inode *inode, struct file *filp)
45242 {
45243 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
45244 + return -EPERM;
45245 +#endif
45246 if (!capable(CAP_SYS_RAWIO))
45247 return -EPERM;
45248 if (kcore_need_update)
45249 diff -urNp linux-2.6.32.45/fs/proc/meminfo.c linux-2.6.32.45/fs/proc/meminfo.c
45250 --- linux-2.6.32.45/fs/proc/meminfo.c 2011-03-27 14:31:47.000000000 -0400
45251 +++ linux-2.6.32.45/fs/proc/meminfo.c 2011-05-16 21:46:57.000000000 -0400
45252 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
45253 unsigned long pages[NR_LRU_LISTS];
45254 int lru;
45255
45256 + pax_track_stack();
45257 +
45258 /*
45259 * display in kilobytes.
45260 */
45261 @@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_
45262 vmi.used >> 10,
45263 vmi.largest_chunk >> 10
45264 #ifdef CONFIG_MEMORY_FAILURE
45265 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
45266 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
45267 #endif
45268 );
45269
45270 diff -urNp linux-2.6.32.45/fs/proc/nommu.c linux-2.6.32.45/fs/proc/nommu.c
45271 --- linux-2.6.32.45/fs/proc/nommu.c 2011-03-27 14:31:47.000000000 -0400
45272 +++ linux-2.6.32.45/fs/proc/nommu.c 2011-04-17 15:56:46.000000000 -0400
45273 @@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_
45274 if (len < 1)
45275 len = 1;
45276 seq_printf(m, "%*c", len, ' ');
45277 - seq_path(m, &file->f_path, "");
45278 + seq_path(m, &file->f_path, "\n\\");
45279 }
45280
45281 seq_putc(m, '\n');
45282 diff -urNp linux-2.6.32.45/fs/proc/proc_net.c linux-2.6.32.45/fs/proc/proc_net.c
45283 --- linux-2.6.32.45/fs/proc/proc_net.c 2011-03-27 14:31:47.000000000 -0400
45284 +++ linux-2.6.32.45/fs/proc/proc_net.c 2011-04-17 15:56:46.000000000 -0400
45285 @@ -104,6 +104,17 @@ static struct net *get_proc_task_net(str
45286 struct task_struct *task;
45287 struct nsproxy *ns;
45288 struct net *net = NULL;
45289 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45290 + const struct cred *cred = current_cred();
45291 +#endif
45292 +
45293 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45294 + if (cred->fsuid)
45295 + return net;
45296 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45297 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
45298 + return net;
45299 +#endif
45300
45301 rcu_read_lock();
45302 task = pid_task(proc_pid(dir), PIDTYPE_PID);
45303 diff -urNp linux-2.6.32.45/fs/proc/proc_sysctl.c linux-2.6.32.45/fs/proc/proc_sysctl.c
45304 --- linux-2.6.32.45/fs/proc/proc_sysctl.c 2011-03-27 14:31:47.000000000 -0400
45305 +++ linux-2.6.32.45/fs/proc/proc_sysctl.c 2011-04-17 15:56:46.000000000 -0400
45306 @@ -7,6 +7,8 @@
45307 #include <linux/security.h>
45308 #include "internal.h"
45309
45310 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
45311 +
45312 static const struct dentry_operations proc_sys_dentry_operations;
45313 static const struct file_operations proc_sys_file_operations;
45314 static const struct inode_operations proc_sys_inode_operations;
45315 @@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(st
45316 if (!p)
45317 goto out;
45318
45319 + if (gr_handle_sysctl(p, MAY_EXEC))
45320 + goto out;
45321 +
45322 err = ERR_PTR(-ENOMEM);
45323 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
45324 if (h)
45325 @@ -228,6 +233,9 @@ static int scan(struct ctl_table_header
45326 if (*pos < file->f_pos)
45327 continue;
45328
45329 + if (gr_handle_sysctl(table, 0))
45330 + continue;
45331 +
45332 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
45333 if (res)
45334 return res;
45335 @@ -344,6 +352,9 @@ static int proc_sys_getattr(struct vfsmo
45336 if (IS_ERR(head))
45337 return PTR_ERR(head);
45338
45339 + if (table && gr_handle_sysctl(table, MAY_EXEC))
45340 + return -ENOENT;
45341 +
45342 generic_fillattr(inode, stat);
45343 if (table)
45344 stat->mode = (stat->mode & S_IFMT) | table->mode;
45345 diff -urNp linux-2.6.32.45/fs/proc/root.c linux-2.6.32.45/fs/proc/root.c
45346 --- linux-2.6.32.45/fs/proc/root.c 2011-03-27 14:31:47.000000000 -0400
45347 +++ linux-2.6.32.45/fs/proc/root.c 2011-04-17 15:56:46.000000000 -0400
45348 @@ -134,7 +134,15 @@ void __init proc_root_init(void)
45349 #ifdef CONFIG_PROC_DEVICETREE
45350 proc_device_tree_init();
45351 #endif
45352 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
45353 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45354 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
45355 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45356 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
45357 +#endif
45358 +#else
45359 proc_mkdir("bus", NULL);
45360 +#endif
45361 proc_sys_init();
45362 }
45363
45364 diff -urNp linux-2.6.32.45/fs/proc/task_mmu.c linux-2.6.32.45/fs/proc/task_mmu.c
45365 --- linux-2.6.32.45/fs/proc/task_mmu.c 2011-03-27 14:31:47.000000000 -0400
45366 +++ linux-2.6.32.45/fs/proc/task_mmu.c 2011-04-23 13:38:09.000000000 -0400
45367 @@ -46,15 +46,26 @@ void task_mem(struct seq_file *m, struct
45368 "VmStk:\t%8lu kB\n"
45369 "VmExe:\t%8lu kB\n"
45370 "VmLib:\t%8lu kB\n"
45371 - "VmPTE:\t%8lu kB\n",
45372 - hiwater_vm << (PAGE_SHIFT-10),
45373 + "VmPTE:\t%8lu kB\n"
45374 +
45375 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
45376 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
45377 +#endif
45378 +
45379 + ,hiwater_vm << (PAGE_SHIFT-10),
45380 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
45381 mm->locked_vm << (PAGE_SHIFT-10),
45382 hiwater_rss << (PAGE_SHIFT-10),
45383 total_rss << (PAGE_SHIFT-10),
45384 data << (PAGE_SHIFT-10),
45385 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
45386 - (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
45387 + (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
45388 +
45389 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
45390 + , mm->context.user_cs_base, mm->context.user_cs_limit
45391 +#endif
45392 +
45393 + );
45394 }
45395
45396 unsigned long task_vsize(struct mm_struct *mm)
45397 @@ -175,7 +186,8 @@ static void m_stop(struct seq_file *m, v
45398 struct proc_maps_private *priv = m->private;
45399 struct vm_area_struct *vma = v;
45400
45401 - vma_stop(priv, vma);
45402 + if (!IS_ERR(vma))
45403 + vma_stop(priv, vma);
45404 if (priv->task)
45405 put_task_struct(priv->task);
45406 }
45407 @@ -199,6 +211,12 @@ static int do_maps_open(struct inode *in
45408 return ret;
45409 }
45410
45411 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45412 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45413 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
45414 + _mm->pax_flags & MF_PAX_SEGMEXEC))
45415 +#endif
45416 +
45417 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
45418 {
45419 struct mm_struct *mm = vma->vm_mm;
45420 @@ -206,7 +224,6 @@ static void show_map_vma(struct seq_file
45421 int flags = vma->vm_flags;
45422 unsigned long ino = 0;
45423 unsigned long long pgoff = 0;
45424 - unsigned long start;
45425 dev_t dev = 0;
45426 int len;
45427
45428 @@ -217,20 +234,23 @@ static void show_map_vma(struct seq_file
45429 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
45430 }
45431
45432 - /* We don't show the stack guard page in /proc/maps */
45433 - start = vma->vm_start;
45434 - if (vma->vm_flags & VM_GROWSDOWN)
45435 - if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
45436 - start += PAGE_SIZE;
45437 -
45438 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
45439 - start,
45440 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45441 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
45442 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
45443 +#else
45444 + vma->vm_start,
45445 vma->vm_end,
45446 +#endif
45447 flags & VM_READ ? 'r' : '-',
45448 flags & VM_WRITE ? 'w' : '-',
45449 flags & VM_EXEC ? 'x' : '-',
45450 flags & VM_MAYSHARE ? 's' : 'p',
45451 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45452 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
45453 +#else
45454 pgoff,
45455 +#endif
45456 MAJOR(dev), MINOR(dev), ino, &len);
45457
45458 /*
45459 @@ -239,7 +259,7 @@ static void show_map_vma(struct seq_file
45460 */
45461 if (file) {
45462 pad_len_spaces(m, len);
45463 - seq_path(m, &file->f_path, "\n");
45464 + seq_path(m, &file->f_path, "\n\\");
45465 } else {
45466 const char *name = arch_vma_name(vma);
45467 if (!name) {
45468 @@ -247,8 +267,9 @@ static void show_map_vma(struct seq_file
45469 if (vma->vm_start <= mm->brk &&
45470 vma->vm_end >= mm->start_brk) {
45471 name = "[heap]";
45472 - } else if (vma->vm_start <= mm->start_stack &&
45473 - vma->vm_end >= mm->start_stack) {
45474 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
45475 + (vma->vm_start <= mm->start_stack &&
45476 + vma->vm_end >= mm->start_stack)) {
45477 name = "[stack]";
45478 }
45479 } else {
45480 @@ -391,9 +412,16 @@ static int show_smap(struct seq_file *m,
45481 };
45482
45483 memset(&mss, 0, sizeof mss);
45484 - mss.vma = vma;
45485 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
45486 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
45487 +
45488 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45489 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
45490 +#endif
45491 + mss.vma = vma;
45492 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
45493 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
45494 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45495 + }
45496 +#endif
45497
45498 show_map_vma(m, vma);
45499
45500 @@ -409,7 +437,11 @@ static int show_smap(struct seq_file *m,
45501 "Swap: %8lu kB\n"
45502 "KernelPageSize: %8lu kB\n"
45503 "MMUPageSize: %8lu kB\n",
45504 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45505 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
45506 +#else
45507 (vma->vm_end - vma->vm_start) >> 10,
45508 +#endif
45509 mss.resident >> 10,
45510 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
45511 mss.shared_clean >> 10,
45512 diff -urNp linux-2.6.32.45/fs/proc/task_nommu.c linux-2.6.32.45/fs/proc/task_nommu.c
45513 --- linux-2.6.32.45/fs/proc/task_nommu.c 2011-03-27 14:31:47.000000000 -0400
45514 +++ linux-2.6.32.45/fs/proc/task_nommu.c 2011-04-17 15:56:46.000000000 -0400
45515 @@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct
45516 else
45517 bytes += kobjsize(mm);
45518
45519 - if (current->fs && current->fs->users > 1)
45520 + if (current->fs && atomic_read(&current->fs->users) > 1)
45521 sbytes += kobjsize(current->fs);
45522 else
45523 bytes += kobjsize(current->fs);
45524 @@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_fil
45525 if (len < 1)
45526 len = 1;
45527 seq_printf(m, "%*c", len, ' ');
45528 - seq_path(m, &file->f_path, "");
45529 + seq_path(m, &file->f_path, "\n\\");
45530 }
45531
45532 seq_putc(m, '\n');
45533 diff -urNp linux-2.6.32.45/fs/readdir.c linux-2.6.32.45/fs/readdir.c
45534 --- linux-2.6.32.45/fs/readdir.c 2011-03-27 14:31:47.000000000 -0400
45535 +++ linux-2.6.32.45/fs/readdir.c 2011-04-17 15:56:46.000000000 -0400
45536 @@ -16,6 +16,7 @@
45537 #include <linux/security.h>
45538 #include <linux/syscalls.h>
45539 #include <linux/unistd.h>
45540 +#include <linux/namei.h>
45541
45542 #include <asm/uaccess.h>
45543
45544 @@ -67,6 +68,7 @@ struct old_linux_dirent {
45545
45546 struct readdir_callback {
45547 struct old_linux_dirent __user * dirent;
45548 + struct file * file;
45549 int result;
45550 };
45551
45552 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
45553 buf->result = -EOVERFLOW;
45554 return -EOVERFLOW;
45555 }
45556 +
45557 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
45558 + return 0;
45559 +
45560 buf->result++;
45561 dirent = buf->dirent;
45562 if (!access_ok(VERIFY_WRITE, dirent,
45563 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
45564
45565 buf.result = 0;
45566 buf.dirent = dirent;
45567 + buf.file = file;
45568
45569 error = vfs_readdir(file, fillonedir, &buf);
45570 if (buf.result)
45571 @@ -142,6 +149,7 @@ struct linux_dirent {
45572 struct getdents_callback {
45573 struct linux_dirent __user * current_dir;
45574 struct linux_dirent __user * previous;
45575 + struct file * file;
45576 int count;
45577 int error;
45578 };
45579 @@ -162,6 +170,10 @@ static int filldir(void * __buf, const c
45580 buf->error = -EOVERFLOW;
45581 return -EOVERFLOW;
45582 }
45583 +
45584 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
45585 + return 0;
45586 +
45587 dirent = buf->previous;
45588 if (dirent) {
45589 if (__put_user(offset, &dirent->d_off))
45590 @@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
45591 buf.previous = NULL;
45592 buf.count = count;
45593 buf.error = 0;
45594 + buf.file = file;
45595
45596 error = vfs_readdir(file, filldir, &buf);
45597 if (error >= 0)
45598 @@ -228,6 +241,7 @@ out:
45599 struct getdents_callback64 {
45600 struct linux_dirent64 __user * current_dir;
45601 struct linux_dirent64 __user * previous;
45602 + struct file *file;
45603 int count;
45604 int error;
45605 };
45606 @@ -242,6 +256,10 @@ static int filldir64(void * __buf, const
45607 buf->error = -EINVAL; /* only used if we fail.. */
45608 if (reclen > buf->count)
45609 return -EINVAL;
45610 +
45611 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
45612 + return 0;
45613 +
45614 dirent = buf->previous;
45615 if (dirent) {
45616 if (__put_user(offset, &dirent->d_off))
45617 @@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
45618
45619 buf.current_dir = dirent;
45620 buf.previous = NULL;
45621 + buf.file = file;
45622 buf.count = count;
45623 buf.error = 0;
45624
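
The fs/readdir.c hunks above all follow one pattern: each getdents-style callback struct gains a struct file back-pointer, the syscall fills it in, and every filldir callback first asks gr_acl_handle_filldir() whether the entry may be shown; returning 0 skips that entry while letting the directory walk continue. A minimal sketch of that callback shape, with a hypothetical may_show_entry() standing in for the grsecurity hook (it is an assumption, not a real kernel API):

/* Sketch only: the filldir filtering pattern used by the hunks above.
 * may_show_entry() is a placeholder for gr_acl_handle_filldir(). */
struct demo_callback {
        struct file *file;      /* back-pointer added so the filter can see the dir */
        int count;
        int error;
};

static int demo_filldir(void *__buf, const char *name, int namlen,
                        loff_t offset, u64 ino, unsigned int d_type)
{
        struct demo_callback *buf = __buf;

        /* Hide the entry but keep iterating: returning 0 tells
         * vfs_readdir() this entry was "consumed". */
        if (!may_show_entry(buf->file, name, namlen, ino))
                return 0;

        /* ... copy the dirent to userspace exactly as the original filldir does ... */
        return 0;
}
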
45625 diff -urNp linux-2.6.32.45/fs/reiserfs/dir.c linux-2.6.32.45/fs/reiserfs/dir.c
45626 --- linux-2.6.32.45/fs/reiserfs/dir.c 2011-03-27 14:31:47.000000000 -0400
45627 +++ linux-2.6.32.45/fs/reiserfs/dir.c 2011-05-16 21:46:57.000000000 -0400
45628 @@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
45629 struct reiserfs_dir_entry de;
45630 int ret = 0;
45631
45632 + pax_track_stack();
45633 +
45634 reiserfs_write_lock(inode->i_sb);
45635
45636 reiserfs_check_lock_depth(inode->i_sb, "readdir");
45637 diff -urNp linux-2.6.32.45/fs/reiserfs/do_balan.c linux-2.6.32.45/fs/reiserfs/do_balan.c
45638 --- linux-2.6.32.45/fs/reiserfs/do_balan.c 2011-03-27 14:31:47.000000000 -0400
45639 +++ linux-2.6.32.45/fs/reiserfs/do_balan.c 2011-04-17 15:56:46.000000000 -0400
45640 @@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb,
45641 return;
45642 }
45643
45644 - atomic_inc(&(fs_generation(tb->tb_sb)));
45645 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
45646 do_balance_starts(tb);
45647
45648 /* balance leaf returns 0 except if combining L R and S into
45649 diff -urNp linux-2.6.32.45/fs/reiserfs/item_ops.c linux-2.6.32.45/fs/reiserfs/item_ops.c
45650 --- linux-2.6.32.45/fs/reiserfs/item_ops.c 2011-03-27 14:31:47.000000000 -0400
45651 +++ linux-2.6.32.45/fs/reiserfs/item_ops.c 2011-04-17 15:56:46.000000000 -0400
45652 @@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_i
45653 vi->vi_index, vi->vi_type, vi->vi_ih);
45654 }
45655
45656 -static struct item_operations stat_data_ops = {
45657 +static const struct item_operations stat_data_ops = {
45658 .bytes_number = sd_bytes_number,
45659 .decrement_key = sd_decrement_key,
45660 .is_left_mergeable = sd_is_left_mergeable,
45661 @@ -196,7 +196,7 @@ static void direct_print_vi(struct virtu
45662 vi->vi_index, vi->vi_type, vi->vi_ih);
45663 }
45664
45665 -static struct item_operations direct_ops = {
45666 +static const struct item_operations direct_ops = {
45667 .bytes_number = direct_bytes_number,
45668 .decrement_key = direct_decrement_key,
45669 .is_left_mergeable = direct_is_left_mergeable,
45670 @@ -341,7 +341,7 @@ static void indirect_print_vi(struct vir
45671 vi->vi_index, vi->vi_type, vi->vi_ih);
45672 }
45673
45674 -static struct item_operations indirect_ops = {
45675 +static const struct item_operations indirect_ops = {
45676 .bytes_number = indirect_bytes_number,
45677 .decrement_key = indirect_decrement_key,
45678 .is_left_mergeable = indirect_is_left_mergeable,
45679 @@ -628,7 +628,7 @@ static void direntry_print_vi(struct vir
45680 printk("\n");
45681 }
45682
45683 -static struct item_operations direntry_ops = {
45684 +static const struct item_operations direntry_ops = {
45685 .bytes_number = direntry_bytes_number,
45686 .decrement_key = direntry_decrement_key,
45687 .is_left_mergeable = direntry_is_left_mergeable,
45688 @@ -724,7 +724,7 @@ static void errcatch_print_vi(struct vir
45689 "Invalid item type observed, run fsck ASAP");
45690 }
45691
45692 -static struct item_operations errcatch_ops = {
45693 +static const struct item_operations errcatch_ops = {
45694 errcatch_bytes_number,
45695 errcatch_decrement_key,
45696 errcatch_is_left_mergeable,
45697 @@ -746,7 +746,7 @@ static struct item_operations errcatch_o
45698 #error Item types must use disk-format assigned values.
45699 #endif
45700
45701 -struct item_operations *item_ops[TYPE_ANY + 1] = {
45702 +const struct item_operations * const item_ops[TYPE_ANY + 1] = {
45703 &stat_data_ops,
45704 &indirect_ops,
45705 &direct_ops,
45706 diff -urNp linux-2.6.32.45/fs/reiserfs/journal.c linux-2.6.32.45/fs/reiserfs/journal.c
45707 --- linux-2.6.32.45/fs/reiserfs/journal.c 2011-03-27 14:31:47.000000000 -0400
45708 +++ linux-2.6.32.45/fs/reiserfs/journal.c 2011-05-16 21:46:57.000000000 -0400
45709 @@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_brea
45710 struct buffer_head *bh;
45711 int i, j;
45712
45713 + pax_track_stack();
45714 +
45715 bh = __getblk(dev, block, bufsize);
45716 if (buffer_uptodate(bh))
45717 return (bh);
45718 diff -urNp linux-2.6.32.45/fs/reiserfs/namei.c linux-2.6.32.45/fs/reiserfs/namei.c
45719 --- linux-2.6.32.45/fs/reiserfs/namei.c 2011-03-27 14:31:47.000000000 -0400
45720 +++ linux-2.6.32.45/fs/reiserfs/namei.c 2011-05-16 21:46:57.000000000 -0400
45721 @@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode
45722 unsigned long savelink = 1;
45723 struct timespec ctime;
45724
45725 + pax_track_stack();
45726 +
45727 /* three balancings: (1) old name removal, (2) new name insertion
45728 and (3) maybe "save" link insertion
45729 stat data updates: (1) old directory,
45730 diff -urNp linux-2.6.32.45/fs/reiserfs/procfs.c linux-2.6.32.45/fs/reiserfs/procfs.c
45731 --- linux-2.6.32.45/fs/reiserfs/procfs.c 2011-03-27 14:31:47.000000000 -0400
45732 +++ linux-2.6.32.45/fs/reiserfs/procfs.c 2011-05-16 21:46:57.000000000 -0400
45733 @@ -123,7 +123,7 @@ static int show_super(struct seq_file *m
45734 "SMALL_TAILS " : "NO_TAILS ",
45735 replay_only(sb) ? "REPLAY_ONLY " : "",
45736 convert_reiserfs(sb) ? "CONV " : "",
45737 - atomic_read(&r->s_generation_counter),
45738 + atomic_read_unchecked(&r->s_generation_counter),
45739 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
45740 SF(s_do_balance), SF(s_unneeded_left_neighbor),
45741 SF(s_good_search_by_key_reada), SF(s_bmaps),
45742 @@ -309,6 +309,8 @@ static int show_journal(struct seq_file
45743 struct journal_params *jp = &rs->s_v1.s_journal;
45744 char b[BDEVNAME_SIZE];
45745
45746 + pax_track_stack();
45747 +
45748 seq_printf(m, /* on-disk fields */
45749 "jp_journal_1st_block: \t%i\n"
45750 "jp_journal_dev: \t%s[%x]\n"
45751 diff -urNp linux-2.6.32.45/fs/reiserfs/stree.c linux-2.6.32.45/fs/reiserfs/stree.c
45752 --- linux-2.6.32.45/fs/reiserfs/stree.c 2011-03-27 14:31:47.000000000 -0400
45753 +++ linux-2.6.32.45/fs/reiserfs/stree.c 2011-05-16 21:46:57.000000000 -0400
45754 @@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs
45755 int iter = 0;
45756 #endif
45757
45758 + pax_track_stack();
45759 +
45760 BUG_ON(!th->t_trans_id);
45761
45762 init_tb_struct(th, &s_del_balance, sb, path,
45763 @@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct r
45764 int retval;
45765 int quota_cut_bytes = 0;
45766
45767 + pax_track_stack();
45768 +
45769 BUG_ON(!th->t_trans_id);
45770
45771 le_key2cpu_key(&cpu_key, key);
45772 @@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiser
45773 int quota_cut_bytes;
45774 loff_t tail_pos = 0;
45775
45776 + pax_track_stack();
45777 +
45778 BUG_ON(!th->t_trans_id);
45779
45780 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
45781 @@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reis
45782 int retval;
45783 int fs_gen;
45784
45785 + pax_track_stack();
45786 +
45787 BUG_ON(!th->t_trans_id);
45788
45789 fs_gen = get_generation(inode->i_sb);
45790 @@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs
45791 int fs_gen = 0;
45792 int quota_bytes = 0;
45793
45794 + pax_track_stack();
45795 +
45796 BUG_ON(!th->t_trans_id);
45797
45798 if (inode) { /* Do we count quotas for item? */
45799 diff -urNp linux-2.6.32.45/fs/reiserfs/super.c linux-2.6.32.45/fs/reiserfs/super.c
45800 --- linux-2.6.32.45/fs/reiserfs/super.c 2011-03-27 14:31:47.000000000 -0400
45801 +++ linux-2.6.32.45/fs/reiserfs/super.c 2011-05-16 21:46:57.000000000 -0400
45802 @@ -912,6 +912,8 @@ static int reiserfs_parse_options(struct
45803 {.option_name = NULL}
45804 };
45805
45806 + pax_track_stack();
45807 +
45808 *blocks = 0;
45809 if (!options || !*options)
45810 /* use default configuration: create tails, journaling on, no
45811 diff -urNp linux-2.6.32.45/fs/select.c linux-2.6.32.45/fs/select.c
45812 --- linux-2.6.32.45/fs/select.c 2011-03-27 14:31:47.000000000 -0400
45813 +++ linux-2.6.32.45/fs/select.c 2011-05-16 21:46:57.000000000 -0400
45814 @@ -20,6 +20,7 @@
45815 #include <linux/module.h>
45816 #include <linux/slab.h>
45817 #include <linux/poll.h>
45818 +#include <linux/security.h>
45819 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
45820 #include <linux/file.h>
45821 #include <linux/fdtable.h>
45822 @@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, s
45823 int retval, i, timed_out = 0;
45824 unsigned long slack = 0;
45825
45826 + pax_track_stack();
45827 +
45828 rcu_read_lock();
45829 retval = max_select_fd(n, fds);
45830 rcu_read_unlock();
45831 @@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user
45832 /* Allocate small arguments on the stack to save memory and be faster */
45833 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
45834
45835 + pax_track_stack();
45836 +
45837 ret = -EINVAL;
45838 if (n < 0)
45839 goto out_nofds;
45840 @@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *uf
45841 struct poll_list *walk = head;
45842 unsigned long todo = nfds;
45843
45844 + pax_track_stack();
45845 +
45846 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
45847 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
45848 return -EINVAL;
45849
45850 diff -urNp linux-2.6.32.45/fs/seq_file.c linux-2.6.32.45/fs/seq_file.c
45851 --- linux-2.6.32.45/fs/seq_file.c 2011-03-27 14:31:47.000000000 -0400
45852 +++ linux-2.6.32.45/fs/seq_file.c 2011-08-23 21:22:32.000000000 -0400
45853 @@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
45854 return 0;
45855 }
45856 if (!m->buf) {
45857 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
45858 + m->size = PAGE_SIZE;
45859 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
45860 if (!m->buf)
45861 return -ENOMEM;
45862 }
45863 @@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
45864 Eoverflow:
45865 m->op->stop(m, p);
45866 kfree(m->buf);
45867 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
45868 + m->size <<= 1;
45869 + m->buf = kmalloc(m->size, GFP_KERNEL);
45870 return !m->buf ? -ENOMEM : -EAGAIN;
45871 }
45872
45873 @@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
45874 m->version = file->f_version;
45875 /* grab buffer if we didn't have one */
45876 if (!m->buf) {
45877 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
45878 + m->size = PAGE_SIZE;
45879 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
45880 if (!m->buf)
45881 goto Enomem;
45882 }
45883 @@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
45884 goto Fill;
45885 m->op->stop(m, p);
45886 kfree(m->buf);
45887 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
45888 + m->size <<= 1;
45889 + m->buf = kmalloc(m->size, GFP_KERNEL);
45890 if (!m->buf)
45891 goto Enomem;
45892 m->count = 0;
45893 @@ -551,7 +555,7 @@ static void single_stop(struct seq_file
45894 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
45895 void *data)
45896 {
45897 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
45898 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
45899 int res = -ENOMEM;
45900
45901 if (op) {
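
The fs/seq_file.c hunks do not change behaviour: they only split assignments like m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL) so that the size update becomes a statement of its own instead of a side effect buried in the allocator's argument, presumably to keep allocation-size arithmetic easy to audit and to instrument. A hedged sketch of the resulting shape, modelled on the patched Eoverflow path:

/* Sketch: grow a seq_file buffer the way the patched traverse()/seq_read()
 * now do it - the size update is explicit, not hidden inside the call. */
static int demo_grow_buf(struct seq_file *m)
{
        kfree(m->buf);
        m->size <<= 1;                          /* update the size first ...            */
        m->buf = kmalloc(m->size, GFP_KERNEL);  /* ... then pass it to the allocator    */
        return m->buf ? 0 : -ENOMEM;
}
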
45902 diff -urNp linux-2.6.32.45/fs/smbfs/proc.c linux-2.6.32.45/fs/smbfs/proc.c
45903 --- linux-2.6.32.45/fs/smbfs/proc.c 2011-03-27 14:31:47.000000000 -0400
45904 +++ linux-2.6.32.45/fs/smbfs/proc.c 2011-08-05 20:33:55.000000000 -0400
45905 @@ -266,9 +266,9 @@ int smb_setcodepage(struct smb_sb_info *
45906
45907 out:
45908 if (server->local_nls != NULL && server->remote_nls != NULL)
45909 - server->ops->convert = convert_cp;
45910 + *(void **)&server->ops->convert = convert_cp;
45911 else
45912 - server->ops->convert = convert_memcpy;
45913 + *(void **)&server->ops->convert = convert_memcpy;
45914
45915 smb_unlock_server(server);
45916 return n;
45917 @@ -933,9 +933,9 @@ smb_newconn(struct smb_sb_info *server,
45918
45919 /* FIXME: the win9x code wants to modify these ... (seek/trunc bug) */
45920 if (server->mnt->flags & SMB_MOUNT_OLDATTR) {
45921 - server->ops->getattr = smb_proc_getattr_core;
45922 + *(void **)&server->ops->getattr = smb_proc_getattr_core;
45923 } else if (server->mnt->flags & SMB_MOUNT_DIRATTR) {
45924 - server->ops->getattr = smb_proc_getattr_ff;
45925 + *(void **)&server->ops->getattr = smb_proc_getattr_ff;
45926 }
45927
45928 /* Decode server capabilities */
45929 @@ -3439,7 +3439,7 @@ out:
45930 static void
45931 install_ops(struct smb_ops *dst, struct smb_ops *src)
45932 {
45933 - memcpy(dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
45934 + memcpy((void *)dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
45935 }
45936
45937 /* < LANMAN2 */
45938 diff -urNp linux-2.6.32.45/fs/smbfs/symlink.c linux-2.6.32.45/fs/smbfs/symlink.c
45939 --- linux-2.6.32.45/fs/smbfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
45940 +++ linux-2.6.32.45/fs/smbfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
45941 @@ -55,7 +55,7 @@ static void *smb_follow_link(struct dent
45942
45943 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
45944 {
45945 - char *s = nd_get_link(nd);
45946 + const char *s = nd_get_link(nd);
45947 if (!IS_ERR(s))
45948 __putname(s);
45949 }
45950 diff -urNp linux-2.6.32.45/fs/splice.c linux-2.6.32.45/fs/splice.c
45951 --- linux-2.6.32.45/fs/splice.c 2011-03-27 14:31:47.000000000 -0400
45952 +++ linux-2.6.32.45/fs/splice.c 2011-05-16 21:46:57.000000000 -0400
45953 @@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode
45954 pipe_lock(pipe);
45955
45956 for (;;) {
45957 - if (!pipe->readers) {
45958 + if (!atomic_read(&pipe->readers)) {
45959 send_sig(SIGPIPE, current, 0);
45960 if (!ret)
45961 ret = -EPIPE;
45962 @@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode
45963 do_wakeup = 0;
45964 }
45965
45966 - pipe->waiting_writers++;
45967 + atomic_inc(&pipe->waiting_writers);
45968 pipe_wait(pipe);
45969 - pipe->waiting_writers--;
45970 + atomic_dec(&pipe->waiting_writers);
45971 }
45972
45973 pipe_unlock(pipe);
45974 @@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *
45975 .spd_release = spd_release_page,
45976 };
45977
45978 + pax_track_stack();
45979 +
45980 index = *ppos >> PAGE_CACHE_SHIFT;
45981 loff = *ppos & ~PAGE_CACHE_MASK;
45982 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
45983 @@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file
45984 old_fs = get_fs();
45985 set_fs(get_ds());
45986 /* The cast to a user pointer is valid due to the set_fs() */
45987 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
45988 + res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
45989 set_fs(old_fs);
45990
45991 return res;
45992 @@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file
45993 old_fs = get_fs();
45994 set_fs(get_ds());
45995 /* The cast to a user pointer is valid due to the set_fs() */
45996 - res = vfs_write(file, (const char __user *)buf, count, &pos);
45997 + res = vfs_write(file, (__force const char __user *)buf, count, &pos);
45998 set_fs(old_fs);
45999
46000 return res;
46001 @@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct
46002 .spd_release = spd_release_page,
46003 };
46004
46005 + pax_track_stack();
46006 +
46007 index = *ppos >> PAGE_CACHE_SHIFT;
46008 offset = *ppos & ~PAGE_CACHE_MASK;
46009 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
46010 @@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct
46011 goto err;
46012
46013 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
46014 - vec[i].iov_base = (void __user *) page_address(page);
46015 + vec[i].iov_base = (__force void __user *) page_address(page);
46016 vec[i].iov_len = this_len;
46017 pages[i] = page;
46018 spd.nr_pages++;
46019 @@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
46020 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
46021 {
46022 while (!pipe->nrbufs) {
46023 - if (!pipe->writers)
46024 + if (!atomic_read(&pipe->writers))
46025 return 0;
46026
46027 - if (!pipe->waiting_writers && sd->num_spliced)
46028 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
46029 return 0;
46030
46031 if (sd->flags & SPLICE_F_NONBLOCK)
46032 @@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct fi
46033 * out of the pipe right after the splice_to_pipe(). So set
46034 * PIPE_READERS appropriately.
46035 */
46036 - pipe->readers = 1;
46037 + atomic_set(&pipe->readers, 1);
46038
46039 current->splice_pipe = pipe;
46040 }
46041 @@ -1592,6 +1596,8 @@ static long vmsplice_to_pipe(struct file
46042 .spd_release = spd_release_page,
46043 };
46044
46045 + pax_track_stack();
46046 +
46047 pipe = pipe_info(file->f_path.dentry->d_inode);
46048 if (!pipe)
46049 return -EBADF;
46050 @@ -1700,9 +1706,9 @@ static int ipipe_prep(struct pipe_inode_
46051 ret = -ERESTARTSYS;
46052 break;
46053 }
46054 - if (!pipe->writers)
46055 + if (!atomic_read(&pipe->writers))
46056 break;
46057 - if (!pipe->waiting_writers) {
46058 + if (!atomic_read(&pipe->waiting_writers)) {
46059 if (flags & SPLICE_F_NONBLOCK) {
46060 ret = -EAGAIN;
46061 break;
46062 @@ -1734,7 +1740,7 @@ static int opipe_prep(struct pipe_inode_
46063 pipe_lock(pipe);
46064
46065 while (pipe->nrbufs >= PIPE_BUFFERS) {
46066 - if (!pipe->readers) {
46067 + if (!atomic_read(&pipe->readers)) {
46068 send_sig(SIGPIPE, current, 0);
46069 ret = -EPIPE;
46070 break;
46071 @@ -1747,9 +1753,9 @@ static int opipe_prep(struct pipe_inode_
46072 ret = -ERESTARTSYS;
46073 break;
46074 }
46075 - pipe->waiting_writers++;
46076 + atomic_inc(&pipe->waiting_writers);
46077 pipe_wait(pipe);
46078 - pipe->waiting_writers--;
46079 + atomic_dec(&pipe->waiting_writers);
46080 }
46081
46082 pipe_unlock(pipe);
46083 @@ -1785,14 +1791,14 @@ retry:
46084 pipe_double_lock(ipipe, opipe);
46085
46086 do {
46087 - if (!opipe->readers) {
46088 + if (!atomic_read(&opipe->readers)) {
46089 send_sig(SIGPIPE, current, 0);
46090 if (!ret)
46091 ret = -EPIPE;
46092 break;
46093 }
46094
46095 - if (!ipipe->nrbufs && !ipipe->writers)
46096 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
46097 break;
46098
46099 /*
46100 @@ -1892,7 +1898,7 @@ static int link_pipe(struct pipe_inode_i
46101 pipe_double_lock(ipipe, opipe);
46102
46103 do {
46104 - if (!opipe->readers) {
46105 + if (!atomic_read(&opipe->readers)) {
46106 send_sig(SIGPIPE, current, 0);
46107 if (!ret)
46108 ret = -EPIPE;
46109 @@ -1937,7 +1943,7 @@ static int link_pipe(struct pipe_inode_i
46110 * return EAGAIN if we have the potential of some data in the
46111 * future, otherwise just return 0
46112 */
46113 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
46114 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
46115 ret = -EAGAIN;
46116
46117 pipe_unlock(ipipe);
46118 diff -urNp linux-2.6.32.45/fs/sysfs/file.c linux-2.6.32.45/fs/sysfs/file.c
46119 --- linux-2.6.32.45/fs/sysfs/file.c 2011-03-27 14:31:47.000000000 -0400
46120 +++ linux-2.6.32.45/fs/sysfs/file.c 2011-05-04 17:56:20.000000000 -0400
46121 @@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
46122
46123 struct sysfs_open_dirent {
46124 atomic_t refcnt;
46125 - atomic_t event;
46126 + atomic_unchecked_t event;
46127 wait_queue_head_t poll;
46128 struct list_head buffers; /* goes through sysfs_buffer.list */
46129 };
46130 @@ -53,7 +53,7 @@ struct sysfs_buffer {
46131 size_t count;
46132 loff_t pos;
46133 char * page;
46134 - struct sysfs_ops * ops;
46135 + const struct sysfs_ops * ops;
46136 struct mutex mutex;
46137 int needs_read_fill;
46138 int event;
46139 @@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentr
46140 {
46141 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
46142 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
46143 - struct sysfs_ops * ops = buffer->ops;
46144 + const struct sysfs_ops * ops = buffer->ops;
46145 int ret = 0;
46146 ssize_t count;
46147
46148 @@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentr
46149 if (!sysfs_get_active_two(attr_sd))
46150 return -ENODEV;
46151
46152 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
46153 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
46154 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
46155
46156 sysfs_put_active_two(attr_sd);
46157 @@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentr
46158 {
46159 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
46160 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
46161 - struct sysfs_ops * ops = buffer->ops;
46162 + const struct sysfs_ops * ops = buffer->ops;
46163 int rc;
46164
46165 /* need attr_sd for attr and ops, its parent for kobj */
46166 @@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct
46167 return -ENOMEM;
46168
46169 atomic_set(&new_od->refcnt, 0);
46170 - atomic_set(&new_od->event, 1);
46171 + atomic_set_unchecked(&new_od->event, 1);
46172 init_waitqueue_head(&new_od->poll);
46173 INIT_LIST_HEAD(&new_od->buffers);
46174 goto retry;
46175 @@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode
46176 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
46177 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
46178 struct sysfs_buffer *buffer;
46179 - struct sysfs_ops *ops;
46180 + const struct sysfs_ops *ops;
46181 int error = -EACCES;
46182 char *p;
46183
46184 @@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct fi
46185
46186 sysfs_put_active_two(attr_sd);
46187
46188 - if (buffer->event != atomic_read(&od->event))
46189 + if (buffer->event != atomic_read_unchecked(&od->event))
46190 goto trigger;
46191
46192 return DEFAULT_POLLMASK;
46193 @@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_di
46194
46195 od = sd->s_attr.open;
46196 if (od) {
46197 - atomic_inc(&od->event);
46198 + atomic_inc_unchecked(&od->event);
46199 wake_up_interruptible(&od->poll);
46200 }
46201
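
In fs/sysfs/file.c the open_dirent event counter moves from atomic_t to atomic_unchecked_t, with the matching *_unchecked accessors. Under PaX's reference-counter overflow protection, plain atomic_t is treated as a refcount and overflow is trapped; counters that are mere event/sequence numbers and may legitimately wrap are therefore switched to the unchecked variant. A sketch of the pattern (atomic_unchecked_t and its helpers are provided by the PaX portion of this patch, not by a vanilla kernel):

/* Sketch: an event counter that may wrap, kept out of the overflow-checked
 * refcount scheme. Requires a PaX-patched kernel; atomic_unchecked_t does
 * not exist upstream. */
static atomic_unchecked_t demo_event = ATOMIC_INIT(1);

static void demo_notify(void)
{
        atomic_inc_unchecked(&demo_event);              /* wrap-around is harmless here */
}

static int demo_changed_since(int last_seen)
{
        return last_seen != atomic_read_unchecked(&demo_event);
}
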
46202 diff -urNp linux-2.6.32.45/fs/sysfs/mount.c linux-2.6.32.45/fs/sysfs/mount.c
46203 --- linux-2.6.32.45/fs/sysfs/mount.c 2011-03-27 14:31:47.000000000 -0400
46204 +++ linux-2.6.32.45/fs/sysfs/mount.c 2011-04-17 15:56:46.000000000 -0400
46205 @@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
46206 .s_name = "",
46207 .s_count = ATOMIC_INIT(1),
46208 .s_flags = SYSFS_DIR,
46209 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
46210 + .s_mode = S_IFDIR | S_IRWXU,
46211 +#else
46212 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
46213 +#endif
46214 .s_ino = 1,
46215 };
46216
46217 diff -urNp linux-2.6.32.45/fs/sysfs/symlink.c linux-2.6.32.45/fs/sysfs/symlink.c
46218 --- linux-2.6.32.45/fs/sysfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
46219 +++ linux-2.6.32.45/fs/sysfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
46220 @@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct de
46221
46222 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
46223 {
46224 - char *page = nd_get_link(nd);
46225 + const char *page = nd_get_link(nd);
46226 if (!IS_ERR(page))
46227 free_page((unsigned long)page);
46228 }
46229 diff -urNp linux-2.6.32.45/fs/udf/balloc.c linux-2.6.32.45/fs/udf/balloc.c
46230 --- linux-2.6.32.45/fs/udf/balloc.c 2011-03-27 14:31:47.000000000 -0400
46231 +++ linux-2.6.32.45/fs/udf/balloc.c 2011-04-17 15:56:46.000000000 -0400
46232 @@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struc
46233
46234 mutex_lock(&sbi->s_alloc_mutex);
46235 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
46236 - if (bloc->logicalBlockNum < 0 ||
46237 - (bloc->logicalBlockNum + count) >
46238 - partmap->s_partition_len) {
46239 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
46240 udf_debug("%d < %d || %d + %d > %d\n",
46241 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
46242 count, partmap->s_partition_len);
46243 @@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct
46244
46245 mutex_lock(&sbi->s_alloc_mutex);
46246 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
46247 - if (bloc->logicalBlockNum < 0 ||
46248 - (bloc->logicalBlockNum + count) >
46249 - partmap->s_partition_len) {
46250 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
46251 udf_debug("%d < %d || %d + %d > %d\n",
46252 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
46253 partmap->s_partition_len);
46254 diff -urNp linux-2.6.32.45/fs/udf/inode.c linux-2.6.32.45/fs/udf/inode.c
46255 --- linux-2.6.32.45/fs/udf/inode.c 2011-03-27 14:31:47.000000000 -0400
46256 +++ linux-2.6.32.45/fs/udf/inode.c 2011-05-16 21:46:57.000000000 -0400
46257 @@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(
46258 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
46259 int lastblock = 0;
46260
46261 + pax_track_stack();
46262 +
46263 prev_epos.offset = udf_file_entry_alloc_offset(inode);
46264 prev_epos.block = iinfo->i_location;
46265 prev_epos.bh = NULL;
46266 diff -urNp linux-2.6.32.45/fs/udf/misc.c linux-2.6.32.45/fs/udf/misc.c
46267 --- linux-2.6.32.45/fs/udf/misc.c 2011-03-27 14:31:47.000000000 -0400
46268 +++ linux-2.6.32.45/fs/udf/misc.c 2011-04-23 12:56:11.000000000 -0400
46269 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
46270
46271 u8 udf_tag_checksum(const struct tag *t)
46272 {
46273 - u8 *data = (u8 *)t;
46274 + const u8 *data = (const u8 *)t;
46275 u8 checksum = 0;
46276 int i;
46277 for (i = 0; i < sizeof(struct tag); ++i)
46278 diff -urNp linux-2.6.32.45/fs/utimes.c linux-2.6.32.45/fs/utimes.c
46279 --- linux-2.6.32.45/fs/utimes.c 2011-03-27 14:31:47.000000000 -0400
46280 +++ linux-2.6.32.45/fs/utimes.c 2011-04-17 15:56:46.000000000 -0400
46281 @@ -1,6 +1,7 @@
46282 #include <linux/compiler.h>
46283 #include <linux/file.h>
46284 #include <linux/fs.h>
46285 +#include <linux/security.h>
46286 #include <linux/linkage.h>
46287 #include <linux/mount.h>
46288 #include <linux/namei.h>
46289 @@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
46290 goto mnt_drop_write_and_out;
46291 }
46292 }
46293 +
46294 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
46295 + error = -EACCES;
46296 + goto mnt_drop_write_and_out;
46297 + }
46298 +
46299 mutex_lock(&inode->i_mutex);
46300 error = notify_change(path->dentry, &newattrs);
46301 mutex_unlock(&inode->i_mutex);
46302 diff -urNp linux-2.6.32.45/fs/xattr_acl.c linux-2.6.32.45/fs/xattr_acl.c
46303 --- linux-2.6.32.45/fs/xattr_acl.c 2011-03-27 14:31:47.000000000 -0400
46304 +++ linux-2.6.32.45/fs/xattr_acl.c 2011-04-17 15:56:46.000000000 -0400
46305 @@ -17,8 +17,8 @@
46306 struct posix_acl *
46307 posix_acl_from_xattr(const void *value, size_t size)
46308 {
46309 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
46310 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
46311 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
46312 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
46313 int count;
46314 struct posix_acl *acl;
46315 struct posix_acl_entry *acl_e;
46316 diff -urNp linux-2.6.32.45/fs/xattr.c linux-2.6.32.45/fs/xattr.c
46317 --- linux-2.6.32.45/fs/xattr.c 2011-03-27 14:31:47.000000000 -0400
46318 +++ linux-2.6.32.45/fs/xattr.c 2011-04-17 15:56:46.000000000 -0400
46319 @@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
46320 * Extended attribute SET operations
46321 */
46322 static long
46323 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
46324 +setxattr(struct path *path, const char __user *name, const void __user *value,
46325 size_t size, int flags)
46326 {
46327 int error;
46328 @@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __
46329 return PTR_ERR(kvalue);
46330 }
46331
46332 - error = vfs_setxattr(d, kname, kvalue, size, flags);
46333 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
46334 + error = -EACCES;
46335 + goto out;
46336 + }
46337 +
46338 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
46339 +out:
46340 kfree(kvalue);
46341 return error;
46342 }
46343 @@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
46344 return error;
46345 error = mnt_want_write(path.mnt);
46346 if (!error) {
46347 - error = setxattr(path.dentry, name, value, size, flags);
46348 + error = setxattr(&path, name, value, size, flags);
46349 mnt_drop_write(path.mnt);
46350 }
46351 path_put(&path);
46352 @@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
46353 return error;
46354 error = mnt_want_write(path.mnt);
46355 if (!error) {
46356 - error = setxattr(path.dentry, name, value, size, flags);
46357 + error = setxattr(&path, name, value, size, flags);
46358 mnt_drop_write(path.mnt);
46359 }
46360 path_put(&path);
46361 @@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
46362 const void __user *,value, size_t, size, int, flags)
46363 {
46364 struct file *f;
46365 - struct dentry *dentry;
46366 int error = -EBADF;
46367
46368 f = fget(fd);
46369 if (!f)
46370 return error;
46371 - dentry = f->f_path.dentry;
46372 - audit_inode(NULL, dentry);
46373 + audit_inode(NULL, f->f_path.dentry);
46374 error = mnt_want_write_file(f);
46375 if (!error) {
46376 - error = setxattr(dentry, name, value, size, flags);
46377 + error = setxattr(&f->f_path, name, value, size, flags);
46378 mnt_drop_write(f->f_path.mnt);
46379 }
46380 fput(f);
46381 diff -urNp linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl32.c linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl32.c
46382 --- linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-03-27 14:31:47.000000000 -0400
46383 +++ linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-04-17 15:56:46.000000000 -0400
46384 @@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
46385 xfs_fsop_geom_t fsgeo;
46386 int error;
46387
46388 + memset(&fsgeo, 0, sizeof(fsgeo));
46389 error = xfs_fs_geometry(mp, &fsgeo, 3);
46390 if (error)
46391 return -error;
46392 diff -urNp linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl.c linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl.c
46393 --- linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 17:00:52.000000000 -0400
46394 +++ linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 20:07:09.000000000 -0400
46395 @@ -134,7 +134,7 @@ xfs_find_handle(
46396 }
46397
46398 error = -EFAULT;
46399 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
46400 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
46401 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
46402 goto out_put;
46403
46404 @@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
46405 if (IS_ERR(dentry))
46406 return PTR_ERR(dentry);
46407
46408 - kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
46409 + kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
46410 if (!kbuf)
46411 goto out_dput;
46412
46413 @@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
46414 xfs_mount_t *mp,
46415 void __user *arg)
46416 {
46417 - xfs_fsop_geom_t fsgeo;
46418 + xfs_fsop_geom_t fsgeo;
46419 int error;
46420
46421 error = xfs_fs_geometry(mp, &fsgeo, 3);
46422 diff -urNp linux-2.6.32.45/fs/xfs/linux-2.6/xfs_iops.c linux-2.6.32.45/fs/xfs/linux-2.6/xfs_iops.c
46423 --- linux-2.6.32.45/fs/xfs/linux-2.6/xfs_iops.c 2011-03-27 14:31:47.000000000 -0400
46424 +++ linux-2.6.32.45/fs/xfs/linux-2.6/xfs_iops.c 2011-04-17 15:56:46.000000000 -0400
46425 @@ -468,7 +468,7 @@ xfs_vn_put_link(
46426 struct nameidata *nd,
46427 void *p)
46428 {
46429 - char *s = nd_get_link(nd);
46430 + const char *s = nd_get_link(nd);
46431
46432 if (!IS_ERR(s))
46433 kfree(s);
46434 diff -urNp linux-2.6.32.45/fs/xfs/xfs_bmap.c linux-2.6.32.45/fs/xfs/xfs_bmap.c
46435 --- linux-2.6.32.45/fs/xfs/xfs_bmap.c 2011-03-27 14:31:47.000000000 -0400
46436 +++ linux-2.6.32.45/fs/xfs/xfs_bmap.c 2011-04-17 15:56:46.000000000 -0400
46437 @@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
46438 int nmap,
46439 int ret_nmap);
46440 #else
46441 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
46442 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
46443 #endif /* DEBUG */
46444
46445 #if defined(XFS_RW_TRACE)
46446 diff -urNp linux-2.6.32.45/fs/xfs/xfs_dir2_sf.c linux-2.6.32.45/fs/xfs/xfs_dir2_sf.c
46447 --- linux-2.6.32.45/fs/xfs/xfs_dir2_sf.c 2011-03-27 14:31:47.000000000 -0400
46448 +++ linux-2.6.32.45/fs/xfs/xfs_dir2_sf.c 2011-04-18 22:07:30.000000000 -0400
46449 @@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
46450 }
46451
46452 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
46453 - if (filldir(dirent, sfep->name, sfep->namelen,
46454 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
46455 + char name[sfep->namelen];
46456 + memcpy(name, sfep->name, sfep->namelen);
46457 + if (filldir(dirent, name, sfep->namelen,
46458 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
46459 + *offset = off & 0x7fffffff;
46460 + return 0;
46461 + }
46462 + } else if (filldir(dirent, sfep->name, sfep->namelen,
46463 off & 0x7fffffff, ino, DT_UNKNOWN)) {
46464 *offset = off & 0x7fffffff;
46465 return 0;
46466 diff -urNp linux-2.6.32.45/grsecurity/gracl_alloc.c linux-2.6.32.45/grsecurity/gracl_alloc.c
46467 --- linux-2.6.32.45/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
46468 +++ linux-2.6.32.45/grsecurity/gracl_alloc.c 2011-04-17 15:56:46.000000000 -0400
46469 @@ -0,0 +1,105 @@
46470 +#include <linux/kernel.h>
46471 +#include <linux/mm.h>
46472 +#include <linux/slab.h>
46473 +#include <linux/vmalloc.h>
46474 +#include <linux/gracl.h>
46475 +#include <linux/grsecurity.h>
46476 +
46477 +static unsigned long alloc_stack_next = 1;
46478 +static unsigned long alloc_stack_size = 1;
46479 +static void **alloc_stack;
46480 +
46481 +static __inline__ int
46482 +alloc_pop(void)
46483 +{
46484 + if (alloc_stack_next == 1)
46485 + return 0;
46486 +
46487 + kfree(alloc_stack[alloc_stack_next - 2]);
46488 +
46489 + alloc_stack_next--;
46490 +
46491 + return 1;
46492 +}
46493 +
46494 +static __inline__ int
46495 +alloc_push(void *buf)
46496 +{
46497 + if (alloc_stack_next >= alloc_stack_size)
46498 + return 1;
46499 +
46500 + alloc_stack[alloc_stack_next - 1] = buf;
46501 +
46502 + alloc_stack_next++;
46503 +
46504 + return 0;
46505 +}
46506 +
46507 +void *
46508 +acl_alloc(unsigned long len)
46509 +{
46510 + void *ret = NULL;
46511 +
46512 + if (!len || len > PAGE_SIZE)
46513 + goto out;
46514 +
46515 + ret = kmalloc(len, GFP_KERNEL);
46516 +
46517 + if (ret) {
46518 + if (alloc_push(ret)) {
46519 + kfree(ret);
46520 + ret = NULL;
46521 + }
46522 + }
46523 +
46524 +out:
46525 + return ret;
46526 +}
46527 +
46528 +void *
46529 +acl_alloc_num(unsigned long num, unsigned long len)
46530 +{
46531 + if (!len || (num > (PAGE_SIZE / len)))
46532 + return NULL;
46533 +
46534 + return acl_alloc(num * len);
46535 +}
46536 +
46537 +void
46538 +acl_free_all(void)
46539 +{
46540 + if (gr_acl_is_enabled() || !alloc_stack)
46541 + return;
46542 +
46543 + while (alloc_pop()) ;
46544 +
46545 + if (alloc_stack) {
46546 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
46547 + kfree(alloc_stack);
46548 + else
46549 + vfree(alloc_stack);
46550 + }
46551 +
46552 + alloc_stack = NULL;
46553 + alloc_stack_size = 1;
46554 + alloc_stack_next = 1;
46555 +
46556 + return;
46557 +}
46558 +
46559 +int
46560 +acl_alloc_stack_init(unsigned long size)
46561 +{
46562 + if ((size * sizeof (void *)) <= PAGE_SIZE)
46563 + alloc_stack =
46564 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
46565 + else
46566 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
46567 +
46568 + alloc_stack_size = size;
46569 +
46570 + if (!alloc_stack)
46571 + return 0;
46572 + else
46573 + return 1;
46574 +}
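
grsecurity/gracl_alloc.c (new in this patch) is a small arena-style allocator for the RBAC policy loader: acl_alloc_stack_init() sizes a pointer stack, every acl_alloc()/acl_alloc_num() result is pushed onto it, and acl_free_all() later pops and kfree()s everything in one pass when the policy is torn down. A hedged usage sketch built only from the functions defined above; the count and the error path are made-up example values, not taken from the loader itself:

/* Sketch: expected use of the allocation stack above by the RBAC loader. */
static int demo_load_policy(void)
{
        struct name_entry *ne;

        if (!acl_alloc_stack_init(4096))        /* room to track 4096 allocations */
                return -ENOMEM;

        ne = acl_alloc(sizeof(*ne));            /* tracked: released by acl_free_all() */
        if (ne == NULL)
                goto fail;

        /* ... parse and insert the rest of the policy ... */
        return 0;

fail:
        acl_free_all();                         /* pops and kfree()s every tracked buffer */
        return -ENOMEM;
}
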
46575 diff -urNp linux-2.6.32.45/grsecurity/gracl.c linux-2.6.32.45/grsecurity/gracl.c
46576 --- linux-2.6.32.45/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
46577 +++ linux-2.6.32.45/grsecurity/gracl.c 2011-07-14 20:02:48.000000000 -0400
46578 @@ -0,0 +1,4082 @@
46579 +#include <linux/kernel.h>
46580 +#include <linux/module.h>
46581 +#include <linux/sched.h>
46582 +#include <linux/mm.h>
46583 +#include <linux/file.h>
46584 +#include <linux/fs.h>
46585 +#include <linux/namei.h>
46586 +#include <linux/mount.h>
46587 +#include <linux/tty.h>
46588 +#include <linux/proc_fs.h>
46589 +#include <linux/smp_lock.h>
46590 +#include <linux/slab.h>
46591 +#include <linux/vmalloc.h>
46592 +#include <linux/types.h>
46593 +#include <linux/sysctl.h>
46594 +#include <linux/netdevice.h>
46595 +#include <linux/ptrace.h>
46596 +#include <linux/gracl.h>
46597 +#include <linux/gralloc.h>
46598 +#include <linux/grsecurity.h>
46599 +#include <linux/grinternal.h>
46600 +#include <linux/pid_namespace.h>
46601 +#include <linux/fdtable.h>
46602 +#include <linux/percpu.h>
46603 +
46604 +#include <asm/uaccess.h>
46605 +#include <asm/errno.h>
46606 +#include <asm/mman.h>
46607 +
46608 +static struct acl_role_db acl_role_set;
46609 +static struct name_db name_set;
46610 +static struct inodev_db inodev_set;
46611 +
46612 +/* for keeping track of userspace pointers used for subjects, so we
46613 + can share references in the kernel as well
46614 +*/
46615 +
46616 +static struct dentry *real_root;
46617 +static struct vfsmount *real_root_mnt;
46618 +
46619 +static struct acl_subj_map_db subj_map_set;
46620 +
46621 +static struct acl_role_label *default_role;
46622 +
46623 +static struct acl_role_label *role_list;
46624 +
46625 +static u16 acl_sp_role_value;
46626 +
46627 +extern char *gr_shared_page[4];
46628 +static DEFINE_MUTEX(gr_dev_mutex);
46629 +DEFINE_RWLOCK(gr_inode_lock);
46630 +
46631 +struct gr_arg *gr_usermode;
46632 +
46633 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
46634 +
46635 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
46636 +extern void gr_clear_learn_entries(void);
46637 +
46638 +#ifdef CONFIG_GRKERNSEC_RESLOG
46639 +extern void gr_log_resource(const struct task_struct *task,
46640 + const int res, const unsigned long wanted, const int gt);
46641 +#endif
46642 +
46643 +unsigned char *gr_system_salt;
46644 +unsigned char *gr_system_sum;
46645 +
46646 +static struct sprole_pw **acl_special_roles = NULL;
46647 +static __u16 num_sprole_pws = 0;
46648 +
46649 +static struct acl_role_label *kernel_role = NULL;
46650 +
46651 +static unsigned int gr_auth_attempts = 0;
46652 +static unsigned long gr_auth_expires = 0UL;
46653 +
46654 +#ifdef CONFIG_NET
46655 +extern struct vfsmount *sock_mnt;
46656 +#endif
46657 +extern struct vfsmount *pipe_mnt;
46658 +extern struct vfsmount *shm_mnt;
46659 +#ifdef CONFIG_HUGETLBFS
46660 +extern struct vfsmount *hugetlbfs_vfsmount;
46661 +#endif
46662 +
46663 +static struct acl_object_label *fakefs_obj_rw;
46664 +static struct acl_object_label *fakefs_obj_rwx;
46665 +
46666 +extern int gr_init_uidset(void);
46667 +extern void gr_free_uidset(void);
46668 +extern void gr_remove_uid(uid_t uid);
46669 +extern int gr_find_uid(uid_t uid);
46670 +
46671 +__inline__ int
46672 +gr_acl_is_enabled(void)
46673 +{
46674 + return (gr_status & GR_READY);
46675 +}
46676 +
46677 +#ifdef CONFIG_BTRFS_FS
46678 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
46679 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
46680 +#endif
46681 +
46682 +static inline dev_t __get_dev(const struct dentry *dentry)
46683 +{
46684 +#ifdef CONFIG_BTRFS_FS
46685 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
46686 + return get_btrfs_dev_from_inode(dentry->d_inode);
46687 + else
46688 +#endif
46689 + return dentry->d_inode->i_sb->s_dev;
46690 +}
46691 +
46692 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
46693 +{
46694 + return __get_dev(dentry);
46695 +}
46696 +
46697 +static char gr_task_roletype_to_char(struct task_struct *task)
46698 +{
46699 + switch (task->role->roletype &
46700 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
46701 + GR_ROLE_SPECIAL)) {
46702 + case GR_ROLE_DEFAULT:
46703 + return 'D';
46704 + case GR_ROLE_USER:
46705 + return 'U';
46706 + case GR_ROLE_GROUP:
46707 + return 'G';
46708 + case GR_ROLE_SPECIAL:
46709 + return 'S';
46710 + }
46711 +
46712 + return 'X';
46713 +}
46714 +
46715 +char gr_roletype_to_char(void)
46716 +{
46717 + return gr_task_roletype_to_char(current);
46718 +}
46719 +
46720 +__inline__ int
46721 +gr_acl_tpe_check(void)
46722 +{
46723 + if (unlikely(!(gr_status & GR_READY)))
46724 + return 0;
46725 + if (current->role->roletype & GR_ROLE_TPE)
46726 + return 1;
46727 + else
46728 + return 0;
46729 +}
46730 +
46731 +int
46732 +gr_handle_rawio(const struct inode *inode)
46733 +{
46734 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
46735 + if (inode && S_ISBLK(inode->i_mode) &&
46736 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
46737 + !capable(CAP_SYS_RAWIO))
46738 + return 1;
46739 +#endif
46740 + return 0;
46741 +}
46742 +
46743 +static int
46744 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
46745 +{
46746 + if (likely(lena != lenb))
46747 + return 0;
46748 +
46749 + return !memcmp(a, b, lena);
46750 +}
46751 +
46752 +/* this must be called with vfsmount_lock and dcache_lock held */
46753 +
46754 +static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
46755 + struct dentry *root, struct vfsmount *rootmnt,
46756 + char *buffer, int buflen)
46757 +{
46758 + char * end = buffer+buflen;
46759 + char * retval;
46760 + int namelen;
46761 +
46762 + *--end = '\0';
46763 + buflen--;
46764 +
46765 + if (buflen < 1)
46766 + goto Elong;
46767 + /* Get '/' right */
46768 + retval = end-1;
46769 + *retval = '/';
46770 +
46771 + for (;;) {
46772 + struct dentry * parent;
46773 +
46774 + if (dentry == root && vfsmnt == rootmnt)
46775 + break;
46776 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
46777 + /* Global root? */
46778 + if (vfsmnt->mnt_parent == vfsmnt)
46779 + goto global_root;
46780 + dentry = vfsmnt->mnt_mountpoint;
46781 + vfsmnt = vfsmnt->mnt_parent;
46782 + continue;
46783 + }
46784 + parent = dentry->d_parent;
46785 + prefetch(parent);
46786 + namelen = dentry->d_name.len;
46787 + buflen -= namelen + 1;
46788 + if (buflen < 0)
46789 + goto Elong;
46790 + end -= namelen;
46791 + memcpy(end, dentry->d_name.name, namelen);
46792 + *--end = '/';
46793 + retval = end;
46794 + dentry = parent;
46795 + }
46796 +
46797 +out:
46798 + return retval;
46799 +
46800 +global_root:
46801 + namelen = dentry->d_name.len;
46802 + buflen -= namelen;
46803 + if (buflen < 0)
46804 + goto Elong;
46805 + retval -= namelen-1; /* hit the slash */
46806 + memcpy(retval, dentry->d_name.name, namelen);
46807 + goto out;
46808 +Elong:
46809 + retval = ERR_PTR(-ENAMETOOLONG);
46810 + goto out;
46811 +}
46812 +
46813 +static char *
46814 +gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
46815 + struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
46816 +{
46817 + char *retval;
46818 +
46819 + retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
46820 + if (unlikely(IS_ERR(retval)))
46821 + retval = strcpy(buf, "<path too long>");
46822 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
46823 + retval[1] = '\0';
46824 +
46825 + return retval;
46826 +}
46827 +
46828 +static char *
46829 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
46830 + char *buf, int buflen)
46831 +{
46832 + char *res;
46833 +
46834 + /* we can use real_root, real_root_mnt, because this is only called
46835 + by the RBAC system */
46836 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
46837 +
46838 + return res;
46839 +}
46840 +
46841 +static char *
46842 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
46843 + char *buf, int buflen)
46844 +{
46845 + char *res;
46846 + struct dentry *root;
46847 + struct vfsmount *rootmnt;
46848 + struct task_struct *reaper = &init_task;
46849 +
46850 + /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
46851 + read_lock(&reaper->fs->lock);
46852 + root = dget(reaper->fs->root.dentry);
46853 + rootmnt = mntget(reaper->fs->root.mnt);
46854 + read_unlock(&reaper->fs->lock);
46855 +
46856 + spin_lock(&dcache_lock);
46857 + spin_lock(&vfsmount_lock);
46858 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
46859 + spin_unlock(&vfsmount_lock);
46860 + spin_unlock(&dcache_lock);
46861 +
46862 + dput(root);
46863 + mntput(rootmnt);
46864 + return res;
46865 +}
46866 +
46867 +static char *
46868 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
46869 +{
46870 + char *ret;
46871 + spin_lock(&dcache_lock);
46872 + spin_lock(&vfsmount_lock);
46873 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
46874 + PAGE_SIZE);
46875 + spin_unlock(&vfsmount_lock);
46876 + spin_unlock(&dcache_lock);
46877 + return ret;
46878 +}
46879 +
46880 +char *
46881 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
46882 +{
46883 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
46884 + PAGE_SIZE);
46885 +}
46886 +
46887 +char *
46888 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
46889 +{
46890 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
46891 + PAGE_SIZE);
46892 +}
46893 +
46894 +char *
46895 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
46896 +{
46897 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
46898 + PAGE_SIZE);
46899 +}
46900 +
46901 +char *
46902 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
46903 +{
46904 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
46905 + PAGE_SIZE);
46906 +}
46907 +
46908 +char *
46909 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
46910 +{
46911 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
46912 + PAGE_SIZE);
46913 +}
46914 +
46915 +__inline__ __u32
46916 +to_gr_audit(const __u32 reqmode)
46917 +{
46918 + /* masks off auditable permission flags, then shifts them to create
46919 + auditing flags, and adds the special case of append auditing if
46920 + we're requesting write */
46921 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
46922 +}
46923 +
46924 +struct acl_subject_label *
46925 +lookup_subject_map(const struct acl_subject_label *userp)
46926 +{
46927 + unsigned int index = shash(userp, subj_map_set.s_size);
46928 + struct subject_map *match;
46929 +
46930 + match = subj_map_set.s_hash[index];
46931 +
46932 + while (match && match->user != userp)
46933 + match = match->next;
46934 +
46935 + if (match != NULL)
46936 + return match->kernel;
46937 + else
46938 + return NULL;
46939 +}
46940 +
46941 +static void
46942 +insert_subj_map_entry(struct subject_map *subjmap)
46943 +{
46944 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
46945 + struct subject_map **curr;
46946 +
46947 + subjmap->prev = NULL;
46948 +
46949 + curr = &subj_map_set.s_hash[index];
46950 + if (*curr != NULL)
46951 + (*curr)->prev = subjmap;
46952 +
46953 + subjmap->next = *curr;
46954 + *curr = subjmap;
46955 +
46956 + return;
46957 +}
46958 +
46959 +static struct acl_role_label *
46960 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
46961 + const gid_t gid)
46962 +{
46963 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
46964 + struct acl_role_label *match;
46965 + struct role_allowed_ip *ipp;
46966 + unsigned int x;
46967 + u32 curr_ip = task->signal->curr_ip;
46968 +
46969 + task->signal->saved_ip = curr_ip;
46970 +
46971 + match = acl_role_set.r_hash[index];
46972 +
46973 + while (match) {
46974 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
46975 + for (x = 0; x < match->domain_child_num; x++) {
46976 + if (match->domain_children[x] == uid)
46977 + goto found;
46978 + }
46979 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
46980 + break;
46981 + match = match->next;
46982 + }
46983 +found:
46984 + if (match == NULL) {
46985 + try_group:
46986 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
46987 + match = acl_role_set.r_hash[index];
46988 +
46989 + while (match) {
46990 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
46991 + for (x = 0; x < match->domain_child_num; x++) {
46992 + if (match->domain_children[x] == gid)
46993 + goto found2;
46994 + }
46995 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
46996 + break;
46997 + match = match->next;
46998 + }
46999 +found2:
47000 + if (match == NULL)
47001 + match = default_role;
47002 + if (match->allowed_ips == NULL)
47003 + return match;
47004 + else {
47005 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47006 + if (likely
47007 + ((ntohl(curr_ip) & ipp->netmask) ==
47008 + (ntohl(ipp->addr) & ipp->netmask)))
47009 + return match;
47010 + }
47011 + match = default_role;
47012 + }
47013 + } else if (match->allowed_ips == NULL) {
47014 + return match;
47015 + } else {
47016 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47017 + if (likely
47018 + ((ntohl(curr_ip) & ipp->netmask) ==
47019 + (ntohl(ipp->addr) & ipp->netmask)))
47020 + return match;
47021 + }
47022 + goto try_group;
47023 + }
47024 +
47025 + return match;
47026 +}
47027 +
47028 +struct acl_subject_label *
47029 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
47030 + const struct acl_role_label *role)
47031 +{
47032 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
47033 + struct acl_subject_label *match;
47034 +
47035 + match = role->subj_hash[index];
47036 +
47037 + while (match && (match->inode != ino || match->device != dev ||
47038 + (match->mode & GR_DELETED))) {
47039 + match = match->next;
47040 + }
47041 +
47042 + if (match && !(match->mode & GR_DELETED))
47043 + return match;
47044 + else
47045 + return NULL;
47046 +}
47047 +
47048 +struct acl_subject_label *
47049 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
47050 + const struct acl_role_label *role)
47051 +{
47052 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
47053 + struct acl_subject_label *match;
47054 +
47055 + match = role->subj_hash[index];
47056 +
47057 + while (match && (match->inode != ino || match->device != dev ||
47058 + !(match->mode & GR_DELETED))) {
47059 + match = match->next;
47060 + }
47061 +
47062 + if (match && (match->mode & GR_DELETED))
47063 + return match;
47064 + else
47065 + return NULL;
47066 +}
47067 +
47068 +static struct acl_object_label *
47069 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
47070 + const struct acl_subject_label *subj)
47071 +{
47072 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47073 + struct acl_object_label *match;
47074 +
47075 + match = subj->obj_hash[index];
47076 +
47077 + while (match && (match->inode != ino || match->device != dev ||
47078 + (match->mode & GR_DELETED))) {
47079 + match = match->next;
47080 + }
47081 +
47082 + if (match && !(match->mode & GR_DELETED))
47083 + return match;
47084 + else
47085 + return NULL;
47086 +}
47087 +
47088 +static struct acl_object_label *
47089 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
47090 + const struct acl_subject_label *subj)
47091 +{
47092 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47093 + struct acl_object_label *match;
47094 +
47095 + match = subj->obj_hash[index];
47096 +
47097 + while (match && (match->inode != ino || match->device != dev ||
47098 + !(match->mode & GR_DELETED))) {
47099 + match = match->next;
47100 + }
47101 +
47102 + if (match && (match->mode & GR_DELETED))
47103 + return match;
47104 +
47105 + match = subj->obj_hash[index];
47106 +
47107 + while (match && (match->inode != ino || match->device != dev ||
47108 + (match->mode & GR_DELETED))) {
47109 + match = match->next;
47110 + }
47111 +
47112 + if (match && !(match->mode & GR_DELETED))
47113 + return match;
47114 + else
47115 + return NULL;
47116 +}
47117 +
47118 +static struct name_entry *
47119 +lookup_name_entry(const char *name)
47120 +{
47121 + unsigned int len = strlen(name);
47122 + unsigned int key = full_name_hash(name, len);
47123 + unsigned int index = key % name_set.n_size;
47124 + struct name_entry *match;
47125 +
47126 + match = name_set.n_hash[index];
47127 +
47128 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
47129 + match = match->next;
47130 +
47131 + return match;
47132 +}
47133 +
47134 +static struct name_entry *
47135 +lookup_name_entry_create(const char *name)
47136 +{
47137 + unsigned int len = strlen(name);
47138 + unsigned int key = full_name_hash(name, len);
47139 + unsigned int index = key % name_set.n_size;
47140 + struct name_entry *match;
47141 +
47142 + match = name_set.n_hash[index];
47143 +
47144 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
47145 + !match->deleted))
47146 + match = match->next;
47147 +
47148 + if (match && match->deleted)
47149 + return match;
47150 +
47151 + match = name_set.n_hash[index];
47152 +
47153 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
47154 + match->deleted))
47155 + match = match->next;
47156 +
47157 + if (match && !match->deleted)
47158 + return match;
47159 + else
47160 + return NULL;
47161 +}
47162 +
47163 +static struct inodev_entry *
47164 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
47165 +{
47166 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
47167 + struct inodev_entry *match;
47168 +
47169 + match = inodev_set.i_hash[index];
47170 +
47171 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
47172 + match = match->next;
47173 +
47174 + return match;
47175 +}
47176 +
47177 +static void
47178 +insert_inodev_entry(struct inodev_entry *entry)
47179 +{
47180 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
47181 + inodev_set.i_size);
47182 + struct inodev_entry **curr;
47183 +
47184 + entry->prev = NULL;
47185 +
47186 + curr = &inodev_set.i_hash[index];
47187 + if (*curr != NULL)
47188 + (*curr)->prev = entry;
47189 +
47190 + entry->next = *curr;
47191 + *curr = entry;
47192 +
47193 + return;
47194 +}
47195 +
47196 +static void
47197 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
47198 +{
47199 + unsigned int index =
47200 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
47201 + struct acl_role_label **curr;
47202 + struct acl_role_label *tmp;
47203 +
47204 + curr = &acl_role_set.r_hash[index];
47205 +
47206 + /* if role was already inserted due to domains and already has
47207 + a role in the same bucket as it attached, then we need to
47208 + combine these two buckets
47209 + */
47210 + if (role->next) {
47211 + tmp = role->next;
47212 + while (tmp->next)
47213 + tmp = tmp->next;
47214 + tmp->next = *curr;
47215 + } else
47216 + role->next = *curr;
47217 + *curr = role;
47218 +
47219 + return;
47220 +}
47221 +
47222 +static void
47223 +insert_acl_role_label(struct acl_role_label *role)
47224 +{
47225 + int i;
47226 +
47227 + if (role_list == NULL) {
47228 + role_list = role;
47229 + role->prev = NULL;
47230 + } else {
47231 + role->prev = role_list;
47232 + role_list = role;
47233 + }
47234 +
47235 + /* used for hash chains */
47236 + role->next = NULL;
47237 +
47238 + if (role->roletype & GR_ROLE_DOMAIN) {
47239 + for (i = 0; i < role->domain_child_num; i++)
47240 + __insert_acl_role_label(role, role->domain_children[i]);
47241 + } else
47242 + __insert_acl_role_label(role, role->uidgid);
47243 +}
47244 +
47245 +static int
47246 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
47247 +{
47248 + struct name_entry **curr, *nentry;
47249 + struct inodev_entry *ientry;
47250 + unsigned int len = strlen(name);
47251 + unsigned int key = full_name_hash(name, len);
47252 + unsigned int index = key % name_set.n_size;
47253 +
47254 + curr = &name_set.n_hash[index];
47255 +
47256 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
47257 + curr = &((*curr)->next);
47258 +
47259 + if (*curr != NULL)
47260 + return 1;
47261 +
47262 + nentry = acl_alloc(sizeof (struct name_entry));
47263 + if (nentry == NULL)
47264 + return 0;
47265 + ientry = acl_alloc(sizeof (struct inodev_entry));
47266 + if (ientry == NULL)
47267 + return 0;
47268 + ientry->nentry = nentry;
47269 +
47270 + nentry->key = key;
47271 + nentry->name = name;
47272 + nentry->inode = inode;
47273 + nentry->device = device;
47274 + nentry->len = len;
47275 + nentry->deleted = deleted;
47276 +
47277 + nentry->prev = NULL;
47278 + curr = &name_set.n_hash[index];
47279 + if (*curr != NULL)
47280 + (*curr)->prev = nentry;
47281 + nentry->next = *curr;
47282 + *curr = nentry;
47283 +
47284 + /* insert us into the table searchable by inode/dev */
47285 + insert_inodev_entry(ientry);
47286 +
47287 + return 1;
47288 +}
47289 +
47290 +static void
47291 +insert_acl_obj_label(struct acl_object_label *obj,
47292 + struct acl_subject_label *subj)
47293 +{
47294 + unsigned int index =
47295 + fhash(obj->inode, obj->device, subj->obj_hash_size);
47296 + struct acl_object_label **curr;
47297 +
47298 +
47299 + obj->prev = NULL;
47300 +
47301 + curr = &subj->obj_hash[index];
47302 + if (*curr != NULL)
47303 + (*curr)->prev = obj;
47304 +
47305 + obj->next = *curr;
47306 + *curr = obj;
47307 +
47308 + return;
47309 +}
47310 +
47311 +static void
47312 +insert_acl_subj_label(struct acl_subject_label *obj,
47313 + struct acl_role_label *role)
47314 +{
47315 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
47316 + struct acl_subject_label **curr;
47317 +
47318 + obj->prev = NULL;
47319 +
47320 + curr = &role->subj_hash[index];
47321 + if (*curr != NULL)
47322 + (*curr)->prev = obj;
47323 +
47324 + obj->next = *curr;
47325 + *curr = obj;
47326 +
47327 + return;
47328 +}
47329 +
47330 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
47331 +
47332 +static void *
47333 +create_table(__u32 * len, int elementsize)
47334 +{
47335 + unsigned int table_sizes[] = {
47336 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
47337 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
47338 + 4194301, 8388593, 16777213, 33554393, 67108859
47339 + };
47340 + void *newtable = NULL;
47341 + unsigned int pwr = 0;
47342 +
47343 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
47344 + table_sizes[pwr] <= *len)
47345 + pwr++;
47346 +
47347 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
47348 + return newtable;
47349 +
47350 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
47351 + newtable =
47352 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
47353 + else
47354 + newtable = vmalloc(table_sizes[pwr] * elementsize);
47355 +
47356 + *len = table_sizes[pwr];
47357 +
47358 + return newtable;
47359 +}
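/*
 * Aside (illustrative only, not part of the patch): the comment above
 * create_table() refers to keeping the chained-hash load factor (lambda,
 * i.e. entries per bucket) near 1, which create_table() achieves by rounding
 * the requested size up to the next prime in a fixed list.  A user-space
 * sketch of just that size-selection step; pick_bucket_count() is a
 * hypothetical name.
 */
#include <stdio.h>

static unsigned int pick_bucket_count(unsigned int nentries)
{
    static const unsigned int primes[] = {
        7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191
    };
    unsigned int i;

    for (i = 0; i < sizeof(primes) / sizeof(primes[0]); i++)
        if (primes[i] > nentries)
            return primes[i];
    return 0;	/* too many entries for this sketch's list */
}

int main(void)
{
    printf("%u\n", pick_bucket_count(100));	/* prints 127 */
    return 0;
}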
47360 +
47361 +static int
47362 +init_variables(const struct gr_arg *arg)
47363 +{
47364 + struct task_struct *reaper = &init_task;
47365 + unsigned int stacksize;
47366 +
47367 + subj_map_set.s_size = arg->role_db.num_subjects;
47368 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
47369 + name_set.n_size = arg->role_db.num_objects;
47370 + inodev_set.i_size = arg->role_db.num_objects;
47371 +
47372 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
47373 + !name_set.n_size || !inodev_set.i_size)
47374 + return 1;
47375 +
47376 + if (!gr_init_uidset())
47377 + return 1;
47378 +
47379 + /* set up the stack that holds allocation info */
47380 +
47381 + stacksize = arg->role_db.num_pointers + 5;
47382 +
47383 + if (!acl_alloc_stack_init(stacksize))
47384 + return 1;
47385 +
47386 + /* grab reference for the real root dentry and vfsmount */
47387 + read_lock(&reaper->fs->lock);
47388 + real_root = dget(reaper->fs->root.dentry);
47389 + real_root_mnt = mntget(reaper->fs->root.mnt);
47390 + read_unlock(&reaper->fs->lock);
47391 +
47392 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47393 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
47394 +#endif
47395 +
47396 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
47397 + if (fakefs_obj_rw == NULL)
47398 + return 1;
47399 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
47400 +
47401 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
47402 + if (fakefs_obj_rwx == NULL)
47403 + return 1;
47404 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
47405 +
47406 + subj_map_set.s_hash =
47407 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
47408 + acl_role_set.r_hash =
47409 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
47410 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
47411 + inodev_set.i_hash =
47412 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
47413 +
47414 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
47415 + !name_set.n_hash || !inodev_set.i_hash)
47416 + return 1;
47417 +
47418 + memset(subj_map_set.s_hash, 0,
47419 + sizeof(struct subject_map *) * subj_map_set.s_size);
47420 + memset(acl_role_set.r_hash, 0,
47421 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
47422 + memset(name_set.n_hash, 0,
47423 + sizeof (struct name_entry *) * name_set.n_size);
47424 + memset(inodev_set.i_hash, 0,
47425 + sizeof (struct inodev_entry *) * inodev_set.i_size);
47426 +
47427 + return 0;
47428 +}
47429 +
47430 +/* free information not needed after startup;
47431 +   currently this holds the user->kernel pointer mappings for subjects
47432 +*/
47433 +
47434 +static void
47435 +free_init_variables(void)
47436 +{
47437 + __u32 i;
47438 +
47439 + if (subj_map_set.s_hash) {
47440 + for (i = 0; i < subj_map_set.s_size; i++) {
47441 + if (subj_map_set.s_hash[i]) {
47442 + kfree(subj_map_set.s_hash[i]);
47443 + subj_map_set.s_hash[i] = NULL;
47444 + }
47445 + }
47446 +
47447 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
47448 + PAGE_SIZE)
47449 + kfree(subj_map_set.s_hash);
47450 + else
47451 + vfree(subj_map_set.s_hash);
47452 + }
47453 +
47454 + return;
47455 +}
47456 +
47457 +static void
47458 +free_variables(void)
47459 +{
47460 + struct acl_subject_label *s;
47461 + struct acl_role_label *r;
47462 + struct task_struct *task, *task2;
47463 + unsigned int x;
47464 +
47465 + gr_clear_learn_entries();
47466 +
47467 + read_lock(&tasklist_lock);
47468 + do_each_thread(task2, task) {
47469 + task->acl_sp_role = 0;
47470 + task->acl_role_id = 0;
47471 + task->acl = NULL;
47472 + task->role = NULL;
47473 + } while_each_thread(task2, task);
47474 + read_unlock(&tasklist_lock);
47475 +
47476 + /* release the reference to the real root dentry and vfsmount */
47477 + if (real_root)
47478 + dput(real_root);
47479 + real_root = NULL;
47480 + if (real_root_mnt)
47481 + mntput(real_root_mnt);
47482 + real_root_mnt = NULL;
47483 +
47484 + /* free all object hash tables */
47485 +
47486 + FOR_EACH_ROLE_START(r)
47487 + if (r->subj_hash == NULL)
47488 + goto next_role;
47489 + FOR_EACH_SUBJECT_START(r, s, x)
47490 + if (s->obj_hash == NULL)
47491 + break;
47492 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
47493 + kfree(s->obj_hash);
47494 + else
47495 + vfree(s->obj_hash);
47496 + FOR_EACH_SUBJECT_END(s, x)
47497 + FOR_EACH_NESTED_SUBJECT_START(r, s)
47498 + if (s->obj_hash == NULL)
47499 + break;
47500 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
47501 + kfree(s->obj_hash);
47502 + else
47503 + vfree(s->obj_hash);
47504 + FOR_EACH_NESTED_SUBJECT_END(s)
47505 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
47506 + kfree(r->subj_hash);
47507 + else
47508 + vfree(r->subj_hash);
47509 + r->subj_hash = NULL;
47510 +next_role:
47511 + FOR_EACH_ROLE_END(r)
47512 +
47513 + acl_free_all();
47514 +
47515 + if (acl_role_set.r_hash) {
47516 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
47517 + PAGE_SIZE)
47518 + kfree(acl_role_set.r_hash);
47519 + else
47520 + vfree(acl_role_set.r_hash);
47521 + }
47522 + if (name_set.n_hash) {
47523 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
47524 + PAGE_SIZE)
47525 + kfree(name_set.n_hash);
47526 + else
47527 + vfree(name_set.n_hash);
47528 + }
47529 +
47530 + if (inodev_set.i_hash) {
47531 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
47532 + PAGE_SIZE)
47533 + kfree(inodev_set.i_hash);
47534 + else
47535 + vfree(inodev_set.i_hash);
47536 + }
47537 +
47538 + gr_free_uidset();
47539 +
47540 + memset(&name_set, 0, sizeof (struct name_db));
47541 + memset(&inodev_set, 0, sizeof (struct inodev_db));
47542 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
47543 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
47544 +
47545 + default_role = NULL;
47546 + role_list = NULL;
47547 +
47548 + return;
47549 +}
47550 +
47551 +static __u32
47552 +count_user_objs(struct acl_object_label *userp)
47553 +{
47554 + struct acl_object_label o_tmp;
47555 + __u32 num = 0;
47556 +
47557 + while (userp) {
47558 + if (copy_from_user(&o_tmp, userp,
47559 + sizeof (struct acl_object_label)))
47560 + break;
47561 +
47562 + userp = o_tmp.prev;
47563 + num++;
47564 + }
47565 +
47566 + return num;
47567 +}
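/*
 * Aside (illustrative only, not part of the patch): count_user_objs() and
 * count_user_subjs() size the kernel hash tables by walking a user-space
 * linked list through its ->prev pointers, copying each node into a local
 * buffer before trusting any field.  User-space sketch of that counting
 * walk; memcpy() stands in for copy_from_user(), which in the kernel can
 * fail and must end the walk.
 */
#include <stdio.h>
#include <string.h>

struct sketch_node {
    struct sketch_node *prev;
    int payload;
};

static unsigned int count_nodes(const struct sketch_node *userp)
{
    struct sketch_node tmp;
    unsigned int num = 0;

    while (userp) {
        memcpy(&tmp, userp, sizeof(tmp));  /* copy_from_user() in the patch */
        userp = tmp.prev;
        num++;
    }
    return num;
}

int main(void)
{
    struct sketch_node a = { NULL, 1 }, b = { &a, 2 }, c = { &b, 3 };

    printf("%u\n", count_nodes(&c));	/* prints 3 */
    return 0;
}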
47568 +
47569 +static struct acl_subject_label *
47570 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
47571 +
47572 +static int
47573 +copy_user_glob(struct acl_object_label *obj)
47574 +{
47575 + struct acl_object_label *g_tmp, **guser;
47576 + unsigned int len;
47577 + char *tmp;
47578 +
47579 + if (obj->globbed == NULL)
47580 + return 0;
47581 +
47582 + guser = &obj->globbed;
47583 + while (*guser) {
47584 + g_tmp = (struct acl_object_label *)
47585 + acl_alloc(sizeof (struct acl_object_label));
47586 + if (g_tmp == NULL)
47587 + return -ENOMEM;
47588 +
47589 + if (copy_from_user(g_tmp, *guser,
47590 + sizeof (struct acl_object_label)))
47591 + return -EFAULT;
47592 +
47593 + len = strnlen_user(g_tmp->filename, PATH_MAX);
47594 +
47595 + if (!len || len >= PATH_MAX)
47596 + return -EINVAL;
47597 +
47598 + if ((tmp = (char *) acl_alloc(len)) == NULL)
47599 + return -ENOMEM;
47600 +
47601 + if (copy_from_user(tmp, g_tmp->filename, len))
47602 + return -EFAULT;
47603 + tmp[len-1] = '\0';
47604 + g_tmp->filename = tmp;
47605 +
47606 + *guser = g_tmp;
47607 + guser = &(g_tmp->next);
47608 + }
47609 +
47610 + return 0;
47611 +}
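/*
 * Aside (illustrative only, not part of the patch): copy_user_glob(),
 * copy_user_objs(), copy_user_transitions() and do_copy_user_subj() all use
 * the same idiom for importing a user-supplied string: measure it with a
 * bounded strnlen_user(), reject empty or over-long results, copy it, then
 * force NUL termination.  User-space sketch of that idiom; import_name() and
 * NAME_MAX_LEN are hypothetical, and strnlen()/malloc()/memcpy() stand in
 * for strnlen_user()/acl_alloc()/copy_from_user().
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NAME_MAX_LEN 4096	/* plays the role of PATH_MAX */

static char *import_name(const char *user_str)
{
    size_t len = strnlen(user_str, NAME_MAX_LEN) + 1;  /* count the NUL, like strnlen_user() */
    char *tmp;

    if (len == 1 || len > NAME_MAX_LEN)
        return NULL;		/* -EINVAL in the patch */
    tmp = malloc(len);		/* acl_alloc() in the patch */
    if (tmp == NULL)
        return NULL;		/* -ENOMEM in the patch */
    memcpy(tmp, user_str, len);	/* copy_from_user() in the patch */
    tmp[len - 1] = '\0';	/* never trust the source's terminator */
    return tmp;
}

int main(void)
{
    char *name = import_name("/bin/true");

    printf("%s\n", name ? name : "(rejected)");
    free(name);
    return 0;
}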
47612 +
47613 +static int
47614 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
47615 + struct acl_role_label *role)
47616 +{
47617 + struct acl_object_label *o_tmp;
47618 + unsigned int len;
47619 + int ret;
47620 + char *tmp;
47621 +
47622 + while (userp) {
47623 + if ((o_tmp = (struct acl_object_label *)
47624 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
47625 + return -ENOMEM;
47626 +
47627 + if (copy_from_user(o_tmp, userp,
47628 + sizeof (struct acl_object_label)))
47629 + return -EFAULT;
47630 +
47631 + userp = o_tmp->prev;
47632 +
47633 + len = strnlen_user(o_tmp->filename, PATH_MAX);
47634 +
47635 + if (!len || len >= PATH_MAX)
47636 + return -EINVAL;
47637 +
47638 + if ((tmp = (char *) acl_alloc(len)) == NULL)
47639 + return -ENOMEM;
47640 +
47641 + if (copy_from_user(tmp, o_tmp->filename, len))
47642 + return -EFAULT;
47643 + tmp[len-1] = '\0';
47644 + o_tmp->filename = tmp;
47645 +
47646 + insert_acl_obj_label(o_tmp, subj);
47647 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
47648 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
47649 + return -ENOMEM;
47650 +
47651 + ret = copy_user_glob(o_tmp);
47652 + if (ret)
47653 + return ret;
47654 +
47655 + if (o_tmp->nested) {
47656 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
47657 + if (IS_ERR(o_tmp->nested))
47658 + return PTR_ERR(o_tmp->nested);
47659 +
47660 + /* insert into nested subject list */
47661 + o_tmp->nested->next = role->hash->first;
47662 + role->hash->first = o_tmp->nested;
47663 + }
47664 + }
47665 +
47666 + return 0;
47667 +}
47668 +
47669 +static __u32
47670 +count_user_subjs(struct acl_subject_label *userp)
47671 +{
47672 + struct acl_subject_label s_tmp;
47673 + __u32 num = 0;
47674 +
47675 + while (userp) {
47676 + if (copy_from_user(&s_tmp, userp,
47677 + sizeof (struct acl_subject_label)))
47678 + break;
47679 +
47680 + userp = s_tmp.prev;
47681 + /* do not count nested subjects against this count, since
47682 + they are not included in the hash table, but are
47683 + attached to objects. We have already counted
47684 + the subjects in userspace for the allocation
47685 + stack
47686 + */
47687 + if (!(s_tmp.mode & GR_NESTED))
47688 + num++;
47689 + }
47690 +
47691 + return num;
47692 +}
47693 +
47694 +static int
47695 +copy_user_allowedips(struct acl_role_label *rolep)
47696 +{
47697 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
47698 +
47699 + ruserip = rolep->allowed_ips;
47700 +
47701 + while (ruserip) {
47702 + rlast = rtmp;
47703 +
47704 + if ((rtmp = (struct role_allowed_ip *)
47705 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
47706 + return -ENOMEM;
47707 +
47708 + if (copy_from_user(rtmp, ruserip,
47709 + sizeof (struct role_allowed_ip)))
47710 + return -EFAULT;
47711 +
47712 + ruserip = rtmp->prev;
47713 +
47714 + if (!rlast) {
47715 + rtmp->prev = NULL;
47716 + rolep->allowed_ips = rtmp;
47717 + } else {
47718 + rlast->next = rtmp;
47719 + rtmp->prev = rlast;
47720 + }
47721 +
47722 + if (!ruserip)
47723 + rtmp->next = NULL;
47724 + }
47725 +
47726 + return 0;
47727 +}
47728 +
47729 +static int
47730 +copy_user_transitions(struct acl_role_label *rolep)
47731 +{
47732 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
47733 +
47734 + unsigned int len;
47735 + char *tmp;
47736 +
47737 + rusertp = rolep->transitions;
47738 +
47739 + while (rusertp) {
47740 + rlast = rtmp;
47741 +
47742 + if ((rtmp = (struct role_transition *)
47743 + acl_alloc(sizeof (struct role_transition))) == NULL)
47744 + return -ENOMEM;
47745 +
47746 + if (copy_from_user(rtmp, rusertp,
47747 + sizeof (struct role_transition)))
47748 + return -EFAULT;
47749 +
47750 + rusertp = rtmp->prev;
47751 +
47752 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
47753 +
47754 + if (!len || len >= GR_SPROLE_LEN)
47755 + return -EINVAL;
47756 +
47757 + if ((tmp = (char *) acl_alloc(len)) == NULL)
47758 + return -ENOMEM;
47759 +
47760 + if (copy_from_user(tmp, rtmp->rolename, len))
47761 + return -EFAULT;
47762 + tmp[len-1] = '\0';
47763 + rtmp->rolename = tmp;
47764 +
47765 + if (!rlast) {
47766 + rtmp->prev = NULL;
47767 + rolep->transitions = rtmp;
47768 + } else {
47769 + rlast->next = rtmp;
47770 + rtmp->prev = rlast;
47771 + }
47772 +
47773 + if (!rusertp)
47774 + rtmp->next = NULL;
47775 + }
47776 +
47777 + return 0;
47778 +}
47779 +
47780 +static struct acl_subject_label *
47781 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
47782 +{
47783 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
47784 + unsigned int len;
47785 + char *tmp;
47786 + __u32 num_objs;
47787 + struct acl_ip_label **i_tmp, *i_utmp2;
47788 + struct gr_hash_struct ghash;
47789 + struct subject_map *subjmap;
47790 + unsigned int i_num;
47791 + int err;
47792 +
47793 + s_tmp = lookup_subject_map(userp);
47794 +
47795 + /* we've already copied this subject into the kernel, just return
47796 + the reference to it, and don't copy it over again
47797 + */
47798 + if (s_tmp)
47799 + return(s_tmp);
47800 +
47801 + if ((s_tmp = (struct acl_subject_label *)
47802 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
47803 + return ERR_PTR(-ENOMEM);
47804 +
47805 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
47806 + if (subjmap == NULL)
47807 + return ERR_PTR(-ENOMEM);
47808 +
47809 + subjmap->user = userp;
47810 + subjmap->kernel = s_tmp;
47811 + insert_subj_map_entry(subjmap);
47812 +
47813 + if (copy_from_user(s_tmp, userp,
47814 + sizeof (struct acl_subject_label)))
47815 + return ERR_PTR(-EFAULT);
47816 +
47817 + len = strnlen_user(s_tmp->filename, PATH_MAX);
47818 +
47819 + if (!len || len >= PATH_MAX)
47820 + return ERR_PTR(-EINVAL);
47821 +
47822 + if ((tmp = (char *) acl_alloc(len)) == NULL)
47823 + return ERR_PTR(-ENOMEM);
47824 +
47825 + if (copy_from_user(tmp, s_tmp->filename, len))
47826 + return ERR_PTR(-EFAULT);
47827 + tmp[len-1] = '\0';
47828 + s_tmp->filename = tmp;
47829 +
47830 + if (!strcmp(s_tmp->filename, "/"))
47831 + role->root_label = s_tmp;
47832 +
47833 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
47834 + return ERR_PTR(-EFAULT);
47835 +
47836 + /* copy user and group transition tables */
47837 +
47838 + if (s_tmp->user_trans_num) {
47839 + uid_t *uidlist;
47840 +
47841 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
47842 + if (uidlist == NULL)
47843 + return ERR_PTR(-ENOMEM);
47844 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
47845 + return ERR_PTR(-EFAULT);
47846 +
47847 + s_tmp->user_transitions = uidlist;
47848 + }
47849 +
47850 + if (s_tmp->group_trans_num) {
47851 + gid_t *gidlist;
47852 +
47853 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
47854 + if (gidlist == NULL)
47855 + return ERR_PTR(-ENOMEM);
47856 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
47857 + return ERR_PTR(-EFAULT);
47858 +
47859 + s_tmp->group_transitions = gidlist;
47860 + }
47861 +
47862 + /* set up object hash table */
47863 + num_objs = count_user_objs(ghash.first);
47864 +
47865 + s_tmp->obj_hash_size = num_objs;
47866 + s_tmp->obj_hash =
47867 + (struct acl_object_label **)
47868 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
47869 +
47870 + if (!s_tmp->obj_hash)
47871 + return ERR_PTR(-ENOMEM);
47872 +
47873 + memset(s_tmp->obj_hash, 0,
47874 + s_tmp->obj_hash_size *
47875 + sizeof (struct acl_object_label *));
47876 +
47877 + /* add in objects */
47878 + err = copy_user_objs(ghash.first, s_tmp, role);
47879 +
47880 + if (err)
47881 + return ERR_PTR(err);
47882 +
47883 + /* set pointer for parent subject */
47884 + if (s_tmp->parent_subject) {
47885 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
47886 +
47887 + if (IS_ERR(s_tmp2))
47888 + return s_tmp2;
47889 +
47890 + s_tmp->parent_subject = s_tmp2;
47891 + }
47892 +
47893 + /* add in ip acls */
47894 +
47895 + if (!s_tmp->ip_num) {
47896 + s_tmp->ips = NULL;
47897 + goto insert;
47898 + }
47899 +
47900 + i_tmp =
47901 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
47902 + sizeof (struct acl_ip_label *));
47903 +
47904 + if (!i_tmp)
47905 + return ERR_PTR(-ENOMEM);
47906 +
47907 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
47908 + *(i_tmp + i_num) =
47909 + (struct acl_ip_label *)
47910 + acl_alloc(sizeof (struct acl_ip_label));
47911 + if (!*(i_tmp + i_num))
47912 + return ERR_PTR(-ENOMEM);
47913 +
47914 + if (copy_from_user
47915 + (&i_utmp2, s_tmp->ips + i_num,
47916 + sizeof (struct acl_ip_label *)))
47917 + return ERR_PTR(-EFAULT);
47918 +
47919 + if (copy_from_user
47920 + (*(i_tmp + i_num), i_utmp2,
47921 + sizeof (struct acl_ip_label)))
47922 + return ERR_PTR(-EFAULT);
47923 +
47924 + if ((*(i_tmp + i_num))->iface == NULL)
47925 + continue;
47926 +
47927 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
47928 + if (!len || len >= IFNAMSIZ)
47929 + return ERR_PTR(-EINVAL);
47930 + tmp = acl_alloc(len);
47931 + if (tmp == NULL)
47932 + return ERR_PTR(-ENOMEM);
47933 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
47934 + return ERR_PTR(-EFAULT);
47935 + (*(i_tmp + i_num))->iface = tmp;
47936 + }
47937 +
47938 + s_tmp->ips = i_tmp;
47939 +
47940 +insert:
47941 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
47942 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
47943 + return ERR_PTR(-ENOMEM);
47944 +
47945 + return s_tmp;
47946 +}
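/*
 * Aside (illustrative only, not part of the patch): do_copy_user_subj()
 * registers each user-space subject pointer in a subject_map before copying
 * it, so that later references to the same subject (parent_subject, nested
 * subjects) reuse the kernel copy instead of duplicating it.  User-space
 * sketch of that memoized copy; all names here are hypothetical, and the
 * fixed-size map stands in for lookup_subject_map()/insert_subj_map_entry().
 */
#include <stdio.h>
#include <stdlib.h>

struct sketch_subj {
    struct sketch_subj *parent;
    int id;
};

static struct sketch_map {
    const struct sketch_subj *src;
    struct sketch_subj *copy;
} map[64];
static int map_len;

static struct sketch_subj *copy_subj(const struct sketch_subj *src)
{
    struct sketch_subj *dst;
    int i;

    if (src == NULL)
        return NULL;
    for (i = 0; i < map_len; i++)		/* lookup_subject_map() */
        if (map[i].src == src)
            return map[i].copy;

    dst = malloc(sizeof(*dst));
    if (dst == NULL)
        return NULL;
    map[map_len].src = src;			/* insert_subj_map_entry() */
    map[map_len].copy = dst;
    map_len++;

    dst->id = src->id;
    dst->parent = copy_subj(src->parent);	/* a shared parent is copied once */
    return dst;
}

int main(void)
{
    struct sketch_subj root = { NULL, 0 }, a = { &root, 1 }, b = { &root, 2 };
    struct sketch_subj *ca = copy_subj(&a), *cb = copy_subj(&b);

    printf("%d\n", ca && cb && ca->parent == cb->parent);	/* prints 1 */
    return 0;
}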
47947 +
47948 +static int
47949 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
47950 +{
47951 + struct acl_subject_label s_pre;
47952 + struct acl_subject_label * ret;
47953 + int err;
47954 +
47955 + while (userp) {
47956 + if (copy_from_user(&s_pre, userp,
47957 + sizeof (struct acl_subject_label)))
47958 + return -EFAULT;
47959 +
47960 + /* do not add nested subjects here, add
47961 + while parsing objects
47962 + */
47963 +
47964 + if (s_pre.mode & GR_NESTED) {
47965 + userp = s_pre.prev;
47966 + continue;
47967 + }
47968 +
47969 + ret = do_copy_user_subj(userp, role);
47970 +
47971 + err = PTR_ERR(ret);
47972 + if (IS_ERR(ret))
47973 + return err;
47974 +
47975 + insert_acl_subj_label(ret, role);
47976 +
47977 + userp = s_pre.prev;
47978 + }
47979 +
47980 + return 0;
47981 +}
47982 +
47983 +static int
47984 +copy_user_acl(struct gr_arg *arg)
47985 +{
47986 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
47987 + struct sprole_pw *sptmp;
47988 + struct gr_hash_struct *ghash;
47989 + uid_t *domainlist;
47990 + unsigned int r_num;
47991 + unsigned int len;
47992 + char *tmp;
47993 + int err = 0;
47994 + __u16 i;
47995 + __u32 num_subjs;
47996 +
47997 + /* we need a default and kernel role */
47998 + if (arg->role_db.num_roles < 2)
47999 + return -EINVAL;
48000 +
48001 + /* copy special role authentication info from userspace */
48002 +
48003 + num_sprole_pws = arg->num_sprole_pws;
48004 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
48005 +
48006 + if (!acl_special_roles) {
48007 + err = -ENOMEM;
48008 + goto cleanup;
48009 + }
48010 +
48011 + for (i = 0; i < num_sprole_pws; i++) {
48012 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
48013 + if (!sptmp) {
48014 + err = -ENOMEM;
48015 + goto cleanup;
48016 + }
48017 + if (copy_from_user(sptmp, arg->sprole_pws + i,
48018 + sizeof (struct sprole_pw))) {
48019 + err = -EFAULT;
48020 + goto cleanup;
48021 + }
48022 +
48023 + len =
48024 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
48025 +
48026 + if (!len || len >= GR_SPROLE_LEN) {
48027 + err = -EINVAL;
48028 + goto cleanup;
48029 + }
48030 +
48031 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
48032 + err = -ENOMEM;
48033 + goto cleanup;
48034 + }
48035 +
48036 + if (copy_from_user(tmp, sptmp->rolename, len)) {
48037 + err = -EFAULT;
48038 + goto cleanup;
48039 + }
48040 + tmp[len-1] = '\0';
48041 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
48042 + printk(KERN_ALERT "Copying special role %s\n", tmp);
48043 +#endif
48044 + sptmp->rolename = tmp;
48045 + acl_special_roles[i] = sptmp;
48046 + }
48047 +
48048 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
48049 +
48050 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
48051 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
48052 +
48053 + if (!r_tmp) {
48054 + err = -ENOMEM;
48055 + goto cleanup;
48056 + }
48057 +
48058 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
48059 + sizeof (struct acl_role_label *))) {
48060 + err = -EFAULT;
48061 + goto cleanup;
48062 + }
48063 +
48064 + if (copy_from_user(r_tmp, r_utmp2,
48065 + sizeof (struct acl_role_label))) {
48066 + err = -EFAULT;
48067 + goto cleanup;
48068 + }
48069 +
48070 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
48071 +
48072 +		if (!len || len >= GR_SPROLE_LEN) {
48073 + err = -EINVAL;
48074 + goto cleanup;
48075 + }
48076 +
48077 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
48078 + err = -ENOMEM;
48079 + goto cleanup;
48080 + }
48081 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
48082 + err = -EFAULT;
48083 + goto cleanup;
48084 + }
48085 + tmp[len-1] = '\0';
48086 + r_tmp->rolename = tmp;
48087 +
48088 + if (!strcmp(r_tmp->rolename, "default")
48089 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
48090 + default_role = r_tmp;
48091 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
48092 + kernel_role = r_tmp;
48093 + }
48094 +
48095 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
48096 + err = -ENOMEM;
48097 + goto cleanup;
48098 + }
48099 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
48100 + err = -EFAULT;
48101 + goto cleanup;
48102 + }
48103 +
48104 + r_tmp->hash = ghash;
48105 +
48106 + num_subjs = count_user_subjs(r_tmp->hash->first);
48107 +
48108 + r_tmp->subj_hash_size = num_subjs;
48109 + r_tmp->subj_hash =
48110 + (struct acl_subject_label **)
48111 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
48112 +
48113 + if (!r_tmp->subj_hash) {
48114 + err = -ENOMEM;
48115 + goto cleanup;
48116 + }
48117 +
48118 + err = copy_user_allowedips(r_tmp);
48119 + if (err)
48120 + goto cleanup;
48121 +
48122 + /* copy domain info */
48123 + if (r_tmp->domain_children != NULL) {
48124 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
48125 + if (domainlist == NULL) {
48126 + err = -ENOMEM;
48127 + goto cleanup;
48128 + }
48129 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
48130 + err = -EFAULT;
48131 + goto cleanup;
48132 + }
48133 + r_tmp->domain_children = domainlist;
48134 + }
48135 +
48136 + err = copy_user_transitions(r_tmp);
48137 + if (err)
48138 + goto cleanup;
48139 +
48140 + memset(r_tmp->subj_hash, 0,
48141 + r_tmp->subj_hash_size *
48142 + sizeof (struct acl_subject_label *));
48143 +
48144 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
48145 +
48146 + if (err)
48147 + goto cleanup;
48148 +
48149 + /* set nested subject list to null */
48150 + r_tmp->hash->first = NULL;
48151 +
48152 + insert_acl_role_label(r_tmp);
48153 + }
48154 +
48155 + goto return_err;
48156 + cleanup:
48157 + free_variables();
48158 + return_err:
48159 + return err;
48160 +
48161 +}
48162 +
48163 +static int
48164 +gracl_init(struct gr_arg *args)
48165 +{
48166 + int error = 0;
48167 +
48168 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
48169 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
48170 +
48171 + if (init_variables(args)) {
48172 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
48173 + error = -ENOMEM;
48174 + free_variables();
48175 + goto out;
48176 + }
48177 +
48178 + error = copy_user_acl(args);
48179 + free_init_variables();
48180 + if (error) {
48181 + free_variables();
48182 + goto out;
48183 + }
48184 +
48185 + if ((error = gr_set_acls(0))) {
48186 + free_variables();
48187 + goto out;
48188 + }
48189 +
48190 + pax_open_kernel();
48191 + gr_status |= GR_READY;
48192 + pax_close_kernel();
48193 +
48194 + out:
48195 + return error;
48196 +}
48197 +
48198 +/* derived from glibc fnmatch(); returns 0 on match, 1 on no match */
48199 +
48200 +static int
48201 +glob_match(const char *p, const char *n)
48202 +{
48203 + char c;
48204 +
48205 + while ((c = *p++) != '\0') {
48206 + switch (c) {
48207 + case '?':
48208 + if (*n == '\0')
48209 + return 1;
48210 + else if (*n == '/')
48211 + return 1;
48212 + break;
48213 + case '\\':
48214 + if (*n != c)
48215 + return 1;
48216 + break;
48217 + case '*':
48218 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
48219 + if (*n == '/')
48220 + return 1;
48221 + else if (c == '?') {
48222 + if (*n == '\0')
48223 + return 1;
48224 + else
48225 + ++n;
48226 + }
48227 + }
48228 + if (c == '\0') {
48229 + return 0;
48230 + } else {
48231 + const char *endp;
48232 +
48233 + if ((endp = strchr(n, '/')) == NULL)
48234 + endp = n + strlen(n);
48235 +
48236 + if (c == '[') {
48237 + for (--p; n < endp; ++n)
48238 + if (!glob_match(p, n))
48239 + return 0;
48240 + } else if (c == '/') {
48241 + while (*n != '\0' && *n != '/')
48242 + ++n;
48243 + if (*n == '/' && !glob_match(p, n + 1))
48244 + return 0;
48245 + } else {
48246 + for (--p; n < endp; ++n)
48247 + if (*n == c && !glob_match(p, n))
48248 + return 0;
48249 + }
48250 +
48251 + return 1;
48252 + }
48253 + case '[':
48254 + {
48255 + int not;
48256 + char cold;
48257 +
48258 + if (*n == '\0' || *n == '/')
48259 + return 1;
48260 +
48261 + not = (*p == '!' || *p == '^');
48262 + if (not)
48263 + ++p;
48264 +
48265 + c = *p++;
48266 + for (;;) {
48267 + unsigned char fn = (unsigned char)*n;
48268 +
48269 + if (c == '\0')
48270 + return 1;
48271 + else {
48272 + if (c == fn)
48273 + goto matched;
48274 + cold = c;
48275 + c = *p++;
48276 +
48277 + if (c == '-' && *p != ']') {
48278 + unsigned char cend = *p++;
48279 +
48280 + if (cend == '\0')
48281 + return 1;
48282 +
48283 + if (cold <= fn && fn <= cend)
48284 + goto matched;
48285 +
48286 + c = *p++;
48287 + }
48288 + }
48289 +
48290 + if (c == ']')
48291 + break;
48292 + }
48293 + if (!not)
48294 + return 1;
48295 + break;
48296 + matched:
48297 + while (c != ']') {
48298 + if (c == '\0')
48299 + return 1;
48300 +
48301 + c = *p++;
48302 + }
48303 + if (not)
48304 + return 1;
48305 + }
48306 + break;
48307 + default:
48308 + if (c != *n)
48309 + return 1;
48310 + }
48311 +
48312 + ++n;
48313 + }
48314 +
48315 + if (*n == '\0')
48316 + return 0;
48317 +
48318 + if (*n == '/')
48319 + return 0;
48320 +
48321 + return 1;
48322 +}
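/*
 * Aside (illustrative only, not part of the patch): glob_match() above is a
 * trimmed-down relative of glibc's fnmatch() with the same 0 = match,
 * non-zero = no match convention.  As a rough user-space analogue (edge
 * cases differ), fnmatch(3) with FNM_PATHNAME shows the calling pattern that
 * chk_glob_label() below relies on.
 */
#include <fnmatch.h>
#include <stdio.h>

int main(void)
{
    /* 0 from fnmatch() means "matched", as with glob_match() */
    printf("%d\n", fnmatch("/home/*/.bashrc", "/home/alice/.bashrc",
                           FNM_PATHNAME) == 0);	/* 1: matched */
    printf("%d\n", fnmatch("/home/*", "/home/alice/.bashrc",
                           FNM_PATHNAME) == 0);	/* 0: here '*' does not cross '/' */
    return 0;
}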
48323 +
48324 +static struct acl_object_label *
48325 +chk_glob_label(struct acl_object_label *globbed,
48326 + struct dentry *dentry, struct vfsmount *mnt, char **path)
48327 +{
48328 + struct acl_object_label *tmp;
48329 +
48330 + if (*path == NULL)
48331 + *path = gr_to_filename_nolock(dentry, mnt);
48332 +
48333 + tmp = globbed;
48334 +
48335 + while (tmp) {
48336 + if (!glob_match(tmp->filename, *path))
48337 + return tmp;
48338 + tmp = tmp->next;
48339 + }
48340 +
48341 + return NULL;
48342 +}
48343 +
48344 +static struct acl_object_label *
48345 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
48346 + const ino_t curr_ino, const dev_t curr_dev,
48347 + const struct acl_subject_label *subj, char **path, const int checkglob)
48348 +{
48349 + struct acl_subject_label *tmpsubj;
48350 + struct acl_object_label *retval;
48351 + struct acl_object_label *retval2;
48352 +
48353 + tmpsubj = (struct acl_subject_label *) subj;
48354 + read_lock(&gr_inode_lock);
48355 + do {
48356 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
48357 + if (retval) {
48358 + if (checkglob && retval->globbed) {
48359 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
48360 + (struct vfsmount *)orig_mnt, path);
48361 + if (retval2)
48362 + retval = retval2;
48363 + }
48364 + break;
48365 + }
48366 + } while ((tmpsubj = tmpsubj->parent_subject));
48367 + read_unlock(&gr_inode_lock);
48368 +
48369 + return retval;
48370 +}
48371 +
48372 +static __inline__ struct acl_object_label *
48373 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
48374 + const struct dentry *curr_dentry,
48375 + const struct acl_subject_label *subj, char **path, const int checkglob)
48376 +{
48377 + int newglob = checkglob;
48378 +
48379 +	/* if we aren't checking a subdirectory of the original path yet, don't do glob checking,
48380 +	   as we don't want a / * rule to match instead of the / object.
48381 +	   Don't do this for create lookups that call this function, though, since they're looking up
48382 +	   on the parent and thus need globbing checks on all paths
48383 +	*/
48384 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
48385 + newglob = GR_NO_GLOB;
48386 +
48387 + return __full_lookup(orig_dentry, orig_mnt,
48388 + curr_dentry->d_inode->i_ino,
48389 + __get_dev(curr_dentry), subj, path, newglob);
48390 +}
48391 +
48392 +static struct acl_object_label *
48393 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48394 + const struct acl_subject_label *subj, char *path, const int checkglob)
48395 +{
48396 + struct dentry *dentry = (struct dentry *) l_dentry;
48397 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
48398 + struct acl_object_label *retval;
48399 +
48400 + spin_lock(&dcache_lock);
48401 + spin_lock(&vfsmount_lock);
48402 +
48403 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
48404 +#ifdef CONFIG_NET
48405 + mnt == sock_mnt ||
48406 +#endif
48407 +#ifdef CONFIG_HUGETLBFS
48408 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
48409 +#endif
48410 + /* ignore Eric Biederman */
48411 + IS_PRIVATE(l_dentry->d_inode))) {
48412 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
48413 + goto out;
48414 + }
48415 +
48416 + for (;;) {
48417 + if (dentry == real_root && mnt == real_root_mnt)
48418 + break;
48419 +
48420 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
48421 + if (mnt->mnt_parent == mnt)
48422 + break;
48423 +
48424 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48425 + if (retval != NULL)
48426 + goto out;
48427 +
48428 + dentry = mnt->mnt_mountpoint;
48429 + mnt = mnt->mnt_parent;
48430 + continue;
48431 + }
48432 +
48433 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48434 + if (retval != NULL)
48435 + goto out;
48436 +
48437 + dentry = dentry->d_parent;
48438 + }
48439 +
48440 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48441 +
48442 + if (retval == NULL)
48443 + retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
48444 +out:
48445 + spin_unlock(&vfsmount_lock);
48446 + spin_unlock(&dcache_lock);
48447 +
48448 + BUG_ON(retval == NULL);
48449 +
48450 + return retval;
48451 +}
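/*
 * Aside (illustrative only, not part of the patch): __chk_obj_label() walks
 * from the target dentry toward the (real) root, crossing mount points, and
 * returns the first policy object found, so the most specific labeled
 * ancestor wins.  A purely string-based user-space sketch of that
 * walk-toward-the-root strategy; lookup_policy()/match_object() and the
 * sample paths are hypothetical.
 */
#include <stdio.h>
#include <string.h>

static const char *lookup_policy(const char *path)
{
    /* stand-in for the inode/device hash lookups done in the patch */
    static const char *labeled[] = { "/home/alice", "/home", "/" };
    size_t i;

    for (i = 0; i < sizeof(labeled) / sizeof(labeled[0]); i++)
        if (strcmp(path, labeled[i]) == 0)
            return labeled[i];
    return NULL;
}

static const char *match_object(char *path)
{
    const char *hit;
    char *slash;

    for (;;) {
        hit = lookup_policy(path);
        if (hit || strcmp(path, "/") == 0)
            return hit;
        slash = strrchr(path, '/');
        if (slash == path)
            strcpy(path, "/");	/* parent is the root itself */
        else
            *slash = '\0';	/* strip the last component */
    }
}

int main(void)
{
    char p[] = "/home/alice/.ssh/id_rsa";
    const char *m = match_object(p);

    printf("matched label: %s\n", m ? m : "(none)");  /* "/home/alice" */
    return 0;
}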
48452 +
48453 +static __inline__ struct acl_object_label *
48454 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48455 + const struct acl_subject_label *subj)
48456 +{
48457 + char *path = NULL;
48458 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
48459 +}
48460 +
48461 +static __inline__ struct acl_object_label *
48462 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48463 + const struct acl_subject_label *subj)
48464 +{
48465 + char *path = NULL;
48466 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
48467 +}
48468 +
48469 +static __inline__ struct acl_object_label *
48470 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48471 + const struct acl_subject_label *subj, char *path)
48472 +{
48473 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
48474 +}
48475 +
48476 +static struct acl_subject_label *
48477 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48478 + const struct acl_role_label *role)
48479 +{
48480 + struct dentry *dentry = (struct dentry *) l_dentry;
48481 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
48482 + struct acl_subject_label *retval;
48483 +
48484 + spin_lock(&dcache_lock);
48485 + spin_lock(&vfsmount_lock);
48486 +
48487 + for (;;) {
48488 + if (dentry == real_root && mnt == real_root_mnt)
48489 + break;
48490 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
48491 + if (mnt->mnt_parent == mnt)
48492 + break;
48493 +
48494 + read_lock(&gr_inode_lock);
48495 + retval =
48496 + lookup_acl_subj_label(dentry->d_inode->i_ino,
48497 + __get_dev(dentry), role);
48498 + read_unlock(&gr_inode_lock);
48499 + if (retval != NULL)
48500 + goto out;
48501 +
48502 + dentry = mnt->mnt_mountpoint;
48503 + mnt = mnt->mnt_parent;
48504 + continue;
48505 + }
48506 +
48507 + read_lock(&gr_inode_lock);
48508 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
48509 + __get_dev(dentry), role);
48510 + read_unlock(&gr_inode_lock);
48511 + if (retval != NULL)
48512 + goto out;
48513 +
48514 + dentry = dentry->d_parent;
48515 + }
48516 +
48517 + read_lock(&gr_inode_lock);
48518 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
48519 + __get_dev(dentry), role);
48520 + read_unlock(&gr_inode_lock);
48521 +
48522 + if (unlikely(retval == NULL)) {
48523 + read_lock(&gr_inode_lock);
48524 + retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
48525 + __get_dev(real_root), role);
48526 + read_unlock(&gr_inode_lock);
48527 + }
48528 +out:
48529 + spin_unlock(&vfsmount_lock);
48530 + spin_unlock(&dcache_lock);
48531 +
48532 + BUG_ON(retval == NULL);
48533 +
48534 + return retval;
48535 +}
48536 +
48537 +static void
48538 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
48539 +{
48540 + struct task_struct *task = current;
48541 + const struct cred *cred = current_cred();
48542 +
48543 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
48544 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
48545 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
48546 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
48547 +
48548 + return;
48549 +}
48550 +
48551 +static void
48552 +gr_log_learn_sysctl(const char *path, const __u32 mode)
48553 +{
48554 + struct task_struct *task = current;
48555 + const struct cred *cred = current_cred();
48556 +
48557 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
48558 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
48559 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
48560 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
48561 +
48562 + return;
48563 +}
48564 +
48565 +static void
48566 +gr_log_learn_id_change(const char type, const unsigned int real,
48567 + const unsigned int effective, const unsigned int fs)
48568 +{
48569 + struct task_struct *task = current;
48570 + const struct cred *cred = current_cred();
48571 +
48572 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
48573 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
48574 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
48575 + type, real, effective, fs, &task->signal->saved_ip);
48576 +
48577 + return;
48578 +}
48579 +
48580 +__u32
48581 +gr_check_link(const struct dentry * new_dentry,
48582 + const struct dentry * parent_dentry,
48583 + const struct vfsmount * parent_mnt,
48584 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
48585 +{
48586 + struct acl_object_label *obj;
48587 + __u32 oldmode, newmode;
48588 + __u32 needmode;
48589 +
48590 + if (unlikely(!(gr_status & GR_READY)))
48591 + return (GR_CREATE | GR_LINK);
48592 +
48593 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
48594 + oldmode = obj->mode;
48595 +
48596 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
48597 + oldmode |= (GR_CREATE | GR_LINK);
48598 +
48599 + needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
48600 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
48601 + needmode |= GR_SETID | GR_AUDIT_SETID;
48602 +
48603 + newmode =
48604 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
48605 + oldmode | needmode);
48606 +
48607 + needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
48608 + GR_SETID | GR_READ | GR_FIND | GR_DELETE |
48609 + GR_INHERIT | GR_AUDIT_INHERIT);
48610 +
48611 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
48612 + goto bad;
48613 +
48614 + if ((oldmode & needmode) != needmode)
48615 + goto bad;
48616 +
48617 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
48618 + if ((newmode & needmode) != needmode)
48619 + goto bad;
48620 +
48621 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
48622 + return newmode;
48623 +bad:
48624 + needmode = oldmode;
48625 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
48626 + needmode |= GR_SETID;
48627 +
48628 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
48629 + gr_log_learn(old_dentry, old_mnt, needmode);
48630 + return (GR_CREATE | GR_LINK);
48631 + } else if (newmode & GR_SUPPRESS)
48632 + return GR_SUPPRESS;
48633 + else
48634 + return 0;
48635 +}
48636 +
48637 +__u32
48638 +gr_search_file(const struct dentry * dentry, const __u32 mode,
48639 + const struct vfsmount * mnt)
48640 +{
48641 + __u32 retval = mode;
48642 + struct acl_subject_label *curracl;
48643 + struct acl_object_label *currobj;
48644 +
48645 + if (unlikely(!(gr_status & GR_READY)))
48646 + return (mode & ~GR_AUDITS);
48647 +
48648 + curracl = current->acl;
48649 +
48650 + currobj = chk_obj_label(dentry, mnt, curracl);
48651 + retval = currobj->mode & mode;
48652 +
48653 + /* if we're opening a specified transfer file for writing
48654 + (e.g. /dev/initctl), then transfer our role to init
48655 + */
48656 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
48657 + current->role->roletype & GR_ROLE_PERSIST)) {
48658 + struct task_struct *task = init_pid_ns.child_reaper;
48659 +
48660 + if (task->role != current->role) {
48661 + task->acl_sp_role = 0;
48662 + task->acl_role_id = current->acl_role_id;
48663 + task->role = current->role;
48664 + rcu_read_lock();
48665 + read_lock(&grsec_exec_file_lock);
48666 + gr_apply_subject_to_task(task);
48667 + read_unlock(&grsec_exec_file_lock);
48668 + rcu_read_unlock();
48669 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
48670 + }
48671 + }
48672 +
48673 + if (unlikely
48674 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
48675 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
48676 + __u32 new_mode = mode;
48677 +
48678 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
48679 +
48680 + retval = new_mode;
48681 +
48682 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
48683 + new_mode |= GR_INHERIT;
48684 +
48685 + if (!(mode & GR_NOLEARN))
48686 + gr_log_learn(dentry, mnt, new_mode);
48687 + }
48688 +
48689 + return retval;
48690 +}
48691 +
48692 +__u32
48693 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
48694 + const struct vfsmount * mnt, const __u32 mode)
48695 +{
48696 + struct name_entry *match;
48697 + struct acl_object_label *matchpo;
48698 + struct acl_subject_label *curracl;
48699 + char *path;
48700 + __u32 retval;
48701 +
48702 + if (unlikely(!(gr_status & GR_READY)))
48703 + return (mode & ~GR_AUDITS);
48704 +
48705 + preempt_disable();
48706 + path = gr_to_filename_rbac(new_dentry, mnt);
48707 + match = lookup_name_entry_create(path);
48708 +
48709 + if (!match)
48710 + goto check_parent;
48711 +
48712 + curracl = current->acl;
48713 +
48714 + read_lock(&gr_inode_lock);
48715 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
48716 + read_unlock(&gr_inode_lock);
48717 +
48718 + if (matchpo) {
48719 + if ((matchpo->mode & mode) !=
48720 + (mode & ~(GR_AUDITS | GR_SUPPRESS))
48721 + && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
48722 + __u32 new_mode = mode;
48723 +
48724 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
48725 +
48726 + gr_log_learn(new_dentry, mnt, new_mode);
48727 +
48728 + preempt_enable();
48729 + return new_mode;
48730 + }
48731 + preempt_enable();
48732 + return (matchpo->mode & mode);
48733 + }
48734 +
48735 + check_parent:
48736 + curracl = current->acl;
48737 +
48738 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
48739 + retval = matchpo->mode & mode;
48740 +
48741 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
48742 + && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
48743 + __u32 new_mode = mode;
48744 +
48745 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
48746 +
48747 + gr_log_learn(new_dentry, mnt, new_mode);
48748 + preempt_enable();
48749 + return new_mode;
48750 + }
48751 +
48752 + preempt_enable();
48753 + return retval;
48754 +}
48755 +
48756 +int
48757 +gr_check_hidden_task(const struct task_struct *task)
48758 +{
48759 + if (unlikely(!(gr_status & GR_READY)))
48760 + return 0;
48761 +
48762 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
48763 + return 1;
48764 +
48765 + return 0;
48766 +}
48767 +
48768 +int
48769 +gr_check_protected_task(const struct task_struct *task)
48770 +{
48771 + if (unlikely(!(gr_status & GR_READY) || !task))
48772 + return 0;
48773 +
48774 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
48775 + task->acl != current->acl)
48776 + return 1;
48777 +
48778 + return 0;
48779 +}
48780 +
48781 +int
48782 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
48783 +{
48784 + struct task_struct *p;
48785 + int ret = 0;
48786 +
48787 + if (unlikely(!(gr_status & GR_READY) || !pid))
48788 + return ret;
48789 +
48790 + read_lock(&tasklist_lock);
48791 + do_each_pid_task(pid, type, p) {
48792 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
48793 + p->acl != current->acl) {
48794 + ret = 1;
48795 + goto out;
48796 + }
48797 + } while_each_pid_task(pid, type, p);
48798 +out:
48799 + read_unlock(&tasklist_lock);
48800 +
48801 + return ret;
48802 +}
48803 +
48804 +void
48805 +gr_copy_label(struct task_struct *tsk)
48806 +{
48807 + tsk->signal->used_accept = 0;
48808 + tsk->acl_sp_role = 0;
48809 + tsk->acl_role_id = current->acl_role_id;
48810 + tsk->acl = current->acl;
48811 + tsk->role = current->role;
48812 + tsk->signal->curr_ip = current->signal->curr_ip;
48813 + tsk->signal->saved_ip = current->signal->saved_ip;
48814 + if (current->exec_file)
48815 + get_file(current->exec_file);
48816 + tsk->exec_file = current->exec_file;
48817 + tsk->is_writable = current->is_writable;
48818 + if (unlikely(current->signal->used_accept)) {
48819 + current->signal->curr_ip = 0;
48820 + current->signal->saved_ip = 0;
48821 + }
48822 +
48823 + return;
48824 +}
48825 +
48826 +static void
48827 +gr_set_proc_res(struct task_struct *task)
48828 +{
48829 + struct acl_subject_label *proc;
48830 + unsigned short i;
48831 +
48832 + proc = task->acl;
48833 +
48834 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
48835 + return;
48836 +
48837 + for (i = 0; i < RLIM_NLIMITS; i++) {
48838 + if (!(proc->resmask & (1 << i)))
48839 + continue;
48840 +
48841 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
48842 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
48843 + }
48844 +
48845 + return;
48846 +}
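/*
 * Aside (illustrative only, not part of the patch): gr_set_proc_res() above
 * uses proc->resmask as a per-subject bitmask saying which RLIMIT slots the
 * policy actually specifies; only those slots are overwritten.  Minimal
 * user-space sketch of that bitmask-gated update, with made-up values.
 */
#include <stdio.h>

#define SKETCH_NLIMITS 4

int main(void)
{
    unsigned long cur[SKETCH_NLIMITS]    = { 100, 200, 300, 400 };
    unsigned long policy[SKETCH_NLIMITS] = { 1, 2, 3, 4 };
    unsigned int resmask = (1 << 0) | (1 << 2);	/* only slots 0 and 2 are set */
    int i;

    for (i = 0; i < SKETCH_NLIMITS; i++) {
        if (!(resmask & (1 << i)))
            continue;
        cur[i] = policy[i];
    }
    for (i = 0; i < SKETCH_NLIMITS; i++)
        printf("%lu ", cur[i]);		/* prints: 1 200 3 400 */
    printf("\n");
    return 0;
}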
48847 +
48848 +extern int __gr_process_user_ban(struct user_struct *user);
48849 +
48850 +int
48851 +gr_check_user_change(int real, int effective, int fs)
48852 +{
48853 + unsigned int i;
48854 + __u16 num;
48855 + uid_t *uidlist;
48856 + int curuid;
48857 + int realok = 0;
48858 + int effectiveok = 0;
48859 + int fsok = 0;
48860 +
48861 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
48862 + struct user_struct *user;
48863 +
48864 + if (real == -1)
48865 + goto skipit;
48866 +
48867 + user = find_user(real);
48868 + if (user == NULL)
48869 + goto skipit;
48870 +
48871 + if (__gr_process_user_ban(user)) {
48872 + /* for find_user */
48873 + free_uid(user);
48874 + return 1;
48875 + }
48876 +
48877 + /* for find_user */
48878 + free_uid(user);
48879 +
48880 +skipit:
48881 +#endif
48882 +
48883 + if (unlikely(!(gr_status & GR_READY)))
48884 + return 0;
48885 +
48886 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
48887 + gr_log_learn_id_change('u', real, effective, fs);
48888 +
48889 + num = current->acl->user_trans_num;
48890 + uidlist = current->acl->user_transitions;
48891 +
48892 + if (uidlist == NULL)
48893 + return 0;
48894 +
48895 + if (real == -1)
48896 + realok = 1;
48897 + if (effective == -1)
48898 + effectiveok = 1;
48899 + if (fs == -1)
48900 + fsok = 1;
48901 +
48902 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
48903 + for (i = 0; i < num; i++) {
48904 + curuid = (int)uidlist[i];
48905 + if (real == curuid)
48906 + realok = 1;
48907 + if (effective == curuid)
48908 + effectiveok = 1;
48909 + if (fs == curuid)
48910 + fsok = 1;
48911 + }
48912 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
48913 + for (i = 0; i < num; i++) {
48914 + curuid = (int)uidlist[i];
48915 + if (real == curuid)
48916 + break;
48917 + if (effective == curuid)
48918 + break;
48919 + if (fs == curuid)
48920 + break;
48921 + }
48922 + /* not in deny list */
48923 + if (i == num) {
48924 + realok = 1;
48925 + effectiveok = 1;
48926 + fsok = 1;
48927 + }
48928 + }
48929 +
48930 + if (realok && effectiveok && fsok)
48931 + return 0;
48932 + else {
48933 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
48934 + return 1;
48935 + }
48936 +}
48937 +
48938 +int
48939 +gr_check_group_change(int real, int effective, int fs)
48940 +{
48941 + unsigned int i;
48942 + __u16 num;
48943 + gid_t *gidlist;
48944 + int curgid;
48945 + int realok = 0;
48946 + int effectiveok = 0;
48947 + int fsok = 0;
48948 +
48949 + if (unlikely(!(gr_status & GR_READY)))
48950 + return 0;
48951 +
48952 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
48953 + gr_log_learn_id_change('g', real, effective, fs);
48954 +
48955 + num = current->acl->group_trans_num;
48956 + gidlist = current->acl->group_transitions;
48957 +
48958 + if (gidlist == NULL)
48959 + return 0;
48960 +
48961 + if (real == -1)
48962 + realok = 1;
48963 + if (effective == -1)
48964 + effectiveok = 1;
48965 + if (fs == -1)
48966 + fsok = 1;
48967 +
48968 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
48969 + for (i = 0; i < num; i++) {
48970 + curgid = (int)gidlist[i];
48971 + if (real == curgid)
48972 + realok = 1;
48973 + if (effective == curgid)
48974 + effectiveok = 1;
48975 + if (fs == curgid)
48976 + fsok = 1;
48977 + }
48978 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
48979 + for (i = 0; i < num; i++) {
48980 + curgid = (int)gidlist[i];
48981 + if (real == curgid)
48982 + break;
48983 + if (effective == curgid)
48984 + break;
48985 + if (fs == curgid)
48986 + break;
48987 + }
48988 + /* not in deny list */
48989 + if (i == num) {
48990 + realok = 1;
48991 + effectiveok = 1;
48992 + fsok = 1;
48993 + }
48994 + }
48995 +
48996 + if (realok && effectiveok && fsok)
48997 + return 0;
48998 + else {
48999 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
49000 + return 1;
49001 + }
49002 +}
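/*
 * Aside (illustrative only, not part of the patch): gr_check_user_change()
 * and gr_check_group_change() share one idea: with a GR_ID_ALLOW list every
 * id being changed to must appear in the subject's transition list, and with
 * a GR_ID_DENY list none may (the patch applies this jointly to the
 * real/effective/fs triple, which this sketch simplifies to a single id).
 * id_permitted() and the enum are hypothetical names.
 */
#include <stdio.h>

enum sketch_list_type { SKETCH_ID_ALLOW, SKETCH_ID_DENY };

static int id_permitted(int id, const int *list, unsigned int num,
                        enum sketch_list_type type)
{
    unsigned int i;

    if (id == -1)			/* -1 means "not being changed" */
        return 1;
    for (i = 0; i < num; i++)
        if (list[i] == id)
            return type == SKETCH_ID_ALLOW;
    return type == SKETCH_ID_DENY;
}

int main(void)
{
    const int list[] = { 1000, 33 };

    printf("%d\n", id_permitted(33, list, 2, SKETCH_ID_ALLOW));  /* 1: in allow list */
    printf("%d\n", id_permitted(0, list, 2, SKETCH_ID_ALLOW));   /* 0: not in allow list */
    printf("%d\n", id_permitted(0, list, 2, SKETCH_ID_DENY));    /* 1: not in deny list */
    return 0;
}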
49003 +
49004 +void
49005 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
49006 +{
49007 + struct acl_role_label *role = task->role;
49008 + struct acl_subject_label *subj = NULL;
49009 + struct acl_object_label *obj;
49010 + struct file *filp;
49011 +
49012 + if (unlikely(!(gr_status & GR_READY)))
49013 + return;
49014 +
49015 + filp = task->exec_file;
49016 +
49017 + /* kernel process, we'll give them the kernel role */
49018 + if (unlikely(!filp)) {
49019 + task->role = kernel_role;
49020 + task->acl = kernel_role->root_label;
49021 + return;
49022 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
49023 + role = lookup_acl_role_label(task, uid, gid);
49024 +
49025 +	/* perform the subject lookup in the possibly new role;
49026 +	   we can use this result below in the case where role == task->role
49027 +	*/
49028 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
49029 +
49030 +	/* if we changed uid/gid, but ended up with the same role
49031 +	   and are using inheritance, don't lose the inherited subject.
49032 +	   If the current subject is other than what a normal lookup
49033 +	   would result in, we arrived via inheritance; don't
49034 +	   lose the subject
49035 +	*/
49036 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
49037 + (subj == task->acl)))
49038 + task->acl = subj;
49039 +
49040 + task->role = role;
49041 +
49042 + task->is_writable = 0;
49043 +
49044 + /* ignore additional mmap checks for processes that are writable
49045 + by the default ACL */
49046 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49047 + if (unlikely(obj->mode & GR_WRITE))
49048 + task->is_writable = 1;
49049 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
49050 + if (unlikely(obj->mode & GR_WRITE))
49051 + task->is_writable = 1;
49052 +
49053 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49054 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49055 +#endif
49056 +
49057 + gr_set_proc_res(task);
49058 +
49059 + return;
49060 +}
49061 +
49062 +int
49063 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
49064 + const int unsafe_share)
49065 +{
49066 + struct task_struct *task = current;
49067 + struct acl_subject_label *newacl;
49068 + struct acl_object_label *obj;
49069 + __u32 retmode;
49070 +
49071 + if (unlikely(!(gr_status & GR_READY)))
49072 + return 0;
49073 +
49074 + newacl = chk_subj_label(dentry, mnt, task->role);
49075 +
49076 + task_lock(task);
49077 + if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
49078 + !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
49079 + !(task->role->roletype & GR_ROLE_GOD) &&
49080 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
49081 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
49082 + task_unlock(task);
49083 + if (unsafe_share)
49084 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
49085 + else
49086 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
49087 + return -EACCES;
49088 + }
49089 + task_unlock(task);
49090 +
49091 + obj = chk_obj_label(dentry, mnt, task->acl);
49092 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
49093 +
49094 + if (!(task->acl->mode & GR_INHERITLEARN) &&
49095 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
49096 + if (obj->nested)
49097 + task->acl = obj->nested;
49098 + else
49099 + task->acl = newacl;
49100 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
49101 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
49102 +
49103 + task->is_writable = 0;
49104 +
49105 + /* ignore additional mmap checks for processes that are writable
49106 + by the default ACL */
49107 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
49108 + if (unlikely(obj->mode & GR_WRITE))
49109 + task->is_writable = 1;
49110 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
49111 + if (unlikely(obj->mode & GR_WRITE))
49112 + task->is_writable = 1;
49113 +
49114 + gr_set_proc_res(task);
49115 +
49116 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49117 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49118 +#endif
49119 + return 0;
49120 +}
49121 +
49122 +/* always called with valid inodev ptr */
49123 +static void
49124 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
49125 +{
49126 + struct acl_object_label *matchpo;
49127 + struct acl_subject_label *matchps;
49128 + struct acl_subject_label *subj;
49129 + struct acl_role_label *role;
49130 + unsigned int x;
49131 +
49132 + FOR_EACH_ROLE_START(role)
49133 + FOR_EACH_SUBJECT_START(role, subj, x)
49134 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
49135 + matchpo->mode |= GR_DELETED;
49136 + FOR_EACH_SUBJECT_END(subj,x)
49137 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
49138 + if (subj->inode == ino && subj->device == dev)
49139 + subj->mode |= GR_DELETED;
49140 + FOR_EACH_NESTED_SUBJECT_END(subj)
49141 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
49142 + matchps->mode |= GR_DELETED;
49143 + FOR_EACH_ROLE_END(role)
49144 +
49145 + inodev->nentry->deleted = 1;
49146 +
49147 + return;
49148 +}
49149 +
49150 +void
49151 +gr_handle_delete(const ino_t ino, const dev_t dev)
49152 +{
49153 + struct inodev_entry *inodev;
49154 +
49155 + if (unlikely(!(gr_status & GR_READY)))
49156 + return;
49157 +
49158 + write_lock(&gr_inode_lock);
49159 + inodev = lookup_inodev_entry(ino, dev);
49160 + if (inodev != NULL)
49161 + do_handle_delete(inodev, ino, dev);
49162 + write_unlock(&gr_inode_lock);
49163 +
49164 + return;
49165 +}
49166 +
49167 +static void
49168 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
49169 + const ino_t newinode, const dev_t newdevice,
49170 + struct acl_subject_label *subj)
49171 +{
49172 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
49173 + struct acl_object_label *match;
49174 +
49175 + match = subj->obj_hash[index];
49176 +
49177 + while (match && (match->inode != oldinode ||
49178 + match->device != olddevice ||
49179 + !(match->mode & GR_DELETED)))
49180 + match = match->next;
49181 +
49182 + if (match && (match->inode == oldinode)
49183 + && (match->device == olddevice)
49184 + && (match->mode & GR_DELETED)) {
49185 + if (match->prev == NULL) {
49186 + subj->obj_hash[index] = match->next;
49187 + if (match->next != NULL)
49188 + match->next->prev = NULL;
49189 + } else {
49190 + match->prev->next = match->next;
49191 + if (match->next != NULL)
49192 + match->next->prev = match->prev;
49193 + }
49194 + match->prev = NULL;
49195 + match->next = NULL;
49196 + match->inode = newinode;
49197 + match->device = newdevice;
49198 + match->mode &= ~GR_DELETED;
49199 +
49200 + insert_acl_obj_label(match, subj);
49201 + }
49202 +
49203 + return;
49204 +}
49205 +
49206 +static void
49207 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
49208 + const ino_t newinode, const dev_t newdevice,
49209 + struct acl_role_label *role)
49210 +{
49211 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
49212 + struct acl_subject_label *match;
49213 +
49214 + match = role->subj_hash[index];
49215 +
49216 + while (match && (match->inode != oldinode ||
49217 + match->device != olddevice ||
49218 + !(match->mode & GR_DELETED)))
49219 + match = match->next;
49220 +
49221 + if (match && (match->inode == oldinode)
49222 + && (match->device == olddevice)
49223 + && (match->mode & GR_DELETED)) {
49224 + if (match->prev == NULL) {
49225 + role->subj_hash[index] = match->next;
49226 + if (match->next != NULL)
49227 + match->next->prev = NULL;
49228 + } else {
49229 + match->prev->next = match->next;
49230 + if (match->next != NULL)
49231 + match->next->prev = match->prev;
49232 + }
49233 + match->prev = NULL;
49234 + match->next = NULL;
49235 + match->inode = newinode;
49236 + match->device = newdevice;
49237 + match->mode &= ~GR_DELETED;
49238 +
49239 + insert_acl_subj_label(match, role);
49240 + }
49241 +
49242 + return;
49243 +}
49244 +
49245 +static void
49246 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
49247 + const ino_t newinode, const dev_t newdevice)
49248 +{
49249 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
49250 + struct inodev_entry *match;
49251 +
49252 + match = inodev_set.i_hash[index];
49253 +
49254 + while (match && (match->nentry->inode != oldinode ||
49255 + match->nentry->device != olddevice || !match->nentry->deleted))
49256 + match = match->next;
49257 +
49258 + if (match && (match->nentry->inode == oldinode)
49259 + && (match->nentry->device == olddevice) &&
49260 + match->nentry->deleted) {
49261 + if (match->prev == NULL) {
49262 + inodev_set.i_hash[index] = match->next;
49263 + if (match->next != NULL)
49264 + match->next->prev = NULL;
49265 + } else {
49266 + match->prev->next = match->next;
49267 + if (match->next != NULL)
49268 + match->next->prev = match->prev;
49269 + }
49270 + match->prev = NULL;
49271 + match->next = NULL;
49272 + match->nentry->inode = newinode;
49273 + match->nentry->device = newdevice;
49274 + match->nentry->deleted = 0;
49275 +
49276 + insert_inodev_entry(match);
49277 + }
49278 +
49279 + return;
49280 +}
49281 +
49282 +static void
49283 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
49284 + const struct vfsmount *mnt)
49285 +{
49286 + struct acl_subject_label *subj;
49287 + struct acl_role_label *role;
49288 + unsigned int x;
49289 + ino_t inode = dentry->d_inode->i_ino;
49290 + dev_t dev = __get_dev(dentry);
49291 +
49292 + FOR_EACH_ROLE_START(role)
49293 + update_acl_subj_label(matchn->inode, matchn->device,
49294 + inode, dev, role);
49295 +
49296 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
49297 + if ((subj->inode == inode) && (subj->device == dev)) {
49298 + subj->inode = inode;
49299 + subj->device = dev;
49300 + }
49301 + FOR_EACH_NESTED_SUBJECT_END(subj)
49302 + FOR_EACH_SUBJECT_START(role, subj, x)
49303 + update_acl_obj_label(matchn->inode, matchn->device,
49304 + inode, dev, subj);
49305 + FOR_EACH_SUBJECT_END(subj,x)
49306 + FOR_EACH_ROLE_END(role)
49307 +
49308 + update_inodev_entry(matchn->inode, matchn->device, inode, dev);
49309 +
49310 + return;
49311 +}
49312 +
49313 +void
49314 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
49315 +{
49316 + struct name_entry *matchn;
49317 +
49318 + if (unlikely(!(gr_status & GR_READY)))
49319 + return;
49320 +
49321 + preempt_disable();
49322 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
49323 +
49324 + if (unlikely((unsigned long)matchn)) {
49325 + write_lock(&gr_inode_lock);
49326 + do_handle_create(matchn, dentry, mnt);
49327 + write_unlock(&gr_inode_lock);
49328 + }
49329 + preempt_enable();
49330 +
49331 + return;
49332 +}
49333 +
49334 +void
49335 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
49336 + struct dentry *old_dentry,
49337 + struct dentry *new_dentry,
49338 + struct vfsmount *mnt, const __u8 replace)
49339 +{
49340 + struct name_entry *matchn;
49341 + struct inodev_entry *inodev;
49342 + ino_t oldinode = old_dentry->d_inode->i_ino;
49343 + dev_t olddev = __get_dev(old_dentry);
49344 +
49345 + /* vfs_rename swaps the name and parent link for old_dentry and
49346 + new_dentry
49347 + at this point, old_dentry has the new name, parent link, and inode
49348 + for the renamed file
49349 + if a file is being replaced by a rename, new_dentry has the inode
49350 + and name for the replaced file
49351 + */
49352 +
49353 + if (unlikely(!(gr_status & GR_READY)))
49354 + return;
49355 +
49356 + preempt_disable();
49357 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
49358 +
49359 + /* we wouldn't have to check d_inode if it weren't for
49360 + NFS silly-renaming
49361 + */
49362 +
49363 + write_lock(&gr_inode_lock);
49364 + if (unlikely(replace && new_dentry->d_inode)) {
49365 + ino_t newinode = new_dentry->d_inode->i_ino;
49366 + dev_t newdev = __get_dev(new_dentry);
49367 + inodev = lookup_inodev_entry(newinode, newdev);
49368 + if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
49369 + do_handle_delete(inodev, newinode, newdev);
49370 + }
49371 +
49372 + inodev = lookup_inodev_entry(oldinode, olddev);
49373 + if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
49374 + do_handle_delete(inodev, oldinode, olddev);
49375 +
49376 + if (unlikely((unsigned long)matchn))
49377 + do_handle_create(matchn, old_dentry, mnt);
49378 +
49379 + write_unlock(&gr_inode_lock);
49380 + preempt_enable();
49381 +
49382 + return;
49383 +}
49384 +
49385 +static int
49386 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
49387 + unsigned char **sum)
49388 +{
49389 + struct acl_role_label *r;
49390 + struct role_allowed_ip *ipp;
49391 + struct role_transition *trans;
49392 + unsigned int i;
49393 + int found = 0;
49394 + u32 curr_ip = current->signal->curr_ip;
49395 +
49396 + current->signal->saved_ip = curr_ip;
49397 +
49398 + /* check transition table */
49399 +
49400 + for (trans = current->role->transitions; trans; trans = trans->next) {
49401 + if (!strcmp(rolename, trans->rolename)) {
49402 + found = 1;
49403 + break;
49404 + }
49405 + }
49406 +
49407 + if (!found)
49408 + return 0;
49409 +
49410 + /* handle special roles that do not require authentication
49411 + and check ip */
49412 +
49413 + FOR_EACH_ROLE_START(r)
49414 + if (!strcmp(rolename, r->rolename) &&
49415 + (r->roletype & GR_ROLE_SPECIAL)) {
49416 + found = 0;
49417 + if (r->allowed_ips != NULL) {
49418 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
49419 + if ((ntohl(curr_ip) & ipp->netmask) ==
49420 + (ntohl(ipp->addr) & ipp->netmask))
49421 + found = 1;
49422 + }
49423 + } else
49424 + found = 2;
49425 + if (!found)
49426 + return 0;
49427 +
49428 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
49429 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
49430 + *salt = NULL;
49431 + *sum = NULL;
49432 + return 1;
49433 + }
49434 + }
49435 + FOR_EACH_ROLE_END(r)
49436 +
49437 + for (i = 0; i < num_sprole_pws; i++) {
49438 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
49439 + *salt = acl_special_roles[i]->salt;
49440 + *sum = acl_special_roles[i]->sum;
49441 + return 1;
49442 + }
49443 + }
49444 +
49445 + return 0;
49446 +}
49447 +
49448 +static void
49449 +assign_special_role(char *rolename)
49450 +{
49451 + struct acl_object_label *obj;
49452 + struct acl_role_label *r;
49453 + struct acl_role_label *assigned = NULL;
49454 + struct task_struct *tsk;
49455 + struct file *filp;
49456 +
49457 + FOR_EACH_ROLE_START(r)
49458 + if (!strcmp(rolename, r->rolename) &&
49459 + (r->roletype & GR_ROLE_SPECIAL)) {
49460 + assigned = r;
49461 + break;
49462 + }
49463 + FOR_EACH_ROLE_END(r)
49464 +
49465 + if (!assigned)
49466 + return;
49467 +
49468 + read_lock(&tasklist_lock);
49469 + read_lock(&grsec_exec_file_lock);
49470 +
49471 + tsk = current->real_parent;
49472 + if (tsk == NULL)
49473 + goto out_unlock;
49474 +
49475 + filp = tsk->exec_file;
49476 + if (filp == NULL)
49477 + goto out_unlock;
49478 +
49479 + tsk->is_writable = 0;
49480 +
49481 + tsk->acl_sp_role = 1;
49482 + tsk->acl_role_id = ++acl_sp_role_value;
49483 + tsk->role = assigned;
49484 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
49485 +
49486 + /* ignore additional mmap checks for processes that are writable
49487 + by the default ACL */
49488 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49489 + if (unlikely(obj->mode & GR_WRITE))
49490 + tsk->is_writable = 1;
49491 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
49492 + if (unlikely(obj->mode & GR_WRITE))
49493 + tsk->is_writable = 1;
49494 +
49495 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49496 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
49497 +#endif
49498 +
49499 +out_unlock:
49500 + read_unlock(&grsec_exec_file_lock);
49501 + read_unlock(&tasklist_lock);
49502 + return;
49503 +}
49504 +
49505 +int gr_check_secure_terminal(struct task_struct *task)
49506 +{
49507 + struct task_struct *p, *p2, *p3;
49508 + struct files_struct *files;
49509 + struct fdtable *fdt;
49510 + struct file *our_file = NULL, *file;
49511 + int i;
49512 +
49513 + if (task->signal->tty == NULL)
49514 + return 1;
49515 +
49516 + files = get_files_struct(task);
49517 + if (files != NULL) {
49518 + rcu_read_lock();
49519 + fdt = files_fdtable(files);
49520 + for (i=0; i < fdt->max_fds; i++) {
49521 + file = fcheck_files(files, i);
49522 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
49523 + get_file(file);
49524 + our_file = file;
49525 + }
49526 + }
49527 + rcu_read_unlock();
49528 + put_files_struct(files);
49529 + }
49530 +
49531 + if (our_file == NULL)
49532 + return 1;
49533 +
49534 + read_lock(&tasklist_lock);
49535 + do_each_thread(p2, p) {
49536 + files = get_files_struct(p);
49537 + if (files == NULL ||
49538 + (p->signal && p->signal->tty == task->signal->tty)) {
49539 + if (files != NULL)
49540 + put_files_struct(files);
49541 + continue;
49542 + }
49543 + rcu_read_lock();
49544 + fdt = files_fdtable(files);
49545 + for (i=0; i < fdt->max_fds; i++) {
49546 + file = fcheck_files(files, i);
49547 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
49548 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
49549 + p3 = task;
49550 + while (p3->pid > 0) {
49551 + if (p3 == p)
49552 + break;
49553 + p3 = p3->real_parent;
49554 + }
49555 + if (p3 == p)
49556 + break;
49557 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
49558 + gr_handle_alertkill(p);
49559 + rcu_read_unlock();
49560 + put_files_struct(files);
49561 + read_unlock(&tasklist_lock);
49562 + fput(our_file);
49563 + return 0;
49564 + }
49565 + }
49566 + rcu_read_unlock();
49567 + put_files_struct(files);
49568 + } while_each_thread(p2, p);
49569 + read_unlock(&tasklist_lock);
49570 +
49571 + fput(our_file);
49572 + return 1;
49573 +}
49574 +
49575 +ssize_t
49576 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
49577 +{
49578 + struct gr_arg_wrapper uwrap;
49579 + unsigned char *sprole_salt = NULL;
49580 + unsigned char *sprole_sum = NULL;
49581 + int error = sizeof (struct gr_arg_wrapper);
49582 + int error2 = 0;
49583 +
49584 + mutex_lock(&gr_dev_mutex);
49585 +
49586 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
49587 + error = -EPERM;
49588 + goto out;
49589 + }
49590 +
49591 + if (count != sizeof (struct gr_arg_wrapper)) {
49592 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
49593 + error = -EINVAL;
49594 + goto out;
49595 + }
49596 +
49597 +
49598 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
49599 + gr_auth_expires = 0;
49600 + gr_auth_attempts = 0;
49601 + }
49602 +
49603 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
49604 + error = -EFAULT;
49605 + goto out;
49606 + }
49607 +
49608 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
49609 + error = -EINVAL;
49610 + goto out;
49611 + }
49612 +
49613 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
49614 + error = -EFAULT;
49615 + goto out;
49616 + }
49617 +
49618 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
49619 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
49620 + time_after(gr_auth_expires, get_seconds())) {
49621 + error = -EBUSY;
49622 + goto out;
49623 + }
49624 +
49625 +	/* if a non-root user is trying to do anything other than use a special role,
49626 +	   do not attempt authentication and do not count the attempt towards
49627 +	   authentication lockout
49628 +	*/
49629 +
49630 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
49631 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
49632 + current_uid()) {
49633 + error = -EPERM;
49634 + goto out;
49635 + }
49636 +
49637 + /* ensure pw and special role name are null terminated */
49638 +
49639 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
49640 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
49641 +
49642 +	/* Okay.
49643 +	 * We now have enough of the argument structure (we have yet
49644 +	 * to copy_from_user the tables themselves). Copy the tables
49645 +	 * only if we need them, i.e. for loading operations. */
49646 +
49647 + switch (gr_usermode->mode) {
49648 + case GR_STATUS:
49649 + if (gr_status & GR_READY) {
49650 + error = 1;
49651 + if (!gr_check_secure_terminal(current))
49652 + error = 3;
49653 + } else
49654 + error = 2;
49655 + goto out;
49656 + case GR_SHUTDOWN:
49657 + if ((gr_status & GR_READY)
49658 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
49659 + pax_open_kernel();
49660 + gr_status &= ~GR_READY;
49661 + pax_close_kernel();
49662 +
49663 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
49664 + free_variables();
49665 + memset(gr_usermode, 0, sizeof (struct gr_arg));
49666 + memset(gr_system_salt, 0, GR_SALT_LEN);
49667 + memset(gr_system_sum, 0, GR_SHA_LEN);
49668 + } else if (gr_status & GR_READY) {
49669 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
49670 + error = -EPERM;
49671 + } else {
49672 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
49673 + error = -EAGAIN;
49674 + }
49675 + break;
49676 + case GR_ENABLE:
49677 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
49678 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
49679 + else {
49680 + if (gr_status & GR_READY)
49681 + error = -EAGAIN;
49682 + else
49683 + error = error2;
49684 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
49685 + }
49686 + break;
49687 + case GR_RELOAD:
49688 + if (!(gr_status & GR_READY)) {
49689 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
49690 + error = -EAGAIN;
49691 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
49692 + lock_kernel();
49693 +
49694 + pax_open_kernel();
49695 + gr_status &= ~GR_READY;
49696 + pax_close_kernel();
49697 +
49698 + free_variables();
49699 + if (!(error2 = gracl_init(gr_usermode))) {
49700 + unlock_kernel();
49701 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
49702 + } else {
49703 + unlock_kernel();
49704 + error = error2;
49705 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
49706 + }
49707 + } else {
49708 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
49709 + error = -EPERM;
49710 + }
49711 + break;
49712 + case GR_SEGVMOD:
49713 + if (unlikely(!(gr_status & GR_READY))) {
49714 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
49715 + error = -EAGAIN;
49716 + break;
49717 + }
49718 +
49719 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
49720 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
49721 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
49722 + struct acl_subject_label *segvacl;
49723 + segvacl =
49724 + lookup_acl_subj_label(gr_usermode->segv_inode,
49725 + gr_usermode->segv_device,
49726 + current->role);
49727 + if (segvacl) {
49728 + segvacl->crashes = 0;
49729 + segvacl->expires = 0;
49730 + }
49731 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
49732 + gr_remove_uid(gr_usermode->segv_uid);
49733 + }
49734 + } else {
49735 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
49736 + error = -EPERM;
49737 + }
49738 + break;
49739 + case GR_SPROLE:
49740 + case GR_SPROLEPAM:
49741 + if (unlikely(!(gr_status & GR_READY))) {
49742 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
49743 + error = -EAGAIN;
49744 + break;
49745 + }
49746 +
49747 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
49748 + current->role->expires = 0;
49749 + current->role->auth_attempts = 0;
49750 + }
49751 +
49752 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
49753 + time_after(current->role->expires, get_seconds())) {
49754 + error = -EBUSY;
49755 + goto out;
49756 + }
49757 +
49758 + if (lookup_special_role_auth
49759 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
49760 + && ((!sprole_salt && !sprole_sum)
49761 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
49762 + char *p = "";
49763 + assign_special_role(gr_usermode->sp_role);
49764 + read_lock(&tasklist_lock);
49765 + if (current->real_parent)
49766 + p = current->real_parent->role->rolename;
49767 + read_unlock(&tasklist_lock);
49768 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
49769 + p, acl_sp_role_value);
49770 + } else {
49771 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
49772 + error = -EPERM;
49773 + if(!(current->role->auth_attempts++))
49774 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
49775 +
49776 + goto out;
49777 + }
49778 + break;
49779 + case GR_UNSPROLE:
49780 + if (unlikely(!(gr_status & GR_READY))) {
49781 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
49782 + error = -EAGAIN;
49783 + break;
49784 + }
49785 +
49786 + if (current->role->roletype & GR_ROLE_SPECIAL) {
49787 + char *p = "";
49788 + int i = 0;
49789 +
49790 + read_lock(&tasklist_lock);
49791 + if (current->real_parent) {
49792 + p = current->real_parent->role->rolename;
49793 + i = current->real_parent->acl_role_id;
49794 + }
49795 + read_unlock(&tasklist_lock);
49796 +
49797 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
49798 + gr_set_acls(1);
49799 + } else {
49800 + error = -EPERM;
49801 + goto out;
49802 + }
49803 + break;
49804 + default:
49805 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
49806 + error = -EINVAL;
49807 + break;
49808 + }
49809 +
49810 + if (error != -EPERM)
49811 + goto out;
49812 +
49813 + if(!(gr_auth_attempts++))
49814 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
49815 +
49816 + out:
49817 + mutex_unlock(&gr_dev_mutex);
49818 + return error;
49819 +}
49820 +
49821 +/* must be called with
49822 + rcu_read_lock();
49823 + read_lock(&tasklist_lock);
49824 + read_lock(&grsec_exec_file_lock);
49825 +*/
49826 +int gr_apply_subject_to_task(struct task_struct *task)
49827 +{
49828 + struct acl_object_label *obj;
49829 + char *tmpname;
49830 + struct acl_subject_label *tmpsubj;
49831 + struct file *filp;
49832 + struct name_entry *nmatch;
49833 +
49834 + filp = task->exec_file;
49835 + if (filp == NULL)
49836 + return 0;
49837 +
49838 +	/* the following applies the correct subject
49839 +	   to binaries that were already running when the RBAC system
49840 +	   was enabled and that have been
49841 +	   replaced or deleted since their execution
49842 +	   -----
49843 +	   when the RBAC system starts, the inode/dev
49844 +	   from exec_file will be one that the RBAC system
49845 +	   is unaware of.  It only knows the inode/dev
49846 +	   of the file currently present on disk, or the absence
49847 +	   of it.
49848 +	*/
49849 + preempt_disable();
49850 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
49851 +
49852 + nmatch = lookup_name_entry(tmpname);
49853 + preempt_enable();
49854 + tmpsubj = NULL;
49855 + if (nmatch) {
49856 + if (nmatch->deleted)
49857 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
49858 + else
49859 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
49860 + if (tmpsubj != NULL)
49861 + task->acl = tmpsubj;
49862 + }
49863 + if (tmpsubj == NULL)
49864 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
49865 + task->role);
49866 + if (task->acl) {
49867 + task->is_writable = 0;
49868 + /* ignore additional mmap checks for processes that are writable
49869 + by the default ACL */
49870 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49871 + if (unlikely(obj->mode & GR_WRITE))
49872 + task->is_writable = 1;
49873 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
49874 + if (unlikely(obj->mode & GR_WRITE))
49875 + task->is_writable = 1;
49876 +
49877 + gr_set_proc_res(task);
49878 +
49879 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49880 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49881 +#endif
49882 + } else {
49883 + return 1;
49884 + }
49885 +
49886 + return 0;
49887 +}
49888 +
49889 +int
49890 +gr_set_acls(const int type)
49891 +{
49892 + struct task_struct *task, *task2;
49893 + struct acl_role_label *role = current->role;
49894 + __u16 acl_role_id = current->acl_role_id;
49895 + const struct cred *cred;
49896 + int ret;
49897 +
49898 + rcu_read_lock();
49899 + read_lock(&tasklist_lock);
49900 + read_lock(&grsec_exec_file_lock);
49901 + do_each_thread(task2, task) {
49902 + /* check to see if we're called from the exit handler,
49903 + if so, only replace ACLs that have inherited the admin
49904 + ACL */
49905 +
49906 + if (type && (task->role != role ||
49907 + task->acl_role_id != acl_role_id))
49908 + continue;
49909 +
49910 + task->acl_role_id = 0;
49911 + task->acl_sp_role = 0;
49912 +
49913 + if (task->exec_file) {
49914 + cred = __task_cred(task);
49915 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
49916 +
49917 + ret = gr_apply_subject_to_task(task);
49918 + if (ret) {
49919 + read_unlock(&grsec_exec_file_lock);
49920 + read_unlock(&tasklist_lock);
49921 + rcu_read_unlock();
49922 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
49923 + return ret;
49924 + }
49925 + } else {
49926 + // it's a kernel process
49927 + task->role = kernel_role;
49928 + task->acl = kernel_role->root_label;
49929 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
49930 + task->acl->mode &= ~GR_PROCFIND;
49931 +#endif
49932 + }
49933 + } while_each_thread(task2, task);
49934 + read_unlock(&grsec_exec_file_lock);
49935 + read_unlock(&tasklist_lock);
49936 + rcu_read_unlock();
49937 +
49938 + return 0;
49939 +}
49940 +
49941 +void
49942 +gr_learn_resource(const struct task_struct *task,
49943 + const int res, const unsigned long wanted, const int gt)
49944 +{
49945 + struct acl_subject_label *acl;
49946 + const struct cred *cred;
49947 +
49948 + if (unlikely((gr_status & GR_READY) &&
49949 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
49950 + goto skip_reslog;
49951 +
49952 +#ifdef CONFIG_GRKERNSEC_RESLOG
49953 + gr_log_resource(task, res, wanted, gt);
49954 +#endif
49955 + skip_reslog:
49956 +
49957 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
49958 + return;
49959 +
49960 + acl = task->acl;
49961 +
49962 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
49963 + !(acl->resmask & (1 << (unsigned short) res))))
49964 + return;
49965 +
49966 + if (wanted >= acl->res[res].rlim_cur) {
49967 + unsigned long res_add;
49968 +
49969 + res_add = wanted;
49970 + switch (res) {
49971 + case RLIMIT_CPU:
49972 + res_add += GR_RLIM_CPU_BUMP;
49973 + break;
49974 + case RLIMIT_FSIZE:
49975 + res_add += GR_RLIM_FSIZE_BUMP;
49976 + break;
49977 + case RLIMIT_DATA:
49978 + res_add += GR_RLIM_DATA_BUMP;
49979 + break;
49980 + case RLIMIT_STACK:
49981 + res_add += GR_RLIM_STACK_BUMP;
49982 + break;
49983 + case RLIMIT_CORE:
49984 + res_add += GR_RLIM_CORE_BUMP;
49985 + break;
49986 + case RLIMIT_RSS:
49987 + res_add += GR_RLIM_RSS_BUMP;
49988 + break;
49989 + case RLIMIT_NPROC:
49990 + res_add += GR_RLIM_NPROC_BUMP;
49991 + break;
49992 + case RLIMIT_NOFILE:
49993 + res_add += GR_RLIM_NOFILE_BUMP;
49994 + break;
49995 + case RLIMIT_MEMLOCK:
49996 + res_add += GR_RLIM_MEMLOCK_BUMP;
49997 + break;
49998 + case RLIMIT_AS:
49999 + res_add += GR_RLIM_AS_BUMP;
50000 + break;
50001 + case RLIMIT_LOCKS:
50002 + res_add += GR_RLIM_LOCKS_BUMP;
50003 + break;
50004 + case RLIMIT_SIGPENDING:
50005 + res_add += GR_RLIM_SIGPENDING_BUMP;
50006 + break;
50007 + case RLIMIT_MSGQUEUE:
50008 + res_add += GR_RLIM_MSGQUEUE_BUMP;
50009 + break;
50010 + case RLIMIT_NICE:
50011 + res_add += GR_RLIM_NICE_BUMP;
50012 + break;
50013 + case RLIMIT_RTPRIO:
50014 + res_add += GR_RLIM_RTPRIO_BUMP;
50015 + break;
50016 + case RLIMIT_RTTIME:
50017 + res_add += GR_RLIM_RTTIME_BUMP;
50018 + break;
50019 + }
50020 +
50021 + acl->res[res].rlim_cur = res_add;
50022 +
50023 + if (wanted > acl->res[res].rlim_max)
50024 + acl->res[res].rlim_max = res_add;
50025 +
50026 + /* only log the subject filename, since resource logging is supported for
50027 + single-subject learning only */
50028 + rcu_read_lock();
50029 + cred = __task_cred(task);
50030 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
50031 + task->role->roletype, cred->uid, cred->gid, acl->filename,
50032 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
50033 + "", (unsigned long) res, &task->signal->saved_ip);
50034 + rcu_read_unlock();
50035 + }
50036 +
50037 + return;
50038 +}
50039 +
50040 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
50041 +void
50042 +pax_set_initial_flags(struct linux_binprm *bprm)
50043 +{
50044 + struct task_struct *task = current;
50045 + struct acl_subject_label *proc;
50046 + unsigned long flags;
50047 +
50048 + if (unlikely(!(gr_status & GR_READY)))
50049 + return;
50050 +
50051 + flags = pax_get_flags(task);
50052 +
50053 + proc = task->acl;
50054 +
50055 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
50056 + flags &= ~MF_PAX_PAGEEXEC;
50057 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
50058 + flags &= ~MF_PAX_SEGMEXEC;
50059 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
50060 + flags &= ~MF_PAX_RANDMMAP;
50061 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
50062 + flags &= ~MF_PAX_EMUTRAMP;
50063 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
50064 + flags &= ~MF_PAX_MPROTECT;
50065 +
50066 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
50067 + flags |= MF_PAX_PAGEEXEC;
50068 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
50069 + flags |= MF_PAX_SEGMEXEC;
50070 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
50071 + flags |= MF_PAX_RANDMMAP;
50072 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
50073 + flags |= MF_PAX_EMUTRAMP;
50074 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
50075 + flags |= MF_PAX_MPROTECT;
50076 +
50077 + pax_set_flags(task, flags);
50078 +
50079 + return;
50080 +}
50081 +#endif
50082 +
50083 +#ifdef CONFIG_SYSCTL
50084 +/* Eric Biederman likes breaking userland ABI and every inode-based security
50085 + system to save 35kb of memory */
50086 +
50087 +/* we modify the passed in filename, but adjust it back before returning */
50088 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
50089 +{
50090 + struct name_entry *nmatch;
50091 + char *p, *lastp = NULL;
50092 + struct acl_object_label *obj = NULL, *tmp;
50093 + struct acl_subject_label *tmpsubj;
50094 + char c = '\0';
50095 +
50096 + read_lock(&gr_inode_lock);
50097 +
50098 + p = name + len - 1;
50099 + do {
50100 + nmatch = lookup_name_entry(name);
50101 + if (lastp != NULL)
50102 + *lastp = c;
50103 +
50104 + if (nmatch == NULL)
50105 + goto next_component;
50106 + tmpsubj = current->acl;
50107 + do {
50108 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
50109 + if (obj != NULL) {
50110 + tmp = obj->globbed;
50111 + while (tmp) {
50112 + if (!glob_match(tmp->filename, name)) {
50113 + obj = tmp;
50114 + goto found_obj;
50115 + }
50116 + tmp = tmp->next;
50117 + }
50118 + goto found_obj;
50119 + }
50120 + } while ((tmpsubj = tmpsubj->parent_subject));
50121 +next_component:
50122 + /* end case */
50123 + if (p == name)
50124 + break;
50125 +
50126 + while (*p != '/')
50127 + p--;
50128 + if (p == name)
50129 + lastp = p + 1;
50130 + else {
50131 + lastp = p;
50132 + p--;
50133 + }
50134 + c = *lastp;
50135 + *lastp = '\0';
50136 + } while (1);
50137 +found_obj:
50138 + read_unlock(&gr_inode_lock);
50139 + /* obj returned will always be non-null */
50140 + return obj;
50141 +}
50142 +
50143 +/* returns 0 when allowing, non-zero on error
50144 + op of 0 is used for readdir, so we don't log the names of hidden files
50145 +*/
50146 +__u32
50147 +gr_handle_sysctl(const struct ctl_table *table, const int op)
50148 +{
50149 + ctl_table *tmp;
50150 + const char *proc_sys = "/proc/sys";
50151 + char *path;
50152 + struct acl_object_label *obj;
50153 + unsigned short len = 0, pos = 0, depth = 0, i;
50154 + __u32 err = 0;
50155 + __u32 mode = 0;
50156 +
50157 + if (unlikely(!(gr_status & GR_READY)))
50158 + return 0;
50159 +
50160 + /* for now, ignore operations on non-sysctl entries if it's not a
50161 + readdir*/
50162 + if (table->child != NULL && op != 0)
50163 + return 0;
50164 +
50165 + mode |= GR_FIND;
50166 + /* it's only a read if it's an entry, read on dirs is for readdir */
50167 + if (op & MAY_READ)
50168 + mode |= GR_READ;
50169 + if (op & MAY_WRITE)
50170 + mode |= GR_WRITE;
50171 +
50172 + preempt_disable();
50173 +
50174 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
50175 +
50176 + /* it's only a read/write if it's an actual entry, not a dir
50177 + (which are opened for readdir)
50178 + */
50179 +
50180 + /* convert the requested sysctl entry into a pathname */
50181 +
50182 + for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
50183 + len += strlen(tmp->procname);
50184 + len++;
50185 + depth++;
50186 + }
50187 +
50188 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
50189 + /* deny */
50190 + goto out;
50191 + }
50192 +
50193 + memset(path, 0, PAGE_SIZE);
50194 +
50195 + memcpy(path, proc_sys, strlen(proc_sys));
50196 +
50197 + pos += strlen(proc_sys);
50198 +
50199 + for (; depth > 0; depth--) {
50200 + path[pos] = '/';
50201 + pos++;
50202 + for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
50203 + if (depth == i) {
50204 + memcpy(path + pos, tmp->procname,
50205 + strlen(tmp->procname));
50206 + pos += strlen(tmp->procname);
50207 + }
50208 + i++;
50209 + }
50210 + }
50211 +
50212 + obj = gr_lookup_by_name(path, pos);
50213 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
50214 +
50215 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
50216 + ((err & mode) != mode))) {
50217 + __u32 new_mode = mode;
50218 +
50219 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50220 +
50221 + err = 0;
50222 + gr_log_learn_sysctl(path, new_mode);
50223 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
50224 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
50225 + err = -ENOENT;
50226 + } else if (!(err & GR_FIND)) {
50227 + err = -ENOENT;
50228 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
50229 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
50230 + path, (mode & GR_READ) ? " reading" : "",
50231 + (mode & GR_WRITE) ? " writing" : "");
50232 + err = -EACCES;
50233 + } else if ((err & mode) != mode) {
50234 + err = -EACCES;
50235 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
50236 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
50237 + path, (mode & GR_READ) ? " reading" : "",
50238 + (mode & GR_WRITE) ? " writing" : "");
50239 + err = 0;
50240 + } else
50241 + err = 0;
50242 +
50243 + out:
50244 + preempt_enable();
50245 +
50246 + return err;
50247 +}
50248 +#endif
50249 +
50250 +int
50251 +gr_handle_proc_ptrace(struct task_struct *task)
50252 +{
50253 + struct file *filp;
50254 + struct task_struct *tmp = task;
50255 + struct task_struct *curtemp = current;
50256 + __u32 retmode;
50257 +
50258 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
50259 + if (unlikely(!(gr_status & GR_READY)))
50260 + return 0;
50261 +#endif
50262 +
50263 + read_lock(&tasklist_lock);
50264 + read_lock(&grsec_exec_file_lock);
50265 + filp = task->exec_file;
50266 +
50267 + while (tmp->pid > 0) {
50268 + if (tmp == curtemp)
50269 + break;
50270 + tmp = tmp->real_parent;
50271 + }
50272 +
50273 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
50274 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
50275 + read_unlock(&grsec_exec_file_lock);
50276 + read_unlock(&tasklist_lock);
50277 + return 1;
50278 + }
50279 +
50280 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
50281 + if (!(gr_status & GR_READY)) {
50282 + read_unlock(&grsec_exec_file_lock);
50283 + read_unlock(&tasklist_lock);
50284 + return 0;
50285 + }
50286 +#endif
50287 +
50288 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
50289 + read_unlock(&grsec_exec_file_lock);
50290 + read_unlock(&tasklist_lock);
50291 +
50292 + if (retmode & GR_NOPTRACE)
50293 + return 1;
50294 +
50295 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
50296 + && (current->acl != task->acl || (current->acl != current->role->root_label
50297 + && current->pid != task->pid)))
50298 + return 1;
50299 +
50300 + return 0;
50301 +}
50302 +
50303 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
50304 +{
50305 + if (unlikely(!(gr_status & GR_READY)))
50306 + return;
50307 +
50308 + if (!(current->role->roletype & GR_ROLE_GOD))
50309 + return;
50310 +
50311 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
50312 + p->role->rolename, gr_task_roletype_to_char(p),
50313 + p->acl->filename);
50314 +}
50315 +
50316 +int
50317 +gr_handle_ptrace(struct task_struct *task, const long request)
50318 +{
50319 + struct task_struct *tmp = task;
50320 + struct task_struct *curtemp = current;
50321 + __u32 retmode;
50322 +
50323 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
50324 + if (unlikely(!(gr_status & GR_READY)))
50325 + return 0;
50326 +#endif
50327 +
50328 + read_lock(&tasklist_lock);
50329 + while (tmp->pid > 0) {
50330 + if (tmp == curtemp)
50331 + break;
50332 + tmp = tmp->real_parent;
50333 + }
50334 +
50335 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
50336 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
50337 + read_unlock(&tasklist_lock);
50338 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50339 + return 1;
50340 + }
50341 + read_unlock(&tasklist_lock);
50342 +
50343 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
50344 + if (!(gr_status & GR_READY))
50345 + return 0;
50346 +#endif
50347 +
50348 + read_lock(&grsec_exec_file_lock);
50349 + if (unlikely(!task->exec_file)) {
50350 + read_unlock(&grsec_exec_file_lock);
50351 + return 0;
50352 + }
50353 +
50354 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
50355 + read_unlock(&grsec_exec_file_lock);
50356 +
50357 + if (retmode & GR_NOPTRACE) {
50358 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50359 + return 1;
50360 + }
50361 +
50362 + if (retmode & GR_PTRACERD) {
50363 + switch (request) {
50364 + case PTRACE_POKETEXT:
50365 + case PTRACE_POKEDATA:
50366 + case PTRACE_POKEUSR:
50367 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
50368 + case PTRACE_SETREGS:
50369 + case PTRACE_SETFPREGS:
50370 +#endif
50371 +#ifdef CONFIG_X86
50372 + case PTRACE_SETFPXREGS:
50373 +#endif
50374 +#ifdef CONFIG_ALTIVEC
50375 + case PTRACE_SETVRREGS:
50376 +#endif
50377 + return 1;
50378 + default:
50379 + return 0;
50380 + }
50381 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
50382 + !(current->role->roletype & GR_ROLE_GOD) &&
50383 + (current->acl != task->acl)) {
50384 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50385 + return 1;
50386 + }
50387 +
50388 + return 0;
50389 +}
50390 +
50391 +static int is_writable_mmap(const struct file *filp)
50392 +{
50393 + struct task_struct *task = current;
50394 + struct acl_object_label *obj, *obj2;
50395 +
50396 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
50397 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
50398 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50399 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
50400 + task->role->root_label);
50401 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
50402 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
50403 + return 1;
50404 + }
50405 + }
50406 + return 0;
50407 +}
50408 +
50409 +int
50410 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
50411 +{
50412 + __u32 mode;
50413 +
50414 + if (unlikely(!file || !(prot & PROT_EXEC)))
50415 + return 1;
50416 +
50417 + if (is_writable_mmap(file))
50418 + return 0;
50419 +
50420 + mode =
50421 + gr_search_file(file->f_path.dentry,
50422 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
50423 + file->f_path.mnt);
50424 +
50425 + if (!gr_tpe_allow(file))
50426 + return 0;
50427 +
50428 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
50429 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50430 + return 0;
50431 + } else if (unlikely(!(mode & GR_EXEC))) {
50432 + return 0;
50433 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
50434 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50435 + return 1;
50436 + }
50437 +
50438 + return 1;
50439 +}
50440 +
50441 +int
50442 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
50443 +{
50444 + __u32 mode;
50445 +
50446 + if (unlikely(!file || !(prot & PROT_EXEC)))
50447 + return 1;
50448 +
50449 + if (is_writable_mmap(file))
50450 + return 0;
50451 +
50452 + mode =
50453 + gr_search_file(file->f_path.dentry,
50454 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
50455 + file->f_path.mnt);
50456 +
50457 + if (!gr_tpe_allow(file))
50458 + return 0;
50459 +
50460 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
50461 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50462 + return 0;
50463 + } else if (unlikely(!(mode & GR_EXEC))) {
50464 + return 0;
50465 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
50466 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50467 + return 1;
50468 + }
50469 +
50470 + return 1;
50471 +}
50472 +
50473 +void
50474 +gr_acl_handle_psacct(struct task_struct *task, const long code)
50475 +{
50476 + unsigned long runtime;
50477 + unsigned long cputime;
50478 + unsigned int wday, cday;
50479 + __u8 whr, chr;
50480 + __u8 wmin, cmin;
50481 + __u8 wsec, csec;
50482 + struct timespec timeval;
50483 +
50484 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
50485 + !(task->acl->mode & GR_PROCACCT)))
50486 + return;
50487 +
50488 + do_posix_clock_monotonic_gettime(&timeval);
50489 + runtime = timeval.tv_sec - task->start_time.tv_sec;
50490 + wday = runtime / (3600 * 24);
50491 + runtime -= wday * (3600 * 24);
50492 + whr = runtime / 3600;
50493 + runtime -= whr * 3600;
50494 + wmin = runtime / 60;
50495 + runtime -= wmin * 60;
50496 + wsec = runtime;
50497 +
50498 + cputime = (task->utime + task->stime) / HZ;
50499 + cday = cputime / (3600 * 24);
50500 + cputime -= cday * (3600 * 24);
50501 + chr = cputime / 3600;
50502 + cputime -= chr * 3600;
50503 + cmin = cputime / 60;
50504 + cputime -= cmin * 60;
50505 + csec = cputime;
50506 +
50507 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
50508 +
50509 + return;
50510 +}
50511 +
50512 +void gr_set_kernel_label(struct task_struct *task)
50513 +{
50514 + if (gr_status & GR_READY) {
50515 + task->role = kernel_role;
50516 + task->acl = kernel_role->root_label;
50517 + }
50518 + return;
50519 +}
50520 +
50521 +#ifdef CONFIG_TASKSTATS
50522 +int gr_is_taskstats_denied(int pid)
50523 +{
50524 + struct task_struct *task;
50525 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50526 + const struct cred *cred;
50527 +#endif
50528 + int ret = 0;
50529 +
50530 + /* restrict taskstats viewing to un-chrooted root users
50531 + who have the 'view' subject flag if the RBAC system is enabled
50532 + */
50533 +
50534 + rcu_read_lock();
50535 + read_lock(&tasklist_lock);
50536 + task = find_task_by_vpid(pid);
50537 + if (task) {
50538 +#ifdef CONFIG_GRKERNSEC_CHROOT
50539 + if (proc_is_chrooted(task))
50540 + ret = -EACCES;
50541 +#endif
50542 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50543 + cred = __task_cred(task);
50544 +#ifdef CONFIG_GRKERNSEC_PROC_USER
50545 + if (cred->uid != 0)
50546 + ret = -EACCES;
50547 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50548 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
50549 + ret = -EACCES;
50550 +#endif
50551 +#endif
50552 + if (gr_status & GR_READY) {
50553 + if (!(task->acl->mode & GR_VIEW))
50554 + ret = -EACCES;
50555 + }
50556 + } else
50557 + ret = -ENOENT;
50558 +
50559 + read_unlock(&tasklist_lock);
50560 + rcu_read_unlock();
50561 +
50562 + return ret;
50563 +}
50564 +#endif
50565 +
50566 +/* AUXV entries are filled via a descendant of search_binary_handler
50567 + after we've already applied the subject for the target
50568 +*/
50569 +int gr_acl_enable_at_secure(void)
50570 +{
50571 + if (unlikely(!(gr_status & GR_READY)))
50572 + return 0;
50573 +
50574 + if (current->acl->mode & GR_ATSECURE)
50575 + return 1;
50576 +
50577 + return 0;
50578 +}
50579 +
50580 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
50581 +{
50582 + struct task_struct *task = current;
50583 + struct dentry *dentry = file->f_path.dentry;
50584 + struct vfsmount *mnt = file->f_path.mnt;
50585 + struct acl_object_label *obj, *tmp;
50586 + struct acl_subject_label *subj;
50587 + unsigned int bufsize;
50588 + int is_not_root;
50589 + char *path;
50590 + dev_t dev = __get_dev(dentry);
50591 +
50592 + if (unlikely(!(gr_status & GR_READY)))
50593 + return 1;
50594 +
50595 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
50596 + return 1;
50597 +
50598 + /* ignore Eric Biederman */
50599 + if (IS_PRIVATE(dentry->d_inode))
50600 + return 1;
50601 +
50602 + subj = task->acl;
50603 + do {
50604 + obj = lookup_acl_obj_label(ino, dev, subj);
50605 + if (obj != NULL)
50606 + return (obj->mode & GR_FIND) ? 1 : 0;
50607 + } while ((subj = subj->parent_subject));
50608 +
50609 + /* this is purely an optimization since we're looking for an object
50610 + for the directory we're doing a readdir on
50611 + if it's possible for any globbed object to match the entry we're
50612 + filling into the directory, then the object we find here will be
50613 + an anchor point with attached globbed objects
50614 + */
50615 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
50616 + if (obj->globbed == NULL)
50617 + return (obj->mode & GR_FIND) ? 1 : 0;
50618 +
50619 + is_not_root = ((obj->filename[0] == '/') &&
50620 + (obj->filename[1] == '\0')) ? 0 : 1;
50621 + bufsize = PAGE_SIZE - namelen - is_not_root;
50622 +
50623 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
50624 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
50625 + return 1;
50626 +
50627 + preempt_disable();
50628 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
50629 + bufsize);
50630 +
50631 + bufsize = strlen(path);
50632 +
50633 + /* if base is "/", don't append an additional slash */
50634 + if (is_not_root)
50635 + *(path + bufsize) = '/';
50636 + memcpy(path + bufsize + is_not_root, name, namelen);
50637 + *(path + bufsize + namelen + is_not_root) = '\0';
50638 +
50639 + tmp = obj->globbed;
50640 + while (tmp) {
50641 + if (!glob_match(tmp->filename, path)) {
50642 + preempt_enable();
50643 + return (tmp->mode & GR_FIND) ? 1 : 0;
50644 + }
50645 + tmp = tmp->next;
50646 + }
50647 + preempt_enable();
50648 + return (obj->mode & GR_FIND) ? 1 : 0;
50649 +}
50650 +
50651 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
50652 +EXPORT_SYMBOL(gr_acl_is_enabled);
50653 +#endif
50654 +EXPORT_SYMBOL(gr_learn_resource);
50655 +EXPORT_SYMBOL(gr_set_kernel_label);
50656 +#ifdef CONFIG_SECURITY
50657 +EXPORT_SYMBOL(gr_check_user_change);
50658 +EXPORT_SYMBOL(gr_check_group_change);
50659 +#endif
50660 +
50661 diff -urNp linux-2.6.32.45/grsecurity/gracl_cap.c linux-2.6.32.45/grsecurity/gracl_cap.c
50662 --- linux-2.6.32.45/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
50663 +++ linux-2.6.32.45/grsecurity/gracl_cap.c 2011-04-17 15:56:46.000000000 -0400
50664 @@ -0,0 +1,138 @@
50665 +#include <linux/kernel.h>
50666 +#include <linux/module.h>
50667 +#include <linux/sched.h>
50668 +#include <linux/gracl.h>
50669 +#include <linux/grsecurity.h>
50670 +#include <linux/grinternal.h>
50671 +
50672 +static const char *captab_log[] = {
50673 + "CAP_CHOWN",
50674 + "CAP_DAC_OVERRIDE",
50675 + "CAP_DAC_READ_SEARCH",
50676 + "CAP_FOWNER",
50677 + "CAP_FSETID",
50678 + "CAP_KILL",
50679 + "CAP_SETGID",
50680 + "CAP_SETUID",
50681 + "CAP_SETPCAP",
50682 + "CAP_LINUX_IMMUTABLE",
50683 + "CAP_NET_BIND_SERVICE",
50684 + "CAP_NET_BROADCAST",
50685 + "CAP_NET_ADMIN",
50686 + "CAP_NET_RAW",
50687 + "CAP_IPC_LOCK",
50688 + "CAP_IPC_OWNER",
50689 + "CAP_SYS_MODULE",
50690 + "CAP_SYS_RAWIO",
50691 + "CAP_SYS_CHROOT",
50692 + "CAP_SYS_PTRACE",
50693 + "CAP_SYS_PACCT",
50694 + "CAP_SYS_ADMIN",
50695 + "CAP_SYS_BOOT",
50696 + "CAP_SYS_NICE",
50697 + "CAP_SYS_RESOURCE",
50698 + "CAP_SYS_TIME",
50699 + "CAP_SYS_TTY_CONFIG",
50700 + "CAP_MKNOD",
50701 + "CAP_LEASE",
50702 + "CAP_AUDIT_WRITE",
50703 + "CAP_AUDIT_CONTROL",
50704 + "CAP_SETFCAP",
50705 + "CAP_MAC_OVERRIDE",
50706 + "CAP_MAC_ADMIN"
50707 +};
50708 +
50709 +EXPORT_SYMBOL(gr_is_capable);
50710 +EXPORT_SYMBOL(gr_is_capable_nolog);
50711 +
50712 +int
50713 +gr_is_capable(const int cap)
50714 +{
50715 + struct task_struct *task = current;
50716 + const struct cred *cred = current_cred();
50717 + struct acl_subject_label *curracl;
50718 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
50719 + kernel_cap_t cap_audit = __cap_empty_set;
50720 +
50721 + if (!gr_acl_is_enabled())
50722 + return 1;
50723 +
50724 + curracl = task->acl;
50725 +
50726 + cap_drop = curracl->cap_lower;
50727 + cap_mask = curracl->cap_mask;
50728 + cap_audit = curracl->cap_invert_audit;
50729 +
50730 + while ((curracl = curracl->parent_subject)) {
50731 + /* if the cap isn't specified in the current computed mask but is specified in the
50732 + current level subject, and is lowered in the current level subject, then add
50733 + it to the set of dropped capabilities
50734 + otherwise, add the current level subject's mask to the current computed mask
50735 + */
50736 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
50737 + cap_raise(cap_mask, cap);
50738 + if (cap_raised(curracl->cap_lower, cap))
50739 + cap_raise(cap_drop, cap);
50740 + if (cap_raised(curracl->cap_invert_audit, cap))
50741 + cap_raise(cap_audit, cap);
50742 + }
50743 + }
50744 +
50745 + if (!cap_raised(cap_drop, cap)) {
50746 + if (cap_raised(cap_audit, cap))
50747 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
50748 + return 1;
50749 + }
50750 +
50751 + curracl = task->acl;
50752 +
50753 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
50754 + && cap_raised(cred->cap_effective, cap)) {
50755 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
50756 + task->role->roletype, cred->uid,
50757 + cred->gid, task->exec_file ?
50758 + gr_to_filename(task->exec_file->f_path.dentry,
50759 + task->exec_file->f_path.mnt) : curracl->filename,
50760 + curracl->filename, 0UL,
50761 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
50762 + return 1;
50763 + }
50764 +
50765 + if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
50766 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
50767 + return 0;
50768 +}
50769 +
50770 +int
50771 +gr_is_capable_nolog(const int cap)
50772 +{
50773 + struct acl_subject_label *curracl;
50774 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
50775 +
50776 + if (!gr_acl_is_enabled())
50777 + return 1;
50778 +
50779 + curracl = current->acl;
50780 +
50781 + cap_drop = curracl->cap_lower;
50782 + cap_mask = curracl->cap_mask;
50783 +
50784 + while ((curracl = curracl->parent_subject)) {
50785 + /* if the cap isn't specified in the current computed mask but is specified in the
50786 + current level subject, and is lowered in the current level subject, then add
50787 + it to the set of dropped capabilities
50788 + otherwise, add the current level subject's mask to the current computed mask
50789 + */
50790 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
50791 + cap_raise(cap_mask, cap);
50792 + if (cap_raised(curracl->cap_lower, cap))
50793 + cap_raise(cap_drop, cap);
50794 + }
50795 + }
50796 +
50797 + if (!cap_raised(cap_drop, cap))
50798 + return 1;
50799 +
50800 + return 0;
50801 +}
50802 +
50803 diff -urNp linux-2.6.32.45/grsecurity/gracl_fs.c linux-2.6.32.45/grsecurity/gracl_fs.c
50804 --- linux-2.6.32.45/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
50805 +++ linux-2.6.32.45/grsecurity/gracl_fs.c 2011-04-17 15:56:46.000000000 -0400
50806 @@ -0,0 +1,431 @@
50807 +#include <linux/kernel.h>
50808 +#include <linux/sched.h>
50809 +#include <linux/types.h>
50810 +#include <linux/fs.h>
50811 +#include <linux/file.h>
50812 +#include <linux/stat.h>
50813 +#include <linux/grsecurity.h>
50814 +#include <linux/grinternal.h>
50815 +#include <linux/gracl.h>
50816 +
50817 +__u32
50818 +gr_acl_handle_hidden_file(const struct dentry * dentry,
50819 + const struct vfsmount * mnt)
50820 +{
50821 + __u32 mode;
50822 +
50823 + if (unlikely(!dentry->d_inode))
50824 + return GR_FIND;
50825 +
50826 + mode =
50827 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
50828 +
50829 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
50830 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
50831 + return mode;
50832 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
50833 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
50834 + return 0;
50835 + } else if (unlikely(!(mode & GR_FIND)))
50836 + return 0;
50837 +
50838 + return GR_FIND;
50839 +}
50840 +
50841 +__u32
50842 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
50843 + const int fmode)
50844 +{
50845 + __u32 reqmode = GR_FIND;
50846 + __u32 mode;
50847 +
50848 + if (unlikely(!dentry->d_inode))
50849 + return reqmode;
50850 +
50851 + if (unlikely(fmode & O_APPEND))
50852 + reqmode |= GR_APPEND;
50853 + else if (unlikely(fmode & FMODE_WRITE))
50854 + reqmode |= GR_WRITE;
50855 + if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
50856 + reqmode |= GR_READ;
50857 + if ((fmode & FMODE_GREXEC) && (fmode & FMODE_EXEC))
50858 + reqmode &= ~GR_READ;
50859 + mode =
50860 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
50861 + mnt);
50862 +
50863 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
50864 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
50865 + reqmode & GR_READ ? " reading" : "",
50866 + reqmode & GR_WRITE ? " writing" : reqmode &
50867 + GR_APPEND ? " appending" : "");
50868 + return reqmode;
50869 + } else
50870 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
50871 + {
50872 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
50873 + reqmode & GR_READ ? " reading" : "",
50874 + reqmode & GR_WRITE ? " writing" : reqmode &
50875 + GR_APPEND ? " appending" : "");
50876 + return 0;
50877 + } else if (unlikely((mode & reqmode) != reqmode))
50878 + return 0;
50879 +
50880 + return reqmode;
50881 +}
50882 +
50883 +__u32
50884 +gr_acl_handle_creat(const struct dentry * dentry,
50885 + const struct dentry * p_dentry,
50886 + const struct vfsmount * p_mnt, const int fmode,
50887 + const int imode)
50888 +{
50889 + __u32 reqmode = GR_WRITE | GR_CREATE;
50890 + __u32 mode;
50891 +
50892 + if (unlikely(fmode & O_APPEND))
50893 + reqmode |= GR_APPEND;
50894 + if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
50895 + reqmode |= GR_READ;
50896 + if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
50897 + reqmode |= GR_SETID;
50898 +
50899 + mode =
50900 + gr_check_create(dentry, p_dentry, p_mnt,
50901 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
50902 +
50903 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
50904 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
50905 + reqmode & GR_READ ? " reading" : "",
50906 + reqmode & GR_WRITE ? " writing" : reqmode &
50907 + GR_APPEND ? " appending" : "");
50908 + return reqmode;
50909 + } else
50910 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
50911 + {
50912 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
50913 + reqmode & GR_READ ? " reading" : "",
50914 + reqmode & GR_WRITE ? " writing" : reqmode &
50915 + GR_APPEND ? " appending" : "");
50916 + return 0;
50917 + } else if (unlikely((mode & reqmode) != reqmode))
50918 + return 0;
50919 +
50920 + return reqmode;
50921 +}
50922 +
50923 +__u32
50924 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
50925 + const int fmode)
50926 +{
50927 + __u32 mode, reqmode = GR_FIND;
50928 +
50929 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
50930 + reqmode |= GR_EXEC;
50931 + if (fmode & S_IWOTH)
50932 + reqmode |= GR_WRITE;
50933 + if (fmode & S_IROTH)
50934 + reqmode |= GR_READ;
50935 +
50936 + mode =
50937 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
50938 + mnt);
50939 +
50940 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
50941 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
50942 + reqmode & GR_READ ? " reading" : "",
50943 + reqmode & GR_WRITE ? " writing" : "",
50944 + reqmode & GR_EXEC ? " executing" : "");
50945 + return reqmode;
50946 + } else
50947 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
50948 + {
50949 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
50950 + reqmode & GR_READ ? " reading" : "",
50951 + reqmode & GR_WRITE ? " writing" : "",
50952 + reqmode & GR_EXEC ? " executing" : "");
50953 + return 0;
50954 + } else if (unlikely((mode & reqmode) != reqmode))
50955 + return 0;
50956 +
50957 + return reqmode;
50958 +}
50959 +
50960 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
50961 +{
50962 + __u32 mode;
50963 +
50964 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
50965 +
50966 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
50967 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
50968 + return mode;
50969 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
50970 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
50971 + return 0;
50972 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
50973 + return 0;
50974 +
50975 + return (reqmode);
50976 +}
50977 +
50978 +__u32
50979 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
50980 +{
50981 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
50982 +}
50983 +
50984 +__u32
50985 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
50986 +{
50987 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
50988 +}
50989 +
50990 +__u32
50991 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
50992 +{
50993 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
50994 +}
50995 +
50996 +__u32
50997 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
50998 +{
50999 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
51000 +}
51001 +
51002 +__u32
51003 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
51004 + mode_t mode)
51005 +{
51006 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
51007 + return 1;
51008 +
51009 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51010 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51011 + GR_FCHMOD_ACL_MSG);
51012 + } else {
51013 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
51014 + }
51015 +}
51016 +
51017 +__u32
51018 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
51019 + mode_t mode)
51020 +{
51021 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51022 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51023 + GR_CHMOD_ACL_MSG);
51024 + } else {
51025 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
51026 + }
51027 +}
51028 +
51029 +__u32
51030 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
51031 +{
51032 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
51033 +}
51034 +
51035 +__u32
51036 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
51037 +{
51038 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
51039 +}
51040 +
51041 +__u32
51042 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
51043 +{
51044 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
51045 +}
51046 +
51047 +__u32
51048 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
51049 +{
51050 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
51051 + GR_UNIXCONNECT_ACL_MSG);
51052 +}
51053 +
51054 +/* hardlinks require at minimum create permission;
51055 + any additional privilege required is based on the
51056 + privilege of the file being linked to
51057 +*/
51058 +__u32
51059 +gr_acl_handle_link(const struct dentry * new_dentry,
51060 + const struct dentry * parent_dentry,
51061 + const struct vfsmount * parent_mnt,
51062 + const struct dentry * old_dentry,
51063 + const struct vfsmount * old_mnt, const char *to)
51064 +{
51065 + __u32 mode;
51066 + __u32 needmode = GR_CREATE | GR_LINK;
51067 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
51068 +
51069 + mode =
51070 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
51071 + old_mnt);
51072 +
51073 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
51074 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51075 + return mode;
51076 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51077 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51078 + return 0;
51079 + } else if (unlikely((mode & needmode) != needmode))
51080 + return 0;
51081 +
51082 + return 1;
51083 +}
51084 +
51085 +__u32
51086 +gr_acl_handle_symlink(const struct dentry * new_dentry,
51087 + const struct dentry * parent_dentry,
51088 + const struct vfsmount * parent_mnt, const char *from)
51089 +{
51090 + __u32 needmode = GR_WRITE | GR_CREATE;
51091 + __u32 mode;
51092 +
51093 + mode =
51094 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
51095 + GR_CREATE | GR_AUDIT_CREATE |
51096 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
51097 +
51098 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
51099 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
51100 + return mode;
51101 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51102 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
51103 + return 0;
51104 + } else if (unlikely((mode & needmode) != needmode))
51105 + return 0;
51106 +
51107 + return (GR_WRITE | GR_CREATE);
51108 +}
51109 +
51110 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
51111 +{
51112 + __u32 mode;
51113 +
51114 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
51115 +
51116 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
51117 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
51118 + return mode;
51119 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
51120 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
51121 + return 0;
51122 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
51123 + return 0;
51124 +
51125 + return (reqmode);
51126 +}
51127 +
51128 +__u32
51129 +gr_acl_handle_mknod(const struct dentry * new_dentry,
51130 + const struct dentry * parent_dentry,
51131 + const struct vfsmount * parent_mnt,
51132 + const int mode)
51133 +{
51134 + __u32 reqmode = GR_WRITE | GR_CREATE;
51135 + if (unlikely(mode & (S_ISUID | S_ISGID)))
51136 + reqmode |= GR_SETID;
51137 +
51138 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
51139 + reqmode, GR_MKNOD_ACL_MSG);
51140 +}
51141 +
51142 +__u32
51143 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
51144 + const struct dentry *parent_dentry,
51145 + const struct vfsmount *parent_mnt)
51146 +{
51147 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
51148 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
51149 +}
51150 +
51151 +#define RENAME_CHECK_SUCCESS(old, new) \
51152 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
51153 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
51154 +
51155 +int
51156 +gr_acl_handle_rename(struct dentry *new_dentry,
51157 + struct dentry *parent_dentry,
51158 + const struct vfsmount *parent_mnt,
51159 + struct dentry *old_dentry,
51160 + struct inode *old_parent_inode,
51161 + struct vfsmount *old_mnt, const char *newname)
51162 +{
51163 + __u32 comp1, comp2;
51164 + int error = 0;
51165 +
51166 + if (unlikely(!gr_acl_is_enabled()))
51167 + return 0;
51168 +
51169 + if (!new_dentry->d_inode) {
51170 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
51171 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
51172 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
51173 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
51174 + GR_DELETE | GR_AUDIT_DELETE |
51175 + GR_AUDIT_READ | GR_AUDIT_WRITE |
51176 + GR_SUPPRESS, old_mnt);
51177 + } else {
51178 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
51179 + GR_CREATE | GR_DELETE |
51180 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
51181 + GR_AUDIT_READ | GR_AUDIT_WRITE |
51182 + GR_SUPPRESS, parent_mnt);
51183 + comp2 =
51184 + gr_search_file(old_dentry,
51185 + GR_READ | GR_WRITE | GR_AUDIT_READ |
51186 + GR_DELETE | GR_AUDIT_DELETE |
51187 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
51188 + }
51189 +
51190 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
51191 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
51192 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
51193 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
51194 + && !(comp2 & GR_SUPPRESS)) {
51195 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
51196 + error = -EACCES;
51197 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
51198 + error = -EACCES;
51199 +
51200 + return error;
51201 +}
51202 +
51203 +void
51204 +gr_acl_handle_exit(void)
51205 +{
51206 + u16 id;
51207 + char *rolename;
51208 + struct file *exec_file;
51209 +
51210 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
51211 + !(current->role->roletype & GR_ROLE_PERSIST))) {
51212 + id = current->acl_role_id;
51213 + rolename = current->role->rolename;
51214 + gr_set_acls(1);
51215 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
51216 + }
51217 +
51218 + write_lock(&grsec_exec_file_lock);
51219 + exec_file = current->exec_file;
51220 + current->exec_file = NULL;
51221 + write_unlock(&grsec_exec_file_lock);
51222 +
51223 + if (exec_file)
51224 + fput(exec_file);
51225 +}
51226 +
51227 +int
51228 +gr_acl_handle_procpidmem(const struct task_struct *task)
51229 +{
51230 + if (unlikely(!gr_acl_is_enabled()))
51231 + return 0;
51232 +
51233 + if (task != current && task->acl->mode & GR_PROTPROCFD)
51234 + return -EACCES;
51235 +
51236 + return 0;
51237 +}
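
The handlers in gracl_fs.c all follow the three-way pattern seen in generic_fs_handler() above: the requested bits are or'd with their audit bits and GR_SUPPRESS before the policy lookup, and the result is split into an audited allow, a logged deny, or a silent deny when the subject suppresses messages. A minimal userspace sketch of that decision, using made-up bit values and printf in place of the gr_log_* helpers (not the kernel's definitions), might look like:

#include <stdio.h>

/* mock bit values for illustration only -- not the kernel's definitions */
#define GR_READ        0x01
#define GR_WRITE       0x02
#define GR_AUDIT_READ  0x10
#define GR_AUDIT_WRITE 0x20
#define GR_AUDITS      (GR_AUDIT_READ | GR_AUDIT_WRITE)
#define GR_SUPPRESS    0x40

/* stand-in for gr_search_file(): returns the bits the policy grants */
static unsigned int mock_policy_lookup(unsigned int granted)
{
	return granted;
}

static unsigned int mock_fs_handler(unsigned int reqmode, unsigned int granted)
{
	unsigned int mode = mock_policy_lookup(granted);

	if ((mode & reqmode) == reqmode && (mode & GR_AUDITS)) {
		printf("allowed (audited)\n");	/* gr_log_fs_rbac_generic(GR_DO_AUDIT, ...) */
		return mode;
	} else if ((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)) {
		printf("denied (logged)\n");	/* gr_log_fs_rbac_generic(GR_DONT_AUDIT, ...) */
		return 0;
	} else if ((mode & reqmode) != reqmode) {
		return 0;			/* denied quietly: subject suppresses messages */
	}
	return reqmode;				/* allowed, nothing to audit */
}

int main(void)
{
	mock_fs_handler(GR_WRITE, GR_READ | GR_WRITE | GR_AUDIT_WRITE);	/* audited allow */
	mock_fs_handler(GR_WRITE, GR_READ);					/* logged deny */
	mock_fs_handler(GR_WRITE, GR_READ | GR_SUPPRESS);			/* silent deny */
	return 0;
}
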
51238 diff -urNp linux-2.6.32.45/grsecurity/gracl_ip.c linux-2.6.32.45/grsecurity/gracl_ip.c
51239 --- linux-2.6.32.45/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
51240 +++ linux-2.6.32.45/grsecurity/gracl_ip.c 2011-04-17 15:56:46.000000000 -0400
51241 @@ -0,0 +1,382 @@
51242 +#include <linux/kernel.h>
51243 +#include <asm/uaccess.h>
51244 +#include <asm/errno.h>
51245 +#include <net/sock.h>
51246 +#include <linux/file.h>
51247 +#include <linux/fs.h>
51248 +#include <linux/net.h>
51249 +#include <linux/in.h>
51250 +#include <linux/skbuff.h>
51251 +#include <linux/ip.h>
51252 +#include <linux/udp.h>
51253 +#include <linux/smp_lock.h>
51254 +#include <linux/types.h>
51255 +#include <linux/sched.h>
51256 +#include <linux/netdevice.h>
51257 +#include <linux/inetdevice.h>
51258 +#include <linux/gracl.h>
51259 +#include <linux/grsecurity.h>
51260 +#include <linux/grinternal.h>
51261 +
51262 +#define GR_BIND 0x01
51263 +#define GR_CONNECT 0x02
51264 +#define GR_INVERT 0x04
51265 +#define GR_BINDOVERRIDE 0x08
51266 +#define GR_CONNECTOVERRIDE 0x10
51267 +#define GR_SOCK_FAMILY 0x20
51268 +
51269 +static const char * gr_protocols[IPPROTO_MAX] = {
51270 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
51271 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
51272 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
51273 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
51274 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
51275 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
51276 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
51277 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
51278 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
51279 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
51280 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
51281 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
51282 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
51283 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
51284 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
51285 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
51286 +	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
51287 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
51288 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
51289 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
51290 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
51291 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
51292 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
51293 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
51294 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
51295 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
51296 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
51297 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
51298 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
51299 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
51300 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
51301 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
51302 + };
51303 +
51304 +static const char * gr_socktypes[SOCK_MAX] = {
51305 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
51306 + "unknown:7", "unknown:8", "unknown:9", "packet"
51307 + };
51308 +
51309 +static const char * gr_sockfamilies[AF_MAX+1] = {
51310 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
51311 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
51312 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
51313 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
51314 + };
51315 +
51316 +const char *
51317 +gr_proto_to_name(unsigned char proto)
51318 +{
51319 + return gr_protocols[proto];
51320 +}
51321 +
51322 +const char *
51323 +gr_socktype_to_name(unsigned char type)
51324 +{
51325 + return gr_socktypes[type];
51326 +}
51327 +
51328 +const char *
51329 +gr_sockfamily_to_name(unsigned char family)
51330 +{
51331 + return gr_sockfamilies[family];
51332 +}
51333 +
51334 +int
51335 +gr_search_socket(const int domain, const int type, const int protocol)
51336 +{
51337 + struct acl_subject_label *curr;
51338 + const struct cred *cred = current_cred();
51339 +
51340 + if (unlikely(!gr_acl_is_enabled()))
51341 + goto exit;
51342 +
51343 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
51344 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
51345 + goto exit; // let the kernel handle it
51346 +
51347 + curr = current->acl;
51348 +
51349 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
51350 +		/* the family is allowed; if this is PF_INET, allow it only if
51351 +		   the extra sock type/protocol checks pass */
51352 + if (domain == PF_INET)
51353 + goto inet_check;
51354 + goto exit;
51355 + } else {
51356 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
51357 + __u32 fakeip = 0;
51358 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51359 + current->role->roletype, cred->uid,
51360 + cred->gid, current->exec_file ?
51361 + gr_to_filename(current->exec_file->f_path.dentry,
51362 + current->exec_file->f_path.mnt) :
51363 + curr->filename, curr->filename,
51364 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
51365 + &current->signal->saved_ip);
51366 + goto exit;
51367 + }
51368 + goto exit_fail;
51369 + }
51370 +
51371 +inet_check:
51372 + /* the rest of this checking is for IPv4 only */
51373 + if (!curr->ips)
51374 + goto exit;
51375 +
51376 + if ((curr->ip_type & (1 << type)) &&
51377 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
51378 + goto exit;
51379 +
51380 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
51381 +		/* we don't place acls on raw sockets, and sometimes
51382 +		   dgram/ip sockets are opened for ioctl and not
51383 +		   bind/connect, so we'll fake a bind learn log */
51384 + if (type == SOCK_RAW || type == SOCK_PACKET) {
51385 + __u32 fakeip = 0;
51386 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51387 + current->role->roletype, cred->uid,
51388 + cred->gid, current->exec_file ?
51389 + gr_to_filename(current->exec_file->f_path.dentry,
51390 + current->exec_file->f_path.mnt) :
51391 + curr->filename, curr->filename,
51392 + &fakeip, 0, type,
51393 + protocol, GR_CONNECT, &current->signal->saved_ip);
51394 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
51395 + __u32 fakeip = 0;
51396 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51397 + current->role->roletype, cred->uid,
51398 + cred->gid, current->exec_file ?
51399 + gr_to_filename(current->exec_file->f_path.dentry,
51400 + current->exec_file->f_path.mnt) :
51401 + curr->filename, curr->filename,
51402 + &fakeip, 0, type,
51403 + protocol, GR_BIND, &current->signal->saved_ip);
51404 + }
51405 + /* we'll log when they use connect or bind */
51406 + goto exit;
51407 + }
51408 +
51409 +exit_fail:
51410 + if (domain == PF_INET)
51411 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
51412 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
51413 + else
51414 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
51415 + gr_socktype_to_name(type), protocol);
51416 +
51417 + return 0;
51418 +exit:
51419 + return 1;
51420 +}
51421 +
51422 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
51423 +{
51424 + if ((ip->mode & mode) &&
51425 + (ip_port >= ip->low) &&
51426 + (ip_port <= ip->high) &&
51427 + ((ntohl(ip_addr) & our_netmask) ==
51428 + (ntohl(our_addr) & our_netmask))
51429 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
51430 + && (ip->type & (1 << type))) {
51431 + if (ip->mode & GR_INVERT)
51432 + return 2; // specifically denied
51433 + else
51434 + return 1; // allowed
51435 + }
51436 +
51437 + return 0; // not specifically allowed, may continue parsing
51438 +}
51439 +
51440 +static int
51441 +gr_search_connectbind(const int full_mode, struct sock *sk,
51442 + struct sockaddr_in *addr, const int type)
51443 +{
51444 + char iface[IFNAMSIZ] = {0};
51445 + struct acl_subject_label *curr;
51446 + struct acl_ip_label *ip;
51447 + struct inet_sock *isk;
51448 + struct net_device *dev;
51449 + struct in_device *idev;
51450 + unsigned long i;
51451 + int ret;
51452 + int mode = full_mode & (GR_BIND | GR_CONNECT);
51453 + __u32 ip_addr = 0;
51454 + __u32 our_addr;
51455 + __u32 our_netmask;
51456 + char *p;
51457 + __u16 ip_port = 0;
51458 + const struct cred *cred = current_cred();
51459 +
51460 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
51461 + return 0;
51462 +
51463 + curr = current->acl;
51464 + isk = inet_sk(sk);
51465 +
51466 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
51467 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
51468 + addr->sin_addr.s_addr = curr->inaddr_any_override;
51469 + if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
51470 + struct sockaddr_in saddr;
51471 + int err;
51472 +
51473 + saddr.sin_family = AF_INET;
51474 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
51475 + saddr.sin_port = isk->sport;
51476 +
51477 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
51478 + if (err)
51479 + return err;
51480 +
51481 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
51482 + if (err)
51483 + return err;
51484 + }
51485 +
51486 + if (!curr->ips)
51487 + return 0;
51488 +
51489 + ip_addr = addr->sin_addr.s_addr;
51490 + ip_port = ntohs(addr->sin_port);
51491 +
51492 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
51493 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51494 + current->role->roletype, cred->uid,
51495 + cred->gid, current->exec_file ?
51496 + gr_to_filename(current->exec_file->f_path.dentry,
51497 + current->exec_file->f_path.mnt) :
51498 + curr->filename, curr->filename,
51499 + &ip_addr, ip_port, type,
51500 + sk->sk_protocol, mode, &current->signal->saved_ip);
51501 + return 0;
51502 + }
51503 +
51504 + for (i = 0; i < curr->ip_num; i++) {
51505 + ip = *(curr->ips + i);
51506 + if (ip->iface != NULL) {
51507 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
51508 + p = strchr(iface, ':');
51509 + if (p != NULL)
51510 + *p = '\0';
51511 + dev = dev_get_by_name(sock_net(sk), iface);
51512 + if (dev == NULL)
51513 + continue;
51514 + idev = in_dev_get(dev);
51515 + if (idev == NULL) {
51516 + dev_put(dev);
51517 + continue;
51518 + }
51519 + rcu_read_lock();
51520 + for_ifa(idev) {
51521 + if (!strcmp(ip->iface, ifa->ifa_label)) {
51522 + our_addr = ifa->ifa_address;
51523 + our_netmask = 0xffffffff;
51524 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
51525 + if (ret == 1) {
51526 + rcu_read_unlock();
51527 + in_dev_put(idev);
51528 + dev_put(dev);
51529 + return 0;
51530 + } else if (ret == 2) {
51531 + rcu_read_unlock();
51532 + in_dev_put(idev);
51533 + dev_put(dev);
51534 + goto denied;
51535 + }
51536 + }
51537 + } endfor_ifa(idev);
51538 + rcu_read_unlock();
51539 + in_dev_put(idev);
51540 + dev_put(dev);
51541 + } else {
51542 + our_addr = ip->addr;
51543 + our_netmask = ip->netmask;
51544 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
51545 + if (ret == 1)
51546 + return 0;
51547 + else if (ret == 2)
51548 + goto denied;
51549 + }
51550 + }
51551 +
51552 +denied:
51553 + if (mode == GR_BIND)
51554 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
51555 + else if (mode == GR_CONNECT)
51556 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
51557 +
51558 + return -EACCES;
51559 +}
51560 +
51561 +int
51562 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
51563 +{
51564 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
51565 +}
51566 +
51567 +int
51568 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
51569 +{
51570 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
51571 +}
51572 +
51573 +int gr_search_listen(struct socket *sock)
51574 +{
51575 + struct sock *sk = sock->sk;
51576 + struct sockaddr_in addr;
51577 +
51578 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
51579 + addr.sin_port = inet_sk(sk)->sport;
51580 +
51581 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
51582 +}
51583 +
51584 +int gr_search_accept(struct socket *sock)
51585 +{
51586 + struct sock *sk = sock->sk;
51587 + struct sockaddr_in addr;
51588 +
51589 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
51590 + addr.sin_port = inet_sk(sk)->sport;
51591 +
51592 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
51593 +}
51594 +
51595 +int
51596 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
51597 +{
51598 + if (addr)
51599 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
51600 + else {
51601 + struct sockaddr_in sin;
51602 + const struct inet_sock *inet = inet_sk(sk);
51603 +
51604 + sin.sin_addr.s_addr = inet->daddr;
51605 + sin.sin_port = inet->dport;
51606 +
51607 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
51608 + }
51609 +}
51610 +
51611 +int
51612 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
51613 +{
51614 + struct sockaddr_in sin;
51615 +
51616 + if (unlikely(skb->len < sizeof (struct udphdr)))
51617 + return 0; // skip this packet
51618 +
51619 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
51620 + sin.sin_port = udp_hdr(skb)->source;
51621 +
51622 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
51623 +}
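
check_ip_policy() above accepts a rule only when the socket mode, the port range, the protocol and socket-type bitmaps, and the masked address all match, and GR_INVERT turns such a match into an explicit deny. A small standalone sketch of the address-and-port part of that test, with made-up sample values (a 192.168.1.0/24 rule, ports 1024-65535), might be:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Mirrors the address/port portion of check_ip_policy(): the rule matches
 * when the port falls in [low, high] and the target address equals the rule
 * address under the rule's netmask (addresses compared in host byte order).
 * The sample rule used in main() is made up for illustration. */
static int addr_port_matches(uint32_t ip_addr_be, uint16_t port,
			     uint32_t rule_addr_be, uint32_t netmask_host,
			     uint16_t low, uint16_t high)
{
	return port >= low && port <= high &&
	       (ntohl(ip_addr_be) & netmask_host) ==
	       (ntohl(rule_addr_be) & netmask_host);
}

int main(void)
{
	uint32_t rule = inet_addr("192.168.1.0");	/* network byte order */
	uint32_t target = inet_addr("192.168.1.42");

	printf("%d\n", addr_port_matches(target, 8080, rule, 0xffffff00U, 1024, 65535)); /* 1 */
	printf("%d\n", addr_port_matches(target, 80,   rule, 0xffffff00U, 1024, 65535)); /* 0 */
	return 0;
}
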
51624 diff -urNp linux-2.6.32.45/grsecurity/gracl_learn.c linux-2.6.32.45/grsecurity/gracl_learn.c
51625 --- linux-2.6.32.45/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
51626 +++ linux-2.6.32.45/grsecurity/gracl_learn.c 2011-07-14 21:02:03.000000000 -0400
51627 @@ -0,0 +1,208 @@
51628 +#include <linux/kernel.h>
51629 +#include <linux/mm.h>
51630 +#include <linux/sched.h>
51631 +#include <linux/poll.h>
51632 +#include <linux/smp_lock.h>
51633 +#include <linux/string.h>
51634 +#include <linux/file.h>
51635 +#include <linux/types.h>
51636 +#include <linux/vmalloc.h>
51637 +#include <linux/grinternal.h>
51638 +
51639 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
51640 + size_t count, loff_t *ppos);
51641 +extern int gr_acl_is_enabled(void);
51642 +
51643 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
51644 +static int gr_learn_attached;
51645 +
51646 +/* use a 512k buffer */
51647 +#define LEARN_BUFFER_SIZE (512 * 1024)
51648 +
51649 +static DEFINE_SPINLOCK(gr_learn_lock);
51650 +static DEFINE_MUTEX(gr_learn_user_mutex);
51651 +
51652 +/* we need to maintain two buffers, so that the kernel context of grlearn
51653 +   uses a mutex around the userspace copying, and the other kernel contexts
51654 +   use a spinlock when copying into the buffer, since they cannot sleep
51655 +*/
51656 +static char *learn_buffer;
51657 +static char *learn_buffer_user;
51658 +static int learn_buffer_len;
51659 +static int learn_buffer_user_len;
51660 +
51661 +static ssize_t
51662 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
51663 +{
51664 + DECLARE_WAITQUEUE(wait, current);
51665 + ssize_t retval = 0;
51666 +
51667 + add_wait_queue(&learn_wait, &wait);
51668 + set_current_state(TASK_INTERRUPTIBLE);
51669 + do {
51670 + mutex_lock(&gr_learn_user_mutex);
51671 + spin_lock(&gr_learn_lock);
51672 + if (learn_buffer_len)
51673 + break;
51674 + spin_unlock(&gr_learn_lock);
51675 + mutex_unlock(&gr_learn_user_mutex);
51676 + if (file->f_flags & O_NONBLOCK) {
51677 + retval = -EAGAIN;
51678 + goto out;
51679 + }
51680 + if (signal_pending(current)) {
51681 + retval = -ERESTARTSYS;
51682 + goto out;
51683 + }
51684 +
51685 + schedule();
51686 + } while (1);
51687 +
51688 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
51689 + learn_buffer_user_len = learn_buffer_len;
51690 + retval = learn_buffer_len;
51691 + learn_buffer_len = 0;
51692 +
51693 + spin_unlock(&gr_learn_lock);
51694 +
51695 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
51696 + retval = -EFAULT;
51697 +
51698 + mutex_unlock(&gr_learn_user_mutex);
51699 +out:
51700 + set_current_state(TASK_RUNNING);
51701 + remove_wait_queue(&learn_wait, &wait);
51702 + return retval;
51703 +}
51704 +
51705 +static unsigned int
51706 +poll_learn(struct file * file, poll_table * wait)
51707 +{
51708 + poll_wait(file, &learn_wait, wait);
51709 +
51710 + if (learn_buffer_len)
51711 + return (POLLIN | POLLRDNORM);
51712 +
51713 + return 0;
51714 +}
51715 +
51716 +void
51717 +gr_clear_learn_entries(void)
51718 +{
51719 + char *tmp;
51720 +
51721 + mutex_lock(&gr_learn_user_mutex);
51722 + spin_lock(&gr_learn_lock);
51723 + tmp = learn_buffer;
51724 + learn_buffer = NULL;
51725 + spin_unlock(&gr_learn_lock);
51726 + if (tmp)
51727 + vfree(tmp);
51728 + if (learn_buffer_user != NULL) {
51729 + vfree(learn_buffer_user);
51730 + learn_buffer_user = NULL;
51731 + }
51732 + learn_buffer_len = 0;
51733 + mutex_unlock(&gr_learn_user_mutex);
51734 +
51735 + return;
51736 +}
51737 +
51738 +void
51739 +gr_add_learn_entry(const char *fmt, ...)
51740 +{
51741 + va_list args;
51742 + unsigned int len;
51743 +
51744 + if (!gr_learn_attached)
51745 + return;
51746 +
51747 + spin_lock(&gr_learn_lock);
51748 +
51749 + /* leave a gap at the end so we know when it's "full" but don't have to
51750 + compute the exact length of the string we're trying to append
51751 + */
51752 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
51753 + spin_unlock(&gr_learn_lock);
51754 + wake_up_interruptible(&learn_wait);
51755 + return;
51756 + }
51757 + if (learn_buffer == NULL) {
51758 + spin_unlock(&gr_learn_lock);
51759 + return;
51760 + }
51761 +
51762 + va_start(args, fmt);
51763 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
51764 + va_end(args);
51765 +
51766 + learn_buffer_len += len + 1;
51767 +
51768 + spin_unlock(&gr_learn_lock);
51769 + wake_up_interruptible(&learn_wait);
51770 +
51771 + return;
51772 +}
51773 +
51774 +static int
51775 +open_learn(struct inode *inode, struct file *file)
51776 +{
51777 + if (file->f_mode & FMODE_READ && gr_learn_attached)
51778 + return -EBUSY;
51779 + if (file->f_mode & FMODE_READ) {
51780 + int retval = 0;
51781 + mutex_lock(&gr_learn_user_mutex);
51782 + if (learn_buffer == NULL)
51783 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
51784 + if (learn_buffer_user == NULL)
51785 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
51786 + if (learn_buffer == NULL) {
51787 + retval = -ENOMEM;
51788 + goto out_error;
51789 + }
51790 + if (learn_buffer_user == NULL) {
51791 + retval = -ENOMEM;
51792 + goto out_error;
51793 + }
51794 + learn_buffer_len = 0;
51795 + learn_buffer_user_len = 0;
51796 + gr_learn_attached = 1;
51797 +out_error:
51798 + mutex_unlock(&gr_learn_user_mutex);
51799 + return retval;
51800 + }
51801 + return 0;
51802 +}
51803 +
51804 +static int
51805 +close_learn(struct inode *inode, struct file *file)
51806 +{
51807 + if (file->f_mode & FMODE_READ) {
51808 + char *tmp = NULL;
51809 + mutex_lock(&gr_learn_user_mutex);
51810 + spin_lock(&gr_learn_lock);
51811 + tmp = learn_buffer;
51812 + learn_buffer = NULL;
51813 + spin_unlock(&gr_learn_lock);
51814 + if (tmp)
51815 + vfree(tmp);
51816 + if (learn_buffer_user != NULL) {
51817 + vfree(learn_buffer_user);
51818 + learn_buffer_user = NULL;
51819 + }
51820 + learn_buffer_len = 0;
51821 + learn_buffer_user_len = 0;
51822 + gr_learn_attached = 0;
51823 + mutex_unlock(&gr_learn_user_mutex);
51824 + }
51825 +
51826 + return 0;
51827 +}
51828 +
51829 +const struct file_operations grsec_fops = {
51830 + .read = read_learn,
51831 + .write = write_grsec_handler,
51832 + .open = open_learn,
51833 + .release = close_learn,
51834 + .poll = poll_learn,
51835 +};
51836 diff -urNp linux-2.6.32.45/grsecurity/gracl_res.c linux-2.6.32.45/grsecurity/gracl_res.c
51837 --- linux-2.6.32.45/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
51838 +++ linux-2.6.32.45/grsecurity/gracl_res.c 2011-04-17 15:56:46.000000000 -0400
51839 @@ -0,0 +1,67 @@
51840 +#include <linux/kernel.h>
51841 +#include <linux/sched.h>
51842 +#include <linux/gracl.h>
51843 +#include <linux/grinternal.h>
51844 +
51845 +static const char *restab_log[] = {
51846 + [RLIMIT_CPU] = "RLIMIT_CPU",
51847 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
51848 + [RLIMIT_DATA] = "RLIMIT_DATA",
51849 + [RLIMIT_STACK] = "RLIMIT_STACK",
51850 + [RLIMIT_CORE] = "RLIMIT_CORE",
51851 + [RLIMIT_RSS] = "RLIMIT_RSS",
51852 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
51853 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
51854 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
51855 + [RLIMIT_AS] = "RLIMIT_AS",
51856 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
51857 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
51858 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
51859 + [RLIMIT_NICE] = "RLIMIT_NICE",
51860 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
51861 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
51862 + [GR_CRASH_RES] = "RLIMIT_CRASH"
51863 +};
51864 +
51865 +void
51866 +gr_log_resource(const struct task_struct *task,
51867 + const int res, const unsigned long wanted, const int gt)
51868 +{
51869 + const struct cred *cred;
51870 + unsigned long rlim;
51871 +
51872 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
51873 + return;
51874 +
51875 +	// resource not yet supported
51876 + if (unlikely(!restab_log[res]))
51877 + return;
51878 +
51879 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
51880 + rlim = task->signal->rlim[res].rlim_max;
51881 + else
51882 + rlim = task->signal->rlim[res].rlim_cur;
51883 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
51884 + return;
51885 +
51886 + rcu_read_lock();
51887 + cred = __task_cred(task);
51888 +
51889 + if (res == RLIMIT_NPROC &&
51890 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
51891 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
51892 + goto out_rcu_unlock;
51893 + else if (res == RLIMIT_MEMLOCK &&
51894 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
51895 + goto out_rcu_unlock;
51896 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
51897 + goto out_rcu_unlock;
51898 + rcu_read_unlock();
51899 +
51900 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
51901 +
51902 + return;
51903 +out_rcu_unlock:
51904 + rcu_read_unlock();
51905 + return;
51906 +}
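
gr_log_resource() above only reports once the request actually exceeds the limit: nothing is logged when the limit is RLIM_INFINITY or while the request stays within it, and the gt flag decides whether hitting the limit exactly still counts as within it. A tiny userspace sketch of that predicate, with a stand-in infinity constant, might be:

#include <stdio.h>

#define RLIM_INFINITY_SKETCH (~0UL)	/* stand-in for the kernel's RLIM_INFINITY */

/* Mirrors the early-return test in gr_log_resource(): nothing is logged when
 * the limit is infinite or the request stays within it; with gt set, reaching
 * the limit exactly still counts as within it. */
static int over_limit(unsigned long wanted, unsigned long rlim, int gt)
{
	if (rlim == RLIM_INFINITY_SKETCH)
		return 0;
	return gt ? wanted > rlim : wanted >= rlim;
}

int main(void)
{
	printf("%d\n", over_limit(10, RLIM_INFINITY_SKETCH, 0));	/* 0: unlimited */
	printf("%d\n", over_limit(10, 10, 1));				/* 0: at the limit, gt set */
	printf("%d\n", over_limit(10, 10, 0));				/* 1: at the limit, gt clear */
	return 0;
}
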
51907 diff -urNp linux-2.6.32.45/grsecurity/gracl_segv.c linux-2.6.32.45/grsecurity/gracl_segv.c
51908 --- linux-2.6.32.45/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
51909 +++ linux-2.6.32.45/grsecurity/gracl_segv.c 2011-04-17 15:56:46.000000000 -0400
51910 @@ -0,0 +1,284 @@
51911 +#include <linux/kernel.h>
51912 +#include <linux/mm.h>
51913 +#include <asm/uaccess.h>
51914 +#include <asm/errno.h>
51915 +#include <asm/mman.h>
51916 +#include <net/sock.h>
51917 +#include <linux/file.h>
51918 +#include <linux/fs.h>
51919 +#include <linux/net.h>
51920 +#include <linux/in.h>
51921 +#include <linux/smp_lock.h>
51922 +#include <linux/slab.h>
51923 +#include <linux/types.h>
51924 +#include <linux/sched.h>
51925 +#include <linux/timer.h>
51926 +#include <linux/gracl.h>
51927 +#include <linux/grsecurity.h>
51928 +#include <linux/grinternal.h>
51929 +
51930 +static struct crash_uid *uid_set;
51931 +static unsigned short uid_used;
51932 +static DEFINE_SPINLOCK(gr_uid_lock);
51933 +extern rwlock_t gr_inode_lock;
51934 +extern struct acl_subject_label *
51935 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
51936 + struct acl_role_label *role);
51937 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
51938 +
51939 +int
51940 +gr_init_uidset(void)
51941 +{
51942 + uid_set =
51943 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
51944 + uid_used = 0;
51945 +
51946 + return uid_set ? 1 : 0;
51947 +}
51948 +
51949 +void
51950 +gr_free_uidset(void)
51951 +{
51952 + if (uid_set)
51953 + kfree(uid_set);
51954 +
51955 + return;
51956 +}
51957 +
51958 +int
51959 +gr_find_uid(const uid_t uid)
51960 +{
51961 + struct crash_uid *tmp = uid_set;
51962 + uid_t buid;
51963 + int low = 0, high = uid_used - 1, mid;
51964 +
51965 + while (high >= low) {
51966 + mid = (low + high) >> 1;
51967 + buid = tmp[mid].uid;
51968 + if (buid == uid)
51969 + return mid;
51970 + if (buid > uid)
51971 + high = mid - 1;
51972 + if (buid < uid)
51973 + low = mid + 1;
51974 + }
51975 +
51976 + return -1;
51977 +}
51978 +
51979 +static __inline__ void
51980 +gr_insertsort(void)
51981 +{
51982 + unsigned short i, j;
51983 + struct crash_uid index;
51984 +
51985 + for (i = 1; i < uid_used; i++) {
51986 + index = uid_set[i];
51987 + j = i;
51988 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
51989 + uid_set[j] = uid_set[j - 1];
51990 + j--;
51991 + }
51992 + uid_set[j] = index;
51993 + }
51994 +
51995 + return;
51996 +}
51997 +
51998 +static __inline__ void
51999 +gr_insert_uid(const uid_t uid, const unsigned long expires)
52000 +{
52001 + int loc;
52002 +
52003 + if (uid_used == GR_UIDTABLE_MAX)
52004 + return;
52005 +
52006 + loc = gr_find_uid(uid);
52007 +
52008 + if (loc >= 0) {
52009 + uid_set[loc].expires = expires;
52010 + return;
52011 + }
52012 +
52013 + uid_set[uid_used].uid = uid;
52014 + uid_set[uid_used].expires = expires;
52015 + uid_used++;
52016 +
52017 + gr_insertsort();
52018 +
52019 + return;
52020 +}
52021 +
52022 +void
52023 +gr_remove_uid(const unsigned short loc)
52024 +{
52025 + unsigned short i;
52026 +
52027 + for (i = loc + 1; i < uid_used; i++)
52028 + uid_set[i - 1] = uid_set[i];
52029 +
52030 + uid_used--;
52031 +
52032 + return;
52033 +}
52034 +
52035 +int
52036 +gr_check_crash_uid(const uid_t uid)
52037 +{
52038 + int loc;
52039 + int ret = 0;
52040 +
52041 + if (unlikely(!gr_acl_is_enabled()))
52042 + return 0;
52043 +
52044 + spin_lock(&gr_uid_lock);
52045 + loc = gr_find_uid(uid);
52046 +
52047 + if (loc < 0)
52048 + goto out_unlock;
52049 +
52050 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
52051 + gr_remove_uid(loc);
52052 + else
52053 + ret = 1;
52054 +
52055 +out_unlock:
52056 + spin_unlock(&gr_uid_lock);
52057 + return ret;
52058 +}
52059 +
52060 +static __inline__ int
52061 +proc_is_setxid(const struct cred *cred)
52062 +{
52063 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
52064 + cred->uid != cred->fsuid)
52065 + return 1;
52066 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
52067 + cred->gid != cred->fsgid)
52068 + return 1;
52069 +
52070 + return 0;
52071 +}
52072 +
52073 +void
52074 +gr_handle_crash(struct task_struct *task, const int sig)
52075 +{
52076 + struct acl_subject_label *curr;
52077 + struct acl_subject_label *curr2;
52078 + struct task_struct *tsk, *tsk2;
52079 + const struct cred *cred;
52080 + const struct cred *cred2;
52081 +
52082 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
52083 + return;
52084 +
52085 + if (unlikely(!gr_acl_is_enabled()))
52086 + return;
52087 +
52088 + curr = task->acl;
52089 +
52090 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
52091 + return;
52092 +
52093 + if (time_before_eq(curr->expires, get_seconds())) {
52094 + curr->expires = 0;
52095 + curr->crashes = 0;
52096 + }
52097 +
52098 + curr->crashes++;
52099 +
52100 + if (!curr->expires)
52101 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
52102 +
52103 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
52104 + time_after(curr->expires, get_seconds())) {
52105 + rcu_read_lock();
52106 + cred = __task_cred(task);
52107 + if (cred->uid && proc_is_setxid(cred)) {
52108 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
52109 + spin_lock(&gr_uid_lock);
52110 + gr_insert_uid(cred->uid, curr->expires);
52111 + spin_unlock(&gr_uid_lock);
52112 + curr->expires = 0;
52113 + curr->crashes = 0;
52114 + read_lock(&tasklist_lock);
52115 + do_each_thread(tsk2, tsk) {
52116 + cred2 = __task_cred(tsk);
52117 + if (tsk != task && cred2->uid == cred->uid)
52118 + gr_fake_force_sig(SIGKILL, tsk);
52119 + } while_each_thread(tsk2, tsk);
52120 + read_unlock(&tasklist_lock);
52121 + } else {
52122 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
52123 + read_lock(&tasklist_lock);
52124 + do_each_thread(tsk2, tsk) {
52125 + if (likely(tsk != task)) {
52126 + curr2 = tsk->acl;
52127 +
52128 + if (curr2->device == curr->device &&
52129 + curr2->inode == curr->inode)
52130 + gr_fake_force_sig(SIGKILL, tsk);
52131 + }
52132 + } while_each_thread(tsk2, tsk);
52133 + read_unlock(&tasklist_lock);
52134 + }
52135 + rcu_read_unlock();
52136 + }
52137 +
52138 + return;
52139 +}
52140 +
52141 +int
52142 +gr_check_crash_exec(const struct file *filp)
52143 +{
52144 + struct acl_subject_label *curr;
52145 +
52146 + if (unlikely(!gr_acl_is_enabled()))
52147 + return 0;
52148 +
52149 + read_lock(&gr_inode_lock);
52150 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
52151 + filp->f_path.dentry->d_inode->i_sb->s_dev,
52152 + current->role);
52153 + read_unlock(&gr_inode_lock);
52154 +
52155 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
52156 + (!curr->crashes && !curr->expires))
52157 + return 0;
52158 +
52159 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
52160 + time_after(curr->expires, get_seconds()))
52161 + return 1;
52162 + else if (time_before_eq(curr->expires, get_seconds())) {
52163 + curr->crashes = 0;
52164 + curr->expires = 0;
52165 + }
52166 +
52167 + return 0;
52168 +}
52169 +
52170 +void
52171 +gr_handle_alertkill(struct task_struct *task)
52172 +{
52173 + struct acl_subject_label *curracl;
52174 + __u32 curr_ip;
52175 + struct task_struct *p, *p2;
52176 +
52177 + if (unlikely(!gr_acl_is_enabled()))
52178 + return;
52179 +
52180 + curracl = task->acl;
52181 + curr_ip = task->signal->curr_ip;
52182 +
52183 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
52184 + read_lock(&tasklist_lock);
52185 + do_each_thread(p2, p) {
52186 + if (p->signal->curr_ip == curr_ip)
52187 + gr_fake_force_sig(SIGKILL, p);
52188 + } while_each_thread(p2, p);
52189 + read_unlock(&tasklist_lock);
52190 + } else if (curracl->mode & GR_KILLPROC)
52191 + gr_fake_force_sig(SIGKILL, task);
52192 +
52193 + return;
52194 +}
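
gr_find_uid() and gr_insert_uid() above keep the banned-uid table sorted so lookups can binary-search it, with gr_insertsort() bubbling a freshly appended entry into place. A small standalone sketch of that sort-then-search scheme (plain uid/expiry pairs, a fixed-size table, and no locking) might be:

#include <stdio.h>

struct crash_uid_sketch { unsigned int uid; unsigned long expires; };

static struct crash_uid_sketch uid_set[8];
static unsigned short uid_used;

/* binary search over the sorted table, as in gr_find_uid() */
static int find_uid(unsigned int uid)
{
	int low = 0, high = uid_used - 1;

	while (high >= low) {
		int mid = (low + high) >> 1;
		if (uid_set[mid].uid == uid)
			return mid;
		if (uid_set[mid].uid > uid)
			high = mid - 1;
		else
			low = mid + 1;
	}
	return -1;
}

/* append, then bubble into place, as in gr_insert_uid()/gr_insertsort() */
static void insert_uid(unsigned int uid, unsigned long expires)
{
	int loc = find_uid(uid);
	unsigned short j;

	if (loc >= 0) {			/* already present: just refresh the expiry */
		uid_set[loc].expires = expires;
		return;
	}
	uid_set[uid_used].uid = uid;
	uid_set[uid_used].expires = expires;
	j = uid_used++;
	while (j > 0 && uid_set[j - 1].uid > uid_set[j].uid) {
		struct crash_uid_sketch tmp = uid_set[j];
		uid_set[j] = uid_set[j - 1];
		uid_set[j - 1] = tmp;
		j--;
	}
}

int main(void)
{
	insert_uid(1000, 300);
	insert_uid(33, 300);
	insert_uid(500, 300);
	printf("uid 33 at index %d, uid 42 at index %d\n", find_uid(33), find_uid(42));
	return 0;
}
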
52195 diff -urNp linux-2.6.32.45/grsecurity/gracl_shm.c linux-2.6.32.45/grsecurity/gracl_shm.c
52196 --- linux-2.6.32.45/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
52197 +++ linux-2.6.32.45/grsecurity/gracl_shm.c 2011-04-17 15:56:46.000000000 -0400
52198 @@ -0,0 +1,40 @@
52199 +#include <linux/kernel.h>
52200 +#include <linux/mm.h>
52201 +#include <linux/sched.h>
52202 +#include <linux/file.h>
52203 +#include <linux/ipc.h>
52204 +#include <linux/gracl.h>
52205 +#include <linux/grsecurity.h>
52206 +#include <linux/grinternal.h>
52207 +
52208 +int
52209 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
52210 + const time_t shm_createtime, const uid_t cuid, const int shmid)
52211 +{
52212 + struct task_struct *task;
52213 +
52214 + if (!gr_acl_is_enabled())
52215 + return 1;
52216 +
52217 + rcu_read_lock();
52218 + read_lock(&tasklist_lock);
52219 +
52220 + task = find_task_by_vpid(shm_cprid);
52221 +
52222 + if (unlikely(!task))
52223 + task = find_task_by_vpid(shm_lapid);
52224 +
52225 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
52226 + (task->pid == shm_lapid)) &&
52227 + (task->acl->mode & GR_PROTSHM) &&
52228 + (task->acl != current->acl))) {
52229 + read_unlock(&tasklist_lock);
52230 + rcu_read_unlock();
52231 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
52232 + return 0;
52233 + }
52234 + read_unlock(&tasklist_lock);
52235 + rcu_read_unlock();
52236 +
52237 + return 1;
52238 +}
52239 diff -urNp linux-2.6.32.45/grsecurity/grsec_chdir.c linux-2.6.32.45/grsecurity/grsec_chdir.c
52240 --- linux-2.6.32.45/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
52241 +++ linux-2.6.32.45/grsecurity/grsec_chdir.c 2011-04-17 15:56:46.000000000 -0400
52242 @@ -0,0 +1,19 @@
52243 +#include <linux/kernel.h>
52244 +#include <linux/sched.h>
52245 +#include <linux/fs.h>
52246 +#include <linux/file.h>
52247 +#include <linux/grsecurity.h>
52248 +#include <linux/grinternal.h>
52249 +
52250 +void
52251 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
52252 +{
52253 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
52254 + if ((grsec_enable_chdir && grsec_enable_group &&
52255 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
52256 + !grsec_enable_group)) {
52257 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
52258 + }
52259 +#endif
52260 + return;
52261 +}
52262 diff -urNp linux-2.6.32.45/grsecurity/grsec_chroot.c linux-2.6.32.45/grsecurity/grsec_chroot.c
52263 --- linux-2.6.32.45/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
52264 +++ linux-2.6.32.45/grsecurity/grsec_chroot.c 2011-07-18 17:14:10.000000000 -0400
52265 @@ -0,0 +1,384 @@
52266 +#include <linux/kernel.h>
52267 +#include <linux/module.h>
52268 +#include <linux/sched.h>
52269 +#include <linux/file.h>
52270 +#include <linux/fs.h>
52271 +#include <linux/mount.h>
52272 +#include <linux/types.h>
52273 +#include <linux/pid_namespace.h>
52274 +#include <linux/grsecurity.h>
52275 +#include <linux/grinternal.h>
52276 +
52277 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
52278 +{
52279 +#ifdef CONFIG_GRKERNSEC
52280 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
52281 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
52282 + task->gr_is_chrooted = 1;
52283 + else
52284 + task->gr_is_chrooted = 0;
52285 +
52286 + task->gr_chroot_dentry = path->dentry;
52287 +#endif
52288 + return;
52289 +}
52290 +
52291 +void gr_clear_chroot_entries(struct task_struct *task)
52292 +{
52293 +#ifdef CONFIG_GRKERNSEC
52294 + task->gr_is_chrooted = 0;
52295 + task->gr_chroot_dentry = NULL;
52296 +#endif
52297 + return;
52298 +}
52299 +
52300 +int
52301 +gr_handle_chroot_unix(const pid_t pid)
52302 +{
52303 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
52304 + struct task_struct *p;
52305 +
52306 + if (unlikely(!grsec_enable_chroot_unix))
52307 + return 1;
52308 +
52309 + if (likely(!proc_is_chrooted(current)))
52310 + return 1;
52311 +
52312 + rcu_read_lock();
52313 + read_lock(&tasklist_lock);
52314 +
52315 + p = find_task_by_vpid_unrestricted(pid);
52316 + if (unlikely(p && !have_same_root(current, p))) {
52317 + read_unlock(&tasklist_lock);
52318 + rcu_read_unlock();
52319 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
52320 + return 0;
52321 + }
52322 + read_unlock(&tasklist_lock);
52323 + rcu_read_unlock();
52324 +#endif
52325 + return 1;
52326 +}
52327 +
52328 +int
52329 +gr_handle_chroot_nice(void)
52330 +{
52331 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52332 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
52333 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
52334 + return -EPERM;
52335 + }
52336 +#endif
52337 + return 0;
52338 +}
52339 +
52340 +int
52341 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
52342 +{
52343 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52344 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
52345 + && proc_is_chrooted(current)) {
52346 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
52347 + return -EACCES;
52348 + }
52349 +#endif
52350 + return 0;
52351 +}
52352 +
52353 +int
52354 +gr_handle_chroot_rawio(const struct inode *inode)
52355 +{
52356 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52357 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
52358 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
52359 + return 1;
52360 +#endif
52361 + return 0;
52362 +}
52363 +
52364 +int
52365 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
52366 +{
52367 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52368 + struct task_struct *p;
52369 + int ret = 0;
52370 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
52371 + return ret;
52372 +
52373 + read_lock(&tasklist_lock);
52374 + do_each_pid_task(pid, type, p) {
52375 + if (!have_same_root(current, p)) {
52376 + ret = 1;
52377 + goto out;
52378 + }
52379 + } while_each_pid_task(pid, type, p);
52380 +out:
52381 + read_unlock(&tasklist_lock);
52382 + return ret;
52383 +#endif
52384 + return 0;
52385 +}
52386 +
52387 +int
52388 +gr_pid_is_chrooted(struct task_struct *p)
52389 +{
52390 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52391 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
52392 + return 0;
52393 +
52394 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
52395 + !have_same_root(current, p)) {
52396 + return 1;
52397 + }
52398 +#endif
52399 + return 0;
52400 +}
52401 +
52402 +EXPORT_SYMBOL(gr_pid_is_chrooted);
52403 +
52404 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
52405 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
52406 +{
52407 + struct dentry *dentry = (struct dentry *)u_dentry;
52408 + struct vfsmount *mnt = (struct vfsmount *)u_mnt;
52409 + struct dentry *realroot;
52410 + struct vfsmount *realrootmnt;
52411 + struct dentry *currentroot;
52412 + struct vfsmount *currentmnt;
52413 + struct task_struct *reaper = &init_task;
52414 + int ret = 1;
52415 +
52416 + read_lock(&reaper->fs->lock);
52417 + realrootmnt = mntget(reaper->fs->root.mnt);
52418 + realroot = dget(reaper->fs->root.dentry);
52419 + read_unlock(&reaper->fs->lock);
52420 +
52421 + read_lock(&current->fs->lock);
52422 + currentmnt = mntget(current->fs->root.mnt);
52423 + currentroot = dget(current->fs->root.dentry);
52424 + read_unlock(&current->fs->lock);
52425 +
52426 + spin_lock(&dcache_lock);
52427 + for (;;) {
52428 + if (unlikely((dentry == realroot && mnt == realrootmnt)
52429 + || (dentry == currentroot && mnt == currentmnt)))
52430 + break;
52431 + if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
52432 + if (mnt->mnt_parent == mnt)
52433 + break;
52434 + dentry = mnt->mnt_mountpoint;
52435 + mnt = mnt->mnt_parent;
52436 + continue;
52437 + }
52438 + dentry = dentry->d_parent;
52439 + }
52440 + spin_unlock(&dcache_lock);
52441 +
52442 + dput(currentroot);
52443 + mntput(currentmnt);
52444 +
52445 + /* access is outside of chroot */
52446 + if (dentry == realroot && mnt == realrootmnt)
52447 + ret = 0;
52448 +
52449 + dput(realroot);
52450 + mntput(realrootmnt);
52451 + return ret;
52452 +}
52453 +#endif
52454 +
52455 +int
52456 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
52457 +{
52458 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
52459 + if (!grsec_enable_chroot_fchdir)
52460 + return 1;
52461 +
52462 + if (!proc_is_chrooted(current))
52463 + return 1;
52464 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
52465 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
52466 + return 0;
52467 + }
52468 +#endif
52469 + return 1;
52470 +}
52471 +
52472 +int
52473 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
52474 + const time_t shm_createtime)
52475 +{
52476 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
52477 + struct task_struct *p;
52478 + time_t starttime;
52479 +
52480 + if (unlikely(!grsec_enable_chroot_shmat))
52481 + return 1;
52482 +
52483 + if (likely(!proc_is_chrooted(current)))
52484 + return 1;
52485 +
52486 + rcu_read_lock();
52487 + read_lock(&tasklist_lock);
52488 +
52489 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
52490 + starttime = p->start_time.tv_sec;
52491 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
52492 + if (have_same_root(current, p)) {
52493 + goto allow;
52494 + } else {
52495 + read_unlock(&tasklist_lock);
52496 + rcu_read_unlock();
52497 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
52498 + return 0;
52499 + }
52500 + }
52501 + /* creator exited, pid reuse, fall through to next check */
52502 + }
52503 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
52504 + if (unlikely(!have_same_root(current, p))) {
52505 + read_unlock(&tasklist_lock);
52506 + rcu_read_unlock();
52507 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
52508 + return 0;
52509 + }
52510 + }
52511 +
52512 +allow:
52513 + read_unlock(&tasklist_lock);
52514 + rcu_read_unlock();
52515 +#endif
52516 + return 1;
52517 +}
52518 +
52519 +void
52520 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
52521 +{
52522 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
52523 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
52524 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
52525 +#endif
52526 + return;
52527 +}
52528 +
52529 +int
52530 +gr_handle_chroot_mknod(const struct dentry *dentry,
52531 + const struct vfsmount *mnt, const int mode)
52532 +{
52533 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
52534 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
52535 + proc_is_chrooted(current)) {
52536 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
52537 + return -EPERM;
52538 + }
52539 +#endif
52540 + return 0;
52541 +}
52542 +
52543 +int
52544 +gr_handle_chroot_mount(const struct dentry *dentry,
52545 + const struct vfsmount *mnt, const char *dev_name)
52546 +{
52547 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
52548 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
52549 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
52550 + return -EPERM;
52551 + }
52552 +#endif
52553 + return 0;
52554 +}
52555 +
52556 +int
52557 +gr_handle_chroot_pivot(void)
52558 +{
52559 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
52560 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
52561 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
52562 + return -EPERM;
52563 + }
52564 +#endif
52565 + return 0;
52566 +}
52567 +
52568 +int
52569 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
52570 +{
52571 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
52572 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
52573 + !gr_is_outside_chroot(dentry, mnt)) {
52574 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
52575 + return -EPERM;
52576 + }
52577 +#endif
52578 + return 0;
52579 +}
52580 +
52581 +int
52582 +gr_handle_chroot_caps(struct path *path)
52583 +{
52584 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52585 + if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
52586 + (init_task.fs->root.dentry != path->dentry) &&
52587 + (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
52588 +
52589 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
52590 + const struct cred *old = current_cred();
52591 + struct cred *new = prepare_creds();
52592 + if (new == NULL)
52593 + return 1;
52594 +
52595 + new->cap_permitted = cap_drop(old->cap_permitted,
52596 + chroot_caps);
52597 + new->cap_inheritable = cap_drop(old->cap_inheritable,
52598 + chroot_caps);
52599 + new->cap_effective = cap_drop(old->cap_effective,
52600 + chroot_caps);
52601 +
52602 + commit_creds(new);
52603 +
52604 + return 0;
52605 + }
52606 +#endif
52607 + return 0;
52608 +}
52609 +
52610 +int
52611 +gr_handle_chroot_sysctl(const int op)
52612 +{
52613 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
52614 + if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
52615 + && (op & MAY_WRITE))
52616 + return -EACCES;
52617 +#endif
52618 + return 0;
52619 +}
52620 +
52621 +void
52622 +gr_handle_chroot_chdir(struct path *path)
52623 +{
52624 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
52625 + if (grsec_enable_chroot_chdir)
52626 + set_fs_pwd(current->fs, path);
52627 +#endif
52628 + return;
52629 +}
52630 +
52631 +int
52632 +gr_handle_chroot_chmod(const struct dentry *dentry,
52633 + const struct vfsmount *mnt, const int mode)
52634 +{
52635 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
52636 + /* allow chmod +s on directories, but not on files */
52637 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
52638 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
52639 + proc_is_chrooted(current)) {
52640 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
52641 + return -EPERM;
52642 + }
52643 +#endif
52644 + return 0;
52645 +}
52646 +
52647 +#ifdef CONFIG_SECURITY
52648 +EXPORT_SYMBOL(gr_handle_chroot_caps);
52649 +#endif
52650 diff -urNp linux-2.6.32.45/grsecurity/grsec_disabled.c linux-2.6.32.45/grsecurity/grsec_disabled.c
52651 --- linux-2.6.32.45/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
52652 +++ linux-2.6.32.45/grsecurity/grsec_disabled.c 2011-04-17 15:56:46.000000000 -0400
52653 @@ -0,0 +1,447 @@
52654 +#include <linux/kernel.h>
52655 +#include <linux/module.h>
52656 +#include <linux/sched.h>
52657 +#include <linux/file.h>
52658 +#include <linux/fs.h>
52659 +#include <linux/kdev_t.h>
52660 +#include <linux/net.h>
52661 +#include <linux/in.h>
52662 +#include <linux/ip.h>
52663 +#include <linux/skbuff.h>
52664 +#include <linux/sysctl.h>
52665 +
52666 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
52667 +void
52668 +pax_set_initial_flags(struct linux_binprm *bprm)
52669 +{
52670 + return;
52671 +}
52672 +#endif
52673 +
52674 +#ifdef CONFIG_SYSCTL
52675 +__u32
52676 +gr_handle_sysctl(const struct ctl_table * table, const int op)
52677 +{
52678 + return 0;
52679 +}
52680 +#endif
52681 +
52682 +#ifdef CONFIG_TASKSTATS
52683 +int gr_is_taskstats_denied(int pid)
52684 +{
52685 + return 0;
52686 +}
52687 +#endif
52688 +
52689 +int
52690 +gr_acl_is_enabled(void)
52691 +{
52692 + return 0;
52693 +}
52694 +
52695 +int
52696 +gr_handle_rawio(const struct inode *inode)
52697 +{
52698 + return 0;
52699 +}
52700 +
52701 +void
52702 +gr_acl_handle_psacct(struct task_struct *task, const long code)
52703 +{
52704 + return;
52705 +}
52706 +
52707 +int
52708 +gr_handle_ptrace(struct task_struct *task, const long request)
52709 +{
52710 + return 0;
52711 +}
52712 +
52713 +int
52714 +gr_handle_proc_ptrace(struct task_struct *task)
52715 +{
52716 + return 0;
52717 +}
52718 +
52719 +void
52720 +gr_learn_resource(const struct task_struct *task,
52721 + const int res, const unsigned long wanted, const int gt)
52722 +{
52723 + return;
52724 +}
52725 +
52726 +int
52727 +gr_set_acls(const int type)
52728 +{
52729 + return 0;
52730 +}
52731 +
52732 +int
52733 +gr_check_hidden_task(const struct task_struct *tsk)
52734 +{
52735 + return 0;
52736 +}
52737 +
52738 +int
52739 +gr_check_protected_task(const struct task_struct *task)
52740 +{
52741 + return 0;
52742 +}
52743 +
52744 +int
52745 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
52746 +{
52747 + return 0;
52748 +}
52749 +
52750 +void
52751 +gr_copy_label(struct task_struct *tsk)
52752 +{
52753 + return;
52754 +}
52755 +
52756 +void
52757 +gr_set_pax_flags(struct task_struct *task)
52758 +{
52759 + return;
52760 +}
52761 +
52762 +int
52763 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
52764 + const int unsafe_share)
52765 +{
52766 + return 0;
52767 +}
52768 +
52769 +void
52770 +gr_handle_delete(const ino_t ino, const dev_t dev)
52771 +{
52772 + return;
52773 +}
52774 +
52775 +void
52776 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
52777 +{
52778 + return;
52779 +}
52780 +
52781 +void
52782 +gr_handle_crash(struct task_struct *task, const int sig)
52783 +{
52784 + return;
52785 +}
52786 +
52787 +int
52788 +gr_check_crash_exec(const struct file *filp)
52789 +{
52790 + return 0;
52791 +}
52792 +
52793 +int
52794 +gr_check_crash_uid(const uid_t uid)
52795 +{
52796 + return 0;
52797 +}
52798 +
52799 +void
52800 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
52801 + struct dentry *old_dentry,
52802 + struct dentry *new_dentry,
52803 + struct vfsmount *mnt, const __u8 replace)
52804 +{
52805 + return;
52806 +}
52807 +
52808 +int
52809 +gr_search_socket(const int family, const int type, const int protocol)
52810 +{
52811 + return 1;
52812 +}
52813 +
52814 +int
52815 +gr_search_connectbind(const int mode, const struct socket *sock,
52816 + const struct sockaddr_in *addr)
52817 +{
52818 + return 0;
52819 +}
52820 +
52821 +int
52822 +gr_is_capable(const int cap)
52823 +{
52824 + return 1;
52825 +}
52826 +
52827 +int
52828 +gr_is_capable_nolog(const int cap)
52829 +{
52830 + return 1;
52831 +}
52832 +
52833 +void
52834 +gr_handle_alertkill(struct task_struct *task)
52835 +{
52836 + return;
52837 +}
52838 +
52839 +__u32
52840 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
52841 +{
52842 + return 1;
52843 +}
52844 +
52845 +__u32
52846 +gr_acl_handle_hidden_file(const struct dentry * dentry,
52847 + const struct vfsmount * mnt)
52848 +{
52849 + return 1;
52850 +}
52851 +
52852 +__u32
52853 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
52854 + const int fmode)
52855 +{
52856 + return 1;
52857 +}
52858 +
52859 +__u32
52860 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
52861 +{
52862 + return 1;
52863 +}
52864 +
52865 +__u32
52866 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
52867 +{
52868 + return 1;
52869 +}
52870 +
52871 +int
52872 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
52873 + unsigned int *vm_flags)
52874 +{
52875 + return 1;
52876 +}
52877 +
52878 +__u32
52879 +gr_acl_handle_truncate(const struct dentry * dentry,
52880 + const struct vfsmount * mnt)
52881 +{
52882 + return 1;
52883 +}
52884 +
52885 +__u32
52886 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
52887 +{
52888 + return 1;
52889 +}
52890 +
52891 +__u32
52892 +gr_acl_handle_access(const struct dentry * dentry,
52893 + const struct vfsmount * mnt, const int fmode)
52894 +{
52895 + return 1;
52896 +}
52897 +
52898 +__u32
52899 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
52900 + mode_t mode)
52901 +{
52902 + return 1;
52903 +}
52904 +
52905 +__u32
52906 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
52907 + mode_t mode)
52908 +{
52909 + return 1;
52910 +}
52911 +
52912 +__u32
52913 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
52914 +{
52915 + return 1;
52916 +}
52917 +
52918 +__u32
52919 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
52920 +{
52921 + return 1;
52922 +}
52923 +
52924 +void
52925 +grsecurity_init(void)
52926 +{
52927 + return;
52928 +}
52929 +
52930 +__u32
52931 +gr_acl_handle_mknod(const struct dentry * new_dentry,
52932 + const struct dentry * parent_dentry,
52933 + const struct vfsmount * parent_mnt,
52934 + const int mode)
52935 +{
52936 + return 1;
52937 +}
52938 +
52939 +__u32
52940 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
52941 + const struct dentry * parent_dentry,
52942 + const struct vfsmount * parent_mnt)
52943 +{
52944 + return 1;
52945 +}
52946 +
52947 +__u32
52948 +gr_acl_handle_symlink(const struct dentry * new_dentry,
52949 + const struct dentry * parent_dentry,
52950 + const struct vfsmount * parent_mnt, const char *from)
52951 +{
52952 + return 1;
52953 +}
52954 +
52955 +__u32
52956 +gr_acl_handle_link(const struct dentry * new_dentry,
52957 + const struct dentry * parent_dentry,
52958 + const struct vfsmount * parent_mnt,
52959 + const struct dentry * old_dentry,
52960 + const struct vfsmount * old_mnt, const char *to)
52961 +{
52962 + return 1;
52963 +}
52964 +
52965 +int
52966 +gr_acl_handle_rename(const struct dentry *new_dentry,
52967 + const struct dentry *parent_dentry,
52968 + const struct vfsmount *parent_mnt,
52969 + const struct dentry *old_dentry,
52970 + const struct inode *old_parent_inode,
52971 + const struct vfsmount *old_mnt, const char *newname)
52972 +{
52973 + return 0;
52974 +}
52975 +
52976 +int
52977 +gr_acl_handle_filldir(const struct file *file, const char *name,
52978 + const int namelen, const ino_t ino)
52979 +{
52980 + return 1;
52981 +}
52982 +
52983 +int
52984 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
52985 + const time_t shm_createtime, const uid_t cuid, const int shmid)
52986 +{
52987 + return 1;
52988 +}
52989 +
52990 +int
52991 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
52992 +{
52993 + return 0;
52994 +}
52995 +
52996 +int
52997 +gr_search_accept(const struct socket *sock)
52998 +{
52999 + return 0;
53000 +}
53001 +
53002 +int
53003 +gr_search_listen(const struct socket *sock)
53004 +{
53005 + return 0;
53006 +}
53007 +
53008 +int
53009 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
53010 +{
53011 + return 0;
53012 +}
53013 +
53014 +__u32
53015 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
53016 +{
53017 + return 1;
53018 +}
53019 +
53020 +__u32
53021 +gr_acl_handle_creat(const struct dentry * dentry,
53022 + const struct dentry * p_dentry,
53023 + const struct vfsmount * p_mnt, const int fmode,
53024 + const int imode)
53025 +{
53026 + return 1;
53027 +}
53028 +
53029 +void
53030 +gr_acl_handle_exit(void)
53031 +{
53032 + return;
53033 +}
53034 +
53035 +int
53036 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
53037 +{
53038 + return 1;
53039 +}
53040 +
53041 +void
53042 +gr_set_role_label(const uid_t uid, const gid_t gid)
53043 +{
53044 + return;
53045 +}
53046 +
53047 +int
53048 +gr_acl_handle_procpidmem(const struct task_struct *task)
53049 +{
53050 + return 0;
53051 +}
53052 +
53053 +int
53054 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
53055 +{
53056 + return 0;
53057 +}
53058 +
53059 +int
53060 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
53061 +{
53062 + return 0;
53063 +}
53064 +
53065 +void
53066 +gr_set_kernel_label(struct task_struct *task)
53067 +{
53068 + return;
53069 +}
53070 +
53071 +int
53072 +gr_check_user_change(int real, int effective, int fs)
53073 +{
53074 + return 0;
53075 +}
53076 +
53077 +int
53078 +gr_check_group_change(int real, int effective, int fs)
53079 +{
53080 + return 0;
53081 +}
53082 +
53083 +int gr_acl_enable_at_secure(void)
53084 +{
53085 + return 0;
53086 +}
53087 +
53088 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
53089 +{
53090 + return dentry->d_inode->i_sb->s_dev;
53091 +}
53092 +
53093 +EXPORT_SYMBOL(gr_is_capable);
53094 +EXPORT_SYMBOL(gr_is_capable_nolog);
53095 +EXPORT_SYMBOL(gr_learn_resource);
53096 +EXPORT_SYMBOL(gr_set_kernel_label);
53097 +#ifdef CONFIG_SECURITY
53098 +EXPORT_SYMBOL(gr_check_user_change);
53099 +EXPORT_SYMBOL(gr_check_group_change);
53100 +#endif
53101 diff -urNp linux-2.6.32.45/grsecurity/grsec_exec.c linux-2.6.32.45/grsecurity/grsec_exec.c
53102 --- linux-2.6.32.45/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
53103 +++ linux-2.6.32.45/grsecurity/grsec_exec.c 2011-08-11 19:57:19.000000000 -0400
53104 @@ -0,0 +1,132 @@
53105 +#include <linux/kernel.h>
53106 +#include <linux/sched.h>
53107 +#include <linux/file.h>
53108 +#include <linux/binfmts.h>
53109 +#include <linux/smp_lock.h>
53110 +#include <linux/fs.h>
53111 +#include <linux/types.h>
53112 +#include <linux/grdefs.h>
53113 +#include <linux/grinternal.h>
53114 +#include <linux/capability.h>
53115 +#include <linux/compat.h>
53116 +
53117 +#include <asm/uaccess.h>
53118 +
53119 +#ifdef CONFIG_GRKERNSEC_EXECLOG
53120 +static char gr_exec_arg_buf[132];
53121 +static DEFINE_MUTEX(gr_exec_arg_mutex);
53122 +#endif
53123 +
53124 +void
53125 +gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
53126 +{
53127 +#ifdef CONFIG_GRKERNSEC_EXECLOG
53128 + char *grarg = gr_exec_arg_buf;
53129 + unsigned int i, x, execlen = 0;
53130 + char c;
53131 +
53132 + if (!((grsec_enable_execlog && grsec_enable_group &&
53133 + in_group_p(grsec_audit_gid))
53134 + || (grsec_enable_execlog && !grsec_enable_group)))
53135 + return;
53136 +
53137 + mutex_lock(&gr_exec_arg_mutex);
53138 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
53139 +
53140 + if (unlikely(argv == NULL))
53141 + goto log;
53142 +
53143 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
53144 + const char __user *p;
53145 + unsigned int len;
53146 +
53147 + if (copy_from_user(&p, argv + i, sizeof(p)))
53148 + goto log;
53149 + if (!p)
53150 + goto log;
53151 + len = strnlen_user(p, 128 - execlen);
53152 + if (len > 128 - execlen)
53153 + len = 128 - execlen;
53154 + else if (len > 0)
53155 + len--;
53156 + if (copy_from_user(grarg + execlen, p, len))
53157 + goto log;
53158 +
53159 + /* rewrite unprintable characters */
53160 + for (x = 0; x < len; x++) {
53161 + c = *(grarg + execlen + x);
53162 + if (c < 32 || c > 126)
53163 + *(grarg + execlen + x) = ' ';
53164 + }
53165 +
53166 + execlen += len;
53167 + *(grarg + execlen) = ' ';
53168 + *(grarg + execlen + 1) = '\0';
53169 + execlen++;
53170 + }
53171 +
53172 + log:
53173 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
53174 + bprm->file->f_path.mnt, grarg);
53175 + mutex_unlock(&gr_exec_arg_mutex);
53176 +#endif
53177 + return;
53178 +}
53179 +
53180 +#ifdef CONFIG_COMPAT
53181 +void
53182 +gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
53183 +{
53184 +#ifdef CONFIG_GRKERNSEC_EXECLOG
53185 + char *grarg = gr_exec_arg_buf;
53186 + unsigned int i, x, execlen = 0;
53187 + char c;
53188 +
53189 + if (!((grsec_enable_execlog && grsec_enable_group &&
53190 + in_group_p(grsec_audit_gid))
53191 + || (grsec_enable_execlog && !grsec_enable_group)))
53192 + return;
53193 +
53194 + mutex_lock(&gr_exec_arg_mutex);
53195 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
53196 +
53197 + if (unlikely(argv == NULL))
53198 + goto log;
53199 +
53200 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
53201 + compat_uptr_t p;
53202 + unsigned int len;
53203 +
53204 + if (get_user(p, argv + i))
53205 + goto log;
53206 + len = strnlen_user(compat_ptr(p), 128 - execlen);
53207 + if (len > 128 - execlen)
53208 + len = 128 - execlen;
53209 + else if (len > 0)
53210 + len--;
53211 + else
53212 + goto log;
53213 + if (copy_from_user(grarg + execlen, compat_ptr(p), len))
53214 + goto log;
53215 +
53216 + /* rewrite unprintable characters */
53217 + for (x = 0; x < len; x++) {
53218 + c = *(grarg + execlen + x);
53219 + if (c < 32 || c > 126)
53220 + *(grarg + execlen + x) = ' ';
53221 + }
53222 +
53223 + execlen += len;
53224 + *(grarg + execlen) = ' ';
53225 + *(grarg + execlen + 1) = '\0';
53226 + execlen++;
53227 + }
53228 +
53229 + log:
53230 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
53231 + bprm->file->f_path.mnt, grarg);
53232 + mutex_unlock(&gr_exec_arg_mutex);
53233 +#endif
53234 + return;
53235 +}
53236 +#endif
53237 diff -urNp linux-2.6.32.45/grsecurity/grsec_fifo.c linux-2.6.32.45/grsecurity/grsec_fifo.c
53238 --- linux-2.6.32.45/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
53239 +++ linux-2.6.32.45/grsecurity/grsec_fifo.c 2011-04-17 15:56:46.000000000 -0400
53240 @@ -0,0 +1,24 @@
53241 +#include <linux/kernel.h>
53242 +#include <linux/sched.h>
53243 +#include <linux/fs.h>
53244 +#include <linux/file.h>
53245 +#include <linux/grinternal.h>
53246 +
53247 +int
53248 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
53249 + const struct dentry *dir, const int flag, const int acc_mode)
53250 +{
53251 +#ifdef CONFIG_GRKERNSEC_FIFO
53252 + const struct cred *cred = current_cred();
53253 +
53254 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
53255 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
53256 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
53257 + (cred->fsuid != dentry->d_inode->i_uid)) {
53258 + if (!inode_permission(dentry->d_inode, acc_mode))
53259 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
53260 + return -EACCES;
53261 + }
53262 +#endif
53263 + return 0;
53264 +}
53265 diff -urNp linux-2.6.32.45/grsecurity/grsec_fork.c linux-2.6.32.45/grsecurity/grsec_fork.c
53266 --- linux-2.6.32.45/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
53267 +++ linux-2.6.32.45/grsecurity/grsec_fork.c 2011-04-17 15:56:46.000000000 -0400
53268 @@ -0,0 +1,23 @@
53269 +#include <linux/kernel.h>
53270 +#include <linux/sched.h>
53271 +#include <linux/grsecurity.h>
53272 +#include <linux/grinternal.h>
53273 +#include <linux/errno.h>
53274 +
53275 +void
53276 +gr_log_forkfail(const int retval)
53277 +{
53278 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
53279 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
53280 + switch (retval) {
53281 + case -EAGAIN:
53282 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
53283 + break;
53284 + case -ENOMEM:
53285 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
53286 + break;
53287 + }
53288 + }
53289 +#endif
53290 + return;
53291 +}
53292 diff -urNp linux-2.6.32.45/grsecurity/grsec_init.c linux-2.6.32.45/grsecurity/grsec_init.c
53293 --- linux-2.6.32.45/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
53294 +++ linux-2.6.32.45/grsecurity/grsec_init.c 2011-08-11 19:57:42.000000000 -0400
53295 @@ -0,0 +1,270 @@
53296 +#include <linux/kernel.h>
53297 +#include <linux/sched.h>
53298 +#include <linux/mm.h>
53299 +#include <linux/smp_lock.h>
53300 +#include <linux/gracl.h>
53301 +#include <linux/slab.h>
53302 +#include <linux/vmalloc.h>
53303 +#include <linux/percpu.h>
53304 +#include <linux/module.h>
53305 +
53306 +int grsec_enable_brute;
53307 +int grsec_enable_link;
53308 +int grsec_enable_dmesg;
53309 +int grsec_enable_harden_ptrace;
53310 +int grsec_enable_fifo;
53311 +int grsec_enable_execlog;
53312 +int grsec_enable_signal;
53313 +int grsec_enable_forkfail;
53314 +int grsec_enable_audit_ptrace;
53315 +int grsec_enable_time;
53316 +int grsec_enable_audit_textrel;
53317 +int grsec_enable_group;
53318 +int grsec_audit_gid;
53319 +int grsec_enable_chdir;
53320 +int grsec_enable_mount;
53321 +int grsec_enable_rofs;
53322 +int grsec_enable_chroot_findtask;
53323 +int grsec_enable_chroot_mount;
53324 +int grsec_enable_chroot_shmat;
53325 +int grsec_enable_chroot_fchdir;
53326 +int grsec_enable_chroot_double;
53327 +int grsec_enable_chroot_pivot;
53328 +int grsec_enable_chroot_chdir;
53329 +int grsec_enable_chroot_chmod;
53330 +int grsec_enable_chroot_mknod;
53331 +int grsec_enable_chroot_nice;
53332 +int grsec_enable_chroot_execlog;
53333 +int grsec_enable_chroot_caps;
53334 +int grsec_enable_chroot_sysctl;
53335 +int grsec_enable_chroot_unix;
53336 +int grsec_enable_tpe;
53337 +int grsec_tpe_gid;
53338 +int grsec_enable_blackhole;
53339 +#ifdef CONFIG_IPV6_MODULE
53340 +EXPORT_SYMBOL(grsec_enable_blackhole);
53341 +#endif
53342 +int grsec_lastack_retries;
53343 +int grsec_enable_tpe_all;
53344 +int grsec_enable_tpe_invert;
53345 +int grsec_enable_socket_all;
53346 +int grsec_socket_all_gid;
53347 +int grsec_enable_socket_client;
53348 +int grsec_socket_client_gid;
53349 +int grsec_enable_socket_server;
53350 +int grsec_socket_server_gid;
53351 +int grsec_resource_logging;
53352 +int grsec_disable_privio;
53353 +int grsec_enable_log_rwxmaps;
53354 +int grsec_lock;
53355 +
53356 +DEFINE_SPINLOCK(grsec_alert_lock);
53357 +unsigned long grsec_alert_wtime = 0;
53358 +unsigned long grsec_alert_fyet = 0;
53359 +
53360 +DEFINE_SPINLOCK(grsec_audit_lock);
53361 +
53362 +DEFINE_RWLOCK(grsec_exec_file_lock);
53363 +
53364 +char *gr_shared_page[4];
53365 +
53366 +char *gr_alert_log_fmt;
53367 +char *gr_audit_log_fmt;
53368 +char *gr_alert_log_buf;
53369 +char *gr_audit_log_buf;
53370 +
53371 +extern struct gr_arg *gr_usermode;
53372 +extern unsigned char *gr_system_salt;
53373 +extern unsigned char *gr_system_sum;
53374 +
53375 +void __init
53376 +grsecurity_init(void)
53377 +{
53378 + int j;
53379 + /* create the per-cpu shared pages */
53380 +
53381 +#ifdef CONFIG_X86
53382 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
53383 +#endif
53384 +
53385 + for (j = 0; j < 4; j++) {
53386 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
53387 + if (gr_shared_page[j] == NULL) {
53388 + panic("Unable to allocate grsecurity shared page");
53389 + return;
53390 + }
53391 + }
53392 +
53393 + /* allocate log buffers */
53394 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
53395 + if (!gr_alert_log_fmt) {
53396 + panic("Unable to allocate grsecurity alert log format buffer");
53397 + return;
53398 + }
53399 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
53400 + if (!gr_audit_log_fmt) {
53401 + panic("Unable to allocate grsecurity audit log format buffer");
53402 + return;
53403 + }
53404 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
53405 + if (!gr_alert_log_buf) {
53406 + panic("Unable to allocate grsecurity alert log buffer");
53407 + return;
53408 + }
53409 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
53410 + if (!gr_audit_log_buf) {
53411 + panic("Unable to allocate grsecurity audit log buffer");
53412 + return;
53413 + }
53414 +
53415 + /* allocate memory for authentication structure */
53416 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
53417 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
53418 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
53419 +
53420 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
53421 + panic("Unable to allocate grsecurity authentication structure");
53422 + return;
53423 + }
53424 +
53425 +
53426 +#ifdef CONFIG_GRKERNSEC_IO
53427 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
53428 + grsec_disable_privio = 1;
53429 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
53430 + grsec_disable_privio = 1;
53431 +#else
53432 + grsec_disable_privio = 0;
53433 +#endif
53434 +#endif
53435 +
53436 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
53437 + /* for backward compatibility, tpe_invert always defaults to on if
53438 + enabled in the kernel
53439 + */
53440 + grsec_enable_tpe_invert = 1;
53441 +#endif
53442 +
53443 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
53444 +#ifndef CONFIG_GRKERNSEC_SYSCTL
53445 + grsec_lock = 1;
53446 +#endif
53447 +
53448 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
53449 + grsec_enable_audit_textrel = 1;
53450 +#endif
53451 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
53452 + grsec_enable_log_rwxmaps = 1;
53453 +#endif
53454 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
53455 + grsec_enable_group = 1;
53456 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
53457 +#endif
53458 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
53459 + grsec_enable_chdir = 1;
53460 +#endif
53461 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
53462 + grsec_enable_harden_ptrace = 1;
53463 +#endif
53464 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
53465 + grsec_enable_mount = 1;
53466 +#endif
53467 +#ifdef CONFIG_GRKERNSEC_LINK
53468 + grsec_enable_link = 1;
53469 +#endif
53470 +#ifdef CONFIG_GRKERNSEC_BRUTE
53471 + grsec_enable_brute = 1;
53472 +#endif
53473 +#ifdef CONFIG_GRKERNSEC_DMESG
53474 + grsec_enable_dmesg = 1;
53475 +#endif
53476 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
53477 + grsec_enable_blackhole = 1;
53478 + grsec_lastack_retries = 4;
53479 +#endif
53480 +#ifdef CONFIG_GRKERNSEC_FIFO
53481 + grsec_enable_fifo = 1;
53482 +#endif
53483 +#ifdef CONFIG_GRKERNSEC_EXECLOG
53484 + grsec_enable_execlog = 1;
53485 +#endif
53486 +#ifdef CONFIG_GRKERNSEC_SIGNAL
53487 + grsec_enable_signal = 1;
53488 +#endif
53489 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
53490 + grsec_enable_forkfail = 1;
53491 +#endif
53492 +#ifdef CONFIG_GRKERNSEC_TIME
53493 + grsec_enable_time = 1;
53494 +#endif
53495 +#ifdef CONFIG_GRKERNSEC_RESLOG
53496 + grsec_resource_logging = 1;
53497 +#endif
53498 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53499 + grsec_enable_chroot_findtask = 1;
53500 +#endif
53501 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
53502 + grsec_enable_chroot_unix = 1;
53503 +#endif
53504 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
53505 + grsec_enable_chroot_mount = 1;
53506 +#endif
53507 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
53508 + grsec_enable_chroot_fchdir = 1;
53509 +#endif
53510 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
53511 + grsec_enable_chroot_shmat = 1;
53512 +#endif
53513 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
53514 + grsec_enable_audit_ptrace = 1;
53515 +#endif
53516 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
53517 + grsec_enable_chroot_double = 1;
53518 +#endif
53519 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
53520 + grsec_enable_chroot_pivot = 1;
53521 +#endif
53522 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
53523 + grsec_enable_chroot_chdir = 1;
53524 +#endif
53525 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
53526 + grsec_enable_chroot_chmod = 1;
53527 +#endif
53528 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
53529 + grsec_enable_chroot_mknod = 1;
53530 +#endif
53531 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
53532 + grsec_enable_chroot_nice = 1;
53533 +#endif
53534 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
53535 + grsec_enable_chroot_execlog = 1;
53536 +#endif
53537 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53538 + grsec_enable_chroot_caps = 1;
53539 +#endif
53540 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
53541 + grsec_enable_chroot_sysctl = 1;
53542 +#endif
53543 +#ifdef CONFIG_GRKERNSEC_TPE
53544 + grsec_enable_tpe = 1;
53545 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
53546 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
53547 + grsec_enable_tpe_all = 1;
53548 +#endif
53549 +#endif
53550 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
53551 + grsec_enable_socket_all = 1;
53552 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
53553 +#endif
53554 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
53555 + grsec_enable_socket_client = 1;
53556 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
53557 +#endif
53558 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
53559 + grsec_enable_socket_server = 1;
53560 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
53561 +#endif
53562 +#endif
53563 +
53564 + return;
53565 +}
53566 diff -urNp linux-2.6.32.45/grsecurity/grsec_link.c linux-2.6.32.45/grsecurity/grsec_link.c
53567 --- linux-2.6.32.45/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
53568 +++ linux-2.6.32.45/grsecurity/grsec_link.c 2011-04-17 15:56:46.000000000 -0400
53569 @@ -0,0 +1,43 @@
53570 +#include <linux/kernel.h>
53571 +#include <linux/sched.h>
53572 +#include <linux/fs.h>
53573 +#include <linux/file.h>
53574 +#include <linux/grinternal.h>
53575 +
53576 +int
53577 +gr_handle_follow_link(const struct inode *parent,
53578 + const struct inode *inode,
53579 + const struct dentry *dentry, const struct vfsmount *mnt)
53580 +{
53581 +#ifdef CONFIG_GRKERNSEC_LINK
53582 + const struct cred *cred = current_cred();
53583 +
53584 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
53585 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
53586 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
53587 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
53588 + return -EACCES;
53589 + }
53590 +#endif
53591 + return 0;
53592 +}
53593 +
53594 +int
53595 +gr_handle_hardlink(const struct dentry *dentry,
53596 + const struct vfsmount *mnt,
53597 + struct inode *inode, const int mode, const char *to)
53598 +{
53599 +#ifdef CONFIG_GRKERNSEC_LINK
53600 + const struct cred *cred = current_cred();
53601 +
53602 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
53603 + (!S_ISREG(mode) || (mode & S_ISUID) ||
53604 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
53605 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
53606 + !capable(CAP_FOWNER) && cred->uid) {
53607 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
53608 + return -EPERM;
53609 + }
53610 +#endif
53611 + return 0;
53612 +}
53613 diff -urNp linux-2.6.32.45/grsecurity/grsec_log.c linux-2.6.32.45/grsecurity/grsec_log.c
53614 --- linux-2.6.32.45/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
53615 +++ linux-2.6.32.45/grsecurity/grsec_log.c 2011-05-10 21:58:49.000000000 -0400
53616 @@ -0,0 +1,310 @@
53617 +#include <linux/kernel.h>
53618 +#include <linux/sched.h>
53619 +#include <linux/file.h>
53620 +#include <linux/tty.h>
53621 +#include <linux/fs.h>
53622 +#include <linux/grinternal.h>
53623 +
53624 +#ifdef CONFIG_TREE_PREEMPT_RCU
53625 +#define DISABLE_PREEMPT() preempt_disable()
53626 +#define ENABLE_PREEMPT() preempt_enable()
53627 +#else
53628 +#define DISABLE_PREEMPT()
53629 +#define ENABLE_PREEMPT()
53630 +#endif
53631 +
53632 +#define BEGIN_LOCKS(x) \
53633 + DISABLE_PREEMPT(); \
53634 + rcu_read_lock(); \
53635 + read_lock(&tasklist_lock); \
53636 + read_lock(&grsec_exec_file_lock); \
53637 + if (x != GR_DO_AUDIT) \
53638 + spin_lock(&grsec_alert_lock); \
53639 + else \
53640 + spin_lock(&grsec_audit_lock)
53641 +
53642 +#define END_LOCKS(x) \
53643 + if (x != GR_DO_AUDIT) \
53644 + spin_unlock(&grsec_alert_lock); \
53645 + else \
53646 + spin_unlock(&grsec_audit_lock); \
53647 + read_unlock(&grsec_exec_file_lock); \
53648 + read_unlock(&tasklist_lock); \
53649 + rcu_read_unlock(); \
53650 + ENABLE_PREEMPT(); \
53651 + if (x == GR_DONT_AUDIT) \
53652 + gr_handle_alertkill(current)
53653 +
53654 +enum {
53655 + FLOODING,
53656 + NO_FLOODING
53657 +};
53658 +
53659 +extern char *gr_alert_log_fmt;
53660 +extern char *gr_audit_log_fmt;
53661 +extern char *gr_alert_log_buf;
53662 +extern char *gr_audit_log_buf;
53663 +
53664 +static int gr_log_start(int audit)
53665 +{
53666 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
53667 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
53668 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
53669 +
53670 + if (audit == GR_DO_AUDIT)
53671 + goto set_fmt;
53672 +
53673 + if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
53674 + grsec_alert_wtime = jiffies;
53675 + grsec_alert_fyet = 0;
53676 + } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
53677 + grsec_alert_fyet++;
53678 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
53679 + grsec_alert_wtime = jiffies;
53680 + grsec_alert_fyet++;
53681 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
53682 + return FLOODING;
53683 + } else return FLOODING;
53684 +
53685 +set_fmt:
53686 + memset(buf, 0, PAGE_SIZE);
53687 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
53688 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
53689 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
53690 + } else if (current->signal->curr_ip) {
53691 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
53692 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
53693 + } else if (gr_acl_is_enabled()) {
53694 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
53695 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
53696 + } else {
53697 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
53698 + strcpy(buf, fmt);
53699 + }
53700 +
53701 + return NO_FLOODING;
53702 +}
53703 +
53704 +static void gr_log_middle(int audit, const char *msg, va_list ap)
53705 + __attribute__ ((format (printf, 2, 0)));
53706 +
53707 +static void gr_log_middle(int audit, const char *msg, va_list ap)
53708 +{
53709 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
53710 + unsigned int len = strlen(buf);
53711 +
53712 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
53713 +
53714 + return;
53715 +}
53716 +
53717 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
53718 + __attribute__ ((format (printf, 2, 3)));
53719 +
53720 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
53721 +{
53722 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
53723 + unsigned int len = strlen(buf);
53724 + va_list ap;
53725 +
53726 + va_start(ap, msg);
53727 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
53728 + va_end(ap);
53729 +
53730 + return;
53731 +}
53732 +
53733 +static void gr_log_end(int audit)
53734 +{
53735 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
53736 + unsigned int len = strlen(buf);
53737 +
53738 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
53739 + printk("%s\n", buf);
53740 +
53741 + return;
53742 +}
53743 +
53744 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
53745 +{
53746 + int logtype;
53747 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
53748 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
53749 + void *voidptr = NULL;
53750 + int num1 = 0, num2 = 0;
53751 + unsigned long ulong1 = 0, ulong2 = 0;
53752 + struct dentry *dentry = NULL;
53753 + struct vfsmount *mnt = NULL;
53754 + struct file *file = NULL;
53755 + struct task_struct *task = NULL;
53756 + const struct cred *cred, *pcred;
53757 + va_list ap;
53758 +
53759 + BEGIN_LOCKS(audit);
53760 + logtype = gr_log_start(audit);
53761 + if (logtype == FLOODING) {
53762 + END_LOCKS(audit);
53763 + return;
53764 + }
53765 + va_start(ap, argtypes);
53766 + switch (argtypes) {
53767 + case GR_TTYSNIFF:
53768 + task = va_arg(ap, struct task_struct *);
53769 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
53770 + break;
53771 + case GR_SYSCTL_HIDDEN:
53772 + str1 = va_arg(ap, char *);
53773 + gr_log_middle_varargs(audit, msg, result, str1);
53774 + break;
53775 + case GR_RBAC:
53776 + dentry = va_arg(ap, struct dentry *);
53777 + mnt = va_arg(ap, struct vfsmount *);
53778 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
53779 + break;
53780 + case GR_RBAC_STR:
53781 + dentry = va_arg(ap, struct dentry *);
53782 + mnt = va_arg(ap, struct vfsmount *);
53783 + str1 = va_arg(ap, char *);
53784 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
53785 + break;
53786 + case GR_STR_RBAC:
53787 + str1 = va_arg(ap, char *);
53788 + dentry = va_arg(ap, struct dentry *);
53789 + mnt = va_arg(ap, struct vfsmount *);
53790 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
53791 + break;
53792 + case GR_RBAC_MODE2:
53793 + dentry = va_arg(ap, struct dentry *);
53794 + mnt = va_arg(ap, struct vfsmount *);
53795 + str1 = va_arg(ap, char *);
53796 + str2 = va_arg(ap, char *);
53797 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
53798 + break;
53799 + case GR_RBAC_MODE3:
53800 + dentry = va_arg(ap, struct dentry *);
53801 + mnt = va_arg(ap, struct vfsmount *);
53802 + str1 = va_arg(ap, char *);
53803 + str2 = va_arg(ap, char *);
53804 + str3 = va_arg(ap, char *);
53805 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
53806 + break;
53807 + case GR_FILENAME:
53808 + dentry = va_arg(ap, struct dentry *);
53809 + mnt = va_arg(ap, struct vfsmount *);
53810 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
53811 + break;
53812 + case GR_STR_FILENAME:
53813 + str1 = va_arg(ap, char *);
53814 + dentry = va_arg(ap, struct dentry *);
53815 + mnt = va_arg(ap, struct vfsmount *);
53816 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
53817 + break;
53818 + case GR_FILENAME_STR:
53819 + dentry = va_arg(ap, struct dentry *);
53820 + mnt = va_arg(ap, struct vfsmount *);
53821 + str1 = va_arg(ap, char *);
53822 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
53823 + break;
53824 + case GR_FILENAME_TWO_INT:
53825 + dentry = va_arg(ap, struct dentry *);
53826 + mnt = va_arg(ap, struct vfsmount *);
53827 + num1 = va_arg(ap, int);
53828 + num2 = va_arg(ap, int);
53829 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
53830 + break;
53831 + case GR_FILENAME_TWO_INT_STR:
53832 + dentry = va_arg(ap, struct dentry *);
53833 + mnt = va_arg(ap, struct vfsmount *);
53834 + num1 = va_arg(ap, int);
53835 + num2 = va_arg(ap, int);
53836 + str1 = va_arg(ap, char *);
53837 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
53838 + break;
53839 + case GR_TEXTREL:
53840 + file = va_arg(ap, struct file *);
53841 + ulong1 = va_arg(ap, unsigned long);
53842 + ulong2 = va_arg(ap, unsigned long);
53843 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
53844 + break;
53845 + case GR_PTRACE:
53846 + task = va_arg(ap, struct task_struct *);
53847 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
53848 + break;
53849 + case GR_RESOURCE:
53850 + task = va_arg(ap, struct task_struct *);
53851 + cred = __task_cred(task);
53852 + pcred = __task_cred(task->real_parent);
53853 + ulong1 = va_arg(ap, unsigned long);
53854 + str1 = va_arg(ap, char *);
53855 + ulong2 = va_arg(ap, unsigned long);
53856 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
53857 + break;
53858 + case GR_CAP:
53859 + task = va_arg(ap, struct task_struct *);
53860 + cred = __task_cred(task);
53861 + pcred = __task_cred(task->real_parent);
53862 + str1 = va_arg(ap, char *);
53863 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
53864 + break;
53865 + case GR_SIG:
53866 + str1 = va_arg(ap, char *);
53867 + voidptr = va_arg(ap, void *);
53868 + gr_log_middle_varargs(audit, msg, str1, voidptr);
53869 + break;
53870 + case GR_SIG2:
53871 + task = va_arg(ap, struct task_struct *);
53872 + cred = __task_cred(task);
53873 + pcred = __task_cred(task->real_parent);
53874 + num1 = va_arg(ap, int);
53875 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
53876 + break;
53877 + case GR_CRASH1:
53878 + task = va_arg(ap, struct task_struct *);
53879 + cred = __task_cred(task);
53880 + pcred = __task_cred(task->real_parent);
53881 + ulong1 = va_arg(ap, unsigned long);
53882 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
53883 + break;
53884 + case GR_CRASH2:
53885 + task = va_arg(ap, struct task_struct *);
53886 + cred = __task_cred(task);
53887 + pcred = __task_cred(task->real_parent);
53888 + ulong1 = va_arg(ap, unsigned long);
53889 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
53890 + break;
53891 + case GR_RWXMAP:
53892 + file = va_arg(ap, struct file *);
53893 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
53894 + break;
53895 + case GR_PSACCT:
53896 + {
53897 + unsigned int wday, cday;
53898 + __u8 whr, chr;
53899 + __u8 wmin, cmin;
53900 + __u8 wsec, csec;
53901 + char cur_tty[64] = { 0 };
53902 + char parent_tty[64] = { 0 };
53903 +
53904 + task = va_arg(ap, struct task_struct *);
53905 + wday = va_arg(ap, unsigned int);
53906 + cday = va_arg(ap, unsigned int);
53907 + whr = va_arg(ap, int);
53908 + chr = va_arg(ap, int);
53909 + wmin = va_arg(ap, int);
53910 + cmin = va_arg(ap, int);
53911 + wsec = va_arg(ap, int);
53912 + csec = va_arg(ap, int);
53913 + ulong1 = va_arg(ap, unsigned long);
53914 + cred = __task_cred(task);
53915 + pcred = __task_cred(task->real_parent);
53916 +
53917 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
53918 + }
53919 + break;
53920 + default:
53921 + gr_log_middle(audit, msg, ap);
53922 + }
53923 + va_end(ap);
53924 + gr_log_end(audit);
53925 + END_LOCKS(audit);
53926 +}
53927 diff -urNp linux-2.6.32.45/grsecurity/grsec_mem.c linux-2.6.32.45/grsecurity/grsec_mem.c
53928 --- linux-2.6.32.45/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
53929 +++ linux-2.6.32.45/grsecurity/grsec_mem.c 2011-04-17 15:56:46.000000000 -0400
53930 @@ -0,0 +1,33 @@
53931 +#include <linux/kernel.h>
53932 +#include <linux/sched.h>
53933 +#include <linux/mm.h>
53934 +#include <linux/mman.h>
53935 +#include <linux/grinternal.h>
53936 +
53937 +void
53938 +gr_handle_ioperm(void)
53939 +{
53940 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
53941 + return;
53942 +}
53943 +
53944 +void
53945 +gr_handle_iopl(void)
53946 +{
53947 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
53948 + return;
53949 +}
53950 +
53951 +void
53952 +gr_handle_mem_readwrite(u64 from, u64 to)
53953 +{
53954 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
53955 + return;
53956 +}
53957 +
53958 +void
53959 +gr_handle_vm86(void)
53960 +{
53961 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
53962 + return;
53963 +}
53964 diff -urNp linux-2.6.32.45/grsecurity/grsec_mount.c linux-2.6.32.45/grsecurity/grsec_mount.c
53965 --- linux-2.6.32.45/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
53966 +++ linux-2.6.32.45/grsecurity/grsec_mount.c 2011-06-20 19:47:03.000000000 -0400
53967 @@ -0,0 +1,62 @@
53968 +#include <linux/kernel.h>
53969 +#include <linux/sched.h>
53970 +#include <linux/mount.h>
53971 +#include <linux/grsecurity.h>
53972 +#include <linux/grinternal.h>
53973 +
53974 +void
53975 +gr_log_remount(const char *devname, const int retval)
53976 +{
53977 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
53978 + if (grsec_enable_mount && (retval >= 0))
53979 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
53980 +#endif
53981 + return;
53982 +}
53983 +
53984 +void
53985 +gr_log_unmount(const char *devname, const int retval)
53986 +{
53987 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
53988 + if (grsec_enable_mount && (retval >= 0))
53989 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
53990 +#endif
53991 + return;
53992 +}
53993 +
53994 +void
53995 +gr_log_mount(const char *from, const char *to, const int retval)
53996 +{
53997 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
53998 + if (grsec_enable_mount && (retval >= 0))
53999 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
54000 +#endif
54001 + return;
54002 +}
54003 +
54004 +int
54005 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
54006 +{
54007 +#ifdef CONFIG_GRKERNSEC_ROFS
54008 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
54009 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
54010 + return -EPERM;
54011 + } else
54012 + return 0;
54013 +#endif
54014 + return 0;
54015 +}
54016 +
54017 +int
54018 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
54019 +{
54020 +#ifdef CONFIG_GRKERNSEC_ROFS
54021 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
54022 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
54023 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
54024 + return -EPERM;
54025 + } else
54026 + return 0;
54027 +#endif
54028 + return 0;
54029 +}
54030 diff -urNp linux-2.6.32.45/grsecurity/grsec_pax.c linux-2.6.32.45/grsecurity/grsec_pax.c
54031 --- linux-2.6.32.45/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
54032 +++ linux-2.6.32.45/grsecurity/grsec_pax.c 2011-04-17 15:56:46.000000000 -0400
54033 @@ -0,0 +1,36 @@
54034 +#include <linux/kernel.h>
54035 +#include <linux/sched.h>
54036 +#include <linux/mm.h>
54037 +#include <linux/file.h>
54038 +#include <linux/grinternal.h>
54039 +#include <linux/grsecurity.h>
54040 +
54041 +void
54042 +gr_log_textrel(struct vm_area_struct * vma)
54043 +{
54044 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
54045 + if (grsec_enable_audit_textrel)
54046 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
54047 +#endif
54048 + return;
54049 +}
54050 +
54051 +void
54052 +gr_log_rwxmmap(struct file *file)
54053 +{
54054 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54055 + if (grsec_enable_log_rwxmaps)
54056 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
54057 +#endif
54058 + return;
54059 +}
54060 +
54061 +void
54062 +gr_log_rwxmprotect(struct file *file)
54063 +{
54064 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54065 + if (grsec_enable_log_rwxmaps)
54066 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
54067 +#endif
54068 + return;
54069 +}
54070 diff -urNp linux-2.6.32.45/grsecurity/grsec_ptrace.c linux-2.6.32.45/grsecurity/grsec_ptrace.c
54071 --- linux-2.6.32.45/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
54072 +++ linux-2.6.32.45/grsecurity/grsec_ptrace.c 2011-04-17 15:56:46.000000000 -0400
54073 @@ -0,0 +1,14 @@
54074 +#include <linux/kernel.h>
54075 +#include <linux/sched.h>
54076 +#include <linux/grinternal.h>
54077 +#include <linux/grsecurity.h>
54078 +
54079 +void
54080 +gr_audit_ptrace(struct task_struct *task)
54081 +{
54082 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
54083 + if (grsec_enable_audit_ptrace)
54084 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
54085 +#endif
54086 + return;
54087 +}
54088 diff -urNp linux-2.6.32.45/grsecurity/grsec_sig.c linux-2.6.32.45/grsecurity/grsec_sig.c
54089 --- linux-2.6.32.45/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
54090 +++ linux-2.6.32.45/grsecurity/grsec_sig.c 2011-06-29 19:40:31.000000000 -0400
54091 @@ -0,0 +1,205 @@
54092 +#include <linux/kernel.h>
54093 +#include <linux/sched.h>
54094 +#include <linux/delay.h>
54095 +#include <linux/grsecurity.h>
54096 +#include <linux/grinternal.h>
54097 +#include <linux/hardirq.h>
54098 +
54099 +char *signames[] = {
54100 + [SIGSEGV] = "Segmentation fault",
54101 + [SIGILL] = "Illegal instruction",
54102 + [SIGABRT] = "Abort",
54103 + [SIGBUS] = "Invalid alignment/Bus error"
54104 +};
54105 +
54106 +void
54107 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
54108 +{
54109 +#ifdef CONFIG_GRKERNSEC_SIGNAL
54110 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
54111 + (sig == SIGABRT) || (sig == SIGBUS))) {
54112 + if (t->pid == current->pid) {
54113 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
54114 + } else {
54115 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
54116 + }
54117 + }
54118 +#endif
54119 + return;
54120 +}
54121 +
54122 +int
54123 +gr_handle_signal(const struct task_struct *p, const int sig)
54124 +{
54125 +#ifdef CONFIG_GRKERNSEC
54126 + if (current->pid > 1 && gr_check_protected_task(p)) {
54127 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
54128 + return -EPERM;
54129 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
54130 + return -EPERM;
54131 + }
54132 +#endif
54133 + return 0;
54134 +}
54135 +
54136 +#ifdef CONFIG_GRKERNSEC
54137 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
54138 +
54139 +int gr_fake_force_sig(int sig, struct task_struct *t)
54140 +{
54141 + unsigned long int flags;
54142 + int ret, blocked, ignored;
54143 + struct k_sigaction *action;
54144 +
54145 + spin_lock_irqsave(&t->sighand->siglock, flags);
54146 + action = &t->sighand->action[sig-1];
54147 + ignored = action->sa.sa_handler == SIG_IGN;
54148 + blocked = sigismember(&t->blocked, sig);
54149 + if (blocked || ignored) {
54150 + action->sa.sa_handler = SIG_DFL;
54151 + if (blocked) {
54152 + sigdelset(&t->blocked, sig);
54153 + recalc_sigpending_and_wake(t);
54154 + }
54155 + }
54156 + if (action->sa.sa_handler == SIG_DFL)
54157 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
54158 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
54159 +
54160 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
54161 +
54162 + return ret;
54163 +}
54164 +#endif
54165 +
54166 +#ifdef CONFIG_GRKERNSEC_BRUTE
54167 +#define GR_USER_BAN_TIME (15 * 60)
54168 +
54169 +static int __get_dumpable(unsigned long mm_flags)
54170 +{
54171 + int ret;
54172 +
54173 + ret = mm_flags & MMF_DUMPABLE_MASK;
54174 + return (ret >= 2) ? 2 : ret;
54175 +}
54176 +#endif
54177 +
54178 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
54179 +{
54180 +#ifdef CONFIG_GRKERNSEC_BRUTE
54181 + uid_t uid = 0;
54182 +
54183 + if (!grsec_enable_brute)
54184 + return;
54185 +
54186 + rcu_read_lock();
54187 + read_lock(&tasklist_lock);
54188 + read_lock(&grsec_exec_file_lock);
54189 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
54190 + p->real_parent->brute = 1;
54191 + else {
54192 + const struct cred *cred = __task_cred(p), *cred2;
54193 + struct task_struct *tsk, *tsk2;
54194 +
54195 + if (!__get_dumpable(mm_flags) && cred->uid) {
54196 + struct user_struct *user;
54197 +
54198 + uid = cred->uid;
54199 +
54200 + /* this is put upon execution past expiration */
54201 + user = find_user(uid);
54202 + if (user == NULL)
54203 + goto unlock;
54204 + user->banned = 1;
54205 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
54206 + if (user->ban_expires == ~0UL)
54207 + user->ban_expires--;
54208 +
54209 + do_each_thread(tsk2, tsk) {
54210 + cred2 = __task_cred(tsk);
54211 + if (tsk != p && cred2->uid == uid)
54212 + gr_fake_force_sig(SIGKILL, tsk);
54213 + } while_each_thread(tsk2, tsk);
54214 + }
54215 + }
54216 +unlock:
54217 + read_unlock(&grsec_exec_file_lock);
54218 + read_unlock(&tasklist_lock);
54219 + rcu_read_unlock();
54220 +
54221 + if (uid)
54222 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
54223 +#endif
54224 + return;
54225 +}
54226 +
54227 +void gr_handle_brute_check(void)
54228 +{
54229 +#ifdef CONFIG_GRKERNSEC_BRUTE
54230 + if (current->brute)
54231 + msleep(30 * 1000);
54232 +#endif
54233 + return;
54234 +}
54235 +
54236 +void gr_handle_kernel_exploit(void)
54237 +{
54238 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
54239 + const struct cred *cred;
54240 + struct task_struct *tsk, *tsk2;
54241 + struct user_struct *user;
54242 + uid_t uid;
54243 +
54244 + if (in_irq() || in_serving_softirq() || in_nmi())
54245 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
54246 +
54247 + uid = current_uid();
54248 +
54249 + if (uid == 0)
54250 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
54251 + else {
54252 + /* kill all the processes of this user, hold a reference
54253 + to their creds struct, and prevent them from creating
54254 + another process until system reset
54255 + */
54256 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
54257 + /* we intentionally leak this ref */
54258 + user = get_uid(current->cred->user);
54259 + if (user) {
54260 + user->banned = 1;
54261 + user->ban_expires = ~0UL;
54262 + }
54263 +
54264 + read_lock(&tasklist_lock);
54265 + do_each_thread(tsk2, tsk) {
54266 + cred = __task_cred(tsk);
54267 + if (cred->uid == uid)
54268 + gr_fake_force_sig(SIGKILL, tsk);
54269 + } while_each_thread(tsk2, tsk);
54270 + read_unlock(&tasklist_lock);
54271 + }
54272 +#endif
54273 +}
54274 +
54275 +int __gr_process_user_ban(struct user_struct *user)
54276 +{
54277 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54278 + if (unlikely(user->banned)) {
54279 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
54280 + user->banned = 0;
54281 + user->ban_expires = 0;
54282 + free_uid(user);
54283 + } else
54284 + return -EPERM;
54285 + }
54286 +#endif
54287 + return 0;
54288 +}
54289 +
54290 +int gr_process_user_ban(void)
54291 +{
54292 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54293 + return __gr_process_user_ban(current->cred->user);
54294 +#endif
54295 + return 0;
54296 +}
54297 diff -urNp linux-2.6.32.45/grsecurity/grsec_sock.c linux-2.6.32.45/grsecurity/grsec_sock.c
54298 --- linux-2.6.32.45/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
54299 +++ linux-2.6.32.45/grsecurity/grsec_sock.c 2011-04-17 15:56:46.000000000 -0400
54300 @@ -0,0 +1,275 @@
54301 +#include <linux/kernel.h>
54302 +#include <linux/module.h>
54303 +#include <linux/sched.h>
54304 +#include <linux/file.h>
54305 +#include <linux/net.h>
54306 +#include <linux/in.h>
54307 +#include <linux/ip.h>
54308 +#include <net/sock.h>
54309 +#include <net/inet_sock.h>
54310 +#include <linux/grsecurity.h>
54311 +#include <linux/grinternal.h>
54312 +#include <linux/gracl.h>
54313 +
54314 +kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
54315 +EXPORT_SYMBOL(gr_cap_rtnetlink);
54316 +
54317 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
54318 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
54319 +
54320 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
54321 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
54322 +
54323 +#ifdef CONFIG_UNIX_MODULE
54324 +EXPORT_SYMBOL(gr_acl_handle_unix);
54325 +EXPORT_SYMBOL(gr_acl_handle_mknod);
54326 +EXPORT_SYMBOL(gr_handle_chroot_unix);
54327 +EXPORT_SYMBOL(gr_handle_create);
54328 +#endif
54329 +
54330 +#ifdef CONFIG_GRKERNSEC
54331 +#define gr_conn_table_size 32749
54332 +struct conn_table_entry {
54333 + struct conn_table_entry *next;
54334 + struct signal_struct *sig;
54335 +};
54336 +
54337 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
54338 +DEFINE_SPINLOCK(gr_conn_table_lock);
54339 +
54340 +extern const char * gr_socktype_to_name(unsigned char type);
54341 +extern const char * gr_proto_to_name(unsigned char proto);
54342 +extern const char * gr_sockfamily_to_name(unsigned char family);
54343 +
54344 +static __inline__ int
54345 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
54346 +{
54347 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
54348 +}
54349 +
54350 +static __inline__ int
54351 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
54352 + __u16 sport, __u16 dport)
54353 +{
54354 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
54355 + sig->gr_sport == sport && sig->gr_dport == dport))
54356 + return 1;
54357 + else
54358 + return 0;
54359 +}
54360 +
54361 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
54362 +{
54363 + struct conn_table_entry **match;
54364 + unsigned int index;
54365 +
54366 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
54367 + sig->gr_sport, sig->gr_dport,
54368 + gr_conn_table_size);
54369 +
54370 + newent->sig = sig;
54371 +
54372 + match = &gr_conn_table[index];
54373 + newent->next = *match;
54374 + *match = newent;
54375 +
54376 + return;
54377 +}
54378 +
54379 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
54380 +{
54381 + struct conn_table_entry *match, *last = NULL;
54382 + unsigned int index;
54383 +
54384 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
54385 + sig->gr_sport, sig->gr_dport,
54386 + gr_conn_table_size);
54387 +
54388 + match = gr_conn_table[index];
54389 + while (match && !conn_match(match->sig,
54390 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
54391 + sig->gr_dport)) {
54392 + last = match;
54393 + match = match->next;
54394 + }
54395 +
54396 + if (match) {
54397 + if (last)
54398 + last->next = match->next;
54399 + else
54400 + gr_conn_table[index] = NULL;
54401 + kfree(match);
54402 + }
54403 +
54404 + return;
54405 +}
54406 +
54407 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
54408 + __u16 sport, __u16 dport)
54409 +{
54410 + struct conn_table_entry *match;
54411 + unsigned int index;
54412 +
54413 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
54414 +
54415 + match = gr_conn_table[index];
54416 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
54417 + match = match->next;
54418 +
54419 + if (match)
54420 + return match->sig;
54421 + else
54422 + return NULL;
54423 +}
54424 +
54425 +#endif
54426 +
54427 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
54428 +{
54429 +#ifdef CONFIG_GRKERNSEC
54430 + struct signal_struct *sig = task->signal;
54431 + struct conn_table_entry *newent;
54432 +
54433 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
54434 + if (newent == NULL)
54435 + return;
54436 + /* no bh lock needed since we are called with bh disabled */
54437 + spin_lock(&gr_conn_table_lock);
54438 + gr_del_task_from_ip_table_nolock(sig);
54439 + sig->gr_saddr = inet->rcv_saddr;
54440 + sig->gr_daddr = inet->daddr;
54441 + sig->gr_sport = inet->sport;
54442 + sig->gr_dport = inet->dport;
54443 + gr_add_to_task_ip_table_nolock(sig, newent);
54444 + spin_unlock(&gr_conn_table_lock);
54445 +#endif
54446 + return;
54447 +}
54448 +
54449 +void gr_del_task_from_ip_table(struct task_struct *task)
54450 +{
54451 +#ifdef CONFIG_GRKERNSEC
54452 + spin_lock_bh(&gr_conn_table_lock);
54453 + gr_del_task_from_ip_table_nolock(task->signal);
54454 + spin_unlock_bh(&gr_conn_table_lock);
54455 +#endif
54456 + return;
54457 +}
54458 +
54459 +void
54460 +gr_attach_curr_ip(const struct sock *sk)
54461 +{
54462 +#ifdef CONFIG_GRKERNSEC
54463 + struct signal_struct *p, *set;
54464 + const struct inet_sock *inet = inet_sk(sk);
54465 +
54466 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
54467 + return;
54468 +
54469 + set = current->signal;
54470 +
54471 + spin_lock_bh(&gr_conn_table_lock);
54472 + p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
54473 + inet->dport, inet->sport);
54474 + if (unlikely(p != NULL)) {
54475 + set->curr_ip = p->curr_ip;
54476 + set->used_accept = 1;
54477 + gr_del_task_from_ip_table_nolock(p);
54478 + spin_unlock_bh(&gr_conn_table_lock);
54479 + return;
54480 + }
54481 + spin_unlock_bh(&gr_conn_table_lock);
54482 +
54483 + set->curr_ip = inet->daddr;
54484 + set->used_accept = 1;
54485 +#endif
54486 + return;
54487 +}
54488 +
54489 +int
54490 +gr_handle_sock_all(const int family, const int type, const int protocol)
54491 +{
54492 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
54493 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
54494 + (family != AF_UNIX)) {
54495 + if (family == AF_INET)
54496 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
54497 + else
54498 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
54499 + return -EACCES;
54500 + }
54501 +#endif
54502 + return 0;
54503 +}
54504 +
54505 +int
54506 +gr_handle_sock_server(const struct sockaddr *sck)
54507 +{
54508 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
54509 + if (grsec_enable_socket_server &&
54510 + in_group_p(grsec_socket_server_gid) &&
54511 + sck && (sck->sa_family != AF_UNIX) &&
54512 + (sck->sa_family != AF_LOCAL)) {
54513 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
54514 + return -EACCES;
54515 + }
54516 +#endif
54517 + return 0;
54518 +}
54519 +
54520 +int
54521 +gr_handle_sock_server_other(const struct sock *sck)
54522 +{
54523 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
54524 + if (grsec_enable_socket_server &&
54525 + in_group_p(grsec_socket_server_gid) &&
54526 + sck && (sck->sk_family != AF_UNIX) &&
54527 + (sck->sk_family != AF_LOCAL)) {
54528 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
54529 + return -EACCES;
54530 + }
54531 +#endif
54532 + return 0;
54533 +}
54534 +
54535 +int
54536 +gr_handle_sock_client(const struct sockaddr *sck)
54537 +{
54538 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
54539 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
54540 + sck && (sck->sa_family != AF_UNIX) &&
54541 + (sck->sa_family != AF_LOCAL)) {
54542 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
54543 + return -EACCES;
54544 + }
54545 +#endif
54546 + return 0;
54547 +}
54548 +
54549 +kernel_cap_t
54550 +gr_cap_rtnetlink(struct sock *sock)
54551 +{
54552 +#ifdef CONFIG_GRKERNSEC
54553 + if (!gr_acl_is_enabled())
54554 + return current_cap();
54555 + else if (sock->sk_protocol == NETLINK_ISCSI &&
54556 + cap_raised(current_cap(), CAP_SYS_ADMIN) &&
54557 + gr_is_capable(CAP_SYS_ADMIN))
54558 + return current_cap();
54559 + else if (sock->sk_protocol == NETLINK_AUDIT &&
54560 + cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
54561 + gr_is_capable(CAP_AUDIT_WRITE) &&
54562 + cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
54563 + gr_is_capable(CAP_AUDIT_CONTROL))
54564 + return current_cap();
54565 + else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
54566 + ((sock->sk_protocol == NETLINK_ROUTE) ?
54567 + gr_is_capable_nolog(CAP_NET_ADMIN) :
54568 + gr_is_capable(CAP_NET_ADMIN)))
54569 + return current_cap();
54570 + else
54571 + return __cap_empty_set;
54572 +#else
54573 + return current_cap();
54574 +#endif
54575 +}
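
For reference, the connection table above is a fixed-size array of singly-linked
buckets keyed on the (saddr, daddr, sport, dport) tuple: entries are pushed onto
the head of a bucket and removed by relinking their predecessor. A minimal
userspace sketch of the same pattern, assuming a hypothetical "entry" type with
the key collapsed to a single integer (illustrative only, not part of the patch):

	#include <stdlib.h>

	#define TABLE_SIZE 32749

	struct entry {
		struct entry *next;
		unsigned int key;
	};

	static struct entry *table[TABLE_SIZE];

	static unsigned int hash(unsigned int key)
	{
		return key % TABLE_SIZE;
	}

	/* push a caller-allocated entry onto the head of its bucket */
	static void insert(struct entry *e)
	{
		unsigned int i = hash(e->key);

		e->next = table[i];
		table[i] = e;
	}

	/* walk the chain remembering the predecessor, then unlink and free */
	static void remove_key(unsigned int key)
	{
		unsigned int i = hash(key);
		struct entry *e = table[i], *prev = NULL;

		while (e && e->key != key) {
			prev = e;
			e = e->next;
		}
		if (!e)
			return;
		if (prev)
			prev->next = e->next;
		else
			table[i] = e->next;	/* keep any colliding entries behind the head */
		free(e);
	}
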
54576 diff -urNp linux-2.6.32.45/grsecurity/grsec_sysctl.c linux-2.6.32.45/grsecurity/grsec_sysctl.c
54577 --- linux-2.6.32.45/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
54578 +++ linux-2.6.32.45/grsecurity/grsec_sysctl.c 2011-08-11 19:57:54.000000000 -0400
54579 @@ -0,0 +1,479 @@
54580 +#include <linux/kernel.h>
54581 +#include <linux/sched.h>
54582 +#include <linux/sysctl.h>
54583 +#include <linux/grsecurity.h>
54584 +#include <linux/grinternal.h>
54585 +
54586 +int
54587 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
54588 +{
54589 +#ifdef CONFIG_GRKERNSEC_SYSCTL
54590 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
54591 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
54592 + return -EACCES;
54593 + }
54594 +#endif
54595 + return 0;
54596 +}
54597 +
54598 +#ifdef CONFIG_GRKERNSEC_ROFS
54599 +static int __maybe_unused one = 1;
54600 +#endif
54601 +
54602 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
54603 +ctl_table grsecurity_table[] = {
54604 +#ifdef CONFIG_GRKERNSEC_SYSCTL
54605 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
54606 +#ifdef CONFIG_GRKERNSEC_IO
54607 + {
54608 + .ctl_name = CTL_UNNUMBERED,
54609 + .procname = "disable_priv_io",
54610 + .data = &grsec_disable_privio,
54611 + .maxlen = sizeof(int),
54612 + .mode = 0600,
54613 + .proc_handler = &proc_dointvec,
54614 + },
54615 +#endif
54616 +#endif
54617 +#ifdef CONFIG_GRKERNSEC_LINK
54618 + {
54619 + .ctl_name = CTL_UNNUMBERED,
54620 + .procname = "linking_restrictions",
54621 + .data = &grsec_enable_link,
54622 + .maxlen = sizeof(int),
54623 + .mode = 0600,
54624 + .proc_handler = &proc_dointvec,
54625 + },
54626 +#endif
54627 +#ifdef CONFIG_GRKERNSEC_BRUTE
54628 + {
54629 + .ctl_name = CTL_UNNUMBERED,
54630 + .procname = "deter_bruteforce",
54631 + .data = &grsec_enable_brute,
54632 + .maxlen = sizeof(int),
54633 + .mode = 0600,
54634 + .proc_handler = &proc_dointvec,
54635 + },
54636 +#endif
54637 +#ifdef CONFIG_GRKERNSEC_FIFO
54638 + {
54639 + .ctl_name = CTL_UNNUMBERED,
54640 + .procname = "fifo_restrictions",
54641 + .data = &grsec_enable_fifo,
54642 + .maxlen = sizeof(int),
54643 + .mode = 0600,
54644 + .proc_handler = &proc_dointvec,
54645 + },
54646 +#endif
54647 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
54648 + {
54649 + .ctl_name = CTL_UNNUMBERED,
54650 + .procname = "ip_blackhole",
54651 + .data = &grsec_enable_blackhole,
54652 + .maxlen = sizeof(int),
54653 + .mode = 0600,
54654 + .proc_handler = &proc_dointvec,
54655 + },
54656 + {
54657 + .ctl_name = CTL_UNNUMBERED,
54658 + .procname = "lastack_retries",
54659 + .data = &grsec_lastack_retries,
54660 + .maxlen = sizeof(int),
54661 + .mode = 0600,
54662 + .proc_handler = &proc_dointvec,
54663 + },
54664 +#endif
54665 +#ifdef CONFIG_GRKERNSEC_EXECLOG
54666 + {
54667 + .ctl_name = CTL_UNNUMBERED,
54668 + .procname = "exec_logging",
54669 + .data = &grsec_enable_execlog,
54670 + .maxlen = sizeof(int),
54671 + .mode = 0600,
54672 + .proc_handler = &proc_dointvec,
54673 + },
54674 +#endif
54675 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54676 + {
54677 + .ctl_name = CTL_UNNUMBERED,
54678 + .procname = "rwxmap_logging",
54679 + .data = &grsec_enable_log_rwxmaps,
54680 + .maxlen = sizeof(int),
54681 + .mode = 0600,
54682 + .proc_handler = &proc_dointvec,
54683 + },
54684 +#endif
54685 +#ifdef CONFIG_GRKERNSEC_SIGNAL
54686 + {
54687 + .ctl_name = CTL_UNNUMBERED,
54688 + .procname = "signal_logging",
54689 + .data = &grsec_enable_signal,
54690 + .maxlen = sizeof(int),
54691 + .mode = 0600,
54692 + .proc_handler = &proc_dointvec,
54693 + },
54694 +#endif
54695 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
54696 + {
54697 + .ctl_name = CTL_UNNUMBERED,
54698 + .procname = "forkfail_logging",
54699 + .data = &grsec_enable_forkfail,
54700 + .maxlen = sizeof(int),
54701 + .mode = 0600,
54702 + .proc_handler = &proc_dointvec,
54703 + },
54704 +#endif
54705 +#ifdef CONFIG_GRKERNSEC_TIME
54706 + {
54707 + .ctl_name = CTL_UNNUMBERED,
54708 + .procname = "timechange_logging",
54709 + .data = &grsec_enable_time,
54710 + .maxlen = sizeof(int),
54711 + .mode = 0600,
54712 + .proc_handler = &proc_dointvec,
54713 + },
54714 +#endif
54715 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
54716 + {
54717 + .ctl_name = CTL_UNNUMBERED,
54718 + .procname = "chroot_deny_shmat",
54719 + .data = &grsec_enable_chroot_shmat,
54720 + .maxlen = sizeof(int),
54721 + .mode = 0600,
54722 + .proc_handler = &proc_dointvec,
54723 + },
54724 +#endif
54725 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
54726 + {
54727 + .ctl_name = CTL_UNNUMBERED,
54728 + .procname = "chroot_deny_unix",
54729 + .data = &grsec_enable_chroot_unix,
54730 + .maxlen = sizeof(int),
54731 + .mode = 0600,
54732 + .proc_handler = &proc_dointvec,
54733 + },
54734 +#endif
54735 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
54736 + {
54737 + .ctl_name = CTL_UNNUMBERED,
54738 + .procname = "chroot_deny_mount",
54739 + .data = &grsec_enable_chroot_mount,
54740 + .maxlen = sizeof(int),
54741 + .mode = 0600,
54742 + .proc_handler = &proc_dointvec,
54743 + },
54744 +#endif
54745 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
54746 + {
54747 + .ctl_name = CTL_UNNUMBERED,
54748 + .procname = "chroot_deny_fchdir",
54749 + .data = &grsec_enable_chroot_fchdir,
54750 + .maxlen = sizeof(int),
54751 + .mode = 0600,
54752 + .proc_handler = &proc_dointvec,
54753 + },
54754 +#endif
54755 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
54756 + {
54757 + .ctl_name = CTL_UNNUMBERED,
54758 + .procname = "chroot_deny_chroot",
54759 + .data = &grsec_enable_chroot_double,
54760 + .maxlen = sizeof(int),
54761 + .mode = 0600,
54762 + .proc_handler = &proc_dointvec,
54763 + },
54764 +#endif
54765 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
54766 + {
54767 + .ctl_name = CTL_UNNUMBERED,
54768 + .procname = "chroot_deny_pivot",
54769 + .data = &grsec_enable_chroot_pivot,
54770 + .maxlen = sizeof(int),
54771 + .mode = 0600,
54772 + .proc_handler = &proc_dointvec,
54773 + },
54774 +#endif
54775 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
54776 + {
54777 + .ctl_name = CTL_UNNUMBERED,
54778 + .procname = "chroot_enforce_chdir",
54779 + .data = &grsec_enable_chroot_chdir,
54780 + .maxlen = sizeof(int),
54781 + .mode = 0600,
54782 + .proc_handler = &proc_dointvec,
54783 + },
54784 +#endif
54785 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
54786 + {
54787 + .ctl_name = CTL_UNNUMBERED,
54788 + .procname = "chroot_deny_chmod",
54789 + .data = &grsec_enable_chroot_chmod,
54790 + .maxlen = sizeof(int),
54791 + .mode = 0600,
54792 + .proc_handler = &proc_dointvec,
54793 + },
54794 +#endif
54795 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
54796 + {
54797 + .ctl_name = CTL_UNNUMBERED,
54798 + .procname = "chroot_deny_mknod",
54799 + .data = &grsec_enable_chroot_mknod,
54800 + .maxlen = sizeof(int),
54801 + .mode = 0600,
54802 + .proc_handler = &proc_dointvec,
54803 + },
54804 +#endif
54805 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54806 + {
54807 + .ctl_name = CTL_UNNUMBERED,
54808 + .procname = "chroot_restrict_nice",
54809 + .data = &grsec_enable_chroot_nice,
54810 + .maxlen = sizeof(int),
54811 + .mode = 0600,
54812 + .proc_handler = &proc_dointvec,
54813 + },
54814 +#endif
54815 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
54816 + {
54817 + .ctl_name = CTL_UNNUMBERED,
54818 + .procname = "chroot_execlog",
54819 + .data = &grsec_enable_chroot_execlog,
54820 + .maxlen = sizeof(int),
54821 + .mode = 0600,
54822 + .proc_handler = &proc_dointvec,
54823 + },
54824 +#endif
54825 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54826 + {
54827 + .ctl_name = CTL_UNNUMBERED,
54828 + .procname = "chroot_caps",
54829 + .data = &grsec_enable_chroot_caps,
54830 + .maxlen = sizeof(int),
54831 + .mode = 0600,
54832 + .proc_handler = &proc_dointvec,
54833 + },
54834 +#endif
54835 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
54836 + {
54837 + .ctl_name = CTL_UNNUMBERED,
54838 + .procname = "chroot_deny_sysctl",
54839 + .data = &grsec_enable_chroot_sysctl,
54840 + .maxlen = sizeof(int),
54841 + .mode = 0600,
54842 + .proc_handler = &proc_dointvec,
54843 + },
54844 +#endif
54845 +#ifdef CONFIG_GRKERNSEC_TPE
54846 + {
54847 + .ctl_name = CTL_UNNUMBERED,
54848 + .procname = "tpe",
54849 + .data = &grsec_enable_tpe,
54850 + .maxlen = sizeof(int),
54851 + .mode = 0600,
54852 + .proc_handler = &proc_dointvec,
54853 + },
54854 + {
54855 + .ctl_name = CTL_UNNUMBERED,
54856 + .procname = "tpe_gid",
54857 + .data = &grsec_tpe_gid,
54858 + .maxlen = sizeof(int),
54859 + .mode = 0600,
54860 + .proc_handler = &proc_dointvec,
54861 + },
54862 +#endif
54863 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
54864 + {
54865 + .ctl_name = CTL_UNNUMBERED,
54866 + .procname = "tpe_invert",
54867 + .data = &grsec_enable_tpe_invert,
54868 + .maxlen = sizeof(int),
54869 + .mode = 0600,
54870 + .proc_handler = &proc_dointvec,
54871 + },
54872 +#endif
54873 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
54874 + {
54875 + .ctl_name = CTL_UNNUMBERED,
54876 + .procname = "tpe_restrict_all",
54877 + .data = &grsec_enable_tpe_all,
54878 + .maxlen = sizeof(int),
54879 + .mode = 0600,
54880 + .proc_handler = &proc_dointvec,
54881 + },
54882 +#endif
54883 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
54884 + {
54885 + .ctl_name = CTL_UNNUMBERED,
54886 + .procname = "socket_all",
54887 + .data = &grsec_enable_socket_all,
54888 + .maxlen = sizeof(int),
54889 + .mode = 0600,
54890 + .proc_handler = &proc_dointvec,
54891 + },
54892 + {
54893 + .ctl_name = CTL_UNNUMBERED,
54894 + .procname = "socket_all_gid",
54895 + .data = &grsec_socket_all_gid,
54896 + .maxlen = sizeof(int),
54897 + .mode = 0600,
54898 + .proc_handler = &proc_dointvec,
54899 + },
54900 +#endif
54901 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
54902 + {
54903 + .ctl_name = CTL_UNNUMBERED,
54904 + .procname = "socket_client",
54905 + .data = &grsec_enable_socket_client,
54906 + .maxlen = sizeof(int),
54907 + .mode = 0600,
54908 + .proc_handler = &proc_dointvec,
54909 + },
54910 + {
54911 + .ctl_name = CTL_UNNUMBERED,
54912 + .procname = "socket_client_gid",
54913 + .data = &grsec_socket_client_gid,
54914 + .maxlen = sizeof(int),
54915 + .mode = 0600,
54916 + .proc_handler = &proc_dointvec,
54917 + },
54918 +#endif
54919 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
54920 + {
54921 + .ctl_name = CTL_UNNUMBERED,
54922 + .procname = "socket_server",
54923 + .data = &grsec_enable_socket_server,
54924 + .maxlen = sizeof(int),
54925 + .mode = 0600,
54926 + .proc_handler = &proc_dointvec,
54927 + },
54928 + {
54929 + .ctl_name = CTL_UNNUMBERED,
54930 + .procname = "socket_server_gid",
54931 + .data = &grsec_socket_server_gid,
54932 + .maxlen = sizeof(int),
54933 + .mode = 0600,
54934 + .proc_handler = &proc_dointvec,
54935 + },
54936 +#endif
54937 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
54938 + {
54939 + .ctl_name = CTL_UNNUMBERED,
54940 + .procname = "audit_group",
54941 + .data = &grsec_enable_group,
54942 + .maxlen = sizeof(int),
54943 + .mode = 0600,
54944 + .proc_handler = &proc_dointvec,
54945 + },
54946 + {
54947 + .ctl_name = CTL_UNNUMBERED,
54948 + .procname = "audit_gid",
54949 + .data = &grsec_audit_gid,
54950 + .maxlen = sizeof(int),
54951 + .mode = 0600,
54952 + .proc_handler = &proc_dointvec,
54953 + },
54954 +#endif
54955 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
54956 + {
54957 + .ctl_name = CTL_UNNUMBERED,
54958 + .procname = "audit_chdir",
54959 + .data = &grsec_enable_chdir,
54960 + .maxlen = sizeof(int),
54961 + .mode = 0600,
54962 + .proc_handler = &proc_dointvec,
54963 + },
54964 +#endif
54965 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54966 + {
54967 + .ctl_name = CTL_UNNUMBERED,
54968 + .procname = "audit_mount",
54969 + .data = &grsec_enable_mount,
54970 + .maxlen = sizeof(int),
54971 + .mode = 0600,
54972 + .proc_handler = &proc_dointvec,
54973 + },
54974 +#endif
54975 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
54976 + {
54977 + .ctl_name = CTL_UNNUMBERED,
54978 + .procname = "audit_textrel",
54979 + .data = &grsec_enable_audit_textrel,
54980 + .maxlen = sizeof(int),
54981 + .mode = 0600,
54982 + .proc_handler = &proc_dointvec,
54983 + },
54984 +#endif
54985 +#ifdef CONFIG_GRKERNSEC_DMESG
54986 + {
54987 + .ctl_name = CTL_UNNUMBERED,
54988 + .procname = "dmesg",
54989 + .data = &grsec_enable_dmesg,
54990 + .maxlen = sizeof(int),
54991 + .mode = 0600,
54992 + .proc_handler = &proc_dointvec,
54993 + },
54994 +#endif
54995 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
54996 + {
54997 + .ctl_name = CTL_UNNUMBERED,
54998 + .procname = "chroot_findtask",
54999 + .data = &grsec_enable_chroot_findtask,
55000 + .maxlen = sizeof(int),
55001 + .mode = 0600,
55002 + .proc_handler = &proc_dointvec,
55003 + },
55004 +#endif
55005 +#ifdef CONFIG_GRKERNSEC_RESLOG
55006 + {
55007 + .ctl_name = CTL_UNNUMBERED,
55008 + .procname = "resource_logging",
55009 + .data = &grsec_resource_logging,
55010 + .maxlen = sizeof(int),
55011 + .mode = 0600,
55012 + .proc_handler = &proc_dointvec,
55013 + },
55014 +#endif
55015 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55016 + {
55017 + .ctl_name = CTL_UNNUMBERED,
55018 + .procname = "audit_ptrace",
55019 + .data = &grsec_enable_audit_ptrace,
55020 + .maxlen = sizeof(int),
55021 + .mode = 0600,
55022 + .proc_handler = &proc_dointvec,
55023 + },
55024 +#endif
55025 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55026 + {
55027 + .ctl_name = CTL_UNNUMBERED,
55028 + .procname = "harden_ptrace",
55029 + .data = &grsec_enable_harden_ptrace,
55030 + .maxlen = sizeof(int),
55031 + .mode = 0600,
55032 + .proc_handler = &proc_dointvec,
55033 + },
55034 +#endif
55035 + {
55036 + .ctl_name = CTL_UNNUMBERED,
55037 + .procname = "grsec_lock",
55038 + .data = &grsec_lock,
55039 + .maxlen = sizeof(int),
55040 + .mode = 0600,
55041 + .proc_handler = &proc_dointvec,
55042 + },
55043 +#endif
55044 +#ifdef CONFIG_GRKERNSEC_ROFS
55045 + {
55046 + .ctl_name = CTL_UNNUMBERED,
55047 + .procname = "romount_protect",
55048 + .data = &grsec_enable_rofs,
55049 + .maxlen = sizeof(int),
55050 + .mode = 0600,
55051 + .proc_handler = &proc_dointvec_minmax,
55052 + .extra1 = &one,
55053 + .extra2 = &one,
55054 + },
55055 +#endif
55056 + { .ctl_name = 0 }
55057 +};
55058 +#endif
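
For context, grsecurity_table above is a 2.6.32-era ctl_table; once wired into the
kernel's sysctl tree its entries show up as files under /proc/sys (in grsecurity
kernels, under kernel/grsecurity). A minimal sketch of how such a table could be
registered with the register_sysctl_paths() API of that kernel generation, using
hypothetical "example_*" names (the actual patch hooks grsecurity_table up
elsewhere):

	#include <linux/sysctl.h>
	#include <linux/errno.h>
	#include <linux/init.h>

	static int example_enable;

	static ctl_table example_table[] = {
		{
			.ctl_name	= CTL_UNNUMBERED,
			.procname	= "example_enable",
			.data		= &example_enable,
			.maxlen		= sizeof(int),
			.mode		= 0600,
			.proc_handler	= &proc_dointvec,
		},
		{ .ctl_name = 0 }
	};

	/* creates /proc/sys/kernel/example_dir/example_enable */
	static const struct ctl_path example_path[] = {
		{ .procname = "kernel",      .ctl_name = CTL_KERN },
		{ .procname = "example_dir", .ctl_name = CTL_UNNUMBERED },
		{ }
	};

	static struct ctl_table_header *example_header;

	static int __init example_sysctl_init(void)
	{
		example_header = register_sysctl_paths(example_path, example_table);
		return example_header ? 0 : -ENOMEM;
	}
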
55059 diff -urNp linux-2.6.32.45/grsecurity/grsec_time.c linux-2.6.32.45/grsecurity/grsec_time.c
55060 --- linux-2.6.32.45/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
55061 +++ linux-2.6.32.45/grsecurity/grsec_time.c 2011-04-17 15:56:46.000000000 -0400
55062 @@ -0,0 +1,16 @@
55063 +#include <linux/kernel.h>
55064 +#include <linux/sched.h>
55065 +#include <linux/grinternal.h>
55066 +#include <linux/module.h>
55067 +
55068 +void
55069 +gr_log_timechange(void)
55070 +{
55071 +#ifdef CONFIG_GRKERNSEC_TIME
55072 + if (grsec_enable_time)
55073 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
55074 +#endif
55075 + return;
55076 +}
55077 +
55078 +EXPORT_SYMBOL(gr_log_timechange);
55079 diff -urNp linux-2.6.32.45/grsecurity/grsec_tpe.c linux-2.6.32.45/grsecurity/grsec_tpe.c
55080 --- linux-2.6.32.45/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
55081 +++ linux-2.6.32.45/grsecurity/grsec_tpe.c 2011-04-17 15:56:46.000000000 -0400
55082 @@ -0,0 +1,39 @@
55083 +#include <linux/kernel.h>
55084 +#include <linux/sched.h>
55085 +#include <linux/file.h>
55086 +#include <linux/fs.h>
55087 +#include <linux/grinternal.h>
55088 +
55089 +extern int gr_acl_tpe_check(void);
55090 +
55091 +int
55092 +gr_tpe_allow(const struct file *file)
55093 +{
55094 +#ifdef CONFIG_GRKERNSEC
55095 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
55096 + const struct cred *cred = current_cred();
55097 +
55098 + if (cred->uid && ((grsec_enable_tpe &&
55099 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55100 + ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
55101 + (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
55102 +#else
55103 + in_group_p(grsec_tpe_gid)
55104 +#endif
55105 + ) || gr_acl_tpe_check()) &&
55106 + (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
55107 + (inode->i_mode & S_IWOTH))))) {
55108 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55109 + return 0;
55110 + }
55111 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
55112 + if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
55113 + ((inode->i_uid && (inode->i_uid != cred->uid)) ||
55114 + (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
55115 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55116 + return 0;
55117 + }
55118 +#endif
55119 +#endif
55120 + return 1;
55121 +}
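
Stripped of the configuration conditionals, the decision in gr_tpe_allow() above
reduces to two checks on the directory containing the binary being executed. A
simplified restatement as a pure predicate, with hypothetical structure and field
names (illustrative only):

	#include <stdbool.h>

	struct tpe_ctx {
		bool non_root;          /* cred->uid != 0 */
		bool untrusted;         /* TPE gid / invert / RBAC check applies */
		bool tpe_all;           /* "restrict all" variant enabled */
		bool dir_group_or_other_writable;
		unsigned int dir_uid;   /* owner of the binary's parent directory */
		unsigned int uid;       /* caller's uid */
	};

	static bool tpe_allows_exec(const struct tpe_ctx *c)
	{
		/* base TPE: untrusted users may execute only from root-owned
		 * directories that are not group- or other-writable */
		if (c->non_root && c->untrusted &&
		    (c->dir_uid != 0 || c->dir_group_or_other_writable))
			return false;

		/* TPE "restrict all": any non-root user may execute only from
		 * directories owned by themselves or root, again not group-
		 * or other-writable */
		if (c->non_root && c->tpe_all &&
		    ((c->dir_uid && c->dir_uid != c->uid) ||
		     c->dir_group_or_other_writable))
			return false;

		return true;
	}
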
55122 diff -urNp linux-2.6.32.45/grsecurity/grsum.c linux-2.6.32.45/grsecurity/grsum.c
55123 --- linux-2.6.32.45/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
55124 +++ linux-2.6.32.45/grsecurity/grsum.c 2011-04-17 15:56:46.000000000 -0400
55125 @@ -0,0 +1,61 @@
55126 +#include <linux/err.h>
55127 +#include <linux/kernel.h>
55128 +#include <linux/sched.h>
55129 +#include <linux/mm.h>
55130 +#include <linux/scatterlist.h>
55131 +#include <linux/crypto.h>
55132 +#include <linux/gracl.h>
55133 +
55134 +
55135 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
55136 +#error "crypto and sha256 must be built into the kernel"
55137 +#endif
55138 +
55139 +int
55140 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
55141 +{
55142 + char *p;
55143 + struct crypto_hash *tfm;
55144 + struct hash_desc desc;
55145 + struct scatterlist sg;
55146 + unsigned char temp_sum[GR_SHA_LEN];
55147 + volatile int retval = 0;
55148 + volatile int dummy = 0;
55149 + unsigned int i;
55150 +
55151 + sg_init_table(&sg, 1);
55152 +
55153 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
55154 + if (IS_ERR(tfm)) {
55155 + /* should never happen, since sha256 should be built in */
55156 + return 1;
55157 + }
55158 +
55159 + desc.tfm = tfm;
55160 + desc.flags = 0;
55161 +
55162 + crypto_hash_init(&desc);
55163 +
55164 + p = salt;
55165 + sg_set_buf(&sg, p, GR_SALT_LEN);
55166 + crypto_hash_update(&desc, &sg, sg.length);
55167 +
55168 + p = entry->pw;
55169 + sg_set_buf(&sg, p, strlen(p));
55170 +
55171 + crypto_hash_update(&desc, &sg, sg.length);
55172 +
55173 + crypto_hash_final(&desc, temp_sum);
55174 +
55175 + memset(entry->pw, 0, GR_PW_LEN);
55176 +
55177 + for (i = 0; i < GR_SHA_LEN; i++)
55178 + if (sum[i] != temp_sum[i])
55179 + retval = 1;
55180 + else
55181 + dummy = 1; // waste a cycle
55182 +
55183 + crypto_free_hash(tfm);
55184 +
55185 + return retval;
55186 +}
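
The byte-by-byte loop with the "waste a cycle" dummy assignment in chkpw() above
is a constant-time comparison: every byte of the two digests is examined even
after a mismatch has been found, so the time taken does not reveal where they
first diverge. A minimal standalone sketch of the same idea (illustrative only,
not the patch's code):

	/* returns 0 if the two buffers match, 1 otherwise, in time that
	 * depends only on len, never on where the first mismatch occurs */
	static int const_time_memcmp(const unsigned char *a,
				     const unsigned char *b, unsigned int len)
	{
		unsigned char diff = 0;
		unsigned int i;

		for (i = 0; i < len; i++)
			diff |= a[i] ^ b[i];

		return diff != 0;
	}
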
55187 diff -urNp linux-2.6.32.45/grsecurity/Kconfig linux-2.6.32.45/grsecurity/Kconfig
55188 --- linux-2.6.32.45/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
55189 +++ linux-2.6.32.45/grsecurity/Kconfig 2011-08-17 19:04:25.000000000 -0400
55190 @@ -0,0 +1,1037 @@
55191 +#
55192 +# grsecurity configuration
55193 +#
55194 +
55195 +menu "Grsecurity"
55196 +
55197 +config GRKERNSEC
55198 + bool "Grsecurity"
55199 + select CRYPTO
55200 + select CRYPTO_SHA256
55201 + help
55202 + If you say Y here, you will be able to configure many features
55203 + that will enhance the security of your system. It is highly
55204 + recommended that you say Y here and read through the help
55205 + for each option so that you fully understand the features and
55206 + can evaluate their usefulness for your machine.
55207 +
55208 +choice
55209 + prompt "Security Level"
55210 + depends on GRKERNSEC
55211 + default GRKERNSEC_CUSTOM
55212 +
55213 +config GRKERNSEC_LOW
55214 + bool "Low"
55215 + select GRKERNSEC_LINK
55216 + select GRKERNSEC_FIFO
55217 + select GRKERNSEC_RANDNET
55218 + select GRKERNSEC_DMESG
55219 + select GRKERNSEC_CHROOT
55220 + select GRKERNSEC_CHROOT_CHDIR
55221 +
55222 + help
55223 + If you choose this option, several of the grsecurity options will
55224 + be enabled that will give you greater protection against a number
55225 + of attacks, while assuring that none of your software will have any
55226 + conflicts with the additional security measures. If you run a lot
55227 + of unusual software, or you are having problems with the higher
55228 + security levels, you should say Y here. With this option, the
55229 + following features are enabled:
55230 +
55231 + - Linking restrictions
55232 + - FIFO restrictions
55233 + - Restricted dmesg
55234 + - Enforced chdir("/") on chroot
55235 + - Runtime module disabling
55236 +
55237 +config GRKERNSEC_MEDIUM
55238 + bool "Medium"
55239 + select PAX
55240 + select PAX_EI_PAX
55241 + select PAX_PT_PAX_FLAGS
55242 + select PAX_HAVE_ACL_FLAGS
55243 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55244 + select GRKERNSEC_CHROOT
55245 + select GRKERNSEC_CHROOT_SYSCTL
55246 + select GRKERNSEC_LINK
55247 + select GRKERNSEC_FIFO
55248 + select GRKERNSEC_DMESG
55249 + select GRKERNSEC_RANDNET
55250 + select GRKERNSEC_FORKFAIL
55251 + select GRKERNSEC_TIME
55252 + select GRKERNSEC_SIGNAL
55253 + select GRKERNSEC_CHROOT
55254 + select GRKERNSEC_CHROOT_UNIX
55255 + select GRKERNSEC_CHROOT_MOUNT
55256 + select GRKERNSEC_CHROOT_PIVOT
55257 + select GRKERNSEC_CHROOT_DOUBLE
55258 + select GRKERNSEC_CHROOT_CHDIR
55259 + select GRKERNSEC_CHROOT_MKNOD
55260 + select GRKERNSEC_PROC
55261 + select GRKERNSEC_PROC_USERGROUP
55262 + select PAX_RANDUSTACK
55263 + select PAX_ASLR
55264 + select PAX_RANDMMAP
55265 + select PAX_REFCOUNT if (X86 || SPARC64)
55266 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55267 +
55268 + help
55269 + If you say Y here, several features in addition to those included
55270 + in the low additional security level will be enabled. These
55271 + features provide even more security to your system, though in rare
55272 + cases they may be incompatible with very old or poorly written
55273 + software. If you enable this option, make sure that your auth
55274 + service (identd) is running as gid 1001. With this option,
55275 + the following features (in addition to those provided in the
55276 + low additional security level) will be enabled:
55277 +
55278 + - Failed fork logging
55279 + - Time change logging
55280 + - Signal logging
55281 + - Deny mounts in chroot
55282 + - Deny double chrooting
55283 + - Deny sysctl writes in chroot
55284 + - Deny mknod in chroot
55285 + - Deny access to abstract AF_UNIX sockets out of chroot
55286 + - Deny pivot_root in chroot
55287 + - Denied writes of /dev/kmem, /dev/mem, and /dev/port
55288 + - /proc restrictions with special GID set to 10 (usually wheel)
55289 + - Address Space Layout Randomization (ASLR)
55290 + - Prevent exploitation of most refcount overflows
55291 + - Bounds checking of copying between the kernel and userland
55292 +
55293 +config GRKERNSEC_HIGH
55294 + bool "High"
55295 + select GRKERNSEC_LINK
55296 + select GRKERNSEC_FIFO
55297 + select GRKERNSEC_DMESG
55298 + select GRKERNSEC_FORKFAIL
55299 + select GRKERNSEC_TIME
55300 + select GRKERNSEC_SIGNAL
55301 + select GRKERNSEC_CHROOT
55302 + select GRKERNSEC_CHROOT_SHMAT
55303 + select GRKERNSEC_CHROOT_UNIX
55304 + select GRKERNSEC_CHROOT_MOUNT
55305 + select GRKERNSEC_CHROOT_FCHDIR
55306 + select GRKERNSEC_CHROOT_PIVOT
55307 + select GRKERNSEC_CHROOT_DOUBLE
55308 + select GRKERNSEC_CHROOT_CHDIR
55309 + select GRKERNSEC_CHROOT_MKNOD
55310 + select GRKERNSEC_CHROOT_CAPS
55311 + select GRKERNSEC_CHROOT_SYSCTL
55312 + select GRKERNSEC_CHROOT_FINDTASK
55313 + select GRKERNSEC_SYSFS_RESTRICT
55314 + select GRKERNSEC_PROC
55315 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55316 + select GRKERNSEC_HIDESYM
55317 + select GRKERNSEC_BRUTE
55318 + select GRKERNSEC_PROC_USERGROUP
55319 + select GRKERNSEC_KMEM
55320 + select GRKERNSEC_RESLOG
55321 + select GRKERNSEC_RANDNET
55322 + select GRKERNSEC_PROC_ADD
55323 + select GRKERNSEC_CHROOT_CHMOD
55324 + select GRKERNSEC_CHROOT_NICE
55325 + select GRKERNSEC_AUDIT_MOUNT
55326 + select GRKERNSEC_MODHARDEN if (MODULES)
55327 + select GRKERNSEC_HARDEN_PTRACE
55328 + select GRKERNSEC_VM86 if (X86_32)
55329 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
55330 + select PAX
55331 + select PAX_RANDUSTACK
55332 + select PAX_ASLR
55333 + select PAX_RANDMMAP
55334 + select PAX_NOEXEC
55335 + select PAX_MPROTECT
55336 + select PAX_EI_PAX
55337 + select PAX_PT_PAX_FLAGS
55338 + select PAX_HAVE_ACL_FLAGS
55339 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
55340 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
55341 + select PAX_RANDKSTACK if (X86_TSC && X86)
55342 + select PAX_SEGMEXEC if (X86_32)
55343 + select PAX_PAGEEXEC
55344 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
55345 + select PAX_EMUTRAMP if (PARISC)
55346 + select PAX_EMUSIGRT if (PARISC)
55347 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
55348 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
55349 + select PAX_REFCOUNT if (X86 || SPARC64)
55350 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55351 + help
55352 + If you say Y here, many of the features of grsecurity will be
55353 + enabled, which will protect you against many kinds of attacks
55354 + against your system. The heightened security comes at a cost
55355 + of an increased chance of incompatibilities with rare software
55356 + on your machine. Since this security level enables PaX, you should
55357 + view <http://pax.grsecurity.net> and read about the PaX
55358 + project. While you are there, download chpax and run it on
55359 + binaries that cause problems with PaX. Also remember that
55360 + since the /proc restrictions are enabled, you must run your
55361 + identd as gid 1001. This security level enables the following
55362 + features in addition to those listed in the low and medium
55363 + security levels:
55364 +
55365 + - Additional /proc restrictions
55366 + - Chmod restrictions in chroot
55367 + - No signals, ptrace, or viewing of processes outside of chroot
55368 + - Capability restrictions in chroot
55369 + - Deny fchdir out of chroot
55370 + - Priority restrictions in chroot
55371 + - Segmentation-based implementation of PaX
55372 + - Mprotect restrictions
55373 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
55374 + - Kernel stack randomization
55375 + - Mount/unmount/remount logging
55376 + - Kernel symbol hiding
55377 + - Prevention of memory exhaustion-based exploits
55378 + - Hardening of module auto-loading
55379 + - Ptrace restrictions
55380 + - Restricted vm86 mode
55381 + - Restricted sysfs/debugfs
55382 + - Active kernel exploit response
55383 +
55384 +config GRKERNSEC_CUSTOM
55385 + bool "Custom"
55386 + help
55387 + If you say Y here, you will be able to configure every grsecurity
55388 + option, which allows you to enable many more features that aren't
55389 + covered in the basic security levels. These additional features
55390 + include TPE, socket restrictions, and the sysctl system for
55391 + grsecurity. It is advised that you read through the help for
55392 + each option to determine its usefulness in your situation.
55393 +
55394 +endchoice
55395 +
55396 +menu "Address Space Protection"
55397 +depends on GRKERNSEC
55398 +
55399 +config GRKERNSEC_KMEM
55400 + bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
55401 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
55402 + help
55403 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
55404 + be written to via mmap or otherwise to modify the running kernel.
55405 + /dev/port will also not be allowed to be opened. If you have module
55406 + support disabled, enabling this will close up four ways that are
55407 + currently used to insert malicious code into the running kernel.
55408 + Even with all these features enabled, we still highly recommend that
55409 + you use the RBAC system, as it is still possible for an attacker to
55410 + modify the running kernel through privileged I/O granted by ioperm/iopl.
55411 + If you are not using XFree86, you may be able to stop this additional
55412 + case by enabling the 'Disable privileged I/O' option. Though nothing
55413 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
55414 + but only to video memory, which is the only writing we allow in this
55415 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, it will
55416 + not be possible to mprotect those mappings with PROT_WRITE later.
55417 + It is highly recommended that you say Y here if you meet all the
55418 + conditions above.
55419 +
55420 +config GRKERNSEC_VM86
55421 + bool "Restrict VM86 mode"
55422 + depends on X86_32
55423 +
55424 + help
55425 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
55426 + make use of a special execution mode on 32bit x86 processors called
55427 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
55428 + video cards and will still work with this option enabled. The purpose
55429 + of the option is to prevent exploitation of emulation errors in
55430 + virtualization of vm86 mode like the one discovered in VMware in 2009.
55431 + Nearly all users should be able to enable this option.
55432 +
55433 +config GRKERNSEC_IO
55434 + bool "Disable privileged I/O"
55435 + depends on X86
55436 + select RTC_CLASS
55437 + select RTC_INTF_DEV
55438 + select RTC_DRV_CMOS
55439 +
55440 + help
55441 + If you say Y here, all ioperm and iopl calls will return an error.
55442 + Ioperm and iopl can be used to modify the running kernel.
55443 + Unfortunately, some programs need this access to operate properly,
55444 + the most notable of which are XFree86 and hwclock. hwclock can be
55445 + remedied by having RTC support in the kernel, so real-time
55446 + clock support is enabled if this option is enabled, to ensure
55447 + that hwclock operates correctly. XFree86 still will not
55448 + operate correctly with this option enabled, so DO NOT CHOOSE Y
55449 + IF YOU USE XFree86. If you use XFree86 and you still want to
55450 + protect your kernel against modification, use the RBAC system.
55451 +
55452 +config GRKERNSEC_PROC_MEMMAP
55453 + bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
55454 + default y if (PAX_NOEXEC || PAX_ASLR)
55455 + depends on PAX_NOEXEC || PAX_ASLR
55456 + help
55457 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
55458 + give no information about the addresses of a task's mappings if
55459 + PaX features that rely on random addresses are enabled on that task.
55460 + If you use PaX it is greatly recommended that you say Y here as it
55461 + closes up a hole that makes the full ASLR useless for suid
55462 + binaries.
55463 +
55464 +config GRKERNSEC_BRUTE
55465 + bool "Deter exploit bruteforcing"
55466 + help
55467 + If you say Y here, attempts to bruteforce exploits against forking
55468 + daemons such as apache or sshd, as well as against suid/sgid binaries
55469 + will be deterred. When a child of a forking daemon is killed by PaX
55470 + or crashes due to an illegal instruction or other suspicious signal,
55471 + the parent process will be delayed 30 seconds upon every subsequent
55472 + fork until the administrator is able to assess the situation and
55473 + restart the daemon.
55474 + In the suid/sgid case, the attempt is logged, the user has all their
55475 + processes terminated, and they are prevented from executing any further
55476 + processes for 15 minutes.
55477 + It is recommended that you also enable signal logging in the auditing
55478 + section so that logs are generated when a process triggers a suspicious
55479 + signal.
55480 + If the sysctl option is enabled, a sysctl option with name
55481 + "deter_bruteforce" is created.
55482 +
55483 +config GRKERNSEC_MODHARDEN
55484 + bool "Harden module auto-loading"
55485 + depends on MODULES
55486 + help
55487 + If you say Y here, module auto-loading in response to use of some
55488 + feature implemented by an unloaded module will be restricted to
55489 + root users. Enabling this option helps defend against attacks
55490 + by unprivileged users who abuse the auto-loading behavior to
55491 + cause a vulnerable module to load that is then exploited.
55492 +
55493 + If this option prevents a legitimate use of auto-loading for a
55494 + non-root user, the administrator can execute modprobe manually
55495 + with the exact name of the module mentioned in the alert log.
55496 + Alternatively, the administrator can add the module to the list
55497 + of modules loaded at boot by modifying init scripts.
55498 +
55499 + Modification of init scripts will most likely be needed on
55500 + Ubuntu servers with encrypted home directory support enabled,
55501 + as the first non-root user logging in will cause the ecb(aes),
55502 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
55503 +
55504 +config GRKERNSEC_HIDESYM
55505 + bool "Hide kernel symbols"
55506 + help
55507 + If you say Y here, getting information on loaded modules, and
55508 + displaying all kernel symbols through a syscall will be restricted
55509 + to users with CAP_SYS_MODULE. For software compatibility reasons,
55510 + /proc/kallsyms will be restricted to the root user. The RBAC
55511 + system can hide that entry even from root.
55512 +
55513 + This option also prevents leaking of kernel addresses through
55514 + several /proc entries.
55515 +
55516 + Note that this option is only effective provided the following
55517 + conditions are met:
55518 + 1) The kernel using grsecurity is not precompiled by some distribution
55519 + 2) You have also enabled GRKERNSEC_DMESG
55520 + 3) You are using the RBAC system and hiding other files such as your
55521 + kernel image and System.map. Alternatively, enabling this option
55522 + causes the permissions on /boot, /lib/modules, and the kernel
55523 + source directory to change at compile time to prevent
55524 + reading by non-root users.
55525 + If the above conditions are met, this option will aid in providing a
55526 + useful protection against local kernel exploitation of overflows
55527 + and arbitrary read/write vulnerabilities.
55528 +
55529 +config GRKERNSEC_KERN_LOCKOUT
55530 + bool "Active kernel exploit response"
55531 + depends on X86 || ARM || PPC || SPARC
55532 + help
55533 + If you say Y here, when a PaX alert is triggered due to suspicious
55534 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
55535 + or an OOPs occurs due to bad memory accesses, instead of just
55536 + terminating the offending process (and potentially allowing
55537 + a subsequent exploit from the same user), we will take one of two
55538 + actions:
55539 + If the user was root, we will panic the system
55540 + If the user was non-root, we will log the attempt, terminate
55541 + all processes owned by the user, then prevent them from creating
55542 + any new processes until the system is restarted
55543 + This deters repeated kernel exploitation/bruteforcing attempts
55544 + and is useful for later forensics.
55545 +
55546 +endmenu
55547 +menu "Role Based Access Control Options"
55548 +depends on GRKERNSEC
55549 +
55550 +config GRKERNSEC_RBAC_DEBUG
55551 + bool
55552 +
55553 +config GRKERNSEC_NO_RBAC
55554 + bool "Disable RBAC system"
55555 + help
55556 + If you say Y here, the /dev/grsec device will be removed from the kernel,
55557 + preventing the RBAC system from being enabled. You should only say Y
55558 + here if you have no intention of using the RBAC system, so as to prevent
55559 + an attacker with root access from misusing the RBAC system to hide files
55560 + and processes when loadable module support and /dev/[k]mem have been
55561 + locked down.
55562 +
55563 +config GRKERNSEC_ACL_HIDEKERN
55564 + bool "Hide kernel processes"
55565 + help
55566 + If you say Y here, all kernel threads will be hidden to all
55567 + processes but those whose subject has the "view hidden processes"
55568 + flag.
55569 +
55570 +config GRKERNSEC_ACL_MAXTRIES
55571 + int "Maximum tries before password lockout"
55572 + default 3
55573 + help
55574 + This option enforces the maximum number of times a user can attempt
55575 + to authorize themselves with the grsecurity RBAC system before being
55576 + denied the ability to attempt authorization again for a specified time.
55577 + The lower the number, the harder it will be to brute-force a password.
55578 +
55579 +config GRKERNSEC_ACL_TIMEOUT
55580 + int "Time to wait after max password tries, in seconds"
55581 + default 30
55582 + help
55583 + This option specifies the time the user must wait after attempting to
55584 + authorize to the RBAC system with the maximum number of invalid
55585 + passwords. The higher the number, the harder it will be to brute-force
55586 + a password.
55587 +
55588 +endmenu
55589 +menu "Filesystem Protections"
55590 +depends on GRKERNSEC
55591 +
55592 +config GRKERNSEC_PROC
55593 + bool "Proc restrictions"
55594 + help
55595 + If you say Y here, the permissions of the /proc filesystem
55596 + will be altered to enhance system security and privacy. You MUST
55597 + choose either a user only restriction or a user and group restriction.
55598 + Depending upon the option you choose, you can either restrict users to
55599 + seeing only the processes they themselves run, or additionally choose
55600 + a group that will be able to view all processes and files normally
55601 + restricted to root. NOTE: If you're running identd as
55602 + a non-root user, you will have to run it as the group you specify here.
55603 +
55604 +config GRKERNSEC_PROC_USER
55605 + bool "Restrict /proc to user only"
55606 + depends on GRKERNSEC_PROC
55607 + help
55608 + If you say Y here, non-root users will only be able to view their own
55609 + processes, and will be restricted from viewing network-related information
55610 + and kernel symbol and module information.
55611 +
55612 +config GRKERNSEC_PROC_USERGROUP
55613 + bool "Allow special group"
55614 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
55615 + help
55616 + If you say Y here, you will be able to select a group that will be
55617 + able to view all processes and network-related information. If you've
55618 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
55619 + remain hidden. This option is useful if you want to run identd as
55620 + a non-root user.
55621 +
55622 +config GRKERNSEC_PROC_GID
55623 + int "GID for special group"
55624 + depends on GRKERNSEC_PROC_USERGROUP
55625 + default 1001
55626 +
55627 +config GRKERNSEC_PROC_ADD
55628 + bool "Additional restrictions"
55629 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
55630 + help
55631 + If you say Y here, additional restrictions will be placed on
55632 + /proc that keep normal users from viewing device information and
55633 + slabinfo information that could be useful for exploits.
55634 +
55635 +config GRKERNSEC_LINK
55636 + bool "Linking restrictions"
55637 + help
55638 + If you say Y here, /tmp race exploits will be prevented, since users
55639 + will no longer be able to follow symlinks owned by other users in
55640 + world-writable +t directories (e.g. /tmp), unless the owner of the
55641 + symlink is the owner of the directory. Users will also not be
55642 + able to hardlink to files they do not own. If the sysctl option is
55643 + enabled, a sysctl option with name "linking_restrictions" is created.
55644 +
55645 +config GRKERNSEC_FIFO
55646 + bool "FIFO restrictions"
55647 + help
55648 + If you say Y here, users will not be able to write to FIFOs they don't
55649 + own in world-writable +t directories (e.g. /tmp), unless the owner of
55650 + the FIFO is also the owner of the directory it's held in. If the sysctl
55651 + option is enabled, a sysctl option with name "fifo_restrictions" is
55652 + created.
55653 +
55654 +config GRKERNSEC_SYSFS_RESTRICT
55655 + bool "Sysfs/debugfs restriction"
55656 + depends on SYSFS
55657 + help
55658 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
55659 + any filesystem normally mounted under it (e.g. debugfs) will only
55660 + be accessible by root. These filesystems generally provide access
55661 + to hardware and debug information that isn't appropriate for unprivileged
55662 + users of the system. Sysfs and debugfs have also become a large source
55663 + of new vulnerabilities, ranging from infoleaks to local compromise.
55664 + There has been very little oversight with an eye toward security involved
55665 + in adding new exporters of information to these filesystems, so their
55666 + use is discouraged.
55667 + This option is equivalent to a chmod 0700 of the mount paths.
55668 +
55669 +config GRKERNSEC_ROFS
55670 + bool "Runtime read-only mount protection"
55671 + help
55672 + If you say Y here, a sysctl option with name "romount_protect" will
55673 + be created. By setting this option to 1 at runtime, filesystems
55674 + will be protected in the following ways:
55675 + * No new writable mounts will be allowed
55676 + * Existing read-only mounts cannot be remounted read/write
55677 + * Write operations will be denied on all block devices
55678 + This option acts independently of grsec_lock: once it is set to 1,
55679 + it cannot be turned off. Therefore, please be mindful of the resulting
55680 + behavior if this option is enabled in an init script on a read-only
55681 + filesystem. This feature is mainly intended for secure embedded systems.
55682 +
55683 +config GRKERNSEC_CHROOT
55684 + bool "Chroot jail restrictions"
55685 + help
55686 + If you say Y here, you will be able to choose several options that will
55687 + make breaking out of a chrooted jail much more difficult. If you
55688 + encounter no software incompatibilities with the following options, it
55689 + is recommended that you enable each one.
55690 +
55691 +config GRKERNSEC_CHROOT_MOUNT
55692 + bool "Deny mounts"
55693 + depends on GRKERNSEC_CHROOT
55694 + help
55695 + If you say Y here, processes inside a chroot will not be able to
55696 + mount or remount filesystems. If the sysctl option is enabled, a
55697 + sysctl option with name "chroot_deny_mount" is created.
55698 +
55699 +config GRKERNSEC_CHROOT_DOUBLE
55700 + bool "Deny double-chroots"
55701 + depends on GRKERNSEC_CHROOT
55702 + help
55703 + If you say Y here, processes inside a chroot will not be able to chroot
55704 + again outside the chroot. This is a widely used method of breaking
55705 + out of a chroot jail and should not be allowed. If the sysctl
55706 + option is enabled, a sysctl option with name
55707 + "chroot_deny_chroot" is created.
55708 +
55709 +config GRKERNSEC_CHROOT_PIVOT
55710 + bool "Deny pivot_root in chroot"
55711 + depends on GRKERNSEC_CHROOT
55712 + help
55713 + If you say Y here, processes inside a chroot will not be able to use
55714 + a function called pivot_root() that was introduced in Linux 2.3.41. It
55715 + works similar to chroot in that it changes the root filesystem. This
55716 + function could be misused in a chrooted process to attempt to break out
55717 + of the chroot, and therefore should not be allowed. If the sysctl
55718 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
55719 + created.
55720 +
55721 +config GRKERNSEC_CHROOT_CHDIR
55722 + bool "Enforce chdir(\"/\") on all chroots"
55723 + depends on GRKERNSEC_CHROOT
55724 + help
55725 + If you say Y here, the current working directory of all newly-chrooted
55726 + applications will be set to the root directory of the chroot.
55727 + The man page on chroot(2) states:
55728 + Note that this call does not change the current working
55729 + directory, so that `.' can be outside the tree rooted at
55730 + `/'. In particular, the super-user can escape from a
55731 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
55732 +
55733 + It is recommended that you say Y here, since it's not known to break
55734 + any software. If the sysctl option is enabled, a sysctl option with
55735 + name "chroot_enforce_chdir" is created.
55736 +
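
For illustration, the breakout sequence quoted from the chroot(2) man page above
looks roughly like the following in C; enforcing chdir("/") on chroot removes the
stale working directory this technique depends on (sketch only, assuming root
privileges inside the jail):

	#include <unistd.h>
	#include <sys/stat.h>

	static int classic_chroot_escape(void)
	{
		int i;

		mkdir("foo", 0700);
		if (chroot("foo"))		/* cwd is left outside the new root */
			return -1;
		for (i = 0; i < 64; i++)	/* climb back up to the real root */
			chdir("..");
		return chroot(".");		/* re-root at the host's / */
	}
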
55737 +config GRKERNSEC_CHROOT_CHMOD
55738 + bool "Deny (f)chmod +s"
55739 + depends on GRKERNSEC_CHROOT
55740 + help
55741 + If you say Y here, processes inside a chroot will not be able to chmod
55742 + or fchmod files to make them have suid or sgid bits. This protects
55743 + against another published method of breaking a chroot. If the sysctl
55744 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
55745 + created.
55746 +
55747 +config GRKERNSEC_CHROOT_FCHDIR
55748 + bool "Deny fchdir out of chroot"
55749 + depends on GRKERNSEC_CHROOT
55750 + help
55751 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
55752 + to a file descriptor of the chrooting process that points to a directory
55753 + outside the filesystem will be stopped. If the sysctl option
55754 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
55755 +
55756 +config GRKERNSEC_CHROOT_MKNOD
55757 + bool "Deny mknod"
55758 + depends on GRKERNSEC_CHROOT
55759 + help
55760 + If you say Y here, processes inside a chroot will not be allowed to
55761 + mknod. The problem with using mknod inside a chroot is that it
55762 + would allow an attacker to create a device entry that is the same
55763 + as one on the physical root of your system, which could be anything
55764 + from the console device to a device for your hard drive (which
55765 + they could then use to wipe the drive or steal data). It is recommended
55766 + that you say Y here, unless you run into software incompatibilities.
55767 + If the sysctl option is enabled, a sysctl option with name
55768 + "chroot_deny_mknod" is created.
55769 +
55770 +config GRKERNSEC_CHROOT_SHMAT
55771 + bool "Deny shmat() out of chroot"
55772 + depends on GRKERNSEC_CHROOT
55773 + help
55774 + If you say Y here, processes inside a chroot will not be able to attach
55775 + to shared memory segments that were created outside of the chroot jail.
55776 + It is recommended that you say Y here. If the sysctl option is enabled,
55777 + a sysctl option with name "chroot_deny_shmat" is created.
55778 +
55779 +config GRKERNSEC_CHROOT_UNIX
55780 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
55781 + depends on GRKERNSEC_CHROOT
55782 + help
55783 + If you say Y here, processes inside a chroot will not be able to
55784 + connect to abstract (meaning not belonging to a filesystem) Unix
55785 + domain sockets that were bound outside of a chroot. It is recommended
55786 + that you say Y here. If the sysctl option is enabled, a sysctl option
55787 + with name "chroot_deny_unix" is created.
55788 +
55789 +config GRKERNSEC_CHROOT_FINDTASK
55790 + bool "Protect outside processes"
55791 + depends on GRKERNSEC_CHROOT
55792 + help
55793 + If you say Y here, processes inside a chroot will not be able to
55794 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
55795 + getsid, or view any process outside of the chroot. If the sysctl
55796 + option is enabled, a sysctl option with name "chroot_findtask" is
55797 + created.
55798 +
55799 +config GRKERNSEC_CHROOT_NICE
55800 + bool "Restrict priority changes"
55801 + depends on GRKERNSEC_CHROOT
55802 + help
55803 + If you say Y here, processes inside a chroot will not be able to raise
55804 + the priority of processes in the chroot, or alter the priority of
55805 + processes outside the chroot. This provides more security than simply
55806 + removing CAP_SYS_NICE from the process' capability set. If the
55807 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
55808 + is created.
55809 +
55810 +config GRKERNSEC_CHROOT_SYSCTL
55811 + bool "Deny sysctl writes"
55812 + depends on GRKERNSEC_CHROOT
55813 + help
55814 + If you say Y here, an attacker in a chroot will not be able to
55815 + write to sysctl entries, either by sysctl(2) or through a /proc
55816 + interface. It is strongly recommended that you say Y here. If the
55817 + sysctl option is enabled, a sysctl option with name
55818 + "chroot_deny_sysctl" is created.
55819 +
55820 +config GRKERNSEC_CHROOT_CAPS
55821 + bool "Capability restrictions"
55822 + depends on GRKERNSEC_CHROOT
55823 + help
55824 + If you say Y here, the capabilities on all root processes within a
55825 + chroot jail will be lowered to stop module insertion, raw i/o,
55826 + system and net admin tasks, rebooting the system, modifying immutable
55827 + files, modifying IPC owned by another, and changing the system time.
55828 + This is left as an option because it can break some apps. Disable this
55829 + if your chrooted apps are having problems performing those kinds of
55830 + tasks. If the sysctl option is enabled, a sysctl option with
55831 + name "chroot_caps" is created.
55832 +
55833 +endmenu
55834 +menu "Kernel Auditing"
55835 +depends on GRKERNSEC
55836 +
55837 +config GRKERNSEC_AUDIT_GROUP
55838 + bool "Single group for auditing"
55839 + help
55840 + If you say Y here, the exec, chdir, and (un)mount logging features
55841 + will only operate on a group you specify. This option is recommended
55842 + if you only want to watch certain users instead of having a large
55843 + amount of logs from the entire system. If the sysctl option is enabled,
55844 + a sysctl option with name "audit_group" is created.
55845 +
55846 +config GRKERNSEC_AUDIT_GID
55847 + int "GID for auditing"
55848 + depends on GRKERNSEC_AUDIT_GROUP
55849 + default 1007
55850 +
55851 +config GRKERNSEC_EXECLOG
55852 + bool "Exec logging"
55853 + help
55854 + If you say Y here, all execve() calls will be logged (since the
55855 + other exec*() calls are frontends to execve(), all execution
55856 + will be logged). Useful for shell-servers that like to keep track
55857 + of their users. If the sysctl option is enabled, a sysctl option with
55858 + name "exec_logging" is created.
55859 + WARNING: This option when enabled will produce a LOT of logs, especially
55860 + on an active system.
55861 +
55862 +config GRKERNSEC_RESLOG
55863 + bool "Resource logging"
55864 + help
55865 + If you say Y here, all attempts to overstep resource limits will
55866 + be logged with the resource name, the requested size, and the current
55867 + limit. It is highly recommended that you say Y here. If the sysctl
55868 + option is enabled, a sysctl option with name "resource_logging" is
55869 + created. If the RBAC system is enabled, the sysctl value is ignored.
55870 +
55871 +config GRKERNSEC_CHROOT_EXECLOG
55872 + bool "Log execs within chroot"
55873 + help
55874 + If you say Y here, all executions inside a chroot jail will be logged
55875 + to syslog. This can cause a large amount of logs if certain
55876 + applications (e.g. djb's daemontools) are installed on the system, and
55877 + is therefore left as an option. If the sysctl option is enabled, a
55878 + sysctl option with name "chroot_execlog" is created.
55879 +
55880 +config GRKERNSEC_AUDIT_PTRACE
55881 + bool "Ptrace logging"
55882 + help
55883 + If you say Y here, all attempts to attach to a process via ptrace
55884 + will be logged. If the sysctl option is enabled, a sysctl option
55885 + with name "audit_ptrace" is created.
55886 +
55887 +config GRKERNSEC_AUDIT_CHDIR
55888 + bool "Chdir logging"
55889 + help
55890 + If you say Y here, all chdir() calls will be logged. If the sysctl
55891 + option is enabled, a sysctl option with name "audit_chdir" is created.
55892 +
55893 +config GRKERNSEC_AUDIT_MOUNT
55894 + bool "(Un)Mount logging"
55895 + help
55896 + If you say Y here, all mounts and unmounts will be logged. If the
55897 + sysctl option is enabled, a sysctl option with name "audit_mount" is
55898 + created.
55899 +
55900 +config GRKERNSEC_SIGNAL
55901 + bool "Signal logging"
55902 + help
55903 + If you say Y here, certain important signals will be logged, such as
55904 + SIGSEGV, which will as a result inform you when an error in a program
55905 + has occurred, which in some cases could mean a possible exploit attempt.
55906 + If the sysctl option is enabled, a sysctl option with name
55907 + "signal_logging" is created.
55908 +
55909 +config GRKERNSEC_FORKFAIL
55910 + bool "Fork failure logging"
55911 + help
55912 + If you say Y here, all failed fork() attempts will be logged.
55913 + This could suggest a fork bomb, or someone attempting to overstep
55914 + their process limit. If the sysctl option is enabled, a sysctl option
55915 + with name "forkfail_logging" is created.
55916 +
55917 +config GRKERNSEC_TIME
55918 + bool "Time change logging"
55919 + help
55920 + If you say Y here, any changes of the system clock will be logged.
55921 + If the sysctl option is enabled, a sysctl option with name
55922 + "timechange_logging" is created.
55923 +
55924 +config GRKERNSEC_PROC_IPADDR
55925 + bool "/proc/<pid>/ipaddr support"
55926 + help
55927 + If you say Y here, a new entry will be added to each /proc/<pid>
55928 + directory that contains the IP address of the person using the task.
55929 + The IP is carried across local TCP and AF_UNIX stream sockets.
55930 + This information can be useful for IDS/IPSes to perform remote response
55931 + to a local attack. The entry is readable by only the owner of the
55932 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
55933 + the RBAC system), and thus does not create privacy concerns.
55934 +
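A one-line sketch of how the entry described above might be consumed, for example by a local IDS; the PID is purely illustrative:

    # readable by the owner of the process (and root, unless CAP_DAC_OVERRIDE is removed)
    cat /proc/4321/ipaddr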
55935 +config GRKERNSEC_RWXMAP_LOG
55936 + bool 'Denied RWX mmap/mprotect logging'
55937 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
55938 + help
55939 + If you say Y here, calls to mmap() and mprotect() with explicit
55940 + usage of PROT_WRITE and PROT_EXEC together will be logged when
55941 + denied by the PAX_MPROTECT feature. If the sysctl option is
55942 + enabled, a sysctl option with name "rwxmap_logging" is created.
55943 +
55944 +config GRKERNSEC_AUDIT_TEXTREL
55945 + bool 'ELF text relocations logging (READ HELP)'
55946 + depends on PAX_MPROTECT
55947 + help
55948 + If you say Y here, text relocations will be logged with the filename
55949 + of the offending library or binary. The purpose of the feature is
55950 + to help Linux distribution developers get rid of libraries and
55951 + binaries that need text relocations which hinder the future progress
55952 + of PaX. Only Linux distribution developers should say Y here, and
55953 + never on a production machine, as this option creates an information
55954 + leak that could aid an attacker in defeating the randomization of
55955 + a single memory region. If the sysctl option is enabled, a sysctl
55956 + option with name "audit_textrel" is created.
55957 +
55958 +endmenu
55959 +
55960 +menu "Executable Protections"
55961 +depends on GRKERNSEC
55962 +
55963 +config GRKERNSEC_DMESG
55964 + bool "Dmesg(8) restriction"
55965 + help
55966 + If you say Y here, non-root users will not be able to use dmesg(8)
55967 + to view up to the last 4kb of messages in the kernel's log buffer.
55968 + The kernel's log buffer often contains kernel addresses and other
55969 + identifying information useful to an attacker in fingerprinting a
55970 + system for a targeted exploit.
55971 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
55972 + created.
55973 +
55974 +config GRKERNSEC_HARDEN_PTRACE
55975 + bool "Deter ptrace-based process snooping"
55976 + help
55977 + If you say Y here, TTY sniffers and other malicious monitoring
55978 + programs implemented through ptrace will be defeated. If you
55979 + have been using the RBAC system, this option has already been
55980 + enabled for several years for all users, with the ability to make
55981 + fine-grained exceptions.
55982 +
55983 + This option only affects the ability of non-root users to ptrace
55984 +	  processes that are not a descendant of the ptracing process.
55985 + This means that strace ./binary and gdb ./binary will still work,
55986 + but attaching to arbitrary processes will not. If the sysctl
55987 + option is enabled, a sysctl option with name "harden_ptrace" is
55988 + created.
55989 +
55990 +config GRKERNSEC_TPE
55991 + bool "Trusted Path Execution (TPE)"
55992 + help
55993 +	  If you say Y here, you will be able to choose a GID to add to the
55994 + supplementary groups of users you want to mark as "untrusted."
55995 + These users will not be able to execute any files that are not in
55996 + root-owned directories writable only by root. If the sysctl option
55997 + is enabled, a sysctl option with name "tpe" is created.
55998 +
55999 +config GRKERNSEC_TPE_ALL
56000 + bool "Partially restrict all non-root users"
56001 + depends on GRKERNSEC_TPE
56002 + help
56003 + If you say Y here, all non-root users will be covered under
56004 + a weaker TPE restriction. This is separate from, and in addition to,
56005 + the main TPE options that you have selected elsewhere. Thus, if a
56006 + "trusted" GID is chosen, this restriction applies to even that GID.
56007 + Under this restriction, all non-root users will only be allowed to
56008 + execute files in directories they own that are not group or
56009 + world-writable, or in directories owned by root and writable only by
56010 + root. If the sysctl option is enabled, a sysctl option with name
56011 + "tpe_restrict_all" is created.
56012 +
56013 +config GRKERNSEC_TPE_INVERT
56014 + bool "Invert GID option"
56015 + depends on GRKERNSEC_TPE
56016 + help
56017 + If you say Y here, the group you specify in the TPE configuration will
56018 + decide what group TPE restrictions will be *disabled* for. This
56019 + option is useful if you want TPE restrictions to be applied to most
56020 + users on the system. If the sysctl option is enabled, a sysctl option
56021 + with name "tpe_invert" is created. Unlike other sysctl options, this
56022 + entry will default to on for backward-compatibility.
56023 +
56024 +config GRKERNSEC_TPE_GID
56025 + int "GID for untrusted users"
56026 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
56027 + default 1005
56028 + help
56029 + Setting this GID determines what group TPE restrictions will be
56030 + *enabled* for. If the sysctl option is enabled, a sysctl option
56031 + with name "tpe_gid" is created.
56032 +
56033 +config GRKERNSEC_TPE_GID
56034 + int "GID for trusted users"
56035 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
56036 + default 1005
56037 + help
56038 + Setting this GID determines what group TPE restrictions will be
56039 + *disabled* for. If the sysctl option is enabled, a sysctl option
56040 + with name "tpe_gid" is created.
56041 +
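To make the TPE options above concrete, a sketch of the usual setup with the default GID; the group and user names are invented, and the sysctl paths assume the "Sysctl support" menu later in this file is enabled:

    # normal mode: members of GID 1005 are the *untrusted* users
    groupadd -g 1005 tpe-untrusted
    usermod -aG tpe-untrusted guest
    echo 1005 > /proc/sys/kernel/grsecurity/tpe_gid
    echo 1    > /proc/sys/kernel/grsecurity/tpe

    # with GRKERNSEC_TPE_INVERT built in (its sysctl defaults to on),
    # membership in the GID instead *exempts* a user from TPE
    echo 1 > /proc/sys/kernel/grsecurity/tpe_invert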
56042 +endmenu
56043 +menu "Network Protections"
56044 +depends on GRKERNSEC
56045 +
56046 +config GRKERNSEC_RANDNET
56047 + bool "Larger entropy pools"
56048 + help
56049 + If you say Y here, the entropy pools used for many features of Linux
56050 + and grsecurity will be doubled in size. Since several grsecurity
56051 + features use additional randomness, it is recommended that you say Y
56052 + here. Saying Y here has a similar effect as modifying
56053 + /proc/sys/kernel/random/poolsize.
56054 +
56055 +config GRKERNSEC_BLACKHOLE
56056 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
56057 + depends on NET
56058 + help
56059 + If you say Y here, neither TCP resets nor ICMP
56060 + destination-unreachable packets will be sent in response to packets
56061 + sent to ports for which no associated listening process exists.
56062 +	  This feature supports both IPv4 and IPv6 and exempts the
56063 + loopback interface from blackholing. Enabling this feature
56064 + makes a host more resilient to DoS attacks and reduces network
56065 + visibility against scanners.
56066 +
56067 + The blackhole feature as-implemented is equivalent to the FreeBSD
56068 + blackhole feature, as it prevents RST responses to all packets, not
56069 + just SYNs. Under most application behavior this causes no
56070 + problems, but applications (like haproxy) may not close certain
56071 + connections in a way that cleanly terminates them on the remote
56072 + end, leaving the remote host in LAST_ACK state. Because of this
56073 + side-effect and to prevent intentional LAST_ACK DoSes, this
56074 + feature also adds automatic mitigation against such attacks.
56075 + The mitigation drastically reduces the amount of time a socket
56076 + can spend in LAST_ACK state. If you're using haproxy and not
56077 + all servers it connects to have this option enabled, consider
56078 + disabling this feature on the haproxy host.
56079 +
56080 + If the sysctl option is enabled, two sysctl options with names
56081 + "ip_blackhole" and "lastack_retries" will be created.
56082 + While "ip_blackhole" takes the standard zero/non-zero on/off
56083 + toggle, "lastack_retries" uses the same kinds of values as
56084 + "tcp_retries1" and "tcp_retries2". The default value of 4
56085 + prevents a socket from lasting more than 45 seconds in LAST_ACK
56086 + state.
56087 +
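A short sketch of driving the two sysctls named above at runtime, assuming sysctl support is built in:

    echo 1 > /proc/sys/kernel/grsecurity/ip_blackhole      # enable blackholing
    echo 4 > /proc/sys/kernel/grsecurity/lastack_retries   # documented default, ~45s max in LAST_ACK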
56088 +config GRKERNSEC_SOCKET
56089 + bool "Socket restrictions"
56090 + depends on NET
56091 + help
56092 + If you say Y here, you will be able to choose from several options.
56093 + If you assign a GID on your system and add it to the supplementary
56094 + groups of users you want to restrict socket access to, this patch
56095 + will perform up to three things, based on the option(s) you choose.
56096 +
56097 +config GRKERNSEC_SOCKET_ALL
56098 + bool "Deny any sockets to group"
56099 + depends on GRKERNSEC_SOCKET
56100 + help
56101 +	  If you say Y here, you will be able to choose a GID whose users will
56102 + be unable to connect to other hosts from your machine or run server
56103 + applications from your machine. If the sysctl option is enabled, a
56104 + sysctl option with name "socket_all" is created.
56105 +
56106 +config GRKERNSEC_SOCKET_ALL_GID
56107 + int "GID to deny all sockets for"
56108 + depends on GRKERNSEC_SOCKET_ALL
56109 + default 1004
56110 + help
56111 + Here you can choose the GID to disable socket access for. Remember to
56112 + add the users you want socket access disabled for to the GID
56113 + specified here. If the sysctl option is enabled, a sysctl option
56114 + with name "socket_all_gid" is created.
56115 +
56116 +config GRKERNSEC_SOCKET_CLIENT
56117 + bool "Deny client sockets to group"
56118 + depends on GRKERNSEC_SOCKET
56119 + help
56120 +	  If you say Y here, you will be able to choose a GID whose users will
56121 + be unable to connect to other hosts from your machine, but will be
56122 + able to run servers. If this option is enabled, all users in the group
56123 + you specify will have to use passive mode when initiating ftp transfers
56124 + from the shell on your machine. If the sysctl option is enabled, a
56125 + sysctl option with name "socket_client" is created.
56126 +
56127 +config GRKERNSEC_SOCKET_CLIENT_GID
56128 + int "GID to deny client sockets for"
56129 + depends on GRKERNSEC_SOCKET_CLIENT
56130 + default 1003
56131 + help
56132 + Here you can choose the GID to disable client socket access for.
56133 + Remember to add the users you want client socket access disabled for to
56134 + the GID specified here. If the sysctl option is enabled, a sysctl
56135 + option with name "socket_client_gid" is created.
56136 +
56137 +config GRKERNSEC_SOCKET_SERVER
56138 + bool "Deny server sockets to group"
56139 + depends on GRKERNSEC_SOCKET
56140 + help
56141 +	  If you say Y here, you will be able to choose a GID whose users will
56142 + be unable to run server applications from your machine. If the sysctl
56143 + option is enabled, a sysctl option with name "socket_server" is created.
56144 +
56145 +config GRKERNSEC_SOCKET_SERVER_GID
56146 + int "GID to deny server sockets for"
56147 + depends on GRKERNSEC_SOCKET_SERVER
56148 + default 1002
56149 + help
56150 + Here you can choose the GID to disable server socket access for.
56151 + Remember to add the users you want server socket access disabled for to
56152 + the GID specified here. If the sysctl option is enabled, a sysctl
56153 + option with name "socket_server_gid" is created.
56154 +
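Pulling the three socket options together, a sketch using the default GIDs; the group and user names are illustrative and the sysctl paths assume sysctl support:

    groupadd -g 1004 nosock-all      # socket_all / socket_all_gid
    groupadd -g 1003 nosock-client   # socket_client / socket_client_gid
    groupadd -g 1002 nosock-server   # socket_server / socket_server_gid
    usermod -aG nosock-all untrusted-user
    echo 1    > /proc/sys/kernel/grsecurity/socket_all
    echo 1004 > /proc/sys/kernel/grsecurity/socket_all_gid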
56155 +endmenu
56156 +menu "Sysctl support"
56157 +depends on GRKERNSEC && SYSCTL
56158 +
56159 +config GRKERNSEC_SYSCTL
56160 + bool "Sysctl support"
56161 + help
56162 + If you say Y here, you will be able to change the options that
56163 + grsecurity runs with at bootup, without having to recompile your
56164 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
56165 + to enable (1) or disable (0) various features. All the sysctl entries
56166 + are mutable until the "grsec_lock" entry is set to a non-zero value.
56167 + All features enabled in the kernel configuration are disabled at boot
56168 + if you do not say Y to the "Turn on features by default" option.
56169 + All options should be set at startup, and the grsec_lock entry should
56170 + be set to a non-zero value after all the options are set.
56171 + *THIS IS EXTREMELY IMPORTANT*
56172 +
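A sketch of the startup sequence the help text above calls for: enable the desired features first, then lock the interface; the entries shown are examples taken from this file:

    echo 1 > /proc/sys/kernel/grsecurity/exec_logging
    echo 1 > /proc/sys/kernel/grsecurity/resource_logging
    echo 1 > /proc/sys/kernel/grsecurity/dmesg
    # ...set any remaining options...
    echo 1 > /proc/sys/kernel/grsecurity/grsec_lock   # must come last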
56173 +config GRKERNSEC_SYSCTL_DISTRO
56174 + bool "Extra sysctl support for distro makers (READ HELP)"
56175 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
56176 + help
56177 + If you say Y here, additional sysctl options will be created
56178 + for features that affect processes running as root. Therefore,
56179 + it is critical when using this option that the grsec_lock entry be
56180 +	  enabled after boot. Only distros that ship prebuilt kernel packages
56181 +	  with this option enabled, and that can ensure grsec_lock is set
56182 +	  after boot, should use this option.
56183 + *Failure to set grsec_lock after boot makes all grsec features
56184 + this option covers useless*
56185 +
56186 + Currently this option creates the following sysctl entries:
56187 + "Disable Privileged I/O": "disable_priv_io"
56188 +
56189 +config GRKERNSEC_SYSCTL_ON
56190 + bool "Turn on features by default"
56191 + depends on GRKERNSEC_SYSCTL
56192 + help
56193 +	  If you say Y here, the features enabled in your kernel configuration
56194 +	  will be turned on at boot time, rather than starting out disabled
56195 +	  until enabled via sysctl. It is recommended you say Y here unless
56196 + there is some reason you would want all sysctl-tunable features to
56197 + be disabled by default. As mentioned elsewhere, it is important
56198 + to enable the grsec_lock entry once you have finished modifying
56199 + the sysctl entries.
56200 +
56201 +endmenu
56202 +menu "Logging Options"
56203 +depends on GRKERNSEC
56204 +
56205 +config GRKERNSEC_FLOODTIME
56206 + int "Seconds in between log messages (minimum)"
56207 + default 10
56208 + help
56209 +	  This option allows you to enforce the minimum number of seconds between
56210 + grsecurity log messages. The default should be suitable for most
56211 + people, however, if you choose to change it, choose a value small enough
56212 + to allow informative logs to be produced, but large enough to
56213 + prevent flooding.
56214 +
56215 +config GRKERNSEC_FLOODBURST
56216 + int "Number of messages in a burst (maximum)"
56217 + default 4
56218 + help
56219 + This option allows you to choose the maximum number of messages allowed
56220 + within the flood time interval you chose in a separate option. The
56221 + default should be suitable for most people, however if you find that
56222 + many of your logs are being interpreted as flooding, you may want to
56223 + raise this value.
56224 +
56225 +endmenu
56226 +
56227 +endmenu
56228 diff -urNp linux-2.6.32.45/grsecurity/Makefile linux-2.6.32.45/grsecurity/Makefile
56229 --- linux-2.6.32.45/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
56230 +++ linux-2.6.32.45/grsecurity/Makefile 2011-08-21 18:54:34.000000000 -0400
56231 @@ -0,0 +1,34 @@
56232 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
56233 +# during 2001-2009 it was completely redesigned by Brad Spengler
56234 +# into an RBAC system
56235 +#
56236 +# All code in this directory and various hooks inserted throughout the kernel
56237 +# are copyright Brad Spengler - Open Source Security, Inc., and released
56238 +# under the GPL v2 or higher
56239 +
56240 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
56241 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
56242 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
56243 +
56244 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
56245 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
56246 + gracl_learn.o grsec_log.o
56247 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
56248 +
56249 +ifdef CONFIG_NET
56250 +obj-y += grsec_sock.o
56251 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
56252 +endif
56253 +
56254 +ifndef CONFIG_GRKERNSEC
56255 +obj-y += grsec_disabled.o
56256 +endif
56257 +
56258 +ifdef CONFIG_GRKERNSEC_HIDESYM
56259 +extra-y := grsec_hidesym.o
56260 +$(obj)/grsec_hidesym.o:
56261 + @-chmod -f 500 /boot
56262 + @-chmod -f 500 /lib/modules
56263 + @-chmod -f 700 .
56264 + @echo ' grsec: protected kernel image paths'
56265 +endif
56266 diff -urNp linux-2.6.32.45/include/acpi/acpi_bus.h linux-2.6.32.45/include/acpi/acpi_bus.h
56267 --- linux-2.6.32.45/include/acpi/acpi_bus.h 2011-03-27 14:31:47.000000000 -0400
56268 +++ linux-2.6.32.45/include/acpi/acpi_bus.h 2011-08-05 20:33:55.000000000 -0400
56269 @@ -107,7 +107,7 @@ struct acpi_device_ops {
56270 acpi_op_bind bind;
56271 acpi_op_unbind unbind;
56272 acpi_op_notify notify;
56273 -};
56274 +} __no_const;
56275
56276 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
56277
56278 diff -urNp linux-2.6.32.45/include/acpi/acpi_drivers.h linux-2.6.32.45/include/acpi/acpi_drivers.h
56279 --- linux-2.6.32.45/include/acpi/acpi_drivers.h 2011-03-27 14:31:47.000000000 -0400
56280 +++ linux-2.6.32.45/include/acpi/acpi_drivers.h 2011-04-17 15:56:46.000000000 -0400
56281 @@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acp
56282 Dock Station
56283 -------------------------------------------------------------------------- */
56284 struct acpi_dock_ops {
56285 - acpi_notify_handler handler;
56286 - acpi_notify_handler uevent;
56287 + const acpi_notify_handler handler;
56288 + const acpi_notify_handler uevent;
56289 };
56290
56291 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
56292 @@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle ha
56293 extern int register_dock_notifier(struct notifier_block *nb);
56294 extern void unregister_dock_notifier(struct notifier_block *nb);
56295 extern int register_hotplug_dock_device(acpi_handle handle,
56296 - struct acpi_dock_ops *ops,
56297 + const struct acpi_dock_ops *ops,
56298 void *context);
56299 extern void unregister_hotplug_dock_device(acpi_handle handle);
56300 #else
56301 @@ -144,7 +144,7 @@ static inline void unregister_dock_notif
56302 {
56303 }
56304 static inline int register_hotplug_dock_device(acpi_handle handle,
56305 - struct acpi_dock_ops *ops,
56306 + const struct acpi_dock_ops *ops,
56307 void *context)
56308 {
56309 return -ENODEV;
56310 diff -urNp linux-2.6.32.45/include/asm-generic/atomic-long.h linux-2.6.32.45/include/asm-generic/atomic-long.h
56311 --- linux-2.6.32.45/include/asm-generic/atomic-long.h 2011-03-27 14:31:47.000000000 -0400
56312 +++ linux-2.6.32.45/include/asm-generic/atomic-long.h 2011-07-13 22:21:25.000000000 -0400
56313 @@ -22,6 +22,12 @@
56314
56315 typedef atomic64_t atomic_long_t;
56316
56317 +#ifdef CONFIG_PAX_REFCOUNT
56318 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
56319 +#else
56320 +typedef atomic64_t atomic_long_unchecked_t;
56321 +#endif
56322 +
56323 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
56324
56325 static inline long atomic_long_read(atomic_long_t *l)
56326 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
56327 return (long)atomic64_read(v);
56328 }
56329
56330 +#ifdef CONFIG_PAX_REFCOUNT
56331 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
56332 +{
56333 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56334 +
56335 + return (long)atomic64_read_unchecked(v);
56336 +}
56337 +#endif
56338 +
56339 static inline void atomic_long_set(atomic_long_t *l, long i)
56340 {
56341 atomic64_t *v = (atomic64_t *)l;
56342 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
56343 atomic64_set(v, i);
56344 }
56345
56346 +#ifdef CONFIG_PAX_REFCOUNT
56347 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
56348 +{
56349 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56350 +
56351 + atomic64_set_unchecked(v, i);
56352 +}
56353 +#endif
56354 +
56355 static inline void atomic_long_inc(atomic_long_t *l)
56356 {
56357 atomic64_t *v = (atomic64_t *)l;
56358 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
56359 atomic64_inc(v);
56360 }
56361
56362 +#ifdef CONFIG_PAX_REFCOUNT
56363 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
56364 +{
56365 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56366 +
56367 + atomic64_inc_unchecked(v);
56368 +}
56369 +#endif
56370 +
56371 static inline void atomic_long_dec(atomic_long_t *l)
56372 {
56373 atomic64_t *v = (atomic64_t *)l;
56374 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
56375 atomic64_dec(v);
56376 }
56377
56378 +#ifdef CONFIG_PAX_REFCOUNT
56379 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
56380 +{
56381 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56382 +
56383 + atomic64_dec_unchecked(v);
56384 +}
56385 +#endif
56386 +
56387 static inline void atomic_long_add(long i, atomic_long_t *l)
56388 {
56389 atomic64_t *v = (atomic64_t *)l;
56390 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long
56391 atomic64_add(i, v);
56392 }
56393
56394 +#ifdef CONFIG_PAX_REFCOUNT
56395 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
56396 +{
56397 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56398 +
56399 + atomic64_add_unchecked(i, v);
56400 +}
56401 +#endif
56402 +
56403 static inline void atomic_long_sub(long i, atomic_long_t *l)
56404 {
56405 atomic64_t *v = (atomic64_t *)l;
56406 @@ -115,6 +166,15 @@ static inline long atomic_long_inc_retur
56407 return (long)atomic64_inc_return(v);
56408 }
56409
56410 +#ifdef CONFIG_PAX_REFCOUNT
56411 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
56412 +{
56413 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56414 +
56415 + return (long)atomic64_inc_return_unchecked(v);
56416 +}
56417 +#endif
56418 +
56419 static inline long atomic_long_dec_return(atomic_long_t *l)
56420 {
56421 atomic64_t *v = (atomic64_t *)l;
56422 @@ -140,6 +200,12 @@ static inline long atomic_long_add_unles
56423
56424 typedef atomic_t atomic_long_t;
56425
56426 +#ifdef CONFIG_PAX_REFCOUNT
56427 +typedef atomic_unchecked_t atomic_long_unchecked_t;
56428 +#else
56429 +typedef atomic_t atomic_long_unchecked_t;
56430 +#endif
56431 +
56432 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
56433 static inline long atomic_long_read(atomic_long_t *l)
56434 {
56435 @@ -148,6 +214,15 @@ static inline long atomic_long_read(atom
56436 return (long)atomic_read(v);
56437 }
56438
56439 +#ifdef CONFIG_PAX_REFCOUNT
56440 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
56441 +{
56442 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56443 +
56444 + return (long)atomic_read_unchecked(v);
56445 +}
56446 +#endif
56447 +
56448 static inline void atomic_long_set(atomic_long_t *l, long i)
56449 {
56450 atomic_t *v = (atomic_t *)l;
56451 @@ -155,6 +230,15 @@ static inline void atomic_long_set(atomi
56452 atomic_set(v, i);
56453 }
56454
56455 +#ifdef CONFIG_PAX_REFCOUNT
56456 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
56457 +{
56458 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56459 +
56460 + atomic_set_unchecked(v, i);
56461 +}
56462 +#endif
56463 +
56464 static inline void atomic_long_inc(atomic_long_t *l)
56465 {
56466 atomic_t *v = (atomic_t *)l;
56467 @@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomi
56468 atomic_inc(v);
56469 }
56470
56471 +#ifdef CONFIG_PAX_REFCOUNT
56472 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
56473 +{
56474 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56475 +
56476 + atomic_inc_unchecked(v);
56477 +}
56478 +#endif
56479 +
56480 static inline void atomic_long_dec(atomic_long_t *l)
56481 {
56482 atomic_t *v = (atomic_t *)l;
56483 @@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomi
56484 atomic_dec(v);
56485 }
56486
56487 +#ifdef CONFIG_PAX_REFCOUNT
56488 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
56489 +{
56490 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56491 +
56492 + atomic_dec_unchecked(v);
56493 +}
56494 +#endif
56495 +
56496 static inline void atomic_long_add(long i, atomic_long_t *l)
56497 {
56498 atomic_t *v = (atomic_t *)l;
56499 @@ -176,6 +278,15 @@ static inline void atomic_long_add(long
56500 atomic_add(i, v);
56501 }
56502
56503 +#ifdef CONFIG_PAX_REFCOUNT
56504 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
56505 +{
56506 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56507 +
56508 + atomic_add_unchecked(i, v);
56509 +}
56510 +#endif
56511 +
56512 static inline void atomic_long_sub(long i, atomic_long_t *l)
56513 {
56514 atomic_t *v = (atomic_t *)l;
56515 @@ -232,6 +343,15 @@ static inline long atomic_long_inc_retur
56516 return (long)atomic_inc_return(v);
56517 }
56518
56519 +#ifdef CONFIG_PAX_REFCOUNT
56520 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
56521 +{
56522 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56523 +
56524 + return (long)atomic_inc_return_unchecked(v);
56525 +}
56526 +#endif
56527 +
56528 static inline long atomic_long_dec_return(atomic_long_t *l)
56529 {
56530 atomic_t *v = (atomic_t *)l;
56531 @@ -255,4 +375,47 @@ static inline long atomic_long_add_unles
56532
56533 #endif /* BITS_PER_LONG == 64 */
56534
56535 +#ifdef CONFIG_PAX_REFCOUNT
56536 +static inline void pax_refcount_needs_these_functions(void)
56537 +{
56538 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
56539 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
56540 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
56541 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
56542 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
56543 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
56544 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
56545 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
56546 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
56547 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
56548 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
56549 +
56550 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
56551 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
56552 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
56553 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
56554 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
56555 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
56556 +}
56557 +#else
56558 +#define atomic_read_unchecked(v) atomic_read(v)
56559 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
56560 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
56561 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
56562 +#define atomic_inc_unchecked(v) atomic_inc(v)
56563 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
56564 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
56565 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
56566 +#define atomic_dec_unchecked(v) atomic_dec(v)
56567 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
56568 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
56569 +
56570 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
56571 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
56572 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
56573 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
56574 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
56575 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
56576 +#endif
56577 +
56578 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
56579 diff -urNp linux-2.6.32.45/include/asm-generic/bug.h linux-2.6.32.45/include/asm-generic/bug.h
56580 --- linux-2.6.32.45/include/asm-generic/bug.h 2011-07-13 17:23:04.000000000 -0400
56581 +++ linux-2.6.32.45/include/asm-generic/bug.h 2011-08-21 17:56:07.000000000 -0400
56582 @@ -105,11 +105,11 @@ extern void warn_slowpath_null(const cha
56583
56584 #else /* !CONFIG_BUG */
56585 #ifndef HAVE_ARCH_BUG
56586 -#define BUG() do {} while(0)
56587 +#define BUG() do { for (;;) ; } while(0)
56588 #endif
56589
56590 #ifndef HAVE_ARCH_BUG_ON
56591 -#define BUG_ON(condition) do { if (condition) ; } while(0)
56592 +#define BUG_ON(condition) do { if (condition) for (;;) ; } while(0)
56593 #endif
56594
56595 #ifndef HAVE_ARCH_WARN_ON
56596 diff -urNp linux-2.6.32.45/include/asm-generic/cache.h linux-2.6.32.45/include/asm-generic/cache.h
56597 --- linux-2.6.32.45/include/asm-generic/cache.h 2011-03-27 14:31:47.000000000 -0400
56598 +++ linux-2.6.32.45/include/asm-generic/cache.h 2011-07-06 19:53:33.000000000 -0400
56599 @@ -6,7 +6,7 @@
56600 * cache lines need to provide their own cache.h.
56601 */
56602
56603 -#define L1_CACHE_SHIFT 5
56604 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
56605 +#define L1_CACHE_SHIFT 5UL
56606 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
56607
56608 #endif /* __ASM_GENERIC_CACHE_H */
56609 diff -urNp linux-2.6.32.45/include/asm-generic/dma-mapping-common.h linux-2.6.32.45/include/asm-generic/dma-mapping-common.h
56610 --- linux-2.6.32.45/include/asm-generic/dma-mapping-common.h 2011-03-27 14:31:47.000000000 -0400
56611 +++ linux-2.6.32.45/include/asm-generic/dma-mapping-common.h 2011-04-17 15:56:46.000000000 -0400
56612 @@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_
56613 enum dma_data_direction dir,
56614 struct dma_attrs *attrs)
56615 {
56616 - struct dma_map_ops *ops = get_dma_ops(dev);
56617 + const struct dma_map_ops *ops = get_dma_ops(dev);
56618 dma_addr_t addr;
56619
56620 kmemcheck_mark_initialized(ptr, size);
56621 @@ -30,7 +30,7 @@ static inline void dma_unmap_single_attr
56622 enum dma_data_direction dir,
56623 struct dma_attrs *attrs)
56624 {
56625 - struct dma_map_ops *ops = get_dma_ops(dev);
56626 + const struct dma_map_ops *ops = get_dma_ops(dev);
56627
56628 BUG_ON(!valid_dma_direction(dir));
56629 if (ops->unmap_page)
56630 @@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struc
56631 int nents, enum dma_data_direction dir,
56632 struct dma_attrs *attrs)
56633 {
56634 - struct dma_map_ops *ops = get_dma_ops(dev);
56635 + const struct dma_map_ops *ops = get_dma_ops(dev);
56636 int i, ents;
56637 struct scatterlist *s;
56638
56639 @@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(st
56640 int nents, enum dma_data_direction dir,
56641 struct dma_attrs *attrs)
56642 {
56643 - struct dma_map_ops *ops = get_dma_ops(dev);
56644 + const struct dma_map_ops *ops = get_dma_ops(dev);
56645
56646 BUG_ON(!valid_dma_direction(dir));
56647 debug_dma_unmap_sg(dev, sg, nents, dir);
56648 @@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(st
56649 size_t offset, size_t size,
56650 enum dma_data_direction dir)
56651 {
56652 - struct dma_map_ops *ops = get_dma_ops(dev);
56653 + const struct dma_map_ops *ops = get_dma_ops(dev);
56654 dma_addr_t addr;
56655
56656 kmemcheck_mark_initialized(page_address(page) + offset, size);
56657 @@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(st
56658 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
56659 size_t size, enum dma_data_direction dir)
56660 {
56661 - struct dma_map_ops *ops = get_dma_ops(dev);
56662 + const struct dma_map_ops *ops = get_dma_ops(dev);
56663
56664 BUG_ON(!valid_dma_direction(dir));
56665 if (ops->unmap_page)
56666 @@ -97,7 +97,7 @@ static inline void dma_sync_single_for_c
56667 size_t size,
56668 enum dma_data_direction dir)
56669 {
56670 - struct dma_map_ops *ops = get_dma_ops(dev);
56671 + const struct dma_map_ops *ops = get_dma_ops(dev);
56672
56673 BUG_ON(!valid_dma_direction(dir));
56674 if (ops->sync_single_for_cpu)
56675 @@ -109,7 +109,7 @@ static inline void dma_sync_single_for_d
56676 dma_addr_t addr, size_t size,
56677 enum dma_data_direction dir)
56678 {
56679 - struct dma_map_ops *ops = get_dma_ops(dev);
56680 + const struct dma_map_ops *ops = get_dma_ops(dev);
56681
56682 BUG_ON(!valid_dma_direction(dir));
56683 if (ops->sync_single_for_device)
56684 @@ -123,7 +123,7 @@ static inline void dma_sync_single_range
56685 size_t size,
56686 enum dma_data_direction dir)
56687 {
56688 - struct dma_map_ops *ops = get_dma_ops(dev);
56689 + const struct dma_map_ops *ops = get_dma_ops(dev);
56690
56691 BUG_ON(!valid_dma_direction(dir));
56692 if (ops->sync_single_range_for_cpu) {
56693 @@ -140,7 +140,7 @@ static inline void dma_sync_single_range
56694 size_t size,
56695 enum dma_data_direction dir)
56696 {
56697 - struct dma_map_ops *ops = get_dma_ops(dev);
56698 + const struct dma_map_ops *ops = get_dma_ops(dev);
56699
56700 BUG_ON(!valid_dma_direction(dir));
56701 if (ops->sync_single_range_for_device) {
56702 @@ -155,7 +155,7 @@ static inline void
56703 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
56704 int nelems, enum dma_data_direction dir)
56705 {
56706 - struct dma_map_ops *ops = get_dma_ops(dev);
56707 + const struct dma_map_ops *ops = get_dma_ops(dev);
56708
56709 BUG_ON(!valid_dma_direction(dir));
56710 if (ops->sync_sg_for_cpu)
56711 @@ -167,7 +167,7 @@ static inline void
56712 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
56713 int nelems, enum dma_data_direction dir)
56714 {
56715 - struct dma_map_ops *ops = get_dma_ops(dev);
56716 + const struct dma_map_ops *ops = get_dma_ops(dev);
56717
56718 BUG_ON(!valid_dma_direction(dir));
56719 if (ops->sync_sg_for_device)
56720 diff -urNp linux-2.6.32.45/include/asm-generic/emergency-restart.h linux-2.6.32.45/include/asm-generic/emergency-restart.h
56721 --- linux-2.6.32.45/include/asm-generic/emergency-restart.h 2011-03-27 14:31:47.000000000 -0400
56722 +++ linux-2.6.32.45/include/asm-generic/emergency-restart.h 2011-08-21 19:17:17.000000000 -0400
56723 @@ -1,7 +1,7 @@
56724 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
56725 #define _ASM_GENERIC_EMERGENCY_RESTART_H
56726
56727 -static inline void machine_emergency_restart(void)
56728 +static inline __noreturn void machine_emergency_restart(void)
56729 {
56730 machine_restart(NULL);
56731 }
56732 diff -urNp linux-2.6.32.45/include/asm-generic/futex.h linux-2.6.32.45/include/asm-generic/futex.h
56733 --- linux-2.6.32.45/include/asm-generic/futex.h 2011-03-27 14:31:47.000000000 -0400
56734 +++ linux-2.6.32.45/include/asm-generic/futex.h 2011-04-17 15:56:46.000000000 -0400
56735 @@ -6,7 +6,7 @@
56736 #include <asm/errno.h>
56737
56738 static inline int
56739 -futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
56740 +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
56741 {
56742 int op = (encoded_op >> 28) & 7;
56743 int cmp = (encoded_op >> 24) & 15;
56744 @@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op,
56745 }
56746
56747 static inline int
56748 -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
56749 +futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
56750 {
56751 return -ENOSYS;
56752 }
56753 diff -urNp linux-2.6.32.45/include/asm-generic/int-l64.h linux-2.6.32.45/include/asm-generic/int-l64.h
56754 --- linux-2.6.32.45/include/asm-generic/int-l64.h 2011-03-27 14:31:47.000000000 -0400
56755 +++ linux-2.6.32.45/include/asm-generic/int-l64.h 2011-04-17 15:56:46.000000000 -0400
56756 @@ -46,6 +46,8 @@ typedef unsigned int u32;
56757 typedef signed long s64;
56758 typedef unsigned long u64;
56759
56760 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
56761 +
56762 #define S8_C(x) x
56763 #define U8_C(x) x ## U
56764 #define S16_C(x) x
56765 diff -urNp linux-2.6.32.45/include/asm-generic/int-ll64.h linux-2.6.32.45/include/asm-generic/int-ll64.h
56766 --- linux-2.6.32.45/include/asm-generic/int-ll64.h 2011-03-27 14:31:47.000000000 -0400
56767 +++ linux-2.6.32.45/include/asm-generic/int-ll64.h 2011-04-17 15:56:46.000000000 -0400
56768 @@ -51,6 +51,8 @@ typedef unsigned int u32;
56769 typedef signed long long s64;
56770 typedef unsigned long long u64;
56771
56772 +typedef unsigned long long intoverflow_t;
56773 +
56774 #define S8_C(x) x
56775 #define U8_C(x) x ## U
56776 #define S16_C(x) x
56777 diff -urNp linux-2.6.32.45/include/asm-generic/kmap_types.h linux-2.6.32.45/include/asm-generic/kmap_types.h
56778 --- linux-2.6.32.45/include/asm-generic/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
56779 +++ linux-2.6.32.45/include/asm-generic/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
56780 @@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
56781 KMAP_D(16) KM_IRQ_PTE,
56782 KMAP_D(17) KM_NMI,
56783 KMAP_D(18) KM_NMI_PTE,
56784 -KMAP_D(19) KM_TYPE_NR
56785 +KMAP_D(19) KM_CLEARPAGE,
56786 +KMAP_D(20) KM_TYPE_NR
56787 };
56788
56789 #undef KMAP_D
56790 diff -urNp linux-2.6.32.45/include/asm-generic/pgtable.h linux-2.6.32.45/include/asm-generic/pgtable.h
56791 --- linux-2.6.32.45/include/asm-generic/pgtable.h 2011-03-27 14:31:47.000000000 -0400
56792 +++ linux-2.6.32.45/include/asm-generic/pgtable.h 2011-04-17 15:56:46.000000000 -0400
56793 @@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_ar
56794 unsigned long size);
56795 #endif
56796
56797 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
56798 +static inline unsigned long pax_open_kernel(void) { return 0; }
56799 +#endif
56800 +
56801 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
56802 +static inline unsigned long pax_close_kernel(void) { return 0; }
56803 +#endif
56804 +
56805 #endif /* !__ASSEMBLY__ */
56806
56807 #endif /* _ASM_GENERIC_PGTABLE_H */
56808 diff -urNp linux-2.6.32.45/include/asm-generic/pgtable-nopmd.h linux-2.6.32.45/include/asm-generic/pgtable-nopmd.h
56809 --- linux-2.6.32.45/include/asm-generic/pgtable-nopmd.h 2011-03-27 14:31:47.000000000 -0400
56810 +++ linux-2.6.32.45/include/asm-generic/pgtable-nopmd.h 2011-04-17 15:56:46.000000000 -0400
56811 @@ -1,14 +1,19 @@
56812 #ifndef _PGTABLE_NOPMD_H
56813 #define _PGTABLE_NOPMD_H
56814
56815 -#ifndef __ASSEMBLY__
56816 -
56817 #include <asm-generic/pgtable-nopud.h>
56818
56819 -struct mm_struct;
56820 -
56821 #define __PAGETABLE_PMD_FOLDED
56822
56823 +#define PMD_SHIFT PUD_SHIFT
56824 +#define PTRS_PER_PMD 1
56825 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
56826 +#define PMD_MASK (~(PMD_SIZE-1))
56827 +
56828 +#ifndef __ASSEMBLY__
56829 +
56830 +struct mm_struct;
56831 +
56832 /*
56833 * Having the pmd type consist of a pud gets the size right, and allows
56834 * us to conceptually access the pud entry that this pmd is folded into
56835 @@ -16,11 +21,6 @@ struct mm_struct;
56836 */
56837 typedef struct { pud_t pud; } pmd_t;
56838
56839 -#define PMD_SHIFT PUD_SHIFT
56840 -#define PTRS_PER_PMD 1
56841 -#define PMD_SIZE (1UL << PMD_SHIFT)
56842 -#define PMD_MASK (~(PMD_SIZE-1))
56843 -
56844 /*
56845 * The "pud_xxx()" functions here are trivial for a folded two-level
56846 * setup: the pmd is never bad, and a pmd always exists (as it's folded
56847 diff -urNp linux-2.6.32.45/include/asm-generic/pgtable-nopud.h linux-2.6.32.45/include/asm-generic/pgtable-nopud.h
56848 --- linux-2.6.32.45/include/asm-generic/pgtable-nopud.h 2011-03-27 14:31:47.000000000 -0400
56849 +++ linux-2.6.32.45/include/asm-generic/pgtable-nopud.h 2011-04-17 15:56:46.000000000 -0400
56850 @@ -1,10 +1,15 @@
56851 #ifndef _PGTABLE_NOPUD_H
56852 #define _PGTABLE_NOPUD_H
56853
56854 -#ifndef __ASSEMBLY__
56855 -
56856 #define __PAGETABLE_PUD_FOLDED
56857
56858 +#define PUD_SHIFT PGDIR_SHIFT
56859 +#define PTRS_PER_PUD 1
56860 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
56861 +#define PUD_MASK (~(PUD_SIZE-1))
56862 +
56863 +#ifndef __ASSEMBLY__
56864 +
56865 /*
56866 * Having the pud type consist of a pgd gets the size right, and allows
56867 * us to conceptually access the pgd entry that this pud is folded into
56868 @@ -12,11 +17,6 @@
56869 */
56870 typedef struct { pgd_t pgd; } pud_t;
56871
56872 -#define PUD_SHIFT PGDIR_SHIFT
56873 -#define PTRS_PER_PUD 1
56874 -#define PUD_SIZE (1UL << PUD_SHIFT)
56875 -#define PUD_MASK (~(PUD_SIZE-1))
56876 -
56877 /*
56878 * The "pgd_xxx()" functions here are trivial for a folded two-level
56879 * setup: the pud is never bad, and a pud always exists (as it's folded
56880 diff -urNp linux-2.6.32.45/include/asm-generic/vmlinux.lds.h linux-2.6.32.45/include/asm-generic/vmlinux.lds.h
56881 --- linux-2.6.32.45/include/asm-generic/vmlinux.lds.h 2011-03-27 14:31:47.000000000 -0400
56882 +++ linux-2.6.32.45/include/asm-generic/vmlinux.lds.h 2011-04-17 15:56:46.000000000 -0400
56883 @@ -199,6 +199,7 @@
56884 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
56885 VMLINUX_SYMBOL(__start_rodata) = .; \
56886 *(.rodata) *(.rodata.*) \
56887 + *(.data.read_only) \
56888 *(__vermagic) /* Kernel version magic */ \
56889 *(__markers_strings) /* Markers: strings */ \
56890 *(__tracepoints_strings)/* Tracepoints: strings */ \
56891 @@ -656,22 +657,24 @@
56892 * section in the linker script will go there too. @phdr should have
56893 * a leading colon.
56894 *
56895 - * Note that this macros defines __per_cpu_load as an absolute symbol.
56896 + * Note that this macro defines per_cpu_load as an absolute symbol.
56897 * If there is no need to put the percpu section at a predetermined
56898 * address, use PERCPU().
56899 */
56900 #define PERCPU_VADDR(vaddr, phdr) \
56901 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
56902 - .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
56903 + per_cpu_load = .; \
56904 + .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
56905 - LOAD_OFFSET) { \
56906 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
56907 VMLINUX_SYMBOL(__per_cpu_start) = .; \
56908 *(.data.percpu.first) \
56909 - *(.data.percpu.page_aligned) \
56910 *(.data.percpu) \
56911 + . = ALIGN(PAGE_SIZE); \
56912 + *(.data.percpu.page_aligned) \
56913 *(.data.percpu.shared_aligned) \
56914 VMLINUX_SYMBOL(__per_cpu_end) = .; \
56915 } phdr \
56916 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
56917 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
56918
56919 /**
56920 * PERCPU - define output section for percpu area, simple version
56921 diff -urNp linux-2.6.32.45/include/drm/drm_crtc_helper.h linux-2.6.32.45/include/drm/drm_crtc_helper.h
56922 --- linux-2.6.32.45/include/drm/drm_crtc_helper.h 2011-03-27 14:31:47.000000000 -0400
56923 +++ linux-2.6.32.45/include/drm/drm_crtc_helper.h 2011-08-05 20:33:55.000000000 -0400
56924 @@ -64,7 +64,7 @@ struct drm_crtc_helper_funcs {
56925
56926 /* reload the current crtc LUT */
56927 void (*load_lut)(struct drm_crtc *crtc);
56928 -};
56929 +} __no_const;
56930
56931 struct drm_encoder_helper_funcs {
56932 void (*dpms)(struct drm_encoder *encoder, int mode);
56933 @@ -85,7 +85,7 @@ struct drm_encoder_helper_funcs {
56934 struct drm_connector *connector);
56935 /* disable encoder when not in use - more explicit than dpms off */
56936 void (*disable)(struct drm_encoder *encoder);
56937 -};
56938 +} __no_const;
56939
56940 struct drm_connector_helper_funcs {
56941 int (*get_modes)(struct drm_connector *connector);
56942 diff -urNp linux-2.6.32.45/include/drm/drmP.h linux-2.6.32.45/include/drm/drmP.h
56943 --- linux-2.6.32.45/include/drm/drmP.h 2011-03-27 14:31:47.000000000 -0400
56944 +++ linux-2.6.32.45/include/drm/drmP.h 2011-04-17 15:56:46.000000000 -0400
56945 @@ -71,6 +71,7 @@
56946 #include <linux/workqueue.h>
56947 #include <linux/poll.h>
56948 #include <asm/pgalloc.h>
56949 +#include <asm/local.h>
56950 #include "drm.h"
56951
56952 #include <linux/idr.h>
56953 @@ -814,7 +815,7 @@ struct drm_driver {
56954 void (*vgaarb_irq)(struct drm_device *dev, bool state);
56955
56956 /* Driver private ops for this object */
56957 - struct vm_operations_struct *gem_vm_ops;
56958 + const struct vm_operations_struct *gem_vm_ops;
56959
56960 int major;
56961 int minor;
56962 @@ -917,7 +918,7 @@ struct drm_device {
56963
56964 /** \name Usage Counters */
56965 /*@{ */
56966 - int open_count; /**< Outstanding files open */
56967 + local_t open_count; /**< Outstanding files open */
56968 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
56969 atomic_t vma_count; /**< Outstanding vma areas open */
56970 int buf_use; /**< Buffers in use -- cannot alloc */
56971 @@ -928,7 +929,7 @@ struct drm_device {
56972 /*@{ */
56973 unsigned long counters;
56974 enum drm_stat_type types[15];
56975 - atomic_t counts[15];
56976 + atomic_unchecked_t counts[15];
56977 /*@} */
56978
56979 struct list_head filelist;
56980 @@ -1016,7 +1017,7 @@ struct drm_device {
56981 struct pci_controller *hose;
56982 #endif
56983 struct drm_sg_mem *sg; /**< Scatter gather memory */
56984 - unsigned int num_crtcs; /**< Number of CRTCs on this device */
56985 + unsigned int num_crtcs; /**< Number of CRTCs on this device */
56986 void *dev_private; /**< device private data */
56987 void *mm_private;
56988 struct address_space *dev_mapping;
56989 @@ -1042,11 +1043,11 @@ struct drm_device {
56990 spinlock_t object_name_lock;
56991 struct idr object_name_idr;
56992 atomic_t object_count;
56993 - atomic_t object_memory;
56994 + atomic_unchecked_t object_memory;
56995 atomic_t pin_count;
56996 - atomic_t pin_memory;
56997 + atomic_unchecked_t pin_memory;
56998 atomic_t gtt_count;
56999 - atomic_t gtt_memory;
57000 + atomic_unchecked_t gtt_memory;
57001 uint32_t gtt_total;
57002 uint32_t invalidate_domains; /* domains pending invalidation */
57003 uint32_t flush_domains; /* domains pending flush */
57004 diff -urNp linux-2.6.32.45/include/drm/ttm/ttm_memory.h linux-2.6.32.45/include/drm/ttm/ttm_memory.h
57005 --- linux-2.6.32.45/include/drm/ttm/ttm_memory.h 2011-03-27 14:31:47.000000000 -0400
57006 +++ linux-2.6.32.45/include/drm/ttm/ttm_memory.h 2011-08-05 20:33:55.000000000 -0400
57007 @@ -47,7 +47,7 @@
57008
57009 struct ttm_mem_shrink {
57010 int (*do_shrink) (struct ttm_mem_shrink *);
57011 -};
57012 +} __no_const;
57013
57014 /**
57015 * struct ttm_mem_global - Global memory accounting structure.
57016 diff -urNp linux-2.6.32.45/include/linux/a.out.h linux-2.6.32.45/include/linux/a.out.h
57017 --- linux-2.6.32.45/include/linux/a.out.h 2011-03-27 14:31:47.000000000 -0400
57018 +++ linux-2.6.32.45/include/linux/a.out.h 2011-04-17 15:56:46.000000000 -0400
57019 @@ -39,6 +39,14 @@ enum machine_type {
57020 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
57021 };
57022
57023 +/* Constants for the N_FLAGS field */
57024 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57025 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
57026 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
57027 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
57028 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57029 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57030 +
57031 #if !defined (N_MAGIC)
57032 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
57033 #endif
57034 diff -urNp linux-2.6.32.45/include/linux/atmdev.h linux-2.6.32.45/include/linux/atmdev.h
57035 --- linux-2.6.32.45/include/linux/atmdev.h 2011-03-27 14:31:47.000000000 -0400
57036 +++ linux-2.6.32.45/include/linux/atmdev.h 2011-04-17 15:56:46.000000000 -0400
57037 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
57038 #endif
57039
57040 struct k_atm_aal_stats {
57041 -#define __HANDLE_ITEM(i) atomic_t i
57042 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
57043 __AAL_STAT_ITEMS
57044 #undef __HANDLE_ITEM
57045 };
57046 diff -urNp linux-2.6.32.45/include/linux/backlight.h linux-2.6.32.45/include/linux/backlight.h
57047 --- linux-2.6.32.45/include/linux/backlight.h 2011-03-27 14:31:47.000000000 -0400
57048 +++ linux-2.6.32.45/include/linux/backlight.h 2011-04-17 15:56:46.000000000 -0400
57049 @@ -36,18 +36,18 @@ struct backlight_device;
57050 struct fb_info;
57051
57052 struct backlight_ops {
57053 - unsigned int options;
57054 + const unsigned int options;
57055
57056 #define BL_CORE_SUSPENDRESUME (1 << 0)
57057
57058 /* Notify the backlight driver some property has changed */
57059 - int (*update_status)(struct backlight_device *);
57060 + int (* const update_status)(struct backlight_device *);
57061 /* Return the current backlight brightness (accounting for power,
57062 fb_blank etc.) */
57063 - int (*get_brightness)(struct backlight_device *);
57064 + int (* const get_brightness)(struct backlight_device *);
57065 /* Check if given framebuffer device is the one bound to this backlight;
57066 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
57067 - int (*check_fb)(struct fb_info *);
57068 + int (* const check_fb)(struct fb_info *);
57069 };
57070
57071 /* This structure defines all the properties of a backlight */
57072 @@ -86,7 +86,7 @@ struct backlight_device {
57073 registered this device has been unloaded, and if class_get_devdata()
57074 points to something in the body of that driver, it is also invalid. */
57075 struct mutex ops_lock;
57076 - struct backlight_ops *ops;
57077 + const struct backlight_ops *ops;
57078
57079 /* The framebuffer notifier block */
57080 struct notifier_block fb_notif;
57081 @@ -103,7 +103,7 @@ static inline void backlight_update_stat
57082 }
57083
57084 extern struct backlight_device *backlight_device_register(const char *name,
57085 - struct device *dev, void *devdata, struct backlight_ops *ops);
57086 + struct device *dev, void *devdata, const struct backlight_ops *ops);
57087 extern void backlight_device_unregister(struct backlight_device *bd);
57088 extern void backlight_force_update(struct backlight_device *bd,
57089 enum backlight_update_reason reason);
57090 diff -urNp linux-2.6.32.45/include/linux/binfmts.h linux-2.6.32.45/include/linux/binfmts.h
57091 --- linux-2.6.32.45/include/linux/binfmts.h 2011-04-17 17:00:52.000000000 -0400
57092 +++ linux-2.6.32.45/include/linux/binfmts.h 2011-04-17 15:56:46.000000000 -0400
57093 @@ -83,6 +83,7 @@ struct linux_binfmt {
57094 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
57095 int (*load_shlib)(struct file *);
57096 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
57097 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
57098 unsigned long min_coredump; /* minimal dump size */
57099 int hasvdso;
57100 };
57101 diff -urNp linux-2.6.32.45/include/linux/blkdev.h linux-2.6.32.45/include/linux/blkdev.h
57102 --- linux-2.6.32.45/include/linux/blkdev.h 2011-03-27 14:31:47.000000000 -0400
57103 +++ linux-2.6.32.45/include/linux/blkdev.h 2011-04-17 15:56:46.000000000 -0400
57104 @@ -1265,19 +1265,19 @@ static inline int blk_integrity_rq(struc
57105 #endif /* CONFIG_BLK_DEV_INTEGRITY */
57106
57107 struct block_device_operations {
57108 - int (*open) (struct block_device *, fmode_t);
57109 - int (*release) (struct gendisk *, fmode_t);
57110 - int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57111 - int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57112 - int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57113 - int (*direct_access) (struct block_device *, sector_t,
57114 + int (* const open) (struct block_device *, fmode_t);
57115 + int (* const release) (struct gendisk *, fmode_t);
57116 + int (* const locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57117 + int (* const ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57118 + int (* const compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57119 + int (* const direct_access) (struct block_device *, sector_t,
57120 void **, unsigned long *);
57121 - int (*media_changed) (struct gendisk *);
57122 - unsigned long long (*set_capacity) (struct gendisk *,
57123 + int (* const media_changed) (struct gendisk *);
57124 + unsigned long long (* const set_capacity) (struct gendisk *,
57125 unsigned long long);
57126 - int (*revalidate_disk) (struct gendisk *);
57127 - int (*getgeo)(struct block_device *, struct hd_geometry *);
57128 - struct module *owner;
57129 + int (* const revalidate_disk) (struct gendisk *);
57130 + int (*const getgeo)(struct block_device *, struct hd_geometry *);
57131 + struct module * const owner;
57132 };
57133
57134 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
57135 diff -urNp linux-2.6.32.45/include/linux/blktrace_api.h linux-2.6.32.45/include/linux/blktrace_api.h
57136 --- linux-2.6.32.45/include/linux/blktrace_api.h 2011-03-27 14:31:47.000000000 -0400
57137 +++ linux-2.6.32.45/include/linux/blktrace_api.h 2011-05-04 17:56:28.000000000 -0400
57138 @@ -160,7 +160,7 @@ struct blk_trace {
57139 struct dentry *dir;
57140 struct dentry *dropped_file;
57141 struct dentry *msg_file;
57142 - atomic_t dropped;
57143 + atomic_unchecked_t dropped;
57144 };
57145
57146 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
57147 diff -urNp linux-2.6.32.45/include/linux/byteorder/little_endian.h linux-2.6.32.45/include/linux/byteorder/little_endian.h
57148 --- linux-2.6.32.45/include/linux/byteorder/little_endian.h 2011-03-27 14:31:47.000000000 -0400
57149 +++ linux-2.6.32.45/include/linux/byteorder/little_endian.h 2011-04-17 15:56:46.000000000 -0400
57150 @@ -42,51 +42,51 @@
57151
57152 static inline __le64 __cpu_to_le64p(const __u64 *p)
57153 {
57154 - return (__force __le64)*p;
57155 + return (__force const __le64)*p;
57156 }
57157 static inline __u64 __le64_to_cpup(const __le64 *p)
57158 {
57159 - return (__force __u64)*p;
57160 + return (__force const __u64)*p;
57161 }
57162 static inline __le32 __cpu_to_le32p(const __u32 *p)
57163 {
57164 - return (__force __le32)*p;
57165 + return (__force const __le32)*p;
57166 }
57167 static inline __u32 __le32_to_cpup(const __le32 *p)
57168 {
57169 - return (__force __u32)*p;
57170 + return (__force const __u32)*p;
57171 }
57172 static inline __le16 __cpu_to_le16p(const __u16 *p)
57173 {
57174 - return (__force __le16)*p;
57175 + return (__force const __le16)*p;
57176 }
57177 static inline __u16 __le16_to_cpup(const __le16 *p)
57178 {
57179 - return (__force __u16)*p;
57180 + return (__force const __u16)*p;
57181 }
57182 static inline __be64 __cpu_to_be64p(const __u64 *p)
57183 {
57184 - return (__force __be64)__swab64p(p);
57185 + return (__force const __be64)__swab64p(p);
57186 }
57187 static inline __u64 __be64_to_cpup(const __be64 *p)
57188 {
57189 - return __swab64p((__u64 *)p);
57190 + return __swab64p((const __u64 *)p);
57191 }
57192 static inline __be32 __cpu_to_be32p(const __u32 *p)
57193 {
57194 - return (__force __be32)__swab32p(p);
57195 + return (__force const __be32)__swab32p(p);
57196 }
57197 static inline __u32 __be32_to_cpup(const __be32 *p)
57198 {
57199 - return __swab32p((__u32 *)p);
57200 + return __swab32p((const __u32 *)p);
57201 }
57202 static inline __be16 __cpu_to_be16p(const __u16 *p)
57203 {
57204 - return (__force __be16)__swab16p(p);
57205 + return (__force const __be16)__swab16p(p);
57206 }
57207 static inline __u16 __be16_to_cpup(const __be16 *p)
57208 {
57209 - return __swab16p((__u16 *)p);
57210 + return __swab16p((const __u16 *)p);
57211 }
57212 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
57213 #define __le64_to_cpus(x) do { (void)(x); } while (0)
57214 diff -urNp linux-2.6.32.45/include/linux/cache.h linux-2.6.32.45/include/linux/cache.h
57215 --- linux-2.6.32.45/include/linux/cache.h 2011-03-27 14:31:47.000000000 -0400
57216 +++ linux-2.6.32.45/include/linux/cache.h 2011-04-17 15:56:46.000000000 -0400
57217 @@ -16,6 +16,10 @@
57218 #define __read_mostly
57219 #endif
57220
57221 +#ifndef __read_only
57222 +#define __read_only __read_mostly
57223 +#endif
57224 +
57225 #ifndef ____cacheline_aligned
57226 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
57227 #endif
57228 diff -urNp linux-2.6.32.45/include/linux/capability.h linux-2.6.32.45/include/linux/capability.h
57229 --- linux-2.6.32.45/include/linux/capability.h 2011-03-27 14:31:47.000000000 -0400
57230 +++ linux-2.6.32.45/include/linux/capability.h 2011-04-17 15:56:46.000000000 -0400
57231 @@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff
57232 (security_real_capable_noaudit((t), (cap)) == 0)
57233
57234 extern int capable(int cap);
57235 +int capable_nolog(int cap);
57236
57237 /* audit system wants to get cap info from files as well */
57238 struct dentry;
57239 diff -urNp linux-2.6.32.45/include/linux/compiler-gcc4.h linux-2.6.32.45/include/linux/compiler-gcc4.h
57240 --- linux-2.6.32.45/include/linux/compiler-gcc4.h 2011-03-27 14:31:47.000000000 -0400
57241 +++ linux-2.6.32.45/include/linux/compiler-gcc4.h 2011-08-05 20:33:55.000000000 -0400
57242 @@ -36,4 +36,13 @@
57243 the kernel context */
57244 #define __cold __attribute__((__cold__))
57245
57246 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
57247 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
57248 +#define __bos0(ptr) __bos((ptr), 0)
57249 +#define __bos1(ptr) __bos((ptr), 1)
57250 +
57251 +#if __GNUC_MINOR__ >= 5
57252 +#define __no_const __attribute__((no_const))
57253 +#endif
57254 +
57255 #endif
57256 diff -urNp linux-2.6.32.45/include/linux/compiler.h linux-2.6.32.45/include/linux/compiler.h
57257 --- linux-2.6.32.45/include/linux/compiler.h 2011-03-27 14:31:47.000000000 -0400
57258 +++ linux-2.6.32.45/include/linux/compiler.h 2011-08-05 20:33:55.000000000 -0400
57259 @@ -247,6 +247,10 @@ void ftrace_likely_update(struct ftrace_
57260 # define __attribute_const__ /* unimplemented */
57261 #endif
57262
57263 +#ifndef __no_const
57264 +# define __no_const
57265 +#endif
57266 +
57267 /*
57268 * Tell gcc if a function is cold. The compiler will assume any path
57269 * directly leading to the call is unlikely.
57270 @@ -256,6 +260,22 @@ void ftrace_likely_update(struct ftrace_
57271 #define __cold
57272 #endif
57273
57274 +#ifndef __alloc_size
57275 +#define __alloc_size(...)
57276 +#endif
57277 +
57278 +#ifndef __bos
57279 +#define __bos(ptr, arg)
57280 +#endif
57281 +
57282 +#ifndef __bos0
57283 +#define __bos0(ptr)
57284 +#endif
57285 +
57286 +#ifndef __bos1
57287 +#define __bos1(ptr)
57288 +#endif
57289 +
57290 /* Simple shorthand for a section definition */
57291 #ifndef __section
57292 # define __section(S) __attribute__ ((__section__(#S)))
57293 @@ -278,6 +298,7 @@ void ftrace_likely_update(struct ftrace_
57294 * use is to mediate communication between process-level code and irq/NMI
57295 * handlers, all running on the same CPU.
57296 */
57297 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
57298 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
57299 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
57300
57301 #endif /* __LINUX_COMPILER_H */
57302 diff -urNp linux-2.6.32.45/include/linux/crypto.h linux-2.6.32.45/include/linux/crypto.h
57303 --- linux-2.6.32.45/include/linux/crypto.h 2011-03-27 14:31:47.000000000 -0400
57304 +++ linux-2.6.32.45/include/linux/crypto.h 2011-08-05 20:33:55.000000000 -0400
57305 @@ -394,7 +394,7 @@ struct cipher_tfm {
57306 const u8 *key, unsigned int keylen);
57307 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57308 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57309 -};
57310 +} __no_const;
57311
57312 struct hash_tfm {
57313 int (*init)(struct hash_desc *desc);
57314 @@ -415,13 +415,13 @@ struct compress_tfm {
57315 int (*cot_decompress)(struct crypto_tfm *tfm,
57316 const u8 *src, unsigned int slen,
57317 u8 *dst, unsigned int *dlen);
57318 -};
57319 +} __no_const;
57320
57321 struct rng_tfm {
57322 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
57323 unsigned int dlen);
57324 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
57325 -};
57326 +} __no_const;
57327
57328 #define crt_ablkcipher crt_u.ablkcipher
57329 #define crt_aead crt_u.aead
57330 diff -urNp linux-2.6.32.45/include/linux/dcache.h linux-2.6.32.45/include/linux/dcache.h
57331 --- linux-2.6.32.45/include/linux/dcache.h 2011-03-27 14:31:47.000000000 -0400
57332 +++ linux-2.6.32.45/include/linux/dcache.h 2011-04-23 13:34:46.000000000 -0400
57333 @@ -119,6 +119,8 @@ struct dentry {
57334 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
57335 };
57336
57337 +#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
57338 +
57339 /*
57340 * dentry->d_lock spinlock nesting subclasses:
57341 *
57342 diff -urNp linux-2.6.32.45/include/linux/decompress/mm.h linux-2.6.32.45/include/linux/decompress/mm.h
57343 --- linux-2.6.32.45/include/linux/decompress/mm.h 2011-03-27 14:31:47.000000000 -0400
57344 +++ linux-2.6.32.45/include/linux/decompress/mm.h 2011-04-17 15:56:46.000000000 -0400
57345 @@ -78,7 +78,7 @@ static void free(void *where)
57346 * warnings when not needed (indeed large_malloc / large_free are not
57347 * needed by inflate */
57348
57349 -#define malloc(a) kmalloc(a, GFP_KERNEL)
57350 +#define malloc(a) kmalloc((a), GFP_KERNEL)
57351 #define free(a) kfree(a)
57352
57353 #define large_malloc(a) vmalloc(a)
57354 diff -urNp linux-2.6.32.45/include/linux/dma-mapping.h linux-2.6.32.45/include/linux/dma-mapping.h
57355 --- linux-2.6.32.45/include/linux/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
57356 +++ linux-2.6.32.45/include/linux/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
57357 @@ -16,50 +16,50 @@ enum dma_data_direction {
57358 };
57359
57360 struct dma_map_ops {
57361 - void* (*alloc_coherent)(struct device *dev, size_t size,
57362 + void* (* const alloc_coherent)(struct device *dev, size_t size,
57363 dma_addr_t *dma_handle, gfp_t gfp);
57364 - void (*free_coherent)(struct device *dev, size_t size,
57365 + void (* const free_coherent)(struct device *dev, size_t size,
57366 void *vaddr, dma_addr_t dma_handle);
57367 - dma_addr_t (*map_page)(struct device *dev, struct page *page,
57368 + dma_addr_t (* const map_page)(struct device *dev, struct page *page,
57369 unsigned long offset, size_t size,
57370 enum dma_data_direction dir,
57371 struct dma_attrs *attrs);
57372 - void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
57373 + void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
57374 size_t size, enum dma_data_direction dir,
57375 struct dma_attrs *attrs);
57376 - int (*map_sg)(struct device *dev, struct scatterlist *sg,
57377 + int (* const map_sg)(struct device *dev, struct scatterlist *sg,
57378 int nents, enum dma_data_direction dir,
57379 struct dma_attrs *attrs);
57380 - void (*unmap_sg)(struct device *dev,
57381 + void (* const unmap_sg)(struct device *dev,
57382 struct scatterlist *sg, int nents,
57383 enum dma_data_direction dir,
57384 struct dma_attrs *attrs);
57385 - void (*sync_single_for_cpu)(struct device *dev,
57386 + void (* const sync_single_for_cpu)(struct device *dev,
57387 dma_addr_t dma_handle, size_t size,
57388 enum dma_data_direction dir);
57389 - void (*sync_single_for_device)(struct device *dev,
57390 + void (* const sync_single_for_device)(struct device *dev,
57391 dma_addr_t dma_handle, size_t size,
57392 enum dma_data_direction dir);
57393 - void (*sync_single_range_for_cpu)(struct device *dev,
57394 + void (* const sync_single_range_for_cpu)(struct device *dev,
57395 dma_addr_t dma_handle,
57396 unsigned long offset,
57397 size_t size,
57398 enum dma_data_direction dir);
57399 - void (*sync_single_range_for_device)(struct device *dev,
57400 + void (* const sync_single_range_for_device)(struct device *dev,
57401 dma_addr_t dma_handle,
57402 unsigned long offset,
57403 size_t size,
57404 enum dma_data_direction dir);
57405 - void (*sync_sg_for_cpu)(struct device *dev,
57406 + void (* const sync_sg_for_cpu)(struct device *dev,
57407 struct scatterlist *sg, int nents,
57408 enum dma_data_direction dir);
57409 - void (*sync_sg_for_device)(struct device *dev,
57410 + void (* const sync_sg_for_device)(struct device *dev,
57411 struct scatterlist *sg, int nents,
57412 enum dma_data_direction dir);
57413 - int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
57414 - int (*dma_supported)(struct device *dev, u64 mask);
57415 + int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
57416 + int (* const dma_supported)(struct device *dev, u64 mask);
57417 int (*set_dma_mask)(struct device *dev, u64 mask);
57418 - int is_phys;
57419 + const int is_phys;
57420 };
57421
57422 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
57423 diff -urNp linux-2.6.32.45/include/linux/dst.h linux-2.6.32.45/include/linux/dst.h
57424 --- linux-2.6.32.45/include/linux/dst.h 2011-03-27 14:31:47.000000000 -0400
57425 +++ linux-2.6.32.45/include/linux/dst.h 2011-04-17 15:56:46.000000000 -0400
57426 @@ -380,7 +380,7 @@ struct dst_node
57427 struct thread_pool *pool;
57428
57429 /* Transaction IDs live here */
57430 - atomic_long_t gen;
57431 + atomic_long_unchecked_t gen;
57432
57433 /*
57434 * How frequently and how many times transaction
57435 diff -urNp linux-2.6.32.45/include/linux/elf.h linux-2.6.32.45/include/linux/elf.h
57436 --- linux-2.6.32.45/include/linux/elf.h 2011-03-27 14:31:47.000000000 -0400
57437 +++ linux-2.6.32.45/include/linux/elf.h 2011-04-17 15:56:46.000000000 -0400
57438 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
57439 #define PT_GNU_EH_FRAME 0x6474e550
57440
57441 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
57442 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
57443 +
57444 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
57445 +
57446 +/* Constants for the e_flags field */
57447 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57448 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
57449 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
57450 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
57451 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57452 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57453
57454 /* These constants define the different elf file types */
57455 #define ET_NONE 0
57456 @@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
57457 #define DT_DEBUG 21
57458 #define DT_TEXTREL 22
57459 #define DT_JMPREL 23
57460 +#define DT_FLAGS 30
57461 + #define DF_TEXTREL 0x00000004
57462 #define DT_ENCODING 32
57463 #define OLD_DT_LOOS 0x60000000
57464 #define DT_LOOS 0x6000000d
57465 @@ -230,6 +243,19 @@ typedef struct elf64_hdr {
57466 #define PF_W 0x2
57467 #define PF_X 0x1
57468
57469 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
57470 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
57471 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
57472 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
57473 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
57474 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
57475 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
57476 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
57477 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
57478 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
57479 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
57480 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
57481 +
57482 typedef struct elf32_phdr{
57483 Elf32_Word p_type;
57484 Elf32_Off p_offset;
57485 @@ -322,6 +348,8 @@ typedef struct elf64_shdr {
57486 #define EI_OSABI 7
57487 #define EI_PAD 8
57488
57489 +#define EI_PAX 14
57490 +
57491 #define ELFMAG0 0x7f /* EI_MAG */
57492 #define ELFMAG1 'E'
57493 #define ELFMAG2 'L'
57494 @@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
57495 #define elf_phdr elf32_phdr
57496 #define elf_note elf32_note
57497 #define elf_addr_t Elf32_Off
57498 +#define elf_dyn Elf32_Dyn
57499
57500 #else
57501
57502 @@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
57503 #define elf_phdr elf64_phdr
57504 #define elf_note elf64_note
57505 #define elf_addr_t Elf64_Off
57506 +#define elf_dyn Elf64_Dyn
57507
57508 #endif
57509
57510 diff -urNp linux-2.6.32.45/include/linux/fscache-cache.h linux-2.6.32.45/include/linux/fscache-cache.h
57511 --- linux-2.6.32.45/include/linux/fscache-cache.h 2011-03-27 14:31:47.000000000 -0400
57512 +++ linux-2.6.32.45/include/linux/fscache-cache.h 2011-05-04 17:56:28.000000000 -0400
57513 @@ -116,7 +116,7 @@ struct fscache_operation {
57514 #endif
57515 };
57516
57517 -extern atomic_t fscache_op_debug_id;
57518 +extern atomic_unchecked_t fscache_op_debug_id;
57519 extern const struct slow_work_ops fscache_op_slow_work_ops;
57520
57521 extern void fscache_enqueue_operation(struct fscache_operation *);
57522 @@ -134,7 +134,7 @@ static inline void fscache_operation_ini
57523 fscache_operation_release_t release)
57524 {
57525 atomic_set(&op->usage, 1);
57526 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
57527 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
57528 op->release = release;
57529 INIT_LIST_HEAD(&op->pend_link);
57530 fscache_set_op_state(op, "Init");
57531 diff -urNp linux-2.6.32.45/include/linux/fs.h linux-2.6.32.45/include/linux/fs.h
57532 --- linux-2.6.32.45/include/linux/fs.h 2011-07-13 17:23:04.000000000 -0400
57533 +++ linux-2.6.32.45/include/linux/fs.h 2011-08-23 21:22:32.000000000 -0400
57534 @@ -90,6 +90,11 @@ struct inodes_stat_t {
57535 /* Expect random access pattern */
57536 #define FMODE_RANDOM ((__force fmode_t)4096)
57537
57538 +/* Hack for grsec so as not to require read permission simply to execute
57539 + * a binary
57540 + */
57541 +#define FMODE_GREXEC ((__force fmode_t)0x2000000)
57542 +
57543 /*
57544 * The below are the various read and write types that we support. Some of
57545 * them include behavioral modifiers that send information down to the
57546 @@ -568,41 +573,41 @@ typedef int (*read_actor_t)(read_descrip
57547 unsigned long, unsigned long);
57548
57549 struct address_space_operations {
57550 - int (*writepage)(struct page *page, struct writeback_control *wbc);
57551 - int (*readpage)(struct file *, struct page *);
57552 - void (*sync_page)(struct page *);
57553 + int (* const writepage)(struct page *page, struct writeback_control *wbc);
57554 + int (* const readpage)(struct file *, struct page *);
57555 + void (* const sync_page)(struct page *);
57556
57557 /* Write back some dirty pages from this mapping. */
57558 - int (*writepages)(struct address_space *, struct writeback_control *);
57559 + int (* const writepages)(struct address_space *, struct writeback_control *);
57560
57561 /* Set a page dirty. Return true if this dirtied it */
57562 - int (*set_page_dirty)(struct page *page);
57563 + int (* const set_page_dirty)(struct page *page);
57564
57565 - int (*readpages)(struct file *filp, struct address_space *mapping,
57566 + int (* const readpages)(struct file *filp, struct address_space *mapping,
57567 struct list_head *pages, unsigned nr_pages);
57568
57569 - int (*write_begin)(struct file *, struct address_space *mapping,
57570 + int (* const write_begin)(struct file *, struct address_space *mapping,
57571 loff_t pos, unsigned len, unsigned flags,
57572 struct page **pagep, void **fsdata);
57573 - int (*write_end)(struct file *, struct address_space *mapping,
57574 + int (* const write_end)(struct file *, struct address_space *mapping,
57575 loff_t pos, unsigned len, unsigned copied,
57576 struct page *page, void *fsdata);
57577
57578 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
57579 - sector_t (*bmap)(struct address_space *, sector_t);
57580 - void (*invalidatepage) (struct page *, unsigned long);
57581 - int (*releasepage) (struct page *, gfp_t);
57582 - ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
57583 + sector_t (* const bmap)(struct address_space *, sector_t);
57584 + void (* const invalidatepage) (struct page *, unsigned long);
57585 + int (* const releasepage) (struct page *, gfp_t);
57586 + ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
57587 loff_t offset, unsigned long nr_segs);
57588 - int (*get_xip_mem)(struct address_space *, pgoff_t, int,
57589 + int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
57590 void **, unsigned long *);
57591 /* migrate the contents of a page to the specified target */
57592 - int (*migratepage) (struct address_space *,
57593 + int (* const migratepage) (struct address_space *,
57594 struct page *, struct page *);
57595 - int (*launder_page) (struct page *);
57596 - int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
57597 + int (* const launder_page) (struct page *);
57598 + int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
57599 unsigned long);
57600 - int (*error_remove_page)(struct address_space *, struct page *);
57601 + int (* const error_remove_page)(struct address_space *, struct page *);
57602 };
57603
57604 /*
57605 @@ -1031,19 +1036,19 @@ static inline int file_check_writeable(s
57606 typedef struct files_struct *fl_owner_t;
57607
57608 struct file_lock_operations {
57609 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
57610 - void (*fl_release_private)(struct file_lock *);
57611 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
57612 + void (* const fl_release_private)(struct file_lock *);
57613 };
57614
57615 struct lock_manager_operations {
57616 - int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
57617 - void (*fl_notify)(struct file_lock *); /* unblock callback */
57618 - int (*fl_grant)(struct file_lock *, struct file_lock *, int);
57619 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
57620 - void (*fl_release_private)(struct file_lock *);
57621 - void (*fl_break)(struct file_lock *);
57622 - int (*fl_mylease)(struct file_lock *, struct file_lock *);
57623 - int (*fl_change)(struct file_lock **, int);
57624 + int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
57625 + void (* const fl_notify)(struct file_lock *); /* unblock callback */
57626 + int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
57627 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
57628 + void (* const fl_release_private)(struct file_lock *);
57629 + void (* const fl_break)(struct file_lock *);
57630 + int (* const fl_mylease)(struct file_lock *, struct file_lock *);
57631 + int (* const fl_change)(struct file_lock **, int);
57632 };
57633
57634 struct lock_manager {
57635 @@ -1442,7 +1447,7 @@ struct fiemap_extent_info {
57636 unsigned int fi_flags; /* Flags as passed from user */
57637 unsigned int fi_extents_mapped; /* Number of mapped extents */
57638 unsigned int fi_extents_max; /* Size of fiemap_extent array */
57639 - struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
57640 + struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
57641 * array */
57642 };
57643 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
57644 @@ -1486,7 +1491,7 @@ struct block_device_operations;
57645 * can be called without the big kernel lock held in all filesystems.
57646 */
57647 struct file_operations {
57648 - struct module *owner;
57649 + struct module * const owner;
57650 loff_t (*llseek) (struct file *, loff_t, int);
57651 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
57652 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
57653 @@ -1513,6 +1518,7 @@ struct file_operations {
57654 ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
57655 int (*setlease)(struct file *, long, struct file_lock **);
57656 };
57657 +typedef struct file_operations __no_const file_operations_no_const;
57658
57659 struct inode_operations {
57660 int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
57661 @@ -1559,30 +1565,30 @@ extern ssize_t vfs_writev(struct file *,
57662 unsigned long, loff_t *);
57663
57664 struct super_operations {
57665 - struct inode *(*alloc_inode)(struct super_block *sb);
57666 - void (*destroy_inode)(struct inode *);
57667 + struct inode *(* const alloc_inode)(struct super_block *sb);
57668 + void (* const destroy_inode)(struct inode *);
57669
57670 - void (*dirty_inode) (struct inode *);
57671 - int (*write_inode) (struct inode *, int);
57672 - void (*drop_inode) (struct inode *);
57673 - void (*delete_inode) (struct inode *);
57674 - void (*put_super) (struct super_block *);
57675 - void (*write_super) (struct super_block *);
57676 - int (*sync_fs)(struct super_block *sb, int wait);
57677 - int (*freeze_fs) (struct super_block *);
57678 - int (*unfreeze_fs) (struct super_block *);
57679 - int (*statfs) (struct dentry *, struct kstatfs *);
57680 - int (*remount_fs) (struct super_block *, int *, char *);
57681 - void (*clear_inode) (struct inode *);
57682 - void (*umount_begin) (struct super_block *);
57683 + void (* const dirty_inode) (struct inode *);
57684 + int (* const write_inode) (struct inode *, int);
57685 + void (* const drop_inode) (struct inode *);
57686 + void (* const delete_inode) (struct inode *);
57687 + void (* const put_super) (struct super_block *);
57688 + void (* const write_super) (struct super_block *);
57689 + int (* const sync_fs)(struct super_block *sb, int wait);
57690 + int (* const freeze_fs) (struct super_block *);
57691 + int (* const unfreeze_fs) (struct super_block *);
57692 + int (* const statfs) (struct dentry *, struct kstatfs *);
57693 + int (* const remount_fs) (struct super_block *, int *, char *);
57694 + void (* const clear_inode) (struct inode *);
57695 + void (* const umount_begin) (struct super_block *);
57696
57697 - int (*show_options)(struct seq_file *, struct vfsmount *);
57698 - int (*show_stats)(struct seq_file *, struct vfsmount *);
57699 + int (* const show_options)(struct seq_file *, struct vfsmount *);
57700 + int (* const show_stats)(struct seq_file *, struct vfsmount *);
57701 #ifdef CONFIG_QUOTA
57702 - ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
57703 - ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
57704 + ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
57705 + ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
57706 #endif
57707 - int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
57708 + int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
57709 };
57710
57711 /*
57712 diff -urNp linux-2.6.32.45/include/linux/fs_struct.h linux-2.6.32.45/include/linux/fs_struct.h
57713 --- linux-2.6.32.45/include/linux/fs_struct.h 2011-03-27 14:31:47.000000000 -0400
57714 +++ linux-2.6.32.45/include/linux/fs_struct.h 2011-04-17 15:56:46.000000000 -0400
57715 @@ -4,7 +4,7 @@
57716 #include <linux/path.h>
57717
57718 struct fs_struct {
57719 - int users;
57720 + atomic_t users;
57721 rwlock_t lock;
57722 int umask;
57723 int in_exec;
57724 diff -urNp linux-2.6.32.45/include/linux/ftrace_event.h linux-2.6.32.45/include/linux/ftrace_event.h
57725 --- linux-2.6.32.45/include/linux/ftrace_event.h 2011-03-27 14:31:47.000000000 -0400
57726 +++ linux-2.6.32.45/include/linux/ftrace_event.h 2011-05-04 17:56:28.000000000 -0400
57727 @@ -163,7 +163,7 @@ extern int trace_define_field(struct ftr
57728 int filter_type);
57729 extern int trace_define_common_fields(struct ftrace_event_call *call);
57730
57731 -#define is_signed_type(type) (((type)(-1)) < 0)
57732 +#define is_signed_type(type) (((type)(-1)) < (type)1)
57733
57734 int trace_set_clr_event(const char *system, const char *event, int set);
57735
57736 diff -urNp linux-2.6.32.45/include/linux/genhd.h linux-2.6.32.45/include/linux/genhd.h
57737 --- linux-2.6.32.45/include/linux/genhd.h 2011-03-27 14:31:47.000000000 -0400
57738 +++ linux-2.6.32.45/include/linux/genhd.h 2011-04-17 15:56:46.000000000 -0400
57739 @@ -161,7 +161,7 @@ struct gendisk {
57740
57741 struct timer_rand_state *random;
57742
57743 - atomic_t sync_io; /* RAID */
57744 + atomic_unchecked_t sync_io; /* RAID */
57745 struct work_struct async_notify;
57746 #ifdef CONFIG_BLK_DEV_INTEGRITY
57747 struct blk_integrity *integrity;
57748 diff -urNp linux-2.6.32.45/include/linux/gracl.h linux-2.6.32.45/include/linux/gracl.h
57749 --- linux-2.6.32.45/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
57750 +++ linux-2.6.32.45/include/linux/gracl.h 2011-04-17 15:56:46.000000000 -0400
57751 @@ -0,0 +1,317 @@
57752 +#ifndef GR_ACL_H
57753 +#define GR_ACL_H
57754 +
57755 +#include <linux/grdefs.h>
57756 +#include <linux/resource.h>
57757 +#include <linux/capability.h>
57758 +#include <linux/dcache.h>
57759 +#include <asm/resource.h>
57760 +
57761 +/* Major status information */
57762 +
57763 +#define GR_VERSION "grsecurity 2.2.2"
57764 +#define GRSECURITY_VERSION 0x2202
57765 +
57766 +enum {
57767 + GR_SHUTDOWN = 0,
57768 + GR_ENABLE = 1,
57769 + GR_SPROLE = 2,
57770 + GR_RELOAD = 3,
57771 + GR_SEGVMOD = 4,
57772 + GR_STATUS = 5,
57773 + GR_UNSPROLE = 6,
57774 + GR_PASSSET = 7,
57775 + GR_SPROLEPAM = 8,
57776 +};
57777 +
57778 +/* Password setup definitions
57779 + * kernel/grhash.c */
57780 +enum {
57781 + GR_PW_LEN = 128,
57782 + GR_SALT_LEN = 16,
57783 + GR_SHA_LEN = 32,
57784 +};
57785 +
57786 +enum {
57787 + GR_SPROLE_LEN = 64,
57788 +};
57789 +
57790 +enum {
57791 + GR_NO_GLOB = 0,
57792 + GR_REG_GLOB,
57793 + GR_CREATE_GLOB
57794 +};
57795 +
57796 +#define GR_NLIMITS 32
57797 +
57798 +/* Begin Data Structures */
57799 +
57800 +struct sprole_pw {
57801 + unsigned char *rolename;
57802 + unsigned char salt[GR_SALT_LEN];
57803 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
57804 +};
57805 +
57806 +struct name_entry {
57807 + __u32 key;
57808 + ino_t inode;
57809 + dev_t device;
57810 + char *name;
57811 + __u16 len;
57812 + __u8 deleted;
57813 + struct name_entry *prev;
57814 + struct name_entry *next;
57815 +};
57816 +
57817 +struct inodev_entry {
57818 + struct name_entry *nentry;
57819 + struct inodev_entry *prev;
57820 + struct inodev_entry *next;
57821 +};
57822 +
57823 +struct acl_role_db {
57824 + struct acl_role_label **r_hash;
57825 + __u32 r_size;
57826 +};
57827 +
57828 +struct inodev_db {
57829 + struct inodev_entry **i_hash;
57830 + __u32 i_size;
57831 +};
57832 +
57833 +struct name_db {
57834 + struct name_entry **n_hash;
57835 + __u32 n_size;
57836 +};
57837 +
57838 +struct crash_uid {
57839 + uid_t uid;
57840 + unsigned long expires;
57841 +};
57842 +
57843 +struct gr_hash_struct {
57844 + void **table;
57845 + void **nametable;
57846 + void *first;
57847 + __u32 table_size;
57848 + __u32 used_size;
57849 + int type;
57850 +};
57851 +
57852 +/* Userspace Grsecurity ACL data structures */
57853 +
57854 +struct acl_subject_label {
57855 + char *filename;
57856 + ino_t inode;
57857 + dev_t device;
57858 + __u32 mode;
57859 + kernel_cap_t cap_mask;
57860 + kernel_cap_t cap_lower;
57861 + kernel_cap_t cap_invert_audit;
57862 +
57863 + struct rlimit res[GR_NLIMITS];
57864 + __u32 resmask;
57865 +
57866 + __u8 user_trans_type;
57867 + __u8 group_trans_type;
57868 + uid_t *user_transitions;
57869 + gid_t *group_transitions;
57870 + __u16 user_trans_num;
57871 + __u16 group_trans_num;
57872 +
57873 + __u32 sock_families[2];
57874 + __u32 ip_proto[8];
57875 + __u32 ip_type;
57876 + struct acl_ip_label **ips;
57877 + __u32 ip_num;
57878 + __u32 inaddr_any_override;
57879 +
57880 + __u32 crashes;
57881 + unsigned long expires;
57882 +
57883 + struct acl_subject_label *parent_subject;
57884 + struct gr_hash_struct *hash;
57885 + struct acl_subject_label *prev;
57886 + struct acl_subject_label *next;
57887 +
57888 + struct acl_object_label **obj_hash;
57889 + __u32 obj_hash_size;
57890 + __u16 pax_flags;
57891 +};
57892 +
57893 +struct role_allowed_ip {
57894 + __u32 addr;
57895 + __u32 netmask;
57896 +
57897 + struct role_allowed_ip *prev;
57898 + struct role_allowed_ip *next;
57899 +};
57900 +
57901 +struct role_transition {
57902 + char *rolename;
57903 +
57904 + struct role_transition *prev;
57905 + struct role_transition *next;
57906 +};
57907 +
57908 +struct acl_role_label {
57909 + char *rolename;
57910 + uid_t uidgid;
57911 + __u16 roletype;
57912 +
57913 + __u16 auth_attempts;
57914 + unsigned long expires;
57915 +
57916 + struct acl_subject_label *root_label;
57917 + struct gr_hash_struct *hash;
57918 +
57919 + struct acl_role_label *prev;
57920 + struct acl_role_label *next;
57921 +
57922 + struct role_transition *transitions;
57923 + struct role_allowed_ip *allowed_ips;
57924 + uid_t *domain_children;
57925 + __u16 domain_child_num;
57926 +
57927 + struct acl_subject_label **subj_hash;
57928 + __u32 subj_hash_size;
57929 +};
57930 +
57931 +struct user_acl_role_db {
57932 + struct acl_role_label **r_table;
57933 + __u32 num_pointers; /* Number of allocations to track */
57934 + __u32 num_roles; /* Number of roles */
57935 + __u32 num_domain_children; /* Number of domain children */
57936 + __u32 num_subjects; /* Number of subjects */
57937 + __u32 num_objects; /* Number of objects */
57938 +};
57939 +
57940 +struct acl_object_label {
57941 + char *filename;
57942 + ino_t inode;
57943 + dev_t device;
57944 + __u32 mode;
57945 +
57946 + struct acl_subject_label *nested;
57947 + struct acl_object_label *globbed;
57948 +
57949 + /* next two structures not used */
57950 +
57951 + struct acl_object_label *prev;
57952 + struct acl_object_label *next;
57953 +};
57954 +
57955 +struct acl_ip_label {
57956 + char *iface;
57957 + __u32 addr;
57958 + __u32 netmask;
57959 + __u16 low, high;
57960 + __u8 mode;
57961 + __u32 type;
57962 + __u32 proto[8];
57963 +
57964 + /* next two structures not used */
57965 +
57966 + struct acl_ip_label *prev;
57967 + struct acl_ip_label *next;
57968 +};
57969 +
57970 +struct gr_arg {
57971 + struct user_acl_role_db role_db;
57972 + unsigned char pw[GR_PW_LEN];
57973 + unsigned char salt[GR_SALT_LEN];
57974 + unsigned char sum[GR_SHA_LEN];
57975 + unsigned char sp_role[GR_SPROLE_LEN];
57976 + struct sprole_pw *sprole_pws;
57977 + dev_t segv_device;
57978 + ino_t segv_inode;
57979 + uid_t segv_uid;
57980 + __u16 num_sprole_pws;
57981 + __u16 mode;
57982 +};
57983 +
57984 +struct gr_arg_wrapper {
57985 + struct gr_arg *arg;
57986 + __u32 version;
57987 + __u32 size;
57988 +};
57989 +
57990 +struct subject_map {
57991 + struct acl_subject_label *user;
57992 + struct acl_subject_label *kernel;
57993 + struct subject_map *prev;
57994 + struct subject_map *next;
57995 +};
57996 +
57997 +struct acl_subj_map_db {
57998 + struct subject_map **s_hash;
57999 + __u32 s_size;
58000 +};
58001 +
58002 +/* End Data Structures Section */
58003 +
58004 +/* Hash functions generated by empirical testing by Brad Spengler
58005 + Makes good use of the low bits of the inode. Generally 0-1 times
58006 + in loop for successful match. 0-3 for unsuccessful match.
58007 + Shift/add algorithm with modulus of table size and an XOR*/
58008 +
58009 +static __inline__ unsigned int
58010 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
58011 +{
58012 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
58013 +}
58014 +
58015 + static __inline__ unsigned int
58016 +shash(const struct acl_subject_label *userp, const unsigned int sz)
58017 +{
58018 + return ((const unsigned long)userp % sz);
58019 +}
58020 +
58021 +static __inline__ unsigned int
58022 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
58023 +{
58024 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
58025 +}
58026 +
58027 +static __inline__ unsigned int
58028 +nhash(const char *name, const __u16 len, const unsigned int sz)
58029 +{
58030 + return full_name_hash((const unsigned char *)name, len) % sz;
58031 +}
58032 +
58033 +#define FOR_EACH_ROLE_START(role) \
58034 + role = role_list; \
58035 + while (role) {
58036 +
58037 +#define FOR_EACH_ROLE_END(role) \
58038 + role = role->prev; \
58039 + }
58040 +
58041 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
58042 + subj = NULL; \
58043 + iter = 0; \
58044 + while (iter < role->subj_hash_size) { \
58045 + if (subj == NULL) \
58046 + subj = role->subj_hash[iter]; \
58047 + if (subj == NULL) { \
58048 + iter++; \
58049 + continue; \
58050 + }
58051 +
58052 +#define FOR_EACH_SUBJECT_END(subj,iter) \
58053 + subj = subj->next; \
58054 + if (subj == NULL) \
58055 + iter++; \
58056 + }
58057 +
58058 +
58059 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
58060 + subj = role->hash->first; \
58061 + while (subj != NULL) {
58062 +
58063 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
58064 + subj = subj->next; \
58065 + }
58066 +
58067 +#endif
58068 +
58069 diff -urNp linux-2.6.32.45/include/linux/gralloc.h linux-2.6.32.45/include/linux/gralloc.h
58070 --- linux-2.6.32.45/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
58071 +++ linux-2.6.32.45/include/linux/gralloc.h 2011-04-17 15:56:46.000000000 -0400
58072 @@ -0,0 +1,9 @@
58073 +#ifndef __GRALLOC_H
58074 +#define __GRALLOC_H
58075 +
58076 +void acl_free_all(void);
58077 +int acl_alloc_stack_init(unsigned long size);
58078 +void *acl_alloc(unsigned long len);
58079 +void *acl_alloc_num(unsigned long num, unsigned long len);
58080 +
58081 +#endif
58082 diff -urNp linux-2.6.32.45/include/linux/grdefs.h linux-2.6.32.45/include/linux/grdefs.h
58083 --- linux-2.6.32.45/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
58084 +++ linux-2.6.32.45/include/linux/grdefs.h 2011-06-11 16:20:26.000000000 -0400
58085 @@ -0,0 +1,140 @@
58086 +#ifndef GRDEFS_H
58087 +#define GRDEFS_H
58088 +
58089 +/* Begin grsecurity status declarations */
58090 +
58091 +enum {
58092 + GR_READY = 0x01,
58093 + GR_STATUS_INIT = 0x00 // disabled state
58094 +};
58095 +
58096 +/* Begin ACL declarations */
58097 +
58098 +/* Role flags */
58099 +
58100 +enum {
58101 + GR_ROLE_USER = 0x0001,
58102 + GR_ROLE_GROUP = 0x0002,
58103 + GR_ROLE_DEFAULT = 0x0004,
58104 + GR_ROLE_SPECIAL = 0x0008,
58105 + GR_ROLE_AUTH = 0x0010,
58106 + GR_ROLE_NOPW = 0x0020,
58107 + GR_ROLE_GOD = 0x0040,
58108 + GR_ROLE_LEARN = 0x0080,
58109 + GR_ROLE_TPE = 0x0100,
58110 + GR_ROLE_DOMAIN = 0x0200,
58111 + GR_ROLE_PAM = 0x0400,
58112 + GR_ROLE_PERSIST = 0x800
58113 +};
58114 +
58115 +/* ACL Subject and Object mode flags */
58116 +enum {
58117 + GR_DELETED = 0x80000000
58118 +};
58119 +
58120 +/* ACL Object-only mode flags */
58121 +enum {
58122 + GR_READ = 0x00000001,
58123 + GR_APPEND = 0x00000002,
58124 + GR_WRITE = 0x00000004,
58125 + GR_EXEC = 0x00000008,
58126 + GR_FIND = 0x00000010,
58127 + GR_INHERIT = 0x00000020,
58128 + GR_SETID = 0x00000040,
58129 + GR_CREATE = 0x00000080,
58130 + GR_DELETE = 0x00000100,
58131 + GR_LINK = 0x00000200,
58132 + GR_AUDIT_READ = 0x00000400,
58133 + GR_AUDIT_APPEND = 0x00000800,
58134 + GR_AUDIT_WRITE = 0x00001000,
58135 + GR_AUDIT_EXEC = 0x00002000,
58136 + GR_AUDIT_FIND = 0x00004000,
58137 + GR_AUDIT_INHERIT= 0x00008000,
58138 + GR_AUDIT_SETID = 0x00010000,
58139 + GR_AUDIT_CREATE = 0x00020000,
58140 + GR_AUDIT_DELETE = 0x00040000,
58141 + GR_AUDIT_LINK = 0x00080000,
58142 + GR_PTRACERD = 0x00100000,
58143 + GR_NOPTRACE = 0x00200000,
58144 + GR_SUPPRESS = 0x00400000,
58145 + GR_NOLEARN = 0x00800000,
58146 + GR_INIT_TRANSFER= 0x01000000
58147 +};
58148 +
58149 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
58150 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
58151 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
58152 +
58153 +/* ACL subject-only mode flags */
58154 +enum {
58155 + GR_KILL = 0x00000001,
58156 + GR_VIEW = 0x00000002,
58157 + GR_PROTECTED = 0x00000004,
58158 + GR_LEARN = 0x00000008,
58159 + GR_OVERRIDE = 0x00000010,
58160 + /* just a placeholder, this mode is only used in userspace */
58161 + GR_DUMMY = 0x00000020,
58162 + GR_PROTSHM = 0x00000040,
58163 + GR_KILLPROC = 0x00000080,
58164 + GR_KILLIPPROC = 0x00000100,
58165 + /* just a placeholder, this mode is only used in userspace */
58166 + GR_NOTROJAN = 0x00000200,
58167 + GR_PROTPROCFD = 0x00000400,
58168 + GR_PROCACCT = 0x00000800,
58169 + GR_RELAXPTRACE = 0x00001000,
58170 + GR_NESTED = 0x00002000,
58171 + GR_INHERITLEARN = 0x00004000,
58172 + GR_PROCFIND = 0x00008000,
58173 + GR_POVERRIDE = 0x00010000,
58174 + GR_KERNELAUTH = 0x00020000,
58175 + GR_ATSECURE = 0x00040000,
58176 + GR_SHMEXEC = 0x00080000
58177 +};
58178 +
58179 +enum {
58180 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
58181 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
58182 + GR_PAX_ENABLE_MPROTECT = 0x0004,
58183 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
58184 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
58185 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
58186 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
58187 + GR_PAX_DISABLE_MPROTECT = 0x0400,
58188 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
58189 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
58190 +};
58191 +
58192 +enum {
58193 + GR_ID_USER = 0x01,
58194 + GR_ID_GROUP = 0x02,
58195 +};
58196 +
58197 +enum {
58198 + GR_ID_ALLOW = 0x01,
58199 + GR_ID_DENY = 0x02,
58200 +};
58201 +
58202 +#define GR_CRASH_RES 31
58203 +#define GR_UIDTABLE_MAX 500
58204 +
58205 +/* begin resource learning section */
58206 +enum {
58207 + GR_RLIM_CPU_BUMP = 60,
58208 + GR_RLIM_FSIZE_BUMP = 50000,
58209 + GR_RLIM_DATA_BUMP = 10000,
58210 + GR_RLIM_STACK_BUMP = 1000,
58211 + GR_RLIM_CORE_BUMP = 10000,
58212 + GR_RLIM_RSS_BUMP = 500000,
58213 + GR_RLIM_NPROC_BUMP = 1,
58214 + GR_RLIM_NOFILE_BUMP = 5,
58215 + GR_RLIM_MEMLOCK_BUMP = 50000,
58216 + GR_RLIM_AS_BUMP = 500000,
58217 + GR_RLIM_LOCKS_BUMP = 2,
58218 + GR_RLIM_SIGPENDING_BUMP = 5,
58219 + GR_RLIM_MSGQUEUE_BUMP = 10000,
58220 + GR_RLIM_NICE_BUMP = 1,
58221 + GR_RLIM_RTPRIO_BUMP = 1,
58222 + GR_RLIM_RTTIME_BUMP = 1000000
58223 +};
58224 +
58225 +#endif
58226 diff -urNp linux-2.6.32.45/include/linux/grinternal.h linux-2.6.32.45/include/linux/grinternal.h
58227 --- linux-2.6.32.45/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
58228 +++ linux-2.6.32.45/include/linux/grinternal.h 2011-08-11 19:58:37.000000000 -0400
58229 @@ -0,0 +1,217 @@
58230 +#ifndef __GRINTERNAL_H
58231 +#define __GRINTERNAL_H
58232 +
58233 +#ifdef CONFIG_GRKERNSEC
58234 +
58235 +#include <linux/fs.h>
58236 +#include <linux/mnt_namespace.h>
58237 +#include <linux/nsproxy.h>
58238 +#include <linux/gracl.h>
58239 +#include <linux/grdefs.h>
58240 +#include <linux/grmsg.h>
58241 +
58242 +void gr_add_learn_entry(const char *fmt, ...)
58243 + __attribute__ ((format (printf, 1, 2)));
58244 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
58245 + const struct vfsmount *mnt);
58246 +__u32 gr_check_create(const struct dentry *new_dentry,
58247 + const struct dentry *parent,
58248 + const struct vfsmount *mnt, const __u32 mode);
58249 +int gr_check_protected_task(const struct task_struct *task);
58250 +__u32 to_gr_audit(const __u32 reqmode);
58251 +int gr_set_acls(const int type);
58252 +int gr_apply_subject_to_task(struct task_struct *task);
58253 +int gr_acl_is_enabled(void);
58254 +char gr_roletype_to_char(void);
58255 +
58256 +void gr_handle_alertkill(struct task_struct *task);
58257 +char *gr_to_filename(const struct dentry *dentry,
58258 + const struct vfsmount *mnt);
58259 +char *gr_to_filename1(const struct dentry *dentry,
58260 + const struct vfsmount *mnt);
58261 +char *gr_to_filename2(const struct dentry *dentry,
58262 + const struct vfsmount *mnt);
58263 +char *gr_to_filename3(const struct dentry *dentry,
58264 + const struct vfsmount *mnt);
58265 +
58266 +extern int grsec_enable_harden_ptrace;
58267 +extern int grsec_enable_link;
58268 +extern int grsec_enable_fifo;
58269 +extern int grsec_enable_shm;
58270 +extern int grsec_enable_execlog;
58271 +extern int grsec_enable_signal;
58272 +extern int grsec_enable_audit_ptrace;
58273 +extern int grsec_enable_forkfail;
58274 +extern int grsec_enable_time;
58275 +extern int grsec_enable_rofs;
58276 +extern int grsec_enable_chroot_shmat;
58277 +extern int grsec_enable_chroot_mount;
58278 +extern int grsec_enable_chroot_double;
58279 +extern int grsec_enable_chroot_pivot;
58280 +extern int grsec_enable_chroot_chdir;
58281 +extern int grsec_enable_chroot_chmod;
58282 +extern int grsec_enable_chroot_mknod;
58283 +extern int grsec_enable_chroot_fchdir;
58284 +extern int grsec_enable_chroot_nice;
58285 +extern int grsec_enable_chroot_execlog;
58286 +extern int grsec_enable_chroot_caps;
58287 +extern int grsec_enable_chroot_sysctl;
58288 +extern int grsec_enable_chroot_unix;
58289 +extern int grsec_enable_tpe;
58290 +extern int grsec_tpe_gid;
58291 +extern int grsec_enable_tpe_all;
58292 +extern int grsec_enable_tpe_invert;
58293 +extern int grsec_enable_socket_all;
58294 +extern int grsec_socket_all_gid;
58295 +extern int grsec_enable_socket_client;
58296 +extern int grsec_socket_client_gid;
58297 +extern int grsec_enable_socket_server;
58298 +extern int grsec_socket_server_gid;
58299 +extern int grsec_audit_gid;
58300 +extern int grsec_enable_group;
58301 +extern int grsec_enable_audit_textrel;
58302 +extern int grsec_enable_log_rwxmaps;
58303 +extern int grsec_enable_mount;
58304 +extern int grsec_enable_chdir;
58305 +extern int grsec_resource_logging;
58306 +extern int grsec_enable_blackhole;
58307 +extern int grsec_lastack_retries;
58308 +extern int grsec_enable_brute;
58309 +extern int grsec_lock;
58310 +
58311 +extern spinlock_t grsec_alert_lock;
58312 +extern unsigned long grsec_alert_wtime;
58313 +extern unsigned long grsec_alert_fyet;
58314 +
58315 +extern spinlock_t grsec_audit_lock;
58316 +
58317 +extern rwlock_t grsec_exec_file_lock;
58318 +
58319 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
58320 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
58321 + (tsk)->exec_file->f_vfsmnt) : "/")
58322 +
58323 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
58324 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
58325 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58326 +
58327 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
58328 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
58329 + (tsk)->exec_file->f_vfsmnt) : "/")
58330 +
58331 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
58332 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
58333 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58334 +
58335 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
58336 +
58337 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
58338 +
58339 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
58340 + (task)->pid, (cred)->uid, \
58341 + (cred)->euid, (cred)->gid, (cred)->egid, \
58342 + gr_parent_task_fullpath(task), \
58343 + (task)->real_parent->comm, (task)->real_parent->pid, \
58344 + (pcred)->uid, (pcred)->euid, \
58345 + (pcred)->gid, (pcred)->egid
58346 +
58347 +#define GR_CHROOT_CAPS {{ \
58348 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
58349 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
58350 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
58351 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
58352 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
58353 + CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
58354 +
58355 +#define security_learn(normal_msg,args...) \
58356 +({ \
58357 + read_lock(&grsec_exec_file_lock); \
58358 + gr_add_learn_entry(normal_msg "\n", ## args); \
58359 + read_unlock(&grsec_exec_file_lock); \
58360 +})
58361 +
58362 +enum {
58363 + GR_DO_AUDIT,
58364 + GR_DONT_AUDIT,
58365 + GR_DONT_AUDIT_GOOD
58366 +};
58367 +
58368 +enum {
58369 + GR_TTYSNIFF,
58370 + GR_RBAC,
58371 + GR_RBAC_STR,
58372 + GR_STR_RBAC,
58373 + GR_RBAC_MODE2,
58374 + GR_RBAC_MODE3,
58375 + GR_FILENAME,
58376 + GR_SYSCTL_HIDDEN,
58377 + GR_NOARGS,
58378 + GR_ONE_INT,
58379 + GR_ONE_INT_TWO_STR,
58380 + GR_ONE_STR,
58381 + GR_STR_INT,
58382 + GR_TWO_STR_INT,
58383 + GR_TWO_INT,
58384 + GR_TWO_U64,
58385 + GR_THREE_INT,
58386 + GR_FIVE_INT_TWO_STR,
58387 + GR_TWO_STR,
58388 + GR_THREE_STR,
58389 + GR_FOUR_STR,
58390 + GR_STR_FILENAME,
58391 + GR_FILENAME_STR,
58392 + GR_FILENAME_TWO_INT,
58393 + GR_FILENAME_TWO_INT_STR,
58394 + GR_TEXTREL,
58395 + GR_PTRACE,
58396 + GR_RESOURCE,
58397 + GR_CAP,
58398 + GR_SIG,
58399 + GR_SIG2,
58400 + GR_CRASH1,
58401 + GR_CRASH2,
58402 + GR_PSACCT,
58403 + GR_RWXMAP
58404 +};
58405 +
58406 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
58407 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
58408 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
58409 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
58410 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
58411 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
58412 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
58413 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
58414 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
58415 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
58416 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
58417 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
58418 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
58419 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
58420 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
58421 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
58422 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
58423 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
58424 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
58425 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
58426 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
58427 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
58428 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
58429 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
58430 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
58431 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
58432 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
58433 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
58434 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
58435 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
58436 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
58437 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
58438 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
58439 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
58440 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
58441 +
58442 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
58443 +
58444 +#endif
58445 +
58446 +#endif
58447 diff -urNp linux-2.6.32.45/include/linux/grmsg.h linux-2.6.32.45/include/linux/grmsg.h
58448 --- linux-2.6.32.45/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
58449 +++ linux-2.6.32.45/include/linux/grmsg.h 2011-04-17 15:56:46.000000000 -0400
58450 @@ -0,0 +1,108 @@
58451 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
58452 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
58453 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
58454 +#define GR_STOPMOD_MSG "denied modification of module state by "
58455 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
58456 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
58457 +#define GR_IOPERM_MSG "denied use of ioperm() by "
58458 +#define GR_IOPL_MSG "denied use of iopl() by "
58459 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
58460 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
58461 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
58462 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
58463 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
58464 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
58465 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
58466 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
58467 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
58468 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
58469 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
58470 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
58471 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
58472 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
58473 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
58474 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
58475 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
58476 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
58477 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
58478 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
58479 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
58480 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
58481 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
58482 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
58483 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
58484 +#define GR_NPROC_MSG "denied overstep of process limit by "
58485 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
58486 +#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
58487 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
58488 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
58489 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
58490 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
58491 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
58492 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
58493 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
58494 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
58495 +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
58496 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
58497 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
58498 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
58499 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
58500 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
58501 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
58502 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
58503 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
58504 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
58505 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
58506 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
58507 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
58508 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
58509 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
58510 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
58511 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
58512 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
58513 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
58514 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
58515 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
58516 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
58517 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
58518 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
58519 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
58520 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
58521 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
58522 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
58523 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
58524 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
58525 +#define GR_NICE_CHROOT_MSG "denied priority change by "
58526 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
58527 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
58528 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
58529 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
58530 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
58531 +#define GR_TIME_MSG "time set by "
58532 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
58533 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
58534 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
58535 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
58536 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
58537 +#define GR_BIND_MSG "denied bind() by "
58538 +#define GR_CONNECT_MSG "denied connect() by "
58539 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
58540 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
58541 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
58542 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
58543 +#define GR_CAP_ACL_MSG "use of %s denied for "
58544 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
58545 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
58546 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
58547 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
58548 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
58549 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
58550 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
58551 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
58552 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
58553 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
58554 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
58555 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
58556 +#define GR_VM86_MSG "denied use of vm86 by "
58557 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
58558 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
58559 diff -urNp linux-2.6.32.45/include/linux/grsecurity.h linux-2.6.32.45/include/linux/grsecurity.h
58560 --- linux-2.6.32.45/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
58561 +++ linux-2.6.32.45/include/linux/grsecurity.h 2011-08-11 19:58:57.000000000 -0400
58562 @@ -0,0 +1,217 @@
58563 +#ifndef GR_SECURITY_H
58564 +#define GR_SECURITY_H
58565 +#include <linux/fs.h>
58566 +#include <linux/fs_struct.h>
58567 +#include <linux/binfmts.h>
58568 +#include <linux/gracl.h>
58569 +#include <linux/compat.h>
58570 +
58571 +/* notify of brain-dead configs */
58572 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58573 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
58574 +#endif
58575 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
58576 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
58577 +#endif
58578 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
58579 +#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
58580 +#endif
58581 +#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
58582 +#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
58583 +#endif
58584 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
58585 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
58586 +#endif
58587 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
58588 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
58589 +#endif
58590 +
58591 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
58592 +void gr_handle_brute_check(void);
58593 +void gr_handle_kernel_exploit(void);
58594 +int gr_process_user_ban(void);
58595 +
58596 +char gr_roletype_to_char(void);
58597 +
58598 +int gr_acl_enable_at_secure(void);
58599 +
58600 +int gr_check_user_change(int real, int effective, int fs);
58601 +int gr_check_group_change(int real, int effective, int fs);
58602 +
58603 +void gr_del_task_from_ip_table(struct task_struct *p);
58604 +
58605 +int gr_pid_is_chrooted(struct task_struct *p);
58606 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
58607 +int gr_handle_chroot_nice(void);
58608 +int gr_handle_chroot_sysctl(const int op);
58609 +int gr_handle_chroot_setpriority(struct task_struct *p,
58610 + const int niceval);
58611 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
58612 +int gr_handle_chroot_chroot(const struct dentry *dentry,
58613 + const struct vfsmount *mnt);
58614 +int gr_handle_chroot_caps(struct path *path);
58615 +void gr_handle_chroot_chdir(struct path *path);
58616 +int gr_handle_chroot_chmod(const struct dentry *dentry,
58617 + const struct vfsmount *mnt, const int mode);
58618 +int gr_handle_chroot_mknod(const struct dentry *dentry,
58619 + const struct vfsmount *mnt, const int mode);
58620 +int gr_handle_chroot_mount(const struct dentry *dentry,
58621 + const struct vfsmount *mnt,
58622 + const char *dev_name);
58623 +int gr_handle_chroot_pivot(void);
58624 +int gr_handle_chroot_unix(const pid_t pid);
58625 +
58626 +int gr_handle_rawio(const struct inode *inode);
58627 +
58628 +void gr_handle_ioperm(void);
58629 +void gr_handle_iopl(void);
58630 +
58631 +int gr_tpe_allow(const struct file *file);
58632 +
58633 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
58634 +void gr_clear_chroot_entries(struct task_struct *task);
58635 +
58636 +void gr_log_forkfail(const int retval);
58637 +void gr_log_timechange(void);
58638 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
58639 +void gr_log_chdir(const struct dentry *dentry,
58640 + const struct vfsmount *mnt);
58641 +void gr_log_chroot_exec(const struct dentry *dentry,
58642 + const struct vfsmount *mnt);
58643 +void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
58644 +#ifdef CONFIG_COMPAT
58645 +void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
58646 +#endif
58647 +void gr_log_remount(const char *devname, const int retval);
58648 +void gr_log_unmount(const char *devname, const int retval);
58649 +void gr_log_mount(const char *from, const char *to, const int retval);
58650 +void gr_log_textrel(struct vm_area_struct *vma);
58651 +void gr_log_rwxmmap(struct file *file);
58652 +void gr_log_rwxmprotect(struct file *file);
58653 +
58654 +int gr_handle_follow_link(const struct inode *parent,
58655 + const struct inode *inode,
58656 + const struct dentry *dentry,
58657 + const struct vfsmount *mnt);
58658 +int gr_handle_fifo(const struct dentry *dentry,
58659 + const struct vfsmount *mnt,
58660 + const struct dentry *dir, const int flag,
58661 + const int acc_mode);
58662 +int gr_handle_hardlink(const struct dentry *dentry,
58663 + const struct vfsmount *mnt,
58664 + struct inode *inode,
58665 + const int mode, const char *to);
58666 +
58667 +int gr_is_capable(const int cap);
58668 +int gr_is_capable_nolog(const int cap);
58669 +void gr_learn_resource(const struct task_struct *task, const int limit,
58670 + const unsigned long wanted, const int gt);
58671 +void gr_copy_label(struct task_struct *tsk);
58672 +void gr_handle_crash(struct task_struct *task, const int sig);
58673 +int gr_handle_signal(const struct task_struct *p, const int sig);
58674 +int gr_check_crash_uid(const uid_t uid);
58675 +int gr_check_protected_task(const struct task_struct *task);
58676 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
58677 +int gr_acl_handle_mmap(const struct file *file,
58678 + const unsigned long prot);
58679 +int gr_acl_handle_mprotect(const struct file *file,
58680 + const unsigned long prot);
58681 +int gr_check_hidden_task(const struct task_struct *tsk);
58682 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
58683 + const struct vfsmount *mnt);
58684 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
58685 + const struct vfsmount *mnt);
58686 +__u32 gr_acl_handle_access(const struct dentry *dentry,
58687 + const struct vfsmount *mnt, const int fmode);
58688 +__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
58689 + const struct vfsmount *mnt, mode_t mode);
58690 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
58691 + const struct vfsmount *mnt, mode_t mode);
58692 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
58693 + const struct vfsmount *mnt);
58694 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
58695 + const struct vfsmount *mnt);
58696 +int gr_handle_ptrace(struct task_struct *task, const long request);
58697 +int gr_handle_proc_ptrace(struct task_struct *task);
58698 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
58699 + const struct vfsmount *mnt);
58700 +int gr_check_crash_exec(const struct file *filp);
58701 +int gr_acl_is_enabled(void);
58702 +void gr_set_kernel_label(struct task_struct *task);
58703 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
58704 + const gid_t gid);
58705 +int gr_set_proc_label(const struct dentry *dentry,
58706 + const struct vfsmount *mnt,
58707 + const int unsafe_share);
58708 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
58709 + const struct vfsmount *mnt);
58710 +__u32 gr_acl_handle_open(const struct dentry *dentry,
58711 + const struct vfsmount *mnt, const int fmode);
58712 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
58713 + const struct dentry *p_dentry,
58714 + const struct vfsmount *p_mnt, const int fmode,
58715 + const int imode);
58716 +void gr_handle_create(const struct dentry *dentry,
58717 + const struct vfsmount *mnt);
58718 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
58719 + const struct dentry *parent_dentry,
58720 + const struct vfsmount *parent_mnt,
58721 + const int mode);
58722 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
58723 + const struct dentry *parent_dentry,
58724 + const struct vfsmount *parent_mnt);
58725 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
58726 + const struct vfsmount *mnt);
58727 +void gr_handle_delete(const ino_t ino, const dev_t dev);
58728 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
58729 + const struct vfsmount *mnt);
58730 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
58731 + const struct dentry *parent_dentry,
58732 + const struct vfsmount *parent_mnt,
58733 + const char *from);
58734 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
58735 + const struct dentry *parent_dentry,
58736 + const struct vfsmount *parent_mnt,
58737 + const struct dentry *old_dentry,
58738 + const struct vfsmount *old_mnt, const char *to);
58739 +int gr_acl_handle_rename(struct dentry *new_dentry,
58740 + struct dentry *parent_dentry,
58741 + const struct vfsmount *parent_mnt,
58742 + struct dentry *old_dentry,
58743 + struct inode *old_parent_inode,
58744 + struct vfsmount *old_mnt, const char *newname);
58745 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
58746 + struct dentry *old_dentry,
58747 + struct dentry *new_dentry,
58748 + struct vfsmount *mnt, const __u8 replace);
58749 +__u32 gr_check_link(const struct dentry *new_dentry,
58750 + const struct dentry *parent_dentry,
58751 + const struct vfsmount *parent_mnt,
58752 + const struct dentry *old_dentry,
58753 + const struct vfsmount *old_mnt);
58754 +int gr_acl_handle_filldir(const struct file *file, const char *name,
58755 + const unsigned int namelen, const ino_t ino);
58756 +
58757 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
58758 + const struct vfsmount *mnt);
58759 +void gr_acl_handle_exit(void);
58760 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
58761 +int gr_acl_handle_procpidmem(const struct task_struct *task);
58762 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
58763 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
58764 +void gr_audit_ptrace(struct task_struct *task);
58765 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
58766 +
58767 +#ifdef CONFIG_GRKERNSEC
58768 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
58769 +void gr_handle_vm86(void);
58770 +void gr_handle_mem_readwrite(u64 from, u64 to);
58771 +
58772 +extern int grsec_enable_dmesg;
58773 +extern int grsec_disable_privio;
58774 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
58775 +extern int grsec_enable_chroot_findtask;
58776 +#endif
58777 +#endif
58778 +
58779 +#endif
58780 diff -urNp linux-2.6.32.45/include/linux/hdpu_features.h linux-2.6.32.45/include/linux/hdpu_features.h
58781 --- linux-2.6.32.45/include/linux/hdpu_features.h 2011-03-27 14:31:47.000000000 -0400
58782 +++ linux-2.6.32.45/include/linux/hdpu_features.h 2011-04-17 15:56:46.000000000 -0400
58783 @@ -3,7 +3,7 @@
58784 struct cpustate_t {
58785 spinlock_t lock;
58786 int excl;
58787 - int open_count;
58788 + atomic_t open_count;
58789 unsigned char cached_val;
58790 int inited;
58791 unsigned long *set_addr;
58792 diff -urNp linux-2.6.32.45/include/linux/highmem.h linux-2.6.32.45/include/linux/highmem.h
58793 --- linux-2.6.32.45/include/linux/highmem.h 2011-03-27 14:31:47.000000000 -0400
58794 +++ linux-2.6.32.45/include/linux/highmem.h 2011-04-17 15:56:46.000000000 -0400
58795 @@ -137,6 +137,18 @@ static inline void clear_highpage(struct
58796 kunmap_atomic(kaddr, KM_USER0);
58797 }
58798
58799 +static inline void sanitize_highpage(struct page *page)
58800 +{
58801 + void *kaddr;
58802 + unsigned long flags;
58803 +
58804 + local_irq_save(flags);
58805 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
58806 + clear_page(kaddr);
58807 + kunmap_atomic(kaddr, KM_CLEARPAGE);
58808 + local_irq_restore(flags);
58809 +}
58810 +
58811 static inline void zero_user_segments(struct page *page,
58812 unsigned start1, unsigned end1,
58813 unsigned start2, unsigned end2)
58814 diff -urNp linux-2.6.32.45/include/linux/i2c.h linux-2.6.32.45/include/linux/i2c.h
58815 --- linux-2.6.32.45/include/linux/i2c.h 2011-03-27 14:31:47.000000000 -0400
58816 +++ linux-2.6.32.45/include/linux/i2c.h 2011-08-23 21:22:38.000000000 -0400
58817 @@ -325,6 +325,7 @@ struct i2c_algorithm {
58818 /* To determine what the adapter supports */
58819 u32 (*functionality) (struct i2c_adapter *);
58820 };
58821 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
58822
58823 /*
58824 * i2c_adapter is the structure used to identify a physical i2c bus along
58825 diff -urNp linux-2.6.32.45/include/linux/i2o.h linux-2.6.32.45/include/linux/i2o.h
58826 --- linux-2.6.32.45/include/linux/i2o.h 2011-03-27 14:31:47.000000000 -0400
58827 +++ linux-2.6.32.45/include/linux/i2o.h 2011-05-04 17:56:28.000000000 -0400
58828 @@ -564,7 +564,7 @@ struct i2o_controller {
58829 struct i2o_device *exec; /* Executive */
58830 #if BITS_PER_LONG == 64
58831 spinlock_t context_list_lock; /* lock for context_list */
58832 - atomic_t context_list_counter; /* needed for unique contexts */
58833 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
58834 struct list_head context_list; /* list of context id's
58835 and pointers */
58836 #endif
58837 diff -urNp linux-2.6.32.45/include/linux/init_task.h linux-2.6.32.45/include/linux/init_task.h
58838 --- linux-2.6.32.45/include/linux/init_task.h 2011-03-27 14:31:47.000000000 -0400
58839 +++ linux-2.6.32.45/include/linux/init_task.h 2011-05-18 20:44:59.000000000 -0400
58840 @@ -83,6 +83,12 @@ extern struct group_info init_groups;
58841 #define INIT_IDS
58842 #endif
58843
58844 +#ifdef CONFIG_X86
58845 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
58846 +#else
58847 +#define INIT_TASK_THREAD_INFO
58848 +#endif
58849 +
58850 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
58851 /*
58852 * Because of the reduced scope of CAP_SETPCAP when filesystem
58853 @@ -156,6 +162,7 @@ extern struct cred init_cred;
58854 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
58855 .comm = "swapper", \
58856 .thread = INIT_THREAD, \
58857 + INIT_TASK_THREAD_INFO \
58858 .fs = &init_fs, \
58859 .files = &init_files, \
58860 .signal = &init_signals, \
58861 diff -urNp linux-2.6.32.45/include/linux/intel-iommu.h linux-2.6.32.45/include/linux/intel-iommu.h
58862 --- linux-2.6.32.45/include/linux/intel-iommu.h 2011-03-27 14:31:47.000000000 -0400
58863 +++ linux-2.6.32.45/include/linux/intel-iommu.h 2011-08-05 20:33:55.000000000 -0400
58864 @@ -296,7 +296,7 @@ struct iommu_flush {
58865 u8 fm, u64 type);
58866 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
58867 unsigned int size_order, u64 type);
58868 -};
58869 +} __no_const;
58870
58871 enum {
58872 SR_DMAR_FECTL_REG,
58873 diff -urNp linux-2.6.32.45/include/linux/interrupt.h linux-2.6.32.45/include/linux/interrupt.h
58874 --- linux-2.6.32.45/include/linux/interrupt.h 2011-06-25 12:55:35.000000000 -0400
58875 +++ linux-2.6.32.45/include/linux/interrupt.h 2011-06-25 12:56:37.000000000 -0400
58876 @@ -363,7 +363,7 @@ enum
58877 /* map softirq index to softirq name. update 'softirq_to_name' in
58878 * kernel/softirq.c when adding a new softirq.
58879 */
58880 -extern char *softirq_to_name[NR_SOFTIRQS];
58881 +extern const char * const softirq_to_name[NR_SOFTIRQS];
58882
58883 /* softirq mask and active fields moved to irq_cpustat_t in
58884 * asm/hardirq.h to get better cache usage. KAO
58885 @@ -371,12 +371,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
58886
58887 struct softirq_action
58888 {
58889 - void (*action)(struct softirq_action *);
58890 + void (*action)(void);
58891 };
58892
58893 asmlinkage void do_softirq(void);
58894 asmlinkage void __do_softirq(void);
58895 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
58896 +extern void open_softirq(int nr, void (*action)(void));
58897 extern void softirq_init(void);
58898 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
58899 extern void raise_softirq_irqoff(unsigned int nr);
58900 diff -urNp linux-2.6.32.45/include/linux/irq.h linux-2.6.32.45/include/linux/irq.h
58901 --- linux-2.6.32.45/include/linux/irq.h 2011-03-27 14:31:47.000000000 -0400
58902 +++ linux-2.6.32.45/include/linux/irq.h 2011-04-17 15:56:46.000000000 -0400
58903 @@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq,
58904 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
58905 bool boot)
58906 {
58907 +#ifdef CONFIG_CPUMASK_OFFSTACK
58908 gfp_t gfp = GFP_ATOMIC;
58909
58910 if (boot)
58911 gfp = GFP_NOWAIT;
58912
58913 -#ifdef CONFIG_CPUMASK_OFFSTACK
58914 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
58915 return false;
58916
58917 diff -urNp linux-2.6.32.45/include/linux/kallsyms.h linux-2.6.32.45/include/linux/kallsyms.h
58918 --- linux-2.6.32.45/include/linux/kallsyms.h 2011-03-27 14:31:47.000000000 -0400
58919 +++ linux-2.6.32.45/include/linux/kallsyms.h 2011-04-17 15:56:46.000000000 -0400
58920 @@ -15,7 +15,8 @@
58921
58922 struct module;
58923
58924 -#ifdef CONFIG_KALLSYMS
58925 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
58926 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
58927 /* Lookup the address for a symbol. Returns 0 if not found. */
58928 unsigned long kallsyms_lookup_name(const char *name);
58929
58930 @@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(un
58931 /* Stupid that this does nothing, but I didn't create this mess. */
58932 #define __print_symbol(fmt, addr)
58933 #endif /*CONFIG_KALLSYMS*/
58934 +#else /* when included by kallsyms.c, vsnprintf.c, or
58935 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
58936 +extern void __print_symbol(const char *fmt, unsigned long address);
58937 +extern int sprint_symbol(char *buffer, unsigned long address);
58938 +const char *kallsyms_lookup(unsigned long addr,
58939 + unsigned long *symbolsize,
58940 + unsigned long *offset,
58941 + char **modname, char *namebuf);
58942 +#endif
58943
58944 /* This macro allows us to keep printk typechecking */
58945 static void __check_printsym_format(const char *fmt, ...)
58946 diff -urNp linux-2.6.32.45/include/linux/kgdb.h linux-2.6.32.45/include/linux/kgdb.h
58947 --- linux-2.6.32.45/include/linux/kgdb.h 2011-03-27 14:31:47.000000000 -0400
58948 +++ linux-2.6.32.45/include/linux/kgdb.h 2011-08-05 20:33:55.000000000 -0400
58949 @@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
58950
58951 extern int kgdb_connected;
58952
58953 -extern atomic_t kgdb_setting_breakpoint;
58954 -extern atomic_t kgdb_cpu_doing_single_step;
58955 +extern atomic_unchecked_t kgdb_setting_breakpoint;
58956 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
58957
58958 extern struct task_struct *kgdb_usethread;
58959 extern struct task_struct *kgdb_contthread;
58960 @@ -226,8 +226,8 @@ extern int kgdb_arch_remove_breakpoint(u
58961 * hardware debug registers.
58962 */
58963 struct kgdb_arch {
58964 - unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
58965 - unsigned long flags;
58966 + const unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
58967 + const unsigned long flags;
58968
58969 int (*set_breakpoint)(unsigned long, char *);
58970 int (*remove_breakpoint)(unsigned long, char *);
58971 @@ -251,20 +251,20 @@ struct kgdb_arch {
58972 */
58973 struct kgdb_io {
58974 const char *name;
58975 - int (*read_char) (void);
58976 - void (*write_char) (u8);
58977 - void (*flush) (void);
58978 - int (*init) (void);
58979 - void (*pre_exception) (void);
58980 - void (*post_exception) (void);
58981 + int (* const read_char) (void);
58982 + void (* const write_char) (u8);
58983 + void (* const flush) (void);
58984 + int (* const init) (void);
58985 + void (* const pre_exception) (void);
58986 + void (* const post_exception) (void);
58987 };
58988
58989 -extern struct kgdb_arch arch_kgdb_ops;
58990 +extern const struct kgdb_arch arch_kgdb_ops;
58991
58992 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
58993
58994 -extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
58995 -extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
58996 +extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
58997 +extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
58998
58999 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
59000 extern int kgdb_mem2hex(char *mem, char *buf, int count);
59001 diff -urNp linux-2.6.32.45/include/linux/kmod.h linux-2.6.32.45/include/linux/kmod.h
59002 --- linux-2.6.32.45/include/linux/kmod.h 2011-03-27 14:31:47.000000000 -0400
59003 +++ linux-2.6.32.45/include/linux/kmod.h 2011-04-17 15:56:46.000000000 -0400
59004 @@ -31,6 +31,8 @@
59005 * usually useless though. */
59006 extern int __request_module(bool wait, const char *name, ...) \
59007 __attribute__((format(printf, 2, 3)));
59008 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
59009 + __attribute__((format(printf, 3, 4)));
59010 #define request_module(mod...) __request_module(true, mod)
59011 #define request_module_nowait(mod...) __request_module(false, mod)
59012 #define try_then_request_module(x, mod...) \
59013 diff -urNp linux-2.6.32.45/include/linux/kobject.h linux-2.6.32.45/include/linux/kobject.h
59014 --- linux-2.6.32.45/include/linux/kobject.h 2011-03-27 14:31:47.000000000 -0400
59015 +++ linux-2.6.32.45/include/linux/kobject.h 2011-04-17 15:56:46.000000000 -0400
59016 @@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kob
59017
59018 struct kobj_type {
59019 void (*release)(struct kobject *kobj);
59020 - struct sysfs_ops *sysfs_ops;
59021 + const struct sysfs_ops *sysfs_ops;
59022 struct attribute **default_attrs;
59023 };
59024
59025 @@ -118,9 +118,9 @@ struct kobj_uevent_env {
59026 };
59027
59028 struct kset_uevent_ops {
59029 - int (*filter)(struct kset *kset, struct kobject *kobj);
59030 - const char *(*name)(struct kset *kset, struct kobject *kobj);
59031 - int (*uevent)(struct kset *kset, struct kobject *kobj,
59032 + int (* const filter)(struct kset *kset, struct kobject *kobj);
59033 + const char *(* const name)(struct kset *kset, struct kobject *kobj);
59034 + int (* const uevent)(struct kset *kset, struct kobject *kobj,
59035 struct kobj_uevent_env *env);
59036 };
59037
59038 @@ -132,7 +132,7 @@ struct kobj_attribute {
59039 const char *buf, size_t count);
59040 };
59041
59042 -extern struct sysfs_ops kobj_sysfs_ops;
59043 +extern const struct sysfs_ops kobj_sysfs_ops;
59044
59045 /**
59046 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
59047 @@ -155,14 +155,14 @@ struct kset {
59048 struct list_head list;
59049 spinlock_t list_lock;
59050 struct kobject kobj;
59051 - struct kset_uevent_ops *uevent_ops;
59052 + const struct kset_uevent_ops *uevent_ops;
59053 };
59054
59055 extern void kset_init(struct kset *kset);
59056 extern int __must_check kset_register(struct kset *kset);
59057 extern void kset_unregister(struct kset *kset);
59058 extern struct kset * __must_check kset_create_and_add(const char *name,
59059 - struct kset_uevent_ops *u,
59060 + const struct kset_uevent_ops *u,
59061 struct kobject *parent_kobj);
59062
59063 static inline struct kset *to_kset(struct kobject *kobj)
59064 diff -urNp linux-2.6.32.45/include/linux/kvm_host.h linux-2.6.32.45/include/linux/kvm_host.h
59065 --- linux-2.6.32.45/include/linux/kvm_host.h 2011-03-27 14:31:47.000000000 -0400
59066 +++ linux-2.6.32.45/include/linux/kvm_host.h 2011-04-17 15:56:46.000000000 -0400
59067 @@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
59068 void vcpu_load(struct kvm_vcpu *vcpu);
59069 void vcpu_put(struct kvm_vcpu *vcpu);
59070
59071 -int kvm_init(void *opaque, unsigned int vcpu_size,
59072 +int kvm_init(const void *opaque, unsigned int vcpu_size,
59073 struct module *module);
59074 void kvm_exit(void);
59075
59076 @@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
59077 struct kvm_guest_debug *dbg);
59078 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
59079
59080 -int kvm_arch_init(void *opaque);
59081 +int kvm_arch_init(const void *opaque);
59082 void kvm_arch_exit(void);
59083
59084 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
59085 diff -urNp linux-2.6.32.45/include/linux/libata.h linux-2.6.32.45/include/linux/libata.h
59086 --- linux-2.6.32.45/include/linux/libata.h 2011-03-27 14:31:47.000000000 -0400
59087 +++ linux-2.6.32.45/include/linux/libata.h 2011-08-05 20:33:55.000000000 -0400
59088 @@ -525,11 +525,11 @@ struct ata_ioports {
59089
59090 struct ata_host {
59091 spinlock_t lock;
59092 - struct device *dev;
59093 + struct device *dev;
59094 void __iomem * const *iomap;
59095 unsigned int n_ports;
59096 void *private_data;
59097 - struct ata_port_operations *ops;
59098 + const struct ata_port_operations *ops;
59099 unsigned long flags;
59100 #ifdef CONFIG_ATA_ACPI
59101 acpi_handle acpi_handle;
59102 @@ -710,7 +710,7 @@ struct ata_link {
59103
59104 struct ata_port {
59105 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
59106 - struct ata_port_operations *ops;
59107 + const struct ata_port_operations *ops;
59108 spinlock_t *lock;
59109 /* Flags owned by the EH context. Only EH should touch these once the
59110 port is active */
59111 @@ -883,7 +883,7 @@ struct ata_port_operations {
59112 * ->inherits must be the last field and all the preceding
59113 * fields must be pointers.
59114 */
59115 - const struct ata_port_operations *inherits;
59116 + const struct ata_port_operations * const inherits;
59117 };
59118
59119 struct ata_port_info {
59120 @@ -892,7 +892,7 @@ struct ata_port_info {
59121 unsigned long pio_mask;
59122 unsigned long mwdma_mask;
59123 unsigned long udma_mask;
59124 - struct ata_port_operations *port_ops;
59125 + const struct ata_port_operations *port_ops;
59126 void *private_data;
59127 };
59128
59129 @@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timi
59130 extern const unsigned long sata_deb_timing_hotplug[];
59131 extern const unsigned long sata_deb_timing_long[];
59132
59133 -extern struct ata_port_operations ata_dummy_port_ops;
59134 +extern const struct ata_port_operations ata_dummy_port_ops;
59135 extern const struct ata_port_info ata_dummy_port_info;
59136
59137 static inline const unsigned long *
59138 @@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_
59139 struct scsi_host_template *sht);
59140 extern void ata_host_detach(struct ata_host *host);
59141 extern void ata_host_init(struct ata_host *, struct device *,
59142 - unsigned long, struct ata_port_operations *);
59143 + unsigned long, const struct ata_port_operations *);
59144 extern int ata_scsi_detect(struct scsi_host_template *sht);
59145 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
59146 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
59147 diff -urNp linux-2.6.32.45/include/linux/lockd/bind.h linux-2.6.32.45/include/linux/lockd/bind.h
59148 --- linux-2.6.32.45/include/linux/lockd/bind.h 2011-03-27 14:31:47.000000000 -0400
59149 +++ linux-2.6.32.45/include/linux/lockd/bind.h 2011-04-17 15:56:46.000000000 -0400
59150 @@ -23,13 +23,13 @@ struct svc_rqst;
59151 * This is the set of functions for lockd->nfsd communication
59152 */
59153 struct nlmsvc_binding {
59154 - __be32 (*fopen)(struct svc_rqst *,
59155 + __be32 (* const fopen)(struct svc_rqst *,
59156 struct nfs_fh *,
59157 struct file **);
59158 - void (*fclose)(struct file *);
59159 + void (* const fclose)(struct file *);
59160 };
59161
59162 -extern struct nlmsvc_binding * nlmsvc_ops;
59163 +extern const struct nlmsvc_binding * nlmsvc_ops;
59164
59165 /*
59166 * Similar to nfs_client_initdata, but without the NFS-specific
59167 diff -urNp linux-2.6.32.45/include/linux/mca.h linux-2.6.32.45/include/linux/mca.h
59168 --- linux-2.6.32.45/include/linux/mca.h 2011-03-27 14:31:47.000000000 -0400
59169 +++ linux-2.6.32.45/include/linux/mca.h 2011-08-05 20:33:55.000000000 -0400
59170 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
59171 int region);
59172 void * (*mca_transform_memory)(struct mca_device *,
59173 void *memory);
59174 -};
59175 +} __no_const;
59176
59177 struct mca_bus {
59178 u64 default_dma_mask;
59179 diff -urNp linux-2.6.32.45/include/linux/memory.h linux-2.6.32.45/include/linux/memory.h
59180 --- linux-2.6.32.45/include/linux/memory.h 2011-03-27 14:31:47.000000000 -0400
59181 +++ linux-2.6.32.45/include/linux/memory.h 2011-08-05 20:33:55.000000000 -0400
59182 @@ -108,7 +108,7 @@ struct memory_accessor {
59183 size_t count);
59184 ssize_t (*write)(struct memory_accessor *, const char *buf,
59185 off_t offset, size_t count);
59186 -};
59187 +} __no_const;
59188
59189 /*
59190 * Kernel text modification mutex, used for code patching. Users of this lock
59191 diff -urNp linux-2.6.32.45/include/linux/mm.h linux-2.6.32.45/include/linux/mm.h
59192 --- linux-2.6.32.45/include/linux/mm.h 2011-03-27 14:31:47.000000000 -0400
59193 +++ linux-2.6.32.45/include/linux/mm.h 2011-04-17 15:56:46.000000000 -0400
59194 @@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void
59195
59196 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
59197 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
59198 +
59199 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
59200 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
59201 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
59202 +#else
59203 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
59204 +#endif
59205 +
59206 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
59207 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
59208
59209 @@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
59210 int set_page_dirty_lock(struct page *page);
59211 int clear_page_dirty_for_io(struct page *page);
59212
59213 -/* Is the vma a continuation of the stack vma above it? */
59214 -static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
59215 -{
59216 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
59217 -}
59218 -
59219 extern unsigned long move_page_tables(struct vm_area_struct *vma,
59220 unsigned long old_addr, struct vm_area_struct *new_vma,
59221 unsigned long new_addr, unsigned long len);
59222 @@ -890,6 +891,8 @@ struct shrinker {
59223 extern void register_shrinker(struct shrinker *);
59224 extern void unregister_shrinker(struct shrinker *);
59225
59226 +pgprot_t vm_get_page_prot(unsigned long vm_flags);
59227 +
59228 int vma_wants_writenotify(struct vm_area_struct *vma);
59229
59230 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
59231 @@ -1162,6 +1165,7 @@ out:
59232 }
59233
59234 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
59235 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
59236
59237 extern unsigned long do_brk(unsigned long, unsigned long);
59238
59239 @@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(
59240 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
59241 struct vm_area_struct **pprev);
59242
59243 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
59244 +extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
59245 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
59246 +
59247 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
59248 NULL if none. Assume start_addr < end_addr. */
59249 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
59250 @@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(st
59251 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
59252 }
59253
59254 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
59255 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
59256 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
59257 unsigned long pfn, unsigned long size, pgprot_t);
59258 @@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long
59259 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
59260 extern int sysctl_memory_failure_early_kill;
59261 extern int sysctl_memory_failure_recovery;
59262 -extern atomic_long_t mce_bad_pages;
59263 +extern atomic_long_unchecked_t mce_bad_pages;
59264 +
59265 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59266 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
59267 +#else
59268 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
59269 +#endif
59270
59271 #endif /* __KERNEL__ */
59272 #endif /* _LINUX_MM_H */
59273 diff -urNp linux-2.6.32.45/include/linux/mm_types.h linux-2.6.32.45/include/linux/mm_types.h
59274 --- linux-2.6.32.45/include/linux/mm_types.h 2011-03-27 14:31:47.000000000 -0400
59275 +++ linux-2.6.32.45/include/linux/mm_types.h 2011-04-17 15:56:46.000000000 -0400
59276 @@ -186,6 +186,8 @@ struct vm_area_struct {
59277 #ifdef CONFIG_NUMA
59278 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
59279 #endif
59280 +
59281 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
59282 };
59283
59284 struct core_thread {
59285 @@ -287,6 +289,24 @@ struct mm_struct {
59286 #ifdef CONFIG_MMU_NOTIFIER
59287 struct mmu_notifier_mm *mmu_notifier_mm;
59288 #endif
59289 +
59290 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59291 + unsigned long pax_flags;
59292 +#endif
59293 +
59294 +#ifdef CONFIG_PAX_DLRESOLVE
59295 + unsigned long call_dl_resolve;
59296 +#endif
59297 +
59298 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
59299 + unsigned long call_syscall;
59300 +#endif
59301 +
59302 +#ifdef CONFIG_PAX_ASLR
59303 + unsigned long delta_mmap; /* randomized offset */
59304 + unsigned long delta_stack; /* randomized offset */
59305 +#endif
59306 +
59307 };
59308
59309 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
59310 diff -urNp linux-2.6.32.45/include/linux/mmu_notifier.h linux-2.6.32.45/include/linux/mmu_notifier.h
59311 --- linux-2.6.32.45/include/linux/mmu_notifier.h 2011-03-27 14:31:47.000000000 -0400
59312 +++ linux-2.6.32.45/include/linux/mmu_notifier.h 2011-04-17 15:56:46.000000000 -0400
59313 @@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destr
59314 */
59315 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
59316 ({ \
59317 - pte_t __pte; \
59318 + pte_t ___pte; \
59319 struct vm_area_struct *___vma = __vma; \
59320 unsigned long ___address = __address; \
59321 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
59322 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
59323 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
59324 - __pte; \
59325 + ___pte; \
59326 })
59327
59328 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
59329 diff -urNp linux-2.6.32.45/include/linux/mmzone.h linux-2.6.32.45/include/linux/mmzone.h
59330 --- linux-2.6.32.45/include/linux/mmzone.h 2011-03-27 14:31:47.000000000 -0400
59331 +++ linux-2.6.32.45/include/linux/mmzone.h 2011-04-17 15:56:46.000000000 -0400
59332 @@ -350,7 +350,7 @@ struct zone {
59333 unsigned long flags; /* zone flags, see below */
59334
59335 /* Zone statistics */
59336 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59337 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59338
59339 /*
59340 * prev_priority holds the scanning priority for this zone. It is
59341 diff -urNp linux-2.6.32.45/include/linux/mod_devicetable.h linux-2.6.32.45/include/linux/mod_devicetable.h
59342 --- linux-2.6.32.45/include/linux/mod_devicetable.h 2011-03-27 14:31:47.000000000 -0400
59343 +++ linux-2.6.32.45/include/linux/mod_devicetable.h 2011-04-17 15:56:46.000000000 -0400
59344 @@ -12,7 +12,7 @@
59345 typedef unsigned long kernel_ulong_t;
59346 #endif
59347
59348 -#define PCI_ANY_ID (~0)
59349 +#define PCI_ANY_ID ((__u16)~0)
59350
59351 struct pci_device_id {
59352 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
59353 @@ -131,7 +131,7 @@ struct usb_device_id {
59354 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
59355 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
59356
59357 -#define HID_ANY_ID (~0)
59358 +#define HID_ANY_ID (~0U)
59359
59360 struct hid_device_id {
59361 __u16 bus;
59362 diff -urNp linux-2.6.32.45/include/linux/module.h linux-2.6.32.45/include/linux/module.h
59363 --- linux-2.6.32.45/include/linux/module.h 2011-03-27 14:31:47.000000000 -0400
59364 +++ linux-2.6.32.45/include/linux/module.h 2011-08-05 20:33:55.000000000 -0400
59365 @@ -16,6 +16,7 @@
59366 #include <linux/kobject.h>
59367 #include <linux/moduleparam.h>
59368 #include <linux/tracepoint.h>
59369 +#include <linux/fs.h>
59370
59371 #include <asm/local.h>
59372 #include <asm/module.h>
59373 @@ -287,16 +288,16 @@ struct module
59374 int (*init)(void);
59375
59376 /* If this is non-NULL, vfree after init() returns */
59377 - void *module_init;
59378 + void *module_init_rx, *module_init_rw;
59379
59380 /* Here is the actual code + data, vfree'd on unload. */
59381 - void *module_core;
59382 + void *module_core_rx, *module_core_rw;
59383
59384 /* Here are the sizes of the init and core sections */
59385 - unsigned int init_size, core_size;
59386 + unsigned int init_size_rw, core_size_rw;
59387
59388 /* The size of the executable code in each section. */
59389 - unsigned int init_text_size, core_text_size;
59390 + unsigned int init_size_rx, core_size_rx;
59391
59392 /* Arch-specific module values */
59393 struct mod_arch_specific arch;
59394 @@ -345,6 +346,10 @@ struct module
59395 #ifdef CONFIG_EVENT_TRACING
59396 struct ftrace_event_call *trace_events;
59397 unsigned int num_trace_events;
59398 + struct file_operations trace_id;
59399 + struct file_operations trace_enable;
59400 + struct file_operations trace_format;
59401 + struct file_operations trace_filter;
59402 #endif
59403 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
59404 unsigned long *ftrace_callsites;
59405 @@ -393,16 +398,46 @@ struct module *__module_address(unsigned
59406 bool is_module_address(unsigned long addr);
59407 bool is_module_text_address(unsigned long addr);
59408
59409 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
59410 +{
59411 +
59412 +#ifdef CONFIG_PAX_KERNEXEC
59413 + if (ktla_ktva(addr) >= (unsigned long)start &&
59414 + ktla_ktva(addr) < (unsigned long)start + size)
59415 + return 1;
59416 +#endif
59417 +
59418 + return ((void *)addr >= start && (void *)addr < start + size);
59419 +}
59420 +
59421 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
59422 +{
59423 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
59424 +}
59425 +
59426 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
59427 +{
59428 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
59429 +}
59430 +
59431 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
59432 +{
59433 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
59434 +}
59435 +
59436 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
59437 +{
59438 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
59439 +}
59440 +
59441 static inline int within_module_core(unsigned long addr, struct module *mod)
59442 {
59443 - return (unsigned long)mod->module_core <= addr &&
59444 - addr < (unsigned long)mod->module_core + mod->core_size;
59445 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
59446 }
59447
59448 static inline int within_module_init(unsigned long addr, struct module *mod)
59449 {
59450 - return (unsigned long)mod->module_init <= addr &&
59451 - addr < (unsigned long)mod->module_init + mod->init_size;
59452 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
59453 }
59454
59455 /* Search for module by name: must hold module_mutex. */
59456 diff -urNp linux-2.6.32.45/include/linux/moduleloader.h linux-2.6.32.45/include/linux/moduleloader.h
59457 --- linux-2.6.32.45/include/linux/moduleloader.h 2011-03-27 14:31:47.000000000 -0400
59458 +++ linux-2.6.32.45/include/linux/moduleloader.h 2011-04-17 15:56:46.000000000 -0400
59459 @@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
59460 sections. Returns NULL on failure. */
59461 void *module_alloc(unsigned long size);
59462
59463 +#ifdef CONFIG_PAX_KERNEXEC
59464 +void *module_alloc_exec(unsigned long size);
59465 +#else
59466 +#define module_alloc_exec(x) module_alloc(x)
59467 +#endif
59468 +
59469 /* Free memory returned from module_alloc. */
59470 void module_free(struct module *mod, void *module_region);
59471
59472 +#ifdef CONFIG_PAX_KERNEXEC
59473 +void module_free_exec(struct module *mod, void *module_region);
59474 +#else
59475 +#define module_free_exec(x, y) module_free((x), (y))
59476 +#endif
59477 +
59478 /* Apply the given relocation to the (simplified) ELF. Return -error
59479 or 0. */
59480 int apply_relocate(Elf_Shdr *sechdrs,
59481 diff -urNp linux-2.6.32.45/include/linux/moduleparam.h linux-2.6.32.45/include/linux/moduleparam.h
59482 --- linux-2.6.32.45/include/linux/moduleparam.h 2011-03-27 14:31:47.000000000 -0400
59483 +++ linux-2.6.32.45/include/linux/moduleparam.h 2011-04-17 15:56:46.000000000 -0400
59484 @@ -132,7 +132,7 @@ struct kparam_array
59485
59486 /* Actually copy string: maxlen param is usually sizeof(string). */
59487 #define module_param_string(name, string, len, perm) \
59488 - static const struct kparam_string __param_string_##name \
59489 + static const struct kparam_string __param_string_##name __used \
59490 = { len, string }; \
59491 __module_param_call(MODULE_PARAM_PREFIX, name, \
59492 param_set_copystring, param_get_string, \
59493 @@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffe
59494
59495 /* Comma-separated array: *nump is set to number they actually specified. */
59496 #define module_param_array_named(name, array, type, nump, perm) \
59497 - static const struct kparam_array __param_arr_##name \
59498 + static const struct kparam_array __param_arr_##name __used \
59499 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
59500 sizeof(array[0]), array }; \
59501 __module_param_call(MODULE_PARAM_PREFIX, name, \
59502 diff -urNp linux-2.6.32.45/include/linux/mutex.h linux-2.6.32.45/include/linux/mutex.h
59503 --- linux-2.6.32.45/include/linux/mutex.h 2011-03-27 14:31:47.000000000 -0400
59504 +++ linux-2.6.32.45/include/linux/mutex.h 2011-04-17 15:56:46.000000000 -0400
59505 @@ -51,7 +51,7 @@ struct mutex {
59506 spinlock_t wait_lock;
59507 struct list_head wait_list;
59508 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
59509 - struct thread_info *owner;
59510 + struct task_struct *owner;
59511 #endif
59512 #ifdef CONFIG_DEBUG_MUTEXES
59513 const char *name;
59514 diff -urNp linux-2.6.32.45/include/linux/namei.h linux-2.6.32.45/include/linux/namei.h
59515 --- linux-2.6.32.45/include/linux/namei.h 2011-03-27 14:31:47.000000000 -0400
59516 +++ linux-2.6.32.45/include/linux/namei.h 2011-04-17 15:56:46.000000000 -0400
59517 @@ -22,7 +22,7 @@ struct nameidata {
59518 unsigned int flags;
59519 int last_type;
59520 unsigned depth;
59521 - char *saved_names[MAX_NESTED_LINKS + 1];
59522 + const char *saved_names[MAX_NESTED_LINKS + 1];
59523
59524 /* Intent data */
59525 union {
59526 @@ -84,12 +84,12 @@ extern int follow_up(struct path *);
59527 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
59528 extern void unlock_rename(struct dentry *, struct dentry *);
59529
59530 -static inline void nd_set_link(struct nameidata *nd, char *path)
59531 +static inline void nd_set_link(struct nameidata *nd, const char *path)
59532 {
59533 nd->saved_names[nd->depth] = path;
59534 }
59535
59536 -static inline char *nd_get_link(struct nameidata *nd)
59537 +static inline const char *nd_get_link(const struct nameidata *nd)
59538 {
59539 return nd->saved_names[nd->depth];
59540 }
59541 diff -urNp linux-2.6.32.45/include/linux/netdevice.h linux-2.6.32.45/include/linux/netdevice.h
59542 --- linux-2.6.32.45/include/linux/netdevice.h 2011-08-09 18:35:30.000000000 -0400
59543 +++ linux-2.6.32.45/include/linux/netdevice.h 2011-08-23 21:22:38.000000000 -0400
59544 @@ -637,6 +637,7 @@ struct net_device_ops {
59545 u16 xid);
59546 #endif
59547 };
59548 +typedef struct net_device_ops __no_const net_device_ops_no_const;
59549
59550 /*
59551 * The DEVICE structure.
59552 diff -urNp linux-2.6.32.45/include/linux/netfilter/xt_gradm.h linux-2.6.32.45/include/linux/netfilter/xt_gradm.h
59553 --- linux-2.6.32.45/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
59554 +++ linux-2.6.32.45/include/linux/netfilter/xt_gradm.h 2011-04-17 15:56:46.000000000 -0400
59555 @@ -0,0 +1,9 @@
59556 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
59557 +#define _LINUX_NETFILTER_XT_GRADM_H 1
59558 +
59559 +struct xt_gradm_mtinfo {
59560 + __u16 flags;
59561 + __u16 invflags;
59562 +};
59563 +
59564 +#endif
59565 diff -urNp linux-2.6.32.45/include/linux/nodemask.h linux-2.6.32.45/include/linux/nodemask.h
59566 --- linux-2.6.32.45/include/linux/nodemask.h 2011-03-27 14:31:47.000000000 -0400
59567 +++ linux-2.6.32.45/include/linux/nodemask.h 2011-04-17 15:56:46.000000000 -0400
59568 @@ -464,11 +464,11 @@ static inline int num_node_state(enum no
59569
59570 #define any_online_node(mask) \
59571 ({ \
59572 - int node; \
59573 - for_each_node_mask(node, (mask)) \
59574 - if (node_online(node)) \
59575 + int __node; \
59576 + for_each_node_mask(__node, (mask)) \
59577 + if (node_online(__node)) \
59578 break; \
59579 - node; \
59580 + __node; \
59581 })
59582
59583 #define num_online_nodes() num_node_state(N_ONLINE)
59584 diff -urNp linux-2.6.32.45/include/linux/oprofile.h linux-2.6.32.45/include/linux/oprofile.h
59585 --- linux-2.6.32.45/include/linux/oprofile.h 2011-03-27 14:31:47.000000000 -0400
59586 +++ linux-2.6.32.45/include/linux/oprofile.h 2011-04-17 15:56:46.000000000 -0400
59587 @@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super
59588 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
59589 char const * name, ulong * val);
59590
59591 -/** Create a file for read-only access to an atomic_t. */
59592 +/** Create a file for read-only access to an atomic_unchecked_t. */
59593 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
59594 - char const * name, atomic_t * val);
59595 + char const * name, atomic_unchecked_t * val);
59596
59597 /** create a directory */
59598 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
59599 diff -urNp linux-2.6.32.45/include/linux/pagemap.h linux-2.6.32.45/include/linux/pagemap.h
59600 --- linux-2.6.32.45/include/linux/pagemap.h 2011-03-27 14:31:47.000000000 -0400
59601 +++ linux-2.6.32.45/include/linux/pagemap.h 2011-08-17 19:36:28.000000000 -0400
59602 @@ -425,6 +425,7 @@ static inline int fault_in_pages_readabl
59603 if (((unsigned long)uaddr & PAGE_MASK) !=
59604 ((unsigned long)end & PAGE_MASK))
59605 ret = __get_user(c, end);
59606 + (void)c;
59607 }
59608 return ret;
59609 }
59610 diff -urNp linux-2.6.32.45/include/linux/perf_event.h linux-2.6.32.45/include/linux/perf_event.h
59611 --- linux-2.6.32.45/include/linux/perf_event.h 2011-03-27 14:31:47.000000000 -0400
59612 +++ linux-2.6.32.45/include/linux/perf_event.h 2011-05-04 17:56:28.000000000 -0400
59613 @@ -476,7 +476,7 @@ struct hw_perf_event {
59614 struct hrtimer hrtimer;
59615 };
59616 };
59617 - atomic64_t prev_count;
59618 + atomic64_unchecked_t prev_count;
59619 u64 sample_period;
59620 u64 last_period;
59621 atomic64_t period_left;
59622 @@ -557,7 +557,7 @@ struct perf_event {
59623 const struct pmu *pmu;
59624
59625 enum perf_event_active_state state;
59626 - atomic64_t count;
59627 + atomic64_unchecked_t count;
59628
59629 /*
59630 * These are the total time in nanoseconds that the event
59631 @@ -595,8 +595,8 @@ struct perf_event {
59632 * These accumulate total time (in nanoseconds) that children
59633 * events have been enabled and running, respectively.
59634 */
59635 - atomic64_t child_total_time_enabled;
59636 - atomic64_t child_total_time_running;
59637 + atomic64_unchecked_t child_total_time_enabled;
59638 + atomic64_unchecked_t child_total_time_running;
59639
59640 /*
59641 * Protect attach/detach and child_list:
59642 diff -urNp linux-2.6.32.45/include/linux/pipe_fs_i.h linux-2.6.32.45/include/linux/pipe_fs_i.h
59643 --- linux-2.6.32.45/include/linux/pipe_fs_i.h 2011-03-27 14:31:47.000000000 -0400
59644 +++ linux-2.6.32.45/include/linux/pipe_fs_i.h 2011-04-17 15:56:46.000000000 -0400
59645 @@ -46,9 +46,9 @@ struct pipe_inode_info {
59646 wait_queue_head_t wait;
59647 unsigned int nrbufs, curbuf;
59648 struct page *tmp_page;
59649 - unsigned int readers;
59650 - unsigned int writers;
59651 - unsigned int waiting_writers;
59652 + atomic_t readers;
59653 + atomic_t writers;
59654 + atomic_t waiting_writers;
59655 unsigned int r_counter;
59656 unsigned int w_counter;
59657 struct fasync_struct *fasync_readers;
59658 diff -urNp linux-2.6.32.45/include/linux/poison.h linux-2.6.32.45/include/linux/poison.h
59659 --- linux-2.6.32.45/include/linux/poison.h 2011-03-27 14:31:47.000000000 -0400
59660 +++ linux-2.6.32.45/include/linux/poison.h 2011-04-17 15:56:46.000000000 -0400
59661 @@ -19,8 +19,8 @@
59662 * under normal circumstances, used to verify that nobody uses
59663 * non-initialized list entries.
59664 */
59665 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
59666 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
59667 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
59668 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
59669
59670 /********** include/linux/timer.h **********/
59671 /*
59672 diff -urNp linux-2.6.32.45/include/linux/posix-timers.h linux-2.6.32.45/include/linux/posix-timers.h
59673 --- linux-2.6.32.45/include/linux/posix-timers.h 2011-03-27 14:31:47.000000000 -0400
59674 +++ linux-2.6.32.45/include/linux/posix-timers.h 2011-08-05 20:33:55.000000000 -0400
59675 @@ -67,7 +67,7 @@ struct k_itimer {
59676 };
59677
59678 struct k_clock {
59679 - int res; /* in nanoseconds */
59680 + const int res; /* in nanoseconds */
59681 int (*clock_getres) (const clockid_t which_clock, struct timespec *tp);
59682 int (*clock_set) (const clockid_t which_clock, struct timespec * tp);
59683 int (*clock_get) (const clockid_t which_clock, struct timespec * tp);
59684 diff -urNp linux-2.6.32.45/include/linux/preempt.h linux-2.6.32.45/include/linux/preempt.h
59685 --- linux-2.6.32.45/include/linux/preempt.h 2011-03-27 14:31:47.000000000 -0400
59686 +++ linux-2.6.32.45/include/linux/preempt.h 2011-08-05 20:33:55.000000000 -0400
59687 @@ -110,7 +110,7 @@ struct preempt_ops {
59688 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
59689 void (*sched_out)(struct preempt_notifier *notifier,
59690 struct task_struct *next);
59691 -};
59692 +} __no_const;
59693
59694 /**
59695 * preempt_notifier - key for installing preemption notifiers
59696 diff -urNp linux-2.6.32.45/include/linux/proc_fs.h linux-2.6.32.45/include/linux/proc_fs.h
59697 --- linux-2.6.32.45/include/linux/proc_fs.h 2011-03-27 14:31:47.000000000 -0400
59698 +++ linux-2.6.32.45/include/linux/proc_fs.h 2011-08-05 20:33:55.000000000 -0400
59699 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
59700 return proc_create_data(name, mode, parent, proc_fops, NULL);
59701 }
59702
59703 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
59704 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
59705 +{
59706 +#ifdef CONFIG_GRKERNSEC_PROC_USER
59707 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
59708 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59709 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
59710 +#else
59711 + return proc_create_data(name, mode, parent, proc_fops, NULL);
59712 +#endif
59713 +}
59714 +
59715 +
59716 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
59717 mode_t mode, struct proc_dir_entry *base,
59718 read_proc_t *read_proc, void * data)
59719 @@ -256,7 +269,7 @@ union proc_op {
59720 int (*proc_show)(struct seq_file *m,
59721 struct pid_namespace *ns, struct pid *pid,
59722 struct task_struct *task);
59723 -};
59724 +} __no_const;
59725
59726 struct ctl_table_header;
59727 struct ctl_table;
59728 diff -urNp linux-2.6.32.45/include/linux/ptrace.h linux-2.6.32.45/include/linux/ptrace.h
59729 --- linux-2.6.32.45/include/linux/ptrace.h 2011-03-27 14:31:47.000000000 -0400
59730 +++ linux-2.6.32.45/include/linux/ptrace.h 2011-04-17 15:56:46.000000000 -0400
59731 @@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_
59732 extern void exit_ptrace(struct task_struct *tracer);
59733 #define PTRACE_MODE_READ 1
59734 #define PTRACE_MODE_ATTACH 2
59735 -/* Returns 0 on success, -errno on denial. */
59736 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
59737 /* Returns true on success, false on denial. */
59738 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
59739 +/* Returns true on success, false on denial. */
59740 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
59741
59742 static inline int ptrace_reparented(struct task_struct *child)
59743 {
59744 diff -urNp linux-2.6.32.45/include/linux/random.h linux-2.6.32.45/include/linux/random.h
59745 --- linux-2.6.32.45/include/linux/random.h 2011-08-16 20:37:25.000000000 -0400
59746 +++ linux-2.6.32.45/include/linux/random.h 2011-08-07 19:48:09.000000000 -0400
59747 @@ -63,6 +63,11 @@ unsigned long randomize_range(unsigned l
59748 u32 random32(void);
59749 void srandom32(u32 seed);
59750
59751 +static inline unsigned long pax_get_random_long(void)
59752 +{
59753 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
59754 +}
59755 +
59756 #endif /* __KERNEL___ */
59757
59758 #endif /* _LINUX_RANDOM_H */
59759 diff -urNp linux-2.6.32.45/include/linux/reboot.h linux-2.6.32.45/include/linux/reboot.h
59760 --- linux-2.6.32.45/include/linux/reboot.h 2011-03-27 14:31:47.000000000 -0400
59761 +++ linux-2.6.32.45/include/linux/reboot.h 2011-05-22 23:02:06.000000000 -0400
59762 @@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
59763 * Architecture-specific implementations of sys_reboot commands.
59764 */
59765
59766 -extern void machine_restart(char *cmd);
59767 -extern void machine_halt(void);
59768 -extern void machine_power_off(void);
59769 +extern void machine_restart(char *cmd) __noreturn;
59770 +extern void machine_halt(void) __noreturn;
59771 +extern void machine_power_off(void) __noreturn;
59772
59773 extern void machine_shutdown(void);
59774 struct pt_regs;
59775 @@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
59776 */
59777
59778 extern void kernel_restart_prepare(char *cmd);
59779 -extern void kernel_restart(char *cmd);
59780 -extern void kernel_halt(void);
59781 -extern void kernel_power_off(void);
59782 +extern void kernel_restart(char *cmd) __noreturn;
59783 +extern void kernel_halt(void) __noreturn;
59784 +extern void kernel_power_off(void) __noreturn;
59785
59786 void ctrl_alt_del(void);
59787
59788 @@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
59789 * Emergency restart, callable from an interrupt handler.
59790 */
59791
59792 -extern void emergency_restart(void);
59793 +extern void emergency_restart(void) __noreturn;
59794 #include <asm/emergency-restart.h>
59795
59796 #endif
59797 diff -urNp linux-2.6.32.45/include/linux/reiserfs_fs.h linux-2.6.32.45/include/linux/reiserfs_fs.h
59798 --- linux-2.6.32.45/include/linux/reiserfs_fs.h 2011-03-27 14:31:47.000000000 -0400
59799 +++ linux-2.6.32.45/include/linux/reiserfs_fs.h 2011-04-17 15:56:46.000000000 -0400
59800 @@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset
59801 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
59802
59803 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
59804 -#define get_generation(s) atomic_read (&fs_generation(s))
59805 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
59806 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
59807 #define __fs_changed(gen,s) (gen != get_generation (s))
59808 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
59809 @@ -1534,24 +1534,24 @@ static inline struct super_block *sb_fro
59810 */
59811
59812 struct item_operations {
59813 - int (*bytes_number) (struct item_head * ih, int block_size);
59814 - void (*decrement_key) (struct cpu_key *);
59815 - int (*is_left_mergeable) (struct reiserfs_key * ih,
59816 + int (* const bytes_number) (struct item_head * ih, int block_size);
59817 + void (* const decrement_key) (struct cpu_key *);
59818 + int (* const is_left_mergeable) (struct reiserfs_key * ih,
59819 unsigned long bsize);
59820 - void (*print_item) (struct item_head *, char *item);
59821 - void (*check_item) (struct item_head *, char *item);
59822 + void (* const print_item) (struct item_head *, char *item);
59823 + void (* const check_item) (struct item_head *, char *item);
59824
59825 - int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
59826 + int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
59827 int is_affected, int insert_size);
59828 - int (*check_left) (struct virtual_item * vi, int free,
59829 + int (* const check_left) (struct virtual_item * vi, int free,
59830 int start_skip, int end_skip);
59831 - int (*check_right) (struct virtual_item * vi, int free);
59832 - int (*part_size) (struct virtual_item * vi, int from, int to);
59833 - int (*unit_num) (struct virtual_item * vi);
59834 - void (*print_vi) (struct virtual_item * vi);
59835 + int (* const check_right) (struct virtual_item * vi, int free);
59836 + int (* const part_size) (struct virtual_item * vi, int from, int to);
59837 + int (* const unit_num) (struct virtual_item * vi);
59838 + void (* const print_vi) (struct virtual_item * vi);
59839 };
59840
59841 -extern struct item_operations *item_ops[TYPE_ANY + 1];
59842 +extern const struct item_operations * const item_ops[TYPE_ANY + 1];
59843
59844 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
59845 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
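Note on the item_operations hunk above: every function pointer in the table, and the item_ops array itself, is const-qualified, so the dispatch targets become read-only data once initialized. A small userspace sketch of the same const-ified ops-table pattern (struct and function names here are made up for illustration):

#include <stdio.h>

/* With "* const" members the function pointers cannot be rewritten after
 * the object is initialized, so a stray write (or an attacker) cannot
 * redirect the indirect calls. */
struct item_ops {
	int (* const bytes_number)(int block_size);
	void (* const print_item)(const char *item);
};

static int my_bytes_number(int block_size) { return block_size / 2; }
static void my_print_item(const char *item) { printf("item: %s\n", item); }

static const struct item_ops ops = {
	.bytes_number = my_bytes_number,
	.print_item   = my_print_item,
};

int main(void)
{
	printf("bytes: %d\n", ops.bytes_number(4096));
	ops.print_item("demo");
	/* ops.print_item = NULL; would now fail to compile */
	return 0;
}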
59846 diff -urNp linux-2.6.32.45/include/linux/reiserfs_fs_sb.h linux-2.6.32.45/include/linux/reiserfs_fs_sb.h
59847 --- linux-2.6.32.45/include/linux/reiserfs_fs_sb.h 2011-03-27 14:31:47.000000000 -0400
59848 +++ linux-2.6.32.45/include/linux/reiserfs_fs_sb.h 2011-04-17 15:56:46.000000000 -0400
59849 @@ -377,7 +377,7 @@ struct reiserfs_sb_info {
59850 /* Comment? -Hans */
59851 wait_queue_head_t s_wait;
59852 /* To be obsoleted soon by per buffer seals.. -Hans */
59853 - atomic_t s_generation_counter; // increased by one every time the
59854 + atomic_unchecked_t s_generation_counter; // increased by one every time the
59855 // tree gets re-balanced
59856 unsigned long s_properties; /* File system properties. Currently holds
59857 on-disk FS format */
59858 diff -urNp linux-2.6.32.45/include/linux/relay.h linux-2.6.32.45/include/linux/relay.h
59859 --- linux-2.6.32.45/include/linux/relay.h 2011-03-27 14:31:47.000000000 -0400
59860 +++ linux-2.6.32.45/include/linux/relay.h 2011-08-05 20:33:55.000000000 -0400
59861 @@ -159,7 +159,7 @@ struct rchan_callbacks
59862 * The callback should return 0 if successful, negative if not.
59863 */
59864 int (*remove_buf_file)(struct dentry *dentry);
59865 -};
59866 +} __no_const;
59867
59868 /*
59869 * CONFIG_RELAY kernel API, kernel/relay.c
59870 diff -urNp linux-2.6.32.45/include/linux/rfkill.h linux-2.6.32.45/include/linux/rfkill.h
59871 --- linux-2.6.32.45/include/linux/rfkill.h 2011-03-27 14:31:47.000000000 -0400
59872 +++ linux-2.6.32.45/include/linux/rfkill.h 2011-08-23 21:22:38.000000000 -0400
59873 @@ -144,6 +144,7 @@ struct rfkill_ops {
59874 void (*query)(struct rfkill *rfkill, void *data);
59875 int (*set_block)(void *data, bool blocked);
59876 };
59877 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
59878
59879 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
59880 /**
59881 diff -urNp linux-2.6.32.45/include/linux/sched.h linux-2.6.32.45/include/linux/sched.h
59882 --- linux-2.6.32.45/include/linux/sched.h 2011-03-27 14:31:47.000000000 -0400
59883 +++ linux-2.6.32.45/include/linux/sched.h 2011-08-11 19:48:55.000000000 -0400
59884 @@ -101,6 +101,7 @@ struct bio;
59885 struct fs_struct;
59886 struct bts_context;
59887 struct perf_event_context;
59888 +struct linux_binprm;
59889
59890 /*
59891 * List of flags we want to share for kernel threads,
59892 @@ -350,7 +351,7 @@ extern signed long schedule_timeout_kill
59893 extern signed long schedule_timeout_uninterruptible(signed long timeout);
59894 asmlinkage void __schedule(void);
59895 asmlinkage void schedule(void);
59896 -extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
59897 +extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
59898
59899 struct nsproxy;
59900 struct user_namespace;
59901 @@ -371,9 +372,12 @@ struct user_namespace;
59902 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
59903
59904 extern int sysctl_max_map_count;
59905 +extern unsigned long sysctl_heap_stack_gap;
59906
59907 #include <linux/aio.h>
59908
59909 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
59910 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
59911 extern unsigned long
59912 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
59913 unsigned long, unsigned long);
59914 @@ -666,6 +670,16 @@ struct signal_struct {
59915 struct tty_audit_buf *tty_audit_buf;
59916 #endif
59917
59918 +#ifdef CONFIG_GRKERNSEC
59919 + u32 curr_ip;
59920 + u32 saved_ip;
59921 + u32 gr_saddr;
59922 + u32 gr_daddr;
59923 + u16 gr_sport;
59924 + u16 gr_dport;
59925 + u8 used_accept:1;
59926 +#endif
59927 +
59928 int oom_adj; /* OOM kill score adjustment (bit shift) */
59929 };
59930
59931 @@ -723,6 +737,11 @@ struct user_struct {
59932 struct key *session_keyring; /* UID's default session keyring */
59933 #endif
59934
59935 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
59936 + unsigned int banned;
59937 + unsigned long ban_expires;
59938 +#endif
59939 +
59940 /* Hash table maintenance information */
59941 struct hlist_node uidhash_node;
59942 uid_t uid;
59943 @@ -1328,8 +1347,8 @@ struct task_struct {
59944 struct list_head thread_group;
59945
59946 struct completion *vfork_done; /* for vfork() */
59947 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
59948 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
59949 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
59950 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
59951
59952 cputime_t utime, stime, utimescaled, stimescaled;
59953 cputime_t gtime;
59954 @@ -1343,16 +1362,6 @@ struct task_struct {
59955 struct task_cputime cputime_expires;
59956 struct list_head cpu_timers[3];
59957
59958 -/* process credentials */
59959 - const struct cred *real_cred; /* objective and real subjective task
59960 - * credentials (COW) */
59961 - const struct cred *cred; /* effective (overridable) subjective task
59962 - * credentials (COW) */
59963 - struct mutex cred_guard_mutex; /* guard against foreign influences on
59964 - * credential calculations
59965 - * (notably. ptrace) */
59966 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
59967 -
59968 char comm[TASK_COMM_LEN]; /* executable name excluding path
59969 - access with [gs]et_task_comm (which lock
59970 it with task_lock())
59971 @@ -1369,6 +1378,10 @@ struct task_struct {
59972 #endif
59973 /* CPU-specific state of this task */
59974 struct thread_struct thread;
59975 +/* thread_info moved to task_struct */
59976 +#ifdef CONFIG_X86
59977 + struct thread_info tinfo;
59978 +#endif
59979 /* filesystem information */
59980 struct fs_struct *fs;
59981 /* open file information */
59982 @@ -1436,6 +1449,15 @@ struct task_struct {
59983 int hardirq_context;
59984 int softirq_context;
59985 #endif
59986 +
59987 +/* process credentials */
59988 + const struct cred *real_cred; /* objective and real subjective task
59989 + * credentials (COW) */
59990 + struct mutex cred_guard_mutex; /* guard against foreign influences on
59991 + * credential calculations
59992 + * (notably. ptrace) */
59993 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
59994 +
59995 #ifdef CONFIG_LOCKDEP
59996 # define MAX_LOCK_DEPTH 48UL
59997 u64 curr_chain_key;
59998 @@ -1456,6 +1478,9 @@ struct task_struct {
59999
60000 struct backing_dev_info *backing_dev_info;
60001
60002 + const struct cred *cred; /* effective (overridable) subjective task
60003 + * credentials (COW) */
60004 +
60005 struct io_context *io_context;
60006
60007 unsigned long ptrace_message;
60008 @@ -1519,6 +1544,21 @@ struct task_struct {
60009 unsigned long default_timer_slack_ns;
60010
60011 struct list_head *scm_work_list;
60012 +
60013 +#ifdef CONFIG_GRKERNSEC
60014 + /* grsecurity */
60015 + struct dentry *gr_chroot_dentry;
60016 + struct acl_subject_label *acl;
60017 + struct acl_role_label *role;
60018 + struct file *exec_file;
60019 + u16 acl_role_id;
60020 + /* is this the task that authenticated to the special role */
60021 + u8 acl_sp_role;
60022 + u8 is_writable;
60023 + u8 brute;
60024 + u8 gr_is_chrooted;
60025 +#endif
60026 +
60027 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
60028 /* Index of current stored adress in ret_stack */
60029 int curr_ret_stack;
60030 @@ -1542,6 +1582,57 @@ struct task_struct {
60031 #endif /* CONFIG_TRACING */
60032 };
60033
60034 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
60035 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
60036 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
60037 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
60038 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
60039 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
60040 +
60041 +#ifdef CONFIG_PAX_SOFTMODE
60042 +extern int pax_softmode;
60043 +#endif
60044 +
60045 +extern int pax_check_flags(unsigned long *);
60046 +
60047 +/* if tsk != current then task_lock must be held on it */
60048 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60049 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
60050 +{
60051 + if (likely(tsk->mm))
60052 + return tsk->mm->pax_flags;
60053 + else
60054 + return 0UL;
60055 +}
60056 +
60057 +/* if tsk != current then task_lock must be held on it */
60058 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
60059 +{
60060 + if (likely(tsk->mm)) {
60061 + tsk->mm->pax_flags = flags;
60062 + return 0;
60063 + }
60064 + return -EINVAL;
60065 +}
60066 +#endif
60067 +
60068 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60069 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
60070 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
60071 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60072 +#endif
60073 +
60074 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
60075 +extern void pax_report_insns(void *pc, void *sp);
60076 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
60077 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
60078 +
60079 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
60080 +extern void pax_track_stack(void);
60081 +#else
60082 +static inline void pax_track_stack(void) {}
60083 +#endif
60084 +
60085 /* Future-safe accessor for struct task_struct's cpus_allowed. */
60086 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
60087
60088 @@ -1740,7 +1831,7 @@ extern void thread_group_times(struct ta
60089 #define PF_DUMPCORE 0x00000200 /* dumped core */
60090 #define PF_SIGNALED 0x00000400 /* killed by a signal */
60091 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
60092 -#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
60093 +#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
60094 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
60095 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
60096 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
60097 @@ -1978,7 +2069,9 @@ void yield(void);
60098 extern struct exec_domain default_exec_domain;
60099
60100 union thread_union {
60101 +#ifndef CONFIG_X86
60102 struct thread_info thread_info;
60103 +#endif
60104 unsigned long stack[THREAD_SIZE/sizeof(long)];
60105 };
60106
60107 @@ -2011,6 +2104,7 @@ extern struct pid_namespace init_pid_ns;
60108 */
60109
60110 extern struct task_struct *find_task_by_vpid(pid_t nr);
60111 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
60112 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
60113 struct pid_namespace *ns);
60114
60115 @@ -2155,7 +2249,7 @@ extern void __cleanup_sighand(struct sig
60116 extern void exit_itimers(struct signal_struct *);
60117 extern void flush_itimer_signals(void);
60118
60119 -extern NORET_TYPE void do_group_exit(int);
60120 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
60121
60122 extern void daemonize(const char *, ...);
60123 extern int allow_signal(int);
60124 @@ -2284,13 +2378,17 @@ static inline unsigned long *end_of_stac
60125
60126 #endif
60127
60128 -static inline int object_is_on_stack(void *obj)
60129 +static inline int object_starts_on_stack(void *obj)
60130 {
60131 - void *stack = task_stack_page(current);
60132 + const void *stack = task_stack_page(current);
60133
60134 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
60135 }
60136
60137 +#ifdef CONFIG_PAX_USERCOPY
60138 +extern int object_is_on_stack(const void *obj, unsigned long len);
60139 +#endif
60140 +
60141 extern void thread_info_cache_init(void);
60142
60143 #ifdef CONFIG_DEBUG_STACK_USAGE
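Note on the sched.h hunks above: object_is_on_stack() is renamed to object_starts_on_stack(), since it only tests whether the first byte of the object lies in the current stack, and a length-aware object_is_on_stack(obj, len) is declared for CONFIG_PAX_USERCOPY. A userspace sketch of the two range checks, using a plain buffer in place of the kernel stack (buffer, constant, and helper names are illustrative only):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE 8192	/* illustrative stack size, not the per-arch kernel value */

/* Start-of-object check, in the spirit of object_starts_on_stack():
 * only the first byte has to fall inside the stack region. */
static bool starts_on_stack(const void *stack, const void *obj)
{
	uintptr_t s = (uintptr_t)stack, o = (uintptr_t)obj;

	return o >= s && o < s + THREAD_SIZE;
}

/* Whole-object check, in the spirit of the length-aware PAX_USERCOPY
 * variant (a userspace analogue, not the kernel code): the entire
 * [obj, obj + len) range must stay within [stack, stack + THREAD_SIZE). */
static bool object_within_stack(const void *stack, const void *obj, unsigned long len)
{
	uintptr_t s = (uintptr_t)stack, o = (uintptr_t)obj;

	return o >= s && len <= THREAD_SIZE && o - s <= THREAD_SIZE - len;
}

int main(void)
{
	static unsigned char fake_stack[THREAD_SIZE];

	printf("starts: %d\n", starts_on_stack(fake_stack, fake_stack + 100));
	printf("fits:   %d\n", object_within_stack(fake_stack, fake_stack + 100, 64));
	printf("spills: %d\n", object_within_stack(fake_stack, fake_stack + THREAD_SIZE - 8, 64));
	return 0;
}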
60144 diff -urNp linux-2.6.32.45/include/linux/screen_info.h linux-2.6.32.45/include/linux/screen_info.h
60145 --- linux-2.6.32.45/include/linux/screen_info.h 2011-03-27 14:31:47.000000000 -0400
60146 +++ linux-2.6.32.45/include/linux/screen_info.h 2011-04-17 15:56:46.000000000 -0400
60147 @@ -42,7 +42,8 @@ struct screen_info {
60148 __u16 pages; /* 0x32 */
60149 __u16 vesa_attributes; /* 0x34 */
60150 __u32 capabilities; /* 0x36 */
60151 - __u8 _reserved[6]; /* 0x3a */
60152 + __u16 vesapm_size; /* 0x3a */
60153 + __u8 _reserved[4]; /* 0x3c */
60154 } __attribute__((packed));
60155
60156 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
60157 diff -urNp linux-2.6.32.45/include/linux/security.h linux-2.6.32.45/include/linux/security.h
60158 --- linux-2.6.32.45/include/linux/security.h 2011-03-27 14:31:47.000000000 -0400
60159 +++ linux-2.6.32.45/include/linux/security.h 2011-04-17 15:56:46.000000000 -0400
60160 @@ -34,6 +34,7 @@
60161 #include <linux/key.h>
60162 #include <linux/xfrm.h>
60163 #include <linux/gfp.h>
60164 +#include <linux/grsecurity.h>
60165 #include <net/flow.h>
60166
60167 /* Maximum number of letters for an LSM name string */
60168 diff -urNp linux-2.6.32.45/include/linux/seq_file.h linux-2.6.32.45/include/linux/seq_file.h
60169 --- linux-2.6.32.45/include/linux/seq_file.h 2011-03-27 14:31:47.000000000 -0400
60170 +++ linux-2.6.32.45/include/linux/seq_file.h 2011-08-23 21:22:38.000000000 -0400
60171 @@ -32,6 +32,7 @@ struct seq_operations {
60172 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
60173 int (*show) (struct seq_file *m, void *v);
60174 };
60175 +typedef struct seq_operations __no_const seq_operations_no_const;
60176
60177 #define SEQ_SKIP 1
60178
60179 diff -urNp linux-2.6.32.45/include/linux/shm.h linux-2.6.32.45/include/linux/shm.h
60180 --- linux-2.6.32.45/include/linux/shm.h 2011-03-27 14:31:47.000000000 -0400
60181 +++ linux-2.6.32.45/include/linux/shm.h 2011-04-17 15:56:46.000000000 -0400
60182 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
60183 pid_t shm_cprid;
60184 pid_t shm_lprid;
60185 struct user_struct *mlock_user;
60186 +#ifdef CONFIG_GRKERNSEC
60187 + time_t shm_createtime;
60188 + pid_t shm_lapid;
60189 +#endif
60190 };
60191
60192 /* shm_mode upper byte flags */
60193 diff -urNp linux-2.6.32.45/include/linux/skbuff.h linux-2.6.32.45/include/linux/skbuff.h
60194 --- linux-2.6.32.45/include/linux/skbuff.h 2011-03-27 14:31:47.000000000 -0400
60195 +++ linux-2.6.32.45/include/linux/skbuff.h 2011-08-21 15:27:56.000000000 -0400
60196 @@ -14,6 +14,7 @@
60197 #ifndef _LINUX_SKBUFF_H
60198 #define _LINUX_SKBUFF_H
60199
60200 +#include <linux/const.h>
60201 #include <linux/kernel.h>
60202 #include <linux/kmemcheck.h>
60203 #include <linux/compiler.h>
60204 @@ -544,7 +545,7 @@ static inline union skb_shared_tx *skb_t
60205 */
60206 static inline int skb_queue_empty(const struct sk_buff_head *list)
60207 {
60208 - return list->next == (struct sk_buff *)list;
60209 + return list->next == (const struct sk_buff *)list;
60210 }
60211
60212 /**
60213 @@ -557,7 +558,7 @@ static inline int skb_queue_empty(const
60214 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
60215 const struct sk_buff *skb)
60216 {
60217 - return (skb->next == (struct sk_buff *) list);
60218 + return (skb->next == (const struct sk_buff *) list);
60219 }
60220
60221 /**
60222 @@ -570,7 +571,7 @@ static inline bool skb_queue_is_last(con
60223 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
60224 const struct sk_buff *skb)
60225 {
60226 - return (skb->prev == (struct sk_buff *) list);
60227 + return (skb->prev == (const struct sk_buff *) list);
60228 }
60229
60230 /**
60231 @@ -1367,7 +1368,7 @@ static inline int skb_network_offset(con
60232 * headroom, you should not reduce this.
60233 */
60234 #ifndef NET_SKB_PAD
60235 -#define NET_SKB_PAD 32
60236 +#define NET_SKB_PAD (_AC(32,UL))
60237 #endif
60238
60239 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
60240 diff -urNp linux-2.6.32.45/include/linux/slab_def.h linux-2.6.32.45/include/linux/slab_def.h
60241 --- linux-2.6.32.45/include/linux/slab_def.h 2011-03-27 14:31:47.000000000 -0400
60242 +++ linux-2.6.32.45/include/linux/slab_def.h 2011-05-04 17:56:28.000000000 -0400
60243 @@ -69,10 +69,10 @@ struct kmem_cache {
60244 unsigned long node_allocs;
60245 unsigned long node_frees;
60246 unsigned long node_overflow;
60247 - atomic_t allochit;
60248 - atomic_t allocmiss;
60249 - atomic_t freehit;
60250 - atomic_t freemiss;
60251 + atomic_unchecked_t allochit;
60252 + atomic_unchecked_t allocmiss;
60253 + atomic_unchecked_t freehit;
60254 + atomic_unchecked_t freemiss;
60255
60256 /*
60257 * If debugging is enabled, then the allocator can add additional
60258 diff -urNp linux-2.6.32.45/include/linux/slab.h linux-2.6.32.45/include/linux/slab.h
60259 --- linux-2.6.32.45/include/linux/slab.h 2011-03-27 14:31:47.000000000 -0400
60260 +++ linux-2.6.32.45/include/linux/slab.h 2011-04-17 15:56:46.000000000 -0400
60261 @@ -11,12 +11,20 @@
60262
60263 #include <linux/gfp.h>
60264 #include <linux/types.h>
60265 +#include <linux/err.h>
60266
60267 /*
60268 * Flags to pass to kmem_cache_create().
60269 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
60270 */
60271 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
60272 +
60273 +#ifdef CONFIG_PAX_USERCOPY
60274 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
60275 +#else
60276 +#define SLAB_USERCOPY 0x00000000UL
60277 +#endif
60278 +
60279 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
60280 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
60281 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
60282 @@ -82,10 +90,13 @@
60283 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
60284 * Both make kfree a no-op.
60285 */
60286 -#define ZERO_SIZE_PTR ((void *)16)
60287 +#define ZERO_SIZE_PTR \
60288 +({ \
60289 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
60290 + (void *)(-MAX_ERRNO-1L); \
60291 +})
60292
60293 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
60294 - (unsigned long)ZERO_SIZE_PTR)
60295 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
60296
60297 /*
60298 * struct kmem_cache related prototypes
60299 @@ -138,6 +149,7 @@ void * __must_check krealloc(const void
60300 void kfree(const void *);
60301 void kzfree(const void *);
60302 size_t ksize(const void *);
60303 +void check_object_size(const void *ptr, unsigned long n, bool to);
60304
60305 /*
60306 * Allocator specific definitions. These are mainly used to establish optimized
60307 @@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t
60308
60309 void __init kmem_cache_init_late(void);
60310
60311 +#define kmalloc(x, y) \
60312 +({ \
60313 + void *___retval; \
60314 + intoverflow_t ___x = (intoverflow_t)x; \
60315 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
60316 + ___retval = NULL; \
60317 + else \
60318 + ___retval = kmalloc((size_t)___x, (y)); \
60319 + ___retval; \
60320 +})
60321 +
60322 +#define kmalloc_node(x, y, z) \
60323 +({ \
60324 + void *___retval; \
60325 + intoverflow_t ___x = (intoverflow_t)x; \
60326 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
60327 + ___retval = NULL; \
60328 + else \
60329 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
60330 + ___retval; \
60331 +})
60332 +
60333 +#define kzalloc(x, y) \
60334 +({ \
60335 + void *___retval; \
60336 + intoverflow_t ___x = (intoverflow_t)x; \
60337 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
60338 + ___retval = NULL; \
60339 + else \
60340 + ___retval = kzalloc((size_t)___x, (y)); \
60341 + ___retval; \
60342 +})
60343 +
60344 #endif /* _LINUX_SLAB_H */
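Note on the slab.h hunk above: the kmalloc()/kmalloc_node()/kzalloc() wrappers use a statement expression to evaluate the requested size in the wider intoverflow_t and refuse anything that no longer fits before calling the real allocator. A userspace sketch of the same guard, with a hypothetical 32-bit allocator standing in for kmalloc() so the rejection path can actually fire on a 64-bit host (names and types are illustrative; GCC/clang statement expressions are assumed, as in the kernel):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical allocator that only accepts 32-bit sizes, standing in for
 * kmalloc() on an ILP32 kernel. */
static void *alloc32(uint32_t size)
{
	return malloc(size);
}

/* Same statement-expression pattern as the wrappers in the hunk above:
 * compute the size in a wider type, refuse anything that does not fit,
 * otherwise forward the narrowed value to the real allocator. */
#define checked_alloc32(x)						\
({									\
	void *___retval;						\
	uint64_t ___x = (uint64_t)(x);					\
	if (___x > UINT32_MAX) {					\
		fprintf(stderr, "alloc32 size overflow\n");		\
		___retval = NULL;					\
	} else {							\
		___retval = alloc32((uint32_t)___x);			\
	}								\
	___retval;							\
})

int main(void)
{
	uint64_t n = 3, sz = 2000000000ULL;	/* 3 * 2e9 overflows 32 bits */
	void *ok = checked_alloc32(4096);
	void *bad = checked_alloc32(n * sz);

	printf("ok=%p bad=%p\n", ok, bad);
	free(ok);
	return 0;
}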
60345 diff -urNp linux-2.6.32.45/include/linux/slub_def.h linux-2.6.32.45/include/linux/slub_def.h
60346 --- linux-2.6.32.45/include/linux/slub_def.h 2011-03-27 14:31:47.000000000 -0400
60347 +++ linux-2.6.32.45/include/linux/slub_def.h 2011-08-05 20:33:55.000000000 -0400
60348 @@ -86,7 +86,7 @@ struct kmem_cache {
60349 struct kmem_cache_order_objects max;
60350 struct kmem_cache_order_objects min;
60351 gfp_t allocflags; /* gfp flags to use on each alloc */
60352 - int refcount; /* Refcount for slab cache destroy */
60353 + atomic_t refcount; /* Refcount for slab cache destroy */
60354 void (*ctor)(void *);
60355 int inuse; /* Offset to metadata */
60356 int align; /* Alignment */
60357 @@ -215,7 +215,7 @@ static __always_inline struct kmem_cache
60358 #endif
60359
60360 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
60361 -void *__kmalloc(size_t size, gfp_t flags);
60362 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
60363
60364 #ifdef CONFIG_KMEMTRACE
60365 extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
60366 diff -urNp linux-2.6.32.45/include/linux/sonet.h linux-2.6.32.45/include/linux/sonet.h
60367 --- linux-2.6.32.45/include/linux/sonet.h 2011-03-27 14:31:47.000000000 -0400
60368 +++ linux-2.6.32.45/include/linux/sonet.h 2011-04-17 15:56:46.000000000 -0400
60369 @@ -61,7 +61,7 @@ struct sonet_stats {
60370 #include <asm/atomic.h>
60371
60372 struct k_sonet_stats {
60373 -#define __HANDLE_ITEM(i) atomic_t i
60374 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
60375 __SONET_ITEMS
60376 #undef __HANDLE_ITEM
60377 };
60378 diff -urNp linux-2.6.32.45/include/linux/sunrpc/cache.h linux-2.6.32.45/include/linux/sunrpc/cache.h
60379 --- linux-2.6.32.45/include/linux/sunrpc/cache.h 2011-03-27 14:31:47.000000000 -0400
60380 +++ linux-2.6.32.45/include/linux/sunrpc/cache.h 2011-08-05 20:33:55.000000000 -0400
60381 @@ -125,7 +125,7 @@ struct cache_detail {
60382 */
60383 struct cache_req {
60384 struct cache_deferred_req *(*defer)(struct cache_req *req);
60385 -};
60386 +} __no_const;
60387 /* this must be embedded in a deferred_request that is being
60388 * delayed awaiting cache-fill
60389 */
60390 diff -urNp linux-2.6.32.45/include/linux/sunrpc/clnt.h linux-2.6.32.45/include/linux/sunrpc/clnt.h
60391 --- linux-2.6.32.45/include/linux/sunrpc/clnt.h 2011-03-27 14:31:47.000000000 -0400
60392 +++ linux-2.6.32.45/include/linux/sunrpc/clnt.h 2011-04-17 15:56:46.000000000 -0400
60393 @@ -167,9 +167,9 @@ static inline unsigned short rpc_get_por
60394 {
60395 switch (sap->sa_family) {
60396 case AF_INET:
60397 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
60398 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
60399 case AF_INET6:
60400 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
60401 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
60402 }
60403 return 0;
60404 }
60405 @@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const
60406 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
60407 const struct sockaddr *src)
60408 {
60409 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
60410 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
60411 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
60412
60413 dsin->sin_family = ssin->sin_family;
60414 @@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const
60415 if (sa->sa_family != AF_INET6)
60416 return 0;
60417
60418 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
60419 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
60420 }
60421
60422 #endif /* __KERNEL__ */
60423 diff -urNp linux-2.6.32.45/include/linux/sunrpc/svc_rdma.h linux-2.6.32.45/include/linux/sunrpc/svc_rdma.h
60424 --- linux-2.6.32.45/include/linux/sunrpc/svc_rdma.h 2011-03-27 14:31:47.000000000 -0400
60425 +++ linux-2.6.32.45/include/linux/sunrpc/svc_rdma.h 2011-05-04 17:56:28.000000000 -0400
60426 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
60427 extern unsigned int svcrdma_max_requests;
60428 extern unsigned int svcrdma_max_req_size;
60429
60430 -extern atomic_t rdma_stat_recv;
60431 -extern atomic_t rdma_stat_read;
60432 -extern atomic_t rdma_stat_write;
60433 -extern atomic_t rdma_stat_sq_starve;
60434 -extern atomic_t rdma_stat_rq_starve;
60435 -extern atomic_t rdma_stat_rq_poll;
60436 -extern atomic_t rdma_stat_rq_prod;
60437 -extern atomic_t rdma_stat_sq_poll;
60438 -extern atomic_t rdma_stat_sq_prod;
60439 +extern atomic_unchecked_t rdma_stat_recv;
60440 +extern atomic_unchecked_t rdma_stat_read;
60441 +extern atomic_unchecked_t rdma_stat_write;
60442 +extern atomic_unchecked_t rdma_stat_sq_starve;
60443 +extern atomic_unchecked_t rdma_stat_rq_starve;
60444 +extern atomic_unchecked_t rdma_stat_rq_poll;
60445 +extern atomic_unchecked_t rdma_stat_rq_prod;
60446 +extern atomic_unchecked_t rdma_stat_sq_poll;
60447 +extern atomic_unchecked_t rdma_stat_sq_prod;
60448
60449 #define RPCRDMA_VERSION 1
60450
60451 diff -urNp linux-2.6.32.45/include/linux/suspend.h linux-2.6.32.45/include/linux/suspend.h
60452 --- linux-2.6.32.45/include/linux/suspend.h 2011-03-27 14:31:47.000000000 -0400
60453 +++ linux-2.6.32.45/include/linux/suspend.h 2011-04-17 15:56:46.000000000 -0400
60454 @@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
60455 * which require special recovery actions in that situation.
60456 */
60457 struct platform_suspend_ops {
60458 - int (*valid)(suspend_state_t state);
60459 - int (*begin)(suspend_state_t state);
60460 - int (*prepare)(void);
60461 - int (*prepare_late)(void);
60462 - int (*enter)(suspend_state_t state);
60463 - void (*wake)(void);
60464 - void (*finish)(void);
60465 - void (*end)(void);
60466 - void (*recover)(void);
60467 + int (* const valid)(suspend_state_t state);
60468 + int (* const begin)(suspend_state_t state);
60469 + int (* const prepare)(void);
60470 + int (* const prepare_late)(void);
60471 + int (* const enter)(suspend_state_t state);
60472 + void (* const wake)(void);
60473 + void (* const finish)(void);
60474 + void (* const end)(void);
60475 + void (* const recover)(void);
60476 };
60477
60478 #ifdef CONFIG_SUSPEND
60479 @@ -120,7 +120,7 @@ struct platform_suspend_ops {
60480 * suspend_set_ops - set platform dependent suspend operations
60481 * @ops: The new suspend operations to set.
60482 */
60483 -extern void suspend_set_ops(struct platform_suspend_ops *ops);
60484 +extern void suspend_set_ops(const struct platform_suspend_ops *ops);
60485 extern int suspend_valid_only_mem(suspend_state_t state);
60486
60487 /**
60488 @@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t st
60489 #else /* !CONFIG_SUSPEND */
60490 #define suspend_valid_only_mem NULL
60491
60492 -static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
60493 +static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
60494 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
60495 #endif /* !CONFIG_SUSPEND */
60496
60497 @@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone
60498 * platforms which require special recovery actions in that situation.
60499 */
60500 struct platform_hibernation_ops {
60501 - int (*begin)(void);
60502 - void (*end)(void);
60503 - int (*pre_snapshot)(void);
60504 - void (*finish)(void);
60505 - int (*prepare)(void);
60506 - int (*enter)(void);
60507 - void (*leave)(void);
60508 - int (*pre_restore)(void);
60509 - void (*restore_cleanup)(void);
60510 - void (*recover)(void);
60511 + int (* const begin)(void);
60512 + void (* const end)(void);
60513 + int (* const pre_snapshot)(void);
60514 + void (* const finish)(void);
60515 + int (* const prepare)(void);
60516 + int (* const enter)(void);
60517 + void (* const leave)(void);
60518 + int (* const pre_restore)(void);
60519 + void (* const restore_cleanup)(void);
60520 + void (* const recover)(void);
60521 };
60522
60523 #ifdef CONFIG_HIBERNATION
60524 @@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct
60525 extern void swsusp_unset_page_free(struct page *);
60526 extern unsigned long get_safe_page(gfp_t gfp_mask);
60527
60528 -extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
60529 +extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
60530 extern int hibernate(void);
60531 extern bool system_entering_hibernation(void);
60532 #else /* CONFIG_HIBERNATION */
60533 @@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidd
60534 static inline void swsusp_set_page_free(struct page *p) {}
60535 static inline void swsusp_unset_page_free(struct page *p) {}
60536
60537 -static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
60538 +static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
60539 static inline int hibernate(void) { return -ENOSYS; }
60540 static inline bool system_entering_hibernation(void) { return false; }
60541 #endif /* CONFIG_HIBERNATION */
60542 diff -urNp linux-2.6.32.45/include/linux/sysctl.h linux-2.6.32.45/include/linux/sysctl.h
60543 --- linux-2.6.32.45/include/linux/sysctl.h 2011-03-27 14:31:47.000000000 -0400
60544 +++ linux-2.6.32.45/include/linux/sysctl.h 2011-04-17 15:56:46.000000000 -0400
60545 @@ -164,7 +164,11 @@ enum
60546 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
60547 };
60548
60549 -
60550 +#ifdef CONFIG_PAX_SOFTMODE
60551 +enum {
60552 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
60553 +};
60554 +#endif
60555
60556 /* CTL_VM names: */
60557 enum
60558 @@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_tab
60559
60560 extern int proc_dostring(struct ctl_table *, int,
60561 void __user *, size_t *, loff_t *);
60562 +extern int proc_dostring_modpriv(struct ctl_table *, int,
60563 + void __user *, size_t *, loff_t *);
60564 extern int proc_dointvec(struct ctl_table *, int,
60565 void __user *, size_t *, loff_t *);
60566 extern int proc_dointvec_minmax(struct ctl_table *, int,
60567 @@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name,
60568
60569 extern ctl_handler sysctl_data;
60570 extern ctl_handler sysctl_string;
60571 +extern ctl_handler sysctl_string_modpriv;
60572 extern ctl_handler sysctl_intvec;
60573 extern ctl_handler sysctl_jiffies;
60574 extern ctl_handler sysctl_ms_jiffies;
60575 diff -urNp linux-2.6.32.45/include/linux/sysfs.h linux-2.6.32.45/include/linux/sysfs.h
60576 --- linux-2.6.32.45/include/linux/sysfs.h 2011-03-27 14:31:47.000000000 -0400
60577 +++ linux-2.6.32.45/include/linux/sysfs.h 2011-04-17 15:56:46.000000000 -0400
60578 @@ -75,8 +75,8 @@ struct bin_attribute {
60579 };
60580
60581 struct sysfs_ops {
60582 - ssize_t (*show)(struct kobject *, struct attribute *,char *);
60583 - ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
60584 + ssize_t (* const show)(struct kobject *, struct attribute *,char *);
60585 + ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
60586 };
60587
60588 struct sysfs_dirent;
60589 diff -urNp linux-2.6.32.45/include/linux/thread_info.h linux-2.6.32.45/include/linux/thread_info.h
60590 --- linux-2.6.32.45/include/linux/thread_info.h 2011-03-27 14:31:47.000000000 -0400
60591 +++ linux-2.6.32.45/include/linux/thread_info.h 2011-04-17 15:56:46.000000000 -0400
60592 @@ -23,7 +23,7 @@ struct restart_block {
60593 };
60594 /* For futex_wait and futex_wait_requeue_pi */
60595 struct {
60596 - u32 *uaddr;
60597 + u32 __user *uaddr;
60598 u32 val;
60599 u32 flags;
60600 u32 bitset;
60601 diff -urNp linux-2.6.32.45/include/linux/tty.h linux-2.6.32.45/include/linux/tty.h
60602 --- linux-2.6.32.45/include/linux/tty.h 2011-03-27 14:31:47.000000000 -0400
60603 +++ linux-2.6.32.45/include/linux/tty.h 2011-08-05 20:33:55.000000000 -0400
60604 @@ -493,7 +493,6 @@ extern void tty_ldisc_begin(void);
60605 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
60606 extern void tty_ldisc_enable(struct tty_struct *tty);
60607
60608 -
60609 /* n_tty.c */
60610 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
60611
60612 diff -urNp linux-2.6.32.45/include/linux/tty_ldisc.h linux-2.6.32.45/include/linux/tty_ldisc.h
60613 --- linux-2.6.32.45/include/linux/tty_ldisc.h 2011-03-27 14:31:47.000000000 -0400
60614 +++ linux-2.6.32.45/include/linux/tty_ldisc.h 2011-04-17 15:56:46.000000000 -0400
60615 @@ -139,7 +139,7 @@ struct tty_ldisc_ops {
60616
60617 struct module *owner;
60618
60619 - int refcount;
60620 + atomic_t refcount;
60621 };
60622
60623 struct tty_ldisc {
60624 diff -urNp linux-2.6.32.45/include/linux/types.h linux-2.6.32.45/include/linux/types.h
60625 --- linux-2.6.32.45/include/linux/types.h 2011-03-27 14:31:47.000000000 -0400
60626 +++ linux-2.6.32.45/include/linux/types.h 2011-04-17 15:56:46.000000000 -0400
60627 @@ -191,10 +191,26 @@ typedef struct {
60628 volatile int counter;
60629 } atomic_t;
60630
60631 +#ifdef CONFIG_PAX_REFCOUNT
60632 +typedef struct {
60633 + volatile int counter;
60634 +} atomic_unchecked_t;
60635 +#else
60636 +typedef atomic_t atomic_unchecked_t;
60637 +#endif
60638 +
60639 #ifdef CONFIG_64BIT
60640 typedef struct {
60641 volatile long counter;
60642 } atomic64_t;
60643 +
60644 +#ifdef CONFIG_PAX_REFCOUNT
60645 +typedef struct {
60646 + volatile long counter;
60647 +} atomic64_unchecked_t;
60648 +#else
60649 +typedef atomic64_t atomic64_unchecked_t;
60650 +#endif
60651 #endif
60652
60653 struct ustat {
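Note on the types.h hunk above: atomic_unchecked_t, used throughout the rest of this patch, exists so that counters where wrap-around is harmless (statistics, generation numbers) can opt out of PAX_REFCOUNT's overflow detection while real reference counts stay protected. A toy single-threaded sketch of that checked/unchecked split (an analogy for the policy only, not the kernel's atomic implementation):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* "Checked" counter: refuses to wrap past INT_MAX, the way PAX_REFCOUNT
 * traps a reference-count overflow instead of letting it wrap. */
typedef struct { int counter; } checked_t;

/* "Unchecked" counter: an ordinary modular counter for statistics, where
 * wrapping is acceptable and no detection is wanted. */
typedef struct { unsigned int counter; } unchecked_t;

static bool checked_inc(checked_t *v)
{
	if (v->counter == INT_MAX)
		return false;	/* would overflow: report instead of wrapping */
	v->counter++;
	return true;
}

static void unchecked_inc(unchecked_t *v)
{
	v->counter++;
}

int main(void)
{
	checked_t ref = { INT_MAX };
	unchecked_t stat = { 0 };

	unchecked_inc(&stat);
	printf("checked inc near max: %s\n", checked_inc(&ref) ? "ok" : "refused");
	printf("stat = %u\n", stat.counter);
	return 0;
}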
60654 diff -urNp linux-2.6.32.45/include/linux/uaccess.h linux-2.6.32.45/include/linux/uaccess.h
60655 --- linux-2.6.32.45/include/linux/uaccess.h 2011-03-27 14:31:47.000000000 -0400
60656 +++ linux-2.6.32.45/include/linux/uaccess.h 2011-04-17 15:56:46.000000000 -0400
60657 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
60658 long ret; \
60659 mm_segment_t old_fs = get_fs(); \
60660 \
60661 - set_fs(KERNEL_DS); \
60662 pagefault_disable(); \
60663 + set_fs(KERNEL_DS); \
60664 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
60665 - pagefault_enable(); \
60666 set_fs(old_fs); \
60667 + pagefault_enable(); \
60668 ret; \
60669 })
60670
60671 @@ -93,7 +93,7 @@ static inline unsigned long __copy_from_
60672 * Safely read from address @src to the buffer at @dst. If a kernel fault
60673 * happens, handle that and return -EFAULT.
60674 */
60675 -extern long probe_kernel_read(void *dst, void *src, size_t size);
60676 +extern long probe_kernel_read(void *dst, const void *src, size_t size);
60677
60678 /*
60679 * probe_kernel_write(): safely attempt to write to a location
60680 @@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst,
60681 * Safely write to address @dst from the buffer at @src. If a kernel fault
60682 * happens, handle that and return -EFAULT.
60683 */
60684 -extern long probe_kernel_write(void *dst, void *src, size_t size);
60685 +extern long probe_kernel_write(void *dst, const void *src, size_t size);
60686
60687 #endif /* __LINUX_UACCESS_H__ */
60688 diff -urNp linux-2.6.32.45/include/linux/unaligned/access_ok.h linux-2.6.32.45/include/linux/unaligned/access_ok.h
60689 --- linux-2.6.32.45/include/linux/unaligned/access_ok.h 2011-03-27 14:31:47.000000000 -0400
60690 +++ linux-2.6.32.45/include/linux/unaligned/access_ok.h 2011-04-17 15:56:46.000000000 -0400
60691 @@ -6,32 +6,32 @@
60692
60693 static inline u16 get_unaligned_le16(const void *p)
60694 {
60695 - return le16_to_cpup((__le16 *)p);
60696 + return le16_to_cpup((const __le16 *)p);
60697 }
60698
60699 static inline u32 get_unaligned_le32(const void *p)
60700 {
60701 - return le32_to_cpup((__le32 *)p);
60702 + return le32_to_cpup((const __le32 *)p);
60703 }
60704
60705 static inline u64 get_unaligned_le64(const void *p)
60706 {
60707 - return le64_to_cpup((__le64 *)p);
60708 + return le64_to_cpup((const __le64 *)p);
60709 }
60710
60711 static inline u16 get_unaligned_be16(const void *p)
60712 {
60713 - return be16_to_cpup((__be16 *)p);
60714 + return be16_to_cpup((const __be16 *)p);
60715 }
60716
60717 static inline u32 get_unaligned_be32(const void *p)
60718 {
60719 - return be32_to_cpup((__be32 *)p);
60720 + return be32_to_cpup((const __be32 *)p);
60721 }
60722
60723 static inline u64 get_unaligned_be64(const void *p)
60724 {
60725 - return be64_to_cpup((__be64 *)p);
60726 + return be64_to_cpup((const __be64 *)p);
60727 }
60728
60729 static inline void put_unaligned_le16(u16 val, void *p)
60730 diff -urNp linux-2.6.32.45/include/linux/vmalloc.h linux-2.6.32.45/include/linux/vmalloc.h
60731 --- linux-2.6.32.45/include/linux/vmalloc.h 2011-03-27 14:31:47.000000000 -0400
60732 +++ linux-2.6.32.45/include/linux/vmalloc.h 2011-04-17 15:56:46.000000000 -0400
60733 @@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
60734 #define VM_MAP 0x00000004 /* vmap()ed pages */
60735 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
60736 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
60737 +
60738 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
60739 +#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
60740 +#endif
60741 +
60742 /* bits [20..32] reserved for arch specific ioremap internals */
60743
60744 /*
60745 @@ -123,4 +128,81 @@ struct vm_struct **pcpu_get_vm_areas(con
60746
60747 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
60748
60749 +#define vmalloc(x) \
60750 +({ \
60751 + void *___retval; \
60752 + intoverflow_t ___x = (intoverflow_t)x; \
60753 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
60754 + ___retval = NULL; \
60755 + else \
60756 + ___retval = vmalloc((unsigned long)___x); \
60757 + ___retval; \
60758 +})
60759 +
60760 +#define __vmalloc(x, y, z) \
60761 +({ \
60762 + void *___retval; \
60763 + intoverflow_t ___x = (intoverflow_t)x; \
60764 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
60765 + ___retval = NULL; \
60766 + else \
60767 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
60768 + ___retval; \
60769 +})
60770 +
60771 +#define vmalloc_user(x) \
60772 +({ \
60773 + void *___retval; \
60774 + intoverflow_t ___x = (intoverflow_t)x; \
60775 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
60776 + ___retval = NULL; \
60777 + else \
60778 + ___retval = vmalloc_user((unsigned long)___x); \
60779 + ___retval; \
60780 +})
60781 +
60782 +#define vmalloc_exec(x) \
60783 +({ \
60784 + void *___retval; \
60785 + intoverflow_t ___x = (intoverflow_t)x; \
60786 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
60787 + ___retval = NULL; \
60788 + else \
60789 + ___retval = vmalloc_exec((unsigned long)___x); \
60790 + ___retval; \
60791 +})
60792 +
60793 +#define vmalloc_node(x, y) \
60794 +({ \
60795 + void *___retval; \
60796 + intoverflow_t ___x = (intoverflow_t)x; \
60797 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
60798 + ___retval = NULL; \
60799 + else \
60800 + ___retval = vmalloc_node((unsigned long)___x, (y));\
60801 + ___retval; \
60802 +})
60803 +
60804 +#define vmalloc_32(x) \
60805 +({ \
60806 + void *___retval; \
60807 + intoverflow_t ___x = (intoverflow_t)x; \
60808 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
60809 + ___retval = NULL; \
60810 + else \
60811 + ___retval = vmalloc_32((unsigned long)___x); \
60812 + ___retval; \
60813 +})
60814 +
60815 +#define vmalloc_32_user(x) \
60816 +({ \
60817 + void *___retval; \
60818 + intoverflow_t ___x = (intoverflow_t)x; \
60819 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
60820 + ___retval = NULL; \
60821 + else \
60822 + ___retval = vmalloc_32_user((unsigned long)___x);\
60823 + ___retval; \
60824 +})
60825 +
60826 #endif /* _LINUX_VMALLOC_H */
60827 diff -urNp linux-2.6.32.45/include/linux/vmstat.h linux-2.6.32.45/include/linux/vmstat.h
60828 --- linux-2.6.32.45/include/linux/vmstat.h 2011-03-27 14:31:47.000000000 -0400
60829 +++ linux-2.6.32.45/include/linux/vmstat.h 2011-04-17 15:56:46.000000000 -0400
60830 @@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(in
60831 /*
60832 * Zone based page accounting with per cpu differentials.
60833 */
60834 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60835 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60836
60837 static inline void zone_page_state_add(long x, struct zone *zone,
60838 enum zone_stat_item item)
60839 {
60840 - atomic_long_add(x, &zone->vm_stat[item]);
60841 - atomic_long_add(x, &vm_stat[item]);
60842 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
60843 + atomic_long_add_unchecked(x, &vm_stat[item]);
60844 }
60845
60846 static inline unsigned long global_page_state(enum zone_stat_item item)
60847 {
60848 - long x = atomic_long_read(&vm_stat[item]);
60849 + long x = atomic_long_read_unchecked(&vm_stat[item]);
60850 #ifdef CONFIG_SMP
60851 if (x < 0)
60852 x = 0;
60853 @@ -158,7 +158,7 @@ static inline unsigned long global_page_
60854 static inline unsigned long zone_page_state(struct zone *zone,
60855 enum zone_stat_item item)
60856 {
60857 - long x = atomic_long_read(&zone->vm_stat[item]);
60858 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
60859 #ifdef CONFIG_SMP
60860 if (x < 0)
60861 x = 0;
60862 @@ -175,7 +175,7 @@ static inline unsigned long zone_page_st
60863 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
60864 enum zone_stat_item item)
60865 {
60866 - long x = atomic_long_read(&zone->vm_stat[item]);
60867 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
60868
60869 #ifdef CONFIG_SMP
60870 int cpu;
60871 @@ -264,8 +264,8 @@ static inline void __mod_zone_page_state
60872
60873 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
60874 {
60875 - atomic_long_inc(&zone->vm_stat[item]);
60876 - atomic_long_inc(&vm_stat[item]);
60877 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
60878 + atomic_long_inc_unchecked(&vm_stat[item]);
60879 }
60880
60881 static inline void __inc_zone_page_state(struct page *page,
60882 @@ -276,8 +276,8 @@ static inline void __inc_zone_page_state
60883
60884 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
60885 {
60886 - atomic_long_dec(&zone->vm_stat[item]);
60887 - atomic_long_dec(&vm_stat[item]);
60888 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
60889 + atomic_long_dec_unchecked(&vm_stat[item]);
60890 }
60891
60892 static inline void __dec_zone_page_state(struct page *page,
60893 diff -urNp linux-2.6.32.45/include/media/saa7146_vv.h linux-2.6.32.45/include/media/saa7146_vv.h
60894 --- linux-2.6.32.45/include/media/saa7146_vv.h 2011-03-27 14:31:47.000000000 -0400
60895 +++ linux-2.6.32.45/include/media/saa7146_vv.h 2011-08-23 21:22:38.000000000 -0400
60896 @@ -167,7 +167,7 @@ struct saa7146_ext_vv
60897 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
60898
60899 /* the extension can override this */
60900 - struct v4l2_ioctl_ops ops;
60901 + v4l2_ioctl_ops_no_const ops;
60902 /* pointer to the saa7146 core ops */
60903 const struct v4l2_ioctl_ops *core_ops;
60904
60905 diff -urNp linux-2.6.32.45/include/media/v4l2-dev.h linux-2.6.32.45/include/media/v4l2-dev.h
60906 --- linux-2.6.32.45/include/media/v4l2-dev.h 2011-03-27 14:31:47.000000000 -0400
60907 +++ linux-2.6.32.45/include/media/v4l2-dev.h 2011-08-05 20:33:55.000000000 -0400
60908 @@ -34,7 +34,7 @@ struct v4l2_device;
60909 #define V4L2_FL_UNREGISTERED (0)
60910
60911 struct v4l2_file_operations {
60912 - struct module *owner;
60913 + struct module * const owner;
60914 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
60915 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
60916 unsigned int (*poll) (struct file *, struct poll_table_struct *);
60917 diff -urNp linux-2.6.32.45/include/media/v4l2-device.h linux-2.6.32.45/include/media/v4l2-device.h
60918 --- linux-2.6.32.45/include/media/v4l2-device.h 2011-03-27 14:31:47.000000000 -0400
60919 +++ linux-2.6.32.45/include/media/v4l2-device.h 2011-05-04 17:56:28.000000000 -0400
60920 @@ -71,7 +71,7 @@ int __must_check v4l2_device_register(st
60921 this function returns 0. If the name ends with a digit (e.g. cx18),
60922 then the name will be set to cx18-0 since cx180 looks really odd. */
60923 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
60924 - atomic_t *instance);
60925 + atomic_unchecked_t *instance);
60926
60927 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
60928 Since the parent disappears this ensures that v4l2_dev doesn't have an
60929 diff -urNp linux-2.6.32.45/include/media/v4l2-ioctl.h linux-2.6.32.45/include/media/v4l2-ioctl.h
60930 --- linux-2.6.32.45/include/media/v4l2-ioctl.h 2011-03-27 14:31:47.000000000 -0400
60931 +++ linux-2.6.32.45/include/media/v4l2-ioctl.h 2011-08-23 21:22:38.000000000 -0400
60932 @@ -243,6 +243,7 @@ struct v4l2_ioctl_ops {
60933 long (*vidioc_default) (struct file *file, void *fh,
60934 int cmd, void *arg);
60935 };
60936 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
60937
60938
60939 /* v4l debugging and diagnostics */
60940 diff -urNp linux-2.6.32.45/include/net/flow.h linux-2.6.32.45/include/net/flow.h
60941 --- linux-2.6.32.45/include/net/flow.h 2011-03-27 14:31:47.000000000 -0400
60942 +++ linux-2.6.32.45/include/net/flow.h 2011-05-04 17:56:28.000000000 -0400
60943 @@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net
60944 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
60945 u8 dir, flow_resolve_t resolver);
60946 extern void flow_cache_flush(void);
60947 -extern atomic_t flow_cache_genid;
60948 +extern atomic_unchecked_t flow_cache_genid;
60949
60950 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
60951 {
60952 diff -urNp linux-2.6.32.45/include/net/inetpeer.h linux-2.6.32.45/include/net/inetpeer.h
60953 --- linux-2.6.32.45/include/net/inetpeer.h 2011-03-27 14:31:47.000000000 -0400
60954 +++ linux-2.6.32.45/include/net/inetpeer.h 2011-04-17 15:56:46.000000000 -0400
60955 @@ -24,7 +24,7 @@ struct inet_peer
60956 __u32 dtime; /* the time of last use of not
60957 * referenced entries */
60958 atomic_t refcnt;
60959 - atomic_t rid; /* Frag reception counter */
60960 + atomic_unchecked_t rid; /* Frag reception counter */
60961 __u32 tcp_ts;
60962 unsigned long tcp_ts_stamp;
60963 };
60964 diff -urNp linux-2.6.32.45/include/net/ip_vs.h linux-2.6.32.45/include/net/ip_vs.h
60965 --- linux-2.6.32.45/include/net/ip_vs.h 2011-03-27 14:31:47.000000000 -0400
60966 +++ linux-2.6.32.45/include/net/ip_vs.h 2011-05-04 17:56:28.000000000 -0400
60967 @@ -365,7 +365,7 @@ struct ip_vs_conn {
60968 struct ip_vs_conn *control; /* Master control connection */
60969 atomic_t n_control; /* Number of controlled ones */
60970 struct ip_vs_dest *dest; /* real server */
60971 - atomic_t in_pkts; /* incoming packet counter */
60972 + atomic_unchecked_t in_pkts; /* incoming packet counter */
60973
60974 /* packet transmitter for different forwarding methods. If it
60975 mangles the packet, it must return NF_DROP or better NF_STOLEN,
60976 @@ -466,7 +466,7 @@ struct ip_vs_dest {
60977 union nf_inet_addr addr; /* IP address of the server */
60978 __be16 port; /* port number of the server */
60979 volatile unsigned flags; /* dest status flags */
60980 - atomic_t conn_flags; /* flags to copy to conn */
60981 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
60982 atomic_t weight; /* server weight */
60983
60984 atomic_t refcnt; /* reference counter */
60985 diff -urNp linux-2.6.32.45/include/net/irda/ircomm_core.h linux-2.6.32.45/include/net/irda/ircomm_core.h
60986 --- linux-2.6.32.45/include/net/irda/ircomm_core.h 2011-03-27 14:31:47.000000000 -0400
60987 +++ linux-2.6.32.45/include/net/irda/ircomm_core.h 2011-08-05 20:33:55.000000000 -0400
60988 @@ -51,7 +51,7 @@ typedef struct {
60989 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
60990 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
60991 struct ircomm_info *);
60992 -} call_t;
60993 +} __no_const call_t;
60994
60995 struct ircomm_cb {
60996 irda_queue_t queue;
60997 diff -urNp linux-2.6.32.45/include/net/irda/ircomm_tty.h linux-2.6.32.45/include/net/irda/ircomm_tty.h
60998 --- linux-2.6.32.45/include/net/irda/ircomm_tty.h 2011-03-27 14:31:47.000000000 -0400
60999 +++ linux-2.6.32.45/include/net/irda/ircomm_tty.h 2011-04-17 15:56:46.000000000 -0400
61000 @@ -35,6 +35,7 @@
61001 #include <linux/termios.h>
61002 #include <linux/timer.h>
61003 #include <linux/tty.h> /* struct tty_struct */
61004 +#include <asm/local.h>
61005
61006 #include <net/irda/irias_object.h>
61007 #include <net/irda/ircomm_core.h>
61008 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
61009 unsigned short close_delay;
61010 unsigned short closing_wait; /* time to wait before closing */
61011
61012 - int open_count;
61013 - int blocked_open; /* # of blocked opens */
61014 + local_t open_count;
61015 + local_t blocked_open; /* # of blocked opens */
61016
61017 /* Protect concurent access to :
61018 * o self->open_count
61019 diff -urNp linux-2.6.32.45/include/net/iucv/af_iucv.h linux-2.6.32.45/include/net/iucv/af_iucv.h
61020 --- linux-2.6.32.45/include/net/iucv/af_iucv.h 2011-03-27 14:31:47.000000000 -0400
61021 +++ linux-2.6.32.45/include/net/iucv/af_iucv.h 2011-05-04 17:56:28.000000000 -0400
61022 @@ -87,7 +87,7 @@ struct iucv_sock {
61023 struct iucv_sock_list {
61024 struct hlist_head head;
61025 rwlock_t lock;
61026 - atomic_t autobind_name;
61027 + atomic_unchecked_t autobind_name;
61028 };
61029
61030 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
61031 diff -urNp linux-2.6.32.45/include/net/lapb.h linux-2.6.32.45/include/net/lapb.h
61032 --- linux-2.6.32.45/include/net/lapb.h 2011-03-27 14:31:47.000000000 -0400
61033 +++ linux-2.6.32.45/include/net/lapb.h 2011-08-05 20:33:55.000000000 -0400
61034 @@ -95,7 +95,7 @@ struct lapb_cb {
61035 struct sk_buff_head write_queue;
61036 struct sk_buff_head ack_queue;
61037 unsigned char window;
61038 - struct lapb_register_struct callbacks;
61039 + struct lapb_register_struct *callbacks;
61040
61041 /* FRMR control information */
61042 struct lapb_frame frmr_data;
61043 diff -urNp linux-2.6.32.45/include/net/neighbour.h linux-2.6.32.45/include/net/neighbour.h
61044 --- linux-2.6.32.45/include/net/neighbour.h 2011-03-27 14:31:47.000000000 -0400
61045 +++ linux-2.6.32.45/include/net/neighbour.h 2011-04-17 15:56:46.000000000 -0400
61046 @@ -125,12 +125,12 @@ struct neighbour
61047 struct neigh_ops
61048 {
61049 int family;
61050 - void (*solicit)(struct neighbour *, struct sk_buff*);
61051 - void (*error_report)(struct neighbour *, struct sk_buff*);
61052 - int (*output)(struct sk_buff*);
61053 - int (*connected_output)(struct sk_buff*);
61054 - int (*hh_output)(struct sk_buff*);
61055 - int (*queue_xmit)(struct sk_buff*);
61056 + void (* const solicit)(struct neighbour *, struct sk_buff*);
61057 + void (* const error_report)(struct neighbour *, struct sk_buff*);
61058 + int (* const output)(struct sk_buff*);
61059 + int (* const connected_output)(struct sk_buff*);
61060 + int (* const hh_output)(struct sk_buff*);
61061 + int (* const queue_xmit)(struct sk_buff*);
61062 };
61063
61064 struct pneigh_entry
61065 diff -urNp linux-2.6.32.45/include/net/netlink.h linux-2.6.32.45/include/net/netlink.h
61066 --- linux-2.6.32.45/include/net/netlink.h 2011-07-13 17:23:04.000000000 -0400
61067 +++ linux-2.6.32.45/include/net/netlink.h 2011-08-21 18:08:11.000000000 -0400
61068 @@ -335,7 +335,7 @@ static inline int nlmsg_ok(const struct
61069 {
61070 return (remaining >= (int) sizeof(struct nlmsghdr) &&
61071 nlh->nlmsg_len >= sizeof(struct nlmsghdr) &&
61072 - nlh->nlmsg_len <= remaining);
61073 + nlh->nlmsg_len <= (unsigned int)remaining);
61074 }
61075
61076 /**
61077 @@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct
61078 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
61079 {
61080 if (mark)
61081 - skb_trim(skb, (unsigned char *) mark - skb->data);
61082 + skb_trim(skb, (const unsigned char *) mark - skb->data);
61083 }
61084
61085 /**
61086 diff -urNp linux-2.6.32.45/include/net/netns/ipv4.h linux-2.6.32.45/include/net/netns/ipv4.h
61087 --- linux-2.6.32.45/include/net/netns/ipv4.h 2011-03-27 14:31:47.000000000 -0400
61088 +++ linux-2.6.32.45/include/net/netns/ipv4.h 2011-05-04 17:56:28.000000000 -0400
61089 @@ -54,7 +54,7 @@ struct netns_ipv4 {
61090 int current_rt_cache_rebuild_count;
61091
61092 struct timer_list rt_secret_timer;
61093 - atomic_t rt_genid;
61094 + atomic_unchecked_t rt_genid;
61095
61096 #ifdef CONFIG_IP_MROUTE
61097 struct sock *mroute_sk;
61098 diff -urNp linux-2.6.32.45/include/net/sctp/sctp.h linux-2.6.32.45/include/net/sctp/sctp.h
61099 --- linux-2.6.32.45/include/net/sctp/sctp.h 2011-03-27 14:31:47.000000000 -0400
61100 +++ linux-2.6.32.45/include/net/sctp/sctp.h 2011-04-17 15:56:46.000000000 -0400
61101 @@ -305,8 +305,8 @@ extern int sctp_debug_flag;
61102
61103 #else /* SCTP_DEBUG */
61104
61105 -#define SCTP_DEBUG_PRINTK(whatever...)
61106 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
61107 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
61108 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
61109 #define SCTP_ENABLE_DEBUG
61110 #define SCTP_DISABLE_DEBUG
61111 #define SCTP_ASSERT(expr, str, func)
61112 diff -urNp linux-2.6.32.45/include/net/secure_seq.h linux-2.6.32.45/include/net/secure_seq.h
61113 --- linux-2.6.32.45/include/net/secure_seq.h 2011-08-16 20:37:25.000000000 -0400
61114 +++ linux-2.6.32.45/include/net/secure_seq.h 2011-08-07 19:48:09.000000000 -0400
61115 @@ -7,14 +7,14 @@ extern __u32 secure_ip_id(__be32 daddr);
61116 extern __u32 secure_ipv6_id(const __be32 daddr[4]);
61117 extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
61118 extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
61119 - __be16 dport);
61120 + __be16 dport);
61121 extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
61122 __be16 sport, __be16 dport);
61123 extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
61124 - __be16 sport, __be16 dport);
61125 + __be16 sport, __be16 dport);
61126 extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
61127 - __be16 sport, __be16 dport);
61128 + __be16 sport, __be16 dport);
61129 extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
61130 - __be16 sport, __be16 dport);
61131 + __be16 sport, __be16 dport);
61132
61133 #endif /* _NET_SECURE_SEQ */
61134 diff -urNp linux-2.6.32.45/include/net/sock.h linux-2.6.32.45/include/net/sock.h
61135 --- linux-2.6.32.45/include/net/sock.h 2011-03-27 14:31:47.000000000 -0400
61136 +++ linux-2.6.32.45/include/net/sock.h 2011-08-21 17:24:37.000000000 -0400
61137 @@ -272,7 +272,7 @@ struct sock {
61138 rwlock_t sk_callback_lock;
61139 int sk_err,
61140 sk_err_soft;
61141 - atomic_t sk_drops;
61142 + atomic_unchecked_t sk_drops;
61143 unsigned short sk_ack_backlog;
61144 unsigned short sk_max_ack_backlog;
61145 __u32 sk_priority;
61146 @@ -737,7 +737,7 @@ static inline void sk_refcnt_debug_relea
61147 extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
61148 extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
61149 #else
61150 -static void inline sock_prot_inuse_add(struct net *net, struct proto *prot,
61151 +static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
61152 int inc)
61153 {
61154 }
61155 diff -urNp linux-2.6.32.45/include/net/tcp.h linux-2.6.32.45/include/net/tcp.h
61156 --- linux-2.6.32.45/include/net/tcp.h 2011-03-27 14:31:47.000000000 -0400
61157 +++ linux-2.6.32.45/include/net/tcp.h 2011-08-23 21:29:10.000000000 -0400
61158 @@ -1444,8 +1444,8 @@ enum tcp_seq_states {
61159 struct tcp_seq_afinfo {
61160 char *name;
61161 sa_family_t family;
61162 - struct file_operations seq_fops;
61163 - struct seq_operations seq_ops;
61164 + file_operations_no_const seq_fops;
61165 + seq_operations_no_const seq_ops;
61166 };
61167
61168 struct tcp_iter_state {
61169 diff -urNp linux-2.6.32.45/include/net/udp.h linux-2.6.32.45/include/net/udp.h
61170 --- linux-2.6.32.45/include/net/udp.h 2011-03-27 14:31:47.000000000 -0400
61171 +++ linux-2.6.32.45/include/net/udp.h 2011-08-23 21:29:34.000000000 -0400
61172 @@ -187,8 +187,8 @@ struct udp_seq_afinfo {
61173 char *name;
61174 sa_family_t family;
61175 struct udp_table *udp_table;
61176 - struct file_operations seq_fops;
61177 - struct seq_operations seq_ops;
61178 + file_operations_no_const seq_fops;
61179 + seq_operations_no_const seq_ops;
61180 };
61181
61182 struct udp_iter_state {
61183 diff -urNp linux-2.6.32.45/include/rdma/iw_cm.h linux-2.6.32.45/include/rdma/iw_cm.h
61184 --- linux-2.6.32.45/include/rdma/iw_cm.h 2011-03-27 14:31:47.000000000 -0400
61185 +++ linux-2.6.32.45/include/rdma/iw_cm.h 2011-08-05 20:33:55.000000000 -0400
61186 @@ -129,7 +129,7 @@ struct iw_cm_verbs {
61187 int backlog);
61188
61189 int (*destroy_listen)(struct iw_cm_id *cm_id);
61190 -};
61191 +} __no_const;
61192
61193 /**
61194 * iw_create_cm_id - Create an IW CM identifier.
61195 diff -urNp linux-2.6.32.45/include/scsi/libfc.h linux-2.6.32.45/include/scsi/libfc.h
61196 --- linux-2.6.32.45/include/scsi/libfc.h 2011-03-27 14:31:47.000000000 -0400
61197 +++ linux-2.6.32.45/include/scsi/libfc.h 2011-08-23 21:22:38.000000000 -0400
61198 @@ -675,6 +675,7 @@ struct libfc_function_template {
61199 */
61200 void (*disc_stop_final) (struct fc_lport *);
61201 };
61202 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
61203
61204 /* information used by the discovery layer */
61205 struct fc_disc {
61206 @@ -707,7 +708,7 @@ struct fc_lport {
61207 struct fc_disc disc;
61208
61209 /* Operational Information */
61210 - struct libfc_function_template tt;
61211 + libfc_function_template_no_const tt;
61212 u8 link_up;
61213 u8 qfull;
61214 enum fc_lport_state state;
61215 diff -urNp linux-2.6.32.45/include/scsi/scsi_device.h linux-2.6.32.45/include/scsi/scsi_device.h
61216 --- linux-2.6.32.45/include/scsi/scsi_device.h 2011-04-17 17:00:52.000000000 -0400
61217 +++ linux-2.6.32.45/include/scsi/scsi_device.h 2011-05-04 17:56:28.000000000 -0400
61218 @@ -156,9 +156,9 @@ struct scsi_device {
61219 unsigned int max_device_blocked; /* what device_blocked counts down from */
61220 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
61221
61222 - atomic_t iorequest_cnt;
61223 - atomic_t iodone_cnt;
61224 - atomic_t ioerr_cnt;
61225 + atomic_unchecked_t iorequest_cnt;
61226 + atomic_unchecked_t iodone_cnt;
61227 + atomic_unchecked_t ioerr_cnt;
61228
61229 struct device sdev_gendev,
61230 sdev_dev;
61231 diff -urNp linux-2.6.32.45/include/scsi/scsi_transport_fc.h linux-2.6.32.45/include/scsi/scsi_transport_fc.h
61232 --- linux-2.6.32.45/include/scsi/scsi_transport_fc.h 2011-03-27 14:31:47.000000000 -0400
61233 +++ linux-2.6.32.45/include/scsi/scsi_transport_fc.h 2011-08-05 20:33:55.000000000 -0400
61234 @@ -663,9 +663,9 @@ struct fc_function_template {
61235 int (*bsg_timeout)(struct fc_bsg_job *);
61236
61237 /* allocation lengths for host-specific data */
61238 - u32 dd_fcrport_size;
61239 - u32 dd_fcvport_size;
61240 - u32 dd_bsg_size;
61241 + const u32 dd_fcrport_size;
61242 + const u32 dd_fcvport_size;
61243 + const u32 dd_bsg_size;
61244
61245 /*
61246 * The driver sets these to tell the transport class it
61247 @@ -675,39 +675,39 @@ struct fc_function_template {
61248 */
61249
61250 /* remote port fixed attributes */
61251 - unsigned long show_rport_maxframe_size:1;
61252 - unsigned long show_rport_supported_classes:1;
61253 - unsigned long show_rport_dev_loss_tmo:1;
61254 + const unsigned long show_rport_maxframe_size:1;
61255 + const unsigned long show_rport_supported_classes:1;
61256 + const unsigned long show_rport_dev_loss_tmo:1;
61257
61258 /*
61259 * target dynamic attributes
61260 * These should all be "1" if the driver uses the remote port
61261 * add/delete functions (so attributes reflect rport values).
61262 */
61263 - unsigned long show_starget_node_name:1;
61264 - unsigned long show_starget_port_name:1;
61265 - unsigned long show_starget_port_id:1;
61266 + const unsigned long show_starget_node_name:1;
61267 + const unsigned long show_starget_port_name:1;
61268 + const unsigned long show_starget_port_id:1;
61269
61270 /* host fixed attributes */
61271 - unsigned long show_host_node_name:1;
61272 - unsigned long show_host_port_name:1;
61273 - unsigned long show_host_permanent_port_name:1;
61274 - unsigned long show_host_supported_classes:1;
61275 - unsigned long show_host_supported_fc4s:1;
61276 - unsigned long show_host_supported_speeds:1;
61277 - unsigned long show_host_maxframe_size:1;
61278 - unsigned long show_host_serial_number:1;
61279 + const unsigned long show_host_node_name:1;
61280 + const unsigned long show_host_port_name:1;
61281 + const unsigned long show_host_permanent_port_name:1;
61282 + const unsigned long show_host_supported_classes:1;
61283 + const unsigned long show_host_supported_fc4s:1;
61284 + const unsigned long show_host_supported_speeds:1;
61285 + const unsigned long show_host_maxframe_size:1;
61286 + const unsigned long show_host_serial_number:1;
61287 /* host dynamic attributes */
61288 - unsigned long show_host_port_id:1;
61289 - unsigned long show_host_port_type:1;
61290 - unsigned long show_host_port_state:1;
61291 - unsigned long show_host_active_fc4s:1;
61292 - unsigned long show_host_speed:1;
61293 - unsigned long show_host_fabric_name:1;
61294 - unsigned long show_host_symbolic_name:1;
61295 - unsigned long show_host_system_hostname:1;
61296 + const unsigned long show_host_port_id:1;
61297 + const unsigned long show_host_port_type:1;
61298 + const unsigned long show_host_port_state:1;
61299 + const unsigned long show_host_active_fc4s:1;
61300 + const unsigned long show_host_speed:1;
61301 + const unsigned long show_host_fabric_name:1;
61302 + const unsigned long show_host_symbolic_name:1;
61303 + const unsigned long show_host_system_hostname:1;
61304
61305 - unsigned long disable_target_scan:1;
61306 + const unsigned long disable_target_scan:1;
61307 };
61308
61309
61310 diff -urNp linux-2.6.32.45/include/sound/ac97_codec.h linux-2.6.32.45/include/sound/ac97_codec.h
61311 --- linux-2.6.32.45/include/sound/ac97_codec.h 2011-03-27 14:31:47.000000000 -0400
61312 +++ linux-2.6.32.45/include/sound/ac97_codec.h 2011-04-17 15:56:46.000000000 -0400
61313 @@ -419,15 +419,15 @@
61314 struct snd_ac97;
61315
61316 struct snd_ac97_build_ops {
61317 - int (*build_3d) (struct snd_ac97 *ac97);
61318 - int (*build_specific) (struct snd_ac97 *ac97);
61319 - int (*build_spdif) (struct snd_ac97 *ac97);
61320 - int (*build_post_spdif) (struct snd_ac97 *ac97);
61321 + int (* const build_3d) (struct snd_ac97 *ac97);
61322 + int (* const build_specific) (struct snd_ac97 *ac97);
61323 + int (* const build_spdif) (struct snd_ac97 *ac97);
61324 + int (* const build_post_spdif) (struct snd_ac97 *ac97);
61325 #ifdef CONFIG_PM
61326 - void (*suspend) (struct snd_ac97 *ac97);
61327 - void (*resume) (struct snd_ac97 *ac97);
61328 + void (* const suspend) (struct snd_ac97 *ac97);
61329 + void (* const resume) (struct snd_ac97 *ac97);
61330 #endif
61331 - void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
61332 + void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
61333 };
61334
61335 struct snd_ac97_bus_ops {
61336 @@ -477,7 +477,7 @@ struct snd_ac97_template {
61337
61338 struct snd_ac97 {
61339 /* -- lowlevel (hardware) driver specific -- */
61340 - struct snd_ac97_build_ops * build_ops;
61341 + const struct snd_ac97_build_ops * build_ops;
61342 void *private_data;
61343 void (*private_free) (struct snd_ac97 *ac97);
61344 /* --- */
61345 diff -urNp linux-2.6.32.45/include/sound/ak4xxx-adda.h linux-2.6.32.45/include/sound/ak4xxx-adda.h
61346 --- linux-2.6.32.45/include/sound/ak4xxx-adda.h 2011-03-27 14:31:47.000000000 -0400
61347 +++ linux-2.6.32.45/include/sound/ak4xxx-adda.h 2011-08-05 20:33:55.000000000 -0400
61348 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
61349 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
61350 unsigned char val);
61351 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
61352 -};
61353 +} __no_const;
61354
61355 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
61356
61357 diff -urNp linux-2.6.32.45/include/sound/hwdep.h linux-2.6.32.45/include/sound/hwdep.h
61358 --- linux-2.6.32.45/include/sound/hwdep.h 2011-03-27 14:31:47.000000000 -0400
61359 +++ linux-2.6.32.45/include/sound/hwdep.h 2011-08-05 20:33:55.000000000 -0400
61360 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
61361 struct snd_hwdep_dsp_status *status);
61362 int (*dsp_load)(struct snd_hwdep *hw,
61363 struct snd_hwdep_dsp_image *image);
61364 -};
61365 +} __no_const;
61366
61367 struct snd_hwdep {
61368 struct snd_card *card;
61369 diff -urNp linux-2.6.32.45/include/sound/info.h linux-2.6.32.45/include/sound/info.h
61370 --- linux-2.6.32.45/include/sound/info.h 2011-03-27 14:31:47.000000000 -0400
61371 +++ linux-2.6.32.45/include/sound/info.h 2011-08-05 20:33:55.000000000 -0400
61372 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
61373 struct snd_info_buffer *buffer);
61374 void (*write)(struct snd_info_entry *entry,
61375 struct snd_info_buffer *buffer);
61376 -};
61377 +} __no_const;
61378
61379 struct snd_info_entry_ops {
61380 int (*open)(struct snd_info_entry *entry,
61381 diff -urNp linux-2.6.32.45/include/sound/pcm.h linux-2.6.32.45/include/sound/pcm.h
61382 --- linux-2.6.32.45/include/sound/pcm.h 2011-03-27 14:31:47.000000000 -0400
61383 +++ linux-2.6.32.45/include/sound/pcm.h 2011-08-23 21:22:38.000000000 -0400
61384 @@ -80,6 +80,7 @@ struct snd_pcm_ops {
61385 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
61386 int (*ack)(struct snd_pcm_substream *substream);
61387 };
61388 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
61389
61390 /*
61391 *
61392 diff -urNp linux-2.6.32.45/include/sound/sb16_csp.h linux-2.6.32.45/include/sound/sb16_csp.h
61393 --- linux-2.6.32.45/include/sound/sb16_csp.h 2011-03-27 14:31:47.000000000 -0400
61394 +++ linux-2.6.32.45/include/sound/sb16_csp.h 2011-08-05 20:33:55.000000000 -0400
61395 @@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
61396 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
61397 int (*csp_stop) (struct snd_sb_csp * p);
61398 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
61399 -};
61400 +} __no_const;
61401
61402 /*
61403 * CSP private data
61404 diff -urNp linux-2.6.32.45/include/sound/ymfpci.h linux-2.6.32.45/include/sound/ymfpci.h
61405 --- linux-2.6.32.45/include/sound/ymfpci.h 2011-03-27 14:31:47.000000000 -0400
61406 +++ linux-2.6.32.45/include/sound/ymfpci.h 2011-05-04 17:56:28.000000000 -0400
61407 @@ -358,7 +358,7 @@ struct snd_ymfpci {
61408 spinlock_t reg_lock;
61409 spinlock_t voice_lock;
61410 wait_queue_head_t interrupt_sleep;
61411 - atomic_t interrupt_sleep_count;
61412 + atomic_unchecked_t interrupt_sleep_count;
61413 struct snd_info_entry *proc_entry;
61414 const struct firmware *dsp_microcode;
61415 const struct firmware *controller_microcode;
61416 diff -urNp linux-2.6.32.45/include/trace/events/irq.h linux-2.6.32.45/include/trace/events/irq.h
61417 --- linux-2.6.32.45/include/trace/events/irq.h 2011-03-27 14:31:47.000000000 -0400
61418 +++ linux-2.6.32.45/include/trace/events/irq.h 2011-04-17 15:56:46.000000000 -0400
61419 @@ -34,7 +34,7 @@
61420 */
61421 TRACE_EVENT(irq_handler_entry,
61422
61423 - TP_PROTO(int irq, struct irqaction *action),
61424 + TP_PROTO(int irq, const struct irqaction *action),
61425
61426 TP_ARGS(irq, action),
61427
61428 @@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
61429 */
61430 TRACE_EVENT(irq_handler_exit,
61431
61432 - TP_PROTO(int irq, struct irqaction *action, int ret),
61433 + TP_PROTO(int irq, const struct irqaction *action, int ret),
61434
61435 TP_ARGS(irq, action, ret),
61436
61437 @@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
61438 */
61439 TRACE_EVENT(softirq_entry,
61440
61441 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
61442 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
61443
61444 TP_ARGS(h, vec),
61445
61446 @@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
61447 */
61448 TRACE_EVENT(softirq_exit,
61449
61450 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
61451 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
61452
61453 TP_ARGS(h, vec),
61454
61455 diff -urNp linux-2.6.32.45/include/video/uvesafb.h linux-2.6.32.45/include/video/uvesafb.h
61456 --- linux-2.6.32.45/include/video/uvesafb.h 2011-03-27 14:31:47.000000000 -0400
61457 +++ linux-2.6.32.45/include/video/uvesafb.h 2011-04-17 15:56:46.000000000 -0400
61458 @@ -177,6 +177,7 @@ struct uvesafb_par {
61459 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
61460 u8 pmi_setpal; /* PMI for palette changes */
61461 u16 *pmi_base; /* protected mode interface location */
61462 + u8 *pmi_code; /* protected mode code location */
61463 void *pmi_start;
61464 void *pmi_pal;
61465 u8 *vbe_state_orig; /*
61466 diff -urNp linux-2.6.32.45/init/do_mounts.c linux-2.6.32.45/init/do_mounts.c
61467 --- linux-2.6.32.45/init/do_mounts.c 2011-03-27 14:31:47.000000000 -0400
61468 +++ linux-2.6.32.45/init/do_mounts.c 2011-04-17 15:56:46.000000000 -0400
61469 @@ -216,11 +216,11 @@ static void __init get_fs_names(char *pa
61470
61471 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
61472 {
61473 - int err = sys_mount(name, "/root", fs, flags, data);
61474 + int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
61475 if (err)
61476 return err;
61477
61478 - sys_chdir("/root");
61479 + sys_chdir((__force const char __user *)"/root");
61480 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
61481 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
61482 current->fs->pwd.mnt->mnt_sb->s_type->name,
61483 @@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...
61484 va_start(args, fmt);
61485 vsprintf(buf, fmt, args);
61486 va_end(args);
61487 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
61488 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
61489 if (fd >= 0) {
61490 sys_ioctl(fd, FDEJECT, 0);
61491 sys_close(fd);
61492 }
61493 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
61494 - fd = sys_open("/dev/console", O_RDWR, 0);
61495 + fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
61496 if (fd >= 0) {
61497 sys_ioctl(fd, TCGETS, (long)&termios);
61498 termios.c_lflag &= ~ICANON;
61499 sys_ioctl(fd, TCSETSF, (long)&termios);
61500 - sys_read(fd, &c, 1);
61501 + sys_read(fd, (char __user *)&c, 1);
61502 termios.c_lflag |= ICANON;
61503 sys_ioctl(fd, TCSETSF, (long)&termios);
61504 sys_close(fd);
61505 @@ -416,6 +416,6 @@ void __init prepare_namespace(void)
61506 mount_root();
61507 out:
61508 devtmpfs_mount("dev");
61509 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
61510 - sys_chroot(".");
61511 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
61512 + sys_chroot((__force char __user *)".");
61513 }
61514 diff -urNp linux-2.6.32.45/init/do_mounts.h linux-2.6.32.45/init/do_mounts.h
61515 --- linux-2.6.32.45/init/do_mounts.h 2011-03-27 14:31:47.000000000 -0400
61516 +++ linux-2.6.32.45/init/do_mounts.h 2011-04-17 15:56:46.000000000 -0400
61517 @@ -15,15 +15,15 @@ extern int root_mountflags;
61518
61519 static inline int create_dev(char *name, dev_t dev)
61520 {
61521 - sys_unlink(name);
61522 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
61523 + sys_unlink((__force char __user *)name);
61524 + return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
61525 }
61526
61527 #if BITS_PER_LONG == 32
61528 static inline u32 bstat(char *name)
61529 {
61530 struct stat64 stat;
61531 - if (sys_stat64(name, &stat) != 0)
61532 + if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
61533 return 0;
61534 if (!S_ISBLK(stat.st_mode))
61535 return 0;
61536 diff -urNp linux-2.6.32.45/init/do_mounts_initrd.c linux-2.6.32.45/init/do_mounts_initrd.c
61537 --- linux-2.6.32.45/init/do_mounts_initrd.c 2011-03-27 14:31:47.000000000 -0400
61538 +++ linux-2.6.32.45/init/do_mounts_initrd.c 2011-04-17 15:56:46.000000000 -0400
61539 @@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shel
61540 sys_close(old_fd);sys_close(root_fd);
61541 sys_close(0);sys_close(1);sys_close(2);
61542 sys_setsid();
61543 - (void) sys_open("/dev/console",O_RDWR,0);
61544 + (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
61545 (void) sys_dup(0);
61546 (void) sys_dup(0);
61547 return kernel_execve(shell, argv, envp_init);
61548 @@ -47,13 +47,13 @@ static void __init handle_initrd(void)
61549 create_dev("/dev/root.old", Root_RAM0);
61550 /* mount initrd on rootfs' /root */
61551 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
61552 - sys_mkdir("/old", 0700);
61553 - root_fd = sys_open("/", 0, 0);
61554 - old_fd = sys_open("/old", 0, 0);
61555 + sys_mkdir((__force const char __user *)"/old", 0700);
61556 + root_fd = sys_open((__force const char __user *)"/", 0, 0);
61557 + old_fd = sys_open((__force const char __user *)"/old", 0, 0);
61558 /* move initrd over / and chdir/chroot in initrd root */
61559 - sys_chdir("/root");
61560 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
61561 - sys_chroot(".");
61562 + sys_chdir((__force const char __user *)"/root");
61563 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
61564 + sys_chroot((__force const char __user *)".");
61565
61566 /*
61567 * In case that a resume from disk is carried out by linuxrc or one of
61568 @@ -70,15 +70,15 @@ static void __init handle_initrd(void)
61569
61570 /* move initrd to rootfs' /old */
61571 sys_fchdir(old_fd);
61572 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
61573 + sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
61574 /* switch root and cwd back to / of rootfs */
61575 sys_fchdir(root_fd);
61576 - sys_chroot(".");
61577 + sys_chroot((__force const char __user *)".");
61578 sys_close(old_fd);
61579 sys_close(root_fd);
61580
61581 if (new_decode_dev(real_root_dev) == Root_RAM0) {
61582 - sys_chdir("/old");
61583 + sys_chdir((__force const char __user *)"/old");
61584 return;
61585 }
61586
61587 @@ -86,17 +86,17 @@ static void __init handle_initrd(void)
61588 mount_root();
61589
61590 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
61591 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
61592 + error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
61593 if (!error)
61594 printk("okay\n");
61595 else {
61596 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
61597 + int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
61598 if (error == -ENOENT)
61599 printk("/initrd does not exist. Ignored.\n");
61600 else
61601 printk("failed\n");
61602 printk(KERN_NOTICE "Unmounting old root\n");
61603 - sys_umount("/old", MNT_DETACH);
61604 + sys_umount((__force char __user *)"/old", MNT_DETACH);
61605 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
61606 if (fd < 0) {
61607 error = fd;
61608 @@ -119,11 +119,11 @@ int __init initrd_load(void)
61609 * mounted in the normal path.
61610 */
61611 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
61612 - sys_unlink("/initrd.image");
61613 + sys_unlink((__force const char __user *)"/initrd.image");
61614 handle_initrd();
61615 return 1;
61616 }
61617 }
61618 - sys_unlink("/initrd.image");
61619 + sys_unlink((__force const char __user *)"/initrd.image");
61620 return 0;
61621 }
61622 diff -urNp linux-2.6.32.45/init/do_mounts_md.c linux-2.6.32.45/init/do_mounts_md.c
61623 --- linux-2.6.32.45/init/do_mounts_md.c 2011-03-27 14:31:47.000000000 -0400
61624 +++ linux-2.6.32.45/init/do_mounts_md.c 2011-04-17 15:56:46.000000000 -0400
61625 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
61626 partitioned ? "_d" : "", minor,
61627 md_setup_args[ent].device_names);
61628
61629 - fd = sys_open(name, 0, 0);
61630 + fd = sys_open((__force char __user *)name, 0, 0);
61631 if (fd < 0) {
61632 printk(KERN_ERR "md: open failed - cannot start "
61633 "array %s\n", name);
61634 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
61635 * array without it
61636 */
61637 sys_close(fd);
61638 - fd = sys_open(name, 0, 0);
61639 + fd = sys_open((__force char __user *)name, 0, 0);
61640 sys_ioctl(fd, BLKRRPART, 0);
61641 }
61642 sys_close(fd);
61643 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
61644
61645 wait_for_device_probe();
61646
61647 - fd = sys_open("/dev/md0", 0, 0);
61648 + fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
61649 if (fd >= 0) {
61650 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
61651 sys_close(fd);
61652 diff -urNp linux-2.6.32.45/init/initramfs.c linux-2.6.32.45/init/initramfs.c
61653 --- linux-2.6.32.45/init/initramfs.c 2011-03-27 14:31:47.000000000 -0400
61654 +++ linux-2.6.32.45/init/initramfs.c 2011-04-17 15:56:46.000000000 -0400
61655 @@ -74,7 +74,7 @@ static void __init free_hash(void)
61656 }
61657 }
61658
61659 -static long __init do_utime(char __user *filename, time_t mtime)
61660 +static long __init do_utime(__force char __user *filename, time_t mtime)
61661 {
61662 struct timespec t[2];
61663
61664 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
61665 struct dir_entry *de, *tmp;
61666 list_for_each_entry_safe(de, tmp, &dir_list, list) {
61667 list_del(&de->list);
61668 - do_utime(de->name, de->mtime);
61669 + do_utime((__force char __user *)de->name, de->mtime);
61670 kfree(de->name);
61671 kfree(de);
61672 }
61673 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
61674 if (nlink >= 2) {
61675 char *old = find_link(major, minor, ino, mode, collected);
61676 if (old)
61677 - return (sys_link(old, collected) < 0) ? -1 : 1;
61678 + return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
61679 }
61680 return 0;
61681 }
61682 @@ -280,11 +280,11 @@ static void __init clean_path(char *path
61683 {
61684 struct stat st;
61685
61686 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
61687 + if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
61688 if (S_ISDIR(st.st_mode))
61689 - sys_rmdir(path);
61690 + sys_rmdir((__force char __user *)path);
61691 else
61692 - sys_unlink(path);
61693 + sys_unlink((__force char __user *)path);
61694 }
61695 }
61696
61697 @@ -305,7 +305,7 @@ static int __init do_name(void)
61698 int openflags = O_WRONLY|O_CREAT;
61699 if (ml != 1)
61700 openflags |= O_TRUNC;
61701 - wfd = sys_open(collected, openflags, mode);
61702 + wfd = sys_open((__force char __user *)collected, openflags, mode);
61703
61704 if (wfd >= 0) {
61705 sys_fchown(wfd, uid, gid);
61706 @@ -317,17 +317,17 @@ static int __init do_name(void)
61707 }
61708 }
61709 } else if (S_ISDIR(mode)) {
61710 - sys_mkdir(collected, mode);
61711 - sys_chown(collected, uid, gid);
61712 - sys_chmod(collected, mode);
61713 + sys_mkdir((__force char __user *)collected, mode);
61714 + sys_chown((__force char __user *)collected, uid, gid);
61715 + sys_chmod((__force char __user *)collected, mode);
61716 dir_add(collected, mtime);
61717 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
61718 S_ISFIFO(mode) || S_ISSOCK(mode)) {
61719 if (maybe_link() == 0) {
61720 - sys_mknod(collected, mode, rdev);
61721 - sys_chown(collected, uid, gid);
61722 - sys_chmod(collected, mode);
61723 - do_utime(collected, mtime);
61724 + sys_mknod((__force char __user *)collected, mode, rdev);
61725 + sys_chown((__force char __user *)collected, uid, gid);
61726 + sys_chmod((__force char __user *)collected, mode);
61727 + do_utime((__force char __user *)collected, mtime);
61728 }
61729 }
61730 return 0;
61731 @@ -336,15 +336,15 @@ static int __init do_name(void)
61732 static int __init do_copy(void)
61733 {
61734 if (count >= body_len) {
61735 - sys_write(wfd, victim, body_len);
61736 + sys_write(wfd, (__force char __user *)victim, body_len);
61737 sys_close(wfd);
61738 - do_utime(vcollected, mtime);
61739 + do_utime((__force char __user *)vcollected, mtime);
61740 kfree(vcollected);
61741 eat(body_len);
61742 state = SkipIt;
61743 return 0;
61744 } else {
61745 - sys_write(wfd, victim, count);
61746 + sys_write(wfd, (__force char __user *)victim, count);
61747 body_len -= count;
61748 eat(count);
61749 return 1;
61750 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
61751 {
61752 collected[N_ALIGN(name_len) + body_len] = '\0';
61753 clean_path(collected, 0);
61754 - sys_symlink(collected + N_ALIGN(name_len), collected);
61755 - sys_lchown(collected, uid, gid);
61756 - do_utime(collected, mtime);
61757 + sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
61758 + sys_lchown((__force char __user *)collected, uid, gid);
61759 + do_utime((__force char __user *)collected, mtime);
61760 state = SkipIt;
61761 next_state = Reset;
61762 return 0;
61763 diff -urNp linux-2.6.32.45/init/Kconfig linux-2.6.32.45/init/Kconfig
61764 --- linux-2.6.32.45/init/Kconfig 2011-05-10 22:12:01.000000000 -0400
61765 +++ linux-2.6.32.45/init/Kconfig 2011-05-10 22:12:34.000000000 -0400
61766 @@ -1004,7 +1004,7 @@ config SLUB_DEBUG
61767
61768 config COMPAT_BRK
61769 bool "Disable heap randomization"
61770 - default y
61771 + default n
61772 help
61773 Randomizing heap placement makes heap exploits harder, but it
61774 also breaks ancient binaries (including anything libc5 based).
61775 diff -urNp linux-2.6.32.45/init/main.c linux-2.6.32.45/init/main.c
61776 --- linux-2.6.32.45/init/main.c 2011-05-10 22:12:01.000000000 -0400
61777 +++ linux-2.6.32.45/init/main.c 2011-08-05 20:33:55.000000000 -0400
61778 @@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void)
61779 #ifdef CONFIG_TC
61780 extern void tc_init(void);
61781 #endif
61782 +extern void grsecurity_init(void);
61783
61784 enum system_states system_state __read_mostly;
61785 EXPORT_SYMBOL(system_state);
61786 @@ -183,6 +184,49 @@ static int __init set_reset_devices(char
61787
61788 __setup("reset_devices", set_reset_devices);
61789
61790 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
61791 +extern char pax_enter_kernel_user[];
61792 +extern char pax_exit_kernel_user[];
61793 +extern pgdval_t clone_pgd_mask;
61794 +#endif
61795 +
61796 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
61797 +static int __init setup_pax_nouderef(char *str)
61798 +{
61799 +#ifdef CONFIG_X86_32
61800 + unsigned int cpu;
61801 + struct desc_struct *gdt;
61802 +
61803 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
61804 + gdt = get_cpu_gdt_table(cpu);
61805 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
61806 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
61807 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
61808 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
61809 + }
61810 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
61811 +#else
61812 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
61813 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
61814 + clone_pgd_mask = ~(pgdval_t)0UL;
61815 +#endif
61816 +
61817 + return 0;
61818 +}
61819 +early_param("pax_nouderef", setup_pax_nouderef);
61820 +#endif
61821 +
61822 +#ifdef CONFIG_PAX_SOFTMODE
61823 +int pax_softmode;
61824 +
61825 +static int __init setup_pax_softmode(char *str)
61826 +{
61827 + get_option(&str, &pax_softmode);
61828 + return 1;
61829 +}
61830 +__setup("pax_softmode=", setup_pax_softmode);
61831 +#endif
61832 +
61833 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
61834 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
61835 static const char *panic_later, *panic_param;
61836 @@ -705,52 +749,53 @@ int initcall_debug;
61837 core_param(initcall_debug, initcall_debug, bool, 0644);
61838
61839 static char msgbuf[64];
61840 -static struct boot_trace_call call;
61841 -static struct boot_trace_ret ret;
61842 +static struct boot_trace_call trace_call;
61843 +static struct boot_trace_ret trace_ret;
61844
61845 int do_one_initcall(initcall_t fn)
61846 {
61847 int count = preempt_count();
61848 ktime_t calltime, delta, rettime;
61849 + const char *msg1 = "", *msg2 = "";
61850
61851 if (initcall_debug) {
61852 - call.caller = task_pid_nr(current);
61853 - printk("calling %pF @ %i\n", fn, call.caller);
61854 + trace_call.caller = task_pid_nr(current);
61855 + printk("calling %pF @ %i\n", fn, trace_call.caller);
61856 calltime = ktime_get();
61857 - trace_boot_call(&call, fn);
61858 + trace_boot_call(&trace_call, fn);
61859 enable_boot_trace();
61860 }
61861
61862 - ret.result = fn();
61863 + trace_ret.result = fn();
61864
61865 if (initcall_debug) {
61866 disable_boot_trace();
61867 rettime = ktime_get();
61868 delta = ktime_sub(rettime, calltime);
61869 - ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
61870 - trace_boot_ret(&ret, fn);
61871 + trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
61872 + trace_boot_ret(&trace_ret, fn);
61873 printk("initcall %pF returned %d after %Ld usecs\n", fn,
61874 - ret.result, ret.duration);
61875 + trace_ret.result, trace_ret.duration);
61876 }
61877
61878 msgbuf[0] = 0;
61879
61880 - if (ret.result && ret.result != -ENODEV && initcall_debug)
61881 - sprintf(msgbuf, "error code %d ", ret.result);
61882 + if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
61883 + sprintf(msgbuf, "error code %d ", trace_ret.result);
61884
61885 if (preempt_count() != count) {
61886 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
61887 + msg1 = " preemption imbalance";
61888 preempt_count() = count;
61889 }
61890 if (irqs_disabled()) {
61891 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
61892 + msg2 = " disabled interrupts";
61893 local_irq_enable();
61894 }
61895 - if (msgbuf[0]) {
61896 - printk("initcall %pF returned with %s\n", fn, msgbuf);
61897 + if (msgbuf[0] || *msg1 || *msg2) {
61898 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
61899 }
61900
61901 - return ret.result;
61902 + return trace_ret.result;
61903 }
61904
61905
61906 @@ -893,11 +938,13 @@ static int __init kernel_init(void * unu
61907 if (!ramdisk_execute_command)
61908 ramdisk_execute_command = "/init";
61909
61910 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
61911 + if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
61912 ramdisk_execute_command = NULL;
61913 prepare_namespace();
61914 }
61915
61916 + grsecurity_init();
61917 +
61918 /*
61919 * Ok, we have completed the initial bootup, and
61920 * we're essentially up and running. Get rid of the
61921 diff -urNp linux-2.6.32.45/init/noinitramfs.c linux-2.6.32.45/init/noinitramfs.c
61922 --- linux-2.6.32.45/init/noinitramfs.c 2011-03-27 14:31:47.000000000 -0400
61923 +++ linux-2.6.32.45/init/noinitramfs.c 2011-04-17 15:56:46.000000000 -0400
61924 @@ -29,7 +29,7 @@ static int __init default_rootfs(void)
61925 {
61926 int err;
61927
61928 - err = sys_mkdir("/dev", 0755);
61929 + err = sys_mkdir((const char __user *)"/dev", 0755);
61930 if (err < 0)
61931 goto out;
61932
61933 @@ -39,7 +39,7 @@ static int __init default_rootfs(void)
61934 if (err < 0)
61935 goto out;
61936
61937 - err = sys_mkdir("/root", 0700);
61938 + err = sys_mkdir((const char __user *)"/root", 0700);
61939 if (err < 0)
61940 goto out;
61941
61942 diff -urNp linux-2.6.32.45/ipc/mqueue.c linux-2.6.32.45/ipc/mqueue.c
61943 --- linux-2.6.32.45/ipc/mqueue.c 2011-03-27 14:31:47.000000000 -0400
61944 +++ linux-2.6.32.45/ipc/mqueue.c 2011-04-17 15:56:46.000000000 -0400
61945 @@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(st
61946 mq_bytes = (mq_msg_tblsz +
61947 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
61948
61949 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
61950 spin_lock(&mq_lock);
61951 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
61952 u->mq_bytes + mq_bytes >
61953 diff -urNp linux-2.6.32.45/ipc/msg.c linux-2.6.32.45/ipc/msg.c
61954 --- linux-2.6.32.45/ipc/msg.c 2011-03-27 14:31:47.000000000 -0400
61955 +++ linux-2.6.32.45/ipc/msg.c 2011-08-05 20:33:55.000000000 -0400
61956 @@ -310,18 +310,19 @@ static inline int msg_security(struct ke
61957 return security_msg_queue_associate(msq, msgflg);
61958 }
61959
61960 +static struct ipc_ops msg_ops = {
61961 + .getnew = newque,
61962 + .associate = msg_security,
61963 + .more_checks = NULL
61964 +};
61965 +
61966 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
61967 {
61968 struct ipc_namespace *ns;
61969 - struct ipc_ops msg_ops;
61970 struct ipc_params msg_params;
61971
61972 ns = current->nsproxy->ipc_ns;
61973
61974 - msg_ops.getnew = newque;
61975 - msg_ops.associate = msg_security;
61976 - msg_ops.more_checks = NULL;
61977 -
61978 msg_params.key = key;
61979 msg_params.flg = msgflg;
61980
61981 diff -urNp linux-2.6.32.45/ipc/sem.c linux-2.6.32.45/ipc/sem.c
61982 --- linux-2.6.32.45/ipc/sem.c 2011-03-27 14:31:47.000000000 -0400
61983 +++ linux-2.6.32.45/ipc/sem.c 2011-08-05 20:33:55.000000000 -0400
61984 @@ -309,10 +309,15 @@ static inline int sem_more_checks(struct
61985 return 0;
61986 }
61987
61988 +static struct ipc_ops sem_ops = {
61989 + .getnew = newary,
61990 + .associate = sem_security,
61991 + .more_checks = sem_more_checks
61992 +};
61993 +
61994 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
61995 {
61996 struct ipc_namespace *ns;
61997 - struct ipc_ops sem_ops;
61998 struct ipc_params sem_params;
61999
62000 ns = current->nsproxy->ipc_ns;
62001 @@ -320,10 +325,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
62002 if (nsems < 0 || nsems > ns->sc_semmsl)
62003 return -EINVAL;
62004
62005 - sem_ops.getnew = newary;
62006 - sem_ops.associate = sem_security;
62007 - sem_ops.more_checks = sem_more_checks;
62008 -
62009 sem_params.key = key;
62010 sem_params.flg = semflg;
62011 sem_params.u.nsems = nsems;
62012 @@ -671,6 +672,8 @@ static int semctl_main(struct ipc_namesp
62013 ushort* sem_io = fast_sem_io;
62014 int nsems;
62015
62016 + pax_track_stack();
62017 +
62018 sma = sem_lock_check(ns, semid);
62019 if (IS_ERR(sma))
62020 return PTR_ERR(sma);
62021 @@ -1071,6 +1074,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
62022 unsigned long jiffies_left = 0;
62023 struct ipc_namespace *ns;
62024
62025 + pax_track_stack();
62026 +
62027 ns = current->nsproxy->ipc_ns;
62028
62029 if (nsops < 1 || semid < 0)
62030 diff -urNp linux-2.6.32.45/ipc/shm.c linux-2.6.32.45/ipc/shm.c
62031 --- linux-2.6.32.45/ipc/shm.c 2011-03-27 14:31:47.000000000 -0400
62032 +++ linux-2.6.32.45/ipc/shm.c 2011-08-05 20:33:55.000000000 -0400
62033 @@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_name
62034 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
62035 #endif
62036
62037 +#ifdef CONFIG_GRKERNSEC
62038 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62039 + const time_t shm_createtime, const uid_t cuid,
62040 + const int shmid);
62041 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62042 + const time_t shm_createtime);
62043 +#endif
62044 +
62045 void shm_init_ns(struct ipc_namespace *ns)
62046 {
62047 ns->shm_ctlmax = SHMMAX;
62048 @@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *
62049 shp->shm_lprid = 0;
62050 shp->shm_atim = shp->shm_dtim = 0;
62051 shp->shm_ctim = get_seconds();
62052 +#ifdef CONFIG_GRKERNSEC
62053 + {
62054 + struct timespec timeval;
62055 + do_posix_clock_monotonic_gettime(&timeval);
62056 +
62057 + shp->shm_createtime = timeval.tv_sec;
62058 + }
62059 +#endif
62060 shp->shm_segsz = size;
62061 shp->shm_nattch = 0;
62062 shp->shm_file = file;
62063 @@ -446,18 +462,19 @@ static inline int shm_more_checks(struct
62064 return 0;
62065 }
62066
62067 +static struct ipc_ops shm_ops = {
62068 + .getnew = newseg,
62069 + .associate = shm_security,
62070 + .more_checks = shm_more_checks
62071 +};
62072 +
62073 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
62074 {
62075 struct ipc_namespace *ns;
62076 - struct ipc_ops shm_ops;
62077 struct ipc_params shm_params;
62078
62079 ns = current->nsproxy->ipc_ns;
62080
62081 - shm_ops.getnew = newseg;
62082 - shm_ops.associate = shm_security;
62083 - shm_ops.more_checks = shm_more_checks;
62084 -
62085 shm_params.key = key;
62086 shm_params.flg = shmflg;
62087 shm_params.u.size = size;
62088 @@ -880,9 +897,21 @@ long do_shmat(int shmid, char __user *sh
62089 if (err)
62090 goto out_unlock;
62091
62092 +#ifdef CONFIG_GRKERNSEC
62093 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
62094 + shp->shm_perm.cuid, shmid) ||
62095 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
62096 + err = -EACCES;
62097 + goto out_unlock;
62098 + }
62099 +#endif
62100 +
62101 path.dentry = dget(shp->shm_file->f_path.dentry);
62102 path.mnt = shp->shm_file->f_path.mnt;
62103 shp->shm_nattch++;
62104 +#ifdef CONFIG_GRKERNSEC
62105 + shp->shm_lapid = current->pid;
62106 +#endif
62107 size = i_size_read(path.dentry->d_inode);
62108 shm_unlock(shp);
62109
62110 diff -urNp linux-2.6.32.45/kernel/acct.c linux-2.6.32.45/kernel/acct.c
62111 --- linux-2.6.32.45/kernel/acct.c 2011-03-27 14:31:47.000000000 -0400
62112 +++ linux-2.6.32.45/kernel/acct.c 2011-04-17 15:56:46.000000000 -0400
62113 @@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_a
62114 */
62115 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
62116 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
62117 - file->f_op->write(file, (char *)&ac,
62118 + file->f_op->write(file, (__force char __user *)&ac,
62119 sizeof(acct_t), &file->f_pos);
62120 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
62121 set_fs(fs);
62122 diff -urNp linux-2.6.32.45/kernel/audit.c linux-2.6.32.45/kernel/audit.c
62123 --- linux-2.6.32.45/kernel/audit.c 2011-03-27 14:31:47.000000000 -0400
62124 +++ linux-2.6.32.45/kernel/audit.c 2011-05-04 17:56:28.000000000 -0400
62125 @@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
62126 3) suppressed due to audit_rate_limit
62127 4) suppressed due to audit_backlog_limit
62128 */
62129 -static atomic_t audit_lost = ATOMIC_INIT(0);
62130 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
62131
62132 /* The netlink socket. */
62133 static struct sock *audit_sock;
62134 @@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
62135 unsigned long now;
62136 int print;
62137
62138 - atomic_inc(&audit_lost);
62139 + atomic_inc_unchecked(&audit_lost);
62140
62141 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
62142
62143 @@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
62144 printk(KERN_WARNING
62145 "audit: audit_lost=%d audit_rate_limit=%d "
62146 "audit_backlog_limit=%d\n",
62147 - atomic_read(&audit_lost),
62148 + atomic_read_unchecked(&audit_lost),
62149 audit_rate_limit,
62150 audit_backlog_limit);
62151 audit_panic(message);
62152 @@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_b
62153 status_set.pid = audit_pid;
62154 status_set.rate_limit = audit_rate_limit;
62155 status_set.backlog_limit = audit_backlog_limit;
62156 - status_set.lost = atomic_read(&audit_lost);
62157 + status_set.lost = atomic_read_unchecked(&audit_lost);
62158 status_set.backlog = skb_queue_len(&audit_skb_queue);
62159 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
62160 &status_set, sizeof(status_set));
62161 @@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_b
62162 spin_unlock_irq(&tsk->sighand->siglock);
62163 }
62164 read_unlock(&tasklist_lock);
62165 - audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
62166 - &s, sizeof(s));
62167 +
62168 + if (!err)
62169 + audit_send_reply(NETLINK_CB(skb).pid, seq,
62170 + AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
62171 break;
62172 }
62173 case AUDIT_TTY_SET: {
62174 diff -urNp linux-2.6.32.45/kernel/auditsc.c linux-2.6.32.45/kernel/auditsc.c
62175 --- linux-2.6.32.45/kernel/auditsc.c 2011-03-27 14:31:47.000000000 -0400
62176 +++ linux-2.6.32.45/kernel/auditsc.c 2011-05-04 17:56:28.000000000 -0400
62177 @@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_conte
62178 }
62179
62180 /* global counter which is incremented every time something logs in */
62181 -static atomic_t session_id = ATOMIC_INIT(0);
62182 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
62183
62184 /**
62185 * audit_set_loginuid - set a task's audit_context loginuid
62186 @@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT
62187 */
62188 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
62189 {
62190 - unsigned int sessionid = atomic_inc_return(&session_id);
62191 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
62192 struct audit_context *context = task->audit_context;
62193
62194 if (context && context->in_syscall) {
62195 diff -urNp linux-2.6.32.45/kernel/capability.c linux-2.6.32.45/kernel/capability.c
62196 --- linux-2.6.32.45/kernel/capability.c 2011-03-27 14:31:47.000000000 -0400
62197 +++ linux-2.6.32.45/kernel/capability.c 2011-04-17 15:56:46.000000000 -0400
62198 @@ -305,10 +305,26 @@ int capable(int cap)
62199 BUG();
62200 }
62201
62202 - if (security_capable(cap) == 0) {
62203 + if (security_capable(cap) == 0 && gr_is_capable(cap)) {
62204 current->flags |= PF_SUPERPRIV;
62205 return 1;
62206 }
62207 return 0;
62208 }
62209 +
62210 +int capable_nolog(int cap)
62211 +{
62212 + if (unlikely(!cap_valid(cap))) {
62213 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
62214 + BUG();
62215 + }
62216 +
62217 + if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
62218 + current->flags |= PF_SUPERPRIV;
62219 + return 1;
62220 + }
62221 + return 0;
62222 +}
62223 +
62224 EXPORT_SYMBOL(capable);
62225 +EXPORT_SYMBOL(capable_nolog);
62226 diff -urNp linux-2.6.32.45/kernel/cgroup.c linux-2.6.32.45/kernel/cgroup.c
62227 --- linux-2.6.32.45/kernel/cgroup.c 2011-03-27 14:31:47.000000000 -0400
62228 +++ linux-2.6.32.45/kernel/cgroup.c 2011-05-16 21:46:57.000000000 -0400
62229 @@ -536,6 +536,8 @@ static struct css_set *find_css_set(
62230 struct hlist_head *hhead;
62231 struct cg_cgroup_link *link;
62232
62233 + pax_track_stack();
62234 +
62235 /* First see if we already have a cgroup group that matches
62236 * the desired set */
62237 read_lock(&css_set_lock);
62238 diff -urNp linux-2.6.32.45/kernel/configs.c linux-2.6.32.45/kernel/configs.c
62239 --- linux-2.6.32.45/kernel/configs.c 2011-03-27 14:31:47.000000000 -0400
62240 +++ linux-2.6.32.45/kernel/configs.c 2011-04-17 15:56:46.000000000 -0400
62241 @@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
62242 struct proc_dir_entry *entry;
62243
62244 /* create the current config file */
62245 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
62246 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
62247 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
62248 + &ikconfig_file_ops);
62249 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62250 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
62251 + &ikconfig_file_ops);
62252 +#endif
62253 +#else
62254 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
62255 &ikconfig_file_ops);
62256 +#endif
62257 +
62258 if (!entry)
62259 return -ENOMEM;
62260
62261 diff -urNp linux-2.6.32.45/kernel/cpu.c linux-2.6.32.45/kernel/cpu.c
62262 --- linux-2.6.32.45/kernel/cpu.c 2011-03-27 14:31:47.000000000 -0400
62263 +++ linux-2.6.32.45/kernel/cpu.c 2011-04-17 15:56:46.000000000 -0400
62264 @@ -19,7 +19,7 @@
62265 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
62266 static DEFINE_MUTEX(cpu_add_remove_lock);
62267
62268 -static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
62269 +static RAW_NOTIFIER_HEAD(cpu_chain);
62270
62271 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
62272 * Should always be manipulated under cpu_add_remove_lock
62273 diff -urNp linux-2.6.32.45/kernel/cred.c linux-2.6.32.45/kernel/cred.c
62274 --- linux-2.6.32.45/kernel/cred.c 2011-03-27 14:31:47.000000000 -0400
62275 +++ linux-2.6.32.45/kernel/cred.c 2011-08-11 19:49:38.000000000 -0400
62276 @@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head
62277 */
62278 void __put_cred(struct cred *cred)
62279 {
62280 + pax_track_stack();
62281 +
62282 kdebug("__put_cred(%p{%d,%d})", cred,
62283 atomic_read(&cred->usage),
62284 read_cred_subscribers(cred));
62285 @@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
62286 {
62287 struct cred *cred;
62288
62289 + pax_track_stack();
62290 +
62291 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
62292 atomic_read(&tsk->cred->usage),
62293 read_cred_subscribers(tsk->cred));
62294 @@ -222,6 +226,8 @@ const struct cred *get_task_cred(struct
62295 {
62296 const struct cred *cred;
62297
62298 + pax_track_stack();
62299 +
62300 rcu_read_lock();
62301
62302 do {
62303 @@ -241,6 +247,8 @@ struct cred *cred_alloc_blank(void)
62304 {
62305 struct cred *new;
62306
62307 + pax_track_stack();
62308 +
62309 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
62310 if (!new)
62311 return NULL;
62312 @@ -289,6 +297,8 @@ struct cred *prepare_creds(void)
62313 const struct cred *old;
62314 struct cred *new;
62315
62316 + pax_track_stack();
62317 +
62318 validate_process_creds();
62319
62320 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62321 @@ -335,6 +345,8 @@ struct cred *prepare_exec_creds(void)
62322 struct thread_group_cred *tgcred = NULL;
62323 struct cred *new;
62324
62325 + pax_track_stack();
62326 +
62327 #ifdef CONFIG_KEYS
62328 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
62329 if (!tgcred)
62330 @@ -441,6 +453,8 @@ int copy_creds(struct task_struct *p, un
62331 struct cred *new;
62332 int ret;
62333
62334 + pax_track_stack();
62335 +
62336 mutex_init(&p->cred_guard_mutex);
62337
62338 if (
62339 @@ -528,6 +542,8 @@ int commit_creds(struct cred *new)
62340 struct task_struct *task = current;
62341 const struct cred *old = task->real_cred;
62342
62343 + pax_track_stack();
62344 +
62345 kdebug("commit_creds(%p{%d,%d})", new,
62346 atomic_read(&new->usage),
62347 read_cred_subscribers(new));
62348 @@ -544,6 +560,8 @@ int commit_creds(struct cred *new)
62349
62350 get_cred(new); /* we will require a ref for the subj creds too */
62351
62352 + gr_set_role_label(task, new->uid, new->gid);
62353 +
62354 /* dumpability changes */
62355 if (old->euid != new->euid ||
62356 old->egid != new->egid ||
62357 @@ -563,10 +581,8 @@ int commit_creds(struct cred *new)
62358 key_fsgid_changed(task);
62359
62360 /* do it
62361 - * - What if a process setreuid()'s and this brings the
62362 - * new uid over his NPROC rlimit? We can check this now
62363 - * cheaply with the new uid cache, so if it matters
62364 - * we should be checking for it. -DaveM
62365 + * RLIMIT_NPROC limits on user->processes have already been checked
62366 + * in set_user().
62367 */
62368 alter_cred_subscribers(new, 2);
62369 if (new->user != old->user)
62370 @@ -606,6 +622,8 @@ EXPORT_SYMBOL(commit_creds);
62371 */
62372 void abort_creds(struct cred *new)
62373 {
62374 + pax_track_stack();
62375 +
62376 kdebug("abort_creds(%p{%d,%d})", new,
62377 atomic_read(&new->usage),
62378 read_cred_subscribers(new));
62379 @@ -629,6 +647,8 @@ const struct cred *override_creds(const
62380 {
62381 const struct cred *old = current->cred;
62382
62383 + pax_track_stack();
62384 +
62385 kdebug("override_creds(%p{%d,%d})", new,
62386 atomic_read(&new->usage),
62387 read_cred_subscribers(new));
62388 @@ -658,6 +678,8 @@ void revert_creds(const struct cred *old
62389 {
62390 const struct cred *override = current->cred;
62391
62392 + pax_track_stack();
62393 +
62394 kdebug("revert_creds(%p{%d,%d})", old,
62395 atomic_read(&old->usage),
62396 read_cred_subscribers(old));
62397 @@ -704,6 +726,8 @@ struct cred *prepare_kernel_cred(struct
62398 const struct cred *old;
62399 struct cred *new;
62400
62401 + pax_track_stack();
62402 +
62403 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62404 if (!new)
62405 return NULL;
62406 @@ -758,6 +782,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
62407 */
62408 int set_security_override(struct cred *new, u32 secid)
62409 {
62410 + pax_track_stack();
62411 +
62412 return security_kernel_act_as(new, secid);
62413 }
62414 EXPORT_SYMBOL(set_security_override);
62415 @@ -777,6 +803,8 @@ int set_security_override_from_ctx(struc
62416 u32 secid;
62417 int ret;
62418
62419 + pax_track_stack();
62420 +
62421 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
62422 if (ret < 0)
62423 return ret;
62424 diff -urNp linux-2.6.32.45/kernel/exit.c linux-2.6.32.45/kernel/exit.c
62425 --- linux-2.6.32.45/kernel/exit.c 2011-03-27 14:31:47.000000000 -0400
62426 +++ linux-2.6.32.45/kernel/exit.c 2011-08-17 19:19:50.000000000 -0400
62427 @@ -55,6 +55,10 @@
62428 #include <asm/pgtable.h>
62429 #include <asm/mmu_context.h>
62430
62431 +#ifdef CONFIG_GRKERNSEC
62432 +extern rwlock_t grsec_exec_file_lock;
62433 +#endif
62434 +
62435 static void exit_mm(struct task_struct * tsk);
62436
62437 static void __unhash_process(struct task_struct *p)
62438 @@ -174,6 +178,10 @@ void release_task(struct task_struct * p
62439 struct task_struct *leader;
62440 int zap_leader;
62441 repeat:
62442 +#ifdef CONFIG_NET
62443 + gr_del_task_from_ip_table(p);
62444 +#endif
62445 +
62446 tracehook_prepare_release_task(p);
62447 /* don't need to get the RCU readlock here - the process is dead and
62448 * can't be modifying its own credentials */
62449 @@ -341,11 +349,22 @@ static void reparent_to_kthreadd(void)
62450 {
62451 write_lock_irq(&tasklist_lock);
62452
62453 +#ifdef CONFIG_GRKERNSEC
62454 + write_lock(&grsec_exec_file_lock);
62455 + if (current->exec_file) {
62456 + fput(current->exec_file);
62457 + current->exec_file = NULL;
62458 + }
62459 + write_unlock(&grsec_exec_file_lock);
62460 +#endif
62461 +
62462 ptrace_unlink(current);
62463 /* Reparent to init */
62464 current->real_parent = current->parent = kthreadd_task;
62465 list_move_tail(&current->sibling, &current->real_parent->children);
62466
62467 + gr_set_kernel_label(current);
62468 +
62469 /* Set the exit signal to SIGCHLD so we signal init on exit */
62470 current->exit_signal = SIGCHLD;
62471
62472 @@ -397,7 +416,7 @@ int allow_signal(int sig)
62473 * know it'll be handled, so that they don't get converted to
62474 * SIGKILL or just silently dropped.
62475 */
62476 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
62477 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
62478 recalc_sigpending();
62479 spin_unlock_irq(&current->sighand->siglock);
62480 return 0;
62481 @@ -433,6 +452,17 @@ void daemonize(const char *name, ...)
62482 vsnprintf(current->comm, sizeof(current->comm), name, args);
62483 va_end(args);
62484
62485 +#ifdef CONFIG_GRKERNSEC
62486 + write_lock(&grsec_exec_file_lock);
62487 + if (current->exec_file) {
62488 + fput(current->exec_file);
62489 + current->exec_file = NULL;
62490 + }
62491 + write_unlock(&grsec_exec_file_lock);
62492 +#endif
62493 +
62494 + gr_set_kernel_label(current);
62495 +
62496 /*
62497 * If we were started as result of loading a module, close all of the
62498 * user space pages. We don't need them, and if we didn't close them
62499 @@ -897,17 +927,17 @@ NORET_TYPE void do_exit(long code)
62500 struct task_struct *tsk = current;
62501 int group_dead;
62502
62503 - profile_task_exit(tsk);
62504 -
62505 - WARN_ON(atomic_read(&tsk->fs_excl));
62506 -
62507 + /*
62508 + * Check this first since set_fs() below depends on
62509 + * current_thread_info(), which we better not access when we're in
62510 + * interrupt context. Other than that, we want to do the set_fs()
62511 + * as early as possible.
62512 + */
62513 if (unlikely(in_interrupt()))
62514 panic("Aiee, killing interrupt handler!");
62515 - if (unlikely(!tsk->pid))
62516 - panic("Attempted to kill the idle task!");
62517
62518 /*
62519 - * If do_exit is called because this processes oopsed, it's possible
62520 + * If do_exit is called because this processes Oops'ed, it's possible
62521 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
62522 * continuing. Amongst other possible reasons, this is to prevent
62523 * mm_release()->clear_child_tid() from writing to a user-controlled
62524 @@ -915,6 +945,13 @@ NORET_TYPE void do_exit(long code)
62525 */
62526 set_fs(USER_DS);
62527
62528 + profile_task_exit(tsk);
62529 +
62530 + WARN_ON(atomic_read(&tsk->fs_excl));
62531 +
62532 + if (unlikely(!tsk->pid))
62533 + panic("Attempted to kill the idle task!");
62534 +
62535 tracehook_report_exit(&code);
62536
62537 validate_creds_for_do_exit(tsk);
62538 @@ -973,6 +1010,9 @@ NORET_TYPE void do_exit(long code)
62539 tsk->exit_code = code;
62540 taskstats_exit(tsk, group_dead);
62541
62542 + gr_acl_handle_psacct(tsk, code);
62543 + gr_acl_handle_exit();
62544 +
62545 exit_mm(tsk);
62546
62547 if (group_dead)
62548 @@ -1188,7 +1228,7 @@ static int wait_task_zombie(struct wait_
62549
62550 if (unlikely(wo->wo_flags & WNOWAIT)) {
62551 int exit_code = p->exit_code;
62552 - int why, status;
62553 + int why;
62554
62555 get_task_struct(p);
62556 read_unlock(&tasklist_lock);
62557 diff -urNp linux-2.6.32.45/kernel/fork.c linux-2.6.32.45/kernel/fork.c
62558 --- linux-2.6.32.45/kernel/fork.c 2011-03-27 14:31:47.000000000 -0400
62559 +++ linux-2.6.32.45/kernel/fork.c 2011-08-11 19:50:07.000000000 -0400
62560 @@ -253,7 +253,7 @@ static struct task_struct *dup_task_stru
62561 *stackend = STACK_END_MAGIC; /* for overflow detection */
62562
62563 #ifdef CONFIG_CC_STACKPROTECTOR
62564 - tsk->stack_canary = get_random_int();
62565 + tsk->stack_canary = pax_get_random_long();
62566 #endif
62567
62568 /* One for us, one for whoever does the "release_task()" (usually parent) */
62569 @@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm
62570 mm->locked_vm = 0;
62571 mm->mmap = NULL;
62572 mm->mmap_cache = NULL;
62573 - mm->free_area_cache = oldmm->mmap_base;
62574 - mm->cached_hole_size = ~0UL;
62575 + mm->free_area_cache = oldmm->free_area_cache;
62576 + mm->cached_hole_size = oldmm->cached_hole_size;
62577 mm->map_count = 0;
62578 cpumask_clear(mm_cpumask(mm));
62579 mm->mm_rb = RB_ROOT;
62580 @@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm
62581 tmp->vm_flags &= ~VM_LOCKED;
62582 tmp->vm_mm = mm;
62583 tmp->vm_next = tmp->vm_prev = NULL;
62584 + tmp->vm_mirror = NULL;
62585 anon_vma_link(tmp);
62586 file = tmp->vm_file;
62587 if (file) {
62588 @@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm
62589 if (retval)
62590 goto out;
62591 }
62592 +
62593 +#ifdef CONFIG_PAX_SEGMEXEC
62594 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
62595 + struct vm_area_struct *mpnt_m;
62596 +
62597 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
62598 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
62599 +
62600 + if (!mpnt->vm_mirror)
62601 + continue;
62602 +
62603 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
62604 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
62605 + mpnt->vm_mirror = mpnt_m;
62606 + } else {
62607 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
62608 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
62609 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
62610 + mpnt->vm_mirror->vm_mirror = mpnt;
62611 + }
62612 + }
62613 + BUG_ON(mpnt_m);
62614 + }
62615 +#endif
62616 +
62617 /* a new mm has just been created */
62618 arch_dup_mmap(oldmm, mm);
62619 retval = 0;
62620 @@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_f
62621 write_unlock(&fs->lock);
62622 return -EAGAIN;
62623 }
62624 - fs->users++;
62625 + atomic_inc(&fs->users);
62626 write_unlock(&fs->lock);
62627 return 0;
62628 }
62629 tsk->fs = copy_fs_struct(fs);
62630 if (!tsk->fs)
62631 return -ENOMEM;
62632 + gr_set_chroot_entries(tsk, &tsk->fs->root);
62633 return 0;
62634 }
62635
62636 @@ -1033,12 +1060,16 @@ static struct task_struct *copy_process(
62637 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
62638 #endif
62639 retval = -EAGAIN;
62640 +
62641 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
62642 +
62643 if (atomic_read(&p->real_cred->user->processes) >=
62644 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
62645 - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
62646 - p->real_cred->user != INIT_USER)
62647 + if (p->real_cred->user != INIT_USER &&
62648 + !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
62649 goto bad_fork_free;
62650 }
62651 + current->flags &= ~PF_NPROC_EXCEEDED;
62652
62653 retval = copy_creds(p, clone_flags);
62654 if (retval < 0)
62655 @@ -1183,6 +1214,8 @@ static struct task_struct *copy_process(
62656 goto bad_fork_free_pid;
62657 }
62658
62659 + gr_copy_label(p);
62660 +
62661 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
62662 /*
62663 * Clear TID on mm_release()?
62664 @@ -1333,6 +1366,8 @@ bad_fork_cleanup_count:
62665 bad_fork_free:
62666 free_task(p);
62667 fork_out:
62668 + gr_log_forkfail(retval);
62669 +
62670 return ERR_PTR(retval);
62671 }
62672
62673 @@ -1426,6 +1461,8 @@ long do_fork(unsigned long clone_flags,
62674 if (clone_flags & CLONE_PARENT_SETTID)
62675 put_user(nr, parent_tidptr);
62676
62677 + gr_handle_brute_check();
62678 +
62679 if (clone_flags & CLONE_VFORK) {
62680 p->vfork_done = &vfork;
62681 init_completion(&vfork);
62682 @@ -1558,7 +1595,7 @@ static int unshare_fs(unsigned long unsh
62683 return 0;
62684
62685 /* don't need lock here; in the worst case we'll do useless copy */
62686 - if (fs->users == 1)
62687 + if (atomic_read(&fs->users) == 1)
62688 return 0;
62689
62690 *new_fsp = copy_fs_struct(fs);
62691 @@ -1681,7 +1718,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
62692 fs = current->fs;
62693 write_lock(&fs->lock);
62694 current->fs = new_fs;
62695 - if (--fs->users)
62696 + gr_set_chroot_entries(current, &current->fs->root);
62697 + if (atomic_dec_return(&fs->users))
62698 new_fs = NULL;
62699 else
62700 new_fs = fs;
62701 diff -urNp linux-2.6.32.45/kernel/futex.c linux-2.6.32.45/kernel/futex.c
62702 --- linux-2.6.32.45/kernel/futex.c 2011-03-27 14:31:47.000000000 -0400
62703 +++ linux-2.6.32.45/kernel/futex.c 2011-05-16 21:46:57.000000000 -0400
62704 @@ -54,6 +54,7 @@
62705 #include <linux/mount.h>
62706 #include <linux/pagemap.h>
62707 #include <linux/syscalls.h>
62708 +#include <linux/ptrace.h>
62709 #include <linux/signal.h>
62710 #include <linux/module.h>
62711 #include <linux/magic.h>
62712 @@ -221,6 +222,11 @@ get_futex_key(u32 __user *uaddr, int fsh
62713 struct page *page;
62714 int err;
62715
62716 +#ifdef CONFIG_PAX_SEGMEXEC
62717 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
62718 + return -EFAULT;
62719 +#endif
62720 +
62721 /*
62722 * The futex address must be "naturally" aligned.
62723 */
62724 @@ -1789,6 +1795,8 @@ static int futex_wait(u32 __user *uaddr,
62725 struct futex_q q;
62726 int ret;
62727
62728 + pax_track_stack();
62729 +
62730 if (!bitset)
62731 return -EINVAL;
62732
62733 @@ -1841,7 +1849,7 @@ retry:
62734
62735 restart = &current_thread_info()->restart_block;
62736 restart->fn = futex_wait_restart;
62737 - restart->futex.uaddr = (u32 *)uaddr;
62738 + restart->futex.uaddr = uaddr;
62739 restart->futex.val = val;
62740 restart->futex.time = abs_time->tv64;
62741 restart->futex.bitset = bitset;
62742 @@ -2203,6 +2211,8 @@ static int futex_wait_requeue_pi(u32 __u
62743 struct futex_q q;
62744 int res, ret;
62745
62746 + pax_track_stack();
62747 +
62748 if (!bitset)
62749 return -EINVAL;
62750
62751 @@ -2377,7 +2387,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
62752 {
62753 struct robust_list_head __user *head;
62754 unsigned long ret;
62755 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
62756 const struct cred *cred = current_cred(), *pcred;
62757 +#endif
62758
62759 if (!futex_cmpxchg_enabled)
62760 return -ENOSYS;
62761 @@ -2393,11 +2405,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
62762 if (!p)
62763 goto err_unlock;
62764 ret = -EPERM;
62765 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62766 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
62767 + goto err_unlock;
62768 +#else
62769 pcred = __task_cred(p);
62770 if (cred->euid != pcred->euid &&
62771 cred->euid != pcred->uid &&
62772 !capable(CAP_SYS_PTRACE))
62773 goto err_unlock;
62774 +#endif
62775 head = p->robust_list;
62776 rcu_read_unlock();
62777 }
62778 @@ -2459,7 +2476,7 @@ retry:
62779 */
62780 static inline int fetch_robust_entry(struct robust_list __user **entry,
62781 struct robust_list __user * __user *head,
62782 - int *pi)
62783 + unsigned int *pi)
62784 {
62785 unsigned long uentry;
62786
62787 @@ -2640,6 +2657,7 @@ static int __init futex_init(void)
62788 {
62789 u32 curval;
62790 int i;
62791 + mm_segment_t oldfs;
62792
62793 /*
62794 * This will fail and we want it. Some arch implementations do
62795 @@ -2651,7 +2669,10 @@ static int __init futex_init(void)
62796 * implementation, the non functional ones will return
62797 * -ENOSYS.
62798 */
62799 + oldfs = get_fs();
62800 + set_fs(USER_DS);
62801 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
62802 + set_fs(oldfs);
62803 if (curval == -EFAULT)
62804 futex_cmpxchg_enabled = 1;
62805
62806 diff -urNp linux-2.6.32.45/kernel/futex_compat.c linux-2.6.32.45/kernel/futex_compat.c
62807 --- linux-2.6.32.45/kernel/futex_compat.c 2011-03-27 14:31:47.000000000 -0400
62808 +++ linux-2.6.32.45/kernel/futex_compat.c 2011-04-17 15:56:46.000000000 -0400
62809 @@ -10,6 +10,7 @@
62810 #include <linux/compat.h>
62811 #include <linux/nsproxy.h>
62812 #include <linux/futex.h>
62813 +#include <linux/ptrace.h>
62814
62815 #include <asm/uaccess.h>
62816
62817 @@ -135,7 +136,10 @@ compat_sys_get_robust_list(int pid, comp
62818 {
62819 struct compat_robust_list_head __user *head;
62820 unsigned long ret;
62821 - const struct cred *cred = current_cred(), *pcred;
62822 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
62823 + const struct cred *cred = current_cred();
62824 + const struct cred *pcred;
62825 +#endif
62826
62827 if (!futex_cmpxchg_enabled)
62828 return -ENOSYS;
62829 @@ -151,11 +155,16 @@ compat_sys_get_robust_list(int pid, comp
62830 if (!p)
62831 goto err_unlock;
62832 ret = -EPERM;
62833 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62834 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
62835 + goto err_unlock;
62836 +#else
62837 pcred = __task_cred(p);
62838 if (cred->euid != pcred->euid &&
62839 cred->euid != pcred->uid &&
62840 !capable(CAP_SYS_PTRACE))
62841 goto err_unlock;
62842 +#endif
62843 head = p->compat_robust_list;
62844 read_unlock(&tasklist_lock);
62845 }
62846 diff -urNp linux-2.6.32.45/kernel/gcov/base.c linux-2.6.32.45/kernel/gcov/base.c
62847 --- linux-2.6.32.45/kernel/gcov/base.c 2011-03-27 14:31:47.000000000 -0400
62848 +++ linux-2.6.32.45/kernel/gcov/base.c 2011-04-17 15:56:46.000000000 -0400
62849 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
62850 }
62851
62852 #ifdef CONFIG_MODULES
62853 -static inline int within(void *addr, void *start, unsigned long size)
62854 -{
62855 - return ((addr >= start) && (addr < start + size));
62856 -}
62857 -
62858 /* Update list and generate events when modules are unloaded. */
62859 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
62860 void *data)
62861 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
62862 prev = NULL;
62863 /* Remove entries located in module from linked list. */
62864 for (info = gcov_info_head; info; info = info->next) {
62865 - if (within(info, mod->module_core, mod->core_size)) {
62866 + if (within_module_core_rw((unsigned long)info, mod)) {
62867 if (prev)
62868 prev->next = info->next;
62869 else
62870 diff -urNp linux-2.6.32.45/kernel/hrtimer.c linux-2.6.32.45/kernel/hrtimer.c
62871 --- linux-2.6.32.45/kernel/hrtimer.c 2011-03-27 14:31:47.000000000 -0400
62872 +++ linux-2.6.32.45/kernel/hrtimer.c 2011-04-17 15:56:46.000000000 -0400
62873 @@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
62874 local_irq_restore(flags);
62875 }
62876
62877 -static void run_hrtimer_softirq(struct softirq_action *h)
62878 +static void run_hrtimer_softirq(void)
62879 {
62880 hrtimer_peek_ahead_timers();
62881 }
62882 diff -urNp linux-2.6.32.45/kernel/kallsyms.c linux-2.6.32.45/kernel/kallsyms.c
62883 --- linux-2.6.32.45/kernel/kallsyms.c 2011-03-27 14:31:47.000000000 -0400
62884 +++ linux-2.6.32.45/kernel/kallsyms.c 2011-04-17 15:56:46.000000000 -0400
62885 @@ -11,6 +11,9 @@
62886 * Changed the compression method from stem compression to "table lookup"
62887 * compression (see scripts/kallsyms.c for a more complete description)
62888 */
62889 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62890 +#define __INCLUDED_BY_HIDESYM 1
62891 +#endif
62892 #include <linux/kallsyms.h>
62893 #include <linux/module.h>
62894 #include <linux/init.h>
62895 @@ -51,12 +54,33 @@ extern const unsigned long kallsyms_mark
62896
62897 static inline int is_kernel_inittext(unsigned long addr)
62898 {
62899 + if (system_state != SYSTEM_BOOTING)
62900 + return 0;
62901 +
62902 if (addr >= (unsigned long)_sinittext
62903 && addr <= (unsigned long)_einittext)
62904 return 1;
62905 return 0;
62906 }
62907
62908 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
62909 +#ifdef CONFIG_MODULES
62910 +static inline int is_module_text(unsigned long addr)
62911 +{
62912 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
62913 + return 1;
62914 +
62915 + addr = ktla_ktva(addr);
62916 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
62917 +}
62918 +#else
62919 +static inline int is_module_text(unsigned long addr)
62920 +{
62921 + return 0;
62922 +}
62923 +#endif
62924 +#endif
62925 +
62926 static inline int is_kernel_text(unsigned long addr)
62927 {
62928 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
62929 @@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigne
62930
62931 static inline int is_kernel(unsigned long addr)
62932 {
62933 +
62934 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
62935 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
62936 + return 1;
62937 +
62938 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
62939 +#else
62940 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
62941 +#endif
62942 +
62943 return 1;
62944 return in_gate_area_no_task(addr);
62945 }
62946
62947 static int is_ksym_addr(unsigned long addr)
62948 {
62949 +
62950 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
62951 + if (is_module_text(addr))
62952 + return 0;
62953 +#endif
62954 +
62955 if (all_var)
62956 return is_kernel(addr);
62957
62958 @@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(st
62959
62960 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
62961 {
62962 - iter->name[0] = '\0';
62963 iter->nameoff = get_symbol_offset(new_pos);
62964 iter->pos = new_pos;
62965 }
62966 @@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, vo
62967 {
62968 struct kallsym_iter *iter = m->private;
62969
62970 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62971 + if (current_uid())
62972 + return 0;
62973 +#endif
62974 +
62975 /* Some debugging symbols have no name. Ignore them. */
62976 if (!iter->name[0])
62977 return 0;
62978 @@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *i
62979 struct kallsym_iter *iter;
62980 int ret;
62981
62982 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
62983 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
62984 if (!iter)
62985 return -ENOMEM;
62986 reset_iter(iter, 0);
62987 diff -urNp linux-2.6.32.45/kernel/kgdb.c linux-2.6.32.45/kernel/kgdb.c
62988 --- linux-2.6.32.45/kernel/kgdb.c 2011-04-17 17:00:52.000000000 -0400
62989 +++ linux-2.6.32.45/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
62990 @@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
62991 /* Guard for recursive entry */
62992 static int exception_level;
62993
62994 -static struct kgdb_io *kgdb_io_ops;
62995 +static const struct kgdb_io *kgdb_io_ops;
62996 static DEFINE_SPINLOCK(kgdb_registration_lock);
62997
62998 /* kgdb console driver is loaded */
62999 @@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1)
63000 */
63001 static atomic_t passive_cpu_wait[NR_CPUS];
63002 static atomic_t cpu_in_kgdb[NR_CPUS];
63003 -atomic_t kgdb_setting_breakpoint;
63004 +atomic_unchecked_t kgdb_setting_breakpoint;
63005
63006 struct task_struct *kgdb_usethread;
63007 struct task_struct *kgdb_contthread;
63008 @@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBY
63009 sizeof(unsigned long)];
63010
63011 /* to keep track of the CPU which is doing the single stepping*/
63012 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63013 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63014
63015 /*
63016 * If you are debugging a problem where roundup (the collection of
63017 @@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
63018 return 0;
63019 if (kgdb_connected)
63020 return 1;
63021 - if (atomic_read(&kgdb_setting_breakpoint))
63022 + if (atomic_read_unchecked(&kgdb_setting_breakpoint))
63023 return 1;
63024 if (print_wait)
63025 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
63026 @@ -1426,8 +1426,8 @@ acquirelock:
63027 * instance of the exception handler wanted to come into the
63028 * debugger on a different CPU via a single step
63029 */
63030 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
63031 - atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
63032 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
63033 + atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
63034
63035 atomic_set(&kgdb_active, -1);
63036 touch_softlockup_watchdog();
63037 @@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void
63038 *
63039 * Register it with the KGDB core.
63040 */
63041 -int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
63042 +int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
63043 {
63044 int err;
63045
63046 @@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_modul
63047 *
63048 * Unregister it with the KGDB core.
63049 */
63050 -void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
63051 +void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
63052 {
63053 BUG_ON(kgdb_connected);
63054
63055 @@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_mod
63056 */
63057 void kgdb_breakpoint(void)
63058 {
63059 - atomic_set(&kgdb_setting_breakpoint, 1);
63060 + atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
63061 wmb(); /* Sync point before breakpoint */
63062 arch_kgdb_breakpoint();
63063 wmb(); /* Sync point after breakpoint */
63064 - atomic_set(&kgdb_setting_breakpoint, 0);
63065 + atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
63066 }
63067 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
63068
63069 diff -urNp linux-2.6.32.45/kernel/kmod.c linux-2.6.32.45/kernel/kmod.c
63070 --- linux-2.6.32.45/kernel/kmod.c 2011-03-27 14:31:47.000000000 -0400
63071 +++ linux-2.6.32.45/kernel/kmod.c 2011-04-17 15:56:46.000000000 -0400
63072 @@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
63073 * If module auto-loading support is disabled then this function
63074 * becomes a no-operation.
63075 */
63076 -int __request_module(bool wait, const char *fmt, ...)
63077 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
63078 {
63079 - va_list args;
63080 char module_name[MODULE_NAME_LEN];
63081 unsigned int max_modprobes;
63082 int ret;
63083 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
63084 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
63085 static char *envp[] = { "HOME=/",
63086 "TERM=linux",
63087 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
63088 @@ -84,12 +83,24 @@ int __request_module(bool wait, const ch
63089 if (ret)
63090 return ret;
63091
63092 - va_start(args, fmt);
63093 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
63094 - va_end(args);
63095 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
63096 if (ret >= MODULE_NAME_LEN)
63097 return -ENAMETOOLONG;
63098
63099 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63100 + if (!current_uid()) {
63101 + /* hack to workaround consolekit/udisks stupidity */
63102 + read_lock(&tasklist_lock);
63103 + if (!strcmp(current->comm, "mount") &&
63104 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
63105 + read_unlock(&tasklist_lock);
63106 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
63107 + return -EPERM;
63108 + }
63109 + read_unlock(&tasklist_lock);
63110 + }
63111 +#endif
63112 +
63113 /* If modprobe needs a service that is in a module, we get a recursive
63114 * loop. Limit the number of running kmod threads to max_threads/2 or
63115 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
63116 @@ -121,6 +132,48 @@ int __request_module(bool wait, const ch
63117 atomic_dec(&kmod_concurrent);
63118 return ret;
63119 }
63120 +
63121 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
63122 +{
63123 + va_list args;
63124 + int ret;
63125 +
63126 + va_start(args, fmt);
63127 + ret = ____request_module(wait, module_param, fmt, args);
63128 + va_end(args);
63129 +
63130 + return ret;
63131 +}
63132 +
63133 +int __request_module(bool wait, const char *fmt, ...)
63134 +{
63135 + va_list args;
63136 + int ret;
63137 +
63138 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63139 + if (current_uid()) {
63140 + char module_param[MODULE_NAME_LEN];
63141 +
63142 + memset(module_param, 0, sizeof(module_param));
63143 +
63144 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
63145 +
63146 + va_start(args, fmt);
63147 + ret = ____request_module(wait, module_param, fmt, args);
63148 + va_end(args);
63149 +
63150 + return ret;
63151 + }
63152 +#endif
63153 +
63154 + va_start(args, fmt);
63155 + ret = ____request_module(wait, NULL, fmt, args);
63156 + va_end(args);
63157 +
63158 + return ret;
63159 +}
63160 +
63161 +
63162 EXPORT_SYMBOL(__request_module);
63163 #endif /* CONFIG_MODULES */
63164
63165 diff -urNp linux-2.6.32.45/kernel/kprobes.c linux-2.6.32.45/kernel/kprobes.c
63166 --- linux-2.6.32.45/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
63167 +++ linux-2.6.32.45/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
63168 @@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_
63169 * kernel image and loaded module images reside. This is required
63170 * so x86_64 can correctly handle the %rip-relative fixups.
63171 */
63172 - kip->insns = module_alloc(PAGE_SIZE);
63173 + kip->insns = module_alloc_exec(PAGE_SIZE);
63174 if (!kip->insns) {
63175 kfree(kip);
63176 return NULL;
63177 @@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(st
63178 */
63179 if (!list_is_singular(&kprobe_insn_pages)) {
63180 list_del(&kip->list);
63181 - module_free(NULL, kip->insns);
63182 + module_free_exec(NULL, kip->insns);
63183 kfree(kip);
63184 }
63185 return 1;
63186 @@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
63187 {
63188 int i, err = 0;
63189 unsigned long offset = 0, size = 0;
63190 - char *modname, namebuf[128];
63191 + char *modname, namebuf[KSYM_NAME_LEN];
63192 const char *symbol_name;
63193 void *addr;
63194 struct kprobe_blackpoint *kb;
63195 @@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(st
63196 const char *sym = NULL;
63197 unsigned int i = *(loff_t *) v;
63198 unsigned long offset = 0;
63199 - char *modname, namebuf[128];
63200 + char *modname, namebuf[KSYM_NAME_LEN];
63201
63202 head = &kprobe_table[i];
63203 preempt_disable();
63204 diff -urNp linux-2.6.32.45/kernel/lockdep.c linux-2.6.32.45/kernel/lockdep.c
63205 --- linux-2.6.32.45/kernel/lockdep.c 2011-06-25 12:55:35.000000000 -0400
63206 +++ linux-2.6.32.45/kernel/lockdep.c 2011-06-25 12:56:37.000000000 -0400
63207 @@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_t
63208 /*
63209 * Various lockdep statistics:
63210 */
63211 -atomic_t chain_lookup_hits;
63212 -atomic_t chain_lookup_misses;
63213 -atomic_t hardirqs_on_events;
63214 -atomic_t hardirqs_off_events;
63215 -atomic_t redundant_hardirqs_on;
63216 -atomic_t redundant_hardirqs_off;
63217 -atomic_t softirqs_on_events;
63218 -atomic_t softirqs_off_events;
63219 -atomic_t redundant_softirqs_on;
63220 -atomic_t redundant_softirqs_off;
63221 -atomic_t nr_unused_locks;
63222 -atomic_t nr_cyclic_checks;
63223 -atomic_t nr_find_usage_forwards_checks;
63224 -atomic_t nr_find_usage_backwards_checks;
63225 +atomic_unchecked_t chain_lookup_hits;
63226 +atomic_unchecked_t chain_lookup_misses;
63227 +atomic_unchecked_t hardirqs_on_events;
63228 +atomic_unchecked_t hardirqs_off_events;
63229 +atomic_unchecked_t redundant_hardirqs_on;
63230 +atomic_unchecked_t redundant_hardirqs_off;
63231 +atomic_unchecked_t softirqs_on_events;
63232 +atomic_unchecked_t softirqs_off_events;
63233 +atomic_unchecked_t redundant_softirqs_on;
63234 +atomic_unchecked_t redundant_softirqs_off;
63235 +atomic_unchecked_t nr_unused_locks;
63236 +atomic_unchecked_t nr_cyclic_checks;
63237 +atomic_unchecked_t nr_find_usage_forwards_checks;
63238 +atomic_unchecked_t nr_find_usage_backwards_checks;
63239 #endif
63240
63241 /*
63242 @@ -577,6 +577,10 @@ static int static_obj(void *obj)
63243 int i;
63244 #endif
63245
63246 +#ifdef CONFIG_PAX_KERNEXEC
63247 + start = ktla_ktva(start);
63248 +#endif
63249 +
63250 /*
63251 * static variable?
63252 */
63253 @@ -592,8 +596,7 @@ static int static_obj(void *obj)
63254 */
63255 for_each_possible_cpu(i) {
63256 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
63257 - end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
63258 - + per_cpu_offset(i);
63259 + end = start + PERCPU_ENOUGH_ROOM;
63260
63261 if ((addr >= start) && (addr < end))
63262 return 1;
63263 @@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *
63264 if (!static_obj(lock->key)) {
63265 debug_locks_off();
63266 printk("INFO: trying to register non-static key.\n");
63267 + printk("lock:%pS key:%pS.\n", lock, lock->key);
63268 printk("the code is fine but needs lockdep annotation.\n");
63269 printk("turning off the locking correctness validator.\n");
63270 dump_stack();
63271 @@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep
63272 if (!class)
63273 return 0;
63274 }
63275 - debug_atomic_inc((atomic_t *)&class->ops);
63276 + debug_atomic_inc((atomic_unchecked_t *)&class->ops);
63277 if (very_verbose(class)) {
63278 printk("\nacquire class [%p] %s", class->key, class->name);
63279 if (class->name_version > 1)
63280 diff -urNp linux-2.6.32.45/kernel/lockdep_internals.h linux-2.6.32.45/kernel/lockdep_internals.h
63281 --- linux-2.6.32.45/kernel/lockdep_internals.h 2011-03-27 14:31:47.000000000 -0400
63282 +++ linux-2.6.32.45/kernel/lockdep_internals.h 2011-04-17 15:56:46.000000000 -0400
63283 @@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_
63284 /*
63285 * Various lockdep statistics:
63286 */
63287 -extern atomic_t chain_lookup_hits;
63288 -extern atomic_t chain_lookup_misses;
63289 -extern atomic_t hardirqs_on_events;
63290 -extern atomic_t hardirqs_off_events;
63291 -extern atomic_t redundant_hardirqs_on;
63292 -extern atomic_t redundant_hardirqs_off;
63293 -extern atomic_t softirqs_on_events;
63294 -extern atomic_t softirqs_off_events;
63295 -extern atomic_t redundant_softirqs_on;
63296 -extern atomic_t redundant_softirqs_off;
63297 -extern atomic_t nr_unused_locks;
63298 -extern atomic_t nr_cyclic_checks;
63299 -extern atomic_t nr_cyclic_check_recursions;
63300 -extern atomic_t nr_find_usage_forwards_checks;
63301 -extern atomic_t nr_find_usage_forwards_recursions;
63302 -extern atomic_t nr_find_usage_backwards_checks;
63303 -extern atomic_t nr_find_usage_backwards_recursions;
63304 -# define debug_atomic_inc(ptr) atomic_inc(ptr)
63305 -# define debug_atomic_dec(ptr) atomic_dec(ptr)
63306 -# define debug_atomic_read(ptr) atomic_read(ptr)
63307 +extern atomic_unchecked_t chain_lookup_hits;
63308 +extern atomic_unchecked_t chain_lookup_misses;
63309 +extern atomic_unchecked_t hardirqs_on_events;
63310 +extern atomic_unchecked_t hardirqs_off_events;
63311 +extern atomic_unchecked_t redundant_hardirqs_on;
63312 +extern atomic_unchecked_t redundant_hardirqs_off;
63313 +extern atomic_unchecked_t softirqs_on_events;
63314 +extern atomic_unchecked_t softirqs_off_events;
63315 +extern atomic_unchecked_t redundant_softirqs_on;
63316 +extern atomic_unchecked_t redundant_softirqs_off;
63317 +extern atomic_unchecked_t nr_unused_locks;
63318 +extern atomic_unchecked_t nr_cyclic_checks;
63319 +extern atomic_unchecked_t nr_cyclic_check_recursions;
63320 +extern atomic_unchecked_t nr_find_usage_forwards_checks;
63321 +extern atomic_unchecked_t nr_find_usage_forwards_recursions;
63322 +extern atomic_unchecked_t nr_find_usage_backwards_checks;
63323 +extern atomic_unchecked_t nr_find_usage_backwards_recursions;
63324 +# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
63325 +# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
63326 +# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
63327 #else
63328 # define debug_atomic_inc(ptr) do { } while (0)
63329 # define debug_atomic_dec(ptr) do { } while (0)
63330 diff -urNp linux-2.6.32.45/kernel/lockdep_proc.c linux-2.6.32.45/kernel/lockdep_proc.c
63331 --- linux-2.6.32.45/kernel/lockdep_proc.c 2011-03-27 14:31:47.000000000 -0400
63332 +++ linux-2.6.32.45/kernel/lockdep_proc.c 2011-04-17 15:56:46.000000000 -0400
63333 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
63334
63335 static void print_name(struct seq_file *m, struct lock_class *class)
63336 {
63337 - char str[128];
63338 + char str[KSYM_NAME_LEN];
63339 const char *name = class->name;
63340
63341 if (!name) {
63342 diff -urNp linux-2.6.32.45/kernel/module.c linux-2.6.32.45/kernel/module.c
63343 --- linux-2.6.32.45/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
63344 +++ linux-2.6.32.45/kernel/module.c 2011-04-29 18:52:40.000000000 -0400
63345 @@ -55,6 +55,7 @@
63346 #include <linux/async.h>
63347 #include <linux/percpu.h>
63348 #include <linux/kmemleak.h>
63349 +#include <linux/grsecurity.h>
63350
63351 #define CREATE_TRACE_POINTS
63352 #include <trace/events/module.h>
63353 @@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq
63354 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
63355
63356 /* Bounds of module allocation, for speeding __module_address */
63357 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
63358 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
63359 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
63360
63361 int register_module_notifier(struct notifier_block * nb)
63362 {
63363 @@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct
63364 return true;
63365
63366 list_for_each_entry_rcu(mod, &modules, list) {
63367 - struct symsearch arr[] = {
63368 + struct symsearch modarr[] = {
63369 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
63370 NOT_GPL_ONLY, false },
63371 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
63372 @@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct
63373 #endif
63374 };
63375
63376 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
63377 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
63378 return true;
63379 }
63380 return false;
63381 @@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned lo
63382 void *ptr;
63383 int cpu;
63384
63385 - if (align > PAGE_SIZE) {
63386 + if (align-1 >= PAGE_SIZE) {
63387 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
63388 name, align, PAGE_SIZE);
63389 align = PAGE_SIZE;
63390 @@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resol
63391 * /sys/module/foo/sections stuff
63392 * J. Corbet <corbet@lwn.net>
63393 */
63394 -#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
63395 +#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
63396
63397 static inline bool sect_empty(const Elf_Shdr *sect)
63398 {
63399 @@ -1545,7 +1547,8 @@ static void free_module(struct module *m
63400 destroy_params(mod->kp, mod->num_kp);
63401
63402 /* This may be NULL, but that's OK */
63403 - module_free(mod, mod->module_init);
63404 + module_free(mod, mod->module_init_rw);
63405 + module_free_exec(mod, mod->module_init_rx);
63406 kfree(mod->args);
63407 if (mod->percpu)
63408 percpu_modfree(mod->percpu);
63409 @@ -1554,10 +1557,12 @@ static void free_module(struct module *m
63410 percpu_modfree(mod->refptr);
63411 #endif
63412 /* Free lock-classes: */
63413 - lockdep_free_key_range(mod->module_core, mod->core_size);
63414 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
63415 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
63416
63417 /* Finally, free the core (containing the module structure) */
63418 - module_free(mod, mod->module_core);
63419 + module_free_exec(mod, mod->module_core_rx);
63420 + module_free(mod, mod->module_core_rw);
63421
63422 #ifdef CONFIG_MPU
63423 update_protections(current->mm);
63424 @@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *se
63425 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
63426 int ret = 0;
63427 const struct kernel_symbol *ksym;
63428 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63429 + int is_fs_load = 0;
63430 + int register_filesystem_found = 0;
63431 + char *p;
63432 +
63433 + p = strstr(mod->args, "grsec_modharden_fs");
63434 +
63435 + if (p) {
63436 + char *endptr = p + strlen("grsec_modharden_fs");
63437 + /* copy \0 as well */
63438 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
63439 + is_fs_load = 1;
63440 + }
63441 +#endif
63442 +
63443
63444 for (i = 1; i < n; i++) {
63445 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63446 + const char *name = strtab + sym[i].st_name;
63447 +
63448 + /* it's a real shame this will never get ripped and copied
63449 + upstream! ;(
63450 + */
63451 + if (is_fs_load && !strcmp(name, "register_filesystem"))
63452 + register_filesystem_found = 1;
63453 +#endif
63454 switch (sym[i].st_shndx) {
63455 case SHN_COMMON:
63456 /* We compiled with -fno-common. These are not
63457 @@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *se
63458 strtab + sym[i].st_name, mod);
63459 /* Ok if resolved. */
63460 if (ksym) {
63461 + pax_open_kernel();
63462 sym[i].st_value = ksym->value;
63463 + pax_close_kernel();
63464 break;
63465 }
63466
63467 @@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *se
63468 secbase = (unsigned long)mod->percpu;
63469 else
63470 secbase = sechdrs[sym[i].st_shndx].sh_addr;
63471 + pax_open_kernel();
63472 sym[i].st_value += secbase;
63473 + pax_close_kernel();
63474 break;
63475 }
63476 }
63477
63478 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63479 + if (is_fs_load && !register_filesystem_found) {
63480 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
63481 + ret = -EPERM;
63482 + }
63483 +#endif
63484 +
63485 return ret;
63486 }
63487
63488 @@ -1731,11 +1771,12 @@ static void layout_sections(struct modul
63489 || s->sh_entsize != ~0UL
63490 || strstarts(secstrings + s->sh_name, ".init"))
63491 continue;
63492 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
63493 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
63494 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
63495 + else
63496 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
63497 DEBUGP("\t%s\n", secstrings + s->sh_name);
63498 }
63499 - if (m == 0)
63500 - mod->core_text_size = mod->core_size;
63501 }
63502
63503 DEBUGP("Init section allocation order:\n");
63504 @@ -1748,12 +1789,13 @@ static void layout_sections(struct modul
63505 || s->sh_entsize != ~0UL
63506 || !strstarts(secstrings + s->sh_name, ".init"))
63507 continue;
63508 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
63509 - | INIT_OFFSET_MASK);
63510 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
63511 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
63512 + else
63513 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
63514 + s->sh_entsize |= INIT_OFFSET_MASK;
63515 DEBUGP("\t%s\n", secstrings + s->sh_name);
63516 }
63517 - if (m == 0)
63518 - mod->init_text_size = mod->init_size;
63519 }
63520 }
63521
63522 @@ -1857,9 +1899,8 @@ static int is_exported(const char *name,
63523
63524 /* As per nm */
63525 static char elf_type(const Elf_Sym *sym,
63526 - Elf_Shdr *sechdrs,
63527 - const char *secstrings,
63528 - struct module *mod)
63529 + const Elf_Shdr *sechdrs,
63530 + const char *secstrings)
63531 {
63532 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
63533 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
63534 @@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struc
63535
63536 /* Put symbol section at end of init part of module. */
63537 symsect->sh_flags |= SHF_ALLOC;
63538 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
63539 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
63540 symindex) | INIT_OFFSET_MASK;
63541 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
63542
63543 @@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struc
63544 }
63545
63546 /* Append room for core symbols at end of core part. */
63547 - symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
63548 - mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
63549 + symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
63550 + mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
63551
63552 /* Put string table section at end of init part of module. */
63553 strsect->sh_flags |= SHF_ALLOC;
63554 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
63555 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
63556 strindex) | INIT_OFFSET_MASK;
63557 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
63558
63559 /* Append room for core symbols' strings at end of core part. */
63560 - *pstroffs = mod->core_size;
63561 + *pstroffs = mod->core_size_rx;
63562 __set_bit(0, strmap);
63563 - mod->core_size += bitmap_weight(strmap, strsect->sh_size);
63564 + mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
63565
63566 return symoffs;
63567 }
63568 @@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *
63569 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
63570 mod->strtab = (void *)sechdrs[strindex].sh_addr;
63571
63572 + pax_open_kernel();
63573 +
63574 /* Set types up while we still have access to sections. */
63575 for (i = 0; i < mod->num_symtab; i++)
63576 mod->symtab[i].st_info
63577 - = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
63578 + = elf_type(&mod->symtab[i], sechdrs, secstrings);
63579
63580 - mod->core_symtab = dst = mod->module_core + symoffs;
63581 + mod->core_symtab = dst = mod->module_core_rx + symoffs;
63582 src = mod->symtab;
63583 *dst = *src;
63584 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
63585 @@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *
63586 }
63587 mod->core_num_syms = ndst;
63588
63589 - mod->core_strtab = s = mod->module_core + stroffs;
63590 + mod->core_strtab = s = mod->module_core_rx + stroffs;
63591 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
63592 if (test_bit(i, strmap))
63593 *++s = mod->strtab[i];
63594 +
63595 + pax_close_kernel();
63596 }
63597 #else
63598 static inline unsigned long layout_symtab(struct module *mod,
63599 @@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _
63600 #endif
63601 }
63602
63603 -static void *module_alloc_update_bounds(unsigned long size)
63604 +static void *module_alloc_update_bounds_rw(unsigned long size)
63605 {
63606 void *ret = module_alloc(size);
63607
63608 if (ret) {
63609 /* Update module bounds. */
63610 - if ((unsigned long)ret < module_addr_min)
63611 - module_addr_min = (unsigned long)ret;
63612 - if ((unsigned long)ret + size > module_addr_max)
63613 - module_addr_max = (unsigned long)ret + size;
63614 + if ((unsigned long)ret < module_addr_min_rw)
63615 + module_addr_min_rw = (unsigned long)ret;
63616 + if ((unsigned long)ret + size > module_addr_max_rw)
63617 + module_addr_max_rw = (unsigned long)ret + size;
63618 + }
63619 + return ret;
63620 +}
63621 +
63622 +static void *module_alloc_update_bounds_rx(unsigned long size)
63623 +{
63624 + void *ret = module_alloc_exec(size);
63625 +
63626 + if (ret) {
63627 + /* Update module bounds. */
63628 + if ((unsigned long)ret < module_addr_min_rx)
63629 + module_addr_min_rx = (unsigned long)ret;
63630 + if ((unsigned long)ret + size > module_addr_max_rx)
63631 + module_addr_max_rx = (unsigned long)ret + size;
63632 }
63633 return ret;
63634 }
63635 @@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct
63636 unsigned int i;
63637
63638 /* only scan the sections containing data */
63639 - kmemleak_scan_area(mod->module_core, (unsigned long)mod -
63640 - (unsigned long)mod->module_core,
63641 + kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
63642 + (unsigned long)mod->module_core_rw,
63643 sizeof(struct module), GFP_KERNEL);
63644
63645 for (i = 1; i < hdr->e_shnum; i++) {
63646 @@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct
63647 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
63648 continue;
63649
63650 - kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
63651 - (unsigned long)mod->module_core,
63652 + kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
63653 + (unsigned long)mod->module_core_rw,
63654 sechdrs[i].sh_size, GFP_KERNEL);
63655 }
63656 }
63657 @@ -2263,7 +2322,7 @@ static noinline struct module *load_modu
63658 secstrings, &stroffs, strmap);
63659
63660 /* Do the allocs. */
63661 - ptr = module_alloc_update_bounds(mod->core_size);
63662 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
63663 /*
63664 * The pointer to this block is stored in the module structure
63665 * which is inside the block. Just mark it as not being a
63666 @@ -2274,23 +2333,47 @@ static noinline struct module *load_modu
63667 err = -ENOMEM;
63668 goto free_percpu;
63669 }
63670 - memset(ptr, 0, mod->core_size);
63671 - mod->module_core = ptr;
63672 + memset(ptr, 0, mod->core_size_rw);
63673 + mod->module_core_rw = ptr;
63674
63675 - ptr = module_alloc_update_bounds(mod->init_size);
63676 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
63677 /*
63678 * The pointer to this block is stored in the module structure
63679 * which is inside the block. This block doesn't need to be
63680 * scanned as it contains data and code that will be freed
63681 * after the module is initialized.
63682 */
63683 - kmemleak_ignore(ptr);
63684 - if (!ptr && mod->init_size) {
63685 + kmemleak_not_leak(ptr);
63686 + if (!ptr && mod->init_size_rw) {
63687 + err = -ENOMEM;
63688 + goto free_core_rw;
63689 + }
63690 + memset(ptr, 0, mod->init_size_rw);
63691 + mod->module_init_rw = ptr;
63692 +
63693 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
63694 + kmemleak_not_leak(ptr);
63695 + if (!ptr) {
63696 err = -ENOMEM;
63697 - goto free_core;
63698 + goto free_init_rw;
63699 }
63700 - memset(ptr, 0, mod->init_size);
63701 - mod->module_init = ptr;
63702 +
63703 + pax_open_kernel();
63704 + memset(ptr, 0, mod->core_size_rx);
63705 + pax_close_kernel();
63706 + mod->module_core_rx = ptr;
63707 +
63708 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
63709 + kmemleak_not_leak(ptr);
63710 + if (!ptr && mod->init_size_rx) {
63711 + err = -ENOMEM;
63712 + goto free_core_rx;
63713 + }
63714 +
63715 + pax_open_kernel();
63716 + memset(ptr, 0, mod->init_size_rx);
63717 + pax_close_kernel();
63718 + mod->module_init_rx = ptr;
63719
63720 /* Transfer each section which specifies SHF_ALLOC */
63721 DEBUGP("final section addresses:\n");
63722 @@ -2300,17 +2383,45 @@ static noinline struct module *load_modu
63723 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
63724 continue;
63725
63726 - if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
63727 - dest = mod->module_init
63728 - + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
63729 - else
63730 - dest = mod->module_core + sechdrs[i].sh_entsize;
63731 + if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
63732 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
63733 + dest = mod->module_init_rw
63734 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
63735 + else
63736 + dest = mod->module_init_rx
63737 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
63738 + } else {
63739 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
63740 + dest = mod->module_core_rw + sechdrs[i].sh_entsize;
63741 + else
63742 + dest = mod->module_core_rx + sechdrs[i].sh_entsize;
63743 + }
63744 +
63745 + if (sechdrs[i].sh_type != SHT_NOBITS) {
63746
63747 - if (sechdrs[i].sh_type != SHT_NOBITS)
63748 - memcpy(dest, (void *)sechdrs[i].sh_addr,
63749 - sechdrs[i].sh_size);
63750 +#ifdef CONFIG_PAX_KERNEXEC
63751 +#ifdef CONFIG_X86_64
63752 + if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
63753 + set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
63754 +#endif
63755 + if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
63756 + pax_open_kernel();
63757 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
63758 + pax_close_kernel();
63759 + } else
63760 +#endif
63761 +
63762 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
63763 + }
63764 /* Update sh_addr to point to copy in image. */
63765 - sechdrs[i].sh_addr = (unsigned long)dest;
63766 +
63767 +#ifdef CONFIG_PAX_KERNEXEC
63768 + if (sechdrs[i].sh_flags & SHF_EXECINSTR)
63769 + sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
63770 + else
63771 +#endif
63772 +
63773 + sechdrs[i].sh_addr = (unsigned long)dest;
63774 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
63775 }
63776 /* Module has been moved. */
63777 @@ -2322,7 +2433,7 @@ static noinline struct module *load_modu
63778 mod->name);
63779 if (!mod->refptr) {
63780 err = -ENOMEM;
63781 - goto free_init;
63782 + goto free_init_rx;
63783 }
63784 #endif
63785 /* Now we've moved module, initialize linked lists, etc. */
63786 @@ -2351,6 +2462,31 @@ static noinline struct module *load_modu
63787 /* Set up MODINFO_ATTR fields */
63788 setup_modinfo(mod, sechdrs, infoindex);
63789
63790 + mod->args = args;
63791 +
63792 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63793 + {
63794 + char *p, *p2;
63795 +
63796 + if (strstr(mod->args, "grsec_modharden_netdev")) {
63797 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
63798 + err = -EPERM;
63799 + goto cleanup;
63800 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
63801 + p += strlen("grsec_modharden_normal");
63802 + p2 = strstr(p, "_");
63803 + if (p2) {
63804 + *p2 = '\0';
63805 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
63806 + *p2 = '_';
63807 + }
63808 + err = -EPERM;
63809 + goto cleanup;
63810 + }
63811 + }
63812 +#endif
63813 +
63814 +
63815 /* Fix up syms, so that st_value is a pointer to location. */
63816 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
63817 mod);
63818 @@ -2431,8 +2567,8 @@ static noinline struct module *load_modu
63819
63820 /* Now do relocations. */
63821 for (i = 1; i < hdr->e_shnum; i++) {
63822 - const char *strtab = (char *)sechdrs[strindex].sh_addr;
63823 unsigned int info = sechdrs[i].sh_info;
63824 + strtab = (char *)sechdrs[strindex].sh_addr;
63825
63826 /* Not a valid relocation section? */
63827 if (info >= hdr->e_shnum)
63828 @@ -2493,16 +2629,15 @@ static noinline struct module *load_modu
63829 * Do it before processing of module parameters, so the module
63830 * can provide parameter accessor functions of its own.
63831 */
63832 - if (mod->module_init)
63833 - flush_icache_range((unsigned long)mod->module_init,
63834 - (unsigned long)mod->module_init
63835 - + mod->init_size);
63836 - flush_icache_range((unsigned long)mod->module_core,
63837 - (unsigned long)mod->module_core + mod->core_size);
63838 + if (mod->module_init_rx)
63839 + flush_icache_range((unsigned long)mod->module_init_rx,
63840 + (unsigned long)mod->module_init_rx
63841 + + mod->init_size_rx);
63842 + flush_icache_range((unsigned long)mod->module_core_rx,
63843 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
63844
63845 set_fs(old_fs);
63846
63847 - mod->args = args;
63848 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
63849 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
63850 mod->name);
63851 @@ -2546,12 +2681,16 @@ static noinline struct module *load_modu
63852 free_unload:
63853 module_unload_free(mod);
63854 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
63855 + free_init_rx:
63856 percpu_modfree(mod->refptr);
63857 - free_init:
63858 #endif
63859 - module_free(mod, mod->module_init);
63860 - free_core:
63861 - module_free(mod, mod->module_core);
63862 + module_free_exec(mod, mod->module_init_rx);
63863 + free_core_rx:
63864 + module_free_exec(mod, mod->module_core_rx);
63865 + free_init_rw:
63866 + module_free(mod, mod->module_init_rw);
63867 + free_core_rw:
63868 + module_free(mod, mod->module_core_rw);
63869 /* mod will be freed with core. Don't access it beyond this line! */
63870 free_percpu:
63871 if (percpu)
63872 @@ -2653,10 +2792,12 @@ SYSCALL_DEFINE3(init_module, void __user
63873 mod->symtab = mod->core_symtab;
63874 mod->strtab = mod->core_strtab;
63875 #endif
63876 - module_free(mod, mod->module_init);
63877 - mod->module_init = NULL;
63878 - mod->init_size = 0;
63879 - mod->init_text_size = 0;
63880 + module_free(mod, mod->module_init_rw);
63881 + module_free_exec(mod, mod->module_init_rx);
63882 + mod->module_init_rw = NULL;
63883 + mod->module_init_rx = NULL;
63884 + mod->init_size_rw = 0;
63885 + mod->init_size_rx = 0;
63886 mutex_unlock(&module_mutex);
63887
63888 return 0;
63889 @@ -2687,10 +2828,16 @@ static const char *get_ksymbol(struct mo
63890 unsigned long nextval;
63891
63892 /* At worse, next value is at end of module */
63893 - if (within_module_init(addr, mod))
63894 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
63895 + if (within_module_init_rx(addr, mod))
63896 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
63897 + else if (within_module_init_rw(addr, mod))
63898 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
63899 + else if (within_module_core_rx(addr, mod))
63900 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
63901 + else if (within_module_core_rw(addr, mod))
63902 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
63903 else
63904 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
63905 + return NULL;
63906
63907 /* Scan for closest preceeding symbol, and next symbol. (ELF
63908 starts real symbols at 1). */
63909 @@ -2936,7 +3083,7 @@ static int m_show(struct seq_file *m, vo
63910 char buf[8];
63911
63912 seq_printf(m, "%s %u",
63913 - mod->name, mod->init_size + mod->core_size);
63914 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
63915 print_unload_info(m, mod);
63916
63917 /* Informative for users. */
63918 @@ -2945,7 +3092,7 @@ static int m_show(struct seq_file *m, vo
63919 mod->state == MODULE_STATE_COMING ? "Loading":
63920 "Live");
63921 /* Used by oprofile and other similar tools. */
63922 - seq_printf(m, " 0x%p", mod->module_core);
63923 + seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
63924
63925 /* Taints info */
63926 if (mod->taints)
63927 @@ -2981,7 +3128,17 @@ static const struct file_operations proc
63928
63929 static int __init proc_modules_init(void)
63930 {
63931 +#ifndef CONFIG_GRKERNSEC_HIDESYM
63932 +#ifdef CONFIG_GRKERNSEC_PROC_USER
63933 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
63934 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63935 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
63936 +#else
63937 proc_create("modules", 0, NULL, &proc_modules_operations);
63938 +#endif
63939 +#else
63940 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
63941 +#endif
63942 return 0;
63943 }
63944 module_init(proc_modules_init);
63945 @@ -3040,12 +3197,12 @@ struct module *__module_address(unsigned
63946 {
63947 struct module *mod;
63948
63949 - if (addr < module_addr_min || addr > module_addr_max)
63950 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
63951 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
63952 return NULL;
63953
63954 list_for_each_entry_rcu(mod, &modules, list)
63955 - if (within_module_core(addr, mod)
63956 - || within_module_init(addr, mod))
63957 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
63958 return mod;
63959 return NULL;
63960 }
63961 @@ -3079,11 +3236,20 @@ bool is_module_text_address(unsigned lon
63962 */
63963 struct module *__module_text_address(unsigned long addr)
63964 {
63965 - struct module *mod = __module_address(addr);
63966 + struct module *mod;
63967 +
63968 +#ifdef CONFIG_X86_32
63969 + addr = ktla_ktva(addr);
63970 +#endif
63971 +
63972 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
63973 + return NULL;
63974 +
63975 + mod = __module_address(addr);
63976 +
63977 if (mod) {
63978 /* Make sure it's within the text section. */
63979 - if (!within(addr, mod->module_init, mod->init_text_size)
63980 - && !within(addr, mod->module_core, mod->core_text_size))
63981 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
63982 mod = NULL;
63983 }
63984 return mod;
63985 diff -urNp linux-2.6.32.45/kernel/mutex.c linux-2.6.32.45/kernel/mutex.c
63986 --- linux-2.6.32.45/kernel/mutex.c 2011-03-27 14:31:47.000000000 -0400
63987 +++ linux-2.6.32.45/kernel/mutex.c 2011-04-17 15:56:46.000000000 -0400
63988 @@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock,
63989 */
63990
63991 for (;;) {
63992 - struct thread_info *owner;
63993 + struct task_struct *owner;
63994
63995 /*
63996 * If we own the BKL, then don't spin. The owner of
63997 @@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock,
63998 spin_lock_mutex(&lock->wait_lock, flags);
63999
64000 debug_mutex_lock_common(lock, &waiter);
64001 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
64002 + debug_mutex_add_waiter(lock, &waiter, task);
64003
64004 /* add waiting tasks to the end of the waitqueue (FIFO): */
64005 list_add_tail(&waiter.list, &lock->wait_list);
64006 @@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock,
64007 * TASK_UNINTERRUPTIBLE case.)
64008 */
64009 if (unlikely(signal_pending_state(state, task))) {
64010 - mutex_remove_waiter(lock, &waiter,
64011 - task_thread_info(task));
64012 + mutex_remove_waiter(lock, &waiter, task);
64013 mutex_release(&lock->dep_map, 1, ip);
64014 spin_unlock_mutex(&lock->wait_lock, flags);
64015
64016 @@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock,
64017 done:
64018 lock_acquired(&lock->dep_map, ip);
64019 /* got the lock - rejoice! */
64020 - mutex_remove_waiter(lock, &waiter, current_thread_info());
64021 + mutex_remove_waiter(lock, &waiter, task);
64022 mutex_set_owner(lock);
64023
64024 /* set it to 0 if there are no waiters left: */
64025 diff -urNp linux-2.6.32.45/kernel/mutex-debug.c linux-2.6.32.45/kernel/mutex-debug.c
64026 --- linux-2.6.32.45/kernel/mutex-debug.c 2011-03-27 14:31:47.000000000 -0400
64027 +++ linux-2.6.32.45/kernel/mutex-debug.c 2011-04-17 15:56:46.000000000 -0400
64028 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
64029 }
64030
64031 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64032 - struct thread_info *ti)
64033 + struct task_struct *task)
64034 {
64035 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
64036
64037 /* Mark the current thread as blocked on the lock: */
64038 - ti->task->blocked_on = waiter;
64039 + task->blocked_on = waiter;
64040 }
64041
64042 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64043 - struct thread_info *ti)
64044 + struct task_struct *task)
64045 {
64046 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
64047 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
64048 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
64049 - ti->task->blocked_on = NULL;
64050 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
64051 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
64052 + task->blocked_on = NULL;
64053
64054 list_del_init(&waiter->list);
64055 waiter->task = NULL;
64056 @@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lo
64057 return;
64058
64059 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
64060 - DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
64061 + DEBUG_LOCKS_WARN_ON(lock->owner != current);
64062 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
64063 mutex_clear_owner(lock);
64064 }
64065 diff -urNp linux-2.6.32.45/kernel/mutex-debug.h linux-2.6.32.45/kernel/mutex-debug.h
64066 --- linux-2.6.32.45/kernel/mutex-debug.h 2011-03-27 14:31:47.000000000 -0400
64067 +++ linux-2.6.32.45/kernel/mutex-debug.h 2011-04-17 15:56:46.000000000 -0400
64068 @@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(stru
64069 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
64070 extern void debug_mutex_add_waiter(struct mutex *lock,
64071 struct mutex_waiter *waiter,
64072 - struct thread_info *ti);
64073 + struct task_struct *task);
64074 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64075 - struct thread_info *ti);
64076 + struct task_struct *task);
64077 extern void debug_mutex_unlock(struct mutex *lock);
64078 extern void debug_mutex_init(struct mutex *lock, const char *name,
64079 struct lock_class_key *key);
64080
64081 static inline void mutex_set_owner(struct mutex *lock)
64082 {
64083 - lock->owner = current_thread_info();
64084 + lock->owner = current;
64085 }
64086
64087 static inline void mutex_clear_owner(struct mutex *lock)
64088 diff -urNp linux-2.6.32.45/kernel/mutex.h linux-2.6.32.45/kernel/mutex.h
64089 --- linux-2.6.32.45/kernel/mutex.h 2011-03-27 14:31:47.000000000 -0400
64090 +++ linux-2.6.32.45/kernel/mutex.h 2011-04-17 15:56:46.000000000 -0400
64091 @@ -19,7 +19,7 @@
64092 #ifdef CONFIG_SMP
64093 static inline void mutex_set_owner(struct mutex *lock)
64094 {
64095 - lock->owner = current_thread_info();
64096 + lock->owner = current;
64097 }
64098
64099 static inline void mutex_clear_owner(struct mutex *lock)
64100 diff -urNp linux-2.6.32.45/kernel/panic.c linux-2.6.32.45/kernel/panic.c
64101 --- linux-2.6.32.45/kernel/panic.c 2011-03-27 14:31:47.000000000 -0400
64102 +++ linux-2.6.32.45/kernel/panic.c 2011-04-17 15:56:46.000000000 -0400
64103 @@ -352,7 +352,7 @@ static void warn_slowpath_common(const c
64104 const char *board;
64105
64106 printk(KERN_WARNING "------------[ cut here ]------------\n");
64107 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
64108 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
64109 board = dmi_get_system_info(DMI_PRODUCT_NAME);
64110 if (board)
64111 printk(KERN_WARNING "Hardware name: %s\n", board);
64112 @@ -392,7 +392,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
64113 */
64114 void __stack_chk_fail(void)
64115 {
64116 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
64117 + dump_stack();
64118 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
64119 __builtin_return_address(0));
64120 }
64121 EXPORT_SYMBOL(__stack_chk_fail);
64122 diff -urNp linux-2.6.32.45/kernel/params.c linux-2.6.32.45/kernel/params.c
64123 --- linux-2.6.32.45/kernel/params.c 2011-03-27 14:31:47.000000000 -0400
64124 +++ linux-2.6.32.45/kernel/params.c 2011-04-17 15:56:46.000000000 -0400
64125 @@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct
64126 return ret;
64127 }
64128
64129 -static struct sysfs_ops module_sysfs_ops = {
64130 +static const struct sysfs_ops module_sysfs_ops = {
64131 .show = module_attr_show,
64132 .store = module_attr_store,
64133 };
64134 @@ -739,7 +739,7 @@ static int uevent_filter(struct kset *ks
64135 return 0;
64136 }
64137
64138 -static struct kset_uevent_ops module_uevent_ops = {
64139 +static const struct kset_uevent_ops module_uevent_ops = {
64140 .filter = uevent_filter,
64141 };
64142
64143 diff -urNp linux-2.6.32.45/kernel/perf_event.c linux-2.6.32.45/kernel/perf_event.c
64144 --- linux-2.6.32.45/kernel/perf_event.c 2011-08-09 18:35:30.000000000 -0400
64145 +++ linux-2.6.32.45/kernel/perf_event.c 2011-08-09 18:34:01.000000000 -0400
64146 @@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostl
64147 */
64148 int sysctl_perf_event_sample_rate __read_mostly = 100000;
64149
64150 -static atomic64_t perf_event_id;
64151 +static atomic64_unchecked_t perf_event_id;
64152
64153 /*
64154 * Lock for (sysadmin-configurable) event reservations:
64155 @@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struc
64156 * In order to keep per-task stats reliable we need to flip the event
64157 * values when we flip the contexts.
64158 */
64159 - value = atomic64_read(&next_event->count);
64160 - value = atomic64_xchg(&event->count, value);
64161 - atomic64_set(&next_event->count, value);
64162 + value = atomic64_read_unchecked(&next_event->count);
64163 + value = atomic64_xchg_unchecked(&event->count, value);
64164 + atomic64_set_unchecked(&next_event->count, value);
64165
64166 swap(event->total_time_enabled, next_event->total_time_enabled);
64167 swap(event->total_time_running, next_event->total_time_running);
64168 @@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_e
64169 update_event_times(event);
64170 }
64171
64172 - return atomic64_read(&event->count);
64173 + return atomic64_read_unchecked(&event->count);
64174 }
64175
64176 /*
64177 @@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct
64178 values[n++] = 1 + leader->nr_siblings;
64179 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64180 values[n++] = leader->total_time_enabled +
64181 - atomic64_read(&leader->child_total_time_enabled);
64182 + atomic64_read_unchecked(&leader->child_total_time_enabled);
64183 }
64184 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64185 values[n++] = leader->total_time_running +
64186 - atomic64_read(&leader->child_total_time_running);
64187 + atomic64_read_unchecked(&leader->child_total_time_running);
64188 }
64189
64190 size = n * sizeof(u64);
64191 @@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct pe
64192 values[n++] = perf_event_read_value(event);
64193 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64194 values[n++] = event->total_time_enabled +
64195 - atomic64_read(&event->child_total_time_enabled);
64196 + atomic64_read_unchecked(&event->child_total_time_enabled);
64197 }
64198 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64199 values[n++] = event->total_time_running +
64200 - atomic64_read(&event->child_total_time_running);
64201 + atomic64_read_unchecked(&event->child_total_time_running);
64202 }
64203 if (read_format & PERF_FORMAT_ID)
64204 values[n++] = primary_event_id(event);
64205 @@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct fil
64206 static void perf_event_reset(struct perf_event *event)
64207 {
64208 (void)perf_event_read(event);
64209 - atomic64_set(&event->count, 0);
64210 + atomic64_set_unchecked(&event->count, 0);
64211 perf_event_update_userpage(event);
64212 }
64213
64214 @@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct p
64215 ++userpg->lock;
64216 barrier();
64217 userpg->index = perf_event_index(event);
64218 - userpg->offset = atomic64_read(&event->count);
64219 + userpg->offset = atomic64_read_unchecked(&event->count);
64220 if (event->state == PERF_EVENT_STATE_ACTIVE)
64221 - userpg->offset -= atomic64_read(&event->hw.prev_count);
64222 + userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
64223
64224 userpg->time_enabled = event->total_time_enabled +
64225 - atomic64_read(&event->child_total_time_enabled);
64226 + atomic64_read_unchecked(&event->child_total_time_enabled);
64227
64228 userpg->time_running = event->total_time_running +
64229 - atomic64_read(&event->child_total_time_running);
64230 + atomic64_read_unchecked(&event->child_total_time_running);
64231
64232 barrier();
64233 ++userpg->lock;
64234 @@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct
64235 u64 values[4];
64236 int n = 0;
64237
64238 - values[n++] = atomic64_read(&event->count);
64239 + values[n++] = atomic64_read_unchecked(&event->count);
64240 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64241 values[n++] = event->total_time_enabled +
64242 - atomic64_read(&event->child_total_time_enabled);
64243 + atomic64_read_unchecked(&event->child_total_time_enabled);
64244 }
64245 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64246 values[n++] = event->total_time_running +
64247 - atomic64_read(&event->child_total_time_running);
64248 + atomic64_read_unchecked(&event->child_total_time_running);
64249 }
64250 if (read_format & PERF_FORMAT_ID)
64251 values[n++] = primary_event_id(event);
64252 @@ -2940,7 +2940,7 @@ static void perf_output_read_group(struc
64253 if (leader != event)
64254 leader->pmu->read(leader);
64255
64256 - values[n++] = atomic64_read(&leader->count);
64257 + values[n++] = atomic64_read_unchecked(&leader->count);
64258 if (read_format & PERF_FORMAT_ID)
64259 values[n++] = primary_event_id(leader);
64260
64261 @@ -2952,7 +2952,7 @@ static void perf_output_read_group(struc
64262 if (sub != event)
64263 sub->pmu->read(sub);
64264
64265 - values[n++] = atomic64_read(&sub->count);
64266 + values[n++] = atomic64_read_unchecked(&sub->count);
64267 if (read_format & PERF_FORMAT_ID)
64268 values[n++] = primary_event_id(sub);
64269
64270 @@ -3783,7 +3783,7 @@ static void perf_swevent_add(struct perf
64271 {
64272 struct hw_perf_event *hwc = &event->hw;
64273
64274 - atomic64_add(nr, &event->count);
64275 + atomic64_add_unchecked(nr, &event->count);
64276
64277 if (!hwc->sample_period)
64278 return;
64279 @@ -4040,9 +4040,9 @@ static void cpu_clock_perf_event_update(
64280 u64 now;
64281
64282 now = cpu_clock(cpu);
64283 - prev = atomic64_read(&event->hw.prev_count);
64284 - atomic64_set(&event->hw.prev_count, now);
64285 - atomic64_add(now - prev, &event->count);
64286 + prev = atomic64_read_unchecked(&event->hw.prev_count);
64287 + atomic64_set_unchecked(&event->hw.prev_count, now);
64288 + atomic64_add_unchecked(now - prev, &event->count);
64289 }
64290
64291 static int cpu_clock_perf_event_enable(struct perf_event *event)
64292 @@ -4050,7 +4050,7 @@ static int cpu_clock_perf_event_enable(s
64293 struct hw_perf_event *hwc = &event->hw;
64294 int cpu = raw_smp_processor_id();
64295
64296 - atomic64_set(&hwc->prev_count, cpu_clock(cpu));
64297 + atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
64298 perf_swevent_start_hrtimer(event);
64299
64300 return 0;
64301 @@ -4082,9 +4082,9 @@ static void task_clock_perf_event_update
64302 u64 prev;
64303 s64 delta;
64304
64305 - prev = atomic64_xchg(&event->hw.prev_count, now);
64306 + prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
64307 delta = now - prev;
64308 - atomic64_add(delta, &event->count);
64309 + atomic64_add_unchecked(delta, &event->count);
64310 }
64311
64312 static int task_clock_perf_event_enable(struct perf_event *event)
64313 @@ -4094,7 +4094,7 @@ static int task_clock_perf_event_enable(
64314
64315 now = event->ctx->time;
64316
64317 - atomic64_set(&hwc->prev_count, now);
64318 + atomic64_set_unchecked(&hwc->prev_count, now);
64319
64320 perf_swevent_start_hrtimer(event);
64321
64322 @@ -4289,7 +4289,7 @@ perf_event_alloc(struct perf_event_attr
64323 event->parent = parent_event;
64324
64325 event->ns = get_pid_ns(current->nsproxy->pid_ns);
64326 - event->id = atomic64_inc_return(&perf_event_id);
64327 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
64328
64329 event->state = PERF_EVENT_STATE_INACTIVE;
64330
64331 @@ -4720,15 +4720,15 @@ static void sync_child_event(struct perf
64332 if (child_event->attr.inherit_stat)
64333 perf_event_read_event(child_event, child);
64334
64335 - child_val = atomic64_read(&child_event->count);
64336 + child_val = atomic64_read_unchecked(&child_event->count);
64337
64338 /*
64339 * Add back the child's count to the parent's count:
64340 */
64341 - atomic64_add(child_val, &parent_event->count);
64342 - atomic64_add(child_event->total_time_enabled,
64343 + atomic64_add_unchecked(child_val, &parent_event->count);
64344 + atomic64_add_unchecked(child_event->total_time_enabled,
64345 &parent_event->child_total_time_enabled);
64346 - atomic64_add(child_event->total_time_running,
64347 + atomic64_add_unchecked(child_event->total_time_running,
64348 &parent_event->child_total_time_running);
64349
64350 /*
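
The perf_event.c hunks above switch the event counters from atomic64_t to atomic64_unchecked_t. In the PaX/grsecurity model the plain atomic types gain overflow (reference-count) protection, and the _unchecked variants opt statistics counters out of it, since wrapping there is harmless. Below is a minimal userspace sketch of that policy difference only, with hypothetical names and the GCC/Clang __builtin_add_overflow helper; it is not kernel code and does not model atomicity.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

typedef struct { int64_t v; } a64_model_t;   /* illustrative stand-in for atomic64_t */

static bool add_checked(a64_model_t *a, int64_t n)
{
        int64_t r;
        if (__builtin_add_overflow(a->v, n, &r))
                return false;   /* refuse to wrap; the hardened variant would saturate and warn */
        a->v = r;
        return true;
}

static void add_unchecked(a64_model_t *a, int64_t n)
{
        /* wrap silently via unsigned arithmetic; acceptable for pure statistics counters */
        a->v = (int64_t)((uint64_t)a->v + (uint64_t)n);
}

int main(void)
{
        a64_model_t count = { INT64_MAX - 1 };
        printf("checked add accepted: %d\n", add_checked(&count, 5));
        add_unchecked(&count, 5);
        printf("unchecked value after wrap: %lld\n", (long long)count.v);
        return 0;
}
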
64351 diff -urNp linux-2.6.32.45/kernel/pid.c linux-2.6.32.45/kernel/pid.c
64352 --- linux-2.6.32.45/kernel/pid.c 2011-04-22 19:16:29.000000000 -0400
64353 +++ linux-2.6.32.45/kernel/pid.c 2011-08-21 19:11:29.000000000 -0400
64354 @@ -33,6 +33,7 @@
64355 #include <linux/rculist.h>
64356 #include <linux/bootmem.h>
64357 #include <linux/hash.h>
64358 +#include <linux/security.h>
64359 #include <linux/pid_namespace.h>
64360 #include <linux/init_task.h>
64361 #include <linux/syscalls.h>
64362 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
64363
64364 int pid_max = PID_MAX_DEFAULT;
64365
64366 -#define RESERVED_PIDS 300
64367 +#define RESERVED_PIDS 500
64368
64369 int pid_max_min = RESERVED_PIDS + 1;
64370 int pid_max_max = PID_MAX_LIMIT;
64371 @@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
64372 */
64373 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
64374 {
64375 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64376 + struct task_struct *task;
64377 +
64378 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64379 +
64380 + if (gr_pid_is_chrooted(task))
64381 + return NULL;
64382 +
64383 + return task;
64384 }
64385
64386 struct task_struct *find_task_by_vpid(pid_t vnr)
64387 @@ -391,6 +399,11 @@ struct task_struct *find_task_by_vpid(pi
64388 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
64389 }
64390
64391 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
64392 +{
64393 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
64394 +}
64395 +
64396 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
64397 {
64398 struct pid *pid;
64399 diff -urNp linux-2.6.32.45/kernel/posix-cpu-timers.c linux-2.6.32.45/kernel/posix-cpu-timers.c
64400 --- linux-2.6.32.45/kernel/posix-cpu-timers.c 2011-03-27 14:31:47.000000000 -0400
64401 +++ linux-2.6.32.45/kernel/posix-cpu-timers.c 2011-08-06 09:33:44.000000000 -0400
64402 @@ -6,6 +6,7 @@
64403 #include <linux/posix-timers.h>
64404 #include <linux/errno.h>
64405 #include <linux/math64.h>
64406 +#include <linux/security.h>
64407 #include <asm/uaccess.h>
64408 #include <linux/kernel_stat.h>
64409 #include <trace/events/timer.h>
64410 @@ -1697,7 +1698,7 @@ static long thread_cpu_nsleep_restart(st
64411
64412 static __init int init_posix_cpu_timers(void)
64413 {
64414 - struct k_clock process = {
64415 + static struct k_clock process = {
64416 .clock_getres = process_cpu_clock_getres,
64417 .clock_get = process_cpu_clock_get,
64418 .clock_set = do_posix_clock_nosettime,
64419 @@ -1705,7 +1706,7 @@ static __init int init_posix_cpu_timers(
64420 .nsleep = process_cpu_nsleep,
64421 .nsleep_restart = process_cpu_nsleep_restart,
64422 };
64423 - struct k_clock thread = {
64424 + static struct k_clock thread = {
64425 .clock_getres = thread_cpu_clock_getres,
64426 .clock_get = thread_cpu_clock_get,
64427 .clock_set = do_posix_clock_nosettime,
64428 diff -urNp linux-2.6.32.45/kernel/posix-timers.c linux-2.6.32.45/kernel/posix-timers.c
64429 --- linux-2.6.32.45/kernel/posix-timers.c 2011-03-27 14:31:47.000000000 -0400
64430 +++ linux-2.6.32.45/kernel/posix-timers.c 2011-08-23 20:22:38.000000000 -0400
64431 @@ -42,6 +42,7 @@
64432 #include <linux/compiler.h>
64433 #include <linux/idr.h>
64434 #include <linux/posix-timers.h>
64435 +#include <linux/grsecurity.h>
64436 #include <linux/syscalls.h>
64437 #include <linux/wait.h>
64438 #include <linux/workqueue.h>
64439 @@ -131,7 +132,7 @@ static DEFINE_SPINLOCK(idr_lock);
64440 * which we beg off on and pass to do_sys_settimeofday().
64441 */
64442
64443 -static struct k_clock posix_clocks[MAX_CLOCKS];
64444 +static struct k_clock *posix_clocks[MAX_CLOCKS];
64445
64446 /*
64447 * These ones are defined below.
64448 @@ -157,8 +158,8 @@ static inline void unlock_timer(struct k
64449 */
64450 #define CLOCK_DISPATCH(clock, call, arglist) \
64451 ((clock) < 0 ? posix_cpu_##call arglist : \
64452 - (posix_clocks[clock].call != NULL \
64453 - ? (*posix_clocks[clock].call) arglist : common_##call arglist))
64454 + (posix_clocks[clock]->call != NULL \
64455 + ? (*posix_clocks[clock]->call) arglist : common_##call arglist))
64456
64457 /*
64458 * Default clock hook functions when the struct k_clock passed
64459 @@ -172,7 +173,7 @@ static inline int common_clock_getres(co
64460 struct timespec *tp)
64461 {
64462 tp->tv_sec = 0;
64463 - tp->tv_nsec = posix_clocks[which_clock].res;
64464 + tp->tv_nsec = posix_clocks[which_clock]->res;
64465 return 0;
64466 }
64467
64468 @@ -217,9 +218,11 @@ static inline int invalid_clockid(const
64469 return 0;
64470 if ((unsigned) which_clock >= MAX_CLOCKS)
64471 return 1;
64472 - if (posix_clocks[which_clock].clock_getres != NULL)
64473 + if (posix_clocks[which_clock] == NULL)
64474 return 0;
64475 - if (posix_clocks[which_clock].res != 0)
64476 + if (posix_clocks[which_clock]->clock_getres != NULL)
64477 + return 0;
64478 + if (posix_clocks[which_clock]->res != 0)
64479 return 0;
64480 return 1;
64481 }
64482 @@ -266,29 +269,29 @@ int posix_get_coarse_res(const clockid_t
64483 */
64484 static __init int init_posix_timers(void)
64485 {
64486 - struct k_clock clock_realtime = {
64487 + static struct k_clock clock_realtime = {
64488 .clock_getres = hrtimer_get_res,
64489 };
64490 - struct k_clock clock_monotonic = {
64491 + static struct k_clock clock_monotonic = {
64492 .clock_getres = hrtimer_get_res,
64493 .clock_get = posix_ktime_get_ts,
64494 .clock_set = do_posix_clock_nosettime,
64495 };
64496 - struct k_clock clock_monotonic_raw = {
64497 + static struct k_clock clock_monotonic_raw = {
64498 .clock_getres = hrtimer_get_res,
64499 .clock_get = posix_get_monotonic_raw,
64500 .clock_set = do_posix_clock_nosettime,
64501 .timer_create = no_timer_create,
64502 .nsleep = no_nsleep,
64503 };
64504 - struct k_clock clock_realtime_coarse = {
64505 + static struct k_clock clock_realtime_coarse = {
64506 .clock_getres = posix_get_coarse_res,
64507 .clock_get = posix_get_realtime_coarse,
64508 .clock_set = do_posix_clock_nosettime,
64509 .timer_create = no_timer_create,
64510 .nsleep = no_nsleep,
64511 };
64512 - struct k_clock clock_monotonic_coarse = {
64513 + static struct k_clock clock_monotonic_coarse = {
64514 .clock_getres = posix_get_coarse_res,
64515 .clock_get = posix_get_monotonic_coarse,
64516 .clock_set = do_posix_clock_nosettime,
64517 @@ -296,6 +299,8 @@ static __init int init_posix_timers(void
64518 .nsleep = no_nsleep,
64519 };
64520
64521 + pax_track_stack();
64522 +
64523 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
64524 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
64525 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
64526 @@ -484,7 +489,7 @@ void register_posix_clock(const clockid_
64527 return;
64528 }
64529
64530 - posix_clocks[clock_id] = *new_clock;
64531 + posix_clocks[clock_id] = new_clock;
64532 }
64533 EXPORT_SYMBOL_GPL(register_posix_clock);
64534
64535 @@ -948,6 +953,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
64536 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
64537 return -EFAULT;
64538
64539 + /* only the CLOCK_REALTIME clock can be set, all other clocks
64540 + have their clock_set fptr set to a nosettime dummy function
64541 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
64542 + call common_clock_set, which calls do_sys_settimeofday, which
64543 + we hook
64544 + */
64545 +
64546 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
64547 }
64548
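
The posix-timers.c hunks above turn posix_clocks[] from an array of struct k_clock into an array of pointers, make the k_clock initializers static, and have register_posix_clock() store the caller's pointer instead of copying the structure. The sketch below is a small userspace model of that registration pattern, with illustrative names rather than the kernel's API; it shows why the registered objects need static storage duration once only a pointer is kept.

#include <stdio.h>

struct k_clock_model {
        const char *name;
        int (*clock_getres)(void);
};

#define MAX_CLOCKS_MODEL 4
static const struct k_clock_model *clock_table[MAX_CLOCKS_MODEL];

static void register_clock(int id, const struct k_clock_model *clk)
{
        if (id >= 0 && id < MAX_CLOCKS_MODEL)
                clock_table[id] = clk;          /* store the pointer, no struct copy */
}

static int realtime_getres(void) { return 1; }

int main(void)
{
        /* static, as in the patch: the stored pointer stays valid after init returns;
         * a plain automatic variable here would leave the table dangling */
        static const struct k_clock_model clock_realtime = {
                .name = "CLOCK_REALTIME",
                .clock_getres = realtime_getres,
        };

        register_clock(0, &clock_realtime);
        if (clock_table[0] && clock_table[0]->clock_getres)
                printf("%s res=%d\n", clock_table[0]->name,
                       clock_table[0]->clock_getres());
        return 0;
}
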
64549 diff -urNp linux-2.6.32.45/kernel/power/hibernate.c linux-2.6.32.45/kernel/power/hibernate.c
64550 --- linux-2.6.32.45/kernel/power/hibernate.c 2011-03-27 14:31:47.000000000 -0400
64551 +++ linux-2.6.32.45/kernel/power/hibernate.c 2011-04-17 15:56:46.000000000 -0400
64552 @@ -48,14 +48,14 @@ enum {
64553
64554 static int hibernation_mode = HIBERNATION_SHUTDOWN;
64555
64556 -static struct platform_hibernation_ops *hibernation_ops;
64557 +static const struct platform_hibernation_ops *hibernation_ops;
64558
64559 /**
64560 * hibernation_set_ops - set the global hibernate operations
64561 * @ops: the hibernation operations to use in subsequent hibernation transitions
64562 */
64563
64564 -void hibernation_set_ops(struct platform_hibernation_ops *ops)
64565 +void hibernation_set_ops(const struct platform_hibernation_ops *ops)
64566 {
64567 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
64568 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
64569 diff -urNp linux-2.6.32.45/kernel/power/poweroff.c linux-2.6.32.45/kernel/power/poweroff.c
64570 --- linux-2.6.32.45/kernel/power/poweroff.c 2011-03-27 14:31:47.000000000 -0400
64571 +++ linux-2.6.32.45/kernel/power/poweroff.c 2011-04-17 15:56:46.000000000 -0400
64572 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
64573 .enable_mask = SYSRQ_ENABLE_BOOT,
64574 };
64575
64576 -static int pm_sysrq_init(void)
64577 +static int __init pm_sysrq_init(void)
64578 {
64579 register_sysrq_key('o', &sysrq_poweroff_op);
64580 return 0;
64581 diff -urNp linux-2.6.32.45/kernel/power/process.c linux-2.6.32.45/kernel/power/process.c
64582 --- linux-2.6.32.45/kernel/power/process.c 2011-03-27 14:31:47.000000000 -0400
64583 +++ linux-2.6.32.45/kernel/power/process.c 2011-04-17 15:56:46.000000000 -0400
64584 @@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_
64585 struct timeval start, end;
64586 u64 elapsed_csecs64;
64587 unsigned int elapsed_csecs;
64588 + bool timedout = false;
64589
64590 do_gettimeofday(&start);
64591
64592 end_time = jiffies + TIMEOUT;
64593 do {
64594 todo = 0;
64595 + if (time_after(jiffies, end_time))
64596 + timedout = true;
64597 read_lock(&tasklist_lock);
64598 do_each_thread(g, p) {
64599 if (frozen(p) || !freezeable(p))
64600 @@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_
64601 * It is "frozen enough". If the task does wake
64602 * up, it will immediately call try_to_freeze.
64603 */
64604 - if (!task_is_stopped_or_traced(p) &&
64605 - !freezer_should_skip(p))
64606 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
64607 todo++;
64608 + if (timedout) {
64609 + printk(KERN_ERR "Task refusing to freeze:\n");
64610 + sched_show_task(p);
64611 + }
64612 + }
64613 } while_each_thread(g, p);
64614 read_unlock(&tasklist_lock);
64615 yield(); /* Yield is okay here */
64616 - if (time_after(jiffies, end_time))
64617 - break;
64618 - } while (todo);
64619 + } while (todo && !timedout);
64620
64621 do_gettimeofday(&end);
64622 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
64623 diff -urNp linux-2.6.32.45/kernel/power/suspend.c linux-2.6.32.45/kernel/power/suspend.c
64624 --- linux-2.6.32.45/kernel/power/suspend.c 2011-03-27 14:31:47.000000000 -0400
64625 +++ linux-2.6.32.45/kernel/power/suspend.c 2011-04-17 15:56:46.000000000 -0400
64626 @@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_M
64627 [PM_SUSPEND_MEM] = "mem",
64628 };
64629
64630 -static struct platform_suspend_ops *suspend_ops;
64631 +static const struct platform_suspend_ops *suspend_ops;
64632
64633 /**
64634 * suspend_set_ops - Set the global suspend method table.
64635 * @ops: Pointer to ops structure.
64636 */
64637 -void suspend_set_ops(struct platform_suspend_ops *ops)
64638 +void suspend_set_ops(const struct platform_suspend_ops *ops)
64639 {
64640 mutex_lock(&pm_mutex);
64641 suspend_ops = ops;
64642 diff -urNp linux-2.6.32.45/kernel/printk.c linux-2.6.32.45/kernel/printk.c
64643 --- linux-2.6.32.45/kernel/printk.c 2011-03-27 14:31:47.000000000 -0400
64644 +++ linux-2.6.32.45/kernel/printk.c 2011-04-17 15:56:46.000000000 -0400
64645 @@ -278,6 +278,11 @@ int do_syslog(int type, char __user *buf
64646 char c;
64647 int error = 0;
64648
64649 +#ifdef CONFIG_GRKERNSEC_DMESG
64650 + if (grsec_enable_dmesg && !capable(CAP_SYS_ADMIN))
64651 + return -EPERM;
64652 +#endif
64653 +
64654 error = security_syslog(type);
64655 if (error)
64656 return error;
64657 diff -urNp linux-2.6.32.45/kernel/profile.c linux-2.6.32.45/kernel/profile.c
64658 --- linux-2.6.32.45/kernel/profile.c 2011-03-27 14:31:47.000000000 -0400
64659 +++ linux-2.6.32.45/kernel/profile.c 2011-05-04 17:56:28.000000000 -0400
64660 @@ -39,7 +39,7 @@ struct profile_hit {
64661 /* Oprofile timer tick hook */
64662 static int (*timer_hook)(struct pt_regs *) __read_mostly;
64663
64664 -static atomic_t *prof_buffer;
64665 +static atomic_unchecked_t *prof_buffer;
64666 static unsigned long prof_len, prof_shift;
64667
64668 int prof_on __read_mostly;
64669 @@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
64670 hits[i].pc = 0;
64671 continue;
64672 }
64673 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
64674 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
64675 hits[i].hits = hits[i].pc = 0;
64676 }
64677 }
64678 @@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc,
64679 * Add the current hit(s) and flush the write-queue out
64680 * to the global buffer:
64681 */
64682 - atomic_add(nr_hits, &prof_buffer[pc]);
64683 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
64684 for (i = 0; i < NR_PROFILE_HIT; ++i) {
64685 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
64686 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
64687 hits[i].pc = hits[i].hits = 0;
64688 }
64689 out:
64690 @@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc,
64691 if (prof_on != type || !prof_buffer)
64692 return;
64693 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
64694 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
64695 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
64696 }
64697 #endif /* !CONFIG_SMP */
64698 EXPORT_SYMBOL_GPL(profile_hits);
64699 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
64700 return -EFAULT;
64701 buf++; p++; count--; read++;
64702 }
64703 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
64704 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
64705 if (copy_to_user(buf, (void *)pnt, count))
64706 return -EFAULT;
64707 read += count;
64708 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
64709 }
64710 #endif
64711 profile_discard_flip_buffers();
64712 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
64713 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
64714 return count;
64715 }
64716
64717 diff -urNp linux-2.6.32.45/kernel/ptrace.c linux-2.6.32.45/kernel/ptrace.c
64718 --- linux-2.6.32.45/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
64719 +++ linux-2.6.32.45/kernel/ptrace.c 2011-05-22 23:02:06.000000000 -0400
64720 @@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_stru
64721 return ret;
64722 }
64723
64724 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
64725 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
64726 + unsigned int log)
64727 {
64728 const struct cred *cred = current_cred(), *tcred;
64729
64730 @@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_stru
64731 cred->gid != tcred->egid ||
64732 cred->gid != tcred->sgid ||
64733 cred->gid != tcred->gid) &&
64734 - !capable(CAP_SYS_PTRACE)) {
64735 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
64736 + (log && !capable(CAP_SYS_PTRACE)))
64737 + ) {
64738 rcu_read_unlock();
64739 return -EPERM;
64740 }
64741 @@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_stru
64742 smp_rmb();
64743 if (task->mm)
64744 dumpable = get_dumpable(task->mm);
64745 - if (!dumpable && !capable(CAP_SYS_PTRACE))
64746 + if (!dumpable &&
64747 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
64748 + (log && !capable(CAP_SYS_PTRACE))))
64749 return -EPERM;
64750
64751 return security_ptrace_access_check(task, mode);
64752 @@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struc
64753 {
64754 int err;
64755 task_lock(task);
64756 - err = __ptrace_may_access(task, mode);
64757 + err = __ptrace_may_access(task, mode, 0);
64758 + task_unlock(task);
64759 + return !err;
64760 +}
64761 +
64762 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
64763 +{
64764 + int err;
64765 + task_lock(task);
64766 + err = __ptrace_may_access(task, mode, 1);
64767 task_unlock(task);
64768 return !err;
64769 }
64770 @@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *ta
64771 goto out;
64772
64773 task_lock(task);
64774 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
64775 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
64776 task_unlock(task);
64777 if (retval)
64778 goto unlock_creds;
64779 @@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *ta
64780 goto unlock_tasklist;
64781
64782 task->ptrace = PT_PTRACED;
64783 - if (capable(CAP_SYS_PTRACE))
64784 + if (capable_nolog(CAP_SYS_PTRACE))
64785 task->ptrace |= PT_PTRACE_CAP;
64786
64787 __ptrace_link(task, current);
64788 @@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *
64789 {
64790 int copied = 0;
64791
64792 + pax_track_stack();
64793 +
64794 while (len > 0) {
64795 char buf[128];
64796 int this_len, retval;
64797 @@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct
64798 {
64799 int copied = 0;
64800
64801 + pax_track_stack();
64802 +
64803 while (len > 0) {
64804 char buf[128];
64805 int this_len, retval;
64806 @@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *c
64807 int ret = -EIO;
64808 siginfo_t siginfo;
64809
64810 + pax_track_stack();
64811 +
64812 switch (request) {
64813 case PTRACE_PEEKTEXT:
64814 case PTRACE_PEEKDATA:
64815 @@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *c
64816 ret = ptrace_setoptions(child, data);
64817 break;
64818 case PTRACE_GETEVENTMSG:
64819 - ret = put_user(child->ptrace_message, (unsigned long __user *) data);
64820 + ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
64821 break;
64822
64823 case PTRACE_GETSIGINFO:
64824 ret = ptrace_getsiginfo(child, &siginfo);
64825 if (!ret)
64826 - ret = copy_siginfo_to_user((siginfo_t __user *) data,
64827 + ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
64828 &siginfo);
64829 break;
64830
64831 case PTRACE_SETSIGINFO:
64832 - if (copy_from_user(&siginfo, (siginfo_t __user *) data,
64833 + if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
64834 sizeof siginfo))
64835 ret = -EFAULT;
64836 else
64837 @@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
64838 goto out;
64839 }
64840
64841 + if (gr_handle_ptrace(child, request)) {
64842 + ret = -EPERM;
64843 + goto out_put_task_struct;
64844 + }
64845 +
64846 if (request == PTRACE_ATTACH) {
64847 ret = ptrace_attach(child);
64848 /*
64849 * Some architectures need to do book-keeping after
64850 * a ptrace attach.
64851 */
64852 - if (!ret)
64853 + if (!ret) {
64854 arch_ptrace_attach(child);
64855 + gr_audit_ptrace(child);
64856 + }
64857 goto out_put_task_struct;
64858 }
64859
64860 @@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_
64861 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
64862 if (copied != sizeof(tmp))
64863 return -EIO;
64864 - return put_user(tmp, (unsigned long __user *)data);
64865 + return put_user(tmp, (__force unsigned long __user *)data);
64866 }
64867
64868 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
64869 @@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_st
64870 siginfo_t siginfo;
64871 int ret;
64872
64873 + pax_track_stack();
64874 +
64875 switch (request) {
64876 case PTRACE_PEEKTEXT:
64877 case PTRACE_PEEKDATA:
64878 @@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat
64879 goto out;
64880 }
64881
64882 + if (gr_handle_ptrace(child, request)) {
64883 + ret = -EPERM;
64884 + goto out_put_task_struct;
64885 + }
64886 +
64887 if (request == PTRACE_ATTACH) {
64888 ret = ptrace_attach(child);
64889 /*
64890 * Some architectures need to do book-keeping after
64891 * a ptrace attach.
64892 */
64893 - if (!ret)
64894 + if (!ret) {
64895 arch_ptrace_attach(child);
64896 + gr_audit_ptrace(child);
64897 + }
64898 goto out_put_task_struct;
64899 }
64900
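
The ptrace.c hunks above give __ptrace_may_access() an extra log flag so that only selected call sites audit a denied CAP_SYS_PTRACE check (capable() versus grsecurity's capable_nolog()). What follows is a hedged userspace sketch of that call-site-selects-logging pattern, using made-up stand-ins for the capability helpers.

#include <stdbool.h>
#include <stdio.h>

static bool have_cap(void) { return false; }    /* pretend CAP_SYS_PTRACE is missing */

static bool capable_logged(void)
{
        if (have_cap())
                return true;
        fprintf(stderr, "audit: CAP_SYS_PTRACE check failed\n");  /* the audited flavour */
        return false;
}

static bool capable_quiet(void) { return have_cap(); }  /* capable_nolog() stand-in */

static int may_access(bool same_user, bool log)
{
        if (same_user)
                return 0;
        /* only call sites that pass log pay for the audit message on denial */
        if (log ? capable_logged() : capable_quiet())
                return 0;
        return -1;      /* -EPERM in the kernel */
}

int main(void)
{
        printf("quiet check:  %d\n", may_access(false, false));  /* no audit line */
        printf("logged check: %d\n", may_access(false, true));   /* emits audit line */
        return 0;
}
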
64901 diff -urNp linux-2.6.32.45/kernel/rcutorture.c linux-2.6.32.45/kernel/rcutorture.c
64902 --- linux-2.6.32.45/kernel/rcutorture.c 2011-03-27 14:31:47.000000000 -0400
64903 +++ linux-2.6.32.45/kernel/rcutorture.c 2011-05-04 17:56:28.000000000 -0400
64904 @@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
64905 { 0 };
64906 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
64907 { 0 };
64908 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
64909 -static atomic_t n_rcu_torture_alloc;
64910 -static atomic_t n_rcu_torture_alloc_fail;
64911 -static atomic_t n_rcu_torture_free;
64912 -static atomic_t n_rcu_torture_mberror;
64913 -static atomic_t n_rcu_torture_error;
64914 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
64915 +static atomic_unchecked_t n_rcu_torture_alloc;
64916 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
64917 +static atomic_unchecked_t n_rcu_torture_free;
64918 +static atomic_unchecked_t n_rcu_torture_mberror;
64919 +static atomic_unchecked_t n_rcu_torture_error;
64920 static long n_rcu_torture_timers;
64921 static struct list_head rcu_torture_removed;
64922 static cpumask_var_t shuffle_tmp_mask;
64923 @@ -187,11 +187,11 @@ rcu_torture_alloc(void)
64924
64925 spin_lock_bh(&rcu_torture_lock);
64926 if (list_empty(&rcu_torture_freelist)) {
64927 - atomic_inc(&n_rcu_torture_alloc_fail);
64928 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
64929 spin_unlock_bh(&rcu_torture_lock);
64930 return NULL;
64931 }
64932 - atomic_inc(&n_rcu_torture_alloc);
64933 + atomic_inc_unchecked(&n_rcu_torture_alloc);
64934 p = rcu_torture_freelist.next;
64935 list_del_init(p);
64936 spin_unlock_bh(&rcu_torture_lock);
64937 @@ -204,7 +204,7 @@ rcu_torture_alloc(void)
64938 static void
64939 rcu_torture_free(struct rcu_torture *p)
64940 {
64941 - atomic_inc(&n_rcu_torture_free);
64942 + atomic_inc_unchecked(&n_rcu_torture_free);
64943 spin_lock_bh(&rcu_torture_lock);
64944 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
64945 spin_unlock_bh(&rcu_torture_lock);
64946 @@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
64947 i = rp->rtort_pipe_count;
64948 if (i > RCU_TORTURE_PIPE_LEN)
64949 i = RCU_TORTURE_PIPE_LEN;
64950 - atomic_inc(&rcu_torture_wcount[i]);
64951 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
64952 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
64953 rp->rtort_mbtest = 0;
64954 rcu_torture_free(rp);
64955 @@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_fr
64956 i = rp->rtort_pipe_count;
64957 if (i > RCU_TORTURE_PIPE_LEN)
64958 i = RCU_TORTURE_PIPE_LEN;
64959 - atomic_inc(&rcu_torture_wcount[i]);
64960 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
64961 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
64962 rp->rtort_mbtest = 0;
64963 list_del(&rp->rtort_free);
64964 @@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
64965 i = old_rp->rtort_pipe_count;
64966 if (i > RCU_TORTURE_PIPE_LEN)
64967 i = RCU_TORTURE_PIPE_LEN;
64968 - atomic_inc(&rcu_torture_wcount[i]);
64969 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
64970 old_rp->rtort_pipe_count++;
64971 cur_ops->deferred_free(old_rp);
64972 }
64973 @@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned l
64974 return;
64975 }
64976 if (p->rtort_mbtest == 0)
64977 - atomic_inc(&n_rcu_torture_mberror);
64978 + atomic_inc_unchecked(&n_rcu_torture_mberror);
64979 spin_lock(&rand_lock);
64980 cur_ops->read_delay(&rand);
64981 n_rcu_torture_timers++;
64982 @@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
64983 continue;
64984 }
64985 if (p->rtort_mbtest == 0)
64986 - atomic_inc(&n_rcu_torture_mberror);
64987 + atomic_inc_unchecked(&n_rcu_torture_mberror);
64988 cur_ops->read_delay(&rand);
64989 preempt_disable();
64990 pipe_count = p->rtort_pipe_count;
64991 @@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
64992 rcu_torture_current,
64993 rcu_torture_current_version,
64994 list_empty(&rcu_torture_freelist),
64995 - atomic_read(&n_rcu_torture_alloc),
64996 - atomic_read(&n_rcu_torture_alloc_fail),
64997 - atomic_read(&n_rcu_torture_free),
64998 - atomic_read(&n_rcu_torture_mberror),
64999 + atomic_read_unchecked(&n_rcu_torture_alloc),
65000 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
65001 + atomic_read_unchecked(&n_rcu_torture_free),
65002 + atomic_read_unchecked(&n_rcu_torture_mberror),
65003 n_rcu_torture_timers);
65004 - if (atomic_read(&n_rcu_torture_mberror) != 0)
65005 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
65006 cnt += sprintf(&page[cnt], " !!!");
65007 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
65008 if (i > 1) {
65009 cnt += sprintf(&page[cnt], "!!! ");
65010 - atomic_inc(&n_rcu_torture_error);
65011 + atomic_inc_unchecked(&n_rcu_torture_error);
65012 WARN_ON_ONCE(1);
65013 }
65014 cnt += sprintf(&page[cnt], "Reader Pipe: ");
65015 @@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
65016 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
65017 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65018 cnt += sprintf(&page[cnt], " %d",
65019 - atomic_read(&rcu_torture_wcount[i]));
65020 + atomic_read_unchecked(&rcu_torture_wcount[i]));
65021 }
65022 cnt += sprintf(&page[cnt], "\n");
65023 if (cur_ops->stats)
65024 @@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
65025
65026 if (cur_ops->cleanup)
65027 cur_ops->cleanup();
65028 - if (atomic_read(&n_rcu_torture_error))
65029 + if (atomic_read_unchecked(&n_rcu_torture_error))
65030 rcu_torture_print_module_parms("End of test: FAILURE");
65031 else
65032 rcu_torture_print_module_parms("End of test: SUCCESS");
65033 @@ -1138,13 +1138,13 @@ rcu_torture_init(void)
65034
65035 rcu_torture_current = NULL;
65036 rcu_torture_current_version = 0;
65037 - atomic_set(&n_rcu_torture_alloc, 0);
65038 - atomic_set(&n_rcu_torture_alloc_fail, 0);
65039 - atomic_set(&n_rcu_torture_free, 0);
65040 - atomic_set(&n_rcu_torture_mberror, 0);
65041 - atomic_set(&n_rcu_torture_error, 0);
65042 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
65043 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
65044 + atomic_set_unchecked(&n_rcu_torture_free, 0);
65045 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
65046 + atomic_set_unchecked(&n_rcu_torture_error, 0);
65047 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
65048 - atomic_set(&rcu_torture_wcount[i], 0);
65049 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
65050 for_each_possible_cpu(cpu) {
65051 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65052 per_cpu(rcu_torture_count, cpu)[i] = 0;
65053 diff -urNp linux-2.6.32.45/kernel/rcutree.c linux-2.6.32.45/kernel/rcutree.c
65054 --- linux-2.6.32.45/kernel/rcutree.c 2011-03-27 14:31:47.000000000 -0400
65055 +++ linux-2.6.32.45/kernel/rcutree.c 2011-04-17 15:56:46.000000000 -0400
65056 @@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state
65057 /*
65058 * Do softirq processing for the current CPU.
65059 */
65060 -static void rcu_process_callbacks(struct softirq_action *unused)
65061 +static void rcu_process_callbacks(void)
65062 {
65063 /*
65064 * Memory references from any prior RCU read-side critical sections
65065 diff -urNp linux-2.6.32.45/kernel/rcutree_plugin.h linux-2.6.32.45/kernel/rcutree_plugin.h
65066 --- linux-2.6.32.45/kernel/rcutree_plugin.h 2011-03-27 14:31:47.000000000 -0400
65067 +++ linux-2.6.32.45/kernel/rcutree_plugin.h 2011-04-17 15:56:46.000000000 -0400
65068 @@ -145,7 +145,7 @@ static void rcu_preempt_note_context_swi
65069 */
65070 void __rcu_read_lock(void)
65071 {
65072 - ACCESS_ONCE(current->rcu_read_lock_nesting)++;
65073 + ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
65074 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
65075 }
65076 EXPORT_SYMBOL_GPL(__rcu_read_lock);
65077 @@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
65078 struct task_struct *t = current;
65079
65080 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
65081 - if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
65082 + if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
65083 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
65084 rcu_read_unlock_special(t);
65085 }
65086 diff -urNp linux-2.6.32.45/kernel/relay.c linux-2.6.32.45/kernel/relay.c
65087 --- linux-2.6.32.45/kernel/relay.c 2011-03-27 14:31:47.000000000 -0400
65088 +++ linux-2.6.32.45/kernel/relay.c 2011-05-16 21:46:57.000000000 -0400
65089 @@ -1222,7 +1222,7 @@ static int subbuf_splice_actor(struct fi
65090 unsigned int flags,
65091 int *nonpad_ret)
65092 {
65093 - unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
65094 + unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
65095 struct rchan_buf *rbuf = in->private_data;
65096 unsigned int subbuf_size = rbuf->chan->subbuf_size;
65097 uint64_t pos = (uint64_t) *ppos;
65098 @@ -1241,6 +1241,9 @@ static int subbuf_splice_actor(struct fi
65099 .ops = &relay_pipe_buf_ops,
65100 .spd_release = relay_page_release,
65101 };
65102 + ssize_t ret;
65103 +
65104 + pax_track_stack();
65105
65106 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
65107 return 0;
65108 diff -urNp linux-2.6.32.45/kernel/resource.c linux-2.6.32.45/kernel/resource.c
65109 --- linux-2.6.32.45/kernel/resource.c 2011-03-27 14:31:47.000000000 -0400
65110 +++ linux-2.6.32.45/kernel/resource.c 2011-04-17 15:56:46.000000000 -0400
65111 @@ -132,8 +132,18 @@ static const struct file_operations proc
65112
65113 static int __init ioresources_init(void)
65114 {
65115 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
65116 +#ifdef CONFIG_GRKERNSEC_PROC_USER
65117 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
65118 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
65119 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65120 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
65121 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
65122 +#endif
65123 +#else
65124 proc_create("ioports", 0, NULL, &proc_ioports_operations);
65125 proc_create("iomem", 0, NULL, &proc_iomem_operations);
65126 +#endif
65127 return 0;
65128 }
65129 __initcall(ioresources_init);
65130 diff -urNp linux-2.6.32.45/kernel/rtmutex.c linux-2.6.32.45/kernel/rtmutex.c
65131 --- linux-2.6.32.45/kernel/rtmutex.c 2011-03-27 14:31:47.000000000 -0400
65132 +++ linux-2.6.32.45/kernel/rtmutex.c 2011-04-17 15:56:46.000000000 -0400
65133 @@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt
65134 */
65135 spin_lock_irqsave(&pendowner->pi_lock, flags);
65136
65137 - WARN_ON(!pendowner->pi_blocked_on);
65138 + BUG_ON(!pendowner->pi_blocked_on);
65139 WARN_ON(pendowner->pi_blocked_on != waiter);
65140 WARN_ON(pendowner->pi_blocked_on->lock != lock);
65141
65142 diff -urNp linux-2.6.32.45/kernel/rtmutex-tester.c linux-2.6.32.45/kernel/rtmutex-tester.c
65143 --- linux-2.6.32.45/kernel/rtmutex-tester.c 2011-03-27 14:31:47.000000000 -0400
65144 +++ linux-2.6.32.45/kernel/rtmutex-tester.c 2011-05-04 17:56:28.000000000 -0400
65145 @@ -21,7 +21,7 @@
65146 #define MAX_RT_TEST_MUTEXES 8
65147
65148 static spinlock_t rttest_lock;
65149 -static atomic_t rttest_event;
65150 +static atomic_unchecked_t rttest_event;
65151
65152 struct test_thread_data {
65153 int opcode;
65154 @@ -64,7 +64,7 @@ static int handle_op(struct test_thread_
65155
65156 case RTTEST_LOCKCONT:
65157 td->mutexes[td->opdata] = 1;
65158 - td->event = atomic_add_return(1, &rttest_event);
65159 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65160 return 0;
65161
65162 case RTTEST_RESET:
65163 @@ -82,7 +82,7 @@ static int handle_op(struct test_thread_
65164 return 0;
65165
65166 case RTTEST_RESETEVENT:
65167 - atomic_set(&rttest_event, 0);
65168 + atomic_set_unchecked(&rttest_event, 0);
65169 return 0;
65170
65171 default:
65172 @@ -99,9 +99,9 @@ static int handle_op(struct test_thread_
65173 return ret;
65174
65175 td->mutexes[id] = 1;
65176 - td->event = atomic_add_return(1, &rttest_event);
65177 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65178 rt_mutex_lock(&mutexes[id]);
65179 - td->event = atomic_add_return(1, &rttest_event);
65180 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65181 td->mutexes[id] = 4;
65182 return 0;
65183
65184 @@ -112,9 +112,9 @@ static int handle_op(struct test_thread_
65185 return ret;
65186
65187 td->mutexes[id] = 1;
65188 - td->event = atomic_add_return(1, &rttest_event);
65189 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65190 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
65191 - td->event = atomic_add_return(1, &rttest_event);
65192 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65193 td->mutexes[id] = ret ? 0 : 4;
65194 return ret ? -EINTR : 0;
65195
65196 @@ -123,9 +123,9 @@ static int handle_op(struct test_thread_
65197 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
65198 return ret;
65199
65200 - td->event = atomic_add_return(1, &rttest_event);
65201 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65202 rt_mutex_unlock(&mutexes[id]);
65203 - td->event = atomic_add_return(1, &rttest_event);
65204 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65205 td->mutexes[id] = 0;
65206 return 0;
65207
65208 @@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mu
65209 break;
65210
65211 td->mutexes[dat] = 2;
65212 - td->event = atomic_add_return(1, &rttest_event);
65213 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65214 break;
65215
65216 case RTTEST_LOCKBKL:
65217 @@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mu
65218 return;
65219
65220 td->mutexes[dat] = 3;
65221 - td->event = atomic_add_return(1, &rttest_event);
65222 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65223 break;
65224
65225 case RTTEST_LOCKNOWAIT:
65226 @@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mu
65227 return;
65228
65229 td->mutexes[dat] = 1;
65230 - td->event = atomic_add_return(1, &rttest_event);
65231 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65232 return;
65233
65234 case RTTEST_LOCKBKL:
65235 diff -urNp linux-2.6.32.45/kernel/sched.c linux-2.6.32.45/kernel/sched.c
65236 --- linux-2.6.32.45/kernel/sched.c 2011-03-27 14:31:47.000000000 -0400
65237 +++ linux-2.6.32.45/kernel/sched.c 2011-08-21 19:29:25.000000000 -0400
65238 @@ -2764,9 +2764,10 @@ void wake_up_new_task(struct task_struct
65239 {
65240 unsigned long flags;
65241 struct rq *rq;
65242 - int cpu = get_cpu();
65243
65244 #ifdef CONFIG_SMP
65245 + int cpu = get_cpu();
65246 +
65247 rq = task_rq_lock(p, &flags);
65248 p->state = TASK_WAKING;
65249
65250 @@ -5043,7 +5044,7 @@ out:
65251 * In CONFIG_NO_HZ case, the idle load balance owner will do the
65252 * rebalancing for all the cpus for whom scheduler ticks are stopped.
65253 */
65254 -static void run_rebalance_domains(struct softirq_action *h)
65255 +static void run_rebalance_domains(void)
65256 {
65257 int this_cpu = smp_processor_id();
65258 struct rq *this_rq = cpu_rq(this_cpu);
65259 @@ -5700,6 +5701,8 @@ asmlinkage void __sched schedule(void)
65260 struct rq *rq;
65261 int cpu;
65262
65263 + pax_track_stack();
65264 +
65265 need_resched:
65266 preempt_disable();
65267 cpu = smp_processor_id();
65268 @@ -5770,7 +5773,7 @@ EXPORT_SYMBOL(schedule);
65269 * Look out! "owner" is an entirely speculative pointer
65270 * access and not reliable.
65271 */
65272 -int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
65273 +int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
65274 {
65275 unsigned int cpu;
65276 struct rq *rq;
65277 @@ -5784,10 +5787,10 @@ int mutex_spin_on_owner(struct mutex *lo
65278 * DEBUG_PAGEALLOC could have unmapped it if
65279 * the mutex owner just released it and exited.
65280 */
65281 - if (probe_kernel_address(&owner->cpu, cpu))
65282 + if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
65283 return 0;
65284 #else
65285 - cpu = owner->cpu;
65286 + cpu = task_thread_info(owner)->cpu;
65287 #endif
65288
65289 /*
65290 @@ -5816,7 +5819,7 @@ int mutex_spin_on_owner(struct mutex *lo
65291 /*
65292 * Is that owner really running on that cpu?
65293 */
65294 - if (task_thread_info(rq->curr) != owner || need_resched())
65295 + if (rq->curr != owner || need_resched())
65296 return 0;
65297
65298 cpu_relax();
65299 @@ -6359,6 +6362,8 @@ int can_nice(const struct task_struct *p
65300 /* convert nice value [19,-20] to rlimit style value [1,40] */
65301 int nice_rlim = 20 - nice;
65302
65303 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
65304 +
65305 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
65306 capable(CAP_SYS_NICE));
65307 }
65308 @@ -6392,7 +6397,8 @@ SYSCALL_DEFINE1(nice, int, increment)
65309 if (nice > 19)
65310 nice = 19;
65311
65312 - if (increment < 0 && !can_nice(current, nice))
65313 + if (increment < 0 && (!can_nice(current, nice) ||
65314 + gr_handle_chroot_nice()))
65315 return -EPERM;
65316
65317 retval = security_task_setnice(current, nice);
65318 @@ -8774,7 +8780,7 @@ static void init_sched_groups_power(int
65319 long power;
65320 int weight;
65321
65322 - WARN_ON(!sd || !sd->groups);
65323 + BUG_ON(!sd || !sd->groups);
65324
65325 if (cpu != group_first_cpu(sd->groups))
65326 return;
65327 diff -urNp linux-2.6.32.45/kernel/signal.c linux-2.6.32.45/kernel/signal.c
65328 --- linux-2.6.32.45/kernel/signal.c 2011-04-17 17:00:52.000000000 -0400
65329 +++ linux-2.6.32.45/kernel/signal.c 2011-08-16 21:15:58.000000000 -0400
65330 @@ -41,12 +41,12 @@
65331
65332 static struct kmem_cache *sigqueue_cachep;
65333
65334 -static void __user *sig_handler(struct task_struct *t, int sig)
65335 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
65336 {
65337 return t->sighand->action[sig - 1].sa.sa_handler;
65338 }
65339
65340 -static int sig_handler_ignored(void __user *handler, int sig)
65341 +static int sig_handler_ignored(__sighandler_t handler, int sig)
65342 {
65343 /* Is it explicitly or implicitly ignored? */
65344 return handler == SIG_IGN ||
65345 @@ -56,7 +56,7 @@ static int sig_handler_ignored(void __us
65346 static int sig_task_ignored(struct task_struct *t, int sig,
65347 int from_ancestor_ns)
65348 {
65349 - void __user *handler;
65350 + __sighandler_t handler;
65351
65352 handler = sig_handler(t, sig);
65353
65354 @@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc
65355 */
65356 user = get_uid(__task_cred(t)->user);
65357 atomic_inc(&user->sigpending);
65358 +
65359 + if (!override_rlimit)
65360 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
65361 if (override_rlimit ||
65362 atomic_read(&user->sigpending) <=
65363 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
65364 @@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct
65365
65366 int unhandled_signal(struct task_struct *tsk, int sig)
65367 {
65368 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
65369 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
65370 if (is_global_init(tsk))
65371 return 1;
65372 if (handler != SIG_IGN && handler != SIG_DFL)
65373 @@ -627,6 +630,13 @@ static int check_kill_permission(int sig
65374 }
65375 }
65376
65377 + /* allow glibc communication via tgkill to other threads in our
65378 + thread group */
65379 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
65380 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
65381 + && gr_handle_signal(t, sig))
65382 + return -EPERM;
65383 +
65384 return security_task_kill(t, info, sig, 0);
65385 }
65386
65387 @@ -968,7 +978,7 @@ __group_send_sig_info(int sig, struct si
65388 return send_signal(sig, info, p, 1);
65389 }
65390
65391 -static int
65392 +int
65393 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
65394 {
65395 return send_signal(sig, info, t, 0);
65396 @@ -1005,6 +1015,7 @@ force_sig_info(int sig, struct siginfo *
65397 unsigned long int flags;
65398 int ret, blocked, ignored;
65399 struct k_sigaction *action;
65400 + int is_unhandled = 0;
65401
65402 spin_lock_irqsave(&t->sighand->siglock, flags);
65403 action = &t->sighand->action[sig-1];
65404 @@ -1019,9 +1030,18 @@ force_sig_info(int sig, struct siginfo *
65405 }
65406 if (action->sa.sa_handler == SIG_DFL)
65407 t->signal->flags &= ~SIGNAL_UNKILLABLE;
65408 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
65409 + is_unhandled = 1;
65410 ret = specific_send_sig_info(sig, info, t);
65411 spin_unlock_irqrestore(&t->sighand->siglock, flags);
65412
65413 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
65414 + normal operation */
65415 + if (is_unhandled) {
65416 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
65417 + gr_handle_crash(t, sig);
65418 + }
65419 +
65420 return ret;
65421 }
65422
65423 @@ -1081,8 +1101,11 @@ int group_send_sig_info(int sig, struct
65424 {
65425 int ret = check_kill_permission(sig, info, p);
65426
65427 - if (!ret && sig)
65428 + if (!ret && sig) {
65429 ret = do_send_sig_info(sig, info, p, true);
65430 + if (!ret)
65431 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
65432 + }
65433
65434 return ret;
65435 }
65436 @@ -1644,6 +1667,8 @@ void ptrace_notify(int exit_code)
65437 {
65438 siginfo_t info;
65439
65440 + pax_track_stack();
65441 +
65442 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
65443
65444 memset(&info, 0, sizeof info);
65445 @@ -2275,7 +2300,15 @@ do_send_specific(pid_t tgid, pid_t pid,
65446 int error = -ESRCH;
65447
65448 rcu_read_lock();
65449 - p = find_task_by_vpid(pid);
65450 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
65451 + /* allow glibc communication via tgkill to other threads in our
65452 + thread group */
65453 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
65454 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
65455 + p = find_task_by_vpid_unrestricted(pid);
65456 + else
65457 +#endif
65458 + p = find_task_by_vpid(pid);
65459 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
65460 error = check_kill_permission(sig, info, p);
65461 /*
65462 diff -urNp linux-2.6.32.45/kernel/smp.c linux-2.6.32.45/kernel/smp.c
65463 --- linux-2.6.32.45/kernel/smp.c 2011-03-27 14:31:47.000000000 -0400
65464 +++ linux-2.6.32.45/kernel/smp.c 2011-04-17 15:56:46.000000000 -0400
65465 @@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void
65466 }
65467 EXPORT_SYMBOL(smp_call_function);
65468
65469 -void ipi_call_lock(void)
65470 +void ipi_call_lock(void) __acquires(call_function.lock)
65471 {
65472 spin_lock(&call_function.lock);
65473 }
65474
65475 -void ipi_call_unlock(void)
65476 +void ipi_call_unlock(void) __releases(call_function.lock)
65477 {
65478 spin_unlock(&call_function.lock);
65479 }
65480
65481 -void ipi_call_lock_irq(void)
65482 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
65483 {
65484 spin_lock_irq(&call_function.lock);
65485 }
65486
65487 -void ipi_call_unlock_irq(void)
65488 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
65489 {
65490 spin_unlock_irq(&call_function.lock);
65491 }
65492 diff -urNp linux-2.6.32.45/kernel/softirq.c linux-2.6.32.45/kernel/softirq.c
65493 --- linux-2.6.32.45/kernel/softirq.c 2011-03-27 14:31:47.000000000 -0400
65494 +++ linux-2.6.32.45/kernel/softirq.c 2011-08-05 20:33:55.000000000 -0400
65495 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
65496
65497 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
65498
65499 -char *softirq_to_name[NR_SOFTIRQS] = {
65500 +const char * const softirq_to_name[NR_SOFTIRQS] = {
65501 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
65502 "TASKLET", "SCHED", "HRTIMER", "RCU"
65503 };
65504 @@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
65505
65506 asmlinkage void __do_softirq(void)
65507 {
65508 - struct softirq_action *h;
65509 + const struct softirq_action *h;
65510 __u32 pending;
65511 int max_restart = MAX_SOFTIRQ_RESTART;
65512 int cpu;
65513 @@ -233,7 +233,7 @@ restart:
65514 kstat_incr_softirqs_this_cpu(h - softirq_vec);
65515
65516 trace_softirq_entry(h, softirq_vec);
65517 - h->action(h);
65518 + h->action();
65519 trace_softirq_exit(h, softirq_vec);
65520 if (unlikely(prev_count != preempt_count())) {
65521 printk(KERN_ERR "huh, entered softirq %td %s %p"
65522 @@ -363,9 +363,11 @@ void raise_softirq(unsigned int nr)
65523 local_irq_restore(flags);
65524 }
65525
65526 -void open_softirq(int nr, void (*action)(struct softirq_action *))
65527 +void open_softirq(int nr, void (*action)(void))
65528 {
65529 - softirq_vec[nr].action = action;
65530 + pax_open_kernel();
65531 + *(void **)&softirq_vec[nr].action = action;
65532 + pax_close_kernel();
65533 }
65534
65535 /*
65536 @@ -419,7 +421,7 @@ void __tasklet_hi_schedule_first(struct
65537
65538 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
65539
65540 -static void tasklet_action(struct softirq_action *a)
65541 +static void tasklet_action(void)
65542 {
65543 struct tasklet_struct *list;
65544
65545 @@ -454,7 +456,7 @@ static void tasklet_action(struct softir
65546 }
65547 }
65548
65549 -static void tasklet_hi_action(struct softirq_action *a)
65550 +static void tasklet_hi_action(void)
65551 {
65552 struct tasklet_struct *list;
65553
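
In the softirq.c hunks above, open_softirq() now performs its pointer store between pax_open_kernel() and pax_close_kernel(), which in PaX briefly lifts write protection on otherwise read-only kernel data; the handlers also drop their unused softirq_action argument. Below is a loose userspace analogy only, assuming mprotect() as a stand-in for the open/close pair and purely illustrative names: the handler table stays read-only except around the single registration store.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

typedef void (*softirq_fn)(void);

static softirq_fn *vec;        /* page-backed table, normally PROT_READ */
static long pagesz;

static void open_table(void)  { mprotect(vec, pagesz, PROT_READ | PROT_WRITE); }
static void close_table(void) { mprotect(vec, pagesz, PROT_READ); }

static void register_handler(int nr, softirq_fn fn)
{
        open_table();
        vec[nr] = fn;          /* the only write the table ever sees */
        close_table();
}

static void timer_handler(void) { puts("timer softirq (model)"); }

int main(void)
{
        pagesz = sysconf(_SC_PAGESIZE);
        vec = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (vec == MAP_FAILED)
                return 1;
        memset(vec, 0, pagesz);
        mprotect(vec, pagesz, PROT_READ);      /* table is read-only by default */

        register_handler(0, timer_handler);
        if (vec[0])
                vec[0]();
        return 0;
}
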
65554 diff -urNp linux-2.6.32.45/kernel/sys.c linux-2.6.32.45/kernel/sys.c
65555 --- linux-2.6.32.45/kernel/sys.c 2011-03-27 14:31:47.000000000 -0400
65556 +++ linux-2.6.32.45/kernel/sys.c 2011-08-11 19:51:54.000000000 -0400
65557 @@ -133,6 +133,12 @@ static int set_one_prio(struct task_stru
65558 error = -EACCES;
65559 goto out;
65560 }
65561 +
65562 + if (gr_handle_chroot_setpriority(p, niceval)) {
65563 + error = -EACCES;
65564 + goto out;
65565 + }
65566 +
65567 no_nice = security_task_setnice(p, niceval);
65568 if (no_nice) {
65569 error = no_nice;
65570 @@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which,
65571 !(user = find_user(who)))
65572 goto out_unlock; /* No processes for this user */
65573
65574 - do_each_thread(g, p)
65575 + do_each_thread(g, p) {
65576 if (__task_cred(p)->uid == who)
65577 error = set_one_prio(p, niceval, error);
65578 - while_each_thread(g, p);
65579 + } while_each_thread(g, p);
65580 if (who != cred->uid)
65581 free_uid(user); /* For find_user() */
65582 break;
65583 @@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which,
65584 !(user = find_user(who)))
65585 goto out_unlock; /* No processes for this user */
65586
65587 - do_each_thread(g, p)
65588 + do_each_thread(g, p) {
65589 if (__task_cred(p)->uid == who) {
65590 niceval = 20 - task_nice(p);
65591 if (niceval > retval)
65592 retval = niceval;
65593 }
65594 - while_each_thread(g, p);
65595 + } while_each_thread(g, p);
65596 if (who != cred->uid)
65597 free_uid(user); /* for find_user() */
65598 break;
65599 @@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
65600 goto error;
65601 }
65602
65603 + if (gr_check_group_change(new->gid, new->egid, -1))
65604 + goto error;
65605 +
65606 if (rgid != (gid_t) -1 ||
65607 (egid != (gid_t) -1 && egid != old->gid))
65608 new->sgid = new->egid;
65609 @@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
65610 goto error;
65611
65612 retval = -EPERM;
65613 +
65614 + if (gr_check_group_change(gid, gid, gid))
65615 + goto error;
65616 +
65617 if (capable(CAP_SETGID))
65618 new->gid = new->egid = new->sgid = new->fsgid = gid;
65619 else if (gid == old->gid || gid == old->sgid)
65620 @@ -567,12 +580,19 @@ static int set_user(struct cred *new)
65621 if (!new_user)
65622 return -EAGAIN;
65623
65624 + /*
65625 + * We don't fail in case of NPROC limit excess here because too many
65626 + * poorly written programs don't check set*uid() return code, assuming
65627 + * it never fails if called by root. We may still enforce NPROC limit
65628 + * for programs doing set*uid()+execve() by harmlessly deferring the
65629 + * failure to the execve() stage.
65630 + */
65631 if (atomic_read(&new_user->processes) >=
65632 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
65633 - new_user != INIT_USER) {
65634 - free_uid(new_user);
65635 - return -EAGAIN;
65636 - }
65637 + new_user != INIT_USER)
65638 + current->flags |= PF_NPROC_EXCEEDED;
65639 + else
65640 + current->flags &= ~PF_NPROC_EXCEEDED;
65641
65642 free_uid(new->user);
65643 new->user = new_user;
65644 @@ -627,6 +647,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
65645 goto error;
65646 }
65647
65648 + if (gr_check_user_change(new->uid, new->euid, -1))
65649 + goto error;
65650 +
65651 if (new->uid != old->uid) {
65652 retval = set_user(new);
65653 if (retval < 0)
65654 @@ -675,6 +698,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
65655 goto error;
65656
65657 retval = -EPERM;
65658 +
65659 + if (gr_check_crash_uid(uid))
65660 + goto error;
65661 + if (gr_check_user_change(uid, uid, uid))
65662 + goto error;
65663 +
65664 if (capable(CAP_SETUID)) {
65665 new->suid = new->uid = uid;
65666 if (uid != old->uid) {
65667 @@ -732,6 +761,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
65668 goto error;
65669 }
65670
65671 + if (gr_check_user_change(ruid, euid, -1))
65672 + goto error;
65673 +
65674 if (ruid != (uid_t) -1) {
65675 new->uid = ruid;
65676 if (ruid != old->uid) {
65677 @@ -800,6 +832,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
65678 goto error;
65679 }
65680
65681 + if (gr_check_group_change(rgid, egid, -1))
65682 + goto error;
65683 +
65684 if (rgid != (gid_t) -1)
65685 new->gid = rgid;
65686 if (egid != (gid_t) -1)
65687 @@ -849,6 +884,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
65688 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
65689 goto error;
65690
65691 + if (gr_check_user_change(-1, -1, uid))
65692 + goto error;
65693 +
65694 if (uid == old->uid || uid == old->euid ||
65695 uid == old->suid || uid == old->fsuid ||
65696 capable(CAP_SETUID)) {
65697 @@ -889,6 +927,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
65698 if (gid == old->gid || gid == old->egid ||
65699 gid == old->sgid || gid == old->fsgid ||
65700 capable(CAP_SETGID)) {
65701 + if (gr_check_group_change(-1, -1, gid))
65702 + goto error;
65703 +
65704 if (gid != old_fsgid) {
65705 new->fsgid = gid;
65706 goto change_okay;
65707 @@ -1454,7 +1495,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
65708 error = get_dumpable(me->mm);
65709 break;
65710 case PR_SET_DUMPABLE:
65711 - if (arg2 < 0 || arg2 > 1) {
65712 + if (arg2 > 1) {
65713 error = -EINVAL;
65714 break;
65715 }
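The set_user() hunk above no longer fails outright on an RLIMIT_NPROC excess; it only latches PF_NPROC_EXCEEDED, and its comment defers the real failure to execve(). That execve()-side check is not part of this hunk; a rough sketch of what it is assumed to look like, with the limit test mirroring the one in set_user():

    /* Assumed execve()-time recheck (not shown in this hunk): fail the
     * exec if the flag is set and the user is still over the limit. */
    if ((current->flags & PF_NPROC_EXCEEDED) &&
        atomic_read(&current_user()->processes) >=
        current->signal->rlim[RLIMIT_NPROC].rlim_cur)
            return -EAGAIN;
    current->flags &= ~PF_NPROC_EXCEEDED;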
65716 diff -urNp linux-2.6.32.45/kernel/sysctl.c linux-2.6.32.45/kernel/sysctl.c
65717 --- linux-2.6.32.45/kernel/sysctl.c 2011-03-27 14:31:47.000000000 -0400
65718 +++ linux-2.6.32.45/kernel/sysctl.c 2011-04-17 15:56:46.000000000 -0400
65719 @@ -63,6 +63,13 @@
65720 static int deprecated_sysctl_warning(struct __sysctl_args *args);
65721
65722 #if defined(CONFIG_SYSCTL)
65723 +#include <linux/grsecurity.h>
65724 +#include <linux/grinternal.h>
65725 +
65726 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
65727 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
65728 + const int op);
65729 +extern int gr_handle_chroot_sysctl(const int op);
65730
65731 /* External variables not in a header file. */
65732 extern int C_A_D;
65733 @@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_ta
65734 static int proc_taint(struct ctl_table *table, int write,
65735 void __user *buffer, size_t *lenp, loff_t *ppos);
65736 #endif
65737 +extern ctl_table grsecurity_table[];
65738
65739 static struct ctl_table root_table[];
65740 static struct ctl_table_root sysctl_table_root;
65741 @@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
65742 int sysctl_legacy_va_layout;
65743 #endif
65744
65745 +#ifdef CONFIG_PAX_SOFTMODE
65746 +static ctl_table pax_table[] = {
65747 + {
65748 + .ctl_name = CTL_UNNUMBERED,
65749 + .procname = "softmode",
65750 + .data = &pax_softmode,
65751 + .maxlen = sizeof(unsigned int),
65752 + .mode = 0600,
65753 + .proc_handler = &proc_dointvec,
65754 + },
65755 +
65756 + { .ctl_name = 0 }
65757 +};
65758 +#endif
65759 +
65760 extern int prove_locking;
65761 extern int lock_stat;
65762
65763 @@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = N
65764 #endif
65765
65766 static struct ctl_table kern_table[] = {
65767 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
65768 + {
65769 + .ctl_name = CTL_UNNUMBERED,
65770 + .procname = "grsecurity",
65771 + .mode = 0500,
65772 + .child = grsecurity_table,
65773 + },
65774 +#endif
65775 +
65776 +#ifdef CONFIG_PAX_SOFTMODE
65777 + {
65778 + .ctl_name = CTL_UNNUMBERED,
65779 + .procname = "pax",
65780 + .mode = 0500,
65781 + .child = pax_table,
65782 + },
65783 +#endif
65784 +
65785 {
65786 .ctl_name = CTL_UNNUMBERED,
65787 .procname = "sched_child_runs_first",
65788 @@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
65789 .data = &modprobe_path,
65790 .maxlen = KMOD_PATH_LEN,
65791 .mode = 0644,
65792 - .proc_handler = &proc_dostring,
65793 - .strategy = &sysctl_string,
65794 + .proc_handler = &proc_dostring_modpriv,
65795 + .strategy = &sysctl_string_modpriv,
65796 },
65797 {
65798 .ctl_name = CTL_UNNUMBERED,
65799 @@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
65800 .mode = 0644,
65801 .proc_handler = &proc_dointvec
65802 },
65803 + {
65804 + .procname = "heap_stack_gap",
65805 + .data = &sysctl_heap_stack_gap,
65806 + .maxlen = sizeof(sysctl_heap_stack_gap),
65807 + .mode = 0644,
65808 + .proc_handler = proc_doulongvec_minmax,
65809 + },
65810 #else
65811 {
65812 .ctl_name = CTL_UNNUMBERED,
65813 @@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl
65814 return 0;
65815 }
65816
65817 +static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
65818 +
65819 static int parse_table(int __user *name, int nlen,
65820 void __user *oldval, size_t __user *oldlenp,
65821 void __user *newval, size_t newlen,
65822 @@ -1821,7 +1871,7 @@ repeat:
65823 if (n == table->ctl_name) {
65824 int error;
65825 if (table->child) {
65826 - if (sysctl_perm(root, table, MAY_EXEC))
65827 + if (sysctl_perm_nochk(root, table, MAY_EXEC))
65828 return -EPERM;
65829 name++;
65830 nlen--;
65831 @@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *r
65832 int error;
65833 int mode;
65834
65835 + if (table->parent != NULL && table->parent->procname != NULL &&
65836 + table->procname != NULL &&
65837 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
65838 + return -EACCES;
65839 + if (gr_handle_chroot_sysctl(op))
65840 + return -EACCES;
65841 + error = gr_handle_sysctl(table, op);
65842 + if (error)
65843 + return error;
65844 +
65845 + error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
65846 + if (error)
65847 + return error;
65848 +
65849 + if (root->permissions)
65850 + mode = root->permissions(root, current->nsproxy, table);
65851 + else
65852 + mode = table->mode;
65853 +
65854 + return test_perm(mode, op);
65855 +}
65856 +
65857 +int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
65858 +{
65859 + int error;
65860 + int mode;
65861 +
65862 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
65863 if (error)
65864 return error;
65865 @@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *tabl
65866 buffer, lenp, ppos);
65867 }
65868
65869 +int proc_dostring_modpriv(struct ctl_table *table, int write,
65870 + void __user *buffer, size_t *lenp, loff_t *ppos)
65871 +{
65872 + if (write && !capable(CAP_SYS_MODULE))
65873 + return -EPERM;
65874 +
65875 + return _proc_do_string(table->data, table->maxlen, write,
65876 + buffer, lenp, ppos);
65877 +}
65878 +
65879
65880 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
65881 int *valp,
65882 @@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(v
65883 vleft = table->maxlen / sizeof(unsigned long);
65884 left = *lenp;
65885
65886 - for (; left && vleft--; i++, min++, max++, first=0) {
65887 + for (; left && vleft--; i++, first=0) {
65888 if (write) {
65889 while (left) {
65890 char c;
65891 @@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *tabl
65892 return -ENOSYS;
65893 }
65894
65895 +int proc_dostring_modpriv(struct ctl_table *table, int write,
65896 + void __user *buffer, size_t *lenp, loff_t *ppos)
65897 +{
65898 + return -ENOSYS;
65899 +}
65900 +
65901 int proc_dointvec(struct ctl_table *table, int write,
65902 void __user *buffer, size_t *lenp, loff_t *ppos)
65903 {
65904 @@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *tabl
65905 return 1;
65906 }
65907
65908 +int sysctl_string_modpriv(struct ctl_table *table,
65909 + void __user *oldval, size_t __user *oldlenp,
65910 + void __user *newval, size_t newlen)
65911 +{
65912 + if (newval && newlen && !capable(CAP_SYS_MODULE))
65913 + return -EPERM;
65914 +
65915 + return sysctl_string(table, oldval, oldlenp, newval, newlen);
65916 +}
65917 +
65918 /*
65919 * This function makes sure that all of the integers in the vector
65920 * are between the minimum and maximum values given in the arrays
65921 @@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *tabl
65922 return -ENOSYS;
65923 }
65924
65925 +int sysctl_string_modpriv(struct ctl_table *table,
65926 + void __user *oldval, size_t __user *oldlenp,
65927 + void __user *newval, size_t newlen)
65928 +{
65929 + return -ENOSYS;
65930 +}
65931 +
65932 int sysctl_intvec(struct ctl_table *table,
65933 void __user *oldval, size_t __user *oldlenp,
65934 void __user *newval, size_t newlen)
65935 @@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
65936 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
65937 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
65938 EXPORT_SYMBOL(proc_dostring);
65939 +EXPORT_SYMBOL(proc_dostring_modpriv);
65940 EXPORT_SYMBOL(proc_doulongvec_minmax);
65941 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
65942 EXPORT_SYMBOL(register_sysctl_table);
65943 @@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
65944 EXPORT_SYMBOL(sysctl_jiffies);
65945 EXPORT_SYMBOL(sysctl_ms_jiffies);
65946 EXPORT_SYMBOL(sysctl_string);
65947 +EXPORT_SYMBOL(sysctl_string_modpriv);
65948 EXPORT_SYMBOL(sysctl_data);
65949 EXPORT_SYMBOL(unregister_sysctl_table);
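Two user-visible effects of the kernel/sysctl.c changes above: grsecurity and PaX gain their own 0500 directories under /proc/sys, and kernel.modprobe is switched to the new *_modpriv handlers so that changing it requires CAP_SYS_MODULE on top of ordinary write permission. A small userspace probe of the latter; it rewrites the default value and reports EPERM when the capability is missing:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            const char path[] = "/sbin/modprobe\n";
            int fd = open("/proc/sys/kernel/modprobe", O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (write(fd, path, strlen(path)) < 0)
                    perror("write");        /* EPERM without CAP_SYS_MODULE */
            close(fd);
            return 0;
    }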
65950 diff -urNp linux-2.6.32.45/kernel/sysctl_check.c linux-2.6.32.45/kernel/sysctl_check.c
65951 --- linux-2.6.32.45/kernel/sysctl_check.c 2011-03-27 14:31:47.000000000 -0400
65952 +++ linux-2.6.32.45/kernel/sysctl_check.c 2011-04-17 15:56:46.000000000 -0400
65953 @@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *n
65954 } else {
65955 if ((table->strategy == sysctl_data) ||
65956 (table->strategy == sysctl_string) ||
65957 + (table->strategy == sysctl_string_modpriv) ||
65958 (table->strategy == sysctl_intvec) ||
65959 (table->strategy == sysctl_jiffies) ||
65960 (table->strategy == sysctl_ms_jiffies) ||
65961 (table->proc_handler == proc_dostring) ||
65962 + (table->proc_handler == proc_dostring_modpriv) ||
65963 (table->proc_handler == proc_dointvec) ||
65964 (table->proc_handler == proc_dointvec_minmax) ||
65965 (table->proc_handler == proc_dointvec_jiffies) ||
65966 diff -urNp linux-2.6.32.45/kernel/taskstats.c linux-2.6.32.45/kernel/taskstats.c
65967 --- linux-2.6.32.45/kernel/taskstats.c 2011-07-13 17:23:04.000000000 -0400
65968 +++ linux-2.6.32.45/kernel/taskstats.c 2011-07-13 17:23:19.000000000 -0400
65969 @@ -26,9 +26,12 @@
65970 #include <linux/cgroup.h>
65971 #include <linux/fs.h>
65972 #include <linux/file.h>
65973 +#include <linux/grsecurity.h>
65974 #include <net/genetlink.h>
65975 #include <asm/atomic.h>
65976
65977 +extern int gr_is_taskstats_denied(int pid);
65978 +
65979 /*
65980 * Maximum length of a cpumask that can be specified in
65981 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
65982 @@ -442,6 +445,9 @@ static int taskstats_user_cmd(struct sk_
65983 size_t size;
65984 cpumask_var_t mask;
65985
65986 + if (gr_is_taskstats_denied(current->pid))
65987 + return -EACCES;
65988 +
65989 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
65990 return -ENOMEM;
65991
65992 diff -urNp linux-2.6.32.45/kernel/time/tick-broadcast.c linux-2.6.32.45/kernel/time/tick-broadcast.c
65993 --- linux-2.6.32.45/kernel/time/tick-broadcast.c 2011-05-23 16:56:59.000000000 -0400
65994 +++ linux-2.6.32.45/kernel/time/tick-broadcast.c 2011-05-23 16:57:13.000000000 -0400
65995 @@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct cl
65996 * then clear the broadcast bit.
65997 */
65998 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
65999 - int cpu = smp_processor_id();
66000 + cpu = smp_processor_id();
66001
66002 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
66003 tick_broadcast_clear_oneshot(cpu);
66004 diff -urNp linux-2.6.32.45/kernel/time/timekeeping.c linux-2.6.32.45/kernel/time/timekeeping.c
66005 --- linux-2.6.32.45/kernel/time/timekeeping.c 2011-06-25 12:55:35.000000000 -0400
66006 +++ linux-2.6.32.45/kernel/time/timekeeping.c 2011-06-25 12:56:37.000000000 -0400
66007 @@ -14,6 +14,7 @@
66008 #include <linux/init.h>
66009 #include <linux/mm.h>
66010 #include <linux/sched.h>
66011 +#include <linux/grsecurity.h>
66012 #include <linux/sysdev.h>
66013 #include <linux/clocksource.h>
66014 #include <linux/jiffies.h>
66015 @@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
66016 */
66017 struct timespec ts = xtime;
66018 timespec_add_ns(&ts, nsec);
66019 - ACCESS_ONCE(xtime_cache) = ts;
66020 + ACCESS_ONCE_RW(xtime_cache) = ts;
66021 }
66022
66023 /* must hold xtime_lock */
66024 @@ -333,6 +334,8 @@ int do_settimeofday(struct timespec *tv)
66025 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
66026 return -EINVAL;
66027
66028 + gr_log_timechange();
66029 +
66030 write_seqlock_irqsave(&xtime_lock, flags);
66031
66032 timekeeping_forward_now();
66033 diff -urNp linux-2.6.32.45/kernel/time/timer_list.c linux-2.6.32.45/kernel/time/timer_list.c
66034 --- linux-2.6.32.45/kernel/time/timer_list.c 2011-03-27 14:31:47.000000000 -0400
66035 +++ linux-2.6.32.45/kernel/time/timer_list.c 2011-04-17 15:56:46.000000000 -0400
66036 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
66037
66038 static void print_name_offset(struct seq_file *m, void *sym)
66039 {
66040 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66041 + SEQ_printf(m, "<%p>", NULL);
66042 +#else
66043 char symname[KSYM_NAME_LEN];
66044
66045 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
66046 SEQ_printf(m, "<%p>", sym);
66047 else
66048 SEQ_printf(m, "%s", symname);
66049 +#endif
66050 }
66051
66052 static void
66053 @@ -112,7 +116,11 @@ next_one:
66054 static void
66055 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
66056 {
66057 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66058 + SEQ_printf(m, " .base: %p\n", NULL);
66059 +#else
66060 SEQ_printf(m, " .base: %p\n", base);
66061 +#endif
66062 SEQ_printf(m, " .index: %d\n",
66063 base->index);
66064 SEQ_printf(m, " .resolution: %Lu nsecs\n",
66065 @@ -289,7 +297,11 @@ static int __init init_timer_list_procfs
66066 {
66067 struct proc_dir_entry *pe;
66068
66069 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66070 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
66071 +#else
66072 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
66073 +#endif
66074 if (!pe)
66075 return -ENOMEM;
66076 return 0;
66077 diff -urNp linux-2.6.32.45/kernel/time/timer_stats.c linux-2.6.32.45/kernel/time/timer_stats.c
66078 --- linux-2.6.32.45/kernel/time/timer_stats.c 2011-03-27 14:31:47.000000000 -0400
66079 +++ linux-2.6.32.45/kernel/time/timer_stats.c 2011-05-04 17:56:28.000000000 -0400
66080 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
66081 static unsigned long nr_entries;
66082 static struct entry entries[MAX_ENTRIES];
66083
66084 -static atomic_t overflow_count;
66085 +static atomic_unchecked_t overflow_count;
66086
66087 /*
66088 * The entries are in a hash-table, for fast lookup:
66089 @@ -140,7 +140,7 @@ static void reset_entries(void)
66090 nr_entries = 0;
66091 memset(entries, 0, sizeof(entries));
66092 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
66093 - atomic_set(&overflow_count, 0);
66094 + atomic_set_unchecked(&overflow_count, 0);
66095 }
66096
66097 static struct entry *alloc_entry(void)
66098 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
66099 if (likely(entry))
66100 entry->count++;
66101 else
66102 - atomic_inc(&overflow_count);
66103 + atomic_inc_unchecked(&overflow_count);
66104
66105 out_unlock:
66106 spin_unlock_irqrestore(lock, flags);
66107 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
66108
66109 static void print_name_offset(struct seq_file *m, unsigned long addr)
66110 {
66111 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66112 + seq_printf(m, "<%p>", NULL);
66113 +#else
66114 char symname[KSYM_NAME_LEN];
66115
66116 if (lookup_symbol_name(addr, symname) < 0)
66117 seq_printf(m, "<%p>", (void *)addr);
66118 else
66119 seq_printf(m, "%s", symname);
66120 +#endif
66121 }
66122
66123 static int tstats_show(struct seq_file *m, void *v)
66124 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
66125
66126 seq_puts(m, "Timer Stats Version: v0.2\n");
66127 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
66128 - if (atomic_read(&overflow_count))
66129 + if (atomic_read_unchecked(&overflow_count))
66130 seq_printf(m, "Overflow: %d entries\n",
66131 - atomic_read(&overflow_count));
66132 + atomic_read_unchecked(&overflow_count));
66133
66134 for (i = 0; i < nr_entries; i++) {
66135 entry = entries + i;
66136 @@ -415,7 +419,11 @@ static int __init init_tstats_procfs(voi
66137 {
66138 struct proc_dir_entry *pe;
66139
66140 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66141 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
66142 +#else
66143 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
66144 +#endif
66145 if (!pe)
66146 return -ENOMEM;
66147 return 0;
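The timer_stats.c and timer_list.c hunks above follow two recurring patterns in this patch: statistics counters that may legitimately wrap are converted to atomic_unchecked_t with the matching *_unchecked operations (presumably so PaX reference-count overflow checking ignores them), and /proc debug files lose group/other read bits under GRKERNSEC_PROC_ADD. A sketch of the counter pattern; the type and helpers are defined elsewhere in the patch, and dropped_events/note_drop are illustrative names:

    /* Sketch: a wrap-tolerant statistics counter using the unchecked
     * atomic variants introduced by the patch. */
    static atomic_unchecked_t dropped_events;

    static void note_drop(void)
    {
            atomic_inc_unchecked(&dropped_events);  /* overflow is harmless here */
    }

    static unsigned int drops_so_far(void)
    {
            return atomic_read_unchecked(&dropped_events);
    }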
66148 diff -urNp linux-2.6.32.45/kernel/time.c linux-2.6.32.45/kernel/time.c
66149 --- linux-2.6.32.45/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
66150 +++ linux-2.6.32.45/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
66151 @@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec
66152 return error;
66153
66154 if (tz) {
66155 + /* we log in do_settimeofday called below, so don't log twice
66156 + */
66157 + if (!tv)
66158 + gr_log_timechange();
66159 +
66160 /* SMP safe, global irq locking makes it work. */
66161 sys_tz = *tz;
66162 update_vsyscall_tz();
66163 @@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
66164 * Avoid unnecessary multiplications/divisions in the
66165 * two most common HZ cases:
66166 */
66167 -unsigned int inline jiffies_to_msecs(const unsigned long j)
66168 +inline unsigned int jiffies_to_msecs(const unsigned long j)
66169 {
66170 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
66171 return (MSEC_PER_SEC / HZ) * j;
66172 @@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(con
66173 }
66174 EXPORT_SYMBOL(jiffies_to_msecs);
66175
66176 -unsigned int inline jiffies_to_usecs(const unsigned long j)
66177 +inline unsigned int jiffies_to_usecs(const unsigned long j)
66178 {
66179 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
66180 return (USEC_PER_SEC / HZ) * j;
66181 diff -urNp linux-2.6.32.45/kernel/timer.c linux-2.6.32.45/kernel/timer.c
66182 --- linux-2.6.32.45/kernel/timer.c 2011-03-27 14:31:47.000000000 -0400
66183 +++ linux-2.6.32.45/kernel/timer.c 2011-04-17 15:56:46.000000000 -0400
66184 @@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
66185 /*
66186 * This function runs timers and the timer-tq in bottom half context.
66187 */
66188 -static void run_timer_softirq(struct softirq_action *h)
66189 +static void run_timer_softirq(void)
66190 {
66191 struct tvec_base *base = __get_cpu_var(tvec_bases);
66192
66193 diff -urNp linux-2.6.32.45/kernel/trace/blktrace.c linux-2.6.32.45/kernel/trace/blktrace.c
66194 --- linux-2.6.32.45/kernel/trace/blktrace.c 2011-03-27 14:31:47.000000000 -0400
66195 +++ linux-2.6.32.45/kernel/trace/blktrace.c 2011-05-04 17:56:28.000000000 -0400
66196 @@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct f
66197 struct blk_trace *bt = filp->private_data;
66198 char buf[16];
66199
66200 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
66201 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
66202
66203 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
66204 }
66205 @@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(str
66206 return 1;
66207
66208 bt = buf->chan->private_data;
66209 - atomic_inc(&bt->dropped);
66210 + atomic_inc_unchecked(&bt->dropped);
66211 return 0;
66212 }
66213
66214 @@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_qu
66215
66216 bt->dir = dir;
66217 bt->dev = dev;
66218 - atomic_set(&bt->dropped, 0);
66219 + atomic_set_unchecked(&bt->dropped, 0);
66220
66221 ret = -EIO;
66222 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
66223 diff -urNp linux-2.6.32.45/kernel/trace/ftrace.c linux-2.6.32.45/kernel/trace/ftrace.c
66224 --- linux-2.6.32.45/kernel/trace/ftrace.c 2011-06-25 12:55:35.000000000 -0400
66225 +++ linux-2.6.32.45/kernel/trace/ftrace.c 2011-06-25 12:56:37.000000000 -0400
66226 @@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod,
66227
66228 ip = rec->ip;
66229
66230 + ret = ftrace_arch_code_modify_prepare();
66231 + FTRACE_WARN_ON(ret);
66232 + if (ret)
66233 + return 0;
66234 +
66235 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
66236 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
66237 if (ret) {
66238 ftrace_bug(ret, ip);
66239 rec->flags |= FTRACE_FL_FAILED;
66240 - return 0;
66241 }
66242 - return 1;
66243 + return ret ? 0 : 1;
66244 }
66245
66246 /*
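The ftrace_code_disable() hunk above brackets the nop patching with the arch code-modification hooks so the text write also works when kernel text is kept read-only. A condensed sketch of the resulting flow, with patch_one_site() as a placeholder for the surrounding function:

    /* Sketch: prepare -> patch -> post-process, failing cleanly if the
     * text cannot be opened for writing. */
    static int patch_one_site(struct module *mod, struct dyn_ftrace *rec)
    {
            int ret = ftrace_arch_code_modify_prepare();

            FTRACE_WARN_ON(ret);
            if (ret)
                    return ret;
            ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
            FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
            return ret;
    }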
66247 diff -urNp linux-2.6.32.45/kernel/trace/ring_buffer.c linux-2.6.32.45/kernel/trace/ring_buffer.c
66248 --- linux-2.6.32.45/kernel/trace/ring_buffer.c 2011-03-27 14:31:47.000000000 -0400
66249 +++ linux-2.6.32.45/kernel/trace/ring_buffer.c 2011-04-17 15:56:46.000000000 -0400
66250 @@ -606,7 +606,7 @@ static struct list_head *rb_list_head(st
66251 * the reader page). But if the next page is a header page,
66252 * its flags will be non zero.
66253 */
66254 -static int inline
66255 +static inline int
66256 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
66257 struct buffer_page *page, struct list_head *list)
66258 {
66259 diff -urNp linux-2.6.32.45/kernel/trace/trace.c linux-2.6.32.45/kernel/trace/trace.c
66260 --- linux-2.6.32.45/kernel/trace/trace.c 2011-03-27 14:31:47.000000000 -0400
66261 +++ linux-2.6.32.45/kernel/trace/trace.c 2011-05-16 21:46:57.000000000 -0400
66262 @@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(
66263 size_t rem;
66264 unsigned int i;
66265
66266 + pax_track_stack();
66267 +
66268 /* copy the tracer to avoid using a global lock all around */
66269 mutex_lock(&trace_types_lock);
66270 if (unlikely(old_tracer != current_trace && current_trace)) {
66271 @@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file
66272 int entries, size, i;
66273 size_t ret;
66274
66275 + pax_track_stack();
66276 +
66277 if (*ppos & (PAGE_SIZE - 1)) {
66278 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
66279 return -EINVAL;
66280 @@ -3816,10 +3820,9 @@ static const struct file_operations trac
66281 };
66282 #endif
66283
66284 -static struct dentry *d_tracer;
66285 -
66286 struct dentry *tracing_init_dentry(void)
66287 {
66288 + static struct dentry *d_tracer;
66289 static int once;
66290
66291 if (d_tracer)
66292 @@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
66293 return d_tracer;
66294 }
66295
66296 -static struct dentry *d_percpu;
66297 -
66298 struct dentry *tracing_dentry_percpu(void)
66299 {
66300 + static struct dentry *d_percpu;
66301 static int once;
66302 struct dentry *d_tracer;
66303
66304 diff -urNp linux-2.6.32.45/kernel/trace/trace_events.c linux-2.6.32.45/kernel/trace/trace_events.c
66305 --- linux-2.6.32.45/kernel/trace/trace_events.c 2011-03-27 14:31:47.000000000 -0400
66306 +++ linux-2.6.32.45/kernel/trace/trace_events.c 2011-08-05 20:33:55.000000000 -0400
66307 @@ -951,13 +951,10 @@ static LIST_HEAD(ftrace_module_file_list
66308 * Modules must own their file_operations to keep up with
66309 * reference counting.
66310 */
66311 +
66312 struct ftrace_module_file_ops {
66313 struct list_head list;
66314 struct module *mod;
66315 - struct file_operations id;
66316 - struct file_operations enable;
66317 - struct file_operations format;
66318 - struct file_operations filter;
66319 };
66320
66321 static void remove_subsystem_dir(const char *name)
66322 @@ -1004,17 +1001,12 @@ trace_create_file_ops(struct module *mod
66323
66324 file_ops->mod = mod;
66325
66326 - file_ops->id = ftrace_event_id_fops;
66327 - file_ops->id.owner = mod;
66328 -
66329 - file_ops->enable = ftrace_enable_fops;
66330 - file_ops->enable.owner = mod;
66331 -
66332 - file_ops->filter = ftrace_event_filter_fops;
66333 - file_ops->filter.owner = mod;
66334 -
66335 - file_ops->format = ftrace_event_format_fops;
66336 - file_ops->format.owner = mod;
66337 + pax_open_kernel();
66338 + *(void **)&mod->trace_id.owner = mod;
66339 + *(void **)&mod->trace_enable.owner = mod;
66340 + *(void **)&mod->trace_filter.owner = mod;
66341 + *(void **)&mod->trace_format.owner = mod;
66342 + pax_close_kernel();
66343
66344 list_add(&file_ops->list, &ftrace_module_file_list);
66345
66346 @@ -1063,8 +1055,8 @@ static void trace_module_add_events(stru
66347 call->mod = mod;
66348 list_add(&call->list, &ftrace_events);
66349 event_create_dir(call, d_events,
66350 - &file_ops->id, &file_ops->enable,
66351 - &file_ops->filter, &file_ops->format);
66352 + &mod->trace_id, &mod->trace_enable,
66353 + &mod->trace_filter, &mod->trace_format);
66354 }
66355 }
66356
66357 diff -urNp linux-2.6.32.45/kernel/trace/trace_mmiotrace.c linux-2.6.32.45/kernel/trace/trace_mmiotrace.c
66358 --- linux-2.6.32.45/kernel/trace/trace_mmiotrace.c 2011-03-27 14:31:47.000000000 -0400
66359 +++ linux-2.6.32.45/kernel/trace/trace_mmiotrace.c 2011-05-04 17:56:28.000000000 -0400
66360 @@ -23,7 +23,7 @@ struct header_iter {
66361 static struct trace_array *mmio_trace_array;
66362 static bool overrun_detected;
66363 static unsigned long prev_overruns;
66364 -static atomic_t dropped_count;
66365 +static atomic_unchecked_t dropped_count;
66366
66367 static void mmio_reset_data(struct trace_array *tr)
66368 {
66369 @@ -126,7 +126,7 @@ static void mmio_close(struct trace_iter
66370
66371 static unsigned long count_overruns(struct trace_iterator *iter)
66372 {
66373 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
66374 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
66375 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
66376
66377 if (over > prev_overruns)
66378 @@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct
66379 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
66380 sizeof(*entry), 0, pc);
66381 if (!event) {
66382 - atomic_inc(&dropped_count);
66383 + atomic_inc_unchecked(&dropped_count);
66384 return;
66385 }
66386 entry = ring_buffer_event_data(event);
66387 @@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct
66388 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
66389 sizeof(*entry), 0, pc);
66390 if (!event) {
66391 - atomic_inc(&dropped_count);
66392 + atomic_inc_unchecked(&dropped_count);
66393 return;
66394 }
66395 entry = ring_buffer_event_data(event);
66396 diff -urNp linux-2.6.32.45/kernel/trace/trace_output.c linux-2.6.32.45/kernel/trace/trace_output.c
66397 --- linux-2.6.32.45/kernel/trace/trace_output.c 2011-03-27 14:31:47.000000000 -0400
66398 +++ linux-2.6.32.45/kernel/trace/trace_output.c 2011-04-17 15:56:46.000000000 -0400
66399 @@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s,
66400 return 0;
66401 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
66402 if (!IS_ERR(p)) {
66403 - p = mangle_path(s->buffer + s->len, p, "\n");
66404 + p = mangle_path(s->buffer + s->len, p, "\n\\");
66405 if (p) {
66406 s->len = p - s->buffer;
66407 return 1;
66408 diff -urNp linux-2.6.32.45/kernel/trace/trace_stack.c linux-2.6.32.45/kernel/trace/trace_stack.c
66409 --- linux-2.6.32.45/kernel/trace/trace_stack.c 2011-03-27 14:31:47.000000000 -0400
66410 +++ linux-2.6.32.45/kernel/trace/trace_stack.c 2011-04-17 15:56:46.000000000 -0400
66411 @@ -50,7 +50,7 @@ static inline void check_stack(void)
66412 return;
66413
66414 /* we do not handle interrupt stacks yet */
66415 - if (!object_is_on_stack(&this_size))
66416 + if (!object_starts_on_stack(&this_size))
66417 return;
66418
66419 local_irq_save(flags);
66420 diff -urNp linux-2.6.32.45/kernel/trace/trace_workqueue.c linux-2.6.32.45/kernel/trace/trace_workqueue.c
66421 --- linux-2.6.32.45/kernel/trace/trace_workqueue.c 2011-03-27 14:31:47.000000000 -0400
66422 +++ linux-2.6.32.45/kernel/trace/trace_workqueue.c 2011-04-17 15:56:46.000000000 -0400
66423 @@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
66424 int cpu;
66425 pid_t pid;
66426 /* Can be inserted from interrupt or user context, need to be atomic */
66427 - atomic_t inserted;
66428 + atomic_unchecked_t inserted;
66429 /*
66430 * Don't need to be atomic, works are serialized in a single workqueue thread
66431 * on a single CPU.
66432 @@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_st
66433 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
66434 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
66435 if (node->pid == wq_thread->pid) {
66436 - atomic_inc(&node->inserted);
66437 + atomic_inc_unchecked(&node->inserted);
66438 goto found;
66439 }
66440 }
66441 @@ -205,7 +205,7 @@ static int workqueue_stat_show(struct se
66442 tsk = get_pid_task(pid, PIDTYPE_PID);
66443 if (tsk) {
66444 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
66445 - atomic_read(&cws->inserted), cws->executed,
66446 + atomic_read_unchecked(&cws->inserted), cws->executed,
66447 tsk->comm);
66448 put_task_struct(tsk);
66449 }
66450 diff -urNp linux-2.6.32.45/kernel/user.c linux-2.6.32.45/kernel/user.c
66451 --- linux-2.6.32.45/kernel/user.c 2011-03-27 14:31:47.000000000 -0400
66452 +++ linux-2.6.32.45/kernel/user.c 2011-04-17 15:56:46.000000000 -0400
66453 @@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct use
66454 spin_lock_irq(&uidhash_lock);
66455 up = uid_hash_find(uid, hashent);
66456 if (up) {
66457 + put_user_ns(ns);
66458 key_put(new->uid_keyring);
66459 key_put(new->session_keyring);
66460 kmem_cache_free(uid_cachep, new);
66461 diff -urNp linux-2.6.32.45/lib/bug.c linux-2.6.32.45/lib/bug.c
66462 --- linux-2.6.32.45/lib/bug.c 2011-03-27 14:31:47.000000000 -0400
66463 +++ linux-2.6.32.45/lib/bug.c 2011-04-17 15:56:46.000000000 -0400
66464 @@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned l
66465 return BUG_TRAP_TYPE_NONE;
66466
66467 bug = find_bug(bugaddr);
66468 + if (!bug)
66469 + return BUG_TRAP_TYPE_NONE;
66470
66471 printk(KERN_EMERG "------------[ cut here ]------------\n");
66472
66473 diff -urNp linux-2.6.32.45/lib/debugobjects.c linux-2.6.32.45/lib/debugobjects.c
66474 --- linux-2.6.32.45/lib/debugobjects.c 2011-07-13 17:23:04.000000000 -0400
66475 +++ linux-2.6.32.45/lib/debugobjects.c 2011-07-13 17:23:19.000000000 -0400
66476 @@ -277,7 +277,7 @@ static void debug_object_is_on_stack(voi
66477 if (limit > 4)
66478 return;
66479
66480 - is_on_stack = object_is_on_stack(addr);
66481 + is_on_stack = object_starts_on_stack(addr);
66482 if (is_on_stack == onstack)
66483 return;
66484
66485 diff -urNp linux-2.6.32.45/lib/dma-debug.c linux-2.6.32.45/lib/dma-debug.c
66486 --- linux-2.6.32.45/lib/dma-debug.c 2011-03-27 14:31:47.000000000 -0400
66487 +++ linux-2.6.32.45/lib/dma-debug.c 2011-04-17 15:56:46.000000000 -0400
66488 @@ -861,7 +861,7 @@ out:
66489
66490 static void check_for_stack(struct device *dev, void *addr)
66491 {
66492 - if (object_is_on_stack(addr))
66493 + if (object_starts_on_stack(addr))
66494 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
66495 "stack [addr=%p]\n", addr);
66496 }
66497 diff -urNp linux-2.6.32.45/lib/idr.c linux-2.6.32.45/lib/idr.c
66498 --- linux-2.6.32.45/lib/idr.c 2011-03-27 14:31:47.000000000 -0400
66499 +++ linux-2.6.32.45/lib/idr.c 2011-04-17 15:56:46.000000000 -0400
66500 @@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, in
66501 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
66502
66503 /* if already at the top layer, we need to grow */
66504 - if (id >= 1 << (idp->layers * IDR_BITS)) {
66505 + if (id >= (1 << (idp->layers * IDR_BITS))) {
66506 *starting_id = id;
66507 return IDR_NEED_TO_GROW;
66508 }
66509 diff -urNp linux-2.6.32.45/lib/inflate.c linux-2.6.32.45/lib/inflate.c
66510 --- linux-2.6.32.45/lib/inflate.c 2011-03-27 14:31:47.000000000 -0400
66511 +++ linux-2.6.32.45/lib/inflate.c 2011-04-17 15:56:46.000000000 -0400
66512 @@ -266,7 +266,7 @@ static void free(void *where)
66513 malloc_ptr = free_mem_ptr;
66514 }
66515 #else
66516 -#define malloc(a) kmalloc(a, GFP_KERNEL)
66517 +#define malloc(a) kmalloc((a), GFP_KERNEL)
66518 #define free(a) kfree(a)
66519 #endif
66520
66521 diff -urNp linux-2.6.32.45/lib/Kconfig.debug linux-2.6.32.45/lib/Kconfig.debug
66522 --- linux-2.6.32.45/lib/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
66523 +++ linux-2.6.32.45/lib/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
66524 @@ -905,7 +905,7 @@ config LATENCYTOP
66525 select STACKTRACE
66526 select SCHEDSTATS
66527 select SCHED_DEBUG
66528 - depends on HAVE_LATENCYTOP_SUPPORT
66529 + depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
66530 help
66531 Enable this option if you want to use the LatencyTOP tool
66532 to find out which userspace is blocking on what kernel operations.
66533 diff -urNp linux-2.6.32.45/lib/kobject.c linux-2.6.32.45/lib/kobject.c
66534 --- linux-2.6.32.45/lib/kobject.c 2011-03-27 14:31:47.000000000 -0400
66535 +++ linux-2.6.32.45/lib/kobject.c 2011-04-17 15:56:46.000000000 -0400
66536 @@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct ko
66537 return ret;
66538 }
66539
66540 -struct sysfs_ops kobj_sysfs_ops = {
66541 +const struct sysfs_ops kobj_sysfs_ops = {
66542 .show = kobj_attr_show,
66543 .store = kobj_attr_store,
66544 };
66545 @@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
66546 * If the kset was not able to be created, NULL will be returned.
66547 */
66548 static struct kset *kset_create(const char *name,
66549 - struct kset_uevent_ops *uevent_ops,
66550 + const struct kset_uevent_ops *uevent_ops,
66551 struct kobject *parent_kobj)
66552 {
66553 struct kset *kset;
66554 @@ -832,7 +832,7 @@ static struct kset *kset_create(const ch
66555 * If the kset was not able to be created, NULL will be returned.
66556 */
66557 struct kset *kset_create_and_add(const char *name,
66558 - struct kset_uevent_ops *uevent_ops,
66559 + const struct kset_uevent_ops *uevent_ops,
66560 struct kobject *parent_kobj)
66561 {
66562 struct kset *kset;
66563 diff -urNp linux-2.6.32.45/lib/kobject_uevent.c linux-2.6.32.45/lib/kobject_uevent.c
66564 --- linux-2.6.32.45/lib/kobject_uevent.c 2011-03-27 14:31:47.000000000 -0400
66565 +++ linux-2.6.32.45/lib/kobject_uevent.c 2011-04-17 15:56:46.000000000 -0400
66566 @@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *k
66567 const char *subsystem;
66568 struct kobject *top_kobj;
66569 struct kset *kset;
66570 - struct kset_uevent_ops *uevent_ops;
66571 + const struct kset_uevent_ops *uevent_ops;
66572 u64 seq;
66573 int i = 0;
66574 int retval = 0;
66575 diff -urNp linux-2.6.32.45/lib/kref.c linux-2.6.32.45/lib/kref.c
66576 --- linux-2.6.32.45/lib/kref.c 2011-03-27 14:31:47.000000000 -0400
66577 +++ linux-2.6.32.45/lib/kref.c 2011-04-17 15:56:46.000000000 -0400
66578 @@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
66579 */
66580 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
66581 {
66582 - WARN_ON(release == NULL);
66583 + BUG_ON(release == NULL);
66584 WARN_ON(release == (void (*)(struct kref *))kfree);
66585
66586 if (atomic_dec_and_test(&kref->refcount)) {
66587 diff -urNp linux-2.6.32.45/lib/parser.c linux-2.6.32.45/lib/parser.c
66588 --- linux-2.6.32.45/lib/parser.c 2011-03-27 14:31:47.000000000 -0400
66589 +++ linux-2.6.32.45/lib/parser.c 2011-04-17 15:56:46.000000000 -0400
66590 @@ -126,7 +126,7 @@ static int match_number(substring_t *s,
66591 char *buf;
66592 int ret;
66593
66594 - buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
66595 + buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
66596 if (!buf)
66597 return -ENOMEM;
66598 memcpy(buf, s->from, s->to - s->from);
66599 diff -urNp linux-2.6.32.45/lib/radix-tree.c linux-2.6.32.45/lib/radix-tree.c
66600 --- linux-2.6.32.45/lib/radix-tree.c 2011-03-27 14:31:47.000000000 -0400
66601 +++ linux-2.6.32.45/lib/radix-tree.c 2011-04-17 15:56:46.000000000 -0400
66602 @@ -81,7 +81,7 @@ struct radix_tree_preload {
66603 int nr;
66604 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
66605 };
66606 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
66607 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
66608
66609 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
66610 {
66611 diff -urNp linux-2.6.32.45/lib/random32.c linux-2.6.32.45/lib/random32.c
66612 --- linux-2.6.32.45/lib/random32.c 2011-03-27 14:31:47.000000000 -0400
66613 +++ linux-2.6.32.45/lib/random32.c 2011-04-17 15:56:46.000000000 -0400
66614 @@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *
66615 */
66616 static inline u32 __seed(u32 x, u32 m)
66617 {
66618 - return (x < m) ? x + m : x;
66619 + return (x <= m) ? x + m + 1 : x;
66620 }
66621
66622 /**
66623 diff -urNp linux-2.6.32.45/lib/vsprintf.c linux-2.6.32.45/lib/vsprintf.c
66624 --- linux-2.6.32.45/lib/vsprintf.c 2011-03-27 14:31:47.000000000 -0400
66625 +++ linux-2.6.32.45/lib/vsprintf.c 2011-04-17 15:56:46.000000000 -0400
66626 @@ -16,6 +16,9 @@
66627 * - scnprintf and vscnprintf
66628 */
66629
66630 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66631 +#define __INCLUDED_BY_HIDESYM 1
66632 +#endif
66633 #include <stdarg.h>
66634 #include <linux/module.h>
66635 #include <linux/types.h>
66636 @@ -546,12 +549,12 @@ static char *number(char *buf, char *end
66637 return buf;
66638 }
66639
66640 -static char *string(char *buf, char *end, char *s, struct printf_spec spec)
66641 +static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
66642 {
66643 int len, i;
66644
66645 if ((unsigned long)s < PAGE_SIZE)
66646 - s = "<NULL>";
66647 + s = "(null)";
66648
66649 len = strnlen(s, spec.precision);
66650
66651 @@ -581,7 +584,7 @@ static char *symbol_string(char *buf, ch
66652 unsigned long value = (unsigned long) ptr;
66653 #ifdef CONFIG_KALLSYMS
66654 char sym[KSYM_SYMBOL_LEN];
66655 - if (ext != 'f' && ext != 's')
66656 + if (ext != 'f' && ext != 's' && ext != 'a')
66657 sprint_symbol(sym, value);
66658 else
66659 kallsyms_lookup(value, NULL, NULL, NULL, sym);
66660 @@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf,
66661 * - 'f' For simple symbolic function names without offset
66662 * - 'S' For symbolic direct pointers with offset
66663 * - 's' For symbolic direct pointers without offset
66664 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
66665 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
66666 * - 'R' For a struct resource pointer, it prints the range of
66667 * addresses (not the name nor the flags)
66668 * - 'M' For a 6-byte MAC address, it prints the address in the
66669 @@ -822,7 +827,7 @@ static char *pointer(const char *fmt, ch
66670 struct printf_spec spec)
66671 {
66672 if (!ptr)
66673 - return string(buf, end, "(null)", spec);
66674 + return string(buf, end, "(nil)", spec);
66675
66676 switch (*fmt) {
66677 case 'F':
66678 @@ -831,6 +836,14 @@ static char *pointer(const char *fmt, ch
66679 case 's':
66680 /* Fallthrough */
66681 case 'S':
66682 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66683 + break;
66684 +#else
66685 + return symbol_string(buf, end, ptr, spec, *fmt);
66686 +#endif
66687 + case 'a':
66688 + /* Fallthrough */
66689 + case 'A':
66690 return symbol_string(buf, end, ptr, spec, *fmt);
66691 case 'R':
66692 return resource_string(buf, end, ptr, spec);
66693 @@ -1445,7 +1458,7 @@ do { \
66694 size_t len;
66695 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
66696 || (unsigned long)save_str < PAGE_SIZE)
66697 - save_str = "<NULL>";
66698 + save_str = "(null)";
66699 len = strlen(save_str);
66700 if (str + len + 1 < end)
66701 memcpy(str, save_str, len + 1);
66702 @@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size,
66703 typeof(type) value; \
66704 if (sizeof(type) == 8) { \
66705 args = PTR_ALIGN(args, sizeof(u32)); \
66706 - *(u32 *)&value = *(u32 *)args; \
66707 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
66708 + *(u32 *)&value = *(const u32 *)args; \
66709 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
66710 } else { \
66711 args = PTR_ALIGN(args, sizeof(type)); \
66712 - value = *(typeof(type) *)args; \
66713 + value = *(const typeof(type) *)args; \
66714 } \
66715 args += sizeof(type); \
66716 value; \
66717 @@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size,
66718 const char *str_arg = args;
66719 size_t len = strlen(str_arg);
66720 args += len + 1;
66721 - str = string(str, end, (char *)str_arg, spec);
66722 + str = string(str, end, str_arg, spec);
66723 break;
66724 }
66725
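With the lib/vsprintf.c hunk above, CONFIG_GRKERNSEC_HIDESYM stops the 'S'/'s' pointer extensions from resolving symbol names, while the new 'A'/'a' extensions always do; callers that are still meant to expose names switch to them, as kmemleak's %pS to %pA change elsewhere in this patch shows. A one-function kernel-side sketch of picking the new specifier:

    #include <linux/seq_file.h>

    /* Sketch: a debug interface that should keep printing symbol names
     * even with GRKERNSEC_HIDESYM enabled uses %pA instead of %pS. */
    static void report_handler(struct seq_file *m, void *fn)
    {
            seq_printf(m, "handler: %pA\n", fn);
    }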
66726 diff -urNp linux-2.6.32.45/localversion-grsec linux-2.6.32.45/localversion-grsec
66727 --- linux-2.6.32.45/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
66728 +++ linux-2.6.32.45/localversion-grsec 2011-04-17 15:56:46.000000000 -0400
66729 @@ -0,0 +1 @@
66730 +-grsec
66731 diff -urNp linux-2.6.32.45/Makefile linux-2.6.32.45/Makefile
66732 --- linux-2.6.32.45/Makefile 2011-08-16 20:37:25.000000000 -0400
66733 +++ linux-2.6.32.45/Makefile 2011-08-23 21:19:01.000000000 -0400
66734 @@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
66735
66736 HOSTCC = gcc
66737 HOSTCXX = g++
66738 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
66739 -HOSTCXXFLAGS = -O2
66740 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
66741 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
66742 +HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
66743
66744 # Decide whether to build built-in, modular, or both.
66745 # Normally, just do built-in.
66746 @@ -342,10 +343,12 @@ LINUXINCLUDE := -Iinclude \
66747 KBUILD_CPPFLAGS := -D__KERNEL__
66748
66749 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
66750 + -W -Wno-unused-parameter -Wno-missing-field-initializers \
66751 -fno-strict-aliasing -fno-common \
66752 -Werror-implicit-function-declaration \
66753 -Wno-format-security \
66754 -fno-delete-null-pointer-checks
66755 +KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
66756 KBUILD_AFLAGS := -D__ASSEMBLY__
66757
66758 # Read KERNELRELEASE from include/config/kernel.release (if it exists)
66759 @@ -376,9 +379,10 @@ export RCS_TAR_IGNORE := --exclude SCCS
66760 # Rules shared between *config targets and build targets
66761
66762 # Basic helpers built in scripts/
66763 -PHONY += scripts_basic
66764 -scripts_basic:
66765 +PHONY += scripts_basic0 scripts_basic gcc-plugins
66766 +scripts_basic0:
66767 $(Q)$(MAKE) $(build)=scripts/basic
66768 +scripts_basic: scripts_basic0 gcc-plugins
66769
66770 # To avoid any implicit rule to kick in, define an empty command.
66771 scripts/basic/%: scripts_basic ;
66772 @@ -403,7 +407,7 @@ endif
66773 # of make so .config is not included in this case either (for *config).
66774
66775 no-dot-config-targets := clean mrproper distclean \
66776 - cscope TAGS tags help %docs check% \
66777 + cscope gtags TAGS tags help %docs check% \
66778 include/linux/version.h headers_% \
66779 kernelrelease kernelversion
66780
66781 @@ -526,6 +530,25 @@ else
66782 KBUILD_CFLAGS += -O2
66783 endif
66784
66785 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh $(HOSTCC)), y)
66786 +CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so
66787 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
66788 +STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
66789 +endif
66790 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
66791 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN
66792 +gcc-plugins:
66793 + $(Q)$(MAKE) $(build)=tools/gcc
66794 +else
66795 +gcc-plugins:
66796 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
66797 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.))
66798 +else
66799 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
66800 +endif
66801 + $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
66802 +endif
66803 +
66804 include $(srctree)/arch/$(SRCARCH)/Makefile
66805
66806 ifneq ($(CONFIG_FRAME_WARN),0)
66807 @@ -644,7 +667,7 @@ export mod_strip_cmd
66808
66809
66810 ifeq ($(KBUILD_EXTMOD),)
66811 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
66812 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
66813
66814 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
66815 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
66816 @@ -970,7 +993,7 @@ ifneq ($(KBUILD_SRC),)
66817 endif
66818
66819 # prepare2 creates a makefile if using a separate output directory
66820 -prepare2: prepare3 outputmakefile
66821 +prepare2: prepare3 outputmakefile gcc-plugins
66822
66823 prepare1: prepare2 include/linux/version.h include/linux/utsrelease.h \
66824 include/asm include/config/auto.conf
66825 @@ -1198,7 +1221,7 @@ MRPROPER_FILES += .config .config.old in
66826 include/linux/autoconf.h include/linux/version.h \
66827 include/linux/utsrelease.h \
66828 include/linux/bounds.h include/asm*/asm-offsets.h \
66829 - Module.symvers Module.markers tags TAGS cscope*
66830 + Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
66831
66832 # clean - Delete most, but leave enough to build external modules
66833 #
66834 @@ -1242,7 +1265,7 @@ distclean: mrproper
66835 @find $(srctree) $(RCS_FIND_IGNORE) \
66836 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
66837 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
66838 - -o -name '.*.rej' -o -size 0 \
66839 + -o -name '.*.rej' -o -size 0 -o -name '*.so' \
66840 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
66841 -type f -print | xargs rm -f
66842
66843 @@ -1289,6 +1312,7 @@ help:
66844 @echo ' modules_prepare - Set up for building external modules'
66845 @echo ' tags/TAGS - Generate tags file for editors'
66846 @echo ' cscope - Generate cscope index'
66847 + @echo ' gtags - Generate GNU GLOBAL index'
66848 @echo ' kernelrelease - Output the release version string'
66849 @echo ' kernelversion - Output the version stored in Makefile'
66850 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
66851 @@ -1421,7 +1445,7 @@ clean: $(clean-dirs)
66852 $(call cmd,rmdirs)
66853 $(call cmd,rmfiles)
66854 @find $(KBUILD_EXTMOD) $(RCS_FIND_IGNORE) \
66855 - \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
66856 + \( -name '*.[oas]' -o -name '*.[ks]o' -o -name '.*.cmd' \
66857 -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
66858 -o -name '*.gcno' \) -type f -print | xargs rm -f
66859
66860 @@ -1445,7 +1469,7 @@ endif # KBUILD_EXTMOD
66861 quiet_cmd_tags = GEN $@
66862 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
66863
66864 -tags TAGS cscope: FORCE
66865 +tags TAGS cscope gtags: FORCE
66866 $(call cmd,tags)
66867
66868 # Scripts to check various things for consistency
66869 diff -urNp linux-2.6.32.45/mm/backing-dev.c linux-2.6.32.45/mm/backing-dev.c
66870 --- linux-2.6.32.45/mm/backing-dev.c 2011-03-27 14:31:47.000000000 -0400
66871 +++ linux-2.6.32.45/mm/backing-dev.c 2011-08-11 19:48:17.000000000 -0400
66872 @@ -272,7 +272,7 @@ static void bdi_task_init(struct backing
66873 list_add_tail_rcu(&wb->list, &bdi->wb_list);
66874 spin_unlock(&bdi->wb_lock);
66875
66876 - tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
66877 + tsk->flags |= PF_SWAPWRITE;
66878 set_freezable();
66879
66880 /*
66881 @@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rc
66882 * Add the default flusher task that gets created for any bdi
66883 * that has dirty data pending writeout
66884 */
66885 -void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
66886 +static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
66887 {
66888 if (!bdi_cap_writeback_dirty(bdi))
66889 return;
66890 diff -urNp linux-2.6.32.45/mm/filemap.c linux-2.6.32.45/mm/filemap.c
66891 --- linux-2.6.32.45/mm/filemap.c 2011-03-27 14:31:47.000000000 -0400
66892 +++ linux-2.6.32.45/mm/filemap.c 2011-04-17 15:56:46.000000000 -0400
66893 @@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file
66894 struct address_space *mapping = file->f_mapping;
66895
66896 if (!mapping->a_ops->readpage)
66897 - return -ENOEXEC;
66898 + return -ENODEV;
66899 file_accessed(file);
66900 vma->vm_ops = &generic_file_vm_ops;
66901 vma->vm_flags |= VM_CAN_NONLINEAR;
66902 @@ -2027,6 +2027,7 @@ inline int generic_write_checks(struct f
66903 *pos = i_size_read(inode);
66904
66905 if (limit != RLIM_INFINITY) {
66906 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
66907 if (*pos >= limit) {
66908 send_sig(SIGXFSZ, current, 0);
66909 return -EFBIG;
66910 diff -urNp linux-2.6.32.45/mm/fremap.c linux-2.6.32.45/mm/fremap.c
66911 --- linux-2.6.32.45/mm/fremap.c 2011-03-27 14:31:47.000000000 -0400
66912 +++ linux-2.6.32.45/mm/fremap.c 2011-04-17 15:56:46.000000000 -0400
66913 @@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
66914 retry:
66915 vma = find_vma(mm, start);
66916
66917 +#ifdef CONFIG_PAX_SEGMEXEC
66918 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
66919 + goto out;
66920 +#endif
66921 +
66922 /*
66923 * Make sure the vma is shared, that it supports prefaulting,
66924 * and that the remapped range is valid and fully within
66925 @@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
66926 /*
66927 * drop PG_Mlocked flag for over-mapped range
66928 */
66929 - unsigned int saved_flags = vma->vm_flags;
66930 + unsigned long saved_flags = vma->vm_flags;
66931 munlock_vma_pages_range(vma, start, start + size);
66932 vma->vm_flags = saved_flags;
66933 }
66934 diff -urNp linux-2.6.32.45/mm/highmem.c linux-2.6.32.45/mm/highmem.c
66935 --- linux-2.6.32.45/mm/highmem.c 2011-03-27 14:31:47.000000000 -0400
66936 +++ linux-2.6.32.45/mm/highmem.c 2011-04-17 15:56:46.000000000 -0400
66937 @@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
66938 * So no dangers, even with speculative execution.
66939 */
66940 page = pte_page(pkmap_page_table[i]);
66941 + pax_open_kernel();
66942 pte_clear(&init_mm, (unsigned long)page_address(page),
66943 &pkmap_page_table[i]);
66944 -
66945 + pax_close_kernel();
66946 set_page_address(page, NULL);
66947 need_flush = 1;
66948 }
66949 @@ -177,9 +178,11 @@ start:
66950 }
66951 }
66952 vaddr = PKMAP_ADDR(last_pkmap_nr);
66953 +
66954 + pax_open_kernel();
66955 set_pte_at(&init_mm, vaddr,
66956 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
66957 -
66958 + pax_close_kernel();
66959 pkmap_count[last_pkmap_nr] = 1;
66960 set_page_address(page, (void *)vaddr);
66961
66962 diff -urNp linux-2.6.32.45/mm/hugetlb.c linux-2.6.32.45/mm/hugetlb.c
66963 --- linux-2.6.32.45/mm/hugetlb.c 2011-07-13 17:23:04.000000000 -0400
66964 +++ linux-2.6.32.45/mm/hugetlb.c 2011-07-13 17:23:19.000000000 -0400
66965 @@ -1933,6 +1933,26 @@ static int unmap_ref_private(struct mm_s
66966 return 1;
66967 }
66968
66969 +#ifdef CONFIG_PAX_SEGMEXEC
66970 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
66971 +{
66972 + struct mm_struct *mm = vma->vm_mm;
66973 + struct vm_area_struct *vma_m;
66974 + unsigned long address_m;
66975 + pte_t *ptep_m;
66976 +
66977 + vma_m = pax_find_mirror_vma(vma);
66978 + if (!vma_m)
66979 + return;
66980 +
66981 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
66982 + address_m = address + SEGMEXEC_TASK_SIZE;
66983 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
66984 + get_page(page_m);
66985 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
66986 +}
66987 +#endif
66988 +
66989 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
66990 unsigned long address, pte_t *ptep, pte_t pte,
66991 struct page *pagecache_page)
66992 @@ -2004,6 +2024,11 @@ retry_avoidcopy:
66993 huge_ptep_clear_flush(vma, address, ptep);
66994 set_huge_pte_at(mm, address, ptep,
66995 make_huge_pte(vma, new_page, 1));
66996 +
66997 +#ifdef CONFIG_PAX_SEGMEXEC
66998 + pax_mirror_huge_pte(vma, address, new_page);
66999 +#endif
67000 +
67001 /* Make the old page be freed below */
67002 new_page = old_page;
67003 }
67004 @@ -2135,6 +2160,10 @@ retry:
67005 && (vma->vm_flags & VM_SHARED)));
67006 set_huge_pte_at(mm, address, ptep, new_pte);
67007
67008 +#ifdef CONFIG_PAX_SEGMEXEC
67009 + pax_mirror_huge_pte(vma, address, page);
67010 +#endif
67011 +
67012 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
67013 /* Optimization, do the COW without a second fault */
67014 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
67015 @@ -2163,6 +2192,28 @@ int hugetlb_fault(struct mm_struct *mm,
67016 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
67017 struct hstate *h = hstate_vma(vma);
67018
67019 +#ifdef CONFIG_PAX_SEGMEXEC
67020 + struct vm_area_struct *vma_m;
67021 +
67022 + vma_m = pax_find_mirror_vma(vma);
67023 + if (vma_m) {
67024 + unsigned long address_m;
67025 +
67026 + if (vma->vm_start > vma_m->vm_start) {
67027 + address_m = address;
67028 + address -= SEGMEXEC_TASK_SIZE;
67029 + vma = vma_m;
67030 + h = hstate_vma(vma);
67031 + } else
67032 + address_m = address + SEGMEXEC_TASK_SIZE;
67033 +
67034 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
67035 + return VM_FAULT_OOM;
67036 + address_m &= HPAGE_MASK;
67037 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
67038 + }
67039 +#endif
67040 +
67041 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
67042 if (!ptep)
67043 return VM_FAULT_OOM;
67044 diff -urNp linux-2.6.32.45/mm/internal.h linux-2.6.32.45/mm/internal.h
67045 --- linux-2.6.32.45/mm/internal.h 2011-03-27 14:31:47.000000000 -0400
67046 +++ linux-2.6.32.45/mm/internal.h 2011-07-09 09:13:08.000000000 -0400
67047 @@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
67048 * in mm/page_alloc.c
67049 */
67050 extern void __free_pages_bootmem(struct page *page, unsigned int order);
67051 +extern void free_compound_page(struct page *page);
67052 extern void prep_compound_page(struct page *page, unsigned long order);
67053
67054
67055 diff -urNp linux-2.6.32.45/mm/Kconfig linux-2.6.32.45/mm/Kconfig
67056 --- linux-2.6.32.45/mm/Kconfig 2011-03-27 14:31:47.000000000 -0400
67057 +++ linux-2.6.32.45/mm/Kconfig 2011-04-17 15:56:46.000000000 -0400
67058 @@ -228,7 +228,7 @@ config KSM
67059 config DEFAULT_MMAP_MIN_ADDR
67060 int "Low address space to protect from user allocation"
67061 depends on MMU
67062 - default 4096
67063 + default 65536
67064 help
67065 This is the portion of low virtual memory which should be protected
67066 from userspace allocation. Keeping a user from writing to low pages
67067 diff -urNp linux-2.6.32.45/mm/kmemleak.c linux-2.6.32.45/mm/kmemleak.c
67068 --- linux-2.6.32.45/mm/kmemleak.c 2011-06-25 12:55:35.000000000 -0400
67069 +++ linux-2.6.32.45/mm/kmemleak.c 2011-06-25 12:56:37.000000000 -0400
67070 @@ -358,7 +358,7 @@ static void print_unreferenced(struct se
67071
67072 for (i = 0; i < object->trace_len; i++) {
67073 void *ptr = (void *)object->trace[i];
67074 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
67075 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
67076 }
67077 }
67078
67079 diff -urNp linux-2.6.32.45/mm/maccess.c linux-2.6.32.45/mm/maccess.c
67080 --- linux-2.6.32.45/mm/maccess.c 2011-03-27 14:31:47.000000000 -0400
67081 +++ linux-2.6.32.45/mm/maccess.c 2011-04-17 15:56:46.000000000 -0400
67082 @@ -14,7 +14,7 @@
67083 * Safely read from address @src to the buffer at @dst. If a kernel fault
67084 * happens, handle that and return -EFAULT.
67085 */
67086 -long probe_kernel_read(void *dst, void *src, size_t size)
67087 +long probe_kernel_read(void *dst, const void *src, size_t size)
67088 {
67089 long ret;
67090 mm_segment_t old_fs = get_fs();
67091 @@ -39,7 +39,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
67092 * Safely write to address @dst from the buffer at @src. If a kernel fault
67093 * happens, handle that and return -EFAULT.
67094 */
67095 -long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
67096 +long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
67097 {
67098 long ret;
67099 mm_segment_t old_fs = get_fs();
67100 diff -urNp linux-2.6.32.45/mm/madvise.c linux-2.6.32.45/mm/madvise.c
67101 --- linux-2.6.32.45/mm/madvise.c 2011-03-27 14:31:47.000000000 -0400
67102 +++ linux-2.6.32.45/mm/madvise.c 2011-04-17 15:56:46.000000000 -0400
67103 @@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_a
67104 pgoff_t pgoff;
67105 unsigned long new_flags = vma->vm_flags;
67106
67107 +#ifdef CONFIG_PAX_SEGMEXEC
67108 + struct vm_area_struct *vma_m;
67109 +#endif
67110 +
67111 switch (behavior) {
67112 case MADV_NORMAL:
67113 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
67114 @@ -103,6 +107,13 @@ success:
67115 /*
67116 * vm_flags is protected by the mmap_sem held in write mode.
67117 */
67118 +
67119 +#ifdef CONFIG_PAX_SEGMEXEC
67120 + vma_m = pax_find_mirror_vma(vma);
67121 + if (vma_m)
67122 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
67123 +#endif
67124 +
67125 vma->vm_flags = new_flags;
67126
67127 out:
67128 @@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_a
67129 struct vm_area_struct ** prev,
67130 unsigned long start, unsigned long end)
67131 {
67132 +
67133 +#ifdef CONFIG_PAX_SEGMEXEC
67134 + struct vm_area_struct *vma_m;
67135 +#endif
67136 +
67137 *prev = vma;
67138 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
67139 return -EINVAL;
67140 @@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_a
67141 zap_page_range(vma, start, end - start, &details);
67142 } else
67143 zap_page_range(vma, start, end - start, NULL);
67144 +
67145 +#ifdef CONFIG_PAX_SEGMEXEC
67146 + vma_m = pax_find_mirror_vma(vma);
67147 + if (vma_m) {
67148 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
67149 + struct zap_details details = {
67150 + .nonlinear_vma = vma_m,
67151 + .last_index = ULONG_MAX,
67152 + };
67153 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
67154 + } else
67155 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
67156 + }
67157 +#endif
67158 +
67159 return 0;
67160 }
67161
67162 @@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
67163 if (end < start)
67164 goto out;
67165
67166 +#ifdef CONFIG_PAX_SEGMEXEC
67167 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
67168 + if (end > SEGMEXEC_TASK_SIZE)
67169 + goto out;
67170 + } else
67171 +#endif
67172 +
67173 + if (end > TASK_SIZE)
67174 + goto out;
67175 +
67176 error = 0;
67177 if (end == start)
67178 goto out;
67179 diff -urNp linux-2.6.32.45/mm/memory.c linux-2.6.32.45/mm/memory.c
67180 --- linux-2.6.32.45/mm/memory.c 2011-07-13 17:23:04.000000000 -0400
67181 +++ linux-2.6.32.45/mm/memory.c 2011-07-13 17:23:23.000000000 -0400
67182 @@ -187,8 +187,12 @@ static inline void free_pmd_range(struct
67183 return;
67184
67185 pmd = pmd_offset(pud, start);
67186 +
67187 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
67188 pud_clear(pud);
67189 pmd_free_tlb(tlb, pmd, start);
67190 +#endif
67191 +
67192 }
67193
67194 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
67195 @@ -219,9 +223,12 @@ static inline void free_pud_range(struct
67196 if (end - 1 > ceiling - 1)
67197 return;
67198
67199 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
67200 pud = pud_offset(pgd, start);
67201 pgd_clear(pgd);
67202 pud_free_tlb(tlb, pud, start);
67203 +#endif
67204 +
67205 }
67206
67207 /*
67208 @@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct
67209 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
67210 i = 0;
67211
67212 - do {
67213 + while (nr_pages) {
67214 struct vm_area_struct *vma;
67215
67216 - vma = find_extend_vma(mm, start);
67217 + vma = find_vma(mm, start);
67218 if (!vma && in_gate_area(tsk, start)) {
67219 unsigned long pg = start & PAGE_MASK;
67220 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
67221 @@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct
67222 continue;
67223 }
67224
67225 - if (!vma ||
67226 + if (!vma || start < vma->vm_start ||
67227 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
67228 !(vm_flags & vma->vm_flags))
67229 return i ? : -EFAULT;
67230 @@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct
67231 start += PAGE_SIZE;
67232 nr_pages--;
67233 } while (nr_pages && start < vma->vm_end);
67234 - } while (nr_pages);
67235 + }
67236 return i;
67237 }
67238
67239 @@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_st
67240 page_add_file_rmap(page);
67241 set_pte_at(mm, addr, pte, mk_pte(page, prot));
67242
67243 +#ifdef CONFIG_PAX_SEGMEXEC
67244 + pax_mirror_file_pte(vma, addr, page, ptl);
67245 +#endif
67246 +
67247 retval = 0;
67248 pte_unmap_unlock(pte, ptl);
67249 return retval;
67250 @@ -1560,10 +1571,22 @@ out:
67251 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
67252 struct page *page)
67253 {
67254 +
67255 +#ifdef CONFIG_PAX_SEGMEXEC
67256 + struct vm_area_struct *vma_m;
67257 +#endif
67258 +
67259 if (addr < vma->vm_start || addr >= vma->vm_end)
67260 return -EFAULT;
67261 if (!page_count(page))
67262 return -EINVAL;
67263 +
67264 +#ifdef CONFIG_PAX_SEGMEXEC
67265 + vma_m = pax_find_mirror_vma(vma);
67266 + if (vma_m)
67267 + vma_m->vm_flags |= VM_INSERTPAGE;
67268 +#endif
67269 +
67270 vma->vm_flags |= VM_INSERTPAGE;
67271 return insert_page(vma, addr, page, vma->vm_page_prot);
67272 }
67273 @@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struc
67274 unsigned long pfn)
67275 {
67276 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
67277 + BUG_ON(vma->vm_mirror);
67278
67279 if (addr < vma->vm_start || addr >= vma->vm_end)
67280 return -EFAULT;
67281 @@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct
67282 copy_user_highpage(dst, src, va, vma);
67283 }
67284
67285 +#ifdef CONFIG_PAX_SEGMEXEC
67286 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
67287 +{
67288 + struct mm_struct *mm = vma->vm_mm;
67289 + spinlock_t *ptl;
67290 + pte_t *pte, entry;
67291 +
67292 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
67293 + entry = *pte;
67294 + if (!pte_present(entry)) {
67295 + if (!pte_none(entry)) {
67296 + BUG_ON(pte_file(entry));
67297 + free_swap_and_cache(pte_to_swp_entry(entry));
67298 + pte_clear_not_present_full(mm, address, pte, 0);
67299 + }
67300 + } else {
67301 + struct page *page;
67302 +
67303 + flush_cache_page(vma, address, pte_pfn(entry));
67304 + entry = ptep_clear_flush(vma, address, pte);
67305 + BUG_ON(pte_dirty(entry));
67306 + page = vm_normal_page(vma, address, entry);
67307 + if (page) {
67308 + update_hiwater_rss(mm);
67309 + if (PageAnon(page))
67310 + dec_mm_counter(mm, anon_rss);
67311 + else
67312 + dec_mm_counter(mm, file_rss);
67313 + page_remove_rmap(page);
67314 + page_cache_release(page);
67315 + }
67316 + }
67317 + pte_unmap_unlock(pte, ptl);
67318 +}
67319 +
67320 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
67321 + *
67322 + * the ptl of the lower mapped page is held on entry and is not released on exit
67323 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
67324 + */
67325 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67326 +{
67327 + struct mm_struct *mm = vma->vm_mm;
67328 + unsigned long address_m;
67329 + spinlock_t *ptl_m;
67330 + struct vm_area_struct *vma_m;
67331 + pmd_t *pmd_m;
67332 + pte_t *pte_m, entry_m;
67333 +
67334 + BUG_ON(!page_m || !PageAnon(page_m));
67335 +
67336 + vma_m = pax_find_mirror_vma(vma);
67337 + if (!vma_m)
67338 + return;
67339 +
67340 + BUG_ON(!PageLocked(page_m));
67341 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67342 + address_m = address + SEGMEXEC_TASK_SIZE;
67343 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67344 + pte_m = pte_offset_map_nested(pmd_m, address_m);
67345 + ptl_m = pte_lockptr(mm, pmd_m);
67346 + if (ptl != ptl_m) {
67347 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67348 + if (!pte_none(*pte_m))
67349 + goto out;
67350 + }
67351 +
67352 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67353 + page_cache_get(page_m);
67354 + page_add_anon_rmap(page_m, vma_m, address_m);
67355 + inc_mm_counter(mm, anon_rss);
67356 + set_pte_at(mm, address_m, pte_m, entry_m);
67357 + update_mmu_cache(vma_m, address_m, entry_m);
67358 +out:
67359 + if (ptl != ptl_m)
67360 + spin_unlock(ptl_m);
67361 + pte_unmap_nested(pte_m);
67362 + unlock_page(page_m);
67363 +}
67364 +
67365 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67366 +{
67367 + struct mm_struct *mm = vma->vm_mm;
67368 + unsigned long address_m;
67369 + spinlock_t *ptl_m;
67370 + struct vm_area_struct *vma_m;
67371 + pmd_t *pmd_m;
67372 + pte_t *pte_m, entry_m;
67373 +
67374 + BUG_ON(!page_m || PageAnon(page_m));
67375 +
67376 + vma_m = pax_find_mirror_vma(vma);
67377 + if (!vma_m)
67378 + return;
67379 +
67380 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67381 + address_m = address + SEGMEXEC_TASK_SIZE;
67382 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67383 + pte_m = pte_offset_map_nested(pmd_m, address_m);
67384 + ptl_m = pte_lockptr(mm, pmd_m);
67385 + if (ptl != ptl_m) {
67386 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67387 + if (!pte_none(*pte_m))
67388 + goto out;
67389 + }
67390 +
67391 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67392 + page_cache_get(page_m);
67393 + page_add_file_rmap(page_m);
67394 + inc_mm_counter(mm, file_rss);
67395 + set_pte_at(mm, address_m, pte_m, entry_m);
67396 + update_mmu_cache(vma_m, address_m, entry_m);
67397 +out:
67398 + if (ptl != ptl_m)
67399 + spin_unlock(ptl_m);
67400 + pte_unmap_nested(pte_m);
67401 +}
67402 +
67403 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
67404 +{
67405 + struct mm_struct *mm = vma->vm_mm;
67406 + unsigned long address_m;
67407 + spinlock_t *ptl_m;
67408 + struct vm_area_struct *vma_m;
67409 + pmd_t *pmd_m;
67410 + pte_t *pte_m, entry_m;
67411 +
67412 + vma_m = pax_find_mirror_vma(vma);
67413 + if (!vma_m)
67414 + return;
67415 +
67416 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67417 + address_m = address + SEGMEXEC_TASK_SIZE;
67418 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67419 + pte_m = pte_offset_map_nested(pmd_m, address_m);
67420 + ptl_m = pte_lockptr(mm, pmd_m);
67421 + if (ptl != ptl_m) {
67422 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67423 + if (!pte_none(*pte_m))
67424 + goto out;
67425 + }
67426 +
67427 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
67428 + set_pte_at(mm, address_m, pte_m, entry_m);
67429 +out:
67430 + if (ptl != ptl_m)
67431 + spin_unlock(ptl_m);
67432 + pte_unmap_nested(pte_m);
67433 +}
67434 +
67435 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
67436 +{
67437 + struct page *page_m;
67438 + pte_t entry;
67439 +
67440 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
67441 + goto out;
67442 +
67443 + entry = *pte;
67444 + page_m = vm_normal_page(vma, address, entry);
67445 + if (!page_m)
67446 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
67447 + else if (PageAnon(page_m)) {
67448 + if (pax_find_mirror_vma(vma)) {
67449 + pte_unmap_unlock(pte, ptl);
67450 + lock_page(page_m);
67451 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
67452 + if (pte_same(entry, *pte))
67453 + pax_mirror_anon_pte(vma, address, page_m, ptl);
67454 + else
67455 + unlock_page(page_m);
67456 + }
67457 + } else
67458 + pax_mirror_file_pte(vma, address, page_m, ptl);
67459 +
67460 +out:
67461 + pte_unmap_unlock(pte, ptl);
67462 +}
67463 +#endif
67464 +
67465 /*
67466 * This routine handles present pages, when users try to write
67467 * to a shared page. It is done by copying the page to a new address
67468 @@ -2156,6 +2360,12 @@ gotten:
67469 */
67470 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
67471 if (likely(pte_same(*page_table, orig_pte))) {
67472 +
67473 +#ifdef CONFIG_PAX_SEGMEXEC
67474 + if (pax_find_mirror_vma(vma))
67475 + BUG_ON(!trylock_page(new_page));
67476 +#endif
67477 +
67478 if (old_page) {
67479 if (!PageAnon(old_page)) {
67480 dec_mm_counter(mm, file_rss);
67481 @@ -2207,6 +2417,10 @@ gotten:
67482 page_remove_rmap(old_page);
67483 }
67484
67485 +#ifdef CONFIG_PAX_SEGMEXEC
67486 + pax_mirror_anon_pte(vma, address, new_page, ptl);
67487 +#endif
67488 +
67489 /* Free the old page.. */
67490 new_page = old_page;
67491 ret |= VM_FAULT_WRITE;
67492 @@ -2606,6 +2820,11 @@ static int do_swap_page(struct mm_struct
67493 swap_free(entry);
67494 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
67495 try_to_free_swap(page);
67496 +
67497 +#ifdef CONFIG_PAX_SEGMEXEC
67498 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
67499 +#endif
67500 +
67501 unlock_page(page);
67502
67503 if (flags & FAULT_FLAG_WRITE) {
67504 @@ -2617,6 +2836,11 @@ static int do_swap_page(struct mm_struct
67505
67506 /* No need to invalidate - it was non-present before */
67507 update_mmu_cache(vma, address, pte);
67508 +
67509 +#ifdef CONFIG_PAX_SEGMEXEC
67510 + pax_mirror_anon_pte(vma, address, page, ptl);
67511 +#endif
67512 +
67513 unlock:
67514 pte_unmap_unlock(page_table, ptl);
67515 out:
67516 @@ -2632,40 +2856,6 @@ out_release:
67517 }
67518
67519 /*
67520 - * This is like a special single-page "expand_{down|up}wards()",
67521 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
67522 - * doesn't hit another vma.
67523 - */
67524 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
67525 -{
67526 - address &= PAGE_MASK;
67527 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
67528 - struct vm_area_struct *prev = vma->vm_prev;
67529 -
67530 - /*
67531 - * Is there a mapping abutting this one below?
67532 - *
67533 - * That's only ok if it's the same stack mapping
67534 - * that has gotten split..
67535 - */
67536 - if (prev && prev->vm_end == address)
67537 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
67538 -
67539 - expand_stack(vma, address - PAGE_SIZE);
67540 - }
67541 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
67542 - struct vm_area_struct *next = vma->vm_next;
67543 -
67544 - /* As VM_GROWSDOWN but s/below/above/ */
67545 - if (next && next->vm_start == address + PAGE_SIZE)
67546 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
67547 -
67548 - expand_upwards(vma, address + PAGE_SIZE);
67549 - }
67550 - return 0;
67551 -}
67552 -
67553 -/*
67554 * We enter with non-exclusive mmap_sem (to exclude vma changes,
67555 * but allow concurrent faults), and pte mapped but not yet locked.
67556 * We return with mmap_sem still held, but pte unmapped and unlocked.
67557 @@ -2674,27 +2864,23 @@ static int do_anonymous_page(struct mm_s
67558 unsigned long address, pte_t *page_table, pmd_t *pmd,
67559 unsigned int flags)
67560 {
67561 - struct page *page;
67562 + struct page *page = NULL;
67563 spinlock_t *ptl;
67564 pte_t entry;
67565
67566 - pte_unmap(page_table);
67567 -
67568 - /* Check if we need to add a guard page to the stack */
67569 - if (check_stack_guard_page(vma, address) < 0)
67570 - return VM_FAULT_SIGBUS;
67571 -
67572 - /* Use the zero-page for reads */
67573 if (!(flags & FAULT_FLAG_WRITE)) {
67574 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
67575 vma->vm_page_prot));
67576 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
67577 + ptl = pte_lockptr(mm, pmd);
67578 + spin_lock(ptl);
67579 if (!pte_none(*page_table))
67580 goto unlock;
67581 goto setpte;
67582 }
67583
67584 /* Allocate our own private page. */
67585 + pte_unmap(page_table);
67586 +
67587 if (unlikely(anon_vma_prepare(vma)))
67588 goto oom;
67589 page = alloc_zeroed_user_highpage_movable(vma, address);
67590 @@ -2713,6 +2899,11 @@ static int do_anonymous_page(struct mm_s
67591 if (!pte_none(*page_table))
67592 goto release;
67593
67594 +#ifdef CONFIG_PAX_SEGMEXEC
67595 + if (pax_find_mirror_vma(vma))
67596 + BUG_ON(!trylock_page(page));
67597 +#endif
67598 +
67599 inc_mm_counter(mm, anon_rss);
67600 page_add_new_anon_rmap(page, vma, address);
67601 setpte:
67602 @@ -2720,6 +2911,12 @@ setpte:
67603
67604 /* No need to invalidate - it was non-present before */
67605 update_mmu_cache(vma, address, entry);
67606 +
67607 +#ifdef CONFIG_PAX_SEGMEXEC
67608 + if (page)
67609 + pax_mirror_anon_pte(vma, address, page, ptl);
67610 +#endif
67611 +
67612 unlock:
67613 pte_unmap_unlock(page_table, ptl);
67614 return 0;
67615 @@ -2862,6 +3059,12 @@ static int __do_fault(struct mm_struct *
67616 */
67617 /* Only go through if we didn't race with anybody else... */
67618 if (likely(pte_same(*page_table, orig_pte))) {
67619 +
67620 +#ifdef CONFIG_PAX_SEGMEXEC
67621 + if (anon && pax_find_mirror_vma(vma))
67622 + BUG_ON(!trylock_page(page));
67623 +#endif
67624 +
67625 flush_icache_page(vma, page);
67626 entry = mk_pte(page, vma->vm_page_prot);
67627 if (flags & FAULT_FLAG_WRITE)
67628 @@ -2881,6 +3084,14 @@ static int __do_fault(struct mm_struct *
67629
67630 /* no need to invalidate: a not-present page won't be cached */
67631 update_mmu_cache(vma, address, entry);
67632 +
67633 +#ifdef CONFIG_PAX_SEGMEXEC
67634 + if (anon)
67635 + pax_mirror_anon_pte(vma, address, page, ptl);
67636 + else
67637 + pax_mirror_file_pte(vma, address, page, ptl);
67638 +#endif
67639 +
67640 } else {
67641 if (charged)
67642 mem_cgroup_uncharge_page(page);
67643 @@ -3028,6 +3239,12 @@ static inline int handle_pte_fault(struc
67644 if (flags & FAULT_FLAG_WRITE)
67645 flush_tlb_page(vma, address);
67646 }
67647 +
67648 +#ifdef CONFIG_PAX_SEGMEXEC
67649 + pax_mirror_pte(vma, address, pte, pmd, ptl);
67650 + return 0;
67651 +#endif
67652 +
67653 unlock:
67654 pte_unmap_unlock(pte, ptl);
67655 return 0;
67656 @@ -3044,6 +3261,10 @@ int handle_mm_fault(struct mm_struct *mm
67657 pmd_t *pmd;
67658 pte_t *pte;
67659
67660 +#ifdef CONFIG_PAX_SEGMEXEC
67661 + struct vm_area_struct *vma_m;
67662 +#endif
67663 +
67664 __set_current_state(TASK_RUNNING);
67665
67666 count_vm_event(PGFAULT);
67667 @@ -3051,6 +3272,34 @@ int handle_mm_fault(struct mm_struct *mm
67668 if (unlikely(is_vm_hugetlb_page(vma)))
67669 return hugetlb_fault(mm, vma, address, flags);
67670
67671 +#ifdef CONFIG_PAX_SEGMEXEC
67672 + vma_m = pax_find_mirror_vma(vma);
67673 + if (vma_m) {
67674 + unsigned long address_m;
67675 + pgd_t *pgd_m;
67676 + pud_t *pud_m;
67677 + pmd_t *pmd_m;
67678 +
67679 + if (vma->vm_start > vma_m->vm_start) {
67680 + address_m = address;
67681 + address -= SEGMEXEC_TASK_SIZE;
67682 + vma = vma_m;
67683 + } else
67684 + address_m = address + SEGMEXEC_TASK_SIZE;
67685 +
67686 + pgd_m = pgd_offset(mm, address_m);
67687 + pud_m = pud_alloc(mm, pgd_m, address_m);
67688 + if (!pud_m)
67689 + return VM_FAULT_OOM;
67690 + pmd_m = pmd_alloc(mm, pud_m, address_m);
67691 + if (!pmd_m)
67692 + return VM_FAULT_OOM;
67693 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
67694 + return VM_FAULT_OOM;
67695 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
67696 + }
67697 +#endif
67698 +
67699 pgd = pgd_offset(mm, address);
67700 pud = pud_alloc(mm, pgd, address);
67701 if (!pud)
67702 @@ -3148,7 +3397,7 @@ static int __init gate_vma_init(void)
67703 gate_vma.vm_start = FIXADDR_USER_START;
67704 gate_vma.vm_end = FIXADDR_USER_END;
67705 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
67706 - gate_vma.vm_page_prot = __P101;
67707 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
67708 /*
67709 * Make sure the vDSO gets into every core dump.
67710 * Dumping its contents makes post-mortem fully interpretable later
67711 diff -urNp linux-2.6.32.45/mm/memory-failure.c linux-2.6.32.45/mm/memory-failure.c
67712 --- linux-2.6.32.45/mm/memory-failure.c 2011-03-27 14:31:47.000000000 -0400
67713 +++ linux-2.6.32.45/mm/memory-failure.c 2011-04-17 15:56:46.000000000 -0400
67714 @@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __r
67715
67716 int sysctl_memory_failure_recovery __read_mostly = 1;
67717
67718 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67719 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67720
67721 /*
67722 * Send all the processes who have the page mapped an ``action optional''
67723 @@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn,
67724 return 0;
67725 }
67726
67727 - atomic_long_add(1, &mce_bad_pages);
67728 + atomic_long_add_unchecked(1, &mce_bad_pages);
67729
67730 /*
67731 * We need/can do nothing about count=0 pages.
67732 diff -urNp linux-2.6.32.45/mm/mempolicy.c linux-2.6.32.45/mm/mempolicy.c
67733 --- linux-2.6.32.45/mm/mempolicy.c 2011-03-27 14:31:47.000000000 -0400
67734 +++ linux-2.6.32.45/mm/mempolicy.c 2011-04-17 15:56:46.000000000 -0400
67735 @@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_st
67736 struct vm_area_struct *next;
67737 int err;
67738
67739 +#ifdef CONFIG_PAX_SEGMEXEC
67740 + struct vm_area_struct *vma_m;
67741 +#endif
67742 +
67743 err = 0;
67744 for (; vma && vma->vm_start < end; vma = next) {
67745 next = vma->vm_next;
67746 @@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_st
67747 err = policy_vma(vma, new);
67748 if (err)
67749 break;
67750 +
67751 +#ifdef CONFIG_PAX_SEGMEXEC
67752 + vma_m = pax_find_mirror_vma(vma);
67753 + if (vma_m) {
67754 + err = policy_vma(vma_m, new);
67755 + if (err)
67756 + break;
67757 + }
67758 +#endif
67759 +
67760 }
67761 return err;
67762 }
67763 @@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start
67764
67765 if (end < start)
67766 return -EINVAL;
67767 +
67768 +#ifdef CONFIG_PAX_SEGMEXEC
67769 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
67770 + if (end > SEGMEXEC_TASK_SIZE)
67771 + return -EINVAL;
67772 + } else
67773 +#endif
67774 +
67775 + if (end > TASK_SIZE)
67776 + return -EINVAL;
67777 +
67778 if (end == start)
67779 return 0;
67780
67781 @@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
67782 if (!mm)
67783 return -EINVAL;
67784
67785 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67786 + if (mm != current->mm &&
67787 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
67788 + err = -EPERM;
67789 + goto out;
67790 + }
67791 +#endif
67792 +
67793 /*
67794 * Check if this process has the right to modify the specified
67795 * process. The right exists if the process has administrative
67796 @@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
67797 rcu_read_lock();
67798 tcred = __task_cred(task);
67799 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
67800 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
67801 - !capable(CAP_SYS_NICE)) {
67802 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
67803 rcu_read_unlock();
67804 err = -EPERM;
67805 goto out;
67806 @@ -2396,7 +2428,7 @@ int show_numa_map(struct seq_file *m, vo
67807
67808 if (file) {
67809 seq_printf(m, " file=");
67810 - seq_path(m, &file->f_path, "\n\t= ");
67811 + seq_path(m, &file->f_path, "\n\t\\= ");
67812 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
67813 seq_printf(m, " heap");
67814 } else if (vma->vm_start <= mm->start_stack &&
67815 diff -urNp linux-2.6.32.45/mm/migrate.c linux-2.6.32.45/mm/migrate.c
67816 --- linux-2.6.32.45/mm/migrate.c 2011-07-13 17:23:04.000000000 -0400
67817 +++ linux-2.6.32.45/mm/migrate.c 2011-07-13 17:23:23.000000000 -0400
67818 @@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struc
67819 unsigned long chunk_start;
67820 int err;
67821
67822 + pax_track_stack();
67823 +
67824 task_nodes = cpuset_mems_allowed(task);
67825
67826 err = -ENOMEM;
67827 @@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
67828 if (!mm)
67829 return -EINVAL;
67830
67831 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67832 + if (mm != current->mm &&
67833 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
67834 + err = -EPERM;
67835 + goto out;
67836 + }
67837 +#endif
67838 +
67839 /*
67840 * Check if this process has the right to modify the specified
67841 * process. The right exists if the process has administrative
67842 @@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
67843 rcu_read_lock();
67844 tcred = __task_cred(task);
67845 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
67846 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
67847 - !capable(CAP_SYS_NICE)) {
67848 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
67849 rcu_read_unlock();
67850 err = -EPERM;
67851 goto out;
67852 diff -urNp linux-2.6.32.45/mm/mlock.c linux-2.6.32.45/mm/mlock.c
67853 --- linux-2.6.32.45/mm/mlock.c 2011-03-27 14:31:47.000000000 -0400
67854 +++ linux-2.6.32.45/mm/mlock.c 2011-04-17 15:56:46.000000000 -0400
67855 @@ -13,6 +13,7 @@
67856 #include <linux/pagemap.h>
67857 #include <linux/mempolicy.h>
67858 #include <linux/syscalls.h>
67859 +#include <linux/security.h>
67860 #include <linux/sched.h>
67861 #include <linux/module.h>
67862 #include <linux/rmap.h>
67863 @@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
67864 }
67865 }
67866
67867 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
67868 -{
67869 - return (vma->vm_flags & VM_GROWSDOWN) &&
67870 - (vma->vm_start == addr) &&
67871 - !vma_stack_continue(vma->vm_prev, addr);
67872 -}
67873 -
67874 /**
67875 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
67876 * @vma: target vma
67877 @@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(stru
67878 if (vma->vm_flags & VM_WRITE)
67879 gup_flags |= FOLL_WRITE;
67880
67881 - /* We don't try to access the guard page of a stack vma */
67882 - if (stack_guard_page(vma, start)) {
67883 - addr += PAGE_SIZE;
67884 - nr_pages--;
67885 - }
67886 -
67887 while (nr_pages > 0) {
67888 int i;
67889
67890 @@ -440,7 +428,7 @@ static int do_mlock(unsigned long start,
67891 {
67892 unsigned long nstart, end, tmp;
67893 struct vm_area_struct * vma, * prev;
67894 - int error;
67895 + int error = -EINVAL;
67896
67897 len = PAGE_ALIGN(len);
67898 end = start + len;
67899 @@ -448,6 +436,9 @@ static int do_mlock(unsigned long start,
67900 return -EINVAL;
67901 if (end == start)
67902 return 0;
67903 + if (end > TASK_SIZE)
67904 + return -EINVAL;
67905 +
67906 vma = find_vma_prev(current->mm, start, &prev);
67907 if (!vma || vma->vm_start > start)
67908 return -ENOMEM;
67909 @@ -458,6 +449,11 @@ static int do_mlock(unsigned long start,
67910 for (nstart = start ; ; ) {
67911 unsigned int newflags;
67912
67913 +#ifdef CONFIG_PAX_SEGMEXEC
67914 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
67915 + break;
67916 +#endif
67917 +
67918 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
67919
67920 newflags = vma->vm_flags | VM_LOCKED;
67921 @@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
67922 lock_limit >>= PAGE_SHIFT;
67923
67924 /* check against resource limits */
67925 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
67926 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
67927 error = do_mlock(start, len, 1);
67928 up_write(&current->mm->mmap_sem);
67929 @@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
67930 static int do_mlockall(int flags)
67931 {
67932 struct vm_area_struct * vma, * prev = NULL;
67933 - unsigned int def_flags = 0;
67934
67935 if (flags & MCL_FUTURE)
67936 - def_flags = VM_LOCKED;
67937 - current->mm->def_flags = def_flags;
67938 + current->mm->def_flags |= VM_LOCKED;
67939 + else
67940 + current->mm->def_flags &= ~VM_LOCKED;
67941 if (flags == MCL_FUTURE)
67942 goto out;
67943
67944 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
67945 - unsigned int newflags;
67946 + unsigned long newflags;
67947 +
67948 +#ifdef CONFIG_PAX_SEGMEXEC
67949 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
67950 + break;
67951 +#endif
67952
67953 + BUG_ON(vma->vm_end > TASK_SIZE);
67954 newflags = vma->vm_flags | VM_LOCKED;
67955 if (!(flags & MCL_CURRENT))
67956 newflags &= ~VM_LOCKED;
67957 @@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
67958 lock_limit >>= PAGE_SHIFT;
67959
67960 ret = -ENOMEM;
67961 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
67962 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
67963 capable(CAP_IPC_LOCK))
67964 ret = do_mlockall(flags);
67965 diff -urNp linux-2.6.32.45/mm/mmap.c linux-2.6.32.45/mm/mmap.c
67966 --- linux-2.6.32.45/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
67967 +++ linux-2.6.32.45/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
67968 @@ -45,6 +45,16 @@
67969 #define arch_rebalance_pgtables(addr, len) (addr)
67970 #endif
67971
67972 +static inline void verify_mm_writelocked(struct mm_struct *mm)
67973 +{
67974 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
67975 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
67976 + up_read(&mm->mmap_sem);
67977 + BUG();
67978 + }
67979 +#endif
67980 +}
67981 +
67982 static void unmap_region(struct mm_struct *mm,
67983 struct vm_area_struct *vma, struct vm_area_struct *prev,
67984 unsigned long start, unsigned long end);
67985 @@ -70,22 +80,32 @@ static void unmap_region(struct mm_struc
67986 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
67987 *
67988 */
67989 -pgprot_t protection_map[16] = {
67990 +pgprot_t protection_map[16] __read_only = {
67991 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
67992 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
67993 };
67994
67995 pgprot_t vm_get_page_prot(unsigned long vm_flags)
67996 {
67997 - return __pgprot(pgprot_val(protection_map[vm_flags &
67998 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
67999 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
68000 pgprot_val(arch_vm_get_page_prot(vm_flags)));
68001 +
68002 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68003 + if (!nx_enabled &&
68004 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
68005 + (vm_flags & (VM_READ | VM_WRITE)))
68006 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
68007 +#endif
68008 +
68009 + return prot;
68010 }
68011 EXPORT_SYMBOL(vm_get_page_prot);
68012
68013 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
68014 int sysctl_overcommit_ratio = 50; /* default is 50% */
68015 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
68016 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
68017 struct percpu_counter vm_committed_as;
68018
68019 /*
68020 @@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma
68021 struct vm_area_struct *next = vma->vm_next;
68022
68023 might_sleep();
68024 + BUG_ON(vma->vm_mirror);
68025 if (vma->vm_ops && vma->vm_ops->close)
68026 vma->vm_ops->close(vma);
68027 if (vma->vm_file) {
68028 @@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
68029 * not page aligned -Ram Gupta
68030 */
68031 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
68032 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
68033 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
68034 (mm->end_data - mm->start_data) > rlim)
68035 goto out;
68036 @@ -704,6 +726,12 @@ static int
68037 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
68038 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68039 {
68040 +
68041 +#ifdef CONFIG_PAX_SEGMEXEC
68042 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
68043 + return 0;
68044 +#endif
68045 +
68046 if (is_mergeable_vma(vma, file, vm_flags) &&
68047 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
68048 if (vma->vm_pgoff == vm_pgoff)
68049 @@ -723,6 +751,12 @@ static int
68050 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
68051 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68052 {
68053 +
68054 +#ifdef CONFIG_PAX_SEGMEXEC
68055 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
68056 + return 0;
68057 +#endif
68058 +
68059 if (is_mergeable_vma(vma, file, vm_flags) &&
68060 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
68061 pgoff_t vm_pglen;
68062 @@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struc
68063 struct vm_area_struct *vma_merge(struct mm_struct *mm,
68064 struct vm_area_struct *prev, unsigned long addr,
68065 unsigned long end, unsigned long vm_flags,
68066 - struct anon_vma *anon_vma, struct file *file,
68067 + struct anon_vma *anon_vma, struct file *file,
68068 pgoff_t pgoff, struct mempolicy *policy)
68069 {
68070 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
68071 struct vm_area_struct *area, *next;
68072
68073 +#ifdef CONFIG_PAX_SEGMEXEC
68074 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
68075 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
68076 +
68077 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
68078 +#endif
68079 +
68080 /*
68081 * We later require that vma->vm_flags == vm_flags,
68082 * so this tests vma->vm_flags & VM_SPECIAL, too.
68083 @@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct
68084 if (next && next->vm_end == end) /* cases 6, 7, 8 */
68085 next = next->vm_next;
68086
68087 +#ifdef CONFIG_PAX_SEGMEXEC
68088 + if (prev)
68089 + prev_m = pax_find_mirror_vma(prev);
68090 + if (area)
68091 + area_m = pax_find_mirror_vma(area);
68092 + if (next)
68093 + next_m = pax_find_mirror_vma(next);
68094 +#endif
68095 +
68096 /*
68097 * Can it merge with the predecessor?
68098 */
68099 @@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct
68100 /* cases 1, 6 */
68101 vma_adjust(prev, prev->vm_start,
68102 next->vm_end, prev->vm_pgoff, NULL);
68103 - } else /* cases 2, 5, 7 */
68104 +
68105 +#ifdef CONFIG_PAX_SEGMEXEC
68106 + if (prev_m)
68107 + vma_adjust(prev_m, prev_m->vm_start,
68108 + next_m->vm_end, prev_m->vm_pgoff, NULL);
68109 +#endif
68110 +
68111 + } else { /* cases 2, 5, 7 */
68112 vma_adjust(prev, prev->vm_start,
68113 end, prev->vm_pgoff, NULL);
68114 +
68115 +#ifdef CONFIG_PAX_SEGMEXEC
68116 + if (prev_m)
68117 + vma_adjust(prev_m, prev_m->vm_start,
68118 + end_m, prev_m->vm_pgoff, NULL);
68119 +#endif
68120 +
68121 + }
68122 return prev;
68123 }
68124
68125 @@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct
68126 mpol_equal(policy, vma_policy(next)) &&
68127 can_vma_merge_before(next, vm_flags,
68128 anon_vma, file, pgoff+pglen)) {
68129 - if (prev && addr < prev->vm_end) /* case 4 */
68130 + if (prev && addr < prev->vm_end) { /* case 4 */
68131 vma_adjust(prev, prev->vm_start,
68132 addr, prev->vm_pgoff, NULL);
68133 - else /* cases 3, 8 */
68134 +
68135 +#ifdef CONFIG_PAX_SEGMEXEC
68136 + if (prev_m)
68137 + vma_adjust(prev_m, prev_m->vm_start,
68138 + addr_m, prev_m->vm_pgoff, NULL);
68139 +#endif
68140 +
68141 + } else { /* cases 3, 8 */
68142 vma_adjust(area, addr, next->vm_end,
68143 next->vm_pgoff - pglen, NULL);
68144 +
68145 +#ifdef CONFIG_PAX_SEGMEXEC
68146 + if (area_m)
68147 + vma_adjust(area_m, addr_m, next_m->vm_end,
68148 + next_m->vm_pgoff - pglen, NULL);
68149 +#endif
68150 +
68151 + }
68152 return area;
68153 }
68154
68155 @@ -898,14 +978,11 @@ none:
68156 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
68157 struct file *file, long pages)
68158 {
68159 - const unsigned long stack_flags
68160 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
68161 -
68162 if (file) {
68163 mm->shared_vm += pages;
68164 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
68165 mm->exec_vm += pages;
68166 - } else if (flags & stack_flags)
68167 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
68168 mm->stack_vm += pages;
68169 if (flags & (VM_RESERVED|VM_IO))
68170 mm->reserved_vm += pages;
68171 @@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file
68172 * (the exception is when the underlying filesystem is noexec
68173 * mounted, in which case we dont add PROT_EXEC.)
68174 */
68175 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
68176 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
68177 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
68178 prot |= PROT_EXEC;
68179
68180 @@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file
68181 /* Obtain the address to map to. we verify (or select) it and ensure
68182 * that it represents a valid section of the address space.
68183 */
68184 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
68185 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
68186 if (addr & ~PAGE_MASK)
68187 return addr;
68188
68189 @@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file
68190 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
68191 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
68192
68193 +#ifdef CONFIG_PAX_MPROTECT
68194 + if (mm->pax_flags & MF_PAX_MPROTECT) {
68195 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
68196 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
68197 + gr_log_rwxmmap(file);
68198 +
68199 +#ifdef CONFIG_PAX_EMUPLT
68200 + vm_flags &= ~VM_EXEC;
68201 +#else
68202 + return -EPERM;
68203 +#endif
68204 +
68205 + }
68206 +
68207 + if (!(vm_flags & VM_EXEC))
68208 + vm_flags &= ~VM_MAYEXEC;
68209 +#else
68210 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
68211 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
68212 +#endif
68213 + else
68214 + vm_flags &= ~VM_MAYWRITE;
68215 + }
68216 +#endif
68217 +
68218 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68219 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
68220 + vm_flags &= ~VM_PAGEEXEC;
68221 +#endif
68222 +
68223 if (flags & MAP_LOCKED)
68224 if (!can_do_mlock())
68225 return -EPERM;
68226 @@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file
68227 locked += mm->locked_vm;
68228 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
68229 lock_limit >>= PAGE_SHIFT;
68230 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68231 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
68232 return -EAGAIN;
68233 }
68234 @@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file
68235 if (error)
68236 return error;
68237
68238 + if (!gr_acl_handle_mmap(file, prot))
68239 + return -EACCES;
68240 +
68241 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
68242 }
68243 EXPORT_SYMBOL(do_mmap_pgoff);
68244 @@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
68245 */
68246 int vma_wants_writenotify(struct vm_area_struct *vma)
68247 {
68248 - unsigned int vm_flags = vma->vm_flags;
68249 + unsigned long vm_flags = vma->vm_flags;
68250
68251 /* If it was private or non-writable, the write bit is already clear */
68252 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
68253 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
68254 return 0;
68255
68256 /* The backer wishes to know when pages are first written to? */
68257 @@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *f
68258 unsigned long charged = 0;
68259 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
68260
68261 +#ifdef CONFIG_PAX_SEGMEXEC
68262 + struct vm_area_struct *vma_m = NULL;
68263 +#endif
68264 +
68265 + /*
68266 + * mm->mmap_sem is required to protect against another thread
68267 + * changing the mappings in case we sleep.
68268 + */
68269 + verify_mm_writelocked(mm);
68270 +
68271 /* Clear old maps */
68272 error = -ENOMEM;
68273 -munmap_back:
68274 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68275 if (vma && vma->vm_start < addr + len) {
68276 if (do_munmap(mm, addr, len))
68277 return -ENOMEM;
68278 - goto munmap_back;
68279 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68280 + BUG_ON(vma && vma->vm_start < addr + len);
68281 }
68282
68283 /* Check against address space limit. */
68284 @@ -1173,6 +1294,16 @@ munmap_back:
68285 goto unacct_error;
68286 }
68287
68288 +#ifdef CONFIG_PAX_SEGMEXEC
68289 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
68290 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
68291 + if (!vma_m) {
68292 + error = -ENOMEM;
68293 + goto free_vma;
68294 + }
68295 + }
68296 +#endif
68297 +
68298 vma->vm_mm = mm;
68299 vma->vm_start = addr;
68300 vma->vm_end = addr + len;
68301 @@ -1195,6 +1326,19 @@ munmap_back:
68302 error = file->f_op->mmap(file, vma);
68303 if (error)
68304 goto unmap_and_free_vma;
68305 +
68306 +#ifdef CONFIG_PAX_SEGMEXEC
68307 + if (vma_m && (vm_flags & VM_EXECUTABLE))
68308 + added_exe_file_vma(mm);
68309 +#endif
68310 +
68311 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68312 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
68313 + vma->vm_flags |= VM_PAGEEXEC;
68314 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
68315 + }
68316 +#endif
68317 +
68318 if (vm_flags & VM_EXECUTABLE)
68319 added_exe_file_vma(mm);
68320
68321 @@ -1218,6 +1362,11 @@ munmap_back:
68322 vma_link(mm, vma, prev, rb_link, rb_parent);
68323 file = vma->vm_file;
68324
68325 +#ifdef CONFIG_PAX_SEGMEXEC
68326 + if (vma_m)
68327 + pax_mirror_vma(vma_m, vma);
68328 +#endif
68329 +
68330 /* Once vma denies write, undo our temporary denial count */
68331 if (correct_wcount)
68332 atomic_inc(&inode->i_writecount);
68333 @@ -1226,6 +1375,7 @@ out:
68334
68335 mm->total_vm += len >> PAGE_SHIFT;
68336 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
68337 + track_exec_limit(mm, addr, addr + len, vm_flags);
68338 if (vm_flags & VM_LOCKED) {
68339 /*
68340 * makes pages present; downgrades, drops, reacquires mmap_sem
68341 @@ -1248,6 +1398,12 @@ unmap_and_free_vma:
68342 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
68343 charged = 0;
68344 free_vma:
68345 +
68346 +#ifdef CONFIG_PAX_SEGMEXEC
68347 + if (vma_m)
68348 + kmem_cache_free(vm_area_cachep, vma_m);
68349 +#endif
68350 +
68351 kmem_cache_free(vm_area_cachep, vma);
68352 unacct_error:
68353 if (charged)
68354 @@ -1255,6 +1411,44 @@ unacct_error:
68355 return error;
68356 }
68357
68358 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
68359 +{
68360 + if (!vma) {
68361 +#ifdef CONFIG_STACK_GROWSUP
68362 + if (addr > sysctl_heap_stack_gap)
68363 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
68364 + else
68365 + vma = find_vma(current->mm, 0);
68366 + if (vma && (vma->vm_flags & VM_GROWSUP))
68367 + return false;
68368 +#endif
68369 + return true;
68370 + }
68371 +
68372 + if (addr + len > vma->vm_start)
68373 + return false;
68374 +
68375 + if (vma->vm_flags & VM_GROWSDOWN)
68376 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
68377 +#ifdef CONFIG_STACK_GROWSUP
68378 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
68379 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
68380 +#endif
68381 +
68382 + return true;
68383 +}
68384 +
68385 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
68386 +{
68387 + if (vma->vm_start < len)
68388 + return -ENOMEM;
68389 + if (!(vma->vm_flags & VM_GROWSDOWN))
68390 + return vma->vm_start - len;
68391 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
68392 + return vma->vm_start - len - sysctl_heap_stack_gap;
68393 + return -ENOMEM;
68394 +}
68395 +
68396 /* Get an address range which is currently unmapped.
68397 * For shmat() with addr=0.
68398 *
68399 @@ -1281,18 +1475,23 @@ arch_get_unmapped_area(struct file *filp
68400 if (flags & MAP_FIXED)
68401 return addr;
68402
68403 +#ifdef CONFIG_PAX_RANDMMAP
68404 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68405 +#endif
68406 +
68407 if (addr) {
68408 addr = PAGE_ALIGN(addr);
68409 - vma = find_vma(mm, addr);
68410 - if (TASK_SIZE - len >= addr &&
68411 - (!vma || addr + len <= vma->vm_start))
68412 - return addr;
68413 + if (TASK_SIZE - len >= addr) {
68414 + vma = find_vma(mm, addr);
68415 + if (check_heap_stack_gap(vma, addr, len))
68416 + return addr;
68417 + }
68418 }
68419 if (len > mm->cached_hole_size) {
68420 - start_addr = addr = mm->free_area_cache;
68421 + start_addr = addr = mm->free_area_cache;
68422 } else {
68423 - start_addr = addr = TASK_UNMAPPED_BASE;
68424 - mm->cached_hole_size = 0;
68425 + start_addr = addr = mm->mmap_base;
68426 + mm->cached_hole_size = 0;
68427 }
68428
68429 full_search:
68430 @@ -1303,34 +1502,40 @@ full_search:
68431 * Start a new search - just in case we missed
68432 * some holes.
68433 */
68434 - if (start_addr != TASK_UNMAPPED_BASE) {
68435 - addr = TASK_UNMAPPED_BASE;
68436 - start_addr = addr;
68437 + if (start_addr != mm->mmap_base) {
68438 + start_addr = addr = mm->mmap_base;
68439 mm->cached_hole_size = 0;
68440 goto full_search;
68441 }
68442 return -ENOMEM;
68443 }
68444 - if (!vma || addr + len <= vma->vm_start) {
68445 - /*
68446 - * Remember the place where we stopped the search:
68447 - */
68448 - mm->free_area_cache = addr + len;
68449 - return addr;
68450 - }
68451 + if (check_heap_stack_gap(vma, addr, len))
68452 + break;
68453 if (addr + mm->cached_hole_size < vma->vm_start)
68454 mm->cached_hole_size = vma->vm_start - addr;
68455 addr = vma->vm_end;
68456 }
68457 +
68458 + /*
68459 + * Remember the place where we stopped the search:
68460 + */
68461 + mm->free_area_cache = addr + len;
68462 + return addr;
68463 }
68464 #endif
68465
68466 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
68467 {
68468 +
68469 +#ifdef CONFIG_PAX_SEGMEXEC
68470 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
68471 + return;
68472 +#endif
68473 +
68474 /*
68475 * Is this a new hole at the lowest possible address?
68476 */
68477 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
68478 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
68479 mm->free_area_cache = addr;
68480 mm->cached_hole_size = ~0UL;
68481 }
68482 @@ -1348,7 +1553,7 @@ arch_get_unmapped_area_topdown(struct fi
68483 {
68484 struct vm_area_struct *vma;
68485 struct mm_struct *mm = current->mm;
68486 - unsigned long addr = addr0;
68487 + unsigned long base = mm->mmap_base, addr = addr0;
68488
68489 /* requested length too big for entire address space */
68490 if (len > TASK_SIZE)
68491 @@ -1357,13 +1562,18 @@ arch_get_unmapped_area_topdown(struct fi
68492 if (flags & MAP_FIXED)
68493 return addr;
68494
68495 +#ifdef CONFIG_PAX_RANDMMAP
68496 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68497 +#endif
68498 +
68499 /* requesting a specific address */
68500 if (addr) {
68501 addr = PAGE_ALIGN(addr);
68502 - vma = find_vma(mm, addr);
68503 - if (TASK_SIZE - len >= addr &&
68504 - (!vma || addr + len <= vma->vm_start))
68505 - return addr;
68506 + if (TASK_SIZE - len >= addr) {
68507 + vma = find_vma(mm, addr);
68508 + if (check_heap_stack_gap(vma, addr, len))
68509 + return addr;
68510 + }
68511 }
68512
68513 /* check if free_area_cache is useful for us */
68514 @@ -1378,7 +1588,7 @@ arch_get_unmapped_area_topdown(struct fi
68515 /* make sure it can fit in the remaining address space */
68516 if (addr > len) {
68517 vma = find_vma(mm, addr-len);
68518 - if (!vma || addr <= vma->vm_start)
68519 + if (check_heap_stack_gap(vma, addr - len, len))
68520 /* remember the address as a hint for next time */
68521 return (mm->free_area_cache = addr-len);
68522 }
68523 @@ -1395,7 +1605,7 @@ arch_get_unmapped_area_topdown(struct fi
68524 * return with success:
68525 */
68526 vma = find_vma(mm, addr);
68527 - if (!vma || addr+len <= vma->vm_start)
68528 + if (check_heap_stack_gap(vma, addr, len))
68529 /* remember the address as a hint for next time */
68530 return (mm->free_area_cache = addr);
68531
68532 @@ -1404,8 +1614,8 @@ arch_get_unmapped_area_topdown(struct fi
68533 mm->cached_hole_size = vma->vm_start - addr;
68534
68535 /* try just below the current vma->vm_start */
68536 - addr = vma->vm_start-len;
68537 - } while (len < vma->vm_start);
68538 + addr = skip_heap_stack_gap(vma, len);
68539 + } while (!IS_ERR_VALUE(addr));
68540
68541 bottomup:
68542 /*
68543 @@ -1414,13 +1624,21 @@ bottomup:
68544 * can happen with large stack limits and large mmap()
68545 * allocations.
68546 */
68547 + mm->mmap_base = TASK_UNMAPPED_BASE;
68548 +
68549 +#ifdef CONFIG_PAX_RANDMMAP
68550 + if (mm->pax_flags & MF_PAX_RANDMMAP)
68551 + mm->mmap_base += mm->delta_mmap;
68552 +#endif
68553 +
68554 + mm->free_area_cache = mm->mmap_base;
68555 mm->cached_hole_size = ~0UL;
68556 - mm->free_area_cache = TASK_UNMAPPED_BASE;
68557 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
68558 /*
68559 * Restore the topdown base:
68560 */
68561 - mm->free_area_cache = mm->mmap_base;
68562 + mm->mmap_base = base;
68563 + mm->free_area_cache = base;
68564 mm->cached_hole_size = ~0UL;
68565
68566 return addr;
68567 @@ -1429,6 +1647,12 @@ bottomup:
68568
68569 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
68570 {
68571 +
68572 +#ifdef CONFIG_PAX_SEGMEXEC
68573 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
68574 + return;
68575 +#endif
68576 +
68577 /*
68578 * Is this a new hole at the highest possible address?
68579 */
68580 @@ -1436,8 +1660,10 @@ void arch_unmap_area_topdown(struct mm_s
68581 mm->free_area_cache = addr;
68582
68583 /* dont allow allocations above current base */
68584 - if (mm->free_area_cache > mm->mmap_base)
68585 + if (mm->free_area_cache > mm->mmap_base) {
68586 mm->free_area_cache = mm->mmap_base;
68587 + mm->cached_hole_size = ~0UL;
68588 + }
68589 }
68590
68591 unsigned long
68592 @@ -1545,6 +1771,27 @@ out:
68593 return prev ? prev->vm_next : vma;
68594 }
68595
68596 +#ifdef CONFIG_PAX_SEGMEXEC
68597 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
68598 +{
68599 + struct vm_area_struct *vma_m;
68600 +
68601 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
68602 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
68603 + BUG_ON(vma->vm_mirror);
68604 + return NULL;
68605 + }
68606 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
68607 + vma_m = vma->vm_mirror;
68608 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
68609 + BUG_ON(vma->vm_file != vma_m->vm_file);
68610 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
68611 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
68612 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
68613 + return vma_m;
68614 +}
68615 +#endif
68616 +
68617 /*
68618 * Verify that the stack growth is acceptable and
68619 * update accounting. This is shared with both the
68620 @@ -1561,6 +1808,7 @@ static int acct_stack_growth(struct vm_a
68621 return -ENOMEM;
68622
68623 /* Stack limit test */
68624 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
68625 if (size > rlim[RLIMIT_STACK].rlim_cur)
68626 return -ENOMEM;
68627
68628 @@ -1570,6 +1818,7 @@ static int acct_stack_growth(struct vm_a
68629 unsigned long limit;
68630 locked = mm->locked_vm + grow;
68631 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
68632 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68633 if (locked > limit && !capable(CAP_IPC_LOCK))
68634 return -ENOMEM;
68635 }
68636 @@ -1600,37 +1849,48 @@ static int acct_stack_growth(struct vm_a
68637 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
68638 * vma is the last one with address > vma->vm_end. Have to extend vma.
68639 */
68640 +#ifndef CONFIG_IA64
68641 +static
68642 +#endif
68643 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
68644 {
68645 int error;
68646 + bool locknext;
68647
68648 if (!(vma->vm_flags & VM_GROWSUP))
68649 return -EFAULT;
68650
68651 + /* Also guard against wrapping around to address 0. */
68652 + if (address < PAGE_ALIGN(address+1))
68653 + address = PAGE_ALIGN(address+1);
68654 + else
68655 + return -ENOMEM;
68656 +
68657 /*
68658 * We must make sure the anon_vma is allocated
68659 * so that the anon_vma locking is not a noop.
68660 */
68661 if (unlikely(anon_vma_prepare(vma)))
68662 return -ENOMEM;
68663 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
68664 + if (locknext && anon_vma_prepare(vma->vm_next))
68665 + return -ENOMEM;
68666 anon_vma_lock(vma);
68667 + if (locknext)
68668 + anon_vma_lock(vma->vm_next);
68669
68670 /*
68671 * vma->vm_start/vm_end cannot change under us because the caller
68672 * is required to hold the mmap_sem in read mode. We need the
68673 - * anon_vma lock to serialize against concurrent expand_stacks.
68674 - * Also guard against wrapping around to address 0.
68675 + * anon_vma locks to serialize against concurrent expand_stacks
68676 + * and expand_upwards.
68677 */
68678 - if (address < PAGE_ALIGN(address+4))
68679 - address = PAGE_ALIGN(address+4);
68680 - else {
68681 - anon_vma_unlock(vma);
68682 - return -ENOMEM;
68683 - }
68684 error = 0;
68685
68686 /* Somebody else might have raced and expanded it already */
68687 - if (address > vma->vm_end) {
68688 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
68689 + error = -ENOMEM;
68690 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
68691 unsigned long size, grow;
68692
68693 size = address - vma->vm_start;
68694 @@ -1640,6 +1900,8 @@ int expand_upwards(struct vm_area_struct
68695 if (!error)
68696 vma->vm_end = address;
68697 }
68698 + if (locknext)
68699 + anon_vma_unlock(vma->vm_next);
68700 anon_vma_unlock(vma);
68701 return error;
68702 }
68703 @@ -1652,6 +1914,8 @@ static int expand_downwards(struct vm_ar
68704 unsigned long address)
68705 {
68706 int error;
68707 + bool lockprev = false;
68708 + struct vm_area_struct *prev;
68709
68710 /*
68711 * We must make sure the anon_vma is allocated
68712 @@ -1665,6 +1929,15 @@ static int expand_downwards(struct vm_ar
68713 if (error)
68714 return error;
68715
68716 + prev = vma->vm_prev;
68717 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
68718 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
68719 +#endif
68720 + if (lockprev && anon_vma_prepare(prev))
68721 + return -ENOMEM;
68722 + if (lockprev)
68723 + anon_vma_lock(prev);
68724 +
68725 anon_vma_lock(vma);
68726
68727 /*
68728 @@ -1674,9 +1947,17 @@ static int expand_downwards(struct vm_ar
68729 */
68730
68731 /* Somebody else might have raced and expanded it already */
68732 - if (address < vma->vm_start) {
68733 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
68734 + error = -ENOMEM;
68735 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
68736 unsigned long size, grow;
68737
68738 +#ifdef CONFIG_PAX_SEGMEXEC
68739 + struct vm_area_struct *vma_m;
68740 +
68741 + vma_m = pax_find_mirror_vma(vma);
68742 +#endif
68743 +
68744 size = vma->vm_end - address;
68745 grow = (vma->vm_start - address) >> PAGE_SHIFT;
68746
68747 @@ -1684,9 +1965,20 @@ static int expand_downwards(struct vm_ar
68748 if (!error) {
68749 vma->vm_start = address;
68750 vma->vm_pgoff -= grow;
68751 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
68752 +
68753 +#ifdef CONFIG_PAX_SEGMEXEC
68754 + if (vma_m) {
68755 + vma_m->vm_start -= grow << PAGE_SHIFT;
68756 + vma_m->vm_pgoff -= grow;
68757 + }
68758 +#endif
68759 +
68760 }
68761 }
68762 anon_vma_unlock(vma);
68763 + if (lockprev)
68764 + anon_vma_unlock(prev);
68765 return error;
68766 }
68767
68768 @@ -1762,6 +2054,13 @@ static void remove_vma_list(struct mm_st
68769 do {
68770 long nrpages = vma_pages(vma);
68771
68772 +#ifdef CONFIG_PAX_SEGMEXEC
68773 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
68774 + vma = remove_vma(vma);
68775 + continue;
68776 + }
68777 +#endif
68778 +
68779 mm->total_vm -= nrpages;
68780 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
68781 vma = remove_vma(vma);
68782 @@ -1807,6 +2106,16 @@ detach_vmas_to_be_unmapped(struct mm_str
68783 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
68784 vma->vm_prev = NULL;
68785 do {
68786 +
68787 +#ifdef CONFIG_PAX_SEGMEXEC
68788 + if (vma->vm_mirror) {
68789 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
68790 + vma->vm_mirror->vm_mirror = NULL;
68791 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
68792 + vma->vm_mirror = NULL;
68793 + }
68794 +#endif
68795 +
68796 rb_erase(&vma->vm_rb, &mm->mm_rb);
68797 mm->map_count--;
68798 tail_vma = vma;
68799 @@ -1834,10 +2143,25 @@ int split_vma(struct mm_struct * mm, str
68800 struct mempolicy *pol;
68801 struct vm_area_struct *new;
68802
68803 +#ifdef CONFIG_PAX_SEGMEXEC
68804 + struct vm_area_struct *vma_m, *new_m = NULL;
68805 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
68806 +#endif
68807 +
68808 if (is_vm_hugetlb_page(vma) && (addr &
68809 ~(huge_page_mask(hstate_vma(vma)))))
68810 return -EINVAL;
68811
68812 +#ifdef CONFIG_PAX_SEGMEXEC
68813 + vma_m = pax_find_mirror_vma(vma);
68814 +
68815 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
68816 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
68817 + if (mm->map_count >= sysctl_max_map_count-1)
68818 + return -ENOMEM;
68819 + } else
68820 +#endif
68821 +
68822 if (mm->map_count >= sysctl_max_map_count)
68823 return -ENOMEM;
68824
68825 @@ -1845,6 +2169,16 @@ int split_vma(struct mm_struct * mm, str
68826 if (!new)
68827 return -ENOMEM;
68828
68829 +#ifdef CONFIG_PAX_SEGMEXEC
68830 + if (vma_m) {
68831 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
68832 + if (!new_m) {
68833 + kmem_cache_free(vm_area_cachep, new);
68834 + return -ENOMEM;
68835 + }
68836 + }
68837 +#endif
68838 +
68839 /* most fields are the same, copy all, and then fixup */
68840 *new = *vma;
68841
68842 @@ -1855,8 +2189,29 @@ int split_vma(struct mm_struct * mm, str
68843 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
68844 }
68845
68846 +#ifdef CONFIG_PAX_SEGMEXEC
68847 + if (vma_m) {
68848 + *new_m = *vma_m;
68849 + new_m->vm_mirror = new;
68850 + new->vm_mirror = new_m;
68851 +
68852 + if (new_below)
68853 + new_m->vm_end = addr_m;
68854 + else {
68855 + new_m->vm_start = addr_m;
68856 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
68857 + }
68858 + }
68859 +#endif
68860 +
68861 pol = mpol_dup(vma_policy(vma));
68862 if (IS_ERR(pol)) {
68863 +
68864 +#ifdef CONFIG_PAX_SEGMEXEC
68865 + if (new_m)
68866 + kmem_cache_free(vm_area_cachep, new_m);
68867 +#endif
68868 +
68869 kmem_cache_free(vm_area_cachep, new);
68870 return PTR_ERR(pol);
68871 }
68872 @@ -1877,6 +2232,28 @@ int split_vma(struct mm_struct * mm, str
68873 else
68874 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
68875
68876 +#ifdef CONFIG_PAX_SEGMEXEC
68877 + if (vma_m) {
68878 + mpol_get(pol);
68879 + vma_set_policy(new_m, pol);
68880 +
68881 + if (new_m->vm_file) {
68882 + get_file(new_m->vm_file);
68883 + if (vma_m->vm_flags & VM_EXECUTABLE)
68884 + added_exe_file_vma(mm);
68885 + }
68886 +
68887 + if (new_m->vm_ops && new_m->vm_ops->open)
68888 + new_m->vm_ops->open(new_m);
68889 +
68890 + if (new_below)
68891 + vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
68892 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
68893 + else
68894 + vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
68895 + }
68896 +#endif
68897 +
68898 return 0;
68899 }
68900
68901 @@ -1885,11 +2262,30 @@ int split_vma(struct mm_struct * mm, str
68902 * work. This now handles partial unmappings.
68903 * Jeremy Fitzhardinge <jeremy@goop.org>
68904 */
68905 +#ifdef CONFIG_PAX_SEGMEXEC
68906 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
68907 +{
68908 + int ret = __do_munmap(mm, start, len);
68909 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
68910 + return ret;
68911 +
68912 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
68913 +}
68914 +
68915 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
68916 +#else
68917 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
68918 +#endif
68919 {
68920 unsigned long end;
68921 struct vm_area_struct *vma, *prev, *last;
68922
68923 + /*
68924 + * mm->mmap_sem is required to protect against another thread
68925 + * changing the mappings in case we sleep.
68926 + */
68927 + verify_mm_writelocked(mm);
68928 +
68929 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
68930 return -EINVAL;
68931
68932 @@ -1953,6 +2349,8 @@ int do_munmap(struct mm_struct *mm, unsi
68933 /* Fix up all other VM information */
68934 remove_vma_list(mm, vma);
68935
68936 + track_exec_limit(mm, start, end, 0UL);
68937 +
68938 return 0;
68939 }
68940
68941 @@ -1965,22 +2363,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
68942
68943 profile_munmap(addr);
68944
68945 +#ifdef CONFIG_PAX_SEGMEXEC
68946 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
68947 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
68948 + return -EINVAL;
68949 +#endif
68950 +
68951 down_write(&mm->mmap_sem);
68952 ret = do_munmap(mm, addr, len);
68953 up_write(&mm->mmap_sem);
68954 return ret;
68955 }
68956
68957 -static inline void verify_mm_writelocked(struct mm_struct *mm)
68958 -{
68959 -#ifdef CONFIG_DEBUG_VM
68960 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
68961 - WARN_ON(1);
68962 - up_read(&mm->mmap_sem);
68963 - }
68964 -#endif
68965 -}
68966 -
68967 /*
68968 * this is really a simplified "do_mmap". it only handles
68969 * anonymous maps. eventually we may be able to do some
68970 @@ -1994,6 +2388,7 @@ unsigned long do_brk(unsigned long addr,
68971 struct rb_node ** rb_link, * rb_parent;
68972 pgoff_t pgoff = addr >> PAGE_SHIFT;
68973 int error;
68974 + unsigned long charged;
68975
68976 len = PAGE_ALIGN(len);
68977 if (!len)
68978 @@ -2005,16 +2400,30 @@ unsigned long do_brk(unsigned long addr,
68979
68980 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
68981
68982 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
68983 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
68984 + flags &= ~VM_EXEC;
68985 +
68986 +#ifdef CONFIG_PAX_MPROTECT
68987 + if (mm->pax_flags & MF_PAX_MPROTECT)
68988 + flags &= ~VM_MAYEXEC;
68989 +#endif
68990 +
68991 + }
68992 +#endif
68993 +
68994 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
68995 if (error & ~PAGE_MASK)
68996 return error;
68997
68998 + charged = len >> PAGE_SHIFT;
68999 +
69000 /*
69001 * mlock MCL_FUTURE?
69002 */
69003 if (mm->def_flags & VM_LOCKED) {
69004 unsigned long locked, lock_limit;
69005 - locked = len >> PAGE_SHIFT;
69006 + locked = charged;
69007 locked += mm->locked_vm;
69008 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
69009 lock_limit >>= PAGE_SHIFT;
69010 @@ -2031,22 +2440,22 @@ unsigned long do_brk(unsigned long addr,
69011 /*
69012 * Clear old maps. this also does some error checking for us
69013 */
69014 - munmap_back:
69015 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69016 if (vma && vma->vm_start < addr + len) {
69017 if (do_munmap(mm, addr, len))
69018 return -ENOMEM;
69019 - goto munmap_back;
69020 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69021 + BUG_ON(vma && vma->vm_start < addr + len);
69022 }
69023
69024 /* Check against address space limits *after* clearing old maps... */
69025 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
69026 + if (!may_expand_vm(mm, charged))
69027 return -ENOMEM;
69028
69029 if (mm->map_count > sysctl_max_map_count)
69030 return -ENOMEM;
69031
69032 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
69033 + if (security_vm_enough_memory(charged))
69034 return -ENOMEM;
69035
69036 /* Can we just expand an old private anonymous mapping? */
69037 @@ -2060,7 +2469,7 @@ unsigned long do_brk(unsigned long addr,
69038 */
69039 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69040 if (!vma) {
69041 - vm_unacct_memory(len >> PAGE_SHIFT);
69042 + vm_unacct_memory(charged);
69043 return -ENOMEM;
69044 }
69045
69046 @@ -2072,11 +2481,12 @@ unsigned long do_brk(unsigned long addr,
69047 vma->vm_page_prot = vm_get_page_prot(flags);
69048 vma_link(mm, vma, prev, rb_link, rb_parent);
69049 out:
69050 - mm->total_vm += len >> PAGE_SHIFT;
69051 + mm->total_vm += charged;
69052 if (flags & VM_LOCKED) {
69053 if (!mlock_vma_pages_range(vma, addr, addr + len))
69054 - mm->locked_vm += (len >> PAGE_SHIFT);
69055 + mm->locked_vm += charged;
69056 }
69057 + track_exec_limit(mm, addr, addr + len, flags);
69058 return addr;
69059 }
69060
69061 @@ -2123,8 +2533,10 @@ void exit_mmap(struct mm_struct *mm)
69062 * Walk the list again, actually closing and freeing it,
69063 * with preemption enabled, without holding any MM locks.
69064 */
69065 - while (vma)
69066 + while (vma) {
69067 + vma->vm_mirror = NULL;
69068 vma = remove_vma(vma);
69069 + }
69070
69071 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
69072 }
69073 @@ -2138,6 +2550,10 @@ int insert_vm_struct(struct mm_struct *
69074 struct vm_area_struct * __vma, * prev;
69075 struct rb_node ** rb_link, * rb_parent;
69076
69077 +#ifdef CONFIG_PAX_SEGMEXEC
69078 + struct vm_area_struct *vma_m = NULL;
69079 +#endif
69080 +
69081 /*
69082 * The vm_pgoff of a purely anonymous vma should be irrelevant
69083 * until its first write fault, when page's anon_vma and index
69084 @@ -2160,7 +2576,22 @@ int insert_vm_struct(struct mm_struct *
69085 if ((vma->vm_flags & VM_ACCOUNT) &&
69086 security_vm_enough_memory_mm(mm, vma_pages(vma)))
69087 return -ENOMEM;
69088 +
69089 +#ifdef CONFIG_PAX_SEGMEXEC
69090 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
69091 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69092 + if (!vma_m)
69093 + return -ENOMEM;
69094 + }
69095 +#endif
69096 +
69097 vma_link(mm, vma, prev, rb_link, rb_parent);
69098 +
69099 +#ifdef CONFIG_PAX_SEGMEXEC
69100 + if (vma_m)
69101 + pax_mirror_vma(vma_m, vma);
69102 +#endif
69103 +
69104 return 0;
69105 }
69106
69107 @@ -2178,6 +2609,8 @@ struct vm_area_struct *copy_vma(struct v
69108 struct rb_node **rb_link, *rb_parent;
69109 struct mempolicy *pol;
69110
69111 + BUG_ON(vma->vm_mirror);
69112 +
69113 /*
69114 * If anonymous vma has not yet been faulted, update new pgoff
69115 * to match new location, to increase its chance of merging.
69116 @@ -2221,6 +2654,35 @@ struct vm_area_struct *copy_vma(struct v
69117 return new_vma;
69118 }
69119
69120 +#ifdef CONFIG_PAX_SEGMEXEC
69121 +void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
69122 +{
69123 + struct vm_area_struct *prev_m;
69124 + struct rb_node **rb_link_m, *rb_parent_m;
69125 + struct mempolicy *pol_m;
69126 +
69127 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
69128 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
69129 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
69130 + *vma_m = *vma;
69131 + pol_m = vma_policy(vma_m);
69132 + mpol_get(pol_m);
69133 + vma_set_policy(vma_m, pol_m);
69134 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
69135 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
69136 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
69137 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
69138 + if (vma_m->vm_file)
69139 + get_file(vma_m->vm_file);
69140 + if (vma_m->vm_ops && vma_m->vm_ops->open)
69141 + vma_m->vm_ops->open(vma_m);
69142 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
69143 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
69144 + vma_m->vm_mirror = vma;
69145 + vma->vm_mirror = vma_m;
69146 +}
69147 +#endif
69148 +
69149 /*
69150 * Return true if the calling process may expand its vm space by the passed
69151 * number of pages
69152 @@ -2231,7 +2693,7 @@ int may_expand_vm(struct mm_struct *mm,
69153 unsigned long lim;
69154
69155 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
69156 -
69157 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
69158 if (cur + npages > lim)
69159 return 0;
69160 return 1;
69161 @@ -2301,6 +2763,22 @@ int install_special_mapping(struct mm_st
69162 vma->vm_start = addr;
69163 vma->vm_end = addr + len;
69164
69165 +#ifdef CONFIG_PAX_MPROTECT
69166 + if (mm->pax_flags & MF_PAX_MPROTECT) {
69167 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
69168 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
69169 + return -EPERM;
69170 + if (!(vm_flags & VM_EXEC))
69171 + vm_flags &= ~VM_MAYEXEC;
69172 +#else
69173 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
69174 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
69175 +#endif
69176 + else
69177 + vm_flags &= ~VM_MAYWRITE;
69178 + }
69179 +#endif
69180 +
69181 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
69182 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69183
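
The expand_upwards()/expand_downwards() hunks above both refuse to grow a stack VMA to within sysctl_heap_stack_gap bytes of an adjacent readable/writable/executable mapping. A minimal user-space sketch of that distance test; the 64 KiB gap value and the helper names are invented for illustration:

#include <stdbool.h>
#include <stdio.h>

/* illustrative stand-in for the kernel's sysctl_heap_stack_gap */
static unsigned long heap_stack_gap = 64UL * 1024;

/* growing the stack down to new_start: keep the gap above the previous mapping */
static bool may_grow_down(unsigned long prev_end, unsigned long new_start)
{
        return new_start - prev_end >= heap_stack_gap;
}

/* growing the stack up to new_end: keep the gap below the next mapping */
static bool may_grow_up(unsigned long next_start, unsigned long new_end)
{
        return next_start - new_end >= heap_stack_gap;
}

int main(void)
{
        printf("%d\n", may_grow_down(0x100000, 0x120000)); /* 128 KiB away  -> 1 */
        printf("%d\n", may_grow_down(0x100000, 0x108000)); /* 32 KiB away   -> 0 */
        printf("%d\n", may_grow_up(0x200000, 0x1f0000));   /* exactly 64 KiB -> 1 */
        return 0;
}
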
69184 diff -urNp linux-2.6.32.45/mm/mprotect.c linux-2.6.32.45/mm/mprotect.c
69185 --- linux-2.6.32.45/mm/mprotect.c 2011-03-27 14:31:47.000000000 -0400
69186 +++ linux-2.6.32.45/mm/mprotect.c 2011-04-17 15:56:46.000000000 -0400
69187 @@ -24,10 +24,16 @@
69188 #include <linux/mmu_notifier.h>
69189 #include <linux/migrate.h>
69190 #include <linux/perf_event.h>
69191 +
69192 +#ifdef CONFIG_PAX_MPROTECT
69193 +#include <linux/elf.h>
69194 +#endif
69195 +
69196 #include <asm/uaccess.h>
69197 #include <asm/pgtable.h>
69198 #include <asm/cacheflush.h>
69199 #include <asm/tlbflush.h>
69200 +#include <asm/mmu_context.h>
69201
69202 #ifndef pgprot_modify
69203 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
69204 @@ -132,6 +138,48 @@ static void change_protection(struct vm_
69205 flush_tlb_range(vma, start, end);
69206 }
69207
69208 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69209 +/* called while holding the mmap semaphore for writing, except during stack expansion */
69210 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
69211 +{
69212 + unsigned long oldlimit, newlimit = 0UL;
69213 +
69214 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
69215 + return;
69216 +
69217 + spin_lock(&mm->page_table_lock);
69218 + oldlimit = mm->context.user_cs_limit;
69219 + if ((prot & VM_EXEC) && oldlimit < end)
69220 + /* USER_CS limit moved up */
69221 + newlimit = end;
69222 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
69223 + /* USER_CS limit moved down */
69224 + newlimit = start;
69225 +
69226 + if (newlimit) {
69227 + mm->context.user_cs_limit = newlimit;
69228 +
69229 +#ifdef CONFIG_SMP
69230 + wmb();
69231 + cpus_clear(mm->context.cpu_user_cs_mask);
69232 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
69233 +#endif
69234 +
69235 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
69236 + }
69237 + spin_unlock(&mm->page_table_lock);
69238 + if (newlimit == end) {
69239 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
69240 +
69241 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
69242 + if (is_vm_hugetlb_page(vma))
69243 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
69244 + else
69245 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
69246 + }
69247 +}
69248 +#endif
69249 +
69250 int
69251 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
69252 unsigned long start, unsigned long end, unsigned long newflags)
69253 @@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vm
69254 int error;
69255 int dirty_accountable = 0;
69256
69257 +#ifdef CONFIG_PAX_SEGMEXEC
69258 + struct vm_area_struct *vma_m = NULL;
69259 + unsigned long start_m, end_m;
69260 +
69261 + start_m = start + SEGMEXEC_TASK_SIZE;
69262 + end_m = end + SEGMEXEC_TASK_SIZE;
69263 +#endif
69264 +
69265 if (newflags == oldflags) {
69266 *pprev = vma;
69267 return 0;
69268 }
69269
69270 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
69271 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
69272 +
69273 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
69274 + return -ENOMEM;
69275 +
69276 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
69277 + return -ENOMEM;
69278 + }
69279 +
69280 /*
69281 * If we make a private mapping writable we increase our commit;
69282 * but (without finer accounting) cannot reduce our commit if we
69283 @@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vm
69284 }
69285 }
69286
69287 +#ifdef CONFIG_PAX_SEGMEXEC
69288 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
69289 + if (start != vma->vm_start) {
69290 + error = split_vma(mm, vma, start, 1);
69291 + if (error)
69292 + goto fail;
69293 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
69294 + *pprev = (*pprev)->vm_next;
69295 + }
69296 +
69297 + if (end != vma->vm_end) {
69298 + error = split_vma(mm, vma, end, 0);
69299 + if (error)
69300 + goto fail;
69301 + }
69302 +
69303 + if (pax_find_mirror_vma(vma)) {
69304 + error = __do_munmap(mm, start_m, end_m - start_m);
69305 + if (error)
69306 + goto fail;
69307 + } else {
69308 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69309 + if (!vma_m) {
69310 + error = -ENOMEM;
69311 + goto fail;
69312 + }
69313 + vma->vm_flags = newflags;
69314 + pax_mirror_vma(vma_m, vma);
69315 + }
69316 + }
69317 +#endif
69318 +
69319 /*
69320 * First try to merge with previous and/or next vma.
69321 */
69322 @@ -195,9 +293,21 @@ success:
69323 * vm_flags and vm_page_prot are protected by the mmap_sem
69324 * held in write mode.
69325 */
69326 +
69327 +#ifdef CONFIG_PAX_SEGMEXEC
69328 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
69329 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
69330 +#endif
69331 +
69332 vma->vm_flags = newflags;
69333 +
69334 +#ifdef CONFIG_PAX_MPROTECT
69335 + if (mm->binfmt && mm->binfmt->handle_mprotect)
69336 + mm->binfmt->handle_mprotect(vma, newflags);
69337 +#endif
69338 +
69339 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
69340 - vm_get_page_prot(newflags));
69341 + vm_get_page_prot(vma->vm_flags));
69342
69343 if (vma_wants_writenotify(vma)) {
69344 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
69345 @@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69346 end = start + len;
69347 if (end <= start)
69348 return -ENOMEM;
69349 +
69350 +#ifdef CONFIG_PAX_SEGMEXEC
69351 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
69352 + if (end > SEGMEXEC_TASK_SIZE)
69353 + return -EINVAL;
69354 + } else
69355 +#endif
69356 +
69357 + if (end > TASK_SIZE)
69358 + return -EINVAL;
69359 +
69360 if (!arch_validate_prot(prot))
69361 return -EINVAL;
69362
69363 @@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69364 /*
69365 * Does the application expect PROT_READ to imply PROT_EXEC:
69366 */
69367 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
69368 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
69369 prot |= PROT_EXEC;
69370
69371 vm_flags = calc_vm_prot_bits(prot);
69372 @@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69373 if (start > vma->vm_start)
69374 prev = vma;
69375
69376 +#ifdef CONFIG_PAX_MPROTECT
69377 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
69378 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
69379 +#endif
69380 +
69381 for (nstart = start ; ; ) {
69382 unsigned long newflags;
69383
69384 @@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69385
69386 /* newflags >> 4 shift VM_MAY% in place of VM_% */
69387 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
69388 + if (prot & (PROT_WRITE | PROT_EXEC))
69389 + gr_log_rwxmprotect(vma->vm_file);
69390 +
69391 + error = -EACCES;
69392 + goto out;
69393 + }
69394 +
69395 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
69396 error = -EACCES;
69397 goto out;
69398 }
69399 @@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69400 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
69401 if (error)
69402 goto out;
69403 +
69404 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
69405 +
69406 nstart = tmp;
69407
69408 if (nstart < prev->vm_end)
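
The mprotect() hunk above adds grsecurity logging and an ACL hook around the existing `(newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)` check, which works because each VM_MAY* bit sits exactly four positions above its VM_* counterpart. A standalone sketch of that bit trick; the flag values follow the 2.6.32 include/linux/mm.h layout and the helper name is made up:

#include <stdio.h>

#define VM_READ     0x0001UL
#define VM_WRITE    0x0002UL
#define VM_EXEC     0x0004UL
#define VM_MAYREAD  0x0010UL
#define VM_MAYWRITE 0x0020UL
#define VM_MAYEXEC  0x0040UL

/* returns the VM_* rights requested in newflags whose VM_MAY* counterpart is missing */
static unsigned long rights_not_allowed(unsigned long newflags)
{
        /* newflags >> 4 moves every VM_MAY* bit onto its VM_* position */
        return (newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC);
}

int main(void)
{
        /* write requested on a mapping that may only read and execute */
        printf("%#lx\n", rights_not_allowed(VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYEXEC)); /* 0x2 */
        printf("%#lx\n", rights_not_allowed(VM_READ | VM_MAYREAD));                         /* 0 */
        return 0;
}
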
69409 diff -urNp linux-2.6.32.45/mm/mremap.c linux-2.6.32.45/mm/mremap.c
69410 --- linux-2.6.32.45/mm/mremap.c 2011-04-17 17:00:52.000000000 -0400
69411 +++ linux-2.6.32.45/mm/mremap.c 2011-04-17 17:03:58.000000000 -0400
69412 @@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_str
69413 continue;
69414 pte = ptep_clear_flush(vma, old_addr, old_pte);
69415 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
69416 +
69417 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69418 + if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
69419 + pte = pte_exprotect(pte);
69420 +#endif
69421 +
69422 set_pte_at(mm, new_addr, new_pte, pte);
69423 }
69424
69425 @@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_res
69426 if (is_vm_hugetlb_page(vma))
69427 goto Einval;
69428
69429 +#ifdef CONFIG_PAX_SEGMEXEC
69430 + if (pax_find_mirror_vma(vma))
69431 + goto Einval;
69432 +#endif
69433 +
69434 /* We can't remap across vm area boundaries */
69435 if (old_len > vma->vm_end - addr)
69436 goto Efault;
69437 @@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned
69438 unsigned long ret = -EINVAL;
69439 unsigned long charged = 0;
69440 unsigned long map_flags;
69441 + unsigned long pax_task_size = TASK_SIZE;
69442
69443 if (new_addr & ~PAGE_MASK)
69444 goto out;
69445
69446 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
69447 +#ifdef CONFIG_PAX_SEGMEXEC
69448 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
69449 + pax_task_size = SEGMEXEC_TASK_SIZE;
69450 +#endif
69451 +
69452 + pax_task_size -= PAGE_SIZE;
69453 +
69454 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
69455 goto out;
69456
69457 /* Check if the location we're moving into overlaps the
69458 * old location at all, and fail if it does.
69459 */
69460 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
69461 - goto out;
69462 -
69463 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
69464 + if (addr + old_len > new_addr && new_addr + new_len > addr)
69465 goto out;
69466
69467 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
69468 @@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long ad
69469 struct vm_area_struct *vma;
69470 unsigned long ret = -EINVAL;
69471 unsigned long charged = 0;
69472 + unsigned long pax_task_size = TASK_SIZE;
69473
69474 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
69475 goto out;
69476 @@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long ad
69477 if (!new_len)
69478 goto out;
69479
69480 +#ifdef CONFIG_PAX_SEGMEXEC
69481 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
69482 + pax_task_size = SEGMEXEC_TASK_SIZE;
69483 +#endif
69484 +
69485 + pax_task_size -= PAGE_SIZE;
69486 +
69487 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
69488 + old_len > pax_task_size || addr > pax_task_size-old_len)
69489 + goto out;
69490 +
69491 if (flags & MREMAP_FIXED) {
69492 if (flags & MREMAP_MAYMOVE)
69493 ret = mremap_to(addr, old_len, new_addr, new_len);
69494 @@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long ad
69495 addr + new_len);
69496 }
69497 ret = addr;
69498 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
69499 goto out;
69500 }
69501 }
69502 @@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long ad
69503 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
69504 if (ret)
69505 goto out;
69506 +
69507 + map_flags = vma->vm_flags;
69508 ret = move_vma(vma, addr, old_len, new_len, new_addr);
69509 + if (!(ret & ~PAGE_MASK)) {
69510 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
69511 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
69512 + }
69513 }
69514 out:
69515 if (ret & ~PAGE_MASK)
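
The mremap_to() hunk above collapses two one-sided comparisons into the standard intersection test for the half-open ranges [addr, addr + old_len) and [new_addr, new_addr + new_len). A small sketch checking that the old and new forms agree on a few cases; all addresses are made up:

#include <stdbool.h>
#include <stdio.h>

static bool overlaps_old(unsigned long addr, unsigned long old_len,
                         unsigned long new_addr, unsigned long new_len)
{
        if (new_addr <= addr && new_addr + new_len > addr)
                return true;
        if (addr <= new_addr && addr + old_len > new_addr)
                return true;
        return false;
}

static bool overlaps_new(unsigned long addr, unsigned long old_len,
                         unsigned long new_addr, unsigned long new_len)
{
        return addr + old_len > new_addr && new_addr + new_len > addr;
}

int main(void)
{
        const struct { unsigned long a, ol, n, nl; } t[] = {
                { 0x1000, 0x2000, 0x3000, 0x1000 },  /* ranges touch at 0x3000: no overlap */
                { 0x1000, 0x2000, 0x2800, 0x1000 },  /* partial overlap */
                { 0x4000, 0x1000, 0x1000, 0x1000 },  /* disjoint */
        };

        for (int i = 0; i < 3; i++)
                printf("%d %d\n", overlaps_old(t[i].a, t[i].ol, t[i].n, t[i].nl),
                                  overlaps_new(t[i].a, t[i].ol, t[i].n, t[i].nl));
        return 0;
}
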
69516 diff -urNp linux-2.6.32.45/mm/nommu.c linux-2.6.32.45/mm/nommu.c
69517 --- linux-2.6.32.45/mm/nommu.c 2011-03-27 14:31:47.000000000 -0400
69518 +++ linux-2.6.32.45/mm/nommu.c 2011-04-17 15:56:46.000000000 -0400
69519 @@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMI
69520 int sysctl_overcommit_ratio = 50; /* default is 50% */
69521 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
69522 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
69523 -int heap_stack_gap = 0;
69524
69525 atomic_long_t mmap_pages_allocated;
69526
69527 @@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct m
69528 EXPORT_SYMBOL(find_vma);
69529
69530 /*
69531 - * find a VMA
69532 - * - we don't extend stack VMAs under NOMMU conditions
69533 - */
69534 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
69535 -{
69536 - return find_vma(mm, addr);
69537 -}
69538 -
69539 -/*
69540 * expand a stack to a given address
69541 * - not supported under NOMMU conditions
69542 */
69543 diff -urNp linux-2.6.32.45/mm/page_alloc.c linux-2.6.32.45/mm/page_alloc.c
69544 --- linux-2.6.32.45/mm/page_alloc.c 2011-06-25 12:55:35.000000000 -0400
69545 +++ linux-2.6.32.45/mm/page_alloc.c 2011-07-09 09:13:08.000000000 -0400
69546 @@ -289,7 +289,7 @@ out:
69547 * This usage means that zero-order pages may not be compound.
69548 */
69549
69550 -static void free_compound_page(struct page *page)
69551 +void free_compound_page(struct page *page)
69552 {
69553 __free_pages_ok(page, compound_order(page));
69554 }
69555 @@ -587,6 +587,10 @@ static void __free_pages_ok(struct page
69556 int bad = 0;
69557 int wasMlocked = __TestClearPageMlocked(page);
69558
69559 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
69560 + unsigned long index = 1UL << order;
69561 +#endif
69562 +
69563 kmemcheck_free_shadow(page, order);
69564
69565 for (i = 0 ; i < (1 << order) ; ++i)
69566 @@ -599,6 +603,12 @@ static void __free_pages_ok(struct page
69567 debug_check_no_obj_freed(page_address(page),
69568 PAGE_SIZE << order);
69569 }
69570 +
69571 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
69572 + for (; index; --index)
69573 + sanitize_highpage(page + index - 1);
69574 +#endif
69575 +
69576 arch_free_page(page, order);
69577 kernel_map_pages(page, 1 << order, 0);
69578
69579 @@ -702,8 +712,10 @@ static int prep_new_page(struct page *pa
69580 arch_alloc_page(page, order);
69581 kernel_map_pages(page, 1 << order, 1);
69582
69583 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
69584 if (gfp_flags & __GFP_ZERO)
69585 prep_zero_page(page, order, gfp_flags);
69586 +#endif
69587
69588 if (order && (gfp_flags & __GFP_COMP))
69589 prep_compound_page(page, order);
69590 @@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct pa
69591 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
69592 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
69593 }
69594 +
69595 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
69596 + sanitize_highpage(page);
69597 +#endif
69598 +
69599 arch_free_page(page, 0);
69600 kernel_map_pages(page, 1, 0);
69601
69602 @@ -2179,6 +2196,8 @@ void show_free_areas(void)
69603 int cpu;
69604 struct zone *zone;
69605
69606 + pax_track_stack();
69607 +
69608 for_each_populated_zone(zone) {
69609 show_node(zone);
69610 printk("%s per-cpu:\n", zone->name);
69611 @@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct p
69612 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
69613 }
69614 #else
69615 -static void inline setup_usemap(struct pglist_data *pgdat,
69616 +static inline void setup_usemap(struct pglist_data *pgdat,
69617 struct zone *zone, unsigned long zonesize) {}
69618 #endif /* CONFIG_SPARSEMEM */
69619
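
With CONFIG_PAX_MEMORY_SANITIZE the free paths above scrub every page of a freed block through sanitize_highpage(), and prep_new_page() correspondingly skips the __GFP_ZERO pre-zeroing, since freed pages come back already cleared. A loose user-space analogue of free-time scrubbing; the wrapper names and sizes are invented for the sketch:

#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096UL

/* wipe the whole block before giving it back, analogous to the
 * sanitize_highpage() loop added to __free_pages_ok() above */
static void sanitized_free(void *block, unsigned int order)
{
        memset(block, 0, PAGE_SIZE << order);
        free(block);
}

int main(void)
{
        void *p = aligned_alloc(PAGE_SIZE, PAGE_SIZE << 1);   /* an order-1 block */

        if (!p)
                return 1;
        memset(p, 0xa5, PAGE_SIZE << 1);   /* simulate use of the pages */
        sanitized_free(p, 1);              /* contents are wiped before release */
        return 0;
}
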
69620 diff -urNp linux-2.6.32.45/mm/percpu.c linux-2.6.32.45/mm/percpu.c
69621 --- linux-2.6.32.45/mm/percpu.c 2011-03-27 14:31:47.000000000 -0400
69622 +++ linux-2.6.32.45/mm/percpu.c 2011-04-17 15:56:46.000000000 -0400
69623 @@ -115,7 +115,7 @@ static unsigned int pcpu_first_unit_cpu
69624 static unsigned int pcpu_last_unit_cpu __read_mostly;
69625
69626 /* the address of the first chunk which starts with the kernel static area */
69627 -void *pcpu_base_addr __read_mostly;
69628 +void *pcpu_base_addr __read_only;
69629 EXPORT_SYMBOL_GPL(pcpu_base_addr);
69630
69631 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
69632 diff -urNp linux-2.6.32.45/mm/rmap.c linux-2.6.32.45/mm/rmap.c
69633 --- linux-2.6.32.45/mm/rmap.c 2011-03-27 14:31:47.000000000 -0400
69634 +++ linux-2.6.32.45/mm/rmap.c 2011-04-17 15:56:46.000000000 -0400
69635 @@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_stru
69636 /* page_table_lock to protect against threads */
69637 spin_lock(&mm->page_table_lock);
69638 if (likely(!vma->anon_vma)) {
69639 +
69640 +#ifdef CONFIG_PAX_SEGMEXEC
69641 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
69642 +
69643 + if (vma_m) {
69644 + BUG_ON(vma_m->anon_vma);
69645 + vma_m->anon_vma = anon_vma;
69646 + list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
69647 + }
69648 +#endif
69649 +
69650 vma->anon_vma = anon_vma;
69651 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
69652 allocated = NULL;
69653 diff -urNp linux-2.6.32.45/mm/shmem.c linux-2.6.32.45/mm/shmem.c
69654 --- linux-2.6.32.45/mm/shmem.c 2011-03-27 14:31:47.000000000 -0400
69655 +++ linux-2.6.32.45/mm/shmem.c 2011-05-18 20:09:37.000000000 -0400
69656 @@ -31,7 +31,7 @@
69657 #include <linux/swap.h>
69658 #include <linux/ima.h>
69659
69660 -static struct vfsmount *shm_mnt;
69661 +struct vfsmount *shm_mnt;
69662
69663 #ifdef CONFIG_SHMEM
69664 /*
69665 @@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *
69666 goto unlock;
69667 }
69668 entry = shmem_swp_entry(info, index, NULL);
69669 + if (!entry)
69670 + goto unlock;
69671 if (entry->val) {
69672 /*
69673 * The more uptodate page coming down from a stacked
69674 @@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_ent
69675 struct vm_area_struct pvma;
69676 struct page *page;
69677
69678 + pax_track_stack();
69679 +
69680 spol = mpol_cond_copy(&mpol,
69681 mpol_shared_policy_lookup(&info->policy, idx));
69682
69683 @@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *d
69684
69685 info = SHMEM_I(inode);
69686 inode->i_size = len-1;
69687 - if (len <= (char *)inode - (char *)info) {
69688 + if (len <= (char *)inode - (char *)info && len <= 64) {
69689 /* do it inline */
69690 memcpy(info, symname, len);
69691 inode->i_op = &shmem_symlink_inline_operations;
69692 @@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block
69693 int err = -ENOMEM;
69694
69695 /* Round up to L1_CACHE_BYTES to resist false sharing */
69696 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
69697 - L1_CACHE_BYTES), GFP_KERNEL);
69698 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
69699 if (!sbinfo)
69700 return -ENOMEM;
69701
69702 diff -urNp linux-2.6.32.45/mm/slab.c linux-2.6.32.45/mm/slab.c
69703 --- linux-2.6.32.45/mm/slab.c 2011-03-27 14:31:47.000000000 -0400
69704 +++ linux-2.6.32.45/mm/slab.c 2011-05-04 17:56:20.000000000 -0400
69705 @@ -174,7 +174,7 @@
69706
69707 /* Legal flag mask for kmem_cache_create(). */
69708 #if DEBUG
69709 -# define CREATE_MASK (SLAB_RED_ZONE | \
69710 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
69711 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
69712 SLAB_CACHE_DMA | \
69713 SLAB_STORE_USER | \
69714 @@ -182,7 +182,7 @@
69715 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
69716 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
69717 #else
69718 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
69719 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
69720 SLAB_CACHE_DMA | \
69721 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
69722 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
69723 @@ -308,7 +308,7 @@ struct kmem_list3 {
69724 * Need this for bootstrapping a per node allocator.
69725 */
69726 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
69727 -struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
69728 +struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
69729 #define CACHE_CACHE 0
69730 #define SIZE_AC MAX_NUMNODES
69731 #define SIZE_L3 (2 * MAX_NUMNODES)
69732 @@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_
69733 if ((x)->max_freeable < i) \
69734 (x)->max_freeable = i; \
69735 } while (0)
69736 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
69737 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
69738 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
69739 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
69740 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
69741 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
69742 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
69743 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
69744 #else
69745 #define STATS_INC_ACTIVE(x) do { } while (0)
69746 #define STATS_DEC_ACTIVE(x) do { } while (0)
69747 @@ -558,7 +558,7 @@ static inline void *index_to_obj(struct
69748 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
69749 */
69750 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
69751 - const struct slab *slab, void *obj)
69752 + const struct slab *slab, const void *obj)
69753 {
69754 u32 offset = (obj - slab->s_mem);
69755 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
69756 @@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
69757 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
69758 sizes[INDEX_AC].cs_size,
69759 ARCH_KMALLOC_MINALIGN,
69760 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
69761 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
69762 NULL);
69763
69764 if (INDEX_AC != INDEX_L3) {
69765 @@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
69766 kmem_cache_create(names[INDEX_L3].name,
69767 sizes[INDEX_L3].cs_size,
69768 ARCH_KMALLOC_MINALIGN,
69769 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
69770 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
69771 NULL);
69772 }
69773
69774 @@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
69775 sizes->cs_cachep = kmem_cache_create(names->name,
69776 sizes->cs_size,
69777 ARCH_KMALLOC_MINALIGN,
69778 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
69779 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
69780 NULL);
69781 }
69782 #ifdef CONFIG_ZONE_DMA
69783 @@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, vo
69784 }
69785 /* cpu stats */
69786 {
69787 - unsigned long allochit = atomic_read(&cachep->allochit);
69788 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
69789 - unsigned long freehit = atomic_read(&cachep->freehit);
69790 - unsigned long freemiss = atomic_read(&cachep->freemiss);
69791 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
69792 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
69793 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
69794 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
69795
69796 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
69797 allochit, allocmiss, freehit, freemiss);
69798 @@ -4471,15 +4471,66 @@ static const struct file_operations proc
69799
69800 static int __init slab_proc_init(void)
69801 {
69802 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
69803 + mode_t gr_mode = S_IRUGO;
69804 +
69805 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
69806 + gr_mode = S_IRUSR;
69807 +#endif
69808 +
69809 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
69810 #ifdef CONFIG_DEBUG_SLAB_LEAK
69811 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
69812 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
69813 #endif
69814 return 0;
69815 }
69816 module_init(slab_proc_init);
69817 #endif
69818
69819 +void check_object_size(const void *ptr, unsigned long n, bool to)
69820 +{
69821 +
69822 +#ifdef CONFIG_PAX_USERCOPY
69823 + struct page *page;
69824 + struct kmem_cache *cachep = NULL;
69825 + struct slab *slabp;
69826 + unsigned int objnr;
69827 + unsigned long offset;
69828 +
69829 + if (!n)
69830 + return;
69831 +
69832 + if (ZERO_OR_NULL_PTR(ptr))
69833 + goto report;
69834 +
69835 + if (!virt_addr_valid(ptr))
69836 + return;
69837 +
69838 + page = virt_to_head_page(ptr);
69839 +
69840 + if (!PageSlab(page)) {
69841 + if (object_is_on_stack(ptr, n) == -1)
69842 + goto report;
69843 + return;
69844 + }
69845 +
69846 + cachep = page_get_cache(page);
69847 + if (!(cachep->flags & SLAB_USERCOPY))
69848 + goto report;
69849 +
69850 + slabp = page_get_slab(page);
69851 + objnr = obj_to_index(cachep, slabp, ptr);
69852 + BUG_ON(objnr >= cachep->num);
69853 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
69854 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
69855 + return;
69856 +
69857 +report:
69858 + pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
69859 +#endif
69860 +
69861 +}
69862 +EXPORT_SYMBOL(check_object_size);
69863 +
69864 /**
69865 * ksize - get the actual amount of memory allocated for a given object
69866 * @objp: Pointer to the object
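
Once the SLAB version of check_object_size() above has located the slab object that ptr falls into, a user copy of n bytes is allowed only if it starts inside that object and does not run past its end; the kernel additionally subtracts obj_offset() to skip debug padding, which this minimal sketch ignores. The structure and sizes below are illustrative only:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* illustrative stand-in for "the object this pointer belongs to" */
struct object_span {
        uintptr_t start;        /* first byte of the slab object */
        unsigned long size;     /* obj_size(cachep) */
};

/* same shape as: offset <= obj_size(cachep) && n <= obj_size(cachep) - offset */
static bool usercopy_ok(const struct object_span *obj, uintptr_t ptr, unsigned long n)
{
        unsigned long offset = ptr - obj->start;

        return offset <= obj->size && n <= obj->size - offset;
}

int main(void)
{
        const struct object_span obj = { .start = 0x1000, .size = 192 };

        printf("%d\n", usercopy_ok(&obj, 0x1000 + 64, 128));   /* fits exactly: 1 */
        printf("%d\n", usercopy_ok(&obj, 0x1000 + 64, 129));   /* one byte past the object: 0 */
        return 0;
}
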
69867 diff -urNp linux-2.6.32.45/mm/slob.c linux-2.6.32.45/mm/slob.c
69868 --- linux-2.6.32.45/mm/slob.c 2011-03-27 14:31:47.000000000 -0400
69869 +++ linux-2.6.32.45/mm/slob.c 2011-07-06 19:53:33.000000000 -0400
69870 @@ -29,7 +29,7 @@
69871 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
69872 * alloc_pages() directly, allocating compound pages so the page order
69873 * does not have to be separately tracked, and also stores the exact
69874 - * allocation size in page->private so that it can be used to accurately
69875 + * allocation size in slob_page->size so that it can be used to accurately
69876 * provide ksize(). These objects are detected in kfree() because slob_page()
69877 * is false for them.
69878 *
69879 @@ -58,6 +58,7 @@
69880 */
69881
69882 #include <linux/kernel.h>
69883 +#include <linux/sched.h>
69884 #include <linux/slab.h>
69885 #include <linux/mm.h>
69886 #include <linux/swap.h> /* struct reclaim_state */
69887 @@ -100,7 +101,8 @@ struct slob_page {
69888 unsigned long flags; /* mandatory */
69889 atomic_t _count; /* mandatory */
69890 slobidx_t units; /* free units left in page */
69891 - unsigned long pad[2];
69892 + unsigned long pad[1];
69893 + unsigned long size; /* size when >=PAGE_SIZE */
69894 slob_t *free; /* first free slob_t in page */
69895 struct list_head list; /* linked list of free pages */
69896 };
69897 @@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
69898 */
69899 static inline int is_slob_page(struct slob_page *sp)
69900 {
69901 - return PageSlab((struct page *)sp);
69902 + return PageSlab((struct page *)sp) && !sp->size;
69903 }
69904
69905 static inline void set_slob_page(struct slob_page *sp)
69906 @@ -148,7 +150,7 @@ static inline void clear_slob_page(struc
69907
69908 static inline struct slob_page *slob_page(const void *addr)
69909 {
69910 - return (struct slob_page *)virt_to_page(addr);
69911 + return (struct slob_page *)virt_to_head_page(addr);
69912 }
69913
69914 /*
69915 @@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_
69916 /*
69917 * Return the size of a slob block.
69918 */
69919 -static slobidx_t slob_units(slob_t *s)
69920 +static slobidx_t slob_units(const slob_t *s)
69921 {
69922 if (s->units > 0)
69923 return s->units;
69924 @@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
69925 /*
69926 * Return the next free slob block pointer after this one.
69927 */
69928 -static slob_t *slob_next(slob_t *s)
69929 +static slob_t *slob_next(const slob_t *s)
69930 {
69931 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
69932 slobidx_t next;
69933 @@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
69934 /*
69935 * Returns true if s is the last free block in its page.
69936 */
69937 -static int slob_last(slob_t *s)
69938 +static int slob_last(const slob_t *s)
69939 {
69940 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
69941 }
69942 @@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, i
69943 if (!page)
69944 return NULL;
69945
69946 + set_slob_page(page);
69947 return page_address(page);
69948 }
69949
69950 @@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp
69951 if (!b)
69952 return NULL;
69953 sp = slob_page(b);
69954 - set_slob_page(sp);
69955
69956 spin_lock_irqsave(&slob_lock, flags);
69957 sp->units = SLOB_UNITS(PAGE_SIZE);
69958 sp->free = b;
69959 + sp->size = 0;
69960 INIT_LIST_HEAD(&sp->list);
69961 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
69962 set_slob_page_free(sp, slob_list);
69963 @@ -475,10 +478,9 @@ out:
69964 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
69965 #endif
69966
69967 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
69968 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
69969 {
69970 - unsigned int *m;
69971 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
69972 + slob_t *m;
69973 void *ret;
69974
69975 lockdep_trace_alloc(gfp);
69976 @@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t
69977
69978 if (!m)
69979 return NULL;
69980 - *m = size;
69981 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
69982 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
69983 + m[0].units = size;
69984 + m[1].units = align;
69985 ret = (void *)m + align;
69986
69987 trace_kmalloc_node(_RET_IP_, ret,
69988 @@ -501,16 +506,25 @@ void *__kmalloc_node(size_t size, gfp_t
69989
69990 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
69991 if (ret) {
69992 - struct page *page;
69993 - page = virt_to_page(ret);
69994 - page->private = size;
69995 + struct slob_page *sp;
69996 + sp = slob_page(ret);
69997 + sp->size = size;
69998 }
69999
70000 trace_kmalloc_node(_RET_IP_, ret,
70001 size, PAGE_SIZE << order, gfp, node);
70002 }
70003
70004 - kmemleak_alloc(ret, size, 1, gfp);
70005 + return ret;
70006 +}
70007 +
70008 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70009 +{
70010 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70011 + void *ret = __kmalloc_node_align(size, gfp, node, align);
70012 +
70013 + if (!ZERO_OR_NULL_PTR(ret))
70014 + kmemleak_alloc(ret, size, 1, gfp);
70015 return ret;
70016 }
70017 EXPORT_SYMBOL(__kmalloc_node);
70018 @@ -528,13 +542,88 @@ void kfree(const void *block)
70019 sp = slob_page(block);
70020 if (is_slob_page(sp)) {
70021 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70022 - unsigned int *m = (unsigned int *)(block - align);
70023 - slob_free(m, *m + align);
70024 - } else
70025 + slob_t *m = (slob_t *)(block - align);
70026 + slob_free(m, m[0].units + align);
70027 + } else {
70028 + clear_slob_page(sp);
70029 + free_slob_page(sp);
70030 + sp->size = 0;
70031 put_page(&sp->page);
70032 + }
70033 }
70034 EXPORT_SYMBOL(kfree);
70035
70036 +void check_object_size(const void *ptr, unsigned long n, bool to)
70037 +{
70038 +
70039 +#ifdef CONFIG_PAX_USERCOPY
70040 + struct slob_page *sp;
70041 + const slob_t *free;
70042 + const void *base;
70043 + unsigned long flags;
70044 +
70045 + if (!n)
70046 + return;
70047 +
70048 + if (ZERO_OR_NULL_PTR(ptr))
70049 + goto report;
70050 +
70051 + if (!virt_addr_valid(ptr))
70052 + return;
70053 +
70054 + sp = slob_page(ptr);
70055 + if (!PageSlab((struct page*)sp)) {
70056 + if (object_is_on_stack(ptr, n) == -1)
70057 + goto report;
70058 + return;
70059 + }
70060 +
70061 + if (sp->size) {
70062 + base = page_address(&sp->page);
70063 + if (base <= ptr && n <= sp->size - (ptr - base))
70064 + return;
70065 + goto report;
70066 + }
70067 +
70068 + /* some tricky double walking to find the chunk */
70069 + spin_lock_irqsave(&slob_lock, flags);
70070 + base = (void *)((unsigned long)ptr & PAGE_MASK);
70071 + free = sp->free;
70072 +
70073 + while (!slob_last(free) && (void *)free <= ptr) {
70074 + base = free + slob_units(free);
70075 + free = slob_next(free);
70076 + }
70077 +
70078 + while (base < (void *)free) {
70079 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
70080 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
70081 + int offset;
70082 +
70083 + if (ptr < base + align)
70084 + break;
70085 +
70086 + offset = ptr - base - align;
70087 + if (offset >= m) {
70088 + base += size;
70089 + continue;
70090 + }
70091 +
70092 + if (n > m - offset)
70093 + break;
70094 +
70095 + spin_unlock_irqrestore(&slob_lock, flags);
70096 + return;
70097 + }
70098 +
70099 + spin_unlock_irqrestore(&slob_lock, flags);
70100 +report:
70101 + pax_report_usercopy(ptr, n, to, NULL);
70102 +#endif
70103 +
70104 +}
70105 +EXPORT_SYMBOL(check_object_size);
70106 +
70107 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
70108 size_t ksize(const void *block)
70109 {
70110 @@ -547,10 +636,10 @@ size_t ksize(const void *block)
70111 sp = slob_page(block);
70112 if (is_slob_page(sp)) {
70113 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70114 - unsigned int *m = (unsigned int *)(block - align);
70115 - return SLOB_UNITS(*m) * SLOB_UNIT;
70116 + slob_t *m = (slob_t *)(block - align);
70117 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
70118 } else
70119 - return sp->page.private;
70120 + return sp->size;
70121 }
70122 EXPORT_SYMBOL(ksize);
70123
70124 @@ -566,8 +655,13 @@ struct kmem_cache *kmem_cache_create(con
70125 {
70126 struct kmem_cache *c;
70127
70128 +#ifdef CONFIG_PAX_USERCOPY
70129 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
70130 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
70131 +#else
70132 c = slob_alloc(sizeof(struct kmem_cache),
70133 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
70134 +#endif
70135
70136 if (c) {
70137 c->name = name;
70138 @@ -605,17 +699,25 @@ void *kmem_cache_alloc_node(struct kmem_
70139 {
70140 void *b;
70141
70142 +#ifdef CONFIG_PAX_USERCOPY
70143 + b = __kmalloc_node_align(c->size, flags, node, c->align);
70144 +#else
70145 if (c->size < PAGE_SIZE) {
70146 b = slob_alloc(c->size, flags, c->align, node);
70147 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70148 SLOB_UNITS(c->size) * SLOB_UNIT,
70149 flags, node);
70150 } else {
70151 + struct slob_page *sp;
70152 +
70153 b = slob_new_pages(flags, get_order(c->size), node);
70154 + sp = slob_page(b);
70155 + sp->size = c->size;
70156 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70157 PAGE_SIZE << get_order(c->size),
70158 flags, node);
70159 }
70160 +#endif
70161
70162 if (c->ctor)
70163 c->ctor(b);
70164 @@ -627,10 +729,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
70165
70166 static void __kmem_cache_free(void *b, int size)
70167 {
70168 - if (size < PAGE_SIZE)
70169 + struct slob_page *sp = slob_page(b);
70170 +
70171 + if (is_slob_page(sp))
70172 slob_free(b, size);
70173 - else
70174 + else {
70175 + clear_slob_page(sp);
70176 + free_slob_page(sp);
70177 + sp->size = 0;
70178 slob_free_pages(b, get_order(size));
70179 + }
70180 }
70181
70182 static void kmem_rcu_free(struct rcu_head *head)
70183 @@ -643,18 +751,32 @@ static void kmem_rcu_free(struct rcu_hea
70184
70185 void kmem_cache_free(struct kmem_cache *c, void *b)
70186 {
70187 + int size = c->size;
70188 +
70189 +#ifdef CONFIG_PAX_USERCOPY
70190 + if (size + c->align < PAGE_SIZE) {
70191 + size += c->align;
70192 + b -= c->align;
70193 + }
70194 +#endif
70195 +
70196 kmemleak_free_recursive(b, c->flags);
70197 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
70198 struct slob_rcu *slob_rcu;
70199 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
70200 + slob_rcu = b + (size - sizeof(struct slob_rcu));
70201 INIT_RCU_HEAD(&slob_rcu->head);
70202 - slob_rcu->size = c->size;
70203 + slob_rcu->size = size;
70204 call_rcu(&slob_rcu->head, kmem_rcu_free);
70205 } else {
70206 - __kmem_cache_free(b, c->size);
70207 + __kmem_cache_free(b, size);
70208 }
70209
70210 +#ifdef CONFIG_PAX_USERCOPY
70211 + trace_kfree(_RET_IP_, b);
70212 +#else
70213 trace_kmem_cache_free(_RET_IP_, b);
70214 +#endif
70215 +
70216 }
70217 EXPORT_SYMBOL(kmem_cache_free);
70218
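
The SLOB hunks above replace the single unsigned int size prefix of a small kmalloc() block with two slob_t words holding the payload size and the alignment gap, and record the size of page-order allocations in slob_page->size instead of page->private, so kfree(), ksize() and check_object_size() can recover block bounds. A toy user-space analogue of the small-block header; the names are invented, and where the kernel rounds ksize() up to SLOB_UNIT multiples this sketch just returns the recorded size:

#include <stdio.h>
#include <stdlib.h>

/* the two words in front of the returned pointer, like m[0].units / m[1].units in the patch */
struct hdr {
        unsigned long size;     /* payload size requested by the caller */
        unsigned long align;    /* gap between the header start and the payload */
};

#define ALIGN_GAP sizeof(struct hdr)    /* the gap is just big enough to hold the header here */

static void *toy_kmalloc(unsigned long size)
{
        struct hdr *m = malloc(ALIGN_GAP + size);

        if (!m)
                return NULL;
        m->size = size;
        m->align = ALIGN_GAP;
        return (char *)m + ALIGN_GAP;
}

static unsigned long toy_ksize(const void *block)
{
        const struct hdr *m = (const struct hdr *)((const char *)block - ALIGN_GAP);

        return m->size;
}

static void toy_kfree(void *block)
{
        free((char *)block - ALIGN_GAP);
}

int main(void)
{
        void *p = toy_kmalloc(100);

        if (!p)
                return 1;
        printf("%lu\n", toy_ksize(p));  /* 100 */
        toy_kfree(p);
        return 0;
}
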
70219 diff -urNp linux-2.6.32.45/mm/slub.c linux-2.6.32.45/mm/slub.c
70220 --- linux-2.6.32.45/mm/slub.c 2011-03-27 14:31:47.000000000 -0400
70221 +++ linux-2.6.32.45/mm/slub.c 2011-04-17 15:56:46.000000000 -0400
70222 @@ -410,7 +410,7 @@ static void print_track(const char *s, s
70223 if (!t->addr)
70224 return;
70225
70226 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
70227 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
70228 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
70229 }
70230
70231 @@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *
70232
70233 page = virt_to_head_page(x);
70234
70235 + BUG_ON(!PageSlab(page));
70236 +
70237 slab_free(s, page, x, _RET_IP_);
70238
70239 trace_kmem_cache_free(_RET_IP_, x);
70240 @@ -1937,7 +1939,7 @@ static int slub_min_objects;
70241 * Merge control. If this is set then no merging of slab caches will occur.
70242 * (Could be removed. This was introduced to pacify the merge skeptics.)
70243 */
70244 -static int slub_nomerge;
70245 +static int slub_nomerge = 1;
70246
70247 /*
70248 * Calculate the order of allocation given an slab object size.
70249 @@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_c
70250 * list to avoid pounding the page allocator excessively.
70251 */
70252 set_min_partial(s, ilog2(s->size));
70253 - s->refcount = 1;
70254 + atomic_set(&s->refcount, 1);
70255 #ifdef CONFIG_NUMA
70256 s->remote_node_defrag_ratio = 1000;
70257 #endif
70258 @@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struc
70259 void kmem_cache_destroy(struct kmem_cache *s)
70260 {
70261 down_write(&slub_lock);
70262 - s->refcount--;
70263 - if (!s->refcount) {
70264 + if (atomic_dec_and_test(&s->refcount)) {
70265 list_del(&s->list);
70266 up_write(&slub_lock);
70267 if (kmem_cache_close(s)) {
70268 @@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(cha
70269 __setup("slub_nomerge", setup_slub_nomerge);
70270
70271 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
70272 - const char *name, int size, gfp_t gfp_flags)
70273 + const char *name, int size, gfp_t gfp_flags, unsigned int flags)
70274 {
70275 - unsigned int flags = 0;
70276 -
70277 if (gfp_flags & SLUB_DMA)
70278 - flags = SLAB_CACHE_DMA;
70279 + flags |= SLAB_CACHE_DMA;
70280
70281 /*
70282 * This function is called with IRQs disabled during early-boot on
70283 @@ -2915,6 +2914,46 @@ void *__kmalloc_node(size_t size, gfp_t
70284 EXPORT_SYMBOL(__kmalloc_node);
70285 #endif
70286
70287 +void check_object_size(const void *ptr, unsigned long n, bool to)
70288 +{
70289 +
70290 +#ifdef CONFIG_PAX_USERCOPY
70291 + struct page *page;
70292 + struct kmem_cache *s = NULL;
70293 + unsigned long offset;
70294 +
70295 + if (!n)
70296 + return;
70297 +
70298 + if (ZERO_OR_NULL_PTR(ptr))
70299 + goto report;
70300 +
70301 + if (!virt_addr_valid(ptr))
70302 + return;
70303 +
70304 + page = get_object_page(ptr);
70305 +
70306 + if (!page) {
70307 + if (object_is_on_stack(ptr, n) == -1)
70308 + goto report;
70309 + return;
70310 + }
70311 +
70312 + s = page->slab;
70313 + if (!(s->flags & SLAB_USERCOPY))
70314 + goto report;
70315 +
70316 + offset = (ptr - page_address(page)) % s->size;
70317 + if (offset <= s->objsize && n <= s->objsize - offset)
70318 + return;
70319 +
70320 +report:
70321 + pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
70322 +#endif
70323 +
70324 +}
70325 +EXPORT_SYMBOL(check_object_size);
70326 +
70327 size_t ksize(const void *object)
70328 {
70329 struct page *page;
70330 @@ -3185,8 +3224,8 @@ void __init kmem_cache_init(void)
70331 * kmem_cache_open for slab_state == DOWN.
70332 */
70333 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
70334 - sizeof(struct kmem_cache_node), GFP_NOWAIT);
70335 - kmalloc_caches[0].refcount = -1;
70336 + sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
70337 + atomic_set(&kmalloc_caches[0].refcount, -1);
70338 caches++;
70339
70340 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
70341 @@ -3198,18 +3237,18 @@ void __init kmem_cache_init(void)
70342 /* Caches that are not of the two-to-the-power-of size */
70343 if (KMALLOC_MIN_SIZE <= 32) {
70344 create_kmalloc_cache(&kmalloc_caches[1],
70345 - "kmalloc-96", 96, GFP_NOWAIT);
70346 + "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
70347 caches++;
70348 }
70349 if (KMALLOC_MIN_SIZE <= 64) {
70350 create_kmalloc_cache(&kmalloc_caches[2],
70351 - "kmalloc-192", 192, GFP_NOWAIT);
70352 + "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
70353 caches++;
70354 }
70355
70356 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
70357 create_kmalloc_cache(&kmalloc_caches[i],
70358 - "kmalloc", 1 << i, GFP_NOWAIT);
70359 + "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
70360 caches++;
70361 }
70362
70363 @@ -3293,7 +3332,7 @@ static int slab_unmergeable(struct kmem_
70364 /*
70365 * We may have set a slab to be unmergeable during bootstrap.
70366 */
70367 - if (s->refcount < 0)
70368 + if (atomic_read(&s->refcount) < 0)
70369 return 1;
70370
70371 return 0;
70372 @@ -3353,7 +3392,7 @@ struct kmem_cache *kmem_cache_create(con
70373 if (s) {
70374 int cpu;
70375
70376 - s->refcount++;
70377 + atomic_inc(&s->refcount);
70378 /*
70379 * Adjust the object sizes so that we clear
70380 * the complete object on kzalloc.
70381 @@ -3372,7 +3411,7 @@ struct kmem_cache *kmem_cache_create(con
70382
70383 if (sysfs_slab_alias(s, name)) {
70384 down_write(&slub_lock);
70385 - s->refcount--;
70386 + atomic_dec(&s->refcount);
70387 up_write(&slub_lock);
70388 goto err;
70389 }
70390 @@ -4101,7 +4140,7 @@ SLAB_ATTR_RO(ctor);
70391
70392 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
70393 {
70394 - return sprintf(buf, "%d\n", s->refcount - 1);
70395 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
70396 }
70397 SLAB_ATTR_RO(aliases);
70398
70399 @@ -4503,7 +4542,7 @@ static void kmem_cache_release(struct ko
70400 kfree(s);
70401 }
70402
70403 -static struct sysfs_ops slab_sysfs_ops = {
70404 +static const struct sysfs_ops slab_sysfs_ops = {
70405 .show = slab_attr_show,
70406 .store = slab_attr_store,
70407 };
70408 @@ -4522,7 +4561,7 @@ static int uevent_filter(struct kset *ks
70409 return 0;
70410 }
70411
70412 -static struct kset_uevent_ops slab_uevent_ops = {
70413 +static const struct kset_uevent_ops slab_uevent_ops = {
70414 .filter = uevent_filter,
70415 };
70416
70417 @@ -4785,7 +4824,13 @@ static const struct file_operations proc
70418
70419 static int __init slab_proc_init(void)
70420 {
70421 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
70422 + mode_t gr_mode = S_IRUGO;
70423 +
70424 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
70425 + gr_mode = S_IRUSR;
70426 +#endif
70427 +
70428 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
70429 return 0;
70430 }
70431 module_init(slab_proc_init);
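
The SLUB hunks above convert kmem_cache->refcount from a plain int to an atomic_t, taking extra references with atomic_inc() and tearing the cache down only when atomic_dec_and_test() observes the count reach zero. A user-space sketch of that reference-counting pattern with C11 atomics; the structure and helper names are invented for illustration:

#include <stdatomic.h>
#include <stdlib.h>

struct cache {
        atomic_int refcount;
        /* ... other cache fields ... */
};

static struct cache *cache_create(void)
{
        struct cache *c = malloc(sizeof(*c));

        if (c)
                atomic_init(&c->refcount, 1);
        return c;
}

static void cache_get(struct cache *c)
{
        atomic_fetch_add(&c->refcount, 1);      /* like atomic_inc(&s->refcount) */
}

static void cache_put(struct cache *c)
{
        /* mirrors: if (atomic_dec_and_test(&s->refcount)) { release the cache } */
        if (atomic_fetch_sub(&c->refcount, 1) == 1)
                free(c);
}

int main(void)
{
        struct cache *c = cache_create();

        if (!c)
                return 1;
        cache_get(c);   /* an alias reuses the cache, as in kmem_cache_create() */
        cache_put(c);
        cache_put(c);   /* last reference: the cache is freed */
        return 0;
}
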
70432 diff -urNp linux-2.6.32.45/mm/swap.c linux-2.6.32.45/mm/swap.c
70433 --- linux-2.6.32.45/mm/swap.c 2011-03-27 14:31:47.000000000 -0400
70434 +++ linux-2.6.32.45/mm/swap.c 2011-07-09 09:15:19.000000000 -0400
70435 @@ -30,6 +30,7 @@
70436 #include <linux/notifier.h>
70437 #include <linux/backing-dev.h>
70438 #include <linux/memcontrol.h>
70439 +#include <linux/hugetlb.h>
70440
70441 #include "internal.h"
70442
70443 @@ -65,6 +66,8 @@ static void put_compound_page(struct pag
70444 compound_page_dtor *dtor;
70445
70446 dtor = get_compound_page_dtor(page);
70447 + if (!PageHuge(page))
70448 + BUG_ON(dtor != free_compound_page);
70449 (*dtor)(page);
70450 }
70451 }
70452 diff -urNp linux-2.6.32.45/mm/util.c linux-2.6.32.45/mm/util.c
70453 --- linux-2.6.32.45/mm/util.c 2011-03-27 14:31:47.000000000 -0400
70454 +++ linux-2.6.32.45/mm/util.c 2011-04-17 15:56:46.000000000 -0400
70455 @@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
70456 void arch_pick_mmap_layout(struct mm_struct *mm)
70457 {
70458 mm->mmap_base = TASK_UNMAPPED_BASE;
70459 +
70460 +#ifdef CONFIG_PAX_RANDMMAP
70461 + if (mm->pax_flags & MF_PAX_RANDMMAP)
70462 + mm->mmap_base += mm->delta_mmap;
70463 +#endif
70464 +
70465 mm->get_unmapped_area = arch_get_unmapped_area;
70466 mm->unmap_area = arch_unmap_area;
70467 }
70468 diff -urNp linux-2.6.32.45/mm/vmalloc.c linux-2.6.32.45/mm/vmalloc.c
70469 --- linux-2.6.32.45/mm/vmalloc.c 2011-03-27 14:31:47.000000000 -0400
70470 +++ linux-2.6.32.45/mm/vmalloc.c 2011-04-17 15:56:46.000000000 -0400
70471 @@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd,
70472
70473 pte = pte_offset_kernel(pmd, addr);
70474 do {
70475 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
70476 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
70477 +
70478 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70479 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
70480 + BUG_ON(!pte_exec(*pte));
70481 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
70482 + continue;
70483 + }
70484 +#endif
70485 +
70486 + {
70487 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
70488 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
70489 + }
70490 } while (pte++, addr += PAGE_SIZE, addr != end);
70491 }
70492
70493 @@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, un
70494 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
70495 {
70496 pte_t *pte;
70497 + int ret = -ENOMEM;
70498
70499 /*
70500 * nr is a running index into the array which helps higher level
70501 @@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, un
70502 pte = pte_alloc_kernel(pmd, addr);
70503 if (!pte)
70504 return -ENOMEM;
70505 +
70506 + pax_open_kernel();
70507 do {
70508 struct page *page = pages[*nr];
70509
70510 - if (WARN_ON(!pte_none(*pte)))
70511 - return -EBUSY;
70512 - if (WARN_ON(!page))
70513 - return -ENOMEM;
70514 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70515 + if (!(pgprot_val(prot) & _PAGE_NX))
70516 + BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
70517 + else
70518 +#endif
70519 +
70520 + if (WARN_ON(!pte_none(*pte))) {
70521 + ret = -EBUSY;
70522 + goto out;
70523 + }
70524 + if (WARN_ON(!page)) {
70525 + ret = -ENOMEM;
70526 + goto out;
70527 + }
70528 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
70529 (*nr)++;
70530 } while (pte++, addr += PAGE_SIZE, addr != end);
70531 - return 0;
70532 + ret = 0;
70533 +out:
70534 + pax_close_kernel();
70535 + return ret;
70536 }
70537
70538 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
70539 @@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void
70540 * and fall back on vmalloc() if that fails. Others
70541 * just put it in the vmalloc space.
70542 */
70543 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
70544 +#ifdef CONFIG_MODULES
70545 +#ifdef MODULES_VADDR
70546 unsigned long addr = (unsigned long)x;
70547 if (addr >= MODULES_VADDR && addr < MODULES_END)
70548 return 1;
70549 #endif
70550 +
70551 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70552 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
70553 + return 1;
70554 +#endif
70555 +
70556 +#endif
70557 +
70558 return is_vmalloc_addr(x);
70559 }
70560
70561 @@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void
70562
70563 if (!pgd_none(*pgd)) {
70564 pud_t *pud = pud_offset(pgd, addr);
70565 +#ifdef CONFIG_X86
70566 + if (!pud_large(*pud))
70567 +#endif
70568 if (!pud_none(*pud)) {
70569 pmd_t *pmd = pmd_offset(pud, addr);
70570 +#ifdef CONFIG_X86
70571 + if (!pmd_large(*pmd))
70572 +#endif
70573 if (!pmd_none(*pmd)) {
70574 pte_t *ptep, pte;
70575
70576 @@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vm
70577 struct rb_node *tmp;
70578
70579 while (*p) {
70580 - struct vmap_area *tmp;
70581 + struct vmap_area *varea;
70582
70583 parent = *p;
70584 - tmp = rb_entry(parent, struct vmap_area, rb_node);
70585 - if (va->va_start < tmp->va_end)
70586 + varea = rb_entry(parent, struct vmap_area, rb_node);
70587 + if (va->va_start < varea->va_end)
70588 p = &(*p)->rb_left;
70589 - else if (va->va_end > tmp->va_start)
70590 + else if (va->va_end > varea->va_start)
70591 p = &(*p)->rb_right;
70592 else
70593 BUG();
70594 @@ -1232,6 +1274,16 @@ static struct vm_struct *__get_vm_area_n
70595 struct vm_struct *area;
70596
70597 BUG_ON(in_interrupt());
70598 +
70599 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70600 + if (flags & VM_KERNEXEC) {
70601 + if (start != VMALLOC_START || end != VMALLOC_END)
70602 + return NULL;
70603 + start = (unsigned long)MODULES_EXEC_VADDR;
70604 + end = (unsigned long)MODULES_EXEC_END;
70605 + }
70606 +#endif
70607 +
70608 if (flags & VM_IOREMAP) {
70609 int bit = fls(size);
70610
70611 @@ -1457,6 +1509,11 @@ void *vmap(struct page **pages, unsigned
70612 if (count > totalram_pages)
70613 return NULL;
70614
70615 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70616 + if (!(pgprot_val(prot) & _PAGE_NX))
70617 + flags |= VM_KERNEXEC;
70618 +#endif
70619 +
70620 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
70621 __builtin_return_address(0));
70622 if (!area)
70623 @@ -1567,6 +1624,13 @@ static void *__vmalloc_node(unsigned lon
70624 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
70625 return NULL;
70626
70627 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70628 + if (!(pgprot_val(prot) & _PAGE_NX))
70629 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
70630 + node, gfp_mask, caller);
70631 + else
70632 +#endif
70633 +
70634 area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
70635 VMALLOC_END, node, gfp_mask, caller);
70636
70637 @@ -1585,6 +1649,7 @@ static void *__vmalloc_node(unsigned lon
70638 return addr;
70639 }
70640
70641 +#undef __vmalloc
70642 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
70643 {
70644 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
70645 @@ -1601,6 +1666,7 @@ EXPORT_SYMBOL(__vmalloc);
70646 * For tight control over page level allocator and protection flags
70647 * use __vmalloc() instead.
70648 */
70649 +#undef vmalloc
70650 void *vmalloc(unsigned long size)
70651 {
70652 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
70653 @@ -1615,6 +1681,7 @@ EXPORT_SYMBOL(vmalloc);
70654 * The resulting memory area is zeroed so it can be mapped to userspace
70655 * without leaking data.
70656 */
70657 +#undef vmalloc_user
70658 void *vmalloc_user(unsigned long size)
70659 {
70660 struct vm_struct *area;
70661 @@ -1642,6 +1709,7 @@ EXPORT_SYMBOL(vmalloc_user);
70662 * For tight control over page level allocator and protection flags
70663 * use __vmalloc() instead.
70664 */
70665 +#undef vmalloc_node
70666 void *vmalloc_node(unsigned long size, int node)
70667 {
70668 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
70669 @@ -1664,10 +1732,10 @@ EXPORT_SYMBOL(vmalloc_node);
70670 * For tight control over page level allocator and protection flags
70671 * use __vmalloc() instead.
70672 */
70673 -
70674 +#undef vmalloc_exec
70675 void *vmalloc_exec(unsigned long size)
70676 {
70677 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
70678 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
70679 -1, __builtin_return_address(0));
70680 }
70681
70682 @@ -1686,6 +1754,7 @@ void *vmalloc_exec(unsigned long size)
70683 * Allocate enough 32bit PA addressable pages to cover @size from the
70684 * page level allocator and map them into contiguous kernel virtual space.
70685 */
70686 +#undef vmalloc_32
70687 void *vmalloc_32(unsigned long size)
70688 {
70689 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
70690 @@ -1700,6 +1769,7 @@ EXPORT_SYMBOL(vmalloc_32);
70691 * The resulting memory area is 32bit addressable and zeroed so it can be
70692 * mapped to userspace without leaking data.
70693 */
70694 +#undef vmalloc_32_user
70695 void *vmalloc_32_user(unsigned long size)
70696 {
70697 struct vm_struct *area;
70698 @@ -1964,6 +2034,8 @@ int remap_vmalloc_range(struct vm_area_s
70699 unsigned long uaddr = vma->vm_start;
70700 unsigned long usize = vma->vm_end - vma->vm_start;
70701
70702 + BUG_ON(vma->vm_mirror);
70703 +
70704 if ((PAGE_SIZE-1) & (unsigned long)addr)
70705 return -EINVAL;
70706
70707 diff -urNp linux-2.6.32.45/mm/vmstat.c linux-2.6.32.45/mm/vmstat.c
70708 --- linux-2.6.32.45/mm/vmstat.c 2011-03-27 14:31:47.000000000 -0400
70709 +++ linux-2.6.32.45/mm/vmstat.c 2011-04-17 15:56:46.000000000 -0400
70710 @@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
70711 *
70712 * vm_stat contains the global counters
70713 */
70714 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70715 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70716 EXPORT_SYMBOL(vm_stat);
70717
70718 #ifdef CONFIG_SMP
70719 @@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
70720 v = p->vm_stat_diff[i];
70721 p->vm_stat_diff[i] = 0;
70722 local_irq_restore(flags);
70723 - atomic_long_add(v, &zone->vm_stat[i]);
70724 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
70725 global_diff[i] += v;
70726 #ifdef CONFIG_NUMA
70727 /* 3 seconds idle till flush */
70728 @@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
70729
70730 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
70731 if (global_diff[i])
70732 - atomic_long_add(global_diff[i], &vm_stat[i]);
70733 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
70734 }
70735
70736 #endif
70737 @@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
70738 start_cpu_timer(cpu);
70739 #endif
70740 #ifdef CONFIG_PROC_FS
70741 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
70742 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
70743 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
70744 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
70745 + {
70746 + mode_t gr_mode = S_IRUGO;
70747 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
70748 + gr_mode = S_IRUSR;
70749 +#endif
70750 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
70751 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
70752 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
70753 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
70754 +#else
70755 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
70756 +#endif
70757 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
70758 + }
70759 #endif
70760 return 0;
70761 }
70762 diff -urNp linux-2.6.32.45/net/8021q/vlan.c linux-2.6.32.45/net/8021q/vlan.c
70763 --- linux-2.6.32.45/net/8021q/vlan.c 2011-03-27 14:31:47.000000000 -0400
70764 +++ linux-2.6.32.45/net/8021q/vlan.c 2011-04-17 15:56:46.000000000 -0400
70765 @@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net
70766 err = -EPERM;
70767 if (!capable(CAP_NET_ADMIN))
70768 break;
70769 - if ((args.u.name_type >= 0) &&
70770 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
70771 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
70772 struct vlan_net *vn;
70773
70774 vn = net_generic(net, vlan_net_id);
70775 diff -urNp linux-2.6.32.45/net/atm/atm_misc.c linux-2.6.32.45/net/atm/atm_misc.c
70776 --- linux-2.6.32.45/net/atm/atm_misc.c 2011-03-27 14:31:47.000000000 -0400
70777 +++ linux-2.6.32.45/net/atm/atm_misc.c 2011-04-17 15:56:46.000000000 -0400
70778 @@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int t
70779 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
70780 return 1;
70781 atm_return(vcc,truesize);
70782 - atomic_inc(&vcc->stats->rx_drop);
70783 + atomic_inc_unchecked(&vcc->stats->rx_drop);
70784 return 0;
70785 }
70786
70787 @@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct
70788 }
70789 }
70790 atm_return(vcc,guess);
70791 - atomic_inc(&vcc->stats->rx_drop);
70792 + atomic_inc_unchecked(&vcc->stats->rx_drop);
70793 return NULL;
70794 }
70795
70796 @@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafpr
70797
70798 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
70799 {
70800 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
70801 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
70802 __SONET_ITEMS
70803 #undef __HANDLE_ITEM
70804 }
70805 @@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_sta
70806
70807 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
70808 {
70809 -#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
70810 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
70811 __SONET_ITEMS
70812 #undef __HANDLE_ITEM
70813 }
70814 diff -urNp linux-2.6.32.45/net/atm/lec.h linux-2.6.32.45/net/atm/lec.h
70815 --- linux-2.6.32.45/net/atm/lec.h 2011-03-27 14:31:47.000000000 -0400
70816 +++ linux-2.6.32.45/net/atm/lec.h 2011-08-05 20:33:55.000000000 -0400
70817 @@ -48,7 +48,7 @@ struct lane2_ops {
70818 const u8 *tlvs, u32 sizeoftlvs);
70819 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
70820 const u8 *tlvs, u32 sizeoftlvs);
70821 -};
70822 +} __no_const;
70823
70824 /*
70825 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
70826 diff -urNp linux-2.6.32.45/net/atm/mpc.h linux-2.6.32.45/net/atm/mpc.h
70827 --- linux-2.6.32.45/net/atm/mpc.h 2011-03-27 14:31:47.000000000 -0400
70828 +++ linux-2.6.32.45/net/atm/mpc.h 2011-08-23 21:22:38.000000000 -0400
70829 @@ -33,7 +33,7 @@ struct mpoa_client {
70830 struct mpc_parameters parameters; /* parameters for this client */
70831
70832 const struct net_device_ops *old_ops;
70833 - struct net_device_ops new_ops;
70834 + net_device_ops_no_const new_ops;
70835 };
70836
70837
70838 diff -urNp linux-2.6.32.45/net/atm/mpoa_caches.c linux-2.6.32.45/net/atm/mpoa_caches.c
70839 --- linux-2.6.32.45/net/atm/mpoa_caches.c 2011-03-27 14:31:47.000000000 -0400
70840 +++ linux-2.6.32.45/net/atm/mpoa_caches.c 2011-05-16 21:46:57.000000000 -0400
70841 @@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_cl
70842 struct timeval now;
70843 struct k_message msg;
70844
70845 + pax_track_stack();
70846 +
70847 do_gettimeofday(&now);
70848
70849 write_lock_irq(&client->egress_lock);
70850 diff -urNp linux-2.6.32.45/net/atm/proc.c linux-2.6.32.45/net/atm/proc.c
70851 --- linux-2.6.32.45/net/atm/proc.c 2011-03-27 14:31:47.000000000 -0400
70852 +++ linux-2.6.32.45/net/atm/proc.c 2011-04-17 15:56:46.000000000 -0400
70853 @@ -43,9 +43,9 @@ static void add_stats(struct seq_file *s
70854 const struct k_atm_aal_stats *stats)
70855 {
70856 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
70857 - atomic_read(&stats->tx),atomic_read(&stats->tx_err),
70858 - atomic_read(&stats->rx),atomic_read(&stats->rx_err),
70859 - atomic_read(&stats->rx_drop));
70860 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
70861 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
70862 + atomic_read_unchecked(&stats->rx_drop));
70863 }
70864
70865 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
70866 @@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *se
70867 {
70868 struct sock *sk = sk_atm(vcc);
70869
70870 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70871 + seq_printf(seq, "%p ", NULL);
70872 +#else
70873 seq_printf(seq, "%p ", vcc);
70874 +#endif
70875 +
70876 if (!vcc->dev)
70877 seq_printf(seq, "Unassigned ");
70878 else
70879 @@ -214,7 +219,11 @@ static void svc_info(struct seq_file *se
70880 {
70881 if (!vcc->dev)
70882 seq_printf(seq, sizeof(void *) == 4 ?
70883 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70884 + "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
70885 +#else
70886 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
70887 +#endif
70888 else
70889 seq_printf(seq, "%3d %3d %5d ",
70890 vcc->dev->number, vcc->vpi, vcc->vci);
70891 diff -urNp linux-2.6.32.45/net/atm/resources.c linux-2.6.32.45/net/atm/resources.c
70892 --- linux-2.6.32.45/net/atm/resources.c 2011-03-27 14:31:47.000000000 -0400
70893 +++ linux-2.6.32.45/net/atm/resources.c 2011-04-17 15:56:46.000000000 -0400
70894 @@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *
70895 static void copy_aal_stats(struct k_atm_aal_stats *from,
70896 struct atm_aal_stats *to)
70897 {
70898 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
70899 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
70900 __AAL_STAT_ITEMS
70901 #undef __HANDLE_ITEM
70902 }
70903 @@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_
70904 static void subtract_aal_stats(struct k_atm_aal_stats *from,
70905 struct atm_aal_stats *to)
70906 {
70907 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
70908 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
70909 __AAL_STAT_ITEMS
70910 #undef __HANDLE_ITEM
70911 }
70912 diff -urNp linux-2.6.32.45/net/bluetooth/l2cap.c linux-2.6.32.45/net/bluetooth/l2cap.c
70913 --- linux-2.6.32.45/net/bluetooth/l2cap.c 2011-03-27 14:31:47.000000000 -0400
70914 +++ linux-2.6.32.45/net/bluetooth/l2cap.c 2011-06-25 14:36:21.000000000 -0400
70915 @@ -1885,7 +1885,7 @@ static int l2cap_sock_getsockopt_old(str
70916 err = -ENOTCONN;
70917 break;
70918 }
70919 -
70920 + memset(&cinfo, 0, sizeof(cinfo));
70921 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
70922 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
70923
70924 @@ -2719,7 +2719,7 @@ static inline int l2cap_config_req(struc
70925
70926 /* Reject if config buffer is too small. */
70927 len = cmd_len - sizeof(*req);
70928 - if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
70929 + if (len < 0 || l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
70930 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
70931 l2cap_build_conf_rsp(sk, rsp,
70932 L2CAP_CONF_REJECT, flags), rsp);
70933 diff -urNp linux-2.6.32.45/net/bluetooth/rfcomm/sock.c linux-2.6.32.45/net/bluetooth/rfcomm/sock.c
70934 --- linux-2.6.32.45/net/bluetooth/rfcomm/sock.c 2011-03-27 14:31:47.000000000 -0400
70935 +++ linux-2.6.32.45/net/bluetooth/rfcomm/sock.c 2011-06-12 06:35:00.000000000 -0400
70936 @@ -878,6 +878,7 @@ static int rfcomm_sock_getsockopt_old(st
70937
70938 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
70939
70940 + memset(&cinfo, 0, sizeof(cinfo));
70941 cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle;
70942 memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3);
70943
70944 diff -urNp linux-2.6.32.45/net/bridge/br_private.h linux-2.6.32.45/net/bridge/br_private.h
70945 --- linux-2.6.32.45/net/bridge/br_private.h 2011-08-09 18:35:30.000000000 -0400
70946 +++ linux-2.6.32.45/net/bridge/br_private.h 2011-08-09 18:34:01.000000000 -0400
70947 @@ -255,7 +255,7 @@ extern void br_ifinfo_notify(int event,
70948
70949 #ifdef CONFIG_SYSFS
70950 /* br_sysfs_if.c */
70951 -extern struct sysfs_ops brport_sysfs_ops;
70952 +extern const struct sysfs_ops brport_sysfs_ops;
70953 extern int br_sysfs_addif(struct net_bridge_port *p);
70954
70955 /* br_sysfs_br.c */
70956 diff -urNp linux-2.6.32.45/net/bridge/br_stp_if.c linux-2.6.32.45/net/bridge/br_stp_if.c
70957 --- linux-2.6.32.45/net/bridge/br_stp_if.c 2011-03-27 14:31:47.000000000 -0400
70958 +++ linux-2.6.32.45/net/bridge/br_stp_if.c 2011-04-17 15:56:46.000000000 -0400
70959 @@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridg
70960 char *envp[] = { NULL };
70961
70962 if (br->stp_enabled == BR_USER_STP) {
70963 - r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
70964 + r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
70965 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
70966 br->dev->name, r);
70967
70968 diff -urNp linux-2.6.32.45/net/bridge/br_sysfs_if.c linux-2.6.32.45/net/bridge/br_sysfs_if.c
70969 --- linux-2.6.32.45/net/bridge/br_sysfs_if.c 2011-03-27 14:31:47.000000000 -0400
70970 +++ linux-2.6.32.45/net/bridge/br_sysfs_if.c 2011-04-17 15:56:46.000000000 -0400
70971 @@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobje
70972 return ret;
70973 }
70974
70975 -struct sysfs_ops brport_sysfs_ops = {
70976 +const struct sysfs_ops brport_sysfs_ops = {
70977 .show = brport_show,
70978 .store = brport_store,
70979 };
70980 diff -urNp linux-2.6.32.45/net/bridge/netfilter/ebtables.c linux-2.6.32.45/net/bridge/netfilter/ebtables.c
70981 --- linux-2.6.32.45/net/bridge/netfilter/ebtables.c 2011-04-17 17:00:52.000000000 -0400
70982 +++ linux-2.6.32.45/net/bridge/netfilter/ebtables.c 2011-05-16 21:46:57.000000000 -0400
70983 @@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struc
70984 unsigned int entries_size, nentries;
70985 char *entries;
70986
70987 + pax_track_stack();
70988 +
70989 if (cmd == EBT_SO_GET_ENTRIES) {
70990 entries_size = t->private->entries_size;
70991 nentries = t->private->nentries;
70992 diff -urNp linux-2.6.32.45/net/can/bcm.c linux-2.6.32.45/net/can/bcm.c
70993 --- linux-2.6.32.45/net/can/bcm.c 2011-05-10 22:12:01.000000000 -0400
70994 +++ linux-2.6.32.45/net/can/bcm.c 2011-05-10 22:12:34.000000000 -0400
70995 @@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file
70996 struct bcm_sock *bo = bcm_sk(sk);
70997 struct bcm_op *op;
70998
70999 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71000 + seq_printf(m, ">>> socket %p", NULL);
71001 + seq_printf(m, " / sk %p", NULL);
71002 + seq_printf(m, " / bo %p", NULL);
71003 +#else
71004 seq_printf(m, ">>> socket %p", sk->sk_socket);
71005 seq_printf(m, " / sk %p", sk);
71006 seq_printf(m, " / bo %p", bo);
71007 +#endif
71008 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
71009 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
71010 seq_printf(m, " <<<\n");
71011 diff -urNp linux-2.6.32.45/net/core/dev.c linux-2.6.32.45/net/core/dev.c
71012 --- linux-2.6.32.45/net/core/dev.c 2011-04-17 17:00:52.000000000 -0400
71013 +++ linux-2.6.32.45/net/core/dev.c 2011-08-05 20:33:55.000000000 -0400
71014 @@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const cha
71015 if (no_module && capable(CAP_NET_ADMIN))
71016 no_module = request_module("netdev-%s", name);
71017 if (no_module && capable(CAP_SYS_MODULE)) {
71018 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
71019 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
71020 +#else
71021 if (!request_module("%s", name))
71022 pr_err("Loading kernel module for a network device "
71023 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
71024 "instead\n", name);
71025 +#endif
71026 }
71027 }
71028 EXPORT_SYMBOL(dev_load);
71029 @@ -1654,7 +1658,7 @@ static inline int illegal_highdma(struct
71030
71031 struct dev_gso_cb {
71032 void (*destructor)(struct sk_buff *skb);
71033 -};
71034 +} __no_const;
71035
71036 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
71037
71038 @@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
71039 }
71040 EXPORT_SYMBOL(netif_rx_ni);
71041
71042 -static void net_tx_action(struct softirq_action *h)
71043 +static void net_tx_action(void)
71044 {
71045 struct softnet_data *sd = &__get_cpu_var(softnet_data);
71046
71047 @@ -2826,7 +2830,7 @@ void netif_napi_del(struct napi_struct *
71048 EXPORT_SYMBOL(netif_napi_del);
71049
71050
71051 -static void net_rx_action(struct softirq_action *h)
71052 +static void net_rx_action(void)
71053 {
71054 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
71055 unsigned long time_limit = jiffies + 2;
71056 diff -urNp linux-2.6.32.45/net/core/flow.c linux-2.6.32.45/net/core/flow.c
71057 --- linux-2.6.32.45/net/core/flow.c 2011-03-27 14:31:47.000000000 -0400
71058 +++ linux-2.6.32.45/net/core/flow.c 2011-05-04 17:56:20.000000000 -0400
71059 @@ -35,11 +35,11 @@ struct flow_cache_entry {
71060 atomic_t *object_ref;
71061 };
71062
71063 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
71064 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
71065
71066 static u32 flow_hash_shift;
71067 #define flow_hash_size (1 << flow_hash_shift)
71068 -static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
71069 +static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
71070
71071 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
71072
71073 @@ -52,7 +52,7 @@ struct flow_percpu_info {
71074 u32 hash_rnd;
71075 int count;
71076 };
71077 -static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
71078 +static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
71079
71080 #define flow_hash_rnd_recalc(cpu) \
71081 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
71082 @@ -69,7 +69,7 @@ struct flow_flush_info {
71083 atomic_t cpuleft;
71084 struct completion completion;
71085 };
71086 -static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
71087 +static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
71088
71089 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
71090
71091 @@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net,
71092 if (fle->family == family &&
71093 fle->dir == dir &&
71094 flow_key_compare(key, &fle->key) == 0) {
71095 - if (fle->genid == atomic_read(&flow_cache_genid)) {
71096 + if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
71097 void *ret = fle->object;
71098
71099 if (ret)
71100 @@ -228,7 +228,7 @@ nocache:
71101 err = resolver(net, key, family, dir, &obj, &obj_ref);
71102
71103 if (fle && !err) {
71104 - fle->genid = atomic_read(&flow_cache_genid);
71105 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
71106
71107 if (fle->object)
71108 atomic_dec(fle->object_ref);
71109 @@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(uns
71110
71111 fle = flow_table(cpu)[i];
71112 for (; fle; fle = fle->next) {
71113 - unsigned genid = atomic_read(&flow_cache_genid);
71114 + unsigned genid = atomic_read_unchecked(&flow_cache_genid);
71115
71116 if (!fle->object || fle->genid == genid)
71117 continue;
71118 diff -urNp linux-2.6.32.45/net/core/rtnetlink.c linux-2.6.32.45/net/core/rtnetlink.c
71119 --- linux-2.6.32.45/net/core/rtnetlink.c 2011-03-27 14:31:47.000000000 -0400
71120 +++ linux-2.6.32.45/net/core/rtnetlink.c 2011-08-05 20:33:55.000000000 -0400
71121 @@ -57,7 +57,7 @@ struct rtnl_link
71122 {
71123 rtnl_doit_func doit;
71124 rtnl_dumpit_func dumpit;
71125 -};
71126 +} __no_const;
71127
71128 static DEFINE_MUTEX(rtnl_mutex);
71129
71130 diff -urNp linux-2.6.32.45/net/core/secure_seq.c linux-2.6.32.45/net/core/secure_seq.c
71131 --- linux-2.6.32.45/net/core/secure_seq.c 2011-08-16 20:37:25.000000000 -0400
71132 +++ linux-2.6.32.45/net/core/secure_seq.c 2011-08-07 19:48:09.000000000 -0400
71133 @@ -57,7 +57,7 @@ __u32 secure_tcpv6_sequence_number(__be3
71134 EXPORT_SYMBOL(secure_tcpv6_sequence_number);
71135
71136 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
71137 - __be16 dport)
71138 + __be16 dport)
71139 {
71140 u32 secret[MD5_MESSAGE_BYTES / 4];
71141 u32 hash[MD5_DIGEST_WORDS];
71142 @@ -71,7 +71,6 @@ u32 secure_ipv6_port_ephemeral(const __b
71143 secret[i] = net_secret[i];
71144
71145 md5_transform(hash, secret);
71146 -
71147 return hash[0];
71148 }
71149 #endif
71150 diff -urNp linux-2.6.32.45/net/core/skbuff.c linux-2.6.32.45/net/core/skbuff.c
71151 --- linux-2.6.32.45/net/core/skbuff.c 2011-03-27 14:31:47.000000000 -0400
71152 +++ linux-2.6.32.45/net/core/skbuff.c 2011-05-16 21:46:57.000000000 -0400
71153 @@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb,
71154 struct sk_buff *frag_iter;
71155 struct sock *sk = skb->sk;
71156
71157 + pax_track_stack();
71158 +
71159 /*
71160 * __skb_splice_bits() only fails if the output has no room left,
71161 * so no point in going over the frag_list for the error case.
71162 diff -urNp linux-2.6.32.45/net/core/sock.c linux-2.6.32.45/net/core/sock.c
71163 --- linux-2.6.32.45/net/core/sock.c 2011-03-27 14:31:47.000000000 -0400
71164 +++ linux-2.6.32.45/net/core/sock.c 2011-05-04 17:56:20.000000000 -0400
71165 @@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock,
71166 break;
71167
71168 case SO_PEERCRED:
71169 + {
71170 + struct ucred peercred;
71171 if (len > sizeof(sk->sk_peercred))
71172 len = sizeof(sk->sk_peercred);
71173 - if (copy_to_user(optval, &sk->sk_peercred, len))
71174 + peercred = sk->sk_peercred;
71175 + if (copy_to_user(optval, &peercred, len))
71176 return -EFAULT;
71177 goto lenout;
71178 + }
71179
71180 case SO_PEERNAME:
71181 {
71182 @@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock,
71183 */
71184 smp_wmb();
71185 atomic_set(&sk->sk_refcnt, 1);
71186 - atomic_set(&sk->sk_drops, 0);
71187 + atomic_set_unchecked(&sk->sk_drops, 0);
71188 }
71189 EXPORT_SYMBOL(sock_init_data);
71190
71191 diff -urNp linux-2.6.32.45/net/decnet/sysctl_net_decnet.c linux-2.6.32.45/net/decnet/sysctl_net_decnet.c
71192 --- linux-2.6.32.45/net/decnet/sysctl_net_decnet.c 2011-03-27 14:31:47.000000000 -0400
71193 +++ linux-2.6.32.45/net/decnet/sysctl_net_decnet.c 2011-04-17 15:56:46.000000000 -0400
71194 @@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_t
71195
71196 if (len > *lenp) len = *lenp;
71197
71198 - if (copy_to_user(buffer, addr, len))
71199 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
71200 return -EFAULT;
71201
71202 *lenp = len;
71203 @@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table
71204
71205 if (len > *lenp) len = *lenp;
71206
71207 - if (copy_to_user(buffer, devname, len))
71208 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
71209 return -EFAULT;
71210
71211 *lenp = len;
71212 diff -urNp linux-2.6.32.45/net/econet/Kconfig linux-2.6.32.45/net/econet/Kconfig
71213 --- linux-2.6.32.45/net/econet/Kconfig 2011-03-27 14:31:47.000000000 -0400
71214 +++ linux-2.6.32.45/net/econet/Kconfig 2011-04-17 15:56:46.000000000 -0400
71215 @@ -4,7 +4,7 @@
71216
71217 config ECONET
71218 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
71219 - depends on EXPERIMENTAL && INET
71220 + depends on EXPERIMENTAL && INET && BROKEN
71221 ---help---
71222 Econet is a fairly old and slow networking protocol mainly used by
71223 Acorn computers to access file and print servers. It uses native
71224 diff -urNp linux-2.6.32.45/net/ieee802154/dgram.c linux-2.6.32.45/net/ieee802154/dgram.c
71225 --- linux-2.6.32.45/net/ieee802154/dgram.c 2011-03-27 14:31:47.000000000 -0400
71226 +++ linux-2.6.32.45/net/ieee802154/dgram.c 2011-05-04 17:56:28.000000000 -0400
71227 @@ -318,7 +318,7 @@ out:
71228 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
71229 {
71230 if (sock_queue_rcv_skb(sk, skb) < 0) {
71231 - atomic_inc(&sk->sk_drops);
71232 + atomic_inc_unchecked(&sk->sk_drops);
71233 kfree_skb(skb);
71234 return NET_RX_DROP;
71235 }
71236 diff -urNp linux-2.6.32.45/net/ieee802154/raw.c linux-2.6.32.45/net/ieee802154/raw.c
71237 --- linux-2.6.32.45/net/ieee802154/raw.c 2011-03-27 14:31:47.000000000 -0400
71238 +++ linux-2.6.32.45/net/ieee802154/raw.c 2011-05-04 17:56:28.000000000 -0400
71239 @@ -206,7 +206,7 @@ out:
71240 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
71241 {
71242 if (sock_queue_rcv_skb(sk, skb) < 0) {
71243 - atomic_inc(&sk->sk_drops);
71244 + atomic_inc_unchecked(&sk->sk_drops);
71245 kfree_skb(skb);
71246 return NET_RX_DROP;
71247 }
71248 diff -urNp linux-2.6.32.45/net/ipv4/inet_diag.c linux-2.6.32.45/net/ipv4/inet_diag.c
71249 --- linux-2.6.32.45/net/ipv4/inet_diag.c 2011-07-13 17:23:04.000000000 -0400
71250 +++ linux-2.6.32.45/net/ipv4/inet_diag.c 2011-06-20 19:31:13.000000000 -0400
71251 @@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct soc
71252 r->idiag_retrans = 0;
71253
71254 r->id.idiag_if = sk->sk_bound_dev_if;
71255 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71256 + r->id.idiag_cookie[0] = 0;
71257 + r->id.idiag_cookie[1] = 0;
71258 +#else
71259 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
71260 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
71261 +#endif
71262
71263 r->id.idiag_sport = inet->sport;
71264 r->id.idiag_dport = inet->dport;
71265 @@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct in
71266 r->idiag_family = tw->tw_family;
71267 r->idiag_retrans = 0;
71268 r->id.idiag_if = tw->tw_bound_dev_if;
71269 +
71270 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71271 + r->id.idiag_cookie[0] = 0;
71272 + r->id.idiag_cookie[1] = 0;
71273 +#else
71274 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
71275 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
71276 +#endif
71277 +
71278 r->id.idiag_sport = tw->tw_sport;
71279 r->id.idiag_dport = tw->tw_dport;
71280 r->id.idiag_src[0] = tw->tw_rcv_saddr;
71281 @@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk
71282 if (sk == NULL)
71283 goto unlock;
71284
71285 +#ifndef CONFIG_GRKERNSEC_HIDESYM
71286 err = -ESTALE;
71287 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
71288 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
71289 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
71290 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
71291 goto out;
71292 +#endif
71293
71294 err = -ENOMEM;
71295 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
71296 @@ -579,8 +593,14 @@ static int inet_diag_fill_req(struct sk_
71297 r->idiag_retrans = req->retrans;
71298
71299 r->id.idiag_if = sk->sk_bound_dev_if;
71300 +
71301 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71302 + r->id.idiag_cookie[0] = 0;
71303 + r->id.idiag_cookie[1] = 0;
71304 +#else
71305 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
71306 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
71307 +#endif
71308
71309 tmo = req->expires - jiffies;
71310 if (tmo < 0)
71311 diff -urNp linux-2.6.32.45/net/ipv4/inet_hashtables.c linux-2.6.32.45/net/ipv4/inet_hashtables.c
71312 --- linux-2.6.32.45/net/ipv4/inet_hashtables.c 2011-08-16 20:37:25.000000000 -0400
71313 +++ linux-2.6.32.45/net/ipv4/inet_hashtables.c 2011-08-16 20:42:30.000000000 -0400
71314 @@ -18,12 +18,15 @@
71315 #include <linux/sched.h>
71316 #include <linux/slab.h>
71317 #include <linux/wait.h>
71318 +#include <linux/security.h>
71319
71320 #include <net/inet_connection_sock.h>
71321 #include <net/inet_hashtables.h>
71322 #include <net/secure_seq.h>
71323 #include <net/ip.h>
71324
71325 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
71326 +
71327 /*
71328 * Allocate and initialize a new local port bind bucket.
71329 * The bindhash mutex for snum's hash chain must be held here.
71330 @@ -491,6 +494,8 @@ ok:
71331 }
71332 spin_unlock(&head->lock);
71333
71334 + gr_update_task_in_ip_table(current, inet_sk(sk));
71335 +
71336 if (tw) {
71337 inet_twsk_deschedule(tw, death_row);
71338 inet_twsk_put(tw);
71339 diff -urNp linux-2.6.32.45/net/ipv4/inetpeer.c linux-2.6.32.45/net/ipv4/inetpeer.c
71340 --- linux-2.6.32.45/net/ipv4/inetpeer.c 2011-08-16 20:37:25.000000000 -0400
71341 +++ linux-2.6.32.45/net/ipv4/inetpeer.c 2011-08-07 19:48:09.000000000 -0400
71342 @@ -367,6 +367,8 @@ struct inet_peer *inet_getpeer(__be32 da
71343 struct inet_peer *p, *n;
71344 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
71345
71346 + pax_track_stack();
71347 +
71348 /* Look up for the address quickly. */
71349 read_lock_bh(&peer_pool_lock);
71350 p = lookup(daddr, NULL);
71351 @@ -390,7 +392,7 @@ struct inet_peer *inet_getpeer(__be32 da
71352 return NULL;
71353 n->v4daddr = daddr;
71354 atomic_set(&n->refcnt, 1);
71355 - atomic_set(&n->rid, 0);
71356 + atomic_set_unchecked(&n->rid, 0);
71357 n->ip_id_count = secure_ip_id(daddr);
71358 n->tcp_ts_stamp = 0;
71359
71360 diff -urNp linux-2.6.32.45/net/ipv4/ip_fragment.c linux-2.6.32.45/net/ipv4/ip_fragment.c
71361 --- linux-2.6.32.45/net/ipv4/ip_fragment.c 2011-03-27 14:31:47.000000000 -0400
71362 +++ linux-2.6.32.45/net/ipv4/ip_fragment.c 2011-04-17 15:56:46.000000000 -0400
71363 @@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct
71364 return 0;
71365
71366 start = qp->rid;
71367 - end = atomic_inc_return(&peer->rid);
71368 + end = atomic_inc_return_unchecked(&peer->rid);
71369 qp->rid = end;
71370
71371 rc = qp->q.fragments && (end - start) > max;
71372 diff -urNp linux-2.6.32.45/net/ipv4/ip_sockglue.c linux-2.6.32.45/net/ipv4/ip_sockglue.c
71373 --- linux-2.6.32.45/net/ipv4/ip_sockglue.c 2011-03-27 14:31:47.000000000 -0400
71374 +++ linux-2.6.32.45/net/ipv4/ip_sockglue.c 2011-05-16 21:46:57.000000000 -0400
71375 @@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock
71376 int val;
71377 int len;
71378
71379 + pax_track_stack();
71380 +
71381 if (level != SOL_IP)
71382 return -EOPNOTSUPP;
71383
71384 diff -urNp linux-2.6.32.45/net/ipv4/netfilter/arp_tables.c linux-2.6.32.45/net/ipv4/netfilter/arp_tables.c
71385 --- linux-2.6.32.45/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:00:52.000000000 -0400
71386 +++ linux-2.6.32.45/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:04:18.000000000 -0400
71387 @@ -934,6 +934,7 @@ static int get_info(struct net *net, voi
71388 private = &tmp;
71389 }
71390 #endif
71391 + memset(&info, 0, sizeof(info));
71392 info.valid_hooks = t->valid_hooks;
71393 memcpy(info.hook_entry, private->hook_entry,
71394 sizeof(info.hook_entry));
71395 diff -urNp linux-2.6.32.45/net/ipv4/netfilter/ip_queue.c linux-2.6.32.45/net/ipv4/netfilter/ip_queue.c
71396 --- linux-2.6.32.45/net/ipv4/netfilter/ip_queue.c 2011-03-27 14:31:47.000000000 -0400
71397 +++ linux-2.6.32.45/net/ipv4/netfilter/ip_queue.c 2011-08-21 18:42:53.000000000 -0400
71398 @@ -286,6 +286,9 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, st
71399
71400 if (v->data_len < sizeof(*user_iph))
71401 return 0;
71402 + if (v->data_len > 65535)
71403 + return -EMSGSIZE;
71404 +
71405 diff = v->data_len - e->skb->len;
71406 if (diff < 0) {
71407 if (pskb_trim(e->skb, v->data_len))
71408 @@ -409,7 +412,8 @@ ipq_dev_drop(int ifindex)
71409 static inline void
71410 __ipq_rcv_skb(struct sk_buff *skb)
71411 {
71412 - int status, type, pid, flags, nlmsglen, skblen;
71413 + int status, type, pid, flags;
71414 + unsigned int nlmsglen, skblen;
71415 struct nlmsghdr *nlh;
71416
71417 skblen = skb->len;
71418 diff -urNp linux-2.6.32.45/net/ipv4/netfilter/ip_tables.c linux-2.6.32.45/net/ipv4/netfilter/ip_tables.c
71419 --- linux-2.6.32.45/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:00:52.000000000 -0400
71420 +++ linux-2.6.32.45/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:04:18.000000000 -0400
71421 @@ -1141,6 +1141,7 @@ static int get_info(struct net *net, voi
71422 private = &tmp;
71423 }
71424 #endif
71425 + memset(&info, 0, sizeof(info));
71426 info.valid_hooks = t->valid_hooks;
71427 memcpy(info.hook_entry, private->hook_entry,
71428 sizeof(info.hook_entry));
71429 diff -urNp linux-2.6.32.45/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-2.6.32.45/net/ipv4/netfilter/nf_nat_snmp_basic.c
71430 --- linux-2.6.32.45/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-03-27 14:31:47.000000000 -0400
71431 +++ linux-2.6.32.45/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-04-17 15:56:46.000000000 -0400
71432 @@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(
71433
71434 *len = 0;
71435
71436 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
71437 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
71438 if (*octets == NULL) {
71439 if (net_ratelimit())
71440 printk("OOM in bsalg (%d)\n", __LINE__);
71441 diff -urNp linux-2.6.32.45/net/ipv4/raw.c linux-2.6.32.45/net/ipv4/raw.c
71442 --- linux-2.6.32.45/net/ipv4/raw.c 2011-03-27 14:31:47.000000000 -0400
71443 +++ linux-2.6.32.45/net/ipv4/raw.c 2011-08-14 11:46:51.000000000 -0400
71444 @@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk,
71445 /* Charge it to the socket. */
71446
71447 if (sock_queue_rcv_skb(sk, skb) < 0) {
71448 - atomic_inc(&sk->sk_drops);
71449 + atomic_inc_unchecked(&sk->sk_drops);
71450 kfree_skb(skb);
71451 return NET_RX_DROP;
71452 }
71453 @@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk,
71454 int raw_rcv(struct sock *sk, struct sk_buff *skb)
71455 {
71456 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
71457 - atomic_inc(&sk->sk_drops);
71458 + atomic_inc_unchecked(&sk->sk_drops);
71459 kfree_skb(skb);
71460 return NET_RX_DROP;
71461 }
71462 @@ -724,16 +724,23 @@ static int raw_init(struct sock *sk)
71463
71464 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
71465 {
71466 + struct icmp_filter filter;
71467 +
71468 + if (optlen < 0)
71469 + return -EINVAL;
71470 if (optlen > sizeof(struct icmp_filter))
71471 optlen = sizeof(struct icmp_filter);
71472 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
71473 + if (copy_from_user(&filter, optval, optlen))
71474 return -EFAULT;
71475 + raw_sk(sk)->filter = filter;
71476 +
71477 return 0;
71478 }
71479
71480 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
71481 {
71482 int len, ret = -EFAULT;
71483 + struct icmp_filter filter;
71484
71485 if (get_user(len, optlen))
71486 goto out;
71487 @@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock
71488 if (len > sizeof(struct icmp_filter))
71489 len = sizeof(struct icmp_filter);
71490 ret = -EFAULT;
71491 - if (put_user(len, optlen) ||
71492 - copy_to_user(optval, &raw_sk(sk)->filter, len))
71493 + filter = raw_sk(sk)->filter;
71494 + if (put_user(len, optlen) || len > sizeof filter ||
71495 + copy_to_user(optval, &filter, len))
71496 goto out;
71497 ret = 0;
71498 out: return ret;
71499 @@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq
71500 sk_wmem_alloc_get(sp),
71501 sk_rmem_alloc_get(sp),
71502 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
71503 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
71504 + atomic_read(&sp->sk_refcnt),
71505 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71506 + NULL,
71507 +#else
71508 + sp,
71509 +#endif
71510 + atomic_read_unchecked(&sp->sk_drops));
71511 }
71512
71513 static int raw_seq_show(struct seq_file *seq, void *v)
71514 diff -urNp linux-2.6.32.45/net/ipv4/route.c linux-2.6.32.45/net/ipv4/route.c
71515 --- linux-2.6.32.45/net/ipv4/route.c 2011-08-16 20:37:25.000000000 -0400
71516 +++ linux-2.6.32.45/net/ipv4/route.c 2011-08-07 19:48:09.000000000 -0400
71517 @@ -269,7 +269,7 @@ static inline unsigned int rt_hash(__be3
71518
71519 static inline int rt_genid(struct net *net)
71520 {
71521 - return atomic_read(&net->ipv4.rt_genid);
71522 + return atomic_read_unchecked(&net->ipv4.rt_genid);
71523 }
71524
71525 #ifdef CONFIG_PROC_FS
71526 @@ -889,7 +889,7 @@ static void rt_cache_invalidate(struct n
71527 unsigned char shuffle;
71528
71529 get_random_bytes(&shuffle, sizeof(shuffle));
71530 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
71531 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
71532 }
71533
71534 /*
71535 @@ -3357,7 +3357,7 @@ static __net_initdata struct pernet_oper
71536
71537 static __net_init int rt_secret_timer_init(struct net *net)
71538 {
71539 - atomic_set(&net->ipv4.rt_genid,
71540 + atomic_set_unchecked(&net->ipv4.rt_genid,
71541 (int) ((num_physpages ^ (num_physpages>>8)) ^
71542 (jiffies ^ (jiffies >> 7))));
71543
71544 diff -urNp linux-2.6.32.45/net/ipv4/tcp.c linux-2.6.32.45/net/ipv4/tcp.c
71545 --- linux-2.6.32.45/net/ipv4/tcp.c 2011-03-27 14:31:47.000000000 -0400
71546 +++ linux-2.6.32.45/net/ipv4/tcp.c 2011-05-16 21:46:57.000000000 -0400
71547 @@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock
71548 int val;
71549 int err = 0;
71550
71551 + pax_track_stack();
71552 +
71553 /* This is a string value all the others are int's */
71554 if (optname == TCP_CONGESTION) {
71555 char name[TCP_CA_NAME_MAX];
71556 @@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock
71557 struct tcp_sock *tp = tcp_sk(sk);
71558 int val, len;
71559
71560 + pax_track_stack();
71561 +
71562 if (get_user(len, optlen))
71563 return -EFAULT;
71564
71565 diff -urNp linux-2.6.32.45/net/ipv4/tcp_ipv4.c linux-2.6.32.45/net/ipv4/tcp_ipv4.c
71566 --- linux-2.6.32.45/net/ipv4/tcp_ipv4.c 2011-08-16 20:37:25.000000000 -0400
71567 +++ linux-2.6.32.45/net/ipv4/tcp_ipv4.c 2011-08-23 21:22:32.000000000 -0400
71568 @@ -85,6 +85,9 @@
71569 int sysctl_tcp_tw_reuse __read_mostly;
71570 int sysctl_tcp_low_latency __read_mostly;
71571
71572 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71573 +extern int grsec_enable_blackhole;
71574 +#endif
71575
71576 #ifdef CONFIG_TCP_MD5SIG
71577 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
71578 @@ -1543,6 +1546,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
71579 return 0;
71580
71581 reset:
71582 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71583 + if (!grsec_enable_blackhole)
71584 +#endif
71585 tcp_v4_send_reset(rsk, skb);
71586 discard:
71587 kfree_skb(skb);
71588 @@ -1604,12 +1610,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
71589 TCP_SKB_CB(skb)->sacked = 0;
71590
71591 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
71592 - if (!sk)
71593 + if (!sk) {
71594 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71595 + ret = 1;
71596 +#endif
71597 goto no_tcp_socket;
71598 + }
71599
71600 process:
71601 - if (sk->sk_state == TCP_TIME_WAIT)
71602 + if (sk->sk_state == TCP_TIME_WAIT) {
71603 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71604 + ret = 2;
71605 +#endif
71606 goto do_time_wait;
71607 + }
71608
71609 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
71610 goto discard_and_relse;
71611 @@ -1651,6 +1665,10 @@ no_tcp_socket:
71612 bad_packet:
71613 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
71614 } else {
71615 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71616 + if (!grsec_enable_blackhole || (ret == 1 &&
71617 + (skb->dev->flags & IFF_LOOPBACK)))
71618 +#endif
71619 tcp_v4_send_reset(NULL, skb);
71620 }
71621
71622 @@ -2238,7 +2256,11 @@ static void get_openreq4(struct sock *sk
71623 0, /* non standard timer */
71624 0, /* open_requests have no inode */
71625 atomic_read(&sk->sk_refcnt),
71626 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71627 + NULL,
71628 +#else
71629 req,
71630 +#endif
71631 len);
71632 }
71633
71634 @@ -2280,7 +2302,12 @@ static void get_tcp4_sock(struct sock *s
71635 sock_i_uid(sk),
71636 icsk->icsk_probes_out,
71637 sock_i_ino(sk),
71638 - atomic_read(&sk->sk_refcnt), sk,
71639 + atomic_read(&sk->sk_refcnt),
71640 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71641 + NULL,
71642 +#else
71643 + sk,
71644 +#endif
71645 jiffies_to_clock_t(icsk->icsk_rto),
71646 jiffies_to_clock_t(icsk->icsk_ack.ato),
71647 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
71648 @@ -2308,7 +2335,13 @@ static void get_timewait4_sock(struct in
71649 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
71650 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
71651 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
71652 - atomic_read(&tw->tw_refcnt), tw, len);
71653 + atomic_read(&tw->tw_refcnt),
71654 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71655 + NULL,
71656 +#else
71657 + tw,
71658 +#endif
71659 + len);
71660 }
71661
71662 #define TMPSZ 150
71663 diff -urNp linux-2.6.32.45/net/ipv4/tcp_minisocks.c linux-2.6.32.45/net/ipv4/tcp_minisocks.c
71664 --- linux-2.6.32.45/net/ipv4/tcp_minisocks.c 2011-03-27 14:31:47.000000000 -0400
71665 +++ linux-2.6.32.45/net/ipv4/tcp_minisocks.c 2011-04-17 15:56:46.000000000 -0400
71666 @@ -26,6 +26,10 @@
71667 #include <net/inet_common.h>
71668 #include <net/xfrm.h>
71669
71670 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71671 +extern int grsec_enable_blackhole;
71672 +#endif
71673 +
71674 #ifdef CONFIG_SYSCTL
71675 #define SYNC_INIT 0 /* let the user enable it */
71676 #else
71677 @@ -672,6 +676,10 @@ listen_overflow:
71678
71679 embryonic_reset:
71680 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
71681 +
71682 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71683 + if (!grsec_enable_blackhole)
71684 +#endif
71685 if (!(flg & TCP_FLAG_RST))
71686 req->rsk_ops->send_reset(sk, skb);
71687
71688 diff -urNp linux-2.6.32.45/net/ipv4/tcp_output.c linux-2.6.32.45/net/ipv4/tcp_output.c
71689 --- linux-2.6.32.45/net/ipv4/tcp_output.c 2011-03-27 14:31:47.000000000 -0400
71690 +++ linux-2.6.32.45/net/ipv4/tcp_output.c 2011-05-16 21:46:57.000000000 -0400
71691 @@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct s
71692 __u8 *md5_hash_location;
71693 int mss;
71694
71695 + pax_track_stack();
71696 +
71697 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
71698 if (skb == NULL)
71699 return NULL;
71700 diff -urNp linux-2.6.32.45/net/ipv4/tcp_probe.c linux-2.6.32.45/net/ipv4/tcp_probe.c
71701 --- linux-2.6.32.45/net/ipv4/tcp_probe.c 2011-03-27 14:31:47.000000000 -0400
71702 +++ linux-2.6.32.45/net/ipv4/tcp_probe.c 2011-04-17 15:56:46.000000000 -0400
71703 @@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file
71704 if (cnt + width >= len)
71705 break;
71706
71707 - if (copy_to_user(buf + cnt, tbuf, width))
71708 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
71709 return -EFAULT;
71710 cnt += width;
71711 }
71712 diff -urNp linux-2.6.32.45/net/ipv4/tcp_timer.c linux-2.6.32.45/net/ipv4/tcp_timer.c
71713 --- linux-2.6.32.45/net/ipv4/tcp_timer.c 2011-03-27 14:31:47.000000000 -0400
71714 +++ linux-2.6.32.45/net/ipv4/tcp_timer.c 2011-04-17 15:56:46.000000000 -0400
71715 @@ -21,6 +21,10 @@
71716 #include <linux/module.h>
71717 #include <net/tcp.h>
71718
71719 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71720 +extern int grsec_lastack_retries;
71721 +#endif
71722 +
71723 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
71724 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
71725 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
71726 @@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock
71727 }
71728 }
71729
71730 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71731 + if ((sk->sk_state == TCP_LAST_ACK) &&
71732 + (grsec_lastack_retries > 0) &&
71733 + (grsec_lastack_retries < retry_until))
71734 + retry_until = grsec_lastack_retries;
71735 +#endif
71736 +
71737 if (retransmits_timed_out(sk, retry_until)) {
71738 /* Has it gone just too far? */
71739 tcp_write_err(sk);
71740 diff -urNp linux-2.6.32.45/net/ipv4/udp.c linux-2.6.32.45/net/ipv4/udp.c
71741 --- linux-2.6.32.45/net/ipv4/udp.c 2011-07-13 17:23:04.000000000 -0400
71742 +++ linux-2.6.32.45/net/ipv4/udp.c 2011-08-23 21:22:32.000000000 -0400
71743 @@ -86,6 +86,7 @@
71744 #include <linux/types.h>
71745 #include <linux/fcntl.h>
71746 #include <linux/module.h>
71747 +#include <linux/security.h>
71748 #include <linux/socket.h>
71749 #include <linux/sockios.h>
71750 #include <linux/igmp.h>
71751 @@ -106,6 +107,10 @@
71752 #include <net/xfrm.h>
71753 #include "udp_impl.h"
71754
71755 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71756 +extern int grsec_enable_blackhole;
71757 +#endif
71758 +
71759 struct udp_table udp_table;
71760 EXPORT_SYMBOL(udp_table);
71761
71762 @@ -371,6 +376,9 @@ found:
71763 return s;
71764 }
71765
71766 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
71767 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
71768 +
71769 /*
71770 * This routine is called by the ICMP module when it gets some
71771 * sort of error condition. If err < 0 then the socket should
71772 @@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
71773 dport = usin->sin_port;
71774 if (dport == 0)
71775 return -EINVAL;
71776 +
71777 + err = gr_search_udp_sendmsg(sk, usin);
71778 + if (err)
71779 + return err;
71780 } else {
71781 if (sk->sk_state != TCP_ESTABLISHED)
71782 return -EDESTADDRREQ;
71783 +
71784 + err = gr_search_udp_sendmsg(sk, NULL);
71785 + if (err)
71786 + return err;
71787 +
71788 daddr = inet->daddr;
71789 dport = inet->dport;
71790 /* Open fast path for connected socket.
71791 @@ -945,6 +962,10 @@ try_again:
71792 if (!skb)
71793 goto out;
71794
71795 + err = gr_search_udp_recvmsg(sk, skb);
71796 + if (err)
71797 + goto out_free;
71798 +
71799 ulen = skb->len - sizeof(struct udphdr);
71800 copied = len;
71801 if (copied > ulen)
71802 @@ -1068,7 +1089,7 @@ static int __udp_queue_rcv_skb(struct so
71803 if (rc == -ENOMEM) {
71804 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
71805 is_udplite);
71806 - atomic_inc(&sk->sk_drops);
71807 + atomic_inc_unchecked(&sk->sk_drops);
71808 }
71809 goto drop;
71810 }
71811 @@ -1338,6 +1359,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
71812 goto csum_error;
71813
71814 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
71815 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71816 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
71817 +#endif
71818 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
71819
71820 /*
71821 @@ -1758,8 +1782,13 @@ static void udp4_format_sock(struct sock
71822 sk_wmem_alloc_get(sp),
71823 sk_rmem_alloc_get(sp),
71824 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
71825 - atomic_read(&sp->sk_refcnt), sp,
71826 - atomic_read(&sp->sk_drops), len);
71827 + atomic_read(&sp->sk_refcnt),
71828 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71829 + NULL,
71830 +#else
71831 + sp,
71832 +#endif
71833 + atomic_read_unchecked(&sp->sk_drops), len);
71834 }
71835
71836 int udp4_seq_show(struct seq_file *seq, void *v)
71837 diff -urNp linux-2.6.32.45/net/ipv6/inet6_connection_sock.c linux-2.6.32.45/net/ipv6/inet6_connection_sock.c
71838 --- linux-2.6.32.45/net/ipv6/inet6_connection_sock.c 2011-03-27 14:31:47.000000000 -0400
71839 +++ linux-2.6.32.45/net/ipv6/inet6_connection_sock.c 2011-05-04 17:56:28.000000000 -0400
71840 @@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *
71841 #ifdef CONFIG_XFRM
71842 {
71843 struct rt6_info *rt = (struct rt6_info *)dst;
71844 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
71845 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
71846 }
71847 #endif
71848 }
71849 @@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(
71850 #ifdef CONFIG_XFRM
71851 if (dst) {
71852 struct rt6_info *rt = (struct rt6_info *)dst;
71853 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
71854 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
71855 sk->sk_dst_cache = NULL;
71856 dst_release(dst);
71857 dst = NULL;
71858 diff -urNp linux-2.6.32.45/net/ipv6/inet6_hashtables.c linux-2.6.32.45/net/ipv6/inet6_hashtables.c
71859 --- linux-2.6.32.45/net/ipv6/inet6_hashtables.c 2011-08-16 20:37:25.000000000 -0400
71860 +++ linux-2.6.32.45/net/ipv6/inet6_hashtables.c 2011-08-07 19:48:09.000000000 -0400
71861 @@ -119,7 +119,7 @@ out:
71862 }
71863 EXPORT_SYMBOL(__inet6_lookup_established);
71864
71865 -static int inline compute_score(struct sock *sk, struct net *net,
71866 +static inline int compute_score(struct sock *sk, struct net *net,
71867 const unsigned short hnum,
71868 const struct in6_addr *daddr,
71869 const int dif)
71870 diff -urNp linux-2.6.32.45/net/ipv6/ipv6_sockglue.c linux-2.6.32.45/net/ipv6/ipv6_sockglue.c
71871 --- linux-2.6.32.45/net/ipv6/ipv6_sockglue.c 2011-03-27 14:31:47.000000000 -0400
71872 +++ linux-2.6.32.45/net/ipv6/ipv6_sockglue.c 2011-05-16 21:46:57.000000000 -0400
71873 @@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct soc
71874 int val, valbool;
71875 int retv = -ENOPROTOOPT;
71876
71877 + pax_track_stack();
71878 +
71879 if (optval == NULL)
71880 val=0;
71881 else {
71882 @@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct soc
71883 int len;
71884 int val;
71885
71886 + pax_track_stack();
71887 +
71888 if (ip6_mroute_opt(optname))
71889 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
71890
71891 diff -urNp linux-2.6.32.45/net/ipv6/netfilter/ip6_queue.c linux-2.6.32.45/net/ipv6/netfilter/ip6_queue.c
71892 --- linux-2.6.32.45/net/ipv6/netfilter/ip6_queue.c 2011-03-27 14:31:47.000000000 -0400
71893 +++ linux-2.6.32.45/net/ipv6/netfilter/ip6_queue.c 2011-08-21 18:43:32.000000000 -0400
71894 @@ -287,6 +287,9 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, st
71895
71896 if (v->data_len < sizeof(*user_iph))
71897 return 0;
71898 + if (v->data_len > 65535)
71899 + return -EMSGSIZE;
71900 +
71901 diff = v->data_len - e->skb->len;
71902 if (diff < 0) {
71903 if (pskb_trim(e->skb, v->data_len))
71904 @@ -411,7 +414,8 @@ ipq_dev_drop(int ifindex)
71905 static inline void
71906 __ipq_rcv_skb(struct sk_buff *skb)
71907 {
71908 - int status, type, pid, flags, nlmsglen, skblen;
71909 + int status, type, pid, flags;
71910 + unsigned int nlmsglen, skblen;
71911 struct nlmsghdr *nlh;
71912
71913 skblen = skb->len;
71914 diff -urNp linux-2.6.32.45/net/ipv6/netfilter/ip6_tables.c linux-2.6.32.45/net/ipv6/netfilter/ip6_tables.c
71915 --- linux-2.6.32.45/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:00:52.000000000 -0400
71916 +++ linux-2.6.32.45/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:04:18.000000000 -0400
71917 @@ -1173,6 +1173,7 @@ static int get_info(struct net *net, voi
71918 private = &tmp;
71919 }
71920 #endif
71921 + memset(&info, 0, sizeof(info));
71922 info.valid_hooks = t->valid_hooks;
71923 memcpy(info.hook_entry, private->hook_entry,
71924 sizeof(info.hook_entry));
71925 diff -urNp linux-2.6.32.45/net/ipv6/raw.c linux-2.6.32.45/net/ipv6/raw.c
71926 --- linux-2.6.32.45/net/ipv6/raw.c 2011-03-27 14:31:47.000000000 -0400
71927 +++ linux-2.6.32.45/net/ipv6/raw.c 2011-08-14 11:48:20.000000000 -0400
71928 @@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct s
71929 {
71930 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
71931 skb_checksum_complete(skb)) {
71932 - atomic_inc(&sk->sk_drops);
71933 + atomic_inc_unchecked(&sk->sk_drops);
71934 kfree_skb(skb);
71935 return NET_RX_DROP;
71936 }
71937
71938 /* Charge it to the socket. */
71939 if (sock_queue_rcv_skb(sk,skb)<0) {
71940 - atomic_inc(&sk->sk_drops);
71941 + atomic_inc_unchecked(&sk->sk_drops);
71942 kfree_skb(skb);
71943 return NET_RX_DROP;
71944 }
71945 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
71946 struct raw6_sock *rp = raw6_sk(sk);
71947
71948 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
71949 - atomic_inc(&sk->sk_drops);
71950 + atomic_inc_unchecked(&sk->sk_drops);
71951 kfree_skb(skb);
71952 return NET_RX_DROP;
71953 }
71954 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
71955
71956 if (inet->hdrincl) {
71957 if (skb_checksum_complete(skb)) {
71958 - atomic_inc(&sk->sk_drops);
71959 + atomic_inc_unchecked(&sk->sk_drops);
71960 kfree_skb(skb);
71961 return NET_RX_DROP;
71962 }
71963 @@ -518,7 +518,7 @@ csum_copy_err:
71964 as some normal condition.
71965 */
71966 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
71967 - atomic_inc(&sk->sk_drops);
71968 + atomic_inc_unchecked(&sk->sk_drops);
71969 goto out;
71970 }
71971
71972 @@ -600,7 +600,7 @@ out:
71973 return err;
71974 }
71975
71976 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
71977 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
71978 struct flowi *fl, struct rt6_info *rt,
71979 unsigned int flags)
71980 {
71981 @@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *i
71982 u16 proto;
71983 int err;
71984
71985 + pax_track_stack();
71986 +
71987 /* Rough check on arithmetic overflow,
71988 better check is made in ip6_append_data().
71989 */
71990 @@ -916,12 +918,17 @@ do_confirm:
71991 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
71992 char __user *optval, int optlen)
71993 {
71994 + struct icmp6_filter filter;
71995 +
71996 switch (optname) {
71997 case ICMPV6_FILTER:
71998 + if (optlen < 0)
71999 + return -EINVAL;
72000 if (optlen > sizeof(struct icmp6_filter))
72001 optlen = sizeof(struct icmp6_filter);
72002 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
72003 + if (copy_from_user(&filter, optval, optlen))
72004 return -EFAULT;
72005 + raw6_sk(sk)->filter = filter;
72006 return 0;
72007 default:
72008 return -ENOPROTOOPT;
72009 @@ -934,6 +941,7 @@ static int rawv6_geticmpfilter(struct so
72010 char __user *optval, int __user *optlen)
72011 {
72012 int len;
72013 + struct icmp6_filter filter;
72014
72015 switch (optname) {
72016 case ICMPV6_FILTER:
72017 @@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct so
72018 len = sizeof(struct icmp6_filter);
72019 if (put_user(len, optlen))
72020 return -EFAULT;
72021 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
72022 + filter = raw6_sk(sk)->filter;
72023 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
72024 return -EFAULT;
72025 return 0;
72026 default:
72027 @@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct se
72028 0, 0L, 0,
72029 sock_i_uid(sp), 0,
72030 sock_i_ino(sp),
72031 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
72032 + atomic_read(&sp->sk_refcnt),
72033 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72034 + NULL,
72035 +#else
72036 + sp,
72037 +#endif
72038 + atomic_read_unchecked(&sp->sk_drops));
72039 }
72040
72041 static int raw6_seq_show(struct seq_file *seq, void *v)
72042 diff -urNp linux-2.6.32.45/net/ipv6/tcp_ipv6.c linux-2.6.32.45/net/ipv6/tcp_ipv6.c
72043 --- linux-2.6.32.45/net/ipv6/tcp_ipv6.c 2011-08-16 20:37:25.000000000 -0400
72044 +++ linux-2.6.32.45/net/ipv6/tcp_ipv6.c 2011-08-07 19:48:09.000000000 -0400
72045 @@ -89,6 +89,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
72046 }
72047 #endif
72048
72049 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72050 +extern int grsec_enable_blackhole;
72051 +#endif
72052 +
72053 static void tcp_v6_hash(struct sock *sk)
72054 {
72055 if (sk->sk_state != TCP_CLOSE) {
72056 @@ -1579,6 +1583,9 @@ static int tcp_v6_do_rcv(struct sock *sk
72057 return 0;
72058
72059 reset:
72060 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72061 + if (!grsec_enable_blackhole)
72062 +#endif
72063 tcp_v6_send_reset(sk, skb);
72064 discard:
72065 if (opt_skb)
72066 @@ -1656,12 +1663,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
72067 TCP_SKB_CB(skb)->sacked = 0;
72068
72069 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
72070 - if (!sk)
72071 + if (!sk) {
72072 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72073 + ret = 1;
72074 +#endif
72075 goto no_tcp_socket;
72076 + }
72077
72078 process:
72079 - if (sk->sk_state == TCP_TIME_WAIT)
72080 + if (sk->sk_state == TCP_TIME_WAIT) {
72081 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72082 + ret = 2;
72083 +#endif
72084 goto do_time_wait;
72085 + }
72086
72087 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
72088 goto discard_and_relse;
72089 @@ -1701,6 +1716,10 @@ no_tcp_socket:
72090 bad_packet:
72091 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
72092 } else {
72093 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72094 + if (!grsec_enable_blackhole || (ret == 1 &&
72095 + (skb->dev->flags & IFF_LOOPBACK)))
72096 +#endif
72097 tcp_v6_send_reset(NULL, skb);
72098 }
72099
72100 @@ -1916,7 +1935,13 @@ static void get_openreq6(struct seq_file
72101 uid,
72102 0, /* non standard timer */
72103 0, /* open_requests have no inode */
72104 - 0, req);
72105 + 0,
72106 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72107 + NULL
72108 +#else
72109 + req
72110 +#endif
72111 + );
72112 }
72113
72114 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
72115 @@ -1966,7 +1991,12 @@ static void get_tcp6_sock(struct seq_fil
72116 sock_i_uid(sp),
72117 icsk->icsk_probes_out,
72118 sock_i_ino(sp),
72119 - atomic_read(&sp->sk_refcnt), sp,
72120 + atomic_read(&sp->sk_refcnt),
72121 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72122 + NULL,
72123 +#else
72124 + sp,
72125 +#endif
72126 jiffies_to_clock_t(icsk->icsk_rto),
72127 jiffies_to_clock_t(icsk->icsk_ack.ato),
72128 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
72129 @@ -2001,7 +2031,13 @@ static void get_timewait6_sock(struct se
72130 dest->s6_addr32[2], dest->s6_addr32[3], destp,
72131 tw->tw_substate, 0, 0,
72132 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
72133 - atomic_read(&tw->tw_refcnt), tw);
72134 + atomic_read(&tw->tw_refcnt),
72135 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72136 + NULL
72137 +#else
72138 + tw
72139 +#endif
72140 + );
72141 }
72142
72143 static int tcp6_seq_show(struct seq_file *seq, void *v)
72144 diff -urNp linux-2.6.32.45/net/ipv6/udp.c linux-2.6.32.45/net/ipv6/udp.c
72145 --- linux-2.6.32.45/net/ipv6/udp.c 2011-07-13 17:23:04.000000000 -0400
72146 +++ linux-2.6.32.45/net/ipv6/udp.c 2011-07-13 17:23:27.000000000 -0400
72147 @@ -49,6 +49,10 @@
72148 #include <linux/seq_file.h>
72149 #include "udp_impl.h"
72150
72151 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72152 +extern int grsec_enable_blackhole;
72153 +#endif
72154 +
72155 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
72156 {
72157 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
72158 @@ -391,7 +395,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
72159 if (rc == -ENOMEM) {
72160 UDP6_INC_STATS_BH(sock_net(sk),
72161 UDP_MIB_RCVBUFERRORS, is_udplite);
72162 - atomic_inc(&sk->sk_drops);
72163 + atomic_inc_unchecked(&sk->sk_drops);
72164 }
72165 goto drop;
72166 }
72167 @@ -590,6 +594,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
72168 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
72169 proto == IPPROTO_UDPLITE);
72170
72171 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72172 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
72173 +#endif
72174 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
72175
72176 kfree_skb(skb);
72177 @@ -1209,8 +1216,13 @@ static void udp6_sock_seq_show(struct se
72178 0, 0L, 0,
72179 sock_i_uid(sp), 0,
72180 sock_i_ino(sp),
72181 - atomic_read(&sp->sk_refcnt), sp,
72182 - atomic_read(&sp->sk_drops));
72183 + atomic_read(&sp->sk_refcnt),
72184 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72185 + NULL,
72186 +#else
72187 + sp,
72188 +#endif
72189 + atomic_read_unchecked(&sp->sk_drops));
72190 }
72191
72192 int udp6_seq_show(struct seq_file *seq, void *v)
72193 diff -urNp linux-2.6.32.45/net/irda/ircomm/ircomm_tty.c linux-2.6.32.45/net/irda/ircomm/ircomm_tty.c
72194 --- linux-2.6.32.45/net/irda/ircomm/ircomm_tty.c 2011-03-27 14:31:47.000000000 -0400
72195 +++ linux-2.6.32.45/net/irda/ircomm/ircomm_tty.c 2011-04-17 15:56:46.000000000 -0400
72196 @@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(st
72197 add_wait_queue(&self->open_wait, &wait);
72198
72199 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
72200 - __FILE__,__LINE__, tty->driver->name, self->open_count );
72201 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
72202
72203 /* As far as I can see, we protect open_count - Jean II */
72204 spin_lock_irqsave(&self->spinlock, flags);
72205 if (!tty_hung_up_p(filp)) {
72206 extra_count = 1;
72207 - self->open_count--;
72208 + local_dec(&self->open_count);
72209 }
72210 spin_unlock_irqrestore(&self->spinlock, flags);
72211 - self->blocked_open++;
72212 + local_inc(&self->blocked_open);
72213
72214 while (1) {
72215 if (tty->termios->c_cflag & CBAUD) {
72216 @@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(st
72217 }
72218
72219 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
72220 - __FILE__,__LINE__, tty->driver->name, self->open_count );
72221 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
72222
72223 schedule();
72224 }
72225 @@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(st
72226 if (extra_count) {
72227 /* ++ is not atomic, so this should be protected - Jean II */
72228 spin_lock_irqsave(&self->spinlock, flags);
72229 - self->open_count++;
72230 + local_inc(&self->open_count);
72231 spin_unlock_irqrestore(&self->spinlock, flags);
72232 }
72233 - self->blocked_open--;
72234 + local_dec(&self->blocked_open);
72235
72236 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
72237 - __FILE__,__LINE__, tty->driver->name, self->open_count);
72238 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
72239
72240 if (!retval)
72241 self->flags |= ASYNC_NORMAL_ACTIVE;
72242 @@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_st
72243 }
72244 /* ++ is not atomic, so this should be protected - Jean II */
72245 spin_lock_irqsave(&self->spinlock, flags);
72246 - self->open_count++;
72247 + local_inc(&self->open_count);
72248
72249 tty->driver_data = self;
72250 self->tty = tty;
72251 spin_unlock_irqrestore(&self->spinlock, flags);
72252
72253 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
72254 - self->line, self->open_count);
72255 + self->line, local_read(&self->open_count));
72256
72257 /* Not really used by us, but lets do it anyway */
72258 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
72259 @@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_
72260 return;
72261 }
72262
72263 - if ((tty->count == 1) && (self->open_count != 1)) {
72264 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
72265 /*
72266 * Uh, oh. tty->count is 1, which means that the tty
72267 * structure will be freed. state->count should always
72268 @@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_
72269 */
72270 IRDA_DEBUG(0, "%s(), bad serial port count; "
72271 "tty->count is 1, state->count is %d\n", __func__ ,
72272 - self->open_count);
72273 - self->open_count = 1;
72274 + local_read(&self->open_count));
72275 + local_set(&self->open_count, 1);
72276 }
72277
72278 - if (--self->open_count < 0) {
72279 + if (local_dec_return(&self->open_count) < 0) {
72280 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
72281 - __func__, self->line, self->open_count);
72282 - self->open_count = 0;
72283 + __func__, self->line, local_read(&self->open_count));
72284 + local_set(&self->open_count, 0);
72285 }
72286 - if (self->open_count) {
72287 + if (local_read(&self->open_count)) {
72288 spin_unlock_irqrestore(&self->spinlock, flags);
72289
72290 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
72291 @@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_
72292 tty->closing = 0;
72293 self->tty = NULL;
72294
72295 - if (self->blocked_open) {
72296 + if (local_read(&self->blocked_open)) {
72297 if (self->close_delay)
72298 schedule_timeout_interruptible(self->close_delay);
72299 wake_up_interruptible(&self->open_wait);
72300 @@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty
72301 spin_lock_irqsave(&self->spinlock, flags);
72302 self->flags &= ~ASYNC_NORMAL_ACTIVE;
72303 self->tty = NULL;
72304 - self->open_count = 0;
72305 + local_set(&self->open_count, 0);
72306 spin_unlock_irqrestore(&self->spinlock, flags);
72307
72308 wake_up_interruptible(&self->open_wait);
72309 @@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct
72310 seq_putc(m, '\n');
72311
72312 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
72313 - seq_printf(m, "Open count: %d\n", self->open_count);
72314 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
72315 seq_printf(m, "Max data size: %d\n", self->max_data_size);
72316 seq_printf(m, "Max header size: %d\n", self->max_header_size);
72317
72318 diff -urNp linux-2.6.32.45/net/iucv/af_iucv.c linux-2.6.32.45/net/iucv/af_iucv.c
72319 --- linux-2.6.32.45/net/iucv/af_iucv.c 2011-03-27 14:31:47.000000000 -0400
72320 +++ linux-2.6.32.45/net/iucv/af_iucv.c 2011-05-04 17:56:28.000000000 -0400
72321 @@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct soc
72322
72323 write_lock_bh(&iucv_sk_list.lock);
72324
72325 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
72326 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
72327 while (__iucv_get_sock_by_name(name)) {
72328 sprintf(name, "%08x",
72329 - atomic_inc_return(&iucv_sk_list.autobind_name));
72330 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
72331 }
72332
72333 write_unlock_bh(&iucv_sk_list.lock);
72334 diff -urNp linux-2.6.32.45/net/key/af_key.c linux-2.6.32.45/net/key/af_key.c
72335 --- linux-2.6.32.45/net/key/af_key.c 2011-03-27 14:31:47.000000000 -0400
72336 +++ linux-2.6.32.45/net/key/af_key.c 2011-05-16 21:46:57.000000000 -0400
72337 @@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk
72338 struct xfrm_migrate m[XFRM_MAX_DEPTH];
72339 struct xfrm_kmaddress k;
72340
72341 + pax_track_stack();
72342 +
72343 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
72344 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
72345 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
72346 @@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_fil
72347 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
72348 else
72349 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
72350 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72351 + NULL,
72352 +#else
72353 s,
72354 +#endif
72355 atomic_read(&s->sk_refcnt),
72356 sk_rmem_alloc_get(s),
72357 sk_wmem_alloc_get(s),
72358 diff -urNp linux-2.6.32.45/net/lapb/lapb_iface.c linux-2.6.32.45/net/lapb/lapb_iface.c
72359 --- linux-2.6.32.45/net/lapb/lapb_iface.c 2011-03-27 14:31:47.000000000 -0400
72360 +++ linux-2.6.32.45/net/lapb/lapb_iface.c 2011-08-05 20:33:55.000000000 -0400
72361 @@ -157,7 +157,7 @@ int lapb_register(struct net_device *dev
72362 goto out;
72363
72364 lapb->dev = dev;
72365 - lapb->callbacks = *callbacks;
72366 + lapb->callbacks = callbacks;
72367
72368 __lapb_insert_cb(lapb);
72369
72370 @@ -379,32 +379,32 @@ int lapb_data_received(struct net_device
72371
72372 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
72373 {
72374 - if (lapb->callbacks.connect_confirmation)
72375 - lapb->callbacks.connect_confirmation(lapb->dev, reason);
72376 + if (lapb->callbacks->connect_confirmation)
72377 + lapb->callbacks->connect_confirmation(lapb->dev, reason);
72378 }
72379
72380 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
72381 {
72382 - if (lapb->callbacks.connect_indication)
72383 - lapb->callbacks.connect_indication(lapb->dev, reason);
72384 + if (lapb->callbacks->connect_indication)
72385 + lapb->callbacks->connect_indication(lapb->dev, reason);
72386 }
72387
72388 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
72389 {
72390 - if (lapb->callbacks.disconnect_confirmation)
72391 - lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
72392 + if (lapb->callbacks->disconnect_confirmation)
72393 + lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
72394 }
72395
72396 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
72397 {
72398 - if (lapb->callbacks.disconnect_indication)
72399 - lapb->callbacks.disconnect_indication(lapb->dev, reason);
72400 + if (lapb->callbacks->disconnect_indication)
72401 + lapb->callbacks->disconnect_indication(lapb->dev, reason);
72402 }
72403
72404 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
72405 {
72406 - if (lapb->callbacks.data_indication)
72407 - return lapb->callbacks.data_indication(lapb->dev, skb);
72408 + if (lapb->callbacks->data_indication)
72409 + return lapb->callbacks->data_indication(lapb->dev, skb);
72410
72411 kfree_skb(skb);
72412 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
72413 @@ -414,8 +414,8 @@ int lapb_data_transmit(struct lapb_cb *l
72414 {
72415 int used = 0;
72416
72417 - if (lapb->callbacks.data_transmit) {
72418 - lapb->callbacks.data_transmit(lapb->dev, skb);
72419 + if (lapb->callbacks->data_transmit) {
72420 + lapb->callbacks->data_transmit(lapb->dev, skb);
72421 used = 1;
72422 }
72423
72424 diff -urNp linux-2.6.32.45/net/mac80211/cfg.c linux-2.6.32.45/net/mac80211/cfg.c
72425 --- linux-2.6.32.45/net/mac80211/cfg.c 2011-03-27 14:31:47.000000000 -0400
72426 +++ linux-2.6.32.45/net/mac80211/cfg.c 2011-04-17 15:56:46.000000000 -0400
72427 @@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(st
72428 return err;
72429 }
72430
72431 -struct cfg80211_ops mac80211_config_ops = {
72432 +const struct cfg80211_ops mac80211_config_ops = {
72433 .add_virtual_intf = ieee80211_add_iface,
72434 .del_virtual_intf = ieee80211_del_iface,
72435 .change_virtual_intf = ieee80211_change_iface,
72436 diff -urNp linux-2.6.32.45/net/mac80211/cfg.h linux-2.6.32.45/net/mac80211/cfg.h
72437 --- linux-2.6.32.45/net/mac80211/cfg.h 2011-03-27 14:31:47.000000000 -0400
72438 +++ linux-2.6.32.45/net/mac80211/cfg.h 2011-04-17 15:56:46.000000000 -0400
72439 @@ -4,6 +4,6 @@
72440 #ifndef __CFG_H
72441 #define __CFG_H
72442
72443 -extern struct cfg80211_ops mac80211_config_ops;
72444 +extern const struct cfg80211_ops mac80211_config_ops;
72445
72446 #endif /* __CFG_H */
72447 diff -urNp linux-2.6.32.45/net/mac80211/debugfs_key.c linux-2.6.32.45/net/mac80211/debugfs_key.c
72448 --- linux-2.6.32.45/net/mac80211/debugfs_key.c 2011-03-27 14:31:47.000000000 -0400
72449 +++ linux-2.6.32.45/net/mac80211/debugfs_key.c 2011-04-17 15:56:46.000000000 -0400
72450 @@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file
72451 size_t count, loff_t *ppos)
72452 {
72453 struct ieee80211_key *key = file->private_data;
72454 - int i, res, bufsize = 2 * key->conf.keylen + 2;
72455 + int i, bufsize = 2 * key->conf.keylen + 2;
72456 char *buf = kmalloc(bufsize, GFP_KERNEL);
72457 char *p = buf;
72458 + ssize_t res;
72459 +
72460 + if (buf == NULL)
72461 + return -ENOMEM;
72462
72463 for (i = 0; i < key->conf.keylen; i++)
72464 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
72465 diff -urNp linux-2.6.32.45/net/mac80211/debugfs_sta.c linux-2.6.32.45/net/mac80211/debugfs_sta.c
72466 --- linux-2.6.32.45/net/mac80211/debugfs_sta.c 2011-03-27 14:31:47.000000000 -0400
72467 +++ linux-2.6.32.45/net/mac80211/debugfs_sta.c 2011-05-16 21:46:57.000000000 -0400
72468 @@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struc
72469 int i;
72470 struct sta_info *sta = file->private_data;
72471
72472 + pax_track_stack();
72473 +
72474 spin_lock_bh(&sta->lock);
72475 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
72476 sta->ampdu_mlme.dialog_token_allocator + 1);
72477 diff -urNp linux-2.6.32.45/net/mac80211/ieee80211_i.h linux-2.6.32.45/net/mac80211/ieee80211_i.h
72478 --- linux-2.6.32.45/net/mac80211/ieee80211_i.h 2011-03-27 14:31:47.000000000 -0400
72479 +++ linux-2.6.32.45/net/mac80211/ieee80211_i.h 2011-04-17 15:56:46.000000000 -0400
72480 @@ -25,6 +25,7 @@
72481 #include <linux/etherdevice.h>
72482 #include <net/cfg80211.h>
72483 #include <net/mac80211.h>
72484 +#include <asm/local.h>
72485 #include "key.h"
72486 #include "sta_info.h"
72487
72488 @@ -635,7 +636,7 @@ struct ieee80211_local {
72489 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
72490 spinlock_t queue_stop_reason_lock;
72491
72492 - int open_count;
72493 + local_t open_count;
72494 int monitors, cooked_mntrs;
72495 /* number of interfaces with corresponding FIF_ flags */
72496 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
72497 diff -urNp linux-2.6.32.45/net/mac80211/iface.c linux-2.6.32.45/net/mac80211/iface.c
72498 --- linux-2.6.32.45/net/mac80211/iface.c 2011-03-27 14:31:47.000000000 -0400
72499 +++ linux-2.6.32.45/net/mac80211/iface.c 2011-04-17 15:56:46.000000000 -0400
72500 @@ -166,7 +166,7 @@ static int ieee80211_open(struct net_dev
72501 break;
72502 }
72503
72504 - if (local->open_count == 0) {
72505 + if (local_read(&local->open_count) == 0) {
72506 res = drv_start(local);
72507 if (res)
72508 goto err_del_bss;
72509 @@ -196,7 +196,7 @@ static int ieee80211_open(struct net_dev
72510 * Validate the MAC address for this device.
72511 */
72512 if (!is_valid_ether_addr(dev->dev_addr)) {
72513 - if (!local->open_count)
72514 + if (!local_read(&local->open_count))
72515 drv_stop(local);
72516 return -EADDRNOTAVAIL;
72517 }
72518 @@ -292,7 +292,7 @@ static int ieee80211_open(struct net_dev
72519
72520 hw_reconf_flags |= __ieee80211_recalc_idle(local);
72521
72522 - local->open_count++;
72523 + local_inc(&local->open_count);
72524 if (hw_reconf_flags) {
72525 ieee80211_hw_config(local, hw_reconf_flags);
72526 /*
72527 @@ -320,7 +320,7 @@ static int ieee80211_open(struct net_dev
72528 err_del_interface:
72529 drv_remove_interface(local, &conf);
72530 err_stop:
72531 - if (!local->open_count)
72532 + if (!local_read(&local->open_count))
72533 drv_stop(local);
72534 err_del_bss:
72535 sdata->bss = NULL;
72536 @@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_dev
72537 WARN_ON(!list_empty(&sdata->u.ap.vlans));
72538 }
72539
72540 - local->open_count--;
72541 + local_dec(&local->open_count);
72542
72543 switch (sdata->vif.type) {
72544 case NL80211_IFTYPE_AP_VLAN:
72545 @@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_dev
72546
72547 ieee80211_recalc_ps(local, -1);
72548
72549 - if (local->open_count == 0) {
72550 + if (local_read(&local->open_count) == 0) {
72551 ieee80211_clear_tx_pending(local);
72552 ieee80211_stop_device(local);
72553
72554 diff -urNp linux-2.6.32.45/net/mac80211/main.c linux-2.6.32.45/net/mac80211/main.c
72555 --- linux-2.6.32.45/net/mac80211/main.c 2011-05-10 22:12:02.000000000 -0400
72556 +++ linux-2.6.32.45/net/mac80211/main.c 2011-05-10 22:12:34.000000000 -0400
72557 @@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211
72558 local->hw.conf.power_level = power;
72559 }
72560
72561 - if (changed && local->open_count) {
72562 + if (changed && local_read(&local->open_count)) {
72563 ret = drv_config(local, changed);
72564 /*
72565 * Goal:
72566 diff -urNp linux-2.6.32.45/net/mac80211/mlme.c linux-2.6.32.45/net/mac80211/mlme.c
72567 --- linux-2.6.32.45/net/mac80211/mlme.c 2011-08-09 18:35:30.000000000 -0400
72568 +++ linux-2.6.32.45/net/mac80211/mlme.c 2011-08-09 18:34:01.000000000 -0400
72569 @@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee
72570 bool have_higher_than_11mbit = false, newsta = false;
72571 u16 ap_ht_cap_flags;
72572
72573 + pax_track_stack();
72574 +
72575 /*
72576 * AssocResp and ReassocResp have identical structure, so process both
72577 * of them in this function.
72578 diff -urNp linux-2.6.32.45/net/mac80211/pm.c linux-2.6.32.45/net/mac80211/pm.c
72579 --- linux-2.6.32.45/net/mac80211/pm.c 2011-03-27 14:31:47.000000000 -0400
72580 +++ linux-2.6.32.45/net/mac80211/pm.c 2011-04-17 15:56:46.000000000 -0400
72581 @@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211
72582 }
72583
72584 /* stop hardware - this must stop RX */
72585 - if (local->open_count)
72586 + if (local_read(&local->open_count))
72587 ieee80211_stop_device(local);
72588
72589 local->suspended = true;
72590 diff -urNp linux-2.6.32.45/net/mac80211/rate.c linux-2.6.32.45/net/mac80211/rate.c
72591 --- linux-2.6.32.45/net/mac80211/rate.c 2011-03-27 14:31:47.000000000 -0400
72592 +++ linux-2.6.32.45/net/mac80211/rate.c 2011-04-17 15:56:46.000000000 -0400
72593 @@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct
72594 struct rate_control_ref *ref, *old;
72595
72596 ASSERT_RTNL();
72597 - if (local->open_count)
72598 + if (local_read(&local->open_count))
72599 return -EBUSY;
72600
72601 ref = rate_control_alloc(name, local);
72602 diff -urNp linux-2.6.32.45/net/mac80211/tx.c linux-2.6.32.45/net/mac80211/tx.c
72603 --- linux-2.6.32.45/net/mac80211/tx.c 2011-03-27 14:31:47.000000000 -0400
72604 +++ linux-2.6.32.45/net/mac80211/tx.c 2011-04-17 15:56:46.000000000 -0400
72605 @@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct
72606 return cpu_to_le16(dur);
72607 }
72608
72609 -static int inline is_ieee80211_device(struct ieee80211_local *local,
72610 +static inline int is_ieee80211_device(struct ieee80211_local *local,
72611 struct net_device *dev)
72612 {
72613 return local == wdev_priv(dev->ieee80211_ptr);
72614 diff -urNp linux-2.6.32.45/net/mac80211/util.c linux-2.6.32.45/net/mac80211/util.c
72615 --- linux-2.6.32.45/net/mac80211/util.c 2011-03-27 14:31:47.000000000 -0400
72616 +++ linux-2.6.32.45/net/mac80211/util.c 2011-04-17 15:56:46.000000000 -0400
72617 @@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_
72618 local->resuming = true;
72619
72620 /* restart hardware */
72621 - if (local->open_count) {
72622 + if (local_read(&local->open_count)) {
72623 /*
72624 * Upon resume hardware can sometimes be goofy due to
72625 * various platform / driver / bus issues, so restarting
72626 diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_app.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_app.c
72627 --- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_app.c 2011-03-27 14:31:47.000000000 -0400
72628 +++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_app.c 2011-05-17 19:26:34.000000000 -0400
72629 @@ -564,7 +564,7 @@ static const struct file_operations ip_v
72630 .open = ip_vs_app_open,
72631 .read = seq_read,
72632 .llseek = seq_lseek,
72633 - .release = seq_release,
72634 + .release = seq_release_net,
72635 };
72636 #endif
72637
72638 diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_conn.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_conn.c
72639 --- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_conn.c 2011-03-27 14:31:47.000000000 -0400
72640 +++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_conn.c 2011-05-17 19:26:34.000000000 -0400
72641 @@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
72642 /* if the connection is not template and is created
72643 * by sync, preserve the activity flag.
72644 */
72645 - cp->flags |= atomic_read(&dest->conn_flags) &
72646 + cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
72647 (~IP_VS_CONN_F_INACTIVE);
72648 else
72649 - cp->flags |= atomic_read(&dest->conn_flags);
72650 + cp->flags |= atomic_read_unchecked(&dest->conn_flags);
72651 cp->dest = dest;
72652
72653 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
72654 @@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const
72655 atomic_set(&cp->refcnt, 1);
72656
72657 atomic_set(&cp->n_control, 0);
72658 - atomic_set(&cp->in_pkts, 0);
72659 + atomic_set_unchecked(&cp->in_pkts, 0);
72660
72661 atomic_inc(&ip_vs_conn_count);
72662 if (flags & IP_VS_CONN_F_NO_CPORT)
72663 @@ -871,7 +871,7 @@ static const struct file_operations ip_v
72664 .open = ip_vs_conn_open,
72665 .read = seq_read,
72666 .llseek = seq_lseek,
72667 - .release = seq_release,
72668 + .release = seq_release_net,
72669 };
72670
72671 static const char *ip_vs_origin_name(unsigned flags)
72672 @@ -934,7 +934,7 @@ static const struct file_operations ip_v
72673 .open = ip_vs_conn_sync_open,
72674 .read = seq_read,
72675 .llseek = seq_lseek,
72676 - .release = seq_release,
72677 + .release = seq_release_net,
72678 };
72679
72680 #endif
72681 @@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip
72682
72683 /* Don't drop the entry if its number of incoming packets is not
72684 located in [0, 8] */
72685 - i = atomic_read(&cp->in_pkts);
72686 + i = atomic_read_unchecked(&cp->in_pkts);
72687 if (i > 8 || i < 0) return 0;
72688
72689 if (!todrop_rate[i]) return 0;
72690 diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_core.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_core.c
72691 --- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_core.c 2011-03-27 14:31:47.000000000 -0400
72692 +++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_core.c 2011-05-04 17:56:28.000000000 -0400
72693 @@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *sv
72694 ret = cp->packet_xmit(skb, cp, pp);
72695 /* do not touch skb anymore */
72696
72697 - atomic_inc(&cp->in_pkts);
72698 + atomic_inc_unchecked(&cp->in_pkts);
72699 ip_vs_conn_put(cp);
72700 return ret;
72701 }
72702 @@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk
72703 * Sync connection if it is about to close to
72704 * encorage the standby servers to update the connections timeout
72705 */
72706 - pkts = atomic_add_return(1, &cp->in_pkts);
72707 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
72708 if (af == AF_INET &&
72709 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
72710 (((cp->protocol != IPPROTO_TCP ||
72711 diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_ctl.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_ctl.c
72712 --- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_ctl.c 2011-03-27 14:31:47.000000000 -0400
72713 +++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_ctl.c 2011-05-17 19:26:34.000000000 -0400
72714 @@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service
72715 ip_vs_rs_hash(dest);
72716 write_unlock_bh(&__ip_vs_rs_lock);
72717 }
72718 - atomic_set(&dest->conn_flags, conn_flags);
72719 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
72720
72721 /* bind the service */
72722 if (!dest->svc) {
72723 @@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct se
72724 " %-7s %-6d %-10d %-10d\n",
72725 &dest->addr.in6,
72726 ntohs(dest->port),
72727 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
72728 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
72729 atomic_read(&dest->weight),
72730 atomic_read(&dest->activeconns),
72731 atomic_read(&dest->inactconns));
72732 @@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct se
72733 "%-7s %-6d %-10d %-10d\n",
72734 ntohl(dest->addr.ip),
72735 ntohs(dest->port),
72736 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
72737 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
72738 atomic_read(&dest->weight),
72739 atomic_read(&dest->activeconns),
72740 atomic_read(&dest->inactconns));
72741 @@ -1927,7 +1927,7 @@ static const struct file_operations ip_v
72742 .open = ip_vs_info_open,
72743 .read = seq_read,
72744 .llseek = seq_lseek,
72745 - .release = seq_release_private,
72746 + .release = seq_release_net,
72747 };
72748
72749 #endif
72750 @@ -1976,7 +1976,7 @@ static const struct file_operations ip_v
72751 .open = ip_vs_stats_seq_open,
72752 .read = seq_read,
72753 .llseek = seq_lseek,
72754 - .release = single_release,
72755 + .release = single_release_net,
72756 };
72757
72758 #endif
72759 @@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip
72760
72761 entry.addr = dest->addr.ip;
72762 entry.port = dest->port;
72763 - entry.conn_flags = atomic_read(&dest->conn_flags);
72764 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
72765 entry.weight = atomic_read(&dest->weight);
72766 entry.u_threshold = dest->u_threshold;
72767 entry.l_threshold = dest->l_threshold;
72768 @@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cm
72769 unsigned char arg[128];
72770 int ret = 0;
72771
72772 + pax_track_stack();
72773 +
72774 if (!capable(CAP_NET_ADMIN))
72775 return -EPERM;
72776
72777 @@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct s
72778 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
72779
72780 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
72781 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
72782 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
72783 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
72784 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
72785 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
72786 diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_sync.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_sync.c
72787 --- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_sync.c 2011-03-27 14:31:47.000000000 -0400
72788 +++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_sync.c 2011-05-04 17:56:28.000000000 -0400
72789 @@ -438,7 +438,7 @@ static void ip_vs_process_message(const
72790
72791 if (opt)
72792 memcpy(&cp->in_seq, opt, sizeof(*opt));
72793 - atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
72794 + atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
72795 cp->state = state;
72796 cp->old_state = cp->state;
72797 /*
72798 diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_xmit.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_xmit.c
72799 --- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_xmit.c 2011-03-27 14:31:47.000000000 -0400
72800 +++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_xmit.c 2011-05-04 17:56:28.000000000 -0400
72801 @@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
72802 else
72803 rc = NF_ACCEPT;
72804 /* do not touch skb anymore */
72805 - atomic_inc(&cp->in_pkts);
72806 + atomic_inc_unchecked(&cp->in_pkts);
72807 goto out;
72808 }
72809
72810 @@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
72811 else
72812 rc = NF_ACCEPT;
72813 /* do not touch skb anymore */
72814 - atomic_inc(&cp->in_pkts);
72815 + atomic_inc_unchecked(&cp->in_pkts);
72816 goto out;
72817 }
72818
72819 diff -urNp linux-2.6.32.45/net/netfilter/Kconfig linux-2.6.32.45/net/netfilter/Kconfig
72820 --- linux-2.6.32.45/net/netfilter/Kconfig 2011-03-27 14:31:47.000000000 -0400
72821 +++ linux-2.6.32.45/net/netfilter/Kconfig 2011-04-17 15:56:46.000000000 -0400
72822 @@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
72823
72824 To compile it as a module, choose M here. If unsure, say N.
72825
72826 +config NETFILTER_XT_MATCH_GRADM
72827 + tristate '"gradm" match support'
72828 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
72829 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
72830 + ---help---
72831 + The gradm match allows matching on whether grsecurity RBAC is enabled.
72832 + It is useful when iptables rules are applied early on bootup to
72833 + prevent connections to the machine (except from a trusted host)
72834 + while the RBAC system is disabled.
72835 +
72836 config NETFILTER_XT_MATCH_HASHLIMIT
72837 tristate '"hashlimit" match support'
72838 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
72839 diff -urNp linux-2.6.32.45/net/netfilter/Makefile linux-2.6.32.45/net/netfilter/Makefile
72840 --- linux-2.6.32.45/net/netfilter/Makefile 2011-03-27 14:31:47.000000000 -0400
72841 +++ linux-2.6.32.45/net/netfilter/Makefile 2011-04-17 15:56:46.000000000 -0400
72842 @@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRAC
72843 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
72844 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
72845 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
72846 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
72847 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
72848 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
72849 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
72850 diff -urNp linux-2.6.32.45/net/netfilter/nf_conntrack_netlink.c linux-2.6.32.45/net/netfilter/nf_conntrack_netlink.c
72851 --- linux-2.6.32.45/net/netfilter/nf_conntrack_netlink.c 2011-03-27 14:31:47.000000000 -0400
72852 +++ linux-2.6.32.45/net/netfilter/nf_conntrack_netlink.c 2011-04-17 15:56:46.000000000 -0400
72853 @@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlatt
72854 static int
72855 ctnetlink_parse_tuple(const struct nlattr * const cda[],
72856 struct nf_conntrack_tuple *tuple,
72857 - enum ctattr_tuple type, u_int8_t l3num)
72858 + enum ctattr_type type, u_int8_t l3num)
72859 {
72860 struct nlattr *tb[CTA_TUPLE_MAX+1];
72861 int err;
72862 diff -urNp linux-2.6.32.45/net/netfilter/nfnetlink_log.c linux-2.6.32.45/net/netfilter/nfnetlink_log.c
72863 --- linux-2.6.32.45/net/netfilter/nfnetlink_log.c 2011-03-27 14:31:47.000000000 -0400
72864 +++ linux-2.6.32.45/net/netfilter/nfnetlink_log.c 2011-05-04 17:56:28.000000000 -0400
72865 @@ -68,7 +68,7 @@ struct nfulnl_instance {
72866 };
72867
72868 static DEFINE_RWLOCK(instances_lock);
72869 -static atomic_t global_seq;
72870 +static atomic_unchecked_t global_seq;
72871
72872 #define INSTANCE_BUCKETS 16
72873 static struct hlist_head instance_table[INSTANCE_BUCKETS];
72874 @@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_ins
72875 /* global sequence number */
72876 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
72877 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
72878 - htonl(atomic_inc_return(&global_seq)));
72879 + htonl(atomic_inc_return_unchecked(&global_seq)));
72880
72881 if (data_len) {
72882 struct nlattr *nla;
72883 diff -urNp linux-2.6.32.45/net/netfilter/xt_gradm.c linux-2.6.32.45/net/netfilter/xt_gradm.c
72884 --- linux-2.6.32.45/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
72885 +++ linux-2.6.32.45/net/netfilter/xt_gradm.c 2011-04-17 15:56:46.000000000 -0400
72886 @@ -0,0 +1,51 @@
72887 +/*
72888 + * gradm match for netfilter
72889 + * Copyright © Zbigniew Krzystolik, 2010
72890 + *
72891 + * This program is free software; you can redistribute it and/or modify
72892 + * it under the terms of the GNU General Public License; either version
72893 + * 2 or 3 as published by the Free Software Foundation.
72894 + */
72895 +#include <linux/module.h>
72896 +#include <linux/moduleparam.h>
72897 +#include <linux/skbuff.h>
72898 +#include <linux/netfilter/x_tables.h>
72899 +#include <linux/grsecurity.h>
72900 +#include <linux/netfilter/xt_gradm.h>
72901 +
72902 +static bool
72903 +gradm_mt(const struct sk_buff *skb, const struct xt_match_param *par)
72904 +{
72905 + const struct xt_gradm_mtinfo *info = par->matchinfo;
72906 + bool retval = false;
72907 + if (gr_acl_is_enabled())
72908 + retval = true;
72909 + return retval ^ info->invflags;
72910 +}
72911 +
72912 +static struct xt_match gradm_mt_reg __read_mostly = {
72913 + .name = "gradm",
72914 + .revision = 0,
72915 + .family = NFPROTO_UNSPEC,
72916 + .match = gradm_mt,
72917 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
72918 + .me = THIS_MODULE,
72919 +};
72920 +
72921 +static int __init gradm_mt_init(void)
72922 +{
72923 + return xt_register_match(&gradm_mt_reg);
72924 +}
72925 +
72926 +static void __exit gradm_mt_exit(void)
72927 +{
72928 + xt_unregister_match(&gradm_mt_reg);
72929 +}
72930 +
72931 +module_init(gradm_mt_init);
72932 +module_exit(gradm_mt_exit);
72933 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
72934 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
72935 +MODULE_LICENSE("GPL");
72936 +MODULE_ALIAS("ipt_gradm");
72937 +MODULE_ALIAS("ip6t_gradm");
72938 diff -urNp linux-2.6.32.45/net/netlink/af_netlink.c linux-2.6.32.45/net/netlink/af_netlink.c
72939 --- linux-2.6.32.45/net/netlink/af_netlink.c 2011-03-27 14:31:47.000000000 -0400
72940 +++ linux-2.6.32.45/net/netlink/af_netlink.c 2011-05-04 17:56:28.000000000 -0400
72941 @@ -733,7 +733,7 @@ static void netlink_overrun(struct sock
72942 sk->sk_error_report(sk);
72943 }
72944 }
72945 - atomic_inc(&sk->sk_drops);
72946 + atomic_inc_unchecked(&sk->sk_drops);
72947 }
72948
72949 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
72950 @@ -1964,15 +1964,23 @@ static int netlink_seq_show(struct seq_f
72951 struct netlink_sock *nlk = nlk_sk(s);
72952
72953 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d\n",
72954 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72955 + NULL,
72956 +#else
72957 s,
72958 +#endif
72959 s->sk_protocol,
72960 nlk->pid,
72961 nlk->groups ? (u32)nlk->groups[0] : 0,
72962 sk_rmem_alloc_get(s),
72963 sk_wmem_alloc_get(s),
72964 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72965 + NULL,
72966 +#else
72967 nlk->cb,
72968 +#endif
72969 atomic_read(&s->sk_refcnt),
72970 - atomic_read(&s->sk_drops)
72971 + atomic_read_unchecked(&s->sk_drops)
72972 );
72973
72974 }
72975 diff -urNp linux-2.6.32.45/net/netrom/af_netrom.c linux-2.6.32.45/net/netrom/af_netrom.c
72976 --- linux-2.6.32.45/net/netrom/af_netrom.c 2011-03-27 14:31:47.000000000 -0400
72977 +++ linux-2.6.32.45/net/netrom/af_netrom.c 2011-04-17 15:56:46.000000000 -0400
72978 @@ -838,6 +838,7 @@ static int nr_getname(struct socket *soc
72979 struct sock *sk = sock->sk;
72980 struct nr_sock *nr = nr_sk(sk);
72981
72982 + memset(sax, 0, sizeof(*sax));
72983 lock_sock(sk);
72984 if (peer != 0) {
72985 if (sk->sk_state != TCP_ESTABLISHED) {
72986 @@ -852,7 +853,6 @@ static int nr_getname(struct socket *soc
72987 *uaddr_len = sizeof(struct full_sockaddr_ax25);
72988 } else {
72989 sax->fsa_ax25.sax25_family = AF_NETROM;
72990 - sax->fsa_ax25.sax25_ndigis = 0;
72991 sax->fsa_ax25.sax25_call = nr->source_addr;
72992 *uaddr_len = sizeof(struct sockaddr_ax25);
72993 }
72994 diff -urNp linux-2.6.32.45/net/packet/af_packet.c linux-2.6.32.45/net/packet/af_packet.c
72995 --- linux-2.6.32.45/net/packet/af_packet.c 2011-07-13 17:23:04.000000000 -0400
72996 +++ linux-2.6.32.45/net/packet/af_packet.c 2011-07-13 17:23:27.000000000 -0400
72997 @@ -2429,7 +2429,11 @@ static int packet_seq_show(struct seq_fi
72998
72999 seq_printf(seq,
73000 "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
73001 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73002 + NULL,
73003 +#else
73004 s,
73005 +#endif
73006 atomic_read(&s->sk_refcnt),
73007 s->sk_type,
73008 ntohs(po->num),
73009 diff -urNp linux-2.6.32.45/net/phonet/af_phonet.c linux-2.6.32.45/net/phonet/af_phonet.c
73010 --- linux-2.6.32.45/net/phonet/af_phonet.c 2011-03-27 14:31:47.000000000 -0400
73011 +++ linux-2.6.32.45/net/phonet/af_phonet.c 2011-04-17 15:56:46.000000000 -0400
73012 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_pr
73013 {
73014 struct phonet_protocol *pp;
73015
73016 - if (protocol >= PHONET_NPROTO)
73017 + if (protocol < 0 || protocol >= PHONET_NPROTO)
73018 return NULL;
73019
73020 spin_lock(&proto_tab_lock);
73021 @@ -402,7 +402,7 @@ int __init_or_module phonet_proto_regist
73022 {
73023 int err = 0;
73024
73025 - if (protocol >= PHONET_NPROTO)
73026 + if (protocol < 0 || protocol >= PHONET_NPROTO)
73027 return -EINVAL;
73028
73029 err = proto_register(pp->prot, 1);
73030 diff -urNp linux-2.6.32.45/net/phonet/datagram.c linux-2.6.32.45/net/phonet/datagram.c
73031 --- linux-2.6.32.45/net/phonet/datagram.c 2011-03-27 14:31:47.000000000 -0400
73032 +++ linux-2.6.32.45/net/phonet/datagram.c 2011-05-04 17:56:28.000000000 -0400
73033 @@ -162,7 +162,7 @@ static int pn_backlog_rcv(struct sock *s
73034 if (err < 0) {
73035 kfree_skb(skb);
73036 if (err == -ENOMEM)
73037 - atomic_inc(&sk->sk_drops);
73038 + atomic_inc_unchecked(&sk->sk_drops);
73039 }
73040 return err ? NET_RX_DROP : NET_RX_SUCCESS;
73041 }
73042 diff -urNp linux-2.6.32.45/net/phonet/pep.c linux-2.6.32.45/net/phonet/pep.c
73043 --- linux-2.6.32.45/net/phonet/pep.c 2011-03-27 14:31:47.000000000 -0400
73044 +++ linux-2.6.32.45/net/phonet/pep.c 2011-05-04 17:56:28.000000000 -0400
73045 @@ -348,7 +348,7 @@ static int pipe_do_rcv(struct sock *sk,
73046
73047 case PNS_PEP_CTRL_REQ:
73048 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
73049 - atomic_inc(&sk->sk_drops);
73050 + atomic_inc_unchecked(&sk->sk_drops);
73051 break;
73052 }
73053 __skb_pull(skb, 4);
73054 @@ -362,12 +362,12 @@ static int pipe_do_rcv(struct sock *sk,
73055 if (!err)
73056 return 0;
73057 if (err == -ENOMEM)
73058 - atomic_inc(&sk->sk_drops);
73059 + atomic_inc_unchecked(&sk->sk_drops);
73060 break;
73061 }
73062
73063 if (pn->rx_credits == 0) {
73064 - atomic_inc(&sk->sk_drops);
73065 + atomic_inc_unchecked(&sk->sk_drops);
73066 err = -ENOBUFS;
73067 break;
73068 }
73069 diff -urNp linux-2.6.32.45/net/phonet/socket.c linux-2.6.32.45/net/phonet/socket.c
73070 --- linux-2.6.32.45/net/phonet/socket.c 2011-03-27 14:31:47.000000000 -0400
73071 +++ linux-2.6.32.45/net/phonet/socket.c 2011-05-04 17:57:07.000000000 -0400
73072 @@ -482,8 +482,13 @@ static int pn_sock_seq_show(struct seq_f
73073 sk->sk_state,
73074 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
73075 sock_i_uid(sk), sock_i_ino(sk),
73076 - atomic_read(&sk->sk_refcnt), sk,
73077 - atomic_read(&sk->sk_drops), &len);
73078 + atomic_read(&sk->sk_refcnt),
73079 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73080 + NULL,
73081 +#else
73082 + sk,
73083 +#endif
73084 + atomic_read_unchecked(&sk->sk_drops), &len);
73085 }
73086 seq_printf(seq, "%*s\n", 127 - len, "");
73087 return 0;
73088 diff -urNp linux-2.6.32.45/net/rds/cong.c linux-2.6.32.45/net/rds/cong.c
73089 --- linux-2.6.32.45/net/rds/cong.c 2011-03-27 14:31:47.000000000 -0400
73090 +++ linux-2.6.32.45/net/rds/cong.c 2011-05-04 17:56:28.000000000 -0400
73091 @@ -77,7 +77,7 @@
73092 * finds that the saved generation number is smaller than the global generation
73093 * number, it wakes up the process.
73094 */
73095 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
73096 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
73097
73098 /*
73099 * Congestion monitoring
73100 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_con
73101 rdsdebug("waking map %p for %pI4\n",
73102 map, &map->m_addr);
73103 rds_stats_inc(s_cong_update_received);
73104 - atomic_inc(&rds_cong_generation);
73105 + atomic_inc_unchecked(&rds_cong_generation);
73106 if (waitqueue_active(&map->m_waitq))
73107 wake_up(&map->m_waitq);
73108 if (waitqueue_active(&rds_poll_waitq))
73109 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
73110
73111 int rds_cong_updated_since(unsigned long *recent)
73112 {
73113 - unsigned long gen = atomic_read(&rds_cong_generation);
73114 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
73115
73116 if (likely(*recent == gen))
73117 return 0;
73118 diff -urNp linux-2.6.32.45/net/rds/iw_rdma.c linux-2.6.32.45/net/rds/iw_rdma.c
73119 --- linux-2.6.32.45/net/rds/iw_rdma.c 2011-03-27 14:31:47.000000000 -0400
73120 +++ linux-2.6.32.45/net/rds/iw_rdma.c 2011-05-16 21:46:57.000000000 -0400
73121 @@ -181,6 +181,8 @@ int rds_iw_update_cm_id(struct rds_iw_de
73122 struct rdma_cm_id *pcm_id;
73123 int rc;
73124
73125 + pax_track_stack();
73126 +
73127 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
73128 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
73129
73130 diff -urNp linux-2.6.32.45/net/rds/Kconfig linux-2.6.32.45/net/rds/Kconfig
73131 --- linux-2.6.32.45/net/rds/Kconfig 2011-03-27 14:31:47.000000000 -0400
73132 +++ linux-2.6.32.45/net/rds/Kconfig 2011-04-17 15:56:46.000000000 -0400
73133 @@ -1,7 +1,7 @@
73134
73135 config RDS
73136 tristate "The RDS Protocol (EXPERIMENTAL)"
73137 - depends on INET && EXPERIMENTAL
73138 + depends on INET && EXPERIMENTAL && BROKEN
73139 ---help---
73140 The RDS (Reliable Datagram Sockets) protocol provides reliable,
73141 sequenced delivery of datagrams over Infiniband, iWARP,
73142 diff -urNp linux-2.6.32.45/net/rxrpc/af_rxrpc.c linux-2.6.32.45/net/rxrpc/af_rxrpc.c
73143 --- linux-2.6.32.45/net/rxrpc/af_rxrpc.c 2011-03-27 14:31:47.000000000 -0400
73144 +++ linux-2.6.32.45/net/rxrpc/af_rxrpc.c 2011-05-04 17:56:28.000000000 -0400
73145 @@ -38,7 +38,7 @@ static const struct proto_ops rxrpc_rpc_
73146 __be32 rxrpc_epoch;
73147
73148 /* current debugging ID */
73149 -atomic_t rxrpc_debug_id;
73150 +atomic_unchecked_t rxrpc_debug_id;
73151
73152 /* count of skbs currently in use */
73153 atomic_t rxrpc_n_skbs;
73154 diff -urNp linux-2.6.32.45/net/rxrpc/ar-ack.c linux-2.6.32.45/net/rxrpc/ar-ack.c
73155 --- linux-2.6.32.45/net/rxrpc/ar-ack.c 2011-03-27 14:31:47.000000000 -0400
73156 +++ linux-2.6.32.45/net/rxrpc/ar-ack.c 2011-05-16 21:46:57.000000000 -0400
73157 @@ -174,7 +174,7 @@ static void rxrpc_resend(struct rxrpc_ca
73158
73159 _enter("{%d,%d,%d,%d},",
73160 call->acks_hard, call->acks_unacked,
73161 - atomic_read(&call->sequence),
73162 + atomic_read_unchecked(&call->sequence),
73163 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
73164
73165 stop = 0;
73166 @@ -198,7 +198,7 @@ static void rxrpc_resend(struct rxrpc_ca
73167
73168 /* each Tx packet has a new serial number */
73169 sp->hdr.serial =
73170 - htonl(atomic_inc_return(&call->conn->serial));
73171 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
73172
73173 hdr = (struct rxrpc_header *) txb->head;
73174 hdr->serial = sp->hdr.serial;
73175 @@ -401,7 +401,7 @@ static void rxrpc_rotate_tx_window(struc
73176 */
73177 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
73178 {
73179 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
73180 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
73181 }
73182
73183 /*
73184 @@ -627,7 +627,7 @@ process_further:
73185
73186 latest = ntohl(sp->hdr.serial);
73187 hard = ntohl(ack.firstPacket);
73188 - tx = atomic_read(&call->sequence);
73189 + tx = atomic_read_unchecked(&call->sequence);
73190
73191 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
73192 latest,
73193 @@ -840,6 +840,8 @@ void rxrpc_process_call(struct work_stru
73194 u32 abort_code = RX_PROTOCOL_ERROR;
73195 u8 *acks = NULL;
73196
73197 + pax_track_stack();
73198 +
73199 //printk("\n--------------------\n");
73200 _enter("{%d,%s,%lx} [%lu]",
73201 call->debug_id, rxrpc_call_states[call->state], call->events,
73202 @@ -1159,7 +1161,7 @@ void rxrpc_process_call(struct work_stru
73203 goto maybe_reschedule;
73204
73205 send_ACK_with_skew:
73206 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
73207 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
73208 ntohl(ack.serial));
73209 send_ACK:
73210 mtu = call->conn->trans->peer->if_mtu;
73211 @@ -1171,7 +1173,7 @@ send_ACK:
73212 ackinfo.rxMTU = htonl(5692);
73213 ackinfo.jumbo_max = htonl(4);
73214
73215 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
73216 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
73217 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
73218 ntohl(hdr.serial),
73219 ntohs(ack.maxSkew),
73220 @@ -1189,7 +1191,7 @@ send_ACK:
73221 send_message:
73222 _debug("send message");
73223
73224 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
73225 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
73226 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
73227 send_message_2:
73228
73229 diff -urNp linux-2.6.32.45/net/rxrpc/ar-call.c linux-2.6.32.45/net/rxrpc/ar-call.c
73230 --- linux-2.6.32.45/net/rxrpc/ar-call.c 2011-03-27 14:31:47.000000000 -0400
73231 +++ linux-2.6.32.45/net/rxrpc/ar-call.c 2011-05-04 17:56:28.000000000 -0400
73232 @@ -82,7 +82,7 @@ static struct rxrpc_call *rxrpc_alloc_ca
73233 spin_lock_init(&call->lock);
73234 rwlock_init(&call->state_lock);
73235 atomic_set(&call->usage, 1);
73236 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
73237 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
73238 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
73239
73240 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
73241 diff -urNp linux-2.6.32.45/net/rxrpc/ar-connection.c linux-2.6.32.45/net/rxrpc/ar-connection.c
73242 --- linux-2.6.32.45/net/rxrpc/ar-connection.c 2011-03-27 14:31:47.000000000 -0400
73243 +++ linux-2.6.32.45/net/rxrpc/ar-connection.c 2011-05-04 17:56:28.000000000 -0400
73244 @@ -205,7 +205,7 @@ static struct rxrpc_connection *rxrpc_al
73245 rwlock_init(&conn->lock);
73246 spin_lock_init(&conn->state_lock);
73247 atomic_set(&conn->usage, 1);
73248 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
73249 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
73250 conn->avail_calls = RXRPC_MAXCALLS;
73251 conn->size_align = 4;
73252 conn->header_size = sizeof(struct rxrpc_header);
73253 diff -urNp linux-2.6.32.45/net/rxrpc/ar-connevent.c linux-2.6.32.45/net/rxrpc/ar-connevent.c
73254 --- linux-2.6.32.45/net/rxrpc/ar-connevent.c 2011-03-27 14:31:47.000000000 -0400
73255 +++ linux-2.6.32.45/net/rxrpc/ar-connevent.c 2011-05-04 17:56:28.000000000 -0400
73256 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct
73257
73258 len = iov[0].iov_len + iov[1].iov_len;
73259
73260 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
73261 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
73262 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
73263
73264 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
73265 diff -urNp linux-2.6.32.45/net/rxrpc/ar-input.c linux-2.6.32.45/net/rxrpc/ar-input.c
73266 --- linux-2.6.32.45/net/rxrpc/ar-input.c 2011-03-27 14:31:47.000000000 -0400
73267 +++ linux-2.6.32.45/net/rxrpc/ar-input.c 2011-05-04 17:56:28.000000000 -0400
73268 @@ -339,9 +339,9 @@ void rxrpc_fast_process_packet(struct rx
73269 /* track the latest serial number on this connection for ACK packet
73270 * information */
73271 serial = ntohl(sp->hdr.serial);
73272 - hi_serial = atomic_read(&call->conn->hi_serial);
73273 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
73274 while (serial > hi_serial)
73275 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
73276 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
73277 serial);
73278
73279 /* request ACK generation for any ACK or DATA packet that requests
73280 diff -urNp linux-2.6.32.45/net/rxrpc/ar-internal.h linux-2.6.32.45/net/rxrpc/ar-internal.h
73281 --- linux-2.6.32.45/net/rxrpc/ar-internal.h 2011-03-27 14:31:47.000000000 -0400
73282 +++ linux-2.6.32.45/net/rxrpc/ar-internal.h 2011-05-04 17:56:28.000000000 -0400
73283 @@ -272,8 +272,8 @@ struct rxrpc_connection {
73284 int error; /* error code for local abort */
73285 int debug_id; /* debug ID for printks */
73286 unsigned call_counter; /* call ID counter */
73287 - atomic_t serial; /* packet serial number counter */
73288 - atomic_t hi_serial; /* highest serial number received */
73289 + atomic_unchecked_t serial; /* packet serial number counter */
73290 + atomic_unchecked_t hi_serial; /* highest serial number received */
73291 u8 avail_calls; /* number of calls available */
73292 u8 size_align; /* data size alignment (for security) */
73293 u8 header_size; /* rxrpc + security header size */
73294 @@ -346,7 +346,7 @@ struct rxrpc_call {
73295 spinlock_t lock;
73296 rwlock_t state_lock; /* lock for state transition */
73297 atomic_t usage;
73298 - atomic_t sequence; /* Tx data packet sequence counter */
73299 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
73300 u32 abort_code; /* local/remote abort code */
73301 enum { /* current state of call */
73302 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
73303 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(stru
73304 */
73305 extern atomic_t rxrpc_n_skbs;
73306 extern __be32 rxrpc_epoch;
73307 -extern atomic_t rxrpc_debug_id;
73308 +extern atomic_unchecked_t rxrpc_debug_id;
73309 extern struct workqueue_struct *rxrpc_workqueue;
73310
73311 /*
73312 diff -urNp linux-2.6.32.45/net/rxrpc/ar-key.c linux-2.6.32.45/net/rxrpc/ar-key.c
73313 --- linux-2.6.32.45/net/rxrpc/ar-key.c 2011-03-27 14:31:47.000000000 -0400
73314 +++ linux-2.6.32.45/net/rxrpc/ar-key.c 2011-04-17 15:56:46.000000000 -0400
73315 @@ -88,11 +88,11 @@ static int rxrpc_instantiate_xdr_rxkad(s
73316 return ret;
73317
73318 plen -= sizeof(*token);
73319 - token = kmalloc(sizeof(*token), GFP_KERNEL);
73320 + token = kzalloc(sizeof(*token), GFP_KERNEL);
73321 if (!token)
73322 return -ENOMEM;
73323
73324 - token->kad = kmalloc(plen, GFP_KERNEL);
73325 + token->kad = kzalloc(plen, GFP_KERNEL);
73326 if (!token->kad) {
73327 kfree(token);
73328 return -ENOMEM;
73329 @@ -730,10 +730,10 @@ static int rxrpc_instantiate(struct key
73330 goto error;
73331
73332 ret = -ENOMEM;
73333 - token = kmalloc(sizeof(*token), GFP_KERNEL);
73334 + token = kzalloc(sizeof(*token), GFP_KERNEL);
73335 if (!token)
73336 goto error;
73337 - token->kad = kmalloc(plen, GFP_KERNEL);
73338 + token->kad = kzalloc(plen, GFP_KERNEL);
73339 if (!token->kad)
73340 goto error_free;
73341
73342 diff -urNp linux-2.6.32.45/net/rxrpc/ar-local.c linux-2.6.32.45/net/rxrpc/ar-local.c
73343 --- linux-2.6.32.45/net/rxrpc/ar-local.c 2011-03-27 14:31:47.000000000 -0400
73344 +++ linux-2.6.32.45/net/rxrpc/ar-local.c 2011-05-04 17:56:28.000000000 -0400
73345 @@ -44,7 +44,7 @@ struct rxrpc_local *rxrpc_alloc_local(st
73346 spin_lock_init(&local->lock);
73347 rwlock_init(&local->services_lock);
73348 atomic_set(&local->usage, 1);
73349 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
73350 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
73351 memcpy(&local->srx, srx, sizeof(*srx));
73352 }
73353
73354 diff -urNp linux-2.6.32.45/net/rxrpc/ar-output.c linux-2.6.32.45/net/rxrpc/ar-output.c
73355 --- linux-2.6.32.45/net/rxrpc/ar-output.c 2011-03-27 14:31:47.000000000 -0400
73356 +++ linux-2.6.32.45/net/rxrpc/ar-output.c 2011-05-04 17:56:28.000000000 -0400
73357 @@ -680,9 +680,9 @@ static int rxrpc_send_data(struct kiocb
73358 sp->hdr.cid = call->cid;
73359 sp->hdr.callNumber = call->call_id;
73360 sp->hdr.seq =
73361 - htonl(atomic_inc_return(&call->sequence));
73362 + htonl(atomic_inc_return_unchecked(&call->sequence));
73363 sp->hdr.serial =
73364 - htonl(atomic_inc_return(&conn->serial));
73365 + htonl(atomic_inc_return_unchecked(&conn->serial));
73366 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
73367 sp->hdr.userStatus = 0;
73368 sp->hdr.securityIndex = conn->security_ix;
73369 diff -urNp linux-2.6.32.45/net/rxrpc/ar-peer.c linux-2.6.32.45/net/rxrpc/ar-peer.c
73370 --- linux-2.6.32.45/net/rxrpc/ar-peer.c 2011-03-27 14:31:47.000000000 -0400
73371 +++ linux-2.6.32.45/net/rxrpc/ar-peer.c 2011-05-04 17:56:28.000000000 -0400
73372 @@ -86,7 +86,7 @@ static struct rxrpc_peer *rxrpc_alloc_pe
73373 INIT_LIST_HEAD(&peer->error_targets);
73374 spin_lock_init(&peer->lock);
73375 atomic_set(&peer->usage, 1);
73376 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
73377 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
73378 memcpy(&peer->srx, srx, sizeof(*srx));
73379
73380 rxrpc_assess_MTU_size(peer);
73381 diff -urNp linux-2.6.32.45/net/rxrpc/ar-proc.c linux-2.6.32.45/net/rxrpc/ar-proc.c
73382 --- linux-2.6.32.45/net/rxrpc/ar-proc.c 2011-03-27 14:31:47.000000000 -0400
73383 +++ linux-2.6.32.45/net/rxrpc/ar-proc.c 2011-05-04 17:56:28.000000000 -0400
73384 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(str
73385 atomic_read(&conn->usage),
73386 rxrpc_conn_states[conn->state],
73387 key_serial(conn->key),
73388 - atomic_read(&conn->serial),
73389 - atomic_read(&conn->hi_serial));
73390 + atomic_read_unchecked(&conn->serial),
73391 + atomic_read_unchecked(&conn->hi_serial));
73392
73393 return 0;
73394 }
73395 diff -urNp linux-2.6.32.45/net/rxrpc/ar-transport.c linux-2.6.32.45/net/rxrpc/ar-transport.c
73396 --- linux-2.6.32.45/net/rxrpc/ar-transport.c 2011-03-27 14:31:47.000000000 -0400
73397 +++ linux-2.6.32.45/net/rxrpc/ar-transport.c 2011-05-04 17:56:28.000000000 -0400
73398 @@ -46,7 +46,7 @@ static struct rxrpc_transport *rxrpc_all
73399 spin_lock_init(&trans->client_lock);
73400 rwlock_init(&trans->conn_lock);
73401 atomic_set(&trans->usage, 1);
73402 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
73403 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
73404
73405 if (peer->srx.transport.family == AF_INET) {
73406 switch (peer->srx.transport_type) {
73407 diff -urNp linux-2.6.32.45/net/rxrpc/rxkad.c linux-2.6.32.45/net/rxrpc/rxkad.c
73408 --- linux-2.6.32.45/net/rxrpc/rxkad.c 2011-03-27 14:31:47.000000000 -0400
73409 +++ linux-2.6.32.45/net/rxrpc/rxkad.c 2011-05-16 21:46:57.000000000 -0400
73410 @@ -210,6 +210,8 @@ static int rxkad_secure_packet_encrypt(c
73411 u16 check;
73412 int nsg;
73413
73414 + pax_track_stack();
73415 +
73416 sp = rxrpc_skb(skb);
73417
73418 _enter("");
73419 @@ -337,6 +339,8 @@ static int rxkad_verify_packet_auth(cons
73420 u16 check;
73421 int nsg;
73422
73423 + pax_track_stack();
73424 +
73425 _enter("");
73426
73427 sp = rxrpc_skb(skb);
73428 @@ -609,7 +613,7 @@ static int rxkad_issue_challenge(struct
73429
73430 len = iov[0].iov_len + iov[1].iov_len;
73431
73432 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
73433 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
73434 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
73435
73436 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
73437 @@ -659,7 +663,7 @@ static int rxkad_send_response(struct rx
73438
73439 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
73440
73441 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
73442 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
73443 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
73444
73445 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
73446 diff -urNp linux-2.6.32.45/net/sctp/proc.c linux-2.6.32.45/net/sctp/proc.c
73447 --- linux-2.6.32.45/net/sctp/proc.c 2011-03-27 14:31:47.000000000 -0400
73448 +++ linux-2.6.32.45/net/sctp/proc.c 2011-04-17 15:56:46.000000000 -0400
73449 @@ -213,7 +213,12 @@ static int sctp_eps_seq_show(struct seq_
73450 sctp_for_each_hentry(epb, node, &head->chain) {
73451 ep = sctp_ep(epb);
73452 sk = epb->sk;
73453 - seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
73454 + seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ",
73455 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73456 + NULL, NULL,
73457 +#else
73458 + ep, sk,
73459 +#endif
73460 sctp_sk(sk)->type, sk->sk_state, hash,
73461 epb->bind_addr.port,
73462 sock_i_uid(sk), sock_i_ino(sk));
73463 @@ -320,7 +325,12 @@ static int sctp_assocs_seq_show(struct s
73464 seq_printf(seq,
73465 "%8p %8p %-3d %-3d %-2d %-4d "
73466 "%4d %8d %8d %7d %5lu %-5d %5d ",
73467 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
73468 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73469 + NULL, NULL,
73470 +#else
73471 + assoc, sk,
73472 +#endif
73473 + sctp_sk(sk)->type, sk->sk_state,
73474 assoc->state, hash,
73475 assoc->assoc_id,
73476 assoc->sndbuf_used,
73477 diff -urNp linux-2.6.32.45/net/sctp/socket.c linux-2.6.32.45/net/sctp/socket.c
73478 --- linux-2.6.32.45/net/sctp/socket.c 2011-03-27 14:31:47.000000000 -0400
73479 +++ linux-2.6.32.45/net/sctp/socket.c 2011-04-23 12:56:11.000000000 -0400
73480 @@ -5802,7 +5802,6 @@ pp_found:
73481 */
73482 int reuse = sk->sk_reuse;
73483 struct sock *sk2;
73484 - struct hlist_node *node;
73485
73486 SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n");
73487 if (pp->fastreuse && sk->sk_reuse &&
73488 diff -urNp linux-2.6.32.45/net/socket.c linux-2.6.32.45/net/socket.c
73489 --- linux-2.6.32.45/net/socket.c 2011-03-27 14:31:47.000000000 -0400
73490 +++ linux-2.6.32.45/net/socket.c 2011-05-16 21:46:57.000000000 -0400
73491 @@ -87,6 +87,7 @@
73492 #include <linux/wireless.h>
73493 #include <linux/nsproxy.h>
73494 #include <linux/magic.h>
73495 +#include <linux/in.h>
73496
73497 #include <asm/uaccess.h>
73498 #include <asm/unistd.h>
73499 @@ -97,6 +98,21 @@
73500 #include <net/sock.h>
73501 #include <linux/netfilter.h>
73502
73503 +extern void gr_attach_curr_ip(const struct sock *sk);
73504 +extern int gr_handle_sock_all(const int family, const int type,
73505 + const int protocol);
73506 +extern int gr_handle_sock_server(const struct sockaddr *sck);
73507 +extern int gr_handle_sock_server_other(const struct sock *sck);
73508 +extern int gr_handle_sock_client(const struct sockaddr *sck);
73509 +extern int gr_search_connect(struct socket * sock,
73510 + struct sockaddr_in * addr);
73511 +extern int gr_search_bind(struct socket * sock,
73512 + struct sockaddr_in * addr);
73513 +extern int gr_search_listen(struct socket * sock);
73514 +extern int gr_search_accept(struct socket * sock);
73515 +extern int gr_search_socket(const int domain, const int type,
73516 + const int protocol);
73517 +
73518 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
73519 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
73520 unsigned long nr_segs, loff_t pos);
73521 @@ -298,7 +314,7 @@ static int sockfs_get_sb(struct file_sys
73522 mnt);
73523 }
73524
73525 -static struct vfsmount *sock_mnt __read_mostly;
73526 +struct vfsmount *sock_mnt __read_mostly;
73527
73528 static struct file_system_type sock_fs_type = {
73529 .name = "sockfs",
73530 @@ -1154,6 +1170,8 @@ static int __sock_create(struct net *net
73531 return -EAFNOSUPPORT;
73532 if (type < 0 || type >= SOCK_MAX)
73533 return -EINVAL;
73534 + if (protocol < 0)
73535 + return -EINVAL;
73536
73537 /* Compatibility.
73538
73539 @@ -1283,6 +1301,16 @@ SYSCALL_DEFINE3(socket, int, family, int
73540 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
73541 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
73542
73543 + if(!gr_search_socket(family, type, protocol)) {
73544 + retval = -EACCES;
73545 + goto out;
73546 + }
73547 +
73548 + if (gr_handle_sock_all(family, type, protocol)) {
73549 + retval = -EACCES;
73550 + goto out;
73551 + }
73552 +
73553 retval = sock_create(family, type, protocol, &sock);
73554 if (retval < 0)
73555 goto out;
73556 @@ -1415,6 +1443,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
73557 if (sock) {
73558 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
73559 if (err >= 0) {
73560 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
73561 + err = -EACCES;
73562 + goto error;
73563 + }
73564 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
73565 + if (err)
73566 + goto error;
73567 +
73568 err = security_socket_bind(sock,
73569 (struct sockaddr *)&address,
73570 addrlen);
73571 @@ -1423,6 +1459,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
73572 (struct sockaddr *)
73573 &address, addrlen);
73574 }
73575 +error:
73576 fput_light(sock->file, fput_needed);
73577 }
73578 return err;
73579 @@ -1446,10 +1483,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, ba
73580 if ((unsigned)backlog > somaxconn)
73581 backlog = somaxconn;
73582
73583 + if (gr_handle_sock_server_other(sock->sk)) {
73584 + err = -EPERM;
73585 + goto error;
73586 + }
73587 +
73588 + err = gr_search_listen(sock);
73589 + if (err)
73590 + goto error;
73591 +
73592 err = security_socket_listen(sock, backlog);
73593 if (!err)
73594 err = sock->ops->listen(sock, backlog);
73595
73596 +error:
73597 fput_light(sock->file, fput_needed);
73598 }
73599 return err;
73600 @@ -1492,6 +1539,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
73601 newsock->type = sock->type;
73602 newsock->ops = sock->ops;
73603
73604 + if (gr_handle_sock_server_other(sock->sk)) {
73605 + err = -EPERM;
73606 + sock_release(newsock);
73607 + goto out_put;
73608 + }
73609 +
73610 + err = gr_search_accept(sock);
73611 + if (err) {
73612 + sock_release(newsock);
73613 + goto out_put;
73614 + }
73615 +
73616 /*
73617 * We don't need try_module_get here, as the listening socket (sock)
73618 * has the protocol module (sock->ops->owner) held.
73619 @@ -1534,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
73620 fd_install(newfd, newfile);
73621 err = newfd;
73622
73623 + gr_attach_curr_ip(newsock->sk);
73624 +
73625 out_put:
73626 fput_light(sock->file, fput_needed);
73627 out:
73628 @@ -1571,6 +1632,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct
73629 int, addrlen)
73630 {
73631 struct socket *sock;
73632 + struct sockaddr *sck;
73633 struct sockaddr_storage address;
73634 int err, fput_needed;
73635
73636 @@ -1581,6 +1643,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct
73637 if (err < 0)
73638 goto out_put;
73639
73640 + sck = (struct sockaddr *)&address;
73641 +
73642 + if (gr_handle_sock_client(sck)) {
73643 + err = -EACCES;
73644 + goto out_put;
73645 + }
73646 +
73647 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
73648 + if (err)
73649 + goto out_put;
73650 +
73651 err =
73652 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
73653 if (err)
73654 @@ -1882,6 +1955,8 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct
73655 int err, ctl_len, iov_size, total_len;
73656 int fput_needed;
73657
73658 + pax_track_stack();
73659 +
73660 err = -EFAULT;
73661 if (MSG_CMSG_COMPAT & flags) {
73662 if (get_compat_msghdr(&msg_sys, msg_compat))
73663 diff -urNp linux-2.6.32.45/net/sunrpc/sched.c linux-2.6.32.45/net/sunrpc/sched.c
73664 --- linux-2.6.32.45/net/sunrpc/sched.c 2011-08-09 18:35:30.000000000 -0400
73665 +++ linux-2.6.32.45/net/sunrpc/sched.c 2011-08-09 18:34:01.000000000 -0400
73666 @@ -234,10 +234,10 @@ static int rpc_wait_bit_killable(void *w
73667 #ifdef RPC_DEBUG
73668 static void rpc_task_set_debuginfo(struct rpc_task *task)
73669 {
73670 - static atomic_t rpc_pid;
73671 + static atomic_unchecked_t rpc_pid;
73672
73673 task->tk_magic = RPC_TASK_MAGIC_ID;
73674 - task->tk_pid = atomic_inc_return(&rpc_pid);
73675 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
73676 }
73677 #else
73678 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
73679 diff -urNp linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma.c linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma.c
73680 --- linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma.c 2011-03-27 14:31:47.000000000 -0400
73681 +++ linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma.c 2011-05-04 17:56:20.000000000 -0400
73682 @@ -59,15 +59,15 @@ unsigned int svcrdma_max_req_size = RPCR
73683 static unsigned int min_max_inline = 4096;
73684 static unsigned int max_max_inline = 65536;
73685
73686 -atomic_t rdma_stat_recv;
73687 -atomic_t rdma_stat_read;
73688 -atomic_t rdma_stat_write;
73689 -atomic_t rdma_stat_sq_starve;
73690 -atomic_t rdma_stat_rq_starve;
73691 -atomic_t rdma_stat_rq_poll;
73692 -atomic_t rdma_stat_rq_prod;
73693 -atomic_t rdma_stat_sq_poll;
73694 -atomic_t rdma_stat_sq_prod;
73695 +atomic_unchecked_t rdma_stat_recv;
73696 +atomic_unchecked_t rdma_stat_read;
73697 +atomic_unchecked_t rdma_stat_write;
73698 +atomic_unchecked_t rdma_stat_sq_starve;
73699 +atomic_unchecked_t rdma_stat_rq_starve;
73700 +atomic_unchecked_t rdma_stat_rq_poll;
73701 +atomic_unchecked_t rdma_stat_rq_prod;
73702 +atomic_unchecked_t rdma_stat_sq_poll;
73703 +atomic_unchecked_t rdma_stat_sq_prod;
73704
73705 /* Temporary NFS request map and context caches */
73706 struct kmem_cache *svc_rdma_map_cachep;
73707 @@ -105,7 +105,7 @@ static int read_reset_stat(ctl_table *ta
73708 len -= *ppos;
73709 if (len > *lenp)
73710 len = *lenp;
73711 - if (len && copy_to_user(buffer, str_buf, len))
73712 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
73713 return -EFAULT;
73714 *lenp = len;
73715 *ppos += len;
73716 @@ -149,63 +149,63 @@ static ctl_table svcrdma_parm_table[] =
73717 {
73718 .procname = "rdma_stat_read",
73719 .data = &rdma_stat_read,
73720 - .maxlen = sizeof(atomic_t),
73721 + .maxlen = sizeof(atomic_unchecked_t),
73722 .mode = 0644,
73723 .proc_handler = &read_reset_stat,
73724 },
73725 {
73726 .procname = "rdma_stat_recv",
73727 .data = &rdma_stat_recv,
73728 - .maxlen = sizeof(atomic_t),
73729 + .maxlen = sizeof(atomic_unchecked_t),
73730 .mode = 0644,
73731 .proc_handler = &read_reset_stat,
73732 },
73733 {
73734 .procname = "rdma_stat_write",
73735 .data = &rdma_stat_write,
73736 - .maxlen = sizeof(atomic_t),
73737 + .maxlen = sizeof(atomic_unchecked_t),
73738 .mode = 0644,
73739 .proc_handler = &read_reset_stat,
73740 },
73741 {
73742 .procname = "rdma_stat_sq_starve",
73743 .data = &rdma_stat_sq_starve,
73744 - .maxlen = sizeof(atomic_t),
73745 + .maxlen = sizeof(atomic_unchecked_t),
73746 .mode = 0644,
73747 .proc_handler = &read_reset_stat,
73748 },
73749 {
73750 .procname = "rdma_stat_rq_starve",
73751 .data = &rdma_stat_rq_starve,
73752 - .maxlen = sizeof(atomic_t),
73753 + .maxlen = sizeof(atomic_unchecked_t),
73754 .mode = 0644,
73755 .proc_handler = &read_reset_stat,
73756 },
73757 {
73758 .procname = "rdma_stat_rq_poll",
73759 .data = &rdma_stat_rq_poll,
73760 - .maxlen = sizeof(atomic_t),
73761 + .maxlen = sizeof(atomic_unchecked_t),
73762 .mode = 0644,
73763 .proc_handler = &read_reset_stat,
73764 },
73765 {
73766 .procname = "rdma_stat_rq_prod",
73767 .data = &rdma_stat_rq_prod,
73768 - .maxlen = sizeof(atomic_t),
73769 + .maxlen = sizeof(atomic_unchecked_t),
73770 .mode = 0644,
73771 .proc_handler = &read_reset_stat,
73772 },
73773 {
73774 .procname = "rdma_stat_sq_poll",
73775 .data = &rdma_stat_sq_poll,
73776 - .maxlen = sizeof(atomic_t),
73777 + .maxlen = sizeof(atomic_unchecked_t),
73778 .mode = 0644,
73779 .proc_handler = &read_reset_stat,
73780 },
73781 {
73782 .procname = "rdma_stat_sq_prod",
73783 .data = &rdma_stat_sq_prod,
73784 - .maxlen = sizeof(atomic_t),
73785 + .maxlen = sizeof(atomic_unchecked_t),
73786 .mode = 0644,
73787 .proc_handler = &read_reset_stat,
73788 },
73789 diff -urNp linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
73790 --- linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-03-27 14:31:47.000000000 -0400
73791 +++ linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-05-04 17:56:28.000000000 -0400
73792 @@ -495,7 +495,7 @@ next_sge:
73793 svc_rdma_put_context(ctxt, 0);
73794 goto out;
73795 }
73796 - atomic_inc(&rdma_stat_read);
73797 + atomic_inc_unchecked(&rdma_stat_read);
73798
73799 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
73800 chl_map->ch[ch_no].count -= read_wr.num_sge;
73801 @@ -606,7 +606,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
73802 dto_q);
73803 list_del_init(&ctxt->dto_q);
73804 } else {
73805 - atomic_inc(&rdma_stat_rq_starve);
73806 + atomic_inc_unchecked(&rdma_stat_rq_starve);
73807 clear_bit(XPT_DATA, &xprt->xpt_flags);
73808 ctxt = NULL;
73809 }
73810 @@ -626,7 +626,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
73811 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
73812 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
73813 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
73814 - atomic_inc(&rdma_stat_recv);
73815 + atomic_inc_unchecked(&rdma_stat_recv);
73816
73817 /* Build up the XDR from the receive buffers. */
73818 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
73819 diff -urNp linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_sendto.c linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_sendto.c
73820 --- linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-03-27 14:31:47.000000000 -0400
73821 +++ linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-05-04 17:56:28.000000000 -0400
73822 @@ -328,7 +328,7 @@ static int send_write(struct svcxprt_rdm
73823 write_wr.wr.rdma.remote_addr = to;
73824
73825 /* Post It */
73826 - atomic_inc(&rdma_stat_write);
73827 + atomic_inc_unchecked(&rdma_stat_write);
73828 if (svc_rdma_send(xprt, &write_wr))
73829 goto err;
73830 return 0;
73831 diff -urNp linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_transport.c linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_transport.c
73832 --- linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-03-27 14:31:47.000000000 -0400
73833 +++ linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-05-04 17:56:28.000000000 -0400
73834 @@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rd
73835 return;
73836
73837 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
73838 - atomic_inc(&rdma_stat_rq_poll);
73839 + atomic_inc_unchecked(&rdma_stat_rq_poll);
73840
73841 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
73842 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
73843 @@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rd
73844 }
73845
73846 if (ctxt)
73847 - atomic_inc(&rdma_stat_rq_prod);
73848 + atomic_inc_unchecked(&rdma_stat_rq_prod);
73849
73850 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
73851 /*
73852 @@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rd
73853 return;
73854
73855 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
73856 - atomic_inc(&rdma_stat_sq_poll);
73857 + atomic_inc_unchecked(&rdma_stat_sq_poll);
73858 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
73859 if (wc.status != IB_WC_SUCCESS)
73860 /* Close the transport */
73861 @@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rd
73862 }
73863
73864 if (ctxt)
73865 - atomic_inc(&rdma_stat_sq_prod);
73866 + atomic_inc_unchecked(&rdma_stat_sq_prod);
73867 }
73868
73869 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
73870 @@ -1260,7 +1260,7 @@ int svc_rdma_send(struct svcxprt_rdma *x
73871 spin_lock_bh(&xprt->sc_lock);
73872 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
73873 spin_unlock_bh(&xprt->sc_lock);
73874 - atomic_inc(&rdma_stat_sq_starve);
73875 + atomic_inc_unchecked(&rdma_stat_sq_starve);
73876
73877 /* See if we can opportunistically reap SQ WR to make room */
73878 sq_cq_reap(xprt);
73879 diff -urNp linux-2.6.32.45/net/sysctl_net.c linux-2.6.32.45/net/sysctl_net.c
73880 --- linux-2.6.32.45/net/sysctl_net.c 2011-03-27 14:31:47.000000000 -0400
73881 +++ linux-2.6.32.45/net/sysctl_net.c 2011-04-17 15:56:46.000000000 -0400
73882 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ct
73883 struct ctl_table *table)
73884 {
73885 /* Allow network administrator to have same access as root. */
73886 - if (capable(CAP_NET_ADMIN)) {
73887 + if (capable_nolog(CAP_NET_ADMIN)) {
73888 int mode = (table->mode >> 6) & 7;
73889 return (mode << 6) | (mode << 3) | mode;
73890 }
73891 diff -urNp linux-2.6.32.45/net/unix/af_unix.c linux-2.6.32.45/net/unix/af_unix.c
73892 --- linux-2.6.32.45/net/unix/af_unix.c 2011-05-10 22:12:02.000000000 -0400
73893 +++ linux-2.6.32.45/net/unix/af_unix.c 2011-07-18 18:17:33.000000000 -0400
73894 @@ -745,6 +745,12 @@ static struct sock *unix_find_other(stru
73895 err = -ECONNREFUSED;
73896 if (!S_ISSOCK(inode->i_mode))
73897 goto put_fail;
73898 +
73899 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
73900 + err = -EACCES;
73901 + goto put_fail;
73902 + }
73903 +
73904 u = unix_find_socket_byinode(net, inode);
73905 if (!u)
73906 goto put_fail;
73907 @@ -765,6 +771,13 @@ static struct sock *unix_find_other(stru
73908 if (u) {
73909 struct dentry *dentry;
73910 dentry = unix_sk(u)->dentry;
73911 +
73912 + if (!gr_handle_chroot_unix(u->sk_peercred.pid)) {
73913 + err = -EPERM;
73914 + sock_put(u);
73915 + goto fail;
73916 + }
73917 +
73918 if (dentry)
73919 touch_atime(unix_sk(u)->mnt, dentry);
73920 } else
73921 @@ -850,11 +863,18 @@ static int unix_bind(struct socket *sock
73922 err = security_path_mknod(&nd.path, dentry, mode, 0);
73923 if (err)
73924 goto out_mknod_drop_write;
73925 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
73926 + err = -EACCES;
73927 + goto out_mknod_drop_write;
73928 + }
73929 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
73930 out_mknod_drop_write:
73931 mnt_drop_write(nd.path.mnt);
73932 if (err)
73933 goto out_mknod_dput;
73934 +
73935 + gr_handle_create(dentry, nd.path.mnt);
73936 +
73937 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
73938 dput(nd.path.dentry);
73939 nd.path.dentry = dentry;
73940 @@ -2211,7 +2231,11 @@ static int unix_seq_show(struct seq_file
73941 unix_state_lock(s);
73942
73943 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
73944 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73945 + NULL,
73946 +#else
73947 s,
73948 +#endif
73949 atomic_read(&s->sk_refcnt),
73950 0,
73951 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
73952 diff -urNp linux-2.6.32.45/net/wireless/core.h linux-2.6.32.45/net/wireless/core.h
73953 --- linux-2.6.32.45/net/wireless/core.h 2011-03-27 14:31:47.000000000 -0400
73954 +++ linux-2.6.32.45/net/wireless/core.h 2011-08-23 21:22:38.000000000 -0400
73955 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
73956 struct mutex mtx;
73957
73958 /* rfkill support */
73959 - struct rfkill_ops rfkill_ops;
73960 + rfkill_ops_no_const rfkill_ops;
73961 struct rfkill *rfkill;
73962 struct work_struct rfkill_sync;
73963
73964 diff -urNp linux-2.6.32.45/net/wireless/wext.c linux-2.6.32.45/net/wireless/wext.c
73965 --- linux-2.6.32.45/net/wireless/wext.c 2011-03-27 14:31:47.000000000 -0400
73966 +++ linux-2.6.32.45/net/wireless/wext.c 2011-04-17 15:56:46.000000000 -0400
73967 @@ -816,8 +816,7 @@ static int ioctl_standard_iw_point(struc
73968 */
73969
73970 /* Support for very large requests */
73971 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
73972 - (user_length > descr->max_tokens)) {
73973 + if (user_length > descr->max_tokens) {
73974 /* Allow userspace to GET more than max so
73975 * we can support any size GET requests.
73976 * There is still a limit : -ENOMEM.
73977 @@ -854,22 +853,6 @@ static int ioctl_standard_iw_point(struc
73978 }
73979 }
73980
73981 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
73982 - /*
73983 - * If this is a GET, but not NOMAX, it means that the extra
73984 - * data is not bounded by userspace, but by max_tokens. Thus
73985 - * set the length to max_tokens. This matches the extra data
73986 - * allocation.
73987 - * The driver should fill it with the number of tokens it
73988 - * provided, and it may check iwp->length rather than having
73989 - * knowledge of max_tokens. If the driver doesn't change the
73990 - * iwp->length, this ioctl just copies back max_token tokens
73991 - * filled with zeroes. Hopefully the driver isn't claiming
73992 - * them to be valid data.
73993 - */
73994 - iwp->length = descr->max_tokens;
73995 - }
73996 -
73997 err = handler(dev, info, (union iwreq_data *) iwp, extra);
73998
73999 iwp->length += essid_compat;
74000 diff -urNp linux-2.6.32.45/net/xfrm/xfrm_policy.c linux-2.6.32.45/net/xfrm/xfrm_policy.c
74001 --- linux-2.6.32.45/net/xfrm/xfrm_policy.c 2011-03-27 14:31:47.000000000 -0400
74002 +++ linux-2.6.32.45/net/xfrm/xfrm_policy.c 2011-05-04 17:56:20.000000000 -0400
74003 @@ -586,7 +586,7 @@ int xfrm_policy_insert(int dir, struct x
74004 hlist_add_head(&policy->bydst, chain);
74005 xfrm_pol_hold(policy);
74006 net->xfrm.policy_count[dir]++;
74007 - atomic_inc(&flow_cache_genid);
74008 + atomic_inc_unchecked(&flow_cache_genid);
74009 if (delpol)
74010 __xfrm_policy_unlink(delpol, dir);
74011 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
74012 @@ -669,7 +669,7 @@ struct xfrm_policy *xfrm_policy_bysel_ct
74013 write_unlock_bh(&xfrm_policy_lock);
74014
74015 if (ret && delete) {
74016 - atomic_inc(&flow_cache_genid);
74017 + atomic_inc_unchecked(&flow_cache_genid);
74018 xfrm_policy_kill(ret);
74019 }
74020 return ret;
74021 @@ -710,7 +710,7 @@ struct xfrm_policy *xfrm_policy_byid(str
74022 write_unlock_bh(&xfrm_policy_lock);
74023
74024 if (ret && delete) {
74025 - atomic_inc(&flow_cache_genid);
74026 + atomic_inc_unchecked(&flow_cache_genid);
74027 xfrm_policy_kill(ret);
74028 }
74029 return ret;
74030 @@ -824,7 +824,7 @@ int xfrm_policy_flush(struct net *net, u
74031 }
74032
74033 }
74034 - atomic_inc(&flow_cache_genid);
74035 + atomic_inc_unchecked(&flow_cache_genid);
74036 out:
74037 write_unlock_bh(&xfrm_policy_lock);
74038 return err;
74039 @@ -1088,7 +1088,7 @@ int xfrm_policy_delete(struct xfrm_polic
74040 write_unlock_bh(&xfrm_policy_lock);
74041 if (pol) {
74042 if (dir < XFRM_POLICY_MAX)
74043 - atomic_inc(&flow_cache_genid);
74044 + atomic_inc_unchecked(&flow_cache_genid);
74045 xfrm_policy_kill(pol);
74046 return 0;
74047 }
74048 @@ -1477,7 +1477,7 @@ free_dst:
74049 goto out;
74050 }
74051
74052 -static int inline
74053 +static inline int
74054 xfrm_dst_alloc_copy(void **target, void *src, int size)
74055 {
74056 if (!*target) {
74057 @@ -1489,7 +1489,7 @@ xfrm_dst_alloc_copy(void **target, void
74058 return 0;
74059 }
74060
74061 -static int inline
74062 +static inline int
74063 xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
74064 {
74065 #ifdef CONFIG_XFRM_SUB_POLICY
74066 @@ -1501,7 +1501,7 @@ xfrm_dst_update_parent(struct dst_entry
74067 #endif
74068 }
74069
74070 -static int inline
74071 +static inline int
74072 xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
74073 {
74074 #ifdef CONFIG_XFRM_SUB_POLICY
74075 @@ -1537,7 +1537,7 @@ int __xfrm_lookup(struct net *net, struc
74076 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
74077
74078 restart:
74079 - genid = atomic_read(&flow_cache_genid);
74080 + genid = atomic_read_unchecked(&flow_cache_genid);
74081 policy = NULL;
74082 for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
74083 pols[pi] = NULL;
74084 @@ -1680,7 +1680,7 @@ restart:
74085 goto error;
74086 }
74087 if (nx == -EAGAIN ||
74088 - genid != atomic_read(&flow_cache_genid)) {
74089 + genid != atomic_read_unchecked(&flow_cache_genid)) {
74090 xfrm_pols_put(pols, npols);
74091 goto restart;
74092 }
74093 diff -urNp linux-2.6.32.45/net/xfrm/xfrm_user.c linux-2.6.32.45/net/xfrm/xfrm_user.c
74094 --- linux-2.6.32.45/net/xfrm/xfrm_user.c 2011-03-27 14:31:47.000000000 -0400
74095 +++ linux-2.6.32.45/net/xfrm/xfrm_user.c 2011-05-16 21:46:57.000000000 -0400
74096 @@ -1169,6 +1169,8 @@ static int copy_to_user_tmpl(struct xfrm
74097 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
74098 int i;
74099
74100 + pax_track_stack();
74101 +
74102 if (xp->xfrm_nr == 0)
74103 return 0;
74104
74105 @@ -1784,6 +1786,8 @@ static int xfrm_do_migrate(struct sk_buf
74106 int err;
74107 int n = 0;
74108
74109 + pax_track_stack();
74110 +
74111 if (attrs[XFRMA_MIGRATE] == NULL)
74112 return -EINVAL;
74113
74114 diff -urNp linux-2.6.32.45/samples/kobject/kset-example.c linux-2.6.32.45/samples/kobject/kset-example.c
74115 --- linux-2.6.32.45/samples/kobject/kset-example.c 2011-03-27 14:31:47.000000000 -0400
74116 +++ linux-2.6.32.45/samples/kobject/kset-example.c 2011-04-17 15:56:46.000000000 -0400
74117 @@ -87,7 +87,7 @@ static ssize_t foo_attr_store(struct kob
74118 }
74119
74120 /* Our custom sysfs_ops that we will associate with our ktype later on */
74121 -static struct sysfs_ops foo_sysfs_ops = {
74122 +static const struct sysfs_ops foo_sysfs_ops = {
74123 .show = foo_attr_show,
74124 .store = foo_attr_store,
74125 };
74126 diff -urNp linux-2.6.32.45/scripts/basic/fixdep.c linux-2.6.32.45/scripts/basic/fixdep.c
74127 --- linux-2.6.32.45/scripts/basic/fixdep.c 2011-03-27 14:31:47.000000000 -0400
74128 +++ linux-2.6.32.45/scripts/basic/fixdep.c 2011-04-17 15:56:46.000000000 -0400
74129 @@ -222,9 +222,9 @@ static void use_config(char *m, int slen
74130
74131 static void parse_config_file(char *map, size_t len)
74132 {
74133 - int *end = (int *) (map + len);
74134 + unsigned int *end = (unsigned int *) (map + len);
74135 /* start at +1, so that p can never be < map */
74136 - int *m = (int *) map + 1;
74137 + unsigned int *m = (unsigned int *) map + 1;
74138 char *p, *q;
74139
74140 for (; m < end; m++) {
74141 @@ -371,7 +371,7 @@ static void print_deps(void)
74142 static void traps(void)
74143 {
74144 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
74145 - int *p = (int *)test;
74146 + unsigned int *p = (unsigned int *)test;
74147
74148 if (*p != INT_CONF) {
74149 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
74150 diff -urNp linux-2.6.32.45/scripts/gcc-plugin.sh linux-2.6.32.45/scripts/gcc-plugin.sh
74151 --- linux-2.6.32.45/scripts/gcc-plugin.sh 1969-12-31 19:00:00.000000000 -0500
74152 +++ linux-2.6.32.45/scripts/gcc-plugin.sh 2011-08-23 20:24:19.000000000 -0400
74153 @@ -0,0 +1,2 @@
74154 +#!/bin/sh
74155 +echo "#include \"gcc-plugin.h\"" | $* -x c -shared - -o /dev/null -I`$* -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
74156 diff -urNp linux-2.6.32.45/scripts/Makefile.build linux-2.6.32.45/scripts/Makefile.build
74157 --- linux-2.6.32.45/scripts/Makefile.build 2011-03-27 14:31:47.000000000 -0400
74158 +++ linux-2.6.32.45/scripts/Makefile.build 2011-08-23 20:45:11.000000000 -0400
74159 @@ -59,7 +59,7 @@ endif
74160 endif
74161
74162 # Do not include host rules unless needed
74163 -ifneq ($(hostprogs-y)$(hostprogs-m),)
74164 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
74165 include scripts/Makefile.host
74166 endif
74167
74168 diff -urNp linux-2.6.32.45/scripts/Makefile.clean linux-2.6.32.45/scripts/Makefile.clean
74169 --- linux-2.6.32.45/scripts/Makefile.clean 2011-03-27 14:31:47.000000000 -0400
74170 +++ linux-2.6.32.45/scripts/Makefile.clean 2011-06-04 20:47:19.000000000 -0400
74171 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subd
74172 __clean-files := $(extra-y) $(always) \
74173 $(targets) $(clean-files) \
74174 $(host-progs) \
74175 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
74176 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
74177 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
74178
74179 # as clean-files is given relative to the current directory, this adds
74180 # a $(obj) prefix, except for absolute paths
74181 diff -urNp linux-2.6.32.45/scripts/Makefile.host linux-2.6.32.45/scripts/Makefile.host
74182 --- linux-2.6.32.45/scripts/Makefile.host 2011-03-27 14:31:47.000000000 -0400
74183 +++ linux-2.6.32.45/scripts/Makefile.host 2011-06-04 20:48:22.000000000 -0400
74184 @@ -31,6 +31,7 @@
74185 # Note: Shared libraries consisting of C++ files are not supported
74186
74187 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
74188 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
74189
74190 # C code
74191 # Executables compiled from a single .c file
74192 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(hos
74193 # Shared libaries (only .c supported)
74194 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
74195 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
74196 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
74197 # Remove .so files from "xxx-objs"
74198 host-cobjs := $(filter-out %.so,$(host-cobjs))
74199
74200 diff -urNp linux-2.6.32.45/scripts/mod/file2alias.c linux-2.6.32.45/scripts/mod/file2alias.c
74201 --- linux-2.6.32.45/scripts/mod/file2alias.c 2011-03-27 14:31:47.000000000 -0400
74202 +++ linux-2.6.32.45/scripts/mod/file2alias.c 2011-04-17 15:56:46.000000000 -0400
74203 @@ -72,7 +72,7 @@ static void device_id_check(const char *
74204 unsigned long size, unsigned long id_size,
74205 void *symval)
74206 {
74207 - int i;
74208 + unsigned int i;
74209
74210 if (size % id_size || size < id_size) {
74211 if (cross_build != 0)
74212 @@ -102,7 +102,7 @@ static void device_id_check(const char *
74213 /* USB is special because the bcdDevice can be matched against a numeric range */
74214 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
74215 static void do_usb_entry(struct usb_device_id *id,
74216 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
74217 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
74218 unsigned char range_lo, unsigned char range_hi,
74219 struct module *mod)
74220 {
74221 @@ -368,7 +368,7 @@ static void do_pnp_device_entry(void *sy
74222 for (i = 0; i < count; i++) {
74223 const char *id = (char *)devs[i].id;
74224 char acpi_id[sizeof(devs[0].id)];
74225 - int j;
74226 + unsigned int j;
74227
74228 buf_printf(&mod->dev_table_buf,
74229 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
74230 @@ -398,7 +398,7 @@ static void do_pnp_card_entries(void *sy
74231
74232 for (j = 0; j < PNP_MAX_DEVICES; j++) {
74233 const char *id = (char *)card->devs[j].id;
74234 - int i2, j2;
74235 + unsigned int i2, j2;
74236 int dup = 0;
74237
74238 if (!id[0])
74239 @@ -424,7 +424,7 @@ static void do_pnp_card_entries(void *sy
74240 /* add an individual alias for every device entry */
74241 if (!dup) {
74242 char acpi_id[sizeof(card->devs[0].id)];
74243 - int k;
74244 + unsigned int k;
74245
74246 buf_printf(&mod->dev_table_buf,
74247 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
74248 @@ -699,7 +699,7 @@ static void dmi_ascii_filter(char *d, co
74249 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
74250 char *alias)
74251 {
74252 - int i, j;
74253 + unsigned int i, j;
74254
74255 sprintf(alias, "dmi*");
74256
74257 diff -urNp linux-2.6.32.45/scripts/mod/modpost.c linux-2.6.32.45/scripts/mod/modpost.c
74258 --- linux-2.6.32.45/scripts/mod/modpost.c 2011-03-27 14:31:47.000000000 -0400
74259 +++ linux-2.6.32.45/scripts/mod/modpost.c 2011-07-06 19:53:33.000000000 -0400
74260 @@ -835,6 +835,7 @@ enum mismatch {
74261 INIT_TO_EXIT,
74262 EXIT_TO_INIT,
74263 EXPORT_TO_INIT_EXIT,
74264 + DATA_TO_TEXT
74265 };
74266
74267 struct sectioncheck {
74268 @@ -920,6 +921,12 @@ const struct sectioncheck sectioncheck[]
74269 .fromsec = { "__ksymtab*", NULL },
74270 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
74271 .mismatch = EXPORT_TO_INIT_EXIT
74272 +},
74273 +/* Do not reference code from writable data */
74274 +{
74275 + .fromsec = { DATA_SECTIONS, NULL },
74276 + .tosec = { TEXT_SECTIONS, NULL },
74277 + .mismatch = DATA_TO_TEXT
74278 }
74279 };
74280
74281 @@ -1024,10 +1031,10 @@ static Elf_Sym *find_elf_symbol(struct e
74282 continue;
74283 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
74284 continue;
74285 - if (sym->st_value == addr)
74286 - return sym;
74287 /* Find a symbol nearby - addr are maybe negative */
74288 d = sym->st_value - addr;
74289 + if (d == 0)
74290 + return sym;
74291 if (d < 0)
74292 d = addr - sym->st_value;
74293 if (d < distance) {
74294 @@ -1268,6 +1275,14 @@ static void report_sec_mismatch(const ch
74295 "Fix this by removing the %sannotation of %s "
74296 "or drop the export.\n",
74297 tosym, sec2annotation(tosec), sec2annotation(tosec), tosym);
74298 + case DATA_TO_TEXT:
74299 +/*
74300 + fprintf(stderr,
74301 + "The variable %s references\n"
74302 + "the %s %s%s%s\n",
74303 + fromsym, to, sec2annotation(tosec), tosym, to_p);
74304 +*/
74305 + break;
74306 case NO_MISMATCH:
74307 /* To get warnings on missing members */
74308 break;
74309 @@ -1495,7 +1510,7 @@ static void section_rel(const char *modn
74310 static void check_sec_ref(struct module *mod, const char *modname,
74311 struct elf_info *elf)
74312 {
74313 - int i;
74314 + unsigned int i;
74315 Elf_Shdr *sechdrs = elf->sechdrs;
74316
74317 /* Walk through all sections */
74318 @@ -1651,7 +1666,7 @@ void __attribute__((format(printf, 2, 3)
74319 va_end(ap);
74320 }
74321
74322 -void buf_write(struct buffer *buf, const char *s, int len)
74323 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
74324 {
74325 if (buf->size - buf->pos < len) {
74326 buf->size += len + SZ;
74327 @@ -1863,7 +1878,7 @@ static void write_if_changed(struct buff
74328 if (fstat(fileno(file), &st) < 0)
74329 goto close_write;
74330
74331 - if (st.st_size != b->pos)
74332 + if (st.st_size != (off_t)b->pos)
74333 goto close_write;
74334
74335 tmp = NOFAIL(malloc(b->pos));
74336 diff -urNp linux-2.6.32.45/scripts/mod/modpost.h linux-2.6.32.45/scripts/mod/modpost.h
74337 --- linux-2.6.32.45/scripts/mod/modpost.h 2011-03-27 14:31:47.000000000 -0400
74338 +++ linux-2.6.32.45/scripts/mod/modpost.h 2011-04-17 15:56:46.000000000 -0400
74339 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *e
74340
74341 struct buffer {
74342 char *p;
74343 - int pos;
74344 - int size;
74345 + unsigned int pos;
74346 + unsigned int size;
74347 };
74348
74349 void __attribute__((format(printf, 2, 3)))
74350 buf_printf(struct buffer *buf, const char *fmt, ...);
74351
74352 void
74353 -buf_write(struct buffer *buf, const char *s, int len);
74354 +buf_write(struct buffer *buf, const char *s, unsigned int len);
74355
74356 struct module {
74357 struct module *next;
74358 diff -urNp linux-2.6.32.45/scripts/mod/sumversion.c linux-2.6.32.45/scripts/mod/sumversion.c
74359 --- linux-2.6.32.45/scripts/mod/sumversion.c 2011-03-27 14:31:47.000000000 -0400
74360 +++ linux-2.6.32.45/scripts/mod/sumversion.c 2011-04-17 15:56:46.000000000 -0400
74361 @@ -455,7 +455,7 @@ static void write_version(const char *fi
74362 goto out;
74363 }
74364
74365 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
74366 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
74367 warn("writing sum in %s failed: %s\n",
74368 filename, strerror(errno));
74369 goto out;
74370 diff -urNp linux-2.6.32.45/scripts/package/mkspec linux-2.6.32.45/scripts/package/mkspec
74371 --- linux-2.6.32.45/scripts/package/mkspec 2011-03-27 14:31:47.000000000 -0400
74372 +++ linux-2.6.32.45/scripts/package/mkspec 2011-07-19 18:19:12.000000000 -0400
74373 @@ -70,7 +70,7 @@ echo 'mkdir -p $RPM_BUILD_ROOT/boot $RPM
74374 echo 'mkdir -p $RPM_BUILD_ROOT/lib/firmware'
74375 echo "%endif"
74376
74377 -echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{_smp_mflags} KBUILD_SRC= modules_install'
74378 +echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{?_smp_mflags} KBUILD_SRC= modules_install'
74379 echo "%ifarch ia64"
74380 echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/efi/vmlinuz-$KERNELRELEASE"
74381 echo 'ln -s '"efi/vmlinuz-$KERNELRELEASE" '$RPM_BUILD_ROOT'"/boot/"
74382 diff -urNp linux-2.6.32.45/scripts/pnmtologo.c linux-2.6.32.45/scripts/pnmtologo.c
74383 --- linux-2.6.32.45/scripts/pnmtologo.c 2011-03-27 14:31:47.000000000 -0400
74384 +++ linux-2.6.32.45/scripts/pnmtologo.c 2011-04-17 15:56:46.000000000 -0400
74385 @@ -237,14 +237,14 @@ static void write_header(void)
74386 fprintf(out, " * Linux logo %s\n", logoname);
74387 fputs(" */\n\n", out);
74388 fputs("#include <linux/linux_logo.h>\n\n", out);
74389 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
74390 + fprintf(out, "static unsigned char %s_data[] = {\n",
74391 logoname);
74392 }
74393
74394 static void write_footer(void)
74395 {
74396 fputs("\n};\n\n", out);
74397 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
74398 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
74399 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
74400 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
74401 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
74402 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
74403 fputs("\n};\n\n", out);
74404
74405 /* write logo clut */
74406 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
74407 + fprintf(out, "static unsigned char %s_clut[] = {\n",
74408 logoname);
74409 write_hex_cnt = 0;
74410 for (i = 0; i < logo_clutsize; i++) {
74411 diff -urNp linux-2.6.32.45/scripts/tags.sh linux-2.6.32.45/scripts/tags.sh
74412 --- linux-2.6.32.45/scripts/tags.sh 2011-03-27 14:31:47.000000000 -0400
74413 +++ linux-2.6.32.45/scripts/tags.sh 2011-06-07 18:06:04.000000000 -0400
74414 @@ -93,6 +93,11 @@ docscope()
74415 cscope -b -f cscope.out
74416 }
74417
74418 +dogtags()
74419 +{
74420 + all_sources | gtags -f -
74421 +}
74422 +
74423 exuberant()
74424 {
74425 all_sources | xargs $1 -a \
74426 @@ -164,6 +169,10 @@ case "$1" in
74427 docscope
74428 ;;
74429
74430 + "gtags")
74431 + dogtags
74432 + ;;
74433 +
74434 "tags")
74435 rm -f tags
74436 xtags ctags
74437 diff -urNp linux-2.6.32.45/security/capability.c linux-2.6.32.45/security/capability.c
74438 --- linux-2.6.32.45/security/capability.c 2011-03-27 14:31:47.000000000 -0400
74439 +++ linux-2.6.32.45/security/capability.c 2011-04-17 15:56:46.000000000 -0400
74440 @@ -890,7 +890,7 @@ static void cap_audit_rule_free(void *ls
74441 }
74442 #endif /* CONFIG_AUDIT */
74443
74444 -struct security_operations default_security_ops = {
74445 +struct security_operations default_security_ops __read_only = {
74446 .name = "default",
74447 };
74448
74449 diff -urNp linux-2.6.32.45/security/commoncap.c linux-2.6.32.45/security/commoncap.c
74450 --- linux-2.6.32.45/security/commoncap.c 2011-03-27 14:31:47.000000000 -0400
74451 +++ linux-2.6.32.45/security/commoncap.c 2011-08-17 19:22:13.000000000 -0400
74452 @@ -27,7 +27,7 @@
74453 #include <linux/sched.h>
74454 #include <linux/prctl.h>
74455 #include <linux/securebits.h>
74456 -
74457 +#include <net/sock.h>
74458 /*
74459 * If a non-root user executes a setuid-root binary in
74460 * !secure(SECURE_NOROOT) mode, then we raise capabilities.
74461 @@ -50,9 +50,18 @@ static void warn_setuid_and_fcaps_mixed(
74462 }
74463 }
74464
74465 +#ifdef CONFIG_NET
74466 +extern kernel_cap_t gr_cap_rtnetlink(struct sock *sk);
74467 +#endif
74468 +
74469 int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
74470 {
74471 +#ifdef CONFIG_NET
74472 + NETLINK_CB(skb).eff_cap = gr_cap_rtnetlink(sk);
74473 +#else
74474 NETLINK_CB(skb).eff_cap = current_cap();
74475 +#endif
74476 +
74477 return 0;
74478 }
74479
74480 @@ -582,6 +591,9 @@ int cap_bprm_secureexec(struct linux_bin
74481 {
74482 const struct cred *cred = current_cred();
74483
74484 + if (gr_acl_enable_at_secure())
74485 + return 1;
74486 +
74487 if (cred->uid != 0) {
74488 if (bprm->cap_effective)
74489 return 1;
74490 diff -urNp linux-2.6.32.45/security/integrity/ima/ima_api.c linux-2.6.32.45/security/integrity/ima/ima_api.c
74491 --- linux-2.6.32.45/security/integrity/ima/ima_api.c 2011-03-27 14:31:47.000000000 -0400
74492 +++ linux-2.6.32.45/security/integrity/ima/ima_api.c 2011-04-17 15:56:46.000000000 -0400
74493 @@ -74,7 +74,7 @@ void ima_add_violation(struct inode *ino
74494 int result;
74495
74496 /* can overflow, only indicator */
74497 - atomic_long_inc(&ima_htable.violations);
74498 + atomic_long_inc_unchecked(&ima_htable.violations);
74499
74500 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
74501 if (!entry) {
74502 diff -urNp linux-2.6.32.45/security/integrity/ima/ima_fs.c linux-2.6.32.45/security/integrity/ima/ima_fs.c
74503 --- linux-2.6.32.45/security/integrity/ima/ima_fs.c 2011-03-27 14:31:47.000000000 -0400
74504 +++ linux-2.6.32.45/security/integrity/ima/ima_fs.c 2011-04-17 15:56:46.000000000 -0400
74505 @@ -27,12 +27,12 @@
74506 static int valid_policy = 1;
74507 #define TMPBUFLEN 12
74508 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
74509 - loff_t *ppos, atomic_long_t *val)
74510 + loff_t *ppos, atomic_long_unchecked_t *val)
74511 {
74512 char tmpbuf[TMPBUFLEN];
74513 ssize_t len;
74514
74515 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
74516 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
74517 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
74518 }
74519
74520 diff -urNp linux-2.6.32.45/security/integrity/ima/ima.h linux-2.6.32.45/security/integrity/ima/ima.h
74521 --- linux-2.6.32.45/security/integrity/ima/ima.h 2011-03-27 14:31:47.000000000 -0400
74522 +++ linux-2.6.32.45/security/integrity/ima/ima.h 2011-04-17 15:56:46.000000000 -0400
74523 @@ -84,8 +84,8 @@ void ima_add_violation(struct inode *ino
74524 extern spinlock_t ima_queue_lock;
74525
74526 struct ima_h_table {
74527 - atomic_long_t len; /* number of stored measurements in the list */
74528 - atomic_long_t violations;
74529 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
74530 + atomic_long_unchecked_t violations;
74531 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
74532 };
74533 extern struct ima_h_table ima_htable;
74534 diff -urNp linux-2.6.32.45/security/integrity/ima/ima_queue.c linux-2.6.32.45/security/integrity/ima/ima_queue.c
74535 --- linux-2.6.32.45/security/integrity/ima/ima_queue.c 2011-03-27 14:31:47.000000000 -0400
74536 +++ linux-2.6.32.45/security/integrity/ima/ima_queue.c 2011-04-17 15:56:46.000000000 -0400
74537 @@ -78,7 +78,7 @@ static int ima_add_digest_entry(struct i
74538 INIT_LIST_HEAD(&qe->later);
74539 list_add_tail_rcu(&qe->later, &ima_measurements);
74540
74541 - atomic_long_inc(&ima_htable.len);
74542 + atomic_long_inc_unchecked(&ima_htable.len);
74543 key = ima_hash_key(entry->digest);
74544 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
74545 return 0;
74546 diff -urNp linux-2.6.32.45/security/Kconfig linux-2.6.32.45/security/Kconfig
74547 --- linux-2.6.32.45/security/Kconfig 2011-03-27 14:31:47.000000000 -0400
74548 +++ linux-2.6.32.45/security/Kconfig 2011-07-06 19:58:11.000000000 -0400
74549 @@ -4,6 +4,555 @@
74550
74551 menu "Security options"
74552
74553 +source grsecurity/Kconfig
74554 +
74555 +menu "PaX"
74556 +
74557 + config ARCH_TRACK_EXEC_LIMIT
74558 + bool
74559 +
74560 + config PAX_PER_CPU_PGD
74561 + bool
74562 +
74563 + config TASK_SIZE_MAX_SHIFT
74564 + int
74565 + depends on X86_64
74566 + default 47 if !PAX_PER_CPU_PGD
74567 + default 42 if PAX_PER_CPU_PGD
74568 +
74569 + config PAX_ENABLE_PAE
74570 + bool
74571 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
74572 +
74573 +config PAX
74574 + bool "Enable various PaX features"
74575 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
74576 + help
74577 + This allows you to enable various PaX features. PaX adds
74578 + intrusion prevention mechanisms to the kernel that reduce
74579 + the risks posed by exploitable memory corruption bugs.
74580 +
74581 +menu "PaX Control"
74582 + depends on PAX
74583 +
74584 +config PAX_SOFTMODE
74585 + bool 'Support soft mode'
74586 + select PAX_PT_PAX_FLAGS
74587 + help
74588 + Enabling this option will allow you to run PaX in soft mode, that
74589 + is, PaX features will not be enforced by default, only on executables
74590 + marked explicitly. You must also enable PT_PAX_FLAGS support as it
74591 + is the only way to mark executables for soft mode use.
74592 +
74593 + Soft mode can be activated by using the "pax_softmode=1" kernel command
74594 + line option on boot. Furthermore you can control various PaX features
74595 + at runtime via the entries in /proc/sys/kernel/pax.
74596 +
74597 +config PAX_EI_PAX
74598 + bool 'Use legacy ELF header marking'
74599 + help
74600 + Enabling this option will allow you to control PaX features on
74601 + a per executable basis via the 'chpax' utility available at
74602 + http://pax.grsecurity.net/. The control flags will be read from
74603 + an otherwise reserved part of the ELF header. This marking has
74604 + numerous drawbacks (no support for soft-mode, toolchain does not
74605 + know about the non-standard use of the ELF header); therefore it
74606 + has been deprecated in favour of PT_PAX_FLAGS support.
74607 +
74608 + Note that if you enable PT_PAX_FLAGS marking support as well,
74609 + the PT_PAX_FLAG marks will override the legacy EI_PAX marks.
74610 +
74611 +config PAX_PT_PAX_FLAGS
74612 + bool 'Use ELF program header marking'
74613 + help
74614 + Enabling this option will allow you to control PaX features on
74615 + a per executable basis via the 'paxctl' utility available at
74616 + http://pax.grsecurity.net/. The control flags will be read from
74617 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
74618 + has the benefits of supporting both soft mode and being fully
74619 + integrated into the toolchain (the binutils patch is available
74620 + from http://pax.grsecurity.net).
74621 +
74622 + If your toolchain does not support PT_PAX_FLAGS markings,
74623 + you can create one in most cases with 'paxctl -C'.
74624 +
74625 + Note that if you enable the legacy EI_PAX marking support as well,
74626 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
74627 +
74628 +choice
74629 + prompt 'MAC system integration'
74630 + default PAX_HAVE_ACL_FLAGS
74631 + help
74632 + Mandatory Access Control systems have the option of controlling
74633 + PaX flags on a per executable basis, choose the method supported
74634 + by your particular system.
74635 +
74636 + - "none": if your MAC system does not interact with PaX,
74637 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
74638 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
74639 +
74640 + NOTE: this option is for developers/integrators only.
74641 +
74642 + config PAX_NO_ACL_FLAGS
74643 + bool 'none'
74644 +
74645 + config PAX_HAVE_ACL_FLAGS
74646 + bool 'direct'
74647 +
74648 + config PAX_HOOK_ACL_FLAGS
74649 + bool 'hook'
74650 +endchoice
74651 +
74652 +endmenu
74653 +
74654 +menu "Non-executable pages"
74655 + depends on PAX
74656 +
74657 +config PAX_NOEXEC
74658 + bool "Enforce non-executable pages"
74659 + depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
74660 + help
74661 + By design some architectures do not allow for protecting memory
74662 + pages against execution or even if they do, Linux does not make
74663 + use of this feature. In practice this means that if a page is
74664 + readable (such as the stack or heap) it is also executable.
74665 +
74666 + There is a well known exploit technique that makes use of this
74667 + fact and a common programming mistake where an attacker can
74668 + introduce code of his choice somewhere in the attacked program's
74669 + memory (typically the stack or the heap) and then execute it.
74670 +
74671 + If the attacked program was running with different (typically
74672 + higher) privileges than that of the attacker, then he can elevate
74673 + his own privilege level (e.g. get a root shell, write to files to
74674 + which he does not have write access, etc).
74675 +
74676 + Enabling this option will let you choose from various features
74677 + that prevent the injection and execution of 'foreign' code in
74678 + a program.
74679 +
74680 + This will also break programs that rely on the old behaviour and
74681 + expect that dynamically allocated memory via the malloc() family
74682 + of functions is executable (which it is not). Notable examples
74683 + are the XFree86 4.x server, the java runtime and wine.
74684 +
74685 +config PAX_PAGEEXEC
74686 + bool "Paging based non-executable pages"
74687 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
74688 + select S390_SWITCH_AMODE if S390
74689 + select S390_EXEC_PROTECT if S390
74690 + select ARCH_TRACK_EXEC_LIMIT if X86_32
74691 + help
74692 + This implementation is based on the paging feature of the CPU.
74693 + On i386 without hardware non-executable bit support there is a
74694 + variable but usually low performance impact, however on Intel's
74695 + P4 core based CPUs it is very high so you should not enable this
74696 + for kernels meant to be used on such CPUs.
74697 +
74698 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
74699 + with hardware non-executable bit support there is no performance
74700 + impact, on ppc the impact is negligible.
74701 +
74702 + Note that several architectures require various emulations due to
74703 + badly designed userland ABIs; this will cause a performance impact
74704 + but will disappear as soon as userland is fixed. For example, ppc
74705 + userland MUST have been built with secure-plt by a recent toolchain.
74706 +
74707 +config PAX_SEGMEXEC
74708 + bool "Segmentation based non-executable pages"
74709 + depends on PAX_NOEXEC && X86_32
74710 + help
74711 + This implementation is based on the segmentation feature of the
74712 + CPU and has a very small performance impact, however applications
74713 + will be limited to a 1.5 GB address space instead of the normal
74714 + 3 GB.
74715 +
74716 +config PAX_EMUTRAMP
74717 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
74718 + default y if PARISC
74719 + help
74720 + There are some programs and libraries that for one reason or
74721 + another attempt to execute special small code snippets from
74722 + non-executable memory pages. Most notable examples are the
74723 + signal handler return code generated by the kernel itself and
74724 + the GCC trampolines.
74725 +
74726 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
74727 + such programs will no longer work under your kernel.
74728 +
74729 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
74730 + utilities to enable trampoline emulation for the affected programs
74731 + yet still have the protection provided by the non-executable pages.
74732 +
74733 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
74734 + your system will not even boot.
74735 +
74736 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
74737 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
74738 + for the affected files.
74739 +
74740 + NOTE: enabling this feature *may* open up a loophole in the
74741 + protection provided by non-executable pages that an attacker
74742 + could abuse. Therefore the best solution is to not have any
74743 + files on your system that would require this option. This can
74744 + be achieved by not using libc5 (which relies on the kernel
74745 + signal handler return code) and not using or rewriting programs
74746 + that make use of the nested function implementation of GCC.
74747 + Skilled users can just fix GCC itself so that it implements
74748 + nested function calls in a way that does not interfere with PaX.
74749 +
74750 +config PAX_EMUSIGRT
74751 + bool "Automatically emulate sigreturn trampolines"
74752 + depends on PAX_EMUTRAMP && PARISC
74753 + default y
74754 + help
74755 + Enabling this option will have the kernel automatically detect
74756 + and emulate signal return trampolines executing on the stack
74757 + that would otherwise lead to task termination.
74758 +
74759 + This solution is intended as a temporary one for users with
74760 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
74761 + Modula-3 runtime, etc) or executables linked to such, basically
74762 + everything that does not specify its own SA_RESTORER function in
74763 + normal executable memory like glibc 2.1+ does.
74764 +
74765 + On parisc you MUST enable this option, otherwise your system will
74766 + not even boot.
74767 +
74768 + NOTE: this feature cannot be disabled on a per executable basis
74769 + and since it *does* open up a loophole in the protection provided
74770 + by non-executable pages, the best solution is to not have any
74771 + files on your system that would require this option.
74772 +
74773 +config PAX_MPROTECT
74774 + bool "Restrict mprotect()"
74775 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
74776 + help
74777 + Enabling this option will prevent programs from
74778 + - changing the executable status of memory pages that were
74779 + not originally created as executable,
74780 + - making read-only executable pages writable again,
74781 + - creating executable pages from anonymous memory,
74782 + - making read-only-after-relocations (RELRO) data pages writable again.
74783 +
74784 + You should say Y here to complete the protection provided by
74785 + the enforcement of non-executable pages.
74786 +
74787 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
74788 + this feature on a per file basis.
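
For illustration, a minimal userland sketch (not part of this patch) of the kind of request the restriction rejects: upgrading an anonymous read/write mapping to executable after the fact. The exact errno is an assumption; the point is that the mprotect() call is expected to fail on a PAX_MPROTECT kernel while it succeeds on a stock one.

/* sketch: under PAX_MPROTECT the PROT_EXEC upgrade below should fail
 * (commonly with EACCES), while a stock kernel allows it. */
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	if (mprotect(p, len, PROT_READ | PROT_EXEC) != 0)
		printf("mprotect(PROT_EXEC) denied: %s\n", strerror(errno));
	else
		printf("mprotect(PROT_EXEC) allowed\n");

	munmap(p, len);
	return 0;
}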
74789 +
74790 +config PAX_MPROTECT_COMPAT
74791 + bool "Use legacy/compat protection demoting (read help)"
74792 + depends on PAX_MPROTECT
74793 + default n
74794 + help
74795 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
74796 + by sending the proper error code to the application. For some broken
74797 + userland, this can cause problems with Python or other applications. The
74798 + current implementation, however, allows applications like clamav to detect
74799 + whether JIT compilation/execution is allowed and to fall back gracefully
74800 + to an interpreter-based mode if it is not. While we encourage everyone
74801 + to use the current implementation as-is and push upstream to fix broken
74802 + userland (note that the RWX logging option can assist with this), in some
74803 + environments this may not be possible. Having to disable MPROTECT
74804 + completely on certain binaries reduces the security benefit of PaX,
74805 + so this option is provided for those environments to revert to the old
74806 + behavior.
74807 +
74808 +config PAX_ELFRELOCS
74809 + bool "Allow ELF text relocations (read help)"
74810 + depends on PAX_MPROTECT
74811 + default n
74812 + help
74813 + Non-executable pages and mprotect() restrictions are effective
74814 + in preventing the introduction of new executable code into an
74815 + attacked task's address space. There remain only two avenues
74816 + for this kind of attack: if the attacker can execute already
74817 + existing code in the attacked task, then he can either have it
74818 + create and mmap() a file containing his code, or have it mmap()
74819 + an already existing ELF library that does not contain position
74820 + independent code and use mprotect() on it to make it writable
74821 + and copy his code there. While protecting against the former
74822 + approach is beyond PaX, the latter can be prevented by having
74823 + only PIC ELF libraries on one's system (which do not need to
74824 + relocate their code). If you are sure this is your case, as it
74825 + is with all modern Linux distributions, then leave this option
74826 + disabled and say 'n' here.
74827 +
74828 +config PAX_ETEXECRELOCS
74829 + bool "Allow ELF ET_EXEC text relocations"
74830 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
74831 + select PAX_ELFRELOCS
74832 + default y
74833 + help
74834 + On some architectures there are incorrectly created applications
74835 + that require text relocations and would not work without enabling
74836 + this option. If you are an alpha, ia64 or parisc user, you should
74837 + enable this option and disable it once you have made sure that
74838 + none of your applications need it.
74839 +
74840 +config PAX_EMUPLT
74841 + bool "Automatically emulate ELF PLT"
74842 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
74843 + default y
74844 + help
74845 + Enabling this option will have the kernel automatically detect
74846 + and emulate the Procedure Linkage Table entries in ELF files.
74847 + On some architectures such entries are in writable memory, and
74848 + become non-executable, leading to task termination. Therefore
74849 + it is mandatory that you enable this option on alpha, parisc,
74850 + sparc and sparc64; otherwise your system would not even boot.
74851 +
74852 + NOTE: this feature *does* open up a loophole in the protection
74853 + provided by the non-executable pages, therefore the proper
74854 + solution is to modify the toolchain to produce a PLT that does
74855 + not need to be writable.
74856 +
74857 +config PAX_DLRESOLVE
74858 + bool 'Emulate old glibc resolver stub'
74859 + depends on PAX_EMUPLT && SPARC
74860 + default n
74861 + help
74862 + This option is needed if userland has an old glibc (before 2.4)
74863 + that puts a 'save' instruction into the runtime generated resolver
74864 + stub that needs special emulation.
74865 +
74866 +config PAX_KERNEXEC
74867 + bool "Enforce non-executable kernel pages"
74868 + depends on PAX_NOEXEC && (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
74869 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
74870 + help
74871 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
74872 + that is, enabling this option will make it harder to inject
74873 + and execute 'foreign' code in kernel memory itself.
74874 +
74875 + Note that on x86_64 kernels there is a known regression when
74876 + this feature and KVM/VMX are both enabled in the host kernel.
74877 +
74878 +config PAX_KERNEXEC_MODULE_TEXT
74879 + int "Minimum amount of memory reserved for module code"
74880 + default "4"
74881 + depends on PAX_KERNEXEC && X86_32 && MODULES
74882 + help
74883 + Due to implementation details the kernel must reserve a fixed
74884 + amount of memory for module code at compile time that cannot be
74885 + changed at runtime. Here you can specify the minimum amount
74886 + in MB that will be reserved. Due to the same implementation
74887 + details this size will always be rounded up to the next 2/4 MB
74888 + boundary (depending on PAE), so the memory actually available for
74889 + module code will usually be more than this minimum.
74890 +
74891 + The default 4 MB should be enough for most users but if you have
74892 + an excessive number of modules (e.g., most distribution configs
74893 + compile many drivers as modules) or use huge modules such as
74894 + nvidia's kernel driver, you will need to adjust this amount.
74895 + A good rule of thumb is to look at your currently loaded kernel
74896 + modules and add up their sizes.
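
A rough way to apply that rule of thumb is to total the per-module sizes reported in /proc/modules. The sketch below (not part of this patch) does exactly that and prints the sum rounded up to whole MB; the result is only an estimate of the module code actually in use.

/* sketch: sum the second column of /proc/modules (module size in bytes)
 * to estimate how much module memory the running system uses. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/modules", "r");
	char name[64];
	unsigned long size, total = 0;

	if (!f) {
		perror("/proc/modules");
		return 1;
	}

	while (fscanf(f, "%63s %lu %*[^\n]", name, &size) == 2)
		total += size;

	fclose(f);
	printf("loaded modules: ~%lu MB\n", (total + (1UL << 20) - 1) >> 20);
	return 0;
}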
74897 +
74898 +endmenu
74899 +
74900 +menu "Address Space Layout Randomization"
74901 + depends on PAX
74902 +
74903 +config PAX_ASLR
74904 + bool "Address Space Layout Randomization"
74905 + depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
74906 + help
74907 + Many if not most exploit techniques rely on the knowledge of
74908 + certain addresses in the attacked program. The following options
74909 + will allow the kernel to apply a certain amount of randomization
74910 + to specific parts of the program, thereby forcing an attacker to
74911 + guess them in most cases. Any failed guess will most likely crash
74912 + the attacked program, which allows the kernel to detect such attempts
74913 + and react to them. PaX itself provides no reaction mechanisms;
74914 + instead it is strongly encouraged that you make use of Nergal's
74915 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
74916 + (http://www.grsecurity.net/) built-in crash detection features, or
74917 + develop one yourself.
74918 +
74919 + By saying Y here you can choose to randomize the following areas:
74920 + - top of the task's kernel stack
74921 + - top of the task's userland stack
74922 + - base address for mmap() requests that do not specify one
74923 + (this includes all libraries)
74924 + - base address of the main executable
74925 +
74926 + It is strongly recommended to say Y here as address space layout
74927 + randomization has negligible impact on performance yet it provides
74928 + a very effective protection.
74929 +
74930 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
74931 + this feature on a per file basis.
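
A simple way to observe the randomization described above is to print a few representative addresses and run the binary several times; with ASLR active the stack, heap and mmap() addresses change between runs (the main() address also changes if the executable was built as ET_DYN/PIE). A minimal sketch, not part of this patch:

/* sketch: run several times; with ASLR the printed addresses vary,
 * without it they stay constant across runs. */
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
	int on_stack;
	void *heap = malloc(16);
	void *map = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	printf("text : %p\n", (void *)main);
	printf("stack: %p\n", (void *)&on_stack);
	printf("heap : %p\n", heap);
	printf("mmap : %p\n", map);

	free(heap);
	if (map != MAP_FAILED)
		munmap(map, 4096);
	return 0;
}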
74932 +
74933 +config PAX_RANDKSTACK
74934 + bool "Randomize kernel stack base"
74935 + depends on PAX_ASLR && X86_TSC && X86
74936 + help
74937 + By saying Y here the kernel will randomize every task's kernel
74938 + stack on every system call. This will not only force an attacker
74939 + to guess it but also prevent him from making use of possible
74940 + leaked information about it.
74941 +
74942 + Since the kernel stack is a rather scarce resource, randomization
74943 + may cause unexpected stack overflows; therefore you should very
74944 + carefully test your system. Note that once enabled in the kernel
74945 + configuration, this feature cannot be disabled on a per file basis.
74946 +
74947 +config PAX_RANDUSTACK
74948 + bool "Randomize user stack base"
74949 + depends on PAX_ASLR
74950 + help
74951 + By saying Y here the kernel will randomize every task's userland
74952 + stack. The randomization is done in two steps, where the second
74953 + one may shift the top of the stack by a large amount and
74954 + cause problems for programs that want to use lots of memory (more
74955 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
74956 + For this reason the second step can be controlled by 'chpax' or
74957 + 'paxctl' on a per file basis.
74958 +
74959 +config PAX_RANDMMAP
74960 + bool "Randomize mmap() base"
74961 + depends on PAX_ASLR
74962 + help
74963 + By saying Y here the kernel will use a randomized base address for
74964 + mmap() requests that do not specify one themselves. As a result
74965 + all dynamically loaded libraries will appear at random addresses
74966 + and therefore be harder to exploit by a technique where an attacker
74967 + attempts to execute library code for his purposes (e.g. spawn a
74968 + shell from an exploited program that is running at an elevated
74969 + privilege level).
74970 +
74971 + Furthermore, if a program is relinked as a dynamic ELF file, its
74972 + base address will be randomized as well, completing the full
74973 + randomization of the address space layout. Attacking such programs
74974 + becomes a guessing game. You can find an example of doing this at
74975 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
74976 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
74977 +
74978 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
74979 + feature on a per file basis.
74980 +
74981 +endmenu
74982 +
74983 +menu "Miscellaneous hardening features"
74984 +
74985 +config PAX_MEMORY_SANITIZE
74986 + bool "Sanitize all freed memory"
74987 + help
74988 + By saying Y here the kernel will erase memory pages as soon as they
74989 + are freed. This in turn reduces the lifetime of data stored in the
74990 + pages, making it less likely that sensitive information such as
74991 + passwords, cryptographic secrets, etc stay in memory for too long.
74992 +
74993 + This is especially useful for programs whose runtime is short;
74994 + long-lived processes and the kernel itself also benefit as long as
74995 + they operate on whole memory pages and ensure timely freeing of pages
74996 + that may hold sensitive information.
74997 +
74998 + The tradeoff is a performance impact: on a single CPU system kernel
74999 + compilation sees a 3% slowdown; other systems and workloads may vary,
75000 + and you are advised to test this feature on your expected workload
75001 + before deploying it.
75002 +
75003 + Note that this feature does not protect data stored in live pages,
75004 + e.g., process memory swapped to disk may stay there for a long time.
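
Conceptually the option amounts to clearing memory on the free path rather than leaving its contents behind. The sketch below only illustrates that idea in userland C and is not the kernel implementation; note that a real implementation must also keep the compiler from optimizing the clearing store away.

/* sketch: freeing with sanitization vs. plain freeing -- with the former
 * the secret does not linger in the released memory. */
#include <stdlib.h>
#include <string.h>

#define PAGE_SZ 4096

static void free_page_plain(void *page)
{
	free(page);			/* old contents may linger */
}

static void free_page_sanitized(void *page)
{
	memset(page, 0, PAGE_SZ);	/* erase before release */
	free(page);
}

int main(void)
{
	char *page = malloc(PAGE_SZ);

	if (!page)
		return 1;
	strcpy(page, "hunter2");	/* stand-in for sensitive data */
	free_page_sanitized(page);	/* PAX_MEMORY_SANITIZE-like behaviour */
	(void)free_page_plain;		/* kept only for contrast */
	return 0;
}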
75005 +
75006 +config PAX_MEMORY_STACKLEAK
75007 + bool "Sanitize kernel stack"
75008 + depends on X86
75009 + help
75010 + By saying Y here the kernel will erase the kernel stack before it
75011 + returns from a system call. This in turn reduces the information
75012 + that a kernel stack leak bug can reveal.
75013 +
75014 + Note that such a bug can still leak information that was put on
75015 + the stack by the current system call (the one eventually triggering
75016 + the bug) but traces of earlier system calls on the kernel stack
75017 + cannot leak anymore.
75018 +
75019 + The tradeoff is a performance impact: on a single CPU system kernel
75020 + compilation sees a 1% slowdown; other systems and workloads may vary,
75021 + and you are advised to test this feature on your expected workload
75022 + before deploying it.
75023 +
75024 + Note: full support for this feature requires gcc with plugin support,
75025 + so make sure your compiler is at least gcc 4.5.0 (cross compilation
75026 + is not supported). Using older gcc versions means that functions
75027 + with large enough stack frames may leave uninitialized memory behind
75028 + that may be exposed to a later syscall leaking the stack.
75029 +
75030 +config PAX_MEMORY_UDEREF
75031 + bool "Prevent invalid userland pointer dereference"
75032 + depends on X86 && !UML_X86 && !XEN
75033 + select PAX_PER_CPU_PGD if X86_64
75034 + help
75035 + By saying Y here the kernel will be prevented from dereferencing
75036 + userland pointers in contexts where the kernel expects only kernel
75037 + pointers. This is both a useful runtime debugging feature and a
75038 + security measure that prevents exploiting a class of kernel bugs.
75039 +
75040 + The tradeoff is that some virtualization solutions may experience
75041 + a huge slowdown, and therefore you should not enable this feature
75042 + for kernels meant to run in such environments. Whether a given VM
75043 + solution is affected is best determined by simply trying it
75044 + out; the performance impact will be obvious right from boot, as this
75045 + mechanism engages very early on. A good rule of thumb is that
75046 + VMs running on CPUs without hardware virtualization support (i.e.,
75047 + the majority of IA-32 CPUs) will likely experience the slowdown.
75048 +
75049 +config PAX_REFCOUNT
75050 + bool "Prevent various kernel object reference counter overflows"
75051 + depends on GRKERNSEC && (X86 || SPARC64)
75052 + help
75053 + By saying Y here the kernel will detect and prevent overflowing
75054 + various (but not all) kinds of object reference counters. Such
75055 + overflows can normally occur due to bugs only and are often, if
75056 + not always, exploitable.
75057 +
75058 + The tradeoff is that data structures protected by an overflowed
75059 + refcount will never be freed and therefore will leak memory. Note
75060 + that this leak also happens even without this protection but in
75061 + that case the overflow can eventually trigger the freeing of the
75062 + data structure while it is still being used elsewhere, resulting
75063 + in the exploitable situation that this feature prevents.
75064 +
75065 + Since this has a negligible performance impact, you should enable
75066 + this feature.
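
The policy can be pictured as a saturating reference counter: once the count reaches its maximum the increment is refused, so the count can never wrap around to zero and trigger a premature free. A userland sketch of that idea (not the kernel code):

/* sketch: a reference counter that saturates instead of overflowing,
 * trading a memory leak for use-after-free safety. */
#include <limits.h>
#include <stdio.h>

struct obj {
	int refcount;
};

static int obj_get(struct obj *o)
{
	if (o->refcount == INT_MAX) {
		fprintf(stderr, "refcount overflow prevented, object leaked\n");
		return -1;		/* caller must not use the reference */
	}
	o->refcount++;
	return 0;
}

static void obj_put(struct obj *o)
{
	if (--o->refcount == 0)
		fprintf(stderr, "last reference dropped, object freed\n");
}

int main(void)
{
	struct obj o = { .refcount = INT_MAX - 1 };

	obj_get(&o);	/* reaches INT_MAX */
	obj_get(&o);	/* refused: would overflow */
	obj_put(&o);
	return 0;
}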
75067 +
75068 +config PAX_USERCOPY
75069 + bool "Harden heap object copies between kernel and userland"
75070 + depends on X86 || PPC || SPARC || ARM
75071 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
75072 + help
75073 + By saying Y here the kernel will enforce the size of heap objects
75074 + when they are copied in either direction between the kernel and
75075 + userland, even if only a part of the heap object is copied.
75076 +
75077 + Specifically, this checking prevents information leaking from the
75078 + kernel heap during kernel to userland copies (if the kernel heap
75079 + object is otherwise fully initialized) and prevents kernel heap
75080 + overflows during userland to kernel copies.
75081 +
75082 + Note that the current implementation provides the strictest bounds
75083 + checks for the SLUB allocator.
75084 +
75085 + Enabling this option also enables per-slab cache protection against
75086 + data in a given cache being copied to or from userland via the
75087 + usual accessors. Though the whitelist of regions will be reduced
75088 + over time, it notably protects important data structures like task
75089 + structs.
75090 +
75091 + If frame pointers are enabled on x86, this option will also
75092 + restrict copies into and out of the kernel stack to local variables
75093 + within a single frame.
75094 +
75095 + Since this has a negligible performance impact, you should enable
75096 + this feature.
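
The check can be pictured as comparing the requested copy length against the bounds of the heap object that the kernel-side pointer falls in, and refusing the copy when the length spills past that object. The userland sketch below illustrates the idea with a hand-tracked object; the real code asks the slab allocator for the object bounds instead.

/* sketch: refuse a copy whose length runs past the heap object the
 * source pointer belongs to, mimicking the kernel<->userland copy check. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct tracked {			/* stand-in for slab bookkeeping */
	void *base;
	size_t size;
};

static int within_object(const struct tracked *obj, const void *p, size_t n)
{
	const char *start = obj->base;
	const char *ptr = p;
	size_t off;

	if (ptr < start)
		return 0;
	off = (size_t)(ptr - start);
	return off <= obj->size && n <= obj->size - off;
}

static int checked_copy(void *dst, const struct tracked *obj,
			const void *src, size_t n)
{
	if (!within_object(obj, src, n)) {
		fprintf(stderr, "refused: %zu byte copy exceeds object\n", n);
		return -1;
	}
	memcpy(dst, src, n);
	return 0;
}

int main(void)
{
	struct tracked obj = { .size = 32 };
	char out[128];

	obj.base = calloc(1, obj.size);
	if (!obj.base)
		return 1;

	checked_copy(out, &obj, obj.base, 16);	/* within bounds: allowed */
	checked_copy(out, &obj, obj.base, 64);	/* overreads the object: refused */

	free(obj.base);
	return 0;
}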
75097 +
75098 +endmenu
75099 +
75100 +endmenu
75101 +
75102 config KEYS
75103 bool "Enable access key retention support"
75104 help
75105 @@ -146,7 +695,7 @@ config INTEL_TXT
75106 config LSM_MMAP_MIN_ADDR
75107 int "Low address space for LSM to protect from user allocation"
75108 depends on SECURITY && SECURITY_SELINUX
75109 - default 65536
75110 + default 32768
75111 help
75112 This is the portion of low virtual memory which should be protected
75113 from userspace allocation. Keeping a user from writing to low pages
75114 diff -urNp linux-2.6.32.45/security/keys/keyring.c linux-2.6.32.45/security/keys/keyring.c
75115 --- linux-2.6.32.45/security/keys/keyring.c 2011-03-27 14:31:47.000000000 -0400
75116 +++ linux-2.6.32.45/security/keys/keyring.c 2011-04-18 22:03:00.000000000 -0400
75117 @@ -214,15 +214,15 @@ static long keyring_read(const struct ke
75118 ret = -EFAULT;
75119
75120 for (loop = 0; loop < klist->nkeys; loop++) {
75121 + key_serial_t serial;
75122 key = klist->keys[loop];
75123 + serial = key->serial;
75124
75125 tmp = sizeof(key_serial_t);
75126 if (tmp > buflen)
75127 tmp = buflen;
75128
75129 - if (copy_to_user(buffer,
75130 - &key->serial,
75131 - tmp) != 0)
75132 + if (copy_to_user(buffer, &serial, tmp))
75133 goto error;
75134
75135 buflen -= tmp;
75136 diff -urNp linux-2.6.32.45/security/min_addr.c linux-2.6.32.45/security/min_addr.c
75137 --- linux-2.6.32.45/security/min_addr.c 2011-03-27 14:31:47.000000000 -0400
75138 +++ linux-2.6.32.45/security/min_addr.c 2011-04-17 15:56:46.000000000 -0400
75139 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG
75140 */
75141 static void update_mmap_min_addr(void)
75142 {
75143 +#ifndef SPARC
75144 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
75145 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
75146 mmap_min_addr = dac_mmap_min_addr;
75147 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
75148 #else
75149 mmap_min_addr = dac_mmap_min_addr;
75150 #endif
75151 +#endif
75152 }
75153
75154 /*
75155 diff -urNp linux-2.6.32.45/security/root_plug.c linux-2.6.32.45/security/root_plug.c
75156 --- linux-2.6.32.45/security/root_plug.c 2011-03-27 14:31:47.000000000 -0400
75157 +++ linux-2.6.32.45/security/root_plug.c 2011-04-17 15:56:46.000000000 -0400
75158 @@ -70,7 +70,7 @@ static int rootplug_bprm_check_security
75159 return 0;
75160 }
75161
75162 -static struct security_operations rootplug_security_ops = {
75163 +static struct security_operations rootplug_security_ops __read_only = {
75164 .bprm_check_security = rootplug_bprm_check_security,
75165 };
75166
75167 diff -urNp linux-2.6.32.45/security/security.c linux-2.6.32.45/security/security.c
75168 --- linux-2.6.32.45/security/security.c 2011-03-27 14:31:47.000000000 -0400
75169 +++ linux-2.6.32.45/security/security.c 2011-04-17 15:56:46.000000000 -0400
75170 @@ -24,7 +24,7 @@ static __initdata char chosen_lsm[SECURI
75171 extern struct security_operations default_security_ops;
75172 extern void security_fixup_ops(struct security_operations *ops);
75173
75174 -struct security_operations *security_ops; /* Initialized to NULL */
75175 +struct security_operations *security_ops __read_only; /* Initialized to NULL */
75176
75177 static inline int verify(struct security_operations *ops)
75178 {
75179 @@ -106,7 +106,7 @@ int __init security_module_enable(struct
75180 * If there is already a security module registered with the kernel,
75181 * an error will be returned. Otherwise %0 is returned on success.
75182 */
75183 -int register_security(struct security_operations *ops)
75184 +int __init register_security(struct security_operations *ops)
75185 {
75186 if (verify(ops)) {
75187 printk(KERN_DEBUG "%s could not verify "
75188 diff -urNp linux-2.6.32.45/security/selinux/hooks.c linux-2.6.32.45/security/selinux/hooks.c
75189 --- linux-2.6.32.45/security/selinux/hooks.c 2011-03-27 14:31:47.000000000 -0400
75190 +++ linux-2.6.32.45/security/selinux/hooks.c 2011-04-17 15:56:46.000000000 -0400
75191 @@ -131,7 +131,7 @@ int selinux_enabled = 1;
75192 * Minimal support for a secondary security module,
75193 * just to allow the use of the capability module.
75194 */
75195 -static struct security_operations *secondary_ops;
75196 +static struct security_operations *secondary_ops __read_only;
75197
75198 /* Lists of inode and superblock security structures initialized
75199 before the policy was loaded. */
75200 @@ -5457,7 +5457,7 @@ static int selinux_key_getsecurity(struc
75201
75202 #endif
75203
75204 -static struct security_operations selinux_ops = {
75205 +static struct security_operations selinux_ops __read_only = {
75206 .name = "selinux",
75207
75208 .ptrace_access_check = selinux_ptrace_access_check,
75209 @@ -5841,7 +5841,9 @@ int selinux_disable(void)
75210 avc_disable();
75211
75212 /* Reset security_ops to the secondary module, dummy or capability. */
75213 + pax_open_kernel();
75214 security_ops = secondary_ops;
75215 + pax_close_kernel();
75216
75217 /* Unregister netfilter hooks. */
75218 selinux_nf_ip_exit();
75219 diff -urNp linux-2.6.32.45/security/selinux/include/xfrm.h linux-2.6.32.45/security/selinux/include/xfrm.h
75220 --- linux-2.6.32.45/security/selinux/include/xfrm.h 2011-03-27 14:31:47.000000000 -0400
75221 +++ linux-2.6.32.45/security/selinux/include/xfrm.h 2011-05-18 20:09:37.000000000 -0400
75222 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct s
75223
75224 static inline void selinux_xfrm_notify_policyload(void)
75225 {
75226 - atomic_inc(&flow_cache_genid);
75227 + atomic_inc_unchecked(&flow_cache_genid);
75228 }
75229 #else
75230 static inline int selinux_xfrm_enabled(void)
75231 diff -urNp linux-2.6.32.45/security/selinux/ss/services.c linux-2.6.32.45/security/selinux/ss/services.c
75232 --- linux-2.6.32.45/security/selinux/ss/services.c 2011-03-27 14:31:47.000000000 -0400
75233 +++ linux-2.6.32.45/security/selinux/ss/services.c 2011-05-16 21:46:57.000000000 -0400
75234 @@ -1715,6 +1715,8 @@ int security_load_policy(void *data, siz
75235 int rc = 0;
75236 struct policy_file file = { data, len }, *fp = &file;
75237
75238 + pax_track_stack();
75239 +
75240 if (!ss_initialized) {
75241 avtab_cache_init();
75242 if (policydb_read(&policydb, fp)) {
75243 diff -urNp linux-2.6.32.45/security/smack/smack_lsm.c linux-2.6.32.45/security/smack/smack_lsm.c
75244 --- linux-2.6.32.45/security/smack/smack_lsm.c 2011-03-27 14:31:47.000000000 -0400
75245 +++ linux-2.6.32.45/security/smack/smack_lsm.c 2011-04-17 15:56:46.000000000 -0400
75246 @@ -3073,7 +3073,7 @@ static int smack_inode_getsecctx(struct
75247 return 0;
75248 }
75249
75250 -struct security_operations smack_ops = {
75251 +struct security_operations smack_ops __read_only = {
75252 .name = "smack",
75253
75254 .ptrace_access_check = smack_ptrace_access_check,
75255 diff -urNp linux-2.6.32.45/security/tomoyo/tomoyo.c linux-2.6.32.45/security/tomoyo/tomoyo.c
75256 --- linux-2.6.32.45/security/tomoyo/tomoyo.c 2011-03-27 14:31:47.000000000 -0400
75257 +++ linux-2.6.32.45/security/tomoyo/tomoyo.c 2011-04-17 15:56:46.000000000 -0400
75258 @@ -275,7 +275,7 @@ static int tomoyo_dentry_open(struct fil
75259 * tomoyo_security_ops is a "struct security_operations" which is used for
75260 * registering TOMOYO.
75261 */
75262 -static struct security_operations tomoyo_security_ops = {
75263 +static struct security_operations tomoyo_security_ops __read_only = {
75264 .name = "tomoyo",
75265 .cred_alloc_blank = tomoyo_cred_alloc_blank,
75266 .cred_prepare = tomoyo_cred_prepare,
75267 diff -urNp linux-2.6.32.45/sound/aoa/codecs/onyx.c linux-2.6.32.45/sound/aoa/codecs/onyx.c
75268 --- linux-2.6.32.45/sound/aoa/codecs/onyx.c 2011-03-27 14:31:47.000000000 -0400
75269 +++ linux-2.6.32.45/sound/aoa/codecs/onyx.c 2011-04-17 15:56:46.000000000 -0400
75270 @@ -53,7 +53,7 @@ struct onyx {
75271 spdif_locked:1,
75272 analog_locked:1,
75273 original_mute:2;
75274 - int open_count;
75275 + local_t open_count;
75276 struct codec_info *codec_info;
75277
75278 /* mutex serializes concurrent access to the device
75279 @@ -752,7 +752,7 @@ static int onyx_open(struct codec_info_i
75280 struct onyx *onyx = cii->codec_data;
75281
75282 mutex_lock(&onyx->mutex);
75283 - onyx->open_count++;
75284 + local_inc(&onyx->open_count);
75285 mutex_unlock(&onyx->mutex);
75286
75287 return 0;
75288 @@ -764,8 +764,7 @@ static int onyx_close(struct codec_info_
75289 struct onyx *onyx = cii->codec_data;
75290
75291 mutex_lock(&onyx->mutex);
75292 - onyx->open_count--;
75293 - if (!onyx->open_count)
75294 + if (local_dec_and_test(&onyx->open_count))
75295 onyx->spdif_locked = onyx->analog_locked = 0;
75296 mutex_unlock(&onyx->mutex);
75297
75298 diff -urNp linux-2.6.32.45/sound/aoa/codecs/onyx.h linux-2.6.32.45/sound/aoa/codecs/onyx.h
75299 --- linux-2.6.32.45/sound/aoa/codecs/onyx.h 2011-03-27 14:31:47.000000000 -0400
75300 +++ linux-2.6.32.45/sound/aoa/codecs/onyx.h 2011-04-17 15:56:46.000000000 -0400
75301 @@ -11,6 +11,7 @@
75302 #include <linux/i2c.h>
75303 #include <asm/pmac_low_i2c.h>
75304 #include <asm/prom.h>
75305 +#include <asm/local.h>
75306
75307 /* PCM3052 register definitions */
75308
75309 diff -urNp linux-2.6.32.45/sound/core/seq/seq_device.c linux-2.6.32.45/sound/core/seq/seq_device.c
75310 --- linux-2.6.32.45/sound/core/seq/seq_device.c 2011-03-27 14:31:47.000000000 -0400
75311 +++ linux-2.6.32.45/sound/core/seq/seq_device.c 2011-08-05 20:33:55.000000000 -0400
75312 @@ -63,7 +63,7 @@ struct ops_list {
75313 int argsize; /* argument size */
75314
75315 /* operators */
75316 - struct snd_seq_dev_ops ops;
75317 + struct snd_seq_dev_ops *ops;
75318
75319 /* registred devices */
75320 struct list_head dev_list; /* list of devices */
75321 @@ -332,7 +332,7 @@ int snd_seq_device_register_driver(char
75322
75323 mutex_lock(&ops->reg_mutex);
75324 /* copy driver operators */
75325 - ops->ops = *entry;
75326 + ops->ops = entry;
75327 ops->driver |= DRIVER_LOADED;
75328 ops->argsize = argsize;
75329
75330 @@ -462,7 +462,7 @@ static int init_device(struct snd_seq_de
75331 dev->name, ops->id, ops->argsize, dev->argsize);
75332 return -EINVAL;
75333 }
75334 - if (ops->ops.init_device(dev) >= 0) {
75335 + if (ops->ops->init_device(dev) >= 0) {
75336 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
75337 ops->num_init_devices++;
75338 } else {
75339 @@ -489,7 +489,7 @@ static int free_device(struct snd_seq_de
75340 dev->name, ops->id, ops->argsize, dev->argsize);
75341 return -EINVAL;
75342 }
75343 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
75344 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
75345 dev->status = SNDRV_SEQ_DEVICE_FREE;
75346 dev->driver_data = NULL;
75347 ops->num_init_devices--;
75348 diff -urNp linux-2.6.32.45/sound/drivers/mts64.c linux-2.6.32.45/sound/drivers/mts64.c
75349 --- linux-2.6.32.45/sound/drivers/mts64.c 2011-03-27 14:31:47.000000000 -0400
75350 +++ linux-2.6.32.45/sound/drivers/mts64.c 2011-04-17 15:56:46.000000000 -0400
75351 @@ -27,6 +27,7 @@
75352 #include <sound/initval.h>
75353 #include <sound/rawmidi.h>
75354 #include <sound/control.h>
75355 +#include <asm/local.h>
75356
75357 #define CARD_NAME "Miditerminal 4140"
75358 #define DRIVER_NAME "MTS64"
75359 @@ -65,7 +66,7 @@ struct mts64 {
75360 struct pardevice *pardev;
75361 int pardev_claimed;
75362
75363 - int open_count;
75364 + local_t open_count;
75365 int current_midi_output_port;
75366 int current_midi_input_port;
75367 u8 mode[MTS64_NUM_INPUT_PORTS];
75368 @@ -695,7 +696,7 @@ static int snd_mts64_rawmidi_open(struct
75369 {
75370 struct mts64 *mts = substream->rmidi->private_data;
75371
75372 - if (mts->open_count == 0) {
75373 + if (local_read(&mts->open_count) == 0) {
75374 /* We don't need a spinlock here, because this is just called
75375 if the device has not been opened before.
75376 So there aren't any IRQs from the device */
75377 @@ -703,7 +704,7 @@ static int snd_mts64_rawmidi_open(struct
75378
75379 msleep(50);
75380 }
75381 - ++(mts->open_count);
75382 + local_inc(&mts->open_count);
75383
75384 return 0;
75385 }
75386 @@ -713,8 +714,7 @@ static int snd_mts64_rawmidi_close(struc
75387 struct mts64 *mts = substream->rmidi->private_data;
75388 unsigned long flags;
75389
75390 - --(mts->open_count);
75391 - if (mts->open_count == 0) {
75392 + if (local_dec_return(&mts->open_count) == 0) {
75393 /* We need the spinlock_irqsave here because we can still
75394 have IRQs at this point */
75395 spin_lock_irqsave(&mts->lock, flags);
75396 @@ -723,8 +723,8 @@ static int snd_mts64_rawmidi_close(struc
75397
75398 msleep(500);
75399
75400 - } else if (mts->open_count < 0)
75401 - mts->open_count = 0;
75402 + } else if (local_read(&mts->open_count) < 0)
75403 + local_set(&mts->open_count, 0);
75404
75405 return 0;
75406 }
75407 diff -urNp linux-2.6.32.45/sound/drivers/opl4/opl4_lib.c linux-2.6.32.45/sound/drivers/opl4/opl4_lib.c
75408 --- linux-2.6.32.45/sound/drivers/opl4/opl4_lib.c 2011-03-27 14:31:47.000000000 -0400
75409 +++ linux-2.6.32.45/sound/drivers/opl4/opl4_lib.c 2011-08-05 20:33:55.000000000 -0400
75410 @@ -27,7 +27,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@
75411 MODULE_DESCRIPTION("OPL4 driver");
75412 MODULE_LICENSE("GPL");
75413
75414 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
75415 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
75416 {
75417 int timeout = 10;
75418 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
75419 diff -urNp linux-2.6.32.45/sound/drivers/portman2x4.c linux-2.6.32.45/sound/drivers/portman2x4.c
75420 --- linux-2.6.32.45/sound/drivers/portman2x4.c 2011-03-27 14:31:47.000000000 -0400
75421 +++ linux-2.6.32.45/sound/drivers/portman2x4.c 2011-04-17 15:56:46.000000000 -0400
75422 @@ -46,6 +46,7 @@
75423 #include <sound/initval.h>
75424 #include <sound/rawmidi.h>
75425 #include <sound/control.h>
75426 +#include <asm/local.h>
75427
75428 #define CARD_NAME "Portman 2x4"
75429 #define DRIVER_NAME "portman"
75430 @@ -83,7 +84,7 @@ struct portman {
75431 struct pardevice *pardev;
75432 int pardev_claimed;
75433
75434 - int open_count;
75435 + local_t open_count;
75436 int mode[PORTMAN_NUM_INPUT_PORTS];
75437 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
75438 };
75439 diff -urNp linux-2.6.32.45/sound/isa/cmi8330.c linux-2.6.32.45/sound/isa/cmi8330.c
75440 --- linux-2.6.32.45/sound/isa/cmi8330.c 2011-03-27 14:31:47.000000000 -0400
75441 +++ linux-2.6.32.45/sound/isa/cmi8330.c 2011-08-23 21:22:32.000000000 -0400
75442 @@ -173,7 +173,7 @@ struct snd_cmi8330 {
75443
75444 struct snd_pcm *pcm;
75445 struct snd_cmi8330_stream {
75446 - struct snd_pcm_ops ops;
75447 + snd_pcm_ops_no_const ops;
75448 snd_pcm_open_callback_t open;
75449 void *private_data; /* sb or wss */
75450 } streams[2];
75451 diff -urNp linux-2.6.32.45/sound/oss/sb_audio.c linux-2.6.32.45/sound/oss/sb_audio.c
75452 --- linux-2.6.32.45/sound/oss/sb_audio.c 2011-03-27 14:31:47.000000000 -0400
75453 +++ linux-2.6.32.45/sound/oss/sb_audio.c 2011-04-17 15:56:46.000000000 -0400
75454 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
75455 buf16 = (signed short *)(localbuf + localoffs);
75456 while (c)
75457 {
75458 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
75459 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
75460 if (copy_from_user(lbuf8,
75461 userbuf+useroffs + p,
75462 locallen))
75463 diff -urNp linux-2.6.32.45/sound/oss/swarm_cs4297a.c linux-2.6.32.45/sound/oss/swarm_cs4297a.c
75464 --- linux-2.6.32.45/sound/oss/swarm_cs4297a.c 2011-03-27 14:31:47.000000000 -0400
75465 +++ linux-2.6.32.45/sound/oss/swarm_cs4297a.c 2011-04-17 15:56:46.000000000 -0400
75466 @@ -2577,7 +2577,6 @@ static int __init cs4297a_init(void)
75467 {
75468 struct cs4297a_state *s;
75469 u32 pwr, id;
75470 - mm_segment_t fs;
75471 int rval;
75472 #ifndef CONFIG_BCM_CS4297A_CSWARM
75473 u64 cfg;
75474 @@ -2667,22 +2666,23 @@ static int __init cs4297a_init(void)
75475 if (!rval) {
75476 char *sb1250_duart_present;
75477
75478 +#if 0
75479 + mm_segment_t fs;
75480 fs = get_fs();
75481 set_fs(KERNEL_DS);
75482 -#if 0
75483 val = SOUND_MASK_LINE;
75484 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
75485 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
75486 val = initvol[i].vol;
75487 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
75488 }
75489 + set_fs(fs);
75490 // cs4297a_write_ac97(s, 0x18, 0x0808);
75491 #else
75492 // cs4297a_write_ac97(s, 0x5e, 0x180);
75493 cs4297a_write_ac97(s, 0x02, 0x0808);
75494 cs4297a_write_ac97(s, 0x18, 0x0808);
75495 #endif
75496 - set_fs(fs);
75497
75498 list_add(&s->list, &cs4297a_devs);
75499
75500 diff -urNp linux-2.6.32.45/sound/pci/ac97/ac97_codec.c linux-2.6.32.45/sound/pci/ac97/ac97_codec.c
75501 --- linux-2.6.32.45/sound/pci/ac97/ac97_codec.c 2011-03-27 14:31:47.000000000 -0400
75502 +++ linux-2.6.32.45/sound/pci/ac97/ac97_codec.c 2011-04-17 15:56:46.000000000 -0400
75503 @@ -1952,7 +1952,7 @@ static int snd_ac97_dev_disconnect(struc
75504 }
75505
75506 /* build_ops to do nothing */
75507 -static struct snd_ac97_build_ops null_build_ops;
75508 +static const struct snd_ac97_build_ops null_build_ops;
75509
75510 #ifdef CONFIG_SND_AC97_POWER_SAVE
75511 static void do_update_power(struct work_struct *work)
75512 diff -urNp linux-2.6.32.45/sound/pci/ac97/ac97_patch.c linux-2.6.32.45/sound/pci/ac97/ac97_patch.c
75513 --- linux-2.6.32.45/sound/pci/ac97/ac97_patch.c 2011-03-27 14:31:47.000000000 -0400
75514 +++ linux-2.6.32.45/sound/pci/ac97/ac97_patch.c 2011-04-23 12:56:12.000000000 -0400
75515 @@ -371,7 +371,7 @@ static int patch_yamaha_ymf743_build_spd
75516 return 0;
75517 }
75518
75519 -static struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
75520 +static const struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
75521 .build_spdif = patch_yamaha_ymf743_build_spdif,
75522 .build_3d = patch_yamaha_ymf7x3_3d,
75523 };
75524 @@ -455,7 +455,7 @@ static int patch_yamaha_ymf753_post_spdi
75525 return 0;
75526 }
75527
75528 -static struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
75529 +static const struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
75530 .build_3d = patch_yamaha_ymf7x3_3d,
75531 .build_post_spdif = patch_yamaha_ymf753_post_spdif
75532 };
75533 @@ -502,7 +502,7 @@ static int patch_wolfson_wm9703_specific
75534 return 0;
75535 }
75536
75537 -static struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
75538 +static const struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
75539 .build_specific = patch_wolfson_wm9703_specific,
75540 };
75541
75542 @@ -533,7 +533,7 @@ static int patch_wolfson_wm9704_specific
75543 return 0;
75544 }
75545
75546 -static struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
75547 +static const struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
75548 .build_specific = patch_wolfson_wm9704_specific,
75549 };
75550
75551 @@ -555,7 +555,7 @@ static int patch_wolfson_wm9705_specific
75552 return 0;
75553 }
75554
75555 -static struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
75556 +static const struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
75557 .build_specific = patch_wolfson_wm9705_specific,
75558 };
75559
75560 @@ -692,7 +692,7 @@ static int patch_wolfson_wm9711_specific
75561 return 0;
75562 }
75563
75564 -static struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
75565 +static const struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
75566 .build_specific = patch_wolfson_wm9711_specific,
75567 };
75568
75569 @@ -886,7 +886,7 @@ static void patch_wolfson_wm9713_resume
75570 }
75571 #endif
75572
75573 -static struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
75574 +static const struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
75575 .build_specific = patch_wolfson_wm9713_specific,
75576 .build_3d = patch_wolfson_wm9713_3d,
75577 #ifdef CONFIG_PM
75578 @@ -991,7 +991,7 @@ static int patch_sigmatel_stac97xx_speci
75579 return 0;
75580 }
75581
75582 -static struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
75583 +static const struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
75584 .build_3d = patch_sigmatel_stac9700_3d,
75585 .build_specific = patch_sigmatel_stac97xx_specific
75586 };
75587 @@ -1038,7 +1038,7 @@ static int patch_sigmatel_stac9708_speci
75588 return patch_sigmatel_stac97xx_specific(ac97);
75589 }
75590
75591 -static struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
75592 +static const struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
75593 .build_3d = patch_sigmatel_stac9708_3d,
75594 .build_specific = patch_sigmatel_stac9708_specific
75595 };
75596 @@ -1267,7 +1267,7 @@ static int patch_sigmatel_stac9758_speci
75597 return 0;
75598 }
75599
75600 -static struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
75601 +static const struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
75602 .build_3d = patch_sigmatel_stac9700_3d,
75603 .build_specific = patch_sigmatel_stac9758_specific
75604 };
75605 @@ -1342,7 +1342,7 @@ static int patch_cirrus_build_spdif(stru
75606 return 0;
75607 }
75608
75609 -static struct snd_ac97_build_ops patch_cirrus_ops = {
75610 +static const struct snd_ac97_build_ops patch_cirrus_ops = {
75611 .build_spdif = patch_cirrus_build_spdif
75612 };
75613
75614 @@ -1399,7 +1399,7 @@ static int patch_conexant_build_spdif(st
75615 return 0;
75616 }
75617
75618 -static struct snd_ac97_build_ops patch_conexant_ops = {
75619 +static const struct snd_ac97_build_ops patch_conexant_ops = {
75620 .build_spdif = patch_conexant_build_spdif
75621 };
75622
75623 @@ -1575,7 +1575,7 @@ static void patch_ad1881_chained(struct
75624 }
75625 }
75626
75627 -static struct snd_ac97_build_ops patch_ad1881_build_ops = {
75628 +static const struct snd_ac97_build_ops patch_ad1881_build_ops = {
75629 #ifdef CONFIG_PM
75630 .resume = ad18xx_resume
75631 #endif
75632 @@ -1662,7 +1662,7 @@ static int patch_ad1885_specific(struct
75633 return 0;
75634 }
75635
75636 -static struct snd_ac97_build_ops patch_ad1885_build_ops = {
75637 +static const struct snd_ac97_build_ops patch_ad1885_build_ops = {
75638 .build_specific = &patch_ad1885_specific,
75639 #ifdef CONFIG_PM
75640 .resume = ad18xx_resume
75641 @@ -1689,7 +1689,7 @@ static int patch_ad1886_specific(struct
75642 return 0;
75643 }
75644
75645 -static struct snd_ac97_build_ops patch_ad1886_build_ops = {
75646 +static const struct snd_ac97_build_ops patch_ad1886_build_ops = {
75647 .build_specific = &patch_ad1886_specific,
75648 #ifdef CONFIG_PM
75649 .resume = ad18xx_resume
75650 @@ -1896,7 +1896,7 @@ static int patch_ad1981a_specific(struct
75651 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
75652 }
75653
75654 -static struct snd_ac97_build_ops patch_ad1981a_build_ops = {
75655 +static const struct snd_ac97_build_ops patch_ad1981a_build_ops = {
75656 .build_post_spdif = patch_ad198x_post_spdif,
75657 .build_specific = patch_ad1981a_specific,
75658 #ifdef CONFIG_PM
75659 @@ -1951,7 +1951,7 @@ static int patch_ad1981b_specific(struct
75660 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
75661 }
75662
75663 -static struct snd_ac97_build_ops patch_ad1981b_build_ops = {
75664 +static const struct snd_ac97_build_ops patch_ad1981b_build_ops = {
75665 .build_post_spdif = patch_ad198x_post_spdif,
75666 .build_specific = patch_ad1981b_specific,
75667 #ifdef CONFIG_PM
75668 @@ -2090,7 +2090,7 @@ static int patch_ad1888_specific(struct
75669 return patch_build_controls(ac97, snd_ac97_ad1888_controls, ARRAY_SIZE(snd_ac97_ad1888_controls));
75670 }
75671
75672 -static struct snd_ac97_build_ops patch_ad1888_build_ops = {
75673 +static const struct snd_ac97_build_ops patch_ad1888_build_ops = {
75674 .build_post_spdif = patch_ad198x_post_spdif,
75675 .build_specific = patch_ad1888_specific,
75676 #ifdef CONFIG_PM
75677 @@ -2139,7 +2139,7 @@ static int patch_ad1980_specific(struct
75678 return patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1);
75679 }
75680
75681 -static struct snd_ac97_build_ops patch_ad1980_build_ops = {
75682 +static const struct snd_ac97_build_ops patch_ad1980_build_ops = {
75683 .build_post_spdif = patch_ad198x_post_spdif,
75684 .build_specific = patch_ad1980_specific,
75685 #ifdef CONFIG_PM
75686 @@ -2254,7 +2254,7 @@ static int patch_ad1985_specific(struct
75687 ARRAY_SIZE(snd_ac97_ad1985_controls));
75688 }
75689
75690 -static struct snd_ac97_build_ops patch_ad1985_build_ops = {
75691 +static const struct snd_ac97_build_ops patch_ad1985_build_ops = {
75692 .build_post_spdif = patch_ad198x_post_spdif,
75693 .build_specific = patch_ad1985_specific,
75694 #ifdef CONFIG_PM
75695 @@ -2546,7 +2546,7 @@ static int patch_ad1986_specific(struct
75696 ARRAY_SIZE(snd_ac97_ad1985_controls));
75697 }
75698
75699 -static struct snd_ac97_build_ops patch_ad1986_build_ops = {
75700 +static const struct snd_ac97_build_ops patch_ad1986_build_ops = {
75701 .build_post_spdif = patch_ad198x_post_spdif,
75702 .build_specific = patch_ad1986_specific,
75703 #ifdef CONFIG_PM
75704 @@ -2651,7 +2651,7 @@ static int patch_alc650_specific(struct
75705 return 0;
75706 }
75707
75708 -static struct snd_ac97_build_ops patch_alc650_ops = {
75709 +static const struct snd_ac97_build_ops patch_alc650_ops = {
75710 .build_specific = patch_alc650_specific,
75711 .update_jacks = alc650_update_jacks
75712 };
75713 @@ -2803,7 +2803,7 @@ static int patch_alc655_specific(struct
75714 return 0;
75715 }
75716
75717 -static struct snd_ac97_build_ops patch_alc655_ops = {
75718 +static const struct snd_ac97_build_ops patch_alc655_ops = {
75719 .build_specific = patch_alc655_specific,
75720 .update_jacks = alc655_update_jacks
75721 };
75722 @@ -2915,7 +2915,7 @@ static int patch_alc850_specific(struct
75723 return 0;
75724 }
75725
75726 -static struct snd_ac97_build_ops patch_alc850_ops = {
75727 +static const struct snd_ac97_build_ops patch_alc850_ops = {
75728 .build_specific = patch_alc850_specific,
75729 .update_jacks = alc850_update_jacks
75730 };
75731 @@ -2977,7 +2977,7 @@ static int patch_cm9738_specific(struct
75732 return patch_build_controls(ac97, snd_ac97_cm9738_controls, ARRAY_SIZE(snd_ac97_cm9738_controls));
75733 }
75734
75735 -static struct snd_ac97_build_ops patch_cm9738_ops = {
75736 +static const struct snd_ac97_build_ops patch_cm9738_ops = {
75737 .build_specific = patch_cm9738_specific,
75738 .update_jacks = cm9738_update_jacks
75739 };
75740 @@ -3068,7 +3068,7 @@ static int patch_cm9739_post_spdif(struc
75741 return patch_build_controls(ac97, snd_ac97_cm9739_controls_spdif, ARRAY_SIZE(snd_ac97_cm9739_controls_spdif));
75742 }
75743
75744 -static struct snd_ac97_build_ops patch_cm9739_ops = {
75745 +static const struct snd_ac97_build_ops patch_cm9739_ops = {
75746 .build_specific = patch_cm9739_specific,
75747 .build_post_spdif = patch_cm9739_post_spdif,
75748 .update_jacks = cm9739_update_jacks
75749 @@ -3242,7 +3242,7 @@ static int patch_cm9761_specific(struct
75750 return patch_build_controls(ac97, snd_ac97_cm9761_controls, ARRAY_SIZE(snd_ac97_cm9761_controls));
75751 }
75752
75753 -static struct snd_ac97_build_ops patch_cm9761_ops = {
75754 +static const struct snd_ac97_build_ops patch_cm9761_ops = {
75755 .build_specific = patch_cm9761_specific,
75756 .build_post_spdif = patch_cm9761_post_spdif,
75757 .update_jacks = cm9761_update_jacks
75758 @@ -3338,7 +3338,7 @@ static int patch_cm9780_specific(struct
75759 return patch_build_controls(ac97, cm9780_controls, ARRAY_SIZE(cm9780_controls));
75760 }
75761
75762 -static struct snd_ac97_build_ops patch_cm9780_ops = {
75763 +static const struct snd_ac97_build_ops patch_cm9780_ops = {
75764 .build_specific = patch_cm9780_specific,
75765 .build_post_spdif = patch_cm9761_post_spdif /* identical with CM9761 */
75766 };
75767 @@ -3458,7 +3458,7 @@ static int patch_vt1616_specific(struct
75768 return 0;
75769 }
75770
75771 -static struct snd_ac97_build_ops patch_vt1616_ops = {
75772 +static const struct snd_ac97_build_ops patch_vt1616_ops = {
75773 .build_specific = patch_vt1616_specific
75774 };
75775
75776 @@ -3812,7 +3812,7 @@ static int patch_it2646_specific(struct
75777 return 0;
75778 }
75779
75780 -static struct snd_ac97_build_ops patch_it2646_ops = {
75781 +static const struct snd_ac97_build_ops patch_it2646_ops = {
75782 .build_specific = patch_it2646_specific,
75783 .update_jacks = it2646_update_jacks
75784 };
75785 @@ -3846,7 +3846,7 @@ static int patch_si3036_specific(struct
75786 return 0;
75787 }
75788
75789 -static struct snd_ac97_build_ops patch_si3036_ops = {
75790 +static const struct snd_ac97_build_ops patch_si3036_ops = {
75791 .build_specific = patch_si3036_specific,
75792 };
75793
75794 @@ -3913,7 +3913,7 @@ static int patch_ucb1400_specific(struct
75795 return 0;
75796 }
75797
75798 -static struct snd_ac97_build_ops patch_ucb1400_ops = {
75799 +static const struct snd_ac97_build_ops patch_ucb1400_ops = {
75800 .build_specific = patch_ucb1400_specific,
75801 };
75802
75803 diff -urNp linux-2.6.32.45/sound/pci/hda/hda_codec.h linux-2.6.32.45/sound/pci/hda/hda_codec.h
75804 --- linux-2.6.32.45/sound/pci/hda/hda_codec.h 2011-03-27 14:31:47.000000000 -0400
75805 +++ linux-2.6.32.45/sound/pci/hda/hda_codec.h 2011-08-23 21:22:32.000000000 -0400
75806 @@ -580,7 +580,7 @@ struct hda_bus_ops {
75807 /* notify power-up/down from codec to controller */
75808 void (*pm_notify)(struct hda_bus *bus);
75809 #endif
75810 -};
75811 +} __no_const;
75812
75813 /* template to pass to the bus constructor */
75814 struct hda_bus_template {
75815 @@ -675,6 +675,7 @@ struct hda_codec_ops {
75816 int (*check_power_status)(struct hda_codec *codec, hda_nid_t nid);
75817 #endif
75818 };
75819 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
75820
75821 /* record for amp information cache */
75822 struct hda_cache_head {
75823 @@ -705,7 +706,7 @@ struct hda_pcm_ops {
75824 struct snd_pcm_substream *substream);
75825 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
75826 struct snd_pcm_substream *substream);
75827 -};
75828 +} __no_const;
75829
75830 /* PCM information for each substream */
75831 struct hda_pcm_stream {
75832 @@ -760,7 +761,7 @@ struct hda_codec {
75833 const char *modelname; /* model name for preset */
75834
75835 /* set by patch */
75836 - struct hda_codec_ops patch_ops;
75837 + hda_codec_ops_no_const patch_ops;
75838
75839 /* PCM to create, set by patch_ops.build_pcms callback */
75840 unsigned int num_pcms;
75841 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_atihdmi.c linux-2.6.32.45/sound/pci/hda/patch_atihdmi.c
75842 --- linux-2.6.32.45/sound/pci/hda/patch_atihdmi.c 2011-03-27 14:31:47.000000000 -0400
75843 +++ linux-2.6.32.45/sound/pci/hda/patch_atihdmi.c 2011-08-05 20:33:55.000000000 -0400
75844 @@ -177,7 +177,7 @@ static int patch_atihdmi(struct hda_code
75845 */
75846 spec->multiout.dig_out_nid = CVT_NID;
75847
75848 - codec->patch_ops = atihdmi_patch_ops;
75849 + memcpy((void *)&codec->patch_ops, &atihdmi_patch_ops, sizeof(atihdmi_patch_ops));
75850
75851 return 0;
75852 }
75853 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_intelhdmi.c linux-2.6.32.45/sound/pci/hda/patch_intelhdmi.c
75854 --- linux-2.6.32.45/sound/pci/hda/patch_intelhdmi.c 2011-03-27 14:31:47.000000000 -0400
75855 +++ linux-2.6.32.45/sound/pci/hda/patch_intelhdmi.c 2011-08-05 20:33:55.000000000 -0400
75856 @@ -511,10 +511,10 @@ static void hdmi_non_intrinsic_event(str
75857 cp_ready);
75858
75859 /* TODO */
75860 - if (cp_state)
75861 - ;
75862 - if (cp_ready)
75863 - ;
75864 + if (cp_state) {
75865 + }
75866 + if (cp_ready) {
75867 + }
75868 }
75869
75870
75871 @@ -656,7 +656,7 @@ static int do_patch_intel_hdmi(struct hd
75872 spec->multiout.dig_out_nid = cvt_nid;
75873
75874 codec->spec = spec;
75875 - codec->patch_ops = intel_hdmi_patch_ops;
75876 + memcpy((void *)&codec->patch_ops, &intel_hdmi_patch_ops, sizeof(intel_hdmi_patch_ops));
75877
75878 snd_hda_eld_proc_new(codec, &spec->sink_eld);
75879
75880 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_nvhdmi.c linux-2.6.32.45/sound/pci/hda/patch_nvhdmi.c
75881 --- linux-2.6.32.45/sound/pci/hda/patch_nvhdmi.c 2011-03-27 14:31:47.000000000 -0400
75882 +++ linux-2.6.32.45/sound/pci/hda/patch_nvhdmi.c 2011-08-05 20:33:55.000000000 -0400
75883 @@ -367,7 +367,7 @@ static int patch_nvhdmi_8ch(struct hda_c
75884 spec->multiout.max_channels = 8;
75885 spec->multiout.dig_out_nid = Nv_Master_Convert_nid;
75886
75887 - codec->patch_ops = nvhdmi_patch_ops_8ch;
75888 + memcpy((void *)&codec->patch_ops, &nvhdmi_patch_ops_8ch, sizeof(nvhdmi_patch_ops_8ch));
75889
75890 return 0;
75891 }
75892 @@ -386,7 +386,7 @@ static int patch_nvhdmi_2ch(struct hda_c
75893 spec->multiout.max_channels = 2;
75894 spec->multiout.dig_out_nid = Nv_Master_Convert_nid;
75895
75896 - codec->patch_ops = nvhdmi_patch_ops_2ch;
75897 + memcpy((void *)&codec->patch_ops, &nvhdmi_patch_ops_2ch, sizeof(nvhdmi_patch_ops_2ch));
75898
75899 return 0;
75900 }
75901 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_sigmatel.c linux-2.6.32.45/sound/pci/hda/patch_sigmatel.c
75902 --- linux-2.6.32.45/sound/pci/hda/patch_sigmatel.c 2011-06-25 12:55:35.000000000 -0400
75903 +++ linux-2.6.32.45/sound/pci/hda/patch_sigmatel.c 2011-08-23 21:22:32.000000000 -0400
75904 @@ -5220,7 +5220,7 @@ again:
75905 snd_hda_codec_write_cache(codec, nid, 0,
75906 AC_VERB_SET_CONNECT_SEL, num_dacs);
75907
75908 - codec->patch_ops = stac92xx_patch_ops;
75909 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
75910
75911 codec->proc_widget_hook = stac92hd_proc_hook;
75912
75913 @@ -5294,7 +5294,7 @@ static int patch_stac92hd71bxx(struct hd
75914 return -ENOMEM;
75915
75916 codec->spec = spec;
75917 - codec->patch_ops = stac92xx_patch_ops;
75918 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
75919 spec->num_pins = STAC92HD71BXX_NUM_PINS;
75920 switch (codec->vendor_id) {
75921 case 0x111d76b6:
75922 diff -urNp linux-2.6.32.45/sound/pci/ice1712/ice1712.h linux-2.6.32.45/sound/pci/ice1712/ice1712.h
75923 --- linux-2.6.32.45/sound/pci/ice1712/ice1712.h 2011-03-27 14:31:47.000000000 -0400
75924 +++ linux-2.6.32.45/sound/pci/ice1712/ice1712.h 2011-08-05 20:33:55.000000000 -0400
75925 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
75926 unsigned int mask_flags; /* total mask bits */
75927 struct snd_akm4xxx_ops {
75928 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
75929 - } ops;
75930 + } __no_const ops;
75931 };
75932
75933 struct snd_ice1712_spdif {
75934 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
75935 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
75936 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
75937 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
75938 - } ops;
75939 + } __no_const ops;
75940 };
75941
75942
75943 diff -urNp linux-2.6.32.45/sound/pci/intel8x0m.c linux-2.6.32.45/sound/pci/intel8x0m.c
75944 --- linux-2.6.32.45/sound/pci/intel8x0m.c 2011-03-27 14:31:47.000000000 -0400
75945 +++ linux-2.6.32.45/sound/pci/intel8x0m.c 2011-04-23 12:56:12.000000000 -0400
75946 @@ -1264,7 +1264,7 @@ static struct shortname_table {
75947 { 0x5455, "ALi M5455" },
75948 { 0x746d, "AMD AMD8111" },
75949 #endif
75950 - { 0 },
75951 + { 0, },
75952 };
75953
75954 static int __devinit snd_intel8x0m_probe(struct pci_dev *pci,
75955 diff -urNp linux-2.6.32.45/sound/pci/ymfpci/ymfpci_main.c linux-2.6.32.45/sound/pci/ymfpci/ymfpci_main.c
75956 --- linux-2.6.32.45/sound/pci/ymfpci/ymfpci_main.c 2011-03-27 14:31:47.000000000 -0400
75957 +++ linux-2.6.32.45/sound/pci/ymfpci/ymfpci_main.c 2011-05-04 17:56:28.000000000 -0400
75958 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct sn
75959 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
75960 break;
75961 }
75962 - if (atomic_read(&chip->interrupt_sleep_count)) {
75963 - atomic_set(&chip->interrupt_sleep_count, 0);
75964 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
75965 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
75966 wake_up(&chip->interrupt_sleep);
75967 }
75968 __end:
75969 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct s
75970 continue;
75971 init_waitqueue_entry(&wait, current);
75972 add_wait_queue(&chip->interrupt_sleep, &wait);
75973 - atomic_inc(&chip->interrupt_sleep_count);
75974 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
75975 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
75976 remove_wait_queue(&chip->interrupt_sleep, &wait);
75977 }
75978 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(
75979 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
75980 spin_unlock(&chip->reg_lock);
75981
75982 - if (atomic_read(&chip->interrupt_sleep_count)) {
75983 - atomic_set(&chip->interrupt_sleep_count, 0);
75984 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
75985 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
75986 wake_up(&chip->interrupt_sleep);
75987 }
75988 }
75989 @@ -2369,7 +2369,7 @@ int __devinit snd_ymfpci_create(struct s
75990 spin_lock_init(&chip->reg_lock);
75991 spin_lock_init(&chip->voice_lock);
75992 init_waitqueue_head(&chip->interrupt_sleep);
75993 - atomic_set(&chip->interrupt_sleep_count, 0);
75994 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
75995 chip->card = card;
75996 chip->pci = pci;
75997 chip->irq = -1;
75998 diff -urNp linux-2.6.32.45/sound/soc/soc-core.c linux-2.6.32.45/sound/soc/soc-core.c
75999 --- linux-2.6.32.45/sound/soc/soc-core.c 2011-03-27 14:31:47.000000000 -0400
76000 +++ linux-2.6.32.45/sound/soc/soc-core.c 2011-08-23 21:22:32.000000000 -0400
76001 @@ -609,7 +609,7 @@ static int soc_pcm_trigger(struct snd_pc
76002 }
76003
76004 /* ASoC PCM operations */
76005 -static struct snd_pcm_ops soc_pcm_ops = {
76006 +static snd_pcm_ops_no_const soc_pcm_ops = {
76007 .open = soc_pcm_open,
76008 .close = soc_codec_close,
76009 .hw_params = soc_pcm_hw_params,
76010 diff -urNp linux-2.6.32.45/sound/usb/usbaudio.c linux-2.6.32.45/sound/usb/usbaudio.c
76011 --- linux-2.6.32.45/sound/usb/usbaudio.c 2011-03-27 14:31:47.000000000 -0400
76012 +++ linux-2.6.32.45/sound/usb/usbaudio.c 2011-08-05 20:33:55.000000000 -0400
76013 @@ -963,12 +963,12 @@ static int snd_usb_pcm_playback_trigger(
76014 switch (cmd) {
76015 case SNDRV_PCM_TRIGGER_START:
76016 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
76017 - subs->ops.prepare = prepare_playback_urb;
76018 + *(void **)&subs->ops.prepare = prepare_playback_urb;
76019 return 0;
76020 case SNDRV_PCM_TRIGGER_STOP:
76021 return deactivate_urbs(subs, 0, 0);
76022 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
76023 - subs->ops.prepare = prepare_nodata_playback_urb;
76024 + *(void **)&subs->ops.prepare = prepare_nodata_playback_urb;
76025 return 0;
76026 default:
76027 return -EINVAL;
76028 @@ -985,15 +985,15 @@ static int snd_usb_pcm_capture_trigger(s
76029
76030 switch (cmd) {
76031 case SNDRV_PCM_TRIGGER_START:
76032 - subs->ops.retire = retire_capture_urb;
76033 + *(void **)&subs->ops.retire = retire_capture_urb;
76034 return start_urbs(subs, substream->runtime);
76035 case SNDRV_PCM_TRIGGER_STOP:
76036 return deactivate_urbs(subs, 0, 0);
76037 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
76038 - subs->ops.retire = retire_paused_capture_urb;
76039 + *(void **)&subs->ops.retire = retire_paused_capture_urb;
76040 return 0;
76041 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
76042 - subs->ops.retire = retire_capture_urb;
76043 + *(void **)&subs->ops.retire = retire_capture_urb;
76044 return 0;
76045 default:
76046 return -EINVAL;
76047 @@ -1542,7 +1542,7 @@ static int snd_usb_pcm_prepare(struct sn
76048 /* for playback, submit the URBs now; otherwise, the first hwptr_done
76049 * updates for all URBs would happen at the same time when starting */
76050 if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) {
76051 - subs->ops.prepare = prepare_nodata_playback_urb;
76052 + *(void **)&subs->ops.prepare = prepare_nodata_playback_urb;
76053 return start_urbs(subs, runtime);
76054 } else
76055 return 0;
76056 @@ -2228,14 +2228,14 @@ static void init_substream(struct snd_us
76057 subs->direction = stream;
76058 subs->dev = as->chip->dev;
76059 if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL) {
76060 - subs->ops = audio_urb_ops[stream];
76061 + memcpy((void *)&subs->ops, &audio_urb_ops[stream], sizeof(subs->ops));
76062 } else {
76063 - subs->ops = audio_urb_ops_high_speed[stream];
76064 + memcpy((void *)&subs->ops, &audio_urb_ops_high_speed[stream], sizeof(subs->ops));
76065 switch (as->chip->usb_id) {
76066 case USB_ID(0x041e, 0x3f02): /* E-Mu 0202 USB */
76067 case USB_ID(0x041e, 0x3f04): /* E-Mu 0404 USB */
76068 case USB_ID(0x041e, 0x3f0a): /* E-Mu Tracker Pre */
76069 - subs->ops.retire_sync = retire_playback_sync_urb_hs_emu;
76070 + *(void **)&subs->ops.retire_sync = retire_playback_sync_urb_hs_emu;
76071 break;
76072 }
76073 }
76074 diff -urNp linux-2.6.32.45/tools/gcc/constify_plugin.c linux-2.6.32.45/tools/gcc/constify_plugin.c
76075 --- linux-2.6.32.45/tools/gcc/constify_plugin.c 1969-12-31 19:00:00.000000000 -0500
76076 +++ linux-2.6.32.45/tools/gcc/constify_plugin.c 2011-08-23 22:33:42.000000000 -0400
76077 @@ -0,0 +1,258 @@
76078 +/*
76079 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
76080 + * Licensed under the GPL v2, or (at your option) v3
76081 + *
76082 + * This gcc plugin constifies all structures which contain only function pointers and const fields.
76083 + *
76084 + * Usage:
76085 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
76086 + * $ gcc -fplugin=constify_plugin.so test.c -O2
76087 + */
76088 +
76089 +#include "gcc-plugin.h"
76090 +#include "config.h"
76091 +#include "system.h"
76092 +#include "coretypes.h"
76093 +#include "tree.h"
76094 +#include "tree-pass.h"
76095 +#include "intl.h"
76096 +#include "plugin-version.h"
76097 +#include "tm.h"
76098 +#include "toplev.h"
76099 +#include "function.h"
76100 +#include "tree-flow.h"
76101 +#include "plugin.h"
76102 +
76103 +int plugin_is_GPL_compatible;
76104 +
76105 +static struct plugin_info const_plugin_info = {
76106 + .version = "20110817",
76107 + .help = "no-constify\tturn off constification\n",
76108 +};
76109 +
76110 +static bool walk_struct(tree node);
76111 +
76112 +static void deconstify_node(tree node)
76113 +{
76114 + tree field;
76115 +
76116 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
76117 + enum tree_code code = TREE_CODE(TREE_TYPE(field));
76118 + if (code == RECORD_TYPE || code == UNION_TYPE)
76119 + deconstify_node(TREE_TYPE(field));
76120 + TREE_READONLY(field) = 0;
76121 + TREE_READONLY(TREE_TYPE(field)) = 0;
76122 + }
76123 +}
76124 +
76125 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
76126 +{
76127 + tree type;
76128 +
76129 + *no_add_attrs = true;
76130 + if (TREE_CODE(*node) == FUNCTION_DECL) {
76131 + error("%qE attribute does not apply to functions", name);
76132 + return NULL_TREE;
76133 + }
76134 +
76135 + if (TREE_CODE(*node) == VAR_DECL) {
76136 + error("%qE attribute does not apply to variables", name);
76137 + return NULL_TREE;
76138 + }
76139 +
76140 + if (!DECL_P(*node)) {
76141 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
76142 + *no_add_attrs = false;
76143 + else
76144 + error("%qE attribute applies to struct and union types only", name);
76145 + return NULL_TREE;
76146 + }
76147 +
76148 + type = TREE_TYPE(*node);
76149 +
76150 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
76151 + error("%qE attribute applies to struct and union types only", name);
76152 + return NULL_TREE;
76153 + }
76154 +
76155 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type))) {
76156 + error("%qE attribute is already applied to the type", name);
76157 + return NULL_TREE;
76158 + }
76159 +
76160 + if (TREE_CODE(*node) == TYPE_DECL && !TREE_READONLY(type)) {
76161 + error("%qE attribute used on type that is not constified", name);
76162 + return NULL_TREE;
76163 + }
76164 +
76165 + if (TREE_CODE(*node) == TYPE_DECL) {
76166 + TREE_TYPE(*node) = build_qualified_type(type, TYPE_QUALS(type) & ~TYPE_QUAL_CONST);
76167 + TYPE_FIELDS(TREE_TYPE(*node)) = copy_list(TYPE_FIELDS(TREE_TYPE(*node)));
76168 + deconstify_node(TREE_TYPE(*node));
76169 + return NULL_TREE;
76170 + }
76171 +
76172 + return NULL_TREE;
76173 +}
76174 +
76175 +static struct attribute_spec no_const_attr = {
76176 + .name = "no_const",
76177 + .min_length = 0,
76178 + .max_length = 0,
76179 + .decl_required = false,
76180 + .type_required = false,
76181 + .function_type_required = false,
76182 + .handler = handle_no_const_attribute
76183 +};
76184 +
76185 +static void register_attributes(void *event_data, void *data)
76186 +{
76187 + register_attribute(&no_const_attr);
76188 +}
76189 +
76190 +static void constify_node(tree node)
76191 +{
76192 + TREE_READONLY(node) = 1;
76193 +}
76194 +
76195 +static bool is_fptr(tree field)
76196 +{
76197 + tree ptr = TREE_TYPE(field);
76198 +
76199 + if (TREE_CODE(ptr) != POINTER_TYPE)
76200 + return false;
76201 +
76202 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
76203 +}
76204 +
76205 +static bool walk_struct(tree node)
76206 +{
76207 + tree field;
76208 +
76209 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
76210 + return false;
76211 +
76212 + if (TYPE_FIELDS(node) == NULL_TREE)
76213 + return false;
76214 +
76215 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
76216 + tree type = TREE_TYPE(field);
76217 + enum tree_code code = TREE_CODE(type);
76218 + if (code == RECORD_TYPE || code == UNION_TYPE) {
76219 + if (!(walk_struct(type)))
76220 + return false;
76221 + } else if (is_fptr(field) == false && !TREE_READONLY(field))
76222 + return false;
76223 + }
76224 + return true;
76225 +}
76226 +
76227 +static void finish_type(void *event_data, void *data)
76228 +{
76229 + tree node = (tree)event_data;
76230 +
76231 + if (node == NULL_TREE)
76232 + return;
76233 +
76234 + if (TREE_READONLY(node))
76235 + return;
76236 +
76237 + if (TYPE_FIELDS(node) == NULL_TREE)
76238 + return;
76239 +
76240 + if (walk_struct(node))
76241 + constify_node(node);
76242 +}
76243 +
76244 +static unsigned int check_local_variables(void);
76245 +
76246 +struct gimple_opt_pass pass_local_variable = {
76247 + {
76248 + .type = GIMPLE_PASS,
76249 + .name = "check_local_variables",
76250 + .gate = NULL,
76251 + .execute = check_local_variables,
76252 + .sub = NULL,
76253 + .next = NULL,
76254 + .static_pass_number = 0,
76255 + .tv_id = TV_NONE,
76256 + .properties_required = 0,
76257 + .properties_provided = 0,
76258 + .properties_destroyed = 0,
76259 + .todo_flags_start = 0,
76260 + .todo_flags_finish = 0
76261 + }
76262 +};
76263 +
76264 +static unsigned int check_local_variables(void)
76265 +{
76266 + tree var;
76267 + referenced_var_iterator rvi;
76268 +
76269 +#if __GNUC_MINOR__ >= 6
76270 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
76271 +#else
76272 + FOR_EACH_REFERENCED_VAR(var, rvi) {
76273 +#endif
76274 + tree type = TREE_TYPE(var);
76275 +
76276 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
76277 + continue;
76278 +
76279 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
76280 + continue;
76281 +
76282 + if (!TREE_READONLY(type))
76283 + continue;
76284 +
76285 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
76286 +// continue;
76287 +
76288 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
76289 +// continue;
76290 +
76291 + if (walk_struct(type)) {
76292 + error("constified variable %qE cannot be local", var);
76293 + return 1;
76294 + }
76295 + }
76296 + return 0;
76297 +}
76298 +
76299 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
76300 +{
76301 + const char * const plugin_name = plugin_info->base_name;
76302 + const int argc = plugin_info->argc;
76303 + const struct plugin_argument * const argv = plugin_info->argv;
76304 + int i;
76305 + bool constify = true;
76306 +
76307 + struct register_pass_info local_variable_pass_info = {
76308 + .pass = &pass_local_variable.pass,
76309 + .reference_pass_name = "*referenced_vars",
76310 + .ref_pass_instance_number = 0,
76311 + .pos_op = PASS_POS_INSERT_AFTER
76312 + };
76313 +
76314 + if (!plugin_default_version_check(version, &gcc_version)) {
76315 + error(G_("incompatible gcc/plugin versions"));
76316 + return 1;
76317 + }
76318 +
76319 + for (i = 0; i < argc; ++i) {
76320 + if (!(strcmp(argv[i].key, "no-constify"))) {
76321 + constify = false;
76322 + continue;
76323 + }
76324 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
76325 + }
76326 +
76327 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
76328 + if (constify) {
76329 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
76330 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
76331 + }
76332 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
76333 +
76334 + return 0;
76335 +}
76336 diff -urNp linux-2.6.32.45/tools/gcc/Makefile linux-2.6.32.45/tools/gcc/Makefile
76337 --- linux-2.6.32.45/tools/gcc/Makefile 1969-12-31 19:00:00.000000000 -0500
76338 +++ linux-2.6.32.45/tools/gcc/Makefile 2011-08-05 20:33:55.000000000 -0400
76339 @@ -0,0 +1,12 @@
76340 +#CC := gcc
76341 +#PLUGIN_SOURCE_FILES := pax_plugin.c
76342 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
76343 +GCCPLUGINS_DIR := $(shell $(HOSTCC) -print-file-name=plugin)
76344 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
76345 +
76346 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
76347 +
76348 +hostlibs-y := stackleak_plugin.so constify_plugin.so
76349 +always := $(hostlibs-y)
76350 +stackleak_plugin-objs := stackleak_plugin.o
76351 +constify_plugin-objs := constify_plugin.o
76352 diff -urNp linux-2.6.32.45/tools/gcc/stackleak_plugin.c linux-2.6.32.45/tools/gcc/stackleak_plugin.c
76353 --- linux-2.6.32.45/tools/gcc/stackleak_plugin.c 1969-12-31 19:00:00.000000000 -0500
76354 +++ linux-2.6.32.45/tools/gcc/stackleak_plugin.c 2011-08-23 20:24:26.000000000 -0400
76355 @@ -0,0 +1,243 @@
76356 +/*
76357 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
76358 + * Licensed under the GPL v2
76359 + *
76360 + * Note: the choice of the license means that the compilation process is
76361 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
76362 + * but for the kernel it doesn't matter since it doesn't link against
76363 + * any of the gcc libraries
76364 + *
76365 + * gcc plugin to help implement various PaX features
76366 + *
76367 + * - track lowest stack pointer
76368 + *
76369 + * TODO:
76370 + * - initialize all local variables
76371 + *
76372 + * BUGS:
76373 + * - cloned functions are instrumented twice
76374 + */
76375 +#include "gcc-plugin.h"
76376 +#include "config.h"
76377 +#include "system.h"
76378 +#include "coretypes.h"
76379 +#include "tree.h"
76380 +#include "tree-pass.h"
76381 +#include "intl.h"
76382 +#include "plugin-version.h"
76383 +#include "tm.h"
76384 +#include "toplev.h"
76385 +#include "basic-block.h"
76386 +#include "gimple.h"
76387 +//#include "expr.h" where are you...
76388 +#include "diagnostic.h"
76389 +#include "rtl.h"
76390 +#include "emit-rtl.h"
76391 +#include "function.h"
76392 +
76393 +int plugin_is_GPL_compatible;
76394 +
76395 +static int track_frame_size = -1;
76396 +static const char track_function[] = "pax_track_stack";
76397 +static bool init_locals;
76398 +
76399 +static struct plugin_info stackleak_plugin_info = {
76400 + .version = "201106030000",
76401 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
76402 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
76403 +};
76404 +
76405 +static bool gate_stackleak_track_stack(void);
76406 +static unsigned int execute_stackleak_tree_instrument(void);
76407 +static unsigned int execute_stackleak_final(void);
76408 +
76409 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
76410 + .pass = {
76411 + .type = GIMPLE_PASS,
76412 + .name = "stackleak_tree_instrument",
76413 + .gate = gate_stackleak_track_stack,
76414 + .execute = execute_stackleak_tree_instrument,
76415 + .sub = NULL,
76416 + .next = NULL,
76417 + .static_pass_number = 0,
76418 + .tv_id = TV_NONE,
76419 + .properties_required = PROP_gimple_leh | PROP_cfg,
76420 + .properties_provided = 0,
76421 + .properties_destroyed = 0,
76422 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
76423 + .todo_flags_finish = TODO_verify_stmts // | TODO_dump_func
76424 + }
76425 +};
76426 +
76427 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
76428 + .pass = {
76429 + .type = RTL_PASS,
76430 + .name = "stackleak_final",
76431 + .gate = gate_stackleak_track_stack,
76432 + .execute = execute_stackleak_final,
76433 + .sub = NULL,
76434 + .next = NULL,
76435 + .static_pass_number = 0,
76436 + .tv_id = TV_NONE,
76437 + .properties_required = 0,
76438 + .properties_provided = 0,
76439 + .properties_destroyed = 0,
76440 + .todo_flags_start = 0,
76441 + .todo_flags_finish = 0
76442 + }
76443 +};
76444 +
76445 +static bool gate_stackleak_track_stack(void)
76446 +{
76447 + return track_frame_size >= 0;
76448 +}
76449 +
76450 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi, bool before)
76451 +{
76452 + gimple call;
76453 + tree decl, type;
76454 +
76455 + // insert call to void pax_track_stack(void)
76456 + type = build_function_type_list(void_type_node, NULL_TREE);
76457 + decl = build_fn_decl(track_function, type);
76458 + DECL_ASSEMBLER_NAME(decl); // for LTO
76459 + call = gimple_build_call(decl, 0);
76460 + if (before)
76461 + gsi_insert_before(gsi, call, GSI_CONTINUE_LINKING);
76462 + else
76463 + gsi_insert_after(gsi, call, GSI_CONTINUE_LINKING);
76464 +}
76465 +
76466 +static unsigned int execute_stackleak_tree_instrument(void)
76467 +{
76468 + basic_block bb;
76469 + gimple_stmt_iterator gsi;
76470 +
76471 + // 1. loop through BBs and GIMPLE statements
76472 + FOR_EACH_BB(bb) {
76473 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
76474 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
76475 + tree decl;
76476 + gimple stmt = gsi_stmt(gsi);
76477 +
76478 + if (!is_gimple_call(stmt))
76479 + continue;
76480 + decl = gimple_call_fndecl(stmt);
76481 + if (!decl)
76482 + continue;
76483 + if (TREE_CODE(decl) != FUNCTION_DECL)
76484 + continue;
76485 + if (!DECL_BUILT_IN(decl))
76486 + continue;
76487 + if (DECL_BUILT_IN_CLASS(decl) != BUILT_IN_NORMAL)
76488 + continue;
76489 + if (DECL_FUNCTION_CODE(decl) != BUILT_IN_ALLOCA)
76490 + continue;
76491 +
76492 + // 2. insert track call after each __builtin_alloca call
76493 + stackleak_add_instrumentation(&gsi, false);
76494 +// print_node(stderr, "pax", decl, 4);
76495 + }
76496 + }
76497 +
76498 + // 3. insert track call at the beginning
76499 + bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
76500 + gsi = gsi_start_bb(bb);
76501 + stackleak_add_instrumentation(&gsi, true);
76502 +
76503 + return 0;
76504 +}
76505 +
76506 +static unsigned int execute_stackleak_final(void)
76507 +{
76508 + rtx insn;
76509 +
76510 + if (cfun->calls_alloca)
76511 + return 0;
76512 +
76513 + // 1. find pax_track_stack calls
76514 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
76515 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
76516 + rtx body;
76517 +
76518 + if (!CALL_P(insn))
76519 + continue;
76520 + body = PATTERN(insn);
76521 + if (GET_CODE(body) != CALL)
76522 + continue;
76523 + body = XEXP(body, 0);
76524 + if (GET_CODE(body) != MEM)
76525 + continue;
76526 + body = XEXP(body, 0);
76527 + if (GET_CODE(body) != SYMBOL_REF)
76528 + continue;
76529 + if (strcmp(XSTR(body, 0), track_function))
76530 + continue;
76531 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
76532 + // 2. delete call if function frame is not big enough
76533 + if (get_frame_size() >= track_frame_size)
76534 + continue;
76535 + delete_insn_and_edges(insn);
76536 + }
76537 +
76538 +// print_simple_rtl(stderr, get_insns());
76539 +// print_rtl(stderr, get_insns());
76540 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
76541 +
76542 + return 0;
76543 +}
76544 +
76545 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
76546 +{
76547 + const char * const plugin_name = plugin_info->base_name;
76548 + const int argc = plugin_info->argc;
76549 + const struct plugin_argument * const argv = plugin_info->argv;
76550 + int i;
76551 + struct register_pass_info stackleak_tree_instrument_pass_info = {
76552 + .pass = &stackleak_tree_instrument_pass.pass,
76553 +// .reference_pass_name = "tree_profile",
76554 + .reference_pass_name = "optimized",
76555 + .ref_pass_instance_number = 0,
76556 + .pos_op = PASS_POS_INSERT_AFTER
76557 + };
76558 + struct register_pass_info stackleak_final_pass_info = {
76559 + .pass = &stackleak_final_rtl_opt_pass.pass,
76560 + .reference_pass_name = "final",
76561 + .ref_pass_instance_number = 0,
76562 + .pos_op = PASS_POS_INSERT_BEFORE
76563 + };
76564 +
76565 + if (!plugin_default_version_check(version, &gcc_version)) {
76566 + error(G_("incompatible gcc/plugin versions"));
76567 + return 1;
76568 + }
76569 +
76570 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
76571 +
76572 + for (i = 0; i < argc; ++i) {
76573 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
76574 + if (!argv[i].value) {
76575 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
76576 + continue;
76577 + }
76578 + track_frame_size = atoi(argv[i].value);
76579 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
76580 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
76581 + continue;
76582 + }
76583 + if (!strcmp(argv[i].key, "initialize-locals")) {
76584 + if (argv[i].value) {
76585 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
76586 + continue;
76587 + }
76588 + init_locals = true;
76589 + continue;
76590 + }
76591 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
76592 + }
76593 +
76594 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
76595 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
76596 +
76597 + return 0;
76598 +}
76599 diff -urNp linux-2.6.32.45/usr/gen_init_cpio.c linux-2.6.32.45/usr/gen_init_cpio.c
76600 --- linux-2.6.32.45/usr/gen_init_cpio.c 2011-03-27 14:31:47.000000000 -0400
76601 +++ linux-2.6.32.45/usr/gen_init_cpio.c 2011-04-17 15:56:46.000000000 -0400
76602 @@ -299,7 +299,7 @@ static int cpio_mkfile(const char *name,
76603 int retval;
76604 int rc = -1;
76605 int namesize;
76606 - int i;
76607 + unsigned int i;
76608
76609 mode |= S_IFREG;
76610
76611 @@ -383,9 +383,10 @@ static char *cpio_replace_env(char *new_
76612 *env_var = *expanded = '\0';
76613 strncat(env_var, start + 2, end - start - 2);
76614 strncat(expanded, new_location, start - new_location);
76615 - strncat(expanded, getenv(env_var), PATH_MAX);
76616 - strncat(expanded, end + 1, PATH_MAX);
76617 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
76618 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
76619 strncpy(new_location, expanded, PATH_MAX);
76620 + new_location[PATH_MAX] = 0;
76621 } else
76622 break;
76623 }
76624 diff -urNp linux-2.6.32.45/virt/kvm/kvm_main.c linux-2.6.32.45/virt/kvm/kvm_main.c
76625 --- linux-2.6.32.45/virt/kvm/kvm_main.c 2011-03-27 14:31:47.000000000 -0400
76626 +++ linux-2.6.32.45/virt/kvm/kvm_main.c 2011-08-05 20:33:55.000000000 -0400
76627 @@ -2494,7 +2494,7 @@ asmlinkage void kvm_handle_fault_on_rebo
76628 if (kvm_rebooting)
76629 /* spin while reset goes on */
76630 while (true)
76631 - ;
76632 + cpu_relax();
76633 /* Fault while not rebooting. We want the trace. */
76634 BUG();
76635 }
76636 @@ -2714,7 +2714,7 @@ static void kvm_sched_out(struct preempt
76637 kvm_arch_vcpu_put(vcpu);
76638 }
76639
76640 -int kvm_init(void *opaque, unsigned int vcpu_size,
76641 +int kvm_init(const void *opaque, unsigned int vcpu_size,
76642 struct module *module)
76643 {
76644 int r;
76645 @@ -2767,15 +2767,17 @@ int kvm_init(void *opaque, unsigned int
76646 /* A kmem cache lets us meet the alignment requirements of fx_save. */
76647 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
76648 __alignof__(struct kvm_vcpu),
76649 - 0, NULL);
76650 + SLAB_USERCOPY, NULL);
76651 if (!kvm_vcpu_cache) {
76652 r = -ENOMEM;
76653 goto out_free_5;
76654 }
76655
76656 - kvm_chardev_ops.owner = module;
76657 - kvm_vm_fops.owner = module;
76658 - kvm_vcpu_fops.owner = module;
76659 + pax_open_kernel();
76660 + *(void **)&kvm_chardev_ops.owner = module;
76661 + *(void **)&kvm_vm_fops.owner = module;
76662 + *(void **)&kvm_vcpu_fops.owner = module;
76663 + pax_close_kernel();
76664
76665 r = misc_register(&kvm_dev);
76666 if (r) {