1 diff -urNp linux-2.6.32.45/arch/alpha/include/asm/elf.h linux-2.6.32.45/arch/alpha/include/asm/elf.h
2 --- linux-2.6.32.45/arch/alpha/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3 +++ linux-2.6.32.45/arch/alpha/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
4 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8 +#ifdef CONFIG_PAX_ASLR
9 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10 +
11 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13 +#endif
14 +
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18 diff -urNp linux-2.6.32.45/arch/alpha/include/asm/pgtable.h linux-2.6.32.45/arch/alpha/include/asm/pgtable.h
19 --- linux-2.6.32.45/arch/alpha/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
20 +++ linux-2.6.32.45/arch/alpha/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
21 @@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25 +
26 +#ifdef CONFIG_PAX_PAGEEXEC
27 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30 +#else
31 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
32 +# define PAGE_COPY_NOEXEC PAGE_COPY
33 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
34 +#endif
35 +
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39 diff -urNp linux-2.6.32.45/arch/alpha/kernel/module.c linux-2.6.32.45/arch/alpha/kernel/module.c
40 --- linux-2.6.32.45/arch/alpha/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
41 +++ linux-2.6.32.45/arch/alpha/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
42 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46 - gp = (u64)me->module_core + me->core_size - 0x8000;
47 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51 diff -urNp linux-2.6.32.45/arch/alpha/kernel/osf_sys.c linux-2.6.32.45/arch/alpha/kernel/osf_sys.c
52 --- linux-2.6.32.45/arch/alpha/kernel/osf_sys.c 2011-08-09 18:35:28.000000000 -0400
53 +++ linux-2.6.32.45/arch/alpha/kernel/osf_sys.c 2011-06-13 17:19:47.000000000 -0400
54 @@ -1172,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58 - if (!vma || addr + len <= vma->vm_start)
59 + if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63 @@ -1208,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67 +#ifdef CONFIG_PAX_RANDMMAP
68 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69 +#endif
70 +
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74 @@ -1215,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79 - len, limit);
80 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81 +
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85 diff -urNp linux-2.6.32.45/arch/alpha/mm/fault.c linux-2.6.32.45/arch/alpha/mm/fault.c
86 --- linux-2.6.32.45/arch/alpha/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
87 +++ linux-2.6.32.45/arch/alpha/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
88 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92 +#ifdef CONFIG_PAX_PAGEEXEC
93 +/*
94 + * PaX: decide what to do with offenders (regs->pc = fault address)
95 + *
96 + * returns 1 when task should be killed
97 + * 2 when patched PLT trampoline was detected
98 + * 3 when unpatched PLT trampoline was detected
99 + */
100 +static int pax_handle_fetch_fault(struct pt_regs *regs)
101 +{
102 +
103 +#ifdef CONFIG_PAX_EMUPLT
104 + int err;
105 +
106 + do { /* PaX: patched PLT emulation #1 */
107 + unsigned int ldah, ldq, jmp;
108 +
109 + err = get_user(ldah, (unsigned int *)regs->pc);
110 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112 +
113 + if (err)
114 + break;
115 +
116 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118 + jmp == 0x6BFB0000U)
119 + {
120 + unsigned long r27, addr;
121 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123 +
124 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125 + err = get_user(r27, (unsigned long *)addr);
126 + if (err)
127 + break;
128 +
129 + regs->r27 = r27;
130 + regs->pc = r27;
131 + return 2;
132 + }
133 + } while (0);
134 +
135 + do { /* PaX: patched PLT emulation #2 */
136 + unsigned int ldah, lda, br;
137 +
138 + err = get_user(ldah, (unsigned int *)regs->pc);
139 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
140 + err |= get_user(br, (unsigned int *)(regs->pc+8));
141 +
142 + if (err)
143 + break;
144 +
145 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
147 + (br & 0xFFE00000U) == 0xC3E00000U)
148 + {
149 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152 +
153 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155 + return 2;
156 + }
157 + } while (0);
158 +
159 + do { /* PaX: unpatched PLT emulation */
160 + unsigned int br;
161 +
162 + err = get_user(br, (unsigned int *)regs->pc);
163 +
164 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165 + unsigned int br2, ldq, nop, jmp;
166 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167 +
168 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169 + err = get_user(br2, (unsigned int *)addr);
170 + err |= get_user(ldq, (unsigned int *)(addr+4));
171 + err |= get_user(nop, (unsigned int *)(addr+8));
172 + err |= get_user(jmp, (unsigned int *)(addr+12));
173 + err |= get_user(resolver, (unsigned long *)(addr+16));
174 +
175 + if (err)
176 + break;
177 +
178 + if (br2 == 0xC3600000U &&
179 + ldq == 0xA77B000CU &&
180 + nop == 0x47FF041FU &&
181 + jmp == 0x6B7B0000U)
182 + {
183 + regs->r28 = regs->pc+4;
184 + regs->r27 = addr+16;
185 + regs->pc = resolver;
186 + return 3;
187 + }
188 + }
189 + } while (0);
190 +#endif
191 +
192 + return 1;
193 +}
194 +
195 +void pax_report_insns(void *pc, void *sp)
196 +{
197 + unsigned long i;
198 +
199 + printk(KERN_ERR "PAX: bytes at PC: ");
200 + for (i = 0; i < 5; i++) {
201 + unsigned int c;
202 + if (get_user(c, (unsigned int *)pc+i))
203 + printk(KERN_CONT "???????? ");
204 + else
205 + printk(KERN_CONT "%08x ", c);
206 + }
207 + printk("\n");
208 +}
209 +#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217 - if (!(vma->vm_flags & VM_EXEC))
218 + if (!(vma->vm_flags & VM_EXEC)) {
219 +
220 +#ifdef CONFIG_PAX_PAGEEXEC
221 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222 + goto bad_area;
223 +
224 + up_read(&mm->mmap_sem);
225 + switch (pax_handle_fetch_fault(regs)) {
226 +
227 +#ifdef CONFIG_PAX_EMUPLT
228 + case 2:
229 + case 3:
230 + return;
231 +#endif
232 +
233 + }
234 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235 + do_group_exit(SIGKILL);
236 +#else
237 goto bad_area;
238 +#endif
239 +
240 + }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244 diff -urNp linux-2.6.32.45/arch/arm/include/asm/elf.h linux-2.6.32.45/arch/arm/include/asm/elf.h
245 --- linux-2.6.32.45/arch/arm/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
246 +++ linux-2.6.32.45/arch/arm/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
247 @@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253 +
254 +#ifdef CONFIG_PAX_ASLR
255 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256 +
257 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259 +#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263 diff -urNp linux-2.6.32.45/arch/arm/include/asm/kmap_types.h linux-2.6.32.45/arch/arm/include/asm/kmap_types.h
264 --- linux-2.6.32.45/arch/arm/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
265 +++ linux-2.6.32.45/arch/arm/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
266 @@ -19,6 +19,7 @@ enum km_type {
267 KM_SOFTIRQ0,
268 KM_SOFTIRQ1,
269 KM_L2_CACHE,
270 + KM_CLEARPAGE,
271 KM_TYPE_NR
272 };
273
274 diff -urNp linux-2.6.32.45/arch/arm/include/asm/uaccess.h linux-2.6.32.45/arch/arm/include/asm/uaccess.h
275 --- linux-2.6.32.45/arch/arm/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
276 +++ linux-2.6.32.45/arch/arm/include/asm/uaccess.h 2011-06-29 21:02:24.000000000 -0400
277 @@ -22,6 +22,8 @@
278 #define VERIFY_READ 0
279 #define VERIFY_WRITE 1
280
281 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
282 +
283 /*
284 * The exception table consists of pairs of addresses: the first is the
285 * address of an instruction that is allowed to fault, and the second is
286 @@ -387,8 +389,23 @@ do { \
287
288
289 #ifdef CONFIG_MMU
290 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
291 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
292 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
293 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
294 +
295 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
296 +{
297 + if (!__builtin_constant_p(n))
298 + check_object_size(to, n, false);
299 + return ___copy_from_user(to, from, n);
300 +}
301 +
302 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
303 +{
304 + if (!__builtin_constant_p(n))
305 + check_object_size(from, n, true);
306 + return ___copy_to_user(to, from, n);
307 +}
308 +
309 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
310 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
311 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
312 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
313
314 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
315 {
316 + if ((long)n < 0)
317 + return n;
318 +
319 if (access_ok(VERIFY_READ, from, n))
320 n = __copy_from_user(to, from, n);
321 else /* security hole - plug it */
322 @@ -412,6 +432,9 @@ static inline unsigned long __must_check
323
324 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
325 {
326 + if ((long)n < 0)
327 + return n;
328 +
329 if (access_ok(VERIFY_WRITE, to, n))
330 n = __copy_to_user(to, from, n);
331 return n;
332 diff -urNp linux-2.6.32.45/arch/arm/kernel/armksyms.c linux-2.6.32.45/arch/arm/kernel/armksyms.c
333 --- linux-2.6.32.45/arch/arm/kernel/armksyms.c 2011-03-27 14:31:47.000000000 -0400
334 +++ linux-2.6.32.45/arch/arm/kernel/armksyms.c 2011-07-06 19:51:50.000000000 -0400
335 @@ -118,8 +118,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
336 #ifdef CONFIG_MMU
337 EXPORT_SYMBOL(copy_page);
338
339 -EXPORT_SYMBOL(__copy_from_user);
340 -EXPORT_SYMBOL(__copy_to_user);
341 +EXPORT_SYMBOL(___copy_from_user);
342 +EXPORT_SYMBOL(___copy_to_user);
343 EXPORT_SYMBOL(__clear_user);
344
345 EXPORT_SYMBOL(__get_user_1);
346 diff -urNp linux-2.6.32.45/arch/arm/kernel/kgdb.c linux-2.6.32.45/arch/arm/kernel/kgdb.c
347 --- linux-2.6.32.45/arch/arm/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
348 +++ linux-2.6.32.45/arch/arm/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
349 @@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
350 * and we handle the normal undef case within the do_undefinstr
351 * handler.
352 */
353 -struct kgdb_arch arch_kgdb_ops = {
354 +const struct kgdb_arch arch_kgdb_ops = {
355 #ifndef __ARMEB__
356 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
357 #else /* ! __ARMEB__ */
358 diff -urNp linux-2.6.32.45/arch/arm/kernel/traps.c linux-2.6.32.45/arch/arm/kernel/traps.c
359 --- linux-2.6.32.45/arch/arm/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
360 +++ linux-2.6.32.45/arch/arm/kernel/traps.c 2011-06-13 21:31:18.000000000 -0400
361 @@ -247,6 +247,8 @@ static void __die(const char *str, int e
362
363 DEFINE_SPINLOCK(die_lock);
364
365 +extern void gr_handle_kernel_exploit(void);
366 +
367 /*
368 * This function is protected against re-entrancy.
369 */
370 @@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, str
371 if (panic_on_oops)
372 panic("Fatal exception");
373
374 + gr_handle_kernel_exploit();
375 +
376 do_exit(SIGSEGV);
377 }
378
379 diff -urNp linux-2.6.32.45/arch/arm/lib/copy_from_user.S linux-2.6.32.45/arch/arm/lib/copy_from_user.S
380 --- linux-2.6.32.45/arch/arm/lib/copy_from_user.S 2011-03-27 14:31:47.000000000 -0400
381 +++ linux-2.6.32.45/arch/arm/lib/copy_from_user.S 2011-06-29 20:48:38.000000000 -0400
382 @@ -16,7 +16,7 @@
383 /*
384 * Prototype:
385 *
386 - * size_t __copy_from_user(void *to, const void *from, size_t n)
387 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
388 *
389 * Purpose:
390 *
391 @@ -84,11 +84,11 @@
392
393 .text
394
395 -ENTRY(__copy_from_user)
396 +ENTRY(___copy_from_user)
397
398 #include "copy_template.S"
399
400 -ENDPROC(__copy_from_user)
401 +ENDPROC(___copy_from_user)
402
403 .section .fixup,"ax"
404 .align 0
405 diff -urNp linux-2.6.32.45/arch/arm/lib/copy_to_user.S linux-2.6.32.45/arch/arm/lib/copy_to_user.S
406 --- linux-2.6.32.45/arch/arm/lib/copy_to_user.S 2011-03-27 14:31:47.000000000 -0400
407 +++ linux-2.6.32.45/arch/arm/lib/copy_to_user.S 2011-06-29 20:46:49.000000000 -0400
408 @@ -16,7 +16,7 @@
409 /*
410 * Prototype:
411 *
412 - * size_t __copy_to_user(void *to, const void *from, size_t n)
413 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
414 *
415 * Purpose:
416 *
417 @@ -88,11 +88,11 @@
418 .text
419
420 ENTRY(__copy_to_user_std)
421 -WEAK(__copy_to_user)
422 +WEAK(___copy_to_user)
423
424 #include "copy_template.S"
425
426 -ENDPROC(__copy_to_user)
427 +ENDPROC(___copy_to_user)
428
429 .section .fixup,"ax"
430 .align 0
431 diff -urNp linux-2.6.32.45/arch/arm/lib/uaccess.S linux-2.6.32.45/arch/arm/lib/uaccess.S
432 --- linux-2.6.32.45/arch/arm/lib/uaccess.S 2011-03-27 14:31:47.000000000 -0400
433 +++ linux-2.6.32.45/arch/arm/lib/uaccess.S 2011-06-29 20:48:53.000000000 -0400
434 @@ -19,7 +19,7 @@
435
436 #define PAGE_SHIFT 12
437
438 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
439 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
440 * Purpose : copy a block to user memory from kernel memory
441 * Params : to - user memory
442 * : from - kernel memory
443 @@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fau
444 sub r2, r2, ip
445 b .Lc2u_dest_aligned
446
447 -ENTRY(__copy_to_user)
448 +ENTRY(___copy_to_user)
449 stmfd sp!, {r2, r4 - r7, lr}
450 cmp r2, #4
451 blt .Lc2u_not_enough
452 @@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fau
453 ldrgtb r3, [r1], #0
454 USER( strgtbt r3, [r0], #1) @ May fault
455 b .Lc2u_finished
456 -ENDPROC(__copy_to_user)
457 +ENDPROC(___copy_to_user)
458
459 .section .fixup,"ax"
460 .align 0
461 9001: ldmfd sp!, {r0, r4 - r7, pc}
462 .previous
463
464 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
465 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
466 * Purpose : copy a block from user memory to kernel memory
467 * Params : to - kernel memory
468 * : from - user memory
469 @@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fau
470 sub r2, r2, ip
471 b .Lcfu_dest_aligned
472
473 -ENTRY(__copy_from_user)
474 +ENTRY(___copy_from_user)
475 stmfd sp!, {r0, r2, r4 - r7, lr}
476 cmp r2, #4
477 blt .Lcfu_not_enough
478 @@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fau
479 USER( ldrgtbt r3, [r1], #1) @ May fault
480 strgtb r3, [r0], #1
481 b .Lcfu_finished
482 -ENDPROC(__copy_from_user)
483 +ENDPROC(___copy_from_user)
484
485 .section .fixup,"ax"
486 .align 0
487 diff -urNp linux-2.6.32.45/arch/arm/lib/uaccess_with_memcpy.c linux-2.6.32.45/arch/arm/lib/uaccess_with_memcpy.c
488 --- linux-2.6.32.45/arch/arm/lib/uaccess_with_memcpy.c 2011-03-27 14:31:47.000000000 -0400
489 +++ linux-2.6.32.45/arch/arm/lib/uaccess_with_memcpy.c 2011-06-29 20:44:35.000000000 -0400
490 @@ -97,7 +97,7 @@ out:
491 }
492
493 unsigned long
494 -__copy_to_user(void __user *to, const void *from, unsigned long n)
495 +___copy_to_user(void __user *to, const void *from, unsigned long n)
496 {
497 /*
498 * This test is stubbed out of the main function above to keep
499 diff -urNp linux-2.6.32.45/arch/arm/mach-at91/pm.c linux-2.6.32.45/arch/arm/mach-at91/pm.c
500 --- linux-2.6.32.45/arch/arm/mach-at91/pm.c 2011-03-27 14:31:47.000000000 -0400
501 +++ linux-2.6.32.45/arch/arm/mach-at91/pm.c 2011-04-17 15:56:45.000000000 -0400
502 @@ -348,7 +348,7 @@ static void at91_pm_end(void)
503 }
504
505
506 -static struct platform_suspend_ops at91_pm_ops ={
507 +static const struct platform_suspend_ops at91_pm_ops ={
508 .valid = at91_pm_valid_state,
509 .begin = at91_pm_begin,
510 .enter = at91_pm_enter,
511 diff -urNp linux-2.6.32.45/arch/arm/mach-omap1/pm.c linux-2.6.32.45/arch/arm/mach-omap1/pm.c
512 --- linux-2.6.32.45/arch/arm/mach-omap1/pm.c 2011-03-27 14:31:47.000000000 -0400
513 +++ linux-2.6.32.45/arch/arm/mach-omap1/pm.c 2011-04-17 15:56:45.000000000 -0400
514 @@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq
515
516
517
518 -static struct platform_suspend_ops omap_pm_ops ={
519 +static const struct platform_suspend_ops omap_pm_ops ={
520 .prepare = omap_pm_prepare,
521 .enter = omap_pm_enter,
522 .finish = omap_pm_finish,
523 diff -urNp linux-2.6.32.45/arch/arm/mach-omap2/pm24xx.c linux-2.6.32.45/arch/arm/mach-omap2/pm24xx.c
524 --- linux-2.6.32.45/arch/arm/mach-omap2/pm24xx.c 2011-03-27 14:31:47.000000000 -0400
525 +++ linux-2.6.32.45/arch/arm/mach-omap2/pm24xx.c 2011-04-17 15:56:45.000000000 -0400
526 @@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
527 enable_hlt();
528 }
529
530 -static struct platform_suspend_ops omap_pm_ops = {
531 +static const struct platform_suspend_ops omap_pm_ops = {
532 .prepare = omap2_pm_prepare,
533 .enter = omap2_pm_enter,
534 .finish = omap2_pm_finish,
535 diff -urNp linux-2.6.32.45/arch/arm/mach-omap2/pm34xx.c linux-2.6.32.45/arch/arm/mach-omap2/pm34xx.c
536 --- linux-2.6.32.45/arch/arm/mach-omap2/pm34xx.c 2011-03-27 14:31:47.000000000 -0400
537 +++ linux-2.6.32.45/arch/arm/mach-omap2/pm34xx.c 2011-04-17 15:56:45.000000000 -0400
538 @@ -401,7 +401,7 @@ static void omap3_pm_end(void)
539 return;
540 }
541
542 -static struct platform_suspend_ops omap_pm_ops = {
543 +static const struct platform_suspend_ops omap_pm_ops = {
544 .begin = omap3_pm_begin,
545 .end = omap3_pm_end,
546 .prepare = omap3_pm_prepare,
547 diff -urNp linux-2.6.32.45/arch/arm/mach-pnx4008/pm.c linux-2.6.32.45/arch/arm/mach-pnx4008/pm.c
548 --- linux-2.6.32.45/arch/arm/mach-pnx4008/pm.c 2011-03-27 14:31:47.000000000 -0400
549 +++ linux-2.6.32.45/arch/arm/mach-pnx4008/pm.c 2011-04-17 15:56:45.000000000 -0400
550 @@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_stat
551 (state == PM_SUSPEND_MEM);
552 }
553
554 -static struct platform_suspend_ops pnx4008_pm_ops = {
555 +static const struct platform_suspend_ops pnx4008_pm_ops = {
556 .enter = pnx4008_pm_enter,
557 .valid = pnx4008_pm_valid,
558 };
559 diff -urNp linux-2.6.32.45/arch/arm/mach-pxa/pm.c linux-2.6.32.45/arch/arm/mach-pxa/pm.c
560 --- linux-2.6.32.45/arch/arm/mach-pxa/pm.c 2011-03-27 14:31:47.000000000 -0400
561 +++ linux-2.6.32.45/arch/arm/mach-pxa/pm.c 2011-04-17 15:56:45.000000000 -0400
562 @@ -95,7 +95,7 @@ void pxa_pm_finish(void)
563 pxa_cpu_pm_fns->finish();
564 }
565
566 -static struct platform_suspend_ops pxa_pm_ops = {
567 +static const struct platform_suspend_ops pxa_pm_ops = {
568 .valid = pxa_pm_valid,
569 .enter = pxa_pm_enter,
570 .prepare = pxa_pm_prepare,
571 diff -urNp linux-2.6.32.45/arch/arm/mach-pxa/sharpsl_pm.c linux-2.6.32.45/arch/arm/mach-pxa/sharpsl_pm.c
572 --- linux-2.6.32.45/arch/arm/mach-pxa/sharpsl_pm.c 2011-03-27 14:31:47.000000000 -0400
573 +++ linux-2.6.32.45/arch/arm/mach-pxa/sharpsl_pm.c 2011-04-17 15:56:45.000000000 -0400
574 @@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status
575 }
576
577 #ifdef CONFIG_PM
578 -static struct platform_suspend_ops sharpsl_pm_ops = {
579 +static const struct platform_suspend_ops sharpsl_pm_ops = {
580 .prepare = pxa_pm_prepare,
581 .finish = pxa_pm_finish,
582 .enter = corgi_pxa_pm_enter,
583 diff -urNp linux-2.6.32.45/arch/arm/mach-sa1100/pm.c linux-2.6.32.45/arch/arm/mach-sa1100/pm.c
584 --- linux-2.6.32.45/arch/arm/mach-sa1100/pm.c 2011-03-27 14:31:47.000000000 -0400
585 +++ linux-2.6.32.45/arch/arm/mach-sa1100/pm.c 2011-04-17 15:56:45.000000000 -0400
586 @@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
587 return virt_to_phys(sp);
588 }
589
590 -static struct platform_suspend_ops sa11x0_pm_ops = {
591 +static const struct platform_suspend_ops sa11x0_pm_ops = {
592 .enter = sa11x0_pm_enter,
593 .valid = suspend_valid_only_mem,
594 };
595 diff -urNp linux-2.6.32.45/arch/arm/mm/fault.c linux-2.6.32.45/arch/arm/mm/fault.c
596 --- linux-2.6.32.45/arch/arm/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
597 +++ linux-2.6.32.45/arch/arm/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
598 @@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk,
599 }
600 #endif
601
602 +#ifdef CONFIG_PAX_PAGEEXEC
603 + if (fsr & FSR_LNX_PF) {
604 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
605 + do_group_exit(SIGKILL);
606 + }
607 +#endif
608 +
609 tsk->thread.address = addr;
610 tsk->thread.error_code = fsr;
611 tsk->thread.trap_no = 14;
612 @@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsign
613 }
614 #endif /* CONFIG_MMU */
615
616 +#ifdef CONFIG_PAX_PAGEEXEC
617 +void pax_report_insns(void *pc, void *sp)
618 +{
619 + long i;
620 +
621 + printk(KERN_ERR "PAX: bytes at PC: ");
622 + for (i = 0; i < 20; i++) {
623 + unsigned char c;
624 + if (get_user(c, (__force unsigned char __user *)pc+i))
625 + printk(KERN_CONT "?? ");
626 + else
627 + printk(KERN_CONT "%02x ", c);
628 + }
629 + printk("\n");
630 +
631 + printk(KERN_ERR "PAX: bytes at SP-4: ");
632 + for (i = -1; i < 20; i++) {
633 + unsigned long c;
634 + if (get_user(c, (__force unsigned long __user *)sp+i))
635 + printk(KERN_CONT "???????? ");
636 + else
637 + printk(KERN_CONT "%08lx ", c);
638 + }
639 + printk("\n");
640 +}
641 +#endif
642 +
643 /*
644 * First Level Translation Fault Handler
645 *
646 diff -urNp linux-2.6.32.45/arch/arm/mm/mmap.c linux-2.6.32.45/arch/arm/mm/mmap.c
647 --- linux-2.6.32.45/arch/arm/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
648 +++ linux-2.6.32.45/arch/arm/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
649 @@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp
650 if (len > TASK_SIZE)
651 return -ENOMEM;
652
653 +#ifdef CONFIG_PAX_RANDMMAP
654 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
655 +#endif
656 +
657 if (addr) {
658 if (do_align)
659 addr = COLOUR_ALIGN(addr, pgoff);
660 @@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp
661 addr = PAGE_ALIGN(addr);
662
663 vma = find_vma(mm, addr);
664 - if (TASK_SIZE - len >= addr &&
665 - (!vma || addr + len <= vma->vm_start))
666 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
667 return addr;
668 }
669 if (len > mm->cached_hole_size) {
670 - start_addr = addr = mm->free_area_cache;
671 + start_addr = addr = mm->free_area_cache;
672 } else {
673 - start_addr = addr = TASK_UNMAPPED_BASE;
674 - mm->cached_hole_size = 0;
675 + start_addr = addr = mm->mmap_base;
676 + mm->cached_hole_size = 0;
677 }
678
679 full_search:
680 @@ -94,14 +97,14 @@ full_search:
681 * Start a new search - just in case we missed
682 * some holes.
683 */
684 - if (start_addr != TASK_UNMAPPED_BASE) {
685 - start_addr = addr = TASK_UNMAPPED_BASE;
686 + if (start_addr != mm->mmap_base) {
687 + start_addr = addr = mm->mmap_base;
688 mm->cached_hole_size = 0;
689 goto full_search;
690 }
691 return -ENOMEM;
692 }
693 - if (!vma || addr + len <= vma->vm_start) {
694 + if (check_heap_stack_gap(vma, addr, len)) {
695 /*
696 * Remember the place where we stopped the search:
697 */
698 diff -urNp linux-2.6.32.45/arch/arm/plat-s3c/pm.c linux-2.6.32.45/arch/arm/plat-s3c/pm.c
699 --- linux-2.6.32.45/arch/arm/plat-s3c/pm.c 2011-03-27 14:31:47.000000000 -0400
700 +++ linux-2.6.32.45/arch/arm/plat-s3c/pm.c 2011-04-17 15:56:45.000000000 -0400
701 @@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
702 s3c_pm_check_cleanup();
703 }
704
705 -static struct platform_suspend_ops s3c_pm_ops = {
706 +static const struct platform_suspend_ops s3c_pm_ops = {
707 .enter = s3c_pm_enter,
708 .prepare = s3c_pm_prepare,
709 .finish = s3c_pm_finish,
710 diff -urNp linux-2.6.32.45/arch/avr32/include/asm/elf.h linux-2.6.32.45/arch/avr32/include/asm/elf.h
711 --- linux-2.6.32.45/arch/avr32/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
712 +++ linux-2.6.32.45/arch/avr32/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
713 @@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpreg
714 the loader. We need to make sure that it is out of the way of the program
715 that it will "exec", and that there is sufficient room for the brk. */
716
717 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
718 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
719
720 +#ifdef CONFIG_PAX_ASLR
721 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
722 +
723 +#define PAX_DELTA_MMAP_LEN 15
724 +#define PAX_DELTA_STACK_LEN 15
725 +#endif
726
727 /* This yields a mask that user programs can use to figure out what
728 instruction set this CPU supports. This could be done in user space,
729 diff -urNp linux-2.6.32.45/arch/avr32/include/asm/kmap_types.h linux-2.6.32.45/arch/avr32/include/asm/kmap_types.h
730 --- linux-2.6.32.45/arch/avr32/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
731 +++ linux-2.6.32.45/arch/avr32/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
732 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
733 D(11) KM_IRQ1,
734 D(12) KM_SOFTIRQ0,
735 D(13) KM_SOFTIRQ1,
736 -D(14) KM_TYPE_NR
737 +D(14) KM_CLEARPAGE,
738 +D(15) KM_TYPE_NR
739 };
740
741 #undef D
742 diff -urNp linux-2.6.32.45/arch/avr32/mach-at32ap/pm.c linux-2.6.32.45/arch/avr32/mach-at32ap/pm.c
743 --- linux-2.6.32.45/arch/avr32/mach-at32ap/pm.c 2011-03-27 14:31:47.000000000 -0400
744 +++ linux-2.6.32.45/arch/avr32/mach-at32ap/pm.c 2011-04-17 15:56:45.000000000 -0400
745 @@ -176,7 +176,7 @@ out:
746 return 0;
747 }
748
749 -static struct platform_suspend_ops avr32_pm_ops = {
750 +static const struct platform_suspend_ops avr32_pm_ops = {
751 .valid = avr32_pm_valid_state,
752 .enter = avr32_pm_enter,
753 };
754 diff -urNp linux-2.6.32.45/arch/avr32/mm/fault.c linux-2.6.32.45/arch/avr32/mm/fault.c
755 --- linux-2.6.32.45/arch/avr32/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
756 +++ linux-2.6.32.45/arch/avr32/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
757 @@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
758
759 int exception_trace = 1;
760
761 +#ifdef CONFIG_PAX_PAGEEXEC
762 +void pax_report_insns(void *pc, void *sp)
763 +{
764 + unsigned long i;
765 +
766 + printk(KERN_ERR "PAX: bytes at PC: ");
767 + for (i = 0; i < 20; i++) {
768 + unsigned char c;
769 + if (get_user(c, (unsigned char *)pc+i))
770 + printk(KERN_CONT "???????? ");
771 + else
772 + printk(KERN_CONT "%02x ", c);
773 + }
774 + printk("\n");
775 +}
776 +#endif
777 +
778 /*
779 * This routine handles page faults. It determines the address and the
780 * problem, and then passes it off to one of the appropriate routines.
781 @@ -157,6 +174,16 @@ bad_area:
782 up_read(&mm->mmap_sem);
783
784 if (user_mode(regs)) {
785 +
786 +#ifdef CONFIG_PAX_PAGEEXEC
787 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
788 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
789 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
790 + do_group_exit(SIGKILL);
791 + }
792 + }
793 +#endif
794 +
795 if (exception_trace && printk_ratelimit())
796 printk("%s%s[%d]: segfault at %08lx pc %08lx "
797 "sp %08lx ecr %lu\n",
798 diff -urNp linux-2.6.32.45/arch/blackfin/kernel/kgdb.c linux-2.6.32.45/arch/blackfin/kernel/kgdb.c
799 --- linux-2.6.32.45/arch/blackfin/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
800 +++ linux-2.6.32.45/arch/blackfin/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
801 @@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vecto
802 return -1; /* this means that we do not want to exit from the handler */
803 }
804
805 -struct kgdb_arch arch_kgdb_ops = {
806 +const struct kgdb_arch arch_kgdb_ops = {
807 .gdb_bpt_instr = {0xa1},
808 #ifdef CONFIG_SMP
809 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
810 diff -urNp linux-2.6.32.45/arch/blackfin/mach-common/pm.c linux-2.6.32.45/arch/blackfin/mach-common/pm.c
811 --- linux-2.6.32.45/arch/blackfin/mach-common/pm.c 2011-03-27 14:31:47.000000000 -0400
812 +++ linux-2.6.32.45/arch/blackfin/mach-common/pm.c 2011-04-17 15:56:45.000000000 -0400
813 @@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t
814 return 0;
815 }
816
817 -struct platform_suspend_ops bfin_pm_ops = {
818 +const struct platform_suspend_ops bfin_pm_ops = {
819 .enter = bfin_pm_enter,
820 .valid = bfin_pm_valid,
821 };
822 diff -urNp linux-2.6.32.45/arch/frv/include/asm/kmap_types.h linux-2.6.32.45/arch/frv/include/asm/kmap_types.h
823 --- linux-2.6.32.45/arch/frv/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
824 +++ linux-2.6.32.45/arch/frv/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
825 @@ -23,6 +23,7 @@ enum km_type {
826 KM_IRQ1,
827 KM_SOFTIRQ0,
828 KM_SOFTIRQ1,
829 + KM_CLEARPAGE,
830 KM_TYPE_NR
831 };
832
833 diff -urNp linux-2.6.32.45/arch/frv/mm/elf-fdpic.c linux-2.6.32.45/arch/frv/mm/elf-fdpic.c
834 --- linux-2.6.32.45/arch/frv/mm/elf-fdpic.c 2011-03-27 14:31:47.000000000 -0400
835 +++ linux-2.6.32.45/arch/frv/mm/elf-fdpic.c 2011-04-17 15:56:45.000000000 -0400
836 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
837 if (addr) {
838 addr = PAGE_ALIGN(addr);
839 vma = find_vma(current->mm, addr);
840 - if (TASK_SIZE - len >= addr &&
841 - (!vma || addr + len <= vma->vm_start))
842 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
843 goto success;
844 }
845
846 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
847 for (; vma; vma = vma->vm_next) {
848 if (addr > limit)
849 break;
850 - if (addr + len <= vma->vm_start)
851 + if (check_heap_stack_gap(vma, addr, len))
852 goto success;
853 addr = vma->vm_end;
854 }
855 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
856 for (; vma; vma = vma->vm_next) {
857 if (addr > limit)
858 break;
859 - if (addr + len <= vma->vm_start)
860 + if (check_heap_stack_gap(vma, addr, len))
861 goto success;
862 addr = vma->vm_end;
863 }
864 diff -urNp linux-2.6.32.45/arch/ia64/hp/common/hwsw_iommu.c linux-2.6.32.45/arch/ia64/hp/common/hwsw_iommu.c
865 --- linux-2.6.32.45/arch/ia64/hp/common/hwsw_iommu.c 2011-03-27 14:31:47.000000000 -0400
866 +++ linux-2.6.32.45/arch/ia64/hp/common/hwsw_iommu.c 2011-04-17 15:56:45.000000000 -0400
867 @@ -17,7 +17,7 @@
868 #include <linux/swiotlb.h>
869 #include <asm/machvec.h>
870
871 -extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
872 +extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
873
874 /* swiotlb declarations & definitions: */
875 extern int swiotlb_late_init_with_default_size (size_t size);
876 @@ -33,7 +33,7 @@ static inline int use_swiotlb(struct dev
877 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
878 }
879
880 -struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
881 +const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
882 {
883 if (use_swiotlb(dev))
884 return &swiotlb_dma_ops;
885 diff -urNp linux-2.6.32.45/arch/ia64/hp/common/sba_iommu.c linux-2.6.32.45/arch/ia64/hp/common/sba_iommu.c
886 --- linux-2.6.32.45/arch/ia64/hp/common/sba_iommu.c 2011-03-27 14:31:47.000000000 -0400
887 +++ linux-2.6.32.45/arch/ia64/hp/common/sba_iommu.c 2011-04-17 15:56:45.000000000 -0400
888 @@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_d
889 },
890 };
891
892 -extern struct dma_map_ops swiotlb_dma_ops;
893 +extern const struct dma_map_ops swiotlb_dma_ops;
894
895 static int __init
896 sba_init(void)
897 @@ -2211,7 +2211,7 @@ sba_page_override(char *str)
898
899 __setup("sbapagesize=",sba_page_override);
900
901 -struct dma_map_ops sba_dma_ops = {
902 +const struct dma_map_ops sba_dma_ops = {
903 .alloc_coherent = sba_alloc_coherent,
904 .free_coherent = sba_free_coherent,
905 .map_page = sba_map_page,
906 diff -urNp linux-2.6.32.45/arch/ia64/ia32/binfmt_elf32.c linux-2.6.32.45/arch/ia64/ia32/binfmt_elf32.c
907 --- linux-2.6.32.45/arch/ia64/ia32/binfmt_elf32.c 2011-03-27 14:31:47.000000000 -0400
908 +++ linux-2.6.32.45/arch/ia64/ia32/binfmt_elf32.c 2011-04-17 15:56:45.000000000 -0400
909 @@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_
910
911 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
912
913 +#ifdef CONFIG_PAX_ASLR
914 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
915 +
916 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
917 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
918 +#endif
919 +
920 /* Ugly but avoids duplication */
921 #include "../../../fs/binfmt_elf.c"
922
923 diff -urNp linux-2.6.32.45/arch/ia64/ia32/ia32priv.h linux-2.6.32.45/arch/ia64/ia32/ia32priv.h
924 --- linux-2.6.32.45/arch/ia64/ia32/ia32priv.h 2011-03-27 14:31:47.000000000 -0400
925 +++ linux-2.6.32.45/arch/ia64/ia32/ia32priv.h 2011-04-17 15:56:45.000000000 -0400
926 @@ -296,7 +296,14 @@ typedef struct compat_siginfo {
927 #define ELF_DATA ELFDATA2LSB
928 #define ELF_ARCH EM_386
929
930 -#define IA32_STACK_TOP IA32_PAGE_OFFSET
931 +#ifdef CONFIG_PAX_RANDUSTACK
932 +#define __IA32_DELTA_STACK (current->mm->delta_stack)
933 +#else
934 +#define __IA32_DELTA_STACK 0UL
935 +#endif
936 +
937 +#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
938 +
939 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
940 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
941
942 diff -urNp linux-2.6.32.45/arch/ia64/include/asm/dma-mapping.h linux-2.6.32.45/arch/ia64/include/asm/dma-mapping.h
943 --- linux-2.6.32.45/arch/ia64/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
944 +++ linux-2.6.32.45/arch/ia64/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
945 @@ -12,7 +12,7 @@
946
947 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
948
949 -extern struct dma_map_ops *dma_ops;
950 +extern const struct dma_map_ops *dma_ops;
951 extern struct ia64_machine_vector ia64_mv;
952 extern void set_iommu_machvec(void);
953
954 @@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct d
955 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
956 dma_addr_t *daddr, gfp_t gfp)
957 {
958 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
959 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
960 void *caddr;
961
962 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
963 @@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(s
964 static inline void dma_free_coherent(struct device *dev, size_t size,
965 void *caddr, dma_addr_t daddr)
966 {
967 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
968 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
969 debug_dma_free_coherent(dev, size, caddr, daddr);
970 ops->free_coherent(dev, size, caddr, daddr);
971 }
972 @@ -49,13 +49,13 @@ static inline void dma_free_coherent(str
973
974 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
975 {
976 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
977 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
978 return ops->mapping_error(dev, daddr);
979 }
980
981 static inline int dma_supported(struct device *dev, u64 mask)
982 {
983 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
984 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
985 return ops->dma_supported(dev, mask);
986 }
987
988 diff -urNp linux-2.6.32.45/arch/ia64/include/asm/elf.h linux-2.6.32.45/arch/ia64/include/asm/elf.h
989 --- linux-2.6.32.45/arch/ia64/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
990 +++ linux-2.6.32.45/arch/ia64/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
991 @@ -43,6 +43,13 @@
992 */
993 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
994
995 +#ifdef CONFIG_PAX_ASLR
996 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
997 +
998 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
999 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1000 +#endif
1001 +
1002 #define PT_IA_64_UNWIND 0x70000001
1003
1004 /* IA-64 relocations: */
1005 diff -urNp linux-2.6.32.45/arch/ia64/include/asm/machvec.h linux-2.6.32.45/arch/ia64/include/asm/machvec.h
1006 --- linux-2.6.32.45/arch/ia64/include/asm/machvec.h 2011-03-27 14:31:47.000000000 -0400
1007 +++ linux-2.6.32.45/arch/ia64/include/asm/machvec.h 2011-04-17 15:56:45.000000000 -0400
1008 @@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event
1009 /* DMA-mapping interface: */
1010 typedef void ia64_mv_dma_init (void);
1011 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
1012 -typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1013 +typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1014
1015 /*
1016 * WARNING: The legacy I/O space is _architected_. Platforms are
1017 @@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(co
1018 # endif /* CONFIG_IA64_GENERIC */
1019
1020 extern void swiotlb_dma_init(void);
1021 -extern struct dma_map_ops *dma_get_ops(struct device *);
1022 +extern const struct dma_map_ops *dma_get_ops(struct device *);
1023
1024 /*
1025 * Define default versions so we can extend machvec for new platforms without having
1026 diff -urNp linux-2.6.32.45/arch/ia64/include/asm/pgtable.h linux-2.6.32.45/arch/ia64/include/asm/pgtable.h
1027 --- linux-2.6.32.45/arch/ia64/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1028 +++ linux-2.6.32.45/arch/ia64/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1029 @@ -12,7 +12,7 @@
1030 * David Mosberger-Tang <davidm@hpl.hp.com>
1031 */
1032
1033 -
1034 +#include <linux/const.h>
1035 #include <asm/mman.h>
1036 #include <asm/page.h>
1037 #include <asm/processor.h>
1038 @@ -143,6 +143,17 @@
1039 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1040 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1041 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1042 +
1043 +#ifdef CONFIG_PAX_PAGEEXEC
1044 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1045 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1046 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1047 +#else
1048 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1049 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1050 +# define PAGE_COPY_NOEXEC PAGE_COPY
1051 +#endif
1052 +
1053 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1054 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1055 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1056 diff -urNp linux-2.6.32.45/arch/ia64/include/asm/spinlock.h linux-2.6.32.45/arch/ia64/include/asm/spinlock.h
1057 --- linux-2.6.32.45/arch/ia64/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
1058 +++ linux-2.6.32.45/arch/ia64/include/asm/spinlock.h 2011-04-17 15:56:45.000000000 -0400
1059 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
1060 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1061
1062 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1063 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1064 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1065 }
1066
1067 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
1068 diff -urNp linux-2.6.32.45/arch/ia64/include/asm/uaccess.h linux-2.6.32.45/arch/ia64/include/asm/uaccess.h
1069 --- linux-2.6.32.45/arch/ia64/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
1070 +++ linux-2.6.32.45/arch/ia64/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
1071 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
1072 const void *__cu_from = (from); \
1073 long __cu_len = (n); \
1074 \
1075 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
1076 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1077 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1078 __cu_len; \
1079 })
1080 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
1081 long __cu_len = (n); \
1082 \
1083 __chk_user_ptr(__cu_from); \
1084 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
1085 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1086 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1087 __cu_len; \
1088 })
1089 diff -urNp linux-2.6.32.45/arch/ia64/kernel/dma-mapping.c linux-2.6.32.45/arch/ia64/kernel/dma-mapping.c
1090 --- linux-2.6.32.45/arch/ia64/kernel/dma-mapping.c 2011-03-27 14:31:47.000000000 -0400
1091 +++ linux-2.6.32.45/arch/ia64/kernel/dma-mapping.c 2011-04-17 15:56:45.000000000 -0400
1092 @@ -3,7 +3,7 @@
1093 /* Set this to 1 if there is a HW IOMMU in the system */
1094 int iommu_detected __read_mostly;
1095
1096 -struct dma_map_ops *dma_ops;
1097 +const struct dma_map_ops *dma_ops;
1098 EXPORT_SYMBOL(dma_ops);
1099
1100 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
1101 @@ -16,7 +16,7 @@ static int __init dma_init(void)
1102 }
1103 fs_initcall(dma_init);
1104
1105 -struct dma_map_ops *dma_get_ops(struct device *dev)
1106 +const struct dma_map_ops *dma_get_ops(struct device *dev)
1107 {
1108 return dma_ops;
1109 }
1110 diff -urNp linux-2.6.32.45/arch/ia64/kernel/module.c linux-2.6.32.45/arch/ia64/kernel/module.c
1111 --- linux-2.6.32.45/arch/ia64/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1112 +++ linux-2.6.32.45/arch/ia64/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1113 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
1114 void
1115 module_free (struct module *mod, void *module_region)
1116 {
1117 - if (mod && mod->arch.init_unw_table &&
1118 - module_region == mod->module_init) {
1119 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1120 unw_remove_unwind_table(mod->arch.init_unw_table);
1121 mod->arch.init_unw_table = NULL;
1122 }
1123 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
1124 }
1125
1126 static inline int
1127 +in_init_rx (const struct module *mod, uint64_t addr)
1128 +{
1129 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1130 +}
1131 +
1132 +static inline int
1133 +in_init_rw (const struct module *mod, uint64_t addr)
1134 +{
1135 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1136 +}
1137 +
1138 +static inline int
1139 in_init (const struct module *mod, uint64_t addr)
1140 {
1141 - return addr - (uint64_t) mod->module_init < mod->init_size;
1142 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1143 +}
1144 +
1145 +static inline int
1146 +in_core_rx (const struct module *mod, uint64_t addr)
1147 +{
1148 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1149 +}
1150 +
1151 +static inline int
1152 +in_core_rw (const struct module *mod, uint64_t addr)
1153 +{
1154 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1155 }
1156
1157 static inline int
1158 in_core (const struct module *mod, uint64_t addr)
1159 {
1160 - return addr - (uint64_t) mod->module_core < mod->core_size;
1161 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1162 }
1163
1164 static inline int
1165 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
1166 break;
1167
1168 case RV_BDREL:
1169 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1170 + if (in_init_rx(mod, val))
1171 + val -= (uint64_t) mod->module_init_rx;
1172 + else if (in_init_rw(mod, val))
1173 + val -= (uint64_t) mod->module_init_rw;
1174 + else if (in_core_rx(mod, val))
1175 + val -= (uint64_t) mod->module_core_rx;
1176 + else if (in_core_rw(mod, val))
1177 + val -= (uint64_t) mod->module_core_rw;
1178 break;
1179
1180 case RV_LTV:
1181 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
1182 * addresses have been selected...
1183 */
1184 uint64_t gp;
1185 - if (mod->core_size > MAX_LTOFF)
1186 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1187 /*
1188 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1189 * at the end of the module.
1190 */
1191 - gp = mod->core_size - MAX_LTOFF / 2;
1192 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1193 else
1194 - gp = mod->core_size / 2;
1195 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1196 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1197 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1198 mod->arch.gp = gp;
1199 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1200 }
1201 diff -urNp linux-2.6.32.45/arch/ia64/kernel/pci-dma.c linux-2.6.32.45/arch/ia64/kernel/pci-dma.c
1202 --- linux-2.6.32.45/arch/ia64/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
1203 +++ linux-2.6.32.45/arch/ia64/kernel/pci-dma.c 2011-04-17 15:56:45.000000000 -0400
1204 @@ -43,7 +43,7 @@ struct device fallback_dev = {
1205 .dma_mask = &fallback_dev.coherent_dma_mask,
1206 };
1207
1208 -extern struct dma_map_ops intel_dma_ops;
1209 +extern const struct dma_map_ops intel_dma_ops;
1210
1211 static int __init pci_iommu_init(void)
1212 {
1213 @@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *d
1214 }
1215 EXPORT_SYMBOL(iommu_dma_supported);
1216
1217 +extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1218 +extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1219 +extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1220 +extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1221 +extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1222 +extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1223 +extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1224 +
1225 +static const struct dma_map_ops intel_iommu_dma_ops = {
1226 + /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1227 + .alloc_coherent = intel_alloc_coherent,
1228 + .free_coherent = intel_free_coherent,
1229 + .map_sg = intel_map_sg,
1230 + .unmap_sg = intel_unmap_sg,
1231 + .map_page = intel_map_page,
1232 + .unmap_page = intel_unmap_page,
1233 + .mapping_error = intel_mapping_error,
1234 +
1235 + .sync_single_for_cpu = machvec_dma_sync_single,
1236 + .sync_sg_for_cpu = machvec_dma_sync_sg,
1237 + .sync_single_for_device = machvec_dma_sync_single,
1238 + .sync_sg_for_device = machvec_dma_sync_sg,
1239 + .dma_supported = iommu_dma_supported,
1240 +};
1241 +
1242 void __init pci_iommu_alloc(void)
1243 {
1244 - dma_ops = &intel_dma_ops;
1245 -
1246 - dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1247 - dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1248 - dma_ops->sync_single_for_device = machvec_dma_sync_single;
1249 - dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1250 - dma_ops->dma_supported = iommu_dma_supported;
1251 + dma_ops = &intel_iommu_dma_ops;
1252
1253 /*
1254 * The order of these functions is important for
1255 diff -urNp linux-2.6.32.45/arch/ia64/kernel/pci-swiotlb.c linux-2.6.32.45/arch/ia64/kernel/pci-swiotlb.c
1256 --- linux-2.6.32.45/arch/ia64/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
1257 +++ linux-2.6.32.45/arch/ia64/kernel/pci-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
1258 @@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent
1259 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1260 }
1261
1262 -struct dma_map_ops swiotlb_dma_ops = {
1263 +const struct dma_map_ops swiotlb_dma_ops = {
1264 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1265 .free_coherent = swiotlb_free_coherent,
1266 .map_page = swiotlb_map_page,
1267 diff -urNp linux-2.6.32.45/arch/ia64/kernel/sys_ia64.c linux-2.6.32.45/arch/ia64/kernel/sys_ia64.c
1268 --- linux-2.6.32.45/arch/ia64/kernel/sys_ia64.c 2011-03-27 14:31:47.000000000 -0400
1269 +++ linux-2.6.32.45/arch/ia64/kernel/sys_ia64.c 2011-04-17 15:56:45.000000000 -0400
1270 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
1271 if (REGION_NUMBER(addr) == RGN_HPAGE)
1272 addr = 0;
1273 #endif
1274 +
1275 +#ifdef CONFIG_PAX_RANDMMAP
1276 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1277 + addr = mm->free_area_cache;
1278 + else
1279 +#endif
1280 +
1281 if (!addr)
1282 addr = mm->free_area_cache;
1283
1284 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
1285 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1286 /* At this point: (!vma || addr < vma->vm_end). */
1287 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1288 - if (start_addr != TASK_UNMAPPED_BASE) {
1289 + if (start_addr != mm->mmap_base) {
1290 /* Start a new search --- just in case we missed some holes. */
1291 - addr = TASK_UNMAPPED_BASE;
1292 + addr = mm->mmap_base;
1293 goto full_search;
1294 }
1295 return -ENOMEM;
1296 }
1297 - if (!vma || addr + len <= vma->vm_start) {
1298 + if (check_heap_stack_gap(vma, addr, len)) {
1299 /* Remember the address where we stopped this search: */
1300 mm->free_area_cache = addr + len;
1301 return addr;
1302 diff -urNp linux-2.6.32.45/arch/ia64/kernel/topology.c linux-2.6.32.45/arch/ia64/kernel/topology.c
1303 --- linux-2.6.32.45/arch/ia64/kernel/topology.c 2011-03-27 14:31:47.000000000 -0400
1304 +++ linux-2.6.32.45/arch/ia64/kernel/topology.c 2011-04-17 15:56:45.000000000 -0400
1305 @@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject
1306 return ret;
1307 }
1308
1309 -static struct sysfs_ops cache_sysfs_ops = {
1310 +static const struct sysfs_ops cache_sysfs_ops = {
1311 .show = cache_show
1312 };
1313
1314 diff -urNp linux-2.6.32.45/arch/ia64/kernel/vmlinux.lds.S linux-2.6.32.45/arch/ia64/kernel/vmlinux.lds.S
1315 --- linux-2.6.32.45/arch/ia64/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
1316 +++ linux-2.6.32.45/arch/ia64/kernel/vmlinux.lds.S 2011-04-17 15:56:45.000000000 -0400
1317 @@ -190,7 +190,7 @@ SECTIONS
1318 /* Per-cpu data: */
1319 . = ALIGN(PERCPU_PAGE_SIZE);
1320 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1321 - __phys_per_cpu_start = __per_cpu_load;
1322 + __phys_per_cpu_start = per_cpu_load;
1323 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1324 * into percpu page size
1325 */
1326 diff -urNp linux-2.6.32.45/arch/ia64/mm/fault.c linux-2.6.32.45/arch/ia64/mm/fault.c
1327 --- linux-2.6.32.45/arch/ia64/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1328 +++ linux-2.6.32.45/arch/ia64/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1329 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
1330 return pte_present(pte);
1331 }
1332
1333 +#ifdef CONFIG_PAX_PAGEEXEC
1334 +void pax_report_insns(void *pc, void *sp)
1335 +{
1336 + unsigned long i;
1337 +
1338 + printk(KERN_ERR "PAX: bytes at PC: ");
1339 + for (i = 0; i < 8; i++) {
1340 + unsigned int c;
1341 + if (get_user(c, (unsigned int *)pc+i))
1342 + printk(KERN_CONT "???????? ");
1343 + else
1344 + printk(KERN_CONT "%08x ", c);
1345 + }
1346 + printk("\n");
1347 +}
1348 +#endif
1349 +
1350 void __kprobes
1351 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1352 {
1353 @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
1354 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1355 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1356
1357 - if ((vma->vm_flags & mask) != mask)
1358 + if ((vma->vm_flags & mask) != mask) {
1359 +
1360 +#ifdef CONFIG_PAX_PAGEEXEC
1361 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1362 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1363 + goto bad_area;
1364 +
1365 + up_read(&mm->mmap_sem);
1366 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1367 + do_group_exit(SIGKILL);
1368 + }
1369 +#endif
1370 +
1371 goto bad_area;
1372
1373 + }
1374 +
1375 survive:
1376 /*
1377 * If for any reason at all we couldn't handle the fault, make
1378 diff -urNp linux-2.6.32.45/arch/ia64/mm/hugetlbpage.c linux-2.6.32.45/arch/ia64/mm/hugetlbpage.c
1379 --- linux-2.6.32.45/arch/ia64/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
1380 +++ linux-2.6.32.45/arch/ia64/mm/hugetlbpage.c 2011-04-17 15:56:45.000000000 -0400
1381 @@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(
1382 /* At this point: (!vmm || addr < vmm->vm_end). */
1383 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1384 return -ENOMEM;
1385 - if (!vmm || (addr + len) <= vmm->vm_start)
1386 + if (check_heap_stack_gap(vmm, addr, len))
1387 return addr;
1388 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1389 }
1390 diff -urNp linux-2.6.32.45/arch/ia64/mm/init.c linux-2.6.32.45/arch/ia64/mm/init.c
1391 --- linux-2.6.32.45/arch/ia64/mm/init.c 2011-03-27 14:31:47.000000000 -0400
1392 +++ linux-2.6.32.45/arch/ia64/mm/init.c 2011-04-17 15:56:45.000000000 -0400
1393 @@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1394 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1395 vma->vm_end = vma->vm_start + PAGE_SIZE;
1396 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1397 +
1398 +#ifdef CONFIG_PAX_PAGEEXEC
1399 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1400 + vma->vm_flags &= ~VM_EXEC;
1401 +
1402 +#ifdef CONFIG_PAX_MPROTECT
1403 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1404 + vma->vm_flags &= ~VM_MAYEXEC;
1405 +#endif
1406 +
1407 + }
1408 +#endif
1409 +
1410 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1411 down_write(&current->mm->mmap_sem);
1412 if (insert_vm_struct(current->mm, vma)) {
1413 diff -urNp linux-2.6.32.45/arch/ia64/sn/pci/pci_dma.c linux-2.6.32.45/arch/ia64/sn/pci/pci_dma.c
1414 --- linux-2.6.32.45/arch/ia64/sn/pci/pci_dma.c 2011-03-27 14:31:47.000000000 -0400
1415 +++ linux-2.6.32.45/arch/ia64/sn/pci/pci_dma.c 2011-04-17 15:56:45.000000000 -0400
1416 @@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *
1417 return ret;
1418 }
1419
1420 -static struct dma_map_ops sn_dma_ops = {
1421 +static const struct dma_map_ops sn_dma_ops = {
1422 .alloc_coherent = sn_dma_alloc_coherent,
1423 .free_coherent = sn_dma_free_coherent,
1424 .map_page = sn_dma_map_page,
1425 diff -urNp linux-2.6.32.45/arch/m32r/lib/usercopy.c linux-2.6.32.45/arch/m32r/lib/usercopy.c
1426 --- linux-2.6.32.45/arch/m32r/lib/usercopy.c 2011-03-27 14:31:47.000000000 -0400
1427 +++ linux-2.6.32.45/arch/m32r/lib/usercopy.c 2011-04-17 15:56:45.000000000 -0400
1428 @@ -14,6 +14,9 @@
1429 unsigned long
1430 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1431 {
1432 + if ((long)n < 0)
1433 + return n;
1434 +
1435 prefetch(from);
1436 if (access_ok(VERIFY_WRITE, to, n))
1437 __copy_user(to,from,n);
1438 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1439 unsigned long
1440 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1441 {
1442 + if ((long)n < 0)
1443 + return n;
1444 +
1445 prefetchw(to);
1446 if (access_ok(VERIFY_READ, from, n))
1447 __copy_user_zeroing(to,from,n);
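The guard added above rejects any byte count with the sign bit set before access_ok() is even consulted: a negative length coming from a broken caller turns into an enormous unsigned size once it reaches the copy routine. A minimal user-space illustration of the failure mode and the check (names are hypothetical, not the m32r routines):

#include <stdio.h>
#include <stddef.h>

/* Model of the added guard: a length with the sign bit set is almost
 * certainly a negative value from a broken caller; interpreted as an
 * unsigned size it would request an enormous copy. */
static size_t guarded_copy(void *dst, const void *src, size_t n)
{
	(void)dst;
	(void)src;
	if ((long)n < 0)	/* refuse, reporting "n bytes not copied" */
		return n;
	/* ... the real copy would happen here ... */
	return 0;
}

int main(void)
{
	char buf[16];
	int broken_len = -4;			/* e.g. an unchecked error code */
	size_t n = (size_t)broken_len;		/* wraps to 0xffff...fffc */

	printf("as size_t: %zu\n", n);
	printf("left uncopied: %zu\n", guarded_copy(buf, "data", n));
	return 0;
}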
1448 diff -urNp linux-2.6.32.45/arch/mips/alchemy/devboards/pm.c linux-2.6.32.45/arch/mips/alchemy/devboards/pm.c
1449 --- linux-2.6.32.45/arch/mips/alchemy/devboards/pm.c 2011-03-27 14:31:47.000000000 -0400
1450 +++ linux-2.6.32.45/arch/mips/alchemy/devboards/pm.c 2011-04-17 15:56:45.000000000 -0400
1451 @@ -78,7 +78,7 @@ static void db1x_pm_end(void)
1452
1453 }
1454
1455 -static struct platform_suspend_ops db1x_pm_ops = {
1456 +static const struct platform_suspend_ops db1x_pm_ops = {
1457 .valid = suspend_valid_only_mem,
1458 .begin = db1x_pm_begin,
1459 .enter = db1x_pm_enter,
1460 diff -urNp linux-2.6.32.45/arch/mips/include/asm/elf.h linux-2.6.32.45/arch/mips/include/asm/elf.h
1461 --- linux-2.6.32.45/arch/mips/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1462 +++ linux-2.6.32.45/arch/mips/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1463 @@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_str
1464 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1465 #endif
1466
1467 +#ifdef CONFIG_PAX_ASLR
1468 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1469 +
1470 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1471 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1472 +#endif
1473 +
1474 #endif /* _ASM_ELF_H */
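PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN give the number of random bits applied to the mmap and stack bases when PaX ASLR is enabled; the randomisation code lives elsewhere in the patch and, roughly, turns such a bit count into a page-aligned offset. A user-space sketch of that translation (rand() stands in for the kernel entropy source; this is an illustration, not the patch's code):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12

/* Illustration only: turn an "entropy in bits" constant such as
 * PAX_DELTA_MMAP_LEN into a page-aligned random offset. */
static unsigned long random_delta(unsigned int bits)
{
	unsigned long mask = (1UL << bits) - 1;

	/* rand() stands in for the kernel's entropy source */
	return ((unsigned long)rand() & mask) << PAGE_SHIFT;
}

int main(void)
{
	unsigned long base = 0x00400000UL;	/* PAX_ELF_ET_DYN_BASE above */
	unsigned int bits = 27 - PAGE_SHIFT;	/* the 32-bit address space case */

	srand(1);
	printf("randomized base: %#lx\n", base + random_delta(bits));
	return 0;
}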
1475 diff -urNp linux-2.6.32.45/arch/mips/include/asm/page.h linux-2.6.32.45/arch/mips/include/asm/page.h
1476 --- linux-2.6.32.45/arch/mips/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
1477 +++ linux-2.6.32.45/arch/mips/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
1478 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1479 #ifdef CONFIG_CPU_MIPS32
1480 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1481 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1482 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1483 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1484 #else
1485 typedef struct { unsigned long long pte; } pte_t;
1486 #define pte_val(x) ((x).pte)
1487 diff -urNp linux-2.6.32.45/arch/mips/include/asm/reboot.h linux-2.6.32.45/arch/mips/include/asm/reboot.h
1488 --- linux-2.6.32.45/arch/mips/include/asm/reboot.h 2011-03-27 14:31:47.000000000 -0400
1489 +++ linux-2.6.32.45/arch/mips/include/asm/reboot.h 2011-08-21 17:35:02.000000000 -0400
1490 @@ -9,7 +9,7 @@
1491 #ifndef _ASM_REBOOT_H
1492 #define _ASM_REBOOT_H
1493
1494 -extern void (*_machine_restart)(char *command);
1495 -extern void (*_machine_halt)(void);
1496 +extern void (*__noreturn _machine_restart)(char *command);
1497 +extern void (*__noreturn _machine_halt)(void);
1498
1499 #endif /* _ASM_REBOOT_H */
1500 diff -urNp linux-2.6.32.45/arch/mips/include/asm/system.h linux-2.6.32.45/arch/mips/include/asm/system.h
1501 --- linux-2.6.32.45/arch/mips/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
1502 +++ linux-2.6.32.45/arch/mips/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
1503 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1504 */
1505 #define __ARCH_WANT_UNLOCKED_CTXSW
1506
1507 -extern unsigned long arch_align_stack(unsigned long sp);
1508 +#define arch_align_stack(x) ((x) & ~0xfUL)
1509
1510 #endif /* _ASM_SYSTEM_H */
1511 diff -urNp linux-2.6.32.45/arch/mips/kernel/binfmt_elfn32.c linux-2.6.32.45/arch/mips/kernel/binfmt_elfn32.c
1512 --- linux-2.6.32.45/arch/mips/kernel/binfmt_elfn32.c 2011-03-27 14:31:47.000000000 -0400
1513 +++ linux-2.6.32.45/arch/mips/kernel/binfmt_elfn32.c 2011-04-17 15:56:45.000000000 -0400
1514 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1515 #undef ELF_ET_DYN_BASE
1516 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1517
1518 +#ifdef CONFIG_PAX_ASLR
1519 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1520 +
1521 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1522 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1523 +#endif
1524 +
1525 #include <asm/processor.h>
1526 #include <linux/module.h>
1527 #include <linux/elfcore.h>
1528 diff -urNp linux-2.6.32.45/arch/mips/kernel/binfmt_elfo32.c linux-2.6.32.45/arch/mips/kernel/binfmt_elfo32.c
1529 --- linux-2.6.32.45/arch/mips/kernel/binfmt_elfo32.c 2011-03-27 14:31:47.000000000 -0400
1530 +++ linux-2.6.32.45/arch/mips/kernel/binfmt_elfo32.c 2011-04-17 15:56:45.000000000 -0400
1531 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1532 #undef ELF_ET_DYN_BASE
1533 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1534
1535 +#ifdef CONFIG_PAX_ASLR
1536 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1537 +
1538 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1539 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1540 +#endif
1541 +
1542 #include <asm/processor.h>
1543
1544 /*
1545 diff -urNp linux-2.6.32.45/arch/mips/kernel/kgdb.c linux-2.6.32.45/arch/mips/kernel/kgdb.c
1546 --- linux-2.6.32.45/arch/mips/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
1547 +++ linux-2.6.32.45/arch/mips/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
1548 @@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vecto
1549 return -1;
1550 }
1551
1552 +/* cannot be const */
1553 struct kgdb_arch arch_kgdb_ops;
1554
1555 /*
1556 diff -urNp linux-2.6.32.45/arch/mips/kernel/process.c linux-2.6.32.45/arch/mips/kernel/process.c
1557 --- linux-2.6.32.45/arch/mips/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
1558 +++ linux-2.6.32.45/arch/mips/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
1559 @@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_stru
1560 out:
1561 return pc;
1562 }
1563 -
1564 -/*
1565 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1566 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1567 - */
1568 -unsigned long arch_align_stack(unsigned long sp)
1569 -{
1570 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1571 - sp -= get_random_int() & ~PAGE_MASK;
1572 -
1573 - return sp & ALMASK;
1574 -}
1575 diff -urNp linux-2.6.32.45/arch/mips/kernel/reset.c linux-2.6.32.45/arch/mips/kernel/reset.c
1576 --- linux-2.6.32.45/arch/mips/kernel/reset.c 2011-03-27 14:31:47.000000000 -0400
1577 +++ linux-2.6.32.45/arch/mips/kernel/reset.c 2011-08-21 17:35:26.000000000 -0400
1578 @@ -19,8 +19,8 @@
1579 * So handle all using function pointers to machine specific
1580 * functions.
1581 */
1582 -void (*_machine_restart)(char *command);
1583 -void (*_machine_halt)(void);
1584 +void (*__noreturn _machine_restart)(char *command);
1585 +void (*__noreturn _machine_halt)(void);
1586 void (*pm_power_off)(void);
1587
1588 EXPORT_SYMBOL(pm_power_off);
1589 @@ -29,16 +29,19 @@ void machine_restart(char *command)
1590 {
1591 if (_machine_restart)
1592 _machine_restart(command);
1593 + BUG();
1594 }
1595
1596 void machine_halt(void)
1597 {
1598 if (_machine_halt)
1599 _machine_halt();
1600 + BUG();
1601 }
1602
1603 void machine_power_off(void)
1604 {
1605 if (pm_power_off)
1606 pm_power_off();
1607 + BUG();
1608 }
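Two related changes above: the _machine_restart/_machine_halt pointers are annotated __noreturn, and BUG() is added after each call so control provably never falls out of machine_restart()/machine_halt() even when no handler is registered. GCC accepts the noreturn attribute directly on a function-pointer declarator, as the hunk relies on; a small user-space model (hypothetical names):

#include <stdio.h>
#include <stdlib.h>

static void real_poweroff(void) __attribute__((noreturn));

/* the attribute goes inside the declarator, exactly as in the hunk above */
static void (*__attribute__((noreturn)) machine_off)(void);

static void real_poweroff(void)
{
	puts("powering off");
	exit(0);
}

int main(void)
{
	machine_off = real_poweroff;
	machine_off();		/* the compiler knows this never returns */
}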
1609 diff -urNp linux-2.6.32.45/arch/mips/kernel/syscall.c linux-2.6.32.45/arch/mips/kernel/syscall.c
1610 --- linux-2.6.32.45/arch/mips/kernel/syscall.c 2011-03-27 14:31:47.000000000 -0400
1611 +++ linux-2.6.32.45/arch/mips/kernel/syscall.c 2011-04-17 15:56:45.000000000 -0400
1612 @@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(str
1613 do_color_align = 0;
1614 if (filp || (flags & MAP_SHARED))
1615 do_color_align = 1;
1616 +
1617 +#ifdef CONFIG_PAX_RANDMMAP
1618 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1619 +#endif
1620 +
1621 if (addr) {
1622 if (do_color_align)
1623 addr = COLOUR_ALIGN(addr, pgoff);
1624 else
1625 addr = PAGE_ALIGN(addr);
1626 vmm = find_vma(current->mm, addr);
1627 - if (task_size - len >= addr &&
1628 - (!vmm || addr + len <= vmm->vm_start))
1629 + if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
1630 return addr;
1631 }
1632 - addr = TASK_UNMAPPED_BASE;
1633 + addr = current->mm->mmap_base;
1634 if (do_color_align)
1635 addr = COLOUR_ALIGN(addr, pgoff);
1636 else
1637 @@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(str
1638 /* At this point: (!vmm || addr < vmm->vm_end). */
1639 if (task_size - len < addr)
1640 return -ENOMEM;
1641 - if (!vmm || addr + len <= vmm->vm_start)
1642 + if (check_heap_stack_gap(vmm, addr, len))
1643 return addr;
1644 addr = vmm->vm_end;
1645 if (do_color_align)
1646 diff -urNp linux-2.6.32.45/arch/mips/Makefile linux-2.6.32.45/arch/mips/Makefile
1647 --- linux-2.6.32.45/arch/mips/Makefile 2011-03-27 14:31:47.000000000 -0400
1648 +++ linux-2.6.32.45/arch/mips/Makefile 2011-08-21 19:26:52.000000000 -0400
1649 @@ -51,6 +51,8 @@ endif
1650 cflags-y := -ffunction-sections
1651 cflags-y += $(call cc-option, -mno-check-zero-division)
1652
1653 +cflags-y += -Wno-sign-compare -Wno-extra
1654 +
1655 ifdef CONFIG_32BIT
1656 ld-emul = $(32bit-emul)
1657 vmlinux-32 = vmlinux
1658 diff -urNp linux-2.6.32.45/arch/mips/mm/fault.c linux-2.6.32.45/arch/mips/mm/fault.c
1659 --- linux-2.6.32.45/arch/mips/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1660 +++ linux-2.6.32.45/arch/mips/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1661 @@ -26,6 +26,23 @@
1662 #include <asm/ptrace.h>
1663 #include <asm/highmem.h> /* For VMALLOC_END */
1664
1665 +#ifdef CONFIG_PAX_PAGEEXEC
1666 +void pax_report_insns(void *pc, void *sp)
1667 +{
1668 + unsigned long i;
1669 +
1670 + printk(KERN_ERR "PAX: bytes at PC: ");
1671 + for (i = 0; i < 5; i++) {
1672 + unsigned int c;
1673 + if (get_user(c, (unsigned int *)pc+i))
1674 + printk(KERN_CONT "???????? ");
1675 + else
1676 + printk(KERN_CONT "%08x ", c);
1677 + }
1678 + printk("\n");
1679 +}
1680 +#endif
1681 +
1682 /*
1683 * This routine handles page faults. It determines the address,
1684 * and the problem, and then passes it off to one of the appropriate
1685 diff -urNp linux-2.6.32.45/arch/parisc/include/asm/elf.h linux-2.6.32.45/arch/parisc/include/asm/elf.h
1686 --- linux-2.6.32.45/arch/parisc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1687 +++ linux-2.6.32.45/arch/parisc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1688 @@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration..
1689
1690 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1691
1692 +#ifdef CONFIG_PAX_ASLR
1693 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1694 +
1695 +#define PAX_DELTA_MMAP_LEN 16
1696 +#define PAX_DELTA_STACK_LEN 16
1697 +#endif
1698 +
1699 /* This yields a mask that user programs can use to figure out what
1700 instruction set this CPU supports. This could be done in user space,
1701 but it's not easy, and we've already done it here. */
1702 diff -urNp linux-2.6.32.45/arch/parisc/include/asm/pgtable.h linux-2.6.32.45/arch/parisc/include/asm/pgtable.h
1703 --- linux-2.6.32.45/arch/parisc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1704 +++ linux-2.6.32.45/arch/parisc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1705 @@ -207,6 +207,17 @@
1706 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1707 #define PAGE_COPY PAGE_EXECREAD
1708 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1709 +
1710 +#ifdef CONFIG_PAX_PAGEEXEC
1711 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1712 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1713 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1714 +#else
1715 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1716 +# define PAGE_COPY_NOEXEC PAGE_COPY
1717 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1718 +#endif
1719 +
1720 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1721 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
1722 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
1723 diff -urNp linux-2.6.32.45/arch/parisc/kernel/module.c linux-2.6.32.45/arch/parisc/kernel/module.c
1724 --- linux-2.6.32.45/arch/parisc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1725 +++ linux-2.6.32.45/arch/parisc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1726 @@ -95,16 +95,38 @@
1727
1728 /* three functions to determine where in the module core
1729 * or init pieces the location is */
1730 +static inline int in_init_rx(struct module *me, void *loc)
1731 +{
1732 + return (loc >= me->module_init_rx &&
1733 + loc < (me->module_init_rx + me->init_size_rx));
1734 +}
1735 +
1736 +static inline int in_init_rw(struct module *me, void *loc)
1737 +{
1738 + return (loc >= me->module_init_rw &&
1739 + loc < (me->module_init_rw + me->init_size_rw));
1740 +}
1741 +
1742 static inline int in_init(struct module *me, void *loc)
1743 {
1744 - return (loc >= me->module_init &&
1745 - loc <= (me->module_init + me->init_size));
1746 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1747 +}
1748 +
1749 +static inline int in_core_rx(struct module *me, void *loc)
1750 +{
1751 + return (loc >= me->module_core_rx &&
1752 + loc < (me->module_core_rx + me->core_size_rx));
1753 +}
1754 +
1755 +static inline int in_core_rw(struct module *me, void *loc)
1756 +{
1757 + return (loc >= me->module_core_rw &&
1758 + loc < (me->module_core_rw + me->core_size_rw));
1759 }
1760
1761 static inline int in_core(struct module *me, void *loc)
1762 {
1763 - return (loc >= me->module_core &&
1764 - loc <= (me->module_core + me->core_size));
1765 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1766 }
1767
1768 static inline int in_local(struct module *me, void *loc)
1769 @@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_
1770 }
1771
1772 /* align things a bit */
1773 - me->core_size = ALIGN(me->core_size, 16);
1774 - me->arch.got_offset = me->core_size;
1775 - me->core_size += gots * sizeof(struct got_entry);
1776 -
1777 - me->core_size = ALIGN(me->core_size, 16);
1778 - me->arch.fdesc_offset = me->core_size;
1779 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1780 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1781 + me->arch.got_offset = me->core_size_rw;
1782 + me->core_size_rw += gots * sizeof(struct got_entry);
1783 +
1784 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1785 + me->arch.fdesc_offset = me->core_size_rw;
1786 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1787
1788 me->arch.got_max = gots;
1789 me->arch.fdesc_max = fdescs;
1790 @@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module
1791
1792 BUG_ON(value == 0);
1793
1794 - got = me->module_core + me->arch.got_offset;
1795 + got = me->module_core_rw + me->arch.got_offset;
1796 for (i = 0; got[i].addr; i++)
1797 if (got[i].addr == value)
1798 goto out;
1799 @@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module
1800 #ifdef CONFIG_64BIT
1801 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1802 {
1803 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1804 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1805
1806 if (!value) {
1807 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1808 @@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module
1809
1810 /* Create new one */
1811 fdesc->addr = value;
1812 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1813 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1814 return (Elf_Addr)fdesc;
1815 }
1816 #endif /* CONFIG_64BIT */
1817 @@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
1818
1819 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1820 end = table + sechdrs[me->arch.unwind_section].sh_size;
1821 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1822 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1823
1824 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1825 me->arch.unwind_section, table, end, gp);
1826 diff -urNp linux-2.6.32.45/arch/parisc/kernel/sys_parisc.c linux-2.6.32.45/arch/parisc/kernel/sys_parisc.c
1827 --- linux-2.6.32.45/arch/parisc/kernel/sys_parisc.c 2011-03-27 14:31:47.000000000 -0400
1828 +++ linux-2.6.32.45/arch/parisc/kernel/sys_parisc.c 2011-04-17 15:56:45.000000000 -0400
1829 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1830 /* At this point: (!vma || addr < vma->vm_end). */
1831 if (TASK_SIZE - len < addr)
1832 return -ENOMEM;
1833 - if (!vma || addr + len <= vma->vm_start)
1834 + if (check_heap_stack_gap(vma, addr, len))
1835 return addr;
1836 addr = vma->vm_end;
1837 }
1838 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1839 /* At this point: (!vma || addr < vma->vm_end). */
1840 if (TASK_SIZE - len < addr)
1841 return -ENOMEM;
1842 - if (!vma || addr + len <= vma->vm_start)
1843 + if (check_heap_stack_gap(vma, addr, len))
1844 return addr;
1845 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1846 if (addr < vma->vm_end) /* handle wraparound */
1847 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1848 if (flags & MAP_FIXED)
1849 return addr;
1850 if (!addr)
1851 - addr = TASK_UNMAPPED_BASE;
1852 + addr = current->mm->mmap_base;
1853
1854 if (filp) {
1855 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1856 diff -urNp linux-2.6.32.45/arch/parisc/kernel/traps.c linux-2.6.32.45/arch/parisc/kernel/traps.c
1857 --- linux-2.6.32.45/arch/parisc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
1858 +++ linux-2.6.32.45/arch/parisc/kernel/traps.c 2011-04-17 15:56:45.000000000 -0400
1859 @@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1860
1861 down_read(&current->mm->mmap_sem);
1862 vma = find_vma(current->mm,regs->iaoq[0]);
1863 - if (vma && (regs->iaoq[0] >= vma->vm_start)
1864 - && (vma->vm_flags & VM_EXEC)) {
1865 -
1866 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1867 fault_address = regs->iaoq[0];
1868 fault_space = regs->iasq[0];
1869
1870 diff -urNp linux-2.6.32.45/arch/parisc/mm/fault.c linux-2.6.32.45/arch/parisc/mm/fault.c
1871 --- linux-2.6.32.45/arch/parisc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1872 +++ linux-2.6.32.45/arch/parisc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1873 @@ -15,6 +15,7 @@
1874 #include <linux/sched.h>
1875 #include <linux/interrupt.h>
1876 #include <linux/module.h>
1877 +#include <linux/unistd.h>
1878
1879 #include <asm/uaccess.h>
1880 #include <asm/traps.h>
1881 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1882 static unsigned long
1883 parisc_acctyp(unsigned long code, unsigned int inst)
1884 {
1885 - if (code == 6 || code == 16)
1886 + if (code == 6 || code == 7 || code == 16)
1887 return VM_EXEC;
1888
1889 switch (inst & 0xf0000000) {
1890 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1891 }
1892 #endif
1893
1894 +#ifdef CONFIG_PAX_PAGEEXEC
1895 +/*
1896 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1897 + *
1898 + * returns 1 when task should be killed
1899 + * 2 when rt_sigreturn trampoline was detected
1900 + * 3 when unpatched PLT trampoline was detected
1901 + */
1902 +static int pax_handle_fetch_fault(struct pt_regs *regs)
1903 +{
1904 +
1905 +#ifdef CONFIG_PAX_EMUPLT
1906 + int err;
1907 +
1908 + do { /* PaX: unpatched PLT emulation */
1909 + unsigned int bl, depwi;
1910 +
1911 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1912 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1913 +
1914 + if (err)
1915 + break;
1916 +
1917 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1918 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1919 +
1920 + err = get_user(ldw, (unsigned int *)addr);
1921 + err |= get_user(bv, (unsigned int *)(addr+4));
1922 + err |= get_user(ldw2, (unsigned int *)(addr+8));
1923 +
1924 + if (err)
1925 + break;
1926 +
1927 + if (ldw == 0x0E801096U &&
1928 + bv == 0xEAC0C000U &&
1929 + ldw2 == 0x0E881095U)
1930 + {
1931 + unsigned int resolver, map;
1932 +
1933 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1934 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1935 + if (err)
1936 + break;
1937 +
1938 + regs->gr[20] = instruction_pointer(regs)+8;
1939 + regs->gr[21] = map;
1940 + regs->gr[22] = resolver;
1941 + regs->iaoq[0] = resolver | 3UL;
1942 + regs->iaoq[1] = regs->iaoq[0] + 4;
1943 + return 3;
1944 + }
1945 + }
1946 + } while (0);
1947 +#endif
1948 +
1949 +#ifdef CONFIG_PAX_EMUTRAMP
1950 +
1951 +#ifndef CONFIG_PAX_EMUSIGRT
1952 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1953 + return 1;
1954 +#endif
1955 +
1956 + do { /* PaX: rt_sigreturn emulation */
1957 + unsigned int ldi1, ldi2, bel, nop;
1958 +
1959 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1960 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1961 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1962 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1963 +
1964 + if (err)
1965 + break;
1966 +
1967 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1968 + ldi2 == 0x3414015AU &&
1969 + bel == 0xE4008200U &&
1970 + nop == 0x08000240U)
1971 + {
1972 + regs->gr[25] = (ldi1 & 2) >> 1;
1973 + regs->gr[20] = __NR_rt_sigreturn;
1974 + regs->gr[31] = regs->iaoq[1] + 16;
1975 + regs->sr[0] = regs->iasq[1];
1976 + regs->iaoq[0] = 0x100UL;
1977 + regs->iaoq[1] = regs->iaoq[0] + 4;
1978 + regs->iasq[0] = regs->sr[2];
1979 + regs->iasq[1] = regs->sr[2];
1980 + return 2;
1981 + }
1982 + } while (0);
1983 +#endif
1984 +
1985 + return 1;
1986 +}
1987 +
1988 +void pax_report_insns(void *pc, void *sp)
1989 +{
1990 + unsigned long i;
1991 +
1992 + printk(KERN_ERR "PAX: bytes at PC: ");
1993 + for (i = 0; i < 5; i++) {
1994 + unsigned int c;
1995 + if (get_user(c, (unsigned int *)pc+i))
1996 + printk(KERN_CONT "???????? ");
1997 + else
1998 + printk(KERN_CONT "%08x ", c);
1999 + }
2000 + printk("\n");
2001 +}
2002 +#endif
2003 +
2004 int fixup_exception(struct pt_regs *regs)
2005 {
2006 const struct exception_table_entry *fix;
2007 @@ -192,8 +303,33 @@ good_area:
2008
2009 acc_type = parisc_acctyp(code,regs->iir);
2010
2011 - if ((vma->vm_flags & acc_type) != acc_type)
2012 + if ((vma->vm_flags & acc_type) != acc_type) {
2013 +
2014 +#ifdef CONFIG_PAX_PAGEEXEC
2015 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2016 + (address & ~3UL) == instruction_pointer(regs))
2017 + {
2018 + up_read(&mm->mmap_sem);
2019 + switch (pax_handle_fetch_fault(regs)) {
2020 +
2021 +#ifdef CONFIG_PAX_EMUPLT
2022 + case 3:
2023 + return;
2024 +#endif
2025 +
2026 +#ifdef CONFIG_PAX_EMUTRAMP
2027 + case 2:
2028 + return;
2029 +#endif
2030 +
2031 + }
2032 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2033 + do_group_exit(SIGKILL);
2034 + }
2035 +#endif
2036 +
2037 goto bad_area;
2038 + }
2039
2040 /*
2041 * If for any reason at all we couldn't handle the fault, make
2042 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/device.h linux-2.6.32.45/arch/powerpc/include/asm/device.h
2043 --- linux-2.6.32.45/arch/powerpc/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
2044 +++ linux-2.6.32.45/arch/powerpc/include/asm/device.h 2011-04-17 15:56:45.000000000 -0400
2045 @@ -14,7 +14,7 @@ struct dev_archdata {
2046 struct device_node *of_node;
2047
2048 /* DMA operations on that device */
2049 - struct dma_map_ops *dma_ops;
2050 + const struct dma_map_ops *dma_ops;
2051
2052 /*
2053 * When an iommu is in use, dma_data is used as a ptr to the base of the
2054 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/dma-mapping.h linux-2.6.32.45/arch/powerpc/include/asm/dma-mapping.h
2055 --- linux-2.6.32.45/arch/powerpc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
2056 +++ linux-2.6.32.45/arch/powerpc/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
2057 @@ -69,9 +69,9 @@ static inline unsigned long device_to_ma
2058 #ifdef CONFIG_PPC64
2059 extern struct dma_map_ops dma_iommu_ops;
2060 #endif
2061 -extern struct dma_map_ops dma_direct_ops;
2062 +extern const struct dma_map_ops dma_direct_ops;
2063
2064 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2065 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
2066 {
2067 /* We don't handle the NULL dev case for ISA for now. We could
2068 * do it via an out of line call but it is not needed for now. The
2069 @@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dm
2070 return dev->archdata.dma_ops;
2071 }
2072
2073 -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
2074 +static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
2075 {
2076 dev->archdata.dma_ops = ops;
2077 }
2078 @@ -118,7 +118,7 @@ static inline void set_dma_offset(struct
2079
2080 static inline int dma_supported(struct device *dev, u64 mask)
2081 {
2082 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2083 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2084
2085 if (unlikely(dma_ops == NULL))
2086 return 0;
2087 @@ -132,7 +132,7 @@ static inline int dma_supported(struct d
2088
2089 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2090 {
2091 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2092 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2093
2094 if (unlikely(dma_ops == NULL))
2095 return -EIO;
2096 @@ -147,7 +147,7 @@ static inline int dma_set_mask(struct de
2097 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2098 dma_addr_t *dma_handle, gfp_t flag)
2099 {
2100 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2101 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2102 void *cpu_addr;
2103
2104 BUG_ON(!dma_ops);
2105 @@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(s
2106 static inline void dma_free_coherent(struct device *dev, size_t size,
2107 void *cpu_addr, dma_addr_t dma_handle)
2108 {
2109 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2110 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2111
2112 BUG_ON(!dma_ops);
2113
2114 @@ -173,7 +173,7 @@ static inline void dma_free_coherent(str
2115
2116 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
2117 {
2118 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2119 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2120
2121 if (dma_ops->mapping_error)
2122 return dma_ops->mapping_error(dev, dma_addr);
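A large share of this patch consists of the pattern above: ops tables such as dma_map_ops, and the pointers that reach them, become const so the function-pointer tables land in read-only data and cannot be retargeted at run time. A minimal model of the pattern (hypothetical names):

#include <stdio.h>

/* A typical "ops" table: a bundle of function pointers. */
struct widget_ops {
	int (*open)(void);
	void (*close)(void);
};

static int my_open(void)   { puts("open");  return 0; }
static void my_close(void) { puts("close"); }

/* const places the table in .rodata; overwriting an entry at run time
 * is now a compile error (or a fault if forced through a cast). */
static const struct widget_ops my_ops = {
	.open  = my_open,
	.close = my_close,
};

/* consumers take a pointer-to-const, mirroring the header changes above */
static void use(const struct widget_ops *ops)
{
	ops->open();
	ops->close();
}

int main(void)
{
	use(&my_ops);
	/* my_ops.open = NULL; -- would not compile */
	return 0;
}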
2123 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/elf.h linux-2.6.32.45/arch/powerpc/include/asm/elf.h
2124 --- linux-2.6.32.45/arch/powerpc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
2125 +++ linux-2.6.32.45/arch/powerpc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
2126 @@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
2127 the loader. We need to make sure that it is out of the way of the program
2128 that it will "exec", and that there is sufficient room for the brk. */
2129
2130 -extern unsigned long randomize_et_dyn(unsigned long base);
2131 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2132 +#define ELF_ET_DYN_BASE (0x20000000)
2133 +
2134 +#ifdef CONFIG_PAX_ASLR
2135 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2136 +
2137 +#ifdef __powerpc64__
2138 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2139 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2140 +#else
2141 +#define PAX_DELTA_MMAP_LEN 15
2142 +#define PAX_DELTA_STACK_LEN 15
2143 +#endif
2144 +#endif
2145
2146 /*
2147 * Our registers are always unsigned longs, whether we're a 32 bit
2148 @@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(s
2149 (0x7ff >> (PAGE_SHIFT - 12)) : \
2150 (0x3ffff >> (PAGE_SHIFT - 12)))
2151
2152 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2153 -#define arch_randomize_brk arch_randomize_brk
2154 -
2155 #endif /* __KERNEL__ */
2156
2157 /*
2158 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/iommu.h linux-2.6.32.45/arch/powerpc/include/asm/iommu.h
2159 --- linux-2.6.32.45/arch/powerpc/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
2160 +++ linux-2.6.32.45/arch/powerpc/include/asm/iommu.h 2011-04-17 15:56:45.000000000 -0400
2161 @@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(voi
2162 extern void iommu_init_early_dart(void);
2163 extern void iommu_init_early_pasemi(void);
2164
2165 +/* dma-iommu.c */
2166 +extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
2167 +
2168 #ifdef CONFIG_PCI
2169 extern void pci_iommu_init(void);
2170 extern void pci_direct_iommu_init(void);
2171 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/kmap_types.h linux-2.6.32.45/arch/powerpc/include/asm/kmap_types.h
2172 --- linux-2.6.32.45/arch/powerpc/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
2173 +++ linux-2.6.32.45/arch/powerpc/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
2174 @@ -26,6 +26,7 @@ enum km_type {
2175 KM_SOFTIRQ1,
2176 KM_PPC_SYNC_PAGE,
2177 KM_PPC_SYNC_ICACHE,
2178 + KM_CLEARPAGE,
2179 KM_TYPE_NR
2180 };
2181
2182 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/page_64.h linux-2.6.32.45/arch/powerpc/include/asm/page_64.h
2183 --- linux-2.6.32.45/arch/powerpc/include/asm/page_64.h 2011-03-27 14:31:47.000000000 -0400
2184 +++ linux-2.6.32.45/arch/powerpc/include/asm/page_64.h 2011-04-17 15:56:45.000000000 -0400
2185 @@ -180,15 +180,18 @@ do { \
2186 * stack by default, so in the absense of a PT_GNU_STACK program header
2187 * we turn execute permission off.
2188 */
2189 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2190 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2191 +#define VM_STACK_DEFAULT_FLAGS32 \
2192 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2193 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2194
2195 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2196 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2197
2198 +#ifndef CONFIG_PAX_PAGEEXEC
2199 #define VM_STACK_DEFAULT_FLAGS \
2200 (test_thread_flag(TIF_32BIT) ? \
2201 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2202 +#endif
2203
2204 #include <asm-generic/getorder.h>
2205
2206 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/page.h linux-2.6.32.45/arch/powerpc/include/asm/page.h
2207 --- linux-2.6.32.45/arch/powerpc/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
2208 +++ linux-2.6.32.45/arch/powerpc/include/asm/page.h 2011-08-21 16:07:39.000000000 -0400
2209 @@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
2210 * and needs to be executable. This means the whole heap ends
2211 * up being executable.
2212 */
2213 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2214 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2215 +#define VM_DATA_DEFAULT_FLAGS32 \
2216 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2217 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2218
2219 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2220 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2221 @@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
2222 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2223 #endif
2224
2225 +#define ktla_ktva(addr) (addr)
2226 +#define ktva_ktla(addr) (addr)
2227 +
2228 #ifndef __ASSEMBLY__
2229
2230 #undef STRICT_MM_TYPECHECKS
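In both page_64.h and page.h above, the default data/stack VM flags only include VM_EXEC when the task's personality carries READ_IMPLIES_EXEC, instead of unconditionally. A small user-space model of that flag composition (the VM_* values here are stand-ins; READ_IMPLIES_EXEC matches <linux/personality.h>):

#include <stdio.h>

#define VM_READ  0x1UL			/* stand-in flag values */
#define VM_WRITE 0x2UL
#define VM_EXEC  0x4UL
#define READ_IMPLIES_EXEC 0x0400000	/* as in <linux/personality.h> */

/* Model of the change above: data/stack mappings only become executable
 * when the personality explicitly asks for READ_IMPLIES_EXEC. */
static unsigned long default_data_flags(unsigned long personality)
{
	return ((personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) |
	       VM_READ | VM_WRITE;
}

int main(void)
{
	printf("default:           %#lx\n", default_data_flags(0));
	printf("read-implies-exec: %#lx\n", default_data_flags(READ_IMPLIES_EXEC));
	return 0;
}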
2231 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/pci.h linux-2.6.32.45/arch/powerpc/include/asm/pci.h
2232 --- linux-2.6.32.45/arch/powerpc/include/asm/pci.h 2011-03-27 14:31:47.000000000 -0400
2233 +++ linux-2.6.32.45/arch/powerpc/include/asm/pci.h 2011-04-17 15:56:45.000000000 -0400
2234 @@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq
2235 }
2236
2237 #ifdef CONFIG_PCI
2238 -extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
2239 -extern struct dma_map_ops *get_pci_dma_ops(void);
2240 +extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
2241 +extern const struct dma_map_ops *get_pci_dma_ops(void);
2242 #else /* CONFIG_PCI */
2243 #define set_pci_dma_ops(d)
2244 #define get_pci_dma_ops() NULL
2245 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/pgtable.h linux-2.6.32.45/arch/powerpc/include/asm/pgtable.h
2246 --- linux-2.6.32.45/arch/powerpc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
2247 +++ linux-2.6.32.45/arch/powerpc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
2248 @@ -2,6 +2,7 @@
2249 #define _ASM_POWERPC_PGTABLE_H
2250 #ifdef __KERNEL__
2251
2252 +#include <linux/const.h>
2253 #ifndef __ASSEMBLY__
2254 #include <asm/processor.h> /* For TASK_SIZE */
2255 #include <asm/mmu.h>
2256 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/pte-hash32.h linux-2.6.32.45/arch/powerpc/include/asm/pte-hash32.h
2257 --- linux-2.6.32.45/arch/powerpc/include/asm/pte-hash32.h 2011-03-27 14:31:47.000000000 -0400
2258 +++ linux-2.6.32.45/arch/powerpc/include/asm/pte-hash32.h 2011-04-17 15:56:45.000000000 -0400
2259 @@ -21,6 +21,7 @@
2260 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2261 #define _PAGE_USER 0x004 /* usermode access allowed */
2262 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2263 +#define _PAGE_EXEC _PAGE_GUARDED
2264 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2265 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2266 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2267 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/ptrace.h linux-2.6.32.45/arch/powerpc/include/asm/ptrace.h
2268 --- linux-2.6.32.45/arch/powerpc/include/asm/ptrace.h 2011-03-27 14:31:47.000000000 -0400
2269 +++ linux-2.6.32.45/arch/powerpc/include/asm/ptrace.h 2011-08-21 15:53:58.000000000 -0400
2270 @@ -103,7 +103,7 @@ extern unsigned long profile_pc(struct p
2271 } while(0)
2272
2273 struct task_struct;
2274 -extern unsigned long ptrace_get_reg(struct task_struct *task, int regno);
2275 +extern unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno);
2276 extern int ptrace_put_reg(struct task_struct *task, int regno,
2277 unsigned long data);
2278
2279 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/reg.h linux-2.6.32.45/arch/powerpc/include/asm/reg.h
2280 --- linux-2.6.32.45/arch/powerpc/include/asm/reg.h 2011-03-27 14:31:47.000000000 -0400
2281 +++ linux-2.6.32.45/arch/powerpc/include/asm/reg.h 2011-04-17 15:56:45.000000000 -0400
2282 @@ -191,6 +191,7 @@
2283 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2284 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2285 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2286 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2287 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2288 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2289 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2290 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/swiotlb.h linux-2.6.32.45/arch/powerpc/include/asm/swiotlb.h
2291 --- linux-2.6.32.45/arch/powerpc/include/asm/swiotlb.h 2011-03-27 14:31:47.000000000 -0400
2292 +++ linux-2.6.32.45/arch/powerpc/include/asm/swiotlb.h 2011-04-17 15:56:45.000000000 -0400
2293 @@ -13,7 +13,7 @@
2294
2295 #include <linux/swiotlb.h>
2296
2297 -extern struct dma_map_ops swiotlb_dma_ops;
2298 +extern const struct dma_map_ops swiotlb_dma_ops;
2299
2300 static inline void dma_mark_clean(void *addr, size_t size) {}
2301
2302 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/system.h linux-2.6.32.45/arch/powerpc/include/asm/system.h
2303 --- linux-2.6.32.45/arch/powerpc/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
2304 +++ linux-2.6.32.45/arch/powerpc/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
2305 @@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
2306 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2307 #endif
2308
2309 -extern unsigned long arch_align_stack(unsigned long sp);
2310 +#define arch_align_stack(x) ((x) & ~0xfUL)
2311
2312 /* Used in very early kernel initialization. */
2313 extern unsigned long reloc_offset(void);
2314 diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/uaccess.h linux-2.6.32.45/arch/powerpc/include/asm/uaccess.h
2315 --- linux-2.6.32.45/arch/powerpc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
2316 +++ linux-2.6.32.45/arch/powerpc/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
2317 @@ -13,6 +13,8 @@
2318 #define VERIFY_READ 0
2319 #define VERIFY_WRITE 1
2320
2321 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
2322 +
2323 /*
2324 * The fs value determines whether argument validity checking should be
2325 * performed or not. If get_fs() == USER_DS, checking is performed, with
2326 @@ -327,52 +329,6 @@ do { \
2327 extern unsigned long __copy_tofrom_user(void __user *to,
2328 const void __user *from, unsigned long size);
2329
2330 -#ifndef __powerpc64__
2331 -
2332 -static inline unsigned long copy_from_user(void *to,
2333 - const void __user *from, unsigned long n)
2334 -{
2335 - unsigned long over;
2336 -
2337 - if (access_ok(VERIFY_READ, from, n))
2338 - return __copy_tofrom_user((__force void __user *)to, from, n);
2339 - if ((unsigned long)from < TASK_SIZE) {
2340 - over = (unsigned long)from + n - TASK_SIZE;
2341 - return __copy_tofrom_user((__force void __user *)to, from,
2342 - n - over) + over;
2343 - }
2344 - return n;
2345 -}
2346 -
2347 -static inline unsigned long copy_to_user(void __user *to,
2348 - const void *from, unsigned long n)
2349 -{
2350 - unsigned long over;
2351 -
2352 - if (access_ok(VERIFY_WRITE, to, n))
2353 - return __copy_tofrom_user(to, (__force void __user *)from, n);
2354 - if ((unsigned long)to < TASK_SIZE) {
2355 - over = (unsigned long)to + n - TASK_SIZE;
2356 - return __copy_tofrom_user(to, (__force void __user *)from,
2357 - n - over) + over;
2358 - }
2359 - return n;
2360 -}
2361 -
2362 -#else /* __powerpc64__ */
2363 -
2364 -#define __copy_in_user(to, from, size) \
2365 - __copy_tofrom_user((to), (from), (size))
2366 -
2367 -extern unsigned long copy_from_user(void *to, const void __user *from,
2368 - unsigned long n);
2369 -extern unsigned long copy_to_user(void __user *to, const void *from,
2370 - unsigned long n);
2371 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
2372 - unsigned long n);
2373 -
2374 -#endif /* __powerpc64__ */
2375 -
2376 static inline unsigned long __copy_from_user_inatomic(void *to,
2377 const void __user *from, unsigned long n)
2378 {
2379 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
2380 if (ret == 0)
2381 return 0;
2382 }
2383 +
2384 + if (!__builtin_constant_p(n))
2385 + check_object_size(to, n, false);
2386 +
2387 return __copy_tofrom_user((__force void __user *)to, from, n);
2388 }
2389
2390 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
2391 if (ret == 0)
2392 return 0;
2393 }
2394 +
2395 + if (!__builtin_constant_p(n))
2396 + check_object_size(from, n, true);
2397 +
2398 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2399 }
2400
2401 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
2402 return __copy_to_user_inatomic(to, from, size);
2403 }
2404
2405 +#ifndef __powerpc64__
2406 +
2407 +static inline unsigned long __must_check copy_from_user(void *to,
2408 + const void __user *from, unsigned long n)
2409 +{
2410 + unsigned long over;
2411 +
2412 + if ((long)n < 0)
2413 + return n;
2414 +
2415 + if (access_ok(VERIFY_READ, from, n)) {
2416 + if (!__builtin_constant_p(n))
2417 + check_object_size(to, n, false);
2418 + return __copy_tofrom_user((__force void __user *)to, from, n);
2419 + }
2420 + if ((unsigned long)from < TASK_SIZE) {
2421 + over = (unsigned long)from + n - TASK_SIZE;
2422 + if (!__builtin_constant_p(n - over))
2423 + check_object_size(to, n - over, false);
2424 + return __copy_tofrom_user((__force void __user *)to, from,
2425 + n - over) + over;
2426 + }
2427 + return n;
2428 +}
2429 +
2430 +static inline unsigned long __must_check copy_to_user(void __user *to,
2431 + const void *from, unsigned long n)
2432 +{
2433 + unsigned long over;
2434 +
2435 + if ((long)n < 0)
2436 + return n;
2437 +
2438 + if (access_ok(VERIFY_WRITE, to, n)) {
2439 + if (!__builtin_constant_p(n))
2440 + check_object_size(from, n, true);
2441 + return __copy_tofrom_user(to, (__force void __user *)from, n);
2442 + }
2443 + if ((unsigned long)to < TASK_SIZE) {
2444 + over = (unsigned long)to + n - TASK_SIZE;
2445 + if (!__builtin_constant_p(n))
2446 + check_object_size(from, n - over, true);
2447 + return __copy_tofrom_user(to, (__force void __user *)from,
2448 + n - over) + over;
2449 + }
2450 + return n;
2451 +}
2452 +
2453 +#else /* __powerpc64__ */
2454 +
2455 +#define __copy_in_user(to, from, size) \
2456 + __copy_tofrom_user((to), (from), (size))
2457 +
2458 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2459 +{
2460 + if ((long)n < 0 || n > INT_MAX)
2461 + return n;
2462 +
2463 + if (!__builtin_constant_p(n))
2464 + check_object_size(to, n, false);
2465 +
2466 + if (likely(access_ok(VERIFY_READ, from, n)))
2467 + n = __copy_from_user(to, from, n);
2468 + else
2469 + memset(to, 0, n);
2470 + return n;
2471 +}
2472 +
2473 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2474 +{
2475 + if ((long)n < 0 || n > INT_MAX)
2476 + return n;
2477 +
2478 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
2479 + if (!__builtin_constant_p(n))
2480 + check_object_size(from, n, true);
2481 + n = __copy_to_user(to, from, n);
2482 + }
2483 + return n;
2484 +}
2485 +
2486 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
2487 + unsigned long n);
2488 +
2489 +#endif /* __powerpc64__ */
2490 +
2491 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2492
2493 static inline unsigned long clear_user(void __user *addr, unsigned long size)
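The rewritten copy_from_user()/copy_to_user() above add two things to the stock powerpc routines: a sanity check on the requested length and, for non-constant sizes, check_object_size(), which verifies that the kernel-side object really spans n bytes before the copy is allowed. A user-space model of that shape (object_fits() is a stand-in for check_object_size(), which is defined elsewhere in the patch):

#include <stdio.h>
#include <string.h>
#include <limits.h>

/* Stand-in for check_object_size(): does the destination object really
 * have room for n bytes? (the kernel version checks slab/stack bounds) */
static int object_fits(size_t object_size, size_t n)
{
	return n <= object_size;
}

static size_t copy_checked(void *to, size_t to_size, const void *from, size_t n)
{
	if ((long)n < 0 || n > INT_MAX)	/* absurd length: refuse outright */
		return n;
	if (!object_fits(to_size, n))	/* would overflow the destination */
		return n;
	memcpy(to, from, n);
	return 0;			/* 0 bytes left over == success */
}

int main(void)
{
	char small[8];

	printf("fits:    %zu left\n", copy_checked(small, sizeof(small), "1234567", 8));
	printf("too big: %zu left\n", copy_checked(small, sizeof(small), "far too long", 13));
	return 0;
}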
2494 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/cacheinfo.c linux-2.6.32.45/arch/powerpc/kernel/cacheinfo.c
2495 --- linux-2.6.32.45/arch/powerpc/kernel/cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
2496 +++ linux-2.6.32.45/arch/powerpc/kernel/cacheinfo.c 2011-04-17 15:56:45.000000000 -0400
2497 @@ -642,7 +642,7 @@ static struct kobj_attribute *cache_inde
2498 &cache_assoc_attr,
2499 };
2500
2501 -static struct sysfs_ops cache_index_ops = {
2502 +static const struct sysfs_ops cache_index_ops = {
2503 .show = cache_index_show,
2504 };
2505
2506 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/dma.c linux-2.6.32.45/arch/powerpc/kernel/dma.c
2507 --- linux-2.6.32.45/arch/powerpc/kernel/dma.c 2011-03-27 14:31:47.000000000 -0400
2508 +++ linux-2.6.32.45/arch/powerpc/kernel/dma.c 2011-04-17 15:56:45.000000000 -0400
2509 @@ -134,7 +134,7 @@ static inline void dma_direct_sync_singl
2510 }
2511 #endif
2512
2513 -struct dma_map_ops dma_direct_ops = {
2514 +const struct dma_map_ops dma_direct_ops = {
2515 .alloc_coherent = dma_direct_alloc_coherent,
2516 .free_coherent = dma_direct_free_coherent,
2517 .map_sg = dma_direct_map_sg,
2518 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/dma-iommu.c linux-2.6.32.45/arch/powerpc/kernel/dma-iommu.c
2519 --- linux-2.6.32.45/arch/powerpc/kernel/dma-iommu.c 2011-03-27 14:31:47.000000000 -0400
2520 +++ linux-2.6.32.45/arch/powerpc/kernel/dma-iommu.c 2011-04-17 15:56:45.000000000 -0400
2521 @@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct de
2522 }
2523
2524 /* We support DMA to/from any memory page via the iommu */
2525 -static int dma_iommu_dma_supported(struct device *dev, u64 mask)
2526 +int dma_iommu_dma_supported(struct device *dev, u64 mask)
2527 {
2528 struct iommu_table *tbl = get_iommu_table_base(dev);
2529
2530 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/dma-swiotlb.c linux-2.6.32.45/arch/powerpc/kernel/dma-swiotlb.c
2531 --- linux-2.6.32.45/arch/powerpc/kernel/dma-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
2532 +++ linux-2.6.32.45/arch/powerpc/kernel/dma-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
2533 @@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
2534 * map_page, and unmap_page on highmem, use normal dma_ops
2535 * for everything else.
2536 */
2537 -struct dma_map_ops swiotlb_dma_ops = {
2538 +const struct dma_map_ops swiotlb_dma_ops = {
2539 .alloc_coherent = dma_direct_alloc_coherent,
2540 .free_coherent = dma_direct_free_coherent,
2541 .map_sg = swiotlb_map_sg_attrs,
2542 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/exceptions-64e.S linux-2.6.32.45/arch/powerpc/kernel/exceptions-64e.S
2543 --- linux-2.6.32.45/arch/powerpc/kernel/exceptions-64e.S 2011-03-27 14:31:47.000000000 -0400
2544 +++ linux-2.6.32.45/arch/powerpc/kernel/exceptions-64e.S 2011-04-17 15:56:45.000000000 -0400
2545 @@ -455,6 +455,7 @@ storage_fault_common:
2546 std r14,_DAR(r1)
2547 std r15,_DSISR(r1)
2548 addi r3,r1,STACK_FRAME_OVERHEAD
2549 + bl .save_nvgprs
2550 mr r4,r14
2551 mr r5,r15
2552 ld r14,PACA_EXGEN+EX_R14(r13)
2553 @@ -464,8 +465,7 @@ storage_fault_common:
2554 cmpdi r3,0
2555 bne- 1f
2556 b .ret_from_except_lite
2557 -1: bl .save_nvgprs
2558 - mr r5,r3
2559 +1: mr r5,r3
2560 addi r3,r1,STACK_FRAME_OVERHEAD
2561 ld r4,_DAR(r1)
2562 bl .bad_page_fault
2563 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/exceptions-64s.S linux-2.6.32.45/arch/powerpc/kernel/exceptions-64s.S
2564 --- linux-2.6.32.45/arch/powerpc/kernel/exceptions-64s.S 2011-03-27 14:31:47.000000000 -0400
2565 +++ linux-2.6.32.45/arch/powerpc/kernel/exceptions-64s.S 2011-04-17 15:56:45.000000000 -0400
2566 @@ -818,10 +818,10 @@ handle_page_fault:
2567 11: ld r4,_DAR(r1)
2568 ld r5,_DSISR(r1)
2569 addi r3,r1,STACK_FRAME_OVERHEAD
2570 + bl .save_nvgprs
2571 bl .do_page_fault
2572 cmpdi r3,0
2573 beq+ 13f
2574 - bl .save_nvgprs
2575 mr r5,r3
2576 addi r3,r1,STACK_FRAME_OVERHEAD
2577 lwz r4,_DAR(r1)
2578 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/ibmebus.c linux-2.6.32.45/arch/powerpc/kernel/ibmebus.c
2579 --- linux-2.6.32.45/arch/powerpc/kernel/ibmebus.c 2011-03-27 14:31:47.000000000 -0400
2580 +++ linux-2.6.32.45/arch/powerpc/kernel/ibmebus.c 2011-04-17 15:56:45.000000000 -0400
2581 @@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct
2582 return 1;
2583 }
2584
2585 -static struct dma_map_ops ibmebus_dma_ops = {
2586 +static const struct dma_map_ops ibmebus_dma_ops = {
2587 .alloc_coherent = ibmebus_alloc_coherent,
2588 .free_coherent = ibmebus_free_coherent,
2589 .map_sg = ibmebus_map_sg,
2590 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/kgdb.c linux-2.6.32.45/arch/powerpc/kernel/kgdb.c
2591 --- linux-2.6.32.45/arch/powerpc/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
2592 +++ linux-2.6.32.45/arch/powerpc/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
2593 @@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct
2594 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
2595 return 0;
2596
2597 - if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2598 + if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2599 regs->nip += 4;
2600
2601 return 1;
2602 @@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vecto
2603 /*
2604 * Global data
2605 */
2606 -struct kgdb_arch arch_kgdb_ops = {
2607 +const struct kgdb_arch arch_kgdb_ops = {
2608 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
2609 };
2610
2611 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/module_32.c linux-2.6.32.45/arch/powerpc/kernel/module_32.c
2612 --- linux-2.6.32.45/arch/powerpc/kernel/module_32.c 2011-03-27 14:31:47.000000000 -0400
2613 +++ linux-2.6.32.45/arch/powerpc/kernel/module_32.c 2011-04-17 15:56:45.000000000 -0400
2614 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2615 me->arch.core_plt_section = i;
2616 }
2617 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2618 - printk("Module doesn't contain .plt or .init.plt sections.\n");
2619 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2620 return -ENOEXEC;
2621 }
2622
2623 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2624
2625 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2626 /* Init, or core PLT? */
2627 - if (location >= mod->module_core
2628 - && location < mod->module_core + mod->core_size)
2629 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2630 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2631 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2632 - else
2633 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2634 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2635 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2636 + else {
2637 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2638 + return ~0UL;
2639 + }
2640
2641 /* Find this entry, or if that fails, the next avail. entry */
2642 while (entry->jump[0]) {
2643 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/module.c linux-2.6.32.45/arch/powerpc/kernel/module.c
2644 --- linux-2.6.32.45/arch/powerpc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
2645 +++ linux-2.6.32.45/arch/powerpc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
2646 @@ -31,11 +31,24 @@
2647
2648 LIST_HEAD(module_bug_list);
2649
2650 +#ifdef CONFIG_PAX_KERNEXEC
2651 void *module_alloc(unsigned long size)
2652 {
2653 if (size == 0)
2654 return NULL;
2655
2656 + return vmalloc(size);
2657 +}
2658 +
2659 +void *module_alloc_exec(unsigned long size)
2660 +#else
2661 +void *module_alloc(unsigned long size)
2662 +#endif
2663 +
2664 +{
2665 + if (size == 0)
2666 + return NULL;
2667 +
2668 return vmalloc_exec(size);
2669 }
2670
2671 @@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2672 vfree(module_region);
2673 }
2674
2675 +#ifdef CONFIG_PAX_KERNEXEC
2676 +void module_free_exec(struct module *mod, void *module_region)
2677 +{
2678 + module_free(mod, module_region);
2679 +}
2680 +#endif
2681 +
2682 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2683 const Elf_Shdr *sechdrs,
2684 const char *name)
2685 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/pci-common.c linux-2.6.32.45/arch/powerpc/kernel/pci-common.c
2686 --- linux-2.6.32.45/arch/powerpc/kernel/pci-common.c 2011-03-27 14:31:47.000000000 -0400
2687 +++ linux-2.6.32.45/arch/powerpc/kernel/pci-common.c 2011-04-17 15:56:45.000000000 -0400
2688 @@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
2689 unsigned int ppc_pci_flags = 0;
2690
2691
2692 -static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2693 +static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2694
2695 -void set_pci_dma_ops(struct dma_map_ops *dma_ops)
2696 +void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
2697 {
2698 pci_dma_ops = dma_ops;
2699 }
2700
2701 -struct dma_map_ops *get_pci_dma_ops(void)
2702 +const struct dma_map_ops *get_pci_dma_ops(void)
2703 {
2704 return pci_dma_ops;
2705 }
2706 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/process.c linux-2.6.32.45/arch/powerpc/kernel/process.c
2707 --- linux-2.6.32.45/arch/powerpc/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
2708 +++ linux-2.6.32.45/arch/powerpc/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
2709 @@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
2710 * Lookup NIP late so we have the best change of getting the
2711 * above info out without failing
2712 */
2713 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2714 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2715 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2716 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2717 #endif
2718 show_stack(current, (unsigned long *) regs->gpr[1]);
2719 if (!user_mode(regs))
2720 @@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk,
2721 newsp = stack[0];
2722 ip = stack[STACK_FRAME_LR_SAVE];
2723 if (!firstframe || ip != lr) {
2724 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2725 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2726 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2727 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2728 - printk(" (%pS)",
2729 + printk(" (%pA)",
2730 (void *)current->ret_stack[curr_frame].ret);
2731 curr_frame--;
2732 }
2733 @@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk,
2734 struct pt_regs *regs = (struct pt_regs *)
2735 (sp + STACK_FRAME_OVERHEAD);
2736 lr = regs->link;
2737 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
2738 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
2739 regs->trap, (void *)regs->nip, (void *)lr);
2740 firstframe = 1;
2741 }
2742 @@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
2743 }
2744
2745 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2746 -
2747 -unsigned long arch_align_stack(unsigned long sp)
2748 -{
2749 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2750 - sp -= get_random_int() & ~PAGE_MASK;
2751 - return sp & ~0xf;
2752 -}
2753 -
2754 -static inline unsigned long brk_rnd(void)
2755 -{
2756 - unsigned long rnd = 0;
2757 -
2758 - /* 8MB for 32bit, 1GB for 64bit */
2759 - if (is_32bit_task())
2760 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2761 - else
2762 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2763 -
2764 - return rnd << PAGE_SHIFT;
2765 -}
2766 -
2767 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2768 -{
2769 - unsigned long base = mm->brk;
2770 - unsigned long ret;
2771 -
2772 -#ifdef CONFIG_PPC_STD_MMU_64
2773 - /*
2774 - * If we are using 1TB segments and we are allowed to randomise
2775 - * the heap, we can put it above 1TB so it is backed by a 1TB
2776 - * segment. Otherwise the heap will be in the bottom 1TB
2777 - * which always uses 256MB segments and this may result in a
2778 - * performance penalty.
2779 - */
2780 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2781 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2782 -#endif
2783 -
2784 - ret = PAGE_ALIGN(base + brk_rnd());
2785 -
2786 - if (ret < mm->brk)
2787 - return mm->brk;
2788 -
2789 - return ret;
2790 -}
2791 -
2792 -unsigned long randomize_et_dyn(unsigned long base)
2793 -{
2794 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2795 -
2796 - if (ret < base)
2797 - return base;
2798 -
2799 - return ret;
2800 -}
2801 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/ptrace.c linux-2.6.32.45/arch/powerpc/kernel/ptrace.c
2802 --- linux-2.6.32.45/arch/powerpc/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
2803 +++ linux-2.6.32.45/arch/powerpc/kernel/ptrace.c 2011-08-21 15:53:39.000000000 -0400
2804 @@ -86,7 +86,7 @@ static int set_user_trap(struct task_str
2805 /*
2806 * Get contents of register REGNO in task TASK.
2807 */
2808 -unsigned long ptrace_get_reg(struct task_struct *task, int regno)
2809 +unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno)
2810 {
2811 if (task->thread.regs == NULL)
2812 return -EIO;
2813 @@ -894,7 +894,7 @@ long arch_ptrace(struct task_struct *chi
2814
2815 CHECK_FULL_REGS(child->thread.regs);
2816 if (index < PT_FPR0) {
2817 - tmp = ptrace_get_reg(child, (int) index);
2818 + tmp = ptrace_get_reg(child, index);
2819 } else {
2820 flush_fp_to_thread(child);
2821 tmp = ((unsigned long *)child->thread.fpr)
2822 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/signal_32.c linux-2.6.32.45/arch/powerpc/kernel/signal_32.c
2823 --- linux-2.6.32.45/arch/powerpc/kernel/signal_32.c 2011-03-27 14:31:47.000000000 -0400
2824 +++ linux-2.6.32.45/arch/powerpc/kernel/signal_32.c 2011-04-17 15:56:45.000000000 -0400
2825 @@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig
2826 /* Save user registers on the stack */
2827 frame = &rt_sf->uc.uc_mcontext;
2828 addr = frame;
2829 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2830 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2831 if (save_user_regs(regs, frame, 0, 1))
2832 goto badframe;
2833 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2834 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/signal_64.c linux-2.6.32.45/arch/powerpc/kernel/signal_64.c
2835 --- linux-2.6.32.45/arch/powerpc/kernel/signal_64.c 2011-03-27 14:31:47.000000000 -0400
2836 +++ linux-2.6.32.45/arch/powerpc/kernel/signal_64.c 2011-04-17 15:56:45.000000000 -0400
2837 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct
2838 current->thread.fpscr.val = 0;
2839
2840 /* Set up to return from userspace. */
2841 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2842 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2843 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2844 } else {
2845 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2846 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/sys_ppc32.c linux-2.6.32.45/arch/powerpc/kernel/sys_ppc32.c
2847 --- linux-2.6.32.45/arch/powerpc/kernel/sys_ppc32.c 2011-03-27 14:31:47.000000000 -0400
2848 +++ linux-2.6.32.45/arch/powerpc/kernel/sys_ppc32.c 2011-04-17 15:56:45.000000000 -0400
2849 @@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct
2850 if (oldlenp) {
2851 if (!error) {
2852 if (get_user(oldlen, oldlenp) ||
2853 - put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
2854 + put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
2855 + copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
2856 error = -EFAULT;
2857 }
2858 - copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
2859 }
2860 return error;
2861 }
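The sys_ppc32.c hunk above moves the copy_to_user() of the __unused field inside the existing error check, so a failure there now turns into -EFAULT instead of having its return value silently dropped. The same pattern restated outside the kernel, as a sketch only (fake_copy() is a made-up stand-in that, like copy_to_user(), returns the number of bytes it could not copy):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Made-up stand-in for copy_to_user(): returns the bytes NOT copied. */
static unsigned long fake_copy(void *dst, const void *src, unsigned long n)
{
    if (!dst)
        return n;                 /* simulate a faulting user pointer */
    memcpy(dst, src, n);
    return 0;
}

int main(void)
{
    char ok[4], src[4] = "abc";
    int error = 0;

    /* After the change, either failing copy makes the call report -EFAULT;
     * before it, the second copy's result was thrown away. */
    if (fake_copy(ok, src, sizeof(src)) || fake_copy(NULL, src, sizeof(src)))
        error = -EFAULT;

    printf("error = %d\n", error);   /* prints -14 */
    return 0;
}
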
2862 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/traps.c linux-2.6.32.45/arch/powerpc/kernel/traps.c
2863 --- linux-2.6.32.45/arch/powerpc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
2864 +++ linux-2.6.32.45/arch/powerpc/kernel/traps.c 2011-06-13 21:33:37.000000000 -0400
2865 @@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
2866 static inline void pmac_backlight_unblank(void) { }
2867 #endif
2868
2869 +extern void gr_handle_kernel_exploit(void);
2870 +
2871 int die(const char *str, struct pt_regs *regs, long err)
2872 {
2873 static struct {
2874 @@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs
2875 if (panic_on_oops)
2876 panic("Fatal exception");
2877
2878 + gr_handle_kernel_exploit();
2879 +
2880 oops_exit();
2881 do_exit(err);
2882
2883 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/vdso.c linux-2.6.32.45/arch/powerpc/kernel/vdso.c
2884 --- linux-2.6.32.45/arch/powerpc/kernel/vdso.c 2011-03-27 14:31:47.000000000 -0400
2885 +++ linux-2.6.32.45/arch/powerpc/kernel/vdso.c 2011-04-17 15:56:45.000000000 -0400
2886 @@ -36,6 +36,7 @@
2887 #include <asm/firmware.h>
2888 #include <asm/vdso.h>
2889 #include <asm/vdso_datapage.h>
2890 +#include <asm/mman.h>
2891
2892 #include "setup.h"
2893
2894 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2895 vdso_base = VDSO32_MBASE;
2896 #endif
2897
2898 - current->mm->context.vdso_base = 0;
2899 + current->mm->context.vdso_base = ~0UL;
2900
2901 /* vDSO has a problem and was disabled, just don't "enable" it for the
2902 * process
2903 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2904 vdso_base = get_unmapped_area(NULL, vdso_base,
2905 (vdso_pages << PAGE_SHIFT) +
2906 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2907 - 0, 0);
2908 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2909 if (IS_ERR_VALUE(vdso_base)) {
2910 rc = vdso_base;
2911 goto fail_mmapsem;
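Two things change in vdso.c above: the "no vdso" marker in mm->context.vdso_base becomes ~0UL rather than 0 (the signal_32.c and signal_64.c hunks are adjusted to match), and the vdso mapping is requested with MAP_PRIVATE | MAP_EXECUTABLE. The sentinel switch presumably exists so that a genuinely low mapping address can never be mistaken for "vdso disabled"; a minimal sketch of that idea, with names invented for the illustration:

#include <stdio.h>

#define VDSO_UNMAPPED (~0UL)   /* all-ones can never be a page-aligned base */

static const char *vdso_state(unsigned long base)
{
    return base == VDSO_UNMAPPED ? "not mapped" : "mapped";
}

int main(void)
{
    printf("%s\n", vdso_state(VDSO_UNMAPPED));   /* not mapped */
    printf("%s\n", vdso_state(0x100000UL));      /* mapped */
    printf("%s\n", vdso_state(0));               /* 0 now counts as a real address */
    return 0;
}
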
2912 diff -urNp linux-2.6.32.45/arch/powerpc/kernel/vio.c linux-2.6.32.45/arch/powerpc/kernel/vio.c
2913 --- linux-2.6.32.45/arch/powerpc/kernel/vio.c 2011-03-27 14:31:47.000000000 -0400
2914 +++ linux-2.6.32.45/arch/powerpc/kernel/vio.c 2011-04-17 15:56:45.000000000 -0400
2915 @@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struc
2916 vio_cmo_dealloc(viodev, alloc_size);
2917 }
2918
2919 -struct dma_map_ops vio_dma_mapping_ops = {
2920 +static const struct dma_map_ops vio_dma_mapping_ops = {
2921 .alloc_coherent = vio_dma_iommu_alloc_coherent,
2922 .free_coherent = vio_dma_iommu_free_coherent,
2923 .map_sg = vio_dma_iommu_map_sg,
2924 .unmap_sg = vio_dma_iommu_unmap_sg,
2925 + .dma_supported = dma_iommu_dma_supported,
2926 .map_page = vio_dma_iommu_map_page,
2927 .unmap_page = vio_dma_iommu_unmap_page,
2928
2929 @@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vi
2930
2931 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
2932 {
2933 - vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
2934 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
2935 }
2936
2937 diff -urNp linux-2.6.32.45/arch/powerpc/lib/usercopy_64.c linux-2.6.32.45/arch/powerpc/lib/usercopy_64.c
2938 --- linux-2.6.32.45/arch/powerpc/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
2939 +++ linux-2.6.32.45/arch/powerpc/lib/usercopy_64.c 2011-04-17 15:56:45.000000000 -0400
2940 @@ -9,22 +9,6 @@
2941 #include <linux/module.h>
2942 #include <asm/uaccess.h>
2943
2944 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2945 -{
2946 - if (likely(access_ok(VERIFY_READ, from, n)))
2947 - n = __copy_from_user(to, from, n);
2948 - else
2949 - memset(to, 0, n);
2950 - return n;
2951 -}
2952 -
2953 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2954 -{
2955 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2956 - n = __copy_to_user(to, from, n);
2957 - return n;
2958 -}
2959 -
2960 unsigned long copy_in_user(void __user *to, const void __user *from,
2961 unsigned long n)
2962 {
2963 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2964 return n;
2965 }
2966
2967 -EXPORT_SYMBOL(copy_from_user);
2968 -EXPORT_SYMBOL(copy_to_user);
2969 EXPORT_SYMBOL(copy_in_user);
2970
2971 diff -urNp linux-2.6.32.45/arch/powerpc/Makefile linux-2.6.32.45/arch/powerpc/Makefile
2972 --- linux-2.6.32.45/arch/powerpc/Makefile 2011-03-27 14:31:47.000000000 -0400
2973 +++ linux-2.6.32.45/arch/powerpc/Makefile 2011-08-21 19:27:08.000000000 -0400
2974 @@ -74,6 +74,8 @@ KBUILD_AFLAGS += -Iarch/$(ARCH)
2975 KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
2976 CPP = $(CC) -E $(KBUILD_CFLAGS)
2977
2978 +cflags-y += -Wno-sign-compare -Wno-extra
2979 +
2980 CHECKFLAGS += -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__
2981
2982 ifeq ($(CONFIG_PPC64),y)
2983 diff -urNp linux-2.6.32.45/arch/powerpc/mm/fault.c linux-2.6.32.45/arch/powerpc/mm/fault.c
2984 --- linux-2.6.32.45/arch/powerpc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
2985 +++ linux-2.6.32.45/arch/powerpc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
2986 @@ -30,6 +30,10 @@
2987 #include <linux/kprobes.h>
2988 #include <linux/kdebug.h>
2989 #include <linux/perf_event.h>
2990 +#include <linux/slab.h>
2991 +#include <linux/pagemap.h>
2992 +#include <linux/compiler.h>
2993 +#include <linux/unistd.h>
2994
2995 #include <asm/firmware.h>
2996 #include <asm/page.h>
2997 @@ -40,6 +44,7 @@
2998 #include <asm/uaccess.h>
2999 #include <asm/tlbflush.h>
3000 #include <asm/siginfo.h>
3001 +#include <asm/ptrace.h>
3002
3003
3004 #ifdef CONFIG_KPROBES
3005 @@ -64,6 +69,33 @@ static inline int notify_page_fault(stru
3006 }
3007 #endif
3008
3009 +#ifdef CONFIG_PAX_PAGEEXEC
3010 +/*
3011 + * PaX: decide what to do with offenders (regs->nip = fault address)
3012 + *
3013 + * returns 1 when task should be killed
3014 + */
3015 +static int pax_handle_fetch_fault(struct pt_regs *regs)
3016 +{
3017 + return 1;
3018 +}
3019 +
3020 +void pax_report_insns(void *pc, void *sp)
3021 +{
3022 + unsigned long i;
3023 +
3024 + printk(KERN_ERR "PAX: bytes at PC: ");
3025 + for (i = 0; i < 5; i++) {
3026 + unsigned int c;
3027 + if (get_user(c, (unsigned int __user *)pc+i))
3028 + printk(KERN_CONT "???????? ");
3029 + else
3030 + printk(KERN_CONT "%08x ", c);
3031 + }
3032 + printk("\n");
3033 +}
3034 +#endif
3035 +
3036 /*
3037 * Check whether the instruction at regs->nip is a store using
3038 * an update addressing form which will update r1.
3039 @@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_re
3040 * indicate errors in DSISR but can validly be set in SRR1.
3041 */
3042 if (trap == 0x400)
3043 - error_code &= 0x48200000;
3044 + error_code &= 0x58200000;
3045 else
3046 is_write = error_code & DSISR_ISSTORE;
3047 #else
3048 @@ -250,7 +282,7 @@ good_area:
3049 * "undefined". Of those that can be set, this is the only
3050 * one which seems bad.
3051 */
3052 - if (error_code & 0x10000000)
3053 + if (error_code & DSISR_GUARDED)
3054 /* Guarded storage error. */
3055 goto bad_area;
3056 #endif /* CONFIG_8xx */
3057 @@ -265,7 +297,7 @@ good_area:
3058 * processors use the same I/D cache coherency mechanism
3059 * as embedded.
3060 */
3061 - if (error_code & DSISR_PROTFAULT)
3062 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
3063 goto bad_area;
3064 #endif /* CONFIG_PPC_STD_MMU */
3065
3066 @@ -335,6 +367,23 @@ bad_area:
3067 bad_area_nosemaphore:
3068 /* User mode accesses cause a SIGSEGV */
3069 if (user_mode(regs)) {
3070 +
3071 +#ifdef CONFIG_PAX_PAGEEXEC
3072 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
3073 +#ifdef CONFIG_PPC_STD_MMU
3074 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
3075 +#else
3076 + if (is_exec && regs->nip == address) {
3077 +#endif
3078 + switch (pax_handle_fetch_fault(regs)) {
3079 + }
3080 +
3081 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
3082 + do_group_exit(SIGKILL);
3083 + }
3084 + }
3085 +#endif
3086 +
3087 _exception(SIGSEGV, regs, code, address);
3088 return 0;
3089 }
3090 diff -urNp linux-2.6.32.45/arch/powerpc/mm/mem.c linux-2.6.32.45/arch/powerpc/mm/mem.c
3091 --- linux-2.6.32.45/arch/powerpc/mm/mem.c 2011-03-27 14:31:47.000000000 -0400
3092 +++ linux-2.6.32.45/arch/powerpc/mm/mem.c 2011-08-21 15:50:39.000000000 -0400
3093 @@ -250,7 +250,7 @@ static int __init mark_nonram_nosave(voi
3094 {
3095 unsigned long lmb_next_region_start_pfn,
3096 lmb_region_max_pfn;
3097 - int i;
3098 + unsigned int i;
3099
3100 for (i = 0; i < lmb.memory.cnt - 1; i++) {
3101 lmb_region_max_pfn =
3102 diff -urNp linux-2.6.32.45/arch/powerpc/mm/mmap_64.c linux-2.6.32.45/arch/powerpc/mm/mmap_64.c
3103 --- linux-2.6.32.45/arch/powerpc/mm/mmap_64.c 2011-03-27 14:31:47.000000000 -0400
3104 +++ linux-2.6.32.45/arch/powerpc/mm/mmap_64.c 2011-04-17 15:56:45.000000000 -0400
3105 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
3106 */
3107 if (mmap_is_legacy()) {
3108 mm->mmap_base = TASK_UNMAPPED_BASE;
3109 +
3110 +#ifdef CONFIG_PAX_RANDMMAP
3111 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3112 + mm->mmap_base += mm->delta_mmap;
3113 +#endif
3114 +
3115 mm->get_unmapped_area = arch_get_unmapped_area;
3116 mm->unmap_area = arch_unmap_area;
3117 } else {
3118 mm->mmap_base = mmap_base();
3119 +
3120 +#ifdef CONFIG_PAX_RANDMMAP
3121 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3122 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3123 +#endif
3124 +
3125 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3126 mm->unmap_area = arch_unmap_area_topdown;
3127 }
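The mmap_64.c hunk above (and the s390 mm/mmap.c hunk further down) applies the PAX_RANDMMAP offsets to the chosen mmap base: the legacy bottom-up layout is shifted up by delta_mmap, the top-down layout down by delta_mmap + delta_stack. The deltas themselves are produced elsewhere in the patch, outside this section; the fragment below only restates the arithmetic, with invented values:

#include <stdio.h>

/* Invented values; the real mm->delta_mmap / mm->delta_stack are random,
 * page-aligned offsets set up by other parts of the patch. */
#define TASK_UNMAPPED_BASE 0x0000200000000000UL
#define MMAP_BASE          0x00007fff00000000UL
#define DELTA_MMAP         0x0000000012340000UL
#define DELTA_STACK        0x0000000000560000UL

int main(void)
{
    unsigned long legacy  = TASK_UNMAPPED_BASE + DELTA_MMAP;          /* bottom-up */
    unsigned long topdown = MMAP_BASE - (DELTA_MMAP + DELTA_STACK);   /* top-down  */

    printf("legacy base:   %#lx\n", legacy);
    printf("top-down base: %#lx\n", topdown);
    return 0;
}
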
3128 diff -urNp linux-2.6.32.45/arch/powerpc/mm/slice.c linux-2.6.32.45/arch/powerpc/mm/slice.c
3129 --- linux-2.6.32.45/arch/powerpc/mm/slice.c 2011-03-27 14:31:47.000000000 -0400
3130 +++ linux-2.6.32.45/arch/powerpc/mm/slice.c 2011-04-17 15:56:45.000000000 -0400
3131 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
3132 if ((mm->task_size - len) < addr)
3133 return 0;
3134 vma = find_vma(mm, addr);
3135 - return (!vma || (addr + len) <= vma->vm_start);
3136 + return check_heap_stack_gap(vma, addr, len);
3137 }
3138
3139 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
3140 @@ -256,7 +256,7 @@ full_search:
3141 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
3142 continue;
3143 }
3144 - if (!vma || addr + len <= vma->vm_start) {
3145 + if (check_heap_stack_gap(vma, addr, len)) {
3146 /*
3147 * Remember the place where we stopped the search:
3148 */
3149 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
3150 }
3151 }
3152
3153 - addr = mm->mmap_base;
3154 - while (addr > len) {
3155 + if (mm->mmap_base < len)
3156 + addr = -ENOMEM;
3157 + else
3158 + addr = mm->mmap_base - len;
3159 +
3160 + while (!IS_ERR_VALUE(addr)) {
3161 /* Go down by chunk size */
3162 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
3163 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
3164
3165 /* Check for hit with different page size */
3166 mask = slice_range_to_mask(addr, len);
3167 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
3168 * return with success:
3169 */
3170 vma = find_vma(mm, addr);
3171 - if (!vma || (addr + len) <= vma->vm_start) {
3172 + if (check_heap_stack_gap(vma, addr, len)) {
3173 /* remember the address as a hint for next time */
3174 if (use_cache)
3175 mm->free_area_cache = addr;
3176 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
3177 mm->cached_hole_size = vma->vm_start - addr;
3178
3179 /* try just below the current vma->vm_start */
3180 - addr = vma->vm_start;
3181 + addr = skip_heap_stack_gap(vma, len);
3182 }
3183
3184 /*
3185 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
3186 if (fixed && addr > (mm->task_size - len))
3187 return -EINVAL;
3188
3189 +#ifdef CONFIG_PAX_RANDMMAP
3190 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
3191 + addr = 0;
3192 +#endif
3193 +
3194 /* If hint, make sure it matches our alignment restrictions */
3195 if (!fixed && addr) {
3196 addr = _ALIGN_UP(addr, 1ul << pshift);
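Throughout the patch (osf_sys.c, slice.c above, sh/mm/mmap.c below, and others) the raw test "!vma || addr + len <= vma->vm_start" is replaced by check_heap_stack_gap() and, on the retry path, skip_heap_stack_gap(). Their definitions are not in this section; conceptually they reject candidate ranges that would butt up against a downward-growing stack VMA without leaving a configurable guard gap. A simplified stand-alone sketch of that idea, with the types and the gap size invented here:

#include <stdbool.h>
#include <stdio.h>

/* Minimal stand-ins for the kernel types; illustration only. */
struct vma {
    unsigned long vm_start;
    unsigned long vm_end;
    bool grows_down;            /* stands in for VM_GROWSDOWN */
};

/* Hypothetical sketch of a heap/stack gap check: the candidate range
 * [addr, addr+len) must end below the next mapping, and if that mapping
 * is a downward-growing stack it must additionally leave a guard gap. */
static bool gap_ok(const struct vma *next, unsigned long addr,
                   unsigned long len, unsigned long gap)
{
    if (!next)
        return true;                        /* nothing above the range */
    if (!next->grows_down)
        return addr + len <= next->vm_start;
    return addr + len + gap <= next->vm_start;
}

int main(void)
{
    struct vma stack = { 0x7ffff000UL, 0x80000000UL, true };

    printf("%d\n", gap_ok(&stack, 0x7ffe0000UL, 0x1000, 0x10000)); /* 1: fits      */
    printf("%d\n", gap_ok(&stack, 0x7fff0000UL, 0x1000, 0x10000)); /* 0: too close */
    return 0;
}
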
3197 diff -urNp linux-2.6.32.45/arch/powerpc/platforms/52xx/lite5200_pm.c linux-2.6.32.45/arch/powerpc/platforms/52xx/lite5200_pm.c
3198 --- linux-2.6.32.45/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-03-27 14:31:47.000000000 -0400
3199 +++ linux-2.6.32.45/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-04-17 15:56:45.000000000 -0400
3200 @@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
3201 lite5200_pm_target_state = PM_SUSPEND_ON;
3202 }
3203
3204 -static struct platform_suspend_ops lite5200_pm_ops = {
3205 +static const struct platform_suspend_ops lite5200_pm_ops = {
3206 .valid = lite5200_pm_valid,
3207 .begin = lite5200_pm_begin,
3208 .prepare = lite5200_pm_prepare,
3209 diff -urNp linux-2.6.32.45/arch/powerpc/platforms/52xx/mpc52xx_pm.c linux-2.6.32.45/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3210 --- linux-2.6.32.45/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-03-27 14:31:47.000000000 -0400
3211 +++ linux-2.6.32.45/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-04-17 15:56:45.000000000 -0400
3212 @@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
3213 iounmap(mbar);
3214 }
3215
3216 -static struct platform_suspend_ops mpc52xx_pm_ops = {
3217 +static const struct platform_suspend_ops mpc52xx_pm_ops = {
3218 .valid = mpc52xx_pm_valid,
3219 .prepare = mpc52xx_pm_prepare,
3220 .enter = mpc52xx_pm_enter,
3221 diff -urNp linux-2.6.32.45/arch/powerpc/platforms/83xx/suspend.c linux-2.6.32.45/arch/powerpc/platforms/83xx/suspend.c
3222 --- linux-2.6.32.45/arch/powerpc/platforms/83xx/suspend.c 2011-03-27 14:31:47.000000000 -0400
3223 +++ linux-2.6.32.45/arch/powerpc/platforms/83xx/suspend.c 2011-04-17 15:56:45.000000000 -0400
3224 @@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
3225 return ret;
3226 }
3227
3228 -static struct platform_suspend_ops mpc83xx_suspend_ops = {
3229 +static const struct platform_suspend_ops mpc83xx_suspend_ops = {
3230 .valid = mpc83xx_suspend_valid,
3231 .begin = mpc83xx_suspend_begin,
3232 .enter = mpc83xx_suspend_enter,
3233 diff -urNp linux-2.6.32.45/arch/powerpc/platforms/cell/iommu.c linux-2.6.32.45/arch/powerpc/platforms/cell/iommu.c
3234 --- linux-2.6.32.45/arch/powerpc/platforms/cell/iommu.c 2011-03-27 14:31:47.000000000 -0400
3235 +++ linux-2.6.32.45/arch/powerpc/platforms/cell/iommu.c 2011-04-17 15:56:45.000000000 -0400
3236 @@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struc
3237
3238 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
3239
3240 -struct dma_map_ops dma_iommu_fixed_ops = {
3241 +const struct dma_map_ops dma_iommu_fixed_ops = {
3242 .alloc_coherent = dma_fixed_alloc_coherent,
3243 .free_coherent = dma_fixed_free_coherent,
3244 .map_sg = dma_fixed_map_sg,
3245 diff -urNp linux-2.6.32.45/arch/powerpc/platforms/ps3/system-bus.c linux-2.6.32.45/arch/powerpc/platforms/ps3/system-bus.c
3246 --- linux-2.6.32.45/arch/powerpc/platforms/ps3/system-bus.c 2011-03-27 14:31:47.000000000 -0400
3247 +++ linux-2.6.32.45/arch/powerpc/platforms/ps3/system-bus.c 2011-04-17 15:56:45.000000000 -0400
3248 @@ -694,7 +694,7 @@ static int ps3_dma_supported(struct devi
3249 return mask >= DMA_BIT_MASK(32);
3250 }
3251
3252 -static struct dma_map_ops ps3_sb_dma_ops = {
3253 +static const struct dma_map_ops ps3_sb_dma_ops = {
3254 .alloc_coherent = ps3_alloc_coherent,
3255 .free_coherent = ps3_free_coherent,
3256 .map_sg = ps3_sb_map_sg,
3257 @@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops
3258 .unmap_page = ps3_unmap_page,
3259 };
3260
3261 -static struct dma_map_ops ps3_ioc0_dma_ops = {
3262 +static const struct dma_map_ops ps3_ioc0_dma_ops = {
3263 .alloc_coherent = ps3_alloc_coherent,
3264 .free_coherent = ps3_free_coherent,
3265 .map_sg = ps3_ioc0_map_sg,
3266 diff -urNp linux-2.6.32.45/arch/powerpc/platforms/pseries/Kconfig linux-2.6.32.45/arch/powerpc/platforms/pseries/Kconfig
3267 --- linux-2.6.32.45/arch/powerpc/platforms/pseries/Kconfig 2011-03-27 14:31:47.000000000 -0400
3268 +++ linux-2.6.32.45/arch/powerpc/platforms/pseries/Kconfig 2011-04-17 15:56:45.000000000 -0400
3269 @@ -2,6 +2,8 @@ config PPC_PSERIES
3270 depends on PPC64 && PPC_BOOK3S
3271 bool "IBM pSeries & new (POWER5-based) iSeries"
3272 select MPIC
3273 + select PCI_MSI
3274 + select XICS
3275 select PPC_I8259
3276 select PPC_RTAS
3277 select RTAS_ERROR_LOGGING
3278 diff -urNp linux-2.6.32.45/arch/s390/include/asm/elf.h linux-2.6.32.45/arch/s390/include/asm/elf.h
3279 --- linux-2.6.32.45/arch/s390/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3280 +++ linux-2.6.32.45/arch/s390/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
3281 @@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
3282 that it will "exec", and that there is sufficient room for the brk. */
3283 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
3284
3285 +#ifdef CONFIG_PAX_ASLR
3286 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
3287 +
3288 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3289 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3290 +#endif
3291 +
3292 /* This yields a mask that user programs can use to figure out what
3293 instruction set this CPU supports. */
3294
3295 diff -urNp linux-2.6.32.45/arch/s390/include/asm/setup.h linux-2.6.32.45/arch/s390/include/asm/setup.h
3296 --- linux-2.6.32.45/arch/s390/include/asm/setup.h 2011-03-27 14:31:47.000000000 -0400
3297 +++ linux-2.6.32.45/arch/s390/include/asm/setup.h 2011-04-17 15:56:45.000000000 -0400
3298 @@ -50,13 +50,13 @@ extern unsigned long memory_end;
3299 void detect_memory_layout(struct mem_chunk chunk[]);
3300
3301 #ifdef CONFIG_S390_SWITCH_AMODE
3302 -extern unsigned int switch_amode;
3303 +#define switch_amode (1)
3304 #else
3305 #define switch_amode (0)
3306 #endif
3307
3308 #ifdef CONFIG_S390_EXEC_PROTECT
3309 -extern unsigned int s390_noexec;
3310 +#define s390_noexec (1)
3311 #else
3312 #define s390_noexec (0)
3313 #endif
3314 diff -urNp linux-2.6.32.45/arch/s390/include/asm/uaccess.h linux-2.6.32.45/arch/s390/include/asm/uaccess.h
3315 --- linux-2.6.32.45/arch/s390/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
3316 +++ linux-2.6.32.45/arch/s390/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
3317 @@ -232,6 +232,10 @@ static inline unsigned long __must_check
3318 copy_to_user(void __user *to, const void *from, unsigned long n)
3319 {
3320 might_fault();
3321 +
3322 + if ((long)n < 0)
3323 + return n;
3324 +
3325 if (access_ok(VERIFY_WRITE, to, n))
3326 n = __copy_to_user(to, from, n);
3327 return n;
3328 @@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void
3329 static inline unsigned long __must_check
3330 __copy_from_user(void *to, const void __user *from, unsigned long n)
3331 {
3332 + if ((long)n < 0)
3333 + return n;
3334 +
3335 if (__builtin_constant_p(n) && (n <= 256))
3336 return uaccess.copy_from_user_small(n, from, to);
3337 else
3338 @@ -283,6 +290,10 @@ static inline unsigned long __must_check
3339 copy_from_user(void *to, const void __user *from, unsigned long n)
3340 {
3341 might_fault();
3342 +
3343 + if ((long)n < 0)
3344 + return n;
3345 +
3346 if (access_ok(VERIFY_READ, from, n))
3347 n = __copy_from_user(to, from, n);
3348 else
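The s390 uaccess.h hunks above (and the sparc uaccess_32.h/uaccess_64.h hunks later in this section) prepend a sign check on the length: a size whose top bit is set is almost certainly a negative value that leaked into an unsigned count, and without the check it would turn into an enormous copy. A toy model of the guard, with memcpy() standing in for the real copy routine:

#include <stdio.h>
#include <string.h>

/* Toy model of the added guard: reject lengths whose sign bit is set,
 * since they almost certainly come from a negative size mixed into an
 * unsigned count and would otherwise mean a huge copy. */
static unsigned long guarded_copy(void *to, const void *from, unsigned long n)
{
    if ((long)n < 0)
        return n;               /* report everything as "not copied" */
    memcpy(to, from, n);
    return 0;
}

int main(void)
{
    char src[8] = "example", dst[8];
    long broken_len = -1;       /* e.g. an error code misused as a length */

    printf("ok copy left  %lu bytes\n", guarded_copy(dst, src, sizeof(src)));
    printf("bad copy left %lu bytes\n", guarded_copy(dst, src, (unsigned long)broken_len));
    return 0;
}
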
3349 diff -urNp linux-2.6.32.45/arch/s390/Kconfig linux-2.6.32.45/arch/s390/Kconfig
3350 --- linux-2.6.32.45/arch/s390/Kconfig 2011-03-27 14:31:47.000000000 -0400
3351 +++ linux-2.6.32.45/arch/s390/Kconfig 2011-04-17 15:56:45.000000000 -0400
3352 @@ -194,28 +194,26 @@ config AUDIT_ARCH
3353
3354 config S390_SWITCH_AMODE
3355 bool "Switch kernel/user addressing modes"
3356 + default y
3357 help
3358 This option allows to switch the addressing modes of kernel and user
3359 - space. The kernel parameter switch_amode=on will enable this feature,
3360 - default is disabled. Enabling this (via kernel parameter) on machines
3361 - earlier than IBM System z9-109 EC/BC will reduce system performance.
3362 + space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3363 + will reduce system performance.
3364
3365 Note that this option will also be selected by selecting the execute
3366 - protection option below. Enabling the execute protection via the
3367 - noexec kernel parameter will also switch the addressing modes,
3368 - independent of the switch_amode kernel parameter.
3369 + protection option below. Enabling the execute protection will also
3370 + switch the addressing modes, independent of this option.
3371
3372
3373 config S390_EXEC_PROTECT
3374 bool "Data execute protection"
3375 + default y
3376 select S390_SWITCH_AMODE
3377 help
3378 This option allows to enable a buffer overflow protection for user
3379 space programs and it also selects the addressing mode option above.
3380 - The kernel parameter noexec=on will enable this feature and also
3381 - switch the addressing modes, default is disabled. Enabling this (via
3382 - kernel parameter) on machines earlier than IBM System z9-109 EC/BC
3383 - will reduce system performance.
3384 + Enabling this on machines earlier than IBM System z9-109 EC/BC will
3385 + reduce system performance.
3386
3387 comment "Code generation options"
3388
3389 diff -urNp linux-2.6.32.45/arch/s390/kernel/module.c linux-2.6.32.45/arch/s390/kernel/module.c
3390 --- linux-2.6.32.45/arch/s390/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
3391 +++ linux-2.6.32.45/arch/s390/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
3392 @@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
3393
3394 /* Increase core size by size of got & plt and set start
3395 offsets for got and plt. */
3396 - me->core_size = ALIGN(me->core_size, 4);
3397 - me->arch.got_offset = me->core_size;
3398 - me->core_size += me->arch.got_size;
3399 - me->arch.plt_offset = me->core_size;
3400 - me->core_size += me->arch.plt_size;
3401 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
3402 + me->arch.got_offset = me->core_size_rw;
3403 + me->core_size_rw += me->arch.got_size;
3404 + me->arch.plt_offset = me->core_size_rx;
3405 + me->core_size_rx += me->arch.plt_size;
3406 return 0;
3407 }
3408
3409 @@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3410 if (info->got_initialized == 0) {
3411 Elf_Addr *gotent;
3412
3413 - gotent = me->module_core + me->arch.got_offset +
3414 + gotent = me->module_core_rw + me->arch.got_offset +
3415 info->got_offset;
3416 *gotent = val;
3417 info->got_initialized = 1;
3418 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3419 else if (r_type == R_390_GOTENT ||
3420 r_type == R_390_GOTPLTENT)
3421 *(unsigned int *) loc =
3422 - (val + (Elf_Addr) me->module_core - loc) >> 1;
3423 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3424 else if (r_type == R_390_GOT64 ||
3425 r_type == R_390_GOTPLT64)
3426 *(unsigned long *) loc = val;
3427 @@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3428 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3429 if (info->plt_initialized == 0) {
3430 unsigned int *ip;
3431 - ip = me->module_core + me->arch.plt_offset +
3432 + ip = me->module_core_rx + me->arch.plt_offset +
3433 info->plt_offset;
3434 #ifndef CONFIG_64BIT
3435 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3436 @@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3437 val - loc + 0xffffUL < 0x1ffffeUL) ||
3438 (r_type == R_390_PLT32DBL &&
3439 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3440 - val = (Elf_Addr) me->module_core +
3441 + val = (Elf_Addr) me->module_core_rx +
3442 me->arch.plt_offset +
3443 info->plt_offset;
3444 val += rela->r_addend - loc;
3445 @@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3446 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3447 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3448 val = val + rela->r_addend -
3449 - ((Elf_Addr) me->module_core + me->arch.got_offset);
3450 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3451 if (r_type == R_390_GOTOFF16)
3452 *(unsigned short *) loc = val;
3453 else if (r_type == R_390_GOTOFF32)
3454 @@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3455 break;
3456 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3457 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3458 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
3459 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3460 rela->r_addend - loc;
3461 if (r_type == R_390_GOTPC)
3462 *(unsigned int *) loc = val;
3463 diff -urNp linux-2.6.32.45/arch/s390/kernel/setup.c linux-2.6.32.45/arch/s390/kernel/setup.c
3464 --- linux-2.6.32.45/arch/s390/kernel/setup.c 2011-03-27 14:31:47.000000000 -0400
3465 +++ linux-2.6.32.45/arch/s390/kernel/setup.c 2011-04-17 15:56:45.000000000 -0400
3466 @@ -306,9 +306,6 @@ static int __init early_parse_mem(char *
3467 early_param("mem", early_parse_mem);
3468
3469 #ifdef CONFIG_S390_SWITCH_AMODE
3470 -unsigned int switch_amode = 0;
3471 -EXPORT_SYMBOL_GPL(switch_amode);
3472 -
3473 static int set_amode_and_uaccess(unsigned long user_amode,
3474 unsigned long user32_amode)
3475 {
3476 @@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigne
3477 return 0;
3478 }
3479 }
3480 -
3481 -/*
3482 - * Switch kernel/user addressing modes?
3483 - */
3484 -static int __init early_parse_switch_amode(char *p)
3485 -{
3486 - switch_amode = 1;
3487 - return 0;
3488 -}
3489 -early_param("switch_amode", early_parse_switch_amode);
3490 -
3491 #else /* CONFIG_S390_SWITCH_AMODE */
3492 static inline int set_amode_and_uaccess(unsigned long user_amode,
3493 unsigned long user32_amode)
3494 @@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(
3495 }
3496 #endif /* CONFIG_S390_SWITCH_AMODE */
3497
3498 -#ifdef CONFIG_S390_EXEC_PROTECT
3499 -unsigned int s390_noexec = 0;
3500 -EXPORT_SYMBOL_GPL(s390_noexec);
3501 -
3502 -/*
3503 - * Enable execute protection?
3504 - */
3505 -static int __init early_parse_noexec(char *p)
3506 -{
3507 - if (!strncmp(p, "off", 3))
3508 - return 0;
3509 - switch_amode = 1;
3510 - s390_noexec = 1;
3511 - return 0;
3512 -}
3513 -early_param("noexec", early_parse_noexec);
3514 -#endif /* CONFIG_S390_EXEC_PROTECT */
3515 -
3516 static void setup_addressing_mode(void)
3517 {
3518 if (s390_noexec) {
3519 diff -urNp linux-2.6.32.45/arch/s390/mm/mmap.c linux-2.6.32.45/arch/s390/mm/mmap.c
3520 --- linux-2.6.32.45/arch/s390/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3521 +++ linux-2.6.32.45/arch/s390/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
3522 @@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_str
3523 */
3524 if (mmap_is_legacy()) {
3525 mm->mmap_base = TASK_UNMAPPED_BASE;
3526 +
3527 +#ifdef CONFIG_PAX_RANDMMAP
3528 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3529 + mm->mmap_base += mm->delta_mmap;
3530 +#endif
3531 +
3532 mm->get_unmapped_area = arch_get_unmapped_area;
3533 mm->unmap_area = arch_unmap_area;
3534 } else {
3535 mm->mmap_base = mmap_base();
3536 +
3537 +#ifdef CONFIG_PAX_RANDMMAP
3538 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3539 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3540 +#endif
3541 +
3542 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3543 mm->unmap_area = arch_unmap_area_topdown;
3544 }
3545 @@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_str
3546 */
3547 if (mmap_is_legacy()) {
3548 mm->mmap_base = TASK_UNMAPPED_BASE;
3549 +
3550 +#ifdef CONFIG_PAX_RANDMMAP
3551 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3552 + mm->mmap_base += mm->delta_mmap;
3553 +#endif
3554 +
3555 mm->get_unmapped_area = s390_get_unmapped_area;
3556 mm->unmap_area = arch_unmap_area;
3557 } else {
3558 mm->mmap_base = mmap_base();
3559 +
3560 +#ifdef CONFIG_PAX_RANDMMAP
3561 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3562 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3563 +#endif
3564 +
3565 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3566 mm->unmap_area = arch_unmap_area_topdown;
3567 }
3568 diff -urNp linux-2.6.32.45/arch/score/include/asm/system.h linux-2.6.32.45/arch/score/include/asm/system.h
3569 --- linux-2.6.32.45/arch/score/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
3570 +++ linux-2.6.32.45/arch/score/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
3571 @@ -17,7 +17,7 @@ do { \
3572 #define finish_arch_switch(prev) do {} while (0)
3573
3574 typedef void (*vi_handler_t)(void);
3575 -extern unsigned long arch_align_stack(unsigned long sp);
3576 +#define arch_align_stack(x) (x)
3577
3578 #define mb() barrier()
3579 #define rmb() barrier()
3580 diff -urNp linux-2.6.32.45/arch/score/kernel/process.c linux-2.6.32.45/arch/score/kernel/process.c
3581 --- linux-2.6.32.45/arch/score/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
3582 +++ linux-2.6.32.45/arch/score/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
3583 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
3584
3585 return task_pt_regs(task)->cp0_epc;
3586 }
3587 -
3588 -unsigned long arch_align_stack(unsigned long sp)
3589 -{
3590 - return sp;
3591 -}
3592 diff -urNp linux-2.6.32.45/arch/sh/boards/mach-hp6xx/pm.c linux-2.6.32.45/arch/sh/boards/mach-hp6xx/pm.c
3593 --- linux-2.6.32.45/arch/sh/boards/mach-hp6xx/pm.c 2011-03-27 14:31:47.000000000 -0400
3594 +++ linux-2.6.32.45/arch/sh/boards/mach-hp6xx/pm.c 2011-04-17 15:56:45.000000000 -0400
3595 @@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_
3596 return 0;
3597 }
3598
3599 -static struct platform_suspend_ops hp6x0_pm_ops = {
3600 +static const struct platform_suspend_ops hp6x0_pm_ops = {
3601 .enter = hp6x0_pm_enter,
3602 .valid = suspend_valid_only_mem,
3603 };
3604 diff -urNp linux-2.6.32.45/arch/sh/kernel/cpu/sh4/sq.c linux-2.6.32.45/arch/sh/kernel/cpu/sh4/sq.c
3605 --- linux-2.6.32.45/arch/sh/kernel/cpu/sh4/sq.c 2011-03-27 14:31:47.000000000 -0400
3606 +++ linux-2.6.32.45/arch/sh/kernel/cpu/sh4/sq.c 2011-04-17 15:56:46.000000000 -0400
3607 @@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[
3608 NULL,
3609 };
3610
3611 -static struct sysfs_ops sq_sysfs_ops = {
3612 +static const struct sysfs_ops sq_sysfs_ops = {
3613 .show = sq_sysfs_show,
3614 .store = sq_sysfs_store,
3615 };
3616 diff -urNp linux-2.6.32.45/arch/sh/kernel/cpu/shmobile/pm.c linux-2.6.32.45/arch/sh/kernel/cpu/shmobile/pm.c
3617 --- linux-2.6.32.45/arch/sh/kernel/cpu/shmobile/pm.c 2011-03-27 14:31:47.000000000 -0400
3618 +++ linux-2.6.32.45/arch/sh/kernel/cpu/shmobile/pm.c 2011-04-17 15:56:46.000000000 -0400
3619 @@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t s
3620 return 0;
3621 }
3622
3623 -static struct platform_suspend_ops sh_pm_ops = {
3624 +static const struct platform_suspend_ops sh_pm_ops = {
3625 .enter = sh_pm_enter,
3626 .valid = suspend_valid_only_mem,
3627 };
3628 diff -urNp linux-2.6.32.45/arch/sh/kernel/kgdb.c linux-2.6.32.45/arch/sh/kernel/kgdb.c
3629 --- linux-2.6.32.45/arch/sh/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
3630 +++ linux-2.6.32.45/arch/sh/kernel/kgdb.c 2011-04-17 15:56:46.000000000 -0400
3631 @@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
3632 {
3633 }
3634
3635 -struct kgdb_arch arch_kgdb_ops = {
3636 +const struct kgdb_arch arch_kgdb_ops = {
3637 /* Breakpoint instruction: trapa #0x3c */
3638 #ifdef CONFIG_CPU_LITTLE_ENDIAN
3639 .gdb_bpt_instr = { 0x3c, 0xc3 },
3640 diff -urNp linux-2.6.32.45/arch/sh/mm/mmap.c linux-2.6.32.45/arch/sh/mm/mmap.c
3641 --- linux-2.6.32.45/arch/sh/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3642 +++ linux-2.6.32.45/arch/sh/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
3643 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
3644 addr = PAGE_ALIGN(addr);
3645
3646 vma = find_vma(mm, addr);
3647 - if (TASK_SIZE - len >= addr &&
3648 - (!vma || addr + len <= vma->vm_start))
3649 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3650 return addr;
3651 }
3652
3653 @@ -106,7 +105,7 @@ full_search:
3654 }
3655 return -ENOMEM;
3656 }
3657 - if (likely(!vma || addr + len <= vma->vm_start)) {
3658 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3659 /*
3660 * Remember the place where we stopped the search:
3661 */
3662 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
3663 addr = PAGE_ALIGN(addr);
3664
3665 vma = find_vma(mm, addr);
3666 - if (TASK_SIZE - len >= addr &&
3667 - (!vma || addr + len <= vma->vm_start))
3668 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3669 return addr;
3670 }
3671
3672 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
3673 /* make sure it can fit in the remaining address space */
3674 if (likely(addr > len)) {
3675 vma = find_vma(mm, addr-len);
3676 - if (!vma || addr <= vma->vm_start) {
3677 + if (check_heap_stack_gap(vma, addr - len, len)) {
3678 /* remember the address as a hint for next time */
3679 return (mm->free_area_cache = addr-len);
3680 }
3681 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
3682 if (unlikely(mm->mmap_base < len))
3683 goto bottomup;
3684
3685 - addr = mm->mmap_base-len;
3686 - if (do_colour_align)
3687 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3688 + addr = mm->mmap_base - len;
3689
3690 do {
3691 + if (do_colour_align)
3692 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3693 /*
3694 * Lookup failure means no vma is above this address,
3695 * else if new region fits below vma->vm_start,
3696 * return with success:
3697 */
3698 vma = find_vma(mm, addr);
3699 - if (likely(!vma || addr+len <= vma->vm_start)) {
3700 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3701 /* remember the address as a hint for next time */
3702 return (mm->free_area_cache = addr);
3703 }
3704 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
3705 mm->cached_hole_size = vma->vm_start - addr;
3706
3707 /* try just below the current vma->vm_start */
3708 - addr = vma->vm_start-len;
3709 - if (do_colour_align)
3710 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3711 - } while (likely(len < vma->vm_start));
3712 + addr = skip_heap_stack_gap(vma, len);
3713 + } while (!IS_ERR_VALUE(addr));
3714
3715 bottomup:
3716 /*
3717 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/atomic_64.h linux-2.6.32.45/arch/sparc/include/asm/atomic_64.h
3718 --- linux-2.6.32.45/arch/sparc/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
3719 +++ linux-2.6.32.45/arch/sparc/include/asm/atomic_64.h 2011-08-18 23:11:34.000000000 -0400
3720 @@ -14,18 +14,40 @@
3721 #define ATOMIC64_INIT(i) { (i) }
3722
3723 #define atomic_read(v) ((v)->counter)
3724 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3725 +{
3726 + return v->counter;
3727 +}
3728 #define atomic64_read(v) ((v)->counter)
3729 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3730 +{
3731 + return v->counter;
3732 +}
3733
3734 #define atomic_set(v, i) (((v)->counter) = i)
3735 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3736 +{
3737 + v->counter = i;
3738 +}
3739 #define atomic64_set(v, i) (((v)->counter) = i)
3740 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3741 +{
3742 + v->counter = i;
3743 +}
3744
3745 extern void atomic_add(int, atomic_t *);
3746 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3747 extern void atomic64_add(long, atomic64_t *);
3748 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3749 extern void atomic_sub(int, atomic_t *);
3750 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3751 extern void atomic64_sub(long, atomic64_t *);
3752 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3753
3754 extern int atomic_add_ret(int, atomic_t *);
3755 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3756 extern long atomic64_add_ret(long, atomic64_t *);
3757 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3758 extern int atomic_sub_ret(int, atomic_t *);
3759 extern long atomic64_sub_ret(long, atomic64_t *);
3760
3761 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
3762 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3763
3764 #define atomic_inc_return(v) atomic_add_ret(1, v)
3765 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3766 +{
3767 + return atomic_add_ret_unchecked(1, v);
3768 +}
3769 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3770 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3771 +{
3772 + return atomic64_add_ret_unchecked(1, v);
3773 +}
3774
3775 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3776 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3777
3778 #define atomic_add_return(i, v) atomic_add_ret(i, v)
3779 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
3780 +{
3781 + return atomic_add_ret_unchecked(i, v);
3782 +}
3783 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
3784 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
3785 +{
3786 + return atomic64_add_ret_unchecked(i, v);
3787 +}
3788
3789 /*
3790 * atomic_inc_and_test - increment and test
3791 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomi
3792 * other cases.
3793 */
3794 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3795 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
3796 +{
3797 + return atomic_inc_return_unchecked(v) == 0;
3798 +}
3799 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3800
3801 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3802 @@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomi
3803 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3804
3805 #define atomic_inc(v) atomic_add(1, v)
3806 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3807 +{
3808 + atomic_add_unchecked(1, v);
3809 +}
3810 #define atomic64_inc(v) atomic64_add(1, v)
3811 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3812 +{
3813 + atomic64_add_unchecked(1, v);
3814 +}
3815
3816 #define atomic_dec(v) atomic_sub(1, v)
3817 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3818 +{
3819 + atomic_sub_unchecked(1, v);
3820 +}
3821 #define atomic64_dec(v) atomic64_sub(1, v)
3822 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3823 +{
3824 + atomic64_sub_unchecked(1, v);
3825 +}
3826
3827 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3828 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3829
3830 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3831 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
3832 +{
3833 + return cmpxchg(&v->counter, old, new);
3834 +}
3835 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3836 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
3837 +{
3838 + return xchg(&v->counter, new);
3839 +}
3840
3841 static inline int atomic_add_unless(atomic_t *v, int a, int u)
3842 {
3843 - int c, old;
3844 + int c, old, new;
3845 c = atomic_read(v);
3846 for (;;) {
3847 - if (unlikely(c == (u)))
3848 + if (unlikely(c == u))
3849 break;
3850 - old = atomic_cmpxchg((v), c, c + (a));
3851 +
3852 + asm volatile("addcc %2, %0, %0\n"
3853 +
3854 +#ifdef CONFIG_PAX_REFCOUNT
3855 + "tvs %%icc, 6\n"
3856 +#endif
3857 +
3858 + : "=r" (new)
3859 + : "0" (c), "ir" (a)
3860 + : "cc");
3861 +
3862 + old = atomic_cmpxchg(v, c, new);
3863 if (likely(old == c))
3864 break;
3865 c = old;
3866 }
3867 - return c != (u);
3868 + return c != u;
3869 }
3870
3871 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
3872 @@ -90,20 +167,35 @@ static inline int atomic_add_unless(atom
3873 #define atomic64_cmpxchg(v, o, n) \
3874 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
3875 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
3876 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
3877 +{
3878 + return xchg(&v->counter, new);
3879 +}
3880
3881 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3882 {
3883 - long c, old;
3884 + long c, old, new;
3885 c = atomic64_read(v);
3886 for (;;) {
3887 - if (unlikely(c == (u)))
3888 + if (unlikely(c == u))
3889 break;
3890 - old = atomic64_cmpxchg((v), c, c + (a));
3891 +
3892 + asm volatile("addcc %2, %0, %0\n"
3893 +
3894 +#ifdef CONFIG_PAX_REFCOUNT
3895 + "tvs %%xcc, 6\n"
3896 +#endif
3897 +
3898 + : "=r" (new)
3899 + : "0" (c), "ir" (a)
3900 + : "cc");
3901 +
3902 + old = atomic64_cmpxchg(v, c, new);
3903 if (likely(old == c))
3904 break;
3905 c = old;
3906 }
3907 - return c != (u);
3908 + return c != u;
3909 }
3910
3911 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
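The atomic_64.h hunk above is the PAX_REFCOUNT pattern: the condition-code-setting forms (addcc/subcc) followed by a conditional trap-on-overflow instruction (the tvs emitted by the inline asm) make a signed overflow of an atomic_t trap, while the new *_unchecked variants keep the plain wrapping behaviour for counters where wrap-around is intentional. A portable analogue of the two flavours, as a sketch only (abort() stands in for the trap; obviously not how the kernel reacts):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Checked add: aborts on signed overflow, like the trapping addcc path. */
static int checked_add(int a, int b)
{
    int r;
    if (__builtin_add_overflow(a, b, &r))
        abort();                /* reference count overflowed */
    return r;
}

/* Unchecked add: silently wraps, like the *_unchecked variants. */
static int unchecked_add(int a, int b)
{
    return (int)((unsigned)a + (unsigned)b);
}

int main(void)
{
    printf("%d\n", checked_add(1, 2));
    printf("%d\n", unchecked_add(INT_MAX, 1)); /* wraps to INT_MIN */
    printf("%d\n", checked_add(INT_MAX, 1));   /* aborts */
    return 0;
}
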
3912 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/cache.h linux-2.6.32.45/arch/sparc/include/asm/cache.h
3913 --- linux-2.6.32.45/arch/sparc/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
3914 +++ linux-2.6.32.45/arch/sparc/include/asm/cache.h 2011-07-06 19:53:33.000000000 -0400
3915 @@ -8,7 +8,7 @@
3916 #define _SPARC_CACHE_H
3917
3918 #define L1_CACHE_SHIFT 5
3919 -#define L1_CACHE_BYTES 32
3920 +#define L1_CACHE_BYTES 32UL
3921 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
3922
3923 #ifdef CONFIG_SPARC32
3924 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/dma-mapping.h linux-2.6.32.45/arch/sparc/include/asm/dma-mapping.h
3925 --- linux-2.6.32.45/arch/sparc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
3926 +++ linux-2.6.32.45/arch/sparc/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
3927 @@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *d
3928 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
3929 #define dma_is_consistent(d, h) (1)
3930
3931 -extern struct dma_map_ops *dma_ops, pci32_dma_ops;
3932 +extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
3933 extern struct bus_type pci_bus_type;
3934
3935 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3936 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
3937 {
3938 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
3939 if (dev->bus == &pci_bus_type)
3940 @@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dm
3941 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3942 dma_addr_t *dma_handle, gfp_t flag)
3943 {
3944 - struct dma_map_ops *ops = get_dma_ops(dev);
3945 + const struct dma_map_ops *ops = get_dma_ops(dev);
3946 void *cpu_addr;
3947
3948 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
3949 @@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(s
3950 static inline void dma_free_coherent(struct device *dev, size_t size,
3951 void *cpu_addr, dma_addr_t dma_handle)
3952 {
3953 - struct dma_map_ops *ops = get_dma_ops(dev);
3954 + const struct dma_map_ops *ops = get_dma_ops(dev);
3955
3956 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
3957 ops->free_coherent(dev, size, cpu_addr, dma_handle);
3958 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/elf_32.h linux-2.6.32.45/arch/sparc/include/asm/elf_32.h
3959 --- linux-2.6.32.45/arch/sparc/include/asm/elf_32.h 2011-03-27 14:31:47.000000000 -0400
3960 +++ linux-2.6.32.45/arch/sparc/include/asm/elf_32.h 2011-04-17 15:56:46.000000000 -0400
3961 @@ -116,6 +116,13 @@ typedef struct {
3962
3963 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3964
3965 +#ifdef CONFIG_PAX_ASLR
3966 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3967 +
3968 +#define PAX_DELTA_MMAP_LEN 16
3969 +#define PAX_DELTA_STACK_LEN 16
3970 +#endif
3971 +
3972 /* This yields a mask that user programs can use to figure out what
3973 instruction set this cpu supports. This can NOT be done in userspace
3974 on Sparc. */
3975 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/elf_64.h linux-2.6.32.45/arch/sparc/include/asm/elf_64.h
3976 --- linux-2.6.32.45/arch/sparc/include/asm/elf_64.h 2011-03-27 14:31:47.000000000 -0400
3977 +++ linux-2.6.32.45/arch/sparc/include/asm/elf_64.h 2011-04-17 15:56:46.000000000 -0400
3978 @@ -163,6 +163,12 @@ typedef struct {
3979 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3980 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3981
3982 +#ifdef CONFIG_PAX_ASLR
3983 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3984 +
3985 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3986 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3987 +#endif
3988
3989 /* This yields a mask that user programs can use to figure out what
3990 instruction set this cpu supports. */
3991 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/pgtable_32.h linux-2.6.32.45/arch/sparc/include/asm/pgtable_32.h
3992 --- linux-2.6.32.45/arch/sparc/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
3993 +++ linux-2.6.32.45/arch/sparc/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
3994 @@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3995 BTFIXUPDEF_INT(page_none)
3996 BTFIXUPDEF_INT(page_copy)
3997 BTFIXUPDEF_INT(page_readonly)
3998 +
3999 +#ifdef CONFIG_PAX_PAGEEXEC
4000 +BTFIXUPDEF_INT(page_shared_noexec)
4001 +BTFIXUPDEF_INT(page_copy_noexec)
4002 +BTFIXUPDEF_INT(page_readonly_noexec)
4003 +#endif
4004 +
4005 BTFIXUPDEF_INT(page_kernel)
4006
4007 #define PMD_SHIFT SUN4C_PMD_SHIFT
4008 @@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
4009 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
4010 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
4011
4012 +#ifdef CONFIG_PAX_PAGEEXEC
4013 +extern pgprot_t PAGE_SHARED_NOEXEC;
4014 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
4015 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
4016 +#else
4017 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
4018 +# define PAGE_COPY_NOEXEC PAGE_COPY
4019 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
4020 +#endif
4021 +
4022 extern unsigned long page_kernel;
4023
4024 #ifdef MODULE
4025 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.32.45/arch/sparc/include/asm/pgtsrmmu.h
4026 --- linux-2.6.32.45/arch/sparc/include/asm/pgtsrmmu.h 2011-03-27 14:31:47.000000000 -0400
4027 +++ linux-2.6.32.45/arch/sparc/include/asm/pgtsrmmu.h 2011-04-17 15:56:46.000000000 -0400
4028 @@ -115,6 +115,13 @@
4029 SRMMU_EXEC | SRMMU_REF)
4030 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
4031 SRMMU_EXEC | SRMMU_REF)
4032 +
4033 +#ifdef CONFIG_PAX_PAGEEXEC
4034 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
4035 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4036 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4037 +#endif
4038 +
4039 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
4040 SRMMU_DIRTY | SRMMU_REF)
4041
4042 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/spinlock_64.h linux-2.6.32.45/arch/sparc/include/asm/spinlock_64.h
4043 --- linux-2.6.32.45/arch/sparc/include/asm/spinlock_64.h 2011-03-27 14:31:47.000000000 -0400
4044 +++ linux-2.6.32.45/arch/sparc/include/asm/spinlock_64.h 2011-08-18 23:19:30.000000000 -0400
4045 @@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags
4046
4047 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
4048
4049 -static void inline arch_read_lock(raw_rwlock_t *lock)
4050 +static inline void arch_read_lock(raw_rwlock_t *lock)
4051 {
4052 unsigned long tmp1, tmp2;
4053
4054 __asm__ __volatile__ (
4055 "1: ldsw [%2], %0\n"
4056 " brlz,pn %0, 2f\n"
4057 -"4: add %0, 1, %1\n"
4058 +"4: addcc %0, 1, %1\n"
4059 +
4060 +#ifdef CONFIG_PAX_REFCOUNT
4061 +" tvs %%icc, 6\n"
4062 +#endif
4063 +
4064 " cas [%2], %0, %1\n"
4065 " cmp %0, %1\n"
4066 " bne,pn %%icc, 1b\n"
4067 @@ -112,10 +117,10 @@ static void inline arch_read_lock(raw_rw
4068 " .previous"
4069 : "=&r" (tmp1), "=&r" (tmp2)
4070 : "r" (lock)
4071 - : "memory");
4072 + : "memory", "cc");
4073 }
4074
4075 -static int inline arch_read_trylock(raw_rwlock_t *lock)
4076 +static inline int arch_read_trylock(raw_rwlock_t *lock)
4077 {
4078 int tmp1, tmp2;
4079
4080 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_
4081 "1: ldsw [%2], %0\n"
4082 " brlz,a,pn %0, 2f\n"
4083 " mov 0, %0\n"
4084 -" add %0, 1, %1\n"
4085 +" addcc %0, 1, %1\n"
4086 +
4087 +#ifdef CONFIG_PAX_REFCOUNT
4088 +" tvs %%icc, 6\n"
4089 +#endif
4090 +
4091 " cas [%2], %0, %1\n"
4092 " cmp %0, %1\n"
4093 " bne,pn %%icc, 1b\n"
4094 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_
4095 return tmp1;
4096 }
4097
4098 -static void inline arch_read_unlock(raw_rwlock_t *lock)
4099 +static inline void arch_read_unlock(raw_rwlock_t *lock)
4100 {
4101 unsigned long tmp1, tmp2;
4102
4103 __asm__ __volatile__(
4104 "1: lduw [%2], %0\n"
4105 -" sub %0, 1, %1\n"
4106 +" subcc %0, 1, %1\n"
4107 +
4108 +#ifdef CONFIG_PAX_REFCOUNT
4109 +" tvs %%icc, 6\n"
4110 +#endif
4111 +
4112 " cas [%2], %0, %1\n"
4113 " cmp %0, %1\n"
4114 " bne,pn %%xcc, 1b\n"
4115 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_
4116 : "memory");
4117 }
4118
4119 -static void inline arch_write_lock(raw_rwlock_t *lock)
4120 +static inline void arch_write_lock(raw_rwlock_t *lock)
4121 {
4122 unsigned long mask, tmp1, tmp2;
4123
4124 @@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_r
4125 : "memory");
4126 }
4127
4128 -static void inline arch_write_unlock(raw_rwlock_t *lock)
4129 +static inline void arch_write_unlock(raw_rwlock_t *lock)
4130 {
4131 __asm__ __volatile__(
4132 " stw %%g0, [%0]"
4133 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(raw
4134 : "memory");
4135 }
4136
4137 -static int inline arch_write_trylock(raw_rwlock_t *lock)
4138 +static inline int arch_write_trylock(raw_rwlock_t *lock)
4139 {
4140 unsigned long mask, tmp1, tmp2, result;
4141
4142 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/thread_info_32.h linux-2.6.32.45/arch/sparc/include/asm/thread_info_32.h
4143 --- linux-2.6.32.45/arch/sparc/include/asm/thread_info_32.h 2011-03-27 14:31:47.000000000 -0400
4144 +++ linux-2.6.32.45/arch/sparc/include/asm/thread_info_32.h 2011-06-04 20:46:01.000000000 -0400
4145 @@ -50,6 +50,8 @@ struct thread_info {
4146 unsigned long w_saved;
4147
4148 struct restart_block restart_block;
4149 +
4150 + unsigned long lowest_stack;
4151 };
4152
4153 /*
4154 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/thread_info_64.h linux-2.6.32.45/arch/sparc/include/asm/thread_info_64.h
4155 --- linux-2.6.32.45/arch/sparc/include/asm/thread_info_64.h 2011-03-27 14:31:47.000000000 -0400
4156 +++ linux-2.6.32.45/arch/sparc/include/asm/thread_info_64.h 2011-06-04 20:46:21.000000000 -0400
4157 @@ -68,6 +68,8 @@ struct thread_info {
4158 struct pt_regs *kern_una_regs;
4159 unsigned int kern_una_insn;
4160
4161 + unsigned long lowest_stack;
4162 +
4163 unsigned long fpregs[0] __attribute__ ((aligned(64)));
4164 };
4165
4166 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/uaccess_32.h linux-2.6.32.45/arch/sparc/include/asm/uaccess_32.h
4167 --- linux-2.6.32.45/arch/sparc/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
4168 +++ linux-2.6.32.45/arch/sparc/include/asm/uaccess_32.h 2011-04-17 15:56:46.000000000 -0400
4169 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
4170
4171 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4172 {
4173 - if (n && __access_ok((unsigned long) to, n))
4174 + if ((long)n < 0)
4175 + return n;
4176 +
4177 + if (n && __access_ok((unsigned long) to, n)) {
4178 + if (!__builtin_constant_p(n))
4179 + check_object_size(from, n, true);
4180 return __copy_user(to, (__force void __user *) from, n);
4181 - else
4182 + } else
4183 return n;
4184 }
4185
4186 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
4187 {
4188 + if ((long)n < 0)
4189 + return n;
4190 +
4191 + if (!__builtin_constant_p(n))
4192 + check_object_size(from, n, true);
4193 +
4194 return __copy_user(to, (__force void __user *) from, n);
4195 }
4196
4197 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4198 {
4199 - if (n && __access_ok((unsigned long) from, n))
4200 + if ((long)n < 0)
4201 + return n;
4202 +
4203 + if (n && __access_ok((unsigned long) from, n)) {
4204 + if (!__builtin_constant_p(n))
4205 + check_object_size(to, n, false);
4206 return __copy_user((__force void __user *) to, from, n);
4207 - else
4208 + } else
4209 return n;
4210 }
4211
4212 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
4213 {
4214 + if ((long)n < 0)
4215 + return n;
4216 +
4217 return __copy_user((__force void __user *) to, from, n);
4218 }
4219
4220 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/uaccess_64.h linux-2.6.32.45/arch/sparc/include/asm/uaccess_64.h
4221 --- linux-2.6.32.45/arch/sparc/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
4222 +++ linux-2.6.32.45/arch/sparc/include/asm/uaccess_64.h 2011-04-17 15:56:46.000000000 -0400
4223 @@ -9,6 +9,7 @@
4224 #include <linux/compiler.h>
4225 #include <linux/string.h>
4226 #include <linux/thread_info.h>
4227 +#include <linux/kernel.h>
4228 #include <asm/asi.h>
4229 #include <asm/system.h>
4230 #include <asm/spitfire.h>
4231 @@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixu
4232 static inline unsigned long __must_check
4233 copy_from_user(void *to, const void __user *from, unsigned long size)
4234 {
4235 - unsigned long ret = ___copy_from_user(to, from, size);
4236 + unsigned long ret;
4237
4238 + if ((long)size < 0 || size > INT_MAX)
4239 + return size;
4240 +
4241 + if (!__builtin_constant_p(size))
4242 + check_object_size(to, size, false);
4243 +
4244 + ret = ___copy_from_user(to, from, size);
4245 if (unlikely(ret))
4246 ret = copy_from_user_fixup(to, from, size);
4247 return ret;
4248 @@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(
4249 static inline unsigned long __must_check
4250 copy_to_user(void __user *to, const void *from, unsigned long size)
4251 {
4252 - unsigned long ret = ___copy_to_user(to, from, size);
4253 + unsigned long ret;
4254 +
4255 + if ((long)size < 0 || size > INT_MAX)
4256 + return size;
4257 +
4258 + if (!__builtin_constant_p(size))
4259 + check_object_size(from, size, true);
4260
4261 + ret = ___copy_to_user(to, from, size);
4262 if (unlikely(ret))
4263 ret = copy_to_user_fixup(to, from, size);
4264 return ret;
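The copy_to_user()/copy_from_user() changes above, 32-bit and 64-bit alike, follow one pattern: a length whose signed value is negative (an implausibly large unsigned count, almost certainly attacker-influenced) is rejected outright and reported back as bytes-not-copied, the 64-bit variants additionally refuse anything above INT_MAX, and for lengths that are not compile-time constants check_object_size() is asked to verify the kernel-side buffer before the copy runs. A minimal sketch of that shape, reusing the helper names visible in the hunks above (check_object_size() is only declared in the uaccess.h hunk below; its body lives elsewhere in the patch):

static inline unsigned long
copy_from_user_checked(void *to, const void __user *from, unsigned long n)
{
	/* a "negative" length means a huge unsigned count: refuse the copy
	   and report that nothing was transferred */
	if ((long)n < 0)
		return n;

	/* mirror the !__builtin_constant_p() guard above: only runtime-sized
	   copies go through the object-size check */
	if (!__builtin_constant_p(n))
		check_object_size(to, n, false);	/* false: data flows into this kernel buffer */

	return ___copy_from_user(to, from, n);
}

Returning n on the error path keeps the usual uaccess convention that the return value is the number of bytes left uncopied.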
4265 diff -urNp linux-2.6.32.45/arch/sparc/include/asm/uaccess.h linux-2.6.32.45/arch/sparc/include/asm/uaccess.h
4266 --- linux-2.6.32.45/arch/sparc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
4267 +++ linux-2.6.32.45/arch/sparc/include/asm/uaccess.h 2011-04-17 15:56:46.000000000 -0400
4268 @@ -1,5 +1,13 @@
4269 #ifndef ___ASM_SPARC_UACCESS_H
4270 #define ___ASM_SPARC_UACCESS_H
4271 +
4272 +#ifdef __KERNEL__
4273 +#ifndef __ASSEMBLY__
4274 +#include <linux/types.h>
4275 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
4276 +#endif
4277 +#endif
4278 +
4279 #if defined(__sparc__) && defined(__arch64__)
4280 #include <asm/uaccess_64.h>
4281 #else
4282 diff -urNp linux-2.6.32.45/arch/sparc/kernel/iommu.c linux-2.6.32.45/arch/sparc/kernel/iommu.c
4283 --- linux-2.6.32.45/arch/sparc/kernel/iommu.c 2011-03-27 14:31:47.000000000 -0400
4284 +++ linux-2.6.32.45/arch/sparc/kernel/iommu.c 2011-04-17 15:56:46.000000000 -0400
4285 @@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struc
4286 spin_unlock_irqrestore(&iommu->lock, flags);
4287 }
4288
4289 -static struct dma_map_ops sun4u_dma_ops = {
4290 +static const struct dma_map_ops sun4u_dma_ops = {
4291 .alloc_coherent = dma_4u_alloc_coherent,
4292 .free_coherent = dma_4u_free_coherent,
4293 .map_page = dma_4u_map_page,
4294 @@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops
4295 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
4296 };
4297
4298 -struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4299 +const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4300 EXPORT_SYMBOL(dma_ops);
4301
4302 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
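sun4u_dma_ops and the global dma_ops pointer become const here, and the ioport.c, pci_sun4v.c and kgdb hunks below do the same for the remaining sparc ops tables. Constifying a structure made of function pointers lets the compiler place it in read-only data, so a stray (or attacker-controlled) kernel write cannot redirect the callbacks. A generic sketch of the idea, with purely illustrative names:

struct example_ops {
	int (*submit)(int arg);
};

static int example_submit(int arg)
{
	return arg + 1;
}

/* const: the pointer table is emitted into read-only data, and any attempt
 * to assign through it is rejected at compile time */
static const struct example_ops example_ops = {
	.submit = example_submit,
};

These tables are only ever read through, so the qualifier costs nothing at run time; the churn comes from having to constify every definition and, elsewhere in the patch, the declarations and prototypes that refer to them.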
4303 diff -urNp linux-2.6.32.45/arch/sparc/kernel/ioport.c linux-2.6.32.45/arch/sparc/kernel/ioport.c
4304 --- linux-2.6.32.45/arch/sparc/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
4305 +++ linux-2.6.32.45/arch/sparc/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
4306 @@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(stru
4307 BUG();
4308 }
4309
4310 -struct dma_map_ops sbus_dma_ops = {
4311 +const struct dma_map_ops sbus_dma_ops = {
4312 .alloc_coherent = sbus_alloc_coherent,
4313 .free_coherent = sbus_free_coherent,
4314 .map_page = sbus_map_page,
4315 @@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
4316 .sync_sg_for_device = sbus_sync_sg_for_device,
4317 };
4318
4319 -struct dma_map_ops *dma_ops = &sbus_dma_ops;
4320 +const struct dma_map_ops *dma_ops = &sbus_dma_ops;
4321 EXPORT_SYMBOL(dma_ops);
4322
4323 static int __init sparc_register_ioport(void)
4324 @@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(str
4325 }
4326 }
4327
4328 -struct dma_map_ops pci32_dma_ops = {
4329 +const struct dma_map_ops pci32_dma_ops = {
4330 .alloc_coherent = pci32_alloc_coherent,
4331 .free_coherent = pci32_free_coherent,
4332 .map_page = pci32_map_page,
4333 diff -urNp linux-2.6.32.45/arch/sparc/kernel/kgdb_32.c linux-2.6.32.45/arch/sparc/kernel/kgdb_32.c
4334 --- linux-2.6.32.45/arch/sparc/kernel/kgdb_32.c 2011-03-27 14:31:47.000000000 -0400
4335 +++ linux-2.6.32.45/arch/sparc/kernel/kgdb_32.c 2011-04-17 15:56:46.000000000 -0400
4336 @@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
4337 {
4338 }
4339
4340 -struct kgdb_arch arch_kgdb_ops = {
4341 +const struct kgdb_arch arch_kgdb_ops = {
4342 /* Breakpoint instruction: ta 0x7d */
4343 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
4344 };
4345 diff -urNp linux-2.6.32.45/arch/sparc/kernel/kgdb_64.c linux-2.6.32.45/arch/sparc/kernel/kgdb_64.c
4346 --- linux-2.6.32.45/arch/sparc/kernel/kgdb_64.c 2011-03-27 14:31:47.000000000 -0400
4347 +++ linux-2.6.32.45/arch/sparc/kernel/kgdb_64.c 2011-04-17 15:56:46.000000000 -0400
4348 @@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
4349 {
4350 }
4351
4352 -struct kgdb_arch arch_kgdb_ops = {
4353 +const struct kgdb_arch arch_kgdb_ops = {
4354 /* Breakpoint instruction: ta 0x72 */
4355 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
4356 };
4357 diff -urNp linux-2.6.32.45/arch/sparc/kernel/Makefile linux-2.6.32.45/arch/sparc/kernel/Makefile
4358 --- linux-2.6.32.45/arch/sparc/kernel/Makefile 2011-03-27 14:31:47.000000000 -0400
4359 +++ linux-2.6.32.45/arch/sparc/kernel/Makefile 2011-04-17 15:56:46.000000000 -0400
4360 @@ -3,7 +3,7 @@
4361 #
4362
4363 asflags-y := -ansi
4364 -ccflags-y := -Werror
4365 +#ccflags-y := -Werror
4366
4367 extra-y := head_$(BITS).o
4368 extra-y += init_task.o
4369 diff -urNp linux-2.6.32.45/arch/sparc/kernel/pci_sun4v.c linux-2.6.32.45/arch/sparc/kernel/pci_sun4v.c
4370 --- linux-2.6.32.45/arch/sparc/kernel/pci_sun4v.c 2011-03-27 14:31:47.000000000 -0400
4371 +++ linux-2.6.32.45/arch/sparc/kernel/pci_sun4v.c 2011-04-17 15:56:46.000000000 -0400
4372 @@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct devic
4373 spin_unlock_irqrestore(&iommu->lock, flags);
4374 }
4375
4376 -static struct dma_map_ops sun4v_dma_ops = {
4377 +static const struct dma_map_ops sun4v_dma_ops = {
4378 .alloc_coherent = dma_4v_alloc_coherent,
4379 .free_coherent = dma_4v_free_coherent,
4380 .map_page = dma_4v_map_page,
4381 diff -urNp linux-2.6.32.45/arch/sparc/kernel/process_32.c linux-2.6.32.45/arch/sparc/kernel/process_32.c
4382 --- linux-2.6.32.45/arch/sparc/kernel/process_32.c 2011-03-27 14:31:47.000000000 -0400
4383 +++ linux-2.6.32.45/arch/sparc/kernel/process_32.c 2011-04-17 15:56:46.000000000 -0400
4384 @@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
4385 rw->ins[4], rw->ins[5],
4386 rw->ins[6],
4387 rw->ins[7]);
4388 - printk("%pS\n", (void *) rw->ins[7]);
4389 + printk("%pA\n", (void *) rw->ins[7]);
4390 rw = (struct reg_window32 *) rw->ins[6];
4391 }
4392 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
4393 @@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
4394
4395 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
4396 r->psr, r->pc, r->npc, r->y, print_tainted());
4397 - printk("PC: <%pS>\n", (void *) r->pc);
4398 + printk("PC: <%pA>\n", (void *) r->pc);
4399 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4400 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
4401 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
4402 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4403 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
4404 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
4405 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
4406 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
4407
4408 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4409 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
4410 @@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk,
4411 rw = (struct reg_window32 *) fp;
4412 pc = rw->ins[7];
4413 printk("[%08lx : ", pc);
4414 - printk("%pS ] ", (void *) pc);
4415 + printk("%pA ] ", (void *) pc);
4416 fp = rw->ins[6];
4417 } while (++count < 16);
4418 printk("\n");
4419 diff -urNp linux-2.6.32.45/arch/sparc/kernel/process_64.c linux-2.6.32.45/arch/sparc/kernel/process_64.c
4420 --- linux-2.6.32.45/arch/sparc/kernel/process_64.c 2011-03-27 14:31:47.000000000 -0400
4421 +++ linux-2.6.32.45/arch/sparc/kernel/process_64.c 2011-04-17 15:56:46.000000000 -0400
4422 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
4423 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
4424 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
4425 if (regs->tstate & TSTATE_PRIV)
4426 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
4427 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
4428 }
4429
4430 void show_regs(struct pt_regs *regs)
4431 {
4432 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
4433 regs->tpc, regs->tnpc, regs->y, print_tainted());
4434 - printk("TPC: <%pS>\n", (void *) regs->tpc);
4435 + printk("TPC: <%pA>\n", (void *) regs->tpc);
4436 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
4437 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
4438 regs->u_regs[3]);
4439 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
4440 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
4441 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
4442 regs->u_regs[15]);
4443 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
4444 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
4445 show_regwindow(regs);
4446 }
4447
4448 @@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void
4449 ((tp && tp->task) ? tp->task->pid : -1));
4450
4451 if (gp->tstate & TSTATE_PRIV) {
4452 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
4453 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
4454 (void *) gp->tpc,
4455 (void *) gp->o7,
4456 (void *) gp->i7,
4457 diff -urNp linux-2.6.32.45/arch/sparc/kernel/sys_sparc_32.c linux-2.6.32.45/arch/sparc/kernel/sys_sparc_32.c
4458 --- linux-2.6.32.45/arch/sparc/kernel/sys_sparc_32.c 2011-03-27 14:31:47.000000000 -0400
4459 +++ linux-2.6.32.45/arch/sparc/kernel/sys_sparc_32.c 2011-04-17 15:56:46.000000000 -0400
4460 @@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(str
4461 if (ARCH_SUN4C && len > 0x20000000)
4462 return -ENOMEM;
4463 if (!addr)
4464 - addr = TASK_UNMAPPED_BASE;
4465 + addr = current->mm->mmap_base;
4466
4467 if (flags & MAP_SHARED)
4468 addr = COLOUR_ALIGN(addr);
4469 @@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
4470 }
4471 if (TASK_SIZE - PAGE_SIZE - len < addr)
4472 return -ENOMEM;
4473 - if (!vmm || addr + len <= vmm->vm_start)
4474 + if (check_heap_stack_gap(vmm, addr, len))
4475 return addr;
4476 addr = vmm->vm_end;
4477 if (flags & MAP_SHARED)
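Both sparc arch_get_unmapped_area() paths (this one and the 64-bit variants below) replace the open-coded "!vma || addr + len <= vma->vm_start" test with check_heap_stack_gap(). That helper is defined elsewhere in this patch; the sketch below only illustrates the intent, which is to refuse to place a new mapping flush against a stack (VM_GROWSDOWN) vma and instead demand a guard gap below it. Everything in it beyond the fields visible above is an assumption:

extern unsigned long heap_stack_gap;	/* assumed tunable, in bytes */

static inline bool check_heap_stack_gap(const struct vm_area_struct *vma,
					unsigned long addr, unsigned long len)
{
	/* nothing mapped above the candidate range */
	if (!vma)
		return true;

	/* ordinary vma: same condition the old code used */
	if (!(vma->vm_flags & VM_GROWSDOWN))
		return addr + len <= vma->vm_start;

	/* stack vma: additionally require a configurable gap below it
	   (a real helper also has to guard against address wrap-around) */
	return addr + len + heap_stack_gap <= vma->vm_start;
}

The top-down allocator in the 64-bit file below also calls skip_heap_stack_gap() to step past such a gap rather than merely rejecting the candidate address.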
4478 diff -urNp linux-2.6.32.45/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.45/arch/sparc/kernel/sys_sparc_64.c
4479 --- linux-2.6.32.45/arch/sparc/kernel/sys_sparc_64.c 2011-03-27 14:31:47.000000000 -0400
4480 +++ linux-2.6.32.45/arch/sparc/kernel/sys_sparc_64.c 2011-04-17 15:56:46.000000000 -0400
4481 @@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(str
4482 /* We do not accept a shared mapping if it would violate
4483 * cache aliasing constraints.
4484 */
4485 - if ((flags & MAP_SHARED) &&
4486 + if ((filp || (flags & MAP_SHARED)) &&
4487 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4488 return -EINVAL;
4489 return addr;
4490 @@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(str
4491 if (filp || (flags & MAP_SHARED))
4492 do_color_align = 1;
4493
4494 +#ifdef CONFIG_PAX_RANDMMAP
4495 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4496 +#endif
4497 +
4498 if (addr) {
4499 if (do_color_align)
4500 addr = COLOUR_ALIGN(addr, pgoff);
4501 @@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(str
4502 addr = PAGE_ALIGN(addr);
4503
4504 vma = find_vma(mm, addr);
4505 - if (task_size - len >= addr &&
4506 - (!vma || addr + len <= vma->vm_start))
4507 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4508 return addr;
4509 }
4510
4511 if (len > mm->cached_hole_size) {
4512 - start_addr = addr = mm->free_area_cache;
4513 + start_addr = addr = mm->free_area_cache;
4514 } else {
4515 - start_addr = addr = TASK_UNMAPPED_BASE;
4516 + start_addr = addr = mm->mmap_base;
4517 mm->cached_hole_size = 0;
4518 }
4519
4520 @@ -175,14 +178,14 @@ full_search:
4521 vma = find_vma(mm, VA_EXCLUDE_END);
4522 }
4523 if (unlikely(task_size < addr)) {
4524 - if (start_addr != TASK_UNMAPPED_BASE) {
4525 - start_addr = addr = TASK_UNMAPPED_BASE;
4526 + if (start_addr != mm->mmap_base) {
4527 + start_addr = addr = mm->mmap_base;
4528 mm->cached_hole_size = 0;
4529 goto full_search;
4530 }
4531 return -ENOMEM;
4532 }
4533 - if (likely(!vma || addr + len <= vma->vm_start)) {
4534 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4535 /*
4536 * Remember the place where we stopped the search:
4537 */
4538 @@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct fi
4539 /* We do not accept a shared mapping if it would violate
4540 * cache aliasing constraints.
4541 */
4542 - if ((flags & MAP_SHARED) &&
4543 + if ((filp || (flags & MAP_SHARED)) &&
4544 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4545 return -EINVAL;
4546 return addr;
4547 @@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct fi
4548 addr = PAGE_ALIGN(addr);
4549
4550 vma = find_vma(mm, addr);
4551 - if (task_size - len >= addr &&
4552 - (!vma || addr + len <= vma->vm_start))
4553 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4554 return addr;
4555 }
4556
4557 @@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct fi
4558 /* make sure it can fit in the remaining address space */
4559 if (likely(addr > len)) {
4560 vma = find_vma(mm, addr-len);
4561 - if (!vma || addr <= vma->vm_start) {
4562 + if (check_heap_stack_gap(vma, addr - len, len)) {
4563 /* remember the address as a hint for next time */
4564 return (mm->free_area_cache = addr-len);
4565 }
4566 @@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct fi
4567 if (unlikely(mm->mmap_base < len))
4568 goto bottomup;
4569
4570 - addr = mm->mmap_base-len;
4571 - if (do_color_align)
4572 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4573 + addr = mm->mmap_base - len;
4574
4575 do {
4576 + if (do_color_align)
4577 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4578 /*
4579 * Lookup failure means no vma is above this address,
4580 * else if new region fits below vma->vm_start,
4581 * return with success:
4582 */
4583 vma = find_vma(mm, addr);
4584 - if (likely(!vma || addr+len <= vma->vm_start)) {
4585 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4586 /* remember the address as a hint for next time */
4587 return (mm->free_area_cache = addr);
4588 }
4589 @@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct fi
4590 mm->cached_hole_size = vma->vm_start - addr;
4591
4592 /* try just below the current vma->vm_start */
4593 - addr = vma->vm_start-len;
4594 - if (do_color_align)
4595 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4596 - } while (likely(len < vma->vm_start));
4597 + addr = skip_heap_stack_gap(vma, len);
4598 + } while (!IS_ERR_VALUE(addr));
4599
4600 bottomup:
4601 /*
4602 @@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_str
4603 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
4604 sysctl_legacy_va_layout) {
4605 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4606 +
4607 +#ifdef CONFIG_PAX_RANDMMAP
4608 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4609 + mm->mmap_base += mm->delta_mmap;
4610 +#endif
4611 +
4612 mm->get_unmapped_area = arch_get_unmapped_area;
4613 mm->unmap_area = arch_unmap_area;
4614 } else {
4615 @@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_str
4616 gap = (task_size / 6 * 5);
4617
4618 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4619 +
4620 +#ifdef CONFIG_PAX_RANDMMAP
4621 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4622 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4623 +#endif
4624 +
4625 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4626 mm->unmap_area = arch_unmap_area_topdown;
4627 }
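arch_pick_mmap_layout() gets the PAX_RANDMMAP treatment: when the per-mm MF_PAX_RANDMMAP flag is set, the bottom-up base is pushed up by mm->delta_mmap and the top-down base is pulled down by delta_mmap + delta_stack, on top of the stock random_factor. The deltas themselves are chosen once per exec from per-architecture bit widths; the snippet below is only a sketch of how such a page-aligned delta can be derived, and pax_get_random_long() stands in for whatever entropy helper the full patch actually uses:

/* illustrative only: a page-aligned random shift carrying `bits` bits of entropy */
static unsigned long pax_delta(unsigned int bits)
{
	return (pax_get_random_long() & ((1UL << bits) - 1)) << PAGE_SHIFT;
}

/* at exec time (sketch):
 *	mm->delta_mmap  = pax_delta(PAX_DELTA_MMAP_LEN);
 *	mm->delta_stack = pax_delta(PAX_DELTA_STACK_LEN);
 * where the *_LEN widths are per-architecture constants provided by the patch
 */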
4628 diff -urNp linux-2.6.32.45/arch/sparc/kernel/traps_32.c linux-2.6.32.45/arch/sparc/kernel/traps_32.c
4629 --- linux-2.6.32.45/arch/sparc/kernel/traps_32.c 2011-03-27 14:31:47.000000000 -0400
4630 +++ linux-2.6.32.45/arch/sparc/kernel/traps_32.c 2011-06-13 21:25:39.000000000 -0400
4631 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
4632 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
4633 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
4634
4635 +extern void gr_handle_kernel_exploit(void);
4636 +
4637 void die_if_kernel(char *str, struct pt_regs *regs)
4638 {
4639 static int die_counter;
4640 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
4641 count++ < 30 &&
4642 (((unsigned long) rw) >= PAGE_OFFSET) &&
4643 !(((unsigned long) rw) & 0x7)) {
4644 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
4645 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
4646 (void *) rw->ins[7]);
4647 rw = (struct reg_window32 *)rw->ins[6];
4648 }
4649 }
4650 printk("Instruction DUMP:");
4651 instruction_dump ((unsigned long *) regs->pc);
4652 - if(regs->psr & PSR_PS)
4653 + if(regs->psr & PSR_PS) {
4654 + gr_handle_kernel_exploit();
4655 do_exit(SIGKILL);
4656 + }
4657 do_exit(SIGSEGV);
4658 }
4659
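die_if_kernel() now treats an oops taken in privileged mode specially: gr_handle_kernel_exploit() runs before the task is killed, and the same hook is added to the 64-bit die_if_kernel() further down. Its implementation lives under grsecurity/ in the full patch; the stub below is only a guess at its shape, included to show what such a call site is for, and every identifier in it beyond the function name is an assumption:

/* hypothetical stub -- the real implementation is elsewhere in this patch */
void gr_handle_kernel_exploit(void)
{
	uid_t uid = current_uid();

	if (uid)	/* an unprivileged task just crashed the kernel in privileged context */
		printk(KERN_ALERT "grsec: possible kernel exploit attempt by uid %u\n", uid);

	/* the real hook reacts more aggressively, e.g. by terminating the
	   offending user's remaining processes */
}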
4660 diff -urNp linux-2.6.32.45/arch/sparc/kernel/traps_64.c linux-2.6.32.45/arch/sparc/kernel/traps_64.c
4661 --- linux-2.6.32.45/arch/sparc/kernel/traps_64.c 2011-03-27 14:31:47.000000000 -0400
4662 +++ linux-2.6.32.45/arch/sparc/kernel/traps_64.c 2011-06-13 21:24:11.000000000 -0400
4663 @@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_
4664 i + 1,
4665 p->trapstack[i].tstate, p->trapstack[i].tpc,
4666 p->trapstack[i].tnpc, p->trapstack[i].tt);
4667 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4668 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4669 }
4670 }
4671
4672 @@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long
4673
4674 lvl -= 0x100;
4675 if (regs->tstate & TSTATE_PRIV) {
4676 +
4677 +#ifdef CONFIG_PAX_REFCOUNT
4678 + if (lvl == 6)
4679 + pax_report_refcount_overflow(regs);
4680 +#endif
4681 +
4682 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4683 die_if_kernel(buffer, regs);
4684 }
4685 @@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long
4686 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4687 {
4688 char buffer[32];
4689 -
4690 +
4691 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4692 0, lvl, SIGTRAP) == NOTIFY_STOP)
4693 return;
4694
4695 +#ifdef CONFIG_PAX_REFCOUNT
4696 + if (lvl == 6)
4697 + pax_report_refcount_overflow(regs);
4698 +#endif
4699 +
4700 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4701
4702 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4703 @@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt
4704 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4705 printk("%s" "ERROR(%d): ",
4706 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4707 - printk("TPC<%pS>\n", (void *) regs->tpc);
4708 + printk("TPC<%pA>\n", (void *) regs->tpc);
4709 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4710 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4711 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4712 @@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type,
4713 smp_processor_id(),
4714 (type & 0x1) ? 'I' : 'D',
4715 regs->tpc);
4716 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4717 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4718 panic("Irrecoverable Cheetah+ parity error.");
4719 }
4720
4721 @@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type,
4722 smp_processor_id(),
4723 (type & 0x1) ? 'I' : 'D',
4724 regs->tpc);
4725 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4726 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4727 }
4728
4729 struct sun4v_error_entry {
4730 @@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_r
4731
4732 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4733 regs->tpc, tl);
4734 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4735 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4736 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4737 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4738 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4739 (void *) regs->u_regs[UREG_I7]);
4740 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4741 "pte[%lx] error[%lx]\n",
4742 @@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_r
4743
4744 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4745 regs->tpc, tl);
4746 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4747 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4748 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4749 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4750 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4751 (void *) regs->u_regs[UREG_I7]);
4752 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4753 "pte[%lx] error[%lx]\n",
4754 @@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk,
4755 fp = (unsigned long)sf->fp + STACK_BIAS;
4756 }
4757
4758 - printk(" [%016lx] %pS\n", pc, (void *) pc);
4759 + printk(" [%016lx] %pA\n", pc, (void *) pc);
4760 } while (++count < 16);
4761 }
4762
4763 @@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_
4764 return (struct reg_window *) (fp + STACK_BIAS);
4765 }
4766
4767 +extern void gr_handle_kernel_exploit(void);
4768 +
4769 void die_if_kernel(char *str, struct pt_regs *regs)
4770 {
4771 static int die_counter;
4772 @@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_
4773 while (rw &&
4774 count++ < 30&&
4775 is_kernel_stack(current, rw)) {
4776 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
4777 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
4778 (void *) rw->ins[7]);
4779
4780 rw = kernel_stack_up(rw);
4781 @@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_
4782 }
4783 user_instruction_dump ((unsigned int __user *) regs->tpc);
4784 }
4785 - if (regs->tstate & TSTATE_PRIV)
4786 + if (regs->tstate & TSTATE_PRIV) {
4787 + gr_handle_kernel_exploit();
4788 do_exit(SIGKILL);
4789 + }
4790 +
4791 do_exit(SIGSEGV);
4792 }
4793 EXPORT_SYMBOL(die_if_kernel);
4794 diff -urNp linux-2.6.32.45/arch/sparc/kernel/una_asm_64.S linux-2.6.32.45/arch/sparc/kernel/una_asm_64.S
4795 --- linux-2.6.32.45/arch/sparc/kernel/una_asm_64.S 2011-03-27 14:31:47.000000000 -0400
4796 +++ linux-2.6.32.45/arch/sparc/kernel/una_asm_64.S 2011-07-13 22:20:05.000000000 -0400
4797 @@ -127,7 +127,7 @@ do_int_load:
4798 wr %o5, 0x0, %asi
4799 retl
4800 mov 0, %o0
4801 - .size __do_int_load, .-__do_int_load
4802 + .size do_int_load, .-do_int_load
4803
4804 .section __ex_table,"a"
4805 .word 4b, __retl_efault
4806 diff -urNp linux-2.6.32.45/arch/sparc/kernel/unaligned_64.c linux-2.6.32.45/arch/sparc/kernel/unaligned_64.c
4807 --- linux-2.6.32.45/arch/sparc/kernel/unaligned_64.c 2011-03-27 14:31:47.000000000 -0400
4808 +++ linux-2.6.32.45/arch/sparc/kernel/unaligned_64.c 2011-04-17 15:56:46.000000000 -0400
4809 @@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs
4810 if (count < 5) {
4811 last_time = jiffies;
4812 count++;
4813 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
4814 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
4815 regs->tpc, (void *) regs->tpc);
4816 }
4817 }
4818 diff -urNp linux-2.6.32.45/arch/sparc/lib/atomic_64.S linux-2.6.32.45/arch/sparc/lib/atomic_64.S
4819 --- linux-2.6.32.45/arch/sparc/lib/atomic_64.S 2011-03-27 14:31:47.000000000 -0400
4820 +++ linux-2.6.32.45/arch/sparc/lib/atomic_64.S 2011-04-17 15:56:46.000000000 -0400
4821 @@ -18,7 +18,12 @@
4822 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4823 BACKOFF_SETUP(%o2)
4824 1: lduw [%o1], %g1
4825 - add %g1, %o0, %g7
4826 + addcc %g1, %o0, %g7
4827 +
4828 +#ifdef CONFIG_PAX_REFCOUNT
4829 + tvs %icc, 6
4830 +#endif
4831 +
4832 cas [%o1], %g1, %g7
4833 cmp %g1, %g7
4834 bne,pn %icc, 2f
4835 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
4836 2: BACKOFF_SPIN(%o2, %o3, 1b)
4837 .size atomic_add, .-atomic_add
4838
4839 + .globl atomic_add_unchecked
4840 + .type atomic_add_unchecked,#function
4841 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4842 + BACKOFF_SETUP(%o2)
4843 +1: lduw [%o1], %g1
4844 + add %g1, %o0, %g7
4845 + cas [%o1], %g1, %g7
4846 + cmp %g1, %g7
4847 + bne,pn %icc, 2f
4848 + nop
4849 + retl
4850 + nop
4851 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4852 + .size atomic_add_unchecked, .-atomic_add_unchecked
4853 +
4854 .globl atomic_sub
4855 .type atomic_sub,#function
4856 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4857 BACKOFF_SETUP(%o2)
4858 1: lduw [%o1], %g1
4859 - sub %g1, %o0, %g7
4860 + subcc %g1, %o0, %g7
4861 +
4862 +#ifdef CONFIG_PAX_REFCOUNT
4863 + tvs %icc, 6
4864 +#endif
4865 +
4866 cas [%o1], %g1, %g7
4867 cmp %g1, %g7
4868 bne,pn %icc, 2f
4869 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
4870 2: BACKOFF_SPIN(%o2, %o3, 1b)
4871 .size atomic_sub, .-atomic_sub
4872
4873 + .globl atomic_sub_unchecked
4874 + .type atomic_sub_unchecked,#function
4875 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4876 + BACKOFF_SETUP(%o2)
4877 +1: lduw [%o1], %g1
4878 + sub %g1, %o0, %g7
4879 + cas [%o1], %g1, %g7
4880 + cmp %g1, %g7
4881 + bne,pn %icc, 2f
4882 + nop
4883 + retl
4884 + nop
4885 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4886 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
4887 +
4888 .globl atomic_add_ret
4889 .type atomic_add_ret,#function
4890 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4891 BACKOFF_SETUP(%o2)
4892 1: lduw [%o1], %g1
4893 - add %g1, %o0, %g7
4894 + addcc %g1, %o0, %g7
4895 +
4896 +#ifdef CONFIG_PAX_REFCOUNT
4897 + tvs %icc, 6
4898 +#endif
4899 +
4900 cas [%o1], %g1, %g7
4901 cmp %g1, %g7
4902 bne,pn %icc, 2f
4903 @@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1
4904 2: BACKOFF_SPIN(%o2, %o3, 1b)
4905 .size atomic_add_ret, .-atomic_add_ret
4906
4907 + .globl atomic_add_ret_unchecked
4908 + .type atomic_add_ret_unchecked,#function
4909 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4910 + BACKOFF_SETUP(%o2)
4911 +1: lduw [%o1], %g1
4912 + addcc %g1, %o0, %g7
4913 + cas [%o1], %g1, %g7
4914 + cmp %g1, %g7
4915 + bne,pn %icc, 2f
4916 + add %g7, %o0, %g7
4917 + sra %g7, 0, %o0
4918 + retl
4919 + nop
4920 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4921 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4922 +
4923 .globl atomic_sub_ret
4924 .type atomic_sub_ret,#function
4925 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4926 BACKOFF_SETUP(%o2)
4927 1: lduw [%o1], %g1
4928 - sub %g1, %o0, %g7
4929 + subcc %g1, %o0, %g7
4930 +
4931 +#ifdef CONFIG_PAX_REFCOUNT
4932 + tvs %icc, 6
4933 +#endif
4934 +
4935 cas [%o1], %g1, %g7
4936 cmp %g1, %g7
4937 bne,pn %icc, 2f
4938 @@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
4939 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4940 BACKOFF_SETUP(%o2)
4941 1: ldx [%o1], %g1
4942 - add %g1, %o0, %g7
4943 + addcc %g1, %o0, %g7
4944 +
4945 +#ifdef CONFIG_PAX_REFCOUNT
4946 + tvs %xcc, 6
4947 +#endif
4948 +
4949 casx [%o1], %g1, %g7
4950 cmp %g1, %g7
4951 bne,pn %xcc, 2f
4952 @@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 =
4953 2: BACKOFF_SPIN(%o2, %o3, 1b)
4954 .size atomic64_add, .-atomic64_add
4955
4956 + .globl atomic64_add_unchecked
4957 + .type atomic64_add_unchecked,#function
4958 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4959 + BACKOFF_SETUP(%o2)
4960 +1: ldx [%o1], %g1
4961 + addcc %g1, %o0, %g7
4962 + casx [%o1], %g1, %g7
4963 + cmp %g1, %g7
4964 + bne,pn %xcc, 2f
4965 + nop
4966 + retl
4967 + nop
4968 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4969 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
4970 +
4971 .globl atomic64_sub
4972 .type atomic64_sub,#function
4973 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4974 BACKOFF_SETUP(%o2)
4975 1: ldx [%o1], %g1
4976 - sub %g1, %o0, %g7
4977 + subcc %g1, %o0, %g7
4978 +
4979 +#ifdef CONFIG_PAX_REFCOUNT
4980 + tvs %xcc, 6
4981 +#endif
4982 +
4983 casx [%o1], %g1, %g7
4984 cmp %g1, %g7
4985 bne,pn %xcc, 2f
4986 @@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
4987 2: BACKOFF_SPIN(%o2, %o3, 1b)
4988 .size atomic64_sub, .-atomic64_sub
4989
4990 + .globl atomic64_sub_unchecked
4991 + .type atomic64_sub_unchecked,#function
4992 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4993 + BACKOFF_SETUP(%o2)
4994 +1: ldx [%o1], %g1
4995 + subcc %g1, %o0, %g7
4996 + casx [%o1], %g1, %g7
4997 + cmp %g1, %g7
4998 + bne,pn %xcc, 2f
4999 + nop
5000 + retl
5001 + nop
5002 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5003 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
5004 +
5005 .globl atomic64_add_ret
5006 .type atomic64_add_ret,#function
5007 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5008 BACKOFF_SETUP(%o2)
5009 1: ldx [%o1], %g1
5010 - add %g1, %o0, %g7
5011 + addcc %g1, %o0, %g7
5012 +
5013 +#ifdef CONFIG_PAX_REFCOUNT
5014 + tvs %xcc, 6
5015 +#endif
5016 +
5017 casx [%o1], %g1, %g7
5018 cmp %g1, %g7
5019 bne,pn %xcc, 2f
5020 @@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o
5021 2: BACKOFF_SPIN(%o2, %o3, 1b)
5022 .size atomic64_add_ret, .-atomic64_add_ret
5023
5024 + .globl atomic64_add_ret_unchecked
5025 + .type atomic64_add_ret_unchecked,#function
5026 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5027 + BACKOFF_SETUP(%o2)
5028 +1: ldx [%o1], %g1
5029 + addcc %g1, %o0, %g7
5030 + casx [%o1], %g1, %g7
5031 + cmp %g1, %g7
5032 + bne,pn %xcc, 2f
5033 + add %g7, %o0, %g7
5034 + mov %g7, %o0
5035 + retl
5036 + nop
5037 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5038 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
5039 +
5040 .globl atomic64_sub_ret
5041 .type atomic64_sub_ret,#function
5042 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5043 BACKOFF_SETUP(%o2)
5044 1: ldx [%o1], %g1
5045 - sub %g1, %o0, %g7
5046 + subcc %g1, %o0, %g7
5047 +
5048 +#ifdef CONFIG_PAX_REFCOUNT
5049 + tvs %xcc, 6
5050 +#endif
5051 +
5052 casx [%o1], %g1, %g7
5053 cmp %g1, %g7
5054 bne,pn %xcc, 2f
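These atomic routines are the assembly half of PAX_REFCOUNT: add/sub becomes addcc/subcc so the condition codes are set, and "tvs %icc, 6" (tvs %xcc, 6 for the 64-bit ops) raises software trap 6 only when the overflow bit came on, which the bad_trap()/bad_trap_tl1() changes earlier route to pax_report_refcount_overflow(). Counters that are allowed to wrap use the new *_unchecked entry points, which keep the original unchecked arithmetic. On the C side this pairs with a separate unchecked atomic type; the usage sketch below assumes the atomic_unchecked_t type and atomic_inc_unchecked() wrapper that the full patch provides, neither of which appears in this hunk:

/* reference count: protected by the addcc/tvs sequences above */
static atomic_t object_refcount = ATOMIC_INIT(1);

/* statistics counter: wrapping is harmless, so the unchecked ops are used */
static atomic_unchecked_t packets_seen = ATOMIC_INIT(0);

static void on_packet(void)
{
	atomic_inc(&object_refcount);		/* traps to the refcount reporter on signed overflow */
	atomic_inc_unchecked(&packets_seen);	/* may wrap silently, by design */
}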
5055 diff -urNp linux-2.6.32.45/arch/sparc/lib/ksyms.c linux-2.6.32.45/arch/sparc/lib/ksyms.c
5056 --- linux-2.6.32.45/arch/sparc/lib/ksyms.c 2011-03-27 14:31:47.000000000 -0400
5057 +++ linux-2.6.32.45/arch/sparc/lib/ksyms.c 2011-08-19 23:05:14.000000000 -0400
5058 @@ -144,12 +144,18 @@ EXPORT_SYMBOL(__downgrade_write);
5059
5060 /* Atomic counter implementation. */
5061 EXPORT_SYMBOL(atomic_add);
5062 +EXPORT_SYMBOL(atomic_add_unchecked);
5063 EXPORT_SYMBOL(atomic_add_ret);
5064 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
5065 EXPORT_SYMBOL(atomic_sub);
5066 +EXPORT_SYMBOL(atomic_sub_unchecked);
5067 EXPORT_SYMBOL(atomic_sub_ret);
5068 EXPORT_SYMBOL(atomic64_add);
5069 +EXPORT_SYMBOL(atomic64_add_unchecked);
5070 EXPORT_SYMBOL(atomic64_add_ret);
5071 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
5072 EXPORT_SYMBOL(atomic64_sub);
5073 +EXPORT_SYMBOL(atomic64_sub_unchecked);
5074 EXPORT_SYMBOL(atomic64_sub_ret);
5075
5076 /* Atomic bit operations. */
5077 diff -urNp linux-2.6.32.45/arch/sparc/lib/Makefile linux-2.6.32.45/arch/sparc/lib/Makefile
5078 --- linux-2.6.32.45/arch/sparc/lib/Makefile 2011-03-27 14:31:47.000000000 -0400
5079 +++ linux-2.6.32.45/arch/sparc/lib/Makefile 2011-05-17 19:26:34.000000000 -0400
5080 @@ -2,7 +2,7 @@
5081 #
5082
5083 asflags-y := -ansi -DST_DIV0=0x02
5084 -ccflags-y := -Werror
5085 +#ccflags-y := -Werror
5086
5087 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
5088 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
5089 diff -urNp linux-2.6.32.45/arch/sparc/lib/rwsem_64.S linux-2.6.32.45/arch/sparc/lib/rwsem_64.S
5090 --- linux-2.6.32.45/arch/sparc/lib/rwsem_64.S 2011-03-27 14:31:47.000000000 -0400
5091 +++ linux-2.6.32.45/arch/sparc/lib/rwsem_64.S 2011-04-17 15:56:46.000000000 -0400
5092 @@ -11,7 +11,12 @@
5093 .globl __down_read
5094 __down_read:
5095 1: lduw [%o0], %g1
5096 - add %g1, 1, %g7
5097 + addcc %g1, 1, %g7
5098 +
5099 +#ifdef CONFIG_PAX_REFCOUNT
5100 + tvs %icc, 6
5101 +#endif
5102 +
5103 cas [%o0], %g1, %g7
5104 cmp %g1, %g7
5105 bne,pn %icc, 1b
5106 @@ -33,7 +38,12 @@ __down_read:
5107 .globl __down_read_trylock
5108 __down_read_trylock:
5109 1: lduw [%o0], %g1
5110 - add %g1, 1, %g7
5111 + addcc %g1, 1, %g7
5112 +
5113 +#ifdef CONFIG_PAX_REFCOUNT
5114 + tvs %icc, 6
5115 +#endif
5116 +
5117 cmp %g7, 0
5118 bl,pn %icc, 2f
5119 mov 0, %o1
5120 @@ -51,7 +61,12 @@ __down_write:
5121 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5122 1:
5123 lduw [%o0], %g3
5124 - add %g3, %g1, %g7
5125 + addcc %g3, %g1, %g7
5126 +
5127 +#ifdef CONFIG_PAX_REFCOUNT
5128 + tvs %icc, 6
5129 +#endif
5130 +
5131 cas [%o0], %g3, %g7
5132 cmp %g3, %g7
5133 bne,pn %icc, 1b
5134 @@ -77,7 +92,12 @@ __down_write_trylock:
5135 cmp %g3, 0
5136 bne,pn %icc, 2f
5137 mov 0, %o1
5138 - add %g3, %g1, %g7
5139 + addcc %g3, %g1, %g7
5140 +
5141 +#ifdef CONFIG_PAX_REFCOUNT
5142 + tvs %icc, 6
5143 +#endif
5144 +
5145 cas [%o0], %g3, %g7
5146 cmp %g3, %g7
5147 bne,pn %icc, 1b
5148 @@ -90,7 +110,12 @@ __down_write_trylock:
5149 __up_read:
5150 1:
5151 lduw [%o0], %g1
5152 - sub %g1, 1, %g7
5153 + subcc %g1, 1, %g7
5154 +
5155 +#ifdef CONFIG_PAX_REFCOUNT
5156 + tvs %icc, 6
5157 +#endif
5158 +
5159 cas [%o0], %g1, %g7
5160 cmp %g1, %g7
5161 bne,pn %icc, 1b
5162 @@ -118,7 +143,12 @@ __up_write:
5163 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5164 1:
5165 lduw [%o0], %g3
5166 - sub %g3, %g1, %g7
5167 + subcc %g3, %g1, %g7
5168 +
5169 +#ifdef CONFIG_PAX_REFCOUNT
5170 + tvs %icc, 6
5171 +#endif
5172 +
5173 cas [%o0], %g3, %g7
5174 cmp %g3, %g7
5175 bne,pn %icc, 1b
5176 @@ -143,7 +173,12 @@ __downgrade_write:
5177 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
5178 1:
5179 lduw [%o0], %g3
5180 - sub %g3, %g1, %g7
5181 + subcc %g3, %g1, %g7
5182 +
5183 +#ifdef CONFIG_PAX_REFCOUNT
5184 + tvs %icc, 6
5185 +#endif
5186 +
5187 cas [%o0], %g3, %g7
5188 cmp %g3, %g7
5189 bne,pn %icc, 1b
5190 diff -urNp linux-2.6.32.45/arch/sparc/Makefile linux-2.6.32.45/arch/sparc/Makefile
5191 --- linux-2.6.32.45/arch/sparc/Makefile 2011-03-27 14:31:47.000000000 -0400
5192 +++ linux-2.6.32.45/arch/sparc/Makefile 2011-04-17 15:56:46.000000000 -0400
5193 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
5194 # Export what is needed by arch/sparc/boot/Makefile
5195 export VMLINUX_INIT VMLINUX_MAIN
5196 VMLINUX_INIT := $(head-y) $(init-y)
5197 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
5198 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
5199 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
5200 VMLINUX_MAIN += $(drivers-y) $(net-y)
5201
5202 diff -urNp linux-2.6.32.45/arch/sparc/mm/fault_32.c linux-2.6.32.45/arch/sparc/mm/fault_32.c
5203 --- linux-2.6.32.45/arch/sparc/mm/fault_32.c 2011-03-27 14:31:47.000000000 -0400
5204 +++ linux-2.6.32.45/arch/sparc/mm/fault_32.c 2011-04-17 15:56:46.000000000 -0400
5205 @@ -21,6 +21,9 @@
5206 #include <linux/interrupt.h>
5207 #include <linux/module.h>
5208 #include <linux/kdebug.h>
5209 +#include <linux/slab.h>
5210 +#include <linux/pagemap.h>
5211 +#include <linux/compiler.h>
5212
5213 #include <asm/system.h>
5214 #include <asm/page.h>
5215 @@ -167,6 +170,267 @@ static unsigned long compute_si_addr(str
5216 return safe_compute_effective_address(regs, insn);
5217 }
5218
5219 +#ifdef CONFIG_PAX_PAGEEXEC
5220 +#ifdef CONFIG_PAX_DLRESOLVE
5221 +static void pax_emuplt_close(struct vm_area_struct *vma)
5222 +{
5223 + vma->vm_mm->call_dl_resolve = 0UL;
5224 +}
5225 +
5226 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5227 +{
5228 + unsigned int *kaddr;
5229 +
5230 + vmf->page = alloc_page(GFP_HIGHUSER);
5231 + if (!vmf->page)
5232 + return VM_FAULT_OOM;
5233 +
5234 + kaddr = kmap(vmf->page);
5235 + memset(kaddr, 0, PAGE_SIZE);
5236 + kaddr[0] = 0x9DE3BFA8U; /* save */
5237 + flush_dcache_page(vmf->page);
5238 + kunmap(vmf->page);
5239 + return VM_FAULT_MAJOR;
5240 +}
5241 +
5242 +static const struct vm_operations_struct pax_vm_ops = {
5243 + .close = pax_emuplt_close,
5244 + .fault = pax_emuplt_fault
5245 +};
5246 +
5247 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5248 +{
5249 + int ret;
5250 +
5251 + vma->vm_mm = current->mm;
5252 + vma->vm_start = addr;
5253 + vma->vm_end = addr + PAGE_SIZE;
5254 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5255 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5256 + vma->vm_ops = &pax_vm_ops;
5257 +
5258 + ret = insert_vm_struct(current->mm, vma);
5259 + if (ret)
5260 + return ret;
5261 +
5262 + ++current->mm->total_vm;
5263 + return 0;
5264 +}
5265 +#endif
5266 +
5267 +/*
5268 + * PaX: decide what to do with offenders (regs->pc = fault address)
5269 + *
5270 + * returns 1 when task should be killed
5271 + * 2 when patched PLT trampoline was detected
5272 + * 3 when unpatched PLT trampoline was detected
5273 + */
5274 +static int pax_handle_fetch_fault(struct pt_regs *regs)
5275 +{
5276 +
5277 +#ifdef CONFIG_PAX_EMUPLT
5278 + int err;
5279 +
5280 + do { /* PaX: patched PLT emulation #1 */
5281 + unsigned int sethi1, sethi2, jmpl;
5282 +
5283 + err = get_user(sethi1, (unsigned int *)regs->pc);
5284 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
5285 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
5286 +
5287 + if (err)
5288 + break;
5289 +
5290 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5291 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
5292 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
5293 + {
5294 + unsigned int addr;
5295 +
5296 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5297 + addr = regs->u_regs[UREG_G1];
5298 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5299 + regs->pc = addr;
5300 + regs->npc = addr+4;
5301 + return 2;
5302 + }
5303 + } while (0);
5304 +
5305 + { /* PaX: patched PLT emulation #2 */
5306 + unsigned int ba;
5307 +
5308 + err = get_user(ba, (unsigned int *)regs->pc);
5309 +
5310 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5311 + unsigned int addr;
5312 +
5313 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5314 + regs->pc = addr;
5315 + regs->npc = addr+4;
5316 + return 2;
5317 + }
5318 + }
5319 +
5320 + do { /* PaX: patched PLT emulation #3 */
5321 + unsigned int sethi, jmpl, nop;
5322 +
5323 + err = get_user(sethi, (unsigned int *)regs->pc);
5324 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
5325 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
5326 +
5327 + if (err)
5328 + break;
5329 +
5330 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5331 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5332 + nop == 0x01000000U)
5333 + {
5334 + unsigned int addr;
5335 +
5336 + addr = (sethi & 0x003FFFFFU) << 10;
5337 + regs->u_regs[UREG_G1] = addr;
5338 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5339 + regs->pc = addr;
5340 + regs->npc = addr+4;
5341 + return 2;
5342 + }
5343 + } while (0);
5344 +
5345 + do { /* PaX: unpatched PLT emulation step 1 */
5346 + unsigned int sethi, ba, nop;
5347 +
5348 + err = get_user(sethi, (unsigned int *)regs->pc);
5349 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
5350 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
5351 +
5352 + if (err)
5353 + break;
5354 +
5355 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5356 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5357 + nop == 0x01000000U)
5358 + {
5359 + unsigned int addr, save, call;
5360 +
5361 + if ((ba & 0xFFC00000U) == 0x30800000U)
5362 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5363 + else
5364 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
5365 +
5366 + err = get_user(save, (unsigned int *)addr);
5367 + err |= get_user(call, (unsigned int *)(addr+4));
5368 + err |= get_user(nop, (unsigned int *)(addr+8));
5369 + if (err)
5370 + break;
5371 +
5372 +#ifdef CONFIG_PAX_DLRESOLVE
5373 + if (save == 0x9DE3BFA8U &&
5374 + (call & 0xC0000000U) == 0x40000000U &&
5375 + nop == 0x01000000U)
5376 + {
5377 + struct vm_area_struct *vma;
5378 + unsigned long call_dl_resolve;
5379 +
5380 + down_read(&current->mm->mmap_sem);
5381 + call_dl_resolve = current->mm->call_dl_resolve;
5382 + up_read(&current->mm->mmap_sem);
5383 + if (likely(call_dl_resolve))
5384 + goto emulate;
5385 +
5386 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5387 +
5388 + down_write(&current->mm->mmap_sem);
5389 + if (current->mm->call_dl_resolve) {
5390 + call_dl_resolve = current->mm->call_dl_resolve;
5391 + up_write(&current->mm->mmap_sem);
5392 + if (vma)
5393 + kmem_cache_free(vm_area_cachep, vma);
5394 + goto emulate;
5395 + }
5396 +
5397 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5398 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5399 + up_write(&current->mm->mmap_sem);
5400 + if (vma)
5401 + kmem_cache_free(vm_area_cachep, vma);
5402 + return 1;
5403 + }
5404 +
5405 + if (pax_insert_vma(vma, call_dl_resolve)) {
5406 + up_write(&current->mm->mmap_sem);
5407 + kmem_cache_free(vm_area_cachep, vma);
5408 + return 1;
5409 + }
5410 +
5411 + current->mm->call_dl_resolve = call_dl_resolve;
5412 + up_write(&current->mm->mmap_sem);
5413 +
5414 +emulate:
5415 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5416 + regs->pc = call_dl_resolve;
5417 + regs->npc = addr+4;
5418 + return 3;
5419 + }
5420 +#endif
5421 +
5422 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5423 + if ((save & 0xFFC00000U) == 0x05000000U &&
5424 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5425 + nop == 0x01000000U)
5426 + {
5427 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5428 + regs->u_regs[UREG_G2] = addr + 4;
5429 + addr = (save & 0x003FFFFFU) << 10;
5430 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5431 + regs->pc = addr;
5432 + regs->npc = addr+4;
5433 + return 3;
5434 + }
5435 + }
5436 + } while (0);
5437 +
5438 + do { /* PaX: unpatched PLT emulation step 2 */
5439 + unsigned int save, call, nop;
5440 +
5441 + err = get_user(save, (unsigned int *)(regs->pc-4));
5442 + err |= get_user(call, (unsigned int *)regs->pc);
5443 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
5444 + if (err)
5445 + break;
5446 +
5447 + if (save == 0x9DE3BFA8U &&
5448 + (call & 0xC0000000U) == 0x40000000U &&
5449 + nop == 0x01000000U)
5450 + {
5451 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
5452 +
5453 + regs->u_regs[UREG_RETPC] = regs->pc;
5454 + regs->pc = dl_resolve;
5455 + regs->npc = dl_resolve+4;
5456 + return 3;
5457 + }
5458 + } while (0);
5459 +#endif
5460 +
5461 + return 1;
5462 +}
5463 +
5464 +void pax_report_insns(void *pc, void *sp)
5465 +{
5466 + unsigned long i;
5467 +
5468 + printk(KERN_ERR "PAX: bytes at PC: ");
5469 + for (i = 0; i < 8; i++) {
5470 + unsigned int c;
5471 + if (get_user(c, (unsigned int *)pc+i))
5472 + printk(KERN_CONT "???????? ");
5473 + else
5474 + printk(KERN_CONT "%08x ", c);
5475 + }
5476 + printk("\n");
5477 +}
5478 +#endif
5479 +
5480 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
5481 unsigned long address)
5482 {
5483 @@ -231,6 +495,24 @@ good_area:
5484 if(!(vma->vm_flags & VM_WRITE))
5485 goto bad_area;
5486 } else {
5487 +
5488 +#ifdef CONFIG_PAX_PAGEEXEC
5489 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
5490 + up_read(&mm->mmap_sem);
5491 + switch (pax_handle_fetch_fault(regs)) {
5492 +
5493 +#ifdef CONFIG_PAX_EMUPLT
5494 + case 2:
5495 + case 3:
5496 + return;
5497 +#endif
5498 +
5499 + }
5500 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
5501 + do_group_exit(SIGKILL);
5502 + }
5503 +#endif
5504 +
5505 /* Allow reads even for write-only mappings */
5506 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
5507 goto bad_area;
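With PAX_PAGEEXEC enabled, an instruction fetch from a non-executable user page normally kills the task, but pax_handle_fetch_fault() first checks whether the faulting code matches one of the known sparc PLT trampoline shapes and, if so, emulates it so lazily-bound binaries keep working. The matching is plain mask-and-compare on instruction words read with get_user(): the 0x03000000/0xFFC00000 test above, for instance, recognises "sethi %hi(imm22), %g1", whose 22-bit immediate ends up in the top bits of the register. A standalone decoder for just that one pattern, as an illustration rather than a helper the patch defines:

/* recognise "sethi %hi(X), %g1" and recover the value it loads into %g1 */
static int decode_sethi_g1(unsigned int insn, unsigned long *value)
{
	/* SPARC format 2: op=00, rd=%g1 (00001), op2=100, imm22 in bits 21..0 */
	if ((insn & 0xFFC00000U) != 0x03000000U)
		return 0;

	*value = (insn & 0x003FFFFFU) << 10;	/* sethi places imm22 into bits 31..10 */
	return 1;
}

The jmpl, ba and call shapes are matched the same way, and a successful match simply rewrites pc/npc (tpc/tnpc in the 64-bit handler below) to where the real trampoline would have transferred control.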
5508 diff -urNp linux-2.6.32.45/arch/sparc/mm/fault_64.c linux-2.6.32.45/arch/sparc/mm/fault_64.c
5509 --- linux-2.6.32.45/arch/sparc/mm/fault_64.c 2011-03-27 14:31:47.000000000 -0400
5510 +++ linux-2.6.32.45/arch/sparc/mm/fault_64.c 2011-04-17 15:56:46.000000000 -0400
5511 @@ -20,6 +20,9 @@
5512 #include <linux/kprobes.h>
5513 #include <linux/kdebug.h>
5514 #include <linux/percpu.h>
5515 +#include <linux/slab.h>
5516 +#include <linux/pagemap.h>
5517 +#include <linux/compiler.h>
5518
5519 #include <asm/page.h>
5520 #include <asm/pgtable.h>
5521 @@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs
5522 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
5523 regs->tpc);
5524 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
5525 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
5526 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
5527 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
5528 dump_stack();
5529 unhandled_fault(regs->tpc, current, regs);
5530 @@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_a
5531 show_regs(regs);
5532 }
5533
5534 +#ifdef CONFIG_PAX_PAGEEXEC
5535 +#ifdef CONFIG_PAX_DLRESOLVE
5536 +static void pax_emuplt_close(struct vm_area_struct *vma)
5537 +{
5538 + vma->vm_mm->call_dl_resolve = 0UL;
5539 +}
5540 +
5541 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5542 +{
5543 + unsigned int *kaddr;
5544 +
5545 + vmf->page = alloc_page(GFP_HIGHUSER);
5546 + if (!vmf->page)
5547 + return VM_FAULT_OOM;
5548 +
5549 + kaddr = kmap(vmf->page);
5550 + memset(kaddr, 0, PAGE_SIZE);
5551 + kaddr[0] = 0x9DE3BFA8U; /* save */
5552 + flush_dcache_page(vmf->page);
5553 + kunmap(vmf->page);
5554 + return VM_FAULT_MAJOR;
5555 +}
5556 +
5557 +static const struct vm_operations_struct pax_vm_ops = {
5558 + .close = pax_emuplt_close,
5559 + .fault = pax_emuplt_fault
5560 +};
5561 +
5562 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5563 +{
5564 + int ret;
5565 +
5566 + vma->vm_mm = current->mm;
5567 + vma->vm_start = addr;
5568 + vma->vm_end = addr + PAGE_SIZE;
5569 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5570 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5571 + vma->vm_ops = &pax_vm_ops;
5572 +
5573 + ret = insert_vm_struct(current->mm, vma);
5574 + if (ret)
5575 + return ret;
5576 +
5577 + ++current->mm->total_vm;
5578 + return 0;
5579 +}
5580 +#endif
5581 +
5582 +/*
5583 + * PaX: decide what to do with offenders (regs->tpc = fault address)
5584 + *
5585 + * returns 1 when task should be killed
5586 + * 2 when patched PLT trampoline was detected
5587 + * 3 when unpatched PLT trampoline was detected
5588 + */
5589 +static int pax_handle_fetch_fault(struct pt_regs *regs)
5590 +{
5591 +
5592 +#ifdef CONFIG_PAX_EMUPLT
5593 + int err;
5594 +
5595 + do { /* PaX: patched PLT emulation #1 */
5596 + unsigned int sethi1, sethi2, jmpl;
5597 +
5598 + err = get_user(sethi1, (unsigned int *)regs->tpc);
5599 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
5600 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
5601 +
5602 + if (err)
5603 + break;
5604 +
5605 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5606 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
5607 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
5608 + {
5609 + unsigned long addr;
5610 +
5611 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5612 + addr = regs->u_regs[UREG_G1];
5613 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5614 +
5615 + if (test_thread_flag(TIF_32BIT))
5616 + addr &= 0xFFFFFFFFUL;
5617 +
5618 + regs->tpc = addr;
5619 + regs->tnpc = addr+4;
5620 + return 2;
5621 + }
5622 + } while (0);
5623 +
5624 + { /* PaX: patched PLT emulation #2 */
5625 + unsigned int ba;
5626 +
5627 + err = get_user(ba, (unsigned int *)regs->tpc);
5628 +
5629 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5630 + unsigned long addr;
5631 +
5632 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5633 +
5634 + if (test_thread_flag(TIF_32BIT))
5635 + addr &= 0xFFFFFFFFUL;
5636 +
5637 + regs->tpc = addr;
5638 + regs->tnpc = addr+4;
5639 + return 2;
5640 + }
5641 + }
5642 +
5643 + do { /* PaX: patched PLT emulation #3 */
5644 + unsigned int sethi, jmpl, nop;
5645 +
5646 + err = get_user(sethi, (unsigned int *)regs->tpc);
5647 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5648 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5649 +
5650 + if (err)
5651 + break;
5652 +
5653 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5654 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5655 + nop == 0x01000000U)
5656 + {
5657 + unsigned long addr;
5658 +
5659 + addr = (sethi & 0x003FFFFFU) << 10;
5660 + regs->u_regs[UREG_G1] = addr;
5661 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5662 +
5663 + if (test_thread_flag(TIF_32BIT))
5664 + addr &= 0xFFFFFFFFUL;
5665 +
5666 + regs->tpc = addr;
5667 + regs->tnpc = addr+4;
5668 + return 2;
5669 + }
5670 + } while (0);
5671 +
5672 + do { /* PaX: patched PLT emulation #4 */
5673 + unsigned int sethi, mov1, call, mov2;
5674 +
5675 + err = get_user(sethi, (unsigned int *)regs->tpc);
5676 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5677 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
5678 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5679 +
5680 + if (err)
5681 + break;
5682 +
5683 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5684 + mov1 == 0x8210000FU &&
5685 + (call & 0xC0000000U) == 0x40000000U &&
5686 + mov2 == 0x9E100001U)
5687 + {
5688 + unsigned long addr;
5689 +
5690 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5691 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5692 +
5693 + if (test_thread_flag(TIF_32BIT))
5694 + addr &= 0xFFFFFFFFUL;
5695 +
5696 + regs->tpc = addr;
5697 + regs->tnpc = addr+4;
5698 + return 2;
5699 + }
5700 + } while (0);
5701 +
5702 + do { /* PaX: patched PLT emulation #5 */
5703 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5704 +
5705 + err = get_user(sethi, (unsigned int *)regs->tpc);
5706 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5707 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5708 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5709 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5710 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5711 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5712 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5713 +
5714 + if (err)
5715 + break;
5716 +
5717 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5718 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5719 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5720 + (or1 & 0xFFFFE000U) == 0x82106000U &&
5721 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5722 + sllx == 0x83287020U &&
5723 + jmpl == 0x81C04005U &&
5724 + nop == 0x01000000U)
5725 + {
5726 + unsigned long addr;
5727 +
5728 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5729 + regs->u_regs[UREG_G1] <<= 32;
5730 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5731 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5732 + regs->tpc = addr;
5733 + regs->tnpc = addr+4;
5734 + return 2;
5735 + }
5736 + } while (0);
5737 +
5738 + do { /* PaX: patched PLT emulation #6 */
5739 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5740 +
5741 + err = get_user(sethi, (unsigned int *)regs->tpc);
5742 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5743 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5744 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5745 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
5746 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5747 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5748 +
5749 + if (err)
5750 + break;
5751 +
5752 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5753 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5754 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5755 + sllx == 0x83287020U &&
5756 + (or & 0xFFFFE000U) == 0x8A116000U &&
5757 + jmpl == 0x81C04005U &&
5758 + nop == 0x01000000U)
5759 + {
5760 + unsigned long addr;
5761 +
5762 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5763 + regs->u_regs[UREG_G1] <<= 32;
5764 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5765 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5766 + regs->tpc = addr;
5767 + regs->tnpc = addr+4;
5768 + return 2;
5769 + }
5770 + } while (0);
5771 +
5772 + do { /* PaX: unpatched PLT emulation step 1 */
5773 + unsigned int sethi, ba, nop;
5774 +
5775 + err = get_user(sethi, (unsigned int *)regs->tpc);
5776 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5777 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5778 +
5779 + if (err)
5780 + break;
5781 +
5782 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5783 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5784 + nop == 0x01000000U)
5785 + {
5786 + unsigned long addr;
5787 + unsigned int save, call;
5788 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5789 +
5790 + if ((ba & 0xFFC00000U) == 0x30800000U)
5791 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5792 + else
5793 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5794 +
5795 + if (test_thread_flag(TIF_32BIT))
5796 + addr &= 0xFFFFFFFFUL;
5797 +
5798 + err = get_user(save, (unsigned int *)addr);
5799 + err |= get_user(call, (unsigned int *)(addr+4));
5800 + err |= get_user(nop, (unsigned int *)(addr+8));
5801 + if (err)
5802 + break;
5803 +
5804 +#ifdef CONFIG_PAX_DLRESOLVE
5805 + if (save == 0x9DE3BFA8U &&
5806 + (call & 0xC0000000U) == 0x40000000U &&
5807 + nop == 0x01000000U)
5808 + {
5809 + struct vm_area_struct *vma;
5810 + unsigned long call_dl_resolve;
5811 +
5812 + down_read(&current->mm->mmap_sem);
5813 + call_dl_resolve = current->mm->call_dl_resolve;
5814 + up_read(&current->mm->mmap_sem);
5815 + if (likely(call_dl_resolve))
5816 + goto emulate;
5817 +
5818 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5819 +
5820 + down_write(&current->mm->mmap_sem);
5821 + if (current->mm->call_dl_resolve) {
5822 + call_dl_resolve = current->mm->call_dl_resolve;
5823 + up_write(&current->mm->mmap_sem);
5824 + if (vma)
5825 + kmem_cache_free(vm_area_cachep, vma);
5826 + goto emulate;
5827 + }
5828 +
5829 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5830 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5831 + up_write(&current->mm->mmap_sem);
5832 + if (vma)
5833 + kmem_cache_free(vm_area_cachep, vma);
5834 + return 1;
5835 + }
5836 +
5837 + if (pax_insert_vma(vma, call_dl_resolve)) {
5838 + up_write(&current->mm->mmap_sem);
5839 + kmem_cache_free(vm_area_cachep, vma);
5840 + return 1;
5841 + }
5842 +
5843 + current->mm->call_dl_resolve = call_dl_resolve;
5844 + up_write(&current->mm->mmap_sem);
5845 +
5846 +emulate:
5847 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5848 + regs->tpc = call_dl_resolve;
5849 + regs->tnpc = addr+4;
5850 + return 3;
5851 + }
5852 +#endif
5853 +
5854 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5855 + if ((save & 0xFFC00000U) == 0x05000000U &&
5856 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5857 + nop == 0x01000000U)
5858 + {
5859 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5860 + regs->u_regs[UREG_G2] = addr + 4;
5861 + addr = (save & 0x003FFFFFU) << 10;
5862 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5863 +
5864 + if (test_thread_flag(TIF_32BIT))
5865 + addr &= 0xFFFFFFFFUL;
5866 +
5867 + regs->tpc = addr;
5868 + regs->tnpc = addr+4;
5869 + return 3;
5870 + }
5871 +
5872 + /* PaX: 64-bit PLT stub */
5873 + err = get_user(sethi1, (unsigned int *)addr);
5874 + err |= get_user(sethi2, (unsigned int *)(addr+4));
5875 + err |= get_user(or1, (unsigned int *)(addr+8));
5876 + err |= get_user(or2, (unsigned int *)(addr+12));
5877 + err |= get_user(sllx, (unsigned int *)(addr+16));
5878 + err |= get_user(add, (unsigned int *)(addr+20));
5879 + err |= get_user(jmpl, (unsigned int *)(addr+24));
5880 + err |= get_user(nop, (unsigned int *)(addr+28));
5881 + if (err)
5882 + break;
5883 +
5884 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5885 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5886 + (or1 & 0xFFFFE000U) == 0x88112000U &&
5887 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5888 + sllx == 0x89293020U &&
5889 + add == 0x8A010005U &&
5890 + jmpl == 0x89C14000U &&
5891 + nop == 0x01000000U)
5892 + {
5893 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5894 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5895 + regs->u_regs[UREG_G4] <<= 32;
5896 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5897 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5898 + regs->u_regs[UREG_G4] = addr + 24;
5899 + addr = regs->u_regs[UREG_G5];
5900 + regs->tpc = addr;
5901 + regs->tnpc = addr+4;
5902 + return 3;
5903 + }
5904 + }
5905 + } while (0);
5906 +
5907 +#ifdef CONFIG_PAX_DLRESOLVE
5908 + do { /* PaX: unpatched PLT emulation step 2 */
5909 + unsigned int save, call, nop;
5910 +
5911 + err = get_user(save, (unsigned int *)(regs->tpc-4));
5912 + err |= get_user(call, (unsigned int *)regs->tpc);
5913 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5914 + if (err)
5915 + break;
5916 +
5917 + if (save == 0x9DE3BFA8U &&
5918 + (call & 0xC0000000U) == 0x40000000U &&
5919 + nop == 0x01000000U)
5920 + {
5921 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5922 +
5923 + if (test_thread_flag(TIF_32BIT))
5924 + dl_resolve &= 0xFFFFFFFFUL;
5925 +
5926 + regs->u_regs[UREG_RETPC] = regs->tpc;
5927 + regs->tpc = dl_resolve;
5928 + regs->tnpc = dl_resolve+4;
5929 + return 3;
5930 + }
5931 + } while (0);
5932 +#endif
5933 +
5934 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5935 + unsigned int sethi, ba, nop;
5936 +
5937 + err = get_user(sethi, (unsigned int *)regs->tpc);
5938 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5939 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5940 +
5941 + if (err)
5942 + break;
5943 +
5944 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5945 + (ba & 0xFFF00000U) == 0x30600000U &&
5946 + nop == 0x01000000U)
5947 + {
5948 + unsigned long addr;
5949 +
5950 + addr = (sethi & 0x003FFFFFU) << 10;
5951 + regs->u_regs[UREG_G1] = addr;
5952 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5953 +
5954 + if (test_thread_flag(TIF_32BIT))
5955 + addr &= 0xFFFFFFFFUL;
5956 +
5957 + regs->tpc = addr;
5958 + regs->tnpc = addr+4;
5959 + return 2;
5960 + }
5961 + } while (0);
5962 +
5963 +#endif
5964 +
5965 + return 1;
5966 +}
5967 +
5968 +void pax_report_insns(void *pc, void *sp)
5969 +{
5970 + unsigned long i;
5971 +
5972 + printk(KERN_ERR "PAX: bytes at PC: ");
5973 + for (i = 0; i < 8; i++) {
5974 + unsigned int c;
5975 + if (get_user(c, (unsigned int *)pc+i))
5976 + printk(KERN_CONT "???????? ");
5977 + else
5978 + printk(KERN_CONT "%08x ", c);
5979 + }
5980 + printk("\n");
5981 +}
5982 +#endif
5983 +
5984 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5985 {
5986 struct mm_struct *mm = current->mm;
5987 @@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fau
5988 if (!vma)
5989 goto bad_area;
5990
5991 +#ifdef CONFIG_PAX_PAGEEXEC
5992 + /* PaX: detect ITLB misses on non-exec pages */
5993 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5994 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5995 + {
5996 + if (address != regs->tpc)
5997 + goto good_area;
5998 +
5999 + up_read(&mm->mmap_sem);
6000 + switch (pax_handle_fetch_fault(regs)) {
6001 +
6002 +#ifdef CONFIG_PAX_EMUPLT
6003 + case 2:
6004 + case 3:
6005 + return;
6006 +#endif
6007 +
6008 + }
6009 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
6010 + do_group_exit(SIGKILL);
6011 + }
6012 +#endif
6013 +
6014 /* Pure DTLB misses do not tell us whether the fault causing
6015 * load/store/atomic was a write or not, it only says that there
6016 * was no match. So in such a case we (carefully) read the
6017 diff -urNp linux-2.6.32.45/arch/sparc/mm/hugetlbpage.c linux-2.6.32.45/arch/sparc/mm/hugetlbpage.c
6018 --- linux-2.6.32.45/arch/sparc/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
6019 +++ linux-2.6.32.45/arch/sparc/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
6020 @@ -69,7 +69,7 @@ full_search:
6021 }
6022 return -ENOMEM;
6023 }
6024 - if (likely(!vma || addr + len <= vma->vm_start)) {
6025 + if (likely(check_heap_stack_gap(vma, addr, len))) {
6026 /*
6027 * Remember the place where we stopped the search:
6028 */
6029 @@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct
6030 /* make sure it can fit in the remaining address space */
6031 if (likely(addr > len)) {
6032 vma = find_vma(mm, addr-len);
6033 - if (!vma || addr <= vma->vm_start) {
6034 + if (check_heap_stack_gap(vma, addr - len, len)) {
6035 /* remember the address as a hint for next time */
6036 return (mm->free_area_cache = addr-len);
6037 }
6038 @@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct
6039 if (unlikely(mm->mmap_base < len))
6040 goto bottomup;
6041
6042 - addr = (mm->mmap_base-len) & HPAGE_MASK;
6043 + addr = mm->mmap_base - len;
6044
6045 do {
6046 + addr &= HPAGE_MASK;
6047 /*
6048 * Lookup failure means no vma is above this address,
6049 * else if new region fits below vma->vm_start,
6050 * return with success:
6051 */
6052 vma = find_vma(mm, addr);
6053 - if (likely(!vma || addr+len <= vma->vm_start)) {
6054 + if (likely(check_heap_stack_gap(vma, addr, len))) {
6055 /* remember the address as a hint for next time */
6056 return (mm->free_area_cache = addr);
6057 }
6058 @@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct
6059 mm->cached_hole_size = vma->vm_start - addr;
6060
6061 /* try just below the current vma->vm_start */
6062 - addr = (vma->vm_start-len) & HPAGE_MASK;
6063 - } while (likely(len < vma->vm_start));
6064 + addr = skip_heap_stack_gap(vma, len);
6065 + } while (!IS_ERR_VALUE(addr));
6066
6067 bottomup:
6068 /*
6069 @@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *f
6070 if (addr) {
6071 addr = ALIGN(addr, HPAGE_SIZE);
6072 vma = find_vma(mm, addr);
6073 - if (task_size - len >= addr &&
6074 - (!vma || addr + len <= vma->vm_start))
6075 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6076 return addr;
6077 }
6078 if (mm->get_unmapped_area == arch_get_unmapped_area)
6079 diff -urNp linux-2.6.32.45/arch/sparc/mm/init_32.c linux-2.6.32.45/arch/sparc/mm/init_32.c
6080 --- linux-2.6.32.45/arch/sparc/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
6081 +++ linux-2.6.32.45/arch/sparc/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
6082 @@ -317,6 +317,9 @@ extern void device_scan(void);
6083 pgprot_t PAGE_SHARED __read_mostly;
6084 EXPORT_SYMBOL(PAGE_SHARED);
6085
6086 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
6087 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
6088 +
6089 void __init paging_init(void)
6090 {
6091 switch(sparc_cpu_model) {
6092 @@ -345,17 +348,17 @@ void __init paging_init(void)
6093
6094 /* Initialize the protection map with non-constant, MMU dependent values. */
6095 protection_map[0] = PAGE_NONE;
6096 - protection_map[1] = PAGE_READONLY;
6097 - protection_map[2] = PAGE_COPY;
6098 - protection_map[3] = PAGE_COPY;
6099 + protection_map[1] = PAGE_READONLY_NOEXEC;
6100 + protection_map[2] = PAGE_COPY_NOEXEC;
6101 + protection_map[3] = PAGE_COPY_NOEXEC;
6102 protection_map[4] = PAGE_READONLY;
6103 protection_map[5] = PAGE_READONLY;
6104 protection_map[6] = PAGE_COPY;
6105 protection_map[7] = PAGE_COPY;
6106 protection_map[8] = PAGE_NONE;
6107 - protection_map[9] = PAGE_READONLY;
6108 - protection_map[10] = PAGE_SHARED;
6109 - protection_map[11] = PAGE_SHARED;
6110 + protection_map[9] = PAGE_READONLY_NOEXEC;
6111 + protection_map[10] = PAGE_SHARED_NOEXEC;
6112 + protection_map[11] = PAGE_SHARED_NOEXEC;
6113 protection_map[12] = PAGE_READONLY;
6114 protection_map[13] = PAGE_READONLY;
6115 protection_map[14] = PAGE_SHARED;
6116 diff -urNp linux-2.6.32.45/arch/sparc/mm/Makefile linux-2.6.32.45/arch/sparc/mm/Makefile
6117 --- linux-2.6.32.45/arch/sparc/mm/Makefile 2011-03-27 14:31:47.000000000 -0400
6118 +++ linux-2.6.32.45/arch/sparc/mm/Makefile 2011-04-17 15:56:46.000000000 -0400
6119 @@ -2,7 +2,7 @@
6120 #
6121
6122 asflags-y := -ansi
6123 -ccflags-y := -Werror
6124 +#ccflags-y := -Werror
6125
6126 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
6127 obj-y += fault_$(BITS).o
6128 diff -urNp linux-2.6.32.45/arch/sparc/mm/srmmu.c linux-2.6.32.45/arch/sparc/mm/srmmu.c
6129 --- linux-2.6.32.45/arch/sparc/mm/srmmu.c 2011-03-27 14:31:47.000000000 -0400
6130 +++ linux-2.6.32.45/arch/sparc/mm/srmmu.c 2011-04-17 15:56:46.000000000 -0400
6131 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
6132 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
6133 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
6134 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
6135 +
6136 +#ifdef CONFIG_PAX_PAGEEXEC
6137 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
6138 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
6139 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
6140 +#endif
6141 +
6142 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
6143 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
6144
6145 diff -urNp linux-2.6.32.45/arch/um/include/asm/kmap_types.h linux-2.6.32.45/arch/um/include/asm/kmap_types.h
6146 --- linux-2.6.32.45/arch/um/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
6147 +++ linux-2.6.32.45/arch/um/include/asm/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
6148 @@ -23,6 +23,7 @@ enum km_type {
6149 KM_IRQ1,
6150 KM_SOFTIRQ0,
6151 KM_SOFTIRQ1,
6152 + KM_CLEARPAGE,
6153 KM_TYPE_NR
6154 };
6155
6156 diff -urNp linux-2.6.32.45/arch/um/include/asm/page.h linux-2.6.32.45/arch/um/include/asm/page.h
6157 --- linux-2.6.32.45/arch/um/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
6158 +++ linux-2.6.32.45/arch/um/include/asm/page.h 2011-04-17 15:56:46.000000000 -0400
6159 @@ -14,6 +14,9 @@
6160 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6161 #define PAGE_MASK (~(PAGE_SIZE-1))
6162
6163 +#define ktla_ktva(addr) (addr)
6164 +#define ktva_ktla(addr) (addr)
6165 +
6166 #ifndef __ASSEMBLY__
6167
6168 struct page;
6169 diff -urNp linux-2.6.32.45/arch/um/kernel/process.c linux-2.6.32.45/arch/um/kernel/process.c
6170 --- linux-2.6.32.45/arch/um/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
6171 +++ linux-2.6.32.45/arch/um/kernel/process.c 2011-04-17 15:56:46.000000000 -0400
6172 @@ -393,22 +393,6 @@ int singlestepping(void * t)
6173 return 2;
6174 }
6175
6176 -/*
6177 - * Only x86 and x86_64 have an arch_align_stack().
6178 - * All other arches have "#define arch_align_stack(x) (x)"
6179 - * in their asm/system.h
6180 - * As this is included in UML from asm-um/system-generic.h,
6181 - * we can use it to behave as the subarch does.
6182 - */
6183 -#ifndef arch_align_stack
6184 -unsigned long arch_align_stack(unsigned long sp)
6185 -{
6186 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6187 - sp -= get_random_int() % 8192;
6188 - return sp & ~0xf;
6189 -}
6190 -#endif
6191 -
6192 unsigned long get_wchan(struct task_struct *p)
6193 {
6194 unsigned long stack_page, sp, ip;
6195 diff -urNp linux-2.6.32.45/arch/um/sys-i386/syscalls.c linux-2.6.32.45/arch/um/sys-i386/syscalls.c
6196 --- linux-2.6.32.45/arch/um/sys-i386/syscalls.c 2011-03-27 14:31:47.000000000 -0400
6197 +++ linux-2.6.32.45/arch/um/sys-i386/syscalls.c 2011-04-17 15:56:46.000000000 -0400
6198 @@ -11,6 +11,21 @@
6199 #include "asm/uaccess.h"
6200 #include "asm/unistd.h"
6201
6202 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
6203 +{
6204 + unsigned long pax_task_size = TASK_SIZE;
6205 +
6206 +#ifdef CONFIG_PAX_SEGMEXEC
6207 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
6208 + pax_task_size = SEGMEXEC_TASK_SIZE;
6209 +#endif
6210 +
6211 + if (len > pax_task_size || addr > pax_task_size - len)
6212 + return -EINVAL;
6213 +
6214 + return 0;
6215 +}
6216 +
6217 /*
6218 * Perform the select(nd, in, out, ex, tv) and mmap() system
6219 * calls. Linux/i386 didn't use to be able to handle more than
6220 diff -urNp linux-2.6.32.45/arch/x86/boot/bitops.h linux-2.6.32.45/arch/x86/boot/bitops.h
6221 --- linux-2.6.32.45/arch/x86/boot/bitops.h 2011-03-27 14:31:47.000000000 -0400
6222 +++ linux-2.6.32.45/arch/x86/boot/bitops.h 2011-04-17 15:56:46.000000000 -0400
6223 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int
6224 u8 v;
6225 const u32 *p = (const u32 *)addr;
6226
6227 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
6228 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
6229 return v;
6230 }
6231
6232 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int
6233
6234 static inline void set_bit(int nr, void *addr)
6235 {
6236 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
6237 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
6238 }
6239
6240 #endif /* BOOT_BITOPS_H */
6241 diff -urNp linux-2.6.32.45/arch/x86/boot/boot.h linux-2.6.32.45/arch/x86/boot/boot.h
6242 --- linux-2.6.32.45/arch/x86/boot/boot.h 2011-03-27 14:31:47.000000000 -0400
6243 +++ linux-2.6.32.45/arch/x86/boot/boot.h 2011-04-17 15:56:46.000000000 -0400
6244 @@ -82,7 +82,7 @@ static inline void io_delay(void)
6245 static inline u16 ds(void)
6246 {
6247 u16 seg;
6248 - asm("movw %%ds,%0" : "=rm" (seg));
6249 + asm volatile("movw %%ds,%0" : "=rm" (seg));
6250 return seg;
6251 }
6252
6253 @@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t
6254 static inline int memcmp(const void *s1, const void *s2, size_t len)
6255 {
6256 u8 diff;
6257 - asm("repe; cmpsb; setnz %0"
6258 + asm volatile("repe; cmpsb; setnz %0"
6259 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
6260 return diff;
6261 }
6262 diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/head_32.S linux-2.6.32.45/arch/x86/boot/compressed/head_32.S
6263 --- linux-2.6.32.45/arch/x86/boot/compressed/head_32.S 2011-03-27 14:31:47.000000000 -0400
6264 +++ linux-2.6.32.45/arch/x86/boot/compressed/head_32.S 2011-04-17 15:56:46.000000000 -0400
6265 @@ -76,7 +76,7 @@ ENTRY(startup_32)
6266 notl %eax
6267 andl %eax, %ebx
6268 #else
6269 - movl $LOAD_PHYSICAL_ADDR, %ebx
6270 + movl $____LOAD_PHYSICAL_ADDR, %ebx
6271 #endif
6272
6273 /* Target address to relocate to for decompression */
6274 @@ -149,7 +149,7 @@ relocated:
6275 * and where it was actually loaded.
6276 */
6277 movl %ebp, %ebx
6278 - subl $LOAD_PHYSICAL_ADDR, %ebx
6279 + subl $____LOAD_PHYSICAL_ADDR, %ebx
6280 jz 2f /* Nothing to be done if loaded at compiled addr. */
6281 /*
6282 * Process relocations.
6283 @@ -157,8 +157,7 @@ relocated:
6284
6285 1: subl $4, %edi
6286 movl (%edi), %ecx
6287 - testl %ecx, %ecx
6288 - jz 2f
6289 + jecxz 2f
6290 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
6291 jmp 1b
6292 2:
6293 diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/head_64.S linux-2.6.32.45/arch/x86/boot/compressed/head_64.S
6294 --- linux-2.6.32.45/arch/x86/boot/compressed/head_64.S 2011-03-27 14:31:47.000000000 -0400
6295 +++ linux-2.6.32.45/arch/x86/boot/compressed/head_64.S 2011-07-01 18:53:00.000000000 -0400
6296 @@ -91,7 +91,7 @@ ENTRY(startup_32)
6297 notl %eax
6298 andl %eax, %ebx
6299 #else
6300 - movl $LOAD_PHYSICAL_ADDR, %ebx
6301 + movl $____LOAD_PHYSICAL_ADDR, %ebx
6302 #endif
6303
6304 /* Target address to relocate to for decompression */
6305 @@ -183,7 +183,7 @@ no_longmode:
6306 hlt
6307 jmp 1b
6308
6309 -#include "../../kernel/verify_cpu_64.S"
6310 +#include "../../kernel/verify_cpu.S"
6311
6312 /*
6313 * Be careful here startup_64 needs to be at a predictable
6314 @@ -234,7 +234,7 @@ ENTRY(startup_64)
6315 notq %rax
6316 andq %rax, %rbp
6317 #else
6318 - movq $LOAD_PHYSICAL_ADDR, %rbp
6319 + movq $____LOAD_PHYSICAL_ADDR, %rbp
6320 #endif
6321
6322 /* Target address to relocate to for decompression */
6323 diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/Makefile linux-2.6.32.45/arch/x86/boot/compressed/Makefile
6324 --- linux-2.6.32.45/arch/x86/boot/compressed/Makefile 2011-03-27 14:31:47.000000000 -0400
6325 +++ linux-2.6.32.45/arch/x86/boot/compressed/Makefile 2011-08-07 14:38:34.000000000 -0400
6326 @@ -13,6 +13,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
6327 KBUILD_CFLAGS += $(cflags-y)
6328 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
6329 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
6330 +ifdef CONSTIFY_PLUGIN
6331 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6332 +endif
6333
6334 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
6335 GCOV_PROFILE := n
6336 diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/misc.c linux-2.6.32.45/arch/x86/boot/compressed/misc.c
6337 --- linux-2.6.32.45/arch/x86/boot/compressed/misc.c 2011-03-27 14:31:47.000000000 -0400
6338 +++ linux-2.6.32.45/arch/x86/boot/compressed/misc.c 2011-04-17 15:56:46.000000000 -0400
6339 @@ -288,7 +288,7 @@ static void parse_elf(void *output)
6340 case PT_LOAD:
6341 #ifdef CONFIG_RELOCATABLE
6342 dest = output;
6343 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
6344 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
6345 #else
6346 dest = (void *)(phdr->p_paddr);
6347 #endif
6348 @@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *
6349 error("Destination address too large");
6350 #endif
6351 #ifndef CONFIG_RELOCATABLE
6352 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
6353 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
6354 error("Wrong destination address");
6355 #endif
6356
6357 diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/mkpiggy.c linux-2.6.32.45/arch/x86/boot/compressed/mkpiggy.c
6358 --- linux-2.6.32.45/arch/x86/boot/compressed/mkpiggy.c 2011-03-27 14:31:47.000000000 -0400
6359 +++ linux-2.6.32.45/arch/x86/boot/compressed/mkpiggy.c 2011-04-17 15:56:46.000000000 -0400
6360 @@ -74,7 +74,7 @@ int main(int argc, char *argv[])
6361
6362 offs = (olen > ilen) ? olen - ilen : 0;
6363 offs += olen >> 12; /* Add 8 bytes for each 32K block */
6364 - offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
6365 + offs += 64*1024; /* Add 64K bytes slack */
6366 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
6367
6368 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
6369 diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/relocs.c linux-2.6.32.45/arch/x86/boot/compressed/relocs.c
6370 --- linux-2.6.32.45/arch/x86/boot/compressed/relocs.c 2011-03-27 14:31:47.000000000 -0400
6371 +++ linux-2.6.32.45/arch/x86/boot/compressed/relocs.c 2011-04-17 15:56:46.000000000 -0400
6372 @@ -10,8 +10,11 @@
6373 #define USE_BSD
6374 #include <endian.h>
6375
6376 +#include "../../../../include/linux/autoconf.h"
6377 +
6378 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
6379 static Elf32_Ehdr ehdr;
6380 +static Elf32_Phdr *phdr;
6381 static unsigned long reloc_count, reloc_idx;
6382 static unsigned long *relocs;
6383
6384 @@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
6385
6386 static int is_safe_abs_reloc(const char* sym_name)
6387 {
6388 - int i;
6389 + unsigned int i;
6390
6391 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
6392 if (!strcmp(sym_name, safe_abs_relocs[i]))
6393 @@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
6394 }
6395 }
6396
6397 +static void read_phdrs(FILE *fp)
6398 +{
6399 + unsigned int i;
6400 +
6401 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
6402 + if (!phdr) {
6403 + die("Unable to allocate %d program headers\n",
6404 + ehdr.e_phnum);
6405 + }
6406 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
6407 + die("Seek to %d failed: %s\n",
6408 + ehdr.e_phoff, strerror(errno));
6409 + }
6410 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
6411 + die("Cannot read ELF program headers: %s\n",
6412 + strerror(errno));
6413 + }
6414 + for(i = 0; i < ehdr.e_phnum; i++) {
6415 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
6416 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
6417 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
6418 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
6419 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
6420 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
6421 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
6422 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
6423 + }
6424 +
6425 +}
6426 +
6427 static void read_shdrs(FILE *fp)
6428 {
6429 - int i;
6430 + unsigned int i;
6431 Elf32_Shdr shdr;
6432
6433 secs = calloc(ehdr.e_shnum, sizeof(struct section));
6434 @@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
6435
6436 static void read_strtabs(FILE *fp)
6437 {
6438 - int i;
6439 + unsigned int i;
6440 for (i = 0; i < ehdr.e_shnum; i++) {
6441 struct section *sec = &secs[i];
6442 if (sec->shdr.sh_type != SHT_STRTAB) {
6443 @@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
6444
6445 static void read_symtabs(FILE *fp)
6446 {
6447 - int i,j;
6448 + unsigned int i,j;
6449 for (i = 0; i < ehdr.e_shnum; i++) {
6450 struct section *sec = &secs[i];
6451 if (sec->shdr.sh_type != SHT_SYMTAB) {
6452 @@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
6453
6454 static void read_relocs(FILE *fp)
6455 {
6456 - int i,j;
6457 + unsigned int i,j;
6458 + uint32_t base;
6459 +
6460 for (i = 0; i < ehdr.e_shnum; i++) {
6461 struct section *sec = &secs[i];
6462 if (sec->shdr.sh_type != SHT_REL) {
6463 @@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
6464 die("Cannot read symbol table: %s\n",
6465 strerror(errno));
6466 }
6467 + base = 0;
6468 + for (j = 0; j < ehdr.e_phnum; j++) {
6469 + if (phdr[j].p_type != PT_LOAD )
6470 + continue;
6471 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
6472 + continue;
6473 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
6474 + break;
6475 + }
6476 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
6477 Elf32_Rel *rel = &sec->reltab[j];
6478 - rel->r_offset = elf32_to_cpu(rel->r_offset);
6479 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
6480 rel->r_info = elf32_to_cpu(rel->r_info);
6481 }
6482 }
6483 @@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
6484
6485 static void print_absolute_symbols(void)
6486 {
6487 - int i;
6488 + unsigned int i;
6489 printf("Absolute symbols\n");
6490 printf(" Num: Value Size Type Bind Visibility Name\n");
6491 for (i = 0; i < ehdr.e_shnum; i++) {
6492 struct section *sec = &secs[i];
6493 char *sym_strtab;
6494 Elf32_Sym *sh_symtab;
6495 - int j;
6496 + unsigned int j;
6497
6498 if (sec->shdr.sh_type != SHT_SYMTAB) {
6499 continue;
6500 @@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
6501
6502 static void print_absolute_relocs(void)
6503 {
6504 - int i, printed = 0;
6505 + unsigned int i, printed = 0;
6506
6507 for (i = 0; i < ehdr.e_shnum; i++) {
6508 struct section *sec = &secs[i];
6509 struct section *sec_applies, *sec_symtab;
6510 char *sym_strtab;
6511 Elf32_Sym *sh_symtab;
6512 - int j;
6513 + unsigned int j;
6514 if (sec->shdr.sh_type != SHT_REL) {
6515 continue;
6516 }
6517 @@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
6518
6519 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6520 {
6521 - int i;
6522 + unsigned int i;
6523 /* Walk through the relocations */
6524 for (i = 0; i < ehdr.e_shnum; i++) {
6525 char *sym_strtab;
6526 Elf32_Sym *sh_symtab;
6527 struct section *sec_applies, *sec_symtab;
6528 - int j;
6529 + unsigned int j;
6530 struct section *sec = &secs[i];
6531
6532 if (sec->shdr.sh_type != SHT_REL) {
6533 @@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(El
6534 if (sym->st_shndx == SHN_ABS) {
6535 continue;
6536 }
6537 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6538 + if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6539 + continue;
6540 +
6541 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6542 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6543 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6544 + continue;
6545 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6546 + continue;
6547 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6548 + continue;
6549 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6550 + continue;
6551 +#endif
6552 if (r_type == R_386_NONE || r_type == R_386_PC32) {
6553 /*
6554 * NONE can be ignored and and PC relative
6555 @@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, co
6556
6557 static void emit_relocs(int as_text)
6558 {
6559 - int i;
6560 + unsigned int i;
6561 /* Count how many relocations I have and allocate space for them. */
6562 reloc_count = 0;
6563 walk_relocs(count_reloc);
6564 @@ -634,6 +693,7 @@ int main(int argc, char **argv)
6565 fname, strerror(errno));
6566 }
6567 read_ehdr(fp);
6568 + read_phdrs(fp);
6569 read_shdrs(fp);
6570 read_strtabs(fp);
6571 read_symtabs(fp);
6572 diff -urNp linux-2.6.32.45/arch/x86/boot/cpucheck.c linux-2.6.32.45/arch/x86/boot/cpucheck.c
6573 --- linux-2.6.32.45/arch/x86/boot/cpucheck.c 2011-03-27 14:31:47.000000000 -0400
6574 +++ linux-2.6.32.45/arch/x86/boot/cpucheck.c 2011-04-17 15:56:46.000000000 -0400
6575 @@ -74,7 +74,7 @@ static int has_fpu(void)
6576 u16 fcw = -1, fsw = -1;
6577 u32 cr0;
6578
6579 - asm("movl %%cr0,%0" : "=r" (cr0));
6580 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
6581 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6582 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6583 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6584 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6585 {
6586 u32 f0, f1;
6587
6588 - asm("pushfl ; "
6589 + asm volatile("pushfl ; "
6590 "pushfl ; "
6591 "popl %0 ; "
6592 "movl %0,%1 ; "
6593 @@ -115,7 +115,7 @@ static void get_flags(void)
6594 set_bit(X86_FEATURE_FPU, cpu.flags);
6595
6596 if (has_eflag(X86_EFLAGS_ID)) {
6597 - asm("cpuid"
6598 + asm volatile("cpuid"
6599 : "=a" (max_intel_level),
6600 "=b" (cpu_vendor[0]),
6601 "=d" (cpu_vendor[1]),
6602 @@ -124,7 +124,7 @@ static void get_flags(void)
6603
6604 if (max_intel_level >= 0x00000001 &&
6605 max_intel_level <= 0x0000ffff) {
6606 - asm("cpuid"
6607 + asm volatile("cpuid"
6608 : "=a" (tfms),
6609 "=c" (cpu.flags[4]),
6610 "=d" (cpu.flags[0])
6611 @@ -136,7 +136,7 @@ static void get_flags(void)
6612 cpu.model += ((tfms >> 16) & 0xf) << 4;
6613 }
6614
6615 - asm("cpuid"
6616 + asm volatile("cpuid"
6617 : "=a" (max_amd_level)
6618 : "a" (0x80000000)
6619 : "ebx", "ecx", "edx");
6620 @@ -144,7 +144,7 @@ static void get_flags(void)
6621 if (max_amd_level >= 0x80000001 &&
6622 max_amd_level <= 0x8000ffff) {
6623 u32 eax = 0x80000001;
6624 - asm("cpuid"
6625 + asm volatile("cpuid"
6626 : "+a" (eax),
6627 "=c" (cpu.flags[6]),
6628 "=d" (cpu.flags[1])
6629 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6630 u32 ecx = MSR_K7_HWCR;
6631 u32 eax, edx;
6632
6633 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6634 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6635 eax &= ~(1 << 15);
6636 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6637 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6638
6639 get_flags(); /* Make sure it really did something */
6640 err = check_flags();
6641 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6642 u32 ecx = MSR_VIA_FCR;
6643 u32 eax, edx;
6644
6645 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6646 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6647 eax |= (1<<1)|(1<<7);
6648 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6649 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6650
6651 set_bit(X86_FEATURE_CX8, cpu.flags);
6652 err = check_flags();
6653 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
6654 u32 eax, edx;
6655 u32 level = 1;
6656
6657 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6658 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6659 - asm("cpuid"
6660 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6661 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6662 + asm volatile("cpuid"
6663 : "+a" (level), "=d" (cpu.flags[0])
6664 : : "ecx", "ebx");
6665 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6666 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6667
6668 err = check_flags();
6669 }
6670 diff -urNp linux-2.6.32.45/arch/x86/boot/header.S linux-2.6.32.45/arch/x86/boot/header.S
6671 --- linux-2.6.32.45/arch/x86/boot/header.S 2011-03-27 14:31:47.000000000 -0400
6672 +++ linux-2.6.32.45/arch/x86/boot/header.S 2011-04-17 15:56:46.000000000 -0400
6673 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
6674 # single linked list of
6675 # struct setup_data
6676
6677 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6678 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6679
6680 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6681 #define VO_INIT_SIZE (VO__end - VO__text)
6682 diff -urNp linux-2.6.32.45/arch/x86/boot/Makefile linux-2.6.32.45/arch/x86/boot/Makefile
6683 --- linux-2.6.32.45/arch/x86/boot/Makefile 2011-03-27 14:31:47.000000000 -0400
6684 +++ linux-2.6.32.45/arch/x86/boot/Makefile 2011-08-07 14:38:13.000000000 -0400
6685 @@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
6686 $(call cc-option, -fno-stack-protector) \
6687 $(call cc-option, -mpreferred-stack-boundary=2)
6688 KBUILD_CFLAGS += $(call cc-option, -m32)
6689 +ifdef CONSTIFY_PLUGIN
6690 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6691 +endif
6692 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
6693 GCOV_PROFILE := n
6694
6695 diff -urNp linux-2.6.32.45/arch/x86/boot/memory.c linux-2.6.32.45/arch/x86/boot/memory.c
6696 --- linux-2.6.32.45/arch/x86/boot/memory.c 2011-03-27 14:31:47.000000000 -0400
6697 +++ linux-2.6.32.45/arch/x86/boot/memory.c 2011-04-17 15:56:46.000000000 -0400
6698 @@ -19,7 +19,7 @@
6699
6700 static int detect_memory_e820(void)
6701 {
6702 - int count = 0;
6703 + unsigned int count = 0;
6704 struct biosregs ireg, oreg;
6705 struct e820entry *desc = boot_params.e820_map;
6706 static struct e820entry buf; /* static so it is zeroed */
6707 diff -urNp linux-2.6.32.45/arch/x86/boot/video.c linux-2.6.32.45/arch/x86/boot/video.c
6708 --- linux-2.6.32.45/arch/x86/boot/video.c 2011-03-27 14:31:47.000000000 -0400
6709 +++ linux-2.6.32.45/arch/x86/boot/video.c 2011-04-17 15:56:46.000000000 -0400
6710 @@ -90,7 +90,7 @@ static void store_mode_params(void)
6711 static unsigned int get_entry(void)
6712 {
6713 char entry_buf[4];
6714 - int i, len = 0;
6715 + unsigned int i, len = 0;
6716 int key;
6717 unsigned int v;
6718
6719 diff -urNp linux-2.6.32.45/arch/x86/boot/video-vesa.c linux-2.6.32.45/arch/x86/boot/video-vesa.c
6720 --- linux-2.6.32.45/arch/x86/boot/video-vesa.c 2011-03-27 14:31:47.000000000 -0400
6721 +++ linux-2.6.32.45/arch/x86/boot/video-vesa.c 2011-04-17 15:56:46.000000000 -0400
6722 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6723
6724 boot_params.screen_info.vesapm_seg = oreg.es;
6725 boot_params.screen_info.vesapm_off = oreg.di;
6726 + boot_params.screen_info.vesapm_size = oreg.cx;
6727 }
6728
6729 /*
6730 diff -urNp linux-2.6.32.45/arch/x86/ia32/ia32_aout.c linux-2.6.32.45/arch/x86/ia32/ia32_aout.c
6731 --- linux-2.6.32.45/arch/x86/ia32/ia32_aout.c 2011-03-27 14:31:47.000000000 -0400
6732 +++ linux-2.6.32.45/arch/x86/ia32/ia32_aout.c 2011-04-17 15:56:46.000000000 -0400
6733 @@ -169,6 +169,8 @@ static int aout_core_dump(long signr, st
6734 unsigned long dump_start, dump_size;
6735 struct user32 dump;
6736
6737 + memset(&dump, 0, sizeof(dump));
6738 +
6739 fs = get_fs();
6740 set_fs(KERNEL_DS);
6741 has_dumped = 1;
6742 @@ -218,12 +220,6 @@ static int aout_core_dump(long signr, st
6743 dump_size = dump.u_ssize << PAGE_SHIFT;
6744 DUMP_WRITE(dump_start, dump_size);
6745 }
6746 - /*
6747 - * Finally dump the task struct. Not be used by gdb, but
6748 - * could be useful
6749 - */
6750 - set_fs(KERNEL_DS);
6751 - DUMP_WRITE(current, sizeof(*current));
6752 end_coredump:
6753 set_fs(fs);
6754 return has_dumped;
6755 diff -urNp linux-2.6.32.45/arch/x86/ia32/ia32entry.S linux-2.6.32.45/arch/x86/ia32/ia32entry.S
6756 --- linux-2.6.32.45/arch/x86/ia32/ia32entry.S 2011-03-27 14:31:47.000000000 -0400
6757 +++ linux-2.6.32.45/arch/x86/ia32/ia32entry.S 2011-08-25 17:42:18.000000000 -0400
6758 @@ -13,6 +13,7 @@
6759 #include <asm/thread_info.h>
6760 #include <asm/segment.h>
6761 #include <asm/irqflags.h>
6762 +#include <asm/pgtable.h>
6763 #include <linux/linkage.h>
6764
6765 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6766 @@ -93,6 +94,29 @@ ENTRY(native_irq_enable_sysexit)
6767 ENDPROC(native_irq_enable_sysexit)
6768 #endif
6769
6770 + .macro pax_enter_kernel_user
6771 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6772 + call pax_enter_kernel_user
6773 +#endif
6774 + .endm
6775 +
6776 + .macro pax_exit_kernel_user
6777 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6778 + call pax_exit_kernel_user
6779 +#endif
6780 +#ifdef CONFIG_PAX_RANDKSTACK
6781 + pushq %rax
6782 + call pax_randomize_kstack
6783 + popq %rax
6784 +#endif
6785 + .endm
6786 +
6787 +.macro pax_erase_kstack
6788 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6789 + call pax_erase_kstack
6790 +#endif
6791 +.endm
6792 +
6793 /*
6794 * 32bit SYSENTER instruction entry.
6795 *
6796 @@ -119,7 +143,7 @@ ENTRY(ia32_sysenter_target)
6797 CFI_REGISTER rsp,rbp
6798 SWAPGS_UNSAFE_STACK
6799 movq PER_CPU_VAR(kernel_stack), %rsp
6800 - addq $(KERNEL_STACK_OFFSET),%rsp
6801 + pax_enter_kernel_user
6802 /*
6803 * No need to follow this irqs on/off section: the syscall
6804 * disabled irqs, here we enable it straight after entry:
6805 @@ -135,7 +159,8 @@ ENTRY(ia32_sysenter_target)
6806 pushfq
6807 CFI_ADJUST_CFA_OFFSET 8
6808 /*CFI_REL_OFFSET rflags,0*/
6809 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6810 + GET_THREAD_INFO(%r10)
6811 + movl TI_sysenter_return(%r10), %r10d
6812 CFI_REGISTER rip,r10
6813 pushq $__USER32_CS
6814 CFI_ADJUST_CFA_OFFSET 8
6815 @@ -150,6 +175,12 @@ ENTRY(ia32_sysenter_target)
6816 SAVE_ARGS 0,0,1
6817 /* no need to do an access_ok check here because rbp has been
6818 32bit zero extended */
6819 +
6820 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6821 + mov $PAX_USER_SHADOW_BASE,%r10
6822 + add %r10,%rbp
6823 +#endif
6824 +
6825 1: movl (%rbp),%ebp
6826 .section __ex_table,"a"
6827 .quad 1b,ia32_badarg
6828 @@ -172,6 +203,8 @@ sysenter_dispatch:
6829 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6830 jnz sysexit_audit
6831 sysexit_from_sys_call:
6832 + pax_exit_kernel_user
6833 + pax_erase_kstack
6834 andl $~TS_COMPAT,TI_status(%r10)
6835 /* clear IF, that popfq doesn't enable interrupts early */
6836 andl $~0x200,EFLAGS-R11(%rsp)
6837 @@ -200,6 +233,9 @@ sysexit_from_sys_call:
6838 movl %eax,%esi /* 2nd arg: syscall number */
6839 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6840 call audit_syscall_entry
6841 +
6842 + pax_erase_kstack
6843 +
6844 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6845 cmpq $(IA32_NR_syscalls-1),%rax
6846 ja ia32_badsys
6847 @@ -252,6 +288,9 @@ sysenter_tracesys:
6848 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6849 movq %rsp,%rdi /* &pt_regs -> arg1 */
6850 call syscall_trace_enter
6851 +
6852 + pax_erase_kstack
6853 +
6854 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6855 RESTORE_REST
6856 cmpq $(IA32_NR_syscalls-1),%rax
6857 @@ -283,19 +322,24 @@ ENDPROC(ia32_sysenter_target)
6858 ENTRY(ia32_cstar_target)
6859 CFI_STARTPROC32 simple
6860 CFI_SIGNAL_FRAME
6861 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6862 + CFI_DEF_CFA rsp,0
6863 CFI_REGISTER rip,rcx
6864 /*CFI_REGISTER rflags,r11*/
6865 SWAPGS_UNSAFE_STACK
6866 movl %esp,%r8d
6867 CFI_REGISTER rsp,r8
6868 movq PER_CPU_VAR(kernel_stack),%rsp
6869 +
6870 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6871 + pax_enter_kernel_user
6872 +#endif
6873 +
6874 /*
6875 * No need to follow this irqs on/off section: the syscall
6876 * disabled irqs and here we enable it straight after entry:
6877 */
6878 ENABLE_INTERRUPTS(CLBR_NONE)
6879 - SAVE_ARGS 8,1,1
6880 + SAVE_ARGS 8*6,1,1
6881 movl %eax,%eax /* zero extension */
6882 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6883 movq %rcx,RIP-ARGOFFSET(%rsp)
6884 @@ -311,6 +355,12 @@ ENTRY(ia32_cstar_target)
6885 /* no need to do an access_ok check here because r8 has been
6886 32bit zero extended */
6887 /* hardware stack frame is complete now */
6888 +
6889 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6890 + mov $PAX_USER_SHADOW_BASE,%r10
6891 + add %r10,%r8
6892 +#endif
6893 +
6894 1: movl (%r8),%r9d
6895 .section __ex_table,"a"
6896 .quad 1b,ia32_badarg
6897 @@ -333,6 +383,8 @@ cstar_dispatch:
6898 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6899 jnz sysretl_audit
6900 sysretl_from_sys_call:
6901 + pax_exit_kernel_user
6902 + pax_erase_kstack
6903 andl $~TS_COMPAT,TI_status(%r10)
6904 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
6905 movl RIP-ARGOFFSET(%rsp),%ecx
6906 @@ -370,6 +422,9 @@ cstar_tracesys:
6907 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6908 movq %rsp,%rdi /* &pt_regs -> arg1 */
6909 call syscall_trace_enter
6910 +
6911 + pax_erase_kstack
6912 +
6913 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6914 RESTORE_REST
6915 xchgl %ebp,%r9d
6916 @@ -415,6 +470,7 @@ ENTRY(ia32_syscall)
6917 CFI_REL_OFFSET rip,RIP-RIP
6918 PARAVIRT_ADJUST_EXCEPTION_FRAME
6919 SWAPGS
6920 + pax_enter_kernel_user
6921 /*
6922 * No need to follow this irqs on/off section: the syscall
6923 * disabled irqs and here we enable it straight after entry:
6924 @@ -448,6 +504,9 @@ ia32_tracesys:
6925 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6926 movq %rsp,%rdi /* &pt_regs -> arg1 */
6927 call syscall_trace_enter
6928 +
6929 + pax_erase_kstack
6930 +
6931 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6932 RESTORE_REST
6933 cmpq $(IA32_NR_syscalls-1),%rax
6934 diff -urNp linux-2.6.32.45/arch/x86/ia32/ia32_signal.c linux-2.6.32.45/arch/x86/ia32/ia32_signal.c
6935 --- linux-2.6.32.45/arch/x86/ia32/ia32_signal.c 2011-03-27 14:31:47.000000000 -0400
6936 +++ linux-2.6.32.45/arch/x86/ia32/ia32_signal.c 2011-04-17 15:56:46.000000000 -0400
6937 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
6938 sp -= frame_size;
6939 /* Align the stack pointer according to the i386 ABI,
6940 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6941 - sp = ((sp + 4) & -16ul) - 4;
6942 + sp = ((sp - 12) & -16ul) - 4;
6943 return (void __user *) sp;
6944 }
6945
6946 @@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
6947 * These are actually not used anymore, but left because some
6948 * gdb versions depend on them as a marker.
6949 */
6950 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6951 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6952 } put_user_catch(err);
6953
6954 if (err)
6955 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
6956 0xb8,
6957 __NR_ia32_rt_sigreturn,
6958 0x80cd,
6959 - 0,
6960 + 0
6961 };
6962
6963 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6964 @@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
6965
6966 if (ka->sa.sa_flags & SA_RESTORER)
6967 restorer = ka->sa.sa_restorer;
6968 + else if (current->mm->context.vdso)
6969 + /* Return stub is in 32bit vsyscall page */
6970 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6971 else
6972 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6973 - rt_sigreturn);
6974 + restorer = &frame->retcode;
6975 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6976
6977 /*
6978 * Not actually used anymore, but left because some gdb
6979 * versions need it.
6980 */
6981 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6982 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6983 } put_user_catch(err);
6984
6985 if (err)
6986 diff -urNp linux-2.6.32.45/arch/x86/include/asm/alternative.h linux-2.6.32.45/arch/x86/include/asm/alternative.h
6987 --- linux-2.6.32.45/arch/x86/include/asm/alternative.h 2011-03-27 14:31:47.000000000 -0400
6988 +++ linux-2.6.32.45/arch/x86/include/asm/alternative.h 2011-04-17 15:56:46.000000000 -0400
6989 @@ -85,7 +85,7 @@ static inline void alternatives_smp_swit
6990 " .byte 662b-661b\n" /* sourcelen */ \
6991 " .byte 664f-663f\n" /* replacementlen */ \
6992 ".previous\n" \
6993 - ".section .altinstr_replacement, \"ax\"\n" \
6994 + ".section .altinstr_replacement, \"a\"\n" \
6995 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
6996 ".previous"
6997
6998 diff -urNp linux-2.6.32.45/arch/x86/include/asm/apic.h linux-2.6.32.45/arch/x86/include/asm/apic.h
6999 --- linux-2.6.32.45/arch/x86/include/asm/apic.h 2011-03-27 14:31:47.000000000 -0400
7000 +++ linux-2.6.32.45/arch/x86/include/asm/apic.h 2011-08-17 20:01:15.000000000 -0400
7001 @@ -46,7 +46,7 @@ static inline void generic_apic_probe(vo
7002
7003 #ifdef CONFIG_X86_LOCAL_APIC
7004
7005 -extern unsigned int apic_verbosity;
7006 +extern int apic_verbosity;
7007 extern int local_apic_timer_c2_ok;
7008
7009 extern int disable_apic;
7010 diff -urNp linux-2.6.32.45/arch/x86/include/asm/apm.h linux-2.6.32.45/arch/x86/include/asm/apm.h
7011 --- linux-2.6.32.45/arch/x86/include/asm/apm.h 2011-03-27 14:31:47.000000000 -0400
7012 +++ linux-2.6.32.45/arch/x86/include/asm/apm.h 2011-04-17 15:56:46.000000000 -0400
7013 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
7014 __asm__ __volatile__(APM_DO_ZERO_SEGS
7015 "pushl %%edi\n\t"
7016 "pushl %%ebp\n\t"
7017 - "lcall *%%cs:apm_bios_entry\n\t"
7018 + "lcall *%%ss:apm_bios_entry\n\t"
7019 "setc %%al\n\t"
7020 "popl %%ebp\n\t"
7021 "popl %%edi\n\t"
7022 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
7023 __asm__ __volatile__(APM_DO_ZERO_SEGS
7024 "pushl %%edi\n\t"
7025 "pushl %%ebp\n\t"
7026 - "lcall *%%cs:apm_bios_entry\n\t"
7027 + "lcall *%%ss:apm_bios_entry\n\t"
7028 "setc %%bl\n\t"
7029 "popl %%ebp\n\t"
7030 "popl %%edi\n\t"
7031 diff -urNp linux-2.6.32.45/arch/x86/include/asm/atomic_32.h linux-2.6.32.45/arch/x86/include/asm/atomic_32.h
7032 --- linux-2.6.32.45/arch/x86/include/asm/atomic_32.h 2011-03-27 14:31:47.000000000 -0400
7033 +++ linux-2.6.32.45/arch/x86/include/asm/atomic_32.h 2011-05-04 17:56:20.000000000 -0400
7034 @@ -25,6 +25,17 @@ static inline int atomic_read(const atom
7035 }
7036
7037 /**
7038 + * atomic_read_unchecked - read atomic variable
7039 + * @v: pointer of type atomic_unchecked_t
7040 + *
7041 + * Atomically reads the value of @v.
7042 + */
7043 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7044 +{
7045 + return v->counter;
7046 +}
7047 +
7048 +/**
7049 * atomic_set - set atomic variable
7050 * @v: pointer of type atomic_t
7051 * @i: required value
7052 @@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *
7053 }
7054
7055 /**
7056 + * atomic_set_unchecked - set atomic variable
7057 + * @v: pointer of type atomic_unchecked_t
7058 + * @i: required value
7059 + *
7060 + * Atomically sets the value of @v to @i.
7061 + */
7062 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7063 +{
7064 + v->counter = i;
7065 +}
7066 +
7067 +/**
7068 * atomic_add - add integer to atomic variable
7069 * @i: integer value to add
7070 * @v: pointer of type atomic_t
7071 @@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *
7072 */
7073 static inline void atomic_add(int i, atomic_t *v)
7074 {
7075 - asm volatile(LOCK_PREFIX "addl %1,%0"
7076 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7077 +
7078 +#ifdef CONFIG_PAX_REFCOUNT
7079 + "jno 0f\n"
7080 + LOCK_PREFIX "subl %1,%0\n"
7081 + "int $4\n0:\n"
7082 + _ASM_EXTABLE(0b, 0b)
7083 +#endif
7084 +
7085 + : "+m" (v->counter)
7086 + : "ir" (i));
7087 +}
7088 +
7089 +/**
7090 + * atomic_add_unchecked - add integer to atomic variable
7091 + * @i: integer value to add
7092 + * @v: pointer of type atomic_unchecked_t
7093 + *
7094 + * Atomically adds @i to @v.
7095 + */
7096 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7097 +{
7098 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7099 : "+m" (v->counter)
7100 : "ir" (i));
7101 }
7102 @@ -59,7 +104,29 @@ static inline void atomic_add(int i, ato
7103 */
7104 static inline void atomic_sub(int i, atomic_t *v)
7105 {
7106 - asm volatile(LOCK_PREFIX "subl %1,%0"
7107 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7108 +
7109 +#ifdef CONFIG_PAX_REFCOUNT
7110 + "jno 0f\n"
7111 + LOCK_PREFIX "addl %1,%0\n"
7112 + "int $4\n0:\n"
7113 + _ASM_EXTABLE(0b, 0b)
7114 +#endif
7115 +
7116 + : "+m" (v->counter)
7117 + : "ir" (i));
7118 +}
7119 +
7120 +/**
7121 + * atomic_sub_unchecked - subtract integer from atomic variable
7122 + * @i: integer value to subtract
7123 + * @v: pointer of type atomic_unchecked_t
7124 + *
7125 + * Atomically subtracts @i from @v.
7126 + */
7127 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7128 +{
7129 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7130 : "+m" (v->counter)
7131 : "ir" (i));
7132 }
7133 @@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(in
7134 {
7135 unsigned char c;
7136
7137 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7138 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
7139 +
7140 +#ifdef CONFIG_PAX_REFCOUNT
7141 + "jno 0f\n"
7142 + LOCK_PREFIX "addl %2,%0\n"
7143 + "int $4\n0:\n"
7144 + _ASM_EXTABLE(0b, 0b)
7145 +#endif
7146 +
7147 + "sete %1\n"
7148 : "+m" (v->counter), "=qm" (c)
7149 : "ir" (i) : "memory");
7150 return c;
7151 @@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(in
7152 */
7153 static inline void atomic_inc(atomic_t *v)
7154 {
7155 - asm volatile(LOCK_PREFIX "incl %0"
7156 + asm volatile(LOCK_PREFIX "incl %0\n"
7157 +
7158 +#ifdef CONFIG_PAX_REFCOUNT
7159 + "jno 0f\n"
7160 + LOCK_PREFIX "decl %0\n"
7161 + "int $4\n0:\n"
7162 + _ASM_EXTABLE(0b, 0b)
7163 +#endif
7164 +
7165 + : "+m" (v->counter));
7166 +}
7167 +
7168 +/**
7169 + * atomic_inc_unchecked - increment atomic variable
7170 + * @v: pointer of type atomic_unchecked_t
7171 + *
7172 + * Atomically increments @v by 1.
7173 + */
7174 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7175 +{
7176 + asm volatile(LOCK_PREFIX "incl %0\n"
7177 : "+m" (v->counter));
7178 }
7179
7180 @@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *
7181 */
7182 static inline void atomic_dec(atomic_t *v)
7183 {
7184 - asm volatile(LOCK_PREFIX "decl %0"
7185 + asm volatile(LOCK_PREFIX "decl %0\n"
7186 +
7187 +#ifdef CONFIG_PAX_REFCOUNT
7188 + "jno 0f\n"
7189 + LOCK_PREFIX "incl %0\n"
7190 + "int $4\n0:\n"
7191 + _ASM_EXTABLE(0b, 0b)
7192 +#endif
7193 +
7194 + : "+m" (v->counter));
7195 +}
7196 +
7197 +/**
7198 + * atomic_dec_unchecked - decrement atomic variable
7199 + * @v: pointer of type atomic_unchecked_t
7200 + *
7201 + * Atomically decrements @v by 1.
7202 + */
7203 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7204 +{
7205 + asm volatile(LOCK_PREFIX "decl %0\n"
7206 : "+m" (v->counter));
7207 }
7208
7209 @@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(at
7210 {
7211 unsigned char c;
7212
7213 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
7214 + asm volatile(LOCK_PREFIX "decl %0\n"
7215 +
7216 +#ifdef CONFIG_PAX_REFCOUNT
7217 + "jno 0f\n"
7218 + LOCK_PREFIX "incl %0\n"
7219 + "int $4\n0:\n"
7220 + _ASM_EXTABLE(0b, 0b)
7221 +#endif
7222 +
7223 + "sete %1\n"
7224 : "+m" (v->counter), "=qm" (c)
7225 : : "memory");
7226 return c != 0;
7227 @@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(at
7228 {
7229 unsigned char c;
7230
7231 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
7232 + asm volatile(LOCK_PREFIX "incl %0\n"
7233 +
7234 +#ifdef CONFIG_PAX_REFCOUNT
7235 + "jno 0f\n"
7236 + LOCK_PREFIX "decl %0\n"
7237 + "into\n0:\n"
7238 + _ASM_EXTABLE(0b, 0b)
7239 +#endif
7240 +
7241 + "sete %1\n"
7242 + : "+m" (v->counter), "=qm" (c)
7243 + : : "memory");
7244 + return c != 0;
7245 +}
7246 +
7247 +/**
7248 + * atomic_inc_and_test_unchecked - increment and test
7249 + * @v: pointer of type atomic_unchecked_t
7250 + *
7251 + * Atomically increments @v by 1
7252 + * and returns true if the result is zero, or false for all
7253 + * other cases.
7254 + */
7255 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7256 +{
7257 + unsigned char c;
7258 +
7259 + asm volatile(LOCK_PREFIX "incl %0\n"
7260 + "sete %1\n"
7261 : "+m" (v->counter), "=qm" (c)
7262 : : "memory");
7263 return c != 0;
7264 @@ -156,7 +309,16 @@ static inline int atomic_add_negative(in
7265 {
7266 unsigned char c;
7267
7268 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7269 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
7270 +
7271 +#ifdef CONFIG_PAX_REFCOUNT
7272 + "jno 0f\n"
7273 + LOCK_PREFIX "subl %2,%0\n"
7274 + "int $4\n0:\n"
7275 + _ASM_EXTABLE(0b, 0b)
7276 +#endif
7277 +
7278 + "sets %1\n"
7279 : "+m" (v->counter), "=qm" (c)
7280 : "ir" (i) : "memory");
7281 return c;
7282 @@ -179,6 +341,46 @@ static inline int atomic_add_return(int
7283 #endif
7284 /* Modern 486+ processor */
7285 __i = i;
7286 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7287 +
7288 +#ifdef CONFIG_PAX_REFCOUNT
7289 + "jno 0f\n"
7290 + "movl %0, %1\n"
7291 + "int $4\n0:\n"
7292 + _ASM_EXTABLE(0b, 0b)
7293 +#endif
7294 +
7295 + : "+r" (i), "+m" (v->counter)
7296 + : : "memory");
7297 + return i + __i;
7298 +
7299 +#ifdef CONFIG_M386
7300 +no_xadd: /* Legacy 386 processor */
7301 + local_irq_save(flags);
7302 + __i = atomic_read(v);
7303 + atomic_set(v, i + __i);
7304 + local_irq_restore(flags);
7305 + return i + __i;
7306 +#endif
7307 +}
7308 +
7309 +/**
7310 + * atomic_add_return_unchecked - add integer and return
7311 + * @v: pointer of type atomic_unchecked_t
7312 + * @i: integer value to add
7313 + *
7314 + * Atomically adds @i to @v and returns @i + @v
7315 + */
7316 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7317 +{
7318 + int __i;
7319 +#ifdef CONFIG_M386
7320 + unsigned long flags;
7321 + if (unlikely(boot_cpu_data.x86 <= 3))
7322 + goto no_xadd;
7323 +#endif
7324 + /* Modern 486+ processor */
7325 + __i = i;
7326 asm volatile(LOCK_PREFIX "xaddl %0, %1"
7327 : "+r" (i), "+m" (v->counter)
7328 : : "memory");
7329 @@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_
7330 return cmpxchg(&v->counter, old, new);
7331 }
7332
7333 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7334 +{
7335 + return cmpxchg(&v->counter, old, new);
7336 +}
7337 +
7338 static inline int atomic_xchg(atomic_t *v, int new)
7339 {
7340 return xchg(&v->counter, new);
7341 }
7342
7343 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7344 +{
7345 + return xchg(&v->counter, new);
7346 +}
7347 +
7348 /**
7349 * atomic_add_unless - add unless the number is already a given value
7350 * @v: pointer of type atomic_t
7351 @@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *
7352 */
7353 static inline int atomic_add_unless(atomic_t *v, int a, int u)
7354 {
7355 - int c, old;
7356 + int c, old, new;
7357 c = atomic_read(v);
7358 for (;;) {
7359 - if (unlikely(c == (u)))
7360 + if (unlikely(c == u))
7361 break;
7362 - old = atomic_cmpxchg((v), c, c + (a));
7363 +
7364 + asm volatile("addl %2,%0\n"
7365 +
7366 +#ifdef CONFIG_PAX_REFCOUNT
7367 + "jno 0f\n"
7368 + "subl %2,%0\n"
7369 + "int $4\n0:\n"
7370 + _ASM_EXTABLE(0b, 0b)
7371 +#endif
7372 +
7373 + : "=r" (new)
7374 + : "0" (c), "ir" (a));
7375 +
7376 + old = atomic_cmpxchg(v, c, new);
7377 if (likely(old == c))
7378 break;
7379 c = old;
7380 }
7381 - return c != (u);
7382 + return c != u;
7383 }
7384
7385 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
7386
7387 #define atomic_inc_return(v) (atomic_add_return(1, v))
7388 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7389 +{
7390 + return atomic_add_return_unchecked(1, v);
7391 +}
7392 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7393
7394 /* These are x86-specific, used by some header files */
7395 @@ -266,9 +495,18 @@ typedef struct {
7396 u64 __aligned(8) counter;
7397 } atomic64_t;
7398
7399 +#ifdef CONFIG_PAX_REFCOUNT
7400 +typedef struct {
7401 + u64 __aligned(8) counter;
7402 +} atomic64_unchecked_t;
7403 +#else
7404 +typedef atomic64_t atomic64_unchecked_t;
7405 +#endif
7406 +
7407 #define ATOMIC64_INIT(val) { (val) }
7408
7409 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
7410 +extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
7411
7412 /**
7413 * atomic64_xchg - xchg atomic64 variable
7414 @@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *
7415 * the old value.
7416 */
7417 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
7418 +extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7419
7420 /**
7421 * atomic64_set - set atomic64 variable
7422 @@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr
7423 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
7424
7425 /**
7426 + * atomic64_unchecked_set - set atomic64 variable
7427 + * @ptr: pointer to type atomic64_unchecked_t
7428 + * @new_val: value to assign
7429 + *
7430 + * Atomically sets the value of @ptr to @new_val.
7431 + */
7432 +extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7433 +
7434 +/**
7435 * atomic64_read - read atomic64 variable
7436 * @ptr: pointer to type atomic64_t
7437 *
7438 @@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64
7439 return res;
7440 }
7441
7442 -extern u64 atomic64_read(atomic64_t *ptr);
7443 +/**
7444 + * atomic64_read_unchecked - read atomic64 variable
7445 + * @ptr: pointer to type atomic64_unchecked_t
7446 + *
7447 + * Atomically reads the value of @ptr and returns it.
7448 + */
7449 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
7450 +{
7451 + u64 res;
7452 +
7453 + /*
7454 + * Note, we inline this atomic64_unchecked_t primitive because
7455 + * it only clobbers EAX/EDX and leaves the others
7456 + * untouched. We also (somewhat subtly) rely on the
7457 + * fact that cmpxchg8b returns the current 64-bit value
7458 + * of the memory location we are touching:
7459 + */
7460 + asm volatile(
7461 + "mov %%ebx, %%eax\n\t"
7462 + "mov %%ecx, %%edx\n\t"
7463 + LOCK_PREFIX "cmpxchg8b %1\n"
7464 + : "=&A" (res)
7465 + : "m" (*ptr)
7466 + );
7467 +
7468 + return res;
7469 +}
7470
7471 /**
7472 * atomic64_add_return - add and return
7473 @@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta
7474 * Other variants with different arithmetic operators:
7475 */
7476 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
7477 +extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7478 extern u64 atomic64_inc_return(atomic64_t *ptr);
7479 +extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
7480 extern u64 atomic64_dec_return(atomic64_t *ptr);
7481 +extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
7482
7483 /**
7484 * atomic64_add - add integer to atomic64 variable
7485 @@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_
7486 extern void atomic64_add(u64 delta, atomic64_t *ptr);
7487
7488 /**
7489 + * atomic64_add_unchecked - add integer to atomic64 variable
7490 + * @delta: integer value to add
7491 + * @ptr: pointer to type atomic64_unchecked_t
7492 + *
7493 + * Atomically adds @delta to @ptr.
7494 + */
7495 +extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7496 +
7497 +/**
7498 * atomic64_sub - subtract the atomic64 variable
7499 * @delta: integer value to subtract
7500 * @ptr: pointer to type atomic64_t
7501 @@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atom
7502 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
7503
7504 /**
7505 + * atomic64_sub_unchecked - subtract the atomic64 variable
7506 + * @delta: integer value to subtract
7507 + * @ptr: pointer to type atomic64_unchecked_t
7508 + *
7509 + * Atomically subtracts @delta from @ptr.
7510 + */
7511 +extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7512 +
7513 +/**
7514 * atomic64_sub_and_test - subtract value from variable and test result
7515 * @delta: integer value to subtract
7516 * @ptr: pointer to type atomic64_t
7517 @@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 del
7518 extern void atomic64_inc(atomic64_t *ptr);
7519
7520 /**
7521 + * atomic64_inc_unchecked - increment atomic64 variable
7522 + * @ptr: pointer to type atomic64_unchecked_t
7523 + *
7524 + * Atomically increments @ptr by 1.
7525 + */
7526 +extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
7527 +
7528 +/**
7529 * atomic64_dec - decrement atomic64 variable
7530 * @ptr: pointer to type atomic64_t
7531 *
7532 @@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr
7533 extern void atomic64_dec(atomic64_t *ptr);
7534
7535 /**
7536 + * atomic64_dec_unchecked - decrement atomic64 variable
7537 + * @ptr: pointer to type atomic64_unchecked_t
7538 + *
7539 + * Atomically decrements @ptr by 1.
7540 + */
7541 +extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
7542 +
7543 +/**
7544 * atomic64_dec_and_test - decrement and test
7545 * @ptr: pointer to type atomic64_t
7546 *
7547 diff -urNp linux-2.6.32.45/arch/x86/include/asm/atomic_64.h linux-2.6.32.45/arch/x86/include/asm/atomic_64.h
7548 --- linux-2.6.32.45/arch/x86/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
7549 +++ linux-2.6.32.45/arch/x86/include/asm/atomic_64.h 2011-05-04 18:35:31.000000000 -0400
7550 @@ -24,6 +24,17 @@ static inline int atomic_read(const atom
7551 }
7552
7553 /**
7554 + * atomic_read_unchecked - read atomic variable
7555 + * @v: pointer of type atomic_unchecked_t
7556 + *
7557 + * Atomically reads the value of @v.
7558 + */
7559 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7560 +{
7561 + return v->counter;
7562 +}
7563 +
7564 +/**
7565 * atomic_set - set atomic variable
7566 * @v: pointer of type atomic_t
7567 * @i: required value
7568 @@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *
7569 }
7570
7571 /**
7572 + * atomic_set_unchecked - set atomic variable
7573 + * @v: pointer of type atomic_unchecked_t
7574 + * @i: required value
7575 + *
7576 + * Atomically sets the value of @v to @i.
7577 + */
7578 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7579 +{
7580 + v->counter = i;
7581 +}
7582 +
7583 +/**
7584 * atomic_add - add integer to atomic variable
7585 * @i: integer value to add
7586 * @v: pointer of type atomic_t
7587 @@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *
7588 */
7589 static inline void atomic_add(int i, atomic_t *v)
7590 {
7591 - asm volatile(LOCK_PREFIX "addl %1,%0"
7592 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7593 +
7594 +#ifdef CONFIG_PAX_REFCOUNT
7595 + "jno 0f\n"
7596 + LOCK_PREFIX "subl %1,%0\n"
7597 + "int $4\n0:\n"
7598 + _ASM_EXTABLE(0b, 0b)
7599 +#endif
7600 +
7601 + : "=m" (v->counter)
7602 + : "ir" (i), "m" (v->counter));
7603 +}
7604 +
7605 +/**
7606 + * atomic_add_unchecked - add integer to atomic variable
7607 + * @i: integer value to add
7608 + * @v: pointer of type atomic_unchecked_t
7609 + *
7610 + * Atomically adds @i to @v.
7611 + */
7612 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7613 +{
7614 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7615 : "=m" (v->counter)
7616 : "ir" (i), "m" (v->counter));
7617 }
7618 @@ -58,7 +103,29 @@ static inline void atomic_add(int i, ato
7619 */
7620 static inline void atomic_sub(int i, atomic_t *v)
7621 {
7622 - asm volatile(LOCK_PREFIX "subl %1,%0"
7623 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7624 +
7625 +#ifdef CONFIG_PAX_REFCOUNT
7626 + "jno 0f\n"
7627 + LOCK_PREFIX "addl %1,%0\n"
7628 + "int $4\n0:\n"
7629 + _ASM_EXTABLE(0b, 0b)
7630 +#endif
7631 +
7632 + : "=m" (v->counter)
7633 + : "ir" (i), "m" (v->counter));
7634 +}
7635 +
7636 +/**
7637 + * atomic_sub_unchecked - subtract the atomic variable
7638 + * @i: integer value to subtract
7639 + * @v: pointer of type atomic_unchecked_t
7640 + *
7641 + * Atomically subtracts @i from @v.
7642 + */
7643 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7644 +{
7645 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7646 : "=m" (v->counter)
7647 : "ir" (i), "m" (v->counter));
7648 }
7649 @@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(in
7650 {
7651 unsigned char c;
7652
7653 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7654 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
7655 +
7656 +#ifdef CONFIG_PAX_REFCOUNT
7657 + "jno 0f\n"
7658 + LOCK_PREFIX "addl %2,%0\n"
7659 + "int $4\n0:\n"
7660 + _ASM_EXTABLE(0b, 0b)
7661 +#endif
7662 +
7663 + "sete %1\n"
7664 : "=m" (v->counter), "=qm" (c)
7665 : "ir" (i), "m" (v->counter) : "memory");
7666 return c;
7667 @@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(in
7668 */
7669 static inline void atomic_inc(atomic_t *v)
7670 {
7671 - asm volatile(LOCK_PREFIX "incl %0"
7672 + asm volatile(LOCK_PREFIX "incl %0\n"
7673 +
7674 +#ifdef CONFIG_PAX_REFCOUNT
7675 + "jno 0f\n"
7676 + LOCK_PREFIX "decl %0\n"
7677 + "int $4\n0:\n"
7678 + _ASM_EXTABLE(0b, 0b)
7679 +#endif
7680 +
7681 + : "=m" (v->counter)
7682 + : "m" (v->counter));
7683 +}
7684 +
7685 +/**
7686 + * atomic_inc_unchecked - increment atomic variable
7687 + * @v: pointer of type atomic_unchecked_t
7688 + *
7689 + * Atomically increments @v by 1.
7690 + */
7691 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7692 +{
7693 + asm volatile(LOCK_PREFIX "incl %0\n"
7694 : "=m" (v->counter)
7695 : "m" (v->counter));
7696 }
7697 @@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *
7698 */
7699 static inline void atomic_dec(atomic_t *v)
7700 {
7701 - asm volatile(LOCK_PREFIX "decl %0"
7702 + asm volatile(LOCK_PREFIX "decl %0\n"
7703 +
7704 +#ifdef CONFIG_PAX_REFCOUNT
7705 + "jno 0f\n"
7706 + LOCK_PREFIX "incl %0\n"
7707 + "int $4\n0:\n"
7708 + _ASM_EXTABLE(0b, 0b)
7709 +#endif
7710 +
7711 + : "=m" (v->counter)
7712 + : "m" (v->counter));
7713 +}
7714 +
7715 +/**
7716 + * atomic_dec_unchecked - decrement atomic variable
7717 + * @v: pointer of type atomic_unchecked_t
7718 + *
7719 + * Atomically decrements @v by 1.
7720 + */
7721 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7722 +{
7723 + asm volatile(LOCK_PREFIX "decl %0\n"
7724 : "=m" (v->counter)
7725 : "m" (v->counter));
7726 }
7727 @@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(at
7728 {
7729 unsigned char c;
7730
7731 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
7732 + asm volatile(LOCK_PREFIX "decl %0\n"
7733 +
7734 +#ifdef CONFIG_PAX_REFCOUNT
7735 + "jno 0f\n"
7736 + LOCK_PREFIX "incl %0\n"
7737 + "int $4\n0:\n"
7738 + _ASM_EXTABLE(0b, 0b)
7739 +#endif
7740 +
7741 + "sete %1\n"
7742 : "=m" (v->counter), "=qm" (c)
7743 : "m" (v->counter) : "memory");
7744 return c != 0;
7745 @@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(at
7746 {
7747 unsigned char c;
7748
7749 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
7750 + asm volatile(LOCK_PREFIX "incl %0\n"
7751 +
7752 +#ifdef CONFIG_PAX_REFCOUNT
7753 + "jno 0f\n"
7754 + LOCK_PREFIX "decl %0\n"
7755 + "int $4\n0:\n"
7756 + _ASM_EXTABLE(0b, 0b)
7757 +#endif
7758 +
7759 + "sete %1\n"
7760 + : "=m" (v->counter), "=qm" (c)
7761 + : "m" (v->counter) : "memory");
7762 + return c != 0;
7763 +}
7764 +
7765 +/**
7766 + * atomic_inc_and_test_unchecked - increment and test
7767 + * @v: pointer of type atomic_unchecked_t
7768 + *
7769 + * Atomically increments @v by 1
7770 + * and returns true if the result is zero, or false for all
7771 + * other cases.
7772 + */
7773 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7774 +{
7775 + unsigned char c;
7776 +
7777 + asm volatile(LOCK_PREFIX "incl %0\n"
7778 + "sete %1\n"
7779 : "=m" (v->counter), "=qm" (c)
7780 : "m" (v->counter) : "memory");
7781 return c != 0;
7782 @@ -157,7 +312,16 @@ static inline int atomic_add_negative(in
7783 {
7784 unsigned char c;
7785
7786 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7787 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
7788 +
7789 +#ifdef CONFIG_PAX_REFCOUNT
7790 + "jno 0f\n"
7791 + LOCK_PREFIX "subl %2,%0\n"
7792 + "int $4\n0:\n"
7793 + _ASM_EXTABLE(0b, 0b)
7794 +#endif
7795 +
7796 + "sets %1\n"
7797 : "=m" (v->counter), "=qm" (c)
7798 : "ir" (i), "m" (v->counter) : "memory");
7799 return c;
7800 @@ -173,7 +337,31 @@ static inline int atomic_add_negative(in
7801 static inline int atomic_add_return(int i, atomic_t *v)
7802 {
7803 int __i = i;
7804 - asm volatile(LOCK_PREFIX "xaddl %0, %1"
7805 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7806 +
7807 +#ifdef CONFIG_PAX_REFCOUNT
7808 + "jno 0f\n"
7809 + "movl %0, %1\n"
7810 + "int $4\n0:\n"
7811 + _ASM_EXTABLE(0b, 0b)
7812 +#endif
7813 +
7814 + : "+r" (i), "+m" (v->counter)
7815 + : : "memory");
7816 + return i + __i;
7817 +}
7818 +
7819 +/**
7820 + * atomic_add_return_unchecked - add and return
7821 + * @i: integer value to add
7822 + * @v: pointer of type atomic_unchecked_t
7823 + *
7824 + * Atomically adds @i to @v and returns @i + @v
7825 + */
7826 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7827 +{
7828 + int __i = i;
7829 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7830 : "+r" (i), "+m" (v->counter)
7831 : : "memory");
7832 return i + __i;
7833 @@ -185,6 +373,10 @@ static inline int atomic_sub_return(int
7834 }
7835
7836 #define atomic_inc_return(v) (atomic_add_return(1, v))
7837 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7838 +{
7839 + return atomic_add_return_unchecked(1, v);
7840 +}
7841 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7842
7843 /* The 64-bit atomic type */
7844 @@ -204,6 +396,18 @@ static inline long atomic64_read(const a
7845 }
7846
7847 /**
7848 + * atomic64_read_unchecked - read atomic64 variable
7849 + * @v: pointer of type atomic64_unchecked_t
7850 + *
7851 + * Atomically reads the value of @v.
7852 + * Doesn't imply a read memory barrier.
7853 + */
7854 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7855 +{
7856 + return v->counter;
7857 +}
7858 +
7859 +/**
7860 * atomic64_set - set atomic64 variable
7861 * @v: pointer to type atomic64_t
7862 * @i: required value
7863 @@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64
7864 }
7865
7866 /**
7867 + * atomic64_set_unchecked - set atomic64 variable
7868 + * @v: pointer to type atomic64_unchecked_t
7869 + * @i: required value
7870 + *
7871 + * Atomically sets the value of @v to @i.
7872 + */
7873 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7874 +{
7875 + v->counter = i;
7876 +}
7877 +
7878 +/**
7879 * atomic64_add - add integer to atomic64 variable
7880 * @i: integer value to add
7881 * @v: pointer to type atomic64_t
7882 @@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64
7883 */
7884 static inline void atomic64_add(long i, atomic64_t *v)
7885 {
7886 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
7887 +
7888 +#ifdef CONFIG_PAX_REFCOUNT
7889 + "jno 0f\n"
7890 + LOCK_PREFIX "subq %1,%0\n"
7891 + "int $4\n0:\n"
7892 + _ASM_EXTABLE(0b, 0b)
7893 +#endif
7894 +
7895 + : "=m" (v->counter)
7896 + : "er" (i), "m" (v->counter));
7897 +}
7898 +
7899 +/**
7900 + * atomic64_add_unchecked - add integer to atomic64 variable
7901 + * @i: integer value to add
7902 + * @v: pointer to type atomic64_unchecked_t
7903 + *
7904 + * Atomically adds @i to @v.
7905 + */
7906 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
7907 +{
7908 asm volatile(LOCK_PREFIX "addq %1,%0"
7909 : "=m" (v->counter)
7910 : "er" (i), "m" (v->counter));
7911 @@ -238,7 +476,15 @@ static inline void atomic64_add(long i,
7912 */
7913 static inline void atomic64_sub(long i, atomic64_t *v)
7914 {
7915 - asm volatile(LOCK_PREFIX "subq %1,%0"
7916 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
7917 +
7918 +#ifdef CONFIG_PAX_REFCOUNT
7919 + "jno 0f\n"
7920 + LOCK_PREFIX "addq %1,%0\n"
7921 + "int $4\n0:\n"
7922 + _ASM_EXTABLE(0b, 0b)
7923 +#endif
7924 +
7925 : "=m" (v->counter)
7926 : "er" (i), "m" (v->counter));
7927 }
7928 @@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(
7929 {
7930 unsigned char c;
7931
7932 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
7933 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
7934 +
7935 +#ifdef CONFIG_PAX_REFCOUNT
7936 + "jno 0f\n"
7937 + LOCK_PREFIX "addq %2,%0\n"
7938 + "int $4\n0:\n"
7939 + _ASM_EXTABLE(0b, 0b)
7940 +#endif
7941 +
7942 + "sete %1\n"
7943 : "=m" (v->counter), "=qm" (c)
7944 : "er" (i), "m" (v->counter) : "memory");
7945 return c;
7946 @@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(
7947 */
7948 static inline void atomic64_inc(atomic64_t *v)
7949 {
7950 + asm volatile(LOCK_PREFIX "incq %0\n"
7951 +
7952 +#ifdef CONFIG_PAX_REFCOUNT
7953 + "jno 0f\n"
7954 + LOCK_PREFIX "decq %0\n"
7955 + "int $4\n0:\n"
7956 + _ASM_EXTABLE(0b, 0b)
7957 +#endif
7958 +
7959 + : "=m" (v->counter)
7960 + : "m" (v->counter));
7961 +}
7962 +
7963 +/**
7964 + * atomic64_inc_unchecked - increment atomic64 variable
7965 + * @v: pointer to type atomic64_unchecked_t
7966 + *
7967 + * Atomically increments @v by 1.
7968 + */
7969 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7970 +{
7971 asm volatile(LOCK_PREFIX "incq %0"
7972 : "=m" (v->counter)
7973 : "m" (v->counter));
7974 @@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64
7975 */
7976 static inline void atomic64_dec(atomic64_t *v)
7977 {
7978 - asm volatile(LOCK_PREFIX "decq %0"
7979 + asm volatile(LOCK_PREFIX "decq %0\n"
7980 +
7981 +#ifdef CONFIG_PAX_REFCOUNT
7982 + "jno 0f\n"
7983 + LOCK_PREFIX "incq %0\n"
7984 + "int $4\n0:\n"
7985 + _ASM_EXTABLE(0b, 0b)
7986 +#endif
7987 +
7988 + : "=m" (v->counter)
7989 + : "m" (v->counter));
7990 +}
7991 +
7992 +/**
7993 + * atomic64_dec_unchecked - decrement atomic64 variable
7994 + * @v: pointer to type atomic64_unchecked_t
7995 + *
7996 + * Atomically decrements @v by 1.
7997 + */
7998 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7999 +{
8000 + asm volatile(LOCK_PREFIX "decq %0\n"
8001 : "=m" (v->counter)
8002 : "m" (v->counter));
8003 }
8004 @@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(
8005 {
8006 unsigned char c;
8007
8008 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
8009 + asm volatile(LOCK_PREFIX "decq %0\n"
8010 +
8011 +#ifdef CONFIG_PAX_REFCOUNT
8012 + "jno 0f\n"
8013 + LOCK_PREFIX "incq %0\n"
8014 + "int $4\n0:\n"
8015 + _ASM_EXTABLE(0b, 0b)
8016 +#endif
8017 +
8018 + "sete %1\n"
8019 : "=m" (v->counter), "=qm" (c)
8020 : "m" (v->counter) : "memory");
8021 return c != 0;
8022 @@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(
8023 {
8024 unsigned char c;
8025
8026 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
8027 + asm volatile(LOCK_PREFIX "incq %0\n"
8028 +
8029 +#ifdef CONFIG_PAX_REFCOUNT
8030 + "jno 0f\n"
8031 + LOCK_PREFIX "decq %0\n"
8032 + "int $4\n0:\n"
8033 + _ASM_EXTABLE(0b, 0b)
8034 +#endif
8035 +
8036 + "sete %1\n"
8037 : "=m" (v->counter), "=qm" (c)
8038 : "m" (v->counter) : "memory");
8039 return c != 0;
8040 @@ -337,7 +652,16 @@ static inline int atomic64_add_negative(
8041 {
8042 unsigned char c;
8043
8044 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
8045 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
8046 +
8047 +#ifdef CONFIG_PAX_REFCOUNT
8048 + "jno 0f\n"
8049 + LOCK_PREFIX "subq %2,%0\n"
8050 + "int $4\n0:\n"
8051 + _ASM_EXTABLE(0b, 0b)
8052 +#endif
8053 +
8054 + "sets %1\n"
8055 : "=m" (v->counter), "=qm" (c)
8056 : "er" (i), "m" (v->counter) : "memory");
8057 return c;
8058 @@ -353,7 +677,31 @@ static inline int atomic64_add_negative(
8059 static inline long atomic64_add_return(long i, atomic64_t *v)
8060 {
8061 long __i = i;
8062 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
8063 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
8064 +
8065 +#ifdef CONFIG_PAX_REFCOUNT
8066 + "jno 0f\n"
8067 + "movq %0, %1\n"
8068 + "int $4\n0:\n"
8069 + _ASM_EXTABLE(0b, 0b)
8070 +#endif
8071 +
8072 + : "+r" (i), "+m" (v->counter)
8073 + : : "memory");
8074 + return i + __i;
8075 +}
8076 +
8077 +/**
8078 + * atomic64_add_return_unchecked - add and return
8079 + * @i: integer value to add
8080 + * @v: pointer to type atomic64_unchecked_t
8081 + *
8082 + * Atomically adds @i to @v and returns @i + @v
8083 + */
8084 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
8085 +{
8086 + long __i = i;
8087 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
8088 : "+r" (i), "+m" (v->counter)
8089 : : "memory");
8090 return i + __i;
8091 @@ -365,6 +713,10 @@ static inline long atomic64_sub_return(l
8092 }
8093
8094 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
8095 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8096 +{
8097 + return atomic64_add_return_unchecked(1, v);
8098 +}
8099 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
8100
8101 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
8102 @@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atom
8103 return cmpxchg(&v->counter, old, new);
8104 }
8105
8106 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8107 +{
8108 + return cmpxchg(&v->counter, old, new);
8109 +}
8110 +
8111 static inline long atomic64_xchg(atomic64_t *v, long new)
8112 {
8113 return xchg(&v->counter, new);
8114 }
8115
8116 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
8117 +{
8118 + return xchg(&v->counter, new);
8119 +}
8120 +
8121 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
8122 {
8123 return cmpxchg(&v->counter, old, new);
8124 }
8125
8126 +static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8127 +{
8128 + return cmpxchg(&v->counter, old, new);
8129 +}
8130 +
8131 static inline long atomic_xchg(atomic_t *v, int new)
8132 {
8133 return xchg(&v->counter, new);
8134 }
8135
8136 +static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8137 +{
8138 + return xchg(&v->counter, new);
8139 +}
8140 +
8141 /**
8142 * atomic_add_unless - add unless the number is a given value
8143 * @v: pointer of type atomic_t
8144 @@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t
8145 */
8146 static inline int atomic_add_unless(atomic_t *v, int a, int u)
8147 {
8148 - int c, old;
8149 + int c, old, new;
8150 c = atomic_read(v);
8151 for (;;) {
8152 - if (unlikely(c == (u)))
8153 + if (unlikely(c == u))
8154 break;
8155 - old = atomic_cmpxchg((v), c, c + (a));
8156 +
8157 + asm volatile("addl %2,%0\n"
8158 +
8159 +#ifdef CONFIG_PAX_REFCOUNT
8160 + "jno 0f\n"
8161 + "subl %2,%0\n"
8162 + "int $4\n0:\n"
8163 + _ASM_EXTABLE(0b, 0b)
8164 +#endif
8165 +
8166 + : "=r" (new)
8167 + : "0" (c), "ir" (a));
8168 +
8169 + old = atomic_cmpxchg(v, c, new);
8170 if (likely(old == c))
8171 break;
8172 c = old;
8173 }
8174 - return c != (u);
8175 + return c != u;
8176 }
8177
8178 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
8179 @@ -424,17 +809,30 @@ static inline int atomic_add_unless(atom
8180 */
8181 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
8182 {
8183 - long c, old;
8184 + long c, old, new;
8185 c = atomic64_read(v);
8186 for (;;) {
8187 - if (unlikely(c == (u)))
8188 + if (unlikely(c == u))
8189 break;
8190 - old = atomic64_cmpxchg((v), c, c + (a));
8191 +
8192 + asm volatile("addq %2,%0\n"
8193 +
8194 +#ifdef CONFIG_PAX_REFCOUNT
8195 + "jno 0f\n"
8196 + "subq %2,%0\n"
8197 + "int $4\n0:\n"
8198 + _ASM_EXTABLE(0b, 0b)
8199 +#endif
8200 +
8201 + : "=r" (new)
8202 + : "0" (c), "er" (a));
8203 +
8204 + old = atomic64_cmpxchg(v, c, new);
8205 if (likely(old == c))
8206 break;
8207 c = old;
8208 }
8209 - return c != (u);
8210 + return c != u;
8211 }
8212
8213 /**
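
Note: every CONFIG_PAX_REFCOUNT block in the hunk above follows the same shape: perform the locked operation, skip ahead with `jno` if the overflow flag is clear, otherwise undo the operation and execute `int $4` so the overflow exception handler can deal with the offending task. A rough userspace approximation of that pattern, assuming x86-64 GCC inline asm, with abort() standing in for `int $4` plus the exception-table entry:

/*
 * Sketch of the PAX_REFCOUNT pattern: locked add, detect signed overflow,
 * undo the add and bail out.  Userspace approximation, not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct { volatile int counter; } atomic_t;

static void atomic_add_checked(int i, atomic_t *v)
{
	unsigned char overflowed;

	asm volatile("lock addl %2,%0\n\t"
		     "seto %1"                   /* OF set => signed overflow */
		     : "+m" (v->counter), "=qm" (overflowed)
		     : "ir" (i)
		     : "memory", "cc");

	if (overflowed) {
		/* kernel: undo the add, then raise int $4 */
		asm volatile("lock subl %1,%0"
			     : "+m" (v->counter)
			     : "ir" (i)
			     : "memory", "cc");
		abort();
	}
}

int main(void)
{
	atomic_t v = { 100 };

	atomic_add_checked(1, &v);             /* fine */
	printf("counter = %d\n", v.counter);   /* 101  */

	v.counter = 0x7fffffff;                /* INT_MAX */
	atomic_add_checked(1, &v);             /* overflow: undone, then abort() */
	return 0;
}
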
8214 diff -urNp linux-2.6.32.45/arch/x86/include/asm/bitops.h linux-2.6.32.45/arch/x86/include/asm/bitops.h
8215 --- linux-2.6.32.45/arch/x86/include/asm/bitops.h 2011-03-27 14:31:47.000000000 -0400
8216 +++ linux-2.6.32.45/arch/x86/include/asm/bitops.h 2011-04-17 15:56:46.000000000 -0400
8217 @@ -38,7 +38,7 @@
8218 * a mask operation on a byte.
8219 */
8220 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
8221 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
8222 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
8223 #define CONST_MASK(nr) (1 << ((nr) & 7))
8224
8225 /**
8226 diff -urNp linux-2.6.32.45/arch/x86/include/asm/boot.h linux-2.6.32.45/arch/x86/include/asm/boot.h
8227 --- linux-2.6.32.45/arch/x86/include/asm/boot.h 2011-03-27 14:31:47.000000000 -0400
8228 +++ linux-2.6.32.45/arch/x86/include/asm/boot.h 2011-04-17 15:56:46.000000000 -0400
8229 @@ -11,10 +11,15 @@
8230 #include <asm/pgtable_types.h>
8231
8232 /* Physical address where kernel should be loaded. */
8233 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8234 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8235 + (CONFIG_PHYSICAL_ALIGN - 1)) \
8236 & ~(CONFIG_PHYSICAL_ALIGN - 1))
8237
8238 +#ifndef __ASSEMBLY__
8239 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
8240 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
8241 +#endif
8242 +
8243 /* Minimum kernel alignment, as a power of two */
8244 #ifdef CONFIG_X86_64
8245 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
8246 diff -urNp linux-2.6.32.45/arch/x86/include/asm/cacheflush.h linux-2.6.32.45/arch/x86/include/asm/cacheflush.h
8247 --- linux-2.6.32.45/arch/x86/include/asm/cacheflush.h 2011-03-27 14:31:47.000000000 -0400
8248 +++ linux-2.6.32.45/arch/x86/include/asm/cacheflush.h 2011-04-17 15:56:46.000000000 -0400
8249 @@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
8250 static inline unsigned long get_page_memtype(struct page *pg)
8251 {
8252 if (!PageUncached(pg) && !PageWC(pg))
8253 - return -1;
8254 + return ~0UL;
8255 else if (!PageUncached(pg) && PageWC(pg))
8256 return _PAGE_CACHE_WC;
8257 else if (PageUncached(pg) && !PageWC(pg))
8258 @@ -85,7 +85,7 @@ static inline void set_page_memtype(stru
8259 SetPageWC(pg);
8260 break;
8261 default:
8262 - case -1:
8263 + case ~0UL:
8264 ClearPageUncached(pg);
8265 ClearPageWC(pg);
8266 break;
8267 diff -urNp linux-2.6.32.45/arch/x86/include/asm/cache.h linux-2.6.32.45/arch/x86/include/asm/cache.h
8268 --- linux-2.6.32.45/arch/x86/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
8269 +++ linux-2.6.32.45/arch/x86/include/asm/cache.h 2011-07-06 19:53:33.000000000 -0400
8270 @@ -5,9 +5,10 @@
8271
8272 /* L1 cache line size */
8273 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8274 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8275 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8276
8277 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
8278 +#define __read_only __attribute__((__section__(".data.read_only")))
8279
8280 #ifdef CONFIG_X86_VSMP
8281 /* vSMP Internode cacheline shift */
8282 diff -urNp linux-2.6.32.45/arch/x86/include/asm/checksum_32.h linux-2.6.32.45/arch/x86/include/asm/checksum_32.h
8283 --- linux-2.6.32.45/arch/x86/include/asm/checksum_32.h 2011-03-27 14:31:47.000000000 -0400
8284 +++ linux-2.6.32.45/arch/x86/include/asm/checksum_32.h 2011-04-17 15:56:46.000000000 -0400
8285 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
8286 int len, __wsum sum,
8287 int *src_err_ptr, int *dst_err_ptr);
8288
8289 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
8290 + int len, __wsum sum,
8291 + int *src_err_ptr, int *dst_err_ptr);
8292 +
8293 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
8294 + int len, __wsum sum,
8295 + int *src_err_ptr, int *dst_err_ptr);
8296 +
8297 /*
8298 * Note: when you get a NULL pointer exception here this means someone
8299 * passed in an incorrect kernel address to one of these functions.
8300 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
8301 int *err_ptr)
8302 {
8303 might_sleep();
8304 - return csum_partial_copy_generic((__force void *)src, dst,
8305 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
8306 len, sum, err_ptr, NULL);
8307 }
8308
8309 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
8310 {
8311 might_sleep();
8312 if (access_ok(VERIFY_WRITE, dst, len))
8313 - return csum_partial_copy_generic(src, (__force void *)dst,
8314 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
8315 len, sum, NULL, err_ptr);
8316
8317 if (len)
8318 diff -urNp linux-2.6.32.45/arch/x86/include/asm/desc_defs.h linux-2.6.32.45/arch/x86/include/asm/desc_defs.h
8319 --- linux-2.6.32.45/arch/x86/include/asm/desc_defs.h 2011-03-27 14:31:47.000000000 -0400
8320 +++ linux-2.6.32.45/arch/x86/include/asm/desc_defs.h 2011-04-17 15:56:46.000000000 -0400
8321 @@ -31,6 +31,12 @@ struct desc_struct {
8322 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
8323 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
8324 };
8325 + struct {
8326 + u16 offset_low;
8327 + u16 seg;
8328 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
8329 + unsigned offset_high: 16;
8330 + } gate;
8331 };
8332 } __attribute__((packed));
8333
8334 diff -urNp linux-2.6.32.45/arch/x86/include/asm/desc.h linux-2.6.32.45/arch/x86/include/asm/desc.h
8335 --- linux-2.6.32.45/arch/x86/include/asm/desc.h 2011-03-27 14:31:47.000000000 -0400
8336 +++ linux-2.6.32.45/arch/x86/include/asm/desc.h 2011-04-23 12:56:10.000000000 -0400
8337 @@ -4,6 +4,7 @@
8338 #include <asm/desc_defs.h>
8339 #include <asm/ldt.h>
8340 #include <asm/mmu.h>
8341 +#include <asm/pgtable.h>
8342 #include <linux/smp.h>
8343
8344 static inline void fill_ldt(struct desc_struct *desc,
8345 @@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_
8346 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
8347 desc->type = (info->read_exec_only ^ 1) << 1;
8348 desc->type |= info->contents << 2;
8349 + desc->type |= info->seg_not_present ^ 1;
8350 desc->s = 1;
8351 desc->dpl = 0x3;
8352 desc->p = info->seg_not_present ^ 1;
8353 @@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_
8354 }
8355
8356 extern struct desc_ptr idt_descr;
8357 -extern gate_desc idt_table[];
8358 -
8359 -struct gdt_page {
8360 - struct desc_struct gdt[GDT_ENTRIES];
8361 -} __attribute__((aligned(PAGE_SIZE)));
8362 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
8363 +extern gate_desc idt_table[256];
8364
8365 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
8366 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
8367 {
8368 - return per_cpu(gdt_page, cpu).gdt;
8369 + return cpu_gdt_table[cpu];
8370 }
8371
8372 #ifdef CONFIG_X86_64
8373 @@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *
8374 unsigned long base, unsigned dpl, unsigned flags,
8375 unsigned short seg)
8376 {
8377 - gate->a = (seg << 16) | (base & 0xffff);
8378 - gate->b = (base & 0xffff0000) |
8379 - (((0x80 | type | (dpl << 5)) & 0xff) << 8);
8380 + gate->gate.offset_low = base;
8381 + gate->gate.seg = seg;
8382 + gate->gate.reserved = 0;
8383 + gate->gate.type = type;
8384 + gate->gate.s = 0;
8385 + gate->gate.dpl = dpl;
8386 + gate->gate.p = 1;
8387 + gate->gate.offset_high = base >> 16;
8388 }
8389
8390 #endif
8391 @@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(str
8392 static inline void native_write_idt_entry(gate_desc *idt, int entry,
8393 const gate_desc *gate)
8394 {
8395 + pax_open_kernel();
8396 memcpy(&idt[entry], gate, sizeof(*gate));
8397 + pax_close_kernel();
8398 }
8399
8400 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
8401 const void *desc)
8402 {
8403 + pax_open_kernel();
8404 memcpy(&ldt[entry], desc, 8);
8405 + pax_close_kernel();
8406 }
8407
8408 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
8409 @@ -139,7 +146,10 @@ static inline void native_write_gdt_entr
8410 size = sizeof(struct desc_struct);
8411 break;
8412 }
8413 +
8414 + pax_open_kernel();
8415 memcpy(&gdt[entry], desc, size);
8416 + pax_close_kernel();
8417 }
8418
8419 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
8420 @@ -211,7 +221,9 @@ static inline void native_set_ldt(const
8421
8422 static inline void native_load_tr_desc(void)
8423 {
8424 + pax_open_kernel();
8425 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
8426 + pax_close_kernel();
8427 }
8428
8429 static inline void native_load_gdt(const struct desc_ptr *dtr)
8430 @@ -246,8 +258,10 @@ static inline void native_load_tls(struc
8431 unsigned int i;
8432 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
8433
8434 + pax_open_kernel();
8435 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8436 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
8437 + pax_close_kernel();
8438 }
8439
8440 #define _LDT_empty(info) \
8441 @@ -309,7 +323,7 @@ static inline void set_desc_limit(struct
8442 desc->limit = (limit >> 16) & 0xf;
8443 }
8444
8445 -static inline void _set_gate(int gate, unsigned type, void *addr,
8446 +static inline void _set_gate(int gate, unsigned type, const void *addr,
8447 unsigned dpl, unsigned ist, unsigned seg)
8448 {
8449 gate_desc s;
8450 @@ -327,7 +341,7 @@ static inline void _set_gate(int gate, u
8451 * Pentium F0 0F bugfix can have resulted in the mapped
8452 * IDT being write-protected.
8453 */
8454 -static inline void set_intr_gate(unsigned int n, void *addr)
8455 +static inline void set_intr_gate(unsigned int n, const void *addr)
8456 {
8457 BUG_ON((unsigned)n > 0xFF);
8458 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
8459 @@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsig
8460 /*
8461 * This routine sets up an interrupt gate at directory privilege level 3.
8462 */
8463 -static inline void set_system_intr_gate(unsigned int n, void *addr)
8464 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
8465 {
8466 BUG_ON((unsigned)n > 0xFF);
8467 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8468 }
8469
8470 -static inline void set_system_trap_gate(unsigned int n, void *addr)
8471 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
8472 {
8473 BUG_ON((unsigned)n > 0xFF);
8474 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8475 }
8476
8477 -static inline void set_trap_gate(unsigned int n, void *addr)
8478 +static inline void set_trap_gate(unsigned int n, const void *addr)
8479 {
8480 BUG_ON((unsigned)n > 0xFF);
8481 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8482 @@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigne
8483 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8484 {
8485 BUG_ON((unsigned)n > 0xFF);
8486 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8487 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8488 }
8489
8490 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8491 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8492 {
8493 BUG_ON((unsigned)n > 0xFF);
8494 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8495 }
8496
8497 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8498 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8499 {
8500 BUG_ON((unsigned)n > 0xFF);
8501 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8502 }
8503
8504 +#ifdef CONFIG_X86_32
8505 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8506 +{
8507 + struct desc_struct d;
8508 +
8509 + if (likely(limit))
8510 + limit = (limit - 1UL) >> PAGE_SHIFT;
8511 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
8512 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8513 +}
8514 +#endif
8515 +
8516 #endif /* _ASM_X86_DESC_H */
8517 diff -urNp linux-2.6.32.45/arch/x86/include/asm/device.h linux-2.6.32.45/arch/x86/include/asm/device.h
8518 --- linux-2.6.32.45/arch/x86/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
8519 +++ linux-2.6.32.45/arch/x86/include/asm/device.h 2011-04-17 15:56:46.000000000 -0400
8520 @@ -6,7 +6,7 @@ struct dev_archdata {
8521 void *acpi_handle;
8522 #endif
8523 #ifdef CONFIG_X86_64
8524 -struct dma_map_ops *dma_ops;
8525 + const struct dma_map_ops *dma_ops;
8526 #endif
8527 #ifdef CONFIG_DMAR
8528 void *iommu; /* hook for IOMMU specific extension */
8529 diff -urNp linux-2.6.32.45/arch/x86/include/asm/dma-mapping.h linux-2.6.32.45/arch/x86/include/asm/dma-mapping.h
8530 --- linux-2.6.32.45/arch/x86/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
8531 +++ linux-2.6.32.45/arch/x86/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
8532 @@ -25,9 +25,9 @@ extern int iommu_merge;
8533 extern struct device x86_dma_fallback_dev;
8534 extern int panic_on_overflow;
8535
8536 -extern struct dma_map_ops *dma_ops;
8537 +extern const struct dma_map_ops *dma_ops;
8538
8539 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
8540 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
8541 {
8542 #ifdef CONFIG_X86_32
8543 return dma_ops;
8544 @@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dm
8545 /* Make sure we keep the same behaviour */
8546 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
8547 {
8548 - struct dma_map_ops *ops = get_dma_ops(dev);
8549 + const struct dma_map_ops *ops = get_dma_ops(dev);
8550 if (ops->mapping_error)
8551 return ops->mapping_error(dev, dma_addr);
8552
8553 @@ -122,7 +122,7 @@ static inline void *
8554 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
8555 gfp_t gfp)
8556 {
8557 - struct dma_map_ops *ops = get_dma_ops(dev);
8558 + const struct dma_map_ops *ops = get_dma_ops(dev);
8559 void *memory;
8560
8561 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
8562 @@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, s
8563 static inline void dma_free_coherent(struct device *dev, size_t size,
8564 void *vaddr, dma_addr_t bus)
8565 {
8566 - struct dma_map_ops *ops = get_dma_ops(dev);
8567 + const struct dma_map_ops *ops = get_dma_ops(dev);
8568
8569 WARN_ON(irqs_disabled()); /* for portability */
8570
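
Note: the point of turning these dma_map_ops pointers into pointers-to-const (and likewise in later hunks for kvm_x86_ops and microcode_ops) is that the ops tables themselves can be declared const and kept in read-only data, so the function pointers cannot be rewritten through these handles at run time. A stand-alone sketch with a hypothetical ops struct, not the kernel's:

/*
 * Hypothetical ops table: with both the table and the pointer-to-it
 * const-qualified, any attempt to swap a function pointer is rejected
 * at compile time and the table typically lands in .rodata.
 */
#include <stdio.h>

struct demo_ops {
	int (*map)(void *buf, unsigned long len);
};

static int real_map(void *buf, unsigned long len)
{
	(void)buf;
	return (int)len;
}

static const struct demo_ops demo_dma_ops = { .map = real_map };
static const struct demo_ops *ops = &demo_dma_ops;

int main(void)
{
	char buf[16];

	/* ops->map = NULL;   <- would not compile: assignment of read-only member */
	printf("%d\n", ops->map(buf, sizeof(buf)));
	return 0;
}
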
8571 diff -urNp linux-2.6.32.45/arch/x86/include/asm/e820.h linux-2.6.32.45/arch/x86/include/asm/e820.h
8572 --- linux-2.6.32.45/arch/x86/include/asm/e820.h 2011-03-27 14:31:47.000000000 -0400
8573 +++ linux-2.6.32.45/arch/x86/include/asm/e820.h 2011-04-17 15:56:46.000000000 -0400
8574 @@ -133,7 +133,7 @@ extern char *default_machine_specific_me
8575 #define ISA_END_ADDRESS 0x100000
8576 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
8577
8578 -#define BIOS_BEGIN 0x000a0000
8579 +#define BIOS_BEGIN 0x000c0000
8580 #define BIOS_END 0x00100000
8581
8582 #ifdef __KERNEL__
8583 diff -urNp linux-2.6.32.45/arch/x86/include/asm/elf.h linux-2.6.32.45/arch/x86/include/asm/elf.h
8584 --- linux-2.6.32.45/arch/x86/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
8585 +++ linux-2.6.32.45/arch/x86/include/asm/elf.h 2011-08-23 20:24:19.000000000 -0400
8586 @@ -257,7 +257,25 @@ extern int force_personality32;
8587 the loader. We need to make sure that it is out of the way of the program
8588 that it will "exec", and that there is sufficient room for the brk. */
8589
8590 +#ifdef CONFIG_PAX_SEGMEXEC
8591 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8592 +#else
8593 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8594 +#endif
8595 +
8596 +#ifdef CONFIG_PAX_ASLR
8597 +#ifdef CONFIG_X86_32
8598 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8599 +
8600 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8601 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8602 +#else
8603 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
8604 +
8605 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8606 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8607 +#endif
8608 +#endif
8609
8610 /* This yields a mask that user programs can use to figure out what
8611 instruction set this CPU supports. This could be done in user space,
8612 @@ -310,9 +328,7 @@ do { \
8613
8614 #define ARCH_DLINFO \
8615 do { \
8616 - if (vdso_enabled) \
8617 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8618 - (unsigned long)current->mm->context.vdso); \
8619 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
8620 } while (0)
8621
8622 #define AT_SYSINFO 32
8623 @@ -323,7 +339,7 @@ do { \
8624
8625 #endif /* !CONFIG_X86_32 */
8626
8627 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8628 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8629
8630 #define VDSO_ENTRY \
8631 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8632 @@ -337,7 +353,4 @@ extern int arch_setup_additional_pages(s
8633 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8634 #define compat_arch_setup_additional_pages syscall32_setup_pages
8635
8636 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8637 -#define arch_randomize_brk arch_randomize_brk
8638 -
8639 #endif /* _ASM_X86_ELF_H */
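
Note: the PAX_DELTA_* values above are bit counts, not byte offsets. A back-of-the-envelope sketch of the address-space jitter they imply, under two assumptions that are not visible in this hunk: the random bits are applied at page (4 KiB) granularity, and TASK_SIZE_MAX_SHIFT is 47 on x86-64, so the 64-bit formula works out to 47 - 12 - 3 = 32 bits:

/*
 * Rough numbers only; see the assumptions stated above.
 */
#include <stdio.h>

#define PAGE_SHIFT 12

static void show(const char *label, unsigned bits)
{
	unsigned long long span_mib = ((1ULL << bits) << PAGE_SHIFT) >> 20;

	printf("%-20s %2u random bits -> %llu MiB of mmap jitter\n",
	       label, bits, span_mib);
}

int main(void)
{
	show("i386 + SEGMEXEC", 15);    /* 128 MiB            */
	show("i386", 16);               /* 256 MiB            */
	show("x86-64 (non-IA32)", 32);  /* 16777216 MiB = 16 TiB */
	return 0;
}
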
8640 diff -urNp linux-2.6.32.45/arch/x86/include/asm/emergency-restart.h linux-2.6.32.45/arch/x86/include/asm/emergency-restart.h
8641 --- linux-2.6.32.45/arch/x86/include/asm/emergency-restart.h 2011-03-27 14:31:47.000000000 -0400
8642 +++ linux-2.6.32.45/arch/x86/include/asm/emergency-restart.h 2011-05-22 23:02:06.000000000 -0400
8643 @@ -15,6 +15,6 @@ enum reboot_type {
8644
8645 extern enum reboot_type reboot_type;
8646
8647 -extern void machine_emergency_restart(void);
8648 +extern void machine_emergency_restart(void) __noreturn;
8649
8650 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8651 diff -urNp linux-2.6.32.45/arch/x86/include/asm/futex.h linux-2.6.32.45/arch/x86/include/asm/futex.h
8652 --- linux-2.6.32.45/arch/x86/include/asm/futex.h 2011-03-27 14:31:47.000000000 -0400
8653 +++ linux-2.6.32.45/arch/x86/include/asm/futex.h 2011-04-17 15:56:46.000000000 -0400
8654 @@ -12,16 +12,18 @@
8655 #include <asm/system.h>
8656
8657 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8658 + typecheck(u32 *, uaddr); \
8659 asm volatile("1:\t" insn "\n" \
8660 "2:\t.section .fixup,\"ax\"\n" \
8661 "3:\tmov\t%3, %1\n" \
8662 "\tjmp\t2b\n" \
8663 "\t.previous\n" \
8664 _ASM_EXTABLE(1b, 3b) \
8665 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8666 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
8667 : "i" (-EFAULT), "0" (oparg), "1" (0))
8668
8669 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8670 + typecheck(u32 *, uaddr); \
8671 asm volatile("1:\tmovl %2, %0\n" \
8672 "\tmovl\t%0, %3\n" \
8673 "\t" insn "\n" \
8674 @@ -34,10 +36,10 @@
8675 _ASM_EXTABLE(1b, 4b) \
8676 _ASM_EXTABLE(2b, 4b) \
8677 : "=&a" (oldval), "=&r" (ret), \
8678 - "+m" (*uaddr), "=&r" (tem) \
8679 + "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
8680 : "r" (oparg), "i" (-EFAULT), "1" (0))
8681
8682 -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
8683 +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8684 {
8685 int op = (encoded_op >> 28) & 7;
8686 int cmp = (encoded_op >> 24) & 15;
8687 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
8688
8689 switch (op) {
8690 case FUTEX_OP_SET:
8691 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8692 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8693 break;
8694 case FUTEX_OP_ADD:
8695 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8696 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8697 uaddr, oparg);
8698 break;
8699 case FUTEX_OP_OR:
8700 @@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser
8701 return ret;
8702 }
8703
8704 -static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
8705 +static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
8706 int newval)
8707 {
8708
8709 @@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_i
8710 return -ENOSYS;
8711 #endif
8712
8713 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
8714 + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8715 return -EFAULT;
8716
8717 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
8718 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
8719 "2:\t.section .fixup, \"ax\"\n"
8720 "3:\tmov %2, %0\n"
8721 "\tjmp 2b\n"
8722 "\t.previous\n"
8723 _ASM_EXTABLE(1b, 3b)
8724 - : "=a" (oldval), "+m" (*uaddr)
8725 + : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
8726 : "i" (-EFAULT), "r" (newval), "0" (oldval)
8727 : "memory"
8728 );
8729 diff -urNp linux-2.6.32.45/arch/x86/include/asm/hw_irq.h linux-2.6.32.45/arch/x86/include/asm/hw_irq.h
8730 --- linux-2.6.32.45/arch/x86/include/asm/hw_irq.h 2011-03-27 14:31:47.000000000 -0400
8731 +++ linux-2.6.32.45/arch/x86/include/asm/hw_irq.h 2011-05-04 17:56:28.000000000 -0400
8732 @@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
8733 extern void enable_IO_APIC(void);
8734
8735 /* Statistics */
8736 -extern atomic_t irq_err_count;
8737 -extern atomic_t irq_mis_count;
8738 +extern atomic_unchecked_t irq_err_count;
8739 +extern atomic_unchecked_t irq_mis_count;
8740
8741 /* EISA */
8742 extern void eisa_set_level_irq(unsigned int irq);
8743 diff -urNp linux-2.6.32.45/arch/x86/include/asm/i387.h linux-2.6.32.45/arch/x86/include/asm/i387.h
8744 --- linux-2.6.32.45/arch/x86/include/asm/i387.h 2011-03-27 14:31:47.000000000 -0400
8745 +++ linux-2.6.32.45/arch/x86/include/asm/i387.h 2011-04-17 15:56:46.000000000 -0400
8746 @@ -60,6 +60,11 @@ static inline int fxrstor_checking(struc
8747 {
8748 int err;
8749
8750 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8751 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8752 + fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
8753 +#endif
8754 +
8755 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
8756 "2:\n"
8757 ".section .fixup,\"ax\"\n"
8758 @@ -105,6 +110,11 @@ static inline int fxsave_user(struct i38
8759 {
8760 int err;
8761
8762 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8763 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8764 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8765 +#endif
8766 +
8767 asm volatile("1: rex64/fxsave (%[fx])\n\t"
8768 "2:\n"
8769 ".section .fixup,\"ax\"\n"
8770 @@ -195,13 +205,8 @@ static inline int fxrstor_checking(struc
8771 }
8772
8773 /* We need a safe address that is cheap to find and that is already
8774 - in L1 during context switch. The best choices are unfortunately
8775 - different for UP and SMP */
8776 -#ifdef CONFIG_SMP
8777 -#define safe_address (__per_cpu_offset[0])
8778 -#else
8779 -#define safe_address (kstat_cpu(0).cpustat.user)
8780 -#endif
8781 + in L1 during context switch. */
8782 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
8783
8784 /*
8785 * These must be called with preempt disabled
8786 @@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void
8787 struct thread_info *me = current_thread_info();
8788 preempt_disable();
8789 if (me->status & TS_USEDFPU)
8790 - __save_init_fpu(me->task);
8791 + __save_init_fpu(current);
8792 else
8793 clts();
8794 }
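
Note: the fxsave/fxrstor hunks above redirect user-space buffer pointers through PAX_USER_SHADOW_BASE when PAX_MEMORY_UDEREF is active; the surrounding code suggests the kernel is meant to reach user memory only through that shifted alias rather than via the raw user address. A pointer-arithmetic sketch only, with a made-up base value and an ordinary array standing in for the alias mapping:

/*
 * Illustrative only: FAKE_SHADOW_BASE stands in for PAX_USER_SHADOW_BASE,
 * and the high half of "space" simulates an alias mapping of the low half.
 */
#include <stdio.h>

#define FAKE_SHADOW_BASE 0x1000UL

static unsigned char space[2 * FAKE_SHADOW_BASE];

static unsigned char *kernel_alias(unsigned long user_off)
{
	/* mirrors: if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
	 *                  fx += PAX_USER_SHADOW_BASE;           */
	if (user_off < FAKE_SHADOW_BASE)
		user_off += FAKE_SHADOW_BASE;
	return &space[user_off];
}

int main(void)
{
	space[FAKE_SHADOW_BASE + 0x10] = 42;   /* "user" byte, visible via the alias  */
	printf("%d\n", *kernel_alias(0x10));   /* kernel-side access goes via +base   */
	return 0;
}
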
8795 diff -urNp linux-2.6.32.45/arch/x86/include/asm/io_32.h linux-2.6.32.45/arch/x86/include/asm/io_32.h
8796 --- linux-2.6.32.45/arch/x86/include/asm/io_32.h 2011-03-27 14:31:47.000000000 -0400
8797 +++ linux-2.6.32.45/arch/x86/include/asm/io_32.h 2011-04-17 15:56:46.000000000 -0400
8798 @@ -3,6 +3,7 @@
8799
8800 #include <linux/string.h>
8801 #include <linux/compiler.h>
8802 +#include <asm/processor.h>
8803
8804 /*
8805 * This file contains the definitions for the x86 IO instructions
8806 @@ -42,6 +43,17 @@
8807
8808 #ifdef __KERNEL__
8809
8810 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8811 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8812 +{
8813 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8814 +}
8815 +
8816 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8817 +{
8818 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8819 +}
8820 +
8821 #include <asm-generic/iomap.h>
8822
8823 #include <linux/vmalloc.h>
8824 diff -urNp linux-2.6.32.45/arch/x86/include/asm/io_64.h linux-2.6.32.45/arch/x86/include/asm/io_64.h
8825 --- linux-2.6.32.45/arch/x86/include/asm/io_64.h 2011-03-27 14:31:47.000000000 -0400
8826 +++ linux-2.6.32.45/arch/x86/include/asm/io_64.h 2011-04-17 15:56:46.000000000 -0400
8827 @@ -140,6 +140,17 @@ __OUTS(l)
8828
8829 #include <linux/vmalloc.h>
8830
8831 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8832 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8833 +{
8834 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8835 +}
8836 +
8837 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8838 +{
8839 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8840 +}
8841 +
8842 #include <asm-generic/iomap.h>
8843
8844 void __memcpy_fromio(void *, unsigned long, unsigned);
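
Note: the valid_phys_addr_range()/valid_mmap_phys_addr_range() helpers added in the two io_*.h hunks above bound /dev/mem-style accesses by the number of physical page frames the CPU can address (boot_cpu_data.x86_phys_bits). The same arithmetic, replayed in user space with 36 physical address bits chosen purely as an example:

/*
 * Userspace re-run of the range check: the page-rounded end of the request
 * must stay below the last addressable physical page frame.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)

static int valid_phys_addr_range(unsigned long long addr, unsigned long long count,
				 unsigned phys_bits)
{
	return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) <
	       (1ULL << (phys_bits - PAGE_SHIFT));
}

int main(void)
{
	/* 36 physical bits -> 64 GiB of addressable physical memory */
	printf("%d\n", valid_phys_addr_range(0x0ULL, 4096, 36));           /* 1: in range            */
	printf("%d\n", valid_phys_addr_range(0x1000000000ULL, 4096, 36));  /* 0: past the last frame */
	return 0;
}
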
8845 diff -urNp linux-2.6.32.45/arch/x86/include/asm/iommu.h linux-2.6.32.45/arch/x86/include/asm/iommu.h
8846 --- linux-2.6.32.45/arch/x86/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
8847 +++ linux-2.6.32.45/arch/x86/include/asm/iommu.h 2011-04-17 15:56:46.000000000 -0400
8848 @@ -3,7 +3,7 @@
8849
8850 extern void pci_iommu_shutdown(void);
8851 extern void no_iommu_init(void);
8852 -extern struct dma_map_ops nommu_dma_ops;
8853 +extern const struct dma_map_ops nommu_dma_ops;
8854 extern int force_iommu, no_iommu;
8855 extern int iommu_detected;
8856 extern int iommu_pass_through;
8857 diff -urNp linux-2.6.32.45/arch/x86/include/asm/irqflags.h linux-2.6.32.45/arch/x86/include/asm/irqflags.h
8858 --- linux-2.6.32.45/arch/x86/include/asm/irqflags.h 2011-03-27 14:31:47.000000000 -0400
8859 +++ linux-2.6.32.45/arch/x86/include/asm/irqflags.h 2011-04-17 15:56:46.000000000 -0400
8860 @@ -142,6 +142,11 @@ static inline unsigned long __raw_local_
8861 sti; \
8862 sysexit
8863
8864 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
8865 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8866 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
8867 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8868 +
8869 #else
8870 #define INTERRUPT_RETURN iret
8871 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8872 diff -urNp linux-2.6.32.45/arch/x86/include/asm/kprobes.h linux-2.6.32.45/arch/x86/include/asm/kprobes.h
8873 --- linux-2.6.32.45/arch/x86/include/asm/kprobes.h 2011-03-27 14:31:47.000000000 -0400
8874 +++ linux-2.6.32.45/arch/x86/include/asm/kprobes.h 2011-04-23 12:56:12.000000000 -0400
8875 @@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
8876 #define BREAKPOINT_INSTRUCTION 0xcc
8877 #define RELATIVEJUMP_INSTRUCTION 0xe9
8878 #define MAX_INSN_SIZE 16
8879 -#define MAX_STACK_SIZE 64
8880 -#define MIN_STACK_SIZE(ADDR) \
8881 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8882 - THREAD_SIZE - (unsigned long)(ADDR))) \
8883 - ? (MAX_STACK_SIZE) \
8884 - : (((unsigned long)current_thread_info()) + \
8885 - THREAD_SIZE - (unsigned long)(ADDR)))
8886 +#define MAX_STACK_SIZE 64UL
8887 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8888
8889 #define flush_insn_slot(p) do { } while (0)
8890
8891 diff -urNp linux-2.6.32.45/arch/x86/include/asm/kvm_host.h linux-2.6.32.45/arch/x86/include/asm/kvm_host.h
8892 --- linux-2.6.32.45/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:01.000000000 -0400
8893 +++ linux-2.6.32.45/arch/x86/include/asm/kvm_host.h 2011-08-26 20:19:09.000000000 -0400
8894 @@ -534,9 +534,9 @@ struct kvm_x86_ops {
8895 bool (*gb_page_enable)(void);
8896
8897 const struct trace_print_flags *exit_reasons_str;
8898 -};
8899 +} __do_const;
8900
8901 -extern struct kvm_x86_ops *kvm_x86_ops;
8902 +extern const struct kvm_x86_ops *kvm_x86_ops;
8903
8904 int kvm_mmu_module_init(void);
8905 void kvm_mmu_module_exit(void);
8906 diff -urNp linux-2.6.32.45/arch/x86/include/asm/local.h linux-2.6.32.45/arch/x86/include/asm/local.h
8907 --- linux-2.6.32.45/arch/x86/include/asm/local.h 2011-03-27 14:31:47.000000000 -0400
8908 +++ linux-2.6.32.45/arch/x86/include/asm/local.h 2011-04-17 15:56:46.000000000 -0400
8909 @@ -18,26 +18,58 @@ typedef struct {
8910
8911 static inline void local_inc(local_t *l)
8912 {
8913 - asm volatile(_ASM_INC "%0"
8914 + asm volatile(_ASM_INC "%0\n"
8915 +
8916 +#ifdef CONFIG_PAX_REFCOUNT
8917 + "jno 0f\n"
8918 + _ASM_DEC "%0\n"
8919 + "int $4\n0:\n"
8920 + _ASM_EXTABLE(0b, 0b)
8921 +#endif
8922 +
8923 : "+m" (l->a.counter));
8924 }
8925
8926 static inline void local_dec(local_t *l)
8927 {
8928 - asm volatile(_ASM_DEC "%0"
8929 + asm volatile(_ASM_DEC "%0\n"
8930 +
8931 +#ifdef CONFIG_PAX_REFCOUNT
8932 + "jno 0f\n"
8933 + _ASM_INC "%0\n"
8934 + "int $4\n0:\n"
8935 + _ASM_EXTABLE(0b, 0b)
8936 +#endif
8937 +
8938 : "+m" (l->a.counter));
8939 }
8940
8941 static inline void local_add(long i, local_t *l)
8942 {
8943 - asm volatile(_ASM_ADD "%1,%0"
8944 + asm volatile(_ASM_ADD "%1,%0\n"
8945 +
8946 +#ifdef CONFIG_PAX_REFCOUNT
8947 + "jno 0f\n"
8948 + _ASM_SUB "%1,%0\n"
8949 + "int $4\n0:\n"
8950 + _ASM_EXTABLE(0b, 0b)
8951 +#endif
8952 +
8953 : "+m" (l->a.counter)
8954 : "ir" (i));
8955 }
8956
8957 static inline void local_sub(long i, local_t *l)
8958 {
8959 - asm volatile(_ASM_SUB "%1,%0"
8960 + asm volatile(_ASM_SUB "%1,%0\n"
8961 +
8962 +#ifdef CONFIG_PAX_REFCOUNT
8963 + "jno 0f\n"
8964 + _ASM_ADD "%1,%0\n"
8965 + "int $4\n0:\n"
8966 + _ASM_EXTABLE(0b, 0b)
8967 +#endif
8968 +
8969 : "+m" (l->a.counter)
8970 : "ir" (i));
8971 }
8972 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
8973 {
8974 unsigned char c;
8975
8976 - asm volatile(_ASM_SUB "%2,%0; sete %1"
8977 + asm volatile(_ASM_SUB "%2,%0\n"
8978 +
8979 +#ifdef CONFIG_PAX_REFCOUNT
8980 + "jno 0f\n"
8981 + _ASM_ADD "%2,%0\n"
8982 + "int $4\n0:\n"
8983 + _ASM_EXTABLE(0b, 0b)
8984 +#endif
8985 +
8986 + "sete %1\n"
8987 : "+m" (l->a.counter), "=qm" (c)
8988 : "ir" (i) : "memory");
8989 return c;
8990 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
8991 {
8992 unsigned char c;
8993
8994 - asm volatile(_ASM_DEC "%0; sete %1"
8995 + asm volatile(_ASM_DEC "%0\n"
8996 +
8997 +#ifdef CONFIG_PAX_REFCOUNT
8998 + "jno 0f\n"
8999 + _ASM_INC "%0\n"
9000 + "int $4\n0:\n"
9001 + _ASM_EXTABLE(0b, 0b)
9002 +#endif
9003 +
9004 + "sete %1\n"
9005 : "+m" (l->a.counter), "=qm" (c)
9006 : : "memory");
9007 return c != 0;
9008 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
9009 {
9010 unsigned char c;
9011
9012 - asm volatile(_ASM_INC "%0; sete %1"
9013 + asm volatile(_ASM_INC "%0\n"
9014 +
9015 +#ifdef CONFIG_PAX_REFCOUNT
9016 + "jno 0f\n"
9017 + _ASM_DEC "%0\n"
9018 + "int $4\n0:\n"
9019 + _ASM_EXTABLE(0b, 0b)
9020 +#endif
9021 +
9022 + "sete %1\n"
9023 : "+m" (l->a.counter), "=qm" (c)
9024 : : "memory");
9025 return c != 0;
9026 @@ -110,7 +169,16 @@ static inline int local_add_negative(lon
9027 {
9028 unsigned char c;
9029
9030 - asm volatile(_ASM_ADD "%2,%0; sets %1"
9031 + asm volatile(_ASM_ADD "%2,%0\n"
9032 +
9033 +#ifdef CONFIG_PAX_REFCOUNT
9034 + "jno 0f\n"
9035 + _ASM_SUB "%2,%0\n"
9036 + "int $4\n0:\n"
9037 + _ASM_EXTABLE(0b, 0b)
9038 +#endif
9039 +
9040 + "sets %1\n"
9041 : "+m" (l->a.counter), "=qm" (c)
9042 : "ir" (i) : "memory");
9043 return c;
9044 @@ -133,7 +201,15 @@ static inline long local_add_return(long
9045 #endif
9046 /* Modern 486+ processor */
9047 __i = i;
9048 - asm volatile(_ASM_XADD "%0, %1;"
9049 + asm volatile(_ASM_XADD "%0, %1\n"
9050 +
9051 +#ifdef CONFIG_PAX_REFCOUNT
9052 + "jno 0f\n"
9053 + _ASM_MOV "%0,%1\n"
9054 + "int $4\n0:\n"
9055 + _ASM_EXTABLE(0b, 0b)
9056 +#endif
9057 +
9058 : "+r" (i), "+m" (l->a.counter)
9059 : : "memory");
9060 return i + __i;
9061 diff -urNp linux-2.6.32.45/arch/x86/include/asm/microcode.h linux-2.6.32.45/arch/x86/include/asm/microcode.h
9062 --- linux-2.6.32.45/arch/x86/include/asm/microcode.h 2011-03-27 14:31:47.000000000 -0400
9063 +++ linux-2.6.32.45/arch/x86/include/asm/microcode.h 2011-04-17 15:56:46.000000000 -0400
9064 @@ -12,13 +12,13 @@ struct device;
9065 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
9066
9067 struct microcode_ops {
9068 - enum ucode_state (*request_microcode_user) (int cpu,
9069 + enum ucode_state (* const request_microcode_user) (int cpu,
9070 const void __user *buf, size_t size);
9071
9072 - enum ucode_state (*request_microcode_fw) (int cpu,
9073 + enum ucode_state (* const request_microcode_fw) (int cpu,
9074 struct device *device);
9075
9076 - void (*microcode_fini_cpu) (int cpu);
9077 + void (* const microcode_fini_cpu) (int cpu);
9078
9079 /*
9080 * The generic 'microcode_core' part guarantees that
9081 @@ -38,18 +38,18 @@ struct ucode_cpu_info {
9082 extern struct ucode_cpu_info ucode_cpu_info[];
9083
9084 #ifdef CONFIG_MICROCODE_INTEL
9085 -extern struct microcode_ops * __init init_intel_microcode(void);
9086 +extern const struct microcode_ops * __init init_intel_microcode(void);
9087 #else
9088 -static inline struct microcode_ops * __init init_intel_microcode(void)
9089 +static inline const struct microcode_ops * __init init_intel_microcode(void)
9090 {
9091 return NULL;
9092 }
9093 #endif /* CONFIG_MICROCODE_INTEL */
9094
9095 #ifdef CONFIG_MICROCODE_AMD
9096 -extern struct microcode_ops * __init init_amd_microcode(void);
9097 +extern const struct microcode_ops * __init init_amd_microcode(void);
9098 #else
9099 -static inline struct microcode_ops * __init init_amd_microcode(void)
9100 +static inline const struct microcode_ops * __init init_amd_microcode(void)
9101 {
9102 return NULL;
9103 }
9104 diff -urNp linux-2.6.32.45/arch/x86/include/asm/mman.h linux-2.6.32.45/arch/x86/include/asm/mman.h
9105 --- linux-2.6.32.45/arch/x86/include/asm/mman.h 2011-03-27 14:31:47.000000000 -0400
9106 +++ linux-2.6.32.45/arch/x86/include/asm/mman.h 2011-04-17 15:56:46.000000000 -0400
9107 @@ -5,4 +5,14 @@
9108
9109 #include <asm-generic/mman.h>
9110
9111 +#ifdef __KERNEL__
9112 +#ifndef __ASSEMBLY__
9113 +#ifdef CONFIG_X86_32
9114 +#define arch_mmap_check i386_mmap_check
9115 +int i386_mmap_check(unsigned long addr, unsigned long len,
9116 + unsigned long flags);
9117 +#endif
9118 +#endif
9119 +#endif
9120 +
9121 #endif /* _ASM_X86_MMAN_H */
9122 diff -urNp linux-2.6.32.45/arch/x86/include/asm/mmu_context.h linux-2.6.32.45/arch/x86/include/asm/mmu_context.h
9123 --- linux-2.6.32.45/arch/x86/include/asm/mmu_context.h 2011-03-27 14:31:47.000000000 -0400
9124 +++ linux-2.6.32.45/arch/x86/include/asm/mmu_context.h 2011-08-23 20:24:19.000000000 -0400
9125 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *m
9126
9127 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
9128 {
9129 +
9130 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9131 + unsigned int i;
9132 + pgd_t *pgd;
9133 +
9134 + pax_open_kernel();
9135 + pgd = get_cpu_pgd(smp_processor_id());
9136 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
9137 + set_pgd_batched(pgd+i, native_make_pgd(0));
9138 + pax_close_kernel();
9139 +#endif
9140 +
9141 #ifdef CONFIG_SMP
9142 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
9143 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
9144 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_s
9145 struct task_struct *tsk)
9146 {
9147 unsigned cpu = smp_processor_id();
9148 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) && defined(CONFIG_SMP)
9149 + int tlbstate = TLBSTATE_OK;
9150 +#endif
9151
9152 if (likely(prev != next)) {
9153 #ifdef CONFIG_SMP
9154 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9155 + tlbstate = percpu_read(cpu_tlbstate.state);
9156 +#endif
9157 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9158 percpu_write(cpu_tlbstate.active_mm, next);
9159 #endif
9160 cpumask_set_cpu(cpu, mm_cpumask(next));
9161
9162 /* Re-load page tables */
9163 +#ifdef CONFIG_PAX_PER_CPU_PGD
9164 + pax_open_kernel();
9165 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9166 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9167 + pax_close_kernel();
9168 + load_cr3(get_cpu_pgd(cpu));
9169 +#else
9170 load_cr3(next->pgd);
9171 +#endif
9172
9173 /* stop flush ipis for the previous mm */
9174 cpumask_clear_cpu(cpu, mm_cpumask(prev));
9175 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_s
9176 */
9177 if (unlikely(prev->context.ldt != next->context.ldt))
9178 load_LDT_nolock(&next->context);
9179 - }
9180 +
9181 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9182 + if (!nx_enabled) {
9183 + smp_mb__before_clear_bit();
9184 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
9185 + smp_mb__after_clear_bit();
9186 + cpu_set(cpu, next->context.cpu_user_cs_mask);
9187 + }
9188 +#endif
9189 +
9190 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9191 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
9192 + prev->context.user_cs_limit != next->context.user_cs_limit))
9193 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9194 #ifdef CONFIG_SMP
9195 + else if (unlikely(tlbstate != TLBSTATE_OK))
9196 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9197 +#endif
9198 +#endif
9199 +
9200 + }
9201 else {
9202 +
9203 +#ifdef CONFIG_PAX_PER_CPU_PGD
9204 + pax_open_kernel();
9205 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9206 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9207 + pax_close_kernel();
9208 + load_cr3(get_cpu_pgd(cpu));
9209 +#endif
9210 +
9211 +#ifdef CONFIG_SMP
9212 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9213 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
9214
9215 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_s
9216 * tlb flush IPI delivery. We must reload CR3
9217 * to make sure to use no freed page tables.
9218 */
9219 +
9220 +#ifndef CONFIG_PAX_PER_CPU_PGD
9221 load_cr3(next->pgd);
9222 +#endif
9223 +
9224 load_LDT_nolock(&next->context);
9225 +
9226 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
9227 + if (!nx_enabled)
9228 + cpu_set(cpu, next->context.cpu_user_cs_mask);
9229 +#endif
9230 +
9231 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9232 +#ifdef CONFIG_PAX_PAGEEXEC
9233 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
9234 +#endif
9235 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9236 +#endif
9237 +
9238 }
9239 - }
9240 #endif
9241 + }
9242 }
9243
9244 #define activate_mm(prev, next) \
9245 diff -urNp linux-2.6.32.45/arch/x86/include/asm/mmu.h linux-2.6.32.45/arch/x86/include/asm/mmu.h
9246 --- linux-2.6.32.45/arch/x86/include/asm/mmu.h 2011-03-27 14:31:47.000000000 -0400
9247 +++ linux-2.6.32.45/arch/x86/include/asm/mmu.h 2011-04-17 15:56:46.000000000 -0400
9248 @@ -9,10 +9,23 @@
9249 * we put the segment information here.
9250 */
9251 typedef struct {
9252 - void *ldt;
9253 + struct desc_struct *ldt;
9254 int size;
9255 struct mutex lock;
9256 - void *vdso;
9257 + unsigned long vdso;
9258 +
9259 +#ifdef CONFIG_X86_32
9260 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
9261 + unsigned long user_cs_base;
9262 + unsigned long user_cs_limit;
9263 +
9264 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9265 + cpumask_t cpu_user_cs_mask;
9266 +#endif
9267 +
9268 +#endif
9269 +#endif
9270 +
9271 } mm_context_t;
9272
9273 #ifdef CONFIG_SMP
9274 diff -urNp linux-2.6.32.45/arch/x86/include/asm/module.h linux-2.6.32.45/arch/x86/include/asm/module.h
9275 --- linux-2.6.32.45/arch/x86/include/asm/module.h 2011-03-27 14:31:47.000000000 -0400
9276 +++ linux-2.6.32.45/arch/x86/include/asm/module.h 2011-04-23 13:18:57.000000000 -0400
9277 @@ -5,6 +5,7 @@
9278
9279 #ifdef CONFIG_X86_64
9280 /* X86_64 does not define MODULE_PROC_FAMILY */
9281 +#define MODULE_PROC_FAMILY ""
9282 #elif defined CONFIG_M386
9283 #define MODULE_PROC_FAMILY "386 "
9284 #elif defined CONFIG_M486
9285 @@ -59,13 +60,36 @@
9286 #error unknown processor family
9287 #endif
9288
9289 -#ifdef CONFIG_X86_32
9290 -# ifdef CONFIG_4KSTACKS
9291 -# define MODULE_STACKSIZE "4KSTACKS "
9292 -# else
9293 -# define MODULE_STACKSIZE ""
9294 -# endif
9295 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
9296 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9297 +#define MODULE_PAX_UDEREF "UDEREF "
9298 +#else
9299 +#define MODULE_PAX_UDEREF ""
9300 +#endif
9301 +
9302 +#ifdef CONFIG_PAX_KERNEXEC
9303 +#define MODULE_PAX_KERNEXEC "KERNEXEC "
9304 +#else
9305 +#define MODULE_PAX_KERNEXEC ""
9306 +#endif
9307 +
9308 +#ifdef CONFIG_PAX_REFCOUNT
9309 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
9310 +#else
9311 +#define MODULE_PAX_REFCOUNT ""
9312 #endif
9313
9314 +#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
9315 +#define MODULE_STACKSIZE "4KSTACKS "
9316 +#else
9317 +#define MODULE_STACKSIZE ""
9318 +#endif
9319 +
9320 +#ifdef CONFIG_GRKERNSEC
9321 +#define MODULE_GRSEC "GRSECURITY "
9322 +#else
9323 +#define MODULE_GRSEC ""
9324 +#endif
9325 +
9326 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
9327 +
9328 #endif /* _ASM_X86_MODULE_H */
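
The module.h change above works purely by preprocessor string concatenation: each enabled hardening option contributes one literal fragment to MODULE_ARCH_VERMAGIC, so a module built with a different feature set fails the vermagic check at load time. A minimal user-space sketch of that mechanism follows; the CONFIG selections, the "586 " family string and the printf harness are assumptions made for the example, not taken from the patch.

/* sketch: how conditional fragments concatenate into one vermagic string */
#include <stdio.h>

#define CONFIG_PAX_KERNEXEC 1       /* pretend these options were enabled */
#define CONFIG_GRKERNSEC    1

#define MODULE_PROC_FAMILY "586 "   /* stands in for the real family string */

#ifdef CONFIG_PAX_KERNEXEC
#define MODULE_PAX_KERNEXEC "KERNEXEC "
#else
#define MODULE_PAX_KERNEXEC ""
#endif

#ifdef CONFIG_PAX_MEMORY_UDEREF
#define MODULE_PAX_UDEREF "UDEREF "
#else
#define MODULE_PAX_UDEREF ""
#endif

#ifdef CONFIG_GRKERNSEC
#define MODULE_GRSEC "GRSECURITY "
#else
#define MODULE_GRSEC ""
#endif

/* adjacent string literals are merged by the compiler, so the vermagic
 * simply grows one word per enabled feature */
#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF

int main(void)
{
    /* prints "586 GRSECURITY KERNEXEC " (each fragment carries its own trailing space) */
    printf("%s\n", MODULE_ARCH_VERMAGIC);
    return 0;
}
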
9329 diff -urNp linux-2.6.32.45/arch/x86/include/asm/page_64_types.h linux-2.6.32.45/arch/x86/include/asm/page_64_types.h
9330 --- linux-2.6.32.45/arch/x86/include/asm/page_64_types.h 2011-03-27 14:31:47.000000000 -0400
9331 +++ linux-2.6.32.45/arch/x86/include/asm/page_64_types.h 2011-04-17 15:56:46.000000000 -0400
9332 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
9333
9334 /* duplicated to the one in bootmem.h */
9335 extern unsigned long max_pfn;
9336 -extern unsigned long phys_base;
9337 +extern const unsigned long phys_base;
9338
9339 extern unsigned long __phys_addr(unsigned long);
9340 #define __phys_reloc_hide(x) (x)
9341 diff -urNp linux-2.6.32.45/arch/x86/include/asm/paravirt.h linux-2.6.32.45/arch/x86/include/asm/paravirt.h
9342 --- linux-2.6.32.45/arch/x86/include/asm/paravirt.h 2011-03-27 14:31:47.000000000 -0400
9343 +++ linux-2.6.32.45/arch/x86/include/asm/paravirt.h 2011-08-23 21:36:48.000000000 -0400
9344 @@ -648,6 +648,18 @@ static inline void set_pgd(pgd_t *pgdp,
9345 val);
9346 }
9347
9348 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9349 +{
9350 + pgdval_t val = native_pgd_val(pgd);
9351 +
9352 + if (sizeof(pgdval_t) > sizeof(long))
9353 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
9354 + val, (u64)val >> 32);
9355 + else
9356 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
9357 + val);
9358 +}
9359 +
9360 static inline void pgd_clear(pgd_t *pgdp)
9361 {
9362 set_pgd(pgdp, __pgd(0));
9363 @@ -729,6 +741,21 @@ static inline void __set_fixmap(unsigned
9364 pv_mmu_ops.set_fixmap(idx, phys, flags);
9365 }
9366
9367 +#ifdef CONFIG_PAX_KERNEXEC
9368 +static inline unsigned long pax_open_kernel(void)
9369 +{
9370 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
9371 +}
9372 +
9373 +static inline unsigned long pax_close_kernel(void)
9374 +{
9375 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
9376 +}
9377 +#else
9378 +static inline unsigned long pax_open_kernel(void) { return 0; }
9379 +static inline unsigned long pax_close_kernel(void) { return 0; }
9380 +#endif
9381 +
9382 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
9383
9384 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
9385 @@ -945,7 +972,7 @@ extern void default_banner(void);
9386
9387 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
9388 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
9389 -#define PARA_INDIRECT(addr) *%cs:addr
9390 +#define PARA_INDIRECT(addr) *%ss:addr
9391 #endif
9392
9393 #define INTERRUPT_RETURN \
9394 @@ -1022,6 +1049,21 @@ extern void default_banner(void);
9395 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
9396 CLBR_NONE, \
9397 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
9398 +
9399 +#define GET_CR0_INTO_RDI \
9400 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
9401 + mov %rax,%rdi
9402 +
9403 +#define SET_RDI_INTO_CR0 \
9404 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
9405 +
9406 +#define GET_CR3_INTO_RDI \
9407 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
9408 + mov %rax,%rdi
9409 +
9410 +#define SET_RDI_INTO_CR3 \
9411 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
9412 +
9413 #endif /* CONFIG_X86_32 */
9414
9415 #endif /* __ASSEMBLY__ */
9416 diff -urNp linux-2.6.32.45/arch/x86/include/asm/paravirt_types.h linux-2.6.32.45/arch/x86/include/asm/paravirt_types.h
9417 --- linux-2.6.32.45/arch/x86/include/asm/paravirt_types.h 2011-03-27 14:31:47.000000000 -0400
9418 +++ linux-2.6.32.45/arch/x86/include/asm/paravirt_types.h 2011-08-23 20:24:19.000000000 -0400
9419 @@ -78,19 +78,19 @@ struct pv_init_ops {
9420 */
9421 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
9422 unsigned long addr, unsigned len);
9423 -};
9424 +} __no_const;
9425
9426
9427 struct pv_lazy_ops {
9428 /* Set deferred update mode, used for batching operations. */
9429 void (*enter)(void);
9430 void (*leave)(void);
9431 -};
9432 +} __no_const;
9433
9434 struct pv_time_ops {
9435 unsigned long long (*sched_clock)(void);
9436 unsigned long (*get_tsc_khz)(void);
9437 -};
9438 +} __no_const;
9439
9440 struct pv_cpu_ops {
9441 /* hooks for various privileged instructions */
9442 @@ -186,7 +186,7 @@ struct pv_cpu_ops {
9443
9444 void (*start_context_switch)(struct task_struct *prev);
9445 void (*end_context_switch)(struct task_struct *next);
9446 -};
9447 +} __no_const;
9448
9449 struct pv_irq_ops {
9450 /*
9451 @@ -217,7 +217,7 @@ struct pv_apic_ops {
9452 unsigned long start_eip,
9453 unsigned long start_esp);
9454 #endif
9455 -};
9456 +} __no_const;
9457
9458 struct pv_mmu_ops {
9459 unsigned long (*read_cr2)(void);
9460 @@ -301,6 +301,7 @@ struct pv_mmu_ops {
9461 struct paravirt_callee_save make_pud;
9462
9463 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
9464 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
9465 #endif /* PAGETABLE_LEVELS == 4 */
9466 #endif /* PAGETABLE_LEVELS >= 3 */
9467
9468 @@ -316,6 +317,12 @@ struct pv_mmu_ops {
9469 an mfn. We can tell which is which from the index. */
9470 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
9471 phys_addr_t phys, pgprot_t flags);
9472 +
9473 +#ifdef CONFIG_PAX_KERNEXEC
9474 + unsigned long (*pax_open_kernel)(void);
9475 + unsigned long (*pax_close_kernel)(void);
9476 +#endif
9477 +
9478 };
9479
9480 struct raw_spinlock;
9481 @@ -326,7 +333,7 @@ struct pv_lock_ops {
9482 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
9483 int (*spin_trylock)(struct raw_spinlock *lock);
9484 void (*spin_unlock)(struct raw_spinlock *lock);
9485 -};
9486 +} __no_const;
9487
9488 /* This contains all the paravirt structures: we get a convenient
9489 * number for each function using the offset which we use to indicate
9490 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pci_x86.h linux-2.6.32.45/arch/x86/include/asm/pci_x86.h
9491 --- linux-2.6.32.45/arch/x86/include/asm/pci_x86.h 2011-03-27 14:31:47.000000000 -0400
9492 +++ linux-2.6.32.45/arch/x86/include/asm/pci_x86.h 2011-04-17 15:56:46.000000000 -0400
9493 @@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct
9494 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
9495
9496 struct pci_raw_ops {
9497 - int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9498 + int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9499 int reg, int len, u32 *val);
9500 - int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9501 + int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9502 int reg, int len, u32 val);
9503 };
9504
9505 -extern struct pci_raw_ops *raw_pci_ops;
9506 -extern struct pci_raw_ops *raw_pci_ext_ops;
9507 +extern const struct pci_raw_ops *raw_pci_ops;
9508 +extern const struct pci_raw_ops *raw_pci_ext_ops;
9509
9510 -extern struct pci_raw_ops pci_direct_conf1;
9511 +extern const struct pci_raw_ops pci_direct_conf1;
9512 extern bool port_cf9_safe;
9513
9514 /* arch_initcall level */
9515 diff -urNp linux-2.6.32.45/arch/x86/include/asm/percpu.h linux-2.6.32.45/arch/x86/include/asm/percpu.h
9516 --- linux-2.6.32.45/arch/x86/include/asm/percpu.h 2011-03-27 14:31:47.000000000 -0400
9517 +++ linux-2.6.32.45/arch/x86/include/asm/percpu.h 2011-08-17 19:33:59.000000000 -0400
9518 @@ -78,6 +78,7 @@ do { \
9519 if (0) { \
9520 T__ tmp__; \
9521 tmp__ = (val); \
9522 + (void)tmp__; \
9523 } \
9524 switch (sizeof(var)) { \
9525 case 1: \
9526 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgalloc.h linux-2.6.32.45/arch/x86/include/asm/pgalloc.h
9527 --- linux-2.6.32.45/arch/x86/include/asm/pgalloc.h 2011-03-27 14:31:47.000000000 -0400
9528 +++ linux-2.6.32.45/arch/x86/include/asm/pgalloc.h 2011-04-17 15:56:46.000000000 -0400
9529 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
9530 pmd_t *pmd, pte_t *pte)
9531 {
9532 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9533 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9534 +}
9535 +
9536 +static inline void pmd_populate_user(struct mm_struct *mm,
9537 + pmd_t *pmd, pte_t *pte)
9538 +{
9539 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9540 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9541 }
9542
9543 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable-2level.h linux-2.6.32.45/arch/x86/include/asm/pgtable-2level.h
9544 --- linux-2.6.32.45/arch/x86/include/asm/pgtable-2level.h 2011-03-27 14:31:47.000000000 -0400
9545 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable-2level.h 2011-04-17 15:56:46.000000000 -0400
9546 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
9547
9548 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9549 {
9550 + pax_open_kernel();
9551 *pmdp = pmd;
9552 + pax_close_kernel();
9553 }
9554
9555 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9556 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_32.h linux-2.6.32.45/arch/x86/include/asm/pgtable_32.h
9557 --- linux-2.6.32.45/arch/x86/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
9558 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
9559 @@ -26,9 +26,6 @@
9560 struct mm_struct;
9561 struct vm_area_struct;
9562
9563 -extern pgd_t swapper_pg_dir[1024];
9564 -extern pgd_t trampoline_pg_dir[1024];
9565 -
9566 static inline void pgtable_cache_init(void) { }
9567 static inline void check_pgt_cache(void) { }
9568 void paging_init(void);
9569 @@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, u
9570 # include <asm/pgtable-2level.h>
9571 #endif
9572
9573 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9574 +extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
9575 +#ifdef CONFIG_X86_PAE
9576 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9577 +#endif
9578 +
9579 #if defined(CONFIG_HIGHPTE)
9580 #define __KM_PTE \
9581 (in_nmi() ? KM_NMI_PTE : \
9582 @@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, u
9583 /* Clear a kernel PTE and flush it from the TLB */
9584 #define kpte_clear_flush(ptep, vaddr) \
9585 do { \
9586 + pax_open_kernel(); \
9587 pte_clear(&init_mm, (vaddr), (ptep)); \
9588 + pax_close_kernel(); \
9589 __flush_tlb_one((vaddr)); \
9590 } while (0)
9591
9592 @@ -85,6 +90,9 @@ do { \
9593
9594 #endif /* !__ASSEMBLY__ */
9595
9596 +#define HAVE_ARCH_UNMAPPED_AREA
9597 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9598 +
9599 /*
9600 * kern_addr_valid() is (1) for FLATMEM and (0) for
9601 * SPARSEMEM and DISCONTIGMEM
9602 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_32_types.h linux-2.6.32.45/arch/x86/include/asm/pgtable_32_types.h
9603 --- linux-2.6.32.45/arch/x86/include/asm/pgtable_32_types.h 2011-03-27 14:31:47.000000000 -0400
9604 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable_32_types.h 2011-04-17 15:56:46.000000000 -0400
9605 @@ -8,7 +8,7 @@
9606 */
9607 #ifdef CONFIG_X86_PAE
9608 # include <asm/pgtable-3level_types.h>
9609 -# define PMD_SIZE (1UL << PMD_SHIFT)
9610 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9611 # define PMD_MASK (~(PMD_SIZE - 1))
9612 #else
9613 # include <asm/pgtable-2level_types.h>
9614 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
9615 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9616 #endif
9617
9618 +#ifdef CONFIG_PAX_KERNEXEC
9619 +#ifndef __ASSEMBLY__
9620 +extern unsigned char MODULES_EXEC_VADDR[];
9621 +extern unsigned char MODULES_EXEC_END[];
9622 +#endif
9623 +#include <asm/boot.h>
9624 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9625 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9626 +#else
9627 +#define ktla_ktva(addr) (addr)
9628 +#define ktva_ktla(addr) (addr)
9629 +#endif
9630 +
9631 #define MODULES_VADDR VMALLOC_START
9632 #define MODULES_END VMALLOC_END
9633 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
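
The PMD_SIZE change above swaps a bare 1UL for _AC(1, UL) so that the same header stays usable from assembly files, where a C integer suffix would not assemble. A stand-alone sketch of that idiom follows; the _AC definitions mirror include/linux/const.h, and the PMD_SHIFT value of 21 is assumed here for the example.

/* sketch: why _AC(1, UL) survives inclusion from both C and assembly */
#include <stdio.h>

/* #define __ASSEMBLY__ */           /* an .S file would define this */

#ifdef __ASSEMBLY__
#define _AC(X, Y) X                  /* assembler sees a bare constant */
#else
#define __AC(X, Y) (X##Y)
#define _AC(X, Y) __AC(X, Y)         /* C sees the type suffix pasted back on */
#endif

#define PMD_SHIFT 21                 /* PAE value, assumed for the example */
#define PMD_SIZE  (_AC(1, UL) << PMD_SHIFT)

int main(void)
{
    /* from C this expands to (1UL << 21); from assembly the same header
     * would expand to (1 << 21), which the assembler can digest */
    printf("PMD_SIZE = %lu\n", (unsigned long)PMD_SIZE);
    return 0;
}
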
9634 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable-3level.h linux-2.6.32.45/arch/x86/include/asm/pgtable-3level.h
9635 --- linux-2.6.32.45/arch/x86/include/asm/pgtable-3level.h 2011-03-27 14:31:47.000000000 -0400
9636 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable-3level.h 2011-04-17 15:56:46.000000000 -0400
9637 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
9638
9639 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9640 {
9641 + pax_open_kernel();
9642 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9643 + pax_close_kernel();
9644 }
9645
9646 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9647 {
9648 + pax_open_kernel();
9649 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9650 + pax_close_kernel();
9651 }
9652
9653 /*
9654 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_64.h linux-2.6.32.45/arch/x86/include/asm/pgtable_64.h
9655 --- linux-2.6.32.45/arch/x86/include/asm/pgtable_64.h 2011-03-27 14:31:47.000000000 -0400
9656 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable_64.h 2011-08-23 20:24:19.000000000 -0400
9657 @@ -16,10 +16,13 @@
9658
9659 extern pud_t level3_kernel_pgt[512];
9660 extern pud_t level3_ident_pgt[512];
9661 +extern pud_t level3_vmalloc_pgt[512];
9662 +extern pud_t level3_vmemmap_pgt[512];
9663 +extern pud_t level2_vmemmap_pgt[512];
9664 extern pmd_t level2_kernel_pgt[512];
9665 extern pmd_t level2_fixmap_pgt[512];
9666 -extern pmd_t level2_ident_pgt[512];
9667 -extern pgd_t init_level4_pgt[];
9668 +extern pmd_t level2_ident_pgt[512*2];
9669 +extern pgd_t init_level4_pgt[512];
9670
9671 #define swapper_pg_dir init_level4_pgt
9672
9673 @@ -74,7 +77,9 @@ static inline pte_t native_ptep_get_and_
9674
9675 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9676 {
9677 + pax_open_kernel();
9678 *pmdp = pmd;
9679 + pax_close_kernel();
9680 }
9681
9682 static inline void native_pmd_clear(pmd_t *pmd)
9683 @@ -94,6 +99,13 @@ static inline void native_pud_clear(pud_
9684
9685 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9686 {
9687 + pax_open_kernel();
9688 + *pgdp = pgd;
9689 + pax_close_kernel();
9690 +}
9691 +
9692 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9693 +{
9694 *pgdp = pgd;
9695 }
9696
9697 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_64_types.h linux-2.6.32.45/arch/x86/include/asm/pgtable_64_types.h
9698 --- linux-2.6.32.45/arch/x86/include/asm/pgtable_64_types.h 2011-03-27 14:31:47.000000000 -0400
9699 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable_64_types.h 2011-04-17 15:56:46.000000000 -0400
9700 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9701 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9702 #define MODULES_END _AC(0xffffffffff000000, UL)
9703 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9704 +#define MODULES_EXEC_VADDR MODULES_VADDR
9705 +#define MODULES_EXEC_END MODULES_END
9706 +
9707 +#define ktla_ktva(addr) (addr)
9708 +#define ktva_ktla(addr) (addr)
9709
9710 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9711 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable.h linux-2.6.32.45/arch/x86/include/asm/pgtable.h
9712 --- linux-2.6.32.45/arch/x86/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
9713 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable.h 2011-08-23 20:24:19.000000000 -0400
9714 @@ -39,6 +39,7 @@ extern struct list_head pgd_list;
9715
9716 #ifndef __PAGETABLE_PUD_FOLDED
9717 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
9718 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
9719 #define pgd_clear(pgd) native_pgd_clear(pgd)
9720 #endif
9721
9722 @@ -74,12 +75,51 @@ extern struct list_head pgd_list;
9723
9724 #define arch_end_context_switch(prev) do {} while(0)
9725
9726 +#define pax_open_kernel() native_pax_open_kernel()
9727 +#define pax_close_kernel() native_pax_close_kernel()
9728 #endif /* CONFIG_PARAVIRT */
9729
9730 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
9731 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9732 +
9733 +#ifdef CONFIG_PAX_KERNEXEC
9734 +static inline unsigned long native_pax_open_kernel(void)
9735 +{
9736 + unsigned long cr0;
9737 +
9738 + preempt_disable();
9739 + barrier();
9740 + cr0 = read_cr0() ^ X86_CR0_WP;
9741 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
9742 + write_cr0(cr0);
9743 + return cr0 ^ X86_CR0_WP;
9744 +}
9745 +
9746 +static inline unsigned long native_pax_close_kernel(void)
9747 +{
9748 + unsigned long cr0;
9749 +
9750 + cr0 = read_cr0() ^ X86_CR0_WP;
9751 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9752 + write_cr0(cr0);
9753 + barrier();
9754 + preempt_enable_no_resched();
9755 + return cr0 ^ X86_CR0_WP;
9756 +}
9757 +#else
9758 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
9759 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
9760 +#endif
9761 +
9762 /*
9763 * The following only work if pte_present() is true.
9764 * Undefined behaviour if not..
9765 */
9766 +static inline int pte_user(pte_t pte)
9767 +{
9768 + return pte_val(pte) & _PAGE_USER;
9769 +}
9770 +
9771 static inline int pte_dirty(pte_t pte)
9772 {
9773 return pte_flags(pte) & _PAGE_DIRTY;
9774 @@ -167,9 +207,29 @@ static inline pte_t pte_wrprotect(pte_t
9775 return pte_clear_flags(pte, _PAGE_RW);
9776 }
9777
9778 +static inline pte_t pte_mkread(pte_t pte)
9779 +{
9780 + return __pte(pte_val(pte) | _PAGE_USER);
9781 +}
9782 +
9783 static inline pte_t pte_mkexec(pte_t pte)
9784 {
9785 - return pte_clear_flags(pte, _PAGE_NX);
9786 +#ifdef CONFIG_X86_PAE
9787 + if (__supported_pte_mask & _PAGE_NX)
9788 + return pte_clear_flags(pte, _PAGE_NX);
9789 + else
9790 +#endif
9791 + return pte_set_flags(pte, _PAGE_USER);
9792 +}
9793 +
9794 +static inline pte_t pte_exprotect(pte_t pte)
9795 +{
9796 +#ifdef CONFIG_X86_PAE
9797 + if (__supported_pte_mask & _PAGE_NX)
9798 + return pte_set_flags(pte, _PAGE_NX);
9799 + else
9800 +#endif
9801 + return pte_clear_flags(pte, _PAGE_USER);
9802 }
9803
9804 static inline pte_t pte_mkdirty(pte_t pte)
9805 @@ -302,6 +362,15 @@ pte_t *populate_extra_pte(unsigned long
9806 #endif
9807
9808 #ifndef __ASSEMBLY__
9809 +
9810 +#ifdef CONFIG_PAX_PER_CPU_PGD
9811 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9812 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9813 +{
9814 + return cpu_pgd[cpu];
9815 +}
9816 +#endif
9817 +
9818 #include <linux/mm_types.h>
9819
9820 static inline int pte_none(pte_t pte)
9821 @@ -472,7 +541,7 @@ static inline pud_t *pud_offset(pgd_t *p
9822
9823 static inline int pgd_bad(pgd_t pgd)
9824 {
9825 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9826 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9827 }
9828
9829 static inline int pgd_none(pgd_t pgd)
9830 @@ -495,7 +564,12 @@ static inline int pgd_none(pgd_t pgd)
9831 * pgd_offset() returns a (pgd_t *)
9832 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9833 */
9834 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9835 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9836 +
9837 +#ifdef CONFIG_PAX_PER_CPU_PGD
9838 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9839 +#endif
9840 +
9841 /*
9842 * a shortcut which implies the use of the kernel's pgd, instead
9843 * of a process's
9844 @@ -506,6 +580,20 @@ static inline int pgd_none(pgd_t pgd)
9845 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9846 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9847
9848 +#ifdef CONFIG_X86_32
9849 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9850 +#else
9851 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9852 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9853 +
9854 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9855 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9856 +#else
9857 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9858 +#endif
9859 +
9860 +#endif
9861 +
9862 #ifndef __ASSEMBLY__
9863
9864 extern int direct_gbpages;
9865 @@ -611,11 +699,23 @@ static inline void ptep_set_wrprotect(st
9866 * dst and src can be on the same page, but the range must not overlap,
9867 * and must not cross a page boundary.
9868 */
9869 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9870 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9871 {
9872 - memcpy(dst, src, count * sizeof(pgd_t));
9873 + pax_open_kernel();
9874 + while (count--)
9875 + *dst++ = *src++;
9876 + pax_close_kernel();
9877 }
9878
9879 +#ifdef CONFIG_PAX_PER_CPU_PGD
9880 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9881 +#endif
9882 +
9883 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9884 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9885 +#else
9886 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9887 +#endif
9888
9889 #include <asm-generic/pgtable.h>
9890 #endif /* __ASSEMBLY__ */
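
The native_pax_open_kernel()/native_pax_close_kernel() pair added above bracket writes to otherwise read-only kernel page tables by briefly clearing CR0.WP, as the native_set_pmd()/native_set_pgd() wrappers elsewhere in this patch show. A user-space model of just the bit bookkeeping follows: CR0 becomes an ordinary variable, BUG_ON becomes assert(), and the preemption handling is dropped; only the X86_CR0_WP bit value is the real one.

/* model of the open/close WP-bit bookkeeping, not kernel code */
#include <assert.h>
#include <stdio.h>

#define X86_CR0_WP (1UL << 16)

static unsigned long fake_cr0 = X86_CR0_WP | 0x1;   /* WP set, plus another bit */

static unsigned long read_cr0(void)            { return fake_cr0; }
static void          write_cr0(unsigned long v) { fake_cr0 = v; }

static unsigned long pax_open_kernel_model(void)
{
    unsigned long cr0 = read_cr0() ^ X86_CR0_WP;    /* clear WP */
    assert(!(cr0 & X86_CR0_WP));                    /* it must have been set before */
    write_cr0(cr0);
    return cr0 ^ X86_CR0_WP;                        /* hand back the previous value */
}

static unsigned long pax_close_kernel_model(void)
{
    unsigned long cr0 = read_cr0() ^ X86_CR0_WP;    /* set WP again */
    assert(cr0 & X86_CR0_WP);                       /* it must have been clear */
    write_cr0(cr0);
    return cr0 ^ X86_CR0_WP;
}

int main(void)
{
    pax_open_kernel_model();
    printf("inside the window, WP bit = %d\n", (int)!!(fake_cr0 & X86_CR0_WP));  /* 0 */
    pax_close_kernel_model();
    printf("after close,      WP bit = %d\n", (int)!!(fake_cr0 & X86_CR0_WP));  /* 1 */
    return 0;
}
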
9891 diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_types.h linux-2.6.32.45/arch/x86/include/asm/pgtable_types.h
9892 --- linux-2.6.32.45/arch/x86/include/asm/pgtable_types.h 2011-03-27 14:31:47.000000000 -0400
9893 +++ linux-2.6.32.45/arch/x86/include/asm/pgtable_types.h 2011-04-17 15:56:46.000000000 -0400
9894 @@ -16,12 +16,11 @@
9895 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9896 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9897 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9898 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9899 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9900 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9901 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9902 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9903 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9904 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9905 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9906 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9907
9908 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9909 @@ -39,7 +38,6 @@
9910 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9911 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9912 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9913 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9914 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9915 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9916 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9917 @@ -55,8 +53,10 @@
9918
9919 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9920 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9921 -#else
9922 +#elif defined(CONFIG_KMEMCHECK)
9923 #define _PAGE_NX (_AT(pteval_t, 0))
9924 +#else
9925 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9926 #endif
9927
9928 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9929 @@ -93,6 +93,9 @@
9930 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9931 _PAGE_ACCESSED)
9932
9933 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
9934 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
9935 +
9936 #define __PAGE_KERNEL_EXEC \
9937 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9938 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9939 @@ -103,8 +106,8 @@
9940 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9941 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9942 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9943 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9944 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
9945 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9946 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
9947 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9948 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
9949 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
9950 @@ -163,8 +166,8 @@
9951 * bits are combined, this will alow user to access the high address mapped
9952 * VDSO in the presence of CONFIG_COMPAT_VDSO
9953 */
9954 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9955 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9956 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9957 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9958 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9959 #endif
9960
9961 @@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t p
9962 {
9963 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9964 }
9965 +#endif
9966
9967 +#if PAGETABLE_LEVELS == 3
9968 +#include <asm-generic/pgtable-nopud.h>
9969 +#endif
9970 +
9971 +#if PAGETABLE_LEVELS == 2
9972 +#include <asm-generic/pgtable-nopmd.h>
9973 +#endif
9974 +
9975 +#ifndef __ASSEMBLY__
9976 #if PAGETABLE_LEVELS > 3
9977 typedef struct { pudval_t pud; } pud_t;
9978
9979 @@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pu
9980 return pud.pud;
9981 }
9982 #else
9983 -#include <asm-generic/pgtable-nopud.h>
9984 -
9985 static inline pudval_t native_pud_val(pud_t pud)
9986 {
9987 return native_pgd_val(pud.pgd);
9988 @@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pm
9989 return pmd.pmd;
9990 }
9991 #else
9992 -#include <asm-generic/pgtable-nopmd.h>
9993 -
9994 static inline pmdval_t native_pmd_val(pmd_t pmd)
9995 {
9996 return native_pgd_val(pmd.pud.pgd);
9997 @@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
9998
9999 extern pteval_t __supported_pte_mask;
10000 extern void set_nx(void);
10001 +
10002 +#ifdef CONFIG_X86_32
10003 +#ifdef CONFIG_X86_PAE
10004 extern int nx_enabled;
10005 +#else
10006 +#define nx_enabled (0)
10007 +#endif
10008 +#else
10009 +#define nx_enabled (1)
10010 +#endif
10011
10012 #define pgprot_writecombine pgprot_writecombine
10013 extern pgprot_t pgprot_writecombine(pgprot_t prot);
10014 diff -urNp linux-2.6.32.45/arch/x86/include/asm/processor.h linux-2.6.32.45/arch/x86/include/asm/processor.h
10015 --- linux-2.6.32.45/arch/x86/include/asm/processor.h 2011-04-22 19:16:29.000000000 -0400
10016 +++ linux-2.6.32.45/arch/x86/include/asm/processor.h 2011-05-11 18:25:15.000000000 -0400
10017 @@ -272,7 +272,7 @@ struct tss_struct {
10018
10019 } ____cacheline_aligned;
10020
10021 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
10022 +extern struct tss_struct init_tss[NR_CPUS];
10023
10024 /*
10025 * Save the original ist values for checking stack pointers during debugging
10026 @@ -888,11 +888,18 @@ static inline void spin_lock_prefetch(co
10027 */
10028 #define TASK_SIZE PAGE_OFFSET
10029 #define TASK_SIZE_MAX TASK_SIZE
10030 +
10031 +#ifdef CONFIG_PAX_SEGMEXEC
10032 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
10033 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
10034 +#else
10035 #define STACK_TOP TASK_SIZE
10036 -#define STACK_TOP_MAX STACK_TOP
10037 +#endif
10038 +
10039 +#define STACK_TOP_MAX TASK_SIZE
10040
10041 #define INIT_THREAD { \
10042 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
10043 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
10044 .vm86_info = NULL, \
10045 .sysenter_cs = __KERNEL_CS, \
10046 .io_bitmap_ptr = NULL, \
10047 @@ -906,7 +913,7 @@ static inline void spin_lock_prefetch(co
10048 */
10049 #define INIT_TSS { \
10050 .x86_tss = { \
10051 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
10052 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
10053 .ss0 = __KERNEL_DS, \
10054 .ss1 = __KERNEL_CS, \
10055 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
10056 @@ -917,11 +924,7 @@ static inline void spin_lock_prefetch(co
10057 extern unsigned long thread_saved_pc(struct task_struct *tsk);
10058
10059 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
10060 -#define KSTK_TOP(info) \
10061 -({ \
10062 - unsigned long *__ptr = (unsigned long *)(info); \
10063 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
10064 -})
10065 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
10066
10067 /*
10068 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
10069 @@ -936,7 +939,7 @@ extern unsigned long thread_saved_pc(str
10070 #define task_pt_regs(task) \
10071 ({ \
10072 struct pt_regs *__regs__; \
10073 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
10074 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
10075 __regs__ - 1; \
10076 })
10077
10078 @@ -946,13 +949,13 @@ extern unsigned long thread_saved_pc(str
10079 /*
10080 * User space process size. 47bits minus one guard page.
10081 */
10082 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
10083 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
10084
10085 /* This decides where the kernel will search for a free chunk of vm
10086 * space during mmap's.
10087 */
10088 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
10089 - 0xc0000000 : 0xFFFFe000)
10090 + 0xc0000000 : 0xFFFFf000)
10091
10092 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
10093 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
10094 @@ -963,11 +966,11 @@ extern unsigned long thread_saved_pc(str
10095 #define STACK_TOP_MAX TASK_SIZE_MAX
10096
10097 #define INIT_THREAD { \
10098 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
10099 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
10100 }
10101
10102 #define INIT_TSS { \
10103 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
10104 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
10105 }
10106
10107 /*
10108 @@ -989,6 +992,10 @@ extern void start_thread(struct pt_regs
10109 */
10110 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
10111
10112 +#ifdef CONFIG_PAX_SEGMEXEC
10113 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
10114 +#endif
10115 +
10116 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
10117
10118 /* Get/set a process' ability to use the timestamp counter instruction */
10119 diff -urNp linux-2.6.32.45/arch/x86/include/asm/ptrace.h linux-2.6.32.45/arch/x86/include/asm/ptrace.h
10120 --- linux-2.6.32.45/arch/x86/include/asm/ptrace.h 2011-03-27 14:31:47.000000000 -0400
10121 +++ linux-2.6.32.45/arch/x86/include/asm/ptrace.h 2011-04-17 15:56:46.000000000 -0400
10122 @@ -151,28 +151,29 @@ static inline unsigned long regs_return_
10123 }
10124
10125 /*
10126 - * user_mode_vm(regs) determines whether a register set came from user mode.
10127 + * user_mode(regs) determines whether a register set came from user mode.
10128 * This is true if V8086 mode was enabled OR if the register set was from
10129 * protected mode with RPL-3 CS value. This tricky test checks that with
10130 * one comparison. Many places in the kernel can bypass this full check
10131 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
10132 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
10133 + * be used.
10134 */
10135 -static inline int user_mode(struct pt_regs *regs)
10136 +static inline int user_mode_novm(struct pt_regs *regs)
10137 {
10138 #ifdef CONFIG_X86_32
10139 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
10140 #else
10141 - return !!(regs->cs & 3);
10142 + return !!(regs->cs & SEGMENT_RPL_MASK);
10143 #endif
10144 }
10145
10146 -static inline int user_mode_vm(struct pt_regs *regs)
10147 +static inline int user_mode(struct pt_regs *regs)
10148 {
10149 #ifdef CONFIG_X86_32
10150 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
10151 USER_RPL;
10152 #else
10153 - return user_mode(regs);
10154 + return user_mode_novm(regs);
10155 #endif
10156 }
10157
10158 diff -urNp linux-2.6.32.45/arch/x86/include/asm/reboot.h linux-2.6.32.45/arch/x86/include/asm/reboot.h
10159 --- linux-2.6.32.45/arch/x86/include/asm/reboot.h 2011-03-27 14:31:47.000000000 -0400
10160 +++ linux-2.6.32.45/arch/x86/include/asm/reboot.h 2011-08-05 20:33:55.000000000 -0400
10161 @@ -6,19 +6,19 @@
10162 struct pt_regs;
10163
10164 struct machine_ops {
10165 - void (*restart)(char *cmd);
10166 - void (*halt)(void);
10167 - void (*power_off)(void);
10168 + void (* __noreturn restart)(char *cmd);
10169 + void (* __noreturn halt)(void);
10170 + void (* __noreturn power_off)(void);
10171 void (*shutdown)(void);
10172 void (*crash_shutdown)(struct pt_regs *);
10173 - void (*emergency_restart)(void);
10174 -};
10175 + void (* __noreturn emergency_restart)(void);
10176 +} __no_const;
10177
10178 extern struct machine_ops machine_ops;
10179
10180 void native_machine_crash_shutdown(struct pt_regs *regs);
10181 void native_machine_shutdown(void);
10182 -void machine_real_restart(const unsigned char *code, int length);
10183 +void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
10184
10185 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
10186 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
10187 diff -urNp linux-2.6.32.45/arch/x86/include/asm/rwsem.h linux-2.6.32.45/arch/x86/include/asm/rwsem.h
10188 --- linux-2.6.32.45/arch/x86/include/asm/rwsem.h 2011-03-27 14:31:47.000000000 -0400
10189 +++ linux-2.6.32.45/arch/x86/include/asm/rwsem.h 2011-04-17 15:56:46.000000000 -0400
10190 @@ -118,6 +118,14 @@ static inline void __down_read(struct rw
10191 {
10192 asm volatile("# beginning down_read\n\t"
10193 LOCK_PREFIX _ASM_INC "(%1)\n\t"
10194 +
10195 +#ifdef CONFIG_PAX_REFCOUNT
10196 + "jno 0f\n"
10197 + LOCK_PREFIX _ASM_DEC "(%1)\n\t"
10198 + "int $4\n0:\n"
10199 + _ASM_EXTABLE(0b, 0b)
10200 +#endif
10201 +
10202 /* adds 0x00000001, returns the old value */
10203 " jns 1f\n"
10204 " call call_rwsem_down_read_failed\n"
10205 @@ -139,6 +147,14 @@ static inline int __down_read_trylock(st
10206 "1:\n\t"
10207 " mov %1,%2\n\t"
10208 " add %3,%2\n\t"
10209 +
10210 +#ifdef CONFIG_PAX_REFCOUNT
10211 + "jno 0f\n"
10212 + "sub %3,%2\n"
10213 + "int $4\n0:\n"
10214 + _ASM_EXTABLE(0b, 0b)
10215 +#endif
10216 +
10217 " jle 2f\n\t"
10218 LOCK_PREFIX " cmpxchg %2,%0\n\t"
10219 " jnz 1b\n\t"
10220 @@ -160,6 +176,14 @@ static inline void __down_write_nested(s
10221 tmp = RWSEM_ACTIVE_WRITE_BIAS;
10222 asm volatile("# beginning down_write\n\t"
10223 LOCK_PREFIX " xadd %1,(%2)\n\t"
10224 +
10225 +#ifdef CONFIG_PAX_REFCOUNT
10226 + "jno 0f\n"
10227 + "mov %1,(%2)\n"
10228 + "int $4\n0:\n"
10229 + _ASM_EXTABLE(0b, 0b)
10230 +#endif
10231 +
10232 /* subtract 0x0000ffff, returns the old value */
10233 " test %1,%1\n\t"
10234 /* was the count 0 before? */
10235 @@ -198,6 +222,14 @@ static inline void __up_read(struct rw_s
10236 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
10237 asm volatile("# beginning __up_read\n\t"
10238 LOCK_PREFIX " xadd %1,(%2)\n\t"
10239 +
10240 +#ifdef CONFIG_PAX_REFCOUNT
10241 + "jno 0f\n"
10242 + "mov %1,(%2)\n"
10243 + "int $4\n0:\n"
10244 + _ASM_EXTABLE(0b, 0b)
10245 +#endif
10246 +
10247 /* subtracts 1, returns the old value */
10248 " jns 1f\n\t"
10249 " call call_rwsem_wake\n"
10250 @@ -216,6 +248,14 @@ static inline void __up_write(struct rw_
10251 rwsem_count_t tmp;
10252 asm volatile("# beginning __up_write\n\t"
10253 LOCK_PREFIX " xadd %1,(%2)\n\t"
10254 +
10255 +#ifdef CONFIG_PAX_REFCOUNT
10256 + "jno 0f\n"
10257 + "mov %1,(%2)\n"
10258 + "int $4\n0:\n"
10259 + _ASM_EXTABLE(0b, 0b)
10260 +#endif
10261 +
10262 /* tries to transition
10263 0xffff0001 -> 0x00000000 */
10264 " jz 1f\n"
10265 @@ -234,6 +274,14 @@ static inline void __downgrade_write(str
10266 {
10267 asm volatile("# beginning __downgrade_write\n\t"
10268 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
10269 +
10270 +#ifdef CONFIG_PAX_REFCOUNT
10271 + "jno 0f\n"
10272 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
10273 + "int $4\n0:\n"
10274 + _ASM_EXTABLE(0b, 0b)
10275 +#endif
10276 +
10277 /*
10278 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
10279 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
10280 @@ -253,7 +301,15 @@ static inline void __downgrade_write(str
10281 static inline void rwsem_atomic_add(rwsem_count_t delta,
10282 struct rw_semaphore *sem)
10283 {
10284 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
10285 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
10286 +
10287 +#ifdef CONFIG_PAX_REFCOUNT
10288 + "jno 0f\n"
10289 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
10290 + "int $4\n0:\n"
10291 + _ASM_EXTABLE(0b, 0b)
10292 +#endif
10293 +
10294 : "+m" (sem->count)
10295 : "er" (delta));
10296 }
10297 @@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic
10298 {
10299 rwsem_count_t tmp = delta;
10300
10301 - asm volatile(LOCK_PREFIX "xadd %0,%1"
10302 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
10303 +
10304 +#ifdef CONFIG_PAX_REFCOUNT
10305 + "jno 0f\n"
10306 + "mov %0,%1\n"
10307 + "int $4\n0:\n"
10308 + _ASM_EXTABLE(0b, 0b)
10309 +#endif
10310 +
10311 : "+r" (tmp), "+m" (sem->count)
10312 : : "memory");
10313
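
The PAX_REFCOUNT additions above all follow one pattern: perform the locked arithmetic, then jno past a fixup that undoes the operation and raises int $4 (the overflow exception) when the signed result wrapped. As a rough user-space analogue of the same idea, here is a sketch that detects the wrap and refuses to let the count proceed; it uses __builtin_add_overflow() and abort() in place of the overflow flag and int $4, and the counter value and checked_add() helper are assumptions for the example, not the patch's mechanism.

/* analogue only: detect signed wrap instead of letting a count roll over */
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

static long counter = LONG_MAX - 1;         /* a count about to overflow */

static void checked_add(long delta)
{
    long result;

    if (__builtin_add_overflow(counter, delta, &result)) {
        /* leave the counter untouched rather than letting it wrap, then trap */
        fprintf(stderr, "refcount overflow detected, aborting\n");
        abort();
    }
    counter = result;
}

int main(void)
{
    checked_add(1);     /* fine: reaches LONG_MAX */
    checked_add(1);     /* would wrap: detected and aborted instead */
    return 0;
}
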
10314 diff -urNp linux-2.6.32.45/arch/x86/include/asm/segment.h linux-2.6.32.45/arch/x86/include/asm/segment.h
10315 --- linux-2.6.32.45/arch/x86/include/asm/segment.h 2011-03-27 14:31:47.000000000 -0400
10316 +++ linux-2.6.32.45/arch/x86/include/asm/segment.h 2011-04-17 15:56:46.000000000 -0400
10317 @@ -62,8 +62,8 @@
10318 * 26 - ESPFIX small SS
10319 * 27 - per-cpu [ offset to per-cpu data area ]
10320 * 28 - stack_canary-20 [ for stack protector ]
10321 - * 29 - unused
10322 - * 30 - unused
10323 + * 29 - PCI BIOS CS
10324 + * 30 - PCI BIOS DS
10325 * 31 - TSS for double fault handler
10326 */
10327 #define GDT_ENTRY_TLS_MIN 6
10328 @@ -77,6 +77,8 @@
10329
10330 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
10331
10332 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
10333 +
10334 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
10335
10336 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
10337 @@ -88,7 +90,7 @@
10338 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
10339 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
10340
10341 -#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
10342 +#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
10343 #ifdef CONFIG_SMP
10344 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
10345 #else
10346 @@ -102,6 +104,12 @@
10347 #define __KERNEL_STACK_CANARY 0
10348 #endif
10349
10350 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
10351 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
10352 +
10353 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
10354 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
10355 +
10356 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
10357
10358 /*
10359 @@ -139,7 +147,7 @@
10360 */
10361
10362 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
10363 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
10364 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
10365
10366
10367 #else
10368 @@ -163,6 +171,8 @@
10369 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
10370 #define __USER32_DS __USER_DS
10371
10372 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
10373 +
10374 #define GDT_ENTRY_TSS 8 /* needs two entries */
10375 #define GDT_ENTRY_LDT 10 /* needs two entries */
10376 #define GDT_ENTRY_TLS_MIN 12
10377 @@ -183,6 +193,7 @@
10378 #endif
10379
10380 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
10381 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
10382 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
10383 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
10384 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
10385 diff -urNp linux-2.6.32.45/arch/x86/include/asm/smp.h linux-2.6.32.45/arch/x86/include/asm/smp.h
10386 --- linux-2.6.32.45/arch/x86/include/asm/smp.h 2011-03-27 14:31:47.000000000 -0400
10387 +++ linux-2.6.32.45/arch/x86/include/asm/smp.h 2011-08-05 20:33:55.000000000 -0400
10388 @@ -24,7 +24,7 @@ extern unsigned int num_processors;
10389 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
10390 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
10391 DECLARE_PER_CPU(u16, cpu_llc_id);
10392 -DECLARE_PER_CPU(int, cpu_number);
10393 +DECLARE_PER_CPU(unsigned int, cpu_number);
10394
10395 static inline struct cpumask *cpu_sibling_mask(int cpu)
10396 {
10397 @@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_ap
10398 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
10399
10400 /* Static state in head.S used to set up a CPU */
10401 -extern struct {
10402 - void *sp;
10403 - unsigned short ss;
10404 -} stack_start;
10405 +extern unsigned long stack_start; /* Initial stack pointer address */
10406
10407 struct smp_ops {
10408 void (*smp_prepare_boot_cpu)(void);
10409 @@ -60,7 +57,7 @@ struct smp_ops {
10410
10411 void (*send_call_func_ipi)(const struct cpumask *mask);
10412 void (*send_call_func_single_ipi)(int cpu);
10413 -};
10414 +} __no_const;
10415
10416 /* Globals due to paravirt */
10417 extern void set_cpu_sibling_map(int cpu);
10418 @@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitd
10419 extern int safe_smp_processor_id(void);
10420
10421 #elif defined(CONFIG_X86_64_SMP)
10422 -#define raw_smp_processor_id() (percpu_read(cpu_number))
10423 -
10424 -#define stack_smp_processor_id() \
10425 -({ \
10426 - struct thread_info *ti; \
10427 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
10428 - ti->cpu; \
10429 -})
10430 +#define raw_smp_processor_id() (percpu_read(cpu_number))
10431 +#define stack_smp_processor_id() raw_smp_processor_id()
10432 #define safe_smp_processor_id() smp_processor_id()
10433
10434 #endif
10435 diff -urNp linux-2.6.32.45/arch/x86/include/asm/spinlock.h linux-2.6.32.45/arch/x86/include/asm/spinlock.h
10436 --- linux-2.6.32.45/arch/x86/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
10437 +++ linux-2.6.32.45/arch/x86/include/asm/spinlock.h 2011-04-17 15:56:46.000000000 -0400
10438 @@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(r
10439 static inline void __raw_read_lock(raw_rwlock_t *rw)
10440 {
10441 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
10442 +
10443 +#ifdef CONFIG_PAX_REFCOUNT
10444 + "jno 0f\n"
10445 + LOCK_PREFIX " addl $1,(%0)\n"
10446 + "int $4\n0:\n"
10447 + _ASM_EXTABLE(0b, 0b)
10448 +#endif
10449 +
10450 "jns 1f\n"
10451 "call __read_lock_failed\n\t"
10452 "1:\n"
10453 @@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_r
10454 static inline void __raw_write_lock(raw_rwlock_t *rw)
10455 {
10456 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
10457 +
10458 +#ifdef CONFIG_PAX_REFCOUNT
10459 + "jno 0f\n"
10460 + LOCK_PREFIX " addl %1,(%0)\n"
10461 + "int $4\n0:\n"
10462 + _ASM_EXTABLE(0b, 0b)
10463 +#endif
10464 +
10465 "jz 1f\n"
10466 "call __write_lock_failed\n\t"
10467 "1:\n"
10468 @@ -286,12 +302,29 @@ static inline int __raw_write_trylock(ra
10469
10470 static inline void __raw_read_unlock(raw_rwlock_t *rw)
10471 {
10472 - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
10473 + asm volatile(LOCK_PREFIX "incl %0\n"
10474 +
10475 +#ifdef CONFIG_PAX_REFCOUNT
10476 + "jno 0f\n"
10477 + LOCK_PREFIX "decl %0\n"
10478 + "int $4\n0:\n"
10479 + _ASM_EXTABLE(0b, 0b)
10480 +#endif
10481 +
10482 + :"+m" (rw->lock) : : "memory");
10483 }
10484
10485 static inline void __raw_write_unlock(raw_rwlock_t *rw)
10486 {
10487 - asm volatile(LOCK_PREFIX "addl %1, %0"
10488 + asm volatile(LOCK_PREFIX "addl %1, %0\n"
10489 +
10490 +#ifdef CONFIG_PAX_REFCOUNT
10491 + "jno 0f\n"
10492 + LOCK_PREFIX "subl %1, %0\n"
10493 + "int $4\n0:\n"
10494 + _ASM_EXTABLE(0b, 0b)
10495 +#endif
10496 +
10497 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
10498 }
10499
10500 diff -urNp linux-2.6.32.45/arch/x86/include/asm/stackprotector.h linux-2.6.32.45/arch/x86/include/asm/stackprotector.h
10501 --- linux-2.6.32.45/arch/x86/include/asm/stackprotector.h 2011-03-27 14:31:47.000000000 -0400
10502 +++ linux-2.6.32.45/arch/x86/include/asm/stackprotector.h 2011-07-06 19:53:33.000000000 -0400
10503 @@ -48,7 +48,7 @@
10504 * head_32 for boot CPU and setup_per_cpu_areas() for others.
10505 */
10506 #define GDT_STACK_CANARY_INIT \
10507 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
10508 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
10509
10510 /*
10511 * Initialize the stackprotector canary value.
10512 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
10513
10514 static inline void load_stack_canary_segment(void)
10515 {
10516 -#ifdef CONFIG_X86_32
10517 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
10518 asm volatile ("mov %0, %%gs" : : "r" (0));
10519 #endif
10520 }
10521 diff -urNp linux-2.6.32.45/arch/x86/include/asm/system.h linux-2.6.32.45/arch/x86/include/asm/system.h
10522 --- linux-2.6.32.45/arch/x86/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
10523 +++ linux-2.6.32.45/arch/x86/include/asm/system.h 2011-05-22 23:02:03.000000000 -0400
10524 @@ -132,7 +132,7 @@ do { \
10525 "thread_return:\n\t" \
10526 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
10527 __switch_canary \
10528 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
10529 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
10530 "movq %%rax,%%rdi\n\t" \
10531 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
10532 "jnz ret_from_fork\n\t" \
10533 @@ -143,7 +143,7 @@ do { \
10534 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
10535 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
10536 [_tif_fork] "i" (_TIF_FORK), \
10537 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
10538 + [thread_info] "m" (per_cpu_var(current_tinfo)), \
10539 [current_task] "m" (per_cpu_var(current_task)) \
10540 __switch_canary_iparam \
10541 : "memory", "cc" __EXTRA_CLOBBER)
10542 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
10543 {
10544 unsigned long __limit;
10545 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
10546 - return __limit + 1;
10547 + return __limit;
10548 }
10549
10550 static inline void native_clts(void)
10551 @@ -340,12 +340,12 @@ void enable_hlt(void);
10552
10553 void cpu_idle_wait(void);
10554
10555 -extern unsigned long arch_align_stack(unsigned long sp);
10556 +#define arch_align_stack(x) ((x) & ~0xfUL)
10557 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10558
10559 void default_idle(void);
10560
10561 -void stop_this_cpu(void *dummy);
10562 +void stop_this_cpu(void *dummy) __noreturn;
10563
10564 /*
10565 * Force strict CPU ordering.
10566 diff -urNp linux-2.6.32.45/arch/x86/include/asm/thread_info.h linux-2.6.32.45/arch/x86/include/asm/thread_info.h
10567 --- linux-2.6.32.45/arch/x86/include/asm/thread_info.h 2011-03-27 14:31:47.000000000 -0400
10568 +++ linux-2.6.32.45/arch/x86/include/asm/thread_info.h 2011-05-17 19:26:34.000000000 -0400
10569 @@ -10,6 +10,7 @@
10570 #include <linux/compiler.h>
10571 #include <asm/page.h>
10572 #include <asm/types.h>
10573 +#include <asm/percpu.h>
10574
10575 /*
10576 * low level task data that entry.S needs immediate access to
10577 @@ -24,7 +25,6 @@ struct exec_domain;
10578 #include <asm/atomic.h>
10579
10580 struct thread_info {
10581 - struct task_struct *task; /* main task structure */
10582 struct exec_domain *exec_domain; /* execution domain */
10583 __u32 flags; /* low level flags */
10584 __u32 status; /* thread synchronous flags */
10585 @@ -34,18 +34,12 @@ struct thread_info {
10586 mm_segment_t addr_limit;
10587 struct restart_block restart_block;
10588 void __user *sysenter_return;
10589 -#ifdef CONFIG_X86_32
10590 - unsigned long previous_esp; /* ESP of the previous stack in
10591 - case of nested (IRQ) stacks
10592 - */
10593 - __u8 supervisor_stack[0];
10594 -#endif
10595 + unsigned long lowest_stack;
10596 int uaccess_err;
10597 };
10598
10599 -#define INIT_THREAD_INFO(tsk) \
10600 +#define INIT_THREAD_INFO \
10601 { \
10602 - .task = &tsk, \
10603 .exec_domain = &default_exec_domain, \
10604 .flags = 0, \
10605 .cpu = 0, \
10606 @@ -56,7 +50,7 @@ struct thread_info {
10607 }, \
10608 }
10609
10610 -#define init_thread_info (init_thread_union.thread_info)
10611 +#define init_thread_info (init_thread_union.stack)
10612 #define init_stack (init_thread_union.stack)
10613
10614 #else /* !__ASSEMBLY__ */
10615 @@ -163,6 +157,23 @@ struct thread_info {
10616 #define alloc_thread_info(tsk) \
10617 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
10618
10619 +#ifdef __ASSEMBLY__
10620 +/* how to get the thread information struct from ASM */
10621 +#define GET_THREAD_INFO(reg) \
10622 + mov PER_CPU_VAR(current_tinfo), reg
10623 +
10624 +/* use this one if reg already contains %esp */
10625 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10626 +#else
10627 +/* how to get the thread information struct from C */
10628 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10629 +
10630 +static __always_inline struct thread_info *current_thread_info(void)
10631 +{
10632 + return percpu_read_stable(current_tinfo);
10633 +}
10634 +#endif
10635 +
10636 #ifdef CONFIG_X86_32
10637
10638 #define STACK_WARN (THREAD_SIZE/8)
10639 @@ -173,35 +184,13 @@ struct thread_info {
10640 */
10641 #ifndef __ASSEMBLY__
10642
10643 -
10644 /* how to get the current stack pointer from C */
10645 register unsigned long current_stack_pointer asm("esp") __used;
10646
10647 -/* how to get the thread information struct from C */
10648 -static inline struct thread_info *current_thread_info(void)
10649 -{
10650 - return (struct thread_info *)
10651 - (current_stack_pointer & ~(THREAD_SIZE - 1));
10652 -}
10653 -
10654 -#else /* !__ASSEMBLY__ */
10655 -
10656 -/* how to get the thread information struct from ASM */
10657 -#define GET_THREAD_INFO(reg) \
10658 - movl $-THREAD_SIZE, reg; \
10659 - andl %esp, reg
10660 -
10661 -/* use this one if reg already contains %esp */
10662 -#define GET_THREAD_INFO_WITH_ESP(reg) \
10663 - andl $-THREAD_SIZE, reg
10664 -
10665 #endif
10666
10667 #else /* X86_32 */
10668
10669 -#include <asm/percpu.h>
10670 -#define KERNEL_STACK_OFFSET (5*8)
10671 -
10672 /*
10673 * macros/functions for gaining access to the thread information structure
10674 * preempt_count needs to be 1 initially, until the scheduler is functional.
10675 @@ -209,21 +198,8 @@ static inline struct thread_info *curren
10676 #ifndef __ASSEMBLY__
10677 DECLARE_PER_CPU(unsigned long, kernel_stack);
10678
10679 -static inline struct thread_info *current_thread_info(void)
10680 -{
10681 - struct thread_info *ti;
10682 - ti = (void *)(percpu_read_stable(kernel_stack) +
10683 - KERNEL_STACK_OFFSET - THREAD_SIZE);
10684 - return ti;
10685 -}
10686 -
10687 -#else /* !__ASSEMBLY__ */
10688 -
10689 -/* how to get the thread information struct from ASM */
10690 -#define GET_THREAD_INFO(reg) \
10691 - movq PER_CPU_VAR(kernel_stack),reg ; \
10692 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10693 -
10694 +/* how to get the current stack pointer from C */
10695 +register unsigned long current_stack_pointer asm("rsp") __used;
10696 #endif
10697
10698 #endif /* !X86_32 */
10699 @@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
10700 extern void free_thread_info(struct thread_info *ti);
10701 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10702 #define arch_task_cache_init arch_task_cache_init
10703 +
10704 +#define __HAVE_THREAD_FUNCTIONS
10705 +#define task_thread_info(task) (&(task)->tinfo)
10706 +#define task_stack_page(task) ((task)->stack)
10707 +#define setup_thread_stack(p, org) do {} while (0)
10708 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10709 +
10710 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10711 +extern struct task_struct *alloc_task_struct(void);
10712 +extern void free_task_struct(struct task_struct *);
10713 +
10714 #endif
10715 #endif /* _ASM_X86_THREAD_INFO_H */
10716 diff -urNp linux-2.6.32.45/arch/x86/include/asm/uaccess_32.h linux-2.6.32.45/arch/x86/include/asm/uaccess_32.h
10717 --- linux-2.6.32.45/arch/x86/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
10718 +++ linux-2.6.32.45/arch/x86/include/asm/uaccess_32.h 2011-05-16 21:46:57.000000000 -0400
10719 @@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_u
10720 static __always_inline unsigned long __must_check
10721 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10722 {
10723 + pax_track_stack();
10724 +
10725 + if ((long)n < 0)
10726 + return n;
10727 +
10728 if (__builtin_constant_p(n)) {
10729 unsigned long ret;
10730
10731 @@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to,
10732 return ret;
10733 }
10734 }
10735 + if (!__builtin_constant_p(n))
10736 + check_object_size(from, n, true);
10737 return __copy_to_user_ll(to, from, n);
10738 }
10739
10740 @@ -83,12 +90,16 @@ static __always_inline unsigned long __m
10741 __copy_to_user(void __user *to, const void *from, unsigned long n)
10742 {
10743 might_fault();
10744 +
10745 return __copy_to_user_inatomic(to, from, n);
10746 }
10747
10748 static __always_inline unsigned long
10749 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
10750 {
10751 + if ((long)n < 0)
10752 + return n;
10753 +
10754 /* Avoid zeroing the tail if the copy fails..
10755 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
10756 * but as the zeroing behaviour is only significant when n is not
10757 @@ -138,6 +149,12 @@ static __always_inline unsigned long
10758 __copy_from_user(void *to, const void __user *from, unsigned long n)
10759 {
10760 might_fault();
10761 +
10762 + pax_track_stack();
10763 +
10764 + if ((long)n < 0)
10765 + return n;
10766 +
10767 if (__builtin_constant_p(n)) {
10768 unsigned long ret;
10769
10770 @@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __
10771 return ret;
10772 }
10773 }
10774 + if (!__builtin_constant_p(n))
10775 + check_object_size(to, n, false);
10776 return __copy_from_user_ll(to, from, n);
10777 }
10778
10779 @@ -160,6 +179,10 @@ static __always_inline unsigned long __c
10780 const void __user *from, unsigned long n)
10781 {
10782 might_fault();
10783 +
10784 + if ((long)n < 0)
10785 + return n;
10786 +
10787 if (__builtin_constant_p(n)) {
10788 unsigned long ret;
10789
10790 @@ -182,14 +205,62 @@ static __always_inline unsigned long
10791 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
10792 unsigned long n)
10793 {
10794 - return __copy_from_user_ll_nocache_nozero(to, from, n);
10795 + if ((long)n < 0)
10796 + return n;
10797 +
10798 + return __copy_from_user_ll_nocache_nozero(to, from, n);
10799 +}
10800 +
10801 +/**
10802 + * copy_to_user: - Copy a block of data into user space.
10803 + * @to: Destination address, in user space.
10804 + * @from: Source address, in kernel space.
10805 + * @n: Number of bytes to copy.
10806 + *
10807 + * Context: User context only. This function may sleep.
10808 + *
10809 + * Copy data from kernel space to user space.
10810 + *
10811 + * Returns number of bytes that could not be copied.
10812 + * On success, this will be zero.
10813 + */
10814 +static __always_inline unsigned long __must_check
10815 +copy_to_user(void __user *to, const void *from, unsigned long n)
10816 +{
10817 + if (access_ok(VERIFY_WRITE, to, n))
10818 + n = __copy_to_user(to, from, n);
10819 + return n;
10820 +}
10821 +
10822 +/**
10823 + * copy_from_user: - Copy a block of data from user space.
10824 + * @to: Destination address, in kernel space.
10825 + * @from: Source address, in user space.
10826 + * @n: Number of bytes to copy.
10827 + *
10828 + * Context: User context only. This function may sleep.
10829 + *
10830 + * Copy data from user space to kernel space.
10831 + *
10832 + * Returns number of bytes that could not be copied.
10833 + * On success, this will be zero.
10834 + *
10835 + * If some data could not be copied, this function will pad the copied
10836 + * data to the requested size using zero bytes.
10837 + */
10838 +static __always_inline unsigned long __must_check
10839 +copy_from_user(void *to, const void __user *from, unsigned long n)
10840 +{
10841 + if (access_ok(VERIFY_READ, from, n))
10842 + n = __copy_from_user(to, from, n);
10843 + else if ((long)n > 0) {
10844 + if (!__builtin_constant_p(n))
10845 + check_object_size(to, n, false);
10846 + memset(to, 0, n);
10847 + }
10848 + return n;
10849 }
10850
10851 -unsigned long __must_check copy_to_user(void __user *to,
10852 - const void *from, unsigned long n);
10853 -unsigned long __must_check copy_from_user(void *to,
10854 - const void __user *from,
10855 - unsigned long n);
10856 long __must_check strncpy_from_user(char *dst, const char __user *src,
10857 long count);
10858 long __must_check __strncpy_from_user(char *dst,
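
Aside: the rewritten 32-bit copy helpers above add three things the stock inlines lack: a signedness check on the length, check_object_size() on non-constant sizes, and zero-filling the destination when access_ok() fails. A small userspace sketch of that pattern (illustration only; region_is_readable() and hardened_copy_from() are invented stand-ins for access_ok() and copy_from_user()):

#include <stdbool.h>
#include <string.h>

static bool region_is_readable(const void *src, unsigned long n)
{
        /* stand-in for access_ok(VERIFY_READ, src, n) */
        return src != NULL && n != 0;
}

static unsigned long hardened_copy_from(void *to, const void *from, unsigned long n)
{
        if ((long)n < 0)        /* likely sign bug: refuse instead of copying ~n bytes */
                return n;

        if (region_is_readable(from, n)) {
                memcpy(to, from, n);   /* the kernel calls __copy_from_user_ll() here */
                return 0;              /* 0 bytes left uncopied */
        }

        memset(to, 0, n);              /* failed copy: never expose stale buffer contents */
        return n;                      /* everything was left uncopied */
}
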
10859 diff -urNp linux-2.6.32.45/arch/x86/include/asm/uaccess_64.h linux-2.6.32.45/arch/x86/include/asm/uaccess_64.h
10860 --- linux-2.6.32.45/arch/x86/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
10861 +++ linux-2.6.32.45/arch/x86/include/asm/uaccess_64.h 2011-05-16 21:46:57.000000000 -0400
10862 @@ -9,6 +9,9 @@
10863 #include <linux/prefetch.h>
10864 #include <linux/lockdep.h>
10865 #include <asm/page.h>
10866 +#include <asm/pgtable.h>
10867 +
10868 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
10869
10870 /*
10871 * Copy To/From Userspace
10872 @@ -19,113 +22,203 @@ __must_check unsigned long
10873 copy_user_generic(void *to, const void *from, unsigned len);
10874
10875 __must_check unsigned long
10876 -copy_to_user(void __user *to, const void *from, unsigned len);
10877 -__must_check unsigned long
10878 -copy_from_user(void *to, const void __user *from, unsigned len);
10879 -__must_check unsigned long
10880 copy_in_user(void __user *to, const void __user *from, unsigned len);
10881
10882 static __always_inline __must_check
10883 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
10884 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
10885 {
10886 - int ret = 0;
10887 + unsigned ret = 0;
10888
10889 might_fault();
10890 - if (!__builtin_constant_p(size))
10891 - return copy_user_generic(dst, (__force void *)src, size);
10892 +
10893 + if ((int)size < 0)
10894 + return size;
10895 +
10896 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10897 + if (!__access_ok(VERIFY_READ, src, size))
10898 + return size;
10899 +#endif
10900 +
10901 + if (!__builtin_constant_p(size)) {
10902 + check_object_size(dst, size, false);
10903 +
10904 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10905 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10906 + src += PAX_USER_SHADOW_BASE;
10907 +#endif
10908 +
10909 + return copy_user_generic(dst, (__force const void *)src, size);
10910 + }
10911 switch (size) {
10912 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
10913 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
10914 ret, "b", "b", "=q", 1);
10915 return ret;
10916 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
10917 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
10918 ret, "w", "w", "=r", 2);
10919 return ret;
10920 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
10921 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
10922 ret, "l", "k", "=r", 4);
10923 return ret;
10924 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
10925 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10926 ret, "q", "", "=r", 8);
10927 return ret;
10928 case 10:
10929 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10930 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10931 ret, "q", "", "=r", 10);
10932 if (unlikely(ret))
10933 return ret;
10934 __get_user_asm(*(u16 *)(8 + (char *)dst),
10935 - (u16 __user *)(8 + (char __user *)src),
10936 + (const u16 __user *)(8 + (const char __user *)src),
10937 ret, "w", "w", "=r", 2);
10938 return ret;
10939 case 16:
10940 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10941 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10942 ret, "q", "", "=r", 16);
10943 if (unlikely(ret))
10944 return ret;
10945 __get_user_asm(*(u64 *)(8 + (char *)dst),
10946 - (u64 __user *)(8 + (char __user *)src),
10947 + (const u64 __user *)(8 + (const char __user *)src),
10948 ret, "q", "", "=r", 8);
10949 return ret;
10950 default:
10951 - return copy_user_generic(dst, (__force void *)src, size);
10952 +
10953 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10954 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10955 + src += PAX_USER_SHADOW_BASE;
10956 +#endif
10957 +
10958 + return copy_user_generic(dst, (__force const void *)src, size);
10959 }
10960 }
10961
10962 static __always_inline __must_check
10963 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
10964 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
10965 {
10966 - int ret = 0;
10967 + unsigned ret = 0;
10968
10969 might_fault();
10970 - if (!__builtin_constant_p(size))
10971 +
10972 + pax_track_stack();
10973 +
10974 + if ((int)size < 0)
10975 + return size;
10976 +
10977 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10978 + if (!__access_ok(VERIFY_WRITE, dst, size))
10979 + return size;
10980 +#endif
10981 +
10982 + if (!__builtin_constant_p(size)) {
10983 + check_object_size(src, size, true);
10984 +
10985 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10986 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10987 + dst += PAX_USER_SHADOW_BASE;
10988 +#endif
10989 +
10990 return copy_user_generic((__force void *)dst, src, size);
10991 + }
10992 switch (size) {
10993 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
10994 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
10995 ret, "b", "b", "iq", 1);
10996 return ret;
10997 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
10998 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
10999 ret, "w", "w", "ir", 2);
11000 return ret;
11001 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
11002 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
11003 ret, "l", "k", "ir", 4);
11004 return ret;
11005 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
11006 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11007 ret, "q", "", "er", 8);
11008 return ret;
11009 case 10:
11010 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11011 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11012 ret, "q", "", "er", 10);
11013 if (unlikely(ret))
11014 return ret;
11015 asm("":::"memory");
11016 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
11017 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
11018 ret, "w", "w", "ir", 2);
11019 return ret;
11020 case 16:
11021 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11022 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11023 ret, "q", "", "er", 16);
11024 if (unlikely(ret))
11025 return ret;
11026 asm("":::"memory");
11027 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
11028 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
11029 ret, "q", "", "er", 8);
11030 return ret;
11031 default:
11032 +
11033 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11034 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11035 + dst += PAX_USER_SHADOW_BASE;
11036 +#endif
11037 +
11038 return copy_user_generic((__force void *)dst, src, size);
11039 }
11040 }
11041
11042 static __always_inline __must_check
11043 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11044 +unsigned long copy_to_user(void __user *to, const void *from, unsigned len)
11045 +{
11046 + if (access_ok(VERIFY_WRITE, to, len))
11047 + len = __copy_to_user(to, from, len);
11048 + return len;
11049 +}
11050 +
11051 +static __always_inline __must_check
11052 +unsigned long copy_from_user(void *to, const void __user *from, unsigned len)
11053 +{
11054 + if ((int)len < 0)
11055 + return len;
11056 +
11057 + if (access_ok(VERIFY_READ, from, len))
11058 + len = __copy_from_user(to, from, len);
11059 + else if ((int)len > 0) {
11060 + if (!__builtin_constant_p(len))
11061 + check_object_size(to, len, false);
11062 + memset(to, 0, len);
11063 + }
11064 + return len;
11065 +}
11066 +
11067 +static __always_inline __must_check
11068 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11069 {
11070 - int ret = 0;
11071 + unsigned ret = 0;
11072
11073 might_fault();
11074 - if (!__builtin_constant_p(size))
11075 +
11076 + pax_track_stack();
11077 +
11078 + if ((int)size < 0)
11079 + return size;
11080 +
11081 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11082 + if (!__access_ok(VERIFY_READ, src, size))
11083 + return size;
11084 + if (!__access_ok(VERIFY_WRITE, dst, size))
11085 + return size;
11086 +#endif
11087 +
11088 + if (!__builtin_constant_p(size)) {
11089 +
11090 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11091 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11092 + src += PAX_USER_SHADOW_BASE;
11093 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11094 + dst += PAX_USER_SHADOW_BASE;
11095 +#endif
11096 +
11097 return copy_user_generic((__force void *)dst,
11098 - (__force void *)src, size);
11099 + (__force const void *)src, size);
11100 + }
11101 switch (size) {
11102 case 1: {
11103 u8 tmp;
11104 - __get_user_asm(tmp, (u8 __user *)src,
11105 + __get_user_asm(tmp, (const u8 __user *)src,
11106 ret, "b", "b", "=q", 1);
11107 if (likely(!ret))
11108 __put_user_asm(tmp, (u8 __user *)dst,
11109 @@ -134,7 +227,7 @@ int __copy_in_user(void __user *dst, con
11110 }
11111 case 2: {
11112 u16 tmp;
11113 - __get_user_asm(tmp, (u16 __user *)src,
11114 + __get_user_asm(tmp, (const u16 __user *)src,
11115 ret, "w", "w", "=r", 2);
11116 if (likely(!ret))
11117 __put_user_asm(tmp, (u16 __user *)dst,
11118 @@ -144,7 +237,7 @@ int __copy_in_user(void __user *dst, con
11119
11120 case 4: {
11121 u32 tmp;
11122 - __get_user_asm(tmp, (u32 __user *)src,
11123 + __get_user_asm(tmp, (const u32 __user *)src,
11124 ret, "l", "k", "=r", 4);
11125 if (likely(!ret))
11126 __put_user_asm(tmp, (u32 __user *)dst,
11127 @@ -153,7 +246,7 @@ int __copy_in_user(void __user *dst, con
11128 }
11129 case 8: {
11130 u64 tmp;
11131 - __get_user_asm(tmp, (u64 __user *)src,
11132 + __get_user_asm(tmp, (const u64 __user *)src,
11133 ret, "q", "", "=r", 8);
11134 if (likely(!ret))
11135 __put_user_asm(tmp, (u64 __user *)dst,
11136 @@ -161,8 +254,16 @@ int __copy_in_user(void __user *dst, con
11137 return ret;
11138 }
11139 default:
11140 +
11141 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11142 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11143 + src += PAX_USER_SHADOW_BASE;
11144 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11145 + dst += PAX_USER_SHADOW_BASE;
11146 +#endif
11147 +
11148 return copy_user_generic((__force void *)dst,
11149 - (__force void *)src, size);
11150 + (__force const void *)src, size);
11151 }
11152 }
11153
11154 @@ -176,33 +277,75 @@ __must_check long strlen_user(const char
11155 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
11156 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
11157
11158 -__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
11159 - unsigned size);
11160 +static __must_check __always_inline unsigned long
11161 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
11162 +{
11163 + pax_track_stack();
11164 +
11165 + if ((int)size < 0)
11166 + return size;
11167
11168 -static __must_check __always_inline int
11169 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11170 + if (!__access_ok(VERIFY_READ, src, size))
11171 + return size;
11172 +
11173 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11174 + src += PAX_USER_SHADOW_BASE;
11175 +#endif
11176 +
11177 + return copy_user_generic(dst, (__force const void *)src, size);
11178 +}
11179 +
11180 +static __must_check __always_inline unsigned long
11181 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
11182 {
11183 + if ((int)size < 0)
11184 + return size;
11185 +
11186 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11187 + if (!__access_ok(VERIFY_WRITE, dst, size))
11188 + return size;
11189 +
11190 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11191 + dst += PAX_USER_SHADOW_BASE;
11192 +#endif
11193 +
11194 return copy_user_generic((__force void *)dst, src, size);
11195 }
11196
11197 -extern long __copy_user_nocache(void *dst, const void __user *src,
11198 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
11199 unsigned size, int zerorest);
11200
11201 -static inline int
11202 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11203 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11204 {
11205 might_sleep();
11206 +
11207 + if ((int)size < 0)
11208 + return size;
11209 +
11210 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11211 + if (!__access_ok(VERIFY_READ, src, size))
11212 + return size;
11213 +#endif
11214 +
11215 return __copy_user_nocache(dst, src, size, 1);
11216 }
11217
11218 -static inline int
11219 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11220 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11221 unsigned size)
11222 {
11223 + if ((int)size < 0)
11224 + return size;
11225 +
11226 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11227 + if (!__access_ok(VERIFY_READ, src, size))
11228 + return size;
11229 +#endif
11230 +
11231 return __copy_user_nocache(dst, src, size, 0);
11232 }
11233
11234 -unsigned long
11235 +extern unsigned long
11236 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
11237
11238 #endif /* _ASM_X86_UACCESS_64_H */
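
Aside: on the 64-bit side the patch also rebases user pointers by PAX_USER_SHADOW_BASE before handing them to copy_user_generic() under CONFIG_PAX_MEMORY_UDEREF, the idea being that only explicitly translated user addresses remain usable from kernel context while a raw user pointer faults. A sketch of that translation step, with an arbitrary illustrative constant in place of the real shadow base:

#define SHADOW_BASE 0x100000000UL   /* illustrative, not the real PAX_USER_SHADOW_BASE */

/* Redirect a "raw" user address into the shadow mapping before use. */
static inline const void *shadow_translate(const void *user_ptr)
{
        unsigned long addr = (unsigned long)user_ptr;

        if (addr < SHADOW_BASE)
                addr += SHADOW_BASE;
        return (const void *)addr;
}
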
11239 diff -urNp linux-2.6.32.45/arch/x86/include/asm/uaccess.h linux-2.6.32.45/arch/x86/include/asm/uaccess.h
11240 --- linux-2.6.32.45/arch/x86/include/asm/uaccess.h 2011-06-25 12:55:34.000000000 -0400
11241 +++ linux-2.6.32.45/arch/x86/include/asm/uaccess.h 2011-06-25 12:56:37.000000000 -0400
11242 @@ -8,12 +8,15 @@
11243 #include <linux/thread_info.h>
11244 #include <linux/prefetch.h>
11245 #include <linux/string.h>
11246 +#include <linux/sched.h>
11247 #include <asm/asm.h>
11248 #include <asm/page.h>
11249
11250 #define VERIFY_READ 0
11251 #define VERIFY_WRITE 1
11252
11253 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
11254 +
11255 /*
11256 * The fs value determines whether argument validity checking should be
11257 * performed or not. If get_fs() == USER_DS, checking is performed, with
11258 @@ -29,7 +32,12 @@
11259
11260 #define get_ds() (KERNEL_DS)
11261 #define get_fs() (current_thread_info()->addr_limit)
11262 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
11263 +void __set_fs(mm_segment_t x);
11264 +void set_fs(mm_segment_t x);
11265 +#else
11266 #define set_fs(x) (current_thread_info()->addr_limit = (x))
11267 +#endif
11268
11269 #define segment_eq(a, b) ((a).seg == (b).seg)
11270
11271 @@ -77,7 +85,33 @@
11272 * checks that the pointer is in the user space range - after calling
11273 * this function, memory access functions may still return -EFAULT.
11274 */
11275 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
11276 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
11277 +#define access_ok(type, addr, size) \
11278 +({ \
11279 + long __size = size; \
11280 + unsigned long __addr = (unsigned long)addr; \
11281 + unsigned long __addr_ao = __addr & PAGE_MASK; \
11282 + unsigned long __end_ao = __addr + __size - 1; \
11283 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
11284 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
11285 + while(__addr_ao <= __end_ao) { \
11286 + char __c_ao; \
11287 + __addr_ao += PAGE_SIZE; \
11288 + if (__size > PAGE_SIZE) \
11289 + cond_resched(); \
11290 + if (__get_user(__c_ao, (char __user *)__addr)) \
11291 + break; \
11292 + if (type != VERIFY_WRITE) { \
11293 + __addr = __addr_ao; \
11294 + continue; \
11295 + } \
11296 + if (__put_user(__c_ao, (char __user *)__addr)) \
11297 + break; \
11298 + __addr = __addr_ao; \
11299 + } \
11300 + } \
11301 + __ret_ao; \
11302 +})
11303
11304 /*
11305 * The exception table consists of pairs of addresses: the first is the
11306 @@ -183,12 +217,20 @@ extern int __get_user_bad(void);
11307 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
11308 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
11309
11310 -
11311 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
11312 +#define __copyuser_seg "gs;"
11313 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
11314 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
11315 +#else
11316 +#define __copyuser_seg
11317 +#define __COPYUSER_SET_ES
11318 +#define __COPYUSER_RESTORE_ES
11319 +#endif
11320
11321 #ifdef CONFIG_X86_32
11322 #define __put_user_asm_u64(x, addr, err, errret) \
11323 - asm volatile("1: movl %%eax,0(%2)\n" \
11324 - "2: movl %%edx,4(%2)\n" \
11325 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
11326 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
11327 "3:\n" \
11328 ".section .fixup,\"ax\"\n" \
11329 "4: movl %3,%0\n" \
11330 @@ -200,8 +242,8 @@ extern int __get_user_bad(void);
11331 : "A" (x), "r" (addr), "i" (errret), "0" (err))
11332
11333 #define __put_user_asm_ex_u64(x, addr) \
11334 - asm volatile("1: movl %%eax,0(%1)\n" \
11335 - "2: movl %%edx,4(%1)\n" \
11336 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
11337 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
11338 "3:\n" \
11339 _ASM_EXTABLE(1b, 2b - 1b) \
11340 _ASM_EXTABLE(2b, 3b - 2b) \
11341 @@ -374,7 +416,7 @@ do { \
11342 } while (0)
11343
11344 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
11345 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
11346 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
11347 "2:\n" \
11348 ".section .fixup,\"ax\"\n" \
11349 "3: mov %3,%0\n" \
11350 @@ -382,7 +424,7 @@ do { \
11351 " jmp 2b\n" \
11352 ".previous\n" \
11353 _ASM_EXTABLE(1b, 3b) \
11354 - : "=r" (err), ltype(x) \
11355 + : "=r" (err), ltype (x) \
11356 : "m" (__m(addr)), "i" (errret), "0" (err))
11357
11358 #define __get_user_size_ex(x, ptr, size) \
11359 @@ -407,7 +449,7 @@ do { \
11360 } while (0)
11361
11362 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
11363 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
11364 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
11365 "2:\n" \
11366 _ASM_EXTABLE(1b, 2b - 1b) \
11367 : ltype(x) : "m" (__m(addr)))
11368 @@ -424,13 +466,24 @@ do { \
11369 int __gu_err; \
11370 unsigned long __gu_val; \
11371 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
11372 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
11373 + (x) = (__typeof__(*(ptr)))__gu_val; \
11374 __gu_err; \
11375 })
11376
11377 /* FIXME: this hack is definitely wrong -AK */
11378 struct __large_struct { unsigned long buf[100]; };
11379 -#define __m(x) (*(struct __large_struct __user *)(x))
11380 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11381 +#define ____m(x) \
11382 +({ \
11383 + unsigned long ____x = (unsigned long)(x); \
11384 + if (____x < PAX_USER_SHADOW_BASE) \
11385 + ____x += PAX_USER_SHADOW_BASE; \
11386 + (void __user *)____x; \
11387 +})
11388 +#else
11389 +#define ____m(x) (x)
11390 +#endif
11391 +#define __m(x) (*(struct __large_struct __user *)____m(x))
11392
11393 /*
11394 * Tell gcc we read from memory instead of writing: this is because
11395 @@ -438,7 +491,7 @@ struct __large_struct { unsigned long bu
11396 * aliasing issues.
11397 */
11398 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
11399 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
11400 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
11401 "2:\n" \
11402 ".section .fixup,\"ax\"\n" \
11403 "3: mov %3,%0\n" \
11404 @@ -446,10 +499,10 @@ struct __large_struct { unsigned long bu
11405 ".previous\n" \
11406 _ASM_EXTABLE(1b, 3b) \
11407 : "=r"(err) \
11408 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
11409 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
11410
11411 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
11412 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
11413 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
11414 "2:\n" \
11415 _ASM_EXTABLE(1b, 2b - 1b) \
11416 : : ltype(x), "m" (__m(addr)))
11417 @@ -488,8 +541,12 @@ struct __large_struct { unsigned long bu
11418 * On error, the variable @x is set to zero.
11419 */
11420
11421 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11422 +#define __get_user(x, ptr) get_user((x), (ptr))
11423 +#else
11424 #define __get_user(x, ptr) \
11425 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
11426 +#endif
11427
11428 /**
11429 * __put_user: - Write a simple value into user space, with less checking.
11430 @@ -511,8 +568,12 @@ struct __large_struct { unsigned long bu
11431 * Returns zero on success, or -EFAULT on error.
11432 */
11433
11434 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11435 +#define __put_user(x, ptr) put_user((x), (ptr))
11436 +#else
11437 #define __put_user(x, ptr) \
11438 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
11439 +#endif
11440
11441 #define __get_user_unaligned __get_user
11442 #define __put_user_unaligned __put_user
11443 @@ -530,7 +591,7 @@ struct __large_struct { unsigned long bu
11444 #define get_user_ex(x, ptr) do { \
11445 unsigned long __gue_val; \
11446 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
11447 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
11448 + (x) = (__typeof__(*(ptr)))__gue_val; \
11449 } while (0)
11450
11451 #ifdef CONFIG_X86_WP_WORKS_OK
11452 @@ -567,6 +628,7 @@ extern struct movsl_mask {
11453
11454 #define ARCH_HAS_NOCACHE_UACCESS 1
11455
11456 +#define ARCH_HAS_SORT_EXTABLE
11457 #ifdef CONFIG_X86_32
11458 # include "uaccess_32.h"
11459 #else
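
Aside: the reworked access_ok() above does more than a range check: when the region crosses a page boundary it walks it page by page, touching each page with __get_user() (and __put_user() for writes) so the access is pre-faulted and write-verified up front. A userspace analogue of that pre-faulting loop, assuming 4 KiB pages; names and constants are illustrative:

#include <stddef.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Touch one byte in every page of [buf, buf + len) so that any fault
 * happens here rather than in the middle of a later operation. */
static void prefault_readable(const char *buf, size_t len)
{
        unsigned long addr, end, page;

        if (len == 0)
                return;

        addr = (unsigned long)buf;
        end  = addr + len - 1;

        for (page = addr & PAGE_MASK; page <= end; page += PAGE_SIZE) {
                /* the first page may start before buf; never read outside it */
                unsigned long touch = page < addr ? addr : page;
                (void)*(const volatile char *)touch;
        }
}
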
11460 diff -urNp linux-2.6.32.45/arch/x86/include/asm/vgtod.h linux-2.6.32.45/arch/x86/include/asm/vgtod.h
11461 --- linux-2.6.32.45/arch/x86/include/asm/vgtod.h 2011-03-27 14:31:47.000000000 -0400
11462 +++ linux-2.6.32.45/arch/x86/include/asm/vgtod.h 2011-04-17 15:56:46.000000000 -0400
11463 @@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
11464 int sysctl_enabled;
11465 struct timezone sys_tz;
11466 struct { /* extract of a clocksource struct */
11467 + char name[8];
11468 cycle_t (*vread)(void);
11469 cycle_t cycle_last;
11470 cycle_t mask;
11471 diff -urNp linux-2.6.32.45/arch/x86/include/asm/vmi.h linux-2.6.32.45/arch/x86/include/asm/vmi.h
11472 --- linux-2.6.32.45/arch/x86/include/asm/vmi.h 2011-03-27 14:31:47.000000000 -0400
11473 +++ linux-2.6.32.45/arch/x86/include/asm/vmi.h 2011-04-17 15:56:46.000000000 -0400
11474 @@ -191,6 +191,7 @@ struct vrom_header {
11475 u8 reserved[96]; /* Reserved for headers */
11476 char vmi_init[8]; /* VMI_Init jump point */
11477 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
11478 + char rom_data[8048]; /* rest of the option ROM */
11479 } __attribute__((packed));
11480
11481 struct pnp_header {
11482 diff -urNp linux-2.6.32.45/arch/x86/include/asm/vmi_time.h linux-2.6.32.45/arch/x86/include/asm/vmi_time.h
11483 --- linux-2.6.32.45/arch/x86/include/asm/vmi_time.h 2011-03-27 14:31:47.000000000 -0400
11484 +++ linux-2.6.32.45/arch/x86/include/asm/vmi_time.h 2011-08-05 20:33:55.000000000 -0400
11485 @@ -43,7 +43,7 @@ extern struct vmi_timer_ops {
11486 int (*wallclock_updated)(void);
11487 void (*set_alarm)(u32 flags, u64 expiry, u64 period);
11488 void (*cancel_alarm)(u32 flags);
11489 -} vmi_timer_ops;
11490 +} __no_const vmi_timer_ops;
11491
11492 /* Prototypes */
11493 extern void __init vmi_time_init(void);
11494 diff -urNp linux-2.6.32.45/arch/x86/include/asm/vsyscall.h linux-2.6.32.45/arch/x86/include/asm/vsyscall.h
11495 --- linux-2.6.32.45/arch/x86/include/asm/vsyscall.h 2011-03-27 14:31:47.000000000 -0400
11496 +++ linux-2.6.32.45/arch/x86/include/asm/vsyscall.h 2011-04-17 15:56:46.000000000 -0400
11497 @@ -15,9 +15,10 @@ enum vsyscall_num {
11498
11499 #ifdef __KERNEL__
11500 #include <linux/seqlock.h>
11501 +#include <linux/getcpu.h>
11502 +#include <linux/time.h>
11503
11504 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
11505 -#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
11506
11507 /* Definitions for CONFIG_GENERIC_TIME definitions */
11508 #define __section_vsyscall_gtod_data __attribute__ \
11509 @@ -31,7 +32,6 @@ enum vsyscall_num {
11510 #define VGETCPU_LSL 2
11511
11512 extern int __vgetcpu_mode;
11513 -extern volatile unsigned long __jiffies;
11514
11515 /* kernel space (writeable) */
11516 extern int vgetcpu_mode;
11517 @@ -39,6 +39,9 @@ extern struct timezone sys_tz;
11518
11519 extern void map_vsyscall(void);
11520
11521 +extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
11522 +extern time_t vtime(time_t *t);
11523 +extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
11524 #endif /* __KERNEL__ */
11525
11526 #endif /* _ASM_X86_VSYSCALL_H */
11527 diff -urNp linux-2.6.32.45/arch/x86/include/asm/x86_init.h linux-2.6.32.45/arch/x86/include/asm/x86_init.h
11528 --- linux-2.6.32.45/arch/x86/include/asm/x86_init.h 2011-03-27 14:31:47.000000000 -0400
11529 +++ linux-2.6.32.45/arch/x86/include/asm/x86_init.h 2011-08-05 20:33:55.000000000 -0400
11530 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
11531 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
11532 void (*find_smp_config)(unsigned int reserve);
11533 void (*get_smp_config)(unsigned int early);
11534 -};
11535 +} __no_const;
11536
11537 /**
11538 * struct x86_init_resources - platform specific resource related ops
11539 @@ -42,7 +42,7 @@ struct x86_init_resources {
11540 void (*probe_roms)(void);
11541 void (*reserve_resources)(void);
11542 char *(*memory_setup)(void);
11543 -};
11544 +} __no_const;
11545
11546 /**
11547 * struct x86_init_irqs - platform specific interrupt setup
11548 @@ -55,7 +55,7 @@ struct x86_init_irqs {
11549 void (*pre_vector_init)(void);
11550 void (*intr_init)(void);
11551 void (*trap_init)(void);
11552 -};
11553 +} __no_const;
11554
11555 /**
11556 * struct x86_init_oem - oem platform specific customizing functions
11557 @@ -65,7 +65,7 @@ struct x86_init_irqs {
11558 struct x86_init_oem {
11559 void (*arch_setup)(void);
11560 void (*banner)(void);
11561 -};
11562 +} __no_const;
11563
11564 /**
11565 * struct x86_init_paging - platform specific paging functions
11566 @@ -75,7 +75,7 @@ struct x86_init_oem {
11567 struct x86_init_paging {
11568 void (*pagetable_setup_start)(pgd_t *base);
11569 void (*pagetable_setup_done)(pgd_t *base);
11570 -};
11571 +} __no_const;
11572
11573 /**
11574 * struct x86_init_timers - platform specific timer setup
11575 @@ -88,7 +88,7 @@ struct x86_init_timers {
11576 void (*setup_percpu_clockev)(void);
11577 void (*tsc_pre_init)(void);
11578 void (*timer_init)(void);
11579 -};
11580 +} __no_const;
11581
11582 /**
11583 * struct x86_init_ops - functions for platform specific setup
11584 @@ -101,7 +101,7 @@ struct x86_init_ops {
11585 struct x86_init_oem oem;
11586 struct x86_init_paging paging;
11587 struct x86_init_timers timers;
11588 -};
11589 +} __no_const;
11590
11591 /**
11592 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
11593 @@ -109,7 +109,7 @@ struct x86_init_ops {
11594 */
11595 struct x86_cpuinit_ops {
11596 void (*setup_percpu_clockev)(void);
11597 -};
11598 +} __no_const;
11599
11600 /**
11601 * struct x86_platform_ops - platform specific runtime functions
11602 @@ -121,7 +121,7 @@ struct x86_platform_ops {
11603 unsigned long (*calibrate_tsc)(void);
11604 unsigned long (*get_wallclock)(void);
11605 int (*set_wallclock)(unsigned long nowtime);
11606 -};
11607 +} __no_const;
11608
11609 extern struct x86_init_ops x86_init;
11610 extern struct x86_cpuinit_ops x86_cpuinit;
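
Aside: the __no_const annotations above exist because the accompanying gcc "constify" plugin (see the CONSTIFY_PLUGIN handling later in this patch) turns structures made up of function pointers into const objects; tables such as x86_init that the kernel fills in at runtime have to opt out. The underlying idea in plain C, as a standalone illustration:

#include <stdio.h>

struct ops {
        void (*banner)(void);
};

static void print_banner(void) { puts("hello"); }

/* Statically initialized and never written again, so it can be const
 * and live in read-only memory; a runtime-filled table could not. */
static const struct ops default_ops = { .banner = print_banner };

int main(void)
{
        default_ops.banner();
        return 0;
}
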
11611 diff -urNp linux-2.6.32.45/arch/x86/include/asm/xsave.h linux-2.6.32.45/arch/x86/include/asm/xsave.h
11612 --- linux-2.6.32.45/arch/x86/include/asm/xsave.h 2011-03-27 14:31:47.000000000 -0400
11613 +++ linux-2.6.32.45/arch/x86/include/asm/xsave.h 2011-04-17 15:56:46.000000000 -0400
11614 @@ -56,6 +56,12 @@ static inline int xrstor_checking(struct
11615 static inline int xsave_user(struct xsave_struct __user *buf)
11616 {
11617 int err;
11618 +
11619 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11620 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
11621 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
11622 +#endif
11623 +
11624 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
11625 "2:\n"
11626 ".section .fixup,\"ax\"\n"
11627 @@ -82,6 +88,11 @@ static inline int xrestore_user(struct x
11628 u32 lmask = mask;
11629 u32 hmask = mask >> 32;
11630
11631 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11632 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
11633 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
11634 +#endif
11635 +
11636 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
11637 "2:\n"
11638 ".section .fixup,\"ax\"\n"
11639 diff -urNp linux-2.6.32.45/arch/x86/Kconfig linux-2.6.32.45/arch/x86/Kconfig
11640 --- linux-2.6.32.45/arch/x86/Kconfig 2011-03-27 14:31:47.000000000 -0400
11641 +++ linux-2.6.32.45/arch/x86/Kconfig 2011-04-17 15:56:46.000000000 -0400
11642 @@ -223,7 +223,7 @@ config X86_TRAMPOLINE
11643
11644 config X86_32_LAZY_GS
11645 def_bool y
11646 - depends on X86_32 && !CC_STACKPROTECTOR
11647 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
11648
11649 config KTIME_SCALAR
11650 def_bool X86_32
11651 @@ -1008,7 +1008,7 @@ choice
11652
11653 config NOHIGHMEM
11654 bool "off"
11655 - depends on !X86_NUMAQ
11656 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11657 ---help---
11658 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
11659 However, the address space of 32-bit x86 processors is only 4
11660 @@ -1045,7 +1045,7 @@ config NOHIGHMEM
11661
11662 config HIGHMEM4G
11663 bool "4GB"
11664 - depends on !X86_NUMAQ
11665 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11666 ---help---
11667 Select this if you have a 32-bit processor and between 1 and 4
11668 gigabytes of physical RAM.
11669 @@ -1099,7 +1099,7 @@ config PAGE_OFFSET
11670 hex
11671 default 0xB0000000 if VMSPLIT_3G_OPT
11672 default 0x80000000 if VMSPLIT_2G
11673 - default 0x78000000 if VMSPLIT_2G_OPT
11674 + default 0x70000000 if VMSPLIT_2G_OPT
11675 default 0x40000000 if VMSPLIT_1G
11676 default 0xC0000000
11677 depends on X86_32
11678 @@ -1430,7 +1430,7 @@ config ARCH_USES_PG_UNCACHED
11679
11680 config EFI
11681 bool "EFI runtime service support"
11682 - depends on ACPI
11683 + depends on ACPI && !PAX_KERNEXEC
11684 ---help---
11685 This enables the kernel to use EFI runtime services that are
11686 available (such as the EFI variable services).
11687 @@ -1460,6 +1460,7 @@ config SECCOMP
11688
11689 config CC_STACKPROTECTOR
11690 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
11691 + depends on X86_64 || !PAX_MEMORY_UDEREF
11692 ---help---
11693 This option turns on the -fstack-protector GCC feature. This
11694 feature puts, at the beginning of functions, a canary value on
11695 @@ -1517,6 +1518,7 @@ config KEXEC_JUMP
11696 config PHYSICAL_START
11697 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
11698 default "0x1000000"
11699 + range 0x400000 0x40000000
11700 ---help---
11701 This gives the physical address where the kernel is loaded.
11702
11703 @@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
11704 hex
11705 prompt "Alignment value to which kernel should be aligned" if X86_32
11706 default "0x1000000"
11707 + range 0x400000 0x1000000 if PAX_KERNEXEC
11708 range 0x2000 0x1000000
11709 ---help---
11710 This value puts the alignment restrictions on physical address
11711 @@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
11712 Say N if you want to disable CPU hotplug.
11713
11714 config COMPAT_VDSO
11715 - def_bool y
11716 + def_bool n
11717 prompt "Compat VDSO support"
11718 depends on X86_32 || IA32_EMULATION
11719 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
11720 ---help---
11721 Map the 32-bit VDSO to the predictable old-style address too.
11722 ---help---
11723 diff -urNp linux-2.6.32.45/arch/x86/Kconfig.cpu linux-2.6.32.45/arch/x86/Kconfig.cpu
11724 --- linux-2.6.32.45/arch/x86/Kconfig.cpu 2011-03-27 14:31:47.000000000 -0400
11725 +++ linux-2.6.32.45/arch/x86/Kconfig.cpu 2011-04-17 15:56:46.000000000 -0400
11726 @@ -340,7 +340,7 @@ config X86_PPRO_FENCE
11727
11728 config X86_F00F_BUG
11729 def_bool y
11730 - depends on M586MMX || M586TSC || M586 || M486 || M386
11731 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
11732
11733 config X86_WP_WORKS_OK
11734 def_bool y
11735 @@ -360,7 +360,7 @@ config X86_POPAD_OK
11736
11737 config X86_ALIGNMENT_16
11738 def_bool y
11739 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11740 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11741
11742 config X86_INTEL_USERCOPY
11743 def_bool y
11744 @@ -406,7 +406,7 @@ config X86_CMPXCHG64
11745 # generates cmov.
11746 config X86_CMOV
11747 def_bool y
11748 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11749 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11750
11751 config X86_MINIMUM_CPU_FAMILY
11752 int
11753 diff -urNp linux-2.6.32.45/arch/x86/Kconfig.debug linux-2.6.32.45/arch/x86/Kconfig.debug
11754 --- linux-2.6.32.45/arch/x86/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
11755 +++ linux-2.6.32.45/arch/x86/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
11756 @@ -99,7 +99,7 @@ config X86_PTDUMP
11757 config DEBUG_RODATA
11758 bool "Write protect kernel read-only data structures"
11759 default y
11760 - depends on DEBUG_KERNEL
11761 + depends on DEBUG_KERNEL && BROKEN
11762 ---help---
11763 Mark the kernel read-only data as write-protected in the pagetables,
11764 in order to catch accidental (and incorrect) writes to such const
11765 diff -urNp linux-2.6.32.45/arch/x86/kernel/acpi/realmode/Makefile linux-2.6.32.45/arch/x86/kernel/acpi/realmode/Makefile
11766 --- linux-2.6.32.45/arch/x86/kernel/acpi/realmode/Makefile 2011-03-27 14:31:47.000000000 -0400
11767 +++ linux-2.6.32.45/arch/x86/kernel/acpi/realmode/Makefile 2011-08-07 14:38:58.000000000 -0400
11768 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
11769 $(call cc-option, -fno-stack-protector) \
11770 $(call cc-option, -mpreferred-stack-boundary=2)
11771 KBUILD_CFLAGS += $(call cc-option, -m32)
11772 +ifdef CONSTIFY_PLUGIN
11773 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
11774 +endif
11775 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11776 GCOV_PROFILE := n
11777
11778 diff -urNp linux-2.6.32.45/arch/x86/kernel/acpi/realmode/wakeup.S linux-2.6.32.45/arch/x86/kernel/acpi/realmode/wakeup.S
11779 --- linux-2.6.32.45/arch/x86/kernel/acpi/realmode/wakeup.S 2011-03-27 14:31:47.000000000 -0400
11780 +++ linux-2.6.32.45/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-01 18:53:40.000000000 -0400
11781 @@ -91,6 +91,9 @@ _start:
11782 /* Do any other stuff... */
11783
11784 #ifndef CONFIG_64BIT
11785 + /* Recheck NX bit overrides (64bit path does this in trampoline) */
11786 + call verify_cpu
11787 +
11788 /* This could also be done in C code... */
11789 movl pmode_cr3, %eax
11790 movl %eax, %cr3
11791 @@ -104,7 +107,7 @@ _start:
11792 movl %eax, %ecx
11793 orl %edx, %ecx
11794 jz 1f
11795 - movl $0xc0000080, %ecx
11796 + mov $MSR_EFER, %ecx
11797 wrmsr
11798 1:
11799
11800 @@ -114,6 +117,7 @@ _start:
11801 movl pmode_cr0, %eax
11802 movl %eax, %cr0
11803 jmp pmode_return
11804 +# include "../../verify_cpu.S"
11805 #else
11806 pushw $0
11807 pushw trampoline_segment
11808 diff -urNp linux-2.6.32.45/arch/x86/kernel/acpi/sleep.c linux-2.6.32.45/arch/x86/kernel/acpi/sleep.c
11809 --- linux-2.6.32.45/arch/x86/kernel/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
11810 +++ linux-2.6.32.45/arch/x86/kernel/acpi/sleep.c 2011-07-01 19:01:34.000000000 -0400
11811 @@ -11,11 +11,12 @@
11812 #include <linux/cpumask.h>
11813 #include <asm/segment.h>
11814 #include <asm/desc.h>
11815 +#include <asm/e820.h>
11816
11817 #include "realmode/wakeup.h"
11818 #include "sleep.h"
11819
11820 -unsigned long acpi_wakeup_address;
11821 +unsigned long acpi_wakeup_address = 0x2000;
11822 unsigned long acpi_realmode_flags;
11823
11824 /* address in low memory of the wakeup routine. */
11825 @@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
11826 #else /* CONFIG_64BIT */
11827 header->trampoline_segment = setup_trampoline() >> 4;
11828 #ifdef CONFIG_SMP
11829 - stack_start.sp = temp_stack + sizeof(temp_stack);
11830 + stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
11831 +
11832 + pax_open_kernel();
11833 early_gdt_descr.address =
11834 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11835 + pax_close_kernel();
11836 +
11837 initial_gs = per_cpu_offset(smp_processor_id());
11838 #endif
11839 initial_code = (unsigned long)wakeup_long64;
11840 @@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
11841 return;
11842 }
11843
11844 - acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
11845 -
11846 - if (!acpi_realmode) {
11847 - printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
11848 - return;
11849 - }
11850 -
11851 - acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
11852 + reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
11853 + acpi_realmode = (unsigned long)__va(acpi_wakeup_address);;
11854 }
11855
11856
11857 diff -urNp linux-2.6.32.45/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.32.45/arch/x86/kernel/acpi/wakeup_32.S
11858 --- linux-2.6.32.45/arch/x86/kernel/acpi/wakeup_32.S 2011-03-27 14:31:47.000000000 -0400
11859 +++ linux-2.6.32.45/arch/x86/kernel/acpi/wakeup_32.S 2011-04-17 15:56:46.000000000 -0400
11860 @@ -30,13 +30,11 @@ wakeup_pmode_return:
11861 # and restore the stack ... but you need gdt for this to work
11862 movl saved_context_esp, %esp
11863
11864 - movl %cs:saved_magic, %eax
11865 - cmpl $0x12345678, %eax
11866 + cmpl $0x12345678, saved_magic
11867 jne bogus_magic
11868
11869 # jump to place where we left off
11870 - movl saved_eip, %eax
11871 - jmp *%eax
11872 + jmp *(saved_eip)
11873
11874 bogus_magic:
11875 jmp bogus_magic
11876 diff -urNp linux-2.6.32.45/arch/x86/kernel/alternative.c linux-2.6.32.45/arch/x86/kernel/alternative.c
11877 --- linux-2.6.32.45/arch/x86/kernel/alternative.c 2011-03-27 14:31:47.000000000 -0400
11878 +++ linux-2.6.32.45/arch/x86/kernel/alternative.c 2011-04-17 15:56:46.000000000 -0400
11879 @@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(str
11880
11881 BUG_ON(p->len > MAX_PATCH_LEN);
11882 /* prep the buffer with the original instructions */
11883 - memcpy(insnbuf, p->instr, p->len);
11884 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11885 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11886 (unsigned long)p->instr, p->len);
11887
11888 @@ -475,7 +475,7 @@ void __init alternative_instructions(voi
11889 if (smp_alt_once)
11890 free_init_pages("SMP alternatives",
11891 (unsigned long)__smp_locks,
11892 - (unsigned long)__smp_locks_end);
11893 + PAGE_ALIGN((unsigned long)__smp_locks_end));
11894
11895 restart_nmi();
11896 }
11897 @@ -492,13 +492,17 @@ void __init alternative_instructions(voi
11898 * instructions. And on the local CPU you need to be protected again NMI or MCE
11899 * handlers seeing an inconsistent instruction while you patch.
11900 */
11901 -static void *__init_or_module text_poke_early(void *addr, const void *opcode,
11902 +static void *__kprobes text_poke_early(void *addr, const void *opcode,
11903 size_t len)
11904 {
11905 unsigned long flags;
11906 local_irq_save(flags);
11907 - memcpy(addr, opcode, len);
11908 +
11909 + pax_open_kernel();
11910 + memcpy(ktla_ktva(addr), opcode, len);
11911 sync_core();
11912 + pax_close_kernel();
11913 +
11914 local_irq_restore(flags);
11915 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11916 that causes hangs on some VIA CPUs. */
11917 @@ -520,35 +524,21 @@ static void *__init_or_module text_poke_
11918 */
11919 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11920 {
11921 - unsigned long flags;
11922 - char *vaddr;
11923 + unsigned char *vaddr = ktla_ktva(addr);
11924 struct page *pages[2];
11925 - int i;
11926 + size_t i;
11927
11928 if (!core_kernel_text((unsigned long)addr)) {
11929 - pages[0] = vmalloc_to_page(addr);
11930 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11931 + pages[0] = vmalloc_to_page(vaddr);
11932 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11933 } else {
11934 - pages[0] = virt_to_page(addr);
11935 + pages[0] = virt_to_page(vaddr);
11936 WARN_ON(!PageReserved(pages[0]));
11937 - pages[1] = virt_to_page(addr + PAGE_SIZE);
11938 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11939 }
11940 BUG_ON(!pages[0]);
11941 - local_irq_save(flags);
11942 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11943 - if (pages[1])
11944 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11945 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11946 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11947 - clear_fixmap(FIX_TEXT_POKE0);
11948 - if (pages[1])
11949 - clear_fixmap(FIX_TEXT_POKE1);
11950 - local_flush_tlb();
11951 - sync_core();
11952 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
11953 - that causes hangs on some VIA CPUs. */
11954 + text_poke_early(addr, opcode, len);
11955 for (i = 0; i < len; i++)
11956 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11957 - local_irq_restore(flags);
11958 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11959 return addr;
11960 }
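
Aside: the rewritten text_poke() above funnels all patching through text_poke_early(), which translates the target with ktla_ktva() and brackets the memcpy with pax_open_kernel()/pax_close_kernel() so that otherwise read-only kernel text can be written for a moment. A userspace analogue of that open, patch, close pattern using mprotect(); error handling is minimal and the helper name is invented:

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

/* Make the page(s) containing [addr, addr + len) writable just long enough
 * to copy the new bytes in, then restore read/execute protection. */
static int patch_bytes(void *addr, const void *opcode, size_t len)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        uintptr_t page = (uintptr_t)addr & ~((uintptr_t)pagesz - 1);
        size_t span = ((uintptr_t)addr + len) - page;

        if (mprotect((void *)page, span, PROT_READ | PROT_WRITE) != 0)
                return -1;                 /* could not "open" the window */

        memcpy(addr, opcode, len);         /* the actual patch */

        return mprotect((void *)page, span, PROT_READ | PROT_EXEC);
}
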
11961 diff -urNp linux-2.6.32.45/arch/x86/kernel/amd_iommu.c linux-2.6.32.45/arch/x86/kernel/amd_iommu.c
11962 --- linux-2.6.32.45/arch/x86/kernel/amd_iommu.c 2011-03-27 14:31:47.000000000 -0400
11963 +++ linux-2.6.32.45/arch/x86/kernel/amd_iommu.c 2011-04-17 15:56:46.000000000 -0400
11964 @@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(
11965 }
11966 }
11967
11968 -static struct dma_map_ops amd_iommu_dma_ops = {
11969 +static const struct dma_map_ops amd_iommu_dma_ops = {
11970 .alloc_coherent = alloc_coherent,
11971 .free_coherent = free_coherent,
11972 .map_page = map_page,
11973 diff -urNp linux-2.6.32.45/arch/x86/kernel/apic/apic.c linux-2.6.32.45/arch/x86/kernel/apic/apic.c
11974 --- linux-2.6.32.45/arch/x86/kernel/apic/apic.c 2011-03-27 14:31:47.000000000 -0400
11975 +++ linux-2.6.32.45/arch/x86/kernel/apic/apic.c 2011-08-17 20:00:16.000000000 -0400
11976 @@ -170,7 +170,7 @@ int first_system_vector = 0xfe;
11977 /*
11978 * Debug level, exported for io_apic.c
11979 */
11980 -unsigned int apic_verbosity;
11981 +int apic_verbosity;
11982
11983 int pic_mode;
11984
11985 @@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs
11986 apic_write(APIC_ESR, 0);
11987 v1 = apic_read(APIC_ESR);
11988 ack_APIC_irq();
11989 - atomic_inc(&irq_err_count);
11990 + atomic_inc_unchecked(&irq_err_count);
11991
11992 /*
11993 * Here is what the APIC error bits mean:
11994 @@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(vo
11995 u16 *bios_cpu_apicid;
11996 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
11997
11998 + pax_track_stack();
11999 +
12000 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
12001 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
12002
12003 diff -urNp linux-2.6.32.45/arch/x86/kernel/apic/io_apic.c linux-2.6.32.45/arch/x86/kernel/apic/io_apic.c
12004 --- linux-2.6.32.45/arch/x86/kernel/apic/io_apic.c 2011-03-27 14:31:47.000000000 -0400
12005 +++ linux-2.6.32.45/arch/x86/kernel/apic/io_apic.c 2011-05-04 17:56:20.000000000 -0400
12006 @@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapi
12007 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
12008 GFP_ATOMIC);
12009 if (!ioapic_entries)
12010 - return 0;
12011 + return NULL;
12012
12013 for (apic = 0; apic < nr_ioapics; apic++) {
12014 ioapic_entries[apic] =
12015 @@ -733,7 +733,7 @@ nomem:
12016 kfree(ioapic_entries[apic]);
12017 kfree(ioapic_entries);
12018
12019 - return 0;
12020 + return NULL;
12021 }
12022
12023 /*
12024 @@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
12025 }
12026 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
12027
12028 -void lock_vector_lock(void)
12029 +void lock_vector_lock(void) __acquires(vector_lock)
12030 {
12031 /* Used to the online set of cpus does not change
12032 * during assign_irq_vector.
12033 @@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
12034 spin_lock(&vector_lock);
12035 }
12036
12037 -void unlock_vector_lock(void)
12038 +void unlock_vector_lock(void) __releases(vector_lock)
12039 {
12040 spin_unlock(&vector_lock);
12041 }
12042 @@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int i
12043 ack_APIC_irq();
12044 }
12045
12046 -atomic_t irq_mis_count;
12047 +atomic_unchecked_t irq_mis_count;
12048
12049 static void ack_apic_level(unsigned int irq)
12050 {
12051 @@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int
12052
12053 /* Tail end of version 0x11 I/O APIC bug workaround */
12054 if (!(v & (1 << (i & 0x1f)))) {
12055 - atomic_inc(&irq_mis_count);
12056 + atomic_inc_unchecked(&irq_mis_count);
12057 spin_lock(&ioapic_lock);
12058 __mask_and_edge_IO_APIC_irq(cfg);
12059 __unmask_and_level_IO_APIC_irq(cfg);
12060 diff -urNp linux-2.6.32.45/arch/x86/kernel/apm_32.c linux-2.6.32.45/arch/x86/kernel/apm_32.c
12061 --- linux-2.6.32.45/arch/x86/kernel/apm_32.c 2011-03-27 14:31:47.000000000 -0400
12062 +++ linux-2.6.32.45/arch/x86/kernel/apm_32.c 2011-04-23 12:56:10.000000000 -0400
12063 @@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
12064 * This is for buggy BIOS's that refer to (real mode) segment 0x40
12065 * even though they are called in protected mode.
12066 */
12067 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
12068 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
12069 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
12070
12071 static const char driver_version[] = "1.16ac"; /* no spaces */
12072 @@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
12073 BUG_ON(cpu != 0);
12074 gdt = get_cpu_gdt_table(cpu);
12075 save_desc_40 = gdt[0x40 / 8];
12076 +
12077 + pax_open_kernel();
12078 gdt[0x40 / 8] = bad_bios_desc;
12079 + pax_close_kernel();
12080
12081 apm_irq_save(flags);
12082 APM_DO_SAVE_SEGS;
12083 @@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
12084 &call->esi);
12085 APM_DO_RESTORE_SEGS;
12086 apm_irq_restore(flags);
12087 +
12088 + pax_open_kernel();
12089 gdt[0x40 / 8] = save_desc_40;
12090 + pax_close_kernel();
12091 +
12092 put_cpu();
12093
12094 return call->eax & 0xff;
12095 @@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void
12096 BUG_ON(cpu != 0);
12097 gdt = get_cpu_gdt_table(cpu);
12098 save_desc_40 = gdt[0x40 / 8];
12099 +
12100 + pax_open_kernel();
12101 gdt[0x40 / 8] = bad_bios_desc;
12102 + pax_close_kernel();
12103
12104 apm_irq_save(flags);
12105 APM_DO_SAVE_SEGS;
12106 @@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void
12107 &call->eax);
12108 APM_DO_RESTORE_SEGS;
12109 apm_irq_restore(flags);
12110 +
12111 + pax_open_kernel();
12112 gdt[0x40 / 8] = save_desc_40;
12113 + pax_close_kernel();
12114 +
12115 put_cpu();
12116 return error;
12117 }
12118 @@ -975,7 +989,7 @@ recalc:
12119
12120 static void apm_power_off(void)
12121 {
12122 - unsigned char po_bios_call[] = {
12123 + const unsigned char po_bios_call[] = {
12124 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
12125 0x8e, 0xd0, /* movw ax,ss */
12126 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
12127 @@ -2357,12 +2371,15 @@ static int __init apm_init(void)
12128 * code to that CPU.
12129 */
12130 gdt = get_cpu_gdt_table(0);
12131 +
12132 + pax_open_kernel();
12133 set_desc_base(&gdt[APM_CS >> 3],
12134 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
12135 set_desc_base(&gdt[APM_CS_16 >> 3],
12136 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
12137 set_desc_base(&gdt[APM_DS >> 3],
12138 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
12139 + pax_close_kernel();
12140
12141 proc_create("apm", 0, NULL, &apm_file_ops);
12142
12143 diff -urNp linux-2.6.32.45/arch/x86/kernel/asm-offsets_32.c linux-2.6.32.45/arch/x86/kernel/asm-offsets_32.c
12144 --- linux-2.6.32.45/arch/x86/kernel/asm-offsets_32.c 2011-03-27 14:31:47.000000000 -0400
12145 +++ linux-2.6.32.45/arch/x86/kernel/asm-offsets_32.c 2011-05-16 21:46:57.000000000 -0400
12146 @@ -51,7 +51,6 @@ void foo(void)
12147 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
12148 BLANK();
12149
12150 - OFFSET(TI_task, thread_info, task);
12151 OFFSET(TI_exec_domain, thread_info, exec_domain);
12152 OFFSET(TI_flags, thread_info, flags);
12153 OFFSET(TI_status, thread_info, status);
12154 @@ -60,6 +59,8 @@ void foo(void)
12155 OFFSET(TI_restart_block, thread_info, restart_block);
12156 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
12157 OFFSET(TI_cpu, thread_info, cpu);
12158 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
12159 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
12160 BLANK();
12161
12162 OFFSET(GDS_size, desc_ptr, size);
12163 @@ -99,6 +100,7 @@ void foo(void)
12164
12165 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
12166 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
12167 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
12168 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
12169 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
12170 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
12171 @@ -115,6 +117,11 @@ void foo(void)
12172 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
12173 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
12174 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
12175 +
12176 +#ifdef CONFIG_PAX_KERNEXEC
12177 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
12178 +#endif
12179 +
12180 #endif
12181
12182 #ifdef CONFIG_XEN
12183 diff -urNp linux-2.6.32.45/arch/x86/kernel/asm-offsets_64.c linux-2.6.32.45/arch/x86/kernel/asm-offsets_64.c
12184 --- linux-2.6.32.45/arch/x86/kernel/asm-offsets_64.c 2011-03-27 14:31:47.000000000 -0400
12185 +++ linux-2.6.32.45/arch/x86/kernel/asm-offsets_64.c 2011-08-23 20:24:19.000000000 -0400
12186 @@ -44,6 +44,8 @@ int main(void)
12187 ENTRY(addr_limit);
12188 ENTRY(preempt_count);
12189 ENTRY(status);
12190 + ENTRY(lowest_stack);
12191 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
12192 #ifdef CONFIG_IA32_EMULATION
12193 ENTRY(sysenter_return);
12194 #endif
12195 @@ -63,6 +65,18 @@ int main(void)
12196 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
12197 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
12198 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
12199 +
12200 +#ifdef CONFIG_PAX_KERNEXEC
12201 + OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
12202 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
12203 +#endif
12204 +
12205 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12206 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
12207 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
12208 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
12209 +#endif
12210 +
12211 #endif
12212
12213
12214 @@ -115,6 +129,7 @@ int main(void)
12215 ENTRY(cr8);
12216 BLANK();
12217 #undef ENTRY
12218 + DEFINE(TSS_size, sizeof(struct tss_struct));
12219 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
12220 BLANK();
12221 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
12222 @@ -130,6 +145,7 @@ int main(void)
12223
12224 BLANK();
12225 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
12226 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
12227 #ifdef CONFIG_XEN
12228 BLANK();
12229 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
12230 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/amd.c linux-2.6.32.45/arch/x86/kernel/cpu/amd.c
12231 --- linux-2.6.32.45/arch/x86/kernel/cpu/amd.c 2011-06-25 12:55:34.000000000 -0400
12232 +++ linux-2.6.32.45/arch/x86/kernel/cpu/amd.c 2011-06-25 12:56:37.000000000 -0400
12233 @@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_c
12234 unsigned int size)
12235 {
12236 /* AMD errata T13 (order #21922) */
12237 - if ((c->x86 == 6)) {
12238 + if (c->x86 == 6) {
12239 /* Duron Rev A0 */
12240 if (c->x86_model == 3 && c->x86_mask == 0)
12241 size = 64;
12242 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/common.c linux-2.6.32.45/arch/x86/kernel/cpu/common.c
12243 --- linux-2.6.32.45/arch/x86/kernel/cpu/common.c 2011-03-27 14:31:47.000000000 -0400
12244 +++ linux-2.6.32.45/arch/x86/kernel/cpu/common.c 2011-05-11 18:25:15.000000000 -0400
12245 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
12246
12247 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
12248
12249 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
12250 -#ifdef CONFIG_X86_64
12251 - /*
12252 - * We need valid kernel segments for data and code in long mode too
12253 - * IRET will check the segment types kkeil 2000/10/28
12254 - * Also sysret mandates a special GDT layout
12255 - *
12256 - * TLS descriptors are currently at a different place compared to i386.
12257 - * Hopefully nobody expects them at a fixed place (Wine?)
12258 - */
12259 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
12260 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
12261 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
12262 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
12263 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
12264 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
12265 -#else
12266 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
12267 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12268 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
12269 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
12270 - /*
12271 - * Segments used for calling PnP BIOS have byte granularity.
12272 - * They code segments and data segments have fixed 64k limits,
12273 - * the transfer segment sizes are set at run time.
12274 - */
12275 - /* 32-bit code */
12276 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
12277 - /* 16-bit code */
12278 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
12279 - /* 16-bit data */
12280 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
12281 - /* 16-bit data */
12282 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
12283 - /* 16-bit data */
12284 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
12285 - /*
12286 - * The APM segments have byte granularity and their bases
12287 - * are set at run time. All have 64k limits.
12288 - */
12289 - /* 32-bit code */
12290 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
12291 - /* 16-bit code */
12292 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
12293 - /* data */
12294 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
12295 -
12296 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12297 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12298 - GDT_STACK_CANARY_INIT
12299 -#endif
12300 -} };
12301 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
12302 -
12303 static int __init x86_xsave_setup(char *s)
12304 {
12305 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
12306 @@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
12307 {
12308 struct desc_ptr gdt_descr;
12309
12310 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
12311 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
12312 gdt_descr.size = GDT_SIZE - 1;
12313 load_gdt(&gdt_descr);
12314 /* Reload the per-cpu base */
12315 @@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struc
12316 /* Filter out anything that depends on CPUID levels we don't have */
12317 filter_cpuid_features(c, true);
12318
12319 +#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
12320 + setup_clear_cpu_cap(X86_FEATURE_SEP);
12321 +#endif
12322 +
12323 /* If the model name is still unset, do table lookup. */
12324 if (!c->x86_model_id[0]) {
12325 const char *p;
12326 @@ -980,6 +930,9 @@ static __init int setup_disablecpuid(cha
12327 }
12328 __setup("clearcpuid=", setup_disablecpuid);
12329
12330 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
12331 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
12332 +
12333 #ifdef CONFIG_X86_64
12334 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
12335
12336 @@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
12337 EXPORT_PER_CPU_SYMBOL(current_task);
12338
12339 DEFINE_PER_CPU(unsigned long, kernel_stack) =
12340 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
12341 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
12342 EXPORT_PER_CPU_SYMBOL(kernel_stack);
12343
12344 DEFINE_PER_CPU(char *, irq_stack_ptr) =
12345 @@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(str
12346 {
12347 memset(regs, 0, sizeof(struct pt_regs));
12348 regs->fs = __KERNEL_PERCPU;
12349 - regs->gs = __KERNEL_STACK_CANARY;
12350 + savesegment(gs, regs->gs);
12351
12352 return regs;
12353 }
12354 @@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
12355 int i;
12356
12357 cpu = stack_smp_processor_id();
12358 - t = &per_cpu(init_tss, cpu);
12359 + t = init_tss + cpu;
12360 orig_ist = &per_cpu(orig_ist, cpu);
12361
12362 #ifdef CONFIG_NUMA
12363 @@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
12364 switch_to_new_gdt(cpu);
12365 loadsegment(fs, 0);
12366
12367 - load_idt((const struct desc_ptr *)&idt_descr);
12368 + load_idt(&idt_descr);
12369
12370 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
12371 syscall_init();
12372 @@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
12373 wrmsrl(MSR_KERNEL_GS_BASE, 0);
12374 barrier();
12375
12376 - check_efer();
12377 if (cpu != 0)
12378 enable_x2apic();
12379
12380 @@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
12381 {
12382 int cpu = smp_processor_id();
12383 struct task_struct *curr = current;
12384 - struct tss_struct *t = &per_cpu(init_tss, cpu);
12385 + struct tss_struct *t = init_tss + cpu;
12386 struct thread_struct *thread = &curr->thread;
12387
12388 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
12389 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/intel.c linux-2.6.32.45/arch/x86/kernel/cpu/intel.c
12390 --- linux-2.6.32.45/arch/x86/kernel/cpu/intel.c 2011-03-27 14:31:47.000000000 -0400
12391 +++ linux-2.6.32.45/arch/x86/kernel/cpu/intel.c 2011-04-17 15:56:46.000000000 -0400
12392 @@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug
12393 * Update the IDT descriptor and reload the IDT so that
12394 * it uses the read-only mapped virtual address.
12395 */
12396 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
12397 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
12398 load_idt(&idt_descr);
12399 }
12400 #endif
12401 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/intel_cacheinfo.c linux-2.6.32.45/arch/x86/kernel/cpu/intel_cacheinfo.c
12402 --- linux-2.6.32.45/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
12403 +++ linux-2.6.32.45/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-04-17 15:56:46.000000000 -0400
12404 @@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kob
12405 return ret;
12406 }
12407
12408 -static struct sysfs_ops sysfs_ops = {
12409 +static const struct sysfs_ops sysfs_ops = {
12410 .show = show,
12411 .store = store,
12412 };
12413 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/Makefile linux-2.6.32.45/arch/x86/kernel/cpu/Makefile
12414 --- linux-2.6.32.45/arch/x86/kernel/cpu/Makefile 2011-03-27 14:31:47.000000000 -0400
12415 +++ linux-2.6.32.45/arch/x86/kernel/cpu/Makefile 2011-04-17 15:56:46.000000000 -0400
12416 @@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
12417 CFLAGS_REMOVE_common.o = -pg
12418 endif
12419
12420 -# Make sure load_percpu_segment has no stackprotector
12421 -nostackp := $(call cc-option, -fno-stack-protector)
12422 -CFLAGS_common.o := $(nostackp)
12423 -
12424 obj-y := intel_cacheinfo.o addon_cpuid_features.o
12425 obj-y += proc.o capflags.o powerflags.o common.o
12426 obj-y += vmware.o hypervisor.o sched.o
12427 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce_amd.c linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce_amd.c
12428 --- linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:56:59.000000000 -0400
12429 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:57:13.000000000 -0400
12430 @@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kob
12431 return ret;
12432 }
12433
12434 -static struct sysfs_ops threshold_ops = {
12435 +static const struct sysfs_ops threshold_ops = {
12436 .show = show,
12437 .store = store,
12438 };
12439 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce.c
12440 --- linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce.c 2011-03-27 14:31:47.000000000 -0400
12441 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce.c 2011-05-04 17:56:20.000000000 -0400
12442 @@ -43,6 +43,7 @@
12443 #include <asm/ipi.h>
12444 #include <asm/mce.h>
12445 #include <asm/msr.h>
12446 +#include <asm/local.h>
12447
12448 #include "mce-internal.h"
12449
12450 @@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
12451 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
12452 m->cs, m->ip);
12453
12454 - if (m->cs == __KERNEL_CS)
12455 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
12456 print_symbol("{%s}", m->ip);
12457 pr_cont("\n");
12458 }
12459 @@ -221,10 +222,10 @@ static void print_mce_tail(void)
12460
12461 #define PANIC_TIMEOUT 5 /* 5 seconds */
12462
12463 -static atomic_t mce_paniced;
12464 +static atomic_unchecked_t mce_paniced;
12465
12466 static int fake_panic;
12467 -static atomic_t mce_fake_paniced;
12468 +static atomic_unchecked_t mce_fake_paniced;
12469
12470 /* Panic in progress. Enable interrupts and wait for final IPI */
12471 static void wait_for_panic(void)
12472 @@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct
12473 /*
12474 * Make sure only one CPU runs in machine check panic
12475 */
12476 - if (atomic_inc_return(&mce_paniced) > 1)
12477 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
12478 wait_for_panic();
12479 barrier();
12480
12481 @@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct
12482 console_verbose();
12483 } else {
12484 /* Don't log too much for fake panic */
12485 - if (atomic_inc_return(&mce_fake_paniced) > 1)
12486 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
12487 return;
12488 }
12489 print_mce_head();
12490 @@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
12491 * might have been modified by someone else.
12492 */
12493 rmb();
12494 - if (atomic_read(&mce_paniced))
12495 + if (atomic_read_unchecked(&mce_paniced))
12496 wait_for_panic();
12497 if (!monarch_timeout)
12498 goto out;
12499 @@ -1429,14 +1430,14 @@ void __cpuinit mcheck_init(struct cpuinf
12500 */
12501
12502 static DEFINE_SPINLOCK(mce_state_lock);
12503 -static int open_count; /* #times opened */
12504 +static local_t open_count; /* #times opened */
12505 static int open_exclu; /* already open exclusive? */
12506
12507 static int mce_open(struct inode *inode, struct file *file)
12508 {
12509 spin_lock(&mce_state_lock);
12510
12511 - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
12512 + if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
12513 spin_unlock(&mce_state_lock);
12514
12515 return -EBUSY;
12516 @@ -1444,7 +1445,7 @@ static int mce_open(struct inode *inode,
12517
12518 if (file->f_flags & O_EXCL)
12519 open_exclu = 1;
12520 - open_count++;
12521 + local_inc(&open_count);
12522
12523 spin_unlock(&mce_state_lock);
12524
12525 @@ -1455,7 +1456,7 @@ static int mce_release(struct inode *ino
12526 {
12527 spin_lock(&mce_state_lock);
12528
12529 - open_count--;
12530 + local_dec(&open_count);
12531 open_exclu = 0;
12532
12533 spin_unlock(&mce_state_lock);
12534 @@ -2082,7 +2083,7 @@ struct dentry *mce_get_debugfs_dir(void)
12535 static void mce_reset(void)
12536 {
12537 cpu_missing = 0;
12538 - atomic_set(&mce_fake_paniced, 0);
12539 + atomic_set_unchecked(&mce_fake_paniced, 0);
12540 atomic_set(&mce_executing, 0);
12541 atomic_set(&mce_callin, 0);
12542 atomic_set(&global_nwo, 0);
12543 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce-inject.c
12544 --- linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-03-27 14:31:47.000000000 -0400
12545 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-08-05 20:33:55.000000000 -0400
12546 @@ -211,7 +211,9 @@ static ssize_t mce_write(struct file *fi
12547 static int inject_init(void)
12548 {
12549 printk(KERN_INFO "Machine check injector initialized\n");
12550 - mce_chrdev_ops.write = mce_write;
12551 + pax_open_kernel();
12552 + *(void **)&mce_chrdev_ops.write = mce_write;
12553 + pax_close_kernel();
12554 register_die_notifier(&mce_raise_nb);
12555 return 0;
12556 }
12557 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/amd.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/amd.c
12558 --- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/amd.c 2011-03-27 14:31:47.000000000 -0400
12559 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/amd.c 2011-04-17 15:56:46.000000000 -0400
12560 @@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base
12561 return 0;
12562 }
12563
12564 -static struct mtrr_ops amd_mtrr_ops = {
12565 +static const struct mtrr_ops amd_mtrr_ops = {
12566 .vendor = X86_VENDOR_AMD,
12567 .set = amd_set_mtrr,
12568 .get = amd_get_mtrr,
12569 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/centaur.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/centaur.c
12570 --- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/centaur.c 2011-03-27 14:31:47.000000000 -0400
12571 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/centaur.c 2011-04-17 15:56:46.000000000 -0400
12572 @@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long
12573 return 0;
12574 }
12575
12576 -static struct mtrr_ops centaur_mtrr_ops = {
12577 +static const struct mtrr_ops centaur_mtrr_ops = {
12578 .vendor = X86_VENDOR_CENTAUR,
12579 .set = centaur_set_mcr,
12580 .get = centaur_get_mcr,
12581 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/cyrix.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/cyrix.c
12582 --- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-03-27 14:31:47.000000000 -0400
12583 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-04-17 15:56:46.000000000 -0400
12584 @@ -265,7 +265,7 @@ static void cyrix_set_all(void)
12585 post_set();
12586 }
12587
12588 -static struct mtrr_ops cyrix_mtrr_ops = {
12589 +static const struct mtrr_ops cyrix_mtrr_ops = {
12590 .vendor = X86_VENDOR_CYRIX,
12591 .set_all = cyrix_set_all,
12592 .set = cyrix_set_arr,
12593 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/generic.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/generic.c
12594 --- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/generic.c 2011-03-27 14:31:47.000000000 -0400
12595 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/generic.c 2011-04-23 12:56:10.000000000 -0400
12596 @@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
12597 /*
12598 * Generic structure...
12599 */
12600 -struct mtrr_ops generic_mtrr_ops = {
12601 +const struct mtrr_ops generic_mtrr_ops = {
12602 .use_intel_if = 1,
12603 .set_all = generic_set_all,
12604 .get = generic_get_mtrr,
12605 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/main.c
12606 --- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:00:52.000000000 -0400
12607 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:03:05.000000000 -0400
12608 @@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
12609 u64 size_or_mask, size_and_mask;
12610 static bool mtrr_aps_delayed_init;
12611
12612 -static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
12613 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
12614
12615 -struct mtrr_ops *mtrr_if;
12616 +const struct mtrr_ops *mtrr_if;
12617
12618 static void set_mtrr(unsigned int reg, unsigned long base,
12619 unsigned long size, mtrr_type type);
12620
12621 -void set_mtrr_ops(struct mtrr_ops *ops)
12622 +void set_mtrr_ops(const struct mtrr_ops *ops)
12623 {
12624 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
12625 mtrr_ops[ops->vendor] = ops;
12626 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/mtrr.h
12627 --- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-03-27 14:31:47.000000000 -0400
12628 +++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-08-26 20:23:57.000000000 -0400
12629 @@ -25,14 +25,14 @@ struct mtrr_ops {
12630 int (*validate_add_page)(unsigned long base, unsigned long size,
12631 unsigned int type);
12632 int (*have_wrcomb)(void);
12633 -};
12634 +} __do_const;
12635
12636 extern int generic_get_free_region(unsigned long base, unsigned long size,
12637 int replace_reg);
12638 extern int generic_validate_add_page(unsigned long base, unsigned long size,
12639 unsigned int type);
12640
12641 -extern struct mtrr_ops generic_mtrr_ops;
12642 +extern const struct mtrr_ops generic_mtrr_ops;
12643
12644 extern int positive_have_wrcomb(void);
12645
12646 @@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int in
12647 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
12648 void get_mtrr_state(void);
12649
12650 -extern void set_mtrr_ops(struct mtrr_ops *ops);
12651 +extern void set_mtrr_ops(const struct mtrr_ops *ops);
12652
12653 extern u64 size_or_mask, size_and_mask;
12654 -extern struct mtrr_ops *mtrr_if;
12655 +extern const struct mtrr_ops *mtrr_if;
12656
12657 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
12658 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
12659 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/perfctr-watchdog.c linux-2.6.32.45/arch/x86/kernel/cpu/perfctr-watchdog.c
12660 --- linux-2.6.32.45/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-03-27 14:31:47.000000000 -0400
12661 +++ linux-2.6.32.45/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-04-17 15:56:46.000000000 -0400
12662 @@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
12663
12664 /* Interface defining a CPU specific perfctr watchdog */
12665 struct wd_ops {
12666 - int (*reserve)(void);
12667 - void (*unreserve)(void);
12668 - int (*setup)(unsigned nmi_hz);
12669 - void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12670 - void (*stop)(void);
12671 + int (* const reserve)(void);
12672 + void (* const unreserve)(void);
12673 + int (* const setup)(unsigned nmi_hz);
12674 + void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12675 + void (* const stop)(void);
12676 unsigned perfctr;
12677 unsigned evntsel;
12678 u64 checkbit;
12679 @@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
12680 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
12681 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
12682
12683 +/* cannot be const */
12684 static struct wd_ops intel_arch_wd_ops;
12685
12686 static int setup_intel_arch_watchdog(unsigned nmi_hz)
12687 @@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(uns
12688 return 1;
12689 }
12690
12691 +/* cannot be const */
12692 static struct wd_ops intel_arch_wd_ops __read_mostly = {
12693 .reserve = single_msr_reserve,
12694 .unreserve = single_msr_unreserve,
12695 diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/perf_event.c linux-2.6.32.45/arch/x86/kernel/cpu/perf_event.c
12696 --- linux-2.6.32.45/arch/x86/kernel/cpu/perf_event.c 2011-03-27 14:31:47.000000000 -0400
12697 +++ linux-2.6.32.45/arch/x86/kernel/cpu/perf_event.c 2011-05-04 17:56:20.000000000 -0400
12698 @@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event
12699 * count to the generic event atomically:
12700 */
12701 again:
12702 - prev_raw_count = atomic64_read(&hwc->prev_count);
12703 + prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
12704 rdmsrl(hwc->event_base + idx, new_raw_count);
12705
12706 - if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
12707 + if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
12708 new_raw_count) != prev_raw_count)
12709 goto again;
12710
12711 @@ -741,7 +741,7 @@ again:
12712 delta = (new_raw_count << shift) - (prev_raw_count << shift);
12713 delta >>= shift;
12714
12715 - atomic64_add(delta, &event->count);
12716 + atomic64_add_unchecked(delta, &event->count);
12717 atomic64_sub(delta, &hwc->period_left);
12718
12719 return new_raw_count;
12720 @@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_ev
12721 * The hw event starts counting from this event offset,
12722 * mark it to be able to extra future deltas:
12723 */
12724 - atomic64_set(&hwc->prev_count, (u64)-left);
12725 + atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
12726
12727 err = checking_wrmsrl(hwc->event_base + idx,
12728 (u64)(-left) & x86_pmu.event_mask);
12729 @@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs
12730 break;
12731
12732 callchain_store(entry, frame.return_address);
12733 - fp = frame.next_frame;
12734 + fp = (__force const void __user *)frame.next_frame;
12735 }
12736 }
12737
12738 diff -urNp linux-2.6.32.45/arch/x86/kernel/crash.c linux-2.6.32.45/arch/x86/kernel/crash.c
12739 --- linux-2.6.32.45/arch/x86/kernel/crash.c 2011-03-27 14:31:47.000000000 -0400
12740 +++ linux-2.6.32.45/arch/x86/kernel/crash.c 2011-04-17 15:56:46.000000000 -0400
12741 @@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu,
12742 regs = args->regs;
12743
12744 #ifdef CONFIG_X86_32
12745 - if (!user_mode_vm(regs)) {
12746 + if (!user_mode(regs)) {
12747 crash_fixup_ss_esp(&fixed_regs, regs);
12748 regs = &fixed_regs;
12749 }
12750 diff -urNp linux-2.6.32.45/arch/x86/kernel/doublefault_32.c linux-2.6.32.45/arch/x86/kernel/doublefault_32.c
12751 --- linux-2.6.32.45/arch/x86/kernel/doublefault_32.c 2011-03-27 14:31:47.000000000 -0400
12752 +++ linux-2.6.32.45/arch/x86/kernel/doublefault_32.c 2011-04-17 15:56:46.000000000 -0400
12753 @@ -11,7 +11,7 @@
12754
12755 #define DOUBLEFAULT_STACKSIZE (1024)
12756 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12757 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12758 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12759
12760 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12761
12762 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
12763 unsigned long gdt, tss;
12764
12765 store_gdt(&gdt_desc);
12766 - gdt = gdt_desc.address;
12767 + gdt = (unsigned long)gdt_desc.address;
12768
12769 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12770
12771 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
12772 /* 0x2 bit is always set */
12773 .flags = X86_EFLAGS_SF | 0x2,
12774 .sp = STACK_START,
12775 - .es = __USER_DS,
12776 + .es = __KERNEL_DS,
12777 .cs = __KERNEL_CS,
12778 .ss = __KERNEL_DS,
12779 - .ds = __USER_DS,
12780 + .ds = __KERNEL_DS,
12781 .fs = __KERNEL_PERCPU,
12782
12783 .__cr3 = __pa_nodebug(swapper_pg_dir),
12784 diff -urNp linux-2.6.32.45/arch/x86/kernel/dumpstack_32.c linux-2.6.32.45/arch/x86/kernel/dumpstack_32.c
12785 --- linux-2.6.32.45/arch/x86/kernel/dumpstack_32.c 2011-03-27 14:31:47.000000000 -0400
12786 +++ linux-2.6.32.45/arch/x86/kernel/dumpstack_32.c 2011-04-17 15:56:46.000000000 -0400
12787 @@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task
12788 #endif
12789
12790 for (;;) {
12791 - struct thread_info *context;
12792 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12793 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12794
12795 - context = (struct thread_info *)
12796 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12797 - bp = print_context_stack(context, stack, bp, ops,
12798 - data, NULL, &graph);
12799 -
12800 - stack = (unsigned long *)context->previous_esp;
12801 - if (!stack)
12802 + if (stack_start == task_stack_page(task))
12803 break;
12804 + stack = *(unsigned long **)stack_start;
12805 if (ops->stack(data, "IRQ") < 0)
12806 break;
12807 touch_nmi_watchdog();
12808 @@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs
12809 * When in-kernel, we also print out the stack and code at the
12810 * time of the fault..
12811 */
12812 - if (!user_mode_vm(regs)) {
12813 + if (!user_mode(regs)) {
12814 unsigned int code_prologue = code_bytes * 43 / 64;
12815 unsigned int code_len = code_bytes;
12816 unsigned char c;
12817 u8 *ip;
12818 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12819
12820 printk(KERN_EMERG "Stack:\n");
12821 show_stack_log_lvl(NULL, regs, &regs->sp,
12822 @@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs
12823
12824 printk(KERN_EMERG "Code: ");
12825
12826 - ip = (u8 *)regs->ip - code_prologue;
12827 + ip = (u8 *)regs->ip - code_prologue + cs_base;
12828 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12829 /* try starting at IP */
12830 - ip = (u8 *)regs->ip;
12831 + ip = (u8 *)regs->ip + cs_base;
12832 code_len = code_len - code_prologue + 1;
12833 }
12834 for (i = 0; i < code_len; i++, ip++) {
12835 @@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs
12836 printk(" Bad EIP value.");
12837 break;
12838 }
12839 - if (ip == (u8 *)regs->ip)
12840 + if (ip == (u8 *)regs->ip + cs_base)
12841 printk("<%02x> ", c);
12842 else
12843 printk("%02x ", c);
12844 @@ -149,6 +146,7 @@ int is_valid_bugaddr(unsigned long ip)
12845 {
12846 unsigned short ud2;
12847
12848 + ip = ktla_ktva(ip);
12849 if (ip < PAGE_OFFSET)
12850 return 0;
12851 if (probe_kernel_address((unsigned short *)ip, ud2))
12852 diff -urNp linux-2.6.32.45/arch/x86/kernel/dumpstack_64.c linux-2.6.32.45/arch/x86/kernel/dumpstack_64.c
12853 --- linux-2.6.32.45/arch/x86/kernel/dumpstack_64.c 2011-03-27 14:31:47.000000000 -0400
12854 +++ linux-2.6.32.45/arch/x86/kernel/dumpstack_64.c 2011-04-17 15:56:46.000000000 -0400
12855 @@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task
12856 unsigned long *irq_stack_end =
12857 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12858 unsigned used = 0;
12859 - struct thread_info *tinfo;
12860 int graph = 0;
12861 + void *stack_start;
12862
12863 if (!task)
12864 task = current;
12865 @@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task
12866 * current stack address. If the stacks consist of nested
12867 * exceptions
12868 */
12869 - tinfo = task_thread_info(task);
12870 for (;;) {
12871 char *id;
12872 unsigned long *estack_end;
12873 +
12874 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12875 &used, &id);
12876
12877 @@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task
12878 if (ops->stack(data, id) < 0)
12879 break;
12880
12881 - bp = print_context_stack(tinfo, stack, bp, ops,
12882 + bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12883 data, estack_end, &graph);
12884 ops->stack(data, "<EOE>");
12885 /*
12886 @@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task
12887 if (stack >= irq_stack && stack < irq_stack_end) {
12888 if (ops->stack(data, "IRQ") < 0)
12889 break;
12890 - bp = print_context_stack(tinfo, stack, bp,
12891 + bp = print_context_stack(task, irq_stack, stack, bp,
12892 ops, data, irq_stack_end, &graph);
12893 /*
12894 * We link to the next stack (which would be
12895 @@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task
12896 /*
12897 * This handles the process stack:
12898 */
12899 - bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12900 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12901 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12902 put_cpu();
12903 }
12904 EXPORT_SYMBOL(dump_trace);
12905 diff -urNp linux-2.6.32.45/arch/x86/kernel/dumpstack.c linux-2.6.32.45/arch/x86/kernel/dumpstack.c
12906 --- linux-2.6.32.45/arch/x86/kernel/dumpstack.c 2011-03-27 14:31:47.000000000 -0400
12907 +++ linux-2.6.32.45/arch/x86/kernel/dumpstack.c 2011-04-17 15:56:46.000000000 -0400
12908 @@ -2,6 +2,9 @@
12909 * Copyright (C) 1991, 1992 Linus Torvalds
12910 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12911 */
12912 +#ifdef CONFIG_GRKERNSEC_HIDESYM
12913 +#define __INCLUDED_BY_HIDESYM 1
12914 +#endif
12915 #include <linux/kallsyms.h>
12916 #include <linux/kprobes.h>
12917 #include <linux/uaccess.h>
12918 @@ -28,7 +31,7 @@ static int die_counter;
12919
12920 void printk_address(unsigned long address, int reliable)
12921 {
12922 - printk(" [<%p>] %s%pS\n", (void *) address,
12923 + printk(" [<%p>] %s%pA\n", (void *) address,
12924 reliable ? "" : "? ", (void *) address);
12925 }
12926
12927 @@ -36,9 +39,8 @@ void printk_address(unsigned long addres
12928 static void
12929 print_ftrace_graph_addr(unsigned long addr, void *data,
12930 const struct stacktrace_ops *ops,
12931 - struct thread_info *tinfo, int *graph)
12932 + struct task_struct *task, int *graph)
12933 {
12934 - struct task_struct *task = tinfo->task;
12935 unsigned long ret_addr;
12936 int index = task->curr_ret_stack;
12937
12938 @@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long ad
12939 static inline void
12940 print_ftrace_graph_addr(unsigned long addr, void *data,
12941 const struct stacktrace_ops *ops,
12942 - struct thread_info *tinfo, int *graph)
12943 + struct task_struct *task, int *graph)
12944 { }
12945 #endif
12946
12947 @@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long ad
12948 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12949 */
12950
12951 -static inline int valid_stack_ptr(struct thread_info *tinfo,
12952 - void *p, unsigned int size, void *end)
12953 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12954 {
12955 - void *t = tinfo;
12956 if (end) {
12957 if (p < end && p >= (end-THREAD_SIZE))
12958 return 1;
12959 @@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct
12960 }
12961
12962 unsigned long
12963 -print_context_stack(struct thread_info *tinfo,
12964 +print_context_stack(struct task_struct *task, void *stack_start,
12965 unsigned long *stack, unsigned long bp,
12966 const struct stacktrace_ops *ops, void *data,
12967 unsigned long *end, int *graph)
12968 {
12969 struct stack_frame *frame = (struct stack_frame *)bp;
12970
12971 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12972 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12973 unsigned long addr;
12974
12975 addr = *stack;
12976 @@ -103,7 +103,7 @@ print_context_stack(struct thread_info *
12977 } else {
12978 ops->address(data, addr, 0);
12979 }
12980 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12981 + print_ftrace_graph_addr(addr, data, ops, task, graph);
12982 }
12983 stack++;
12984 }
12985 @@ -180,7 +180,7 @@ void dump_stack(void)
12986 #endif
12987
12988 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
12989 - current->pid, current->comm, print_tainted(),
12990 + task_pid_nr(current), current->comm, print_tainted(),
12991 init_utsname()->release,
12992 (int)strcspn(init_utsname()->version, " "),
12993 init_utsname()->version);
12994 @@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
12995 return flags;
12996 }
12997
12998 +extern void gr_handle_kernel_exploit(void);
12999 +
13000 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
13001 {
13002 if (regs && kexec_should_crash(current))
13003 @@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long fl
13004 panic("Fatal exception in interrupt");
13005 if (panic_on_oops)
13006 panic("Fatal exception");
13007 - do_exit(signr);
13008 +
13009 + gr_handle_kernel_exploit();
13010 +
13011 + do_group_exit(signr);
13012 }
13013
13014 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
13015 @@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs
13016 unsigned long flags = oops_begin();
13017 int sig = SIGSEGV;
13018
13019 - if (!user_mode_vm(regs))
13020 + if (!user_mode(regs))
13021 report_bug(regs->ip, regs);
13022
13023 if (__die(str, regs, err))
13024 diff -urNp linux-2.6.32.45/arch/x86/kernel/dumpstack.h linux-2.6.32.45/arch/x86/kernel/dumpstack.h
13025 --- linux-2.6.32.45/arch/x86/kernel/dumpstack.h 2011-03-27 14:31:47.000000000 -0400
13026 +++ linux-2.6.32.45/arch/x86/kernel/dumpstack.h 2011-04-23 13:25:26.000000000 -0400
13027 @@ -15,7 +15,7 @@
13028 #endif
13029
13030 extern unsigned long
13031 -print_context_stack(struct thread_info *tinfo,
13032 +print_context_stack(struct task_struct *task, void *stack_start,
13033 unsigned long *stack, unsigned long bp,
13034 const struct stacktrace_ops *ops, void *data,
13035 unsigned long *end, int *graph);
13036 diff -urNp linux-2.6.32.45/arch/x86/kernel/e820.c linux-2.6.32.45/arch/x86/kernel/e820.c
13037 --- linux-2.6.32.45/arch/x86/kernel/e820.c 2011-03-27 14:31:47.000000000 -0400
13038 +++ linux-2.6.32.45/arch/x86/kernel/e820.c 2011-04-17 15:56:46.000000000 -0400
13039 @@ -733,7 +733,7 @@ struct early_res {
13040 };
13041 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
13042 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
13043 - {}
13044 + { 0, 0, {0}, 0 }
13045 };
13046
13047 static int __init find_overlapped_early(u64 start, u64 end)
13048 diff -urNp linux-2.6.32.45/arch/x86/kernel/early_printk.c linux-2.6.32.45/arch/x86/kernel/early_printk.c
13049 --- linux-2.6.32.45/arch/x86/kernel/early_printk.c 2011-03-27 14:31:47.000000000 -0400
13050 +++ linux-2.6.32.45/arch/x86/kernel/early_printk.c 2011-05-16 21:46:57.000000000 -0400
13051 @@ -7,6 +7,7 @@
13052 #include <linux/pci_regs.h>
13053 #include <linux/pci_ids.h>
13054 #include <linux/errno.h>
13055 +#include <linux/sched.h>
13056 #include <asm/io.h>
13057 #include <asm/processor.h>
13058 #include <asm/fcntl.h>
13059 @@ -170,6 +171,8 @@ asmlinkage void early_printk(const char
13060 int n;
13061 va_list ap;
13062
13063 + pax_track_stack();
13064 +
13065 va_start(ap, fmt);
13066 n = vscnprintf(buf, sizeof(buf), fmt, ap);
13067 early_console->write(early_console, buf, n);
13068 diff -urNp linux-2.6.32.45/arch/x86/kernel/efi_32.c linux-2.6.32.45/arch/x86/kernel/efi_32.c
13069 --- linux-2.6.32.45/arch/x86/kernel/efi_32.c 2011-03-27 14:31:47.000000000 -0400
13070 +++ linux-2.6.32.45/arch/x86/kernel/efi_32.c 2011-04-17 15:56:46.000000000 -0400
13071 @@ -38,70 +38,38 @@
13072 */
13073
13074 static unsigned long efi_rt_eflags;
13075 -static pgd_t efi_bak_pg_dir_pointer[2];
13076 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
13077
13078 -void efi_call_phys_prelog(void)
13079 +void __init efi_call_phys_prelog(void)
13080 {
13081 - unsigned long cr4;
13082 - unsigned long temp;
13083 struct desc_ptr gdt_descr;
13084
13085 local_irq_save(efi_rt_eflags);
13086
13087 - /*
13088 - * If I don't have PAE, I should just duplicate two entries in page
13089 - * directory. If I have PAE, I just need to duplicate one entry in
13090 - * page directory.
13091 - */
13092 - cr4 = read_cr4_safe();
13093
13094 - if (cr4 & X86_CR4_PAE) {
13095 - efi_bak_pg_dir_pointer[0].pgd =
13096 - swapper_pg_dir[pgd_index(0)].pgd;
13097 - swapper_pg_dir[0].pgd =
13098 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
13099 - } else {
13100 - efi_bak_pg_dir_pointer[0].pgd =
13101 - swapper_pg_dir[pgd_index(0)].pgd;
13102 - efi_bak_pg_dir_pointer[1].pgd =
13103 - swapper_pg_dir[pgd_index(0x400000)].pgd;
13104 - swapper_pg_dir[pgd_index(0)].pgd =
13105 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
13106 - temp = PAGE_OFFSET + 0x400000;
13107 - swapper_pg_dir[pgd_index(0x400000)].pgd =
13108 - swapper_pg_dir[pgd_index(temp)].pgd;
13109 - }
13110 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
13111 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
13112 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
13113
13114 /*
13115 * After the lock is released, the original page table is restored.
13116 */
13117 __flush_tlb_all();
13118
13119 - gdt_descr.address = __pa(get_cpu_gdt_table(0));
13120 + gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
13121 gdt_descr.size = GDT_SIZE - 1;
13122 load_gdt(&gdt_descr);
13123 }
13124
13125 -void efi_call_phys_epilog(void)
13126 +void __init efi_call_phys_epilog(void)
13127 {
13128 - unsigned long cr4;
13129 struct desc_ptr gdt_descr;
13130
13131 - gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
13132 + gdt_descr.address = get_cpu_gdt_table(0);
13133 gdt_descr.size = GDT_SIZE - 1;
13134 load_gdt(&gdt_descr);
13135
13136 - cr4 = read_cr4_safe();
13137 -
13138 - if (cr4 & X86_CR4_PAE) {
13139 - swapper_pg_dir[pgd_index(0)].pgd =
13140 - efi_bak_pg_dir_pointer[0].pgd;
13141 - } else {
13142 - swapper_pg_dir[pgd_index(0)].pgd =
13143 - efi_bak_pg_dir_pointer[0].pgd;
13144 - swapper_pg_dir[pgd_index(0x400000)].pgd =
13145 - efi_bak_pg_dir_pointer[1].pgd;
13146 - }
13147 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
13148
13149 /*
13150 * After the lock is released, the original page table is restored.
13151 diff -urNp linux-2.6.32.45/arch/x86/kernel/efi_stub_32.S linux-2.6.32.45/arch/x86/kernel/efi_stub_32.S
13152 --- linux-2.6.32.45/arch/x86/kernel/efi_stub_32.S 2011-03-27 14:31:47.000000000 -0400
13153 +++ linux-2.6.32.45/arch/x86/kernel/efi_stub_32.S 2011-04-17 15:56:46.000000000 -0400
13154 @@ -6,6 +6,7 @@
13155 */
13156
13157 #include <linux/linkage.h>
13158 +#include <linux/init.h>
13159 #include <asm/page_types.h>
13160
13161 /*
13162 @@ -20,7 +21,7 @@
13163 * service functions will comply with gcc calling convention, too.
13164 */
13165
13166 -.text
13167 +__INIT
13168 ENTRY(efi_call_phys)
13169 /*
13170 * 0. The function can only be called in Linux kernel. So CS has been
13171 @@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
13172 * The mapping of lower virtual memory has been created in prelog and
13173 * epilog.
13174 */
13175 - movl $1f, %edx
13176 - subl $__PAGE_OFFSET, %edx
13177 - jmp *%edx
13178 + jmp 1f-__PAGE_OFFSET
13179 1:
13180
13181 /*
13182 @@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
13183 * parameter 2, ..., param n. To make things easy, we save the return
13184 * address of efi_call_phys in a global variable.
13185 */
13186 - popl %edx
13187 - movl %edx, saved_return_addr
13188 - /* get the function pointer into ECX*/
13189 - popl %ecx
13190 - movl %ecx, efi_rt_function_ptr
13191 - movl $2f, %edx
13192 - subl $__PAGE_OFFSET, %edx
13193 - pushl %edx
13194 + popl (saved_return_addr)
13195 + popl (efi_rt_function_ptr)
13196
13197 /*
13198 * 3. Clear PG bit in %CR0.
13199 @@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
13200 /*
13201 * 5. Call the physical function.
13202 */
13203 - jmp *%ecx
13204 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
13205
13206 -2:
13207 /*
13208 * 6. After EFI runtime service returns, control will return to
13209 * following instruction. We'd better readjust stack pointer first.
13210 @@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
13211 movl %cr0, %edx
13212 orl $0x80000000, %edx
13213 movl %edx, %cr0
13214 - jmp 1f
13215 -1:
13216 +
13217 /*
13218 * 8. Now restore the virtual mode from flat mode by
13219 * adding EIP with PAGE_OFFSET.
13220 */
13221 - movl $1f, %edx
13222 - jmp *%edx
13223 + jmp 1f+__PAGE_OFFSET
13224 1:
13225
13226 /*
13227 * 9. Balance the stack. And because EAX contain the return value,
13228 * we'd better not clobber it.
13229 */
13230 - leal efi_rt_function_ptr, %edx
13231 - movl (%edx), %ecx
13232 - pushl %ecx
13233 + pushl (efi_rt_function_ptr)
13234
13235 /*
13236 - * 10. Push the saved return address onto the stack and return.
13237 + * 10. Return to the saved return address.
13238 */
13239 - leal saved_return_addr, %edx
13240 - movl (%edx), %ecx
13241 - pushl %ecx
13242 - ret
13243 + jmpl *(saved_return_addr)
13244 ENDPROC(efi_call_phys)
13245 .previous
13246
13247 -.data
13248 +__INITDATA
13249 saved_return_addr:
13250 .long 0
13251 efi_rt_function_ptr:
13252 diff -urNp linux-2.6.32.45/arch/x86/kernel/entry_32.S linux-2.6.32.45/arch/x86/kernel/entry_32.S
13253 --- linux-2.6.32.45/arch/x86/kernel/entry_32.S 2011-03-27 14:31:47.000000000 -0400
13254 +++ linux-2.6.32.45/arch/x86/kernel/entry_32.S 2011-08-23 20:24:19.000000000 -0400
13255 @@ -185,13 +185,146 @@
13256 /*CFI_REL_OFFSET gs, PT_GS*/
13257 .endm
13258 .macro SET_KERNEL_GS reg
13259 +
13260 +#ifdef CONFIG_CC_STACKPROTECTOR
13261 movl $(__KERNEL_STACK_CANARY), \reg
13262 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
13263 + movl $(__USER_DS), \reg
13264 +#else
13265 + xorl \reg, \reg
13266 +#endif
13267 +
13268 movl \reg, %gs
13269 .endm
13270
13271 #endif /* CONFIG_X86_32_LAZY_GS */
13272
13273 -.macro SAVE_ALL
13274 +.macro pax_enter_kernel
13275 +#ifdef CONFIG_PAX_KERNEXEC
13276 + call pax_enter_kernel
13277 +#endif
13278 +.endm
13279 +
13280 +.macro pax_exit_kernel
13281 +#ifdef CONFIG_PAX_KERNEXEC
13282 + call pax_exit_kernel
13283 +#endif
13284 +.endm
13285 +
13286 +#ifdef CONFIG_PAX_KERNEXEC
13287 +ENTRY(pax_enter_kernel)
13288 +#ifdef CONFIG_PARAVIRT
13289 + pushl %eax
13290 + pushl %ecx
13291 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
13292 + mov %eax, %esi
13293 +#else
13294 + mov %cr0, %esi
13295 +#endif
13296 + bts $16, %esi
13297 + jnc 1f
13298 + mov %cs, %esi
13299 + cmp $__KERNEL_CS, %esi
13300 + jz 3f
13301 + ljmp $__KERNEL_CS, $3f
13302 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
13303 +2:
13304 +#ifdef CONFIG_PARAVIRT
13305 + mov %esi, %eax
13306 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
13307 +#else
13308 + mov %esi, %cr0
13309 +#endif
13310 +3:
13311 +#ifdef CONFIG_PARAVIRT
13312 + popl %ecx
13313 + popl %eax
13314 +#endif
13315 + ret
13316 +ENDPROC(pax_enter_kernel)
13317 +
13318 +ENTRY(pax_exit_kernel)
13319 +#ifdef CONFIG_PARAVIRT
13320 + pushl %eax
13321 + pushl %ecx
13322 +#endif
13323 + mov %cs, %esi
13324 + cmp $__KERNEXEC_KERNEL_CS, %esi
13325 + jnz 2f
13326 +#ifdef CONFIG_PARAVIRT
13327 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
13328 + mov %eax, %esi
13329 +#else
13330 + mov %cr0, %esi
13331 +#endif
13332 + btr $16, %esi
13333 + ljmp $__KERNEL_CS, $1f
13334 +1:
13335 +#ifdef CONFIG_PARAVIRT
13336 + mov %esi, %eax
13337 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
13338 +#else
13339 + mov %esi, %cr0
13340 +#endif
13341 +2:
13342 +#ifdef CONFIG_PARAVIRT
13343 + popl %ecx
13344 + popl %eax
13345 +#endif
13346 + ret
13347 +ENDPROC(pax_exit_kernel)
13348 +#endif
13349 +
13350 +.macro pax_erase_kstack
13351 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13352 + call pax_erase_kstack
13353 +#endif
13354 +.endm
13355 +
13356 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13357 +/*
13358 + * ebp: thread_info
13359 + * ecx, edx: can be clobbered
13360 + */
13361 +ENTRY(pax_erase_kstack)
13362 + pushl %edi
13363 + pushl %eax
13364 +
13365 + mov TI_lowest_stack(%ebp), %edi
13366 + mov $-0xBEEF, %eax
13367 + std
13368 +
13369 +1: mov %edi, %ecx
13370 + and $THREAD_SIZE_asm - 1, %ecx
13371 + shr $2, %ecx
13372 + repne scasl
13373 + jecxz 2f
13374 +
13375 + cmp $2*16, %ecx
13376 + jc 2f
13377 +
13378 + mov $2*16, %ecx
13379 + repe scasl
13380 + jecxz 2f
13381 + jne 1b
13382 +
13383 +2: cld
13384 + mov %esp, %ecx
13385 + sub %edi, %ecx
13386 + shr $2, %ecx
13387 + rep stosl
13388 +
13389 + mov TI_task_thread_sp0(%ebp), %edi
13390 + sub $128, %edi
13391 + mov %edi, TI_lowest_stack(%ebp)
13392 +
13393 + popl %eax
13394 + popl %edi
13395 + ret
13396 +ENDPROC(pax_erase_kstack)
13397 +#endif
13398 +
13399 +.macro __SAVE_ALL _DS
13400 cld
13401 PUSH_GS
13402 pushl %fs
13403 @@ -224,7 +357,7 @@
13404 pushl %ebx
13405 CFI_ADJUST_CFA_OFFSET 4
13406 CFI_REL_OFFSET ebx, 0
13407 - movl $(__USER_DS), %edx
13408 + movl $\_DS, %edx
13409 movl %edx, %ds
13410 movl %edx, %es
13411 movl $(__KERNEL_PERCPU), %edx
13412 @@ -232,6 +365,15 @@
13413 SET_KERNEL_GS %edx
13414 .endm
13415
13416 +.macro SAVE_ALL
13417 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
13418 + __SAVE_ALL __KERNEL_DS
13419 + pax_enter_kernel
13420 +#else
13421 + __SAVE_ALL __USER_DS
13422 +#endif
13423 +.endm
13424 +
13425 .macro RESTORE_INT_REGS
13426 popl %ebx
13427 CFI_ADJUST_CFA_OFFSET -4
13428 @@ -352,7 +494,15 @@ check_userspace:
13429 movb PT_CS(%esp), %al
13430 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
13431 cmpl $USER_RPL, %eax
13432 +
13433 +#ifdef CONFIG_PAX_KERNEXEC
13434 + jae resume_userspace
13435 +
13436 + PAX_EXIT_KERNEL
13437 + jmp resume_kernel
13438 +#else
13439 jb resume_kernel # not returning to v8086 or userspace
13440 +#endif
13441
13442 ENTRY(resume_userspace)
13443 LOCKDEP_SYS_EXIT
13444 @@ -364,7 +514,7 @@ ENTRY(resume_userspace)
13445 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
13446 # int/exception return?
13447 jne work_pending
13448 - jmp restore_all
13449 + jmp restore_all_pax
13450 END(ret_from_exception)
13451
13452 #ifdef CONFIG_PREEMPT
13453 @@ -414,25 +564,36 @@ sysenter_past_esp:
13454 /*CFI_REL_OFFSET cs, 0*/
13455 /*
13456 * Push current_thread_info()->sysenter_return to the stack.
13457 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
13458 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
13459 */
13460 - pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
13461 + pushl $0
13462 CFI_ADJUST_CFA_OFFSET 4
13463 CFI_REL_OFFSET eip, 0
13464
13465 pushl %eax
13466 CFI_ADJUST_CFA_OFFSET 4
13467 SAVE_ALL
13468 + GET_THREAD_INFO(%ebp)
13469 + movl TI_sysenter_return(%ebp),%ebp
13470 + movl %ebp,PT_EIP(%esp)
13471 ENABLE_INTERRUPTS(CLBR_NONE)
13472
13473 /*
13474 * Load the potential sixth argument from user stack.
13475 * Careful about security.
13476 */
13477 + movl PT_OLDESP(%esp),%ebp
13478 +
13479 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13480 + mov PT_OLDSS(%esp),%ds
13481 +1: movl %ds:(%ebp),%ebp
13482 + push %ss
13483 + pop %ds
13484 +#else
13485 cmpl $__PAGE_OFFSET-3,%ebp
13486 jae syscall_fault
13487 1: movl (%ebp),%ebp
13488 +#endif
13489 +
13490 movl %ebp,PT_EBP(%esp)
13491 .section __ex_table,"a"
13492 .align 4
13493 @@ -455,12 +616,23 @@ sysenter_do_call:
13494 testl $_TIF_ALLWORK_MASK, %ecx
13495 jne sysexit_audit
13496 sysenter_exit:
13497 +
13498 +#ifdef CONFIG_PAX_RANDKSTACK
13499 + pushl_cfi %eax
13500 + call pax_randomize_kstack
13501 + popl_cfi %eax
13502 +#endif
13503 +
13504 + pax_erase_kstack
13505 +
13506 /* if something modifies registers it must also disable sysexit */
13507 movl PT_EIP(%esp), %edx
13508 movl PT_OLDESP(%esp), %ecx
13509 xorl %ebp,%ebp
13510 TRACE_IRQS_ON
13511 1: mov PT_FS(%esp), %fs
13512 +2: mov PT_DS(%esp), %ds
13513 +3: mov PT_ES(%esp), %es
13514 PTGS_TO_GS
13515 ENABLE_INTERRUPTS_SYSEXIT
13516
13517 @@ -477,6 +649,9 @@ sysenter_audit:
13518 movl %eax,%edx /* 2nd arg: syscall number */
13519 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
13520 call audit_syscall_entry
13521 +
13522 + pax_erase_kstack
13523 +
13524 pushl %ebx
13525 CFI_ADJUST_CFA_OFFSET 4
13526 movl PT_EAX(%esp),%eax /* reload syscall number */
13527 @@ -504,11 +679,17 @@ sysexit_audit:
13528
13529 CFI_ENDPROC
13530 .pushsection .fixup,"ax"
13531 -2: movl $0,PT_FS(%esp)
13532 +4: movl $0,PT_FS(%esp)
13533 + jmp 1b
13534 +5: movl $0,PT_DS(%esp)
13535 + jmp 1b
13536 +6: movl $0,PT_ES(%esp)
13537 jmp 1b
13538 .section __ex_table,"a"
13539 .align 4
13540 - .long 1b,2b
13541 + .long 1b,4b
13542 + .long 2b,5b
13543 + .long 3b,6b
13544 .popsection
13545 PTGS_TO_GS_EX
13546 ENDPROC(ia32_sysenter_target)
13547 @@ -538,6 +719,14 @@ syscall_exit:
13548 testl $_TIF_ALLWORK_MASK, %ecx # current->work
13549 jne syscall_exit_work
13550
13551 +restore_all_pax:
13552 +
13553 +#ifdef CONFIG_PAX_RANDKSTACK
13554 + call pax_randomize_kstack
13555 +#endif
13556 +
13557 + pax_erase_kstack
13558 +
13559 restore_all:
13560 TRACE_IRQS_IRET
13561 restore_all_notrace:
13562 @@ -602,10 +791,29 @@ ldt_ss:
13563 mov PT_OLDESP(%esp), %eax /* load userspace esp */
13564 mov %dx, %ax /* eax: new kernel esp */
13565 sub %eax, %edx /* offset (low word is 0) */
13566 - PER_CPU(gdt_page, %ebx)
13567 +#ifdef CONFIG_SMP
13568 + movl PER_CPU_VAR(cpu_number), %ebx
13569 + shll $PAGE_SHIFT_asm, %ebx
13570 + addl $cpu_gdt_table, %ebx
13571 +#else
13572 + movl $cpu_gdt_table, %ebx
13573 +#endif
13574 shr $16, %edx
13575 +
13576 +#ifdef CONFIG_PAX_KERNEXEC
13577 + mov %cr0, %esi
13578 + btr $16, %esi
13579 + mov %esi, %cr0
13580 +#endif
13581 +
13582 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
13583 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
13584 +
13585 +#ifdef CONFIG_PAX_KERNEXEC
13586 + bts $16, %esi
13587 + mov %esi, %cr0
13588 +#endif
13589 +
13590 pushl $__ESPFIX_SS
13591 CFI_ADJUST_CFA_OFFSET 4
13592 push %eax /* new kernel esp */
13593 @@ -636,31 +844,25 @@ work_resched:
13594 movl TI_flags(%ebp), %ecx
13595 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
13596 # than syscall tracing?
13597 - jz restore_all
13598 + jz restore_all_pax
13599 testb $_TIF_NEED_RESCHED, %cl
13600 jnz work_resched
13601
13602 work_notifysig: # deal with pending signals and
13603 # notify-resume requests
13604 + movl %esp, %eax
13605 #ifdef CONFIG_VM86
13606 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
13607 - movl %esp, %eax
13608 - jne work_notifysig_v86 # returning to kernel-space or
13609 + jz 1f # returning to kernel-space or
13610 # vm86-space
13611 - xorl %edx, %edx
13612 - call do_notify_resume
13613 - jmp resume_userspace_sig
13614
13615 - ALIGN
13616 -work_notifysig_v86:
13617 pushl %ecx # save ti_flags for do_notify_resume
13618 CFI_ADJUST_CFA_OFFSET 4
13619 call save_v86_state # %eax contains pt_regs pointer
13620 popl %ecx
13621 CFI_ADJUST_CFA_OFFSET -4
13622 movl %eax, %esp
13623 -#else
13624 - movl %esp, %eax
13625 +1:
13626 #endif
13627 xorl %edx, %edx
13628 call do_notify_resume
13629 @@ -673,6 +875,9 @@ syscall_trace_entry:
13630 movl $-ENOSYS,PT_EAX(%esp)
13631 movl %esp, %eax
13632 call syscall_trace_enter
13633 +
13634 + pax_erase_kstack
13635 +
13636 /* What it returned is what we'll actually use. */
13637 cmpl $(nr_syscalls), %eax
13638 jnae syscall_call
13639 @@ -695,6 +900,10 @@ END(syscall_exit_work)
13640
13641 RING0_INT_FRAME # can't unwind into user space anyway
13642 syscall_fault:
13643 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13644 + push %ss
13645 + pop %ds
13646 +#endif
13647 GET_THREAD_INFO(%ebp)
13648 movl $-EFAULT,PT_EAX(%esp)
13649 jmp resume_userspace
13650 @@ -726,6 +935,33 @@ PTREGSCALL(rt_sigreturn)
13651 PTREGSCALL(vm86)
13652 PTREGSCALL(vm86old)
13653
13654 + ALIGN;
13655 +ENTRY(kernel_execve)
13656 + push %ebp
13657 + sub $PT_OLDSS+4,%esp
13658 + push %edi
13659 + push %ecx
13660 + push %eax
13661 + lea 3*4(%esp),%edi
13662 + mov $PT_OLDSS/4+1,%ecx
13663 + xorl %eax,%eax
13664 + rep stosl
13665 + pop %eax
13666 + pop %ecx
13667 + pop %edi
13668 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
13669 + mov %eax,PT_EBX(%esp)
13670 + mov %edx,PT_ECX(%esp)
13671 + mov %ecx,PT_EDX(%esp)
13672 + mov %esp,%eax
13673 + call sys_execve
13674 + GET_THREAD_INFO(%ebp)
13675 + test %eax,%eax
13676 + jz syscall_exit
13677 + add $PT_OLDSS+4,%esp
13678 + pop %ebp
13679 + ret
13680 +
13681 .macro FIXUP_ESPFIX_STACK
13682 /*
13683 * Switch back for ESPFIX stack to the normal zerobased stack
13684 @@ -735,7 +971,13 @@ PTREGSCALL(vm86old)
13685 * normal stack and adjusts ESP with the matching offset.
13686 */
13687 /* fixup the stack */
13688 - PER_CPU(gdt_page, %ebx)
13689 +#ifdef CONFIG_SMP
13690 + movl PER_CPU_VAR(cpu_number), %ebx
13691 + shll $PAGE_SHIFT_asm, %ebx
13692 + addl $cpu_gdt_table, %ebx
13693 +#else
13694 + movl $cpu_gdt_table, %ebx
13695 +#endif
13696 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
13697 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
13698 shl $16, %eax
13699 @@ -1198,7 +1440,6 @@ return_to_handler:
13700 ret
13701 #endif
13702
13703 -.section .rodata,"a"
13704 #include "syscall_table_32.S"
13705
13706 syscall_table_size=(.-sys_call_table)
13707 @@ -1255,9 +1496,12 @@ error_code:
13708 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
13709 REG_TO_PTGS %ecx
13710 SET_KERNEL_GS %ecx
13711 - movl $(__USER_DS), %ecx
13712 + movl $(__KERNEL_DS), %ecx
13713 movl %ecx, %ds
13714 movl %ecx, %es
13715 +
13716 + pax_enter_kernel
13717 +
13718 TRACE_IRQS_OFF
13719 movl %esp,%eax # pt_regs pointer
13720 call *%edi
13721 @@ -1351,6 +1595,9 @@ nmi_stack_correct:
13722 xorl %edx,%edx # zero error code
13723 movl %esp,%eax # pt_regs pointer
13724 call do_nmi
13725 +
13726 + pax_exit_kernel
13727 +
13728 jmp restore_all_notrace
13729 CFI_ENDPROC
13730
13731 @@ -1391,6 +1638,9 @@ nmi_espfix_stack:
13732 FIXUP_ESPFIX_STACK # %eax == %esp
13733 xorl %edx,%edx # zero error code
13734 call do_nmi
13735 +
13736 + pax_exit_kernel
13737 +
13738 RESTORE_REGS
13739 lss 12+4(%esp), %esp # back to espfix stack
13740 CFI_ADJUST_CFA_OFFSET -24
13741 diff -urNp linux-2.6.32.45/arch/x86/kernel/entry_64.S linux-2.6.32.45/arch/x86/kernel/entry_64.S
13742 --- linux-2.6.32.45/arch/x86/kernel/entry_64.S 2011-03-27 14:31:47.000000000 -0400
13743 +++ linux-2.6.32.45/arch/x86/kernel/entry_64.S 2011-08-26 20:19:09.000000000 -0400
13744 @@ -53,6 +53,7 @@
13745 #include <asm/paravirt.h>
13746 #include <asm/ftrace.h>
13747 #include <asm/percpu.h>
13748 +#include <asm/pgtable.h>
13749
13750 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13751 #include <linux/elf-em.h>
13752 @@ -174,6 +175,264 @@ ENTRY(native_usergs_sysret64)
13753 ENDPROC(native_usergs_sysret64)
13754 #endif /* CONFIG_PARAVIRT */
13755
13756 + .macro ljmpq sel, off
13757 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13758 + .byte 0x48; ljmp *1234f(%rip)
13759 + .pushsection .rodata
13760 + .align 16
13761 + 1234: .quad \off; .word \sel
13762 + .popsection
13763 +#else
13764 + pushq $\sel
13765 + pushq $\off
13766 + lretq
13767 +#endif
13768 + .endm
13769 +
13770 + .macro pax_enter_kernel
13771 +#ifdef CONFIG_PAX_KERNEXEC
13772 + call pax_enter_kernel
13773 +#endif
13774 + .endm
13775 +
13776 + .macro pax_exit_kernel
13777 +#ifdef CONFIG_PAX_KERNEXEC
13778 + call pax_exit_kernel
13779 +#endif
13780 + .endm
13781 +
13782 +#ifdef CONFIG_PAX_KERNEXEC
13783 +ENTRY(pax_enter_kernel)
13784 + pushq %rdi
13785 +
13786 +#ifdef CONFIG_PARAVIRT
13787 + PV_SAVE_REGS(CLBR_RDI)
13788 +#endif
13789 +
13790 + GET_CR0_INTO_RDI
13791 + bts $16,%rdi
13792 + jnc 1f
13793 + mov %cs,%edi
13794 + cmp $__KERNEL_CS,%edi
13795 + jz 3f
13796 + ljmpq __KERNEL_CS,3f
13797 +1: ljmpq __KERNEXEC_KERNEL_CS,2f
13798 +2: SET_RDI_INTO_CR0
13799 +3:
13800 +
13801 +#ifdef CONFIG_PARAVIRT
13802 + PV_RESTORE_REGS(CLBR_RDI)
13803 +#endif
13804 +
13805 + popq %rdi
13806 + retq
13807 +ENDPROC(pax_enter_kernel)
13808 +
13809 +ENTRY(pax_exit_kernel)
13810 + pushq %rdi
13811 +
13812 +#ifdef CONFIG_PARAVIRT
13813 + PV_SAVE_REGS(CLBR_RDI)
13814 +#endif
13815 +
13816 + mov %cs,%rdi
13817 + cmp $__KERNEXEC_KERNEL_CS,%edi
13818 + jnz 2f
13819 + GET_CR0_INTO_RDI
13820 + btr $16,%rdi
13821 + ljmpq __KERNEL_CS,1f
13822 +1: SET_RDI_INTO_CR0
13823 +2:
13824 +
13825 +#ifdef CONFIG_PARAVIRT
13826 + PV_RESTORE_REGS(CLBR_RDI);
13827 +#endif
13828 +
13829 + popq %rdi
13830 + retq
13831 +ENDPROC(pax_exit_kernel)
13832 +#endif
13833 +
13834 + .macro pax_enter_kernel_user
13835 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13836 + call pax_enter_kernel_user
13837 +#endif
13838 + .endm
13839 +
13840 + .macro pax_exit_kernel_user
13841 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13842 + call pax_exit_kernel_user
13843 +#endif
13844 +#ifdef CONFIG_PAX_RANDKSTACK
13845 + push %rax
13846 + call pax_randomize_kstack
13847 + pop %rax
13848 +#endif
13849 + .endm
13850 +
13851 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13852 +ENTRY(pax_enter_kernel_user)
13853 + pushq %rdi
13854 + pushq %rbx
13855 +
13856 +#ifdef CONFIG_PARAVIRT
13857 + PV_SAVE_REGS(CLBR_RDI)
13858 +#endif
13859 +
13860 + GET_CR3_INTO_RDI
13861 + mov %rdi,%rbx
13862 + add $__START_KERNEL_map,%rbx
13863 + sub phys_base(%rip),%rbx
13864 +
13865 +#ifdef CONFIG_PARAVIRT
13866 + pushq %rdi
13867 + cmpl $0, pv_info+PARAVIRT_enabled
13868 + jz 1f
13869 + i = 0
13870 + .rept USER_PGD_PTRS
13871 + mov i*8(%rbx),%rsi
13872 + mov $0,%sil
13873 + lea i*8(%rbx),%rdi
13874 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13875 + i = i + 1
13876 + .endr
13877 + jmp 2f
13878 +1:
13879 +#endif
13880 +
13881 + i = 0
13882 + .rept USER_PGD_PTRS
13883 + movb $0,i*8(%rbx)
13884 + i = i + 1
13885 + .endr
13886 +
13887 +#ifdef CONFIG_PARAVIRT
13888 +2: popq %rdi
13889 +#endif
13890 + SET_RDI_INTO_CR3
13891 +
13892 +#ifdef CONFIG_PAX_KERNEXEC
13893 + GET_CR0_INTO_RDI
13894 + bts $16,%rdi
13895 + SET_RDI_INTO_CR0
13896 +#endif
13897 +
13898 +#ifdef CONFIG_PARAVIRT
13899 + PV_RESTORE_REGS(CLBR_RDI)
13900 +#endif
13901 +
13902 + popq %rbx
13903 + popq %rdi
13904 + retq
13905 +ENDPROC(pax_enter_kernel_user)
13906 +
13907 +ENTRY(pax_exit_kernel_user)
13908 + push %rdi
13909 +
13910 +#ifdef CONFIG_PARAVIRT
13911 + pushq %rbx
13912 + PV_SAVE_REGS(CLBR_RDI)
13913 +#endif
13914 +
13915 +#ifdef CONFIG_PAX_KERNEXEC
13916 + GET_CR0_INTO_RDI
13917 + btr $16,%rdi
13918 + SET_RDI_INTO_CR0
13919 +#endif
13920 +
13921 + GET_CR3_INTO_RDI
13922 + add $__START_KERNEL_map,%rdi
13923 + sub phys_base(%rip),%rdi
13924 +
13925 +#ifdef CONFIG_PARAVIRT
13926 + cmpl $0, pv_info+PARAVIRT_enabled
13927 + jz 1f
13928 + mov %rdi,%rbx
13929 + i = 0
13930 + .rept USER_PGD_PTRS
13931 + mov i*8(%rbx),%rsi
13932 + mov $0x67,%sil
13933 + lea i*8(%rbx),%rdi
13934 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13935 + i = i + 1
13936 + .endr
13937 + jmp 2f
13938 +1:
13939 +#endif
13940 +
13941 + i = 0
13942 + .rept USER_PGD_PTRS
13943 + movb $0x67,i*8(%rdi)
13944 + i = i + 1
13945 + .endr
13946 +
13947 +#ifdef CONFIG_PARAVIRT
13948 +2: PV_RESTORE_REGS(CLBR_RDI)
13949 + popq %rbx
13950 +#endif
13951 +
13952 + popq %rdi
13953 + retq
13954 +ENDPROC(pax_exit_kernel_user)
13955 +#endif
13956 +
13957 +.macro pax_erase_kstack
13958 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13959 + call pax_erase_kstack
13960 +#endif
13961 +.endm
13962 +
13963 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13964 +/*
13965 + * r10: thread_info
13966 + * rcx, rdx: can be clobbered
13967 + */
13968 +ENTRY(pax_erase_kstack)
13969 + pushq %rdi
13970 + pushq %rax
13971 + pushq %r10
13972 +
13973 + GET_THREAD_INFO(%r10)
13974 + mov TI_lowest_stack(%r10), %rdi
13975 + mov $-0xBEEF, %rax
13976 + std
13977 +
13978 +1: mov %edi, %ecx
13979 + and $THREAD_SIZE_asm - 1, %ecx
13980 + shr $3, %ecx
13981 + repne scasq
13982 + jecxz 2f
13983 +
13984 + cmp $2*8, %ecx
13985 + jc 2f
13986 +
13987 + mov $2*8, %ecx
13988 + repe scasq
13989 + jecxz 2f
13990 + jne 1b
13991 +
13992 +2: cld
13993 + mov %esp, %ecx
13994 + sub %edi, %ecx
13995 +
13996 + cmp $THREAD_SIZE_asm, %rcx
13997 + jb 3f
13998 + ud2
13999 +3:
14000 +
14001 + shr $3, %ecx
14002 + rep stosq
14003 +
14004 + mov TI_task_thread_sp0(%r10), %rdi
14005 + sub $256, %rdi
14006 + mov %rdi, TI_lowest_stack(%r10)
14007 +
14008 + popq %r10
14009 + popq %rax
14010 + popq %rdi
14011 + ret
14012 +ENDPROC(pax_erase_kstack)
14013 +#endif
14014
14015 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
14016 #ifdef CONFIG_TRACE_IRQFLAGS
14017 @@ -317,7 +576,7 @@ ENTRY(save_args)
14018 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
14019 movq_cfi rbp, 8 /* push %rbp */
14020 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
14021 - testl $3, CS(%rdi)
14022 + testb $3, CS(%rdi)
14023 je 1f
14024 SWAPGS
14025 /*
14026 @@ -409,7 +668,7 @@ ENTRY(ret_from_fork)
14027
14028 RESTORE_REST
14029
14030 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
14031 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
14032 je int_ret_from_sys_call
14033
14034 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
14035 @@ -455,7 +714,7 @@ END(ret_from_fork)
14036 ENTRY(system_call)
14037 CFI_STARTPROC simple
14038 CFI_SIGNAL_FRAME
14039 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
14040 + CFI_DEF_CFA rsp,0
14041 CFI_REGISTER rip,rcx
14042 /*CFI_REGISTER rflags,r11*/
14043 SWAPGS_UNSAFE_STACK
14044 @@ -468,12 +727,13 @@ ENTRY(system_call_after_swapgs)
14045
14046 movq %rsp,PER_CPU_VAR(old_rsp)
14047 movq PER_CPU_VAR(kernel_stack),%rsp
14048 + pax_enter_kernel_user
14049 /*
14050 * No need to follow this irqs off/on section - it's straight
14051 * and short:
14052 */
14053 ENABLE_INTERRUPTS(CLBR_NONE)
14054 - SAVE_ARGS 8,1
14055 + SAVE_ARGS 8*6,1
14056 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
14057 movq %rcx,RIP-ARGOFFSET(%rsp)
14058 CFI_REL_OFFSET rip,RIP-ARGOFFSET
14059 @@ -502,6 +762,8 @@ sysret_check:
14060 andl %edi,%edx
14061 jnz sysret_careful
14062 CFI_REMEMBER_STATE
14063 + pax_exit_kernel_user
14064 + pax_erase_kstack
14065 /*
14066 * sysretq will re-enable interrupts:
14067 */
14068 @@ -562,6 +824,9 @@ auditsys:
14069 movq %rax,%rsi /* 2nd arg: syscall number */
14070 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
14071 call audit_syscall_entry
14072 +
14073 + pax_erase_kstack
14074 +
14075 LOAD_ARGS 0 /* reload call-clobbered registers */
14076 jmp system_call_fastpath
14077
14078 @@ -592,6 +857,9 @@ tracesys:
14079 FIXUP_TOP_OF_STACK %rdi
14080 movq %rsp,%rdi
14081 call syscall_trace_enter
14082 +
14083 + pax_erase_kstack
14084 +
14085 /*
14086 * Reload arg registers from stack in case ptrace changed them.
14087 * We don't reload %rax because syscall_trace_enter() returned
14088 @@ -613,7 +881,7 @@ tracesys:
14089 GLOBAL(int_ret_from_sys_call)
14090 DISABLE_INTERRUPTS(CLBR_NONE)
14091 TRACE_IRQS_OFF
14092 - testl $3,CS-ARGOFFSET(%rsp)
14093 + testb $3,CS-ARGOFFSET(%rsp)
14094 je retint_restore_args
14095 movl $_TIF_ALLWORK_MASK,%edi
14096 /* edi: mask to check */
14097 @@ -800,6 +1068,16 @@ END(interrupt)
14098 CFI_ADJUST_CFA_OFFSET 10*8
14099 call save_args
14100 PARTIAL_FRAME 0
14101 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14102 + testb $3, CS(%rdi)
14103 + jnz 1f
14104 + pax_enter_kernel
14105 + jmp 2f
14106 +1: pax_enter_kernel_user
14107 +2:
14108 +#else
14109 + pax_enter_kernel
14110 +#endif
14111 call \func
14112 .endm
14113
14114 @@ -822,7 +1100,7 @@ ret_from_intr:
14115 CFI_ADJUST_CFA_OFFSET -8
14116 exit_intr:
14117 GET_THREAD_INFO(%rcx)
14118 - testl $3,CS-ARGOFFSET(%rsp)
14119 + testb $3,CS-ARGOFFSET(%rsp)
14120 je retint_kernel
14121
14122 /* Interrupt came from user space */
14123 @@ -844,12 +1122,15 @@ retint_swapgs: /* return to user-space
14124 * The iretq could re-enable interrupts:
14125 */
14126 DISABLE_INTERRUPTS(CLBR_ANY)
14127 + pax_exit_kernel_user
14128 + pax_erase_kstack
14129 TRACE_IRQS_IRETQ
14130 SWAPGS
14131 jmp restore_args
14132
14133 retint_restore_args: /* return to kernel space */
14134 DISABLE_INTERRUPTS(CLBR_ANY)
14135 + pax_exit_kernel
14136 /*
14137 * The iretq could re-enable interrupts:
14138 */
14139 @@ -1032,6 +1313,16 @@ ENTRY(\sym)
14140 CFI_ADJUST_CFA_OFFSET 15*8
14141 call error_entry
14142 DEFAULT_FRAME 0
14143 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14144 + testb $3, CS(%rsp)
14145 + jnz 1f
14146 + pax_enter_kernel
14147 + jmp 2f
14148 +1: pax_enter_kernel_user
14149 +2:
14150 +#else
14151 + pax_enter_kernel
14152 +#endif
14153 movq %rsp,%rdi /* pt_regs pointer */
14154 xorl %esi,%esi /* no error code */
14155 call \do_sym
14156 @@ -1049,6 +1340,16 @@ ENTRY(\sym)
14157 subq $15*8, %rsp
14158 call save_paranoid
14159 TRACE_IRQS_OFF
14160 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14161 + testb $3, CS(%rsp)
14162 + jnz 1f
14163 + pax_enter_kernel
14164 + jmp 2f
14165 +1: pax_enter_kernel_user
14166 +2:
14167 +#else
14168 + pax_enter_kernel
14169 +#endif
14170 movq %rsp,%rdi /* pt_regs pointer */
14171 xorl %esi,%esi /* no error code */
14172 call \do_sym
14173 @@ -1066,9 +1367,24 @@ ENTRY(\sym)
14174 subq $15*8, %rsp
14175 call save_paranoid
14176 TRACE_IRQS_OFF
14177 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14178 + testb $3, CS(%rsp)
14179 + jnz 1f
14180 + pax_enter_kernel
14181 + jmp 2f
14182 +1: pax_enter_kernel_user
14183 +2:
14184 +#else
14185 + pax_enter_kernel
14186 +#endif
14187 movq %rsp,%rdi /* pt_regs pointer */
14188 xorl %esi,%esi /* no error code */
14189 - PER_CPU(init_tss, %rbp)
14190 +#ifdef CONFIG_SMP
14191 + imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
14192 + lea init_tss(%rbp), %rbp
14193 +#else
14194 + lea init_tss(%rip), %rbp
14195 +#endif
14196 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
14197 call \do_sym
14198 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
14199 @@ -1085,6 +1401,16 @@ ENTRY(\sym)
14200 CFI_ADJUST_CFA_OFFSET 15*8
14201 call error_entry
14202 DEFAULT_FRAME 0
14203 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14204 + testb $3, CS(%rsp)
14205 + jnz 1f
14206 + pax_enter_kernel
14207 + jmp 2f
14208 +1: pax_enter_kernel_user
14209 +2:
14210 +#else
14211 + pax_enter_kernel
14212 +#endif
14213 movq %rsp,%rdi /* pt_regs pointer */
14214 movq ORIG_RAX(%rsp),%rsi /* get error code */
14215 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14216 @@ -1104,6 +1430,16 @@ ENTRY(\sym)
14217 call save_paranoid
14218 DEFAULT_FRAME 0
14219 TRACE_IRQS_OFF
14220 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14221 + testb $3, CS(%rsp)
14222 + jnz 1f
14223 + pax_enter_kernel
14224 + jmp 2f
14225 +1: pax_enter_kernel_user
14226 +2:
14227 +#else
14228 + pax_enter_kernel
14229 +#endif
14230 movq %rsp,%rdi /* pt_regs pointer */
14231 movq ORIG_RAX(%rsp),%rsi /* get error code */
14232 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14233 @@ -1405,14 +1741,27 @@ ENTRY(paranoid_exit)
14234 TRACE_IRQS_OFF
14235 testl %ebx,%ebx /* swapgs needed? */
14236 jnz paranoid_restore
14237 - testl $3,CS(%rsp)
14238 + testb $3,CS(%rsp)
14239 jnz paranoid_userspace
14240 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14241 + pax_exit_kernel
14242 + TRACE_IRQS_IRETQ 0
14243 + SWAPGS_UNSAFE_STACK
14244 + RESTORE_ALL 8
14245 + jmp irq_return
14246 +#endif
14247 paranoid_swapgs:
14248 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14249 + pax_exit_kernel_user
14250 +#else
14251 + pax_exit_kernel
14252 +#endif
14253 TRACE_IRQS_IRETQ 0
14254 SWAPGS_UNSAFE_STACK
14255 RESTORE_ALL 8
14256 jmp irq_return
14257 paranoid_restore:
14258 + pax_exit_kernel
14259 TRACE_IRQS_IRETQ 0
14260 RESTORE_ALL 8
14261 jmp irq_return
14262 @@ -1470,7 +1819,7 @@ ENTRY(error_entry)
14263 movq_cfi r14, R14+8
14264 movq_cfi r15, R15+8
14265 xorl %ebx,%ebx
14266 - testl $3,CS+8(%rsp)
14267 + testb $3,CS+8(%rsp)
14268 je error_kernelspace
14269 error_swapgs:
14270 SWAPGS
14271 @@ -1529,6 +1878,16 @@ ENTRY(nmi)
14272 CFI_ADJUST_CFA_OFFSET 15*8
14273 call save_paranoid
14274 DEFAULT_FRAME 0
14275 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14276 + testb $3, CS(%rsp)
14277 + jnz 1f
14278 + pax_enter_kernel
14279 + jmp 2f
14280 +1: pax_enter_kernel_user
14281 +2:
14282 +#else
14283 + pax_enter_kernel
14284 +#endif
14285 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
14286 movq %rsp,%rdi
14287 movq $-1,%rsi
14288 @@ -1539,11 +1898,25 @@ ENTRY(nmi)
14289 DISABLE_INTERRUPTS(CLBR_NONE)
14290 testl %ebx,%ebx /* swapgs needed? */
14291 jnz nmi_restore
14292 - testl $3,CS(%rsp)
14293 + testb $3,CS(%rsp)
14294 jnz nmi_userspace
14295 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14296 + pax_exit_kernel
14297 + SWAPGS_UNSAFE_STACK
14298 + RESTORE_ALL 8
14299 + jmp irq_return
14300 +#endif
14301 nmi_swapgs:
14302 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14303 + pax_exit_kernel_user
14304 +#else
14305 + pax_exit_kernel
14306 +#endif
14307 SWAPGS_UNSAFE_STACK
14308 + RESTORE_ALL 8
14309 + jmp irq_return
14310 nmi_restore:
14311 + pax_exit_kernel
14312 RESTORE_ALL 8
14313 jmp irq_return
14314 nmi_userspace:
14315 diff -urNp linux-2.6.32.45/arch/x86/kernel/ftrace.c linux-2.6.32.45/arch/x86/kernel/ftrace.c
14316 --- linux-2.6.32.45/arch/x86/kernel/ftrace.c 2011-03-27 14:31:47.000000000 -0400
14317 +++ linux-2.6.32.45/arch/x86/kernel/ftrace.c 2011-05-04 17:56:20.000000000 -0400
14318 @@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the
14319 static void *mod_code_newcode; /* holds the text to write to the IP */
14320
14321 static unsigned nmi_wait_count;
14322 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
14323 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
14324
14325 int ftrace_arch_read_dyn_info(char *buf, int size)
14326 {
14327 @@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf,
14328
14329 r = snprintf(buf, size, "%u %u",
14330 nmi_wait_count,
14331 - atomic_read(&nmi_update_count));
14332 + atomic_read_unchecked(&nmi_update_count));
14333 return r;
14334 }
14335
14336 @@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
14337 {
14338 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
14339 smp_rmb();
14340 + pax_open_kernel();
14341 ftrace_mod_code();
14342 - atomic_inc(&nmi_update_count);
14343 + pax_close_kernel();
14344 + atomic_inc_unchecked(&nmi_update_count);
14345 }
14346 /* Must have previous changes seen before executions */
14347 smp_mb();
14348 @@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, voi
14349
14350
14351
14352 -static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
14353 +static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
14354
14355 static unsigned char *ftrace_nop_replace(void)
14356 {
14357 @@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, uns
14358 {
14359 unsigned char replaced[MCOUNT_INSN_SIZE];
14360
14361 + ip = ktla_ktva(ip);
14362 +
14363 /*
14364 * Note: Due to modules and __init, code can
14365 * disappear and change, we need to protect against faulting
14366 @@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_fun
14367 unsigned char old[MCOUNT_INSN_SIZE], *new;
14368 int ret;
14369
14370 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
14371 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
14372 new = ftrace_call_replace(ip, (unsigned long)func);
14373 ret = ftrace_modify_code(ip, old, new);
14374
14375 @@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *da
14376 switch (faulted) {
14377 case 0:
14378 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
14379 - memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
14380 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
14381 break;
14382 case 1:
14383 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
14384 - memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
14385 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
14386 break;
14387 case 2:
14388 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
14389 - memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
14390 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
14391 break;
14392 }
14393
14394 @@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long
14395 {
14396 unsigned char code[MCOUNT_INSN_SIZE];
14397
14398 + ip = ktla_ktva(ip);
14399 +
14400 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
14401 return -EFAULT;
14402
14403 diff -urNp linux-2.6.32.45/arch/x86/kernel/head32.c linux-2.6.32.45/arch/x86/kernel/head32.c
14404 --- linux-2.6.32.45/arch/x86/kernel/head32.c 2011-03-27 14:31:47.000000000 -0400
14405 +++ linux-2.6.32.45/arch/x86/kernel/head32.c 2011-04-17 15:56:46.000000000 -0400
14406 @@ -16,6 +16,7 @@
14407 #include <asm/apic.h>
14408 #include <asm/io_apic.h>
14409 #include <asm/bios_ebda.h>
14410 +#include <asm/boot.h>
14411
14412 static void __init i386_default_early_setup(void)
14413 {
14414 @@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
14415 {
14416 reserve_trampoline_memory();
14417
14418 - reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14419 + reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14420
14421 #ifdef CONFIG_BLK_DEV_INITRD
14422 /* Reserve INITRD */
14423 diff -urNp linux-2.6.32.45/arch/x86/kernel/head_32.S linux-2.6.32.45/arch/x86/kernel/head_32.S
14424 --- linux-2.6.32.45/arch/x86/kernel/head_32.S 2011-03-27 14:31:47.000000000 -0400
14425 +++ linux-2.6.32.45/arch/x86/kernel/head_32.S 2011-07-06 19:53:33.000000000 -0400
14426 @@ -19,10 +19,17 @@
14427 #include <asm/setup.h>
14428 #include <asm/processor-flags.h>
14429 #include <asm/percpu.h>
14430 +#include <asm/msr-index.h>
14431
14432 /* Physical address */
14433 #define pa(X) ((X) - __PAGE_OFFSET)
14434
14435 +#ifdef CONFIG_PAX_KERNEXEC
14436 +#define ta(X) (X)
14437 +#else
14438 +#define ta(X) ((X) - __PAGE_OFFSET)
14439 +#endif
14440 +
14441 /*
14442 * References to members of the new_cpu_data structure.
14443 */
14444 @@ -52,11 +59,7 @@
14445 * and small than max_low_pfn, otherwise will waste some page table entries
14446 */
14447
14448 -#if PTRS_PER_PMD > 1
14449 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
14450 -#else
14451 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
14452 -#endif
14453 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
14454
14455 /* Enough space to fit pagetables for the low memory linear map */
14456 MAPPING_BEYOND_END = \
14457 @@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
14458 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14459
14460 /*
14461 + * Real beginning of normal "text" segment
14462 + */
14463 +ENTRY(stext)
14464 +ENTRY(_stext)
14465 +
14466 +/*
14467 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
14468 * %esi points to the real-mode code as a 32-bit pointer.
14469 * CS and DS must be 4 GB flat segments, but we don't depend on
14470 @@ -80,7 +89,16 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14471 * can.
14472 */
14473 __HEAD
14474 +
14475 +#ifdef CONFIG_PAX_KERNEXEC
14476 + jmp startup_32
14477 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
14478 +.fill PAGE_SIZE-5,1,0xcc
14479 +#endif
14480 +
14481 ENTRY(startup_32)
14482 + movl pa(stack_start),%ecx
14483 +
14484 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
14485 us to not reload segments */
14486 testb $(1<<6), BP_loadflags(%esi)
14487 @@ -95,7 +113,60 @@ ENTRY(startup_32)
14488 movl %eax,%es
14489 movl %eax,%fs
14490 movl %eax,%gs
14491 + movl %eax,%ss
14492 2:
14493 + leal -__PAGE_OFFSET(%ecx),%esp
14494 +
14495 +#ifdef CONFIG_SMP
14496 + movl $pa(cpu_gdt_table),%edi
14497 + movl $__per_cpu_load,%eax
14498 + movw %ax,__KERNEL_PERCPU + 2(%edi)
14499 + rorl $16,%eax
14500 + movb %al,__KERNEL_PERCPU + 4(%edi)
14501 + movb %ah,__KERNEL_PERCPU + 7(%edi)
14502 + movl $__per_cpu_end - 1,%eax
14503 + subl $__per_cpu_start,%eax
14504 + movw %ax,__KERNEL_PERCPU + 0(%edi)
14505 +#endif
14506 +
14507 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14508 + movl $NR_CPUS,%ecx
14509 + movl $pa(cpu_gdt_table),%edi
14510 +1:
14511 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
14512 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
14513 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
14514 + addl $PAGE_SIZE_asm,%edi
14515 + loop 1b
14516 +#endif
14517 +
14518 +#ifdef CONFIG_PAX_KERNEXEC
14519 + movl $pa(boot_gdt),%edi
14520 + movl $__LOAD_PHYSICAL_ADDR,%eax
14521 + movw %ax,__BOOT_CS + 2(%edi)
14522 + rorl $16,%eax
14523 + movb %al,__BOOT_CS + 4(%edi)
14524 + movb %ah,__BOOT_CS + 7(%edi)
14525 + rorl $16,%eax
14526 +
14527 + ljmp $(__BOOT_CS),$1f
14528 +1:
14529 +
14530 + movl $NR_CPUS,%ecx
14531 + movl $pa(cpu_gdt_table),%edi
14532 + addl $__PAGE_OFFSET,%eax
14533 +1:
14534 + movw %ax,__KERNEL_CS + 2(%edi)
14535 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
14536 + rorl $16,%eax
14537 + movb %al,__KERNEL_CS + 4(%edi)
14538 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
14539 + movb %ah,__KERNEL_CS + 7(%edi)
14540 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
14541 + rorl $16,%eax
14542 + addl $PAGE_SIZE_asm,%edi
14543 + loop 1b
14544 +#endif
14545
14546 /*
14547 * Clear BSS first so that there are no surprises...
14548 @@ -140,9 +211,7 @@ ENTRY(startup_32)
14549 cmpl $num_subarch_entries, %eax
14550 jae bad_subarch
14551
14552 - movl pa(subarch_entries)(,%eax,4), %eax
14553 - subl $__PAGE_OFFSET, %eax
14554 - jmp *%eax
14555 + jmp *pa(subarch_entries)(,%eax,4)
14556
14557 bad_subarch:
14558 WEAK(lguest_entry)
14559 @@ -154,10 +223,10 @@ WEAK(xen_entry)
14560 __INITDATA
14561
14562 subarch_entries:
14563 - .long default_entry /* normal x86/PC */
14564 - .long lguest_entry /* lguest hypervisor */
14565 - .long xen_entry /* Xen hypervisor */
14566 - .long default_entry /* Moorestown MID */
14567 + .long ta(default_entry) /* normal x86/PC */
14568 + .long ta(lguest_entry) /* lguest hypervisor */
14569 + .long ta(xen_entry) /* Xen hypervisor */
14570 + .long ta(default_entry) /* Moorestown MID */
14571 num_subarch_entries = (. - subarch_entries) / 4
14572 .previous
14573 #endif /* CONFIG_PARAVIRT */
14574 @@ -218,8 +287,11 @@ default_entry:
14575 movl %eax, pa(max_pfn_mapped)
14576
14577 /* Do early initialization of the fixmap area */
14578 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
14579 - movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14580 +#ifdef CONFIG_COMPAT_VDSO
14581 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14582 +#else
14583 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14584 +#endif
14585 #else /* Not PAE */
14586
14587 page_pde_offset = (__PAGE_OFFSET >> 20);
14588 @@ -249,8 +321,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14589 movl %eax, pa(max_pfn_mapped)
14590
14591 /* Do early initialization of the fixmap area */
14592 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
14593 - movl %eax,pa(swapper_pg_dir+0xffc)
14594 +#ifdef CONFIG_COMPAT_VDSO
14595 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
14596 +#else
14597 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
14598 +#endif
14599 #endif
14600 jmp 3f
14601 /*
14602 @@ -272,6 +347,9 @@ ENTRY(startup_32_smp)
14603 movl %eax,%es
14604 movl %eax,%fs
14605 movl %eax,%gs
14606 + movl pa(stack_start),%ecx
14607 + movl %eax,%ss
14608 + leal -__PAGE_OFFSET(%ecx),%esp
14609 #endif /* CONFIG_SMP */
14610 3:
14611
14612 @@ -297,6 +375,7 @@ ENTRY(startup_32_smp)
14613 orl %edx,%eax
14614 movl %eax,%cr4
14615
14616 +#ifdef CONFIG_X86_PAE
14617 btl $5, %eax # check if PAE is enabled
14618 jnc 6f
14619
14620 @@ -305,6 +384,10 @@ ENTRY(startup_32_smp)
14621 cpuid
14622 cmpl $0x80000000, %eax
14623 jbe 6f
14624 +
14625 + /* Clear bogus XD_DISABLE bits */
14626 + call verify_cpu
14627 +
14628 mov $0x80000001, %eax
14629 cpuid
14630 /* Execute Disable bit supported? */
14631 @@ -312,13 +395,17 @@ ENTRY(startup_32_smp)
14632 jnc 6f
14633
14634 /* Setup EFER (Extended Feature Enable Register) */
14635 - movl $0xc0000080, %ecx
14636 + movl $MSR_EFER, %ecx
14637 rdmsr
14638
14639 btsl $11, %eax
14640 /* Make changes effective */
14641 wrmsr
14642
14643 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
14644 + movl $1,pa(nx_enabled)
14645 +#endif
14646 +
14647 6:
14648
14649 /*
14650 @@ -331,8 +418,8 @@ ENTRY(startup_32_smp)
14651 movl %eax,%cr0 /* ..and set paging (PG) bit */
14652 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
14653 1:
14654 - /* Set up the stack pointer */
14655 - lss stack_start,%esp
14656 + /* Shift the stack pointer to a virtual address */
14657 + addl $__PAGE_OFFSET, %esp
14658
14659 /*
14660 * Initialize eflags. Some BIOS's leave bits like NT set. This would
14661 @@ -344,9 +431,7 @@ ENTRY(startup_32_smp)
14662
14663 #ifdef CONFIG_SMP
14664 cmpb $0, ready
14665 - jz 1f /* Initial CPU cleans BSS */
14666 - jmp checkCPUtype
14667 -1:
14668 + jnz checkCPUtype
14669 #endif /* CONFIG_SMP */
14670
14671 /*
14672 @@ -424,7 +509,7 @@ is386: movl $2,%ecx # set MP
14673 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
14674 movl %eax,%ss # after changing gdt.
14675
14676 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
14677 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
14678 movl %eax,%ds
14679 movl %eax,%es
14680
14681 @@ -438,15 +523,22 @@ is386: movl $2,%ecx # set MP
14682 */
14683 cmpb $0,ready
14684 jne 1f
14685 - movl $per_cpu__gdt_page,%eax
14686 + movl $cpu_gdt_table,%eax
14687 movl $per_cpu__stack_canary,%ecx
14688 +#ifdef CONFIG_SMP
14689 + addl $__per_cpu_load,%ecx
14690 +#endif
14691 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
14692 shrl $16, %ecx
14693 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
14694 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
14695 1:
14696 -#endif
14697 movl $(__KERNEL_STACK_CANARY),%eax
14698 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14699 + movl $(__USER_DS),%eax
14700 +#else
14701 + xorl %eax,%eax
14702 +#endif
14703 movl %eax,%gs
14704
14705 xorl %eax,%eax # Clear LDT
14706 @@ -454,14 +546,7 @@ is386: movl $2,%ecx # set MP
14707
14708 cld # gcc2 wants the direction flag cleared at all times
14709 pushl $0 # fake return address for unwinder
14710 -#ifdef CONFIG_SMP
14711 - movb ready, %cl
14712 movb $1, ready
14713 - cmpb $0,%cl # the first CPU calls start_kernel
14714 - je 1f
14715 - movl (stack_start), %esp
14716 -1:
14717 -#endif /* CONFIG_SMP */
14718 jmp *(initial_code)
14719
14720 /*
14721 @@ -546,22 +631,22 @@ early_page_fault:
14722 jmp early_fault
14723
14724 early_fault:
14725 - cld
14726 #ifdef CONFIG_PRINTK
14727 + cmpl $1,%ss:early_recursion_flag
14728 + je hlt_loop
14729 + incl %ss:early_recursion_flag
14730 + cld
14731 pusha
14732 movl $(__KERNEL_DS),%eax
14733 movl %eax,%ds
14734 movl %eax,%es
14735 - cmpl $2,early_recursion_flag
14736 - je hlt_loop
14737 - incl early_recursion_flag
14738 movl %cr2,%eax
14739 pushl %eax
14740 pushl %edx /* trapno */
14741 pushl $fault_msg
14742 call printk
14743 +; call dump_stack
14744 #endif
14745 - call dump_stack
14746 hlt_loop:
14747 hlt
14748 jmp hlt_loop
14749 @@ -569,8 +654,11 @@ hlt_loop:
14750 /* This is the default interrupt "handler" :-) */
14751 ALIGN
14752 ignore_int:
14753 - cld
14754 #ifdef CONFIG_PRINTK
14755 + cmpl $2,%ss:early_recursion_flag
14756 + je hlt_loop
14757 + incl %ss:early_recursion_flag
14758 + cld
14759 pushl %eax
14760 pushl %ecx
14761 pushl %edx
14762 @@ -579,9 +667,6 @@ ignore_int:
14763 movl $(__KERNEL_DS),%eax
14764 movl %eax,%ds
14765 movl %eax,%es
14766 - cmpl $2,early_recursion_flag
14767 - je hlt_loop
14768 - incl early_recursion_flag
14769 pushl 16(%esp)
14770 pushl 24(%esp)
14771 pushl 32(%esp)
14772 @@ -600,6 +685,8 @@ ignore_int:
14773 #endif
14774 iret
14775
14776 +#include "verify_cpu.S"
14777 +
14778 __REFDATA
14779 .align 4
14780 ENTRY(initial_code)
14781 @@ -610,31 +697,47 @@ ENTRY(initial_page_table)
14782 /*
14783 * BSS section
14784 */
14785 -__PAGE_ALIGNED_BSS
14786 - .align PAGE_SIZE_asm
14787 #ifdef CONFIG_X86_PAE
14788 +.section .swapper_pg_pmd,"a",@progbits
14789 swapper_pg_pmd:
14790 .fill 1024*KPMDS,4,0
14791 #else
14792 +.section .swapper_pg_dir,"a",@progbits
14793 ENTRY(swapper_pg_dir)
14794 .fill 1024,4,0
14795 #endif
14796 +.section .swapper_pg_fixmap,"a",@progbits
14797 swapper_pg_fixmap:
14798 .fill 1024,4,0
14799 #ifdef CONFIG_X86_TRAMPOLINE
14800 +.section .trampoline_pg_dir,"a",@progbits
14801 ENTRY(trampoline_pg_dir)
14802 +#ifdef CONFIG_X86_PAE
14803 + .fill 4,8,0
14804 +#else
14805 .fill 1024,4,0
14806 #endif
14807 +#endif
14808 +
14809 +.section .empty_zero_page,"a",@progbits
14810 ENTRY(empty_zero_page)
14811 .fill 4096,1,0
14812
14813 /*
14814 + * The IDT has to be page-aligned to simplify the Pentium
14815 + * F0 0F bug workaround.. We have a special link segment
14816 + * for this.
14817 + */
14818 +.section .idt,"a",@progbits
14819 +ENTRY(idt_table)
14820 + .fill 256,8,0
14821 +
14822 +/*
14823 * This starts the data section.
14824 */
14825 #ifdef CONFIG_X86_PAE
14826 -__PAGE_ALIGNED_DATA
14827 - /* Page-aligned for the benefit of paravirt? */
14828 - .align PAGE_SIZE_asm
14829 +.section .swapper_pg_dir,"a",@progbits
14830 +
14831 ENTRY(swapper_pg_dir)
14832 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
14833 # if KPMDS == 3
14834 @@ -653,15 +756,24 @@ ENTRY(swapper_pg_dir)
14835 # error "Kernel PMDs should be 1, 2 or 3"
14836 # endif
14837 .align PAGE_SIZE_asm /* needs to be page-sized too */
14838 +
14839 +#ifdef CONFIG_PAX_PER_CPU_PGD
14840 +ENTRY(cpu_pgd)
14841 + .rept NR_CPUS
14842 + .fill 4,8,0
14843 + .endr
14844 +#endif
14845 +
14846 #endif
14847
14848 .data
14849 +.balign 4
14850 ENTRY(stack_start)
14851 - .long init_thread_union+THREAD_SIZE
14852 - .long __BOOT_DS
14853 + .long init_thread_union+THREAD_SIZE-8
14854
14855 ready: .byte 0
14856
14857 +.section .rodata,"a",@progbits
14858 early_recursion_flag:
14859 .long 0
14860
14861 @@ -697,7 +809,7 @@ fault_msg:
14862 .word 0 # 32 bit align gdt_desc.address
14863 boot_gdt_descr:
14864 .word __BOOT_DS+7
14865 - .long boot_gdt - __PAGE_OFFSET
14866 + .long pa(boot_gdt)
14867
14868 .word 0 # 32-bit align idt_desc.address
14869 idt_descr:
14870 @@ -708,7 +820,7 @@ idt_descr:
14871 .word 0 # 32 bit align gdt_desc.address
14872 ENTRY(early_gdt_descr)
14873 .word GDT_ENTRIES*8-1
14874 - .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
14875 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
14876
14877 /*
14878 * The boot_gdt must mirror the equivalent in setup.S and is
14879 @@ -717,5 +829,65 @@ ENTRY(early_gdt_descr)
14880 .align L1_CACHE_BYTES
14881 ENTRY(boot_gdt)
14882 .fill GDT_ENTRY_BOOT_CS,8,0
14883 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
14884 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
14885 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
14886 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
14887 +
14888 + .align PAGE_SIZE_asm
14889 +ENTRY(cpu_gdt_table)
14890 + .rept NR_CPUS
14891 + .quad 0x0000000000000000 /* NULL descriptor */
14892 + .quad 0x0000000000000000 /* 0x0b reserved */
14893 + .quad 0x0000000000000000 /* 0x13 reserved */
14894 + .quad 0x0000000000000000 /* 0x1b reserved */
14895 +
14896 +#ifdef CONFIG_PAX_KERNEXEC
14897 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
14898 +#else
14899 + .quad 0x0000000000000000 /* 0x20 unused */
14900 +#endif
14901 +
14902 + .quad 0x0000000000000000 /* 0x28 unused */
14903 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
14904 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
14905 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
14906 + .quad 0x0000000000000000 /* 0x4b reserved */
14907 + .quad 0x0000000000000000 /* 0x53 reserved */
14908 + .quad 0x0000000000000000 /* 0x5b reserved */
14909 +
14910 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
14911 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
14912 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
14913 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
14914 +
14915 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
14916 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
14917 +
14918 + /*
14919 + * Segments used for calling PnP BIOS have byte granularity.
14920 + * The code segments and data segments have fixed 64k limits,
14921 + * the transfer segment sizes are set at run time.
14922 + */
14923 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
14924 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
14925 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
14926 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
14927 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
14928 +
14929 + /*
14930 + * The APM segments have byte granularity and their bases
14931 + * are set at run time. All have 64k limits.
14932 + */
14933 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
14934 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
14935 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
14936 +
14937 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
14938 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
14939 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
14940 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
14941 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
14942 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
14943 +
14944 + /* Be sure this is zeroed to avoid false validations in Xen */
14945 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
14946 + .endr
14947 diff -urNp linux-2.6.32.45/arch/x86/kernel/head_64.S linux-2.6.32.45/arch/x86/kernel/head_64.S
14948 --- linux-2.6.32.45/arch/x86/kernel/head_64.S 2011-03-27 14:31:47.000000000 -0400
14949 +++ linux-2.6.32.45/arch/x86/kernel/head_64.S 2011-04-17 15:56:46.000000000 -0400
14950 @@ -19,6 +19,7 @@
14951 #include <asm/cache.h>
14952 #include <asm/processor-flags.h>
14953 #include <asm/percpu.h>
14954 +#include <asm/cpufeature.h>
14955
14956 #ifdef CONFIG_PARAVIRT
14957 #include <asm/asm-offsets.h>
14958 @@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
14959 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
14960 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
14961 L3_START_KERNEL = pud_index(__START_KERNEL_map)
14962 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
14963 +L3_VMALLOC_START = pud_index(VMALLOC_START)
14964 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
14965 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
14966
14967 .text
14968 __HEAD
14969 @@ -85,35 +90,22 @@ startup_64:
14970 */
14971 addq %rbp, init_level4_pgt + 0(%rip)
14972 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
14973 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
14974 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
14975 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
14976
14977 addq %rbp, level3_ident_pgt + 0(%rip)
14978 +#ifndef CONFIG_XEN
14979 + addq %rbp, level3_ident_pgt + 8(%rip)
14980 +#endif
14981
14982 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
14983 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
14984 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
14985
14986 - addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14987 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
14988 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
14989
14990 - /* Add an Identity mapping if I am above 1G */
14991 - leaq _text(%rip), %rdi
14992 - andq $PMD_PAGE_MASK, %rdi
14993 -
14994 - movq %rdi, %rax
14995 - shrq $PUD_SHIFT, %rax
14996 - andq $(PTRS_PER_PUD - 1), %rax
14997 - jz ident_complete
14998 -
14999 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
15000 - leaq level3_ident_pgt(%rip), %rbx
15001 - movq %rdx, 0(%rbx, %rax, 8)
15002 -
15003 - movq %rdi, %rax
15004 - shrq $PMD_SHIFT, %rax
15005 - andq $(PTRS_PER_PMD - 1), %rax
15006 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
15007 - leaq level2_spare_pgt(%rip), %rbx
15008 - movq %rdx, 0(%rbx, %rax, 8)
15009 -ident_complete:
15010 + addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
15011 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
15012
15013 /*
15014 * Fixup the kernel text+data virtual addresses. Note that
15015 @@ -161,8 +153,8 @@ ENTRY(secondary_startup_64)
15016 * after the boot processor executes this code.
15017 */
15018
15019 - /* Enable PAE mode and PGE */
15020 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
15021 + /* Enable PAE mode and PSE/PGE */
15022 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15023 movq %rax, %cr4
15024
15025 /* Setup early boot stage 4 level pagetables. */
15026 @@ -184,9 +176,13 @@ ENTRY(secondary_startup_64)
15027 movl $MSR_EFER, %ecx
15028 rdmsr
15029 btsl $_EFER_SCE, %eax /* Enable System Call */
15030 - btl $20,%edi /* No Execute supported? */
15031 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
15032 jnc 1f
15033 btsl $_EFER_NX, %eax
15034 + leaq init_level4_pgt(%rip), %rdi
15035 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
15036 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
15037 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
15038 1: wrmsr /* Make changes effective */
15039
15040 /* Setup cr0 */
15041 @@ -262,16 +258,16 @@ ENTRY(secondary_startup_64)
15042 .quad x86_64_start_kernel
15043 ENTRY(initial_gs)
15044 .quad INIT_PER_CPU_VAR(irq_stack_union)
15045 - __FINITDATA
15046
15047 ENTRY(stack_start)
15048 .quad init_thread_union+THREAD_SIZE-8
15049 .word 0
15050 + __FINITDATA
15051
15052 bad_address:
15053 jmp bad_address
15054
15055 - .section ".init.text","ax"
15056 + __INIT
15057 #ifdef CONFIG_EARLY_PRINTK
15058 .globl early_idt_handlers
15059 early_idt_handlers:
15060 @@ -316,18 +312,23 @@ ENTRY(early_idt_handler)
15061 #endif /* EARLY_PRINTK */
15062 1: hlt
15063 jmp 1b
15064 + .previous
15065
15066 #ifdef CONFIG_EARLY_PRINTK
15067 + __INITDATA
15068 early_recursion_flag:
15069 .long 0
15070 + .previous
15071
15072 + .section .rodata,"a",@progbits
15073 early_idt_msg:
15074 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
15075 early_idt_ripmsg:
15076 .asciz "RIP %s\n"
15077 -#endif /* CONFIG_EARLY_PRINTK */
15078 .previous
15079 +#endif /* CONFIG_EARLY_PRINTK */
15080
15081 + .section .rodata,"a",@progbits
15082 #define NEXT_PAGE(name) \
15083 .balign PAGE_SIZE; \
15084 ENTRY(name)
15085 @@ -350,13 +351,36 @@ NEXT_PAGE(init_level4_pgt)
15086 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15087 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
15088 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15089 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
15090 + .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
15091 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
15092 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15093 .org init_level4_pgt + L4_START_KERNEL*8, 0
15094 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
15095 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
15096
15097 +#ifdef CONFIG_PAX_PER_CPU_PGD
15098 +NEXT_PAGE(cpu_pgd)
15099 + .rept NR_CPUS
15100 + .fill 512,8,0
15101 + .endr
15102 +#endif
15103 +
15104 NEXT_PAGE(level3_ident_pgt)
15105 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15106 +#ifdef CONFIG_XEN
15107 .fill 511,8,0
15108 +#else
15109 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
15110 + .fill 510,8,0
15111 +#endif
15112 +
15113 +NEXT_PAGE(level3_vmalloc_pgt)
15114 + .fill 512,8,0
15115 +
15116 +NEXT_PAGE(level3_vmemmap_pgt)
15117 + .fill L3_VMEMMAP_START,8,0
15118 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15119
15120 NEXT_PAGE(level3_kernel_pgt)
15121 .fill L3_START_KERNEL,8,0
15122 @@ -364,20 +388,23 @@ NEXT_PAGE(level3_kernel_pgt)
15123 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
15124 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15125
15126 +NEXT_PAGE(level2_vmemmap_pgt)
15127 + .fill 512,8,0
15128 +
15129 NEXT_PAGE(level2_fixmap_pgt)
15130 - .fill 506,8,0
15131 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15132 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
15133 - .fill 5,8,0
15134 + .fill 507,8,0
15135 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
15136 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
15137 + .fill 4,8,0
15138
15139 -NEXT_PAGE(level1_fixmap_pgt)
15140 +NEXT_PAGE(level1_vsyscall_pgt)
15141 .fill 512,8,0
15142
15143 -NEXT_PAGE(level2_ident_pgt)
15144 - /* Since I easily can, map the first 1G.
15145 + /* Since I easily can, map the first 2G.
15146 * Don't set NX because code runs from these pages.
15147 */
15148 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
15149 +NEXT_PAGE(level2_ident_pgt)
15150 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
15151
15152 NEXT_PAGE(level2_kernel_pgt)
15153 /*
15154 @@ -390,33 +417,55 @@ NEXT_PAGE(level2_kernel_pgt)
15155 * If you want to increase this then increase MODULES_VADDR
15156 * too.)
15157 */
15158 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
15159 - KERNEL_IMAGE_SIZE/PMD_SIZE)
15160 -
15161 -NEXT_PAGE(level2_spare_pgt)
15162 - .fill 512, 8, 0
15163 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
15164
15165 #undef PMDS
15166 #undef NEXT_PAGE
15167
15168 - .data
15169 + .align PAGE_SIZE
15170 +ENTRY(cpu_gdt_table)
15171 + .rept NR_CPUS
15172 + .quad 0x0000000000000000 /* NULL descriptor */
15173 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
15174 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
15175 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
15176 + .quad 0x00cffb000000ffff /* __USER32_CS */
15177 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
15178 + .quad 0x00affb000000ffff /* __USER_CS */
15179 +
15180 +#ifdef CONFIG_PAX_KERNEXEC
15181 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
15182 +#else
15183 + .quad 0x0 /* unused */
15184 +#endif
15185 +
15186 + .quad 0,0 /* TSS */
15187 + .quad 0,0 /* LDT */
15188 + .quad 0,0,0 /* three TLS descriptors */
15189 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
15190 + /* asm/segment.h:GDT_ENTRIES must match this */
15191 +
15192 + /* zero the remaining page */
15193 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
15194 + .endr
15195 +
15196 .align 16
15197 .globl early_gdt_descr
15198 early_gdt_descr:
15199 .word GDT_ENTRIES*8-1
15200 early_gdt_descr_base:
15201 - .quad INIT_PER_CPU_VAR(gdt_page)
15202 + .quad cpu_gdt_table
15203
15204 ENTRY(phys_base)
15205 /* This must match the first entry in level2_kernel_pgt */
15206 .quad 0x0000000000000000
15207
15208 #include "../../x86/xen/xen-head.S"
15209 -
15210 - .section .bss, "aw", @nobits
15211 +
15212 + .section .rodata,"a",@progbits
15213 .align L1_CACHE_BYTES
15214 ENTRY(idt_table)
15215 - .skip IDT_ENTRIES * 16
15216 + .fill 512,8,0
15217
15218 __PAGE_ALIGNED_BSS
15219 .align PAGE_SIZE
15220 diff -urNp linux-2.6.32.45/arch/x86/kernel/i386_ksyms_32.c linux-2.6.32.45/arch/x86/kernel/i386_ksyms_32.c
15221 --- linux-2.6.32.45/arch/x86/kernel/i386_ksyms_32.c 2011-03-27 14:31:47.000000000 -0400
15222 +++ linux-2.6.32.45/arch/x86/kernel/i386_ksyms_32.c 2011-04-17 15:56:46.000000000 -0400
15223 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
15224 EXPORT_SYMBOL(cmpxchg8b_emu);
15225 #endif
15226
15227 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
15228 +
15229 /* Networking helper routines. */
15230 EXPORT_SYMBOL(csum_partial_copy_generic);
15231 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
15232 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
15233
15234 EXPORT_SYMBOL(__get_user_1);
15235 EXPORT_SYMBOL(__get_user_2);
15236 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
15237
15238 EXPORT_SYMBOL(csum_partial);
15239 EXPORT_SYMBOL(empty_zero_page);
15240 +
15241 +#ifdef CONFIG_PAX_KERNEXEC
15242 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
15243 +#endif
15244 diff -urNp linux-2.6.32.45/arch/x86/kernel/i8259.c linux-2.6.32.45/arch/x86/kernel/i8259.c
15245 --- linux-2.6.32.45/arch/x86/kernel/i8259.c 2011-03-27 14:31:47.000000000 -0400
15246 +++ linux-2.6.32.45/arch/x86/kernel/i8259.c 2011-05-04 17:56:28.000000000 -0400
15247 @@ -208,7 +208,7 @@ spurious_8259A_irq:
15248 "spurious 8259A interrupt: IRQ%d.\n", irq);
15249 spurious_irq_mask |= irqmask;
15250 }
15251 - atomic_inc(&irq_err_count);
15252 + atomic_inc_unchecked(&irq_err_count);
15253 /*
15254 * Theoretically we do not have to handle this IRQ,
15255 * but in Linux this does not cause problems and is
15256 diff -urNp linux-2.6.32.45/arch/x86/kernel/init_task.c linux-2.6.32.45/arch/x86/kernel/init_task.c
15257 --- linux-2.6.32.45/arch/x86/kernel/init_task.c 2011-03-27 14:31:47.000000000 -0400
15258 +++ linux-2.6.32.45/arch/x86/kernel/init_task.c 2011-04-17 15:56:46.000000000 -0400
15259 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
15260 * way process stacks are handled. This is done by having a special
15261 * "init_task" linker map entry..
15262 */
15263 -union thread_union init_thread_union __init_task_data =
15264 - { INIT_THREAD_INFO(init_task) };
15265 +union thread_union init_thread_union __init_task_data;
15266
15267 /*
15268 * Initial task structure.
15269 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
15270 * section. Since TSS's are completely CPU-local, we want them
15271 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
15272 */
15273 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
15274 -
15275 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
15276 +EXPORT_SYMBOL(init_tss);
15277 diff -urNp linux-2.6.32.45/arch/x86/kernel/ioport.c linux-2.6.32.45/arch/x86/kernel/ioport.c
15278 --- linux-2.6.32.45/arch/x86/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
15279 +++ linux-2.6.32.45/arch/x86/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
15280 @@ -6,6 +6,7 @@
15281 #include <linux/sched.h>
15282 #include <linux/kernel.h>
15283 #include <linux/capability.h>
15284 +#include <linux/security.h>
15285 #include <linux/errno.h>
15286 #include <linux/types.h>
15287 #include <linux/ioport.h>
15288 @@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long
15289
15290 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
15291 return -EINVAL;
15292 +#ifdef CONFIG_GRKERNSEC_IO
15293 + if (turn_on && grsec_disable_privio) {
15294 + gr_handle_ioperm();
15295 + return -EPERM;
15296 + }
15297 +#endif
15298 if (turn_on && !capable(CAP_SYS_RAWIO))
15299 return -EPERM;
15300
15301 @@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long
15302 * because the ->io_bitmap_max value must match the bitmap
15303 * contents:
15304 */
15305 - tss = &per_cpu(init_tss, get_cpu());
15306 + tss = init_tss + get_cpu();
15307
15308 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
15309
15310 @@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, s
15311 return -EINVAL;
15312 /* Trying to gain more privileges? */
15313 if (level > old) {
15314 +#ifdef CONFIG_GRKERNSEC_IO
15315 + if (grsec_disable_privio) {
15316 + gr_handle_iopl();
15317 + return -EPERM;
15318 + }
15319 +#endif
15320 if (!capable(CAP_SYS_RAWIO))
15321 return -EPERM;
15322 }
15323 diff -urNp linux-2.6.32.45/arch/x86/kernel/irq_32.c linux-2.6.32.45/arch/x86/kernel/irq_32.c
15324 --- linux-2.6.32.45/arch/x86/kernel/irq_32.c 2011-03-27 14:31:47.000000000 -0400
15325 +++ linux-2.6.32.45/arch/x86/kernel/irq_32.c 2011-07-06 19:53:33.000000000 -0400
15326 @@ -35,7 +35,7 @@ static int check_stack_overflow(void)
15327 __asm__ __volatile__("andl %%esp,%0" :
15328 "=r" (sp) : "0" (THREAD_SIZE - 1));
15329
15330 - return sp < (sizeof(struct thread_info) + STACK_WARN);
15331 + return sp < STACK_WARN;
15332 }
15333
15334 static void print_stack_overflow(void)
15335 @@ -54,9 +54,9 @@ static inline void print_stack_overflow(
15336 * per-CPU IRQ handling contexts (thread information and stack)
15337 */
15338 union irq_ctx {
15339 - struct thread_info tinfo;
15340 - u32 stack[THREAD_SIZE/sizeof(u32)];
15341 -} __attribute__((aligned(PAGE_SIZE)));
15342 + unsigned long previous_esp;
15343 + u32 stack[THREAD_SIZE/sizeof(u32)];
15344 +} __attribute__((aligned(THREAD_SIZE)));
15345
15346 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
15347 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
15348 @@ -78,10 +78,9 @@ static void call_on_stack(void *func, vo
15349 static inline int
15350 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15351 {
15352 - union irq_ctx *curctx, *irqctx;
15353 + union irq_ctx *irqctx;
15354 u32 *isp, arg1, arg2;
15355
15356 - curctx = (union irq_ctx *) current_thread_info();
15357 irqctx = __get_cpu_var(hardirq_ctx);
15358
15359 /*
15360 @@ -90,21 +89,16 @@ execute_on_irq_stack(int overflow, struc
15361 * handler) we can't do that and just have to keep using the
15362 * current stack (which is the irq stack already after all)
15363 */
15364 - if (unlikely(curctx == irqctx))
15365 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
15366 return 0;
15367
15368 /* build the stack frame on the IRQ stack */
15369 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15370 - irqctx->tinfo.task = curctx->tinfo.task;
15371 - irqctx->tinfo.previous_esp = current_stack_pointer;
15372 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15373 + irqctx->previous_esp = current_stack_pointer;
15374
15375 - /*
15376 - * Copy the softirq bits in preempt_count so that the
15377 - * softirq checks work in the hardirq context.
15378 - */
15379 - irqctx->tinfo.preempt_count =
15380 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
15381 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
15382 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15383 + __set_fs(MAKE_MM_SEG(0));
15384 +#endif
15385
15386 if (unlikely(overflow))
15387 call_on_stack(print_stack_overflow, isp);
15388 @@ -116,6 +110,11 @@ execute_on_irq_stack(int overflow, struc
15389 : "0" (irq), "1" (desc), "2" (isp),
15390 "D" (desc->handle_irq)
15391 : "memory", "cc", "ecx");
15392 +
15393 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15394 + __set_fs(current_thread_info()->addr_limit);
15395 +#endif
15396 +
15397 return 1;
15398 }
15399
15400 @@ -124,28 +123,11 @@ execute_on_irq_stack(int overflow, struc
15401 */
15402 void __cpuinit irq_ctx_init(int cpu)
15403 {
15404 - union irq_ctx *irqctx;
15405 -
15406 if (per_cpu(hardirq_ctx, cpu))
15407 return;
15408
15409 - irqctx = &per_cpu(hardirq_stack, cpu);
15410 - irqctx->tinfo.task = NULL;
15411 - irqctx->tinfo.exec_domain = NULL;
15412 - irqctx->tinfo.cpu = cpu;
15413 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
15414 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15415 -
15416 - per_cpu(hardirq_ctx, cpu) = irqctx;
15417 -
15418 - irqctx = &per_cpu(softirq_stack, cpu);
15419 - irqctx->tinfo.task = NULL;
15420 - irqctx->tinfo.exec_domain = NULL;
15421 - irqctx->tinfo.cpu = cpu;
15422 - irqctx->tinfo.preempt_count = 0;
15423 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15424 -
15425 - per_cpu(softirq_ctx, cpu) = irqctx;
15426 + per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
15427 + per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
15428
15429 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
15430 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
15431 @@ -159,7 +141,6 @@ void irq_ctx_exit(int cpu)
15432 asmlinkage void do_softirq(void)
15433 {
15434 unsigned long flags;
15435 - struct thread_info *curctx;
15436 union irq_ctx *irqctx;
15437 u32 *isp;
15438
15439 @@ -169,15 +150,22 @@ asmlinkage void do_softirq(void)
15440 local_irq_save(flags);
15441
15442 if (local_softirq_pending()) {
15443 - curctx = current_thread_info();
15444 irqctx = __get_cpu_var(softirq_ctx);
15445 - irqctx->tinfo.task = curctx->task;
15446 - irqctx->tinfo.previous_esp = current_stack_pointer;
15447 + irqctx->previous_esp = current_stack_pointer;
15448
15449 /* build the stack frame on the softirq stack */
15450 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15451 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15452 +
15453 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15454 + __set_fs(MAKE_MM_SEG(0));
15455 +#endif
15456
15457 call_on_stack(__do_softirq, isp);
15458 +
15459 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15460 + __set_fs(current_thread_info()->addr_limit);
15461 +#endif
15462 +
15463 /*
15464 * Shouldnt happen, we returned above if in_interrupt():
15465 */
15466 diff -urNp linux-2.6.32.45/arch/x86/kernel/irq.c linux-2.6.32.45/arch/x86/kernel/irq.c
15467 --- linux-2.6.32.45/arch/x86/kernel/irq.c 2011-03-27 14:31:47.000000000 -0400
15468 +++ linux-2.6.32.45/arch/x86/kernel/irq.c 2011-05-04 17:56:28.000000000 -0400
15469 @@ -15,7 +15,7 @@
15470 #include <asm/mce.h>
15471 #include <asm/hw_irq.h>
15472
15473 -atomic_t irq_err_count;
15474 +atomic_unchecked_t irq_err_count;
15475
15476 /* Function pointer for generic interrupt vector handling */
15477 void (*generic_interrupt_extension)(void) = NULL;
15478 @@ -114,9 +114,9 @@ static int show_other_interrupts(struct
15479 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
15480 seq_printf(p, " Machine check polls\n");
15481 #endif
15482 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
15483 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
15484 #if defined(CONFIG_X86_IO_APIC)
15485 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
15486 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
15487 #endif
15488 return 0;
15489 }
15490 @@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
15491
15492 u64 arch_irq_stat(void)
15493 {
15494 - u64 sum = atomic_read(&irq_err_count);
15495 + u64 sum = atomic_read_unchecked(&irq_err_count);
15496
15497 #ifdef CONFIG_X86_IO_APIC
15498 - sum += atomic_read(&irq_mis_count);
15499 + sum += atomic_read_unchecked(&irq_mis_count);
15500 #endif
15501 return sum;
15502 }
15503 diff -urNp linux-2.6.32.45/arch/x86/kernel/kgdb.c linux-2.6.32.45/arch/x86/kernel/kgdb.c
15504 --- linux-2.6.32.45/arch/x86/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
15505 +++ linux-2.6.32.45/arch/x86/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
15506 @@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vec
15507
15508 /* clear the trace bit */
15509 linux_regs->flags &= ~X86_EFLAGS_TF;
15510 - atomic_set(&kgdb_cpu_doing_single_step, -1);
15511 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
15512
15513 /* set the trace bit if we're stepping */
15514 if (remcomInBuffer[0] == 's') {
15515 linux_regs->flags |= X86_EFLAGS_TF;
15516 kgdb_single_step = 1;
15517 - atomic_set(&kgdb_cpu_doing_single_step,
15518 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
15519 raw_smp_processor_id());
15520 }
15521
15522 @@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args
15523 break;
15524
15525 case DIE_DEBUG:
15526 - if (atomic_read(&kgdb_cpu_doing_single_step) ==
15527 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
15528 raw_smp_processor_id()) {
15529 if (user_mode(regs))
15530 return single_step_cont(regs, args);
15531 @@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception
15532 return instruction_pointer(regs);
15533 }
15534
15535 -struct kgdb_arch arch_kgdb_ops = {
15536 +const struct kgdb_arch arch_kgdb_ops = {
15537 /* Breakpoint instruction: */
15538 .gdb_bpt_instr = { 0xcc },
15539 .flags = KGDB_HW_BREAKPOINT,
15540 diff -urNp linux-2.6.32.45/arch/x86/kernel/kprobes.c linux-2.6.32.45/arch/x86/kernel/kprobes.c
15541 --- linux-2.6.32.45/arch/x86/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
15542 +++ linux-2.6.32.45/arch/x86/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
15543 @@ -166,9 +166,13 @@ static void __kprobes set_jmp_op(void *f
15544 char op;
15545 s32 raddr;
15546 } __attribute__((packed)) * jop;
15547 - jop = (struct __arch_jmp_op *)from;
15548 +
15549 + jop = (struct __arch_jmp_op *)(ktla_ktva(from));
15550 +
15551 + pax_open_kernel();
15552 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
15553 jop->op = RELATIVEJUMP_INSTRUCTION;
15554 + pax_close_kernel();
15555 }
15556
15557 /*
15558 @@ -193,7 +197,7 @@ static int __kprobes can_boost(kprobe_op
15559 kprobe_opcode_t opcode;
15560 kprobe_opcode_t *orig_opcodes = opcodes;
15561
15562 - if (search_exception_tables((unsigned long)opcodes))
15563 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
15564 return 0; /* Page fault may occur on this address. */
15565
15566 retry:
15567 @@ -337,7 +341,9 @@ static void __kprobes fix_riprel(struct
15568 disp = (u8 *) p->addr + *((s32 *) insn) -
15569 (u8 *) p->ainsn.insn;
15570 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
15571 + pax_open_kernel();
15572 *(s32 *)insn = (s32) disp;
15573 + pax_close_kernel();
15574 }
15575 }
15576 #endif
15577 @@ -345,16 +351,18 @@ static void __kprobes fix_riprel(struct
15578
15579 static void __kprobes arch_copy_kprobe(struct kprobe *p)
15580 {
15581 - memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
15582 + pax_open_kernel();
15583 + memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
15584 + pax_close_kernel();
15585
15586 fix_riprel(p);
15587
15588 - if (can_boost(p->addr))
15589 + if (can_boost(ktla_ktva(p->addr)))
15590 p->ainsn.boostable = 0;
15591 else
15592 p->ainsn.boostable = -1;
15593
15594 - p->opcode = *p->addr;
15595 + p->opcode = *(ktla_ktva(p->addr));
15596 }
15597
15598 int __kprobes arch_prepare_kprobe(struct kprobe *p)
15599 @@ -432,7 +440,7 @@ static void __kprobes prepare_singlestep
15600 if (p->opcode == BREAKPOINT_INSTRUCTION)
15601 regs->ip = (unsigned long)p->addr;
15602 else
15603 - regs->ip = (unsigned long)p->ainsn.insn;
15604 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15605 }
15606
15607 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
15608 @@ -453,7 +461,7 @@ static void __kprobes setup_singlestep(s
15609 if (p->ainsn.boostable == 1 && !p->post_handler) {
15610 /* Boost up -- we can execute copied instructions directly */
15611 reset_current_kprobe();
15612 - regs->ip = (unsigned long)p->ainsn.insn;
15613 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15614 preempt_enable_no_resched();
15615 return;
15616 }
15617 @@ -523,7 +531,7 @@ static int __kprobes kprobe_handler(stru
15618 struct kprobe_ctlblk *kcb;
15619
15620 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
15621 - if (*addr != BREAKPOINT_INSTRUCTION) {
15622 + if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
15623 /*
15624 * The breakpoint instruction was removed right
15625 * after we hit it. Another cpu has removed
15626 @@ -775,7 +783,7 @@ static void __kprobes resume_execution(s
15627 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
15628 {
15629 unsigned long *tos = stack_addr(regs);
15630 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
15631 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
15632 unsigned long orig_ip = (unsigned long)p->addr;
15633 kprobe_opcode_t *insn = p->ainsn.insn;
15634
15635 @@ -958,7 +966,7 @@ int __kprobes kprobe_exceptions_notify(s
15636 struct die_args *args = data;
15637 int ret = NOTIFY_DONE;
15638
15639 - if (args->regs && user_mode_vm(args->regs))
15640 + if (args->regs && user_mode(args->regs))
15641 return ret;
15642
15643 switch (val) {
15644 diff -urNp linux-2.6.32.45/arch/x86/kernel/kvm.c linux-2.6.32.45/arch/x86/kernel/kvm.c
15645 --- linux-2.6.32.45/arch/x86/kernel/kvm.c 2011-03-27 14:31:47.000000000 -0400
15646 +++ linux-2.6.32.45/arch/x86/kernel/kvm.c 2011-08-24 18:35:52.000000000 -0400
15647 @@ -216,6 +216,7 @@ static void __init paravirt_ops_setup(vo
15648 pv_mmu_ops.set_pud = kvm_set_pud;
15649 #if PAGETABLE_LEVELS == 4
15650 pv_mmu_ops.set_pgd = kvm_set_pgd;
15651 + pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
15652 #endif
15653 #endif
15654 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
15655 diff -urNp linux-2.6.32.45/arch/x86/kernel/ldt.c linux-2.6.32.45/arch/x86/kernel/ldt.c
15656 --- linux-2.6.32.45/arch/x86/kernel/ldt.c 2011-03-27 14:31:47.000000000 -0400
15657 +++ linux-2.6.32.45/arch/x86/kernel/ldt.c 2011-04-17 15:56:46.000000000 -0400
15658 @@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, i
15659 if (reload) {
15660 #ifdef CONFIG_SMP
15661 preempt_disable();
15662 - load_LDT(pc);
15663 + load_LDT_nolock(pc);
15664 if (!cpumask_equal(mm_cpumask(current->mm),
15665 cpumask_of(smp_processor_id())))
15666 smp_call_function(flush_ldt, current->mm, 1);
15667 preempt_enable();
15668 #else
15669 - load_LDT(pc);
15670 + load_LDT_nolock(pc);
15671 #endif
15672 }
15673 if (oldsize) {
15674 @@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t
15675 return err;
15676
15677 for (i = 0; i < old->size; i++)
15678 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
15679 + write_ldt_entry(new->ldt, i, old->ldt + i);
15680 return 0;
15681 }
15682
15683 @@ -115,6 +115,24 @@ int init_new_context(struct task_struct
15684 retval = copy_ldt(&mm->context, &old_mm->context);
15685 mutex_unlock(&old_mm->context.lock);
15686 }
15687 +
15688 + if (tsk == current) {
15689 + mm->context.vdso = 0;
15690 +
15691 +#ifdef CONFIG_X86_32
15692 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
15693 + mm->context.user_cs_base = 0UL;
15694 + mm->context.user_cs_limit = ~0UL;
15695 +
15696 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15697 + cpus_clear(mm->context.cpu_user_cs_mask);
15698 +#endif
15699 +
15700 +#endif
15701 +#endif
15702 +
15703 + }
15704 +
15705 return retval;
15706 }
15707
15708 @@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, u
15709 }
15710 }
15711
15712 +#ifdef CONFIG_PAX_SEGMEXEC
15713 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
15714 + error = -EINVAL;
15715 + goto out_unlock;
15716 + }
15717 +#endif
15718 +
15719 fill_ldt(&ldt, &ldt_info);
15720 if (oldmode)
15721 ldt.avl = 0;
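
The write_ldt() hunk just above rejects user-installed code descriptors for tasks flagged MF_PAX_SEGMEXEC, since SEGMEXEC implements non-executable pages by splitting the address space and an arbitrary LDT code segment could undo that split. A reduced sketch of the gate; MODIFY_LDT_CONTENTS_CODE matches the historical modify_ldt ABI value, while the MF_PAX_SEGMEXEC bit below is purely illustrative, not the value the patch defines:

#include <errno.h>
#include <stdio.h>

#define MODIFY_LDT_CONTENTS_CODE 2    /* from the modify_ldt ABI */
#define MF_PAX_SEGMEXEC          0x01 /* illustrative flag bit */

static int segmexec_allows_ldt_entry(unsigned long pax_flags, unsigned int contents)
{
    if ((pax_flags & MF_PAX_SEGMEXEC) && (contents & MODIFY_LDT_CONTENTS_CODE))
        return -EINVAL;   /* refuse user code segments under SEGMEXEC */
    return 0;
}

int main(void)
{
    printf("data entry: %d\n", segmexec_allows_ldt_entry(MF_PAX_SEGMEXEC, 0));
    printf("code entry: %d\n", segmexec_allows_ldt_entry(MF_PAX_SEGMEXEC, MODIFY_LDT_CONTENTS_CODE));
    return 0;
}
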
15722 diff -urNp linux-2.6.32.45/arch/x86/kernel/machine_kexec_32.c linux-2.6.32.45/arch/x86/kernel/machine_kexec_32.c
15723 --- linux-2.6.32.45/arch/x86/kernel/machine_kexec_32.c 2011-03-27 14:31:47.000000000 -0400
15724 +++ linux-2.6.32.45/arch/x86/kernel/machine_kexec_32.c 2011-04-17 15:56:46.000000000 -0400
15725 @@ -26,7 +26,7 @@
15726 #include <asm/system.h>
15727 #include <asm/cacheflush.h>
15728
15729 -static void set_idt(void *newidt, __u16 limit)
15730 +static void set_idt(struct desc_struct *newidt, __u16 limit)
15731 {
15732 struct desc_ptr curidt;
15733
15734 @@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16
15735 }
15736
15737
15738 -static void set_gdt(void *newgdt, __u16 limit)
15739 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
15740 {
15741 struct desc_ptr curgdt;
15742
15743 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
15744 }
15745
15746 control_page = page_address(image->control_code_page);
15747 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
15748 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
15749
15750 relocate_kernel_ptr = control_page;
15751 page_list[PA_CONTROL_PAGE] = __pa(control_page);
15752 diff -urNp linux-2.6.32.45/arch/x86/kernel/microcode_amd.c linux-2.6.32.45/arch/x86/kernel/microcode_amd.c
15753 --- linux-2.6.32.45/arch/x86/kernel/microcode_amd.c 2011-04-17 17:00:52.000000000 -0400
15754 +++ linux-2.6.32.45/arch/x86/kernel/microcode_amd.c 2011-04-17 17:03:05.000000000 -0400
15755 @@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int c
15756 uci->mc = NULL;
15757 }
15758
15759 -static struct microcode_ops microcode_amd_ops = {
15760 +static const struct microcode_ops microcode_amd_ops = {
15761 .request_microcode_user = request_microcode_user,
15762 .request_microcode_fw = request_microcode_fw,
15763 .collect_cpu_info = collect_cpu_info_amd,
15764 @@ -372,7 +372,7 @@ static struct microcode_ops microcode_am
15765 .microcode_fini_cpu = microcode_fini_cpu_amd,
15766 };
15767
15768 -struct microcode_ops * __init init_amd_microcode(void)
15769 +const struct microcode_ops * __init init_amd_microcode(void)
15770 {
15771 return &microcode_amd_ops;
15772 }
15773 diff -urNp linux-2.6.32.45/arch/x86/kernel/microcode_core.c linux-2.6.32.45/arch/x86/kernel/microcode_core.c
15774 --- linux-2.6.32.45/arch/x86/kernel/microcode_core.c 2011-03-27 14:31:47.000000000 -0400
15775 +++ linux-2.6.32.45/arch/x86/kernel/microcode_core.c 2011-04-17 15:56:46.000000000 -0400
15776 @@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
15777
15778 #define MICROCODE_VERSION "2.00"
15779
15780 -static struct microcode_ops *microcode_ops;
15781 +static const struct microcode_ops *microcode_ops;
15782
15783 /*
15784 * Synchronization.
15785 diff -urNp linux-2.6.32.45/arch/x86/kernel/microcode_intel.c linux-2.6.32.45/arch/x86/kernel/microcode_intel.c
15786 --- linux-2.6.32.45/arch/x86/kernel/microcode_intel.c 2011-03-27 14:31:47.000000000 -0400
15787 +++ linux-2.6.32.45/arch/x86/kernel/microcode_intel.c 2011-04-17 15:56:46.000000000 -0400
15788 @@ -443,13 +443,13 @@ static enum ucode_state request_microcod
15789
15790 static int get_ucode_user(void *to, const void *from, size_t n)
15791 {
15792 - return copy_from_user(to, from, n);
15793 + return copy_from_user(to, (__force const void __user *)from, n);
15794 }
15795
15796 static enum ucode_state
15797 request_microcode_user(int cpu, const void __user *buf, size_t size)
15798 {
15799 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
15800 + return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
15801 }
15802
15803 static void microcode_fini_cpu(int cpu)
15804 @@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
15805 uci->mc = NULL;
15806 }
15807
15808 -static struct microcode_ops microcode_intel_ops = {
15809 +static const struct microcode_ops microcode_intel_ops = {
15810 .request_microcode_user = request_microcode_user,
15811 .request_microcode_fw = request_microcode_fw,
15812 .collect_cpu_info = collect_cpu_info,
15813 @@ -468,7 +468,7 @@ static struct microcode_ops microcode_in
15814 .microcode_fini_cpu = microcode_fini_cpu,
15815 };
15816
15817 -struct microcode_ops * __init init_intel_microcode(void)
15818 +const struct microcode_ops * __init init_intel_microcode(void)
15819 {
15820 return &microcode_intel_ops;
15821 }
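
The microcode hunks, and many of the hunks that follow, constify driver ops tables so the function pointers land in .rodata and cannot be retargeted by a stray or malicious write at runtime. The pattern in isolation (the struct and function names here are invented):

#include <stdio.h>

struct microcode_like_ops {
    int (*collect_cpu_info)(int cpu);
};

static int collect_cpu_info_stub(int cpu) { return cpu; }

/* const puts the pointer table in a read-only section; a writer gets a
 * compile-time error instead of silently redirecting the callback. */
static const struct microcode_like_ops demo_ops = {
    .collect_cpu_info = collect_cpu_info_stub,
};

int main(void)
{
    printf("%d\n", demo_ops.collect_cpu_info(3));
    /* demo_ops.collect_cpu_info = NULL;  -- would not compile */
    return 0;
}
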
15822 diff -urNp linux-2.6.32.45/arch/x86/kernel/module.c linux-2.6.32.45/arch/x86/kernel/module.c
15823 --- linux-2.6.32.45/arch/x86/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
15824 +++ linux-2.6.32.45/arch/x86/kernel/module.c 2011-04-17 15:56:46.000000000 -0400
15825 @@ -34,7 +34,7 @@
15826 #define DEBUGP(fmt...)
15827 #endif
15828
15829 -void *module_alloc(unsigned long size)
15830 +static void *__module_alloc(unsigned long size, pgprot_t prot)
15831 {
15832 struct vm_struct *area;
15833
15834 @@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
15835 if (!area)
15836 return NULL;
15837
15838 - return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
15839 - PAGE_KERNEL_EXEC);
15840 + return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
15841 +}
15842 +
15843 +void *module_alloc(unsigned long size)
15844 +{
15845 +
15846 +#ifdef CONFIG_PAX_KERNEXEC
15847 + return __module_alloc(size, PAGE_KERNEL);
15848 +#else
15849 + return __module_alloc(size, PAGE_KERNEL_EXEC);
15850 +#endif
15851 +
15852 }
15853
15854 /* Free memory returned from module_alloc */
15855 @@ -58,6 +68,40 @@ void module_free(struct module *mod, voi
15856 vfree(module_region);
15857 }
15858
15859 +#ifdef CONFIG_PAX_KERNEXEC
15860 +#ifdef CONFIG_X86_32
15861 +void *module_alloc_exec(unsigned long size)
15862 +{
15863 + struct vm_struct *area;
15864 +
15865 + if (size == 0)
15866 + return NULL;
15867 +
15868 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
15869 + return area ? area->addr : NULL;
15870 +}
15871 +EXPORT_SYMBOL(module_alloc_exec);
15872 +
15873 +void module_free_exec(struct module *mod, void *module_region)
15874 +{
15875 + vunmap(module_region);
15876 +}
15877 +EXPORT_SYMBOL(module_free_exec);
15878 +#else
15879 +void module_free_exec(struct module *mod, void *module_region)
15880 +{
15881 + module_free(mod, module_region);
15882 +}
15883 +EXPORT_SYMBOL(module_free_exec);
15884 +
15885 +void *module_alloc_exec(unsigned long size)
15886 +{
15887 + return __module_alloc(size, PAGE_KERNEL_RX);
15888 +}
15889 +EXPORT_SYMBOL(module_alloc_exec);
15890 +#endif
15891 +#endif
15892 +
15893 /* We don't need anything special. */
15894 int module_frob_arch_sections(Elf_Ehdr *hdr,
15895 Elf_Shdr *sechdrs,
15896 @@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15897 unsigned int i;
15898 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
15899 Elf32_Sym *sym;
15900 - uint32_t *location;
15901 + uint32_t *plocation, location;
15902
15903 DEBUGP("Applying relocate section %u to %u\n", relsec,
15904 sechdrs[relsec].sh_info);
15905 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
15906 /* This is where to make the change */
15907 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
15908 - + rel[i].r_offset;
15909 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
15910 + location = (uint32_t)plocation;
15911 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
15912 + plocation = ktla_ktva((void *)plocation);
15913 /* This is the symbol it is referring to. Note that all
15914 undefined symbols have been resolved. */
15915 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
15916 @@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15917 switch (ELF32_R_TYPE(rel[i].r_info)) {
15918 case R_386_32:
15919 /* We add the value into the location given */
15920 - *location += sym->st_value;
15921 + pax_open_kernel();
15922 + *plocation += sym->st_value;
15923 + pax_close_kernel();
15924 break;
15925 case R_386_PC32:
15926 /* Add the value, subtract its position */
15926 /* Add the value, subtract its position */
15927 - *location += sym->st_value - (uint32_t)location;
15928 + pax_open_kernel();
15929 + *plocation += sym->st_value - location;
15930 + pax_close_kernel();
15931 break;
15932 default:
15933 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
15934 @@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
15935 case R_X86_64_NONE:
15936 break;
15937 case R_X86_64_64:
15938 + pax_open_kernel();
15939 *(u64 *)loc = val;
15940 + pax_close_kernel();
15941 break;
15942 case R_X86_64_32:
15943 + pax_open_kernel();
15944 *(u32 *)loc = val;
15945 + pax_close_kernel();
15946 if (val != *(u32 *)loc)
15947 goto overflow;
15948 break;
15949 case R_X86_64_32S:
15950 + pax_open_kernel();
15951 *(s32 *)loc = val;
15952 + pax_close_kernel();
15953 if ((s64)val != *(s32 *)loc)
15954 goto overflow;
15955 break;
15956 case R_X86_64_PC32:
15957 val -= (u64)loc;
15958 + pax_open_kernel();
15959 *(u32 *)loc = val;
15960 + pax_close_kernel();
15961 +
15962 #if 0
15963 if ((s64)val != *(s32 *)loc)
15964 goto overflow;
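
In the module.c hunks, module_alloc() stops handing out writable-plus-executable memory: under KERNEXEC the generic allocator returns PAGE_KERNEL (no exec) and a separate module_alloc_exec() supplies the executable mapping. A userspace analogue of keeping the two protections apart, using mmap(2) purely for illustration:

#include <assert.h>
#include <stddef.h>
#include <sys/mman.h>

/* Illustrative only: one writable, non-executable region and one
 * executable, non-writable region, mirroring the RW/RX split. */
static void *alloc_rw(size_t size)
{
    void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    return p == MAP_FAILED ? NULL : p;
}

static void *alloc_rx(size_t size)
{
    void *p = mmap(NULL, size, PROT_READ | PROT_EXEC,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    return p == MAP_FAILED ? NULL : p;
}

int main(void)
{
    void *rw = alloc_rw(4096);
    void *rx = alloc_rx(4096);
    assert(rw && rx);
    munmap(rw, 4096);
    munmap(rx, 4096);
    return 0;
}

The design choice is the usual W^X split: module data and module text never share a mapping that is both writable and executable.
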
15965 diff -urNp linux-2.6.32.45/arch/x86/kernel/paravirt.c linux-2.6.32.45/arch/x86/kernel/paravirt.c
15966 --- linux-2.6.32.45/arch/x86/kernel/paravirt.c 2011-03-27 14:31:47.000000000 -0400
15967 +++ linux-2.6.32.45/arch/x86/kernel/paravirt.c 2011-08-23 20:24:19.000000000 -0400
15968 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
15969 {
15970 return x;
15971 }
15972 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
15973 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
15974 +#endif
15975
15976 void __init default_banner(void)
15977 {
15978 @@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
15979 * corresponding structure. */
15980 static void *get_call_destination(u8 type)
15981 {
15982 - struct paravirt_patch_template tmpl = {
15983 + const struct paravirt_patch_template tmpl = {
15984 .pv_init_ops = pv_init_ops,
15985 .pv_time_ops = pv_time_ops,
15986 .pv_cpu_ops = pv_cpu_ops,
15987 @@ -133,6 +136,8 @@ static void *get_call_destination(u8 typ
15988 .pv_lock_ops = pv_lock_ops,
15989 #endif
15990 };
15991 +
15992 + pax_track_stack();
15993 return *((void **)&tmpl + type);
15994 }
15995
15996 @@ -145,15 +150,19 @@ unsigned paravirt_patch_default(u8 type,
15997 if (opfunc == NULL)
15998 /* If there's no function, patch it with a ud2a (BUG) */
15999 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
16000 - else if (opfunc == _paravirt_nop)
16001 + else if (opfunc == (void *)_paravirt_nop)
16002 /* If the operation is a nop, then nop the callsite */
16003 ret = paravirt_patch_nop();
16004
16005 /* identity functions just return their single argument */
16006 - else if (opfunc == _paravirt_ident_32)
16007 + else if (opfunc == (void *)_paravirt_ident_32)
16008 ret = paravirt_patch_ident_32(insnbuf, len);
16009 - else if (opfunc == _paravirt_ident_64)
16010 + else if (opfunc == (void *)_paravirt_ident_64)
16011 + ret = paravirt_patch_ident_64(insnbuf, len);
16012 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
16013 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
16014 ret = paravirt_patch_ident_64(insnbuf, len);
16015 +#endif
16016
16017 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
16018 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
16019 @@ -178,7 +187,7 @@ unsigned paravirt_patch_insns(void *insn
16020 if (insn_len > len || start == NULL)
16021 insn_len = len;
16022 else
16023 - memcpy(insnbuf, start, insn_len);
16024 + memcpy(insnbuf, ktla_ktva(start), insn_len);
16025
16026 return insn_len;
16027 }
16028 @@ -294,22 +303,22 @@ void arch_flush_lazy_mmu_mode(void)
16029 preempt_enable();
16030 }
16031
16032 -struct pv_info pv_info = {
16033 +struct pv_info pv_info __read_only = {
16034 .name = "bare hardware",
16035 .paravirt_enabled = 0,
16036 .kernel_rpl = 0,
16037 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
16038 };
16039
16040 -struct pv_init_ops pv_init_ops = {
16041 +struct pv_init_ops pv_init_ops __read_only = {
16042 .patch = native_patch,
16043 };
16044
16045 -struct pv_time_ops pv_time_ops = {
16046 +struct pv_time_ops pv_time_ops __read_only = {
16047 .sched_clock = native_sched_clock,
16048 };
16049
16050 -struct pv_irq_ops pv_irq_ops = {
16051 +struct pv_irq_ops pv_irq_ops __read_only = {
16052 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
16053 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
16054 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
16055 @@ -321,7 +330,7 @@ struct pv_irq_ops pv_irq_ops = {
16056 #endif
16057 };
16058
16059 -struct pv_cpu_ops pv_cpu_ops = {
16060 +struct pv_cpu_ops pv_cpu_ops __read_only = {
16061 .cpuid = native_cpuid,
16062 .get_debugreg = native_get_debugreg,
16063 .set_debugreg = native_set_debugreg,
16064 @@ -382,21 +391,26 @@ struct pv_cpu_ops pv_cpu_ops = {
16065 .end_context_switch = paravirt_nop,
16066 };
16067
16068 -struct pv_apic_ops pv_apic_ops = {
16069 +struct pv_apic_ops pv_apic_ops __read_only = {
16070 #ifdef CONFIG_X86_LOCAL_APIC
16071 .startup_ipi_hook = paravirt_nop,
16072 #endif
16073 };
16074
16075 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
16076 +#ifdef CONFIG_X86_32
16077 +#ifdef CONFIG_X86_PAE
16078 +/* 64-bit pagetable entries */
16079 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
16080 +#else
16081 /* 32-bit pagetable entries */
16082 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
16083 +#endif
16084 #else
16085 /* 64-bit pagetable entries */
16086 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
16087 #endif
16088
16089 -struct pv_mmu_ops pv_mmu_ops = {
16090 +struct pv_mmu_ops pv_mmu_ops __read_only = {
16091
16092 .read_cr2 = native_read_cr2,
16093 .write_cr2 = native_write_cr2,
16094 @@ -448,6 +462,7 @@ struct pv_mmu_ops pv_mmu_ops = {
16095 .make_pud = PTE_IDENT,
16096
16097 .set_pgd = native_set_pgd,
16098 + .set_pgd_batched = native_set_pgd_batched,
16099 #endif
16100 #endif /* PAGETABLE_LEVELS >= 3 */
16101
16102 @@ -467,6 +482,12 @@ struct pv_mmu_ops pv_mmu_ops = {
16103 },
16104
16105 .set_fixmap = native_set_fixmap,
16106 +
16107 +#ifdef CONFIG_PAX_KERNEXEC
16108 + .pax_open_kernel = native_pax_open_kernel,
16109 + .pax_close_kernel = native_pax_close_kernel,
16110 +#endif
16111 +
16112 };
16113
16114 EXPORT_SYMBOL_GPL(pv_time_ops);
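
get_call_destination() in the paravirt.c hunk keeps its existing trick of treating the patch template as a flat array of function pointers and indexing it by type; the hunk only adds const and a stack-tracking call around it. The indexing trick in a self-contained form (structure and names invented):

#include <assert.h>
#include <stdio.h>

typedef void (*op_fn)(void);

struct ops_template {
    op_fn first;
    op_fn second;
    op_fn third;
};

static void op_a(void) { puts("a"); }
static void op_b(void) { puts("b"); }
static void op_c(void) { puts("c"); }

/* Pick an entry out of a struct of identically-typed function pointers by
 * numeric index, the same trick get_call_destination() plays on its template. */
static op_fn slot(struct ops_template *t, unsigned int type)
{
    return *((op_fn *)t + type);
}

int main(void)
{
    struct ops_template t = { op_a, op_b, op_c };
    assert(slot(&t, 1) == op_b);
    slot(&t, 2)();   /* prints "c" */
    return 0;
}
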
16115 diff -urNp linux-2.6.32.45/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.32.45/arch/x86/kernel/paravirt-spinlocks.c
16116 --- linux-2.6.32.45/arch/x86/kernel/paravirt-spinlocks.c 2011-03-27 14:31:47.000000000 -0400
16117 +++ linux-2.6.32.45/arch/x86/kernel/paravirt-spinlocks.c 2011-04-17 15:56:46.000000000 -0400
16118 @@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *
16119 __raw_spin_lock(lock);
16120 }
16121
16122 -struct pv_lock_ops pv_lock_ops = {
16123 +struct pv_lock_ops pv_lock_ops __read_only = {
16124 #ifdef CONFIG_SMP
16125 .spin_is_locked = __ticket_spin_is_locked,
16126 .spin_is_contended = __ticket_spin_is_contended,
16127 diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-calgary_64.c linux-2.6.32.45/arch/x86/kernel/pci-calgary_64.c
16128 --- linux-2.6.32.45/arch/x86/kernel/pci-calgary_64.c 2011-03-27 14:31:47.000000000 -0400
16129 +++ linux-2.6.32.45/arch/x86/kernel/pci-calgary_64.c 2011-04-17 15:56:46.000000000 -0400
16130 @@ -477,7 +477,7 @@ static void calgary_free_coherent(struct
16131 free_pages((unsigned long)vaddr, get_order(size));
16132 }
16133
16134 -static struct dma_map_ops calgary_dma_ops = {
16135 +static const struct dma_map_ops calgary_dma_ops = {
16136 .alloc_coherent = calgary_alloc_coherent,
16137 .free_coherent = calgary_free_coherent,
16138 .map_sg = calgary_map_sg,
16139 diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-dma.c linux-2.6.32.45/arch/x86/kernel/pci-dma.c
16140 --- linux-2.6.32.45/arch/x86/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
16141 +++ linux-2.6.32.45/arch/x86/kernel/pci-dma.c 2011-04-17 15:56:46.000000000 -0400
16142 @@ -14,7 +14,7 @@
16143
16144 static int forbid_dac __read_mostly;
16145
16146 -struct dma_map_ops *dma_ops;
16147 +const struct dma_map_ops *dma_ops;
16148 EXPORT_SYMBOL(dma_ops);
16149
16150 static int iommu_sac_force __read_mostly;
16151 @@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
16152
16153 int dma_supported(struct device *dev, u64 mask)
16154 {
16155 - struct dma_map_ops *ops = get_dma_ops(dev);
16156 + const struct dma_map_ops *ops = get_dma_ops(dev);
16157
16158 #ifdef CONFIG_PCI
16159 if (mask > 0xffffffff && forbid_dac > 0) {
16160 diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-gart_64.c linux-2.6.32.45/arch/x86/kernel/pci-gart_64.c
16161 --- linux-2.6.32.45/arch/x86/kernel/pci-gart_64.c 2011-03-27 14:31:47.000000000 -0400
16162 +++ linux-2.6.32.45/arch/x86/kernel/pci-gart_64.c 2011-04-17 15:56:46.000000000 -0400
16163 @@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct ag
16164 return -1;
16165 }
16166
16167 -static struct dma_map_ops gart_dma_ops = {
16168 +static const struct dma_map_ops gart_dma_ops = {
16169 .map_sg = gart_map_sg,
16170 .unmap_sg = gart_unmap_sg,
16171 .map_page = gart_map_page,
16172 diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-nommu.c linux-2.6.32.45/arch/x86/kernel/pci-nommu.c
16173 --- linux-2.6.32.45/arch/x86/kernel/pci-nommu.c 2011-03-27 14:31:47.000000000 -0400
16174 +++ linux-2.6.32.45/arch/x86/kernel/pci-nommu.c 2011-04-17 15:56:46.000000000 -0400
16175 @@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(str
16176 flush_write_buffers();
16177 }
16178
16179 -struct dma_map_ops nommu_dma_ops = {
16180 +const struct dma_map_ops nommu_dma_ops = {
16181 .alloc_coherent = dma_generic_alloc_coherent,
16182 .free_coherent = nommu_free_coherent,
16183 .map_sg = nommu_map_sg,
16184 diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-swiotlb.c linux-2.6.32.45/arch/x86/kernel/pci-swiotlb.c
16185 --- linux-2.6.32.45/arch/x86/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
16186 +++ linux-2.6.32.45/arch/x86/kernel/pci-swiotlb.c 2011-04-17 15:56:46.000000000 -0400
16187 @@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(
16188 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
16189 }
16190
16191 -static struct dma_map_ops swiotlb_dma_ops = {
16192 +static const struct dma_map_ops swiotlb_dma_ops = {
16193 .mapping_error = swiotlb_dma_mapping_error,
16194 .alloc_coherent = x86_swiotlb_alloc_coherent,
16195 .free_coherent = swiotlb_free_coherent,
16196 diff -urNp linux-2.6.32.45/arch/x86/kernel/process_32.c linux-2.6.32.45/arch/x86/kernel/process_32.c
16197 --- linux-2.6.32.45/arch/x86/kernel/process_32.c 2011-06-25 12:55:34.000000000 -0400
16198 +++ linux-2.6.32.45/arch/x86/kernel/process_32.c 2011-06-25 12:56:37.000000000 -0400
16199 @@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __as
16200 unsigned long thread_saved_pc(struct task_struct *tsk)
16201 {
16202 return ((unsigned long *)tsk->thread.sp)[3];
16203 +//XXX return tsk->thread.eip;
16204 }
16205
16206 #ifndef CONFIG_SMP
16207 @@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, i
16208 unsigned short ss, gs;
16209 const char *board;
16210
16211 - if (user_mode_vm(regs)) {
16212 + if (user_mode(regs)) {
16213 sp = regs->sp;
16214 ss = regs->ss & 0xffff;
16215 - gs = get_user_gs(regs);
16216 } else {
16217 sp = (unsigned long) (&regs->sp);
16218 savesegment(ss, ss);
16219 - savesegment(gs, gs);
16220 }
16221 + gs = get_user_gs(regs);
16222
16223 printk("\n");
16224
16225 @@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), voi
16226 regs.bx = (unsigned long) fn;
16227 regs.dx = (unsigned long) arg;
16228
16229 - regs.ds = __USER_DS;
16230 - regs.es = __USER_DS;
16231 + regs.ds = __KERNEL_DS;
16232 + regs.es = __KERNEL_DS;
16233 regs.fs = __KERNEL_PERCPU;
16234 - regs.gs = __KERNEL_STACK_CANARY;
16235 + savesegment(gs, regs.gs);
16236 regs.orig_ax = -1;
16237 regs.ip = (unsigned long) kernel_thread_helper;
16238 regs.cs = __KERNEL_CS | get_kernel_rpl();
16239 @@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flag
16240 struct task_struct *tsk;
16241 int err;
16242
16243 - childregs = task_pt_regs(p);
16244 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
16245 *childregs = *regs;
16246 childregs->ax = 0;
16247 childregs->sp = sp;
16248
16249 p->thread.sp = (unsigned long) childregs;
16250 p->thread.sp0 = (unsigned long) (childregs+1);
16251 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16252
16253 p->thread.ip = (unsigned long) ret_from_fork;
16254
16255 @@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p,
16256 struct thread_struct *prev = &prev_p->thread,
16257 *next = &next_p->thread;
16258 int cpu = smp_processor_id();
16259 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
16260 + struct tss_struct *tss = init_tss + cpu;
16261 bool preload_fpu;
16262
16263 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
16264 @@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p,
16265 */
16266 lazy_save_gs(prev->gs);
16267
16268 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16269 + __set_fs(task_thread_info(next_p)->addr_limit);
16270 +#endif
16271 +
16272 /*
16273 * Load the per-thread Thread-Local Storage descriptor.
16274 */
16275 @@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p,
16276 */
16277 arch_end_context_switch(next_p);
16278
16279 + percpu_write(current_task, next_p);
16280 + percpu_write(current_tinfo, &next_p->tinfo);
16281 +
16282 if (preload_fpu)
16283 __math_state_restore();
16284
16285 @@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p,
16286 if (prev->gs | next->gs)
16287 lazy_load_gs(next->gs);
16288
16289 - percpu_write(current_task, next_p);
16290 -
16291 return prev_p;
16292 }
16293
16294 @@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_stru
16295 } while (count++ < 16);
16296 return 0;
16297 }
16298 -
16299 diff -urNp linux-2.6.32.45/arch/x86/kernel/process_64.c linux-2.6.32.45/arch/x86/kernel/process_64.c
16300 --- linux-2.6.32.45/arch/x86/kernel/process_64.c 2011-06-25 12:55:34.000000000 -0400
16301 +++ linux-2.6.32.45/arch/x86/kernel/process_64.c 2011-06-25 12:56:37.000000000 -0400
16302 @@ -91,7 +91,7 @@ static void __exit_idle(void)
16303 void exit_idle(void)
16304 {
16305 /* idle loop has pid 0 */
16306 - if (current->pid)
16307 + if (task_pid_nr(current))
16308 return;
16309 __exit_idle();
16310 }
16311 @@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, i
16312 if (!board)
16313 board = "";
16314 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
16315 - current->pid, current->comm, print_tainted(),
16316 + task_pid_nr(current), current->comm, print_tainted(),
16317 init_utsname()->release,
16318 (int)strcspn(init_utsname()->version, " "),
16319 init_utsname()->version, board);
16320 @@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flag
16321 struct pt_regs *childregs;
16322 struct task_struct *me = current;
16323
16324 - childregs = ((struct pt_regs *)
16325 - (THREAD_SIZE + task_stack_page(p))) - 1;
16326 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
16327 *childregs = *regs;
16328
16329 childregs->ax = 0;
16330 @@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flag
16331 p->thread.sp = (unsigned long) childregs;
16332 p->thread.sp0 = (unsigned long) (childregs+1);
16333 p->thread.usersp = me->thread.usersp;
16334 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16335
16336 set_tsk_thread_flag(p, TIF_FORK);
16337
16338 @@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p,
16339 struct thread_struct *prev = &prev_p->thread;
16340 struct thread_struct *next = &next_p->thread;
16341 int cpu = smp_processor_id();
16342 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
16343 + struct tss_struct *tss = init_tss + cpu;
16344 unsigned fsindex, gsindex;
16345 bool preload_fpu;
16346
16347 @@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p,
16348 prev->usersp = percpu_read(old_rsp);
16349 percpu_write(old_rsp, next->usersp);
16350 percpu_write(current_task, next_p);
16351 + percpu_write(current_tinfo, &next_p->tinfo);
16352
16353 - percpu_write(kernel_stack,
16354 - (unsigned long)task_stack_page(next_p) +
16355 - THREAD_SIZE - KERNEL_STACK_OFFSET);
16356 + percpu_write(kernel_stack, next->sp0);
16357
16358 /*
16359 * Now maybe reload the debug registers and handle I/O bitmaps
16360 @@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_stru
16361 if (!p || p == current || p->state == TASK_RUNNING)
16362 return 0;
16363 stack = (unsigned long)task_stack_page(p);
16364 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
16365 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
16366 return 0;
16367 fp = *(u64 *)(p->thread.sp);
16368 do {
16369 - if (fp < (unsigned long)stack ||
16370 - fp >= (unsigned long)stack+THREAD_SIZE)
16371 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
16372 return 0;
16373 ip = *(u64 *)(fp+8);
16374 if (!in_sched_functions(ip))
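
The get_wchan() change in process_64.c tightens the frame-pointer walk: the saved sp and each fp must now leave room for the 16 bytes plus one u64 that the patched copy_thread() reserves at the top of the stack page. The bounds test on its own, with THREAD_SIZE assumed to be 8 KiB for the example:

#include <assert.h>
#include <stdint.h>

#define THREAD_SIZE 8192UL   /* illustrative; the real value is config-dependent */

/* fp must lie inside the stack page and still leave space for the
 * 16-byte pad plus one saved u64 above it. */
static int fp_in_bounds(uintptr_t stack, uintptr_t fp)
{
    return fp >= stack && fp <= stack + THREAD_SIZE - 16 - sizeof(uint64_t);
}

int main(void)
{
    uintptr_t stack = 0x10000;
    assert(fp_in_bounds(stack, stack + 64));
    assert(!fp_in_bounds(stack, stack + THREAD_SIZE - 8));  /* too close to the top */
    return 0;
}
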
16375 diff -urNp linux-2.6.32.45/arch/x86/kernel/process.c linux-2.6.32.45/arch/x86/kernel/process.c
16376 --- linux-2.6.32.45/arch/x86/kernel/process.c 2011-04-22 19:16:29.000000000 -0400
16377 +++ linux-2.6.32.45/arch/x86/kernel/process.c 2011-05-22 23:02:03.000000000 -0400
16378 @@ -51,16 +51,33 @@ void free_thread_xstate(struct task_stru
16379
16380 void free_thread_info(struct thread_info *ti)
16381 {
16382 - free_thread_xstate(ti->task);
16383 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
16384 }
16385
16386 +static struct kmem_cache *task_struct_cachep;
16387 +
16388 void arch_task_cache_init(void)
16389 {
16390 - task_xstate_cachep =
16391 - kmem_cache_create("task_xstate", xstate_size,
16392 + /* create a slab on which task_structs can be allocated */
16393 + task_struct_cachep =
16394 + kmem_cache_create("task_struct", sizeof(struct task_struct),
16395 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
16396 +
16397 + task_xstate_cachep =
16398 + kmem_cache_create("task_xstate", xstate_size,
16399 __alignof__(union thread_xstate),
16400 - SLAB_PANIC | SLAB_NOTRACK, NULL);
16401 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
16402 +}
16403 +
16404 +struct task_struct *alloc_task_struct(void)
16405 +{
16406 + return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
16407 +}
16408 +
16409 +void free_task_struct(struct task_struct *task)
16410 +{
16411 + free_thread_xstate(task);
16412 + kmem_cache_free(task_struct_cachep, task);
16413 }
16414
16415 /*
16416 @@ -73,7 +90,7 @@ void exit_thread(void)
16417 unsigned long *bp = t->io_bitmap_ptr;
16418
16419 if (bp) {
16420 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
16421 + struct tss_struct *tss = init_tss + get_cpu();
16422
16423 t->io_bitmap_ptr = NULL;
16424 clear_thread_flag(TIF_IO_BITMAP);
16425 @@ -93,6 +110,9 @@ void flush_thread(void)
16426
16427 clear_tsk_thread_flag(tsk, TIF_DEBUG);
16428
16429 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
16430 + loadsegment(gs, 0);
16431 +#endif
16432 tsk->thread.debugreg0 = 0;
16433 tsk->thread.debugreg1 = 0;
16434 tsk->thread.debugreg2 = 0;
16435 @@ -307,7 +327,7 @@ void default_idle(void)
16436 EXPORT_SYMBOL(default_idle);
16437 #endif
16438
16439 -void stop_this_cpu(void *dummy)
16440 +__noreturn void stop_this_cpu(void *dummy)
16441 {
16442 local_irq_disable();
16443 /*
16444 @@ -568,16 +588,35 @@ static int __init idle_setup(char *str)
16445 }
16446 early_param("idle", idle_setup);
16447
16448 -unsigned long arch_align_stack(unsigned long sp)
16449 +#ifdef CONFIG_PAX_RANDKSTACK
16450 +asmlinkage void pax_randomize_kstack(void)
16451 {
16452 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
16453 - sp -= get_random_int() % 8192;
16454 - return sp & ~0xf;
16455 -}
16456 + struct thread_struct *thread = &current->thread;
16457 + unsigned long time;
16458
16459 -unsigned long arch_randomize_brk(struct mm_struct *mm)
16460 -{
16461 - unsigned long range_end = mm->brk + 0x02000000;
16462 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
16463 + if (!randomize_va_space)
16464 + return;
16465 +
16466 + rdtscl(time);
16467 +
16468 + /* P4 seems to return a 0 LSB, ignore it */
16469 +#ifdef CONFIG_MPENTIUM4
16470 + time &= 0x3EUL;
16471 + time <<= 2;
16472 +#elif defined(CONFIG_X86_64)
16473 + time &= 0xFUL;
16474 + time <<= 4;
16475 +#else
16476 + time &= 0x1FUL;
16477 + time <<= 3;
16478 +#endif
16479 +
16480 + thread->sp0 ^= time;
16481 + load_sp0(init_tss + smp_processor_id(), thread);
16482 +
16483 +#ifdef CONFIG_X86_64
16484 + percpu_write(kernel_stack, thread->sp0);
16485 +#endif
16486 }
16487 +#endif
16488
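
pax_randomize_kstack() in process.c replaces arch_align_stack()/arch_randomize_brk(): it XORs a few TSC-derived bits into thread->sp0 so the kernel stack top moves around between syscalls, with per-config masking (the P4 branch skips the always-zero low bit). Only the bit arithmetic is shown below, following the patch's 64-bit branch, with a caller-supplied value standing in for rdtscl() and a made-up sp0:

#include <stdio.h>

/* Derive the kernel-stack perturbation from the low timestamp bits: keep
 * 4 bits and scale by 16 so the stack stays 16-byte aligned, giving
 * offsets in the range 0..240. */
static unsigned long kstack_offset_64(unsigned long tsc)
{
    tsc &= 0xFUL;
    tsc <<= 4;
    return tsc;
}

int main(void)
{
    unsigned long sp0 = 0xc0010000UL;  /* made-up original sp0 */
    unsigned long off = kstack_offset_64(0x1234abcdUL);
    printf("offset=%lu new sp0=%#lx\n", off, sp0 ^ off);
    return 0;
}
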
16489 diff -urNp linux-2.6.32.45/arch/x86/kernel/ptrace.c linux-2.6.32.45/arch/x86/kernel/ptrace.c
16490 --- linux-2.6.32.45/arch/x86/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
16491 +++ linux-2.6.32.45/arch/x86/kernel/ptrace.c 2011-04-17 15:56:46.000000000 -0400
16492 @@ -925,7 +925,7 @@ static const struct user_regset_view use
16493 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
16494 {
16495 int ret;
16496 - unsigned long __user *datap = (unsigned long __user *)data;
16497 + unsigned long __user *datap = (__force unsigned long __user *)data;
16498
16499 switch (request) {
16500 /* read the word at location addr in the USER area. */
16501 @@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *chi
16502 if (addr < 0)
16503 return -EIO;
16504 ret = do_get_thread_area(child, addr,
16505 - (struct user_desc __user *) data);
16506 + (__force struct user_desc __user *) data);
16507 break;
16508
16509 case PTRACE_SET_THREAD_AREA:
16510 if (addr < 0)
16511 return -EIO;
16512 ret = do_set_thread_area(child, addr,
16513 - (struct user_desc __user *) data, 0);
16514 + (__force struct user_desc __user *) data, 0);
16515 break;
16516 #endif
16517
16518 @@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *chi
16519 #ifdef CONFIG_X86_PTRACE_BTS
16520 case PTRACE_BTS_CONFIG:
16521 ret = ptrace_bts_config
16522 - (child, data, (struct ptrace_bts_config __user *)addr);
16523 + (child, data, (__force struct ptrace_bts_config __user *)addr);
16524 break;
16525
16526 case PTRACE_BTS_STATUS:
16527 ret = ptrace_bts_status
16528 - (child, data, (struct ptrace_bts_config __user *)addr);
16529 + (child, data, (__force struct ptrace_bts_config __user *)addr);
16530 break;
16531
16532 case PTRACE_BTS_SIZE:
16533 @@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *chi
16534
16535 case PTRACE_BTS_GET:
16536 ret = ptrace_bts_read_record
16537 - (child, data, (struct bts_struct __user *) addr);
16538 + (child, data, (__force struct bts_struct __user *) addr);
16539 break;
16540
16541 case PTRACE_BTS_CLEAR:
16542 @@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *chi
16543
16544 case PTRACE_BTS_DRAIN:
16545 ret = ptrace_bts_drain
16546 - (child, data, (struct bts_struct __user *) addr);
16547 + (child, data, (__force struct bts_struct __user *) addr);
16548 break;
16549 #endif /* CONFIG_X86_PTRACE_BTS */
16550
16551 @@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *ts
16552 info.si_code = si_code;
16553
16554 /* User-mode ip? */
16555 - info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
16556 + info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
16557
16558 /* Send us the fake SIGTRAP */
16559 force_sig_info(SIGTRAP, &info, tsk);
16560 @@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *ts
16561 * We must return the syscall number to actually look up in the table.
16562 * This can be -1L to skip running any syscall at all.
16563 */
16564 -asmregparm long syscall_trace_enter(struct pt_regs *regs)
16565 +long syscall_trace_enter(struct pt_regs *regs)
16566 {
16567 long ret = 0;
16568
16569 @@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(stru
16570 return ret ?: regs->orig_ax;
16571 }
16572
16573 -asmregparm void syscall_trace_leave(struct pt_regs *regs)
16574 +void syscall_trace_leave(struct pt_regs *regs)
16575 {
16576 if (unlikely(current->audit_context))
16577 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
16578 diff -urNp linux-2.6.32.45/arch/x86/kernel/reboot.c linux-2.6.32.45/arch/x86/kernel/reboot.c
16579 --- linux-2.6.32.45/arch/x86/kernel/reboot.c 2011-08-09 18:35:28.000000000 -0400
16580 +++ linux-2.6.32.45/arch/x86/kernel/reboot.c 2011-08-09 18:33:59.000000000 -0400
16581 @@ -33,7 +33,7 @@ void (*pm_power_off)(void);
16582 EXPORT_SYMBOL(pm_power_off);
16583
16584 static const struct desc_ptr no_idt = {};
16585 -static int reboot_mode;
16586 +static unsigned short reboot_mode;
16587 enum reboot_type reboot_type = BOOT_KBD;
16588 int reboot_force;
16589
16590 @@ -292,12 +292,12 @@ core_initcall(reboot_init);
16591 controller to pulse the CPU reset line, which is more thorough, but
16592 doesn't work with at least one type of 486 motherboard. It is easy
16593 to stop this code working; hence the copious comments. */
16594 -static const unsigned long long
16595 -real_mode_gdt_entries [3] =
16596 +static struct desc_struct
16597 +real_mode_gdt_entries [3] __read_only =
16598 {
16599 - 0x0000000000000000ULL, /* Null descriptor */
16600 - 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
16601 - 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
16602 + GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
16603 + GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
16604 + GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
16605 };
16606
16607 static const struct desc_ptr
16608 @@ -346,7 +346,7 @@ static const unsigned char jump_to_bios
16609 * specified by the code and length parameters.
16610 * We assume that length will always be less than 100!
16611 */
16612 -void machine_real_restart(const unsigned char *code, int length)
16613 +__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
16614 {
16615 local_irq_disable();
16616
16617 @@ -366,8 +366,8 @@ void machine_real_restart(const unsigned
16618 /* Remap the kernel at virtual address zero, as well as offset zero
16619 from the kernel segment. This assumes the kernel segment starts at
16620 virtual address PAGE_OFFSET. */
16621 - memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16622 - sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
16623 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16624 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
16625
16626 /*
16627 * Use `swapper_pg_dir' as our page directory.
16628 @@ -379,16 +379,15 @@ void machine_real_restart(const unsigned
16629 boot)". This seems like a fairly standard thing that gets set by
16630 REBOOT.COM programs, and the previous reset routine did this
16631 too. */
16632 - *((unsigned short *)0x472) = reboot_mode;
16633 + *(unsigned short *)(__va(0x472)) = reboot_mode;
16634
16635 /* For the switch to real mode, copy some code to low memory. It has
16636 to be in the first 64k because it is running in 16-bit mode, and it
16637 has to have the same physical and virtual address, because it turns
16638 off paging. Copy it near the end of the first page, out of the way
16639 of BIOS variables. */
16640 - memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
16641 - real_mode_switch, sizeof (real_mode_switch));
16642 - memcpy((void *)(0x1000 - 100), code, length);
16643 + memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
16644 + memcpy(__va(0x1000 - 100), code, length);
16645
16646 /* Set up the IDT for real mode. */
16647 load_idt(&real_mode_idt);
16648 @@ -416,6 +415,7 @@ void machine_real_restart(const unsigned
16649 __asm__ __volatile__ ("ljmp $0x0008,%0"
16650 :
16651 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
16652 + do { } while (1);
16653 }
16654 #ifdef CONFIG_APM_MODULE
16655 EXPORT_SYMBOL(machine_real_restart);
16656 @@ -544,7 +544,7 @@ void __attribute__((weak)) mach_reboot_f
16657 {
16658 }
16659
16660 -static void native_machine_emergency_restart(void)
16661 +__noreturn static void native_machine_emergency_restart(void)
16662 {
16663 int i;
16664
16665 @@ -659,13 +659,13 @@ void native_machine_shutdown(void)
16666 #endif
16667 }
16668
16669 -static void __machine_emergency_restart(int emergency)
16670 +static __noreturn void __machine_emergency_restart(int emergency)
16671 {
16672 reboot_emergency = emergency;
16673 machine_ops.emergency_restart();
16674 }
16675
16676 -static void native_machine_restart(char *__unused)
16677 +static __noreturn void native_machine_restart(char *__unused)
16678 {
16679 printk("machine restart\n");
16680
16681 @@ -674,7 +674,7 @@ static void native_machine_restart(char
16682 __machine_emergency_restart(0);
16683 }
16684
16685 -static void native_machine_halt(void)
16686 +static __noreturn void native_machine_halt(void)
16687 {
16688 /* stop other cpus and apics */
16689 machine_shutdown();
16690 @@ -685,7 +685,7 @@ static void native_machine_halt(void)
16691 stop_this_cpu(NULL);
16692 }
16693
16694 -static void native_machine_power_off(void)
16695 +__noreturn static void native_machine_power_off(void)
16696 {
16697 if (pm_power_off) {
16698 if (!reboot_force)
16699 @@ -694,6 +694,7 @@ static void native_machine_power_off(voi
16700 }
16701 /* a fallback in case there is no PM info available */
16702 tboot_shutdown(TB_SHUTDOWN_HALT);
16703 + do { } while (1);
16704 }
16705
16706 struct machine_ops machine_ops = {
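
The reboot.c hunk swaps the hand-written 64-bit descriptor constants for GDT_ENTRY_INIT(flags, base, limit). The packing can be cross-checked against the old literals; the layout below is the standard x86 segment-descriptor layout inferred from those commented values, not code taken from the patch:

#include <assert.h>
#include <stdint.h>

/* Pack an x86 segment descriptor: limit 15:0, base 23:0, access byte,
 * limit 19:16, base 31:24. The flags nibble is left zero, as it is in
 * these 16-bit real-mode entries. */
static uint64_t pack_descriptor(uint8_t access, uint32_t base, uint32_t limit)
{
    uint64_t d = 0;
    d |= limit & 0xffffULL;
    d |= (uint64_t)(base & 0xffffffUL) << 16;
    d |= (uint64_t)access << 40;
    d |= (uint64_t)((limit >> 16) & 0xf) << 48;
    d |= (uint64_t)((base >> 24) & 0xff) << 56;
    return d;
}

int main(void)
{
    /* the two real-mode descriptors the old table spelled out by hand */
    assert(pack_descriptor(0x9b, 0x00000, 0xffff) == 0x00009b000000ffffULL);
    assert(pack_descriptor(0x93, 0x00100, 0xffff) == 0x000093000100ffffULL);
    return 0;
}
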
16707 diff -urNp linux-2.6.32.45/arch/x86/kernel/setup.c linux-2.6.32.45/arch/x86/kernel/setup.c
16708 --- linux-2.6.32.45/arch/x86/kernel/setup.c 2011-04-17 17:00:52.000000000 -0400
16709 +++ linux-2.6.32.45/arch/x86/kernel/setup.c 2011-04-17 17:03:05.000000000 -0400
16710 @@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
16711
16712 if (!boot_params.hdr.root_flags)
16713 root_mountflags &= ~MS_RDONLY;
16714 - init_mm.start_code = (unsigned long) _text;
16715 - init_mm.end_code = (unsigned long) _etext;
16716 + init_mm.start_code = ktla_ktva((unsigned long) _text);
16717 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
16718 init_mm.end_data = (unsigned long) _edata;
16719 init_mm.brk = _brk_end;
16720
16721 - code_resource.start = virt_to_phys(_text);
16722 - code_resource.end = virt_to_phys(_etext)-1;
16723 - data_resource.start = virt_to_phys(_etext);
16724 + code_resource.start = virt_to_phys(ktla_ktva(_text));
16725 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
16726 + data_resource.start = virt_to_phys(_sdata);
16727 data_resource.end = virt_to_phys(_edata)-1;
16728 bss_resource.start = virt_to_phys(&__bss_start);
16729 bss_resource.end = virt_to_phys(&__bss_stop)-1;
16730 diff -urNp linux-2.6.32.45/arch/x86/kernel/setup_percpu.c linux-2.6.32.45/arch/x86/kernel/setup_percpu.c
16731 --- linux-2.6.32.45/arch/x86/kernel/setup_percpu.c 2011-03-27 14:31:47.000000000 -0400
16732 +++ linux-2.6.32.45/arch/x86/kernel/setup_percpu.c 2011-06-04 20:36:29.000000000 -0400
16733 @@ -25,19 +25,17 @@
16734 # define DBG(x...)
16735 #endif
16736
16737 -DEFINE_PER_CPU(int, cpu_number);
16738 +#ifdef CONFIG_SMP
16739 +DEFINE_PER_CPU(unsigned int, cpu_number);
16740 EXPORT_PER_CPU_SYMBOL(cpu_number);
16741 +#endif
16742
16743 -#ifdef CONFIG_X86_64
16744 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
16745 -#else
16746 -#define BOOT_PERCPU_OFFSET 0
16747 -#endif
16748
16749 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
16750 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
16751
16752 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
16753 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
16754 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
16755 };
16756 EXPORT_SYMBOL(__per_cpu_offset);
16757 @@ -159,10 +157,10 @@ static inline void setup_percpu_segment(
16758 {
16759 #ifdef CONFIG_X86_32
16760 struct desc_struct gdt;
16761 + unsigned long base = per_cpu_offset(cpu);
16762
16763 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
16764 - 0x2 | DESCTYPE_S, 0x8);
16765 - gdt.s = 1;
16766 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
16767 + 0x83 | DESCTYPE_S, 0xC);
16768 write_gdt_entry(get_cpu_gdt_table(cpu),
16769 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
16770 #endif
16771 @@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
16772 /* alrighty, percpu areas up and running */
16773 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
16774 for_each_possible_cpu(cpu) {
16775 +#ifdef CONFIG_CC_STACKPROTECTOR
16776 +#ifdef CONFIG_X86_32
16777 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
16778 +#endif
16779 +#endif
16780 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
16781 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
16782 per_cpu(cpu_number, cpu) = cpu;
16783 @@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
16784 early_per_cpu_map(x86_cpu_to_node_map, cpu);
16785 #endif
16786 #endif
16787 +#ifdef CONFIG_CC_STACKPROTECTOR
16788 +#ifdef CONFIG_X86_32
16789 + if (!cpu)
16790 + per_cpu(stack_canary.canary, cpu) = canary;
16791 +#endif
16792 +#endif
16793 /*
16794 * Up to this point, the boot CPU has been using .data.init
16795 * area. Reload any changed state for the boot CPU.
16796 diff -urNp linux-2.6.32.45/arch/x86/kernel/signal.c linux-2.6.32.45/arch/x86/kernel/signal.c
16797 --- linux-2.6.32.45/arch/x86/kernel/signal.c 2011-03-27 14:31:47.000000000 -0400
16798 +++ linux-2.6.32.45/arch/x86/kernel/signal.c 2011-05-22 23:02:03.000000000 -0400
16799 @@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsi
16800 * Align the stack pointer according to the i386 ABI,
16801 * i.e. so that on function entry ((sp + 4) & 15) == 0.
16802 */
16803 - sp = ((sp + 4) & -16ul) - 4;
16804 + sp = ((sp - 12) & -16ul) - 4;
16805 #else /* !CONFIG_X86_32 */
16806 sp = round_down(sp, 16) - 8;
16807 #endif
16808 @@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, str
16809 * Return an always-bogus address instead so we will die with SIGSEGV.
16810 */
16811 if (onsigstack && !likely(on_sig_stack(sp)))
16812 - return (void __user *)-1L;
16813 + return (__force void __user *)-1L;
16814
16815 /* save i387 state */
16816 if (used_math() && save_i387_xstate(*fpstate) < 0)
16817 - return (void __user *)-1L;
16818 + return (__force void __user *)-1L;
16819
16820 return (void __user *)sp;
16821 }
16822 @@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigactio
16823 }
16824
16825 if (current->mm->context.vdso)
16826 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16827 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16828 else
16829 - restorer = &frame->retcode;
16830 + restorer = (void __user *)&frame->retcode;
16831 if (ka->sa.sa_flags & SA_RESTORER)
16832 restorer = ka->sa.sa_restorer;
16833
16834 @@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigactio
16835 * reasons and because gdb uses it as a signature to notice
16836 * signal handler stack frames.
16837 */
16838 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
16839 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
16840
16841 if (err)
16842 return -EFAULT;
16843 @@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, str
16844 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
16845
16846 /* Set up to return from userspace. */
16847 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16848 + if (current->mm->context.vdso)
16849 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16850 + else
16851 + restorer = (void __user *)&frame->retcode;
16852 if (ka->sa.sa_flags & SA_RESTORER)
16853 restorer = ka->sa.sa_restorer;
16854 put_user_ex(restorer, &frame->pretcode);
16855 @@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, str
16856 * reasons and because gdb uses it as a signature to notice
16857 * signal handler stack frames.
16858 */
16859 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
16860 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
16861 } put_user_catch(err);
16862
16863 if (err)
16864 @@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *re
16865 int signr;
16866 sigset_t *oldset;
16867
16868 + pax_track_stack();
16869 +
16870 /*
16871 * We want the common case to go fast, which is why we may in certain
16872 * cases get here from kernel mode. Just return without doing anything
16873 @@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *re
16874 * X86_32: vm86 regs switched out by assembly code before reaching
16875 * here, so testing against kernel CS suffices.
16876 */
16877 - if (!user_mode(regs))
16878 + if (!user_mode_novm(regs))
16879 return;
16880
16881 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
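
align_sigframe() in signal.c changes ((sp + 4) & -16ul) - 4 to ((sp - 12) & -16ul) - 4. Both satisfy the i386 ABI rule that ((sp + 4) & 15) == 0 at handler entry, but the old form could return sp unchanged when it already happened to be aligned, whereas the new one always lands strictly below sp, presumably to keep a little scratch room under the interrupted frame. A quick self-check over a range of stack pointers:

#include <assert.h>

static unsigned long align_old(unsigned long sp) { return ((sp + 4) & -16ul) - 4; }
static unsigned long align_new(unsigned long sp) { return ((sp - 12) & -16ul) - 4; }

int main(void)
{
    for (unsigned long sp = 0xbffff000UL; sp < 0xbffff040UL; sp++) {
        unsigned long o = align_old(sp), n = align_new(sp);
        /* both forms meet the ABI rule ((sp + 4) & 15) == 0 ... */
        assert(((o + 4) & 15) == 0 && ((n + 4) & 15) == 0);
        /* ... but only the new one is guaranteed to land strictly below sp */
        assert(n < sp);
    }
    return 0;
}
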
16882 diff -urNp linux-2.6.32.45/arch/x86/kernel/smpboot.c linux-2.6.32.45/arch/x86/kernel/smpboot.c
16883 --- linux-2.6.32.45/arch/x86/kernel/smpboot.c 2011-03-27 14:31:47.000000000 -0400
16884 +++ linux-2.6.32.45/arch/x86/kernel/smpboot.c 2011-07-01 19:10:03.000000000 -0400
16885 @@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct
16886 */
16887 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
16888
16889 -void cpu_hotplug_driver_lock()
16890 +void cpu_hotplug_driver_lock(void)
16891 {
16892 - mutex_lock(&x86_cpu_hotplug_driver_mutex);
16893 + mutex_lock(&x86_cpu_hotplug_driver_mutex);
16894 }
16895
16896 -void cpu_hotplug_driver_unlock()
16897 +void cpu_hotplug_driver_unlock(void)
16898 {
16899 - mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16900 + mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16901 }
16902
16903 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
16904 @@ -625,7 +625,7 @@ wakeup_secondary_cpu_via_init(int phys_a
16905 * target processor state.
16906 */
16907 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
16908 - (unsigned long)stack_start.sp);
16909 + stack_start);
16910
16911 /*
16912 * Run STARTUP IPI loop.
16913 @@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int api
16914 set_idle_for_cpu(cpu, c_idle.idle);
16915 do_rest:
16916 per_cpu(current_task, cpu) = c_idle.idle;
16917 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
16918 #ifdef CONFIG_X86_32
16919 /* Stack for startup_32 can be just as for start_secondary onwards */
16920 irq_ctx_init(cpu);
16921 @@ -750,13 +751,15 @@ do_rest:
16922 #else
16923 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
16924 initial_gs = per_cpu_offset(cpu);
16925 - per_cpu(kernel_stack, cpu) =
16926 - (unsigned long)task_stack_page(c_idle.idle) -
16927 - KERNEL_STACK_OFFSET + THREAD_SIZE;
16928 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
16929 #endif
16930 +
16931 + pax_open_kernel();
16932 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16933 + pax_close_kernel();
16934 +
16935 initial_code = (unsigned long)start_secondary;
16936 - stack_start.sp = (void *) c_idle.idle->thread.sp;
16937 + stack_start = c_idle.idle->thread.sp;
16938
16939 /* start_ip had better be page-aligned! */
16940 start_ip = setup_trampoline();
16941 @@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int
16942
16943 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
16944
16945 +#ifdef CONFIG_PAX_PER_CPU_PGD
16946 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
16947 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16948 + KERNEL_PGD_PTRS);
16949 +#endif
16950 +
16951 err = do_boot_cpu(apicid, cpu);
16952
16953 if (err) {
16954 diff -urNp linux-2.6.32.45/arch/x86/kernel/step.c linux-2.6.32.45/arch/x86/kernel/step.c
16955 --- linux-2.6.32.45/arch/x86/kernel/step.c 2011-03-27 14:31:47.000000000 -0400
16956 +++ linux-2.6.32.45/arch/x86/kernel/step.c 2011-04-17 15:56:46.000000000 -0400
16957 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
16958 struct desc_struct *desc;
16959 unsigned long base;
16960
16961 - seg &= ~7UL;
16962 + seg >>= 3;
16963
16964 mutex_lock(&child->mm->context.lock);
16965 - if (unlikely((seg >> 3) >= child->mm->context.size))
16966 + if (unlikely(seg >= child->mm->context.size))
16967 addr = -1L; /* bogus selector, access would fault */
16968 else {
16969 desc = child->mm->context.ldt + seg;
16970 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
16971 addr += base;
16972 }
16973 mutex_unlock(&child->mm->context.lock);
16974 - }
16975 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
16976 + addr = ktla_ktva(addr);
16977
16978 return addr;
16979 }
16980 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
16981 unsigned char opcode[15];
16982 unsigned long addr = convert_ip_to_linear(child, regs);
16983
16984 + if (addr == -EINVAL)
16985 + return 0;
16986 +
16987 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
16988 for (i = 0; i < copied; i++) {
16989 switch (opcode[i]) {
16990 @@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
16991
16992 #ifdef CONFIG_X86_64
16993 case 0x40 ... 0x4f:
16994 - if (regs->cs != __USER_CS)
16995 + if ((regs->cs & 0xffff) != __USER_CS)
16996 /* 32-bit mode: register increment */
16997 return 0;
16998 /* 64-bit mode: REX prefix */
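
convert_ip_to_linear() in step.c now derives the LDT index with seg >>= 3 instead of masking, and treats the kernel code selectors specially by mapping the address through ktla_ktva(). The selector format being relied on, index in the upper 13 bits, a table-indicator bit and a 2-bit RPL, in standalone form (0x73 is the usual 32-bit user code selector):

#include <assert.h>
#include <stdio.h>

struct selector {
    unsigned int index;  /* descriptor slot */
    unsigned int ti;     /* 0 = GDT, 1 = LDT */
    unsigned int rpl;    /* requested privilege level */
};

static struct selector decode_selector(unsigned short sel)
{
    struct selector s;
    s.index = sel >> 3;        /* what the patched code computes with seg >>= 3 */
    s.ti    = (sel >> 2) & 1;
    s.rpl   = sel & 3;
    return s;
}

int main(void)
{
    struct selector s = decode_selector(0x73);
    printf("index=%u ti=%u rpl=%u\n", s.index, s.ti, s.rpl);
    assert(s.index == 14 && s.rpl == 3);
    return 0;
}
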
16999 diff -urNp linux-2.6.32.45/arch/x86/kernel/syscall_table_32.S linux-2.6.32.45/arch/x86/kernel/syscall_table_32.S
17000 --- linux-2.6.32.45/arch/x86/kernel/syscall_table_32.S 2011-03-27 14:31:47.000000000 -0400
17001 +++ linux-2.6.32.45/arch/x86/kernel/syscall_table_32.S 2011-04-17 15:56:46.000000000 -0400
17002 @@ -1,3 +1,4 @@
17003 +.section .rodata,"a",@progbits
17004 ENTRY(sys_call_table)
17005 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
17006 .long sys_exit
17007 diff -urNp linux-2.6.32.45/arch/x86/kernel/sys_i386_32.c linux-2.6.32.45/arch/x86/kernel/sys_i386_32.c
17008 --- linux-2.6.32.45/arch/x86/kernel/sys_i386_32.c 2011-03-27 14:31:47.000000000 -0400
17009 +++ linux-2.6.32.45/arch/x86/kernel/sys_i386_32.c 2011-04-17 15:56:46.000000000 -0400
17010 @@ -24,6 +24,21 @@
17011
17012 #include <asm/syscalls.h>
17013
17014 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
17015 +{
17016 + unsigned long pax_task_size = TASK_SIZE;
17017 +
17018 +#ifdef CONFIG_PAX_SEGMEXEC
17019 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
17020 + pax_task_size = SEGMEXEC_TASK_SIZE;
17021 +#endif
17022 +
17023 + if (len > pax_task_size || addr > pax_task_size - len)
17024 + return -EINVAL;
17025 +
17026 + return 0;
17027 +}
17028 +
17029 /*
17030 * Perform the select(nd, in, out, ex, tv) and mmap() system
17031 * calls. Linux/i386 didn't use to be able to handle more than
17032 @@ -58,6 +73,212 @@ out:
17033 return err;
17034 }
17035
17036 +unsigned long
17037 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
17038 + unsigned long len, unsigned long pgoff, unsigned long flags)
17039 +{
17040 + struct mm_struct *mm = current->mm;
17041 + struct vm_area_struct *vma;
17042 + unsigned long start_addr, pax_task_size = TASK_SIZE;
17043 +
17044 +#ifdef CONFIG_PAX_SEGMEXEC
17045 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17046 + pax_task_size = SEGMEXEC_TASK_SIZE;
17047 +#endif
17048 +
17049 + pax_task_size -= PAGE_SIZE;
17050 +
17051 + if (len > pax_task_size)
17052 + return -ENOMEM;
17053 +
17054 + if (flags & MAP_FIXED)
17055 + return addr;
17056 +
17057 +#ifdef CONFIG_PAX_RANDMMAP
17058 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17059 +#endif
17060 +
17061 + if (addr) {
17062 + addr = PAGE_ALIGN(addr);
17063 + if (pax_task_size - len >= addr) {
17064 + vma = find_vma(mm, addr);
17065 + if (check_heap_stack_gap(vma, addr, len))
17066 + return addr;
17067 + }
17068 + }
17069 + if (len > mm->cached_hole_size) {
17070 + start_addr = addr = mm->free_area_cache;
17071 + } else {
17072 + start_addr = addr = mm->mmap_base;
17073 + mm->cached_hole_size = 0;
17074 + }
17075 +
17076 +#ifdef CONFIG_PAX_PAGEEXEC
17077 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
17078 + start_addr = 0x00110000UL;
17079 +
17080 +#ifdef CONFIG_PAX_RANDMMAP
17081 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17082 + start_addr += mm->delta_mmap & 0x03FFF000UL;
17083 +#endif
17084 +
17085 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
17086 + start_addr = addr = mm->mmap_base;
17087 + else
17088 + addr = start_addr;
17089 + }
17090 +#endif
17091 +
17092 +full_search:
17093 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
17094 + /* At this point: (!vma || addr < vma->vm_end). */
17095 + if (pax_task_size - len < addr) {
17096 + /*
17097 + * Start a new search - just in case we missed
17098 + * some holes.
17099 + */
17100 + if (start_addr != mm->mmap_base) {
17101 + start_addr = addr = mm->mmap_base;
17102 + mm->cached_hole_size = 0;
17103 + goto full_search;
17104 + }
17105 + return -ENOMEM;
17106 + }
17107 + if (check_heap_stack_gap(vma, addr, len))
17108 + break;
17109 + if (addr + mm->cached_hole_size < vma->vm_start)
17110 + mm->cached_hole_size = vma->vm_start - addr;
17111 + addr = vma->vm_end;
17112 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
17113 + start_addr = addr = mm->mmap_base;
17114 + mm->cached_hole_size = 0;
17115 + goto full_search;
17116 + }
17117 + }
17118 +
17119 + /*
17120 + * Remember the place where we stopped the search:
17121 + */
17122 + mm->free_area_cache = addr + len;
17123 + return addr;
17124 +}
17125 +
17126 +unsigned long
17127 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17128 + const unsigned long len, const unsigned long pgoff,
17129 + const unsigned long flags)
17130 +{
17131 + struct vm_area_struct *vma;
17132 + struct mm_struct *mm = current->mm;
17133 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
17134 +
17135 +#ifdef CONFIG_PAX_SEGMEXEC
17136 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17137 + pax_task_size = SEGMEXEC_TASK_SIZE;
17138 +#endif
17139 +
17140 + pax_task_size -= PAGE_SIZE;
17141 +
17142 + /* requested length too big for entire address space */
17143 + if (len > pax_task_size)
17144 + return -ENOMEM;
17145 +
17146 + if (flags & MAP_FIXED)
17147 + return addr;
17148 +
17149 +#ifdef CONFIG_PAX_PAGEEXEC
17150 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
17151 + goto bottomup;
17152 +#endif
17153 +
17154 +#ifdef CONFIG_PAX_RANDMMAP
17155 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17156 +#endif
17157 +
17158 + /* requesting a specific address */
17159 + if (addr) {
17160 + addr = PAGE_ALIGN(addr);
17161 + if (pax_task_size - len >= addr) {
17162 + vma = find_vma(mm, addr);
17163 + if (check_heap_stack_gap(vma, addr, len))
17164 + return addr;
17165 + }
17166 + }
17167 +
17168 + /* check if free_area_cache is useful for us */
17169 + if (len <= mm->cached_hole_size) {
17170 + mm->cached_hole_size = 0;
17171 + mm->free_area_cache = mm->mmap_base;
17172 + }
17173 +
17174 + /* either no address requested or can't fit in requested address hole */
17175 + addr = mm->free_area_cache;
17176 +
17177 + /* make sure it can fit in the remaining address space */
17178 + if (addr > len) {
17179 + vma = find_vma(mm, addr-len);
17180 + if (check_heap_stack_gap(vma, addr - len, len))
17181 + /* remember the address as a hint for next time */
17182 + return (mm->free_area_cache = addr-len);
17183 + }
17184 +
17185 + if (mm->mmap_base < len)
17186 + goto bottomup;
17187 +
17188 + addr = mm->mmap_base-len;
17189 +
17190 + do {
17191 + /*
17192 + * Lookup failure means no vma is above this address,
17193 + * else if new region fits below vma->vm_start,
17194 + * return with success:
17195 + */
17196 + vma = find_vma(mm, addr);
17197 + if (check_heap_stack_gap(vma, addr, len))
17198 + /* remember the address as a hint for next time */
17199 + return (mm->free_area_cache = addr);
17200 +
17201 + /* remember the largest hole we saw so far */
17202 + if (addr + mm->cached_hole_size < vma->vm_start)
17203 + mm->cached_hole_size = vma->vm_start - addr;
17204 +
17205 + /* try just below the current vma->vm_start */
17206 + addr = skip_heap_stack_gap(vma, len);
17207 + } while (!IS_ERR_VALUE(addr));
17208 +
17209 +bottomup:
17210 + /*
17211 + * A failed mmap() very likely causes application failure,
17212 + * so fall back to the bottom-up function here. This scenario
17213 + * can happen with large stack limits and large mmap()
17214 + * allocations.
17215 + */
17216 +
17217 +#ifdef CONFIG_PAX_SEGMEXEC
17218 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17219 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
17220 + else
17221 +#endif
17222 +
17223 + mm->mmap_base = TASK_UNMAPPED_BASE;
17224 +
17225 +#ifdef CONFIG_PAX_RANDMMAP
17226 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17227 + mm->mmap_base += mm->delta_mmap;
17228 +#endif
17229 +
17230 + mm->free_area_cache = mm->mmap_base;
17231 + mm->cached_hole_size = ~0UL;
17232 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17233 + /*
17234 + * Restore the topdown base:
17235 + */
17236 + mm->mmap_base = base;
17237 + mm->free_area_cache = base;
17238 + mm->cached_hole_size = ~0UL;
17239 +
17240 + return addr;
17241 +}
17242
17243 struct sel_arg_struct {
17244 unsigned long n;
17245 @@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int fi
17246 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
17247 case SEMTIMEDOP:
17248 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
17249 - (const struct timespec __user *)fifth);
17250 + (__force const struct timespec __user *)fifth);
17251
17252 case SEMGET:
17253 return sys_semget(first, second, third);
17254 @@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int fi
17255 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
17256 if (ret)
17257 return ret;
17258 - return put_user(raddr, (ulong __user *) third);
17259 + return put_user(raddr, (__force ulong __user *) third);
17260 }
17261 case 1: /* iBCS2 emulator entry point */
17262 if (!segment_eq(get_fs(), get_ds()))
17263 @@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldol
17264
17265 return error;
17266 }
17267 -
17268 -
17269 -/*
17270 - * Do a system call from kernel instead of calling sys_execve so we
17271 - * end up with proper pt_regs.
17272 - */
17273 -int kernel_execve(const char *filename, char *const argv[], char *const envp[])
17274 -{
17275 - long __res;
17276 - asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
17277 - : "=a" (__res)
17278 - : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
17279 - return __res;
17280 -}
17281 diff -urNp linux-2.6.32.45/arch/x86/kernel/sys_x86_64.c linux-2.6.32.45/arch/x86/kernel/sys_x86_64.c
17282 --- linux-2.6.32.45/arch/x86/kernel/sys_x86_64.c 2011-03-27 14:31:47.000000000 -0400
17283 +++ linux-2.6.32.45/arch/x86/kernel/sys_x86_64.c 2011-04-17 15:56:46.000000000 -0400
17284 @@ -32,8 +32,8 @@ out:
17285 return error;
17286 }
17287
17288 -static void find_start_end(unsigned long flags, unsigned long *begin,
17289 - unsigned long *end)
17290 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
17291 + unsigned long *begin, unsigned long *end)
17292 {
17293 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
17294 unsigned long new_begin;
17295 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long
17296 *begin = new_begin;
17297 }
17298 } else {
17299 - *begin = TASK_UNMAPPED_BASE;
17300 + *begin = mm->mmap_base;
17301 *end = TASK_SIZE;
17302 }
17303 }
17304 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
17305 if (flags & MAP_FIXED)
17306 return addr;
17307
17308 - find_start_end(flags, &begin, &end);
17309 + find_start_end(mm, flags, &begin, &end);
17310
17311 if (len > end)
17312 return -ENOMEM;
17313
17314 +#ifdef CONFIG_PAX_RANDMMAP
17315 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17316 +#endif
17317 +
17318 if (addr) {
17319 addr = PAGE_ALIGN(addr);
17320 vma = find_vma(mm, addr);
17321 - if (end - len >= addr &&
17322 - (!vma || addr + len <= vma->vm_start))
17323 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
17324 return addr;
17325 }
17326 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
17327 @@ -106,7 +109,7 @@ full_search:
17328 }
17329 return -ENOMEM;
17330 }
17331 - if (!vma || addr + len <= vma->vm_start) {
17332 + if (check_heap_stack_gap(vma, addr, len)) {
17333 /*
17334 * Remember the place where we stopped the search:
17335 */
17336 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
17337 {
17338 struct vm_area_struct *vma;
17339 struct mm_struct *mm = current->mm;
17340 - unsigned long addr = addr0;
17341 + unsigned long base = mm->mmap_base, addr = addr0;
17342
17343 /* requested length too big for entire address space */
17344 if (len > TASK_SIZE)
17345 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
17346 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
17347 goto bottomup;
17348
17349 +#ifdef CONFIG_PAX_RANDMMAP
17350 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17351 +#endif
17352 +
17353 /* requesting a specific address */
17354 if (addr) {
17355 addr = PAGE_ALIGN(addr);
17356 - vma = find_vma(mm, addr);
17357 - if (TASK_SIZE - len >= addr &&
17358 - (!vma || addr + len <= vma->vm_start))
17359 - return addr;
17360 + if (TASK_SIZE - len >= addr) {
17361 + vma = find_vma(mm, addr);
17362 + if (check_heap_stack_gap(vma, addr, len))
17363 + return addr;
17364 + }
17365 }
17366
17367 /* check if free_area_cache is useful for us */
17368 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
17369 /* make sure it can fit in the remaining address space */
17370 if (addr > len) {
17371 vma = find_vma(mm, addr-len);
17372 - if (!vma || addr <= vma->vm_start)
17373 + if (check_heap_stack_gap(vma, addr - len, len))
17374 /* remember the address as a hint for next time */
17375 return mm->free_area_cache = addr-len;
17376 }
17377 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
17378 * return with success:
17379 */
17380 vma = find_vma(mm, addr);
17381 - if (!vma || addr+len <= vma->vm_start)
17382 + if (check_heap_stack_gap(vma, addr, len))
17383 /* remember the address as a hint for next time */
17384 return mm->free_area_cache = addr;
17385
17386 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
17387 mm->cached_hole_size = vma->vm_start - addr;
17388
17389 /* try just below the current vma->vm_start */
17390 - addr = vma->vm_start-len;
17391 - } while (len < vma->vm_start);
17392 + addr = skip_heap_stack_gap(vma, len);
17393 + } while (!IS_ERR_VALUE(addr));
17394
17395 bottomup:
17396 /*
17397 @@ -198,13 +206,21 @@ bottomup:
17398 * can happen with large stack limits and large mmap()
17399 * allocations.
17400 */
17401 + mm->mmap_base = TASK_UNMAPPED_BASE;
17402 +
17403 +#ifdef CONFIG_PAX_RANDMMAP
17404 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17405 + mm->mmap_base += mm->delta_mmap;
17406 +#endif
17407 +
17408 + mm->free_area_cache = mm->mmap_base;
17409 mm->cached_hole_size = ~0UL;
17410 - mm->free_area_cache = TASK_UNMAPPED_BASE;
17411 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17412 /*
17413 * Restore the topdown base:
17414 */
17415 - mm->free_area_cache = mm->mmap_base;
17416 + mm->mmap_base = base;
17417 + mm->free_area_cache = base;
17418 mm->cached_hole_size = ~0UL;
17419
17420 return addr;
17421 diff -urNp linux-2.6.32.45/arch/x86/kernel/tboot.c linux-2.6.32.45/arch/x86/kernel/tboot.c
17422 --- linux-2.6.32.45/arch/x86/kernel/tboot.c 2011-03-27 14:31:47.000000000 -0400
17423 +++ linux-2.6.32.45/arch/x86/kernel/tboot.c 2011-05-22 23:02:03.000000000 -0400
17424 @@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
17425
17426 void tboot_shutdown(u32 shutdown_type)
17427 {
17428 - void (*shutdown)(void);
17429 + void (* __noreturn shutdown)(void);
17430
17431 if (!tboot_enabled())
17432 return;
17433 @@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
17434
17435 switch_to_tboot_pt();
17436
17437 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
17438 + shutdown = (void *)tboot->shutdown_entry;
17439 shutdown();
17440
17441 /* should not reach here */
17442 @@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
17443 tboot_shutdown(acpi_shutdown_map[sleep_state]);
17444 }
17445
17446 -static atomic_t ap_wfs_count;
17447 +static atomic_unchecked_t ap_wfs_count;
17448
17449 static int tboot_wait_for_aps(int num_aps)
17450 {
17451 @@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(
17452 {
17453 switch (action) {
17454 case CPU_DYING:
17455 - atomic_inc(&ap_wfs_count);
17456 + atomic_inc_unchecked(&ap_wfs_count);
17457 if (num_online_cpus() == 1)
17458 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
17459 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
17460 return NOTIFY_BAD;
17461 break;
17462 }
17463 @@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
17464
17465 tboot_create_trampoline();
17466
17467 - atomic_set(&ap_wfs_count, 0);
17468 + atomic_set_unchecked(&ap_wfs_count, 0);
17469 register_hotcpu_notifier(&tboot_cpu_notifier);
17470 return 0;
17471 }
17472 diff -urNp linux-2.6.32.45/arch/x86/kernel/time.c linux-2.6.32.45/arch/x86/kernel/time.c
17473 --- linux-2.6.32.45/arch/x86/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
17474 +++ linux-2.6.32.45/arch/x86/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
17475 @@ -26,17 +26,13 @@
17476 int timer_ack;
17477 #endif
17478
17479 -#ifdef CONFIG_X86_64
17480 -volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
17481 -#endif
17482 -
17483 unsigned long profile_pc(struct pt_regs *regs)
17484 {
17485 unsigned long pc = instruction_pointer(regs);
17486
17487 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
17488 + if (!user_mode(regs) && in_lock_functions(pc)) {
17489 #ifdef CONFIG_FRAME_POINTER
17490 - return *(unsigned long *)(regs->bp + sizeof(long));
17491 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
17492 #else
17493 unsigned long *sp =
17494 (unsigned long *)kernel_stack_pointer(regs);
17495 @@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
17496 * or above a saved flags. Eflags has bits 22-31 zero,
17497 * kernel addresses don't.
17498 */
17499 +
17500 +#ifdef CONFIG_PAX_KERNEXEC
17501 + return ktla_ktva(sp[0]);
17502 +#else
17503 if (sp[0] >> 22)
17504 return sp[0];
17505 if (sp[1] >> 22)
17506 return sp[1];
17507 #endif
17508 +
17509 +#endif
17510 }
17511 return pc;
17512 }
17513 diff -urNp linux-2.6.32.45/arch/x86/kernel/tls.c linux-2.6.32.45/arch/x86/kernel/tls.c
17514 --- linux-2.6.32.45/arch/x86/kernel/tls.c 2011-03-27 14:31:47.000000000 -0400
17515 +++ linux-2.6.32.45/arch/x86/kernel/tls.c 2011-04-17 15:56:46.000000000 -0400
17516 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
17517 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
17518 return -EINVAL;
17519
17520 +#ifdef CONFIG_PAX_SEGMEXEC
17521 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
17522 + return -EINVAL;
17523 +#endif
17524 +
17525 set_tls_desc(p, idx, &info, 1);
17526
17527 return 0;
17528 diff -urNp linux-2.6.32.45/arch/x86/kernel/trampoline_32.S linux-2.6.32.45/arch/x86/kernel/trampoline_32.S
17529 --- linux-2.6.32.45/arch/x86/kernel/trampoline_32.S 2011-03-27 14:31:47.000000000 -0400
17530 +++ linux-2.6.32.45/arch/x86/kernel/trampoline_32.S 2011-04-17 15:56:46.000000000 -0400
17531 @@ -32,6 +32,12 @@
17532 #include <asm/segment.h>
17533 #include <asm/page_types.h>
17534
17535 +#ifdef CONFIG_PAX_KERNEXEC
17536 +#define ta(X) (X)
17537 +#else
17538 +#define ta(X) ((X) - __PAGE_OFFSET)
17539 +#endif
17540 +
17541 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
17542 __CPUINITRODATA
17543 .code16
17544 @@ -60,7 +66,7 @@ r_base = .
17545 inc %ax # protected mode (PE) bit
17546 lmsw %ax # into protected mode
17547 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
17548 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
17549 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
17550
17551 # These need to be in the same 64K segment as the above;
17552 # hence we don't use the boot_gdt_descr defined in head.S
17553 diff -urNp linux-2.6.32.45/arch/x86/kernel/trampoline_64.S linux-2.6.32.45/arch/x86/kernel/trampoline_64.S
17554 --- linux-2.6.32.45/arch/x86/kernel/trampoline_64.S 2011-03-27 14:31:47.000000000 -0400
17555 +++ linux-2.6.32.45/arch/x86/kernel/trampoline_64.S 2011-07-01 18:53:26.000000000 -0400
17556 @@ -91,7 +91,7 @@ startup_32:
17557 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
17558 movl %eax, %ds
17559
17560 - movl $X86_CR4_PAE, %eax
17561 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17562 movl %eax, %cr4 # Enable PAE mode
17563
17564 # Setup trampoline 4 level pagetables
17565 @@ -127,7 +127,7 @@ startup_64:
17566 no_longmode:
17567 hlt
17568 jmp no_longmode
17569 -#include "verify_cpu_64.S"
17570 +#include "verify_cpu.S"
17571
17572 # Careful these need to be in the same 64K segment as the above;
17573 tidt:
17574 @@ -138,7 +138,7 @@ tidt:
17575 # so the kernel can live anywhere
17576 .balign 4
17577 tgdt:
17578 - .short tgdt_end - tgdt # gdt limit
17579 + .short tgdt_end - tgdt - 1 # gdt limit
17580 .long tgdt - r_base
17581 .short 0
17582 .quad 0x00cf9b000000ffff # __KERNEL32_CS
17583 diff -urNp linux-2.6.32.45/arch/x86/kernel/traps.c linux-2.6.32.45/arch/x86/kernel/traps.c
17584 --- linux-2.6.32.45/arch/x86/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
17585 +++ linux-2.6.32.45/arch/x86/kernel/traps.c 2011-07-06 19:53:33.000000000 -0400
17586 @@ -69,12 +69,6 @@ asmlinkage int system_call(void);
17587
17588 /* Do we ignore FPU interrupts ? */
17589 char ignore_fpu_irq;
17590 -
17591 -/*
17592 - * The IDT has to be page-aligned to simplify the Pentium
17593 - * F0 0F bug workaround.
17594 - */
17595 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
17596 #endif
17597
17598 DECLARE_BITMAP(used_vectors, NR_VECTORS);
17599 @@ -112,19 +106,19 @@ static inline void preempt_conditional_c
17600 static inline void
17601 die_if_kernel(const char *str, struct pt_regs *regs, long err)
17602 {
17603 - if (!user_mode_vm(regs))
17604 + if (!user_mode(regs))
17605 die(str, regs, err);
17606 }
17607 #endif
17608
17609 static void __kprobes
17610 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17611 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
17612 long error_code, siginfo_t *info)
17613 {
17614 struct task_struct *tsk = current;
17615
17616 #ifdef CONFIG_X86_32
17617 - if (regs->flags & X86_VM_MASK) {
17618 + if (v8086_mode(regs)) {
17619 /*
17620 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
17621 * On nmi (interrupt 2), do_trap should not be called.
17622 @@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str
17623 }
17624 #endif
17625
17626 - if (!user_mode(regs))
17627 + if (!user_mode_novm(regs))
17628 goto kernel_trap;
17629
17630 #ifdef CONFIG_X86_32
17631 @@ -158,7 +152,7 @@ trap_signal:
17632 printk_ratelimit()) {
17633 printk(KERN_INFO
17634 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
17635 - tsk->comm, tsk->pid, str,
17636 + tsk->comm, task_pid_nr(tsk), str,
17637 regs->ip, regs->sp, error_code);
17638 print_vma_addr(" in ", regs->ip);
17639 printk("\n");
17640 @@ -175,8 +169,20 @@ kernel_trap:
17641 if (!fixup_exception(regs)) {
17642 tsk->thread.error_code = error_code;
17643 tsk->thread.trap_no = trapnr;
17644 +
17645 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17646 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
17647 + str = "PAX: suspicious stack segment fault";
17648 +#endif
17649 +
17650 die(str, regs, error_code);
17651 }
17652 +
17653 +#ifdef CONFIG_PAX_REFCOUNT
17654 + if (trapnr == 4)
17655 + pax_report_refcount_overflow(regs);
17656 +#endif
17657 +
17658 return;
17659
17660 #ifdef CONFIG_X86_32
17661 @@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *re
17662 conditional_sti(regs);
17663
17664 #ifdef CONFIG_X86_32
17665 - if (regs->flags & X86_VM_MASK)
17666 + if (v8086_mode(regs))
17667 goto gp_in_vm86;
17668 #endif
17669
17670 tsk = current;
17671 - if (!user_mode(regs))
17672 + if (!user_mode_novm(regs))
17673 goto gp_in_kernel;
17674
17675 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17676 + if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
17677 + struct mm_struct *mm = tsk->mm;
17678 + unsigned long limit;
17679 +
17680 + down_write(&mm->mmap_sem);
17681 + limit = mm->context.user_cs_limit;
17682 + if (limit < TASK_SIZE) {
17683 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
17684 + up_write(&mm->mmap_sem);
17685 + return;
17686 + }
17687 + up_write(&mm->mmap_sem);
17688 + }
17689 +#endif
17690 +
17691 tsk->thread.error_code = error_code;
17692 tsk->thread.trap_no = 13;
17693
17694 @@ -305,6 +327,13 @@ gp_in_kernel:
17695 if (notify_die(DIE_GPF, "general protection fault", regs,
17696 error_code, 13, SIGSEGV) == NOTIFY_STOP)
17697 return;
17698 +
17699 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17700 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
17701 + die("PAX: suspicious general protection fault", regs, error_code);
17702 + else
17703 +#endif
17704 +
17705 die("general protection fault", regs, error_code);
17706 }
17707
17708 @@ -435,6 +464,17 @@ static notrace __kprobes void default_do
17709 dotraplinkage notrace __kprobes void
17710 do_nmi(struct pt_regs *regs, long error_code)
17711 {
17712 +
17713 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17714 + if (!user_mode(regs)) {
17715 + unsigned long cs = regs->cs & 0xFFFF;
17716 + unsigned long ip = ktva_ktla(regs->ip);
17717 +
17718 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
17719 + regs->ip = ip;
17720 + }
17721 +#endif
17722 +
17723 nmi_enter();
17724
17725 inc_irq_stat(__nmi_count);
17726 @@ -558,7 +598,7 @@ dotraplinkage void __kprobes do_debug(st
17727 }
17728
17729 #ifdef CONFIG_X86_32
17730 - if (regs->flags & X86_VM_MASK)
17731 + if (v8086_mode(regs))
17732 goto debug_vm86;
17733 #endif
17734
17735 @@ -570,7 +610,7 @@ dotraplinkage void __kprobes do_debug(st
17736 * kernel space (but re-enable TF when returning to user mode).
17737 */
17738 if (condition & DR_STEP) {
17739 - if (!user_mode(regs))
17740 + if (!user_mode_novm(regs))
17741 goto clear_TF_reenable;
17742 }
17743
17744 @@ -757,7 +797,7 @@ do_simd_coprocessor_error(struct pt_regs
17745 * Handle strange cache flush from user space exception
17746 * in all other cases. This is undocumented behaviour.
17747 */
17748 - if (regs->flags & X86_VM_MASK) {
17749 + if (v8086_mode(regs)) {
17750 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
17751 return;
17752 }
17753 @@ -798,7 +838,7 @@ asmlinkage void __attribute__((weak)) sm
17754 void __math_state_restore(void)
17755 {
17756 struct thread_info *thread = current_thread_info();
17757 - struct task_struct *tsk = thread->task;
17758 + struct task_struct *tsk = current;
17759
17760 /*
17761 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
17762 @@ -825,8 +865,7 @@ void __math_state_restore(void)
17763 */
17764 asmlinkage void math_state_restore(void)
17765 {
17766 - struct thread_info *thread = current_thread_info();
17767 - struct task_struct *tsk = thread->task;
17768 + struct task_struct *tsk = current;
17769
17770 if (!tsk_used_math(tsk)) {
17771 local_irq_enable();
17772 diff -urNp linux-2.6.32.45/arch/x86/kernel/verify_cpu_64.S linux-2.6.32.45/arch/x86/kernel/verify_cpu_64.S
17773 --- linux-2.6.32.45/arch/x86/kernel/verify_cpu_64.S 2011-03-27 14:31:47.000000000 -0400
17774 +++ linux-2.6.32.45/arch/x86/kernel/verify_cpu_64.S 1969-12-31 19:00:00.000000000 -0500
17775 @@ -1,105 +0,0 @@
17776 -/*
17777 - *
17778 - * verify_cpu.S - Code for cpu long mode and SSE verification. This
17779 - * code has been borrowed from boot/setup.S and was introduced by
17780 - * Andi Kleen.
17781 - *
17782 - * Copyright (c) 2007 Andi Kleen (ak@suse.de)
17783 - * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
17784 - * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
17785 - *
17786 - * This source code is licensed under the GNU General Public License,
17787 - * Version 2. See the file COPYING for more details.
17788 - *
17789 - * This is a common code for verification whether CPU supports
17790 - * long mode and SSE or not. It is not called directly instead this
17791 - * file is included at various places and compiled in that context.
17792 - * Following are the current usage.
17793 - *
17794 - * This file is included by both 16bit and 32bit code.
17795 - *
17796 - * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
17797 - * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
17798 - * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
17799 - * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
17800 - *
17801 - * verify_cpu, returns the status of cpu check in register %eax.
17802 - * 0: Success 1: Failure
17803 - *
17804 - * The caller needs to check for the error code and take the action
17805 - * appropriately. Either display a message or halt.
17806 - */
17807 -
17808 -#include <asm/cpufeature.h>
17809 -
17810 -verify_cpu:
17811 - pushfl # Save caller passed flags
17812 - pushl $0 # Kill any dangerous flags
17813 - popfl
17814 -
17815 - pushfl # standard way to check for cpuid
17816 - popl %eax
17817 - movl %eax,%ebx
17818 - xorl $0x200000,%eax
17819 - pushl %eax
17820 - popfl
17821 - pushfl
17822 - popl %eax
17823 - cmpl %eax,%ebx
17824 - jz verify_cpu_no_longmode # cpu has no cpuid
17825 -
17826 - movl $0x0,%eax # See if cpuid 1 is implemented
17827 - cpuid
17828 - cmpl $0x1,%eax
17829 - jb verify_cpu_no_longmode # no cpuid 1
17830 -
17831 - xor %di,%di
17832 - cmpl $0x68747541,%ebx # AuthenticAMD
17833 - jnz verify_cpu_noamd
17834 - cmpl $0x69746e65,%edx
17835 - jnz verify_cpu_noamd
17836 - cmpl $0x444d4163,%ecx
17837 - jnz verify_cpu_noamd
17838 - mov $1,%di # cpu is from AMD
17839 -
17840 -verify_cpu_noamd:
17841 - movl $0x1,%eax # Does the cpu have what it takes
17842 - cpuid
17843 - andl $REQUIRED_MASK0,%edx
17844 - xorl $REQUIRED_MASK0,%edx
17845 - jnz verify_cpu_no_longmode
17846 -
17847 - movl $0x80000000,%eax # See if extended cpuid is implemented
17848 - cpuid
17849 - cmpl $0x80000001,%eax
17850 - jb verify_cpu_no_longmode # no extended cpuid
17851 -
17852 - movl $0x80000001,%eax # Does the cpu have what it takes
17853 - cpuid
17854 - andl $REQUIRED_MASK1,%edx
17855 - xorl $REQUIRED_MASK1,%edx
17856 - jnz verify_cpu_no_longmode
17857 -
17858 -verify_cpu_sse_test:
17859 - movl $1,%eax
17860 - cpuid
17861 - andl $SSE_MASK,%edx
17862 - cmpl $SSE_MASK,%edx
17863 - je verify_cpu_sse_ok
17864 - test %di,%di
17865 - jz verify_cpu_no_longmode # only try to force SSE on AMD
17866 - movl $0xc0010015,%ecx # HWCR
17867 - rdmsr
17868 - btr $15,%eax # enable SSE
17869 - wrmsr
17870 - xor %di,%di # don't loop
17871 - jmp verify_cpu_sse_test # try again
17872 -
17873 -verify_cpu_no_longmode:
17874 - popfl # Restore caller passed flags
17875 - movl $1,%eax
17876 - ret
17877 -verify_cpu_sse_ok:
17878 - popfl # Restore caller passed flags
17879 - xorl %eax, %eax
17880 - ret
17881 diff -urNp linux-2.6.32.45/arch/x86/kernel/verify_cpu.S linux-2.6.32.45/arch/x86/kernel/verify_cpu.S
17882 --- linux-2.6.32.45/arch/x86/kernel/verify_cpu.S 1969-12-31 19:00:00.000000000 -0500
17883 +++ linux-2.6.32.45/arch/x86/kernel/verify_cpu.S 2011-07-01 18:28:42.000000000 -0400
17884 @@ -0,0 +1,140 @@
17885 +/*
17886 + *
17887 + * verify_cpu.S - Code for cpu long mode and SSE verification. This
17888 + * code has been borrowed from boot/setup.S and was introduced by
17889 + * Andi Kleen.
17890 + *
17891 + * Copyright (c) 2007 Andi Kleen (ak@suse.de)
17892 + * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
17893 + * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
17894 + * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
17895 + *
17896 + * This source code is licensed under the GNU General Public License,
17897 + * Version 2. See the file COPYING for more details.
17898 + *
17899 + * This is a common code for verification whether CPU supports
17900 + * long mode and SSE or not. It is not called directly instead this
17901 + * file is included at various places and compiled in that context.
17902 + * This file is expected to run in 32bit code. Currently:
17903 + *
17904 + * arch/x86/boot/compressed/head_64.S: Boot cpu verification
17905 + * arch/x86/kernel/trampoline_64.S: secondary processor verification
17906 + * arch/x86/kernel/head_32.S: processor startup
17907 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
17908 + *
17909 + * verify_cpu, returns the status of longmode and SSE in register %eax.
17910 + * 0: Success 1: Failure
17911 + *
17912 + * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
17913 + *
17914 + * The caller needs to check for the error code and take the action
17915 + * appropriately. Either display a message or halt.
17916 + */
17917 +
17918 +#include <asm/cpufeature.h>
17919 +#include <asm/msr-index.h>
17920 +
17921 +verify_cpu:
17922 + pushfl # Save caller passed flags
17923 + pushl $0 # Kill any dangerous flags
17924 + popfl
17925 +
17926 + pushfl # standard way to check for cpuid
17927 + popl %eax
17928 + movl %eax,%ebx
17929 + xorl $0x200000,%eax
17930 + pushl %eax
17931 + popfl
17932 + pushfl
17933 + popl %eax
17934 + cmpl %eax,%ebx
17935 + jz verify_cpu_no_longmode # cpu has no cpuid
17936 +
17937 + movl $0x0,%eax # See if cpuid 1 is implemented
17938 + cpuid
17939 + cmpl $0x1,%eax
17940 + jb verify_cpu_no_longmode # no cpuid 1
17941 +
17942 + xor %di,%di
17943 + cmpl $0x68747541,%ebx # AuthenticAMD
17944 + jnz verify_cpu_noamd
17945 + cmpl $0x69746e65,%edx
17946 + jnz verify_cpu_noamd
17947 + cmpl $0x444d4163,%ecx
17948 + jnz verify_cpu_noamd
17949 + mov $1,%di # cpu is from AMD
17950 + jmp verify_cpu_check
17951 +
17952 +verify_cpu_noamd:
17953 + cmpl $0x756e6547,%ebx # GenuineIntel?
17954 + jnz verify_cpu_check
17955 + cmpl $0x49656e69,%edx
17956 + jnz verify_cpu_check
17957 + cmpl $0x6c65746e,%ecx
17958 + jnz verify_cpu_check
17959 +
17960 + # only call IA32_MISC_ENABLE when:
17961 + # family > 6 || (family == 6 && model >= 0xd)
17962 + movl $0x1, %eax # check CPU family and model
17963 + cpuid
17964 + movl %eax, %ecx
17965 +
17966 + andl $0x0ff00f00, %eax # mask family and extended family
17967 + shrl $8, %eax
17968 + cmpl $6, %eax
17969 + ja verify_cpu_clear_xd # family > 6, ok
17970 + jb verify_cpu_check # family < 6, skip
17971 +
17972 + andl $0x000f00f0, %ecx # mask model and extended model
17973 + shrl $4, %ecx
17974 + cmpl $0xd, %ecx
17975 + jb verify_cpu_check # family == 6, model < 0xd, skip
17976 +
17977 +verify_cpu_clear_xd:
17978 + movl $MSR_IA32_MISC_ENABLE, %ecx
17979 + rdmsr
17980 + btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
17981 + jnc verify_cpu_check # only write MSR if bit was changed
17982 + wrmsr
17983 +
17984 +verify_cpu_check:
17985 + movl $0x1,%eax # Does the cpu have what it takes
17986 + cpuid
17987 + andl $REQUIRED_MASK0,%edx
17988 + xorl $REQUIRED_MASK0,%edx
17989 + jnz verify_cpu_no_longmode
17990 +
17991 + movl $0x80000000,%eax # See if extended cpuid is implemented
17992 + cpuid
17993 + cmpl $0x80000001,%eax
17994 + jb verify_cpu_no_longmode # no extended cpuid
17995 +
17996 + movl $0x80000001,%eax # Does the cpu have what it takes
17997 + cpuid
17998 + andl $REQUIRED_MASK1,%edx
17999 + xorl $REQUIRED_MASK1,%edx
18000 + jnz verify_cpu_no_longmode
18001 +
18002 +verify_cpu_sse_test:
18003 + movl $1,%eax
18004 + cpuid
18005 + andl $SSE_MASK,%edx
18006 + cmpl $SSE_MASK,%edx
18007 + je verify_cpu_sse_ok
18008 + test %di,%di
18009 + jz verify_cpu_no_longmode # only try to force SSE on AMD
18010 + movl $MSR_K7_HWCR,%ecx
18011 + rdmsr
18012 + btr $15,%eax # enable SSE
18013 + wrmsr
18014 + xor %di,%di # don't loop
18015 + jmp verify_cpu_sse_test # try again
18016 +
18017 +verify_cpu_no_longmode:
18018 + popfl # Restore caller passed flags
18019 + movl $1,%eax
18020 + ret
18021 +verify_cpu_sse_ok:
18022 + popfl # Restore caller passed flags
18023 + xorl %eax, %eax
18024 + ret
18025 diff -urNp linux-2.6.32.45/arch/x86/kernel/vm86_32.c linux-2.6.32.45/arch/x86/kernel/vm86_32.c
18026 --- linux-2.6.32.45/arch/x86/kernel/vm86_32.c 2011-03-27 14:31:47.000000000 -0400
18027 +++ linux-2.6.32.45/arch/x86/kernel/vm86_32.c 2011-04-17 15:56:46.000000000 -0400
18028 @@ -41,6 +41,7 @@
18029 #include <linux/ptrace.h>
18030 #include <linux/audit.h>
18031 #include <linux/stddef.h>
18032 +#include <linux/grsecurity.h>
18033
18034 #include <asm/uaccess.h>
18035 #include <asm/io.h>
18036 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
18037 do_exit(SIGSEGV);
18038 }
18039
18040 - tss = &per_cpu(init_tss, get_cpu());
18041 + tss = init_tss + get_cpu();
18042 current->thread.sp0 = current->thread.saved_sp0;
18043 current->thread.sysenter_cs = __KERNEL_CS;
18044 load_sp0(tss, &current->thread);
18045 @@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
18046 struct task_struct *tsk;
18047 int tmp, ret = -EPERM;
18048
18049 +#ifdef CONFIG_GRKERNSEC_VM86
18050 + if (!capable(CAP_SYS_RAWIO)) {
18051 + gr_handle_vm86();
18052 + goto out;
18053 + }
18054 +#endif
18055 +
18056 tsk = current;
18057 if (tsk->thread.saved_sp0)
18058 goto out;
18059 @@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
18060 int tmp, ret;
18061 struct vm86plus_struct __user *v86;
18062
18063 +#ifdef CONFIG_GRKERNSEC_VM86
18064 + if (!capable(CAP_SYS_RAWIO)) {
18065 + gr_handle_vm86();
18066 + ret = -EPERM;
18067 + goto out;
18068 + }
18069 +#endif
18070 +
18071 tsk = current;
18072 switch (regs->bx) {
18073 case VM86_REQUEST_IRQ:
18074 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
18075 tsk->thread.saved_fs = info->regs32->fs;
18076 tsk->thread.saved_gs = get_user_gs(info->regs32);
18077
18078 - tss = &per_cpu(init_tss, get_cpu());
18079 + tss = init_tss + get_cpu();
18080 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
18081 if (cpu_has_sep)
18082 tsk->thread.sysenter_cs = 0;
18083 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
18084 goto cannot_handle;
18085 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
18086 goto cannot_handle;
18087 - intr_ptr = (unsigned long __user *) (i << 2);
18088 + intr_ptr = (__force unsigned long __user *) (i << 2);
18089 if (get_user(segoffs, intr_ptr))
18090 goto cannot_handle;
18091 if ((segoffs >> 16) == BIOSSEG)
18092 diff -urNp linux-2.6.32.45/arch/x86/kernel/vmi_32.c linux-2.6.32.45/arch/x86/kernel/vmi_32.c
18093 --- linux-2.6.32.45/arch/x86/kernel/vmi_32.c 2011-03-27 14:31:47.000000000 -0400
18094 +++ linux-2.6.32.45/arch/x86/kernel/vmi_32.c 2011-08-05 20:33:55.000000000 -0400
18095 @@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1)))
18096 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
18097
18098 #define call_vrom_func(rom,func) \
18099 - (((VROMFUNC *)(rom->func))())
18100 + (((VROMFUNC *)(ktva_ktla(rom.func)))())
18101
18102 #define call_vrom_long_func(rom,func,arg) \
18103 - (((VROMLONGFUNC *)(rom->func)) (arg))
18104 +({\
18105 + u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
18106 + struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
18107 + __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
18108 + __reloc;\
18109 +})
18110
18111 -static struct vrom_header *vmi_rom;
18112 +static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
18113 static int disable_pge;
18114 static int disable_pse;
18115 static int disable_sep;
18116 @@ -76,10 +81,10 @@ static struct {
18117 void (*set_initial_ap_state)(int, int);
18118 void (*halt)(void);
18119 void (*set_lazy_mode)(int mode);
18120 -} vmi_ops;
18121 +} __no_const vmi_ops __read_only;
18122
18123 /* Cached VMI operations */
18124 -struct vmi_timer_ops vmi_timer_ops;
18125 +struct vmi_timer_ops vmi_timer_ops __read_only;
18126
18127 /*
18128 * VMI patching routines.
18129 @@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
18130 static inline void patch_offset(void *insnbuf,
18131 unsigned long ip, unsigned long dest)
18132 {
18133 - *(unsigned long *)(insnbuf+1) = dest-ip-5;
18134 + *(unsigned long *)(insnbuf+1) = dest-ip-5;
18135 }
18136
18137 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
18138 @@ -102,6 +107,7 @@ static unsigned patch_internal(int call,
18139 {
18140 u64 reloc;
18141 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
18142 +
18143 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
18144 switch(rel->type) {
18145 case VMI_RELOCATION_CALL_REL:
18146 @@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud
18147
18148 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
18149 {
18150 - const pte_t pte = { .pte = 0 };
18151 + const pte_t pte = __pte(0ULL);
18152 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
18153 }
18154
18155 static void vmi_pmd_clear(pmd_t *pmd)
18156 {
18157 - const pte_t pte = { .pte = 0 };
18158 + const pte_t pte = __pte(0ULL);
18159 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
18160 }
18161 #endif
18162 @@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, un
18163 ap.ss = __KERNEL_DS;
18164 ap.esp = (unsigned long) start_esp;
18165
18166 - ap.ds = __USER_DS;
18167 - ap.es = __USER_DS;
18168 + ap.ds = __KERNEL_DS;
18169 + ap.es = __KERNEL_DS;
18170 ap.fs = __KERNEL_PERCPU;
18171 - ap.gs = __KERNEL_STACK_CANARY;
18172 + savesegment(gs, ap.gs);
18173
18174 ap.eflags = 0;
18175
18176 @@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
18177 paravirt_leave_lazy_mmu();
18178 }
18179
18180 +#ifdef CONFIG_PAX_KERNEXEC
18181 +static unsigned long vmi_pax_open_kernel(void)
18182 +{
18183 + return 0;
18184 +}
18185 +
18186 +static unsigned long vmi_pax_close_kernel(void)
18187 +{
18188 + return 0;
18189 +}
18190 +#endif
18191 +
18192 static inline int __init check_vmi_rom(struct vrom_header *rom)
18193 {
18194 struct pci_header *pci;
18195 @@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(s
18196 return 0;
18197 if (rom->vrom_signature != VMI_SIGNATURE)
18198 return 0;
18199 + if (rom->rom_length * 512 > sizeof(*rom)) {
18200 + printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
18201 + return 0;
18202 + }
18203 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
18204 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
18205 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
18206 @@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(v
18207 struct vrom_header *romstart;
18208 romstart = (struct vrom_header *)isa_bus_to_virt(base);
18209 if (check_vmi_rom(romstart)) {
18210 - vmi_rom = romstart;
18211 + vmi_rom = *romstart;
18212 return 1;
18213 }
18214 }
18215 @@ -836,6 +858,11 @@ static inline int __init activate_vmi(vo
18216
18217 para_fill(pv_irq_ops.safe_halt, Halt);
18218
18219 +#ifdef CONFIG_PAX_KERNEXEC
18220 + pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
18221 + pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
18222 +#endif
18223 +
18224 /*
18225 * Alternative instruction rewriting doesn't happen soon enough
18226 * to convert VMI_IRET to a call instead of a jump; so we have
18227 @@ -853,16 +880,16 @@ static inline int __init activate_vmi(vo
18228
18229 void __init vmi_init(void)
18230 {
18231 - if (!vmi_rom)
18232 + if (!vmi_rom.rom_signature)
18233 probe_vmi_rom();
18234 else
18235 - check_vmi_rom(vmi_rom);
18236 + check_vmi_rom(&vmi_rom);
18237
18238 /* In case probing for or validating the ROM failed, basil */
18239 - if (!vmi_rom)
18240 + if (!vmi_rom.rom_signature)
18241 return;
18242
18243 - reserve_top_address(-vmi_rom->virtual_top);
18244 + reserve_top_address(-vmi_rom.virtual_top);
18245
18246 #ifdef CONFIG_X86_IO_APIC
18247 /* This is virtual hardware; timer routing is wired correctly */
18248 @@ -874,7 +901,7 @@ void __init vmi_activate(void)
18249 {
18250 unsigned long flags;
18251
18252 - if (!vmi_rom)
18253 + if (!vmi_rom.rom_signature)
18254 return;
18255
18256 local_irq_save(flags);
18257 diff -urNp linux-2.6.32.45/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.45/arch/x86/kernel/vmlinux.lds.S
18258 --- linux-2.6.32.45/arch/x86/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
18259 +++ linux-2.6.32.45/arch/x86/kernel/vmlinux.lds.S 2011-04-17 15:56:46.000000000 -0400
18260 @@ -26,6 +26,13 @@
18261 #include <asm/page_types.h>
18262 #include <asm/cache.h>
18263 #include <asm/boot.h>
18264 +#include <asm/segment.h>
18265 +
18266 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18267 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
18268 +#else
18269 +#define __KERNEL_TEXT_OFFSET 0
18270 +#endif
18271
18272 #undef i386 /* in case the preprocessor is a 32bit one */
18273
18274 @@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
18275 #ifdef CONFIG_X86_32
18276 OUTPUT_ARCH(i386)
18277 ENTRY(phys_startup_32)
18278 -jiffies = jiffies_64;
18279 #else
18280 OUTPUT_ARCH(i386:x86-64)
18281 ENTRY(phys_startup_64)
18282 -jiffies_64 = jiffies;
18283 #endif
18284
18285 PHDRS {
18286 text PT_LOAD FLAGS(5); /* R_E */
18287 - data PT_LOAD FLAGS(7); /* RWE */
18288 +#ifdef CONFIG_X86_32
18289 + module PT_LOAD FLAGS(5); /* R_E */
18290 +#endif
18291 +#ifdef CONFIG_XEN
18292 + rodata PT_LOAD FLAGS(5); /* R_E */
18293 +#else
18294 + rodata PT_LOAD FLAGS(4); /* R__ */
18295 +#endif
18296 + data PT_LOAD FLAGS(6); /* RW_ */
18297 #ifdef CONFIG_X86_64
18298 user PT_LOAD FLAGS(5); /* R_E */
18299 +#endif
18300 + init.begin PT_LOAD FLAGS(6); /* RW_ */
18301 #ifdef CONFIG_SMP
18302 percpu PT_LOAD FLAGS(6); /* RW_ */
18303 #endif
18304 + text.init PT_LOAD FLAGS(5); /* R_E */
18305 + text.exit PT_LOAD FLAGS(5); /* R_E */
18306 init PT_LOAD FLAGS(7); /* RWE */
18307 -#endif
18308 note PT_NOTE FLAGS(0); /* ___ */
18309 }
18310
18311 SECTIONS
18312 {
18313 #ifdef CONFIG_X86_32
18314 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
18315 - phys_startup_32 = startup_32 - LOAD_OFFSET;
18316 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
18317 #else
18318 - . = __START_KERNEL;
18319 - phys_startup_64 = startup_64 - LOAD_OFFSET;
18320 + . = __START_KERNEL;
18321 #endif
18322
18323 /* Text and read-only data */
18324 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
18325 - _text = .;
18326 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18327 /* bootstrapping code */
18328 +#ifdef CONFIG_X86_32
18329 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18330 +#else
18331 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18332 +#endif
18333 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18334 + _text = .;
18335 HEAD_TEXT
18336 #ifdef CONFIG_X86_32
18337 . = ALIGN(PAGE_SIZE);
18338 @@ -82,28 +102,71 @@ SECTIONS
18339 IRQENTRY_TEXT
18340 *(.fixup)
18341 *(.gnu.warning)
18342 - /* End of text section */
18343 - _etext = .;
18344 } :text = 0x9090
18345
18346 - NOTES :text :note
18347 + . += __KERNEL_TEXT_OFFSET;
18348 +
18349 +#ifdef CONFIG_X86_32
18350 + . = ALIGN(PAGE_SIZE);
18351 + .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
18352 + *(.vmi.rom)
18353 + } :module
18354 +
18355 + . = ALIGN(PAGE_SIZE);
18356 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
18357 +
18358 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
18359 + MODULES_EXEC_VADDR = .;
18360 + BYTE(0)
18361 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
18362 + . = ALIGN(HPAGE_SIZE);
18363 + MODULES_EXEC_END = . - 1;
18364 +#endif
18365 +
18366 + } :module
18367 +#endif
18368
18369 - EXCEPTION_TABLE(16) :text = 0x9090
18370 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
18371 + /* End of text section */
18372 + _etext = . - __KERNEL_TEXT_OFFSET;
18373 + }
18374 +
18375 +#ifdef CONFIG_X86_32
18376 + . = ALIGN(PAGE_SIZE);
18377 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
18378 + *(.idt)
18379 + . = ALIGN(PAGE_SIZE);
18380 + *(.empty_zero_page)
18381 + *(.swapper_pg_fixmap)
18382 + *(.swapper_pg_pmd)
18383 + *(.swapper_pg_dir)
18384 + *(.trampoline_pg_dir)
18385 + } :rodata
18386 +#endif
18387 +
18388 + . = ALIGN(PAGE_SIZE);
18389 + NOTES :rodata :note
18390 +
18391 + EXCEPTION_TABLE(16) :rodata
18392
18393 RO_DATA(PAGE_SIZE)
18394
18395 /* Data */
18396 .data : AT(ADDR(.data) - LOAD_OFFSET) {
18397 +
18398 +#ifdef CONFIG_PAX_KERNEXEC
18399 + . = ALIGN(HPAGE_SIZE);
18400 +#else
18401 + . = ALIGN(PAGE_SIZE);
18402 +#endif
18403 +
18404 /* Start of data section */
18405 _sdata = .;
18406
18407 /* init_task */
18408 INIT_TASK_DATA(THREAD_SIZE)
18409
18410 -#ifdef CONFIG_X86_32
18411 - /* 32 bit has nosave before _edata */
18412 NOSAVE_DATA
18413 -#endif
18414
18415 PAGE_ALIGNED_DATA(PAGE_SIZE)
18416
18417 @@ -112,6 +175,8 @@ SECTIONS
18418 DATA_DATA
18419 CONSTRUCTORS
18420
18421 + jiffies = jiffies_64;
18422 +
18423 /* rarely changed data like cpu maps */
18424 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
18425
18426 @@ -166,12 +231,6 @@ SECTIONS
18427 }
18428 vgetcpu_mode = VVIRT(.vgetcpu_mode);
18429
18430 - . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
18431 - .jiffies : AT(VLOAD(.jiffies)) {
18432 - *(.jiffies)
18433 - }
18434 - jiffies = VVIRT(.jiffies);
18435 -
18436 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
18437 *(.vsyscall_3)
18438 }
18439 @@ -187,12 +246,19 @@ SECTIONS
18440 #endif /* CONFIG_X86_64 */
18441
18442 /* Init code and data - will be freed after init */
18443 - . = ALIGN(PAGE_SIZE);
18444 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
18445 + BYTE(0)
18446 +
18447 +#ifdef CONFIG_PAX_KERNEXEC
18448 + . = ALIGN(HPAGE_SIZE);
18449 +#else
18450 + . = ALIGN(PAGE_SIZE);
18451 +#endif
18452 +
18453 __init_begin = .; /* paired with __init_end */
18454 - }
18455 + } :init.begin
18456
18457 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
18458 +#ifdef CONFIG_SMP
18459 /*
18460 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
18461 * output PHDR, so the next output section - .init.text - should
18462 @@ -201,12 +267,27 @@ SECTIONS
18463 PERCPU_VADDR(0, :percpu)
18464 #endif
18465
18466 - INIT_TEXT_SECTION(PAGE_SIZE)
18467 -#ifdef CONFIG_X86_64
18468 - :init
18469 -#endif
18470 + . = ALIGN(PAGE_SIZE);
18471 + init_begin = .;
18472 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
18473 + VMLINUX_SYMBOL(_sinittext) = .;
18474 + INIT_TEXT
18475 + VMLINUX_SYMBOL(_einittext) = .;
18476 + . = ALIGN(PAGE_SIZE);
18477 + } :text.init
18478
18479 - INIT_DATA_SECTION(16)
18480 + /*
18481 + * .exit.text is discard at runtime, not link time, to deal with
18482 + * references from .altinstructions and .eh_frame
18483 + */
18484 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18485 + EXIT_TEXT
18486 + . = ALIGN(16);
18487 + } :text.exit
18488 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
18489 +
18490 + . = ALIGN(PAGE_SIZE);
18491 + INIT_DATA_SECTION(16) :init
18492
18493 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
18494 __x86_cpu_dev_start = .;
18495 @@ -232,19 +313,11 @@ SECTIONS
18496 *(.altinstr_replacement)
18497 }
18498
18499 - /*
18500 - * .exit.text is discard at runtime, not link time, to deal with
18501 - * references from .altinstructions and .eh_frame
18502 - */
18503 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
18504 - EXIT_TEXT
18505 - }
18506 -
18507 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
18508 EXIT_DATA
18509 }
18510
18511 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
18512 +#ifndef CONFIG_SMP
18513 PERCPU(PAGE_SIZE)
18514 #endif
18515
18516 @@ -267,12 +340,6 @@ SECTIONS
18517 . = ALIGN(PAGE_SIZE);
18518 }
18519
18520 -#ifdef CONFIG_X86_64
18521 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
18522 - NOSAVE_DATA
18523 - }
18524 -#endif
18525 -
18526 /* BSS */
18527 . = ALIGN(PAGE_SIZE);
18528 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
18529 @@ -288,6 +355,7 @@ SECTIONS
18530 __brk_base = .;
18531 . += 64 * 1024; /* 64k alignment slop space */
18532 *(.brk_reservation) /* areas brk users have reserved */
18533 + . = ALIGN(HPAGE_SIZE);
18534 __brk_limit = .;
18535 }
18536
18537 @@ -316,13 +384,12 @@ SECTIONS
18538 * for the boot processor.
18539 */
18540 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
18541 -INIT_PER_CPU(gdt_page);
18542 INIT_PER_CPU(irq_stack_union);
18543
18544 /*
18545 * Build-time check on the image size:
18546 */
18547 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
18548 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
18549 "kernel image bigger than KERNEL_IMAGE_SIZE");
18550
18551 #ifdef CONFIG_SMP
18552 diff -urNp linux-2.6.32.45/arch/x86/kernel/vsyscall_64.c linux-2.6.32.45/arch/x86/kernel/vsyscall_64.c
18553 --- linux-2.6.32.45/arch/x86/kernel/vsyscall_64.c 2011-03-27 14:31:47.000000000 -0400
18554 +++ linux-2.6.32.45/arch/x86/kernel/vsyscall_64.c 2011-04-23 12:56:10.000000000 -0400
18555 @@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wa
18556
18557 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
18558 /* copy vsyscall data */
18559 + strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
18560 vsyscall_gtod_data.clock.vread = clock->vread;
18561 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
18562 vsyscall_gtod_data.clock.mask = clock->mask;
18563 @@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s
18564 We do this here because otherwise user space would do it on
18565 its own in a likely inferior way (no access to jiffies).
18566 If you don't like it pass NULL. */
18567 - if (tcache && tcache->blob[0] == (j = __jiffies)) {
18568 + if (tcache && tcache->blob[0] == (j = jiffies)) {
18569 p = tcache->blob[1];
18570 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
18571 /* Load per CPU data from RDTSCP */
18572 diff -urNp linux-2.6.32.45/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.32.45/arch/x86/kernel/x8664_ksyms_64.c
18573 --- linux-2.6.32.45/arch/x86/kernel/x8664_ksyms_64.c 2011-03-27 14:31:47.000000000 -0400
18574 +++ linux-2.6.32.45/arch/x86/kernel/x8664_ksyms_64.c 2011-04-17 15:56:46.000000000 -0400
18575 @@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
18576
18577 EXPORT_SYMBOL(copy_user_generic);
18578 EXPORT_SYMBOL(__copy_user_nocache);
18579 -EXPORT_SYMBOL(copy_from_user);
18580 -EXPORT_SYMBOL(copy_to_user);
18581 EXPORT_SYMBOL(__copy_from_user_inatomic);
18582
18583 EXPORT_SYMBOL(copy_page);
18584 diff -urNp linux-2.6.32.45/arch/x86/kernel/xsave.c linux-2.6.32.45/arch/x86/kernel/xsave.c
18585 --- linux-2.6.32.45/arch/x86/kernel/xsave.c 2011-03-27 14:31:47.000000000 -0400
18586 +++ linux-2.6.32.45/arch/x86/kernel/xsave.c 2011-04-17 15:56:46.000000000 -0400
18587 @@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_
18588 fx_sw_user->xstate_size > fx_sw_user->extended_size)
18589 return -1;
18590
18591 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
18592 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
18593 fx_sw_user->extended_size -
18594 FP_XSTATE_MAGIC2_SIZE));
18595 /*
18596 @@ -196,7 +196,7 @@ fx_only:
18597 * the other extended state.
18598 */
18599 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
18600 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
18601 + return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
18602 }
18603
18604 /*
18605 @@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf
18606 if (task_thread_info(tsk)->status & TS_XSAVE)
18607 err = restore_user_xstate(buf);
18608 else
18609 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
18610 + err = fxrstor_checking((struct i387_fxsave_struct __user *)
18611 buf);
18612 if (unlikely(err)) {
18613 /*
18614 diff -urNp linux-2.6.32.45/arch/x86/kvm/emulate.c linux-2.6.32.45/arch/x86/kvm/emulate.c
18615 --- linux-2.6.32.45/arch/x86/kvm/emulate.c 2011-03-27 14:31:47.000000000 -0400
18616 +++ linux-2.6.32.45/arch/x86/kvm/emulate.c 2011-04-17 15:56:46.000000000 -0400
18617 @@ -81,8 +81,8 @@
18618 #define Src2CL (1<<29)
18619 #define Src2ImmByte (2<<29)
18620 #define Src2One (3<<29)
18621 -#define Src2Imm16 (4<<29)
18622 -#define Src2Mask (7<<29)
18623 +#define Src2Imm16 (4U<<29)
18624 +#define Src2Mask (7U<<29)
18625
18626 enum {
18627 Group1_80, Group1_81, Group1_82, Group1_83,
18628 @@ -411,6 +411,7 @@ static u32 group2_table[] = {
18629
18630 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
18631 do { \
18632 + unsigned long _tmp; \
18633 __asm__ __volatile__ ( \
18634 _PRE_EFLAGS("0", "4", "2") \
18635 _op _suffix " %"_x"3,%1; " \
18636 @@ -424,8 +425,6 @@ static u32 group2_table[] = {
18637 /* Raw emulation: instruction has two explicit operands. */
18638 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
18639 do { \
18640 - unsigned long _tmp; \
18641 - \
18642 switch ((_dst).bytes) { \
18643 case 2: \
18644 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
18645 @@ -441,7 +440,6 @@ static u32 group2_table[] = {
18646
18647 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
18648 do { \
18649 - unsigned long _tmp; \
18650 switch ((_dst).bytes) { \
18651 case 1: \
18652 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
18653 diff -urNp linux-2.6.32.45/arch/x86/kvm/lapic.c linux-2.6.32.45/arch/x86/kvm/lapic.c
18654 --- linux-2.6.32.45/arch/x86/kvm/lapic.c 2011-03-27 14:31:47.000000000 -0400
18655 +++ linux-2.6.32.45/arch/x86/kvm/lapic.c 2011-04-17 15:56:46.000000000 -0400
18656 @@ -52,7 +52,7 @@
18657 #define APIC_BUS_CYCLE_NS 1
18658
18659 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
18660 -#define apic_debug(fmt, arg...)
18661 +#define apic_debug(fmt, arg...) do {} while (0)
18662
18663 #define APIC_LVT_NUM 6
18664 /* 14 is the version for Xeon and Pentium 8.4.8*/
18665 diff -urNp linux-2.6.32.45/arch/x86/kvm/paging_tmpl.h linux-2.6.32.45/arch/x86/kvm/paging_tmpl.h
18666 --- linux-2.6.32.45/arch/x86/kvm/paging_tmpl.h 2011-03-27 14:31:47.000000000 -0400
18667 +++ linux-2.6.32.45/arch/x86/kvm/paging_tmpl.h 2011-05-16 21:46:57.000000000 -0400
18668 @@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_
18669 int level = PT_PAGE_TABLE_LEVEL;
18670 unsigned long mmu_seq;
18671
18672 + pax_track_stack();
18673 +
18674 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
18675 kvm_mmu_audit(vcpu, "pre page fault");
18676
18677 diff -urNp linux-2.6.32.45/arch/x86/kvm/svm.c linux-2.6.32.45/arch/x86/kvm/svm.c
18678 --- linux-2.6.32.45/arch/x86/kvm/svm.c 2011-03-27 14:31:47.000000000 -0400
18679 +++ linux-2.6.32.45/arch/x86/kvm/svm.c 2011-08-05 20:33:55.000000000 -0400
18680 @@ -2485,7 +2485,11 @@ static void reload_tss(struct kvm_vcpu *
18681 int cpu = raw_smp_processor_id();
18682
18683 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
18684 +
18685 + pax_open_kernel();
18686 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
18687 + pax_close_kernel();
18688 +
18689 load_TR_desc();
18690 }
18691
18692 @@ -2946,7 +2950,7 @@ static bool svm_gb_page_enable(void)
18693 return true;
18694 }
18695
18696 -static struct kvm_x86_ops svm_x86_ops = {
18697 +static const struct kvm_x86_ops svm_x86_ops = {
18698 .cpu_has_kvm_support = has_svm,
18699 .disabled_by_bios = is_disabled,
18700 .hardware_setup = svm_hardware_setup,
18701 diff -urNp linux-2.6.32.45/arch/x86/kvm/vmx.c linux-2.6.32.45/arch/x86/kvm/vmx.c
18702 --- linux-2.6.32.45/arch/x86/kvm/vmx.c 2011-03-27 14:31:47.000000000 -0400
18703 +++ linux-2.6.32.45/arch/x86/kvm/vmx.c 2011-05-04 17:56:20.000000000 -0400
18704 @@ -570,7 +570,11 @@ static void reload_tss(void)
18705
18706 kvm_get_gdt(&gdt);
18707 descs = (void *)gdt.base;
18708 +
18709 + pax_open_kernel();
18710 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
18711 + pax_close_kernel();
18712 +
18713 load_TR_desc();
18714 }
18715
18716 @@ -1409,8 +1413,11 @@ static __init int hardware_setup(void)
18717 if (!cpu_has_vmx_flexpriority())
18718 flexpriority_enabled = 0;
18719
18720 - if (!cpu_has_vmx_tpr_shadow())
18721 - kvm_x86_ops->update_cr8_intercept = NULL;
18722 + if (!cpu_has_vmx_tpr_shadow()) {
18723 + pax_open_kernel();
18724 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
18725 + pax_close_kernel();
18726 + }
18727
18728 if (enable_ept && !cpu_has_vmx_ept_2m_page())
18729 kvm_disable_largepages();
18730 @@ -2361,7 +2368,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
18731 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
18732
18733 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
18734 - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
18735 + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
18736 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
18737 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
18738 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
18739 @@ -3717,6 +3724,12 @@ static void vmx_vcpu_run(struct kvm_vcpu
18740 "jmp .Lkvm_vmx_return \n\t"
18741 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
18742 ".Lkvm_vmx_return: "
18743 +
18744 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18745 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
18746 + ".Lkvm_vmx_return2: "
18747 +#endif
18748 +
18749 /* Save guest registers, load host registers, keep flags */
18750 "xchg %0, (%%"R"sp) \n\t"
18751 "mov %%"R"ax, %c[rax](%0) \n\t"
18752 @@ -3763,8 +3776,13 @@ static void vmx_vcpu_run(struct kvm_vcpu
18753 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
18754 #endif
18755 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
18756 +
18757 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18758 + ,[cs]"i"(__KERNEL_CS)
18759 +#endif
18760 +
18761 : "cc", "memory"
18762 - , R"bx", R"di", R"si"
18763 + , R"ax", R"bx", R"di", R"si"
18764 #ifdef CONFIG_X86_64
18765 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
18766 #endif
18767 @@ -3781,7 +3799,16 @@ static void vmx_vcpu_run(struct kvm_vcpu
18768 if (vmx->rmode.irq.pending)
18769 fixup_rmode_irq(vmx);
18770
18771 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
18772 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
18773 +
18774 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18775 + loadsegment(fs, __KERNEL_PERCPU);
18776 +#endif
18777 +
18778 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18779 + __set_fs(current_thread_info()->addr_limit);
18780 +#endif
18781 +
18782 vmx->launched = 1;
18783
18784 vmx_complete_interrupts(vmx);
18785 @@ -3956,7 +3983,7 @@ static bool vmx_gb_page_enable(void)
18786 return false;
18787 }
18788
18789 -static struct kvm_x86_ops vmx_x86_ops = {
18790 +static const struct kvm_x86_ops vmx_x86_ops = {
18791 .cpu_has_kvm_support = cpu_has_kvm_support,
18792 .disabled_by_bios = vmx_disabled_by_bios,
18793 .hardware_setup = hardware_setup,
18794 diff -urNp linux-2.6.32.45/arch/x86/kvm/x86.c linux-2.6.32.45/arch/x86/kvm/x86.c
18795 --- linux-2.6.32.45/arch/x86/kvm/x86.c 2011-05-10 22:12:01.000000000 -0400
18796 +++ linux-2.6.32.45/arch/x86/kvm/x86.c 2011-05-10 22:12:26.000000000 -0400
18797 @@ -82,7 +82,7 @@ static void update_cr8_intercept(struct
18798 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
18799 struct kvm_cpuid_entry2 __user *entries);
18800
18801 -struct kvm_x86_ops *kvm_x86_ops;
18802 +const struct kvm_x86_ops *kvm_x86_ops;
18803 EXPORT_SYMBOL_GPL(kvm_x86_ops);
18804
18805 int ignore_msrs = 0;
18806 @@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
18807 struct kvm_cpuid2 *cpuid,
18808 struct kvm_cpuid_entry2 __user *entries)
18809 {
18810 - int r;
18811 + int r, i;
18812
18813 r = -E2BIG;
18814 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
18815 goto out;
18816 r = -EFAULT;
18817 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
18818 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18819 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18820 goto out;
18821 + for (i = 0; i < cpuid->nent; ++i) {
18822 + struct kvm_cpuid_entry2 cpuid_entry;
18823 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
18824 + goto out;
18825 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
18826 + }
18827 vcpu->arch.cpuid_nent = cpuid->nent;
18828 kvm_apic_set_version(vcpu);
18829 return 0;
18830 @@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
18831 struct kvm_cpuid2 *cpuid,
18832 struct kvm_cpuid_entry2 __user *entries)
18833 {
18834 - int r;
18835 + int r, i;
18836
18837 vcpu_load(vcpu);
18838 r = -E2BIG;
18839 if (cpuid->nent < vcpu->arch.cpuid_nent)
18840 goto out;
18841 r = -EFAULT;
18842 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
18843 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18844 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18845 goto out;
18846 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
18847 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
18848 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
18849 + goto out;
18850 + }
18851 return 0;
18852
18853 out:
18854 @@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
18855 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
18856 struct kvm_interrupt *irq)
18857 {
18858 - if (irq->irq < 0 || irq->irq >= 256)
18859 + if (irq->irq >= 256)
18860 return -EINVAL;
18861 if (irqchip_in_kernel(vcpu->kvm))
18862 return -ENXIO;
18863 @@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cp
18864 .notifier_call = kvmclock_cpufreq_notifier
18865 };
18866
18867 -int kvm_arch_init(void *opaque)
18868 +int kvm_arch_init(const void *opaque)
18869 {
18870 int r, cpu;
18871 - struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
18872 + const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
18873
18874 if (kvm_x86_ops) {
18875 printk(KERN_ERR "kvm: already loaded the other module\n");
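
The two x86.c hunks above replace one bulk copy_from_user()/copy_to_user() of the whole cpuid table with an access_ok() check on the user buffer followed by a per-entry __copy_from_user()/__copy_to_user() loop through a stack-local entry, so each individual copy is bounded by sizeof(struct kvm_cpuid_entry2). A standalone user-space sketch of that shape, with memcpy() standing in for the user-copy primitives and all names illustrative:

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct cpuid_entry {
	unsigned int function, index, eax, ebx, ecx, edx;
};

#define MAX_ENTRIES 80	/* stand-in for KVM_MAX_CPUID_ENTRIES */

static struct cpuid_entry table[MAX_ENTRIES];

static int set_entries(const struct cpuid_entry *src, size_t src_len, size_t nent)
{
	size_t i;

	if (nent > MAX_ENTRIES)
		return -E2BIG;
	/* one up-front range check on the source buffer, as access_ok() does */
	if (src_len < nent * sizeof(*src))
		return -EFAULT;

	for (i = 0; i < nent; i++) {
		struct cpuid_entry tmp;

		/* one bounded, entry-sized copy per iteration instead of one bulk copy */
		memcpy(&tmp, &src[i], sizeof(tmp));
		table[i] = tmp;
	}
	return 0;
}

int main(void)
{
	struct cpuid_entry src[2] = { { .function = 0 }, { .function = 1 } };

	printf("set_entries: %d\n", set_entries(src, sizeof(src), 2));
	return 0;
}
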
18876 diff -urNp linux-2.6.32.45/arch/x86/lguest/boot.c linux-2.6.32.45/arch/x86/lguest/boot.c
18877 --- linux-2.6.32.45/arch/x86/lguest/boot.c 2011-03-27 14:31:47.000000000 -0400
18878 +++ linux-2.6.32.45/arch/x86/lguest/boot.c 2011-08-05 20:33:55.000000000 -0400
18879 @@ -1172,9 +1172,10 @@ static __init int early_put_chars(u32 vt
18880 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
18881 * Launcher to reboot us.
18882 */
18883 -static void lguest_restart(char *reason)
18884 +static __noreturn void lguest_restart(char *reason)
18885 {
18886 kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
18887 + BUG();
18888 }
18889
18890 /*G:050
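
The lguest hunk above marks the restart handler __noreturn and adds BUG() after the shutdown hypercall, so the annotation remains truthful even if the hypercall were ever to return. A small user-space sketch of that pairing (names and the reboot stand-in are illustrative):

#include <stdio.h>
#include <stdlib.h>

static void shutdown_hypercall(void)
{
	/* stand-in for kvm_hypercall2(LHCALL_SHUTDOWN, ...); normally never returns */
}

static _Noreturn void restart(const char *reason)
{
	printf("restarting: %s\n", reason);
	shutdown_hypercall();
	abort();	/* stand-in for BUG(): keeps the _Noreturn promise honest */
}

int main(void)
{
	restart("demo");
}
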
18891 diff -urNp linux-2.6.32.45/arch/x86/lib/atomic64_32.c linux-2.6.32.45/arch/x86/lib/atomic64_32.c
18892 --- linux-2.6.32.45/arch/x86/lib/atomic64_32.c 2011-03-27 14:31:47.000000000 -0400
18893 +++ linux-2.6.32.45/arch/x86/lib/atomic64_32.c 2011-05-04 17:56:28.000000000 -0400
18894 @@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u6
18895 }
18896 EXPORT_SYMBOL(atomic64_cmpxchg);
18897
18898 +u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
18899 +{
18900 + return cmpxchg8b(&ptr->counter, old_val, new_val);
18901 +}
18902 +EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
18903 +
18904 /**
18905 * atomic64_xchg - xchg atomic64 variable
18906 * @ptr: pointer to type atomic64_t
18907 @@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 n
18908 EXPORT_SYMBOL(atomic64_xchg);
18909
18910 /**
18911 + * atomic64_xchg_unchecked - xchg atomic64 variable
18912 + * @ptr: pointer to type atomic64_unchecked_t
18913 + * @new_val: value to assign
18914 + *
18915 + * Atomically xchgs the value of @ptr to @new_val and returns
18916 + * the old value.
18917 + */
18918 +u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
18919 +{
18920 + /*
18921 + * Try first with a (possibly incorrect) assumption about
18922 + * what we have there. We'll do two loops most likely,
18923 + * but we'll get an ownership MESI transaction straight away
18924 + * instead of a read transaction followed by a
18925 + * flush-for-ownership transaction:
18926 + */
18927 + u64 old_val, real_val = 0;
18928 +
18929 + do {
18930 + old_val = real_val;
18931 +
18932 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
18933 +
18934 + } while (real_val != old_val);
18935 +
18936 + return old_val;
18937 +}
18938 +EXPORT_SYMBOL(atomic64_xchg_unchecked);
18939 +
18940 +/**
18941 * atomic64_set - set atomic64 variable
18942 * @ptr: pointer to type atomic64_t
18943 * @new_val: value to assign
18944 @@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 n
18945 EXPORT_SYMBOL(atomic64_set);
18946
18947 /**
18948 -EXPORT_SYMBOL(atomic64_read);
18949 + * atomic64_unchecked_set - set atomic64 variable
18950 + * @ptr: pointer to type atomic64_unchecked_t
18951 + * @new_val: value to assign
18952 + *
18953 + * Atomically sets the value of @ptr to @new_val.
18954 + */
18955 +void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
18956 +{
18957 + atomic64_xchg_unchecked(ptr, new_val);
18958 +}
18959 +EXPORT_SYMBOL(atomic64_set_unchecked);
18960 +
18961 +/**
18962 * atomic64_add_return - add and return
18963 * @delta: integer value to add
18964 * @ptr: pointer to type atomic64_t
18965 @@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 del
18966 }
18967 EXPORT_SYMBOL(atomic64_add_return);
18968
18969 +/**
18970 + * atomic64_add_return_unchecked - add and return
18971 + * @delta: integer value to add
18972 + * @ptr: pointer to type atomic64_unchecked_t
18973 + *
18974 + * Atomically adds @delta to @ptr and returns @delta + *@ptr
18975 + */
18976 +noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18977 +{
18978 + /*
18979 + * Try first with a (possibly incorrect) assumption about
18980 + * what we have there. We'll do two loops most likely,
18981 + * but we'll get an ownership MESI transaction straight away
18982 + * instead of a read transaction followed by a
18983 + * flush-for-ownership transaction:
18984 + */
18985 + u64 old_val, new_val, real_val = 0;
18986 +
18987 + do {
18988 + old_val = real_val;
18989 + new_val = old_val + delta;
18990 +
18991 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
18992 +
18993 + } while (real_val != old_val);
18994 +
18995 + return new_val;
18996 +}
18997 +EXPORT_SYMBOL(atomic64_add_return_unchecked);
18998 +
18999 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
19000 {
19001 return atomic64_add_return(-delta, ptr);
19002 }
19003 EXPORT_SYMBOL(atomic64_sub_return);
19004
19005 +u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
19006 +{
19007 + return atomic64_add_return_unchecked(-delta, ptr);
19008 +}
19009 +EXPORT_SYMBOL(atomic64_sub_return_unchecked);
19010 +
19011 u64 atomic64_inc_return(atomic64_t *ptr)
19012 {
19013 return atomic64_add_return(1, ptr);
19014 }
19015 EXPORT_SYMBOL(atomic64_inc_return);
19016
19017 +u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
19018 +{
19019 + return atomic64_add_return_unchecked(1, ptr);
19020 +}
19021 +EXPORT_SYMBOL(atomic64_inc_return_unchecked);
19022 +
19023 u64 atomic64_dec_return(atomic64_t *ptr)
19024 {
19025 return atomic64_sub_return(1, ptr);
19026 }
19027 EXPORT_SYMBOL(atomic64_dec_return);
19028
19029 +u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
19030 +{
19031 + return atomic64_sub_return_unchecked(1, ptr);
19032 +}
19033 +EXPORT_SYMBOL(atomic64_dec_return_unchecked);
19034 +
19035 /**
19036 * atomic64_add - add integer to atomic64 variable
19037 * @delta: integer value to add
19038 @@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t
19039 EXPORT_SYMBOL(atomic64_add);
19040
19041 /**
19042 + * atomic64_add_unchecked - add integer to atomic64 variable
19043 + * @delta: integer value to add
19044 + * @ptr: pointer to type atomic64_unchecked_t
19045 + *
19046 + * Atomically adds @delta to @ptr.
19047 + */
19048 +void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
19049 +{
19050 + atomic64_add_return_unchecked(delta, ptr);
19051 +}
19052 +EXPORT_SYMBOL(atomic64_add_unchecked);
19053 +
19054 +/**
19055 * atomic64_sub - subtract the atomic64 variable
19056 * @delta: integer value to subtract
19057 * @ptr: pointer to type atomic64_t
19058 @@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t
19059 EXPORT_SYMBOL(atomic64_sub);
19060
19061 /**
19062 + * atomic64_sub_unchecked - subtract the atomic64 variable
19063 + * @delta: integer value to subtract
19064 + * @ptr: pointer to type atomic64_unchecked_t
19065 + *
19066 + * Atomically subtracts @delta from @ptr.
19067 + */
19068 +void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
19069 +{
19070 + atomic64_add_unchecked(-delta, ptr);
19071 +}
19072 +EXPORT_SYMBOL(atomic64_sub_unchecked);
19073 +
19074 +/**
19075 * atomic64_sub_and_test - subtract value from variable and test result
19076 * @delta: integer value to subtract
19077 * @ptr: pointer to type atomic64_t
19078 @@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
19079 EXPORT_SYMBOL(atomic64_inc);
19080
19081 /**
19082 + * atomic64_inc_unchecked - increment atomic64 variable
19083 + * @ptr: pointer to type atomic64_unchecked_t
19084 + *
19085 + * Atomically increments @ptr by 1.
19086 + */
19087 +void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
19088 +{
19089 + atomic64_add_unchecked(1, ptr);
19090 +}
19091 +EXPORT_SYMBOL(atomic64_inc_unchecked);
19092 +
19093 +/**
19094 * atomic64_dec - decrement atomic64 variable
19095 * @ptr: pointer to type atomic64_t
19096 *
19097 @@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
19098 EXPORT_SYMBOL(atomic64_dec);
19099
19100 /**
19101 + * atomic64_dec_unchecked - decrement atomic64 variable
19102 + * @ptr: pointer to type atomic64_unchecked_t
19103 + *
19104 + * Atomically decrements @ptr by 1.
19105 + */
19106 +void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
19107 +{
19108 + atomic64_sub_unchecked(1, ptr);
19109 +}
19110 +EXPORT_SYMBOL(atomic64_dec_unchecked);
19111 +
19112 +/**
19113 * atomic64_dec_and_test - decrement and test
19114 * @ptr: pointer to type atomic64_t
19115 *
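
Every atomic64_*_unchecked helper added above reduces to the same retry pattern spelled out in its comments: start from a guess at the current value, compute the new value, and repeat cmpxchg8b until the guess matches what is actually stored. A minimal user-space C11 sketch of that loop (function and variable names are illustrative, not from the patch):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* add-and-return built on compare-and-swap, mirroring
 * atomic64_add_return_unchecked() in the hunk above */
static uint64_t add_return64(_Atomic uint64_t *p, uint64_t delta)
{
	uint64_t old = 0, new;	/* start from a guess; the CAS corrects it */

	do {
		new = old + delta;
		/* on failure, atomic_compare_exchange_weak() reloads 'old'
		 * with the value currently stored in *p, and we retry */
	} while (!atomic_compare_exchange_weak(p, &old, new));

	return new;
}

int main(void)
{
	_Atomic uint64_t v = 40;

	printf("%llu\n", (unsigned long long)add_return64(&v, 2));	/* prints 42 */
	return 0;
}
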
19116 diff -urNp linux-2.6.32.45/arch/x86/lib/checksum_32.S linux-2.6.32.45/arch/x86/lib/checksum_32.S
19117 --- linux-2.6.32.45/arch/x86/lib/checksum_32.S 2011-03-27 14:31:47.000000000 -0400
19118 +++ linux-2.6.32.45/arch/x86/lib/checksum_32.S 2011-04-17 15:56:46.000000000 -0400
19119 @@ -28,7 +28,8 @@
19120 #include <linux/linkage.h>
19121 #include <asm/dwarf2.h>
19122 #include <asm/errno.h>
19123 -
19124 +#include <asm/segment.h>
19125 +
19126 /*
19127 * computes a partial checksum, e.g. for TCP/UDP fragments
19128 */
19129 @@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (
19130
19131 #define ARGBASE 16
19132 #define FP 12
19133 -
19134 -ENTRY(csum_partial_copy_generic)
19135 +
19136 +ENTRY(csum_partial_copy_generic_to_user)
19137 CFI_STARTPROC
19138 +
19139 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19140 + pushl %gs
19141 + CFI_ADJUST_CFA_OFFSET 4
19142 + popl %es
19143 + CFI_ADJUST_CFA_OFFSET -4
19144 + jmp csum_partial_copy_generic
19145 +#endif
19146 +
19147 +ENTRY(csum_partial_copy_generic_from_user)
19148 +
19149 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19150 + pushl %gs
19151 + CFI_ADJUST_CFA_OFFSET 4
19152 + popl %ds
19153 + CFI_ADJUST_CFA_OFFSET -4
19154 +#endif
19155 +
19156 +ENTRY(csum_partial_copy_generic)
19157 subl $4,%esp
19158 CFI_ADJUST_CFA_OFFSET 4
19159 pushl %edi
19160 @@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
19161 jmp 4f
19162 SRC(1: movw (%esi), %bx )
19163 addl $2, %esi
19164 -DST( movw %bx, (%edi) )
19165 +DST( movw %bx, %es:(%edi) )
19166 addl $2, %edi
19167 addw %bx, %ax
19168 adcl $0, %eax
19169 @@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
19170 SRC(1: movl (%esi), %ebx )
19171 SRC( movl 4(%esi), %edx )
19172 adcl %ebx, %eax
19173 -DST( movl %ebx, (%edi) )
19174 +DST( movl %ebx, %es:(%edi) )
19175 adcl %edx, %eax
19176 -DST( movl %edx, 4(%edi) )
19177 +DST( movl %edx, %es:4(%edi) )
19178
19179 SRC( movl 8(%esi), %ebx )
19180 SRC( movl 12(%esi), %edx )
19181 adcl %ebx, %eax
19182 -DST( movl %ebx, 8(%edi) )
19183 +DST( movl %ebx, %es:8(%edi) )
19184 adcl %edx, %eax
19185 -DST( movl %edx, 12(%edi) )
19186 +DST( movl %edx, %es:12(%edi) )
19187
19188 SRC( movl 16(%esi), %ebx )
19189 SRC( movl 20(%esi), %edx )
19190 adcl %ebx, %eax
19191 -DST( movl %ebx, 16(%edi) )
19192 +DST( movl %ebx, %es:16(%edi) )
19193 adcl %edx, %eax
19194 -DST( movl %edx, 20(%edi) )
19195 +DST( movl %edx, %es:20(%edi) )
19196
19197 SRC( movl 24(%esi), %ebx )
19198 SRC( movl 28(%esi), %edx )
19199 adcl %ebx, %eax
19200 -DST( movl %ebx, 24(%edi) )
19201 +DST( movl %ebx, %es:24(%edi) )
19202 adcl %edx, %eax
19203 -DST( movl %edx, 28(%edi) )
19204 +DST( movl %edx, %es:28(%edi) )
19205
19206 lea 32(%esi), %esi
19207 lea 32(%edi), %edi
19208 @@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
19209 shrl $2, %edx # This clears CF
19210 SRC(3: movl (%esi), %ebx )
19211 adcl %ebx, %eax
19212 -DST( movl %ebx, (%edi) )
19213 +DST( movl %ebx, %es:(%edi) )
19214 lea 4(%esi), %esi
19215 lea 4(%edi), %edi
19216 dec %edx
19217 @@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
19218 jb 5f
19219 SRC( movw (%esi), %cx )
19220 leal 2(%esi), %esi
19221 -DST( movw %cx, (%edi) )
19222 +DST( movw %cx, %es:(%edi) )
19223 leal 2(%edi), %edi
19224 je 6f
19225 shll $16,%ecx
19226 SRC(5: movb (%esi), %cl )
19227 -DST( movb %cl, (%edi) )
19228 +DST( movb %cl, %es:(%edi) )
19229 6: addl %ecx, %eax
19230 adcl $0, %eax
19231 7:
19232 @@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
19233
19234 6001:
19235 movl ARGBASE+20(%esp), %ebx # src_err_ptr
19236 - movl $-EFAULT, (%ebx)
19237 + movl $-EFAULT, %ss:(%ebx)
19238
19239 # zero the complete destination - computing the rest
19240 # is too much work
19241 @@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
19242
19243 6002:
19244 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19245 - movl $-EFAULT,(%ebx)
19246 + movl $-EFAULT,%ss:(%ebx)
19247 jmp 5000b
19248
19249 .previous
19250
19251 + pushl %ss
19252 + CFI_ADJUST_CFA_OFFSET 4
19253 + popl %ds
19254 + CFI_ADJUST_CFA_OFFSET -4
19255 + pushl %ss
19256 + CFI_ADJUST_CFA_OFFSET 4
19257 + popl %es
19258 + CFI_ADJUST_CFA_OFFSET -4
19259 popl %ebx
19260 CFI_ADJUST_CFA_OFFSET -4
19261 CFI_RESTORE ebx
19262 @@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
19263 CFI_ADJUST_CFA_OFFSET -4
19264 ret
19265 CFI_ENDPROC
19266 -ENDPROC(csum_partial_copy_generic)
19267 +ENDPROC(csum_partial_copy_generic_to_user)
19268
19269 #else
19270
19271 /* Version for PentiumII/PPro */
19272
19273 #define ROUND1(x) \
19274 + nop; nop; nop; \
19275 SRC(movl x(%esi), %ebx ) ; \
19276 addl %ebx, %eax ; \
19277 - DST(movl %ebx, x(%edi) ) ;
19278 + DST(movl %ebx, %es:x(%edi)) ;
19279
19280 #define ROUND(x) \
19281 + nop; nop; nop; \
19282 SRC(movl x(%esi), %ebx ) ; \
19283 adcl %ebx, %eax ; \
19284 - DST(movl %ebx, x(%edi) ) ;
19285 + DST(movl %ebx, %es:x(%edi)) ;
19286
19287 #define ARGBASE 12
19288 -
19289 -ENTRY(csum_partial_copy_generic)
19290 +
19291 +ENTRY(csum_partial_copy_generic_to_user)
19292 CFI_STARTPROC
19293 +
19294 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19295 + pushl %gs
19296 + CFI_ADJUST_CFA_OFFSET 4
19297 + popl %es
19298 + CFI_ADJUST_CFA_OFFSET -4
19299 + jmp csum_partial_copy_generic
19300 +#endif
19301 +
19302 +ENTRY(csum_partial_copy_generic_from_user)
19303 +
19304 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19305 + pushl %gs
19306 + CFI_ADJUST_CFA_OFFSET 4
19307 + popl %ds
19308 + CFI_ADJUST_CFA_OFFSET -4
19309 +#endif
19310 +
19311 +ENTRY(csum_partial_copy_generic)
19312 pushl %ebx
19313 CFI_ADJUST_CFA_OFFSET 4
19314 CFI_REL_OFFSET ebx, 0
19315 @@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
19316 subl %ebx, %edi
19317 lea -1(%esi),%edx
19318 andl $-32,%edx
19319 - lea 3f(%ebx,%ebx), %ebx
19320 + lea 3f(%ebx,%ebx,2), %ebx
19321 testl %esi, %esi
19322 jmp *%ebx
19323 1: addl $64,%esi
19324 @@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
19325 jb 5f
19326 SRC( movw (%esi), %dx )
19327 leal 2(%esi), %esi
19328 -DST( movw %dx, (%edi) )
19329 +DST( movw %dx, %es:(%edi) )
19330 leal 2(%edi), %edi
19331 je 6f
19332 shll $16,%edx
19333 5:
19334 SRC( movb (%esi), %dl )
19335 -DST( movb %dl, (%edi) )
19336 +DST( movb %dl, %es:(%edi) )
19337 6: addl %edx, %eax
19338 adcl $0, %eax
19339 7:
19340 .section .fixup, "ax"
19341 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
19342 - movl $-EFAULT, (%ebx)
19343 + movl $-EFAULT, %ss:(%ebx)
19344 # zero the complete destination (computing the rest is too much work)
19345 movl ARGBASE+8(%esp),%edi # dst
19346 movl ARGBASE+12(%esp),%ecx # len
19347 @@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
19348 rep; stosb
19349 jmp 7b
19350 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19351 - movl $-EFAULT, (%ebx)
19352 + movl $-EFAULT, %ss:(%ebx)
19353 jmp 7b
19354 .previous
19355
19356 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19357 + pushl %ss
19358 + CFI_ADJUST_CFA_OFFSET 4
19359 + popl %ds
19360 + CFI_ADJUST_CFA_OFFSET -4
19361 + pushl %ss
19362 + CFI_ADJUST_CFA_OFFSET 4
19363 + popl %es
19364 + CFI_ADJUST_CFA_OFFSET -4
19365 +#endif
19366 +
19367 popl %esi
19368 CFI_ADJUST_CFA_OFFSET -4
19369 CFI_RESTORE esi
19370 @@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
19371 CFI_RESTORE ebx
19372 ret
19373 CFI_ENDPROC
19374 -ENDPROC(csum_partial_copy_generic)
19375 +ENDPROC(csum_partial_copy_generic_to_user)
19376
19377 #undef ROUND
19378 #undef ROUND1
19379 diff -urNp linux-2.6.32.45/arch/x86/lib/clear_page_64.S linux-2.6.32.45/arch/x86/lib/clear_page_64.S
19380 --- linux-2.6.32.45/arch/x86/lib/clear_page_64.S 2011-03-27 14:31:47.000000000 -0400
19381 +++ linux-2.6.32.45/arch/x86/lib/clear_page_64.S 2011-04-17 15:56:46.000000000 -0400
19382 @@ -43,7 +43,7 @@ ENDPROC(clear_page)
19383
19384 #include <asm/cpufeature.h>
19385
19386 - .section .altinstr_replacement,"ax"
19387 + .section .altinstr_replacement,"a"
19388 1: .byte 0xeb /* jmp <disp8> */
19389 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
19390 2:
19391 diff -urNp linux-2.6.32.45/arch/x86/lib/copy_page_64.S linux-2.6.32.45/arch/x86/lib/copy_page_64.S
19392 --- linux-2.6.32.45/arch/x86/lib/copy_page_64.S 2011-03-27 14:31:47.000000000 -0400
19393 +++ linux-2.6.32.45/arch/x86/lib/copy_page_64.S 2011-04-17 15:56:46.000000000 -0400
19394 @@ -104,7 +104,7 @@ ENDPROC(copy_page)
19395
19396 #include <asm/cpufeature.h>
19397
19398 - .section .altinstr_replacement,"ax"
19399 + .section .altinstr_replacement,"a"
19400 1: .byte 0xeb /* jmp <disp8> */
19401 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
19402 2:
19403 diff -urNp linux-2.6.32.45/arch/x86/lib/copy_user_64.S linux-2.6.32.45/arch/x86/lib/copy_user_64.S
19404 --- linux-2.6.32.45/arch/x86/lib/copy_user_64.S 2011-06-25 12:55:34.000000000 -0400
19405 +++ linux-2.6.32.45/arch/x86/lib/copy_user_64.S 2011-06-25 12:56:37.000000000 -0400
19406 @@ -15,13 +15,14 @@
19407 #include <asm/asm-offsets.h>
19408 #include <asm/thread_info.h>
19409 #include <asm/cpufeature.h>
19410 +#include <asm/pgtable.h>
19411
19412 .macro ALTERNATIVE_JUMP feature,orig,alt
19413 0:
19414 .byte 0xe9 /* 32bit jump */
19415 .long \orig-1f /* by default jump to orig */
19416 1:
19417 - .section .altinstr_replacement,"ax"
19418 + .section .altinstr_replacement,"a"
19419 2: .byte 0xe9 /* near jump with 32bit immediate */
19420 .long \alt-1b /* offset */ /* or alternatively to alt */
19421 .previous
19422 @@ -64,49 +65,19 @@
19423 #endif
19424 .endm
19425
19426 -/* Standard copy_to_user with segment limit checking */
19427 -ENTRY(copy_to_user)
19428 - CFI_STARTPROC
19429 - GET_THREAD_INFO(%rax)
19430 - movq %rdi,%rcx
19431 - addq %rdx,%rcx
19432 - jc bad_to_user
19433 - cmpq TI_addr_limit(%rax),%rcx
19434 - ja bad_to_user
19435 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19436 - CFI_ENDPROC
19437 -ENDPROC(copy_to_user)
19438 -
19439 -/* Standard copy_from_user with segment limit checking */
19440 -ENTRY(copy_from_user)
19441 - CFI_STARTPROC
19442 - GET_THREAD_INFO(%rax)
19443 - movq %rsi,%rcx
19444 - addq %rdx,%rcx
19445 - jc bad_from_user
19446 - cmpq TI_addr_limit(%rax),%rcx
19447 - ja bad_from_user
19448 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19449 - CFI_ENDPROC
19450 -ENDPROC(copy_from_user)
19451 -
19452 ENTRY(copy_user_generic)
19453 CFI_STARTPROC
19454 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19455 CFI_ENDPROC
19456 ENDPROC(copy_user_generic)
19457
19458 -ENTRY(__copy_from_user_inatomic)
19459 - CFI_STARTPROC
19460 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19461 - CFI_ENDPROC
19462 -ENDPROC(__copy_from_user_inatomic)
19463 -
19464 .section .fixup,"ax"
19465 /* must zero dest */
19466 ENTRY(bad_from_user)
19467 bad_from_user:
19468 CFI_STARTPROC
19469 + testl %edx,%edx
19470 + js bad_to_user
19471 movl %edx,%ecx
19472 xorl %eax,%eax
19473 rep
19474 diff -urNp linux-2.6.32.45/arch/x86/lib/copy_user_nocache_64.S linux-2.6.32.45/arch/x86/lib/copy_user_nocache_64.S
19475 --- linux-2.6.32.45/arch/x86/lib/copy_user_nocache_64.S 2011-03-27 14:31:47.000000000 -0400
19476 +++ linux-2.6.32.45/arch/x86/lib/copy_user_nocache_64.S 2011-04-17 15:56:46.000000000 -0400
19477 @@ -14,6 +14,7 @@
19478 #include <asm/current.h>
19479 #include <asm/asm-offsets.h>
19480 #include <asm/thread_info.h>
19481 +#include <asm/pgtable.h>
19482
19483 .macro ALIGN_DESTINATION
19484 #ifdef FIX_ALIGNMENT
19485 @@ -50,6 +51,15 @@
19486 */
19487 ENTRY(__copy_user_nocache)
19488 CFI_STARTPROC
19489 +
19490 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19491 + mov $PAX_USER_SHADOW_BASE,%rcx
19492 + cmp %rcx,%rsi
19493 + jae 1f
19494 + add %rcx,%rsi
19495 +1:
19496 +#endif
19497 +
19498 cmpl $8,%edx
19499 jb 20f /* less then 8 bytes, go to byte copy loop */
19500 ALIGN_DESTINATION
19501 diff -urNp linux-2.6.32.45/arch/x86/lib/csum-wrappers_64.c linux-2.6.32.45/arch/x86/lib/csum-wrappers_64.c
19502 --- linux-2.6.32.45/arch/x86/lib/csum-wrappers_64.c 2011-03-27 14:31:47.000000000 -0400
19503 +++ linux-2.6.32.45/arch/x86/lib/csum-wrappers_64.c 2011-05-04 17:56:20.000000000 -0400
19504 @@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
19505 len -= 2;
19506 }
19507 }
19508 +
19509 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19510 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19511 + src += PAX_USER_SHADOW_BASE;
19512 +#endif
19513 +
19514 isum = csum_partial_copy_generic((__force const void *)src,
19515 dst, len, isum, errp, NULL);
19516 if (unlikely(*errp))
19517 @@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
19518 }
19519
19520 *errp = 0;
19521 +
19522 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19523 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
19524 + dst += PAX_USER_SHADOW_BASE;
19525 +#endif
19526 +
19527 return csum_partial_copy_generic(src, (void __force *)dst,
19528 len, isum, NULL, errp);
19529 }
19530 diff -urNp linux-2.6.32.45/arch/x86/lib/getuser.S linux-2.6.32.45/arch/x86/lib/getuser.S
19531 --- linux-2.6.32.45/arch/x86/lib/getuser.S 2011-03-27 14:31:47.000000000 -0400
19532 +++ linux-2.6.32.45/arch/x86/lib/getuser.S 2011-04-17 15:56:46.000000000 -0400
19533 @@ -33,14 +33,35 @@
19534 #include <asm/asm-offsets.h>
19535 #include <asm/thread_info.h>
19536 #include <asm/asm.h>
19537 +#include <asm/segment.h>
19538 +#include <asm/pgtable.h>
19539 +
19540 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19541 +#define __copyuser_seg gs;
19542 +#else
19543 +#define __copyuser_seg
19544 +#endif
19545
19546 .text
19547 ENTRY(__get_user_1)
19548 CFI_STARTPROC
19549 +
19550 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19551 GET_THREAD_INFO(%_ASM_DX)
19552 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19553 jae bad_get_user
19554 -1: movzb (%_ASM_AX),%edx
19555 +
19556 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19557 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19558 + cmp %_ASM_DX,%_ASM_AX
19559 + jae 1234f
19560 + add %_ASM_DX,%_ASM_AX
19561 +1234:
19562 +#endif
19563 +
19564 +#endif
19565 +
19566 +1: __copyuser_seg movzb (%_ASM_AX),%edx
19567 xor %eax,%eax
19568 ret
19569 CFI_ENDPROC
19570 @@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
19571 ENTRY(__get_user_2)
19572 CFI_STARTPROC
19573 add $1,%_ASM_AX
19574 +
19575 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19576 jc bad_get_user
19577 GET_THREAD_INFO(%_ASM_DX)
19578 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19579 jae bad_get_user
19580 -2: movzwl -1(%_ASM_AX),%edx
19581 +
19582 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19583 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19584 + cmp %_ASM_DX,%_ASM_AX
19585 + jae 1234f
19586 + add %_ASM_DX,%_ASM_AX
19587 +1234:
19588 +#endif
19589 +
19590 +#endif
19591 +
19592 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
19593 xor %eax,%eax
19594 ret
19595 CFI_ENDPROC
19596 @@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
19597 ENTRY(__get_user_4)
19598 CFI_STARTPROC
19599 add $3,%_ASM_AX
19600 +
19601 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19602 jc bad_get_user
19603 GET_THREAD_INFO(%_ASM_DX)
19604 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19605 jae bad_get_user
19606 -3: mov -3(%_ASM_AX),%edx
19607 +
19608 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19609 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19610 + cmp %_ASM_DX,%_ASM_AX
19611 + jae 1234f
19612 + add %_ASM_DX,%_ASM_AX
19613 +1234:
19614 +#endif
19615 +
19616 +#endif
19617 +
19618 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
19619 xor %eax,%eax
19620 ret
19621 CFI_ENDPROC
19622 @@ -80,6 +127,15 @@ ENTRY(__get_user_8)
19623 GET_THREAD_INFO(%_ASM_DX)
19624 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19625 jae bad_get_user
19626 +
19627 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19628 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19629 + cmp %_ASM_DX,%_ASM_AX
19630 + jae 1234f
19631 + add %_ASM_DX,%_ASM_AX
19632 +1234:
19633 +#endif
19634 +
19635 4: movq -7(%_ASM_AX),%_ASM_DX
19636 xor %eax,%eax
19637 ret
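
The getuser.S hunks above apply the same UDEREF address adjustment already seen in csum-wrappers_64.c: if the pointer is still below PAX_USER_SHADOW_BASE, the shadow base is added before the access. A trivial user-space sketch of that conditional rebase (the base value here is illustrative):

#include <stdint.h>
#include <stdio.h>

#define SHADOW_BASE 0x100000UL	/* stand-in for PAX_USER_SHADOW_BASE */

/* addresses below the shadow base are shifted into the shadow mapping;
 * addresses already at or above it are left untouched */
static uintptr_t shadow_rebase(uintptr_t addr)
{
	if (addr < SHADOW_BASE)
		addr += SHADOW_BASE;
	return addr;
}

int main(void)
{
	printf("%#lx -> %#lx\n", 0x1234UL, (unsigned long)shadow_rebase(0x1234UL));
	printf("%#lx -> %#lx\n", 0x200000UL, (unsigned long)shadow_rebase(0x200000UL));
	return 0;
}
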
19638 diff -urNp linux-2.6.32.45/arch/x86/lib/memcpy_64.S linux-2.6.32.45/arch/x86/lib/memcpy_64.S
19639 --- linux-2.6.32.45/arch/x86/lib/memcpy_64.S 2011-03-27 14:31:47.000000000 -0400
19640 +++ linux-2.6.32.45/arch/x86/lib/memcpy_64.S 2011-04-17 15:56:46.000000000 -0400
19641 @@ -128,7 +128,7 @@ ENDPROC(__memcpy)
19642 * It is also a lot simpler. Use this when possible:
19643 */
19644
19645 - .section .altinstr_replacement, "ax"
19646 + .section .altinstr_replacement, "a"
19647 1: .byte 0xeb /* jmp <disp8> */
19648 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
19649 2:
19650 diff -urNp linux-2.6.32.45/arch/x86/lib/memset_64.S linux-2.6.32.45/arch/x86/lib/memset_64.S
19651 --- linux-2.6.32.45/arch/x86/lib/memset_64.S 2011-03-27 14:31:47.000000000 -0400
19652 +++ linux-2.6.32.45/arch/x86/lib/memset_64.S 2011-04-17 15:56:46.000000000 -0400
19653 @@ -118,7 +118,7 @@ ENDPROC(__memset)
19654
19655 #include <asm/cpufeature.h>
19656
19657 - .section .altinstr_replacement,"ax"
19658 + .section .altinstr_replacement,"a"
19659 1: .byte 0xeb /* jmp <disp8> */
19660 .byte (memset_c - memset) - (2f - 1b) /* offset */
19661 2:
19662 diff -urNp linux-2.6.32.45/arch/x86/lib/mmx_32.c linux-2.6.32.45/arch/x86/lib/mmx_32.c
19663 --- linux-2.6.32.45/arch/x86/lib/mmx_32.c 2011-03-27 14:31:47.000000000 -0400
19664 +++ linux-2.6.32.45/arch/x86/lib/mmx_32.c 2011-04-17 15:56:46.000000000 -0400
19665 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
19666 {
19667 void *p;
19668 int i;
19669 + unsigned long cr0;
19670
19671 if (unlikely(in_interrupt()))
19672 return __memcpy(to, from, len);
19673 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
19674 kernel_fpu_begin();
19675
19676 __asm__ __volatile__ (
19677 - "1: prefetch (%0)\n" /* This set is 28 bytes */
19678 - " prefetch 64(%0)\n"
19679 - " prefetch 128(%0)\n"
19680 - " prefetch 192(%0)\n"
19681 - " prefetch 256(%0)\n"
19682 + "1: prefetch (%1)\n" /* This set is 28 bytes */
19683 + " prefetch 64(%1)\n"
19684 + " prefetch 128(%1)\n"
19685 + " prefetch 192(%1)\n"
19686 + " prefetch 256(%1)\n"
19687 "2: \n"
19688 ".section .fixup, \"ax\"\n"
19689 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19690 + "3: \n"
19691 +
19692 +#ifdef CONFIG_PAX_KERNEXEC
19693 + " movl %%cr0, %0\n"
19694 + " movl %0, %%eax\n"
19695 + " andl $0xFFFEFFFF, %%eax\n"
19696 + " movl %%eax, %%cr0\n"
19697 +#endif
19698 +
19699 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19700 +
19701 +#ifdef CONFIG_PAX_KERNEXEC
19702 + " movl %0, %%cr0\n"
19703 +#endif
19704 +
19705 " jmp 2b\n"
19706 ".previous\n"
19707 _ASM_EXTABLE(1b, 3b)
19708 - : : "r" (from));
19709 + : "=&r" (cr0) : "r" (from) : "ax");
19710
19711 for ( ; i > 5; i--) {
19712 __asm__ __volatile__ (
19713 - "1: prefetch 320(%0)\n"
19714 - "2: movq (%0), %%mm0\n"
19715 - " movq 8(%0), %%mm1\n"
19716 - " movq 16(%0), %%mm2\n"
19717 - " movq 24(%0), %%mm3\n"
19718 - " movq %%mm0, (%1)\n"
19719 - " movq %%mm1, 8(%1)\n"
19720 - " movq %%mm2, 16(%1)\n"
19721 - " movq %%mm3, 24(%1)\n"
19722 - " movq 32(%0), %%mm0\n"
19723 - " movq 40(%0), %%mm1\n"
19724 - " movq 48(%0), %%mm2\n"
19725 - " movq 56(%0), %%mm3\n"
19726 - " movq %%mm0, 32(%1)\n"
19727 - " movq %%mm1, 40(%1)\n"
19728 - " movq %%mm2, 48(%1)\n"
19729 - " movq %%mm3, 56(%1)\n"
19730 + "1: prefetch 320(%1)\n"
19731 + "2: movq (%1), %%mm0\n"
19732 + " movq 8(%1), %%mm1\n"
19733 + " movq 16(%1), %%mm2\n"
19734 + " movq 24(%1), %%mm3\n"
19735 + " movq %%mm0, (%2)\n"
19736 + " movq %%mm1, 8(%2)\n"
19737 + " movq %%mm2, 16(%2)\n"
19738 + " movq %%mm3, 24(%2)\n"
19739 + " movq 32(%1), %%mm0\n"
19740 + " movq 40(%1), %%mm1\n"
19741 + " movq 48(%1), %%mm2\n"
19742 + " movq 56(%1), %%mm3\n"
19743 + " movq %%mm0, 32(%2)\n"
19744 + " movq %%mm1, 40(%2)\n"
19745 + " movq %%mm2, 48(%2)\n"
19746 + " movq %%mm3, 56(%2)\n"
19747 ".section .fixup, \"ax\"\n"
19748 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19749 + "3:\n"
19750 +
19751 +#ifdef CONFIG_PAX_KERNEXEC
19752 + " movl %%cr0, %0\n"
19753 + " movl %0, %%eax\n"
19754 + " andl $0xFFFEFFFF, %%eax\n"
19755 + " movl %%eax, %%cr0\n"
19756 +#endif
19757 +
19758 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19759 +
19760 +#ifdef CONFIG_PAX_KERNEXEC
19761 + " movl %0, %%cr0\n"
19762 +#endif
19763 +
19764 " jmp 2b\n"
19765 ".previous\n"
19766 _ASM_EXTABLE(1b, 3b)
19767 - : : "r" (from), "r" (to) : "memory");
19768 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19769
19770 from += 64;
19771 to += 64;
19772 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
19773 static void fast_copy_page(void *to, void *from)
19774 {
19775 int i;
19776 + unsigned long cr0;
19777
19778 kernel_fpu_begin();
19779
19780 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
19781 * but that is for later. -AV
19782 */
19783 __asm__ __volatile__(
19784 - "1: prefetch (%0)\n"
19785 - " prefetch 64(%0)\n"
19786 - " prefetch 128(%0)\n"
19787 - " prefetch 192(%0)\n"
19788 - " prefetch 256(%0)\n"
19789 + "1: prefetch (%1)\n"
19790 + " prefetch 64(%1)\n"
19791 + " prefetch 128(%1)\n"
19792 + " prefetch 192(%1)\n"
19793 + " prefetch 256(%1)\n"
19794 "2: \n"
19795 ".section .fixup, \"ax\"\n"
19796 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19797 + "3: \n"
19798 +
19799 +#ifdef CONFIG_PAX_KERNEXEC
19800 + " movl %%cr0, %0\n"
19801 + " movl %0, %%eax\n"
19802 + " andl $0xFFFEFFFF, %%eax\n"
19803 + " movl %%eax, %%cr0\n"
19804 +#endif
19805 +
19806 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19807 +
19808 +#ifdef CONFIG_PAX_KERNEXEC
19809 + " movl %0, %%cr0\n"
19810 +#endif
19811 +
19812 " jmp 2b\n"
19813 ".previous\n"
19814 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
19815 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
19816
19817 for (i = 0; i < (4096-320)/64; i++) {
19818 __asm__ __volatile__ (
19819 - "1: prefetch 320(%0)\n"
19820 - "2: movq (%0), %%mm0\n"
19821 - " movntq %%mm0, (%1)\n"
19822 - " movq 8(%0), %%mm1\n"
19823 - " movntq %%mm1, 8(%1)\n"
19824 - " movq 16(%0), %%mm2\n"
19825 - " movntq %%mm2, 16(%1)\n"
19826 - " movq 24(%0), %%mm3\n"
19827 - " movntq %%mm3, 24(%1)\n"
19828 - " movq 32(%0), %%mm4\n"
19829 - " movntq %%mm4, 32(%1)\n"
19830 - " movq 40(%0), %%mm5\n"
19831 - " movntq %%mm5, 40(%1)\n"
19832 - " movq 48(%0), %%mm6\n"
19833 - " movntq %%mm6, 48(%1)\n"
19834 - " movq 56(%0), %%mm7\n"
19835 - " movntq %%mm7, 56(%1)\n"
19836 + "1: prefetch 320(%1)\n"
19837 + "2: movq (%1), %%mm0\n"
19838 + " movntq %%mm0, (%2)\n"
19839 + " movq 8(%1), %%mm1\n"
19840 + " movntq %%mm1, 8(%2)\n"
19841 + " movq 16(%1), %%mm2\n"
19842 + " movntq %%mm2, 16(%2)\n"
19843 + " movq 24(%1), %%mm3\n"
19844 + " movntq %%mm3, 24(%2)\n"
19845 + " movq 32(%1), %%mm4\n"
19846 + " movntq %%mm4, 32(%2)\n"
19847 + " movq 40(%1), %%mm5\n"
19848 + " movntq %%mm5, 40(%2)\n"
19849 + " movq 48(%1), %%mm6\n"
19850 + " movntq %%mm6, 48(%2)\n"
19851 + " movq 56(%1), %%mm7\n"
19852 + " movntq %%mm7, 56(%2)\n"
19853 ".section .fixup, \"ax\"\n"
19854 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19855 + "3:\n"
19856 +
19857 +#ifdef CONFIG_PAX_KERNEXEC
19858 + " movl %%cr0, %0\n"
19859 + " movl %0, %%eax\n"
19860 + " andl $0xFFFEFFFF, %%eax\n"
19861 + " movl %%eax, %%cr0\n"
19862 +#endif
19863 +
19864 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19865 +
19866 +#ifdef CONFIG_PAX_KERNEXEC
19867 + " movl %0, %%cr0\n"
19868 +#endif
19869 +
19870 " jmp 2b\n"
19871 ".previous\n"
19872 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
19873 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19874
19875 from += 64;
19876 to += 64;
19877 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
19878 static void fast_copy_page(void *to, void *from)
19879 {
19880 int i;
19881 + unsigned long cr0;
19882
19883 kernel_fpu_begin();
19884
19885 __asm__ __volatile__ (
19886 - "1: prefetch (%0)\n"
19887 - " prefetch 64(%0)\n"
19888 - " prefetch 128(%0)\n"
19889 - " prefetch 192(%0)\n"
19890 - " prefetch 256(%0)\n"
19891 + "1: prefetch (%1)\n"
19892 + " prefetch 64(%1)\n"
19893 + " prefetch 128(%1)\n"
19894 + " prefetch 192(%1)\n"
19895 + " prefetch 256(%1)\n"
19896 "2: \n"
19897 ".section .fixup, \"ax\"\n"
19898 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19899 + "3: \n"
19900 +
19901 +#ifdef CONFIG_PAX_KERNEXEC
19902 + " movl %%cr0, %0\n"
19903 + " movl %0, %%eax\n"
19904 + " andl $0xFFFEFFFF, %%eax\n"
19905 + " movl %%eax, %%cr0\n"
19906 +#endif
19907 +
19908 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19909 +
19910 +#ifdef CONFIG_PAX_KERNEXEC
19911 + " movl %0, %%cr0\n"
19912 +#endif
19913 +
19914 " jmp 2b\n"
19915 ".previous\n"
19916 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
19917 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
19918
19919 for (i = 0; i < 4096/64; i++) {
19920 __asm__ __volatile__ (
19921 - "1: prefetch 320(%0)\n"
19922 - "2: movq (%0), %%mm0\n"
19923 - " movq 8(%0), %%mm1\n"
19924 - " movq 16(%0), %%mm2\n"
19925 - " movq 24(%0), %%mm3\n"
19926 - " movq %%mm0, (%1)\n"
19927 - " movq %%mm1, 8(%1)\n"
19928 - " movq %%mm2, 16(%1)\n"
19929 - " movq %%mm3, 24(%1)\n"
19930 - " movq 32(%0), %%mm0\n"
19931 - " movq 40(%0), %%mm1\n"
19932 - " movq 48(%0), %%mm2\n"
19933 - " movq 56(%0), %%mm3\n"
19934 - " movq %%mm0, 32(%1)\n"
19935 - " movq %%mm1, 40(%1)\n"
19936 - " movq %%mm2, 48(%1)\n"
19937 - " movq %%mm3, 56(%1)\n"
19938 + "1: prefetch 320(%1)\n"
19939 + "2: movq (%1), %%mm0\n"
19940 + " movq 8(%1), %%mm1\n"
19941 + " movq 16(%1), %%mm2\n"
19942 + " movq 24(%1), %%mm3\n"
19943 + " movq %%mm0, (%2)\n"
19944 + " movq %%mm1, 8(%2)\n"
19945 + " movq %%mm2, 16(%2)\n"
19946 + " movq %%mm3, 24(%2)\n"
19947 + " movq 32(%1), %%mm0\n"
19948 + " movq 40(%1), %%mm1\n"
19949 + " movq 48(%1), %%mm2\n"
19950 + " movq 56(%1), %%mm3\n"
19951 + " movq %%mm0, 32(%2)\n"
19952 + " movq %%mm1, 40(%2)\n"
19953 + " movq %%mm2, 48(%2)\n"
19954 + " movq %%mm3, 56(%2)\n"
19955 ".section .fixup, \"ax\"\n"
19956 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19957 + "3:\n"
19958 +
19959 +#ifdef CONFIG_PAX_KERNEXEC
19960 + " movl %%cr0, %0\n"
19961 + " movl %0, %%eax\n"
19962 + " andl $0xFFFEFFFF, %%eax\n"
19963 + " movl %%eax, %%cr0\n"
19964 +#endif
19965 +
19966 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19967 +
19968 +#ifdef CONFIG_PAX_KERNEXEC
19969 + " movl %0, %%cr0\n"
19970 +#endif
19971 +
19972 " jmp 2b\n"
19973 ".previous\n"
19974 _ASM_EXTABLE(1b, 3b)
19975 - : : "r" (from), "r" (to) : "memory");
19976 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19977
19978 from += 64;
19979 to += 64;
19980 diff -urNp linux-2.6.32.45/arch/x86/lib/putuser.S linux-2.6.32.45/arch/x86/lib/putuser.S
19981 --- linux-2.6.32.45/arch/x86/lib/putuser.S 2011-03-27 14:31:47.000000000 -0400
19982 +++ linux-2.6.32.45/arch/x86/lib/putuser.S 2011-04-17 15:56:46.000000000 -0400
19983 @@ -15,7 +15,8 @@
19984 #include <asm/thread_info.h>
19985 #include <asm/errno.h>
19986 #include <asm/asm.h>
19987 -
19988 +#include <asm/segment.h>
19989 +#include <asm/pgtable.h>
19990
19991 /*
19992 * __put_user_X
19993 @@ -29,52 +30,119 @@
19994 * as they get called from within inline assembly.
19995 */
19996
19997 -#define ENTER CFI_STARTPROC ; \
19998 - GET_THREAD_INFO(%_ASM_BX)
19999 +#define ENTER CFI_STARTPROC
20000 #define EXIT ret ; \
20001 CFI_ENDPROC
20002
20003 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20004 +#define _DEST %_ASM_CX,%_ASM_BX
20005 +#else
20006 +#define _DEST %_ASM_CX
20007 +#endif
20008 +
20009 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20010 +#define __copyuser_seg gs;
20011 +#else
20012 +#define __copyuser_seg
20013 +#endif
20014 +
20015 .text
20016 ENTRY(__put_user_1)
20017 ENTER
20018 +
20019 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20020 + GET_THREAD_INFO(%_ASM_BX)
20021 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
20022 jae bad_put_user
20023 -1: movb %al,(%_ASM_CX)
20024 +
20025 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20026 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20027 + cmp %_ASM_BX,%_ASM_CX
20028 + jb 1234f
20029 + xor %ebx,%ebx
20030 +1234:
20031 +#endif
20032 +
20033 +#endif
20034 +
20035 +1: __copyuser_seg movb %al,(_DEST)
20036 xor %eax,%eax
20037 EXIT
20038 ENDPROC(__put_user_1)
20039
20040 ENTRY(__put_user_2)
20041 ENTER
20042 +
20043 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20044 + GET_THREAD_INFO(%_ASM_BX)
20045 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20046 sub $1,%_ASM_BX
20047 cmp %_ASM_BX,%_ASM_CX
20048 jae bad_put_user
20049 -2: movw %ax,(%_ASM_CX)
20050 +
20051 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20052 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20053 + cmp %_ASM_BX,%_ASM_CX
20054 + jb 1234f
20055 + xor %ebx,%ebx
20056 +1234:
20057 +#endif
20058 +
20059 +#endif
20060 +
20061 +2: __copyuser_seg movw %ax,(_DEST)
20062 xor %eax,%eax
20063 EXIT
20064 ENDPROC(__put_user_2)
20065
20066 ENTRY(__put_user_4)
20067 ENTER
20068 +
20069 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20070 + GET_THREAD_INFO(%_ASM_BX)
20071 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20072 sub $3,%_ASM_BX
20073 cmp %_ASM_BX,%_ASM_CX
20074 jae bad_put_user
20075 -3: movl %eax,(%_ASM_CX)
20076 +
20077 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20078 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20079 + cmp %_ASM_BX,%_ASM_CX
20080 + jb 1234f
20081 + xor %ebx,%ebx
20082 +1234:
20083 +#endif
20084 +
20085 +#endif
20086 +
20087 +3: __copyuser_seg movl %eax,(_DEST)
20088 xor %eax,%eax
20089 EXIT
20090 ENDPROC(__put_user_4)
20091
20092 ENTRY(__put_user_8)
20093 ENTER
20094 +
20095 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20096 + GET_THREAD_INFO(%_ASM_BX)
20097 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20098 sub $7,%_ASM_BX
20099 cmp %_ASM_BX,%_ASM_CX
20100 jae bad_put_user
20101 -4: mov %_ASM_AX,(%_ASM_CX)
20102 +
20103 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20104 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20105 + cmp %_ASM_BX,%_ASM_CX
20106 + jb 1234f
20107 + xor %ebx,%ebx
20108 +1234:
20109 +#endif
20110 +
20111 +#endif
20112 +
20113 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
20114 #ifdef CONFIG_X86_32
20115 -5: movl %edx,4(%_ASM_CX)
20116 +5: __copyuser_seg movl %edx,4(_DEST)
20117 #endif
20118 xor %eax,%eax
20119 EXIT
20120 diff -urNp linux-2.6.32.45/arch/x86/lib/usercopy_32.c linux-2.6.32.45/arch/x86/lib/usercopy_32.c
20121 --- linux-2.6.32.45/arch/x86/lib/usercopy_32.c 2011-03-27 14:31:47.000000000 -0400
20122 +++ linux-2.6.32.45/arch/x86/lib/usercopy_32.c 2011-04-23 21:12:28.000000000 -0400
20123 @@ -43,7 +43,7 @@ do { \
20124 __asm__ __volatile__( \
20125 " testl %1,%1\n" \
20126 " jz 2f\n" \
20127 - "0: lodsb\n" \
20128 + "0: "__copyuser_seg"lodsb\n" \
20129 " stosb\n" \
20130 " testb %%al,%%al\n" \
20131 " jz 1f\n" \
20132 @@ -128,10 +128,12 @@ do { \
20133 int __d0; \
20134 might_fault(); \
20135 __asm__ __volatile__( \
20136 + __COPYUSER_SET_ES \
20137 "0: rep; stosl\n" \
20138 " movl %2,%0\n" \
20139 "1: rep; stosb\n" \
20140 "2:\n" \
20141 + __COPYUSER_RESTORE_ES \
20142 ".section .fixup,\"ax\"\n" \
20143 "3: lea 0(%2,%0,4),%0\n" \
20144 " jmp 2b\n" \
20145 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
20146 might_fault();
20147
20148 __asm__ __volatile__(
20149 + __COPYUSER_SET_ES
20150 " testl %0, %0\n"
20151 " jz 3f\n"
20152 " andl %0,%%ecx\n"
20153 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
20154 " subl %%ecx,%0\n"
20155 " addl %0,%%eax\n"
20156 "1:\n"
20157 + __COPYUSER_RESTORE_ES
20158 ".section .fixup,\"ax\"\n"
20159 "2: xorl %%eax,%%eax\n"
20160 " jmp 1b\n"
20161 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
20162
20163 #ifdef CONFIG_X86_INTEL_USERCOPY
20164 static unsigned long
20165 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
20166 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
20167 {
20168 int d0, d1;
20169 __asm__ __volatile__(
20170 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
20171 " .align 2,0x90\n"
20172 "3: movl 0(%4), %%eax\n"
20173 "4: movl 4(%4), %%edx\n"
20174 - "5: movl %%eax, 0(%3)\n"
20175 - "6: movl %%edx, 4(%3)\n"
20176 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
20177 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
20178 "7: movl 8(%4), %%eax\n"
20179 "8: movl 12(%4),%%edx\n"
20180 - "9: movl %%eax, 8(%3)\n"
20181 - "10: movl %%edx, 12(%3)\n"
20182 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
20183 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
20184 "11: movl 16(%4), %%eax\n"
20185 "12: movl 20(%4), %%edx\n"
20186 - "13: movl %%eax, 16(%3)\n"
20187 - "14: movl %%edx, 20(%3)\n"
20188 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
20189 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
20190 "15: movl 24(%4), %%eax\n"
20191 "16: movl 28(%4), %%edx\n"
20192 - "17: movl %%eax, 24(%3)\n"
20193 - "18: movl %%edx, 28(%3)\n"
20194 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
20195 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
20196 "19: movl 32(%4), %%eax\n"
20197 "20: movl 36(%4), %%edx\n"
20198 - "21: movl %%eax, 32(%3)\n"
20199 - "22: movl %%edx, 36(%3)\n"
20200 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
20201 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
20202 "23: movl 40(%4), %%eax\n"
20203 "24: movl 44(%4), %%edx\n"
20204 - "25: movl %%eax, 40(%3)\n"
20205 - "26: movl %%edx, 44(%3)\n"
20206 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
20207 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
20208 "27: movl 48(%4), %%eax\n"
20209 "28: movl 52(%4), %%edx\n"
20210 - "29: movl %%eax, 48(%3)\n"
20211 - "30: movl %%edx, 52(%3)\n"
20212 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
20213 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
20214 "31: movl 56(%4), %%eax\n"
20215 "32: movl 60(%4), %%edx\n"
20216 - "33: movl %%eax, 56(%3)\n"
20217 - "34: movl %%edx, 60(%3)\n"
20218 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
20219 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
20220 " addl $-64, %0\n"
20221 " addl $64, %4\n"
20222 " addl $64, %3\n"
20223 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
20224 " shrl $2, %0\n"
20225 " andl $3, %%eax\n"
20226 " cld\n"
20227 + __COPYUSER_SET_ES
20228 "99: rep; movsl\n"
20229 "36: movl %%eax, %0\n"
20230 "37: rep; movsb\n"
20231 "100:\n"
20232 + __COPYUSER_RESTORE_ES
20233 + ".section .fixup,\"ax\"\n"
20234 + "101: lea 0(%%eax,%0,4),%0\n"
20235 + " jmp 100b\n"
20236 + ".previous\n"
20237 + ".section __ex_table,\"a\"\n"
20238 + " .align 4\n"
20239 + " .long 1b,100b\n"
20240 + " .long 2b,100b\n"
20241 + " .long 3b,100b\n"
20242 + " .long 4b,100b\n"
20243 + " .long 5b,100b\n"
20244 + " .long 6b,100b\n"
20245 + " .long 7b,100b\n"
20246 + " .long 8b,100b\n"
20247 + " .long 9b,100b\n"
20248 + " .long 10b,100b\n"
20249 + " .long 11b,100b\n"
20250 + " .long 12b,100b\n"
20251 + " .long 13b,100b\n"
20252 + " .long 14b,100b\n"
20253 + " .long 15b,100b\n"
20254 + " .long 16b,100b\n"
20255 + " .long 17b,100b\n"
20256 + " .long 18b,100b\n"
20257 + " .long 19b,100b\n"
20258 + " .long 20b,100b\n"
20259 + " .long 21b,100b\n"
20260 + " .long 22b,100b\n"
20261 + " .long 23b,100b\n"
20262 + " .long 24b,100b\n"
20263 + " .long 25b,100b\n"
20264 + " .long 26b,100b\n"
20265 + " .long 27b,100b\n"
20266 + " .long 28b,100b\n"
20267 + " .long 29b,100b\n"
20268 + " .long 30b,100b\n"
20269 + " .long 31b,100b\n"
20270 + " .long 32b,100b\n"
20271 + " .long 33b,100b\n"
20272 + " .long 34b,100b\n"
20273 + " .long 35b,100b\n"
20274 + " .long 36b,100b\n"
20275 + " .long 37b,100b\n"
20276 + " .long 99b,101b\n"
20277 + ".previous"
20278 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
20279 + : "1"(to), "2"(from), "0"(size)
20280 + : "eax", "edx", "memory");
20281 + return size;
20282 +}
20283 +
20284 +static unsigned long
20285 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
20286 +{
20287 + int d0, d1;
20288 + __asm__ __volatile__(
20289 + " .align 2,0x90\n"
20290 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
20291 + " cmpl $67, %0\n"
20292 + " jbe 3f\n"
20293 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
20294 + " .align 2,0x90\n"
20295 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
20296 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
20297 + "5: movl %%eax, 0(%3)\n"
20298 + "6: movl %%edx, 4(%3)\n"
20299 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
20300 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
20301 + "9: movl %%eax, 8(%3)\n"
20302 + "10: movl %%edx, 12(%3)\n"
20303 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
20304 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
20305 + "13: movl %%eax, 16(%3)\n"
20306 + "14: movl %%edx, 20(%3)\n"
20307 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
20308 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
20309 + "17: movl %%eax, 24(%3)\n"
20310 + "18: movl %%edx, 28(%3)\n"
20311 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
20312 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
20313 + "21: movl %%eax, 32(%3)\n"
20314 + "22: movl %%edx, 36(%3)\n"
20315 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
20316 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
20317 + "25: movl %%eax, 40(%3)\n"
20318 + "26: movl %%edx, 44(%3)\n"
20319 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
20320 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
20321 + "29: movl %%eax, 48(%3)\n"
20322 + "30: movl %%edx, 52(%3)\n"
20323 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
20324 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
20325 + "33: movl %%eax, 56(%3)\n"
20326 + "34: movl %%edx, 60(%3)\n"
20327 + " addl $-64, %0\n"
20328 + " addl $64, %4\n"
20329 + " addl $64, %3\n"
20330 + " cmpl $63, %0\n"
20331 + " ja 1b\n"
20332 + "35: movl %0, %%eax\n"
20333 + " shrl $2, %0\n"
20334 + " andl $3, %%eax\n"
20335 + " cld\n"
20336 + "99: rep; "__copyuser_seg" movsl\n"
20337 + "36: movl %%eax, %0\n"
20338 + "37: rep; "__copyuser_seg" movsb\n"
20339 + "100:\n"
20340 ".section .fixup,\"ax\"\n"
20341 "101: lea 0(%%eax,%0,4),%0\n"
20342 " jmp 100b\n"
20343 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
20344 int d0, d1;
20345 __asm__ __volatile__(
20346 " .align 2,0x90\n"
20347 - "0: movl 32(%4), %%eax\n"
20348 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20349 " cmpl $67, %0\n"
20350 " jbe 2f\n"
20351 - "1: movl 64(%4), %%eax\n"
20352 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20353 " .align 2,0x90\n"
20354 - "2: movl 0(%4), %%eax\n"
20355 - "21: movl 4(%4), %%edx\n"
20356 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20357 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20358 " movl %%eax, 0(%3)\n"
20359 " movl %%edx, 4(%3)\n"
20360 - "3: movl 8(%4), %%eax\n"
20361 - "31: movl 12(%4),%%edx\n"
20362 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20363 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20364 " movl %%eax, 8(%3)\n"
20365 " movl %%edx, 12(%3)\n"
20366 - "4: movl 16(%4), %%eax\n"
20367 - "41: movl 20(%4), %%edx\n"
20368 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20369 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20370 " movl %%eax, 16(%3)\n"
20371 " movl %%edx, 20(%3)\n"
20372 - "10: movl 24(%4), %%eax\n"
20373 - "51: movl 28(%4), %%edx\n"
20374 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20375 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20376 " movl %%eax, 24(%3)\n"
20377 " movl %%edx, 28(%3)\n"
20378 - "11: movl 32(%4), %%eax\n"
20379 - "61: movl 36(%4), %%edx\n"
20380 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20381 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20382 " movl %%eax, 32(%3)\n"
20383 " movl %%edx, 36(%3)\n"
20384 - "12: movl 40(%4), %%eax\n"
20385 - "71: movl 44(%4), %%edx\n"
20386 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20387 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20388 " movl %%eax, 40(%3)\n"
20389 " movl %%edx, 44(%3)\n"
20390 - "13: movl 48(%4), %%eax\n"
20391 - "81: movl 52(%4), %%edx\n"
20392 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20393 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20394 " movl %%eax, 48(%3)\n"
20395 " movl %%edx, 52(%3)\n"
20396 - "14: movl 56(%4), %%eax\n"
20397 - "91: movl 60(%4), %%edx\n"
20398 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20399 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20400 " movl %%eax, 56(%3)\n"
20401 " movl %%edx, 60(%3)\n"
20402 " addl $-64, %0\n"
20403 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
20404 " shrl $2, %0\n"
20405 " andl $3, %%eax\n"
20406 " cld\n"
20407 - "6: rep; movsl\n"
20408 + "6: rep; "__copyuser_seg" movsl\n"
20409 " movl %%eax,%0\n"
20410 - "7: rep; movsb\n"
20411 + "7: rep; "__copyuser_seg" movsb\n"
20412 "8:\n"
20413 ".section .fixup,\"ax\"\n"
20414 "9: lea 0(%%eax,%0,4),%0\n"
20415 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
20416
20417 __asm__ __volatile__(
20418 " .align 2,0x90\n"
20419 - "0: movl 32(%4), %%eax\n"
20420 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20421 " cmpl $67, %0\n"
20422 " jbe 2f\n"
20423 - "1: movl 64(%4), %%eax\n"
20424 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20425 " .align 2,0x90\n"
20426 - "2: movl 0(%4), %%eax\n"
20427 - "21: movl 4(%4), %%edx\n"
20428 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20429 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20430 " movnti %%eax, 0(%3)\n"
20431 " movnti %%edx, 4(%3)\n"
20432 - "3: movl 8(%4), %%eax\n"
20433 - "31: movl 12(%4),%%edx\n"
20434 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20435 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20436 " movnti %%eax, 8(%3)\n"
20437 " movnti %%edx, 12(%3)\n"
20438 - "4: movl 16(%4), %%eax\n"
20439 - "41: movl 20(%4), %%edx\n"
20440 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20441 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20442 " movnti %%eax, 16(%3)\n"
20443 " movnti %%edx, 20(%3)\n"
20444 - "10: movl 24(%4), %%eax\n"
20445 - "51: movl 28(%4), %%edx\n"
20446 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20447 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20448 " movnti %%eax, 24(%3)\n"
20449 " movnti %%edx, 28(%3)\n"
20450 - "11: movl 32(%4), %%eax\n"
20451 - "61: movl 36(%4), %%edx\n"
20452 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20453 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20454 " movnti %%eax, 32(%3)\n"
20455 " movnti %%edx, 36(%3)\n"
20456 - "12: movl 40(%4), %%eax\n"
20457 - "71: movl 44(%4), %%edx\n"
20458 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20459 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20460 " movnti %%eax, 40(%3)\n"
20461 " movnti %%edx, 44(%3)\n"
20462 - "13: movl 48(%4), %%eax\n"
20463 - "81: movl 52(%4), %%edx\n"
20464 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20465 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20466 " movnti %%eax, 48(%3)\n"
20467 " movnti %%edx, 52(%3)\n"
20468 - "14: movl 56(%4), %%eax\n"
20469 - "91: movl 60(%4), %%edx\n"
20470 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20471 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20472 " movnti %%eax, 56(%3)\n"
20473 " movnti %%edx, 60(%3)\n"
20474 " addl $-64, %0\n"
20475 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
20476 " shrl $2, %0\n"
20477 " andl $3, %%eax\n"
20478 " cld\n"
20479 - "6: rep; movsl\n"
20480 + "6: rep; "__copyuser_seg" movsl\n"
20481 " movl %%eax,%0\n"
20482 - "7: rep; movsb\n"
20483 + "7: rep; "__copyuser_seg" movsb\n"
20484 "8:\n"
20485 ".section .fixup,\"ax\"\n"
20486 "9: lea 0(%%eax,%0,4),%0\n"
20487 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
20488
20489 __asm__ __volatile__(
20490 " .align 2,0x90\n"
20491 - "0: movl 32(%4), %%eax\n"
20492 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20493 " cmpl $67, %0\n"
20494 " jbe 2f\n"
20495 - "1: movl 64(%4), %%eax\n"
20496 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20497 " .align 2,0x90\n"
20498 - "2: movl 0(%4), %%eax\n"
20499 - "21: movl 4(%4), %%edx\n"
20500 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20501 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20502 " movnti %%eax, 0(%3)\n"
20503 " movnti %%edx, 4(%3)\n"
20504 - "3: movl 8(%4), %%eax\n"
20505 - "31: movl 12(%4),%%edx\n"
20506 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20507 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20508 " movnti %%eax, 8(%3)\n"
20509 " movnti %%edx, 12(%3)\n"
20510 - "4: movl 16(%4), %%eax\n"
20511 - "41: movl 20(%4), %%edx\n"
20512 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20513 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20514 " movnti %%eax, 16(%3)\n"
20515 " movnti %%edx, 20(%3)\n"
20516 - "10: movl 24(%4), %%eax\n"
20517 - "51: movl 28(%4), %%edx\n"
20518 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20519 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20520 " movnti %%eax, 24(%3)\n"
20521 " movnti %%edx, 28(%3)\n"
20522 - "11: movl 32(%4), %%eax\n"
20523 - "61: movl 36(%4), %%edx\n"
20524 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20525 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20526 " movnti %%eax, 32(%3)\n"
20527 " movnti %%edx, 36(%3)\n"
20528 - "12: movl 40(%4), %%eax\n"
20529 - "71: movl 44(%4), %%edx\n"
20530 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20531 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20532 " movnti %%eax, 40(%3)\n"
20533 " movnti %%edx, 44(%3)\n"
20534 - "13: movl 48(%4), %%eax\n"
20535 - "81: movl 52(%4), %%edx\n"
20536 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20537 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20538 " movnti %%eax, 48(%3)\n"
20539 " movnti %%edx, 52(%3)\n"
20540 - "14: movl 56(%4), %%eax\n"
20541 - "91: movl 60(%4), %%edx\n"
20542 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20543 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20544 " movnti %%eax, 56(%3)\n"
20545 " movnti %%edx, 60(%3)\n"
20546 " addl $-64, %0\n"
20547 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
20548 " shrl $2, %0\n"
20549 " andl $3, %%eax\n"
20550 " cld\n"
20551 - "6: rep; movsl\n"
20552 + "6: rep; "__copyuser_seg" movsl\n"
20553 " movl %%eax,%0\n"
20554 - "7: rep; movsb\n"
20555 + "7: rep; "__copyuser_seg" movsb\n"
20556 "8:\n"
20557 ".section .fixup,\"ax\"\n"
20558 "9: lea 0(%%eax,%0,4),%0\n"
20559 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
20560 */
20561 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
20562 unsigned long size);
20563 -unsigned long __copy_user_intel(void __user *to, const void *from,
20564 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
20565 + unsigned long size);
20566 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
20567 unsigned long size);
20568 unsigned long __copy_user_zeroing_intel_nocache(void *to,
20569 const void __user *from, unsigned long size);
20570 #endif /* CONFIG_X86_INTEL_USERCOPY */
20571
20572 /* Generic arbitrary sized copy. */
20573 -#define __copy_user(to, from, size) \
20574 +#define __copy_user(to, from, size, prefix, set, restore) \
20575 do { \
20576 int __d0, __d1, __d2; \
20577 __asm__ __volatile__( \
20578 + set \
20579 " cmp $7,%0\n" \
20580 " jbe 1f\n" \
20581 " movl %1,%0\n" \
20582 " negl %0\n" \
20583 " andl $7,%0\n" \
20584 " subl %0,%3\n" \
20585 - "4: rep; movsb\n" \
20586 + "4: rep; "prefix"movsb\n" \
20587 " movl %3,%0\n" \
20588 " shrl $2,%0\n" \
20589 " andl $3,%3\n" \
20590 " .align 2,0x90\n" \
20591 - "0: rep; movsl\n" \
20592 + "0: rep; "prefix"movsl\n" \
20593 " movl %3,%0\n" \
20594 - "1: rep; movsb\n" \
20595 + "1: rep; "prefix"movsb\n" \
20596 "2:\n" \
20597 + restore \
20598 ".section .fixup,\"ax\"\n" \
20599 "5: addl %3,%0\n" \
20600 " jmp 2b\n" \
20601 @@ -682,14 +799,14 @@ do { \
20602 " negl %0\n" \
20603 " andl $7,%0\n" \
20604 " subl %0,%3\n" \
20605 - "4: rep; movsb\n" \
20606 + "4: rep; "__copyuser_seg"movsb\n" \
20607 " movl %3,%0\n" \
20608 " shrl $2,%0\n" \
20609 " andl $3,%3\n" \
20610 " .align 2,0x90\n" \
20611 - "0: rep; movsl\n" \
20612 + "0: rep; "__copyuser_seg"movsl\n" \
20613 " movl %3,%0\n" \
20614 - "1: rep; movsb\n" \
20615 + "1: rep; "__copyuser_seg"movsb\n" \
20616 "2:\n" \
20617 ".section .fixup,\"ax\"\n" \
20618 "5: addl %3,%0\n" \
20619 @@ -775,9 +892,9 @@ survive:
20620 }
20621 #endif
20622 if (movsl_is_ok(to, from, n))
20623 - __copy_user(to, from, n);
20624 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
20625 else
20626 - n = __copy_user_intel(to, from, n);
20627 + n = __generic_copy_to_user_intel(to, from, n);
20628 return n;
20629 }
20630 EXPORT_SYMBOL(__copy_to_user_ll);
20631 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
20632 unsigned long n)
20633 {
20634 if (movsl_is_ok(to, from, n))
20635 - __copy_user(to, from, n);
20636 + __copy_user(to, from, n, __copyuser_seg, "", "");
20637 else
20638 - n = __copy_user_intel((void __user *)to,
20639 - (const void *)from, n);
20640 + n = __generic_copy_from_user_intel(to, from, n);
20641 return n;
20642 }
20643 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
20644 @@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocach
20645 if (n > 64 && cpu_has_xmm2)
20646 n = __copy_user_intel_nocache(to, from, n);
20647 else
20648 - __copy_user(to, from, n);
20649 + __copy_user(to, from, n, __copyuser_seg, "", "");
20650 #else
20651 - __copy_user(to, from, n);
20652 + __copy_user(to, from, n, __copyuser_seg, "", "");
20653 #endif
20654 return n;
20655 }
20656 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
20657
20658 -/**
20659 - * copy_to_user: - Copy a block of data into user space.
20660 - * @to: Destination address, in user space.
20661 - * @from: Source address, in kernel space.
20662 - * @n: Number of bytes to copy.
20663 - *
20664 - * Context: User context only. This function may sleep.
20665 - *
20666 - * Copy data from kernel space to user space.
20667 - *
20668 - * Returns number of bytes that could not be copied.
20669 - * On success, this will be zero.
20670 - */
20671 -unsigned long
20672 -copy_to_user(void __user *to, const void *from, unsigned long n)
20673 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20674 +void __set_fs(mm_segment_t x)
20675 {
20676 - if (access_ok(VERIFY_WRITE, to, n))
20677 - n = __copy_to_user(to, from, n);
20678 - return n;
20679 + switch (x.seg) {
20680 + case 0:
20681 + loadsegment(gs, 0);
20682 + break;
20683 + case TASK_SIZE_MAX:
20684 + loadsegment(gs, __USER_DS);
20685 + break;
20686 + case -1UL:
20687 + loadsegment(gs, __KERNEL_DS);
20688 + break;
20689 + default:
20690 + BUG();
20691 + }
20692 + return;
20693 }
20694 -EXPORT_SYMBOL(copy_to_user);
20695 +EXPORT_SYMBOL(__set_fs);
20696
20697 -/**
20698 - * copy_from_user: - Copy a block of data from user space.
20699 - * @to: Destination address, in kernel space.
20700 - * @from: Source address, in user space.
20701 - * @n: Number of bytes to copy.
20702 - *
20703 - * Context: User context only. This function may sleep.
20704 - *
20705 - * Copy data from user space to kernel space.
20706 - *
20707 - * Returns number of bytes that could not be copied.
20708 - * On success, this will be zero.
20709 - *
20710 - * If some data could not be copied, this function will pad the copied
20711 - * data to the requested size using zero bytes.
20712 - */
20713 -unsigned long
20714 -copy_from_user(void *to, const void __user *from, unsigned long n)
20715 +void set_fs(mm_segment_t x)
20716 {
20717 - if (access_ok(VERIFY_READ, from, n))
20718 - n = __copy_from_user(to, from, n);
20719 - else
20720 - memset(to, 0, n);
20721 - return n;
20722 + current_thread_info()->addr_limit = x;
20723 + __set_fs(x);
20724 }
20725 -EXPORT_SYMBOL(copy_from_user);
20726 +EXPORT_SYMBOL(set_fs);
20727 +#endif
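The usercopy_32.c changes above do two things: every user access in the inline asm gains a "__copyuser_seg" segment-override prefix (%gs under UDEREF) so userland can be fenced off, and set_fs()/__set_fs() start reloading %gs to match the current address limit. A minimal stand-alone sketch of that three-way addr_limit-to-selector mapping follows; the selector values and TASK_SIZE_MAX below are stand-ins, not values taken from the kernel headers.

/* Minimal user-space model of the __set_fs() switch added above.
 * MOCK_* constants are assumptions; in the kernel the values come from
 * segment.h / processor.h and loadsegment() actually reloads %gs. */
#include <stdio.h>
#include <stdlib.h>

#define MOCK_TASK_SIZE_MAX 0xc0000000UL /* assumed 3G/1G split, illustration only */
#define MOCK_USER_DS       0x7b         /* typical __USER_DS selector */
#define MOCK_KERNEL_DS     0x68         /* placeholder for __KERNEL_DS */

static unsigned int seg_for_limit(unsigned long addr_limit)
{
	switch (addr_limit) {
	case 0:                  return 0;              /* no userland access at all */
	case MOCK_TASK_SIZE_MAX: return MOCK_USER_DS;   /* normal USER_DS limit */
	case (unsigned long)-1:  return MOCK_KERNEL_DS; /* KERNEL_DS: whole address space */
	default:
		fprintf(stderr, "unexpected addr_limit %#lx\n", addr_limit);
		abort();                                /* mirrors the BUG() in the patch */
	}
}

int main(void)
{
	printf("USER_DS   -> %#x\n", seg_for_limit(MOCK_TASK_SIZE_MAX));
	printf("KERNEL_DS -> %#x\n", seg_for_limit((unsigned long)-1));
	return 0;
}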
20728 diff -urNp linux-2.6.32.45/arch/x86/lib/usercopy_64.c linux-2.6.32.45/arch/x86/lib/usercopy_64.c
20729 --- linux-2.6.32.45/arch/x86/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
20730 +++ linux-2.6.32.45/arch/x86/lib/usercopy_64.c 2011-05-04 17:56:20.000000000 -0400
20731 @@ -42,6 +42,12 @@ long
20732 __strncpy_from_user(char *dst, const char __user *src, long count)
20733 {
20734 long res;
20735 +
20736 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20737 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
20738 + src += PAX_USER_SHADOW_BASE;
20739 +#endif
20740 +
20741 __do_strncpy_from_user(dst, src, count, res);
20742 return res;
20743 }
20744 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
20745 {
20746 long __d0;
20747 might_fault();
20748 +
20749 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20750 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
20751 + addr += PAX_USER_SHADOW_BASE;
20752 +#endif
20753 +
20754 /* no memory constraint because it doesn't change any memory gcc knows
20755 about */
20756 asm volatile(
20757 @@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
20758
20759 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
20760 {
20761 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
20762 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
20763 +
20764 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20765 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
20766 + to += PAX_USER_SHADOW_BASE;
20767 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
20768 + from += PAX_USER_SHADOW_BASE;
20769 +#endif
20770 +
20771 return copy_user_generic((__force void *)to, (__force void *)from, len);
20772 - }
20773 - return len;
20774 + }
20775 + return len;
20776 }
20777 EXPORT_SYMBOL(copy_in_user);
20778
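On x86_64 the UDEREF approach is different: userland is additionally mapped at an offset, PAX_USER_SHADOW_BASE, and the usercopy_64.c hunks above nudge user pointers into that shadow alias before the unchecked copy loops run. The fragment below only illustrates the pointer adjustment; the shadow-base constant is a placeholder, not the configured kernel value.

/* Sketch of the shadow-base adjustment used in the usercopy_64.c hunks.
 * FAKE_USER_SHADOW_BASE is an assumption chosen only so the arithmetic
 * can be demonstrated in user space. */
#include <stdio.h>

#define FAKE_USER_SHADOW_BASE 0x0000800000000000UL /* assumption, not the real constant */

static const void *shadow_adjust(const void *uaddr)
{
	unsigned long a = (unsigned long)uaddr;

	/* Pointers still in the plain userland range get moved into the
	 * shadow alias before the low-level copy routine dereferences them. */
	if (a < FAKE_USER_SHADOW_BASE)
		a += FAKE_USER_SHADOW_BASE;
	return (const void *)a;
}

int main(void)
{
	const void *p = (const void *)0x7fff12340000UL;

	printf("user ptr  %p\n", (void *)p);
	printf("shadowed  %p\n", (void *)shadow_adjust(p));
	return 0;
}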
20779 diff -urNp linux-2.6.32.45/arch/x86/Makefile linux-2.6.32.45/arch/x86/Makefile
20780 --- linux-2.6.32.45/arch/x86/Makefile 2011-03-27 14:31:47.000000000 -0400
20781 +++ linux-2.6.32.45/arch/x86/Makefile 2011-07-19 18:16:02.000000000 -0400
20782 @@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
20783 else
20784 BITS := 64
20785 UTS_MACHINE := x86_64
20786 + biarch := $(call cc-option,-m64)
20787 CHECKFLAGS += -D__x86_64__ -m64
20788
20789 KBUILD_AFLAGS += -m64
20790 @@ -189,3 +190,12 @@ define archhelp
20791 echo ' FDARGS="..." arguments for the booted kernel'
20792 echo ' FDINITRD=file initrd for the booted kernel'
20793 endef
20794 +
20795 +define OLD_LD
20796 +
20797 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
20798 +*** Please upgrade your binutils to 2.18 or newer
20799 +endef
20800 +
20801 +archprepare:
20802 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
20803 diff -urNp linux-2.6.32.45/arch/x86/mm/extable.c linux-2.6.32.45/arch/x86/mm/extable.c
20804 --- linux-2.6.32.45/arch/x86/mm/extable.c 2011-03-27 14:31:47.000000000 -0400
20805 +++ linux-2.6.32.45/arch/x86/mm/extable.c 2011-04-17 15:56:46.000000000 -0400
20806 @@ -1,14 +1,71 @@
20807 #include <linux/module.h>
20808 #include <linux/spinlock.h>
20809 +#include <linux/sort.h>
20810 #include <asm/uaccess.h>
20811 +#include <asm/pgtable.h>
20812
20813 +/*
20814 + * The exception table needs to be sorted so that the binary
20815 + * search that we use to find entries in it works properly.
20816 + * This is used both for the kernel exception table and for
20817 + * the exception tables of modules that get loaded.
20818 + */
20819 +static int cmp_ex(const void *a, const void *b)
20820 +{
20821 + const struct exception_table_entry *x = a, *y = b;
20822 +
20823 + /* avoid overflow */
20824 + if (x->insn > y->insn)
20825 + return 1;
20826 + if (x->insn < y->insn)
20827 + return -1;
20828 + return 0;
20829 +}
20830 +
20831 +static void swap_ex(void *a, void *b, int size)
20832 +{
20833 + struct exception_table_entry t, *x = a, *y = b;
20834 +
20835 + t = *x;
20836 +
20837 + pax_open_kernel();
20838 + *x = *y;
20839 + *y = t;
20840 + pax_close_kernel();
20841 +}
20842 +
20843 +void sort_extable(struct exception_table_entry *start,
20844 + struct exception_table_entry *finish)
20845 +{
20846 + sort(start, finish - start, sizeof(struct exception_table_entry),
20847 + cmp_ex, swap_ex);
20848 +}
20849 +
20850 +#ifdef CONFIG_MODULES
20851 +/*
20852 + * If the exception table is sorted, any referring to the module init
20853 + * will be at the beginning or the end.
20854 + */
20855 +void trim_init_extable(struct module *m)
20856 +{
20857 + /*trim the beginning*/
20858 + while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
20859 + m->extable++;
20860 + m->num_exentries--;
20861 + }
20862 + /*trim the end*/
20863 + while (m->num_exentries &&
20864 + within_module_init(m->extable[m->num_exentries-1].insn, m))
20865 + m->num_exentries--;
20866 +}
20867 +#endif /* CONFIG_MODULES */
20868
20869 int fixup_exception(struct pt_regs *regs)
20870 {
20871 const struct exception_table_entry *fixup;
20872
20873 #ifdef CONFIG_PNPBIOS
20874 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
20875 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
20876 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
20877 extern u32 pnp_bios_is_utter_crap;
20878 pnp_bios_is_utter_crap = 1;
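The extable.c hunk above stops relying on the generic exception-table sort and carries its own cmp_ex()/swap_ex() pair so the swap can run between pax_open_kernel()/pax_close_kernel(), since under KERNEXEC the table may sit in read-only memory. The comparison deliberately avoids subtracting the two addresses, which could overflow the int return value. A user-space model of the same ordering, driven by qsort(), with a simplified entry layout:

/* Model of the cmp_ex() ordering added in extable.c: entries are sorted
 * by faulting-instruction address using explicit comparisons instead of
 * subtraction.  struct ex_entry is a simplification of the kernel's
 * struct exception_table_entry; the addresses are made up. */
#include <stdio.h>
#include <stdlib.h>

struct ex_entry {
	unsigned long insn;   /* address of the faulting instruction */
	unsigned long fixup;  /* address of the recovery code */
};

static int cmp_ex(const void *a, const void *b)
{
	const struct ex_entry *x = a, *y = b;

	if (x->insn > y->insn)
		return 1;
	if (x->insn < y->insn)
		return -1;
	return 0;
}

int main(void)
{
	struct ex_entry table[] = {
		{ 0xc1030000UL, 0xc1900010UL },
		{ 0xc1010000UL, 0xc1900000UL },
		{ 0xc1020000UL, 0xc1900008UL },
	};
	size_t i, n = sizeof(table) / sizeof(table[0]);

	qsort(table, n, sizeof(table[0]), cmp_ex);
	for (i = 0; i < n; i++)
		printf("%#lx -> %#lx\n", table[i].insn, table[i].fixup);
	return 0;
}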
20879 diff -urNp linux-2.6.32.45/arch/x86/mm/fault.c linux-2.6.32.45/arch/x86/mm/fault.c
20880 --- linux-2.6.32.45/arch/x86/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
20881 +++ linux-2.6.32.45/arch/x86/mm/fault.c 2011-08-17 20:06:44.000000000 -0400
20882 @@ -11,10 +11,19 @@
20883 #include <linux/kprobes.h> /* __kprobes, ... */
20884 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
20885 #include <linux/perf_event.h> /* perf_sw_event */
20886 +#include <linux/unistd.h>
20887 +#include <linux/compiler.h>
20888
20889 #include <asm/traps.h> /* dotraplinkage, ... */
20890 #include <asm/pgalloc.h> /* pgd_*(), ... */
20891 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
20892 +#include <asm/vsyscall.h>
20893 +#include <asm/tlbflush.h>
20894 +
20895 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20896 +#include <asm/stacktrace.h>
20897 +#include "../kernel/dumpstack.h"
20898 +#endif
20899
20900 /*
20901 * Page fault error code bits:
20902 @@ -51,7 +60,7 @@ static inline int notify_page_fault(stru
20903 int ret = 0;
20904
20905 /* kprobe_running() needs smp_processor_id() */
20906 - if (kprobes_built_in() && !user_mode_vm(regs)) {
20907 + if (kprobes_built_in() && !user_mode(regs)) {
20908 preempt_disable();
20909 if (kprobe_running() && kprobe_fault_handler(regs, 14))
20910 ret = 1;
20911 @@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *re
20912 return !instr_lo || (instr_lo>>1) == 1;
20913 case 0x00:
20914 /* Prefetch instruction is 0x0F0D or 0x0F18 */
20915 - if (probe_kernel_address(instr, opcode))
20916 + if (user_mode(regs)) {
20917 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
20918 + return 0;
20919 + } else if (probe_kernel_address(instr, opcode))
20920 return 0;
20921
20922 *prefetch = (instr_lo == 0xF) &&
20923 @@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsign
20924 while (instr < max_instr) {
20925 unsigned char opcode;
20926
20927 - if (probe_kernel_address(instr, opcode))
20928 + if (user_mode(regs)) {
20929 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
20930 + break;
20931 + } else if (probe_kernel_address(instr, opcode))
20932 break;
20933
20934 instr++;
20935 @@ -172,6 +187,30 @@ force_sig_info_fault(int si_signo, int s
20936 force_sig_info(si_signo, &info, tsk);
20937 }
20938
20939 +#ifdef CONFIG_PAX_EMUTRAMP
20940 +static int pax_handle_fetch_fault(struct pt_regs *regs);
20941 +#endif
20942 +
20943 +#ifdef CONFIG_PAX_PAGEEXEC
20944 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
20945 +{
20946 + pgd_t *pgd;
20947 + pud_t *pud;
20948 + pmd_t *pmd;
20949 +
20950 + pgd = pgd_offset(mm, address);
20951 + if (!pgd_present(*pgd))
20952 + return NULL;
20953 + pud = pud_offset(pgd, address);
20954 + if (!pud_present(*pud))
20955 + return NULL;
20956 + pmd = pmd_offset(pud, address);
20957 + if (!pmd_present(*pmd))
20958 + return NULL;
20959 + return pmd;
20960 +}
20961 +#endif
20962 +
20963 DEFINE_SPINLOCK(pgd_lock);
20964 LIST_HEAD(pgd_list);
20965
20966 @@ -224,11 +263,24 @@ void vmalloc_sync_all(void)
20967 address += PMD_SIZE) {
20968
20969 unsigned long flags;
20970 +
20971 +#ifdef CONFIG_PAX_PER_CPU_PGD
20972 + unsigned long cpu;
20973 +#else
20974 struct page *page;
20975 +#endif
20976
20977 spin_lock_irqsave(&pgd_lock, flags);
20978 +
20979 +#ifdef CONFIG_PAX_PER_CPU_PGD
20980 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20981 + pgd_t *pgd = get_cpu_pgd(cpu);
20982 +#else
20983 list_for_each_entry(page, &pgd_list, lru) {
20984 - if (!vmalloc_sync_one(page_address(page), address))
20985 + pgd_t *pgd = page_address(page);
20986 +#endif
20987 +
20988 + if (!vmalloc_sync_one(pgd, address))
20989 break;
20990 }
20991 spin_unlock_irqrestore(&pgd_lock, flags);
20992 @@ -258,6 +310,11 @@ static noinline int vmalloc_fault(unsign
20993 * an interrupt in the middle of a task switch..
20994 */
20995 pgd_paddr = read_cr3();
20996 +
20997 +#ifdef CONFIG_PAX_PER_CPU_PGD
20998 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
20999 +#endif
21000 +
21001 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
21002 if (!pmd_k)
21003 return -1;
21004 @@ -332,15 +389,27 @@ void vmalloc_sync_all(void)
21005
21006 const pgd_t *pgd_ref = pgd_offset_k(address);
21007 unsigned long flags;
21008 +
21009 +#ifdef CONFIG_PAX_PER_CPU_PGD
21010 + unsigned long cpu;
21011 +#else
21012 struct page *page;
21013 +#endif
21014
21015 if (pgd_none(*pgd_ref))
21016 continue;
21017
21018 spin_lock_irqsave(&pgd_lock, flags);
21019 +
21020 +#ifdef CONFIG_PAX_PER_CPU_PGD
21021 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
21022 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
21023 +#else
21024 list_for_each_entry(page, &pgd_list, lru) {
21025 pgd_t *pgd;
21026 pgd = (pgd_t *)page_address(page) + pgd_index(address);
21027 +#endif
21028 +
21029 if (pgd_none(*pgd))
21030 set_pgd(pgd, *pgd_ref);
21031 else
21032 @@ -373,7 +442,14 @@ static noinline int vmalloc_fault(unsign
21033 * happen within a race in page table update. In the later
21034 * case just flush:
21035 */
21036 +
21037 +#ifdef CONFIG_PAX_PER_CPU_PGD
21038 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
21039 + pgd = pgd_offset_cpu(smp_processor_id(), address);
21040 +#else
21041 pgd = pgd_offset(current->active_mm, address);
21042 +#endif
21043 +
21044 pgd_ref = pgd_offset_k(address);
21045 if (pgd_none(*pgd_ref))
21046 return -1;
21047 @@ -535,7 +611,7 @@ static int is_errata93(struct pt_regs *r
21048 static int is_errata100(struct pt_regs *regs, unsigned long address)
21049 {
21050 #ifdef CONFIG_X86_64
21051 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
21052 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
21053 return 1;
21054 #endif
21055 return 0;
21056 @@ -562,7 +638,7 @@ static int is_f00f_bug(struct pt_regs *r
21057 }
21058
21059 static const char nx_warning[] = KERN_CRIT
21060 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
21061 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
21062
21063 static void
21064 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21065 @@ -571,15 +647,26 @@ show_fault_oops(struct pt_regs *regs, un
21066 if (!oops_may_print())
21067 return;
21068
21069 - if (error_code & PF_INSTR) {
21070 + if (nx_enabled && (error_code & PF_INSTR)) {
21071 unsigned int level;
21072
21073 pte_t *pte = lookup_address(address, &level);
21074
21075 if (pte && pte_present(*pte) && !pte_exec(*pte))
21076 - printk(nx_warning, current_uid());
21077 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
21078 }
21079
21080 +#ifdef CONFIG_PAX_KERNEXEC
21081 + if (init_mm.start_code <= address && address < init_mm.end_code) {
21082 + if (current->signal->curr_ip)
21083 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21084 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
21085 + else
21086 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21087 + current->comm, task_pid_nr(current), current_uid(), current_euid());
21088 + }
21089 +#endif
21090 +
21091 printk(KERN_ALERT "BUG: unable to handle kernel ");
21092 if (address < PAGE_SIZE)
21093 printk(KERN_CONT "NULL pointer dereference");
21094 @@ -704,6 +791,70 @@ __bad_area_nosemaphore(struct pt_regs *r
21095 unsigned long address, int si_code)
21096 {
21097 struct task_struct *tsk = current;
21098 +#if defined(CONFIG_X86_64) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21099 + struct mm_struct *mm = tsk->mm;
21100 +#endif
21101 +
21102 +#ifdef CONFIG_X86_64
21103 + if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
21104 + if (regs->ip == (unsigned long)vgettimeofday) {
21105 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
21106 + return;
21107 + } else if (regs->ip == (unsigned long)vtime) {
21108 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
21109 + return;
21110 + } else if (regs->ip == (unsigned long)vgetcpu) {
21111 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
21112 + return;
21113 + }
21114 + }
21115 +#endif
21116 +
21117 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21118 + if (mm && (error_code & PF_USER)) {
21119 + unsigned long ip = regs->ip;
21120 +
21121 + if (v8086_mode(regs))
21122 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
21123 +
21124 + /*
21125 + * It's possible to have interrupts off here:
21126 + */
21127 + local_irq_enable();
21128 +
21129 +#ifdef CONFIG_PAX_PAGEEXEC
21130 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
21131 + ((nx_enabled && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
21132 +
21133 +#ifdef CONFIG_PAX_EMUTRAMP
21134 + switch (pax_handle_fetch_fault(regs)) {
21135 + case 2:
21136 + return;
21137 + }
21138 +#endif
21139 +
21140 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
21141 + do_group_exit(SIGKILL);
21142 + }
21143 +#endif
21144 +
21145 +#ifdef CONFIG_PAX_SEGMEXEC
21146 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
21147 +
21148 +#ifdef CONFIG_PAX_EMUTRAMP
21149 + switch (pax_handle_fetch_fault(regs)) {
21150 + case 2:
21151 + return;
21152 + }
21153 +#endif
21154 +
21155 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
21156 + do_group_exit(SIGKILL);
21157 + }
21158 +#endif
21159 +
21160 + }
21161 +#endif
21162
21163 /* User mode accesses just cause a SIGSEGV */
21164 if (error_code & PF_USER) {
21165 @@ -857,6 +1008,99 @@ static int spurious_fault_check(unsigned
21166 return 1;
21167 }
21168
21169 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21170 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
21171 +{
21172 + pte_t *pte;
21173 + pmd_t *pmd;
21174 + spinlock_t *ptl;
21175 + unsigned char pte_mask;
21176 +
21177 + if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
21178 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
21179 + return 0;
21180 +
21181 + /* PaX: it's our fault, let's handle it if we can */
21182 +
21183 + /* PaX: take a look at read faults before acquiring any locks */
21184 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
21185 + /* instruction fetch attempt from a protected page in user mode */
21186 + up_read(&mm->mmap_sem);
21187 +
21188 +#ifdef CONFIG_PAX_EMUTRAMP
21189 + switch (pax_handle_fetch_fault(regs)) {
21190 + case 2:
21191 + return 1;
21192 + }
21193 +#endif
21194 +
21195 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21196 + do_group_exit(SIGKILL);
21197 + }
21198 +
21199 + pmd = pax_get_pmd(mm, address);
21200 + if (unlikely(!pmd))
21201 + return 0;
21202 +
21203 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
21204 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
21205 + pte_unmap_unlock(pte, ptl);
21206 + return 0;
21207 + }
21208 +
21209 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
21210 + /* write attempt to a protected page in user mode */
21211 + pte_unmap_unlock(pte, ptl);
21212 + return 0;
21213 + }
21214 +
21215 +#ifdef CONFIG_SMP
21216 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
21217 +#else
21218 + if (likely(address > get_limit(regs->cs)))
21219 +#endif
21220 + {
21221 + set_pte(pte, pte_mkread(*pte));
21222 + __flush_tlb_one(address);
21223 + pte_unmap_unlock(pte, ptl);
21224 + up_read(&mm->mmap_sem);
21225 + return 1;
21226 + }
21227 +
21228 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
21229 +
21230 + /*
21231 + * PaX: fill DTLB with user rights and retry
21232 + */
21233 + __asm__ __volatile__ (
21234 + "orb %2,(%1)\n"
21235 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
21236 +/*
21237 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
21238 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
21239 + * page fault when examined during a TLB load attempt. this is true not only
21240 + * for PTEs holding a non-present entry but also present entries that will
21241 + * raise a page fault (such as those set up by PaX, or the copy-on-write
21242 + * mechanism). in effect it means that we do *not* need to flush the TLBs
21243 + * for our target pages since their PTEs are simply not in the TLBs at all.
21244 +
21245 + * the best thing in omitting it is that we gain around 15-20% speed in the
21246 + * fast path of the page fault handler and can get rid of tracing since we
21247 + * can no longer flush unintended entries.
21248 + */
21249 + "invlpg (%0)\n"
21250 +#endif
21251 + __copyuser_seg"testb $0,(%0)\n"
21252 + "xorb %3,(%1)\n"
21253 + :
21254 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
21255 + : "memory", "cc");
21256 + pte_unmap_unlock(pte, ptl);
21257 + up_read(&mm->mmap_sem);
21258 + return 1;
21259 +}
21260 +#endif
21261 +
21262 /*
21263 * Handle a spurious fault caused by a stale TLB entry.
21264 *
21265 @@ -923,6 +1167,9 @@ int show_unhandled_signals = 1;
21266 static inline int
21267 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
21268 {
21269 + if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
21270 + return 1;
21271 +
21272 if (write) {
21273 /* write, present and write, not present: */
21274 if (unlikely(!(vma->vm_flags & VM_WRITE)))
21275 @@ -956,17 +1203,31 @@ do_page_fault(struct pt_regs *regs, unsi
21276 {
21277 struct vm_area_struct *vma;
21278 struct task_struct *tsk;
21279 - unsigned long address;
21280 struct mm_struct *mm;
21281 int write;
21282 int fault;
21283
21284 + /* Get the faulting address: */
21285 + unsigned long address = read_cr2();
21286 +
21287 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21288 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
21289 + if (!search_exception_tables(regs->ip)) {
21290 + bad_area_nosemaphore(regs, error_code, address);
21291 + return;
21292 + }
21293 + if (address < PAX_USER_SHADOW_BASE) {
21294 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
21295 + printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
21296 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
21297 + } else
21298 + address -= PAX_USER_SHADOW_BASE;
21299 + }
21300 +#endif
21301 +
21302 tsk = current;
21303 mm = tsk->mm;
21304
21305 - /* Get the faulting address: */
21306 - address = read_cr2();
21307 -
21308 /*
21309 * Detect and handle instructions that would cause a page fault for
21310 * both a tracked kernel page and a userspace page.
21311 @@ -1026,7 +1287,7 @@ do_page_fault(struct pt_regs *regs, unsi
21312 * User-mode registers count as a user access even for any
21313 * potential system fault or CPU buglet:
21314 */
21315 - if (user_mode_vm(regs)) {
21316 + if (user_mode(regs)) {
21317 local_irq_enable();
21318 error_code |= PF_USER;
21319 } else {
21320 @@ -1080,6 +1341,11 @@ do_page_fault(struct pt_regs *regs, unsi
21321 might_sleep();
21322 }
21323
21324 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21325 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
21326 + return;
21327 +#endif
21328 +
21329 vma = find_vma(mm, address);
21330 if (unlikely(!vma)) {
21331 bad_area(regs, error_code, address);
21332 @@ -1091,18 +1357,24 @@ do_page_fault(struct pt_regs *regs, unsi
21333 bad_area(regs, error_code, address);
21334 return;
21335 }
21336 - if (error_code & PF_USER) {
21337 - /*
21338 - * Accessing the stack below %sp is always a bug.
21339 - * The large cushion allows instructions like enter
21340 - * and pusha to work. ("enter $65535, $31" pushes
21341 - * 32 pointers and then decrements %sp by 65535.)
21342 - */
21343 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
21344 - bad_area(regs, error_code, address);
21345 - return;
21346 - }
21347 + /*
21348 + * Accessing the stack below %sp is always a bug.
21349 + * The large cushion allows instructions like enter
21350 + * and pusha to work. ("enter $65535, $31" pushes
21351 + * 32 pointers and then decrements %sp by 65535.)
21352 + */
21353 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
21354 + bad_area(regs, error_code, address);
21355 + return;
21356 }
21357 +
21358 +#ifdef CONFIG_PAX_SEGMEXEC
21359 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
21360 + bad_area(regs, error_code, address);
21361 + return;
21362 + }
21363 +#endif
21364 +
21365 if (unlikely(expand_stack(vma, address))) {
21366 bad_area(regs, error_code, address);
21367 return;
21368 @@ -1146,3 +1418,199 @@ good_area:
21369
21370 up_read(&mm->mmap_sem);
21371 }
21372 +
21373 +#ifdef CONFIG_PAX_EMUTRAMP
21374 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
21375 +{
21376 + int err;
21377 +
21378 + do { /* PaX: gcc trampoline emulation #1 */
21379 + unsigned char mov1, mov2;
21380 + unsigned short jmp;
21381 + unsigned int addr1, addr2;
21382 +
21383 +#ifdef CONFIG_X86_64
21384 + if ((regs->ip + 11) >> 32)
21385 + break;
21386 +#endif
21387 +
21388 + err = get_user(mov1, (unsigned char __user *)regs->ip);
21389 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21390 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
21391 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21392 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
21393 +
21394 + if (err)
21395 + break;
21396 +
21397 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
21398 + regs->cx = addr1;
21399 + regs->ax = addr2;
21400 + regs->ip = addr2;
21401 + return 2;
21402 + }
21403 + } while (0);
21404 +
21405 + do { /* PaX: gcc trampoline emulation #2 */
21406 + unsigned char mov, jmp;
21407 + unsigned int addr1, addr2;
21408 +
21409 +#ifdef CONFIG_X86_64
21410 + if ((regs->ip + 9) >> 32)
21411 + break;
21412 +#endif
21413 +
21414 + err = get_user(mov, (unsigned char __user *)regs->ip);
21415 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21416 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
21417 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21418 +
21419 + if (err)
21420 + break;
21421 +
21422 + if (mov == 0xB9 && jmp == 0xE9) {
21423 + regs->cx = addr1;
21424 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
21425 + return 2;
21426 + }
21427 + } while (0);
21428 +
21429 + return 1; /* PaX in action */
21430 +}
21431 +
21432 +#ifdef CONFIG_X86_64
21433 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
21434 +{
21435 + int err;
21436 +
21437 + do { /* PaX: gcc trampoline emulation #1 */
21438 + unsigned short mov1, mov2, jmp1;
21439 + unsigned char jmp2;
21440 + unsigned int addr1;
21441 + unsigned long addr2;
21442 +
21443 + err = get_user(mov1, (unsigned short __user *)regs->ip);
21444 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
21445 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
21446 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
21447 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
21448 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
21449 +
21450 + if (err)
21451 + break;
21452 +
21453 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21454 + regs->r11 = addr1;
21455 + regs->r10 = addr2;
21456 + regs->ip = addr1;
21457 + return 2;
21458 + }
21459 + } while (0);
21460 +
21461 + do { /* PaX: gcc trampoline emulation #2 */
21462 + unsigned short mov1, mov2, jmp1;
21463 + unsigned char jmp2;
21464 + unsigned long addr1, addr2;
21465 +
21466 + err = get_user(mov1, (unsigned short __user *)regs->ip);
21467 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
21468 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
21469 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
21470 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
21471 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
21472 +
21473 + if (err)
21474 + break;
21475 +
21476 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21477 + regs->r11 = addr1;
21478 + regs->r10 = addr2;
21479 + regs->ip = addr1;
21480 + return 2;
21481 + }
21482 + } while (0);
21483 +
21484 + return 1; /* PaX in action */
21485 +}
21486 +#endif
21487 +
21488 +/*
21489 + * PaX: decide what to do with offenders (regs->ip = fault address)
21490 + *
21491 + * returns 1 when task should be killed
21492 + * 2 when gcc trampoline was detected
21493 + */
21494 +static int pax_handle_fetch_fault(struct pt_regs *regs)
21495 +{
21496 + if (v8086_mode(regs))
21497 + return 1;
21498 +
21499 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
21500 + return 1;
21501 +
21502 +#ifdef CONFIG_X86_32
21503 + return pax_handle_fetch_fault_32(regs);
21504 +#else
21505 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
21506 + return pax_handle_fetch_fault_32(regs);
21507 + else
21508 + return pax_handle_fetch_fault_64(regs);
21509 +#endif
21510 +}
21511 +#endif
21512 +
21513 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21514 +void pax_report_insns(void *pc, void *sp)
21515 +{
21516 + long i;
21517 +
21518 + printk(KERN_ERR "PAX: bytes at PC: ");
21519 + for (i = 0; i < 20; i++) {
21520 + unsigned char c;
21521 + if (get_user(c, (__force unsigned char __user *)pc+i))
21522 + printk(KERN_CONT "?? ");
21523 + else
21524 + printk(KERN_CONT "%02x ", c);
21525 + }
21526 + printk("\n");
21527 +
21528 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
21529 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
21530 + unsigned long c;
21531 + if (get_user(c, (__force unsigned long __user *)sp+i))
21532 +#ifdef CONFIG_X86_32
21533 + printk(KERN_CONT "???????? ");
21534 +#else
21535 + printk(KERN_CONT "???????????????? ");
21536 +#endif
21537 + else
21538 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
21539 + }
21540 + printk("\n");
21541 +}
21542 +#endif
21543 +
21544 +/**
21545 + * probe_kernel_write(): safely attempt to write to a location
21546 + * @dst: address to write to
21547 + * @src: pointer to the data that shall be written
21548 + * @size: size of the data chunk
21549 + *
21550 + * Safely write to address @dst from the buffer at @src. If a kernel fault
21551 + * happens, handle that and return -EFAULT.
21552 + */
21553 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
21554 +{
21555 + long ret;
21556 + mm_segment_t old_fs = get_fs();
21557 +
21558 + set_fs(KERNEL_DS);
21559 + pagefault_disable();
21560 + pax_open_kernel();
21561 + ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
21562 + pax_close_kernel();
21563 + pagefault_enable();
21564 + set_fs(old_fs);
21565 +
21566 + return ret ? -EFAULT : 0;
21567 +}
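pax_handle_fetch_fault_32() above recognizes the two instruction sequences gcc emits for nested-function trampolines and emulates them rather than killing the task, since under PAGEEXEC/SEGMEXEC the stack-resident trampoline is no longer executable. The stand-alone matcher below reproduces pattern #1 (mov $imm32,%ecx / mov $imm32,%eax / jmp *%eax) against a byte buffer instead of get_user(); it assumes a little-endian host, as x86 is.

/* Stand-alone matcher for gcc trampoline pattern #1 emulated above:
 *   b9 <imm32>   mov $addr1, %ecx
 *   b8 <imm32>   mov $addr2, %eax
 *   ff e0        jmp *%eax
 * The kernel reads these bytes with get_user(); here they come from a
 * local buffer, and "emulation" just reports the recovered values. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int match_trampoline1(const uint8_t *ip, uint32_t *addr1, uint32_t *addr2)
{
	if (ip[0] != 0xB9 || ip[5] != 0xB8 || ip[10] != 0xFF || ip[11] != 0xE0)
		return 0;
	memcpy(addr1, ip + 1, 4);   /* little-endian immediate after mov %ecx */
	memcpy(addr2, ip + 6, 4);   /* little-endian immediate after mov %eax */
	return 1;
}

int main(void)
{
	/* mov $0x11223344,%ecx ; mov $0x55667788,%eax ; jmp *%eax */
	const uint8_t tramp[12] = {
		0xB9, 0x44, 0x33, 0x22, 0x11,
		0xB8, 0x88, 0x77, 0x66, 0x55,
		0xFF, 0xE0,
	};
	uint32_t a1, a2;

	if (match_trampoline1(tramp, &a1, &a2))
		printf("emulate: ecx=%#x, eax=ip=%#x\n", a1, a2);
	return 0;
}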
21568 diff -urNp linux-2.6.32.45/arch/x86/mm/gup.c linux-2.6.32.45/arch/x86/mm/gup.c
21569 --- linux-2.6.32.45/arch/x86/mm/gup.c 2011-03-27 14:31:47.000000000 -0400
21570 +++ linux-2.6.32.45/arch/x86/mm/gup.c 2011-04-17 15:56:46.000000000 -0400
21571 @@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long
21572 addr = start;
21573 len = (unsigned long) nr_pages << PAGE_SHIFT;
21574 end = start + len;
21575 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
21576 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
21577 (void __user *)start, len)))
21578 return 0;
21579
21580 diff -urNp linux-2.6.32.45/arch/x86/mm/highmem_32.c linux-2.6.32.45/arch/x86/mm/highmem_32.c
21581 --- linux-2.6.32.45/arch/x86/mm/highmem_32.c 2011-03-27 14:31:47.000000000 -0400
21582 +++ linux-2.6.32.45/arch/x86/mm/highmem_32.c 2011-04-17 15:56:46.000000000 -0400
21583 @@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page
21584 idx = type + KM_TYPE_NR*smp_processor_id();
21585 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
21586 BUG_ON(!pte_none(*(kmap_pte-idx)));
21587 +
21588 + pax_open_kernel();
21589 set_pte(kmap_pte-idx, mk_pte(page, prot));
21590 + pax_close_kernel();
21591
21592 return (void *)vaddr;
21593 }
21594 diff -urNp linux-2.6.32.45/arch/x86/mm/hugetlbpage.c linux-2.6.32.45/arch/x86/mm/hugetlbpage.c
21595 --- linux-2.6.32.45/arch/x86/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
21596 +++ linux-2.6.32.45/arch/x86/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
21597 @@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmappe
21598 struct hstate *h = hstate_file(file);
21599 struct mm_struct *mm = current->mm;
21600 struct vm_area_struct *vma;
21601 - unsigned long start_addr;
21602 + unsigned long start_addr, pax_task_size = TASK_SIZE;
21603 +
21604 +#ifdef CONFIG_PAX_SEGMEXEC
21605 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21606 + pax_task_size = SEGMEXEC_TASK_SIZE;
21607 +#endif
21608 +
21609 + pax_task_size -= PAGE_SIZE;
21610
21611 if (len > mm->cached_hole_size) {
21612 - start_addr = mm->free_area_cache;
21613 + start_addr = mm->free_area_cache;
21614 } else {
21615 - start_addr = TASK_UNMAPPED_BASE;
21616 - mm->cached_hole_size = 0;
21617 + start_addr = mm->mmap_base;
21618 + mm->cached_hole_size = 0;
21619 }
21620
21621 full_search:
21622 @@ -281,26 +288,27 @@ full_search:
21623
21624 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
21625 /* At this point: (!vma || addr < vma->vm_end). */
21626 - if (TASK_SIZE - len < addr) {
21627 + if (pax_task_size - len < addr) {
21628 /*
21629 * Start a new search - just in case we missed
21630 * some holes.
21631 */
21632 - if (start_addr != TASK_UNMAPPED_BASE) {
21633 - start_addr = TASK_UNMAPPED_BASE;
21634 + if (start_addr != mm->mmap_base) {
21635 + start_addr = mm->mmap_base;
21636 mm->cached_hole_size = 0;
21637 goto full_search;
21638 }
21639 return -ENOMEM;
21640 }
21641 - if (!vma || addr + len <= vma->vm_start) {
21642 - mm->free_area_cache = addr + len;
21643 - return addr;
21644 - }
21645 + if (check_heap_stack_gap(vma, addr, len))
21646 + break;
21647 if (addr + mm->cached_hole_size < vma->vm_start)
21648 mm->cached_hole_size = vma->vm_start - addr;
21649 addr = ALIGN(vma->vm_end, huge_page_size(h));
21650 }
21651 +
21652 + mm->free_area_cache = addr + len;
21653 + return addr;
21654 }
21655
21656 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
21657 @@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmappe
21658 {
21659 struct hstate *h = hstate_file(file);
21660 struct mm_struct *mm = current->mm;
21661 - struct vm_area_struct *vma, *prev_vma;
21662 - unsigned long base = mm->mmap_base, addr = addr0;
21663 + struct vm_area_struct *vma;
21664 + unsigned long base = mm->mmap_base, addr;
21665 unsigned long largest_hole = mm->cached_hole_size;
21666 - int first_time = 1;
21667
21668 /* don't allow allocations above current base */
21669 if (mm->free_area_cache > base)
21670 @@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmappe
21671 largest_hole = 0;
21672 mm->free_area_cache = base;
21673 }
21674 -try_again:
21675 +
21676 /* make sure it can fit in the remaining address space */
21677 if (mm->free_area_cache < len)
21678 goto fail;
21679
21680 /* either no address requested or cant fit in requested address hole */
21681 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
21682 + addr = (mm->free_area_cache - len);
21683 do {
21684 + addr &= huge_page_mask(h);
21685 + vma = find_vma(mm, addr);
21686 /*
21687 * Lookup failure means no vma is above this address,
21688 * i.e. return with success:
21689 - */
21690 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
21691 - return addr;
21692 -
21693 - /*
21694 * new region fits between prev_vma->vm_end and
21695 * vma->vm_start, use it:
21696 */
21697 - if (addr + len <= vma->vm_start &&
21698 - (!prev_vma || (addr >= prev_vma->vm_end))) {
21699 + if (check_heap_stack_gap(vma, addr, len)) {
21700 /* remember the address as a hint for next time */
21701 - mm->cached_hole_size = largest_hole;
21702 - return (mm->free_area_cache = addr);
21703 - } else {
21704 - /* pull free_area_cache down to the first hole */
21705 - if (mm->free_area_cache == vma->vm_end) {
21706 - mm->free_area_cache = vma->vm_start;
21707 - mm->cached_hole_size = largest_hole;
21708 - }
21709 + mm->cached_hole_size = largest_hole;
21710 + return (mm->free_area_cache = addr);
21711 + }
21712 + /* pull free_area_cache down to the first hole */
21713 + if (mm->free_area_cache == vma->vm_end) {
21714 + mm->free_area_cache = vma->vm_start;
21715 + mm->cached_hole_size = largest_hole;
21716 }
21717
21718 /* remember the largest hole we saw so far */
21719 if (addr + largest_hole < vma->vm_start)
21720 - largest_hole = vma->vm_start - addr;
21721 + largest_hole = vma->vm_start - addr;
21722
21723 /* try just below the current vma->vm_start */
21724 - addr = (vma->vm_start - len) & huge_page_mask(h);
21725 - } while (len <= vma->vm_start);
21726 + addr = skip_heap_stack_gap(vma, len);
21727 + } while (!IS_ERR_VALUE(addr));
21728
21729 fail:
21730 /*
21731 - * if hint left us with no space for the requested
21732 - * mapping then try again:
21733 - */
21734 - if (first_time) {
21735 - mm->free_area_cache = base;
21736 - largest_hole = 0;
21737 - first_time = 0;
21738 - goto try_again;
21739 - }
21740 - /*
21741 * A failed mmap() very likely causes application failure,
21742 * so fall back to the bottom-up function here. This scenario
21743 * can happen with large stack limits and large mmap()
21744 * allocations.
21745 */
21746 - mm->free_area_cache = TASK_UNMAPPED_BASE;
21747 +
21748 +#ifdef CONFIG_PAX_SEGMEXEC
21749 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21750 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
21751 + else
21752 +#endif
21753 +
21754 + mm->mmap_base = TASK_UNMAPPED_BASE;
21755 +
21756 +#ifdef CONFIG_PAX_RANDMMAP
21757 + if (mm->pax_flags & MF_PAX_RANDMMAP)
21758 + mm->mmap_base += mm->delta_mmap;
21759 +#endif
21760 +
21761 + mm->free_area_cache = mm->mmap_base;
21762 mm->cached_hole_size = ~0UL;
21763 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
21764 len, pgoff, flags);
21765 @@ -387,6 +393,7 @@ fail:
21766 /*
21767 * Restore the topdown base:
21768 */
21769 + mm->mmap_base = base;
21770 mm->free_area_cache = base;
21771 mm->cached_hole_size = ~0UL;
21772
21773 @@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *f
21774 struct hstate *h = hstate_file(file);
21775 struct mm_struct *mm = current->mm;
21776 struct vm_area_struct *vma;
21777 + unsigned long pax_task_size = TASK_SIZE;
21778
21779 if (len & ~huge_page_mask(h))
21780 return -EINVAL;
21781 - if (len > TASK_SIZE)
21782 +
21783 +#ifdef CONFIG_PAX_SEGMEXEC
21784 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21785 + pax_task_size = SEGMEXEC_TASK_SIZE;
21786 +#endif
21787 +
21788 + pax_task_size -= PAGE_SIZE;
21789 +
21790 + if (len > pax_task_size)
21791 return -ENOMEM;
21792
21793 if (flags & MAP_FIXED) {
21794 @@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *f
21795 if (addr) {
21796 addr = ALIGN(addr, huge_page_size(h));
21797 vma = find_vma(mm, addr);
21798 - if (TASK_SIZE - len >= addr &&
21799 - (!vma || addr + len <= vma->vm_start))
21800 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
21801 return addr;
21802 }
21803 if (mm->get_unmapped_area == arch_get_unmapped_area)
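The recurring substitution in these allocator hunks replaces the open-coded "!vma || addr + len <= vma->vm_start" test with check_heap_stack_gap(), defined elsewhere in this patch. A simplified model of what that check enforces is sketched below; the guard-gap size is an assumed placeholder for the sysctl-controlled value, and the real helper may handle additional cases not shown here.

/* Simplified model of the check_heap_stack_gap() test used above.  The
 * struct and the 64k gap are assumptions made for illustration only. */
#include <stdbool.h>
#include <stdio.h>

#define VM_GROWSDOWN   0x0100UL
#define HEAP_STACK_GAP (64UL * 1024)   /* assumption: 64k guard below stacks */

struct vma_model {
	unsigned long vm_start;
	unsigned long vm_end;
	unsigned long vm_flags;
};

static bool check_heap_stack_gap(const struct vma_model *vma,
				 unsigned long addr, unsigned long len)
{
	if (!vma)                              /* nothing above: range is free */
		return true;
	if (vma->vm_flags & VM_GROWSDOWN)      /* keep a gap below stack-like vmas */
		return addr + len + HEAP_STACK_GAP <= vma->vm_start;
	return addr + len <= vma->vm_start;    /* plain non-overlap check */
}

int main(void)
{
	struct vma_model stack = { 0xbf000000UL, 0xc0000000UL, VM_GROWSDOWN };

	printf("far below stack:     %d\n", check_heap_stack_gap(&stack, 0x10000000UL, 0x1000));
	printf("flush against stack: %d\n", check_heap_stack_gap(&stack, 0xbefff000UL, 0x1000));
	return 0;
}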
21804 diff -urNp linux-2.6.32.45/arch/x86/mm/init_32.c linux-2.6.32.45/arch/x86/mm/init_32.c
21805 --- linux-2.6.32.45/arch/x86/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
21806 +++ linux-2.6.32.45/arch/x86/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
21807 @@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
21808 }
21809
21810 /*
21811 - * Creates a middle page table and puts a pointer to it in the
21812 - * given global directory entry. This only returns the gd entry
21813 - * in non-PAE compilation mode, since the middle layer is folded.
21814 - */
21815 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
21816 -{
21817 - pud_t *pud;
21818 - pmd_t *pmd_table;
21819 -
21820 -#ifdef CONFIG_X86_PAE
21821 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
21822 - if (after_bootmem)
21823 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
21824 - else
21825 - pmd_table = (pmd_t *)alloc_low_page();
21826 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
21827 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
21828 - pud = pud_offset(pgd, 0);
21829 - BUG_ON(pmd_table != pmd_offset(pud, 0));
21830 -
21831 - return pmd_table;
21832 - }
21833 -#endif
21834 - pud = pud_offset(pgd, 0);
21835 - pmd_table = pmd_offset(pud, 0);
21836 -
21837 - return pmd_table;
21838 -}
21839 -
21840 -/*
21841 * Create a page table and place a pointer to it in a middle page
21842 * directory entry:
21843 */
21844 @@ -121,13 +91,28 @@ static pte_t * __init one_page_table_ini
21845 page_table = (pte_t *)alloc_low_page();
21846
21847 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
21848 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21849 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
21850 +#else
21851 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
21852 +#endif
21853 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
21854 }
21855
21856 return pte_offset_kernel(pmd, 0);
21857 }
21858
21859 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
21860 +{
21861 + pud_t *pud;
21862 + pmd_t *pmd_table;
21863 +
21864 + pud = pud_offset(pgd, 0);
21865 + pmd_table = pmd_offset(pud, 0);
21866 +
21867 + return pmd_table;
21868 +}
21869 +
21870 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
21871 {
21872 int pgd_idx = pgd_index(vaddr);
21873 @@ -201,6 +186,7 @@ page_table_range_init(unsigned long star
21874 int pgd_idx, pmd_idx;
21875 unsigned long vaddr;
21876 pgd_t *pgd;
21877 + pud_t *pud;
21878 pmd_t *pmd;
21879 pte_t *pte = NULL;
21880
21881 @@ -210,8 +196,13 @@ page_table_range_init(unsigned long star
21882 pgd = pgd_base + pgd_idx;
21883
21884 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
21885 - pmd = one_md_table_init(pgd);
21886 - pmd = pmd + pmd_index(vaddr);
21887 + pud = pud_offset(pgd, vaddr);
21888 + pmd = pmd_offset(pud, vaddr);
21889 +
21890 +#ifdef CONFIG_X86_PAE
21891 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
21892 +#endif
21893 +
21894 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
21895 pmd++, pmd_idx++) {
21896 pte = page_table_kmap_check(one_page_table_init(pmd),
21897 @@ -223,11 +214,20 @@ page_table_range_init(unsigned long star
21898 }
21899 }
21900
21901 -static inline int is_kernel_text(unsigned long addr)
21902 +static inline int is_kernel_text(unsigned long start, unsigned long end)
21903 {
21904 - if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
21905 - return 1;
21906 - return 0;
21907 + if ((start > ktla_ktva((unsigned long)_etext) ||
21908 + end <= ktla_ktva((unsigned long)_stext)) &&
21909 + (start > ktla_ktva((unsigned long)_einittext) ||
21910 + end <= ktla_ktva((unsigned long)_sinittext)) &&
21911 +
21912 +#ifdef CONFIG_ACPI_SLEEP
21913 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
21914 +#endif
21915 +
21916 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
21917 + return 0;
21918 + return 1;
21919 }
21920
21921 /*
21922 @@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned lo
21923 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
21924 unsigned long start_pfn, end_pfn;
21925 pgd_t *pgd_base = swapper_pg_dir;
21926 - int pgd_idx, pmd_idx, pte_ofs;
21927 + unsigned int pgd_idx, pmd_idx, pte_ofs;
21928 unsigned long pfn;
21929 pgd_t *pgd;
21930 + pud_t *pud;
21931 pmd_t *pmd;
21932 pte_t *pte;
21933 unsigned pages_2m, pages_4k;
21934 @@ -278,8 +279,13 @@ repeat:
21935 pfn = start_pfn;
21936 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
21937 pgd = pgd_base + pgd_idx;
21938 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
21939 - pmd = one_md_table_init(pgd);
21940 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
21941 + pud = pud_offset(pgd, 0);
21942 + pmd = pmd_offset(pud, 0);
21943 +
21944 +#ifdef CONFIG_X86_PAE
21945 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
21946 +#endif
21947
21948 if (pfn >= end_pfn)
21949 continue;
21950 @@ -291,14 +297,13 @@ repeat:
21951 #endif
21952 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
21953 pmd++, pmd_idx++) {
21954 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
21955 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
21956
21957 /*
21958 * Map with big pages if possible, otherwise
21959 * create normal page tables:
21960 */
21961 if (use_pse) {
21962 - unsigned int addr2;
21963 pgprot_t prot = PAGE_KERNEL_LARGE;
21964 /*
21965 * first pass will use the same initial
21966 @@ -308,11 +313,7 @@ repeat:
21967 __pgprot(PTE_IDENT_ATTR |
21968 _PAGE_PSE);
21969
21970 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
21971 - PAGE_OFFSET + PAGE_SIZE-1;
21972 -
21973 - if (is_kernel_text(addr) ||
21974 - is_kernel_text(addr2))
21975 + if (is_kernel_text(address, address + PMD_SIZE))
21976 prot = PAGE_KERNEL_LARGE_EXEC;
21977
21978 pages_2m++;
21979 @@ -329,7 +330,7 @@ repeat:
21980 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
21981 pte += pte_ofs;
21982 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
21983 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
21984 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
21985 pgprot_t prot = PAGE_KERNEL;
21986 /*
21987 * first pass will use the same initial
21988 @@ -337,7 +338,7 @@ repeat:
21989 */
21990 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
21991
21992 - if (is_kernel_text(addr))
21993 + if (is_kernel_text(address, address + PAGE_SIZE))
21994 prot = PAGE_KERNEL_EXEC;
21995
21996 pages_4k++;
21997 @@ -489,7 +490,7 @@ void __init native_pagetable_setup_start
21998
21999 pud = pud_offset(pgd, va);
22000 pmd = pmd_offset(pud, va);
22001 - if (!pmd_present(*pmd))
22002 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
22003 break;
22004
22005 pte = pte_offset_kernel(pmd, va);
22006 @@ -541,9 +542,7 @@ void __init early_ioremap_page_table_ran
22007
22008 static void __init pagetable_init(void)
22009 {
22010 - pgd_t *pgd_base = swapper_pg_dir;
22011 -
22012 - permanent_kmaps_init(pgd_base);
22013 + permanent_kmaps_init(swapper_pg_dir);
22014 }
22015
22016 #ifdef CONFIG_ACPI_SLEEP
22017 @@ -551,12 +550,12 @@ static void __init pagetable_init(void)
22018 * ACPI suspend needs this for resume, because things like the intel-agp
22019 * driver might have split up a kernel 4MB mapping.
22020 */
22021 -char swsusp_pg_dir[PAGE_SIZE]
22022 +pgd_t swsusp_pg_dir[PTRS_PER_PGD]
22023 __attribute__ ((aligned(PAGE_SIZE)));
22024
22025 static inline void save_pg_dir(void)
22026 {
22027 - memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
22028 + clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
22029 }
22030 #else /* !CONFIG_ACPI_SLEEP */
22031 static inline void save_pg_dir(void)
22032 @@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
22033 flush_tlb_all();
22034 }
22035
22036 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22037 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22038 EXPORT_SYMBOL_GPL(__supported_pte_mask);
22039
22040 /* user-defined highmem size */
22041 @@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void
22042 * Initialize the boot-time allocator (with low memory only):
22043 */
22044 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
22045 - bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
22046 + bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
22047 PAGE_SIZE);
22048 if (bootmap == -1L)
22049 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
22050 @@ -864,6 +863,12 @@ void __init mem_init(void)
22051
22052 pci_iommu_alloc();
22053
22054 +#ifdef CONFIG_PAX_PER_CPU_PGD
22055 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22056 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22057 + KERNEL_PGD_PTRS);
22058 +#endif
22059 +
22060 #ifdef CONFIG_FLATMEM
22061 BUG_ON(!mem_map);
22062 #endif
22063 @@ -881,7 +886,7 @@ void __init mem_init(void)
22064 set_highmem_pages_init();
22065
22066 codesize = (unsigned long) &_etext - (unsigned long) &_text;
22067 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
22068 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
22069 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
22070
22071 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
22072 @@ -923,10 +928,10 @@ void __init mem_init(void)
22073 ((unsigned long)&__init_end -
22074 (unsigned long)&__init_begin) >> 10,
22075
22076 - (unsigned long)&_etext, (unsigned long)&_edata,
22077 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
22078 + (unsigned long)&_sdata, (unsigned long)&_edata,
22079 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
22080
22081 - (unsigned long)&_text, (unsigned long)&_etext,
22082 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
22083 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
22084
22085 /*
22086 @@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
22087 if (!kernel_set_to_readonly)
22088 return;
22089
22090 + start = ktla_ktva(start);
22091 pr_debug("Set kernel text: %lx - %lx for read write\n",
22092 start, start+size);
22093
22094 @@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
22095 if (!kernel_set_to_readonly)
22096 return;
22097
22098 + start = ktla_ktva(start);
22099 pr_debug("Set kernel text: %lx - %lx for read only\n",
22100 start, start+size);
22101
22102 @@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
22103 unsigned long start = PFN_ALIGN(_text);
22104 unsigned long size = PFN_ALIGN(_etext) - start;
22105
22106 + start = ktla_ktva(start);
22107 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
22108 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
22109 size >> 10);
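The rewritten is_kernel_text() above takes a [start, end) range and reports it as kernel text only when it overlaps the text section, the init text, the ACPI wakeup trampoline or the low ROM/video hole, with ktla_ktva() undoing the KERNEXEC text relocation. Stripped of the kernel symbols, it is a set of interval-overlap tests; the bounds in the sketch below are placeholders, not the real section addresses.

/* Interval-overlap reading of the new is_kernel_text(start, end).  The
 * range boundaries are made-up stand-ins for _stext/_etext and friends. */
#include <stdbool.h>
#include <stdio.h>

static bool range_overlaps(unsigned long start, unsigned long end,
			   unsigned long r_start, unsigned long r_end)
{
	/* mirrors the negated test in the hunk: !(start > r_end || end <= r_start) */
	return !(start > r_end || end <= r_start);
}

static bool is_kernel_text_model(unsigned long start, unsigned long end)
{
	const unsigned long stext = 0xc1000000UL, etext = 0xc1400000UL;  /* placeholder text   */
	const unsigned long sinit = 0xc1600000UL, einit = 0xc1700000UL;  /* placeholder inittext */
	const unsigned long rom_lo = 0x000c0000UL, rom_hi = 0x000fffffUL; /* low ROM/video hole */

	return range_overlaps(start, end, stext, etext) ||
	       range_overlaps(start, end, sinit, einit) ||
	       range_overlaps(start, end, rom_lo, rom_hi);
}

int main(void)
{
	printf("%d\n", is_kernel_text_model(0xc1200000UL, 0xc1400000UL)); /* inside text -> 1 */
	printf("%d\n", is_kernel_text_model(0xc2000000UL, 0xc2200000UL)); /* plain data  -> 0 */
	return 0;
}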
22110 diff -urNp linux-2.6.32.45/arch/x86/mm/init_64.c linux-2.6.32.45/arch/x86/mm/init_64.c
22111 --- linux-2.6.32.45/arch/x86/mm/init_64.c 2011-04-17 17:00:52.000000000 -0400
22112 +++ linux-2.6.32.45/arch/x86/mm/init_64.c 2011-04-17 17:03:05.000000000 -0400
22113 @@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
22114 pmd = fill_pmd(pud, vaddr);
22115 pte = fill_pte(pmd, vaddr);
22116
22117 + pax_open_kernel();
22118 set_pte(pte, new_pte);
22119 + pax_close_kernel();
22120
22121 /*
22122 * It's enough to flush this one mapping.
22123 @@ -223,14 +225,12 @@ static void __init __init_extra_mapping(
22124 pgd = pgd_offset_k((unsigned long)__va(phys));
22125 if (pgd_none(*pgd)) {
22126 pud = (pud_t *) spp_getpage();
22127 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
22128 - _PAGE_USER));
22129 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
22130 }
22131 pud = pud_offset(pgd, (unsigned long)__va(phys));
22132 if (pud_none(*pud)) {
22133 pmd = (pmd_t *) spp_getpage();
22134 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
22135 - _PAGE_USER));
22136 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
22137 }
22138 pmd = pmd_offset(pud, phys);
22139 BUG_ON(!pmd_none(*pmd));
22140 @@ -675,6 +675,12 @@ void __init mem_init(void)
22141
22142 pci_iommu_alloc();
22143
22144 +#ifdef CONFIG_PAX_PER_CPU_PGD
22145 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22146 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22147 + KERNEL_PGD_PTRS);
22148 +#endif
22149 +
22150 /* clear_bss() already clear the empty_zero_page */
22151
22152 reservedpages = 0;
22153 @@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
22154 static struct vm_area_struct gate_vma = {
22155 .vm_start = VSYSCALL_START,
22156 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
22157 - .vm_page_prot = PAGE_READONLY_EXEC,
22158 - .vm_flags = VM_READ | VM_EXEC
22159 + .vm_page_prot = PAGE_READONLY,
22160 + .vm_flags = VM_READ
22161 };
22162
22163 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
22164 @@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long a
22165
22166 const char *arch_vma_name(struct vm_area_struct *vma)
22167 {
22168 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
22169 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
22170 return "[vdso]";
22171 if (vma == &gate_vma)
22172 return "[vsyscall]";
22173 diff -urNp linux-2.6.32.45/arch/x86/mm/init.c linux-2.6.32.45/arch/x86/mm/init.c
22174 --- linux-2.6.32.45/arch/x86/mm/init.c 2011-04-17 17:00:52.000000000 -0400
22175 +++ linux-2.6.32.45/arch/x86/mm/init.c 2011-06-07 19:06:09.000000000 -0400
22176 @@ -69,11 +69,7 @@ static void __init find_early_table_spac
22177 * cause a hotspot and fill up ZONE_DMA. The page tables
22178 * need roughly 0.5KB per GB.
22179 */
22180 -#ifdef CONFIG_X86_32
22181 - start = 0x7000;
22182 -#else
22183 - start = 0x8000;
22184 -#endif
22185 + start = 0x100000;
22186 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
22187 tables, PAGE_SIZE);
22188 if (e820_table_start == -1UL)
22189 @@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_m
22190 #endif
22191
22192 set_nx();
22193 - if (nx_enabled)
22194 + if (nx_enabled && cpu_has_nx)
22195 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
22196
22197 /* Enable PSE if available */
22198 @@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_m
22199 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
22200 * mmio resources as well as potential bios/acpi data regions.
22201 */
22202 +
22203 int devmem_is_allowed(unsigned long pagenr)
22204 {
22205 +#ifdef CONFIG_GRKERNSEC_KMEM
22206 + /* allow BDA */
22207 + if (!pagenr)
22208 + return 1;
22209 + /* allow EBDA */
22210 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
22211 + return 1;
22212 + /* allow ISA/video mem */
22213 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22214 + return 1;
22215 + /* throw out everything else below 1MB */
22216 + if (pagenr <= 256)
22217 + return 0;
22218 +#else
22219 if (pagenr <= 256)
22220 return 1;
22221 +#endif
22222 +
22223 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
22224 return 0;
22225 if (!page_is_ram(pagenr))
22226 @@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigne
22227
22228 void free_initmem(void)
22229 {
22230 +
22231 +#ifdef CONFIG_PAX_KERNEXEC
22232 +#ifdef CONFIG_X86_32
22233 + /* PaX: limit KERNEL_CS to actual size */
22234 + unsigned long addr, limit;
22235 + struct desc_struct d;
22236 + int cpu;
22237 +
22238 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
22239 + limit = (limit - 1UL) >> PAGE_SHIFT;
22240 +
22241 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
22242 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
22243 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
22244 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
22245 + }
22246 +
22247 + /* PaX: make KERNEL_CS read-only */
22248 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
22249 + if (!paravirt_enabled())
22250 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
22251 +/*
22252 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
22253 + pgd = pgd_offset_k(addr);
22254 + pud = pud_offset(pgd, addr);
22255 + pmd = pmd_offset(pud, addr);
22256 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22257 + }
22258 +*/
22259 +#ifdef CONFIG_X86_PAE
22260 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
22261 +/*
22262 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
22263 + pgd = pgd_offset_k(addr);
22264 + pud = pud_offset(pgd, addr);
22265 + pmd = pmd_offset(pud, addr);
22266 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22267 + }
22268 +*/
22269 +#endif
22270 +
22271 +#ifdef CONFIG_MODULES
22272 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
22273 +#endif
22274 +
22275 +#else
22276 + pgd_t *pgd;
22277 + pud_t *pud;
22278 + pmd_t *pmd;
22279 + unsigned long addr, end;
22280 +
22281 + /* PaX: make kernel code/rodata read-only, rest non-executable */
22282 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
22283 + pgd = pgd_offset_k(addr);
22284 + pud = pud_offset(pgd, addr);
22285 + pmd = pmd_offset(pud, addr);
22286 + if (!pmd_present(*pmd))
22287 + continue;
22288 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
22289 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22290 + else
22291 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22292 + }
22293 +
22294 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
22295 + end = addr + KERNEL_IMAGE_SIZE;
22296 + for (; addr < end; addr += PMD_SIZE) {
22297 + pgd = pgd_offset_k(addr);
22298 + pud = pud_offset(pgd, addr);
22299 + pmd = pmd_offset(pud, addr);
22300 + if (!pmd_present(*pmd))
22301 + continue;
22302 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
22303 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22304 + }
22305 +#endif
22306 +
22307 + flush_tlb_all();
22308 +#endif
22309 +
22310 free_init_pages("unused kernel memory",
22311 (unsigned long)(&__init_begin),
22312 (unsigned long)(&__init_end));
22313 diff -urNp linux-2.6.32.45/arch/x86/mm/iomap_32.c linux-2.6.32.45/arch/x86/mm/iomap_32.c
22314 --- linux-2.6.32.45/arch/x86/mm/iomap_32.c 2011-03-27 14:31:47.000000000 -0400
22315 +++ linux-2.6.32.45/arch/x86/mm/iomap_32.c 2011-04-17 15:56:46.000000000 -0400
22316 @@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long
22317 debug_kmap_atomic(type);
22318 idx = type + KM_TYPE_NR * smp_processor_id();
22319 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22320 +
22321 + pax_open_kernel();
22322 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
22323 + pax_close_kernel();
22324 +
22325 arch_flush_lazy_mmu_mode();
22326
22327 return (void *)vaddr;
22328 diff -urNp linux-2.6.32.45/arch/x86/mm/ioremap.c linux-2.6.32.45/arch/x86/mm/ioremap.c
22329 --- linux-2.6.32.45/arch/x86/mm/ioremap.c 2011-03-27 14:31:47.000000000 -0400
22330 +++ linux-2.6.32.45/arch/x86/mm/ioremap.c 2011-04-17 15:56:46.000000000 -0400
22331 @@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
22332 * Second special case: Some BIOSen report the PC BIOS
22333 * area (640->1Mb) as ram even though it is not.
22334 */
22335 - if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
22336 - pagenr < (BIOS_END >> PAGE_SHIFT))
22337 + if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
22338 + pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22339 return 0;
22340
22341 for (i = 0; i < e820.nr_map; i++) {
22342 @@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(re
22343 /*
22344 * Don't allow anybody to remap normal RAM that we're using..
22345 */
22346 - for (pfn = phys_addr >> PAGE_SHIFT;
22347 - (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
22348 - pfn++) {
22349 -
22350 + for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
22351 int is_ram = page_is_ram(pfn);
22352
22353 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
22354 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
22355 return NULL;
22356 WARN_ON_ONCE(is_ram);
22357 }
22358 @@ -407,7 +404,7 @@ static int __init early_ioremap_debug_se
22359 early_param("early_ioremap_debug", early_ioremap_debug_setup);
22360
22361 static __initdata int after_paging_init;
22362 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
22363 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
22364
22365 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
22366 {
22367 @@ -439,8 +436,7 @@ void __init early_ioremap_init(void)
22368 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
22369
22370 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
22371 - memset(bm_pte, 0, sizeof(bm_pte));
22372 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
22373 + pmd_populate_user(&init_mm, pmd, bm_pte);
22374
22375 /*
22376 * The boot-ioremap range spans multiple pmds, for which
22377 diff -urNp linux-2.6.32.45/arch/x86/mm/kmemcheck/kmemcheck.c linux-2.6.32.45/arch/x86/mm/kmemcheck/kmemcheck.c
22378 --- linux-2.6.32.45/arch/x86/mm/kmemcheck/kmemcheck.c 2011-03-27 14:31:47.000000000 -0400
22379 +++ linux-2.6.32.45/arch/x86/mm/kmemcheck/kmemcheck.c 2011-04-17 15:56:46.000000000 -0400
22380 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
22381 * memory (e.g. tracked pages)? For now, we need this to avoid
22382 * invoking kmemcheck for PnP BIOS calls.
22383 */
22384 - if (regs->flags & X86_VM_MASK)
22385 + if (v8086_mode(regs))
22386 return false;
22387 - if (regs->cs != __KERNEL_CS)
22388 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
22389 return false;
22390
22391 pte = kmemcheck_pte_lookup(address);
22392 diff -urNp linux-2.6.32.45/arch/x86/mm/mmap.c linux-2.6.32.45/arch/x86/mm/mmap.c
22393 --- linux-2.6.32.45/arch/x86/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
22394 +++ linux-2.6.32.45/arch/x86/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
22395 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
22396 * Leave an at least ~128 MB hole with possible stack randomization.
22397 */
22398 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
22399 -#define MAX_GAP (TASK_SIZE/6*5)
22400 +#define MAX_GAP (pax_task_size/6*5)
22401
22402 /*
22403 * True on X86_32 or when emulating IA32 on X86_64
22404 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
22405 return rnd << PAGE_SHIFT;
22406 }
22407
22408 -static unsigned long mmap_base(void)
22409 +static unsigned long mmap_base(struct mm_struct *mm)
22410 {
22411 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
22412 + unsigned long pax_task_size = TASK_SIZE;
22413 +
22414 +#ifdef CONFIG_PAX_SEGMEXEC
22415 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22416 + pax_task_size = SEGMEXEC_TASK_SIZE;
22417 +#endif
22418
22419 if (gap < MIN_GAP)
22420 gap = MIN_GAP;
22421 else if (gap > MAX_GAP)
22422 gap = MAX_GAP;
22423
22424 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
22425 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
22426 }
22427
22428 /*
22429 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
22430 * does, but not when emulating X86_32
22431 */
22432 -static unsigned long mmap_legacy_base(void)
22433 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
22434 {
22435 - if (mmap_is_ia32())
22436 + if (mmap_is_ia32()) {
22437 +
22438 +#ifdef CONFIG_PAX_SEGMEXEC
22439 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22440 + return SEGMEXEC_TASK_UNMAPPED_BASE;
22441 + else
22442 +#endif
22443 +
22444 return TASK_UNMAPPED_BASE;
22445 - else
22446 + } else
22447 return TASK_UNMAPPED_BASE + mmap_rnd();
22448 }
22449
22450 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
22451 void arch_pick_mmap_layout(struct mm_struct *mm)
22452 {
22453 if (mmap_is_legacy()) {
22454 - mm->mmap_base = mmap_legacy_base();
22455 + mm->mmap_base = mmap_legacy_base(mm);
22456 +
22457 +#ifdef CONFIG_PAX_RANDMMAP
22458 + if (mm->pax_flags & MF_PAX_RANDMMAP)
22459 + mm->mmap_base += mm->delta_mmap;
22460 +#endif
22461 +
22462 mm->get_unmapped_area = arch_get_unmapped_area;
22463 mm->unmap_area = arch_unmap_area;
22464 } else {
22465 - mm->mmap_base = mmap_base();
22466 + mm->mmap_base = mmap_base(mm);
22467 +
22468 +#ifdef CONFIG_PAX_RANDMMAP
22469 + if (mm->pax_flags & MF_PAX_RANDMMAP)
22470 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
22471 +#endif
22472 +
22473 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
22474 mm->unmap_area = arch_unmap_area_topdown;
22475 }
22476 diff -urNp linux-2.6.32.45/arch/x86/mm/mmio-mod.c linux-2.6.32.45/arch/x86/mm/mmio-mod.c
22477 --- linux-2.6.32.45/arch/x86/mm/mmio-mod.c 2011-03-27 14:31:47.000000000 -0400
22478 +++ linux-2.6.32.45/arch/x86/mm/mmio-mod.c 2011-07-06 19:53:33.000000000 -0400
22479 @@ -193,7 +193,7 @@ static void pre(struct kmmio_probe *p, s
22480 break;
22481 default:
22482 {
22483 - unsigned char *ip = (unsigned char *)instptr;
22484 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
22485 my_trace->opcode = MMIO_UNKNOWN_OP;
22486 my_trace->width = 0;
22487 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
22488 @@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p,
22489 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
22490 void __iomem *addr)
22491 {
22492 - static atomic_t next_id;
22493 + static atomic_unchecked_t next_id;
22494 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
22495 /* These are page-unaligned. */
22496 struct mmiotrace_map map = {
22497 @@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_
22498 .private = trace
22499 },
22500 .phys = offset,
22501 - .id = atomic_inc_return(&next_id)
22502 + .id = atomic_inc_return_unchecked(&next_id)
22503 };
22504 map.map_id = trace->id;
22505
22506 diff -urNp linux-2.6.32.45/arch/x86/mm/numa_32.c linux-2.6.32.45/arch/x86/mm/numa_32.c
22507 --- linux-2.6.32.45/arch/x86/mm/numa_32.c 2011-03-27 14:31:47.000000000 -0400
22508 +++ linux-2.6.32.45/arch/x86/mm/numa_32.c 2011-04-17 15:56:46.000000000 -0400
22509 @@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int
22510 }
22511 #endif
22512
22513 -extern unsigned long find_max_low_pfn(void);
22514 extern unsigned long highend_pfn, highstart_pfn;
22515
22516 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
22517 diff -urNp linux-2.6.32.45/arch/x86/mm/pageattr.c linux-2.6.32.45/arch/x86/mm/pageattr.c
22518 --- linux-2.6.32.45/arch/x86/mm/pageattr.c 2011-03-27 14:31:47.000000000 -0400
22519 +++ linux-2.6.32.45/arch/x86/mm/pageattr.c 2011-04-17 15:56:46.000000000 -0400
22520 @@ -261,16 +261,17 @@ static inline pgprot_t static_protection
22521 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
22522 */
22523 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
22524 - pgprot_val(forbidden) |= _PAGE_NX;
22525 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22526
22527 /*
22528 * The kernel text needs to be executable for obvious reasons
22529 * Does not cover __inittext since that is gone later on. On
22530 * 64bit we do not enforce !NX on the low mapping
22531 */
22532 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
22533 - pgprot_val(forbidden) |= _PAGE_NX;
22534 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
22535 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22536
22537 +#ifdef CONFIG_DEBUG_RODATA
22538 /*
22539 * The .rodata section needs to be read-only. Using the pfn
22540 * catches all aliases.
22541 @@ -278,6 +279,14 @@ static inline pgprot_t static_protection
22542 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
22543 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
22544 pgprot_val(forbidden) |= _PAGE_RW;
22545 +#endif
22546 +
22547 +#ifdef CONFIG_PAX_KERNEXEC
22548 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
22549 + pgprot_val(forbidden) |= _PAGE_RW;
22550 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22551 + }
22552 +#endif
22553
22554 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
22555
22556 @@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
22557 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
22558 {
22559 /* change init_mm */
22560 + pax_open_kernel();
22561 set_pte_atomic(kpte, pte);
22562 +
22563 #ifdef CONFIG_X86_32
22564 if (!SHARED_KERNEL_PMD) {
22565 +
22566 +#ifdef CONFIG_PAX_PER_CPU_PGD
22567 + unsigned long cpu;
22568 +#else
22569 struct page *page;
22570 +#endif
22571
22572 +#ifdef CONFIG_PAX_PER_CPU_PGD
22573 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
22574 + pgd_t *pgd = get_cpu_pgd(cpu);
22575 +#else
22576 list_for_each_entry(page, &pgd_list, lru) {
22577 - pgd_t *pgd;
22578 + pgd_t *pgd = (pgd_t *)page_address(page);
22579 +#endif
22580 +
22581 pud_t *pud;
22582 pmd_t *pmd;
22583
22584 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
22585 + pgd += pgd_index(address);
22586 pud = pud_offset(pgd, address);
22587 pmd = pmd_offset(pud, address);
22588 set_pte_atomic((pte_t *)pmd, pte);
22589 }
22590 }
22591 #endif
22592 + pax_close_kernel();
22593 }
22594
22595 static int
22596 diff -urNp linux-2.6.32.45/arch/x86/mm/pageattr-test.c linux-2.6.32.45/arch/x86/mm/pageattr-test.c
22597 --- linux-2.6.32.45/arch/x86/mm/pageattr-test.c 2011-03-27 14:31:47.000000000 -0400
22598 +++ linux-2.6.32.45/arch/x86/mm/pageattr-test.c 2011-04-17 15:56:46.000000000 -0400
22599 @@ -36,7 +36,7 @@ enum {
22600
22601 static int pte_testbit(pte_t pte)
22602 {
22603 - return pte_flags(pte) & _PAGE_UNUSED1;
22604 + return pte_flags(pte) & _PAGE_CPA_TEST;
22605 }
22606
22607 struct split_state {
22608 diff -urNp linux-2.6.32.45/arch/x86/mm/pat.c linux-2.6.32.45/arch/x86/mm/pat.c
22609 --- linux-2.6.32.45/arch/x86/mm/pat.c 2011-03-27 14:31:47.000000000 -0400
22610 +++ linux-2.6.32.45/arch/x86/mm/pat.c 2011-04-17 15:56:46.000000000 -0400
22611 @@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct
22612
22613 conflict:
22614 printk(KERN_INFO "%s:%d conflicting memory types "
22615 - "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
22616 + "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
22617 new->end, cattr_name(new->type), cattr_name(entry->type));
22618 return -EBUSY;
22619 }
22620 @@ -559,7 +559,7 @@ unlock_ret:
22621
22622 if (err) {
22623 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
22624 - current->comm, current->pid, start, end);
22625 + current->comm, task_pid_nr(current), start, end);
22626 }
22627
22628 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
22629 @@ -689,8 +689,8 @@ static inline int range_is_allowed(unsig
22630 while (cursor < to) {
22631 if (!devmem_is_allowed(pfn)) {
22632 printk(KERN_INFO
22633 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
22634 - current->comm, from, to);
22635 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
22636 + current->comm, from, to, cursor);
22637 return 0;
22638 }
22639 cursor += PAGE_SIZE;
22640 @@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, un
22641 printk(KERN_INFO
22642 "%s:%d ioremap_change_attr failed %s "
22643 "for %Lx-%Lx\n",
22644 - current->comm, current->pid,
22645 + current->comm, task_pid_nr(current),
22646 cattr_name(flags),
22647 base, (unsigned long long)(base + size));
22648 return -EINVAL;
22649 @@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr,
22650 free_memtype(paddr, paddr + size);
22651 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
22652 " for %Lx-%Lx, got %s\n",
22653 - current->comm, current->pid,
22654 + current->comm, task_pid_nr(current),
22655 cattr_name(want_flags),
22656 (unsigned long long)paddr,
22657 (unsigned long long)(paddr + size),
22658 diff -urNp linux-2.6.32.45/arch/x86/mm/pf_in.c linux-2.6.32.45/arch/x86/mm/pf_in.c
22659 --- linux-2.6.32.45/arch/x86/mm/pf_in.c 2011-03-27 14:31:47.000000000 -0400
22660 +++ linux-2.6.32.45/arch/x86/mm/pf_in.c 2011-07-06 19:53:33.000000000 -0400
22661 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
22662 int i;
22663 enum reason_type rv = OTHERS;
22664
22665 - p = (unsigned char *)ins_addr;
22666 + p = (unsigned char *)ktla_ktva(ins_addr);
22667 p += skip_prefix(p, &prf);
22668 p += get_opcode(p, &opcode);
22669
22670 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
22671 struct prefix_bits prf;
22672 int i;
22673
22674 - p = (unsigned char *)ins_addr;
22675 + p = (unsigned char *)ktla_ktva(ins_addr);
22676 p += skip_prefix(p, &prf);
22677 p += get_opcode(p, &opcode);
22678
22679 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
22680 struct prefix_bits prf;
22681 int i;
22682
22683 - p = (unsigned char *)ins_addr;
22684 + p = (unsigned char *)ktla_ktva(ins_addr);
22685 p += skip_prefix(p, &prf);
22686 p += get_opcode(p, &opcode);
22687
22688 @@ -417,7 +417,7 @@ unsigned long get_ins_reg_val(unsigned l
22689 int i;
22690 unsigned long rv;
22691
22692 - p = (unsigned char *)ins_addr;
22693 + p = (unsigned char *)ktla_ktva(ins_addr);
22694 p += skip_prefix(p, &prf);
22695 p += get_opcode(p, &opcode);
22696 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
22697 @@ -472,7 +472,7 @@ unsigned long get_ins_imm_val(unsigned l
22698 int i;
22699 unsigned long rv;
22700
22701 - p = (unsigned char *)ins_addr;
22702 + p = (unsigned char *)ktla_ktva(ins_addr);
22703 p += skip_prefix(p, &prf);
22704 p += get_opcode(p, &opcode);
22705 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
22706 diff -urNp linux-2.6.32.45/arch/x86/mm/pgtable_32.c linux-2.6.32.45/arch/x86/mm/pgtable_32.c
22707 --- linux-2.6.32.45/arch/x86/mm/pgtable_32.c 2011-03-27 14:31:47.000000000 -0400
22708 +++ linux-2.6.32.45/arch/x86/mm/pgtable_32.c 2011-04-17 15:56:46.000000000 -0400
22709 @@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr,
22710 return;
22711 }
22712 pte = pte_offset_kernel(pmd, vaddr);
22713 +
22714 + pax_open_kernel();
22715 if (pte_val(pteval))
22716 set_pte_at(&init_mm, vaddr, pte, pteval);
22717 else
22718 pte_clear(&init_mm, vaddr, pte);
22719 + pax_close_kernel();
22720
22721 /*
22722 * It's enough to flush this one mapping.
22723 diff -urNp linux-2.6.32.45/arch/x86/mm/pgtable.c linux-2.6.32.45/arch/x86/mm/pgtable.c
22724 --- linux-2.6.32.45/arch/x86/mm/pgtable.c 2011-03-27 14:31:47.000000000 -0400
22725 +++ linux-2.6.32.45/arch/x86/mm/pgtable.c 2011-05-11 18:25:15.000000000 -0400
22726 @@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *p
22727 list_del(&page->lru);
22728 }
22729
22730 -#define UNSHARED_PTRS_PER_PGD \
22731 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
22732 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22733 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
22734
22735 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
22736 +{
22737 + while (count--)
22738 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
22739 +}
22740 +#endif
22741 +
22742 +#ifdef CONFIG_PAX_PER_CPU_PGD
22743 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
22744 +{
22745 + while (count--)
22746 +
22747 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22748 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
22749 +#else
22750 + *dst++ = *src++;
22751 +#endif
22752 +
22753 +}
22754 +#endif
22755 +
22756 +#ifdef CONFIG_X86_64
22757 +#define pxd_t pud_t
22758 +#define pyd_t pgd_t
22759 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
22760 +#define pxd_free(mm, pud) pud_free((mm), (pud))
22761 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
22762 +#define pyd_offset(mm ,address) pgd_offset((mm), (address))
22763 +#define PYD_SIZE PGDIR_SIZE
22764 +#else
22765 +#define pxd_t pmd_t
22766 +#define pyd_t pud_t
22767 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
22768 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
22769 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
22770 +#define pyd_offset(mm ,address) pud_offset((mm), (address))
22771 +#define PYD_SIZE PUD_SIZE
22772 +#endif
22773 +
22774 +#ifdef CONFIG_PAX_PER_CPU_PGD
22775 +static inline void pgd_ctor(pgd_t *pgd) {}
22776 +static inline void pgd_dtor(pgd_t *pgd) {}
22777 +#else
22778 static void pgd_ctor(pgd_t *pgd)
22779 {
22780 /* If the pgd points to a shared pagetable level (either the
22781 @@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
22782 pgd_list_del(pgd);
22783 spin_unlock_irqrestore(&pgd_lock, flags);
22784 }
22785 +#endif
22786
22787 /*
22788 * List of all pgd's needed for non-PAE so it can invalidate entries
22789 @@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
22790 * -- wli
22791 */
22792
22793 -#ifdef CONFIG_X86_PAE
22794 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22795 /*
22796 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
22797 * updating the top-level pagetable entries to guarantee the
22798 @@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
22799 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
22800 * and initialize the kernel pmds here.
22801 */
22802 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
22803 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
22804
22805 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
22806 {
22807 @@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm,
22808 */
22809 flush_tlb_mm(mm);
22810 }
22811 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
22812 +#define PREALLOCATED_PXDS USER_PGD_PTRS
22813 #else /* !CONFIG_X86_PAE */
22814
22815 /* No need to prepopulate any pagetable entries in non-PAE modes. */
22816 -#define PREALLOCATED_PMDS 0
22817 +#define PREALLOCATED_PXDS 0
22818
22819 #endif /* CONFIG_X86_PAE */
22820
22821 -static void free_pmds(pmd_t *pmds[])
22822 +static void free_pxds(pxd_t *pxds[])
22823 {
22824 int i;
22825
22826 - for(i = 0; i < PREALLOCATED_PMDS; i++)
22827 - if (pmds[i])
22828 - free_page((unsigned long)pmds[i]);
22829 + for(i = 0; i < PREALLOCATED_PXDS; i++)
22830 + if (pxds[i])
22831 + free_page((unsigned long)pxds[i]);
22832 }
22833
22834 -static int preallocate_pmds(pmd_t *pmds[])
22835 +static int preallocate_pxds(pxd_t *pxds[])
22836 {
22837 int i;
22838 bool failed = false;
22839
22840 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
22841 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
22842 - if (pmd == NULL)
22843 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
22844 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
22845 + if (pxd == NULL)
22846 failed = true;
22847 - pmds[i] = pmd;
22848 + pxds[i] = pxd;
22849 }
22850
22851 if (failed) {
22852 - free_pmds(pmds);
22853 + free_pxds(pxds);
22854 return -ENOMEM;
22855 }
22856
22857 @@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[
22858 * preallocate which never got a corresponding vma will need to be
22859 * freed manually.
22860 */
22861 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
22862 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
22863 {
22864 int i;
22865
22866 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
22867 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
22868 pgd_t pgd = pgdp[i];
22869
22870 if (pgd_val(pgd) != 0) {
22871 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
22872 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
22873
22874 - pgdp[i] = native_make_pgd(0);
22875 + set_pgd(pgdp + i, native_make_pgd(0));
22876
22877 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
22878 - pmd_free(mm, pmd);
22879 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
22880 + pxd_free(mm, pxd);
22881 }
22882 }
22883 }
22884
22885 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
22886 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
22887 {
22888 - pud_t *pud;
22889 + pyd_t *pyd;
22890 unsigned long addr;
22891 int i;
22892
22893 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
22894 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
22895 return;
22896
22897 - pud = pud_offset(pgd, 0);
22898 +#ifdef CONFIG_X86_64
22899 + pyd = pyd_offset(mm, 0L);
22900 +#else
22901 + pyd = pyd_offset(pgd, 0L);
22902 +#endif
22903
22904 - for (addr = i = 0; i < PREALLOCATED_PMDS;
22905 - i++, pud++, addr += PUD_SIZE) {
22906 - pmd_t *pmd = pmds[i];
22907 + for (addr = i = 0; i < PREALLOCATED_PXDS;
22908 + i++, pyd++, addr += PYD_SIZE) {
22909 + pxd_t *pxd = pxds[i];
22910
22911 if (i >= KERNEL_PGD_BOUNDARY)
22912 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
22913 - sizeof(pmd_t) * PTRS_PER_PMD);
22914 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
22915 + sizeof(pxd_t) * PTRS_PER_PMD);
22916
22917 - pud_populate(mm, pud, pmd);
22918 + pyd_populate(mm, pyd, pxd);
22919 }
22920 }
22921
22922 pgd_t *pgd_alloc(struct mm_struct *mm)
22923 {
22924 pgd_t *pgd;
22925 - pmd_t *pmds[PREALLOCATED_PMDS];
22926 + pxd_t *pxds[PREALLOCATED_PXDS];
22927 +
22928 unsigned long flags;
22929
22930 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
22931 @@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
22932
22933 mm->pgd = pgd;
22934
22935 - if (preallocate_pmds(pmds) != 0)
22936 + if (preallocate_pxds(pxds) != 0)
22937 goto out_free_pgd;
22938
22939 if (paravirt_pgd_alloc(mm) != 0)
22940 - goto out_free_pmds;
22941 + goto out_free_pxds;
22942
22943 /*
22944 * Make sure that pre-populating the pmds is atomic with
22945 @@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
22946 spin_lock_irqsave(&pgd_lock, flags);
22947
22948 pgd_ctor(pgd);
22949 - pgd_prepopulate_pmd(mm, pgd, pmds);
22950 + pgd_prepopulate_pxd(mm, pgd, pxds);
22951
22952 spin_unlock_irqrestore(&pgd_lock, flags);
22953
22954 return pgd;
22955
22956 -out_free_pmds:
22957 - free_pmds(pmds);
22958 +out_free_pxds:
22959 + free_pxds(pxds);
22960 out_free_pgd:
22961 free_page((unsigned long)pgd);
22962 out:
22963 @@ -287,7 +338,7 @@ out:
22964
22965 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
22966 {
22967 - pgd_mop_up_pmds(mm, pgd);
22968 + pgd_mop_up_pxds(mm, pgd);
22969 pgd_dtor(pgd);
22970 paravirt_pgd_free(mm, pgd);
22971 free_page((unsigned long)pgd);
22972 diff -urNp linux-2.6.32.45/arch/x86/mm/setup_nx.c linux-2.6.32.45/arch/x86/mm/setup_nx.c
22973 --- linux-2.6.32.45/arch/x86/mm/setup_nx.c 2011-03-27 14:31:47.000000000 -0400
22974 +++ linux-2.6.32.45/arch/x86/mm/setup_nx.c 2011-04-17 15:56:46.000000000 -0400
22975 @@ -4,11 +4,10 @@
22976
22977 #include <asm/pgtable.h>
22978
22979 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22980 int nx_enabled;
22981
22982 -#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
22983 -static int disable_nx __cpuinitdata;
22984 -
22985 +#ifndef CONFIG_PAX_PAGEEXEC
22986 /*
22987 * noexec = on|off
22988 *
22989 @@ -22,32 +21,26 @@ static int __init noexec_setup(char *str
22990 if (!str)
22991 return -EINVAL;
22992 if (!strncmp(str, "on", 2)) {
22993 - __supported_pte_mask |= _PAGE_NX;
22994 - disable_nx = 0;
22995 + nx_enabled = 1;
22996 } else if (!strncmp(str, "off", 3)) {
22997 - disable_nx = 1;
22998 - __supported_pte_mask &= ~_PAGE_NX;
22999 + nx_enabled = 0;
23000 }
23001 return 0;
23002 }
23003 early_param("noexec", noexec_setup);
23004 #endif
23005 +#endif
23006
23007 #ifdef CONFIG_X86_PAE
23008 void __init set_nx(void)
23009 {
23010 - unsigned int v[4], l, h;
23011 + if (!nx_enabled && cpu_has_nx) {
23012 + unsigned l, h;
23013
23014 - if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
23015 - cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
23016 -
23017 - if ((v[3] & (1 << 20)) && !disable_nx) {
23018 - rdmsr(MSR_EFER, l, h);
23019 - l |= EFER_NX;
23020 - wrmsr(MSR_EFER, l, h);
23021 - nx_enabled = 1;
23022 - __supported_pte_mask |= _PAGE_NX;
23023 - }
23024 + __supported_pte_mask &= ~_PAGE_NX;
23025 + rdmsr(MSR_EFER, l, h);
23026 + l &= ~EFER_NX;
23027 + wrmsr(MSR_EFER, l, h);
23028 }
23029 }
23030 #else
23031 @@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
23032 unsigned long efer;
23033
23034 rdmsrl(MSR_EFER, efer);
23035 - if (!(efer & EFER_NX) || disable_nx)
23036 + if (!(efer & EFER_NX) || !nx_enabled)
23037 __supported_pte_mask &= ~_PAGE_NX;
23038 }
23039 #endif
23040 diff -urNp linux-2.6.32.45/arch/x86/mm/tlb.c linux-2.6.32.45/arch/x86/mm/tlb.c
23041 --- linux-2.6.32.45/arch/x86/mm/tlb.c 2011-03-27 14:31:47.000000000 -0400
23042 +++ linux-2.6.32.45/arch/x86/mm/tlb.c 2011-04-23 12:56:10.000000000 -0400
23043 @@ -61,7 +61,11 @@ void leave_mm(int cpu)
23044 BUG();
23045 cpumask_clear_cpu(cpu,
23046 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
23047 +
23048 +#ifndef CONFIG_PAX_PER_CPU_PGD
23049 load_cr3(swapper_pg_dir);
23050 +#endif
23051 +
23052 }
23053 EXPORT_SYMBOL_GPL(leave_mm);
23054
23055 diff -urNp linux-2.6.32.45/arch/x86/oprofile/backtrace.c linux-2.6.32.45/arch/x86/oprofile/backtrace.c
23056 --- linux-2.6.32.45/arch/x86/oprofile/backtrace.c 2011-03-27 14:31:47.000000000 -0400
23057 +++ linux-2.6.32.45/arch/x86/oprofile/backtrace.c 2011-04-17 15:56:46.000000000 -0400
23058 @@ -57,7 +57,7 @@ static struct frame_head *dump_user_back
23059 struct frame_head bufhead[2];
23060
23061 /* Also check accessibility of one struct frame_head beyond */
23062 - if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
23063 + if (!__access_ok(VERIFY_READ, head, sizeof(bufhead)))
23064 return NULL;
23065 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
23066 return NULL;
23067 @@ -77,7 +77,7 @@ x86_backtrace(struct pt_regs * const reg
23068 {
23069 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
23070
23071 - if (!user_mode_vm(regs)) {
23072 + if (!user_mode(regs)) {
23073 unsigned long stack = kernel_stack_pointer(regs);
23074 if (depth)
23075 dump_trace(NULL, regs, (unsigned long *)stack, 0,
23076 diff -urNp linux-2.6.32.45/arch/x86/oprofile/op_model_p4.c linux-2.6.32.45/arch/x86/oprofile/op_model_p4.c
23077 --- linux-2.6.32.45/arch/x86/oprofile/op_model_p4.c 2011-03-27 14:31:47.000000000 -0400
23078 +++ linux-2.6.32.45/arch/x86/oprofile/op_model_p4.c 2011-04-17 15:56:46.000000000 -0400
23079 @@ -50,7 +50,7 @@ static inline void setup_num_counters(vo
23080 #endif
23081 }
23082
23083 -static int inline addr_increment(void)
23084 +static inline int addr_increment(void)
23085 {
23086 #ifdef CONFIG_SMP
23087 return smp_num_siblings == 2 ? 2 : 1;
23088 diff -urNp linux-2.6.32.45/arch/x86/pci/common.c linux-2.6.32.45/arch/x86/pci/common.c
23089 --- linux-2.6.32.45/arch/x86/pci/common.c 2011-03-27 14:31:47.000000000 -0400
23090 +++ linux-2.6.32.45/arch/x86/pci/common.c 2011-04-23 12:56:10.000000000 -0400
23091 @@ -31,8 +31,8 @@ int noioapicreroute = 1;
23092 int pcibios_last_bus = -1;
23093 unsigned long pirq_table_addr;
23094 struct pci_bus *pci_root_bus;
23095 -struct pci_raw_ops *raw_pci_ops;
23096 -struct pci_raw_ops *raw_pci_ext_ops;
23097 +const struct pci_raw_ops *raw_pci_ops;
23098 +const struct pci_raw_ops *raw_pci_ext_ops;
23099
23100 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
23101 int reg, int len, u32 *val)
23102 diff -urNp linux-2.6.32.45/arch/x86/pci/direct.c linux-2.6.32.45/arch/x86/pci/direct.c
23103 --- linux-2.6.32.45/arch/x86/pci/direct.c 2011-03-27 14:31:47.000000000 -0400
23104 +++ linux-2.6.32.45/arch/x86/pci/direct.c 2011-04-17 15:56:46.000000000 -0400
23105 @@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int
23106
23107 #undef PCI_CONF1_ADDRESS
23108
23109 -struct pci_raw_ops pci_direct_conf1 = {
23110 +const struct pci_raw_ops pci_direct_conf1 = {
23111 .read = pci_conf1_read,
23112 .write = pci_conf1_write,
23113 };
23114 @@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int
23115
23116 #undef PCI_CONF2_ADDRESS
23117
23118 -struct pci_raw_ops pci_direct_conf2 = {
23119 +const struct pci_raw_ops pci_direct_conf2 = {
23120 .read = pci_conf2_read,
23121 .write = pci_conf2_write,
23122 };
23123 @@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
23124 * This should be close to trivial, but it isn't, because there are buggy
23125 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
23126 */
23127 -static int __init pci_sanity_check(struct pci_raw_ops *o)
23128 +static int __init pci_sanity_check(const struct pci_raw_ops *o)
23129 {
23130 u32 x = 0;
23131 int year, devfn;
23132 diff -urNp linux-2.6.32.45/arch/x86/pci/mmconfig_32.c linux-2.6.32.45/arch/x86/pci/mmconfig_32.c
23133 --- linux-2.6.32.45/arch/x86/pci/mmconfig_32.c 2011-03-27 14:31:47.000000000 -0400
23134 +++ linux-2.6.32.45/arch/x86/pci/mmconfig_32.c 2011-04-17 15:56:46.000000000 -0400
23135 @@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int
23136 return 0;
23137 }
23138
23139 -static struct pci_raw_ops pci_mmcfg = {
23140 +static const struct pci_raw_ops pci_mmcfg = {
23141 .read = pci_mmcfg_read,
23142 .write = pci_mmcfg_write,
23143 };
23144 diff -urNp linux-2.6.32.45/arch/x86/pci/mmconfig_64.c linux-2.6.32.45/arch/x86/pci/mmconfig_64.c
23145 --- linux-2.6.32.45/arch/x86/pci/mmconfig_64.c 2011-03-27 14:31:47.000000000 -0400
23146 +++ linux-2.6.32.45/arch/x86/pci/mmconfig_64.c 2011-04-17 15:56:46.000000000 -0400
23147 @@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int
23148 return 0;
23149 }
23150
23151 -static struct pci_raw_ops pci_mmcfg = {
23152 +static const struct pci_raw_ops pci_mmcfg = {
23153 .read = pci_mmcfg_read,
23154 .write = pci_mmcfg_write,
23155 };
23156 diff -urNp linux-2.6.32.45/arch/x86/pci/numaq_32.c linux-2.6.32.45/arch/x86/pci/numaq_32.c
23157 --- linux-2.6.32.45/arch/x86/pci/numaq_32.c 2011-03-27 14:31:47.000000000 -0400
23158 +++ linux-2.6.32.45/arch/x86/pci/numaq_32.c 2011-04-17 15:56:46.000000000 -0400
23159 @@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned i
23160
23161 #undef PCI_CONF1_MQ_ADDRESS
23162
23163 -static struct pci_raw_ops pci_direct_conf1_mq = {
23164 +static const struct pci_raw_ops pci_direct_conf1_mq = {
23165 .read = pci_conf1_mq_read,
23166 .write = pci_conf1_mq_write
23167 };
23168 diff -urNp linux-2.6.32.45/arch/x86/pci/olpc.c linux-2.6.32.45/arch/x86/pci/olpc.c
23169 --- linux-2.6.32.45/arch/x86/pci/olpc.c 2011-03-27 14:31:47.000000000 -0400
23170 +++ linux-2.6.32.45/arch/x86/pci/olpc.c 2011-04-17 15:56:46.000000000 -0400
23171 @@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int s
23172 return 0;
23173 }
23174
23175 -static struct pci_raw_ops pci_olpc_conf = {
23176 +static const struct pci_raw_ops pci_olpc_conf = {
23177 .read = pci_olpc_read,
23178 .write = pci_olpc_write,
23179 };
23180 diff -urNp linux-2.6.32.45/arch/x86/pci/pcbios.c linux-2.6.32.45/arch/x86/pci/pcbios.c
23181 --- linux-2.6.32.45/arch/x86/pci/pcbios.c 2011-03-27 14:31:47.000000000 -0400
23182 +++ linux-2.6.32.45/arch/x86/pci/pcbios.c 2011-04-17 15:56:46.000000000 -0400
23183 @@ -56,50 +56,93 @@ union bios32 {
23184 static struct {
23185 unsigned long address;
23186 unsigned short segment;
23187 -} bios32_indirect = { 0, __KERNEL_CS };
23188 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
23189
23190 /*
23191 * Returns the entry point for the given service, NULL on error
23192 */
23193
23194 -static unsigned long bios32_service(unsigned long service)
23195 +static unsigned long __devinit bios32_service(unsigned long service)
23196 {
23197 unsigned char return_code; /* %al */
23198 unsigned long address; /* %ebx */
23199 unsigned long length; /* %ecx */
23200 unsigned long entry; /* %edx */
23201 unsigned long flags;
23202 + struct desc_struct d, *gdt;
23203
23204 local_irq_save(flags);
23205 - __asm__("lcall *(%%edi); cld"
23206 +
23207 + gdt = get_cpu_gdt_table(smp_processor_id());
23208 +
23209 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
23210 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23211 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
23212 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23213 +
23214 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
23215 : "=a" (return_code),
23216 "=b" (address),
23217 "=c" (length),
23218 "=d" (entry)
23219 : "0" (service),
23220 "1" (0),
23221 - "D" (&bios32_indirect));
23222 + "D" (&bios32_indirect),
23223 + "r"(__PCIBIOS_DS)
23224 + : "memory");
23225 +
23226 + pax_open_kernel();
23227 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
23228 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
23229 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
23230 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
23231 + pax_close_kernel();
23232 +
23233 local_irq_restore(flags);
23234
23235 switch (return_code) {
23236 - case 0:
23237 - return address + entry;
23238 - case 0x80: /* Not present */
23239 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23240 - return 0;
23241 - default: /* Shouldn't happen */
23242 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23243 - service, return_code);
23244 + case 0: {
23245 + int cpu;
23246 + unsigned char flags;
23247 +
23248 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
23249 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
23250 + printk(KERN_WARNING "bios32_service: not valid\n");
23251 return 0;
23252 + }
23253 + address = address + PAGE_OFFSET;
23254 + length += 16UL; /* some BIOSs underreport this... */
23255 + flags = 4;
23256 + if (length >= 64*1024*1024) {
23257 + length >>= PAGE_SHIFT;
23258 + flags |= 8;
23259 + }
23260 +
23261 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
23262 + gdt = get_cpu_gdt_table(cpu);
23263 + pack_descriptor(&d, address, length, 0x9b, flags);
23264 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23265 + pack_descriptor(&d, address, length, 0x93, flags);
23266 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23267 + }
23268 + return entry;
23269 + }
23270 + case 0x80: /* Not present */
23271 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23272 + return 0;
23273 + default: /* Shouldn't happen */
23274 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23275 + service, return_code);
23276 + return 0;
23277 }
23278 }
23279
23280 static struct {
23281 unsigned long address;
23282 unsigned short segment;
23283 -} pci_indirect = { 0, __KERNEL_CS };
23284 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
23285
23286 -static int pci_bios_present;
23287 +static int pci_bios_present __read_only;
23288
23289 static int __devinit check_pcibios(void)
23290 {
23291 @@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
23292 unsigned long flags, pcibios_entry;
23293
23294 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
23295 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
23296 + pci_indirect.address = pcibios_entry;
23297
23298 local_irq_save(flags);
23299 - __asm__(
23300 - "lcall *(%%edi); cld\n\t"
23301 + __asm__("movw %w6, %%ds\n\t"
23302 + "lcall *%%ss:(%%edi); cld\n\t"
23303 + "push %%ss\n\t"
23304 + "pop %%ds\n\t"
23305 "jc 1f\n\t"
23306 "xor %%ah, %%ah\n"
23307 "1:"
23308 @@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
23309 "=b" (ebx),
23310 "=c" (ecx)
23311 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
23312 - "D" (&pci_indirect)
23313 + "D" (&pci_indirect),
23314 + "r" (__PCIBIOS_DS)
23315 : "memory");
23316 local_irq_restore(flags);
23317
23318 @@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int se
23319
23320 switch (len) {
23321 case 1:
23322 - __asm__("lcall *(%%esi); cld\n\t"
23323 + __asm__("movw %w6, %%ds\n\t"
23324 + "lcall *%%ss:(%%esi); cld\n\t"
23325 + "push %%ss\n\t"
23326 + "pop %%ds\n\t"
23327 "jc 1f\n\t"
23328 "xor %%ah, %%ah\n"
23329 "1:"
23330 @@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int se
23331 : "1" (PCIBIOS_READ_CONFIG_BYTE),
23332 "b" (bx),
23333 "D" ((long)reg),
23334 - "S" (&pci_indirect));
23335 + "S" (&pci_indirect),
23336 + "r" (__PCIBIOS_DS));
23337 /*
23338 * Zero-extend the result beyond 8 bits, do not trust the
23339 * BIOS having done it:
23340 @@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int se
23341 *value &= 0xff;
23342 break;
23343 case 2:
23344 - __asm__("lcall *(%%esi); cld\n\t"
23345 + __asm__("movw %w6, %%ds\n\t"
23346 + "lcall *%%ss:(%%esi); cld\n\t"
23347 + "push %%ss\n\t"
23348 + "pop %%ds\n\t"
23349 "jc 1f\n\t"
23350 "xor %%ah, %%ah\n"
23351 "1:"
23352 @@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int se
23353 : "1" (PCIBIOS_READ_CONFIG_WORD),
23354 "b" (bx),
23355 "D" ((long)reg),
23356 - "S" (&pci_indirect));
23357 + "S" (&pci_indirect),
23358 + "r" (__PCIBIOS_DS));
23359 /*
23360 * Zero-extend the result beyond 16 bits, do not trust the
23361 * BIOS having done it:
23362 @@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int se
23363 *value &= 0xffff;
23364 break;
23365 case 4:
23366 - __asm__("lcall *(%%esi); cld\n\t"
23367 + __asm__("movw %w6, %%ds\n\t"
23368 + "lcall *%%ss:(%%esi); cld\n\t"
23369 + "push %%ss\n\t"
23370 + "pop %%ds\n\t"
23371 "jc 1f\n\t"
23372 "xor %%ah, %%ah\n"
23373 "1:"
23374 @@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int se
23375 : "1" (PCIBIOS_READ_CONFIG_DWORD),
23376 "b" (bx),
23377 "D" ((long)reg),
23378 - "S" (&pci_indirect));
23379 + "S" (&pci_indirect),
23380 + "r" (__PCIBIOS_DS));
23381 break;
23382 }
23383
23384 @@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int s
23385
23386 switch (len) {
23387 case 1:
23388 - __asm__("lcall *(%%esi); cld\n\t"
23389 + __asm__("movw %w6, %%ds\n\t"
23390 + "lcall *%%ss:(%%esi); cld\n\t"
23391 + "push %%ss\n\t"
23392 + "pop %%ds\n\t"
23393 "jc 1f\n\t"
23394 "xor %%ah, %%ah\n"
23395 "1:"
23396 @@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int s
23397 "c" (value),
23398 "b" (bx),
23399 "D" ((long)reg),
23400 - "S" (&pci_indirect));
23401 + "S" (&pci_indirect),
23402 + "r" (__PCIBIOS_DS));
23403 break;
23404 case 2:
23405 - __asm__("lcall *(%%esi); cld\n\t"
23406 + __asm__("movw %w6, %%ds\n\t"
23407 + "lcall *%%ss:(%%esi); cld\n\t"
23408 + "push %%ss\n\t"
23409 + "pop %%ds\n\t"
23410 "jc 1f\n\t"
23411 "xor %%ah, %%ah\n"
23412 "1:"
23413 @@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int s
23414 "c" (value),
23415 "b" (bx),
23416 "D" ((long)reg),
23417 - "S" (&pci_indirect));
23418 + "S" (&pci_indirect),
23419 + "r" (__PCIBIOS_DS));
23420 break;
23421 case 4:
23422 - __asm__("lcall *(%%esi); cld\n\t"
23423 + __asm__("movw %w6, %%ds\n\t"
23424 + "lcall *%%ss:(%%esi); cld\n\t"
23425 + "push %%ss\n\t"
23426 + "pop %%ds\n\t"
23427 "jc 1f\n\t"
23428 "xor %%ah, %%ah\n"
23429 "1:"
23430 @@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int s
23431 "c" (value),
23432 "b" (bx),
23433 "D" ((long)reg),
23434 - "S" (&pci_indirect));
23435 + "S" (&pci_indirect),
23436 + "r" (__PCIBIOS_DS));
23437 break;
23438 }
23439
23440 @@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int s
23441 * Function table for BIOS32 access
23442 */
23443
23444 -static struct pci_raw_ops pci_bios_access = {
23445 +static const struct pci_raw_ops pci_bios_access = {
23446 .read = pci_bios_read,
23447 .write = pci_bios_write
23448 };
23449 @@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_acces
23450 * Try to find PCI BIOS.
23451 */
23452
23453 -static struct pci_raw_ops * __devinit pci_find_bios(void)
23454 +static const struct pci_raw_ops * __devinit pci_find_bios(void)
23455 {
23456 union bios32 *check;
23457 unsigned char sum;
23458 @@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_i
23459
23460 DBG("PCI: Fetching IRQ routing table... ");
23461 __asm__("push %%es\n\t"
23462 + "movw %w8, %%ds\n\t"
23463 "push %%ds\n\t"
23464 "pop %%es\n\t"
23465 - "lcall *(%%esi); cld\n\t"
23466 + "lcall *%%ss:(%%esi); cld\n\t"
23467 "pop %%es\n\t"
23468 + "push %%ss\n\t"
23469 + "pop %%ds\n"
23470 "jc 1f\n\t"
23471 "xor %%ah, %%ah\n"
23472 "1:"
23473 @@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_i
23474 "1" (0),
23475 "D" ((long) &opt),
23476 "S" (&pci_indirect),
23477 - "m" (opt)
23478 + "m" (opt),
23479 + "r" (__PCIBIOS_DS)
23480 : "memory");
23481 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
23482 if (ret & 0xff00)
23483 @@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_d
23484 {
23485 int ret;
23486
23487 - __asm__("lcall *(%%esi); cld\n\t"
23488 + __asm__("movw %w5, %%ds\n\t"
23489 + "lcall *%%ss:(%%esi); cld\n\t"
23490 + "push %%ss\n\t"
23491 + "pop %%ds\n"
23492 "jc 1f\n\t"
23493 "xor %%ah, %%ah\n"
23494 "1:"
23495 @@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_d
23496 : "0" (PCIBIOS_SET_PCI_HW_INT),
23497 "b" ((dev->bus->number << 8) | dev->devfn),
23498 "c" ((irq << 8) | (pin + 10)),
23499 - "S" (&pci_indirect));
23500 + "S" (&pci_indirect),
23501 + "r" (__PCIBIOS_DS));
23502 return !(ret & 0xff00);
23503 }
23504 EXPORT_SYMBOL(pcibios_set_irq_routing);
23505 diff -urNp linux-2.6.32.45/arch/x86/power/cpu.c linux-2.6.32.45/arch/x86/power/cpu.c
23506 --- linux-2.6.32.45/arch/x86/power/cpu.c 2011-03-27 14:31:47.000000000 -0400
23507 +++ linux-2.6.32.45/arch/x86/power/cpu.c 2011-04-17 15:56:46.000000000 -0400
23508 @@ -129,7 +129,7 @@ static void do_fpu_end(void)
23509 static void fix_processor_context(void)
23510 {
23511 int cpu = smp_processor_id();
23512 - struct tss_struct *t = &per_cpu(init_tss, cpu);
23513 + struct tss_struct *t = init_tss + cpu;
23514
23515 set_tss_desc(cpu, t); /*
23516 * This just modifies memory; should not be
23517 @@ -139,7 +139,9 @@ static void fix_processor_context(void)
23518 */
23519
23520 #ifdef CONFIG_X86_64
23521 + pax_open_kernel();
23522 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
23523 + pax_close_kernel();
23524
23525 syscall_init(); /* This sets MSR_*STAR and related */
23526 #endif
23527 diff -urNp linux-2.6.32.45/arch/x86/vdso/Makefile linux-2.6.32.45/arch/x86/vdso/Makefile
23528 --- linux-2.6.32.45/arch/x86/vdso/Makefile 2011-03-27 14:31:47.000000000 -0400
23529 +++ linux-2.6.32.45/arch/x86/vdso/Makefile 2011-04-17 15:56:46.000000000 -0400
23530 @@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
23531 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
23532 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
23533
23534 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
23535 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
23536 GCOV_PROFILE := n
23537
23538 #
23539 diff -urNp linux-2.6.32.45/arch/x86/vdso/vclock_gettime.c linux-2.6.32.45/arch/x86/vdso/vclock_gettime.c
23540 --- linux-2.6.32.45/arch/x86/vdso/vclock_gettime.c 2011-03-27 14:31:47.000000000 -0400
23541 +++ linux-2.6.32.45/arch/x86/vdso/vclock_gettime.c 2011-04-17 15:56:46.000000000 -0400
23542 @@ -22,24 +22,48 @@
23543 #include <asm/hpet.h>
23544 #include <asm/unistd.h>
23545 #include <asm/io.h>
23546 +#include <asm/fixmap.h>
23547 #include "vextern.h"
23548
23549 #define gtod vdso_vsyscall_gtod_data
23550
23551 +notrace noinline long __vdso_fallback_time(long *t)
23552 +{
23553 + long secs;
23554 + asm volatile("syscall"
23555 + : "=a" (secs)
23556 + : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
23557 + return secs;
23558 +}
23559 +
23560 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
23561 {
23562 long ret;
23563 asm("syscall" : "=a" (ret) :
23564 - "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
23565 + "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
23566 return ret;
23567 }
23568
23569 +notrace static inline cycle_t __vdso_vread_hpet(void)
23570 +{
23571 + return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
23572 +}
23573 +
23574 +notrace static inline cycle_t __vdso_vread_tsc(void)
23575 +{
23576 + cycle_t ret = (cycle_t)vget_cycles();
23577 +
23578 + return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
23579 +}
23580 +
23581 notrace static inline long vgetns(void)
23582 {
23583 long v;
23584 - cycles_t (*vread)(void);
23585 - vread = gtod->clock.vread;
23586 - v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
23587 + if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
23588 + v = __vdso_vread_tsc();
23589 + else
23590 + v = __vdso_vread_hpet();
23591 + v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
23592 return (v * gtod->clock.mult) >> gtod->clock.shift;
23593 }
23594
23595 @@ -113,7 +137,9 @@ notrace static noinline int do_monotonic
23596
23597 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
23598 {
23599 - if (likely(gtod->sysctl_enabled))
23600 + if (likely(gtod->sysctl_enabled &&
23601 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
23602 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
23603 switch (clock) {
23604 case CLOCK_REALTIME:
23605 if (likely(gtod->clock.vread))
23606 @@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid
23607 int clock_gettime(clockid_t, struct timespec *)
23608 __attribute__((weak, alias("__vdso_clock_gettime")));
23609
23610 -notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
23611 +notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
23612 {
23613 long ret;
23614 - if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
23615 + asm("syscall" : "=a" (ret) :
23616 + "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
23617 + return ret;
23618 +}
23619 +
23620 +notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
23621 +{
23622 + if (likely(gtod->sysctl_enabled &&
23623 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
23624 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
23625 + {
23626 if (likely(tv != NULL)) {
23627 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
23628 offsetof(struct timespec, tv_nsec) ||
23629 @@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct t
23630 }
23631 return 0;
23632 }
23633 - asm("syscall" : "=a" (ret) :
23634 - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
23635 - return ret;
23636 + return __vdso_fallback_gettimeofday(tv, tz);
23637 }
23638 int gettimeofday(struct timeval *, struct timezone *)
23639 __attribute__((weak, alias("__vdso_gettimeofday")));
23640 diff -urNp linux-2.6.32.45/arch/x86/vdso/vdso32-setup.c linux-2.6.32.45/arch/x86/vdso/vdso32-setup.c
23641 --- linux-2.6.32.45/arch/x86/vdso/vdso32-setup.c 2011-03-27 14:31:47.000000000 -0400
23642 +++ linux-2.6.32.45/arch/x86/vdso/vdso32-setup.c 2011-04-23 12:56:10.000000000 -0400
23643 @@ -25,6 +25,7 @@
23644 #include <asm/tlbflush.h>
23645 #include <asm/vdso.h>
23646 #include <asm/proto.h>
23647 +#include <asm/mman.h>
23648
23649 enum {
23650 VDSO_DISABLED = 0,
23651 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
23652 void enable_sep_cpu(void)
23653 {
23654 int cpu = get_cpu();
23655 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
23656 + struct tss_struct *tss = init_tss + cpu;
23657
23658 if (!boot_cpu_has(X86_FEATURE_SEP)) {
23659 put_cpu();
23660 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
23661 gate_vma.vm_start = FIXADDR_USER_START;
23662 gate_vma.vm_end = FIXADDR_USER_END;
23663 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
23664 - gate_vma.vm_page_prot = __P101;
23665 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
23666 /*
23667 * Make sure the vDSO gets into every core dump.
23668 * Dumping its contents makes post-mortem fully interpretable later
23669 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
23670 if (compat)
23671 addr = VDSO_HIGH_BASE;
23672 else {
23673 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
23674 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
23675 if (IS_ERR_VALUE(addr)) {
23676 ret = addr;
23677 goto up_fail;
23678 }
23679 }
23680
23681 - current->mm->context.vdso = (void *)addr;
23682 + current->mm->context.vdso = addr;
23683
23684 if (compat_uses_vma || !compat) {
23685 /*
23686 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
23687 }
23688
23689 current_thread_info()->sysenter_return =
23690 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
23691 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
23692
23693 up_fail:
23694 if (ret)
23695 - current->mm->context.vdso = NULL;
23696 + current->mm->context.vdso = 0;
23697
23698 up_write(&mm->mmap_sem);
23699
23700 @@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
23701
23702 const char *arch_vma_name(struct vm_area_struct *vma)
23703 {
23704 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
23705 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
23706 return "[vdso]";
23707 +
23708 +#ifdef CONFIG_PAX_SEGMEXEC
23709 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
23710 + return "[vdso]";
23711 +#endif
23712 +
23713 return NULL;
23714 }
23715
23716 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
23717 struct mm_struct *mm = tsk->mm;
23718
23719 /* Check to see if this task was created in compat vdso mode */
23720 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
23721 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
23722 return &gate_vma;
23723 return NULL;
23724 }
23725 diff -urNp linux-2.6.32.45/arch/x86/vdso/vdso.lds.S linux-2.6.32.45/arch/x86/vdso/vdso.lds.S
23726 --- linux-2.6.32.45/arch/x86/vdso/vdso.lds.S 2011-03-27 14:31:47.000000000 -0400
23727 +++ linux-2.6.32.45/arch/x86/vdso/vdso.lds.S 2011-06-06 17:35:35.000000000 -0400
23728 @@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
23729 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
23730 #include "vextern.h"
23731 #undef VEXTERN
23732 +
23733 +#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
23734 +VEXTERN(fallback_gettimeofday)
23735 +VEXTERN(fallback_time)
23736 +VEXTERN(getcpu)
23737 +#undef VEXTERN
23738 diff -urNp linux-2.6.32.45/arch/x86/vdso/vextern.h linux-2.6.32.45/arch/x86/vdso/vextern.h
23739 --- linux-2.6.32.45/arch/x86/vdso/vextern.h 2011-03-27 14:31:47.000000000 -0400
23740 +++ linux-2.6.32.45/arch/x86/vdso/vextern.h 2011-04-17 15:56:46.000000000 -0400
23741 @@ -11,6 +11,5 @@
23742 put into vextern.h and be referenced as a pointer with vdso prefix.
23743 The main kernel later fills in the values. */
23744
23745 -VEXTERN(jiffies)
23746 VEXTERN(vgetcpu_mode)
23747 VEXTERN(vsyscall_gtod_data)
23748 diff -urNp linux-2.6.32.45/arch/x86/vdso/vma.c linux-2.6.32.45/arch/x86/vdso/vma.c
23749 --- linux-2.6.32.45/arch/x86/vdso/vma.c 2011-03-27 14:31:47.000000000 -0400
23750 +++ linux-2.6.32.45/arch/x86/vdso/vma.c 2011-08-23 20:24:19.000000000 -0400
23751 @@ -17,8 +17,6 @@
23752 #include "vextern.h" /* Just for VMAGIC. */
23753 #undef VEXTERN
23754
23755 -unsigned int __read_mostly vdso_enabled = 1;
23756 -
23757 extern char vdso_start[], vdso_end[];
23758 extern unsigned short vdso_sync_cpuid;
23759
23760 @@ -27,10 +25,8 @@ static unsigned vdso_size;
23761
23762 static inline void *var_ref(void *p, char *name)
23763 {
23764 - if (*(void **)p != (void *)VMAGIC) {
23765 - printk("VDSO: variable %s broken\n", name);
23766 - vdso_enabled = 0;
23767 - }
23768 + if (*(void **)p != (void *)VMAGIC)
23769 + panic("VDSO: variable %s broken\n", name);
23770 return p;
23771 }
23772
23773 @@ -57,21 +53,18 @@ static int __init init_vdso_vars(void)
23774 if (!vbase)
23775 goto oom;
23776
23777 - if (memcmp(vbase, "\177ELF", 4)) {
23778 - printk("VDSO: I'm broken; not ELF\n");
23779 - vdso_enabled = 0;
23780 - }
23781 + if (memcmp(vbase, ELFMAG, SELFMAG))
23782 + panic("VDSO: I'm broken; not ELF\n");
23783
23784 #define VEXTERN(x) \
23785 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
23786 #include "vextern.h"
23787 #undef VEXTERN
23788 + vunmap(vbase);
23789 return 0;
23790
23791 oom:
23792 - printk("Cannot allocate vdso\n");
23793 - vdso_enabled = 0;
23794 - return -ENOMEM;
23795 + panic("Cannot allocate vdso\n");
23796 }
23797 __initcall(init_vdso_vars);
23798
23799 @@ -105,9 +98,6 @@ int arch_setup_additional_pages(struct l
23800 unsigned long addr;
23801 int ret;
23802
23803 - if (!vdso_enabled)
23804 - return 0;
23805 -
23806 down_write(&mm->mmap_sem);
23807 addr = vdso_addr(mm->start_stack, vdso_size);
23808 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
23809 @@ -116,7 +106,7 @@ int arch_setup_additional_pages(struct l
23810 goto up_fail;
23811 }
23812
23813 - current->mm->context.vdso = (void *)addr;
23814 + current->mm->context.vdso = addr;
23815
23816 ret = install_special_mapping(mm, addr, vdso_size,
23817 VM_READ|VM_EXEC|
23818 @@ -124,7 +114,7 @@ int arch_setup_additional_pages(struct l
23819 VM_ALWAYSDUMP,
23820 vdso_pages);
23821 if (ret) {
23822 - current->mm->context.vdso = NULL;
23823 + current->mm->context.vdso = 0;
23824 goto up_fail;
23825 }
23826
23827 @@ -132,10 +122,3 @@ up_fail:
23828 up_write(&mm->mmap_sem);
23829 return ret;
23830 }
23831 -
23832 -static __init int vdso_setup(char *s)
23833 -{
23834 - vdso_enabled = simple_strtoul(s, NULL, 0);
23835 - return 0;
23836 -}
23837 -__setup("vdso=", vdso_setup);
23838 diff -urNp linux-2.6.32.45/arch/x86/xen/enlighten.c linux-2.6.32.45/arch/x86/xen/enlighten.c
23839 --- linux-2.6.32.45/arch/x86/xen/enlighten.c 2011-03-27 14:31:47.000000000 -0400
23840 +++ linux-2.6.32.45/arch/x86/xen/enlighten.c 2011-05-22 23:02:03.000000000 -0400
23841 @@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
23842
23843 struct shared_info xen_dummy_shared_info;
23844
23845 -void *xen_initial_gdt;
23846 -
23847 /*
23848 * Point at some empty memory to start with. We map the real shared_info
23849 * page as soon as fixmap is up and running.
23850 @@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_des
23851
23852 preempt_disable();
23853
23854 - start = __get_cpu_var(idt_desc).address;
23855 + start = (unsigned long)__get_cpu_var(idt_desc).address;
23856 end = start + __get_cpu_var(idt_desc).size + 1;
23857
23858 xen_mc_flush();
23859 @@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic
23860 #endif
23861 };
23862
23863 -static void xen_reboot(int reason)
23864 +static __noreturn void xen_reboot(int reason)
23865 {
23866 struct sched_shutdown r = { .reason = reason };
23867
23868 @@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
23869 BUG();
23870 }
23871
23872 -static void xen_restart(char *msg)
23873 +static __noreturn void xen_restart(char *msg)
23874 {
23875 xen_reboot(SHUTDOWN_reboot);
23876 }
23877
23878 -static void xen_emergency_restart(void)
23879 +static __noreturn void xen_emergency_restart(void)
23880 {
23881 xen_reboot(SHUTDOWN_reboot);
23882 }
23883
23884 -static void xen_machine_halt(void)
23885 +static __noreturn void xen_machine_halt(void)
23886 {
23887 xen_reboot(SHUTDOWN_poweroff);
23888 }
23889 @@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(
23890 */
23891 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
23892
23893 -#ifdef CONFIG_X86_64
23894 /* Work out if we support NX */
23895 - check_efer();
23896 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23897 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
23898 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
23899 + unsigned l, h;
23900 +
23901 +#ifdef CONFIG_X86_PAE
23902 + nx_enabled = 1;
23903 +#endif
23904 + __supported_pte_mask |= _PAGE_NX;
23905 + rdmsr(MSR_EFER, l, h);
23906 + l |= EFER_NX;
23907 + wrmsr(MSR_EFER, l, h);
23908 + }
23909 #endif
23910
23911 xen_setup_features();
23912 @@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(
23913
23914 machine_ops = xen_machine_ops;
23915
23916 - /*
23917 - * The only reliable way to retain the initial address of the
23918 - * percpu gdt_page is to remember it here, so we can go and
23919 - * mark it RW later, when the initial percpu area is freed.
23920 - */
23921 - xen_initial_gdt = &per_cpu(gdt_page, 0);
23922 -
23923 xen_smp_init();
23924
23925 pgd = (pgd_t *)xen_start_info->pt_base;
23926 diff -urNp linux-2.6.32.45/arch/x86/xen/mmu.c linux-2.6.32.45/arch/x86/xen/mmu.c
23927 --- linux-2.6.32.45/arch/x86/xen/mmu.c 2011-07-13 17:23:04.000000000 -0400
23928 +++ linux-2.6.32.45/arch/x86/xen/mmu.c 2011-08-24 18:35:52.000000000 -0400
23929 @@ -1719,6 +1719,8 @@ __init pgd_t *xen_setup_kernel_pagetable
23930 convert_pfn_mfn(init_level4_pgt);
23931 convert_pfn_mfn(level3_ident_pgt);
23932 convert_pfn_mfn(level3_kernel_pgt);
23933 + convert_pfn_mfn(level3_vmalloc_pgt);
23934 + convert_pfn_mfn(level3_vmemmap_pgt);
23935
23936 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
23937 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
23938 @@ -1737,7 +1739,10 @@ __init pgd_t *xen_setup_kernel_pagetable
23939 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
23940 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
23941 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
23942 + set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
23943 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
23944 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
23945 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
23946 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
23947 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
23948
23949 @@ -1860,6 +1865,7 @@ static __init void xen_post_allocator_in
23950 pv_mmu_ops.set_pud = xen_set_pud;
23951 #if PAGETABLE_LEVELS == 4
23952 pv_mmu_ops.set_pgd = xen_set_pgd;
23953 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
23954 #endif
23955
23956 /* This will work as long as patching hasn't happened yet
23957 @@ -1946,6 +1952,7 @@ static const struct pv_mmu_ops xen_mmu_o
23958 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
23959 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
23960 .set_pgd = xen_set_pgd_hyper,
23961 + .set_pgd_batched = xen_set_pgd_hyper,
23962
23963 .alloc_pud = xen_alloc_pmd_init,
23964 .release_pud = xen_release_pmd_init,
23965 diff -urNp linux-2.6.32.45/arch/x86/xen/smp.c linux-2.6.32.45/arch/x86/xen/smp.c
23966 --- linux-2.6.32.45/arch/x86/xen/smp.c 2011-03-27 14:31:47.000000000 -0400
23967 +++ linux-2.6.32.45/arch/x86/xen/smp.c 2011-05-11 18:25:15.000000000 -0400
23968 @@ -167,11 +167,6 @@ static void __init xen_smp_prepare_boot_
23969 {
23970 BUG_ON(smp_processor_id() != 0);
23971 native_smp_prepare_boot_cpu();
23972 -
23973 - /* We've switched to the "real" per-cpu gdt, so make sure the
23974 - old memory can be recycled */
23975 - make_lowmem_page_readwrite(xen_initial_gdt);
23976 -
23977 xen_setup_vcpu_info_placement();
23978 }
23979
23980 @@ -231,12 +226,12 @@ cpu_initialize_context(unsigned int cpu,
23981 gdt = get_cpu_gdt_table(cpu);
23982
23983 ctxt->flags = VGCF_IN_KERNEL;
23984 - ctxt->user_regs.ds = __USER_DS;
23985 - ctxt->user_regs.es = __USER_DS;
23986 + ctxt->user_regs.ds = __KERNEL_DS;
23987 + ctxt->user_regs.es = __KERNEL_DS;
23988 ctxt->user_regs.ss = __KERNEL_DS;
23989 #ifdef CONFIG_X86_32
23990 ctxt->user_regs.fs = __KERNEL_PERCPU;
23991 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
23992 + savesegment(gs, ctxt->user_regs.gs);
23993 #else
23994 ctxt->gs_base_kernel = per_cpu_offset(cpu);
23995 #endif
23996 @@ -287,13 +282,12 @@ static int __cpuinit xen_cpu_up(unsigned
23997 int rc;
23998
23999 per_cpu(current_task, cpu) = idle;
24000 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
24001 #ifdef CONFIG_X86_32
24002 irq_ctx_init(cpu);
24003 #else
24004 clear_tsk_thread_flag(idle, TIF_FORK);
24005 - per_cpu(kernel_stack, cpu) =
24006 - (unsigned long)task_stack_page(idle) -
24007 - KERNEL_STACK_OFFSET + THREAD_SIZE;
24008 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
24009 #endif
24010 xen_setup_runstate_info(cpu);
24011 xen_setup_timer(cpu);
24012 diff -urNp linux-2.6.32.45/arch/x86/xen/xen-asm_32.S linux-2.6.32.45/arch/x86/xen/xen-asm_32.S
24013 --- linux-2.6.32.45/arch/x86/xen/xen-asm_32.S 2011-03-27 14:31:47.000000000 -0400
24014 +++ linux-2.6.32.45/arch/x86/xen/xen-asm_32.S 2011-04-22 19:13:13.000000000 -0400
24015 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
24016 ESP_OFFSET=4 # bytes pushed onto stack
24017
24018 /*
24019 - * Store vcpu_info pointer for easy access. Do it this way to
24020 - * avoid having to reload %fs
24021 + * Store vcpu_info pointer for easy access.
24022 */
24023 #ifdef CONFIG_SMP
24024 - GET_THREAD_INFO(%eax)
24025 - movl TI_cpu(%eax), %eax
24026 - movl __per_cpu_offset(,%eax,4), %eax
24027 - mov per_cpu__xen_vcpu(%eax), %eax
24028 + push %fs
24029 + mov $(__KERNEL_PERCPU), %eax
24030 + mov %eax, %fs
24031 + mov PER_CPU_VAR(xen_vcpu), %eax
24032 + pop %fs
24033 #else
24034 movl per_cpu__xen_vcpu, %eax
24035 #endif
24036 diff -urNp linux-2.6.32.45/arch/x86/xen/xen-head.S linux-2.6.32.45/arch/x86/xen/xen-head.S
24037 --- linux-2.6.32.45/arch/x86/xen/xen-head.S 2011-03-27 14:31:47.000000000 -0400
24038 +++ linux-2.6.32.45/arch/x86/xen/xen-head.S 2011-04-17 15:56:46.000000000 -0400
24039 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
24040 #ifdef CONFIG_X86_32
24041 mov %esi,xen_start_info
24042 mov $init_thread_union+THREAD_SIZE,%esp
24043 +#ifdef CONFIG_SMP
24044 + movl $cpu_gdt_table,%edi
24045 + movl $__per_cpu_load,%eax
24046 + movw %ax,__KERNEL_PERCPU + 2(%edi)
24047 + rorl $16,%eax
24048 + movb %al,__KERNEL_PERCPU + 4(%edi)
24049 + movb %ah,__KERNEL_PERCPU + 7(%edi)
24050 + movl $__per_cpu_end - 1,%eax
24051 + subl $__per_cpu_start,%eax
24052 + movw %ax,__KERNEL_PERCPU + 0(%edi)
24053 +#endif
24054 #else
24055 mov %rsi,xen_start_info
24056 mov $init_thread_union+THREAD_SIZE,%rsp
24057 diff -urNp linux-2.6.32.45/arch/x86/xen/xen-ops.h linux-2.6.32.45/arch/x86/xen/xen-ops.h
24058 --- linux-2.6.32.45/arch/x86/xen/xen-ops.h 2011-03-27 14:31:47.000000000 -0400
24059 +++ linux-2.6.32.45/arch/x86/xen/xen-ops.h 2011-04-17 15:56:46.000000000 -0400
24060 @@ -10,8 +10,6 @@
24061 extern const char xen_hypervisor_callback[];
24062 extern const char xen_failsafe_callback[];
24063
24064 -extern void *xen_initial_gdt;
24065 -
24066 struct trap_info;
24067 void xen_copy_trap_info(struct trap_info *traps);
24068
24069 diff -urNp linux-2.6.32.45/block/blk-integrity.c linux-2.6.32.45/block/blk-integrity.c
24070 --- linux-2.6.32.45/block/blk-integrity.c 2011-03-27 14:31:47.000000000 -0400
24071 +++ linux-2.6.32.45/block/blk-integrity.c 2011-04-17 15:56:46.000000000 -0400
24072 @@ -278,7 +278,7 @@ static struct attribute *integrity_attrs
24073 NULL,
24074 };
24075
24076 -static struct sysfs_ops integrity_ops = {
24077 +static const struct sysfs_ops integrity_ops = {
24078 .show = &integrity_attr_show,
24079 .store = &integrity_attr_store,
24080 };
24081 diff -urNp linux-2.6.32.45/block/blk-iopoll.c linux-2.6.32.45/block/blk-iopoll.c
24082 --- linux-2.6.32.45/block/blk-iopoll.c 2011-03-27 14:31:47.000000000 -0400
24083 +++ linux-2.6.32.45/block/blk-iopoll.c 2011-04-17 15:56:46.000000000 -0400
24084 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
24085 }
24086 EXPORT_SYMBOL(blk_iopoll_complete);
24087
24088 -static void blk_iopoll_softirq(struct softirq_action *h)
24089 +static void blk_iopoll_softirq(void)
24090 {
24091 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
24092 int rearm = 0, budget = blk_iopoll_budget;
24093 diff -urNp linux-2.6.32.45/block/blk-map.c linux-2.6.32.45/block/blk-map.c
24094 --- linux-2.6.32.45/block/blk-map.c 2011-03-27 14:31:47.000000000 -0400
24095 +++ linux-2.6.32.45/block/blk-map.c 2011-04-18 16:57:33.000000000 -0400
24096 @@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct requ
24097 * direct dma. else, set up kernel bounce buffers
24098 */
24099 uaddr = (unsigned long) ubuf;
24100 - if (blk_rq_aligned(q, ubuf, len) && !map_data)
24101 + if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
24102 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
24103 else
24104 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
24105 @@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_q
24106 for (i = 0; i < iov_count; i++) {
24107 unsigned long uaddr = (unsigned long)iov[i].iov_base;
24108
24109 + if (!iov[i].iov_len)
24110 + return -EINVAL;
24111 +
24112 if (uaddr & queue_dma_alignment(q)) {
24113 unaligned = 1;
24114 break;
24115 }
24116 - if (!iov[i].iov_len)
24117 - return -EINVAL;
24118 }
24119
24120 if (unaligned || (q->dma_pad_mask & len) || map_data)
24121 @@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue
24122 if (!len || !kbuf)
24123 return -EINVAL;
24124
24125 - do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
24126 + do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
24127 if (do_copy)
24128 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
24129 else
24130 diff -urNp linux-2.6.32.45/block/blk-softirq.c linux-2.6.32.45/block/blk-softirq.c
24131 --- linux-2.6.32.45/block/blk-softirq.c 2011-03-27 14:31:47.000000000 -0400
24132 +++ linux-2.6.32.45/block/blk-softirq.c 2011-04-17 15:56:46.000000000 -0400
24133 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
24134 * Softirq action handler - move entries to local list and loop over them
24135 * while passing them to the queue registered handler.
24136 */
24137 -static void blk_done_softirq(struct softirq_action *h)
24138 +static void blk_done_softirq(void)
24139 {
24140 struct list_head *cpu_list, local_list;
24141
24142 diff -urNp linux-2.6.32.45/block/blk-sysfs.c linux-2.6.32.45/block/blk-sysfs.c
24143 --- linux-2.6.32.45/block/blk-sysfs.c 2011-05-10 22:12:01.000000000 -0400
24144 +++ linux-2.6.32.45/block/blk-sysfs.c 2011-05-10 22:12:26.000000000 -0400
24145 @@ -414,7 +414,7 @@ static void blk_release_queue(struct kob
24146 kmem_cache_free(blk_requestq_cachep, q);
24147 }
24148
24149 -static struct sysfs_ops queue_sysfs_ops = {
24150 +static const struct sysfs_ops queue_sysfs_ops = {
24151 .show = queue_attr_show,
24152 .store = queue_attr_store,
24153 };
24154 diff -urNp linux-2.6.32.45/block/bsg.c linux-2.6.32.45/block/bsg.c
24155 --- linux-2.6.32.45/block/bsg.c 2011-03-27 14:31:47.000000000 -0400
24156 +++ linux-2.6.32.45/block/bsg.c 2011-04-17 15:56:46.000000000 -0400
24157 @@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
24158 struct sg_io_v4 *hdr, struct bsg_device *bd,
24159 fmode_t has_write_perm)
24160 {
24161 + unsigned char tmpcmd[sizeof(rq->__cmd)];
24162 + unsigned char *cmdptr;
24163 +
24164 if (hdr->request_len > BLK_MAX_CDB) {
24165 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
24166 if (!rq->cmd)
24167 return -ENOMEM;
24168 - }
24169 + cmdptr = rq->cmd;
24170 + } else
24171 + cmdptr = tmpcmd;
24172
24173 - if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
24174 + if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
24175 hdr->request_len))
24176 return -EFAULT;
24177
24178 + if (cmdptr != rq->cmd)
24179 + memcpy(rq->cmd, cmdptr, hdr->request_len);
24180 +
24181 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
24182 if (blk_verify_command(rq->cmd, has_write_perm))
24183 return -EPERM;
24184 diff -urNp linux-2.6.32.45/block/elevator.c linux-2.6.32.45/block/elevator.c
24185 --- linux-2.6.32.45/block/elevator.c 2011-03-27 14:31:47.000000000 -0400
24186 +++ linux-2.6.32.45/block/elevator.c 2011-04-17 15:56:46.000000000 -0400
24187 @@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, str
24188 return error;
24189 }
24190
24191 -static struct sysfs_ops elv_sysfs_ops = {
24192 +static const struct sysfs_ops elv_sysfs_ops = {
24193 .show = elv_attr_show,
24194 .store = elv_attr_store,
24195 };
24196 diff -urNp linux-2.6.32.45/block/scsi_ioctl.c linux-2.6.32.45/block/scsi_ioctl.c
24197 --- linux-2.6.32.45/block/scsi_ioctl.c 2011-03-27 14:31:47.000000000 -0400
24198 +++ linux-2.6.32.45/block/scsi_ioctl.c 2011-04-23 13:28:22.000000000 -0400
24199 @@ -220,8 +220,20 @@ EXPORT_SYMBOL(blk_verify_command);
24200 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
24201 struct sg_io_hdr *hdr, fmode_t mode)
24202 {
24203 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
24204 + unsigned char tmpcmd[sizeof(rq->__cmd)];
24205 + unsigned char *cmdptr;
24206 +
24207 + if (rq->cmd != rq->__cmd)
24208 + cmdptr = rq->cmd;
24209 + else
24210 + cmdptr = tmpcmd;
24211 +
24212 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
24213 return -EFAULT;
24214 +
24215 + if (cmdptr != rq->cmd)
24216 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
24217 +
24218 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
24219 return -EPERM;
24220
24221 @@ -430,6 +442,8 @@ int sg_scsi_ioctl(struct request_queue *
24222 int err;
24223 unsigned int in_len, out_len, bytes, opcode, cmdlen;
24224 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
24225 + unsigned char tmpcmd[sizeof(rq->__cmd)];
24226 + unsigned char *cmdptr;
24227
24228 if (!sic)
24229 return -EINVAL;
24230 @@ -463,9 +477,18 @@ int sg_scsi_ioctl(struct request_queue *
24231 */
24232 err = -EFAULT;
24233 rq->cmd_len = cmdlen;
24234 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
24235 +
24236 + if (rq->cmd != rq->__cmd)
24237 + cmdptr = rq->cmd;
24238 + else
24239 + cmdptr = tmpcmd;
24240 +
24241 + if (copy_from_user(cmdptr, sic->data, cmdlen))
24242 goto error;
24243
24244 + if (rq->cmd != cmdptr)
24245 + memcpy(rq->cmd, cmdptr, cmdlen);
24246 +
24247 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
24248 goto error;
24249
24250 diff -urNp linux-2.6.32.45/crypto/cryptd.c linux-2.6.32.45/crypto/cryptd.c
24251 --- linux-2.6.32.45/crypto/cryptd.c 2011-03-27 14:31:47.000000000 -0400
24252 +++ linux-2.6.32.45/crypto/cryptd.c 2011-08-23 21:22:32.000000000 -0400
24253 @@ -50,7 +50,7 @@ struct cryptd_blkcipher_ctx {
24254
24255 struct cryptd_blkcipher_request_ctx {
24256 crypto_completion_t complete;
24257 -};
24258 +} __no_const;
24259
24260 struct cryptd_hash_ctx {
24261 struct crypto_shash *child;
24262 diff -urNp linux-2.6.32.45/crypto/gf128mul.c linux-2.6.32.45/crypto/gf128mul.c
24263 --- linux-2.6.32.45/crypto/gf128mul.c 2011-03-27 14:31:47.000000000 -0400
24264 +++ linux-2.6.32.45/crypto/gf128mul.c 2011-07-06 19:53:33.000000000 -0400
24265 @@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
24266 for (i = 0; i < 7; ++i)
24267 gf128mul_x_lle(&p[i + 1], &p[i]);
24268
24269 - memset(r, 0, sizeof(r));
24270 + memset(r, 0, sizeof(*r));
24271 for (i = 0;;) {
24272 u8 ch = ((u8 *)b)[15 - i];
24273
24274 @@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
24275 for (i = 0; i < 7; ++i)
24276 gf128mul_x_bbe(&p[i + 1], &p[i]);
24277
24278 - memset(r, 0, sizeof(r));
24279 + memset(r, 0, sizeof(*r));
24280 for (i = 0;;) {
24281 u8 ch = ((u8 *)b)[i];
24282
24283 diff -urNp linux-2.6.32.45/crypto/serpent.c linux-2.6.32.45/crypto/serpent.c
24284 --- linux-2.6.32.45/crypto/serpent.c 2011-03-27 14:31:47.000000000 -0400
24285 +++ linux-2.6.32.45/crypto/serpent.c 2011-08-18 23:59:56.000000000 -0400
24286 @@ -21,6 +21,7 @@
24287 #include <asm/byteorder.h>
24288 #include <linux/crypto.h>
24289 #include <linux/types.h>
24290 +#include <linux/sched.h>
24291
24292 /* Key is padded to the maximum of 256 bits before round key generation.
24293 * Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
24294 @@ -224,6 +225,8 @@ static int serpent_setkey(struct crypto_
24295 u32 r0,r1,r2,r3,r4;
24296 int i;
24297
24298 + pax_track_stack();
24299 +
24300 /* Copy key, add padding */
24301
24302 for (i = 0; i < keylen; ++i)
24303 diff -urNp linux-2.6.32.45/Documentation/dontdiff linux-2.6.32.45/Documentation/dontdiff
24304 --- linux-2.6.32.45/Documentation/dontdiff 2011-03-27 14:31:47.000000000 -0400
24305 +++ linux-2.6.32.45/Documentation/dontdiff 2011-08-21 18:59:02.000000000 -0400
24306 @@ -1,13 +1,16 @@
24307 *.a
24308 *.aux
24309 *.bin
24310 +*.cis
24311 *.cpio
24312 *.csp
24313 +*.dbg
24314 *.dsp
24315 *.dvi
24316 *.elf
24317 *.eps
24318 *.fw
24319 +*.gcno
24320 *.gen.S
24321 *.gif
24322 *.grep
24323 @@ -38,8 +41,10 @@
24324 *.tab.h
24325 *.tex
24326 *.ver
24327 +*.vim
24328 *.xml
24329 *_MODULES
24330 +*_reg_safe.h
24331 *_vga16.c
24332 *~
24333 *.9
24334 @@ -49,11 +54,16 @@
24335 53c700_d.h
24336 CVS
24337 ChangeSet
24338 +GPATH
24339 +GRTAGS
24340 +GSYMS
24341 +GTAGS
24342 Image
24343 Kerntypes
24344 Module.markers
24345 Module.symvers
24346 PENDING
24347 +PERF*
24348 SCCS
24349 System.map*
24350 TAGS
24351 @@ -76,7 +86,11 @@ btfixupprep
24352 build
24353 bvmlinux
24354 bzImage*
24355 +capability_names.h
24356 +capflags.c
24357 classlist.h*
24358 +clut_vga16.c
24359 +common-cmds.h
24360 comp*.log
24361 compile.h*
24362 conf
24363 @@ -97,19 +111,21 @@ elfconfig.h*
24364 fixdep
24365 fore200e_mkfirm
24366 fore200e_pca_fw.c*
24367 +gate.lds
24368 gconf
24369 gen-devlist
24370 gen_crc32table
24371 gen_init_cpio
24372 genksyms
24373 *_gray256.c
24374 +hash
24375 ihex2fw
24376 ikconfig.h*
24377 initramfs_data.cpio
24378 +initramfs_data.cpio.bz2
24379 initramfs_data.cpio.gz
24380 initramfs_list
24381 kallsyms
24382 -kconfig
24383 keywords.c
24384 ksym.c*
24385 ksym.h*
24386 @@ -133,7 +149,9 @@ mkboot
24387 mkbugboot
24388 mkcpustr
24389 mkdep
24390 +mkpiggy
24391 mkprep
24392 +mkregtable
24393 mktables
24394 mktree
24395 modpost
24396 @@ -149,6 +167,7 @@ patches*
24397 pca200e.bin
24398 pca200e_ecd.bin2
24399 piggy.gz
24400 +piggy.S
24401 piggyback
24402 pnmtologo
24403 ppc_defs.h*
24404 @@ -157,12 +176,15 @@ qconf
24405 raid6altivec*.c
24406 raid6int*.c
24407 raid6tables.c
24408 +regdb.c
24409 relocs
24410 +rlim_names.h
24411 series
24412 setup
24413 setup.bin
24414 setup.elf
24415 sImage
24416 +slabinfo
24417 sm_tbl*
24418 split-include
24419 syscalltab.h
24420 @@ -186,14 +208,20 @@ version.h*
24421 vmlinux
24422 vmlinux-*
24423 vmlinux.aout
24424 +vmlinux.bin.all
24425 +vmlinux.bin.bz2
24426 vmlinux.lds
24427 +vmlinux.relocs
24428 +voffset.h
24429 vsyscall.lds
24430 vsyscall_32.lds
24431 wanxlfw.inc
24432 uImage
24433 unifdef
24434 +utsrelease.h
24435 wakeup.bin
24436 wakeup.elf
24437 wakeup.lds
24438 zImage*
24439 zconf.hash.c
24440 +zoffset.h
24441 diff -urNp linux-2.6.32.45/Documentation/kernel-parameters.txt linux-2.6.32.45/Documentation/kernel-parameters.txt
24442 --- linux-2.6.32.45/Documentation/kernel-parameters.txt 2011-03-27 14:31:47.000000000 -0400
24443 +++ linux-2.6.32.45/Documentation/kernel-parameters.txt 2011-04-17 15:56:45.000000000 -0400
24444 @@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters.
24445 the specified number of seconds. This is to be used if
24446 your oopses keep scrolling off the screen.
24447
24448 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
24449 + virtualization environments that don't cope well with the
24450 + expand down segment used by UDEREF on X86-32 or the frequent
24451 + page table updates on X86-64.
24452 +
24453 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
24454 +
24455 pcbit= [HW,ISDN]
24456
24457 pcd. [PARIDE]
24458 diff -urNp linux-2.6.32.45/drivers/acpi/acpi_pad.c linux-2.6.32.45/drivers/acpi/acpi_pad.c
24459 --- linux-2.6.32.45/drivers/acpi/acpi_pad.c 2011-03-27 14:31:47.000000000 -0400
24460 +++ linux-2.6.32.45/drivers/acpi/acpi_pad.c 2011-04-17 15:56:46.000000000 -0400
24461 @@ -30,7 +30,7 @@
24462 #include <acpi/acpi_bus.h>
24463 #include <acpi/acpi_drivers.h>
24464
24465 -#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
24466 +#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
24467 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
24468 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
24469 static DEFINE_MUTEX(isolated_cpus_lock);
24470 diff -urNp linux-2.6.32.45/drivers/acpi/battery.c linux-2.6.32.45/drivers/acpi/battery.c
24471 --- linux-2.6.32.45/drivers/acpi/battery.c 2011-03-27 14:31:47.000000000 -0400
24472 +++ linux-2.6.32.45/drivers/acpi/battery.c 2011-04-17 15:56:46.000000000 -0400
24473 @@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
24474 }
24475
24476 static struct battery_file {
24477 - struct file_operations ops;
24478 + const struct file_operations ops;
24479 mode_t mode;
24480 const char *name;
24481 } acpi_battery_file[] = {
24482 diff -urNp linux-2.6.32.45/drivers/acpi/dock.c linux-2.6.32.45/drivers/acpi/dock.c
24483 --- linux-2.6.32.45/drivers/acpi/dock.c 2011-03-27 14:31:47.000000000 -0400
24484 +++ linux-2.6.32.45/drivers/acpi/dock.c 2011-04-17 15:56:46.000000000 -0400
24485 @@ -77,7 +77,7 @@ struct dock_dependent_device {
24486 struct list_head list;
24487 struct list_head hotplug_list;
24488 acpi_handle handle;
24489 - struct acpi_dock_ops *ops;
24490 + const struct acpi_dock_ops *ops;
24491 void *context;
24492 };
24493
24494 @@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifi
24495 * the dock driver after _DCK is executed.
24496 */
24497 int
24498 -register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
24499 +register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
24500 void *context)
24501 {
24502 struct dock_dependent_device *dd;
24503 diff -urNp linux-2.6.32.45/drivers/acpi/osl.c linux-2.6.32.45/drivers/acpi/osl.c
24504 --- linux-2.6.32.45/drivers/acpi/osl.c 2011-03-27 14:31:47.000000000 -0400
24505 +++ linux-2.6.32.45/drivers/acpi/osl.c 2011-04-17 15:56:46.000000000 -0400
24506 @@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_addres
24507 void __iomem *virt_addr;
24508
24509 virt_addr = ioremap(phys_addr, width);
24510 + if (!virt_addr)
24511 + return AE_NO_MEMORY;
24512 if (!value)
24513 value = &dummy;
24514
24515 @@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_addre
24516 void __iomem *virt_addr;
24517
24518 virt_addr = ioremap(phys_addr, width);
24519 + if (!virt_addr)
24520 + return AE_NO_MEMORY;
24521
24522 switch (width) {
24523 case 8:
24524 diff -urNp linux-2.6.32.45/drivers/acpi/power_meter.c linux-2.6.32.45/drivers/acpi/power_meter.c
24525 --- linux-2.6.32.45/drivers/acpi/power_meter.c 2011-03-27 14:31:47.000000000 -0400
24526 +++ linux-2.6.32.45/drivers/acpi/power_meter.c 2011-04-17 15:56:46.000000000 -0400
24527 @@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *d
24528 return res;
24529
24530 temp /= 1000;
24531 - if (temp < 0)
24532 - return -EINVAL;
24533
24534 mutex_lock(&resource->lock);
24535 resource->trip[attr->index - 7] = temp;
24536 diff -urNp linux-2.6.32.45/drivers/acpi/proc.c linux-2.6.32.45/drivers/acpi/proc.c
24537 --- linux-2.6.32.45/drivers/acpi/proc.c 2011-03-27 14:31:47.000000000 -0400
24538 +++ linux-2.6.32.45/drivers/acpi/proc.c 2011-04-17 15:56:46.000000000 -0400
24539 @@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct f
24540 size_t count, loff_t * ppos)
24541 {
24542 struct list_head *node, *next;
24543 - char strbuf[5];
24544 - char str[5] = "";
24545 - unsigned int len = count;
24546 + char strbuf[5] = {0};
24547 struct acpi_device *found_dev = NULL;
24548
24549 - if (len > 4)
24550 - len = 4;
24551 - if (len < 0)
24552 - return -EFAULT;
24553 + if (count > 4)
24554 + count = 4;
24555
24556 - if (copy_from_user(strbuf, buffer, len))
24557 + if (copy_from_user(strbuf, buffer, count))
24558 return -EFAULT;
24559 - strbuf[len] = '\0';
24560 - sscanf(strbuf, "%s", str);
24561 + strbuf[count] = '\0';
24562
24563 mutex_lock(&acpi_device_lock);
24564 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
24565 @@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct f
24566 if (!dev->wakeup.flags.valid)
24567 continue;
24568
24569 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
24570 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
24571 dev->wakeup.state.enabled =
24572 dev->wakeup.state.enabled ? 0 : 1;
24573 found_dev = dev;
24574 diff -urNp linux-2.6.32.45/drivers/acpi/processor_core.c linux-2.6.32.45/drivers/acpi/processor_core.c
24575 --- linux-2.6.32.45/drivers/acpi/processor_core.c 2011-03-27 14:31:47.000000000 -0400
24576 +++ linux-2.6.32.45/drivers/acpi/processor_core.c 2011-04-17 15:56:46.000000000 -0400
24577 @@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(
24578 return 0;
24579 }
24580
24581 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
24582 + BUG_ON(pr->id >= nr_cpu_ids);
24583
24584 /*
24585 * Buggy BIOS check
24586 diff -urNp linux-2.6.32.45/drivers/acpi/sbshc.c linux-2.6.32.45/drivers/acpi/sbshc.c
24587 --- linux-2.6.32.45/drivers/acpi/sbshc.c 2011-03-27 14:31:47.000000000 -0400
24588 +++ linux-2.6.32.45/drivers/acpi/sbshc.c 2011-04-17 15:56:46.000000000 -0400
24589 @@ -17,7 +17,7 @@
24590
24591 #define PREFIX "ACPI: "
24592
24593 -#define ACPI_SMB_HC_CLASS "smbus_host_controller"
24594 +#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
24595 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
24596
24597 struct acpi_smb_hc {
24598 diff -urNp linux-2.6.32.45/drivers/acpi/sleep.c linux-2.6.32.45/drivers/acpi/sleep.c
24599 --- linux-2.6.32.45/drivers/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
24600 +++ linux-2.6.32.45/drivers/acpi/sleep.c 2011-04-17 15:56:46.000000000 -0400
24601 @@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(susp
24602 }
24603 }
24604
24605 -static struct platform_suspend_ops acpi_suspend_ops = {
24606 +static const struct platform_suspend_ops acpi_suspend_ops = {
24607 .valid = acpi_suspend_state_valid,
24608 .begin = acpi_suspend_begin,
24609 .prepare_late = acpi_pm_prepare,
24610 @@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspen
24611 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
24612 * been requested.
24613 */
24614 -static struct platform_suspend_ops acpi_suspend_ops_old = {
24615 +static const struct platform_suspend_ops acpi_suspend_ops_old = {
24616 .valid = acpi_suspend_state_valid,
24617 .begin = acpi_suspend_begin_old,
24618 .prepare_late = acpi_pm_disable_gpes,
24619 @@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
24620 acpi_enable_all_runtime_gpes();
24621 }
24622
24623 -static struct platform_hibernation_ops acpi_hibernation_ops = {
24624 +static const struct platform_hibernation_ops acpi_hibernation_ops = {
24625 .begin = acpi_hibernation_begin,
24626 .end = acpi_pm_end,
24627 .pre_snapshot = acpi_hibernation_pre_snapshot,
24628 @@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot
24629 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
24630 * been requested.
24631 */
24632 -static struct platform_hibernation_ops acpi_hibernation_ops_old = {
24633 +static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
24634 .begin = acpi_hibernation_begin_old,
24635 .end = acpi_pm_end,
24636 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
24637 diff -urNp linux-2.6.32.45/drivers/acpi/video.c linux-2.6.32.45/drivers/acpi/video.c
24638 --- linux-2.6.32.45/drivers/acpi/video.c 2011-03-27 14:31:47.000000000 -0400
24639 +++ linux-2.6.32.45/drivers/acpi/video.c 2011-04-17 15:56:46.000000000 -0400
24640 @@ -359,7 +359,7 @@ static int acpi_video_set_brightness(str
24641 vd->brightness->levels[request_level]);
24642 }
24643
24644 -static struct backlight_ops acpi_backlight_ops = {
24645 +static const struct backlight_ops acpi_backlight_ops = {
24646 .get_brightness = acpi_video_get_brightness,
24647 .update_status = acpi_video_set_brightness,
24648 };
24649 diff -urNp linux-2.6.32.45/drivers/ata/ahci.c linux-2.6.32.45/drivers/ata/ahci.c
24650 --- linux-2.6.32.45/drivers/ata/ahci.c 2011-03-27 14:31:47.000000000 -0400
24651 +++ linux-2.6.32.45/drivers/ata/ahci.c 2011-04-23 12:56:10.000000000 -0400
24652 @@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sh
24653 .sdev_attrs = ahci_sdev_attrs,
24654 };
24655
24656 -static struct ata_port_operations ahci_ops = {
24657 +static const struct ata_port_operations ahci_ops = {
24658 .inherits = &sata_pmp_port_ops,
24659
24660 .qc_defer = sata_pmp_qc_defer_cmd_switch,
24661 @@ -424,17 +424,17 @@ static struct ata_port_operations ahci_o
24662 .port_stop = ahci_port_stop,
24663 };
24664
24665 -static struct ata_port_operations ahci_vt8251_ops = {
24666 +static const struct ata_port_operations ahci_vt8251_ops = {
24667 .inherits = &ahci_ops,
24668 .hardreset = ahci_vt8251_hardreset,
24669 };
24670
24671 -static struct ata_port_operations ahci_p5wdh_ops = {
24672 +static const struct ata_port_operations ahci_p5wdh_ops = {
24673 .inherits = &ahci_ops,
24674 .hardreset = ahci_p5wdh_hardreset,
24675 };
24676
24677 -static struct ata_port_operations ahci_sb600_ops = {
24678 +static const struct ata_port_operations ahci_sb600_ops = {
24679 .inherits = &ahci_ops,
24680 .softreset = ahci_sb600_softreset,
24681 .pmp_softreset = ahci_sb600_softreset,
24682 diff -urNp linux-2.6.32.45/drivers/ata/ata_generic.c linux-2.6.32.45/drivers/ata/ata_generic.c
24683 --- linux-2.6.32.45/drivers/ata/ata_generic.c 2011-03-27 14:31:47.000000000 -0400
24684 +++ linux-2.6.32.45/drivers/ata/ata_generic.c 2011-04-17 15:56:46.000000000 -0400
24685 @@ -104,7 +104,7 @@ static struct scsi_host_template generic
24686 ATA_BMDMA_SHT(DRV_NAME),
24687 };
24688
24689 -static struct ata_port_operations generic_port_ops = {
24690 +static const struct ata_port_operations generic_port_ops = {
24691 .inherits = &ata_bmdma_port_ops,
24692 .cable_detect = ata_cable_unknown,
24693 .set_mode = generic_set_mode,
24694 diff -urNp linux-2.6.32.45/drivers/ata/ata_piix.c linux-2.6.32.45/drivers/ata/ata_piix.c
24695 --- linux-2.6.32.45/drivers/ata/ata_piix.c 2011-03-27 14:31:47.000000000 -0400
24696 +++ linux-2.6.32.45/drivers/ata/ata_piix.c 2011-04-23 12:56:10.000000000 -0400
24697 @@ -318,7 +318,7 @@ static struct scsi_host_template piix_sh
24698 ATA_BMDMA_SHT(DRV_NAME),
24699 };
24700
24701 -static struct ata_port_operations piix_pata_ops = {
24702 +static const struct ata_port_operations piix_pata_ops = {
24703 .inherits = &ata_bmdma32_port_ops,
24704 .cable_detect = ata_cable_40wire,
24705 .set_piomode = piix_set_piomode,
24706 @@ -326,22 +326,22 @@ static struct ata_port_operations piix_p
24707 .prereset = piix_pata_prereset,
24708 };
24709
24710 -static struct ata_port_operations piix_vmw_ops = {
24711 +static const struct ata_port_operations piix_vmw_ops = {
24712 .inherits = &piix_pata_ops,
24713 .bmdma_status = piix_vmw_bmdma_status,
24714 };
24715
24716 -static struct ata_port_operations ich_pata_ops = {
24717 +static const struct ata_port_operations ich_pata_ops = {
24718 .inherits = &piix_pata_ops,
24719 .cable_detect = ich_pata_cable_detect,
24720 .set_dmamode = ich_set_dmamode,
24721 };
24722
24723 -static struct ata_port_operations piix_sata_ops = {
24724 +static const struct ata_port_operations piix_sata_ops = {
24725 .inherits = &ata_bmdma_port_ops,
24726 };
24727
24728 -static struct ata_port_operations piix_sidpr_sata_ops = {
24729 +static const struct ata_port_operations piix_sidpr_sata_ops = {
24730 .inherits = &piix_sata_ops,
24731 .hardreset = sata_std_hardreset,
24732 .scr_read = piix_sidpr_scr_read,
24733 diff -urNp linux-2.6.32.45/drivers/ata/libata-acpi.c linux-2.6.32.45/drivers/ata/libata-acpi.c
24734 --- linux-2.6.32.45/drivers/ata/libata-acpi.c 2011-03-27 14:31:47.000000000 -0400
24735 +++ linux-2.6.32.45/drivers/ata/libata-acpi.c 2011-04-17 15:56:46.000000000 -0400
24736 @@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_han
24737 ata_acpi_uevent(dev->link->ap, dev, event);
24738 }
24739
24740 -static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
24741 +static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
24742 .handler = ata_acpi_dev_notify_dock,
24743 .uevent = ata_acpi_dev_uevent,
24744 };
24745
24746 -static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
24747 +static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
24748 .handler = ata_acpi_ap_notify_dock,
24749 .uevent = ata_acpi_ap_uevent,
24750 };
24751 diff -urNp linux-2.6.32.45/drivers/ata/libata-core.c linux-2.6.32.45/drivers/ata/libata-core.c
24752 --- linux-2.6.32.45/drivers/ata/libata-core.c 2011-03-27 14:31:47.000000000 -0400
24753 +++ linux-2.6.32.45/drivers/ata/libata-core.c 2011-08-05 20:33:55.000000000 -0400
24754 @@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *
24755 struct ata_port *ap;
24756 unsigned int tag;
24757
24758 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24759 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24760 ap = qc->ap;
24761
24762 qc->flags = 0;
24763 @@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued
24764 struct ata_port *ap;
24765 struct ata_link *link;
24766
24767 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24768 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24769 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
24770 ap = qc->ap;
24771 link = qc->dev->link;
24772 @@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device
24773 * LOCKING:
24774 * None.
24775 */
24776 -static void ata_finalize_port_ops(struct ata_port_operations *ops)
24777 +static void ata_finalize_port_ops(const struct ata_port_operations *ops)
24778 {
24779 static DEFINE_SPINLOCK(lock);
24780 const struct ata_port_operations *cur;
24781 @@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct
24782 return;
24783
24784 spin_lock(&lock);
24785 + pax_open_kernel();
24786
24787 for (cur = ops->inherits; cur; cur = cur->inherits) {
24788 void **inherit = (void **)cur;
24789 @@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct
24790 if (IS_ERR(*pp))
24791 *pp = NULL;
24792
24793 - ops->inherits = NULL;
24794 + *(struct ata_port_operations **)&ops->inherits = NULL;
24795
24796 + pax_close_kernel();
24797 spin_unlock(&lock);
24798 }
24799
24800 @@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host
24801 */
24802 /* KILLME - the only user left is ipr */
24803 void ata_host_init(struct ata_host *host, struct device *dev,
24804 - unsigned long flags, struct ata_port_operations *ops)
24805 + unsigned long flags, const struct ata_port_operations *ops)
24806 {
24807 spin_lock_init(&host->lock);
24808 host->dev = dev;
24809 @@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(stru
24810 /* truly dummy */
24811 }
24812
24813 -struct ata_port_operations ata_dummy_port_ops = {
24814 +const struct ata_port_operations ata_dummy_port_ops = {
24815 .qc_prep = ata_noop_qc_prep,
24816 .qc_issue = ata_dummy_qc_issue,
24817 .error_handler = ata_dummy_error_handler,
24818 diff -urNp linux-2.6.32.45/drivers/ata/libata-eh.c linux-2.6.32.45/drivers/ata/libata-eh.c
24819 --- linux-2.6.32.45/drivers/ata/libata-eh.c 2011-08-09 18:35:28.000000000 -0400
24820 +++ linux-2.6.32.45/drivers/ata/libata-eh.c 2011-08-09 18:33:59.000000000 -0400
24821 @@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
24822 {
24823 struct ata_link *link;
24824
24825 + pax_track_stack();
24826 +
24827 ata_for_each_link(link, ap, HOST_FIRST)
24828 ata_eh_link_report(link);
24829 }
24830 @@ -3594,7 +3596,7 @@ void ata_do_eh(struct ata_port *ap, ata_
24831 */
24832 void ata_std_error_handler(struct ata_port *ap)
24833 {
24834 - struct ata_port_operations *ops = ap->ops;
24835 + const struct ata_port_operations *ops = ap->ops;
24836 ata_reset_fn_t hardreset = ops->hardreset;
24837
24838 /* ignore built-in hardreset if SCR access is not available */
24839 diff -urNp linux-2.6.32.45/drivers/ata/libata-pmp.c linux-2.6.32.45/drivers/ata/libata-pmp.c
24840 --- linux-2.6.32.45/drivers/ata/libata-pmp.c 2011-03-27 14:31:47.000000000 -0400
24841 +++ linux-2.6.32.45/drivers/ata/libata-pmp.c 2011-04-17 15:56:46.000000000 -0400
24842 @@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(str
24843 */
24844 static int sata_pmp_eh_recover(struct ata_port *ap)
24845 {
24846 - struct ata_port_operations *ops = ap->ops;
24847 + const struct ata_port_operations *ops = ap->ops;
24848 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
24849 struct ata_link *pmp_link = &ap->link;
24850 struct ata_device *pmp_dev = pmp_link->device;
24851 diff -urNp linux-2.6.32.45/drivers/ata/pata_acpi.c linux-2.6.32.45/drivers/ata/pata_acpi.c
24852 --- linux-2.6.32.45/drivers/ata/pata_acpi.c 2011-03-27 14:31:47.000000000 -0400
24853 +++ linux-2.6.32.45/drivers/ata/pata_acpi.c 2011-04-17 15:56:46.000000000 -0400
24854 @@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_s
24855 ATA_BMDMA_SHT(DRV_NAME),
24856 };
24857
24858 -static struct ata_port_operations pacpi_ops = {
24859 +static const struct ata_port_operations pacpi_ops = {
24860 .inherits = &ata_bmdma_port_ops,
24861 .qc_issue = pacpi_qc_issue,
24862 .cable_detect = pacpi_cable_detect,
24863 diff -urNp linux-2.6.32.45/drivers/ata/pata_ali.c linux-2.6.32.45/drivers/ata/pata_ali.c
24864 --- linux-2.6.32.45/drivers/ata/pata_ali.c 2011-03-27 14:31:47.000000000 -0400
24865 +++ linux-2.6.32.45/drivers/ata/pata_ali.c 2011-04-17 15:56:46.000000000 -0400
24866 @@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht
24867 * Port operations for PIO only ALi
24868 */
24869
24870 -static struct ata_port_operations ali_early_port_ops = {
24871 +static const struct ata_port_operations ali_early_port_ops = {
24872 .inherits = &ata_sff_port_ops,
24873 .cable_detect = ata_cable_40wire,
24874 .set_piomode = ali_set_piomode,
24875 @@ -382,7 +382,7 @@ static const struct ata_port_operations
24876 * Port operations for DMA capable ALi without cable
24877 * detect
24878 */
24879 -static struct ata_port_operations ali_20_port_ops = {
24880 +static const struct ata_port_operations ali_20_port_ops = {
24881 .inherits = &ali_dma_base_ops,
24882 .cable_detect = ata_cable_40wire,
24883 .mode_filter = ali_20_filter,
24884 @@ -393,7 +393,7 @@ static struct ata_port_operations ali_20
24885 /*
24886 * Port operations for DMA capable ALi with cable detect
24887 */
24888 -static struct ata_port_operations ali_c2_port_ops = {
24889 +static const struct ata_port_operations ali_c2_port_ops = {
24890 .inherits = &ali_dma_base_ops,
24891 .check_atapi_dma = ali_check_atapi_dma,
24892 .cable_detect = ali_c2_cable_detect,
24893 @@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2
24894 /*
24895 * Port operations for DMA capable ALi with cable detect
24896 */
24897 -static struct ata_port_operations ali_c4_port_ops = {
24898 +static const struct ata_port_operations ali_c4_port_ops = {
24899 .inherits = &ali_dma_base_ops,
24900 .check_atapi_dma = ali_check_atapi_dma,
24901 .cable_detect = ali_c2_cable_detect,
24902 @@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4
24903 /*
24904 * Port operations for DMA capable ALi with cable detect and LBA48
24905 */
24906 -static struct ata_port_operations ali_c5_port_ops = {
24907 +static const struct ata_port_operations ali_c5_port_ops = {
24908 .inherits = &ali_dma_base_ops,
24909 .check_atapi_dma = ali_check_atapi_dma,
24910 .dev_config = ali_warn_atapi_dma,
24911 diff -urNp linux-2.6.32.45/drivers/ata/pata_amd.c linux-2.6.32.45/drivers/ata/pata_amd.c
24912 --- linux-2.6.32.45/drivers/ata/pata_amd.c 2011-03-27 14:31:47.000000000 -0400
24913 +++ linux-2.6.32.45/drivers/ata/pata_amd.c 2011-04-17 15:56:46.000000000 -0400
24914 @@ -397,28 +397,28 @@ static const struct ata_port_operations
24915 .prereset = amd_pre_reset,
24916 };
24917
24918 -static struct ata_port_operations amd33_port_ops = {
24919 +static const struct ata_port_operations amd33_port_ops = {
24920 .inherits = &amd_base_port_ops,
24921 .cable_detect = ata_cable_40wire,
24922 .set_piomode = amd33_set_piomode,
24923 .set_dmamode = amd33_set_dmamode,
24924 };
24925
24926 -static struct ata_port_operations amd66_port_ops = {
24927 +static const struct ata_port_operations amd66_port_ops = {
24928 .inherits = &amd_base_port_ops,
24929 .cable_detect = ata_cable_unknown,
24930 .set_piomode = amd66_set_piomode,
24931 .set_dmamode = amd66_set_dmamode,
24932 };
24933
24934 -static struct ata_port_operations amd100_port_ops = {
24935 +static const struct ata_port_operations amd100_port_ops = {
24936 .inherits = &amd_base_port_ops,
24937 .cable_detect = ata_cable_unknown,
24938 .set_piomode = amd100_set_piomode,
24939 .set_dmamode = amd100_set_dmamode,
24940 };
24941
24942 -static struct ata_port_operations amd133_port_ops = {
24943 +static const struct ata_port_operations amd133_port_ops = {
24944 .inherits = &amd_base_port_ops,
24945 .cable_detect = amd_cable_detect,
24946 .set_piomode = amd133_set_piomode,
24947 @@ -433,13 +433,13 @@ static const struct ata_port_operations
24948 .host_stop = nv_host_stop,
24949 };
24950
24951 -static struct ata_port_operations nv100_port_ops = {
24952 +static const struct ata_port_operations nv100_port_ops = {
24953 .inherits = &nv_base_port_ops,
24954 .set_piomode = nv100_set_piomode,
24955 .set_dmamode = nv100_set_dmamode,
24956 };
24957
24958 -static struct ata_port_operations nv133_port_ops = {
24959 +static const struct ata_port_operations nv133_port_ops = {
24960 .inherits = &nv_base_port_ops,
24961 .set_piomode = nv133_set_piomode,
24962 .set_dmamode = nv133_set_dmamode,
24963 diff -urNp linux-2.6.32.45/drivers/ata/pata_artop.c linux-2.6.32.45/drivers/ata/pata_artop.c
24964 --- linux-2.6.32.45/drivers/ata/pata_artop.c 2011-03-27 14:31:47.000000000 -0400
24965 +++ linux-2.6.32.45/drivers/ata/pata_artop.c 2011-04-17 15:56:46.000000000 -0400
24966 @@ -311,7 +311,7 @@ static struct scsi_host_template artop_s
24967 ATA_BMDMA_SHT(DRV_NAME),
24968 };
24969
24970 -static struct ata_port_operations artop6210_ops = {
24971 +static const struct ata_port_operations artop6210_ops = {
24972 .inherits = &ata_bmdma_port_ops,
24973 .cable_detect = ata_cable_40wire,
24974 .set_piomode = artop6210_set_piomode,
24975 @@ -320,7 +320,7 @@ static struct ata_port_operations artop6
24976 .qc_defer = artop6210_qc_defer,
24977 };
24978
24979 -static struct ata_port_operations artop6260_ops = {
24980 +static const struct ata_port_operations artop6260_ops = {
24981 .inherits = &ata_bmdma_port_ops,
24982 .cable_detect = artop6260_cable_detect,
24983 .set_piomode = artop6260_set_piomode,
24984 diff -urNp linux-2.6.32.45/drivers/ata/pata_at32.c linux-2.6.32.45/drivers/ata/pata_at32.c
24985 --- linux-2.6.32.45/drivers/ata/pata_at32.c 2011-03-27 14:31:47.000000000 -0400
24986 +++ linux-2.6.32.45/drivers/ata/pata_at32.c 2011-04-17 15:56:46.000000000 -0400
24987 @@ -172,7 +172,7 @@ static struct scsi_host_template at32_sh
24988 ATA_PIO_SHT(DRV_NAME),
24989 };
24990
24991 -static struct ata_port_operations at32_port_ops = {
24992 +static const struct ata_port_operations at32_port_ops = {
24993 .inherits = &ata_sff_port_ops,
24994 .cable_detect = ata_cable_40wire,
24995 .set_piomode = pata_at32_set_piomode,
24996 diff -urNp linux-2.6.32.45/drivers/ata/pata_at91.c linux-2.6.32.45/drivers/ata/pata_at91.c
24997 --- linux-2.6.32.45/drivers/ata/pata_at91.c 2011-03-27 14:31:47.000000000 -0400
24998 +++ linux-2.6.32.45/drivers/ata/pata_at91.c 2011-04-17 15:56:46.000000000 -0400
24999 @@ -195,7 +195,7 @@ static struct scsi_host_template pata_at
25000 ATA_PIO_SHT(DRV_NAME),
25001 };
25002
25003 -static struct ata_port_operations pata_at91_port_ops = {
25004 +static const struct ata_port_operations pata_at91_port_ops = {
25005 .inherits = &ata_sff_port_ops,
25006
25007 .sff_data_xfer = pata_at91_data_xfer_noirq,
25008 diff -urNp linux-2.6.32.45/drivers/ata/pata_atiixp.c linux-2.6.32.45/drivers/ata/pata_atiixp.c
25009 --- linux-2.6.32.45/drivers/ata/pata_atiixp.c 2011-03-27 14:31:47.000000000 -0400
25010 +++ linux-2.6.32.45/drivers/ata/pata_atiixp.c 2011-04-17 15:56:46.000000000 -0400
25011 @@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_
25012 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
25013 };
25014
25015 -static struct ata_port_operations atiixp_port_ops = {
25016 +static const struct ata_port_operations atiixp_port_ops = {
25017 .inherits = &ata_bmdma_port_ops,
25018
25019 .qc_prep = ata_sff_dumb_qc_prep,
25020 diff -urNp linux-2.6.32.45/drivers/ata/pata_atp867x.c linux-2.6.32.45/drivers/ata/pata_atp867x.c
25021 --- linux-2.6.32.45/drivers/ata/pata_atp867x.c 2011-03-27 14:31:47.000000000 -0400
25022 +++ linux-2.6.32.45/drivers/ata/pata_atp867x.c 2011-04-17 15:56:46.000000000 -0400
25023 @@ -274,7 +274,7 @@ static struct scsi_host_template atp867x
25024 ATA_BMDMA_SHT(DRV_NAME),
25025 };
25026
25027 -static struct ata_port_operations atp867x_ops = {
25028 +static const struct ata_port_operations atp867x_ops = {
25029 .inherits = &ata_bmdma_port_ops,
25030 .cable_detect = atp867x_cable_detect,
25031 .set_piomode = atp867x_set_piomode,
25032 diff -urNp linux-2.6.32.45/drivers/ata/pata_bf54x.c linux-2.6.32.45/drivers/ata/pata_bf54x.c
25033 --- linux-2.6.32.45/drivers/ata/pata_bf54x.c 2011-03-27 14:31:47.000000000 -0400
25034 +++ linux-2.6.32.45/drivers/ata/pata_bf54x.c 2011-04-17 15:56:46.000000000 -0400
25035 @@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sh
25036 .dma_boundary = ATA_DMA_BOUNDARY,
25037 };
25038
25039 -static struct ata_port_operations bfin_pata_ops = {
25040 +static const struct ata_port_operations bfin_pata_ops = {
25041 .inherits = &ata_sff_port_ops,
25042
25043 .set_piomode = bfin_set_piomode,
25044 diff -urNp linux-2.6.32.45/drivers/ata/pata_cmd640.c linux-2.6.32.45/drivers/ata/pata_cmd640.c
25045 --- linux-2.6.32.45/drivers/ata/pata_cmd640.c 2011-03-27 14:31:47.000000000 -0400
25046 +++ linux-2.6.32.45/drivers/ata/pata_cmd640.c 2011-04-17 15:56:46.000000000 -0400
25047 @@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_
25048 ATA_BMDMA_SHT(DRV_NAME),
25049 };
25050
25051 -static struct ata_port_operations cmd640_port_ops = {
25052 +static const struct ata_port_operations cmd640_port_ops = {
25053 .inherits = &ata_bmdma_port_ops,
25054 /* In theory xfer_noirq is not needed once we kill the prefetcher */
25055 .sff_data_xfer = ata_sff_data_xfer_noirq,
25056 diff -urNp linux-2.6.32.45/drivers/ata/pata_cmd64x.c linux-2.6.32.45/drivers/ata/pata_cmd64x.c
25057 --- linux-2.6.32.45/drivers/ata/pata_cmd64x.c 2011-06-25 12:55:34.000000000 -0400
25058 +++ linux-2.6.32.45/drivers/ata/pata_cmd64x.c 2011-06-25 12:56:37.000000000 -0400
25059 @@ -271,18 +271,18 @@ static const struct ata_port_operations
25060 .set_dmamode = cmd64x_set_dmamode,
25061 };
25062
25063 -static struct ata_port_operations cmd64x_port_ops = {
25064 +static const struct ata_port_operations cmd64x_port_ops = {
25065 .inherits = &cmd64x_base_ops,
25066 .cable_detect = ata_cable_40wire,
25067 };
25068
25069 -static struct ata_port_operations cmd646r1_port_ops = {
25070 +static const struct ata_port_operations cmd646r1_port_ops = {
25071 .inherits = &cmd64x_base_ops,
25072 .bmdma_stop = cmd646r1_bmdma_stop,
25073 .cable_detect = ata_cable_40wire,
25074 };
25075
25076 -static struct ata_port_operations cmd648_port_ops = {
25077 +static const struct ata_port_operations cmd648_port_ops = {
25078 .inherits = &cmd64x_base_ops,
25079 .bmdma_stop = cmd648_bmdma_stop,
25080 .cable_detect = cmd648_cable_detect,
25081 diff -urNp linux-2.6.32.45/drivers/ata/pata_cs5520.c linux-2.6.32.45/drivers/ata/pata_cs5520.c
25082 --- linux-2.6.32.45/drivers/ata/pata_cs5520.c 2011-03-27 14:31:47.000000000 -0400
25083 +++ linux-2.6.32.45/drivers/ata/pata_cs5520.c 2011-04-17 15:56:46.000000000 -0400
25084 @@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_
25085 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
25086 };
25087
25088 -static struct ata_port_operations cs5520_port_ops = {
25089 +static const struct ata_port_operations cs5520_port_ops = {
25090 .inherits = &ata_bmdma_port_ops,
25091 .qc_prep = ata_sff_dumb_qc_prep,
25092 .cable_detect = ata_cable_40wire,
25093 diff -urNp linux-2.6.32.45/drivers/ata/pata_cs5530.c linux-2.6.32.45/drivers/ata/pata_cs5530.c
25094 --- linux-2.6.32.45/drivers/ata/pata_cs5530.c 2011-03-27 14:31:47.000000000 -0400
25095 +++ linux-2.6.32.45/drivers/ata/pata_cs5530.c 2011-04-17 15:56:46.000000000 -0400
25096 @@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_
25097 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
25098 };
25099
25100 -static struct ata_port_operations cs5530_port_ops = {
25101 +static const struct ata_port_operations cs5530_port_ops = {
25102 .inherits = &ata_bmdma_port_ops,
25103
25104 .qc_prep = ata_sff_dumb_qc_prep,
25105 diff -urNp linux-2.6.32.45/drivers/ata/pata_cs5535.c linux-2.6.32.45/drivers/ata/pata_cs5535.c
25106 --- linux-2.6.32.45/drivers/ata/pata_cs5535.c 2011-03-27 14:31:47.000000000 -0400
25107 +++ linux-2.6.32.45/drivers/ata/pata_cs5535.c 2011-04-17 15:56:46.000000000 -0400
25108 @@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_
25109 ATA_BMDMA_SHT(DRV_NAME),
25110 };
25111
25112 -static struct ata_port_operations cs5535_port_ops = {
25113 +static const struct ata_port_operations cs5535_port_ops = {
25114 .inherits = &ata_bmdma_port_ops,
25115 .cable_detect = cs5535_cable_detect,
25116 .set_piomode = cs5535_set_piomode,
25117 diff -urNp linux-2.6.32.45/drivers/ata/pata_cs5536.c linux-2.6.32.45/drivers/ata/pata_cs5536.c
25118 --- linux-2.6.32.45/drivers/ata/pata_cs5536.c 2011-03-27 14:31:47.000000000 -0400
25119 +++ linux-2.6.32.45/drivers/ata/pata_cs5536.c 2011-04-17 15:56:46.000000000 -0400
25120 @@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_
25121 ATA_BMDMA_SHT(DRV_NAME),
25122 };
25123
25124 -static struct ata_port_operations cs5536_port_ops = {
25125 +static const struct ata_port_operations cs5536_port_ops = {
25126 .inherits = &ata_bmdma_port_ops,
25127 .cable_detect = cs5536_cable_detect,
25128 .set_piomode = cs5536_set_piomode,
25129 diff -urNp linux-2.6.32.45/drivers/ata/pata_cypress.c linux-2.6.32.45/drivers/ata/pata_cypress.c
25130 --- linux-2.6.32.45/drivers/ata/pata_cypress.c 2011-03-27 14:31:47.000000000 -0400
25131 +++ linux-2.6.32.45/drivers/ata/pata_cypress.c 2011-04-17 15:56:46.000000000 -0400
25132 @@ -113,7 +113,7 @@ static struct scsi_host_template cy82c69
25133 ATA_BMDMA_SHT(DRV_NAME),
25134 };
25135
25136 -static struct ata_port_operations cy82c693_port_ops = {
25137 +static const struct ata_port_operations cy82c693_port_ops = {
25138 .inherits = &ata_bmdma_port_ops,
25139 .cable_detect = ata_cable_40wire,
25140 .set_piomode = cy82c693_set_piomode,
25141 diff -urNp linux-2.6.32.45/drivers/ata/pata_efar.c linux-2.6.32.45/drivers/ata/pata_efar.c
25142 --- linux-2.6.32.45/drivers/ata/pata_efar.c 2011-03-27 14:31:47.000000000 -0400
25143 +++ linux-2.6.32.45/drivers/ata/pata_efar.c 2011-04-17 15:56:46.000000000 -0400
25144 @@ -222,7 +222,7 @@ static struct scsi_host_template efar_sh
25145 ATA_BMDMA_SHT(DRV_NAME),
25146 };
25147
25148 -static struct ata_port_operations efar_ops = {
25149 +static const struct ata_port_operations efar_ops = {
25150 .inherits = &ata_bmdma_port_ops,
25151 .cable_detect = efar_cable_detect,
25152 .set_piomode = efar_set_piomode,
25153 diff -urNp linux-2.6.32.45/drivers/ata/pata_hpt366.c linux-2.6.32.45/drivers/ata/pata_hpt366.c
25154 --- linux-2.6.32.45/drivers/ata/pata_hpt366.c 2011-06-25 12:55:34.000000000 -0400
25155 +++ linux-2.6.32.45/drivers/ata/pata_hpt366.c 2011-06-25 12:56:37.000000000 -0400
25156 @@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_
25157 * Configuration for HPT366/68
25158 */
25159
25160 -static struct ata_port_operations hpt366_port_ops = {
25161 +static const struct ata_port_operations hpt366_port_ops = {
25162 .inherits = &ata_bmdma_port_ops,
25163 .cable_detect = hpt36x_cable_detect,
25164 .mode_filter = hpt366_filter,
25165 diff -urNp linux-2.6.32.45/drivers/ata/pata_hpt37x.c linux-2.6.32.45/drivers/ata/pata_hpt37x.c
25166 --- linux-2.6.32.45/drivers/ata/pata_hpt37x.c 2011-06-25 12:55:34.000000000 -0400
25167 +++ linux-2.6.32.45/drivers/ata/pata_hpt37x.c 2011-06-25 12:56:37.000000000 -0400
25168 @@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_
25169 * Configuration for HPT370
25170 */
25171
25172 -static struct ata_port_operations hpt370_port_ops = {
25173 +static const struct ata_port_operations hpt370_port_ops = {
25174 .inherits = &ata_bmdma_port_ops,
25175
25176 .bmdma_stop = hpt370_bmdma_stop,
25177 @@ -591,7 +591,7 @@ static struct ata_port_operations hpt370
25178 * Configuration for HPT370A. Close to 370 but less filters
25179 */
25180
25181 -static struct ata_port_operations hpt370a_port_ops = {
25182 +static const struct ata_port_operations hpt370a_port_ops = {
25183 .inherits = &hpt370_port_ops,
25184 .mode_filter = hpt370a_filter,
25185 };
25186 @@ -601,7 +601,7 @@ static struct ata_port_operations hpt370
25187 * and DMA mode setting functionality.
25188 */
25189
25190 -static struct ata_port_operations hpt372_port_ops = {
25191 +static const struct ata_port_operations hpt372_port_ops = {
25192 .inherits = &ata_bmdma_port_ops,
25193
25194 .bmdma_stop = hpt37x_bmdma_stop,
25195 @@ -616,7 +616,7 @@ static struct ata_port_operations hpt372
25196 * but we have a different cable detection procedure for function 1.
25197 */
25198
25199 -static struct ata_port_operations hpt374_fn1_port_ops = {
25200 +static const struct ata_port_operations hpt374_fn1_port_ops = {
25201 .inherits = &hpt372_port_ops,
25202 .prereset = hpt374_fn1_pre_reset,
25203 };
25204 diff -urNp linux-2.6.32.45/drivers/ata/pata_hpt3x2n.c linux-2.6.32.45/drivers/ata/pata_hpt3x2n.c
25205 --- linux-2.6.32.45/drivers/ata/pata_hpt3x2n.c 2011-06-25 12:55:34.000000000 -0400
25206 +++ linux-2.6.32.45/drivers/ata/pata_hpt3x2n.c 2011-06-25 12:56:37.000000000 -0400
25207 @@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n
25208 * Configuration for HPT3x2n.
25209 */
25210
25211 -static struct ata_port_operations hpt3x2n_port_ops = {
25212 +static const struct ata_port_operations hpt3x2n_port_ops = {
25213 .inherits = &ata_bmdma_port_ops,
25214
25215 .bmdma_stop = hpt3x2n_bmdma_stop,
25216 diff -urNp linux-2.6.32.45/drivers/ata/pata_hpt3x3.c linux-2.6.32.45/drivers/ata/pata_hpt3x3.c
25217 --- linux-2.6.32.45/drivers/ata/pata_hpt3x3.c 2011-03-27 14:31:47.000000000 -0400
25218 +++ linux-2.6.32.45/drivers/ata/pata_hpt3x3.c 2011-04-17 15:56:46.000000000 -0400
25219 @@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_
25220 ATA_BMDMA_SHT(DRV_NAME),
25221 };
25222
25223 -static struct ata_port_operations hpt3x3_port_ops = {
25224 +static const struct ata_port_operations hpt3x3_port_ops = {
25225 .inherits = &ata_bmdma_port_ops,
25226 .cable_detect = ata_cable_40wire,
25227 .set_piomode = hpt3x3_set_piomode,
25228 diff -urNp linux-2.6.32.45/drivers/ata/pata_icside.c linux-2.6.32.45/drivers/ata/pata_icside.c
25229 --- linux-2.6.32.45/drivers/ata/pata_icside.c 2011-03-27 14:31:47.000000000 -0400
25230 +++ linux-2.6.32.45/drivers/ata/pata_icside.c 2011-04-17 15:56:46.000000000 -0400
25231 @@ -319,7 +319,7 @@ static void pata_icside_postreset(struct
25232 }
25233 }
25234
25235 -static struct ata_port_operations pata_icside_port_ops = {
25236 +static const struct ata_port_operations pata_icside_port_ops = {
25237 .inherits = &ata_sff_port_ops,
25238 /* no need to build any PRD tables for DMA */
25239 .qc_prep = ata_noop_qc_prep,
25240 diff -urNp linux-2.6.32.45/drivers/ata/pata_isapnp.c linux-2.6.32.45/drivers/ata/pata_isapnp.c
25241 --- linux-2.6.32.45/drivers/ata/pata_isapnp.c 2011-03-27 14:31:47.000000000 -0400
25242 +++ linux-2.6.32.45/drivers/ata/pata_isapnp.c 2011-04-17 15:56:46.000000000 -0400
25243 @@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_
25244 ATA_PIO_SHT(DRV_NAME),
25245 };
25246
25247 -static struct ata_port_operations isapnp_port_ops = {
25248 +static const struct ata_port_operations isapnp_port_ops = {
25249 .inherits = &ata_sff_port_ops,
25250 .cable_detect = ata_cable_40wire,
25251 };
25252
25253 -static struct ata_port_operations isapnp_noalt_port_ops = {
25254 +static const struct ata_port_operations isapnp_noalt_port_ops = {
25255 .inherits = &ata_sff_port_ops,
25256 .cable_detect = ata_cable_40wire,
25257 /* No altstatus so we don't want to use the lost interrupt poll */
25258 diff -urNp linux-2.6.32.45/drivers/ata/pata_it8213.c linux-2.6.32.45/drivers/ata/pata_it8213.c
25259 --- linux-2.6.32.45/drivers/ata/pata_it8213.c 2011-03-27 14:31:47.000000000 -0400
25260 +++ linux-2.6.32.45/drivers/ata/pata_it8213.c 2011-04-17 15:56:46.000000000 -0400
25261 @@ -234,7 +234,7 @@ static struct scsi_host_template it8213_
25262 };
25263
25264
25265 -static struct ata_port_operations it8213_ops = {
25266 +static const struct ata_port_operations it8213_ops = {
25267 .inherits = &ata_bmdma_port_ops,
25268 .cable_detect = it8213_cable_detect,
25269 .set_piomode = it8213_set_piomode,
25270 diff -urNp linux-2.6.32.45/drivers/ata/pata_it821x.c linux-2.6.32.45/drivers/ata/pata_it821x.c
25271 --- linux-2.6.32.45/drivers/ata/pata_it821x.c 2011-03-27 14:31:47.000000000 -0400
25272 +++ linux-2.6.32.45/drivers/ata/pata_it821x.c 2011-04-17 15:56:46.000000000 -0400
25273 @@ -800,7 +800,7 @@ static struct scsi_host_template it821x_
25274 ATA_BMDMA_SHT(DRV_NAME),
25275 };
25276
25277 -static struct ata_port_operations it821x_smart_port_ops = {
25278 +static const struct ata_port_operations it821x_smart_port_ops = {
25279 .inherits = &ata_bmdma_port_ops,
25280
25281 .check_atapi_dma= it821x_check_atapi_dma,
25282 @@ -814,7 +814,7 @@ static struct ata_port_operations it821x
25283 .port_start = it821x_port_start,
25284 };
25285
25286 -static struct ata_port_operations it821x_passthru_port_ops = {
25287 +static const struct ata_port_operations it821x_passthru_port_ops = {
25288 .inherits = &ata_bmdma_port_ops,
25289
25290 .check_atapi_dma= it821x_check_atapi_dma,
25291 @@ -830,7 +830,7 @@ static struct ata_port_operations it821x
25292 .port_start = it821x_port_start,
25293 };
25294
25295 -static struct ata_port_operations it821x_rdc_port_ops = {
25296 +static const struct ata_port_operations it821x_rdc_port_ops = {
25297 .inherits = &ata_bmdma_port_ops,
25298
25299 .check_atapi_dma= it821x_check_atapi_dma,
25300 diff -urNp linux-2.6.32.45/drivers/ata/pata_ixp4xx_cf.c linux-2.6.32.45/drivers/ata/pata_ixp4xx_cf.c
25301 --- linux-2.6.32.45/drivers/ata/pata_ixp4xx_cf.c 2011-03-27 14:31:47.000000000 -0400
25302 +++ linux-2.6.32.45/drivers/ata/pata_ixp4xx_cf.c 2011-04-17 15:56:46.000000000 -0400
25303 @@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_
25304 ATA_PIO_SHT(DRV_NAME),
25305 };
25306
25307 -static struct ata_port_operations ixp4xx_port_ops = {
25308 +static const struct ata_port_operations ixp4xx_port_ops = {
25309 .inherits = &ata_sff_port_ops,
25310 .sff_data_xfer = ixp4xx_mmio_data_xfer,
25311 .cable_detect = ata_cable_40wire,
25312 diff -urNp linux-2.6.32.45/drivers/ata/pata_jmicron.c linux-2.6.32.45/drivers/ata/pata_jmicron.c
25313 --- linux-2.6.32.45/drivers/ata/pata_jmicron.c 2011-03-27 14:31:47.000000000 -0400
25314 +++ linux-2.6.32.45/drivers/ata/pata_jmicron.c 2011-04-17 15:56:46.000000000 -0400
25315 @@ -111,7 +111,7 @@ static struct scsi_host_template jmicron
25316 ATA_BMDMA_SHT(DRV_NAME),
25317 };
25318
25319 -static struct ata_port_operations jmicron_ops = {
25320 +static const struct ata_port_operations jmicron_ops = {
25321 .inherits = &ata_bmdma_port_ops,
25322 .prereset = jmicron_pre_reset,
25323 };
25324 diff -urNp linux-2.6.32.45/drivers/ata/pata_legacy.c linux-2.6.32.45/drivers/ata/pata_legacy.c
25325 --- linux-2.6.32.45/drivers/ata/pata_legacy.c 2011-03-27 14:31:47.000000000 -0400
25326 +++ linux-2.6.32.45/drivers/ata/pata_legacy.c 2011-04-17 15:56:46.000000000 -0400
25327 @@ -106,7 +106,7 @@ struct legacy_probe {
25328
25329 struct legacy_controller {
25330 const char *name;
25331 - struct ata_port_operations *ops;
25332 + const struct ata_port_operations *ops;
25333 unsigned int pio_mask;
25334 unsigned int flags;
25335 unsigned int pflags;
25336 @@ -223,12 +223,12 @@ static const struct ata_port_operations
25337 * pio_mask as well.
25338 */
25339
25340 -static struct ata_port_operations simple_port_ops = {
25341 +static const struct ata_port_operations simple_port_ops = {
25342 .inherits = &legacy_base_port_ops,
25343 .sff_data_xfer = ata_sff_data_xfer_noirq,
25344 };
25345
25346 -static struct ata_port_operations legacy_port_ops = {
25347 +static const struct ata_port_operations legacy_port_ops = {
25348 .inherits = &legacy_base_port_ops,
25349 .sff_data_xfer = ata_sff_data_xfer_noirq,
25350 .set_mode = legacy_set_mode,
25351 @@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(st
25352 return buflen;
25353 }
25354
25355 -static struct ata_port_operations pdc20230_port_ops = {
25356 +static const struct ata_port_operations pdc20230_port_ops = {
25357 .inherits = &legacy_base_port_ops,
25358 .set_piomode = pdc20230_set_piomode,
25359 .sff_data_xfer = pdc_data_xfer_vlb,
25360 @@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct a
25361 ioread8(ap->ioaddr.status_addr);
25362 }
25363
25364 -static struct ata_port_operations ht6560a_port_ops = {
25365 +static const struct ata_port_operations ht6560a_port_ops = {
25366 .inherits = &legacy_base_port_ops,
25367 .set_piomode = ht6560a_set_piomode,
25368 };
25369 @@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct a
25370 ioread8(ap->ioaddr.status_addr);
25371 }
25372
25373 -static struct ata_port_operations ht6560b_port_ops = {
25374 +static const struct ata_port_operations ht6560b_port_ops = {
25375 .inherits = &legacy_base_port_ops,
25376 .set_piomode = ht6560b_set_piomode,
25377 };
25378 @@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(stru
25379 }
25380
25381
25382 -static struct ata_port_operations opti82c611a_port_ops = {
25383 +static const struct ata_port_operations opti82c611a_port_ops = {
25384 .inherits = &legacy_base_port_ops,
25385 .set_piomode = opti82c611a_set_piomode,
25386 };
25387 @@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(
25388 return ata_sff_qc_issue(qc);
25389 }
25390
25391 -static struct ata_port_operations opti82c46x_port_ops = {
25392 +static const struct ata_port_operations opti82c46x_port_ops = {
25393 .inherits = &legacy_base_port_ops,
25394 .set_piomode = opti82c46x_set_piomode,
25395 .qc_issue = opti82c46x_qc_issue,
25396 @@ -771,20 +771,20 @@ static int qdi_port(struct platform_devi
25397 return 0;
25398 }
25399
25400 -static struct ata_port_operations qdi6500_port_ops = {
25401 +static const struct ata_port_operations qdi6500_port_ops = {
25402 .inherits = &legacy_base_port_ops,
25403 .set_piomode = qdi6500_set_piomode,
25404 .qc_issue = qdi_qc_issue,
25405 .sff_data_xfer = vlb32_data_xfer,
25406 };
25407
25408 -static struct ata_port_operations qdi6580_port_ops = {
25409 +static const struct ata_port_operations qdi6580_port_ops = {
25410 .inherits = &legacy_base_port_ops,
25411 .set_piomode = qdi6580_set_piomode,
25412 .sff_data_xfer = vlb32_data_xfer,
25413 };
25414
25415 -static struct ata_port_operations qdi6580dp_port_ops = {
25416 +static const struct ata_port_operations qdi6580dp_port_ops = {
25417 .inherits = &legacy_base_port_ops,
25418 .set_piomode = qdi6580dp_set_piomode,
25419 .sff_data_xfer = vlb32_data_xfer,
25420 @@ -855,7 +855,7 @@ static int winbond_port(struct platform_
25421 return 0;
25422 }
25423
25424 -static struct ata_port_operations winbond_port_ops = {
25425 +static const struct ata_port_operations winbond_port_ops = {
25426 .inherits = &legacy_base_port_ops,
25427 .set_piomode = winbond_set_piomode,
25428 .sff_data_xfer = vlb32_data_xfer,
25429 @@ -978,7 +978,7 @@ static __init int legacy_init_one(struct
25430 int pio_modes = controller->pio_mask;
25431 unsigned long io = probe->port;
25432 u32 mask = (1 << probe->slot);
25433 - struct ata_port_operations *ops = controller->ops;
25434 + const struct ata_port_operations *ops = controller->ops;
25435 struct legacy_data *ld = &legacy_data[probe->slot];
25436 struct ata_host *host = NULL;
25437 struct ata_port *ap;
25438 diff -urNp linux-2.6.32.45/drivers/ata/pata_marvell.c linux-2.6.32.45/drivers/ata/pata_marvell.c
25439 --- linux-2.6.32.45/drivers/ata/pata_marvell.c 2011-03-27 14:31:47.000000000 -0400
25440 +++ linux-2.6.32.45/drivers/ata/pata_marvell.c 2011-04-17 15:56:46.000000000 -0400
25441 @@ -100,7 +100,7 @@ static struct scsi_host_template marvell
25442 ATA_BMDMA_SHT(DRV_NAME),
25443 };
25444
25445 -static struct ata_port_operations marvell_ops = {
25446 +static const struct ata_port_operations marvell_ops = {
25447 .inherits = &ata_bmdma_port_ops,
25448 .cable_detect = marvell_cable_detect,
25449 .prereset = marvell_pre_reset,
25450 diff -urNp linux-2.6.32.45/drivers/ata/pata_mpc52xx.c linux-2.6.32.45/drivers/ata/pata_mpc52xx.c
25451 --- linux-2.6.32.45/drivers/ata/pata_mpc52xx.c 2011-03-27 14:31:47.000000000 -0400
25452 +++ linux-2.6.32.45/drivers/ata/pata_mpc52xx.c 2011-04-17 15:56:46.000000000 -0400
25453 @@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx
25454 ATA_PIO_SHT(DRV_NAME),
25455 };
25456
25457 -static struct ata_port_operations mpc52xx_ata_port_ops = {
25458 +static const struct ata_port_operations mpc52xx_ata_port_ops = {
25459 .inherits = &ata_bmdma_port_ops,
25460 .sff_dev_select = mpc52xx_ata_dev_select,
25461 .set_piomode = mpc52xx_ata_set_piomode,
25462 diff -urNp linux-2.6.32.45/drivers/ata/pata_mpiix.c linux-2.6.32.45/drivers/ata/pata_mpiix.c
25463 --- linux-2.6.32.45/drivers/ata/pata_mpiix.c 2011-03-27 14:31:47.000000000 -0400
25464 +++ linux-2.6.32.45/drivers/ata/pata_mpiix.c 2011-04-17 15:56:46.000000000 -0400
25465 @@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_s
25466 ATA_PIO_SHT(DRV_NAME),
25467 };
25468
25469 -static struct ata_port_operations mpiix_port_ops = {
25470 +static const struct ata_port_operations mpiix_port_ops = {
25471 .inherits = &ata_sff_port_ops,
25472 .qc_issue = mpiix_qc_issue,
25473 .cable_detect = ata_cable_40wire,
25474 diff -urNp linux-2.6.32.45/drivers/ata/pata_netcell.c linux-2.6.32.45/drivers/ata/pata_netcell.c
25475 --- linux-2.6.32.45/drivers/ata/pata_netcell.c 2011-03-27 14:31:47.000000000 -0400
25476 +++ linux-2.6.32.45/drivers/ata/pata_netcell.c 2011-04-17 15:56:46.000000000 -0400
25477 @@ -34,7 +34,7 @@ static struct scsi_host_template netcell
25478 ATA_BMDMA_SHT(DRV_NAME),
25479 };
25480
25481 -static struct ata_port_operations netcell_ops = {
25482 +static const struct ata_port_operations netcell_ops = {
25483 .inherits = &ata_bmdma_port_ops,
25484 .cable_detect = ata_cable_80wire,
25485 .read_id = netcell_read_id,
25486 diff -urNp linux-2.6.32.45/drivers/ata/pata_ninja32.c linux-2.6.32.45/drivers/ata/pata_ninja32.c
25487 --- linux-2.6.32.45/drivers/ata/pata_ninja32.c 2011-03-27 14:31:47.000000000 -0400
25488 +++ linux-2.6.32.45/drivers/ata/pata_ninja32.c 2011-04-17 15:56:46.000000000 -0400
25489 @@ -81,7 +81,7 @@ static struct scsi_host_template ninja32
25490 ATA_BMDMA_SHT(DRV_NAME),
25491 };
25492
25493 -static struct ata_port_operations ninja32_port_ops = {
25494 +static const struct ata_port_operations ninja32_port_ops = {
25495 .inherits = &ata_bmdma_port_ops,
25496 .sff_dev_select = ninja32_dev_select,
25497 .cable_detect = ata_cable_40wire,
25498 diff -urNp linux-2.6.32.45/drivers/ata/pata_ns87410.c linux-2.6.32.45/drivers/ata/pata_ns87410.c
25499 --- linux-2.6.32.45/drivers/ata/pata_ns87410.c 2011-03-27 14:31:47.000000000 -0400
25500 +++ linux-2.6.32.45/drivers/ata/pata_ns87410.c 2011-04-17 15:56:46.000000000 -0400
25501 @@ -132,7 +132,7 @@ static struct scsi_host_template ns87410
25502 ATA_PIO_SHT(DRV_NAME),
25503 };
25504
25505 -static struct ata_port_operations ns87410_port_ops = {
25506 +static const struct ata_port_operations ns87410_port_ops = {
25507 .inherits = &ata_sff_port_ops,
25508 .qc_issue = ns87410_qc_issue,
25509 .cable_detect = ata_cable_40wire,
25510 diff -urNp linux-2.6.32.45/drivers/ata/pata_ns87415.c linux-2.6.32.45/drivers/ata/pata_ns87415.c
25511 --- linux-2.6.32.45/drivers/ata/pata_ns87415.c 2011-03-27 14:31:47.000000000 -0400
25512 +++ linux-2.6.32.45/drivers/ata/pata_ns87415.c 2011-04-17 15:56:46.000000000 -0400
25513 @@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct at
25514 }
25515 #endif /* 87560 SuperIO Support */
25516
25517 -static struct ata_port_operations ns87415_pata_ops = {
25518 +static const struct ata_port_operations ns87415_pata_ops = {
25519 .inherits = &ata_bmdma_port_ops,
25520
25521 .check_atapi_dma = ns87415_check_atapi_dma,
25522 @@ -313,7 +313,7 @@ static struct ata_port_operations ns8741
25523 };
25524
25525 #if defined(CONFIG_SUPERIO)
25526 -static struct ata_port_operations ns87560_pata_ops = {
25527 +static const struct ata_port_operations ns87560_pata_ops = {
25528 .inherits = &ns87415_pata_ops,
25529 .sff_tf_read = ns87560_tf_read,
25530 .sff_check_status = ns87560_check_status,
25531 diff -urNp linux-2.6.32.45/drivers/ata/pata_octeon_cf.c linux-2.6.32.45/drivers/ata/pata_octeon_cf.c
25532 --- linux-2.6.32.45/drivers/ata/pata_octeon_cf.c 2011-03-27 14:31:47.000000000 -0400
25533 +++ linux-2.6.32.45/drivers/ata/pata_octeon_cf.c 2011-04-17 15:56:46.000000000 -0400
25534 @@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(s
25535 return 0;
25536 }
25537
25538 +/* cannot be const */
25539 static struct ata_port_operations octeon_cf_ops = {
25540 .inherits = &ata_sff_port_ops,
25541 .check_atapi_dma = octeon_cf_check_atapi_dma,
25542 diff -urNp linux-2.6.32.45/drivers/ata/pata_oldpiix.c linux-2.6.32.45/drivers/ata/pata_oldpiix.c
25543 --- linux-2.6.32.45/drivers/ata/pata_oldpiix.c 2011-03-27 14:31:47.000000000 -0400
25544 +++ linux-2.6.32.45/drivers/ata/pata_oldpiix.c 2011-04-17 15:56:46.000000000 -0400
25545 @@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix
25546 ATA_BMDMA_SHT(DRV_NAME),
25547 };
25548
25549 -static struct ata_port_operations oldpiix_pata_ops = {
25550 +static const struct ata_port_operations oldpiix_pata_ops = {
25551 .inherits = &ata_bmdma_port_ops,
25552 .qc_issue = oldpiix_qc_issue,
25553 .cable_detect = ata_cable_40wire,
25554 diff -urNp linux-2.6.32.45/drivers/ata/pata_opti.c linux-2.6.32.45/drivers/ata/pata_opti.c
25555 --- linux-2.6.32.45/drivers/ata/pata_opti.c 2011-03-27 14:31:47.000000000 -0400
25556 +++ linux-2.6.32.45/drivers/ata/pata_opti.c 2011-04-17 15:56:46.000000000 -0400
25557 @@ -152,7 +152,7 @@ static struct scsi_host_template opti_sh
25558 ATA_PIO_SHT(DRV_NAME),
25559 };
25560
25561 -static struct ata_port_operations opti_port_ops = {
25562 +static const struct ata_port_operations opti_port_ops = {
25563 .inherits = &ata_sff_port_ops,
25564 .cable_detect = ata_cable_40wire,
25565 .set_piomode = opti_set_piomode,
25566 diff -urNp linux-2.6.32.45/drivers/ata/pata_optidma.c linux-2.6.32.45/drivers/ata/pata_optidma.c
25567 --- linux-2.6.32.45/drivers/ata/pata_optidma.c 2011-03-27 14:31:47.000000000 -0400
25568 +++ linux-2.6.32.45/drivers/ata/pata_optidma.c 2011-04-17 15:56:46.000000000 -0400
25569 @@ -337,7 +337,7 @@ static struct scsi_host_template optidma
25570 ATA_BMDMA_SHT(DRV_NAME),
25571 };
25572
25573 -static struct ata_port_operations optidma_port_ops = {
25574 +static const struct ata_port_operations optidma_port_ops = {
25575 .inherits = &ata_bmdma_port_ops,
25576 .cable_detect = ata_cable_40wire,
25577 .set_piomode = optidma_set_pio_mode,
25578 @@ -346,7 +346,7 @@ static struct ata_port_operations optidm
25579 .prereset = optidma_pre_reset,
25580 };
25581
25582 -static struct ata_port_operations optiplus_port_ops = {
25583 +static const struct ata_port_operations optiplus_port_ops = {
25584 .inherits = &optidma_port_ops,
25585 .set_piomode = optiplus_set_pio_mode,
25586 .set_dmamode = optiplus_set_dma_mode,
25587 diff -urNp linux-2.6.32.45/drivers/ata/pata_palmld.c linux-2.6.32.45/drivers/ata/pata_palmld.c
25588 --- linux-2.6.32.45/drivers/ata/pata_palmld.c 2011-03-27 14:31:47.000000000 -0400
25589 +++ linux-2.6.32.45/drivers/ata/pata_palmld.c 2011-04-17 15:56:46.000000000 -0400
25590 @@ -37,7 +37,7 @@ static struct scsi_host_template palmld_
25591 ATA_PIO_SHT(DRV_NAME),
25592 };
25593
25594 -static struct ata_port_operations palmld_port_ops = {
25595 +static const struct ata_port_operations palmld_port_ops = {
25596 .inherits = &ata_sff_port_ops,
25597 .sff_data_xfer = ata_sff_data_xfer_noirq,
25598 .cable_detect = ata_cable_40wire,
25599 diff -urNp linux-2.6.32.45/drivers/ata/pata_pcmcia.c linux-2.6.32.45/drivers/ata/pata_pcmcia.c
25600 --- linux-2.6.32.45/drivers/ata/pata_pcmcia.c 2011-03-27 14:31:47.000000000 -0400
25601 +++ linux-2.6.32.45/drivers/ata/pata_pcmcia.c 2011-04-17 15:56:46.000000000 -0400
25602 @@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_
25603 ATA_PIO_SHT(DRV_NAME),
25604 };
25605
25606 -static struct ata_port_operations pcmcia_port_ops = {
25607 +static const struct ata_port_operations pcmcia_port_ops = {
25608 .inherits = &ata_sff_port_ops,
25609 .sff_data_xfer = ata_sff_data_xfer_noirq,
25610 .cable_detect = ata_cable_40wire,
25611 .set_mode = pcmcia_set_mode,
25612 };
25613
25614 -static struct ata_port_operations pcmcia_8bit_port_ops = {
25615 +static const struct ata_port_operations pcmcia_8bit_port_ops = {
25616 .inherits = &ata_sff_port_ops,
25617 .sff_data_xfer = ata_data_xfer_8bit,
25618 .cable_detect = ata_cable_40wire,
25619 @@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia
25620 unsigned long io_base, ctl_base;
25621 void __iomem *io_addr, *ctl_addr;
25622 int n_ports = 1;
25623 - struct ata_port_operations *ops = &pcmcia_port_ops;
25624 + const struct ata_port_operations *ops = &pcmcia_port_ops;
25625
25626 info = kzalloc(sizeof(*info), GFP_KERNEL);
25627 if (info == NULL)
25628 diff -urNp linux-2.6.32.45/drivers/ata/pata_pdc2027x.c linux-2.6.32.45/drivers/ata/pata_pdc2027x.c
25629 --- linux-2.6.32.45/drivers/ata/pata_pdc2027x.c 2011-03-27 14:31:47.000000000 -0400
25630 +++ linux-2.6.32.45/drivers/ata/pata_pdc2027x.c 2011-04-17 15:56:46.000000000 -0400
25631 @@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027
25632 ATA_BMDMA_SHT(DRV_NAME),
25633 };
25634
25635 -static struct ata_port_operations pdc2027x_pata100_ops = {
25636 +static const struct ata_port_operations pdc2027x_pata100_ops = {
25637 .inherits = &ata_bmdma_port_ops,
25638 .check_atapi_dma = pdc2027x_check_atapi_dma,
25639 .cable_detect = pdc2027x_cable_detect,
25640 .prereset = pdc2027x_prereset,
25641 };
25642
25643 -static struct ata_port_operations pdc2027x_pata133_ops = {
25644 +static const struct ata_port_operations pdc2027x_pata133_ops = {
25645 .inherits = &pdc2027x_pata100_ops,
25646 .mode_filter = pdc2027x_mode_filter,
25647 .set_piomode = pdc2027x_set_piomode,
25648 diff -urNp linux-2.6.32.45/drivers/ata/pata_pdc202xx_old.c linux-2.6.32.45/drivers/ata/pata_pdc202xx_old.c
25649 --- linux-2.6.32.45/drivers/ata/pata_pdc202xx_old.c 2011-03-27 14:31:47.000000000 -0400
25650 +++ linux-2.6.32.45/drivers/ata/pata_pdc202xx_old.c 2011-04-17 15:56:46.000000000 -0400
25651 @@ -274,7 +274,7 @@ static struct scsi_host_template pdc202x
25652 ATA_BMDMA_SHT(DRV_NAME),
25653 };
25654
25655 -static struct ata_port_operations pdc2024x_port_ops = {
25656 +static const struct ata_port_operations pdc2024x_port_ops = {
25657 .inherits = &ata_bmdma_port_ops,
25658
25659 .cable_detect = ata_cable_40wire,
25660 @@ -284,7 +284,7 @@ static struct ata_port_operations pdc202
25661 .sff_exec_command = pdc202xx_exec_command,
25662 };
25663
25664 -static struct ata_port_operations pdc2026x_port_ops = {
25665 +static const struct ata_port_operations pdc2026x_port_ops = {
25666 .inherits = &pdc2024x_port_ops,
25667
25668 .check_atapi_dma = pdc2026x_check_atapi_dma,
25669 diff -urNp linux-2.6.32.45/drivers/ata/pata_platform.c linux-2.6.32.45/drivers/ata/pata_platform.c
25670 --- linux-2.6.32.45/drivers/ata/pata_platform.c 2011-03-27 14:31:47.000000000 -0400
25671 +++ linux-2.6.32.45/drivers/ata/pata_platform.c 2011-04-17 15:56:46.000000000 -0400
25672 @@ -48,7 +48,7 @@ static struct scsi_host_template pata_pl
25673 ATA_PIO_SHT(DRV_NAME),
25674 };
25675
25676 -static struct ata_port_operations pata_platform_port_ops = {
25677 +static const struct ata_port_operations pata_platform_port_ops = {
25678 .inherits = &ata_sff_port_ops,
25679 .sff_data_xfer = ata_sff_data_xfer_noirq,
25680 .cable_detect = ata_cable_unknown,
25681 diff -urNp linux-2.6.32.45/drivers/ata/pata_qdi.c linux-2.6.32.45/drivers/ata/pata_qdi.c
25682 --- linux-2.6.32.45/drivers/ata/pata_qdi.c 2011-03-27 14:31:47.000000000 -0400
25683 +++ linux-2.6.32.45/drivers/ata/pata_qdi.c 2011-04-17 15:56:46.000000000 -0400
25684 @@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht
25685 ATA_PIO_SHT(DRV_NAME),
25686 };
25687
25688 -static struct ata_port_operations qdi6500_port_ops = {
25689 +static const struct ata_port_operations qdi6500_port_ops = {
25690 .inherits = &ata_sff_port_ops,
25691 .qc_issue = qdi_qc_issue,
25692 .sff_data_xfer = qdi_data_xfer,
25693 @@ -165,7 +165,7 @@ static struct ata_port_operations qdi650
25694 .set_piomode = qdi6500_set_piomode,
25695 };
25696
25697 -static struct ata_port_operations qdi6580_port_ops = {
25698 +static const struct ata_port_operations qdi6580_port_ops = {
25699 .inherits = &qdi6500_port_ops,
25700 .set_piomode = qdi6580_set_piomode,
25701 };
25702 diff -urNp linux-2.6.32.45/drivers/ata/pata_radisys.c linux-2.6.32.45/drivers/ata/pata_radisys.c
25703 --- linux-2.6.32.45/drivers/ata/pata_radisys.c 2011-03-27 14:31:47.000000000 -0400
25704 +++ linux-2.6.32.45/drivers/ata/pata_radisys.c 2011-04-17 15:56:46.000000000 -0400
25705 @@ -187,7 +187,7 @@ static struct scsi_host_template radisys
25706 ATA_BMDMA_SHT(DRV_NAME),
25707 };
25708
25709 -static struct ata_port_operations radisys_pata_ops = {
25710 +static const struct ata_port_operations radisys_pata_ops = {
25711 .inherits = &ata_bmdma_port_ops,
25712 .qc_issue = radisys_qc_issue,
25713 .cable_detect = ata_cable_unknown,
25714 diff -urNp linux-2.6.32.45/drivers/ata/pata_rb532_cf.c linux-2.6.32.45/drivers/ata/pata_rb532_cf.c
25715 --- linux-2.6.32.45/drivers/ata/pata_rb532_cf.c 2011-03-27 14:31:47.000000000 -0400
25716 +++ linux-2.6.32.45/drivers/ata/pata_rb532_cf.c 2011-04-17 15:56:46.000000000 -0400
25717 @@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handle
25718 return IRQ_HANDLED;
25719 }
25720
25721 -static struct ata_port_operations rb532_pata_port_ops = {
25722 +static const struct ata_port_operations rb532_pata_port_ops = {
25723 .inherits = &ata_sff_port_ops,
25724 .sff_data_xfer = ata_sff_data_xfer32,
25725 };
25726 diff -urNp linux-2.6.32.45/drivers/ata/pata_rdc.c linux-2.6.32.45/drivers/ata/pata_rdc.c
25727 --- linux-2.6.32.45/drivers/ata/pata_rdc.c 2011-03-27 14:31:47.000000000 -0400
25728 +++ linux-2.6.32.45/drivers/ata/pata_rdc.c 2011-04-17 15:56:46.000000000 -0400
25729 @@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_p
25730 pci_write_config_byte(dev, 0x48, udma_enable);
25731 }
25732
25733 -static struct ata_port_operations rdc_pata_ops = {
25734 +static const struct ata_port_operations rdc_pata_ops = {
25735 .inherits = &ata_bmdma32_port_ops,
25736 .cable_detect = rdc_pata_cable_detect,
25737 .set_piomode = rdc_set_piomode,
25738 diff -urNp linux-2.6.32.45/drivers/ata/pata_rz1000.c linux-2.6.32.45/drivers/ata/pata_rz1000.c
25739 --- linux-2.6.32.45/drivers/ata/pata_rz1000.c 2011-03-27 14:31:47.000000000 -0400
25740 +++ linux-2.6.32.45/drivers/ata/pata_rz1000.c 2011-04-17 15:56:46.000000000 -0400
25741 @@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_
25742 ATA_PIO_SHT(DRV_NAME),
25743 };
25744
25745 -static struct ata_port_operations rz1000_port_ops = {
25746 +static const struct ata_port_operations rz1000_port_ops = {
25747 .inherits = &ata_sff_port_ops,
25748 .cable_detect = ata_cable_40wire,
25749 .set_mode = rz1000_set_mode,
25750 diff -urNp linux-2.6.32.45/drivers/ata/pata_sc1200.c linux-2.6.32.45/drivers/ata/pata_sc1200.c
25751 --- linux-2.6.32.45/drivers/ata/pata_sc1200.c 2011-03-27 14:31:47.000000000 -0400
25752 +++ linux-2.6.32.45/drivers/ata/pata_sc1200.c 2011-04-17 15:56:46.000000000 -0400
25753 @@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_
25754 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
25755 };
25756
25757 -static struct ata_port_operations sc1200_port_ops = {
25758 +static const struct ata_port_operations sc1200_port_ops = {
25759 .inherits = &ata_bmdma_port_ops,
25760 .qc_prep = ata_sff_dumb_qc_prep,
25761 .qc_issue = sc1200_qc_issue,
25762 diff -urNp linux-2.6.32.45/drivers/ata/pata_scc.c linux-2.6.32.45/drivers/ata/pata_scc.c
25763 --- linux-2.6.32.45/drivers/ata/pata_scc.c 2011-03-27 14:31:47.000000000 -0400
25764 +++ linux-2.6.32.45/drivers/ata/pata_scc.c 2011-04-17 15:56:46.000000000 -0400
25765 @@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht
25766 ATA_BMDMA_SHT(DRV_NAME),
25767 };
25768
25769 -static struct ata_port_operations scc_pata_ops = {
25770 +static const struct ata_port_operations scc_pata_ops = {
25771 .inherits = &ata_bmdma_port_ops,
25772
25773 .set_piomode = scc_set_piomode,
25774 diff -urNp linux-2.6.32.45/drivers/ata/pata_sch.c linux-2.6.32.45/drivers/ata/pata_sch.c
25775 --- linux-2.6.32.45/drivers/ata/pata_sch.c 2011-03-27 14:31:47.000000000 -0400
25776 +++ linux-2.6.32.45/drivers/ata/pata_sch.c 2011-04-17 15:56:46.000000000 -0400
25777 @@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht
25778 ATA_BMDMA_SHT(DRV_NAME),
25779 };
25780
25781 -static struct ata_port_operations sch_pata_ops = {
25782 +static const struct ata_port_operations sch_pata_ops = {
25783 .inherits = &ata_bmdma_port_ops,
25784 .cable_detect = ata_cable_unknown,
25785 .set_piomode = sch_set_piomode,
25786 diff -urNp linux-2.6.32.45/drivers/ata/pata_serverworks.c linux-2.6.32.45/drivers/ata/pata_serverworks.c
25787 --- linux-2.6.32.45/drivers/ata/pata_serverworks.c 2011-03-27 14:31:47.000000000 -0400
25788 +++ linux-2.6.32.45/drivers/ata/pata_serverworks.c 2011-04-17 15:56:46.000000000 -0400
25789 @@ -299,7 +299,7 @@ static struct scsi_host_template serverw
25790 ATA_BMDMA_SHT(DRV_NAME),
25791 };
25792
25793 -static struct ata_port_operations serverworks_osb4_port_ops = {
25794 +static const struct ata_port_operations serverworks_osb4_port_ops = {
25795 .inherits = &ata_bmdma_port_ops,
25796 .cable_detect = serverworks_cable_detect,
25797 .mode_filter = serverworks_osb4_filter,
25798 @@ -307,7 +307,7 @@ static struct ata_port_operations server
25799 .set_dmamode = serverworks_set_dmamode,
25800 };
25801
25802 -static struct ata_port_operations serverworks_csb_port_ops = {
25803 +static const struct ata_port_operations serverworks_csb_port_ops = {
25804 .inherits = &serverworks_osb4_port_ops,
25805 .mode_filter = serverworks_csb_filter,
25806 };
25807 diff -urNp linux-2.6.32.45/drivers/ata/pata_sil680.c linux-2.6.32.45/drivers/ata/pata_sil680.c
25808 --- linux-2.6.32.45/drivers/ata/pata_sil680.c 2011-06-25 12:55:34.000000000 -0400
25809 +++ linux-2.6.32.45/drivers/ata/pata_sil680.c 2011-06-25 12:56:37.000000000 -0400
25810 @@ -194,7 +194,7 @@ static struct scsi_host_template sil680_
25811 ATA_BMDMA_SHT(DRV_NAME),
25812 };
25813
25814 -static struct ata_port_operations sil680_port_ops = {
25815 +static const struct ata_port_operations sil680_port_ops = {
25816 .inherits = &ata_bmdma32_port_ops,
25817 .cable_detect = sil680_cable_detect,
25818 .set_piomode = sil680_set_piomode,
25819 diff -urNp linux-2.6.32.45/drivers/ata/pata_sis.c linux-2.6.32.45/drivers/ata/pata_sis.c
25820 --- linux-2.6.32.45/drivers/ata/pata_sis.c 2011-03-27 14:31:47.000000000 -0400
25821 +++ linux-2.6.32.45/drivers/ata/pata_sis.c 2011-04-17 15:56:46.000000000 -0400
25822 @@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht
25823 ATA_BMDMA_SHT(DRV_NAME),
25824 };
25825
25826 -static struct ata_port_operations sis_133_for_sata_ops = {
25827 +static const struct ata_port_operations sis_133_for_sata_ops = {
25828 .inherits = &ata_bmdma_port_ops,
25829 .set_piomode = sis_133_set_piomode,
25830 .set_dmamode = sis_133_set_dmamode,
25831 .cable_detect = sis_133_cable_detect,
25832 };
25833
25834 -static struct ata_port_operations sis_base_ops = {
25835 +static const struct ata_port_operations sis_base_ops = {
25836 .inherits = &ata_bmdma_port_ops,
25837 .prereset = sis_pre_reset,
25838 };
25839
25840 -static struct ata_port_operations sis_133_ops = {
25841 +static const struct ata_port_operations sis_133_ops = {
25842 .inherits = &sis_base_ops,
25843 .set_piomode = sis_133_set_piomode,
25844 .set_dmamode = sis_133_set_dmamode,
25845 .cable_detect = sis_133_cable_detect,
25846 };
25847
25848 -static struct ata_port_operations sis_133_early_ops = {
25849 +static const struct ata_port_operations sis_133_early_ops = {
25850 .inherits = &sis_base_ops,
25851 .set_piomode = sis_100_set_piomode,
25852 .set_dmamode = sis_133_early_set_dmamode,
25853 .cable_detect = sis_66_cable_detect,
25854 };
25855
25856 -static struct ata_port_operations sis_100_ops = {
25857 +static const struct ata_port_operations sis_100_ops = {
25858 .inherits = &sis_base_ops,
25859 .set_piomode = sis_100_set_piomode,
25860 .set_dmamode = sis_100_set_dmamode,
25861 .cable_detect = sis_66_cable_detect,
25862 };
25863
25864 -static struct ata_port_operations sis_66_ops = {
25865 +static const struct ata_port_operations sis_66_ops = {
25866 .inherits = &sis_base_ops,
25867 .set_piomode = sis_old_set_piomode,
25868 .set_dmamode = sis_66_set_dmamode,
25869 .cable_detect = sis_66_cable_detect,
25870 };
25871
25872 -static struct ata_port_operations sis_old_ops = {
25873 +static const struct ata_port_operations sis_old_ops = {
25874 .inherits = &sis_base_ops,
25875 .set_piomode = sis_old_set_piomode,
25876 .set_dmamode = sis_old_set_dmamode,
25877 diff -urNp linux-2.6.32.45/drivers/ata/pata_sl82c105.c linux-2.6.32.45/drivers/ata/pata_sl82c105.c
25878 --- linux-2.6.32.45/drivers/ata/pata_sl82c105.c 2011-03-27 14:31:47.000000000 -0400
25879 +++ linux-2.6.32.45/drivers/ata/pata_sl82c105.c 2011-04-17 15:56:46.000000000 -0400
25880 @@ -231,7 +231,7 @@ static struct scsi_host_template sl82c10
25881 ATA_BMDMA_SHT(DRV_NAME),
25882 };
25883
25884 -static struct ata_port_operations sl82c105_port_ops = {
25885 +static const struct ata_port_operations sl82c105_port_ops = {
25886 .inherits = &ata_bmdma_port_ops,
25887 .qc_defer = sl82c105_qc_defer,
25888 .bmdma_start = sl82c105_bmdma_start,
25889 diff -urNp linux-2.6.32.45/drivers/ata/pata_triflex.c linux-2.6.32.45/drivers/ata/pata_triflex.c
25890 --- linux-2.6.32.45/drivers/ata/pata_triflex.c 2011-03-27 14:31:47.000000000 -0400
25891 +++ linux-2.6.32.45/drivers/ata/pata_triflex.c 2011-04-17 15:56:46.000000000 -0400
25892 @@ -178,7 +178,7 @@ static struct scsi_host_template triflex
25893 ATA_BMDMA_SHT(DRV_NAME),
25894 };
25895
25896 -static struct ata_port_operations triflex_port_ops = {
25897 +static const struct ata_port_operations triflex_port_ops = {
25898 .inherits = &ata_bmdma_port_ops,
25899 .bmdma_start = triflex_bmdma_start,
25900 .bmdma_stop = triflex_bmdma_stop,
25901 diff -urNp linux-2.6.32.45/drivers/ata/pata_via.c linux-2.6.32.45/drivers/ata/pata_via.c
25902 --- linux-2.6.32.45/drivers/ata/pata_via.c 2011-03-27 14:31:47.000000000 -0400
25903 +++ linux-2.6.32.45/drivers/ata/pata_via.c 2011-04-17 15:56:46.000000000 -0400
25904 @@ -419,7 +419,7 @@ static struct scsi_host_template via_sht
25905 ATA_BMDMA_SHT(DRV_NAME),
25906 };
25907
25908 -static struct ata_port_operations via_port_ops = {
25909 +static const struct ata_port_operations via_port_ops = {
25910 .inherits = &ata_bmdma_port_ops,
25911 .cable_detect = via_cable_detect,
25912 .set_piomode = via_set_piomode,
25913 @@ -429,7 +429,7 @@ static struct ata_port_operations via_po
25914 .port_start = via_port_start,
25915 };
25916
25917 -static struct ata_port_operations via_port_ops_noirq = {
25918 +static const struct ata_port_operations via_port_ops_noirq = {
25919 .inherits = &via_port_ops,
25920 .sff_data_xfer = ata_sff_data_xfer_noirq,
25921 };
25922 diff -urNp linux-2.6.32.45/drivers/ata/pata_winbond.c linux-2.6.32.45/drivers/ata/pata_winbond.c
25923 --- linux-2.6.32.45/drivers/ata/pata_winbond.c 2011-03-27 14:31:47.000000000 -0400
25924 +++ linux-2.6.32.45/drivers/ata/pata_winbond.c 2011-04-17 15:56:46.000000000 -0400
25925 @@ -125,7 +125,7 @@ static struct scsi_host_template winbond
25926 ATA_PIO_SHT(DRV_NAME),
25927 };
25928
25929 -static struct ata_port_operations winbond_port_ops = {
25930 +static const struct ata_port_operations winbond_port_ops = {
25931 .inherits = &ata_sff_port_ops,
25932 .sff_data_xfer = winbond_data_xfer,
25933 .cable_detect = ata_cable_40wire,
25934 diff -urNp linux-2.6.32.45/drivers/ata/pdc_adma.c linux-2.6.32.45/drivers/ata/pdc_adma.c
25935 --- linux-2.6.32.45/drivers/ata/pdc_adma.c 2011-03-27 14:31:47.000000000 -0400
25936 +++ linux-2.6.32.45/drivers/ata/pdc_adma.c 2011-04-17 15:56:46.000000000 -0400
25937 @@ -145,7 +145,7 @@ static struct scsi_host_template adma_at
25938 .dma_boundary = ADMA_DMA_BOUNDARY,
25939 };
25940
25941 -static struct ata_port_operations adma_ata_ops = {
25942 +static const struct ata_port_operations adma_ata_ops = {
25943 .inherits = &ata_sff_port_ops,
25944
25945 .lost_interrupt = ATA_OP_NULL,
25946 diff -urNp linux-2.6.32.45/drivers/ata/sata_fsl.c linux-2.6.32.45/drivers/ata/sata_fsl.c
25947 --- linux-2.6.32.45/drivers/ata/sata_fsl.c 2011-03-27 14:31:47.000000000 -0400
25948 +++ linux-2.6.32.45/drivers/ata/sata_fsl.c 2011-04-17 15:56:46.000000000 -0400
25949 @@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fs
25950 .dma_boundary = ATA_DMA_BOUNDARY,
25951 };
25952
25953 -static struct ata_port_operations sata_fsl_ops = {
25954 +static const struct ata_port_operations sata_fsl_ops = {
25955 .inherits = &sata_pmp_port_ops,
25956
25957 .qc_defer = ata_std_qc_defer,
25958 diff -urNp linux-2.6.32.45/drivers/ata/sata_inic162x.c linux-2.6.32.45/drivers/ata/sata_inic162x.c
25959 --- linux-2.6.32.45/drivers/ata/sata_inic162x.c 2011-03-27 14:31:47.000000000 -0400
25960 +++ linux-2.6.32.45/drivers/ata/sata_inic162x.c 2011-04-17 15:56:46.000000000 -0400
25961 @@ -721,7 +721,7 @@ static int inic_port_start(struct ata_po
25962 return 0;
25963 }
25964
25965 -static struct ata_port_operations inic_port_ops = {
25966 +static const struct ata_port_operations inic_port_ops = {
25967 .inherits = &sata_port_ops,
25968
25969 .check_atapi_dma = inic_check_atapi_dma,
25970 diff -urNp linux-2.6.32.45/drivers/ata/sata_mv.c linux-2.6.32.45/drivers/ata/sata_mv.c
25971 --- linux-2.6.32.45/drivers/ata/sata_mv.c 2011-03-27 14:31:47.000000000 -0400
25972 +++ linux-2.6.32.45/drivers/ata/sata_mv.c 2011-04-17 15:56:46.000000000 -0400
25973 @@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht
25974 .dma_boundary = MV_DMA_BOUNDARY,
25975 };
25976
25977 -static struct ata_port_operations mv5_ops = {
25978 +static const struct ata_port_operations mv5_ops = {
25979 .inherits = &ata_sff_port_ops,
25980
25981 .lost_interrupt = ATA_OP_NULL,
25982 @@ -678,7 +678,7 @@ static struct ata_port_operations mv5_op
25983 .port_stop = mv_port_stop,
25984 };
25985
25986 -static struct ata_port_operations mv6_ops = {
25987 +static const struct ata_port_operations mv6_ops = {
25988 .inherits = &mv5_ops,
25989 .dev_config = mv6_dev_config,
25990 .scr_read = mv_scr_read,
25991 @@ -698,7 +698,7 @@ static struct ata_port_operations mv6_op
25992 .bmdma_status = mv_bmdma_status,
25993 };
25994
25995 -static struct ata_port_operations mv_iie_ops = {
25996 +static const struct ata_port_operations mv_iie_ops = {
25997 .inherits = &mv6_ops,
25998 .dev_config = ATA_OP_NULL,
25999 .qc_prep = mv_qc_prep_iie,
26000 diff -urNp linux-2.6.32.45/drivers/ata/sata_nv.c linux-2.6.32.45/drivers/ata/sata_nv.c
26001 --- linux-2.6.32.45/drivers/ata/sata_nv.c 2011-03-27 14:31:47.000000000 -0400
26002 +++ linux-2.6.32.45/drivers/ata/sata_nv.c 2011-04-17 15:56:46.000000000 -0400
26003 @@ -464,7 +464,7 @@ static struct scsi_host_template nv_swnc
26004 * cases. Define nv_hardreset() which only kicks in for post-boot
26005 * probing and use it for all variants.
26006 */
26007 -static struct ata_port_operations nv_generic_ops = {
26008 +static const struct ata_port_operations nv_generic_ops = {
26009 .inherits = &ata_bmdma_port_ops,
26010 .lost_interrupt = ATA_OP_NULL,
26011 .scr_read = nv_scr_read,
26012 @@ -472,20 +472,20 @@ static struct ata_port_operations nv_gen
26013 .hardreset = nv_hardreset,
26014 };
26015
26016 -static struct ata_port_operations nv_nf2_ops = {
26017 +static const struct ata_port_operations nv_nf2_ops = {
26018 .inherits = &nv_generic_ops,
26019 .freeze = nv_nf2_freeze,
26020 .thaw = nv_nf2_thaw,
26021 };
26022
26023 -static struct ata_port_operations nv_ck804_ops = {
26024 +static const struct ata_port_operations nv_ck804_ops = {
26025 .inherits = &nv_generic_ops,
26026 .freeze = nv_ck804_freeze,
26027 .thaw = nv_ck804_thaw,
26028 .host_stop = nv_ck804_host_stop,
26029 };
26030
26031 -static struct ata_port_operations nv_adma_ops = {
26032 +static const struct ata_port_operations nv_adma_ops = {
26033 .inherits = &nv_ck804_ops,
26034
26035 .check_atapi_dma = nv_adma_check_atapi_dma,
26036 @@ -509,7 +509,7 @@ static struct ata_port_operations nv_adm
26037 .host_stop = nv_adma_host_stop,
26038 };
26039
26040 -static struct ata_port_operations nv_swncq_ops = {
26041 +static const struct ata_port_operations nv_swncq_ops = {
26042 .inherits = &nv_generic_ops,
26043
26044 .qc_defer = ata_std_qc_defer,
26045 diff -urNp linux-2.6.32.45/drivers/ata/sata_promise.c linux-2.6.32.45/drivers/ata/sata_promise.c
26046 --- linux-2.6.32.45/drivers/ata/sata_promise.c 2011-03-27 14:31:47.000000000 -0400
26047 +++ linux-2.6.32.45/drivers/ata/sata_promise.c 2011-04-17 15:56:46.000000000 -0400
26048 @@ -195,7 +195,7 @@ static const struct ata_port_operations
26049 .error_handler = pdc_error_handler,
26050 };
26051
26052 -static struct ata_port_operations pdc_sata_ops = {
26053 +static const struct ata_port_operations pdc_sata_ops = {
26054 .inherits = &pdc_common_ops,
26055 .cable_detect = pdc_sata_cable_detect,
26056 .freeze = pdc_sata_freeze,
26057 @@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sa
26058
26059 /* First-generation chips need a more restrictive ->check_atapi_dma op,
26060 and ->freeze/thaw that ignore the hotplug controls. */
26061 -static struct ata_port_operations pdc_old_sata_ops = {
26062 +static const struct ata_port_operations pdc_old_sata_ops = {
26063 .inherits = &pdc_sata_ops,
26064 .freeze = pdc_freeze,
26065 .thaw = pdc_thaw,
26066 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
26067 };
26068
26069 -static struct ata_port_operations pdc_pata_ops = {
26070 +static const struct ata_port_operations pdc_pata_ops = {
26071 .inherits = &pdc_common_ops,
26072 .cable_detect = pdc_pata_cable_detect,
26073 .freeze = pdc_freeze,
26074 diff -urNp linux-2.6.32.45/drivers/ata/sata_qstor.c linux-2.6.32.45/drivers/ata/sata_qstor.c
26075 --- linux-2.6.32.45/drivers/ata/sata_qstor.c 2011-03-27 14:31:47.000000000 -0400
26076 +++ linux-2.6.32.45/drivers/ata/sata_qstor.c 2011-04-17 15:56:46.000000000 -0400
26077 @@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_
26078 .dma_boundary = QS_DMA_BOUNDARY,
26079 };
26080
26081 -static struct ata_port_operations qs_ata_ops = {
26082 +static const struct ata_port_operations qs_ata_ops = {
26083 .inherits = &ata_sff_port_ops,
26084
26085 .check_atapi_dma = qs_check_atapi_dma,
26086 diff -urNp linux-2.6.32.45/drivers/ata/sata_sil24.c linux-2.6.32.45/drivers/ata/sata_sil24.c
26087 --- linux-2.6.32.45/drivers/ata/sata_sil24.c 2011-03-27 14:31:47.000000000 -0400
26088 +++ linux-2.6.32.45/drivers/ata/sata_sil24.c 2011-04-17 15:56:46.000000000 -0400
26089 @@ -388,7 +388,7 @@ static struct scsi_host_template sil24_s
26090 .dma_boundary = ATA_DMA_BOUNDARY,
26091 };
26092
26093 -static struct ata_port_operations sil24_ops = {
26094 +static const struct ata_port_operations sil24_ops = {
26095 .inherits = &sata_pmp_port_ops,
26096
26097 .qc_defer = sil24_qc_defer,
26098 diff -urNp linux-2.6.32.45/drivers/ata/sata_sil.c linux-2.6.32.45/drivers/ata/sata_sil.c
26099 --- linux-2.6.32.45/drivers/ata/sata_sil.c 2011-03-27 14:31:47.000000000 -0400
26100 +++ linux-2.6.32.45/drivers/ata/sata_sil.c 2011-04-17 15:56:46.000000000 -0400
26101 @@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht
26102 .sg_tablesize = ATA_MAX_PRD
26103 };
26104
26105 -static struct ata_port_operations sil_ops = {
26106 +static const struct ata_port_operations sil_ops = {
26107 .inherits = &ata_bmdma32_port_ops,
26108 .dev_config = sil_dev_config,
26109 .set_mode = sil_set_mode,
26110 diff -urNp linux-2.6.32.45/drivers/ata/sata_sis.c linux-2.6.32.45/drivers/ata/sata_sis.c
26111 --- linux-2.6.32.45/drivers/ata/sata_sis.c 2011-03-27 14:31:47.000000000 -0400
26112 +++ linux-2.6.32.45/drivers/ata/sata_sis.c 2011-04-17 15:56:46.000000000 -0400
26113 @@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht
26114 ATA_BMDMA_SHT(DRV_NAME),
26115 };
26116
26117 -static struct ata_port_operations sis_ops = {
26118 +static const struct ata_port_operations sis_ops = {
26119 .inherits = &ata_bmdma_port_ops,
26120 .scr_read = sis_scr_read,
26121 .scr_write = sis_scr_write,
26122 diff -urNp linux-2.6.32.45/drivers/ata/sata_svw.c linux-2.6.32.45/drivers/ata/sata_svw.c
26123 --- linux-2.6.32.45/drivers/ata/sata_svw.c 2011-03-27 14:31:47.000000000 -0400
26124 +++ linux-2.6.32.45/drivers/ata/sata_svw.c 2011-04-17 15:56:46.000000000 -0400
26125 @@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata
26126 };
26127
26128
26129 -static struct ata_port_operations k2_sata_ops = {
26130 +static const struct ata_port_operations k2_sata_ops = {
26131 .inherits = &ata_bmdma_port_ops,
26132 .sff_tf_load = k2_sata_tf_load,
26133 .sff_tf_read = k2_sata_tf_read,
26134 diff -urNp linux-2.6.32.45/drivers/ata/sata_sx4.c linux-2.6.32.45/drivers/ata/sata_sx4.c
26135 --- linux-2.6.32.45/drivers/ata/sata_sx4.c 2011-03-27 14:31:47.000000000 -0400
26136 +++ linux-2.6.32.45/drivers/ata/sata_sx4.c 2011-04-17 15:56:46.000000000 -0400
26137 @@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sat
26138 };
26139
26140 /* TODO: inherit from base port_ops after converting to new EH */
26141 -static struct ata_port_operations pdc_20621_ops = {
26142 +static const struct ata_port_operations pdc_20621_ops = {
26143 .inherits = &ata_sff_port_ops,
26144
26145 .check_atapi_dma = pdc_check_atapi_dma,
26146 diff -urNp linux-2.6.32.45/drivers/ata/sata_uli.c linux-2.6.32.45/drivers/ata/sata_uli.c
26147 --- linux-2.6.32.45/drivers/ata/sata_uli.c 2011-03-27 14:31:47.000000000 -0400
26148 +++ linux-2.6.32.45/drivers/ata/sata_uli.c 2011-04-17 15:56:46.000000000 -0400
26149 @@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht
26150 ATA_BMDMA_SHT(DRV_NAME),
26151 };
26152
26153 -static struct ata_port_operations uli_ops = {
26154 +static const struct ata_port_operations uli_ops = {
26155 .inherits = &ata_bmdma_port_ops,
26156 .scr_read = uli_scr_read,
26157 .scr_write = uli_scr_write,
26158 diff -urNp linux-2.6.32.45/drivers/ata/sata_via.c linux-2.6.32.45/drivers/ata/sata_via.c
26159 --- linux-2.6.32.45/drivers/ata/sata_via.c 2011-05-10 22:12:01.000000000 -0400
26160 +++ linux-2.6.32.45/drivers/ata/sata_via.c 2011-05-10 22:15:08.000000000 -0400
26161 @@ -115,32 +115,32 @@ static struct scsi_host_template svia_sh
26162 ATA_BMDMA_SHT(DRV_NAME),
26163 };
26164
26165 -static struct ata_port_operations svia_base_ops = {
26166 +static const struct ata_port_operations svia_base_ops = {
26167 .inherits = &ata_bmdma_port_ops,
26168 .sff_tf_load = svia_tf_load,
26169 };
26170
26171 -static struct ata_port_operations vt6420_sata_ops = {
26172 +static const struct ata_port_operations vt6420_sata_ops = {
26173 .inherits = &svia_base_ops,
26174 .freeze = svia_noop_freeze,
26175 .prereset = vt6420_prereset,
26176 .bmdma_start = vt6420_bmdma_start,
26177 };
26178
26179 -static struct ata_port_operations vt6421_pata_ops = {
26180 +static const struct ata_port_operations vt6421_pata_ops = {
26181 .inherits = &svia_base_ops,
26182 .cable_detect = vt6421_pata_cable_detect,
26183 .set_piomode = vt6421_set_pio_mode,
26184 .set_dmamode = vt6421_set_dma_mode,
26185 };
26186
26187 -static struct ata_port_operations vt6421_sata_ops = {
26188 +static const struct ata_port_operations vt6421_sata_ops = {
26189 .inherits = &svia_base_ops,
26190 .scr_read = svia_scr_read,
26191 .scr_write = svia_scr_write,
26192 };
26193
26194 -static struct ata_port_operations vt8251_ops = {
26195 +static const struct ata_port_operations vt8251_ops = {
26196 .inherits = &svia_base_ops,
26197 .hardreset = sata_std_hardreset,
26198 .scr_read = vt8251_scr_read,
26199 diff -urNp linux-2.6.32.45/drivers/ata/sata_vsc.c linux-2.6.32.45/drivers/ata/sata_vsc.c
26200 --- linux-2.6.32.45/drivers/ata/sata_vsc.c 2011-03-27 14:31:47.000000000 -0400
26201 +++ linux-2.6.32.45/drivers/ata/sata_vsc.c 2011-04-17 15:56:46.000000000 -0400
26202 @@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sat
26203 };
26204
26205
26206 -static struct ata_port_operations vsc_sata_ops = {
26207 +static const struct ata_port_operations vsc_sata_ops = {
26208 .inherits = &ata_bmdma_port_ops,
26209 /* The IRQ handling is not quite standard SFF behaviour so we
26210 cannot use the default lost interrupt handler */
26211 diff -urNp linux-2.6.32.45/drivers/atm/adummy.c linux-2.6.32.45/drivers/atm/adummy.c
26212 --- linux-2.6.32.45/drivers/atm/adummy.c 2011-03-27 14:31:47.000000000 -0400
26213 +++ linux-2.6.32.45/drivers/atm/adummy.c 2011-04-17 15:56:46.000000000 -0400
26214 @@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct
26215 vcc->pop(vcc, skb);
26216 else
26217 dev_kfree_skb_any(skb);
26218 - atomic_inc(&vcc->stats->tx);
26219 + atomic_inc_unchecked(&vcc->stats->tx);
26220
26221 return 0;
26222 }
26223 diff -urNp linux-2.6.32.45/drivers/atm/ambassador.c linux-2.6.32.45/drivers/atm/ambassador.c
26224 --- linux-2.6.32.45/drivers/atm/ambassador.c 2011-03-27 14:31:47.000000000 -0400
26225 +++ linux-2.6.32.45/drivers/atm/ambassador.c 2011-04-17 15:56:46.000000000 -0400
26226 @@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev,
26227 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
26228
26229 // VC layer stats
26230 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26231 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26232
26233 // free the descriptor
26234 kfree (tx_descr);
26235 @@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev,
26236 dump_skb ("<<<", vc, skb);
26237
26238 // VC layer stats
26239 - atomic_inc(&atm_vcc->stats->rx);
26240 + atomic_inc_unchecked(&atm_vcc->stats->rx);
26241 __net_timestamp(skb);
26242 // end of our responsability
26243 atm_vcc->push (atm_vcc, skb);
26244 @@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev,
26245 } else {
26246 PRINTK (KERN_INFO, "dropped over-size frame");
26247 // should we count this?
26248 - atomic_inc(&atm_vcc->stats->rx_drop);
26249 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26250 }
26251
26252 } else {
26253 @@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * at
26254 }
26255
26256 if (check_area (skb->data, skb->len)) {
26257 - atomic_inc(&atm_vcc->stats->tx_err);
26258 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
26259 return -ENOMEM; // ?
26260 }
26261
26262 diff -urNp linux-2.6.32.45/drivers/atm/atmtcp.c linux-2.6.32.45/drivers/atm/atmtcp.c
26263 --- linux-2.6.32.45/drivers/atm/atmtcp.c 2011-03-27 14:31:47.000000000 -0400
26264 +++ linux-2.6.32.45/drivers/atm/atmtcp.c 2011-04-17 15:56:46.000000000 -0400
26265 @@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc
26266 if (vcc->pop) vcc->pop(vcc,skb);
26267 else dev_kfree_skb(skb);
26268 if (dev_data) return 0;
26269 - atomic_inc(&vcc->stats->tx_err);
26270 + atomic_inc_unchecked(&vcc->stats->tx_err);
26271 return -ENOLINK;
26272 }
26273 size = skb->len+sizeof(struct atmtcp_hdr);
26274 @@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc
26275 if (!new_skb) {
26276 if (vcc->pop) vcc->pop(vcc,skb);
26277 else dev_kfree_skb(skb);
26278 - atomic_inc(&vcc->stats->tx_err);
26279 + atomic_inc_unchecked(&vcc->stats->tx_err);
26280 return -ENOBUFS;
26281 }
26282 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
26283 @@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc
26284 if (vcc->pop) vcc->pop(vcc,skb);
26285 else dev_kfree_skb(skb);
26286 out_vcc->push(out_vcc,new_skb);
26287 - atomic_inc(&vcc->stats->tx);
26288 - atomic_inc(&out_vcc->stats->rx);
26289 + atomic_inc_unchecked(&vcc->stats->tx);
26290 + atomic_inc_unchecked(&out_vcc->stats->rx);
26291 return 0;
26292 }
26293
26294 @@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc
26295 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
26296 read_unlock(&vcc_sklist_lock);
26297 if (!out_vcc) {
26298 - atomic_inc(&vcc->stats->tx_err);
26299 + atomic_inc_unchecked(&vcc->stats->tx_err);
26300 goto done;
26301 }
26302 skb_pull(skb,sizeof(struct atmtcp_hdr));
26303 @@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc
26304 __net_timestamp(new_skb);
26305 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
26306 out_vcc->push(out_vcc,new_skb);
26307 - atomic_inc(&vcc->stats->tx);
26308 - atomic_inc(&out_vcc->stats->rx);
26309 + atomic_inc_unchecked(&vcc->stats->tx);
26310 + atomic_inc_unchecked(&out_vcc->stats->rx);
26311 done:
26312 if (vcc->pop) vcc->pop(vcc,skb);
26313 else dev_kfree_skb(skb);
26314 diff -urNp linux-2.6.32.45/drivers/atm/eni.c linux-2.6.32.45/drivers/atm/eni.c
26315 --- linux-2.6.32.45/drivers/atm/eni.c 2011-03-27 14:31:47.000000000 -0400
26316 +++ linux-2.6.32.45/drivers/atm/eni.c 2011-04-17 15:56:46.000000000 -0400
26317 @@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
26318 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
26319 vcc->dev->number);
26320 length = 0;
26321 - atomic_inc(&vcc->stats->rx_err);
26322 + atomic_inc_unchecked(&vcc->stats->rx_err);
26323 }
26324 else {
26325 length = ATM_CELL_SIZE-1; /* no HEC */
26326 @@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
26327 size);
26328 }
26329 eff = length = 0;
26330 - atomic_inc(&vcc->stats->rx_err);
26331 + atomic_inc_unchecked(&vcc->stats->rx_err);
26332 }
26333 else {
26334 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
26335 @@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
26336 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
26337 vcc->dev->number,vcc->vci,length,size << 2,descr);
26338 length = eff = 0;
26339 - atomic_inc(&vcc->stats->rx_err);
26340 + atomic_inc_unchecked(&vcc->stats->rx_err);
26341 }
26342 }
26343 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
26344 @@ -770,7 +770,7 @@ rx_dequeued++;
26345 vcc->push(vcc,skb);
26346 pushed++;
26347 }
26348 - atomic_inc(&vcc->stats->rx);
26349 + atomic_inc_unchecked(&vcc->stats->rx);
26350 }
26351 wake_up(&eni_dev->rx_wait);
26352 }
26353 @@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *d
26354 PCI_DMA_TODEVICE);
26355 if (vcc->pop) vcc->pop(vcc,skb);
26356 else dev_kfree_skb_irq(skb);
26357 - atomic_inc(&vcc->stats->tx);
26358 + atomic_inc_unchecked(&vcc->stats->tx);
26359 wake_up(&eni_dev->tx_wait);
26360 dma_complete++;
26361 }
26362 diff -urNp linux-2.6.32.45/drivers/atm/firestream.c linux-2.6.32.45/drivers/atm/firestream.c
26363 --- linux-2.6.32.45/drivers/atm/firestream.c 2011-03-27 14:31:47.000000000 -0400
26364 +++ linux-2.6.32.45/drivers/atm/firestream.c 2011-04-17 15:56:46.000000000 -0400
26365 @@ -748,7 +748,7 @@ static void process_txdone_queue (struct
26366 }
26367 }
26368
26369 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26370 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26371
26372 fs_dprintk (FS_DEBUG_TXMEM, "i");
26373 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
26374 @@ -815,7 +815,7 @@ static void process_incoming (struct fs_
26375 #endif
26376 skb_put (skb, qe->p1 & 0xffff);
26377 ATM_SKB(skb)->vcc = atm_vcc;
26378 - atomic_inc(&atm_vcc->stats->rx);
26379 + atomic_inc_unchecked(&atm_vcc->stats->rx);
26380 __net_timestamp(skb);
26381 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
26382 atm_vcc->push (atm_vcc, skb);
26383 @@ -836,12 +836,12 @@ static void process_incoming (struct fs_
26384 kfree (pe);
26385 }
26386 if (atm_vcc)
26387 - atomic_inc(&atm_vcc->stats->rx_drop);
26388 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26389 break;
26390 case 0x1f: /* Reassembly abort: no buffers. */
26391 /* Silently increment error counter. */
26392 if (atm_vcc)
26393 - atomic_inc(&atm_vcc->stats->rx_drop);
26394 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26395 break;
26396 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
26397 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
26398 diff -urNp linux-2.6.32.45/drivers/atm/fore200e.c linux-2.6.32.45/drivers/atm/fore200e.c
26399 --- linux-2.6.32.45/drivers/atm/fore200e.c 2011-03-27 14:31:47.000000000 -0400
26400 +++ linux-2.6.32.45/drivers/atm/fore200e.c 2011-04-17 15:56:46.000000000 -0400
26401 @@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200
26402 #endif
26403 /* check error condition */
26404 if (*entry->status & STATUS_ERROR)
26405 - atomic_inc(&vcc->stats->tx_err);
26406 + atomic_inc_unchecked(&vcc->stats->tx_err);
26407 else
26408 - atomic_inc(&vcc->stats->tx);
26409 + atomic_inc_unchecked(&vcc->stats->tx);
26410 }
26411 }
26412
26413 @@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore2
26414 if (skb == NULL) {
26415 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
26416
26417 - atomic_inc(&vcc->stats->rx_drop);
26418 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26419 return -ENOMEM;
26420 }
26421
26422 @@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore2
26423
26424 dev_kfree_skb_any(skb);
26425
26426 - atomic_inc(&vcc->stats->rx_drop);
26427 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26428 return -ENOMEM;
26429 }
26430
26431 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
26432
26433 vcc->push(vcc, skb);
26434 - atomic_inc(&vcc->stats->rx);
26435 + atomic_inc_unchecked(&vcc->stats->rx);
26436
26437 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
26438
26439 @@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200
26440 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
26441 fore200e->atm_dev->number,
26442 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
26443 - atomic_inc(&vcc->stats->rx_err);
26444 + atomic_inc_unchecked(&vcc->stats->rx_err);
26445 }
26446 }
26447
26448 @@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struc
26449 goto retry_here;
26450 }
26451
26452 - atomic_inc(&vcc->stats->tx_err);
26453 + atomic_inc_unchecked(&vcc->stats->tx_err);
26454
26455 fore200e->tx_sat++;
26456 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
26457 diff -urNp linux-2.6.32.45/drivers/atm/he.c linux-2.6.32.45/drivers/atm/he.c
26458 --- linux-2.6.32.45/drivers/atm/he.c 2011-03-27 14:31:47.000000000 -0400
26459 +++ linux-2.6.32.45/drivers/atm/he.c 2011-04-17 15:56:46.000000000 -0400
26460 @@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, i
26461
26462 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
26463 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
26464 - atomic_inc(&vcc->stats->rx_drop);
26465 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26466 goto return_host_buffers;
26467 }
26468
26469 @@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, i
26470 RBRQ_LEN_ERR(he_dev->rbrq_head)
26471 ? "LEN_ERR" : "",
26472 vcc->vpi, vcc->vci);
26473 - atomic_inc(&vcc->stats->rx_err);
26474 + atomic_inc_unchecked(&vcc->stats->rx_err);
26475 goto return_host_buffers;
26476 }
26477
26478 @@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, i
26479 vcc->push(vcc, skb);
26480 spin_lock(&he_dev->global_lock);
26481
26482 - atomic_inc(&vcc->stats->rx);
26483 + atomic_inc_unchecked(&vcc->stats->rx);
26484
26485 return_host_buffers:
26486 ++pdus_assembled;
26487 @@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
26488 tpd->vcc->pop(tpd->vcc, tpd->skb);
26489 else
26490 dev_kfree_skb_any(tpd->skb);
26491 - atomic_inc(&tpd->vcc->stats->tx_err);
26492 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
26493 }
26494 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
26495 return;
26496 @@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26497 vcc->pop(vcc, skb);
26498 else
26499 dev_kfree_skb_any(skb);
26500 - atomic_inc(&vcc->stats->tx_err);
26501 + atomic_inc_unchecked(&vcc->stats->tx_err);
26502 return -EINVAL;
26503 }
26504
26505 @@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26506 vcc->pop(vcc, skb);
26507 else
26508 dev_kfree_skb_any(skb);
26509 - atomic_inc(&vcc->stats->tx_err);
26510 + atomic_inc_unchecked(&vcc->stats->tx_err);
26511 return -EINVAL;
26512 }
26513 #endif
26514 @@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26515 vcc->pop(vcc, skb);
26516 else
26517 dev_kfree_skb_any(skb);
26518 - atomic_inc(&vcc->stats->tx_err);
26519 + atomic_inc_unchecked(&vcc->stats->tx_err);
26520 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26521 return -ENOMEM;
26522 }
26523 @@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26524 vcc->pop(vcc, skb);
26525 else
26526 dev_kfree_skb_any(skb);
26527 - atomic_inc(&vcc->stats->tx_err);
26528 + atomic_inc_unchecked(&vcc->stats->tx_err);
26529 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26530 return -ENOMEM;
26531 }
26532 @@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26533 __enqueue_tpd(he_dev, tpd, cid);
26534 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26535
26536 - atomic_inc(&vcc->stats->tx);
26537 + atomic_inc_unchecked(&vcc->stats->tx);
26538
26539 return 0;
26540 }
26541 diff -urNp linux-2.6.32.45/drivers/atm/horizon.c linux-2.6.32.45/drivers/atm/horizon.c
26542 --- linux-2.6.32.45/drivers/atm/horizon.c 2011-03-27 14:31:47.000000000 -0400
26543 +++ linux-2.6.32.45/drivers/atm/horizon.c 2011-04-17 15:56:46.000000000 -0400
26544 @@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev,
26545 {
26546 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
26547 // VC layer stats
26548 - atomic_inc(&vcc->stats->rx);
26549 + atomic_inc_unchecked(&vcc->stats->rx);
26550 __net_timestamp(skb);
26551 // end of our responsability
26552 vcc->push (vcc, skb);
26553 @@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const
26554 dev->tx_iovec = NULL;
26555
26556 // VC layer stats
26557 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26558 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26559
26560 // free the skb
26561 hrz_kfree_skb (skb);
26562 diff -urNp linux-2.6.32.45/drivers/atm/idt77252.c linux-2.6.32.45/drivers/atm/idt77252.c
26563 --- linux-2.6.32.45/drivers/atm/idt77252.c 2011-03-27 14:31:47.000000000 -0400
26564 +++ linux-2.6.32.45/drivers/atm/idt77252.c 2011-04-17 15:56:46.000000000 -0400
26565 @@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, str
26566 else
26567 dev_kfree_skb(skb);
26568
26569 - atomic_inc(&vcc->stats->tx);
26570 + atomic_inc_unchecked(&vcc->stats->tx);
26571 }
26572
26573 atomic_dec(&scq->used);
26574 @@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, st
26575 if ((sb = dev_alloc_skb(64)) == NULL) {
26576 printk("%s: Can't allocate buffers for aal0.\n",
26577 card->name);
26578 - atomic_add(i, &vcc->stats->rx_drop);
26579 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
26580 break;
26581 }
26582 if (!atm_charge(vcc, sb->truesize)) {
26583 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
26584 card->name);
26585 - atomic_add(i - 1, &vcc->stats->rx_drop);
26586 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
26587 dev_kfree_skb(sb);
26588 break;
26589 }
26590 @@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, st
26591 ATM_SKB(sb)->vcc = vcc;
26592 __net_timestamp(sb);
26593 vcc->push(vcc, sb);
26594 - atomic_inc(&vcc->stats->rx);
26595 + atomic_inc_unchecked(&vcc->stats->rx);
26596
26597 cell += ATM_CELL_PAYLOAD;
26598 }
26599 @@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, st
26600 "(CDC: %08x)\n",
26601 card->name, len, rpp->len, readl(SAR_REG_CDC));
26602 recycle_rx_pool_skb(card, rpp);
26603 - atomic_inc(&vcc->stats->rx_err);
26604 + atomic_inc_unchecked(&vcc->stats->rx_err);
26605 return;
26606 }
26607 if (stat & SAR_RSQE_CRC) {
26608 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
26609 recycle_rx_pool_skb(card, rpp);
26610 - atomic_inc(&vcc->stats->rx_err);
26611 + atomic_inc_unchecked(&vcc->stats->rx_err);
26612 return;
26613 }
26614 if (skb_queue_len(&rpp->queue) > 1) {
26615 @@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, st
26616 RXPRINTK("%s: Can't alloc RX skb.\n",
26617 card->name);
26618 recycle_rx_pool_skb(card, rpp);
26619 - atomic_inc(&vcc->stats->rx_err);
26620 + atomic_inc_unchecked(&vcc->stats->rx_err);
26621 return;
26622 }
26623 if (!atm_charge(vcc, skb->truesize)) {
26624 @@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, st
26625 __net_timestamp(skb);
26626
26627 vcc->push(vcc, skb);
26628 - atomic_inc(&vcc->stats->rx);
26629 + atomic_inc_unchecked(&vcc->stats->rx);
26630
26631 return;
26632 }
26633 @@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, st
26634 __net_timestamp(skb);
26635
26636 vcc->push(vcc, skb);
26637 - atomic_inc(&vcc->stats->rx);
26638 + atomic_inc_unchecked(&vcc->stats->rx);
26639
26640 if (skb->truesize > SAR_FB_SIZE_3)
26641 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
26642 @@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
26643 if (vcc->qos.aal != ATM_AAL0) {
26644 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
26645 card->name, vpi, vci);
26646 - atomic_inc(&vcc->stats->rx_drop);
26647 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26648 goto drop;
26649 }
26650
26651 if ((sb = dev_alloc_skb(64)) == NULL) {
26652 printk("%s: Can't allocate buffers for AAL0.\n",
26653 card->name);
26654 - atomic_inc(&vcc->stats->rx_err);
26655 + atomic_inc_unchecked(&vcc->stats->rx_err);
26656 goto drop;
26657 }
26658
26659 @@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
26660 ATM_SKB(sb)->vcc = vcc;
26661 __net_timestamp(sb);
26662 vcc->push(vcc, sb);
26663 - atomic_inc(&vcc->stats->rx);
26664 + atomic_inc_unchecked(&vcc->stats->rx);
26665
26666 drop:
26667 skb_pull(queue, 64);
26668 @@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26669
26670 if (vc == NULL) {
26671 printk("%s: NULL connection in send().\n", card->name);
26672 - atomic_inc(&vcc->stats->tx_err);
26673 + atomic_inc_unchecked(&vcc->stats->tx_err);
26674 dev_kfree_skb(skb);
26675 return -EINVAL;
26676 }
26677 if (!test_bit(VCF_TX, &vc->flags)) {
26678 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
26679 - atomic_inc(&vcc->stats->tx_err);
26680 + atomic_inc_unchecked(&vcc->stats->tx_err);
26681 dev_kfree_skb(skb);
26682 return -EINVAL;
26683 }
26684 @@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26685 break;
26686 default:
26687 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
26688 - atomic_inc(&vcc->stats->tx_err);
26689 + atomic_inc_unchecked(&vcc->stats->tx_err);
26690 dev_kfree_skb(skb);
26691 return -EINVAL;
26692 }
26693
26694 if (skb_shinfo(skb)->nr_frags != 0) {
26695 printk("%s: No scatter-gather yet.\n", card->name);
26696 - atomic_inc(&vcc->stats->tx_err);
26697 + atomic_inc_unchecked(&vcc->stats->tx_err);
26698 dev_kfree_skb(skb);
26699 return -EINVAL;
26700 }
26701 @@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26702
26703 err = queue_skb(card, vc, skb, oam);
26704 if (err) {
26705 - atomic_inc(&vcc->stats->tx_err);
26706 + atomic_inc_unchecked(&vcc->stats->tx_err);
26707 dev_kfree_skb(skb);
26708 return err;
26709 }
26710 @@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
26711 skb = dev_alloc_skb(64);
26712 if (!skb) {
26713 printk("%s: Out of memory in send_oam().\n", card->name);
26714 - atomic_inc(&vcc->stats->tx_err);
26715 + atomic_inc_unchecked(&vcc->stats->tx_err);
26716 return -ENOMEM;
26717 }
26718 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
26719 diff -urNp linux-2.6.32.45/drivers/atm/iphase.c linux-2.6.32.45/drivers/atm/iphase.c
26720 --- linux-2.6.32.45/drivers/atm/iphase.c 2011-03-27 14:31:47.000000000 -0400
26721 +++ linux-2.6.32.45/drivers/atm/iphase.c 2011-04-17 15:56:46.000000000 -0400
26722 @@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
26723 status = (u_short) (buf_desc_ptr->desc_mode);
26724 if (status & (RX_CER | RX_PTE | RX_OFL))
26725 {
26726 - atomic_inc(&vcc->stats->rx_err);
26727 + atomic_inc_unchecked(&vcc->stats->rx_err);
26728 IF_ERR(printk("IA: bad packet, dropping it");)
26729 if (status & RX_CER) {
26730 IF_ERR(printk(" cause: packet CRC error\n");)
26731 @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
26732 len = dma_addr - buf_addr;
26733 if (len > iadev->rx_buf_sz) {
26734 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
26735 - atomic_inc(&vcc->stats->rx_err);
26736 + atomic_inc_unchecked(&vcc->stats->rx_err);
26737 goto out_free_desc;
26738 }
26739
26740 @@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *
26741 ia_vcc = INPH_IA_VCC(vcc);
26742 if (ia_vcc == NULL)
26743 {
26744 - atomic_inc(&vcc->stats->rx_err);
26745 + atomic_inc_unchecked(&vcc->stats->rx_err);
26746 dev_kfree_skb_any(skb);
26747 atm_return(vcc, atm_guess_pdu2truesize(len));
26748 goto INCR_DLE;
26749 @@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *
26750 if ((length > iadev->rx_buf_sz) || (length >
26751 (skb->len - sizeof(struct cpcs_trailer))))
26752 {
26753 - atomic_inc(&vcc->stats->rx_err);
26754 + atomic_inc_unchecked(&vcc->stats->rx_err);
26755 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
26756 length, skb->len);)
26757 dev_kfree_skb_any(skb);
26758 @@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *
26759
26760 IF_RX(printk("rx_dle_intr: skb push");)
26761 vcc->push(vcc,skb);
26762 - atomic_inc(&vcc->stats->rx);
26763 + atomic_inc_unchecked(&vcc->stats->rx);
26764 iadev->rx_pkt_cnt++;
26765 }
26766 INCR_DLE:
26767 @@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev,
26768 {
26769 struct k_sonet_stats *stats;
26770 stats = &PRIV(_ia_dev[board])->sonet_stats;
26771 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
26772 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
26773 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
26774 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
26775 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
26776 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
26777 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
26778 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
26779 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
26780 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
26781 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
26782 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
26783 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
26784 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
26785 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
26786 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
26787 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
26788 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
26789 }
26790 ia_cmds.status = 0;
26791 break;
26792 @@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
26793 if ((desc == 0) || (desc > iadev->num_tx_desc))
26794 {
26795 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
26796 - atomic_inc(&vcc->stats->tx);
26797 + atomic_inc_unchecked(&vcc->stats->tx);
26798 if (vcc->pop)
26799 vcc->pop(vcc, skb);
26800 else
26801 @@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
26802 ATM_DESC(skb) = vcc->vci;
26803 skb_queue_tail(&iadev->tx_dma_q, skb);
26804
26805 - atomic_inc(&vcc->stats->tx);
26806 + atomic_inc_unchecked(&vcc->stats->tx);
26807 iadev->tx_pkt_cnt++;
26808 /* Increment transaction counter */
26809 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
26810
26811 #if 0
26812 /* add flow control logic */
26813 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
26814 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
26815 if (iavcc->vc_desc_cnt > 10) {
26816 vcc->tx_quota = vcc->tx_quota * 3 / 4;
26817 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
26818 diff -urNp linux-2.6.32.45/drivers/atm/lanai.c linux-2.6.32.45/drivers/atm/lanai.c
26819 --- linux-2.6.32.45/drivers/atm/lanai.c 2011-03-27 14:31:47.000000000 -0400
26820 +++ linux-2.6.32.45/drivers/atm/lanai.c 2011-04-17 15:56:46.000000000 -0400
26821 @@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct l
26822 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
26823 lanai_endtx(lanai, lvcc);
26824 lanai_free_skb(lvcc->tx.atmvcc, skb);
26825 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
26826 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
26827 }
26828
26829 /* Try to fill the buffer - don't call unless there is backlog */
26830 @@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc
26831 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
26832 __net_timestamp(skb);
26833 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
26834 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
26835 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
26836 out:
26837 lvcc->rx.buf.ptr = end;
26838 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
26839 @@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_d
26840 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
26841 "vcc %d\n", lanai->number, (unsigned int) s, vci);
26842 lanai->stats.service_rxnotaal5++;
26843 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26844 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26845 return 0;
26846 }
26847 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
26848 @@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_d
26849 int bytes;
26850 read_unlock(&vcc_sklist_lock);
26851 DPRINTK("got trashed rx pdu on vci %d\n", vci);
26852 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26853 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26854 lvcc->stats.x.aal5.service_trash++;
26855 bytes = (SERVICE_GET_END(s) * 16) -
26856 (((unsigned long) lvcc->rx.buf.ptr) -
26857 @@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_d
26858 }
26859 if (s & SERVICE_STREAM) {
26860 read_unlock(&vcc_sklist_lock);
26861 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26862 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26863 lvcc->stats.x.aal5.service_stream++;
26864 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
26865 "PDU on VCI %d!\n", lanai->number, vci);
26866 @@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_d
26867 return 0;
26868 }
26869 DPRINTK("got rx crc error on vci %d\n", vci);
26870 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26871 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26872 lvcc->stats.x.aal5.service_rxcrc++;
26873 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
26874 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
26875 diff -urNp linux-2.6.32.45/drivers/atm/nicstar.c linux-2.6.32.45/drivers/atm/nicstar.c
26876 --- linux-2.6.32.45/drivers/atm/nicstar.c 2011-03-27 14:31:47.000000000 -0400
26877 +++ linux-2.6.32.45/drivers/atm/nicstar.c 2011-04-17 15:56:46.000000000 -0400
26878 @@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc,
26879 if ((vc = (vc_map *) vcc->dev_data) == NULL)
26880 {
26881 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
26882 - atomic_inc(&vcc->stats->tx_err);
26883 + atomic_inc_unchecked(&vcc->stats->tx_err);
26884 dev_kfree_skb_any(skb);
26885 return -EINVAL;
26886 }
26887 @@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc,
26888 if (!vc->tx)
26889 {
26890 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
26891 - atomic_inc(&vcc->stats->tx_err);
26892 + atomic_inc_unchecked(&vcc->stats->tx_err);
26893 dev_kfree_skb_any(skb);
26894 return -EINVAL;
26895 }
26896 @@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc,
26897 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
26898 {
26899 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
26900 - atomic_inc(&vcc->stats->tx_err);
26901 + atomic_inc_unchecked(&vcc->stats->tx_err);
26902 dev_kfree_skb_any(skb);
26903 return -EINVAL;
26904 }
26905 @@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc,
26906 if (skb_shinfo(skb)->nr_frags != 0)
26907 {
26908 printk("nicstar%d: No scatter-gather yet.\n", card->index);
26909 - atomic_inc(&vcc->stats->tx_err);
26910 + atomic_inc_unchecked(&vcc->stats->tx_err);
26911 dev_kfree_skb_any(skb);
26912 return -EINVAL;
26913 }
26914 @@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc,
26915
26916 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
26917 {
26918 - atomic_inc(&vcc->stats->tx_err);
26919 + atomic_inc_unchecked(&vcc->stats->tx_err);
26920 dev_kfree_skb_any(skb);
26921 return -EIO;
26922 }
26923 - atomic_inc(&vcc->stats->tx);
26924 + atomic_inc_unchecked(&vcc->stats->tx);
26925
26926 return 0;
26927 }
26928 @@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_
26929 {
26930 printk("nicstar%d: Can't allocate buffers for aal0.\n",
26931 card->index);
26932 - atomic_add(i,&vcc->stats->rx_drop);
26933 + atomic_add_unchecked(i,&vcc->stats->rx_drop);
26934 break;
26935 }
26936 if (!atm_charge(vcc, sb->truesize))
26937 {
26938 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
26939 card->index);
26940 - atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
26941 + atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
26942 dev_kfree_skb_any(sb);
26943 break;
26944 }
26945 @@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_
26946 ATM_SKB(sb)->vcc = vcc;
26947 __net_timestamp(sb);
26948 vcc->push(vcc, sb);
26949 - atomic_inc(&vcc->stats->rx);
26950 + atomic_inc_unchecked(&vcc->stats->rx);
26951 cell += ATM_CELL_PAYLOAD;
26952 }
26953
26954 @@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_
26955 if (iovb == NULL)
26956 {
26957 printk("nicstar%d: Out of iovec buffers.\n", card->index);
26958 - atomic_inc(&vcc->stats->rx_drop);
26959 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26960 recycle_rx_buf(card, skb);
26961 return;
26962 }
26963 @@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_
26964 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
26965 {
26966 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
26967 - atomic_inc(&vcc->stats->rx_err);
26968 + atomic_inc_unchecked(&vcc->stats->rx_err);
26969 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
26970 NS_SKB(iovb)->iovcnt = 0;
26971 iovb->len = 0;
26972 @@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_
26973 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
26974 card->index);
26975 which_list(card, skb);
26976 - atomic_inc(&vcc->stats->rx_err);
26977 + atomic_inc_unchecked(&vcc->stats->rx_err);
26978 recycle_rx_buf(card, skb);
26979 vc->rx_iov = NULL;
26980 recycle_iov_buf(card, iovb);
26981 @@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_
26982 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
26983 card->index);
26984 which_list(card, skb);
26985 - atomic_inc(&vcc->stats->rx_err);
26986 + atomic_inc_unchecked(&vcc->stats->rx_err);
26987 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26988 NS_SKB(iovb)->iovcnt);
26989 vc->rx_iov = NULL;
26990 @@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_
26991 printk(" - PDU size mismatch.\n");
26992 else
26993 printk(".\n");
26994 - atomic_inc(&vcc->stats->rx_err);
26995 + atomic_inc_unchecked(&vcc->stats->rx_err);
26996 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26997 NS_SKB(iovb)->iovcnt);
26998 vc->rx_iov = NULL;
26999 @@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_
27000 if (!atm_charge(vcc, skb->truesize))
27001 {
27002 push_rxbufs(card, skb);
27003 - atomic_inc(&vcc->stats->rx_drop);
27004 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27005 }
27006 else
27007 {
27008 @@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_
27009 ATM_SKB(skb)->vcc = vcc;
27010 __net_timestamp(skb);
27011 vcc->push(vcc, skb);
27012 - atomic_inc(&vcc->stats->rx);
27013 + atomic_inc_unchecked(&vcc->stats->rx);
27014 }
27015 }
27016 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
27017 @@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_
27018 if (!atm_charge(vcc, sb->truesize))
27019 {
27020 push_rxbufs(card, sb);
27021 - atomic_inc(&vcc->stats->rx_drop);
27022 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27023 }
27024 else
27025 {
27026 @@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_
27027 ATM_SKB(sb)->vcc = vcc;
27028 __net_timestamp(sb);
27029 vcc->push(vcc, sb);
27030 - atomic_inc(&vcc->stats->rx);
27031 + atomic_inc_unchecked(&vcc->stats->rx);
27032 }
27033
27034 push_rxbufs(card, skb);
27035 @@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_
27036 if (!atm_charge(vcc, skb->truesize))
27037 {
27038 push_rxbufs(card, skb);
27039 - atomic_inc(&vcc->stats->rx_drop);
27040 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27041 }
27042 else
27043 {
27044 @@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_
27045 ATM_SKB(skb)->vcc = vcc;
27046 __net_timestamp(skb);
27047 vcc->push(vcc, skb);
27048 - atomic_inc(&vcc->stats->rx);
27049 + atomic_inc_unchecked(&vcc->stats->rx);
27050 }
27051
27052 push_rxbufs(card, sb);
27053 @@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_
27054 if (hb == NULL)
27055 {
27056 printk("nicstar%d: Out of huge buffers.\n", card->index);
27057 - atomic_inc(&vcc->stats->rx_drop);
27058 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27059 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
27060 NS_SKB(iovb)->iovcnt);
27061 vc->rx_iov = NULL;
27062 @@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_
27063 }
27064 else
27065 dev_kfree_skb_any(hb);
27066 - atomic_inc(&vcc->stats->rx_drop);
27067 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27068 }
27069 else
27070 {
27071 @@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_
27072 #endif /* NS_USE_DESTRUCTORS */
27073 __net_timestamp(hb);
27074 vcc->push(vcc, hb);
27075 - atomic_inc(&vcc->stats->rx);
27076 + atomic_inc_unchecked(&vcc->stats->rx);
27077 }
27078 }
27079
27080 diff -urNp linux-2.6.32.45/drivers/atm/solos-pci.c linux-2.6.32.45/drivers/atm/solos-pci.c
27081 --- linux-2.6.32.45/drivers/atm/solos-pci.c 2011-04-17 17:00:52.000000000 -0400
27082 +++ linux-2.6.32.45/drivers/atm/solos-pci.c 2011-05-16 21:46:57.000000000 -0400
27083 @@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
27084 }
27085 atm_charge(vcc, skb->truesize);
27086 vcc->push(vcc, skb);
27087 - atomic_inc(&vcc->stats->rx);
27088 + atomic_inc_unchecked(&vcc->stats->rx);
27089 break;
27090
27091 case PKT_STATUS:
27092 @@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *
27093 char msg[500];
27094 char item[10];
27095
27096 + pax_track_stack();
27097 +
27098 len = buf->len;
27099 for (i = 0; i < len; i++){
27100 if(i % 8 == 0)
27101 @@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_car
27102 vcc = SKB_CB(oldskb)->vcc;
27103
27104 if (vcc) {
27105 - atomic_inc(&vcc->stats->tx);
27106 + atomic_inc_unchecked(&vcc->stats->tx);
27107 solos_pop(vcc, oldskb);
27108 } else
27109 dev_kfree_skb_irq(oldskb);
27110 diff -urNp linux-2.6.32.45/drivers/atm/suni.c linux-2.6.32.45/drivers/atm/suni.c
27111 --- linux-2.6.32.45/drivers/atm/suni.c 2011-03-27 14:31:47.000000000 -0400
27112 +++ linux-2.6.32.45/drivers/atm/suni.c 2011-04-17 15:56:46.000000000 -0400
27113 @@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
27114
27115
27116 #define ADD_LIMITED(s,v) \
27117 - atomic_add((v),&stats->s); \
27118 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
27119 + atomic_add_unchecked((v),&stats->s); \
27120 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
27121
27122
27123 static void suni_hz(unsigned long from_timer)
27124 diff -urNp linux-2.6.32.45/drivers/atm/uPD98402.c linux-2.6.32.45/drivers/atm/uPD98402.c
27125 --- linux-2.6.32.45/drivers/atm/uPD98402.c 2011-03-27 14:31:47.000000000 -0400
27126 +++ linux-2.6.32.45/drivers/atm/uPD98402.c 2011-04-17 15:56:46.000000000 -0400
27127 @@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *d
27128 struct sonet_stats tmp;
27129 int error = 0;
27130
27131 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
27132 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
27133 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
27134 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
27135 if (zero && !error) {
27136 @@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev
27137
27138
27139 #define ADD_LIMITED(s,v) \
27140 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
27141 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
27142 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
27143 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
27144 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
27145 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
27146
27147
27148 static void stat_event(struct atm_dev *dev)
27149 @@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev
27150 if (reason & uPD98402_INT_PFM) stat_event(dev);
27151 if (reason & uPD98402_INT_PCO) {
27152 (void) GET(PCOCR); /* clear interrupt cause */
27153 - atomic_add(GET(HECCT),
27154 + atomic_add_unchecked(GET(HECCT),
27155 &PRIV(dev)->sonet_stats.uncorr_hcs);
27156 }
27157 if ((reason & uPD98402_INT_RFO) &&
27158 @@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev
27159 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
27160 uPD98402_INT_LOS),PIMR); /* enable them */
27161 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
27162 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
27163 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
27164 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
27165 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
27166 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
27167 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
27168 return 0;
27169 }
27170
27171 diff -urNp linux-2.6.32.45/drivers/atm/zatm.c linux-2.6.32.45/drivers/atm/zatm.c
27172 --- linux-2.6.32.45/drivers/atm/zatm.c 2011-03-27 14:31:47.000000000 -0400
27173 +++ linux-2.6.32.45/drivers/atm/zatm.c 2011-04-17 15:56:46.000000000 -0400
27174 @@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
27175 }
27176 if (!size) {
27177 dev_kfree_skb_irq(skb);
27178 - if (vcc) atomic_inc(&vcc->stats->rx_err);
27179 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
27180 continue;
27181 }
27182 if (!atm_charge(vcc,skb->truesize)) {
27183 @@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
27184 skb->len = size;
27185 ATM_SKB(skb)->vcc = vcc;
27186 vcc->push(vcc,skb);
27187 - atomic_inc(&vcc->stats->rx);
27188 + atomic_inc_unchecked(&vcc->stats->rx);
27189 }
27190 zout(pos & 0xffff,MTA(mbx));
27191 #if 0 /* probably a stupid idea */
27192 @@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
27193 skb_queue_head(&zatm_vcc->backlog,skb);
27194 break;
27195 }
27196 - atomic_inc(&vcc->stats->tx);
27197 + atomic_inc_unchecked(&vcc->stats->tx);
27198 wake_up(&zatm_vcc->tx_wait);
27199 }
27200
27201 diff -urNp linux-2.6.32.45/drivers/base/bus.c linux-2.6.32.45/drivers/base/bus.c
27202 --- linux-2.6.32.45/drivers/base/bus.c 2011-03-27 14:31:47.000000000 -0400
27203 +++ linux-2.6.32.45/drivers/base/bus.c 2011-04-17 15:56:46.000000000 -0400
27204 @@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kob
27205 return ret;
27206 }
27207
27208 -static struct sysfs_ops driver_sysfs_ops = {
27209 +static const struct sysfs_ops driver_sysfs_ops = {
27210 .show = drv_attr_show,
27211 .store = drv_attr_store,
27212 };
27213 @@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kob
27214 return ret;
27215 }
27216
27217 -static struct sysfs_ops bus_sysfs_ops = {
27218 +static const struct sysfs_ops bus_sysfs_ops = {
27219 .show = bus_attr_show,
27220 .store = bus_attr_store,
27221 };
27222 @@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset
27223 return 0;
27224 }
27225
27226 -static struct kset_uevent_ops bus_uevent_ops = {
27227 +static const struct kset_uevent_ops bus_uevent_ops = {
27228 .filter = bus_uevent_filter,
27229 };
27230
27231 diff -urNp linux-2.6.32.45/drivers/base/class.c linux-2.6.32.45/drivers/base/class.c
27232 --- linux-2.6.32.45/drivers/base/class.c 2011-03-27 14:31:47.000000000 -0400
27233 +++ linux-2.6.32.45/drivers/base/class.c 2011-04-17 15:56:46.000000000 -0400
27234 @@ -63,7 +63,7 @@ static void class_release(struct kobject
27235 kfree(cp);
27236 }
27237
27238 -static struct sysfs_ops class_sysfs_ops = {
27239 +static const struct sysfs_ops class_sysfs_ops = {
27240 .show = class_attr_show,
27241 .store = class_attr_store,
27242 };
27243 diff -urNp linux-2.6.32.45/drivers/base/core.c linux-2.6.32.45/drivers/base/core.c
27244 --- linux-2.6.32.45/drivers/base/core.c 2011-03-27 14:31:47.000000000 -0400
27245 +++ linux-2.6.32.45/drivers/base/core.c 2011-04-17 15:56:46.000000000 -0400
27246 @@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kob
27247 return ret;
27248 }
27249
27250 -static struct sysfs_ops dev_sysfs_ops = {
27251 +static const struct sysfs_ops dev_sysfs_ops = {
27252 .show = dev_attr_show,
27253 .store = dev_attr_store,
27254 };
27255 @@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset,
27256 return retval;
27257 }
27258
27259 -static struct kset_uevent_ops device_uevent_ops = {
27260 +static const struct kset_uevent_ops device_uevent_ops = {
27261 .filter = dev_uevent_filter,
27262 .name = dev_uevent_name,
27263 .uevent = dev_uevent,
27264 diff -urNp linux-2.6.32.45/drivers/base/memory.c linux-2.6.32.45/drivers/base/memory.c
27265 --- linux-2.6.32.45/drivers/base/memory.c 2011-03-27 14:31:47.000000000 -0400
27266 +++ linux-2.6.32.45/drivers/base/memory.c 2011-04-17 15:56:46.000000000 -0400
27267 @@ -44,7 +44,7 @@ static int memory_uevent(struct kset *ks
27268 return retval;
27269 }
27270
27271 -static struct kset_uevent_ops memory_uevent_ops = {
27272 +static const struct kset_uevent_ops memory_uevent_ops = {
27273 .name = memory_uevent_name,
27274 .uevent = memory_uevent,
27275 };
27276 diff -urNp linux-2.6.32.45/drivers/base/sys.c linux-2.6.32.45/drivers/base/sys.c
27277 --- linux-2.6.32.45/drivers/base/sys.c 2011-03-27 14:31:47.000000000 -0400
27278 +++ linux-2.6.32.45/drivers/base/sys.c 2011-04-17 15:56:46.000000000 -0400
27279 @@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struc
27280 return -EIO;
27281 }
27282
27283 -static struct sysfs_ops sysfs_ops = {
27284 +static const struct sysfs_ops sysfs_ops = {
27285 .show = sysdev_show,
27286 .store = sysdev_store,
27287 };
27288 @@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct
27289 return -EIO;
27290 }
27291
27292 -static struct sysfs_ops sysfs_class_ops = {
27293 +static const struct sysfs_ops sysfs_class_ops = {
27294 .show = sysdev_class_show,
27295 .store = sysdev_class_store,
27296 };
27297 diff -urNp linux-2.6.32.45/drivers/block/cciss.c linux-2.6.32.45/drivers/block/cciss.c
27298 --- linux-2.6.32.45/drivers/block/cciss.c 2011-03-27 14:31:47.000000000 -0400
27299 +++ linux-2.6.32.45/drivers/block/cciss.c 2011-08-05 20:33:55.000000000 -0400
27300 @@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct
27301 int err;
27302 u32 cp;
27303
27304 + memset(&arg64, 0, sizeof(arg64));
27305 +
27306 err = 0;
27307 err |=
27308 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
27309 @@ -2852,7 +2854,7 @@ static unsigned long pollcomplete(int ct
27310 /* Wait (up to 20 seconds) for a command to complete */
27311
27312 for (i = 20 * HZ; i > 0; i--) {
27313 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
27314 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
27315 if (done == FIFO_EMPTY)
27316 schedule_timeout_uninterruptible(1);
27317 else
27318 @@ -2876,7 +2878,7 @@ static int sendcmd_core(ctlr_info_t *h,
27319 resend_cmd1:
27320
27321 /* Disable interrupt on the board. */
27322 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
27323 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
27324
27325 /* Make sure there is room in the command FIFO */
27326 /* Actually it should be completely empty at this time */
27327 @@ -2884,13 +2886,13 @@ resend_cmd1:
27328 /* tape side of the driver. */
27329 for (i = 200000; i > 0; i--) {
27330 /* if fifo isn't full go */
27331 - if (!(h->access.fifo_full(h)))
27332 + if (!(h->access->fifo_full(h)))
27333 break;
27334 udelay(10);
27335 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
27336 " waiting!\n", h->ctlr);
27337 }
27338 - h->access.submit_command(h, c); /* Send the cmd */
27339 + h->access->submit_command(h, c); /* Send the cmd */
27340 do {
27341 complete = pollcomplete(h->ctlr);
27342
27343 @@ -3023,7 +3025,7 @@ static void start_io(ctlr_info_t *h)
27344 while (!hlist_empty(&h->reqQ)) {
27345 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
27346 /* can't do anything if fifo is full */
27347 - if ((h->access.fifo_full(h))) {
27348 + if ((h->access->fifo_full(h))) {
27349 printk(KERN_WARNING "cciss: fifo full\n");
27350 break;
27351 }
27352 @@ -3033,7 +3035,7 @@ static void start_io(ctlr_info_t *h)
27353 h->Qdepth--;
27354
27355 /* Tell the controller execute command */
27356 - h->access.submit_command(h, c);
27357 + h->access->submit_command(h, c);
27358
27359 /* Put job onto the completed Q */
27360 addQ(&h->cmpQ, c);
27361 @@ -3393,17 +3395,17 @@ startio:
27362
27363 static inline unsigned long get_next_completion(ctlr_info_t *h)
27364 {
27365 - return h->access.command_completed(h);
27366 + return h->access->command_completed(h);
27367 }
27368
27369 static inline int interrupt_pending(ctlr_info_t *h)
27370 {
27371 - return h->access.intr_pending(h);
27372 + return h->access->intr_pending(h);
27373 }
27374
27375 static inline long interrupt_not_for_us(ctlr_info_t *h)
27376 {
27377 - return (((h->access.intr_pending(h) == 0) ||
27378 + return (((h->access->intr_pending(h) == 0) ||
27379 (h->interrupts_enabled == 0)));
27380 }
27381
27382 @@ -3892,7 +3894,7 @@ static int __devinit cciss_pci_init(ctlr
27383 */
27384 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
27385 c->product_name = products[prod_index].product_name;
27386 - c->access = *(products[prod_index].access);
27387 + c->access = products[prod_index].access;
27388 c->nr_cmds = c->max_commands - 4;
27389 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
27390 (readb(&c->cfgtable->Signature[1]) != 'I') ||
27391 @@ -4291,7 +4293,7 @@ static int __devinit cciss_init_one(stru
27392 }
27393
27394 /* make sure the board interrupts are off */
27395 - hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
27396 + hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_OFF);
27397 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
27398 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
27399 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
27400 @@ -4341,7 +4343,7 @@ static int __devinit cciss_init_one(stru
27401 cciss_scsi_setup(i);
27402
27403 /* Turn the interrupts on so we can service requests */
27404 - hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
27405 + hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_ON);
27406
27407 /* Get the firmware version */
27408 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
27409 diff -urNp linux-2.6.32.45/drivers/block/cciss.h linux-2.6.32.45/drivers/block/cciss.h
27410 --- linux-2.6.32.45/drivers/block/cciss.h 2011-08-09 18:35:28.000000000 -0400
27411 +++ linux-2.6.32.45/drivers/block/cciss.h 2011-08-09 18:33:59.000000000 -0400
27412 @@ -90,7 +90,7 @@ struct ctlr_info
27413 // information about each logical volume
27414 drive_info_struct *drv[CISS_MAX_LUN];
27415
27416 - struct access_method access;
27417 + struct access_method *access;
27418
27419 /* queue and queue Info */
27420 struct hlist_head reqQ;
27421 diff -urNp linux-2.6.32.45/drivers/block/cpqarray.c linux-2.6.32.45/drivers/block/cpqarray.c
27422 --- linux-2.6.32.45/drivers/block/cpqarray.c 2011-03-27 14:31:47.000000000 -0400
27423 +++ linux-2.6.32.45/drivers/block/cpqarray.c 2011-08-05 20:33:55.000000000 -0400
27424 @@ -402,7 +402,7 @@ static int __init cpqarray_register_ctlr
27425 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
27426 goto Enomem4;
27427 }
27428 - hba[i]->access.set_intr_mask(hba[i], 0);
27429 + hba[i]->access->set_intr_mask(hba[i], 0);
27430 if (request_irq(hba[i]->intr, do_ida_intr,
27431 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
27432 {
27433 @@ -460,7 +460,7 @@ static int __init cpqarray_register_ctlr
27434 add_timer(&hba[i]->timer);
27435
27436 /* Enable IRQ now that spinlock and rate limit timer are set up */
27437 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
27438 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
27439
27440 for(j=0; j<NWD; j++) {
27441 struct gendisk *disk = ida_gendisk[i][j];
27442 @@ -695,7 +695,7 @@ DBGINFO(
27443 for(i=0; i<NR_PRODUCTS; i++) {
27444 if (board_id == products[i].board_id) {
27445 c->product_name = products[i].product_name;
27446 - c->access = *(products[i].access);
27447 + c->access = products[i].access;
27448 break;
27449 }
27450 }
27451 @@ -793,7 +793,7 @@ static int __init cpqarray_eisa_detect(v
27452 hba[ctlr]->intr = intr;
27453 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
27454 hba[ctlr]->product_name = products[j].product_name;
27455 - hba[ctlr]->access = *(products[j].access);
27456 + hba[ctlr]->access = products[j].access;
27457 hba[ctlr]->ctlr = ctlr;
27458 hba[ctlr]->board_id = board_id;
27459 hba[ctlr]->pci_dev = NULL; /* not PCI */
27460 @@ -896,6 +896,8 @@ static void do_ida_request(struct reques
27461 struct scatterlist tmp_sg[SG_MAX];
27462 int i, dir, seg;
27463
27464 + pax_track_stack();
27465 +
27466 if (blk_queue_plugged(q))
27467 goto startio;
27468
27469 @@ -968,7 +970,7 @@ static void start_io(ctlr_info_t *h)
27470
27471 while((c = h->reqQ) != NULL) {
27472 /* Can't do anything if we're busy */
27473 - if (h->access.fifo_full(h) == 0)
27474 + if (h->access->fifo_full(h) == 0)
27475 return;
27476
27477 /* Get the first entry from the request Q */
27478 @@ -976,7 +978,7 @@ static void start_io(ctlr_info_t *h)
27479 h->Qdepth--;
27480
27481 /* Tell the controller to do our bidding */
27482 - h->access.submit_command(h, c);
27483 + h->access->submit_command(h, c);
27484
27485 /* Get onto the completion Q */
27486 addQ(&h->cmpQ, c);
27487 @@ -1038,7 +1040,7 @@ static irqreturn_t do_ida_intr(int irq,
27488 unsigned long flags;
27489 __u32 a,a1;
27490
27491 - istat = h->access.intr_pending(h);
27492 + istat = h->access->intr_pending(h);
27493 /* Is this interrupt for us? */
27494 if (istat == 0)
27495 return IRQ_NONE;
27496 @@ -1049,7 +1051,7 @@ static irqreturn_t do_ida_intr(int irq,
27497 */
27498 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
27499 if (istat & FIFO_NOT_EMPTY) {
27500 - while((a = h->access.command_completed(h))) {
27501 + while((a = h->access->command_completed(h))) {
27502 a1 = a; a &= ~3;
27503 if ((c = h->cmpQ) == NULL)
27504 {
27505 @@ -1434,11 +1436,11 @@ static int sendcmd(
27506 /*
27507 * Disable interrupt
27508 */
27509 - info_p->access.set_intr_mask(info_p, 0);
27510 + info_p->access->set_intr_mask(info_p, 0);
27511 /* Make sure there is room in the command FIFO */
27512 /* Actually it should be completely empty at this time. */
27513 for (i = 200000; i > 0; i--) {
27514 - temp = info_p->access.fifo_full(info_p);
27515 + temp = info_p->access->fifo_full(info_p);
27516 if (temp != 0) {
27517 break;
27518 }
27519 @@ -1451,7 +1453,7 @@ DBG(
27520 /*
27521 * Send the cmd
27522 */
27523 - info_p->access.submit_command(info_p, c);
27524 + info_p->access->submit_command(info_p, c);
27525 complete = pollcomplete(ctlr);
27526
27527 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
27528 @@ -1534,9 +1536,9 @@ static int revalidate_allvol(ctlr_info_t
27529 * we check the new geometry. Then turn interrupts back on when
27530 * we're done.
27531 */
27532 - host->access.set_intr_mask(host, 0);
27533 + host->access->set_intr_mask(host, 0);
27534 getgeometry(ctlr);
27535 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
27536 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
27537
27538 for(i=0; i<NWD; i++) {
27539 struct gendisk *disk = ida_gendisk[ctlr][i];
27540 @@ -1576,7 +1578,7 @@ static int pollcomplete(int ctlr)
27541 /* Wait (up to 2 seconds) for a command to complete */
27542
27543 for (i = 200000; i > 0; i--) {
27544 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
27545 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
27546 if (done == 0) {
27547 udelay(10); /* a short fixed delay */
27548 } else
27549 diff -urNp linux-2.6.32.45/drivers/block/cpqarray.h linux-2.6.32.45/drivers/block/cpqarray.h
27550 --- linux-2.6.32.45/drivers/block/cpqarray.h 2011-03-27 14:31:47.000000000 -0400
27551 +++ linux-2.6.32.45/drivers/block/cpqarray.h 2011-08-05 20:33:55.000000000 -0400
27552 @@ -99,7 +99,7 @@ struct ctlr_info {
27553 drv_info_t drv[NWD];
27554 struct proc_dir_entry *proc;
27555
27556 - struct access_method access;
27557 + struct access_method *access;
27558
27559 cmdlist_t *reqQ;
27560 cmdlist_t *cmpQ;
27561 diff -urNp linux-2.6.32.45/drivers/block/DAC960.c linux-2.6.32.45/drivers/block/DAC960.c
27562 --- linux-2.6.32.45/drivers/block/DAC960.c 2011-03-27 14:31:47.000000000 -0400
27563 +++ linux-2.6.32.45/drivers/block/DAC960.c 2011-05-16 21:46:57.000000000 -0400
27564 @@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfigur
27565 unsigned long flags;
27566 int Channel, TargetID;
27567
27568 + pax_track_stack();
27569 +
27570 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
27571 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
27572 sizeof(DAC960_SCSI_Inquiry_T) +
27573 diff -urNp linux-2.6.32.45/drivers/block/nbd.c linux-2.6.32.45/drivers/block/nbd.c
27574 --- linux-2.6.32.45/drivers/block/nbd.c 2011-06-25 12:55:34.000000000 -0400
27575 +++ linux-2.6.32.45/drivers/block/nbd.c 2011-06-25 12:56:37.000000000 -0400
27576 @@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *
27577 struct kvec iov;
27578 sigset_t blocked, oldset;
27579
27580 + pax_track_stack();
27581 +
27582 if (unlikely(!sock)) {
27583 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
27584 lo->disk->disk_name, (send ? "send" : "recv"));
27585 @@ -569,6 +571,8 @@ static void do_nbd_request(struct reques
27586 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
27587 unsigned int cmd, unsigned long arg)
27588 {
27589 + pax_track_stack();
27590 +
27591 switch (cmd) {
27592 case NBD_DISCONNECT: {
27593 struct request sreq;
27594 diff -urNp linux-2.6.32.45/drivers/block/pktcdvd.c linux-2.6.32.45/drivers/block/pktcdvd.c
27595 --- linux-2.6.32.45/drivers/block/pktcdvd.c 2011-03-27 14:31:47.000000000 -0400
27596 +++ linux-2.6.32.45/drivers/block/pktcdvd.c 2011-04-17 15:56:46.000000000 -0400
27597 @@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kob
27598 return len;
27599 }
27600
27601 -static struct sysfs_ops kobj_pkt_ops = {
27602 +static const struct sysfs_ops kobj_pkt_ops = {
27603 .show = kobj_pkt_show,
27604 .store = kobj_pkt_store
27605 };
27606 diff -urNp linux-2.6.32.45/drivers/char/agp/frontend.c linux-2.6.32.45/drivers/char/agp/frontend.c
27607 --- linux-2.6.32.45/drivers/char/agp/frontend.c 2011-03-27 14:31:47.000000000 -0400
27608 +++ linux-2.6.32.45/drivers/char/agp/frontend.c 2011-04-17 15:56:46.000000000 -0400
27609 @@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct ag
27610 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
27611 return -EFAULT;
27612
27613 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
27614 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
27615 return -EFAULT;
27616
27617 client = agp_find_client_by_pid(reserve.pid);
27618 diff -urNp linux-2.6.32.45/drivers/char/briq_panel.c linux-2.6.32.45/drivers/char/briq_panel.c
27619 --- linux-2.6.32.45/drivers/char/briq_panel.c 2011-03-27 14:31:47.000000000 -0400
27620 +++ linux-2.6.32.45/drivers/char/briq_panel.c 2011-04-18 19:48:57.000000000 -0400
27621 @@ -10,6 +10,7 @@
27622 #include <linux/types.h>
27623 #include <linux/errno.h>
27624 #include <linux/tty.h>
27625 +#include <linux/mutex.h>
27626 #include <linux/timer.h>
27627 #include <linux/kernel.h>
27628 #include <linux/wait.h>
27629 @@ -36,6 +37,7 @@ static int vfd_is_open;
27630 static unsigned char vfd[40];
27631 static int vfd_cursor;
27632 static unsigned char ledpb, led;
27633 +static DEFINE_MUTEX(vfd_mutex);
27634
27635 static void update_vfd(void)
27636 {
27637 @@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct f
27638 if (!vfd_is_open)
27639 return -EBUSY;
27640
27641 + mutex_lock(&vfd_mutex);
27642 for (;;) {
27643 char c;
27644 if (!indx)
27645 break;
27646 - if (get_user(c, buf))
27647 + if (get_user(c, buf)) {
27648 + mutex_unlock(&vfd_mutex);
27649 return -EFAULT;
27650 + }
27651 if (esc) {
27652 set_led(c);
27653 esc = 0;
27654 @@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct f
27655 buf++;
27656 }
27657 update_vfd();
27658 + mutex_unlock(&vfd_mutex);
27659
27660 return len;
27661 }
27662 diff -urNp linux-2.6.32.45/drivers/char/genrtc.c linux-2.6.32.45/drivers/char/genrtc.c
27663 --- linux-2.6.32.45/drivers/char/genrtc.c 2011-03-27 14:31:47.000000000 -0400
27664 +++ linux-2.6.32.45/drivers/char/genrtc.c 2011-04-18 19:45:42.000000000 -0400
27665 @@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *i
27666 switch (cmd) {
27667
27668 case RTC_PLL_GET:
27669 + memset(&pll, 0, sizeof(pll));
27670 if (get_rtc_pll(&pll))
27671 return -EINVAL;
27672 else
27673 diff -urNp linux-2.6.32.45/drivers/char/hpet.c linux-2.6.32.45/drivers/char/hpet.c
27674 --- linux-2.6.32.45/drivers/char/hpet.c 2011-03-27 14:31:47.000000000 -0400
27675 +++ linux-2.6.32.45/drivers/char/hpet.c 2011-04-23 12:56:11.000000000 -0400
27676 @@ -430,7 +430,7 @@ static int hpet_release(struct inode *in
27677 return 0;
27678 }
27679
27680 -static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
27681 +static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
27682
27683 static int
27684 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
27685 @@ -565,7 +565,7 @@ static inline unsigned long hpet_time_di
27686 }
27687
27688 static int
27689 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
27690 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
27691 {
27692 struct hpet_timer __iomem *timer;
27693 struct hpet __iomem *hpet;
27694 @@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp,
27695 {
27696 struct hpet_info info;
27697
27698 + memset(&info, 0, sizeof(info));
27699 +
27700 if (devp->hd_ireqfreq)
27701 info.hi_ireqfreq =
27702 hpet_time_div(hpetp, devp->hd_ireqfreq);
27703 - else
27704 - info.hi_ireqfreq = 0;
27705 info.hi_flags =
27706 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
27707 info.hi_hpet = hpetp->hp_which;
27708 diff -urNp linux-2.6.32.45/drivers/char/hvc_beat.c linux-2.6.32.45/drivers/char/hvc_beat.c
27709 --- linux-2.6.32.45/drivers/char/hvc_beat.c 2011-03-27 14:31:47.000000000 -0400
27710 +++ linux-2.6.32.45/drivers/char/hvc_beat.c 2011-04-17 15:56:46.000000000 -0400
27711 @@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t v
27712 return cnt;
27713 }
27714
27715 -static struct hv_ops hvc_beat_get_put_ops = {
27716 +static const struct hv_ops hvc_beat_get_put_ops = {
27717 .get_chars = hvc_beat_get_chars,
27718 .put_chars = hvc_beat_put_chars,
27719 };
27720 diff -urNp linux-2.6.32.45/drivers/char/hvc_console.c linux-2.6.32.45/drivers/char/hvc_console.c
27721 --- linux-2.6.32.45/drivers/char/hvc_console.c 2011-03-27 14:31:47.000000000 -0400
27722 +++ linux-2.6.32.45/drivers/char/hvc_console.c 2011-04-17 15:56:46.000000000 -0400
27723 @@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_ind
27724 * console interfaces but can still be used as a tty device. This has to be
27725 * static because kmalloc will not work during early console init.
27726 */
27727 -static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
27728 +static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
27729 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
27730 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
27731
27732 @@ -247,7 +247,7 @@ static void destroy_hvc_struct(struct kr
27733 * vty adapters do NOT get an hvc_instantiate() callback since they
27734 * appear after early console init.
27735 */
27736 -int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
27737 +int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
27738 {
27739 struct hvc_struct *hp;
27740
27741 @@ -756,7 +756,7 @@ static const struct tty_operations hvc_o
27742 };
27743
27744 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
27745 - struct hv_ops *ops, int outbuf_size)
27746 + const struct hv_ops *ops, int outbuf_size)
27747 {
27748 struct hvc_struct *hp;
27749 int i;
27750 diff -urNp linux-2.6.32.45/drivers/char/hvc_console.h linux-2.6.32.45/drivers/char/hvc_console.h
27751 --- linux-2.6.32.45/drivers/char/hvc_console.h 2011-03-27 14:31:47.000000000 -0400
27752 +++ linux-2.6.32.45/drivers/char/hvc_console.h 2011-04-17 15:56:46.000000000 -0400
27753 @@ -55,7 +55,7 @@ struct hvc_struct {
27754 int outbuf_size;
27755 int n_outbuf;
27756 uint32_t vtermno;
27757 - struct hv_ops *ops;
27758 + const struct hv_ops *ops;
27759 int irq_requested;
27760 int data;
27761 struct winsize ws;
27762 @@ -76,11 +76,11 @@ struct hv_ops {
27763 };
27764
27765 /* Register a vterm and a slot index for use as a console (console_init) */
27766 -extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
27767 +extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
27768
27769 /* register a vterm for hvc tty operation (module_init or hotplug add) */
27770 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
27771 - struct hv_ops *ops, int outbuf_size);
27772 + const struct hv_ops *ops, int outbuf_size);
27773 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
27774 extern int hvc_remove(struct hvc_struct *hp);
27775
27776 diff -urNp linux-2.6.32.45/drivers/char/hvc_iseries.c linux-2.6.32.45/drivers/char/hvc_iseries.c
27777 --- linux-2.6.32.45/drivers/char/hvc_iseries.c 2011-03-27 14:31:47.000000000 -0400
27778 +++ linux-2.6.32.45/drivers/char/hvc_iseries.c 2011-04-17 15:56:46.000000000 -0400
27779 @@ -197,7 +197,7 @@ done:
27780 return sent;
27781 }
27782
27783 -static struct hv_ops hvc_get_put_ops = {
27784 +static const struct hv_ops hvc_get_put_ops = {
27785 .get_chars = get_chars,
27786 .put_chars = put_chars,
27787 .notifier_add = notifier_add_irq,
27788 diff -urNp linux-2.6.32.45/drivers/char/hvc_iucv.c linux-2.6.32.45/drivers/char/hvc_iucv.c
27789 --- linux-2.6.32.45/drivers/char/hvc_iucv.c 2011-03-27 14:31:47.000000000 -0400
27790 +++ linux-2.6.32.45/drivers/char/hvc_iucv.c 2011-04-17 15:56:46.000000000 -0400
27791 @@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(stru
27792
27793
27794 /* HVC operations */
27795 -static struct hv_ops hvc_iucv_ops = {
27796 +static const struct hv_ops hvc_iucv_ops = {
27797 .get_chars = hvc_iucv_get_chars,
27798 .put_chars = hvc_iucv_put_chars,
27799 .notifier_add = hvc_iucv_notifier_add,
27800 diff -urNp linux-2.6.32.45/drivers/char/hvc_rtas.c linux-2.6.32.45/drivers/char/hvc_rtas.c
27801 --- linux-2.6.32.45/drivers/char/hvc_rtas.c 2011-03-27 14:31:47.000000000 -0400
27802 +++ linux-2.6.32.45/drivers/char/hvc_rtas.c 2011-04-17 15:56:46.000000000 -0400
27803 @@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_
27804 return i;
27805 }
27806
27807 -static struct hv_ops hvc_rtas_get_put_ops = {
27808 +static const struct hv_ops hvc_rtas_get_put_ops = {
27809 .get_chars = hvc_rtas_read_console,
27810 .put_chars = hvc_rtas_write_console,
27811 };
27812 diff -urNp linux-2.6.32.45/drivers/char/hvcs.c linux-2.6.32.45/drivers/char/hvcs.c
27813 --- linux-2.6.32.45/drivers/char/hvcs.c 2011-03-27 14:31:47.000000000 -0400
27814 +++ linux-2.6.32.45/drivers/char/hvcs.c 2011-04-17 15:56:46.000000000 -0400
27815 @@ -82,6 +82,7 @@
27816 #include <asm/hvcserver.h>
27817 #include <asm/uaccess.h>
27818 #include <asm/vio.h>
27819 +#include <asm/local.h>
27820
27821 /*
27822 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
27823 @@ -269,7 +270,7 @@ struct hvcs_struct {
27824 unsigned int index;
27825
27826 struct tty_struct *tty;
27827 - int open_count;
27828 + local_t open_count;
27829
27830 /*
27831 * Used to tell the driver kernel_thread what operations need to take
27832 @@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(st
27833
27834 spin_lock_irqsave(&hvcsd->lock, flags);
27835
27836 - if (hvcsd->open_count > 0) {
27837 + if (local_read(&hvcsd->open_count) > 0) {
27838 spin_unlock_irqrestore(&hvcsd->lock, flags);
27839 printk(KERN_INFO "HVCS: vterm state unchanged. "
27840 "The hvcs device node is still in use.\n");
27841 @@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *
27842 if ((retval = hvcs_partner_connect(hvcsd)))
27843 goto error_release;
27844
27845 - hvcsd->open_count = 1;
27846 + local_set(&hvcsd->open_count, 1);
27847 hvcsd->tty = tty;
27848 tty->driver_data = hvcsd;
27849
27850 @@ -1169,7 +1170,7 @@ fast_open:
27851
27852 spin_lock_irqsave(&hvcsd->lock, flags);
27853 kref_get(&hvcsd->kref);
27854 - hvcsd->open_count++;
27855 + local_inc(&hvcsd->open_count);
27856 hvcsd->todo_mask |= HVCS_SCHED_READ;
27857 spin_unlock_irqrestore(&hvcsd->lock, flags);
27858
27859 @@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct
27860 hvcsd = tty->driver_data;
27861
27862 spin_lock_irqsave(&hvcsd->lock, flags);
27863 - if (--hvcsd->open_count == 0) {
27864 + if (local_dec_and_test(&hvcsd->open_count)) {
27865
27866 vio_disable_interrupts(hvcsd->vdev);
27867
27868 @@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct
27869 free_irq(irq, hvcsd);
27870 kref_put(&hvcsd->kref, destroy_hvcs_struct);
27871 return;
27872 - } else if (hvcsd->open_count < 0) {
27873 + } else if (local_read(&hvcsd->open_count) < 0) {
27874 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
27875 " is missmanaged.\n",
27876 - hvcsd->vdev->unit_address, hvcsd->open_count);
27877 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
27878 }
27879
27880 spin_unlock_irqrestore(&hvcsd->lock, flags);
27881 @@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struc
27882
27883 spin_lock_irqsave(&hvcsd->lock, flags);
27884 /* Preserve this so that we know how many kref refs to put */
27885 - temp_open_count = hvcsd->open_count;
27886 + temp_open_count = local_read(&hvcsd->open_count);
27887
27888 /*
27889 * Don't kref put inside the spinlock because the destruction
27890 @@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struc
27891 hvcsd->tty->driver_data = NULL;
27892 hvcsd->tty = NULL;
27893
27894 - hvcsd->open_count = 0;
27895 + local_set(&hvcsd->open_count, 0);
27896
27897 /* This will drop any buffered data on the floor which is OK in a hangup
27898 * scenario. */
27899 @@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct
27900 * the middle of a write operation? This is a crummy place to do this
27901 * but we want to keep it all in the spinlock.
27902 */
27903 - if (hvcsd->open_count <= 0) {
27904 + if (local_read(&hvcsd->open_count) <= 0) {
27905 spin_unlock_irqrestore(&hvcsd->lock, flags);
27906 return -ENODEV;
27907 }
27908 @@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_st
27909 {
27910 struct hvcs_struct *hvcsd = tty->driver_data;
27911
27912 - if (!hvcsd || hvcsd->open_count <= 0)
27913 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
27914 return 0;
27915
27916 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
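
A recurring pattern in the hvcs.c hunks above (the same conversion reappears in ipwireless/tty.c, sonypi.c and drm_fops.c later in this patch): a plain int open_count that is only ever touched under the device lock becomes a local_t, and every bare ++/--/comparison is routed through the local_*() accessors. The before/after shape is roughly the sketch below; demo_port, demo_open and demo_close are hypothetical names for illustration, not functions from the patch.

    #include <linux/spinlock.h>
    #include <asm/local.h>

    struct demo_port {
            spinlock_t lock;
            local_t open_count;                     /* was: int open_count; */
    };

    static void demo_open(struct demo_port *p)
    {
            unsigned long flags;

            spin_lock_irqsave(&p->lock, flags);
            local_inc(&p->open_count);              /* was: p->open_count++; */
            spin_unlock_irqrestore(&p->lock, flags);
    }

    /* Returns non-zero when the last opener went away. */
    static int demo_close(struct demo_port *p)
    {
            unsigned long flags;
            int last;

            spin_lock_irqsave(&p->lock, flags);
            last = local_dec_and_test(&p->open_count);  /* was: --p->open_count == 0 */
            spin_unlock_irqrestore(&p->lock, flags);

            return last;
    }

The existing spinlock usage is kept as-is; only the accessors change, so every read and write of the counter goes through one well-defined set of primitives.
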
27917 diff -urNp linux-2.6.32.45/drivers/char/hvc_udbg.c linux-2.6.32.45/drivers/char/hvc_udbg.c
27918 --- linux-2.6.32.45/drivers/char/hvc_udbg.c 2011-03-27 14:31:47.000000000 -0400
27919 +++ linux-2.6.32.45/drivers/char/hvc_udbg.c 2011-04-17 15:56:46.000000000 -0400
27920 @@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno
27921 return i;
27922 }
27923
27924 -static struct hv_ops hvc_udbg_ops = {
27925 +static const struct hv_ops hvc_udbg_ops = {
27926 .get_chars = hvc_udbg_get,
27927 .put_chars = hvc_udbg_put,
27928 };
27929 diff -urNp linux-2.6.32.45/drivers/char/hvc_vio.c linux-2.6.32.45/drivers/char/hvc_vio.c
27930 --- linux-2.6.32.45/drivers/char/hvc_vio.c 2011-03-27 14:31:47.000000000 -0400
27931 +++ linux-2.6.32.45/drivers/char/hvc_vio.c 2011-04-17 15:56:46.000000000 -0400
27932 @@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t v
27933 return got;
27934 }
27935
27936 -static struct hv_ops hvc_get_put_ops = {
27937 +static const struct hv_ops hvc_get_put_ops = {
27938 .get_chars = filtered_get_chars,
27939 .put_chars = hvc_put_chars,
27940 .notifier_add = notifier_add_irq,
27941 diff -urNp linux-2.6.32.45/drivers/char/hvc_xen.c linux-2.6.32.45/drivers/char/hvc_xen.c
27942 --- linux-2.6.32.45/drivers/char/hvc_xen.c 2011-03-27 14:31:47.000000000 -0400
27943 +++ linux-2.6.32.45/drivers/char/hvc_xen.c 2011-04-17 15:56:46.000000000 -0400
27944 @@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno
27945 return recv;
27946 }
27947
27948 -static struct hv_ops hvc_ops = {
27949 +static const struct hv_ops hvc_ops = {
27950 .get_chars = read_console,
27951 .put_chars = write_console,
27952 .notifier_add = notifier_add_irq,
27953 diff -urNp linux-2.6.32.45/drivers/char/ipmi/ipmi_msghandler.c linux-2.6.32.45/drivers/char/ipmi/ipmi_msghandler.c
27954 --- linux-2.6.32.45/drivers/char/ipmi/ipmi_msghandler.c 2011-03-27 14:31:47.000000000 -0400
27955 +++ linux-2.6.32.45/drivers/char/ipmi/ipmi_msghandler.c 2011-05-16 21:46:57.000000000 -0400
27956 @@ -414,7 +414,7 @@ struct ipmi_smi {
27957 struct proc_dir_entry *proc_dir;
27958 char proc_dir_name[10];
27959
27960 - atomic_t stats[IPMI_NUM_STATS];
27961 + atomic_unchecked_t stats[IPMI_NUM_STATS];
27962
27963 /*
27964 * run_to_completion duplicate of smb_info, smi_info
27965 @@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
27966
27967
27968 #define ipmi_inc_stat(intf, stat) \
27969 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
27970 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
27971 #define ipmi_get_stat(intf, stat) \
27972 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
27973 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
27974
27975 static int is_lan_addr(struct ipmi_addr *addr)
27976 {
27977 @@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
27978 INIT_LIST_HEAD(&intf->cmd_rcvrs);
27979 init_waitqueue_head(&intf->waitq);
27980 for (i = 0; i < IPMI_NUM_STATS; i++)
27981 - atomic_set(&intf->stats[i], 0);
27982 + atomic_set_unchecked(&intf->stats[i], 0);
27983
27984 intf->proc_dir = NULL;
27985
27986 @@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
27987 struct ipmi_smi_msg smi_msg;
27988 struct ipmi_recv_msg recv_msg;
27989
27990 + pax_track_stack();
27991 +
27992 si = (struct ipmi_system_interface_addr *) &addr;
27993 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
27994 si->channel = IPMI_BMC_CHANNEL;
27995 diff -urNp linux-2.6.32.45/drivers/char/ipmi/ipmi_si_intf.c linux-2.6.32.45/drivers/char/ipmi/ipmi_si_intf.c
27996 --- linux-2.6.32.45/drivers/char/ipmi/ipmi_si_intf.c 2011-03-27 14:31:47.000000000 -0400
27997 +++ linux-2.6.32.45/drivers/char/ipmi/ipmi_si_intf.c 2011-04-17 15:56:46.000000000 -0400
27998 @@ -277,7 +277,7 @@ struct smi_info {
27999 unsigned char slave_addr;
28000
28001 /* Counters and things for the proc filesystem. */
28002 - atomic_t stats[SI_NUM_STATS];
28003 + atomic_unchecked_t stats[SI_NUM_STATS];
28004
28005 struct task_struct *thread;
28006
28007 @@ -285,9 +285,9 @@ struct smi_info {
28008 };
28009
28010 #define smi_inc_stat(smi, stat) \
28011 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
28012 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
28013 #define smi_get_stat(smi, stat) \
28014 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
28015 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
28016
28017 #define SI_MAX_PARMS 4
28018
28019 @@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info
28020 atomic_set(&new_smi->req_events, 0);
28021 new_smi->run_to_completion = 0;
28022 for (i = 0; i < SI_NUM_STATS; i++)
28023 - atomic_set(&new_smi->stats[i], 0);
28024 + atomic_set_unchecked(&new_smi->stats[i], 0);
28025
28026 new_smi->interrupt_disabled = 0;
28027 atomic_set(&new_smi->stop_operation, 0);
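
The two IPMI files above show another conversion that repeats throughout this patch: purely statistical counters move from atomic_t to atomic_unchecked_t together with the matching *_unchecked accessors (both the type and the accessors are provided by the PaX portion of this patch). Counters that only feed statistics and may legitimately wrap use the unchecked variants, while real reference counts keep the plain atomic_t helpers. A small sketch of the idiom, with a hypothetical demo_ counter:

    /* A counter that only feeds statistics; wrapping is harmless, so the
     * _unchecked variants are used instead of atomic_inc()/atomic_read(). */
    static atomic_unchecked_t demo_rx_errors = ATOMIC_INIT(0);

    static void demo_note_error(void)
    {
            atomic_inc_unchecked(&demo_rx_errors);
    }

    static unsigned int demo_error_count(void)
    {
            return (unsigned int)atomic_read_unchecked(&demo_rx_errors);
    }
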
28028 diff -urNp linux-2.6.32.45/drivers/char/istallion.c linux-2.6.32.45/drivers/char/istallion.c
28029 --- linux-2.6.32.45/drivers/char/istallion.c 2011-03-27 14:31:47.000000000 -0400
28030 +++ linux-2.6.32.45/drivers/char/istallion.c 2011-05-16 21:46:57.000000000 -0400
28031 @@ -187,7 +187,6 @@ static struct ktermios stli_deftermios
28032 * re-used for each stats call.
28033 */
28034 static comstats_t stli_comstats;
28035 -static combrd_t stli_brdstats;
28036 static struct asystats stli_cdkstats;
28037
28038 /*****************************************************************************/
28039 @@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __u
28040 {
28041 struct stlibrd *brdp;
28042 unsigned int i;
28043 + combrd_t stli_brdstats;
28044
28045 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
28046 return -EFAULT;
28047 @@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stl
28048 struct stliport stli_dummyport;
28049 struct stliport *portp;
28050
28051 + pax_track_stack();
28052 +
28053 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
28054 return -EFAULT;
28055 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
28056 @@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stli
28057 struct stlibrd stli_dummybrd;
28058 struct stlibrd *brdp;
28059
28060 + pax_track_stack();
28061 +
28062 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
28063 return -EFAULT;
28064 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
28065 diff -urNp linux-2.6.32.45/drivers/char/Kconfig linux-2.6.32.45/drivers/char/Kconfig
28066 --- linux-2.6.32.45/drivers/char/Kconfig 2011-03-27 14:31:47.000000000 -0400
28067 +++ linux-2.6.32.45/drivers/char/Kconfig 2011-04-18 19:20:15.000000000 -0400
28068 @@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
28069
28070 config DEVKMEM
28071 bool "/dev/kmem virtual device support"
28072 - default y
28073 + default n
28074 + depends on !GRKERNSEC_KMEM
28075 help
28076 Say Y here if you want to support the /dev/kmem device. The
28077 /dev/kmem device is rarely used, but can be used for certain
28078 @@ -1114,6 +1115,7 @@ config DEVPORT
28079 bool
28080 depends on !M68K
28081 depends on ISA || PCI
28082 + depends on !GRKERNSEC_KMEM
28083 default y
28084
28085 source "drivers/s390/char/Kconfig"
28086 diff -urNp linux-2.6.32.45/drivers/char/keyboard.c linux-2.6.32.45/drivers/char/keyboard.c
28087 --- linux-2.6.32.45/drivers/char/keyboard.c 2011-03-27 14:31:47.000000000 -0400
28088 +++ linux-2.6.32.45/drivers/char/keyboard.c 2011-04-17 15:56:46.000000000 -0400
28089 @@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, u
28090 kbd->kbdmode == VC_MEDIUMRAW) &&
28091 value != KVAL(K_SAK))
28092 return; /* SAK is allowed even in raw mode */
28093 +
28094 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
28095 + {
28096 + void *func = fn_handler[value];
28097 + if (func == fn_show_state || func == fn_show_ptregs ||
28098 + func == fn_show_mem)
28099 + return;
28100 + }
28101 +#endif
28102 +
28103 fn_handler[value](vc);
28104 }
28105
28106 @@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_
28107 .evbit = { BIT_MASK(EV_SND) },
28108 },
28109
28110 - { }, /* Terminating entry */
28111 + { 0 }, /* Terminating entry */
28112 };
28113
28114 MODULE_DEVICE_TABLE(input, kbd_ids);
28115 diff -urNp linux-2.6.32.45/drivers/char/mem.c linux-2.6.32.45/drivers/char/mem.c
28116 --- linux-2.6.32.45/drivers/char/mem.c 2011-03-27 14:31:47.000000000 -0400
28117 +++ linux-2.6.32.45/drivers/char/mem.c 2011-04-17 15:56:46.000000000 -0400
28118 @@ -18,6 +18,7 @@
28119 #include <linux/raw.h>
28120 #include <linux/tty.h>
28121 #include <linux/capability.h>
28122 +#include <linux/security.h>
28123 #include <linux/ptrace.h>
28124 #include <linux/device.h>
28125 #include <linux/highmem.h>
28126 @@ -35,6 +36,10 @@
28127 # include <linux/efi.h>
28128 #endif
28129
28130 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
28131 +extern struct file_operations grsec_fops;
28132 +#endif
28133 +
28134 static inline unsigned long size_inside_page(unsigned long start,
28135 unsigned long size)
28136 {
28137 @@ -102,9 +107,13 @@ static inline int range_is_allowed(unsig
28138
28139 while (cursor < to) {
28140 if (!devmem_is_allowed(pfn)) {
28141 +#ifdef CONFIG_GRKERNSEC_KMEM
28142 + gr_handle_mem_readwrite(from, to);
28143 +#else
28144 printk(KERN_INFO
28145 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
28146 current->comm, from, to);
28147 +#endif
28148 return 0;
28149 }
28150 cursor += PAGE_SIZE;
28151 @@ -112,6 +121,11 @@ static inline int range_is_allowed(unsig
28152 }
28153 return 1;
28154 }
28155 +#elif defined(CONFIG_GRKERNSEC_KMEM)
28156 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
28157 +{
28158 + return 0;
28159 +}
28160 #else
28161 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
28162 {
28163 @@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * fi
28164 #endif
28165
28166 while (count > 0) {
28167 + char *temp;
28168 +
28169 /*
28170 * Handle first page in case it's not aligned
28171 */
28172 @@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * fi
28173 if (!ptr)
28174 return -EFAULT;
28175
28176 - if (copy_to_user(buf, ptr, sz)) {
28177 +#ifdef CONFIG_PAX_USERCOPY
28178 + temp = kmalloc(sz, GFP_KERNEL);
28179 + if (!temp) {
28180 + unxlate_dev_mem_ptr(p, ptr);
28181 + return -ENOMEM;
28182 + }
28183 + memcpy(temp, ptr, sz);
28184 +#else
28185 + temp = ptr;
28186 +#endif
28187 +
28188 + if (copy_to_user(buf, temp, sz)) {
28189 +
28190 +#ifdef CONFIG_PAX_USERCOPY
28191 + kfree(temp);
28192 +#endif
28193 +
28194 unxlate_dev_mem_ptr(p, ptr);
28195 return -EFAULT;
28196 }
28197
28198 +#ifdef CONFIG_PAX_USERCOPY
28199 + kfree(temp);
28200 +#endif
28201 +
28202 unxlate_dev_mem_ptr(p, ptr);
28203
28204 buf += sz;
28205 @@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *fi
28206 size_t count, loff_t *ppos)
28207 {
28208 unsigned long p = *ppos;
28209 - ssize_t low_count, read, sz;
28210 + ssize_t low_count, read, sz, err = 0;
28211 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
28212 - int err = 0;
28213
28214 read = 0;
28215 if (p < (unsigned long) high_memory) {
28216 @@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *fi
28217 }
28218 #endif
28219 while (low_count > 0) {
28220 + char *temp;
28221 +
28222 sz = size_inside_page(p, low_count);
28223
28224 /*
28225 @@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *fi
28226 */
28227 kbuf = xlate_dev_kmem_ptr((char *)p);
28228
28229 - if (copy_to_user(buf, kbuf, sz))
28230 +#ifdef CONFIG_PAX_USERCOPY
28231 + temp = kmalloc(sz, GFP_KERNEL);
28232 + if (!temp)
28233 + return -ENOMEM;
28234 + memcpy(temp, kbuf, sz);
28235 +#else
28236 + temp = kbuf;
28237 +#endif
28238 +
28239 + err = copy_to_user(buf, temp, sz);
28240 +
28241 +#ifdef CONFIG_PAX_USERCOPY
28242 + kfree(temp);
28243 +#endif
28244 +
28245 + if (err)
28246 return -EFAULT;
28247 buf += sz;
28248 p += sz;
28249 @@ -889,6 +941,9 @@ static const struct memdev {
28250 #ifdef CONFIG_CRASH_DUMP
28251 [12] = { "oldmem", 0, &oldmem_fops, NULL },
28252 #endif
28253 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
28254 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
28255 +#endif
28256 };
28257
28258 static int memory_open(struct inode *inode, struct file *filp)
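
Both the read_mem() and read_kmem() hunks above interpose a temporary kmalloc'ed buffer between the raw mapping and copy_to_user() when PAX_USERCOPY is enabled, so the copy is performed out of a heap object whose size the usercopy checks can verify rather than straight out of an arbitrary /dev/mem or /dev/kmem mapping. Stripped of the surrounding loop, the shape is roughly the sketch below; demo_copy_chunk is a hypothetical helper, not a function added by the patch.

    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/uaccess.h>

    static ssize_t demo_copy_chunk(char __user *buf, const char *ptr, size_t sz)
    {
            char *temp;
            unsigned long left;

    #ifdef CONFIG_PAX_USERCOPY
            temp = kmalloc(sz, GFP_KERNEL);         /* bounce buffer of known size */
            if (!temp)
                    return -ENOMEM;
            memcpy(temp, ptr, sz);
    #else
            temp = (char *)ptr;                     /* copy directly, as before */
    #endif

            left = copy_to_user(buf, temp, sz);

    #ifdef CONFIG_PAX_USERCOPY
            kfree(temp);
    #endif

            return left ? -EFAULT : (ssize_t)sz;
    }
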
28259 diff -urNp linux-2.6.32.45/drivers/char/pcmcia/ipwireless/tty.c linux-2.6.32.45/drivers/char/pcmcia/ipwireless/tty.c
28260 --- linux-2.6.32.45/drivers/char/pcmcia/ipwireless/tty.c 2011-03-27 14:31:47.000000000 -0400
28261 +++ linux-2.6.32.45/drivers/char/pcmcia/ipwireless/tty.c 2011-04-17 15:56:46.000000000 -0400
28262 @@ -29,6 +29,7 @@
28263 #include <linux/tty_driver.h>
28264 #include <linux/tty_flip.h>
28265 #include <linux/uaccess.h>
28266 +#include <asm/local.h>
28267
28268 #include "tty.h"
28269 #include "network.h"
28270 @@ -51,7 +52,7 @@ struct ipw_tty {
28271 int tty_type;
28272 struct ipw_network *network;
28273 struct tty_struct *linux_tty;
28274 - int open_count;
28275 + local_t open_count;
28276 unsigned int control_lines;
28277 struct mutex ipw_tty_mutex;
28278 int tx_bytes_queued;
28279 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
28280 mutex_unlock(&tty->ipw_tty_mutex);
28281 return -ENODEV;
28282 }
28283 - if (tty->open_count == 0)
28284 + if (local_read(&tty->open_count) == 0)
28285 tty->tx_bytes_queued = 0;
28286
28287 - tty->open_count++;
28288 + local_inc(&tty->open_count);
28289
28290 tty->linux_tty = linux_tty;
28291 linux_tty->driver_data = tty;
28292 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
28293
28294 static void do_ipw_close(struct ipw_tty *tty)
28295 {
28296 - tty->open_count--;
28297 -
28298 - if (tty->open_count == 0) {
28299 + if (local_dec_return(&tty->open_count) == 0) {
28300 struct tty_struct *linux_tty = tty->linux_tty;
28301
28302 if (linux_tty != NULL) {
28303 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
28304 return;
28305
28306 mutex_lock(&tty->ipw_tty_mutex);
28307 - if (tty->open_count == 0) {
28308 + if (local_read(&tty->open_count) == 0) {
28309 mutex_unlock(&tty->ipw_tty_mutex);
28310 return;
28311 }
28312 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
28313 return;
28314 }
28315
28316 - if (!tty->open_count) {
28317 + if (!local_read(&tty->open_count)) {
28318 mutex_unlock(&tty->ipw_tty_mutex);
28319 return;
28320 }
28321 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
28322 return -ENODEV;
28323
28324 mutex_lock(&tty->ipw_tty_mutex);
28325 - if (!tty->open_count) {
28326 + if (!local_read(&tty->open_count)) {
28327 mutex_unlock(&tty->ipw_tty_mutex);
28328 return -EINVAL;
28329 }
28330 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
28331 if (!tty)
28332 return -ENODEV;
28333
28334 - if (!tty->open_count)
28335 + if (!local_read(&tty->open_count))
28336 return -EINVAL;
28337
28338 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
28339 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
28340 if (!tty)
28341 return 0;
28342
28343 - if (!tty->open_count)
28344 + if (!local_read(&tty->open_count))
28345 return 0;
28346
28347 return tty->tx_bytes_queued;
28348 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
28349 if (!tty)
28350 return -ENODEV;
28351
28352 - if (!tty->open_count)
28353 + if (!local_read(&tty->open_count))
28354 return -EINVAL;
28355
28356 return get_control_lines(tty);
28357 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
28358 if (!tty)
28359 return -ENODEV;
28360
28361 - if (!tty->open_count)
28362 + if (!local_read(&tty->open_count))
28363 return -EINVAL;
28364
28365 return set_control_lines(tty, set, clear);
28366 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
28367 if (!tty)
28368 return -ENODEV;
28369
28370 - if (!tty->open_count)
28371 + if (!local_read(&tty->open_count))
28372 return -EINVAL;
28373
28374 /* FIXME: Exactly how is the tty object locked here .. */
28375 @@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty
28376 against a parallel ioctl etc */
28377 mutex_lock(&ttyj->ipw_tty_mutex);
28378 }
28379 - while (ttyj->open_count)
28380 + while (local_read(&ttyj->open_count))
28381 do_ipw_close(ttyj);
28382 ipwireless_disassociate_network_ttys(network,
28383 ttyj->channel_idx);
28384 diff -urNp linux-2.6.32.45/drivers/char/pty.c linux-2.6.32.45/drivers/char/pty.c
28385 --- linux-2.6.32.45/drivers/char/pty.c 2011-03-27 14:31:47.000000000 -0400
28386 +++ linux-2.6.32.45/drivers/char/pty.c 2011-08-05 20:33:55.000000000 -0400
28387 @@ -736,8 +736,10 @@ static void __init unix98_pty_init(void)
28388 register_sysctl_table(pty_root_table);
28389
28390 /* Now create the /dev/ptmx special device */
28391 + pax_open_kernel();
28392 tty_default_fops(&ptmx_fops);
28393 - ptmx_fops.open = ptmx_open;
28394 + *(void **)&ptmx_fops.open = ptmx_open;
28395 + pax_close_kernel();
28396
28397 cdev_init(&ptmx_cdev, &ptmx_fops);
28398 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
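
The pty.c hunk above is one instance of a third recurring idiom: a structure of function pointers that ends up in read-only memory under PaX (ptmx_fops here, virtio_cons further down) is written exactly once at init time, and that single write is bracketed by pax_open_kernel()/pax_close_kernel() and funnelled through a void-pointer cast. Both helpers come from the PaX part of this patch. A minimal sketch, assuming a hypothetical demo_fops that is likewise made read-only:

    #include <linux/fs.h>
    #include <linux/init.h>

    static int demo_open(struct inode *inode, struct file *filp)
    {
            return 0;
    }

    static struct file_operations demo_fops;        /* ends up read-only under PaX */

    static int __init demo_init(void)
    {
            pax_open_kernel();                       /* briefly permit the write */
            *(void **)&demo_fops.open = demo_open;
            pax_close_kernel();

            return 0;
    }
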
28399 diff -urNp linux-2.6.32.45/drivers/char/random.c linux-2.6.32.45/drivers/char/random.c
28400 --- linux-2.6.32.45/drivers/char/random.c 2011-08-16 20:37:25.000000000 -0400
28401 +++ linux-2.6.32.45/drivers/char/random.c 2011-08-16 20:43:23.000000000 -0400
28402 @@ -254,8 +254,13 @@
28403 /*
28404 * Configuration information
28405 */
28406 +#ifdef CONFIG_GRKERNSEC_RANDNET
28407 +#define INPUT_POOL_WORDS 512
28408 +#define OUTPUT_POOL_WORDS 128
28409 +#else
28410 #define INPUT_POOL_WORDS 128
28411 #define OUTPUT_POOL_WORDS 32
28412 +#endif
28413 #define SEC_XFER_SIZE 512
28414
28415 /*
28416 @@ -292,10 +297,17 @@ static struct poolinfo {
28417 int poolwords;
28418 int tap1, tap2, tap3, tap4, tap5;
28419 } poolinfo_table[] = {
28420 +#ifdef CONFIG_GRKERNSEC_RANDNET
28421 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
28422 + { 512, 411, 308, 208, 104, 1 },
28423 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
28424 + { 128, 103, 76, 51, 25, 1 },
28425 +#else
28426 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
28427 { 128, 103, 76, 51, 25, 1 },
28428 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
28429 { 32, 26, 20, 14, 7, 1 },
28430 +#endif
28431 #if 0
28432 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
28433 { 2048, 1638, 1231, 819, 411, 1 },
28434 @@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
28435 #include <linux/sysctl.h>
28436
28437 static int min_read_thresh = 8, min_write_thresh;
28438 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
28439 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
28440 static int max_write_thresh = INPUT_POOL_WORDS * 32;
28441 static char sysctl_bootid[16];
28442
28443 diff -urNp linux-2.6.32.45/drivers/char/rocket.c linux-2.6.32.45/drivers/char/rocket.c
28444 --- linux-2.6.32.45/drivers/char/rocket.c 2011-03-27 14:31:47.000000000 -0400
28445 +++ linux-2.6.32.45/drivers/char/rocket.c 2011-05-16 21:46:57.000000000 -0400
28446 @@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info
28447 struct rocket_ports tmp;
28448 int board;
28449
28450 + pax_track_stack();
28451 +
28452 if (!retports)
28453 return -EFAULT;
28454 memset(&tmp, 0, sizeof (tmp));
28455 diff -urNp linux-2.6.32.45/drivers/char/sonypi.c linux-2.6.32.45/drivers/char/sonypi.c
28456 --- linux-2.6.32.45/drivers/char/sonypi.c 2011-03-27 14:31:47.000000000 -0400
28457 +++ linux-2.6.32.45/drivers/char/sonypi.c 2011-04-17 15:56:46.000000000 -0400
28458 @@ -55,6 +55,7 @@
28459 #include <asm/uaccess.h>
28460 #include <asm/io.h>
28461 #include <asm/system.h>
28462 +#include <asm/local.h>
28463
28464 #include <linux/sonypi.h>
28465
28466 @@ -491,7 +492,7 @@ static struct sonypi_device {
28467 spinlock_t fifo_lock;
28468 wait_queue_head_t fifo_proc_list;
28469 struct fasync_struct *fifo_async;
28470 - int open_count;
28471 + local_t open_count;
28472 int model;
28473 struct input_dev *input_jog_dev;
28474 struct input_dev *input_key_dev;
28475 @@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, st
28476 static int sonypi_misc_release(struct inode *inode, struct file *file)
28477 {
28478 mutex_lock(&sonypi_device.lock);
28479 - sonypi_device.open_count--;
28480 + local_dec(&sonypi_device.open_count);
28481 mutex_unlock(&sonypi_device.lock);
28482 return 0;
28483 }
28484 @@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode
28485 lock_kernel();
28486 mutex_lock(&sonypi_device.lock);
28487 /* Flush input queue on first open */
28488 - if (!sonypi_device.open_count)
28489 + if (!local_read(&sonypi_device.open_count))
28490 kfifo_reset(sonypi_device.fifo);
28491 - sonypi_device.open_count++;
28492 + local_inc(&sonypi_device.open_count);
28493 mutex_unlock(&sonypi_device.lock);
28494 unlock_kernel();
28495 return 0;
28496 diff -urNp linux-2.6.32.45/drivers/char/stallion.c linux-2.6.32.45/drivers/char/stallion.c
28497 --- linux-2.6.32.45/drivers/char/stallion.c 2011-03-27 14:31:47.000000000 -0400
28498 +++ linux-2.6.32.45/drivers/char/stallion.c 2011-05-16 21:46:57.000000000 -0400
28499 @@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlp
28500 struct stlport stl_dummyport;
28501 struct stlport *portp;
28502
28503 + pax_track_stack();
28504 +
28505 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
28506 return -EFAULT;
28507 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
28508 diff -urNp linux-2.6.32.45/drivers/char/tpm/tpm_bios.c linux-2.6.32.45/drivers/char/tpm/tpm_bios.c
28509 --- linux-2.6.32.45/drivers/char/tpm/tpm_bios.c 2011-03-27 14:31:47.000000000 -0400
28510 +++ linux-2.6.32.45/drivers/char/tpm/tpm_bios.c 2011-04-17 15:56:46.000000000 -0400
28511 @@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start
28512 event = addr;
28513
28514 if ((event->event_type == 0 && event->event_size == 0) ||
28515 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
28516 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
28517 return NULL;
28518
28519 return addr;
28520 @@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(
28521 return NULL;
28522
28523 if ((event->event_type == 0 && event->event_size == 0) ||
28524 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
28525 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
28526 return NULL;
28527
28528 (*pos)++;
28529 @@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_
28530 int i;
28531
28532 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
28533 - seq_putc(m, data[i]);
28534 + if (!seq_putc(m, data[i]))
28535 + return -EFAULT;
28536
28537 return 0;
28538 }
28539 @@ -409,6 +410,11 @@ static int read_log(struct tpm_bios_log
28540 log->bios_event_log_end = log->bios_event_log + len;
28541
28542 virt = acpi_os_map_memory(start, len);
28543 + if (!virt) {
28544 + kfree(log->bios_event_log);
28545 + log->bios_event_log = NULL;
28546 + return -EFAULT;
28547 + }
28548
28549 memcpy(log->bios_event_log, virt, len);
28550
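
The two tpm_bios.c bounds checks above are rewritten from "base + header + event_size >= limit" to "event_size >= limit - base - header". Since event_size comes from firmware-controlled log data, the original sum can wrap around and slip past the limit; the rewritten form compares the untrusted size against the room actually left. The difference is easiest to see in plain C with integer stand-ins (hypothetical values, not the kernel types):

    #include <stdint.h>
    #include <stddef.h>

    /* Old shape: the sum may wrap, so a huge 'size' can pass the test. */
    static int old_out_of_bounds(uintptr_t addr, size_t hdr, size_t size, uintptr_t limit)
    {
            return addr + hdr + size >= limit;
    }

    /* New shape: compare the untrusted size against the remaining room.
     * Sound as long as addr + hdr does not itself exceed limit. */
    static int new_out_of_bounds(uintptr_t addr, size_t hdr, size_t size, uintptr_t limit)
    {
            return size >= limit - addr - hdr;
    }

For example, with addr = limit - 16, hdr = 8 and size = (size_t)-1 (on a machine where size_t and uintptr_t have the same width), the old form wraps and reports the event as in-bounds, while the new form correctly reports it as out-of-bounds.
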
28551 diff -urNp linux-2.6.32.45/drivers/char/tpm/tpm.c linux-2.6.32.45/drivers/char/tpm/tpm.c
28552 --- linux-2.6.32.45/drivers/char/tpm/tpm.c 2011-04-17 17:00:52.000000000 -0400
28553 +++ linux-2.6.32.45/drivers/char/tpm/tpm.c 2011-05-16 21:46:57.000000000 -0400
28554 @@ -402,7 +402,7 @@ static ssize_t tpm_transmit(struct tpm_c
28555 chip->vendor.req_complete_val)
28556 goto out_recv;
28557
28558 - if ((status == chip->vendor.req_canceled)) {
28559 + if (status == chip->vendor.req_canceled) {
28560 dev_err(chip->dev, "Operation Canceled\n");
28561 rc = -ECANCELED;
28562 goto out;
28563 @@ -821,6 +821,8 @@ ssize_t tpm_show_pubek(struct device *de
28564
28565 struct tpm_chip *chip = dev_get_drvdata(dev);
28566
28567 + pax_track_stack();
28568 +
28569 tpm_cmd.header.in = tpm_readpubek_header;
28570 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
28571 "attempting to read the PUBEK");
28572 diff -urNp linux-2.6.32.45/drivers/char/tty_io.c linux-2.6.32.45/drivers/char/tty_io.c
28573 --- linux-2.6.32.45/drivers/char/tty_io.c 2011-03-27 14:31:47.000000000 -0400
28574 +++ linux-2.6.32.45/drivers/char/tty_io.c 2011-08-05 20:33:55.000000000 -0400
28575 @@ -2582,8 +2582,10 @@ long tty_ioctl(struct file *file, unsign
28576 return retval;
28577 }
28578
28579 +EXPORT_SYMBOL(tty_ioctl);
28580 +
28581 #ifdef CONFIG_COMPAT
28582 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
28583 +long tty_compat_ioctl(struct file *file, unsigned int cmd,
28584 unsigned long arg)
28585 {
28586 struct inode *inode = file->f_dentry->d_inode;
28587 @@ -2607,6 +2609,8 @@ static long tty_compat_ioctl(struct file
28588
28589 return retval;
28590 }
28591 +
28592 +EXPORT_SYMBOL(tty_compat_ioctl);
28593 #endif
28594
28595 /*
28596 @@ -3052,7 +3056,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
28597
28598 void tty_default_fops(struct file_operations *fops)
28599 {
28600 - *fops = tty_fops;
28601 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
28602 }
28603
28604 /*
28605 diff -urNp linux-2.6.32.45/drivers/char/tty_ldisc.c linux-2.6.32.45/drivers/char/tty_ldisc.c
28606 --- linux-2.6.32.45/drivers/char/tty_ldisc.c 2011-07-13 17:23:04.000000000 -0400
28607 +++ linux-2.6.32.45/drivers/char/tty_ldisc.c 2011-07-13 17:23:18.000000000 -0400
28608 @@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
28609 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
28610 struct tty_ldisc_ops *ldo = ld->ops;
28611
28612 - ldo->refcount--;
28613 + atomic_dec(&ldo->refcount);
28614 module_put(ldo->owner);
28615 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28616
28617 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
28618 spin_lock_irqsave(&tty_ldisc_lock, flags);
28619 tty_ldiscs[disc] = new_ldisc;
28620 new_ldisc->num = disc;
28621 - new_ldisc->refcount = 0;
28622 + atomic_set(&new_ldisc->refcount, 0);
28623 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28624
28625 return ret;
28626 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
28627 return -EINVAL;
28628
28629 spin_lock_irqsave(&tty_ldisc_lock, flags);
28630 - if (tty_ldiscs[disc]->refcount)
28631 + if (atomic_read(&tty_ldiscs[disc]->refcount))
28632 ret = -EBUSY;
28633 else
28634 tty_ldiscs[disc] = NULL;
28635 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
28636 if (ldops) {
28637 ret = ERR_PTR(-EAGAIN);
28638 if (try_module_get(ldops->owner)) {
28639 - ldops->refcount++;
28640 + atomic_inc(&ldops->refcount);
28641 ret = ldops;
28642 }
28643 }
28644 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
28645 unsigned long flags;
28646
28647 spin_lock_irqsave(&tty_ldisc_lock, flags);
28648 - ldops->refcount--;
28649 + atomic_dec(&ldops->refcount);
28650 module_put(ldops->owner);
28651 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28652 }
28653 diff -urNp linux-2.6.32.45/drivers/char/virtio_console.c linux-2.6.32.45/drivers/char/virtio_console.c
28654 --- linux-2.6.32.45/drivers/char/virtio_console.c 2011-03-27 14:31:47.000000000 -0400
28655 +++ linux-2.6.32.45/drivers/char/virtio_console.c 2011-08-05 20:33:55.000000000 -0400
28656 @@ -133,7 +133,9 @@ static int get_chars(u32 vtermno, char *
28657 * virtqueue, so we let the drivers do some boutique early-output thing. */
28658 int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
28659 {
28660 - virtio_cons.put_chars = put_chars;
28661 + pax_open_kernel();
28662 + *(void **)&virtio_cons.put_chars = put_chars;
28663 + pax_close_kernel();
28664 return hvc_instantiate(0, 0, &virtio_cons);
28665 }
28666
28667 @@ -213,11 +215,13 @@ static int __devinit virtcons_probe(stru
28668 out_vq = vqs[1];
28669
28670 /* Start using the new console output. */
28671 - virtio_cons.get_chars = get_chars;
28672 - virtio_cons.put_chars = put_chars;
28673 - virtio_cons.notifier_add = notifier_add_vio;
28674 - virtio_cons.notifier_del = notifier_del_vio;
28675 - virtio_cons.notifier_hangup = notifier_del_vio;
28676 + pax_open_kernel();
28677 + *(void **)&virtio_cons.get_chars = get_chars;
28678 + *(void **)&virtio_cons.put_chars = put_chars;
28679 + *(void **)&virtio_cons.notifier_add = notifier_add_vio;
28680 + *(void **)&virtio_cons.notifier_del = notifier_del_vio;
28681 + *(void **)&virtio_cons.notifier_hangup = notifier_del_vio;
28682 + pax_close_kernel();
28683
28684 /* The first argument of hvc_alloc() is the virtual console number, so
28685 * we use zero. The second argument is the parameter for the
28686 diff -urNp linux-2.6.32.45/drivers/char/vt.c linux-2.6.32.45/drivers/char/vt.c
28687 --- linux-2.6.32.45/drivers/char/vt.c 2011-03-27 14:31:47.000000000 -0400
28688 +++ linux-2.6.32.45/drivers/char/vt.c 2011-04-17 15:56:46.000000000 -0400
28689 @@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
28690
28691 static void notify_write(struct vc_data *vc, unsigned int unicode)
28692 {
28693 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
28694 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
28695 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
28696 }
28697
28698 diff -urNp linux-2.6.32.45/drivers/char/vt_ioctl.c linux-2.6.32.45/drivers/char/vt_ioctl.c
28699 --- linux-2.6.32.45/drivers/char/vt_ioctl.c 2011-03-27 14:31:47.000000000 -0400
28700 +++ linux-2.6.32.45/drivers/char/vt_ioctl.c 2011-04-17 15:56:46.000000000 -0400
28701 @@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
28702 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
28703 return -EFAULT;
28704
28705 - if (!capable(CAP_SYS_TTY_CONFIG))
28706 - perm = 0;
28707 -
28708 switch (cmd) {
28709 case KDGKBENT:
28710 key_map = key_maps[s];
28711 @@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __
28712 val = (i ? K_HOLE : K_NOSUCHMAP);
28713 return put_user(val, &user_kbe->kb_value);
28714 case KDSKBENT:
28715 + if (!capable(CAP_SYS_TTY_CONFIG))
28716 + perm = 0;
28717 +
28718 if (!perm)
28719 return -EPERM;
28720 +
28721 if (!i && v == K_NOSUCHMAP) {
28722 /* deallocate map */
28723 key_map = key_maps[s];
28724 @@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
28725 int i, j, k;
28726 int ret;
28727
28728 - if (!capable(CAP_SYS_TTY_CONFIG))
28729 - perm = 0;
28730 -
28731 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
28732 if (!kbs) {
28733 ret = -ENOMEM;
28734 @@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
28735 kfree(kbs);
28736 return ((p && *p) ? -EOVERFLOW : 0);
28737 case KDSKBSENT:
28738 + if (!capable(CAP_SYS_TTY_CONFIG))
28739 + perm = 0;
28740 +
28741 if (!perm) {
28742 ret = -EPERM;
28743 goto reterr;
28744 diff -urNp linux-2.6.32.45/drivers/cpufreq/cpufreq.c linux-2.6.32.45/drivers/cpufreq/cpufreq.c
28745 --- linux-2.6.32.45/drivers/cpufreq/cpufreq.c 2011-06-25 12:55:34.000000000 -0400
28746 +++ linux-2.6.32.45/drivers/cpufreq/cpufreq.c 2011-06-25 12:56:37.000000000 -0400
28747 @@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct
28748 complete(&policy->kobj_unregister);
28749 }
28750
28751 -static struct sysfs_ops sysfs_ops = {
28752 +static const struct sysfs_ops sysfs_ops = {
28753 .show = show,
28754 .store = store,
28755 };
28756 diff -urNp linux-2.6.32.45/drivers/cpuidle/sysfs.c linux-2.6.32.45/drivers/cpuidle/sysfs.c
28757 --- linux-2.6.32.45/drivers/cpuidle/sysfs.c 2011-03-27 14:31:47.000000000 -0400
28758 +++ linux-2.6.32.45/drivers/cpuidle/sysfs.c 2011-04-17 15:56:46.000000000 -0400
28759 @@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobj
28760 return ret;
28761 }
28762
28763 -static struct sysfs_ops cpuidle_sysfs_ops = {
28764 +static const struct sysfs_ops cpuidle_sysfs_ops = {
28765 .show = cpuidle_show,
28766 .store = cpuidle_store,
28767 };
28768 @@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct
28769 return ret;
28770 }
28771
28772 -static struct sysfs_ops cpuidle_state_sysfs_ops = {
28773 +static const struct sysfs_ops cpuidle_state_sysfs_ops = {
28774 .show = cpuidle_state_show,
28775 };
28776
28777 @@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpui
28778 .release = cpuidle_state_sysfs_release,
28779 };
28780
28781 -static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
28782 +static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
28783 {
28784 kobject_put(&device->kobjs[i]->kobj);
28785 wait_for_completion(&device->kobjs[i]->kobj_unregister);
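
From the cpufreq and cpuidle hunks above onward (and through the edac, firmware and dma hunks below), the patch repeatedly turns "static struct sysfs_ops" tables into "static const struct sysfs_ops", presumably paired with the constification of their consumers elsewhere in this patch, so the function-pointer tables can be placed in read-only memory. The pattern, with hypothetical demo_* names and assuming the kobj_type side has likewise been changed to accept a const pointer:

    #include <linux/kobject.h>
    #include <linux/sysfs.h>

    static ssize_t demo_show(struct kobject *kobj, struct attribute *attr, char *buf)
    {
            return 0;
    }

    /* const: the table of function pointers can live in .rodata */
    static const struct sysfs_ops demo_sysfs_ops = {
            .show = demo_show,
    };

    static struct kobj_type demo_ktype = {
            .sysfs_ops = &demo_sysfs_ops,
    };
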
28786 diff -urNp linux-2.6.32.45/drivers/crypto/hifn_795x.c linux-2.6.32.45/drivers/crypto/hifn_795x.c
28787 --- linux-2.6.32.45/drivers/crypto/hifn_795x.c 2011-03-27 14:31:47.000000000 -0400
28788 +++ linux-2.6.32.45/drivers/crypto/hifn_795x.c 2011-05-16 21:46:57.000000000 -0400
28789 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
28790 0xCA, 0x34, 0x2B, 0x2E};
28791 struct scatterlist sg;
28792
28793 + pax_track_stack();
28794 +
28795 memset(src, 0, sizeof(src));
28796 memset(ctx.key, 0, sizeof(ctx.key));
28797
28798 diff -urNp linux-2.6.32.45/drivers/crypto/padlock-aes.c linux-2.6.32.45/drivers/crypto/padlock-aes.c
28799 --- linux-2.6.32.45/drivers/crypto/padlock-aes.c 2011-03-27 14:31:47.000000000 -0400
28800 +++ linux-2.6.32.45/drivers/crypto/padlock-aes.c 2011-05-16 21:46:57.000000000 -0400
28801 @@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm
28802 struct crypto_aes_ctx gen_aes;
28803 int cpu;
28804
28805 + pax_track_stack();
28806 +
28807 if (key_len % 8) {
28808 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
28809 return -EINVAL;
28810 diff -urNp linux-2.6.32.45/drivers/dma/ioat/dma.c linux-2.6.32.45/drivers/dma/ioat/dma.c
28811 --- linux-2.6.32.45/drivers/dma/ioat/dma.c 2011-03-27 14:31:47.000000000 -0400
28812 +++ linux-2.6.32.45/drivers/dma/ioat/dma.c 2011-04-17 15:56:46.000000000 -0400
28813 @@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, str
28814 return entry->show(&chan->common, page);
28815 }
28816
28817 -struct sysfs_ops ioat_sysfs_ops = {
28818 +const struct sysfs_ops ioat_sysfs_ops = {
28819 .show = ioat_attr_show,
28820 };
28821
28822 diff -urNp linux-2.6.32.45/drivers/dma/ioat/dma.h linux-2.6.32.45/drivers/dma/ioat/dma.h
28823 --- linux-2.6.32.45/drivers/dma/ioat/dma.h 2011-03-27 14:31:47.000000000 -0400
28824 +++ linux-2.6.32.45/drivers/dma/ioat/dma.h 2011-04-17 15:56:46.000000000 -0400
28825 @@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_c
28826 unsigned long *phys_complete);
28827 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
28828 void ioat_kobject_del(struct ioatdma_device *device);
28829 -extern struct sysfs_ops ioat_sysfs_ops;
28830 +extern const struct sysfs_ops ioat_sysfs_ops;
28831 extern struct ioat_sysfs_entry ioat_version_attr;
28832 extern struct ioat_sysfs_entry ioat_cap_attr;
28833 #endif /* IOATDMA_H */
28834 diff -urNp linux-2.6.32.45/drivers/edac/edac_device_sysfs.c linux-2.6.32.45/drivers/edac/edac_device_sysfs.c
28835 --- linux-2.6.32.45/drivers/edac/edac_device_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28836 +++ linux-2.6.32.45/drivers/edac/edac_device_sysfs.c 2011-04-17 15:56:46.000000000 -0400
28837 @@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(s
28838 }
28839
28840 /* edac_dev file operations for an 'ctl_info' */
28841 -static struct sysfs_ops device_ctl_info_ops = {
28842 +static const struct sysfs_ops device_ctl_info_ops = {
28843 .show = edac_dev_ctl_info_show,
28844 .store = edac_dev_ctl_info_store
28845 };
28846 @@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(s
28847 }
28848
28849 /* edac_dev file operations for an 'instance' */
28850 -static struct sysfs_ops device_instance_ops = {
28851 +static const struct sysfs_ops device_instance_ops = {
28852 .show = edac_dev_instance_show,
28853 .store = edac_dev_instance_store
28854 };
28855 @@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(stru
28856 }
28857
28858 /* edac_dev file operations for a 'block' */
28859 -static struct sysfs_ops device_block_ops = {
28860 +static const struct sysfs_ops device_block_ops = {
28861 .show = edac_dev_block_show,
28862 .store = edac_dev_block_store
28863 };
28864 diff -urNp linux-2.6.32.45/drivers/edac/edac_mc_sysfs.c linux-2.6.32.45/drivers/edac/edac_mc_sysfs.c
28865 --- linux-2.6.32.45/drivers/edac/edac_mc_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28866 +++ linux-2.6.32.45/drivers/edac/edac_mc_sysfs.c 2011-04-17 15:56:46.000000000 -0400
28867 @@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kob
28868 return -EIO;
28869 }
28870
28871 -static struct sysfs_ops csrowfs_ops = {
28872 +static const struct sysfs_ops csrowfs_ops = {
28873 .show = csrowdev_show,
28874 .store = csrowdev_store
28875 };
28876 @@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobje
28877 }
28878
28879 /* Intermediate show/store table */
28880 -static struct sysfs_ops mci_ops = {
28881 +static const struct sysfs_ops mci_ops = {
28882 .show = mcidev_show,
28883 .store = mcidev_store
28884 };
28885 diff -urNp linux-2.6.32.45/drivers/edac/edac_pci_sysfs.c linux-2.6.32.45/drivers/edac/edac_pci_sysfs.c
28886 --- linux-2.6.32.45/drivers/edac/edac_pci_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28887 +++ linux-2.6.32.45/drivers/edac/edac_pci_sysfs.c 2011-05-04 17:56:20.000000000 -0400
28888 @@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log
28889 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
28890 static int edac_pci_poll_msec = 1000; /* one second workq period */
28891
28892 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
28893 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
28894 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
28895 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
28896
28897 static struct kobject *edac_pci_top_main_kobj;
28898 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
28899 @@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(s
28900 }
28901
28902 /* fs_ops table */
28903 -static struct sysfs_ops pci_instance_ops = {
28904 +static const struct sysfs_ops pci_instance_ops = {
28905 .show = edac_pci_instance_show,
28906 .store = edac_pci_instance_store
28907 };
28908 @@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct
28909 return -EIO;
28910 }
28911
28912 -static struct sysfs_ops edac_pci_sysfs_ops = {
28913 +static const struct sysfs_ops edac_pci_sysfs_ops = {
28914 .show = edac_pci_dev_show,
28915 .store = edac_pci_dev_store
28916 };
28917 @@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(str
28918 edac_printk(KERN_CRIT, EDAC_PCI,
28919 "Signaled System Error on %s\n",
28920 pci_name(dev));
28921 - atomic_inc(&pci_nonparity_count);
28922 + atomic_inc_unchecked(&pci_nonparity_count);
28923 }
28924
28925 if (status & (PCI_STATUS_PARITY)) {
28926 @@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(str
28927 "Master Data Parity Error on %s\n",
28928 pci_name(dev));
28929
28930 - atomic_inc(&pci_parity_count);
28931 + atomic_inc_unchecked(&pci_parity_count);
28932 }
28933
28934 if (status & (PCI_STATUS_DETECTED_PARITY)) {
28935 @@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(str
28936 "Detected Parity Error on %s\n",
28937 pci_name(dev));
28938
28939 - atomic_inc(&pci_parity_count);
28940 + atomic_inc_unchecked(&pci_parity_count);
28941 }
28942 }
28943
28944 @@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(str
28945 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
28946 "Signaled System Error on %s\n",
28947 pci_name(dev));
28948 - atomic_inc(&pci_nonparity_count);
28949 + atomic_inc_unchecked(&pci_nonparity_count);
28950 }
28951
28952 if (status & (PCI_STATUS_PARITY)) {
28953 @@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(str
28954 "Master Data Parity Error on "
28955 "%s\n", pci_name(dev));
28956
28957 - atomic_inc(&pci_parity_count);
28958 + atomic_inc_unchecked(&pci_parity_count);
28959 }
28960
28961 if (status & (PCI_STATUS_DETECTED_PARITY)) {
28962 @@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(str
28963 "Detected Parity Error on %s\n",
28964 pci_name(dev));
28965
28966 - atomic_inc(&pci_parity_count);
28967 + atomic_inc_unchecked(&pci_parity_count);
28968 }
28969 }
28970 }
28971 @@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
28972 if (!check_pci_errors)
28973 return;
28974
28975 - before_count = atomic_read(&pci_parity_count);
28976 + before_count = atomic_read_unchecked(&pci_parity_count);
28977
28978 /* scan all PCI devices looking for a Parity Error on devices and
28979 * bridges.
28980 @@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
28981 /* Only if operator has selected panic on PCI Error */
28982 if (edac_pci_get_panic_on_pe()) {
28983 /* If the count is different 'after' from 'before' */
28984 - if (before_count != atomic_read(&pci_parity_count))
28985 + if (before_count != atomic_read_unchecked(&pci_parity_count))
28986 panic("EDAC: PCI Parity Error");
28987 }
28988 }
28989 diff -urNp linux-2.6.32.45/drivers/firewire/core-card.c linux-2.6.32.45/drivers/firewire/core-card.c
28990 --- linux-2.6.32.45/drivers/firewire/core-card.c 2011-03-27 14:31:47.000000000 -0400
28991 +++ linux-2.6.32.45/drivers/firewire/core-card.c 2011-08-23 21:22:32.000000000 -0400
28992 @@ -558,7 +558,7 @@ void fw_card_release(struct kref *kref)
28993
28994 void fw_core_remove_card(struct fw_card *card)
28995 {
28996 - struct fw_card_driver dummy_driver = dummy_driver_template;
28997 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
28998
28999 card->driver->update_phy_reg(card, 4,
29000 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
29001 diff -urNp linux-2.6.32.45/drivers/firewire/core-cdev.c linux-2.6.32.45/drivers/firewire/core-cdev.c
29002 --- linux-2.6.32.45/drivers/firewire/core-cdev.c 2011-03-27 14:31:47.000000000 -0400
29003 +++ linux-2.6.32.45/drivers/firewire/core-cdev.c 2011-04-17 15:56:46.000000000 -0400
29004 @@ -1141,8 +1141,7 @@ static int init_iso_resource(struct clie
29005 int ret;
29006
29007 if ((request->channels == 0 && request->bandwidth == 0) ||
29008 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
29009 - request->bandwidth < 0)
29010 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
29011 return -EINVAL;
29012
29013 r = kmalloc(sizeof(*r), GFP_KERNEL);
29014 diff -urNp linux-2.6.32.45/drivers/firewire/core.h linux-2.6.32.45/drivers/firewire/core.h
29015 --- linux-2.6.32.45/drivers/firewire/core.h 2011-03-27 14:31:47.000000000 -0400
29016 +++ linux-2.6.32.45/drivers/firewire/core.h 2011-08-23 20:24:26.000000000 -0400
29017 @@ -86,6 +86,7 @@ struct fw_card_driver {
29018
29019 int (*stop_iso)(struct fw_iso_context *ctx);
29020 };
29021 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
29022
29023 void fw_card_initialize(struct fw_card *card,
29024 const struct fw_card_driver *driver, struct device *device);
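
The firewire core.h/core-card.c pair above introduces another idiom used by this patch: a typedef marked __no_const (an attribute supplied by the PaX compiler support in this patch) so that a single writable stack copy of an otherwise constified ops structure can still be made, while the global template stays const. Roughly, the shape is the sketch below; all demo_* names are hypothetical:

    struct demo_ops {
            void (*update)(int reg);
            void (*stop)(void);
    };
    typedef struct demo_ops __no_const demo_ops_no_const;

    static void demo_noop_update(int reg) { }
    static void demo_noop_stop(void) { }

    static const struct demo_ops demo_dummy_template = {
            .update = demo_noop_update,
            .stop   = demo_noop_stop,
    };

    static void demo_remove(void)
    {
            /* Writable local copy; the template itself stays const. */
            demo_ops_no_const demo_dummy = demo_dummy_template;

            demo_dummy.stop = NULL;
            (void)demo_dummy;
    }
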
29025 diff -urNp linux-2.6.32.45/drivers/firewire/core-transaction.c linux-2.6.32.45/drivers/firewire/core-transaction.c
29026 --- linux-2.6.32.45/drivers/firewire/core-transaction.c 2011-03-27 14:31:47.000000000 -0400
29027 +++ linux-2.6.32.45/drivers/firewire/core-transaction.c 2011-05-16 21:46:57.000000000 -0400
29028 @@ -36,6 +36,7 @@
29029 #include <linux/string.h>
29030 #include <linux/timer.h>
29031 #include <linux/types.h>
29032 +#include <linux/sched.h>
29033
29034 #include <asm/byteorder.h>
29035
29036 @@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *c
29037 struct transaction_callback_data d;
29038 struct fw_transaction t;
29039
29040 + pax_track_stack();
29041 +
29042 init_completion(&d.done);
29043 d.payload = payload;
29044 fw_send_request(card, &t, tcode, destination_id, generation, speed,
29045 diff -urNp linux-2.6.32.45/drivers/firmware/dmi_scan.c linux-2.6.32.45/drivers/firmware/dmi_scan.c
29046 --- linux-2.6.32.45/drivers/firmware/dmi_scan.c 2011-03-27 14:31:47.000000000 -0400
29047 +++ linux-2.6.32.45/drivers/firmware/dmi_scan.c 2011-04-17 15:56:46.000000000 -0400
29048 @@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
29049 }
29050 }
29051 else {
29052 - /*
29053 - * no iounmap() for that ioremap(); it would be a no-op, but
29054 - * it's so early in setup that sucker gets confused into doing
29055 - * what it shouldn't if we actually call it.
29056 - */
29057 p = dmi_ioremap(0xF0000, 0x10000);
29058 if (p == NULL)
29059 goto error;
29060 diff -urNp linux-2.6.32.45/drivers/firmware/edd.c linux-2.6.32.45/drivers/firmware/edd.c
29061 --- linux-2.6.32.45/drivers/firmware/edd.c 2011-03-27 14:31:47.000000000 -0400
29062 +++ linux-2.6.32.45/drivers/firmware/edd.c 2011-04-17 15:56:46.000000000 -0400
29063 @@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, str
29064 return ret;
29065 }
29066
29067 -static struct sysfs_ops edd_attr_ops = {
29068 +static const struct sysfs_ops edd_attr_ops = {
29069 .show = edd_attr_show,
29070 };
29071
29072 diff -urNp linux-2.6.32.45/drivers/firmware/efivars.c linux-2.6.32.45/drivers/firmware/efivars.c
29073 --- linux-2.6.32.45/drivers/firmware/efivars.c 2011-03-27 14:31:47.000000000 -0400
29074 +++ linux-2.6.32.45/drivers/firmware/efivars.c 2011-04-17 15:56:46.000000000 -0400
29075 @@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct
29076 return ret;
29077 }
29078
29079 -static struct sysfs_ops efivar_attr_ops = {
29080 +static const struct sysfs_ops efivar_attr_ops = {
29081 .show = efivar_attr_show,
29082 .store = efivar_attr_store,
29083 };
29084 diff -urNp linux-2.6.32.45/drivers/firmware/iscsi_ibft.c linux-2.6.32.45/drivers/firmware/iscsi_ibft.c
29085 --- linux-2.6.32.45/drivers/firmware/iscsi_ibft.c 2011-03-27 14:31:47.000000000 -0400
29086 +++ linux-2.6.32.45/drivers/firmware/iscsi_ibft.c 2011-04-17 15:56:46.000000000 -0400
29087 @@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struc
29088 return ret;
29089 }
29090
29091 -static struct sysfs_ops ibft_attr_ops = {
29092 +static const struct sysfs_ops ibft_attr_ops = {
29093 .show = ibft_show_attribute,
29094 };
29095
29096 diff -urNp linux-2.6.32.45/drivers/firmware/memmap.c linux-2.6.32.45/drivers/firmware/memmap.c
29097 --- linux-2.6.32.45/drivers/firmware/memmap.c 2011-03-27 14:31:47.000000000 -0400
29098 +++ linux-2.6.32.45/drivers/firmware/memmap.c 2011-04-17 15:56:46.000000000 -0400
29099 @@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
29100 NULL
29101 };
29102
29103 -static struct sysfs_ops memmap_attr_ops = {
29104 +static const struct sysfs_ops memmap_attr_ops = {
29105 .show = memmap_attr_show,
29106 };
29107
29108 diff -urNp linux-2.6.32.45/drivers/gpio/vr41xx_giu.c linux-2.6.32.45/drivers/gpio/vr41xx_giu.c
29109 --- linux-2.6.32.45/drivers/gpio/vr41xx_giu.c 2011-03-27 14:31:47.000000000 -0400
29110 +++ linux-2.6.32.45/drivers/gpio/vr41xx_giu.c 2011-05-04 17:56:28.000000000 -0400
29111 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
29112 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
29113 maskl, pendl, maskh, pendh);
29114
29115 - atomic_inc(&irq_err_count);
29116 + atomic_inc_unchecked(&irq_err_count);
29117
29118 return -EINVAL;
29119 }
29120 diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_crtc_helper.c linux-2.6.32.45/drivers/gpu/drm/drm_crtc_helper.c
29121 --- linux-2.6.32.45/drivers/gpu/drm/drm_crtc_helper.c 2011-03-27 14:31:47.000000000 -0400
29122 +++ linux-2.6.32.45/drivers/gpu/drm/drm_crtc_helper.c 2011-05-16 21:46:57.000000000 -0400
29123 @@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct d
29124 struct drm_crtc *tmp;
29125 int crtc_mask = 1;
29126
29127 - WARN(!crtc, "checking null crtc?");
29128 + BUG_ON(!crtc);
29129
29130 dev = crtc->dev;
29131
29132 @@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm
29133
29134 adjusted_mode = drm_mode_duplicate(dev, mode);
29135
29136 + pax_track_stack();
29137 +
29138 crtc->enabled = drm_helper_crtc_in_use(crtc);
29139
29140 if (!crtc->enabled)
29141 diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_drv.c linux-2.6.32.45/drivers/gpu/drm/drm_drv.c
29142 --- linux-2.6.32.45/drivers/gpu/drm/drm_drv.c 2011-03-27 14:31:47.000000000 -0400
29143 +++ linux-2.6.32.45/drivers/gpu/drm/drm_drv.c 2011-04-17 15:56:46.000000000 -0400
29144 @@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struc
29145 char *kdata = NULL;
29146
29147 atomic_inc(&dev->ioctl_count);
29148 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
29149 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
29150 ++file_priv->ioctl_count;
29151
29152 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
29153 diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_fops.c linux-2.6.32.45/drivers/gpu/drm/drm_fops.c
29154 --- linux-2.6.32.45/drivers/gpu/drm/drm_fops.c 2011-03-27 14:31:47.000000000 -0400
29155 +++ linux-2.6.32.45/drivers/gpu/drm/drm_fops.c 2011-04-17 15:56:46.000000000 -0400
29156 @@ -66,7 +66,7 @@ static int drm_setup(struct drm_device *
29157 }
29158
29159 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
29160 - atomic_set(&dev->counts[i], 0);
29161 + atomic_set_unchecked(&dev->counts[i], 0);
29162
29163 dev->sigdata.lock = NULL;
29164
29165 @@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct
29166
29167 retcode = drm_open_helper(inode, filp, dev);
29168 if (!retcode) {
29169 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
29170 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
29171 spin_lock(&dev->count_lock);
29172 - if (!dev->open_count++) {
29173 + if (local_inc_return(&dev->open_count) == 1) {
29174 spin_unlock(&dev->count_lock);
29175 retcode = drm_setup(dev);
29176 goto out;
29177 @@ -435,7 +435,7 @@ int drm_release(struct inode *inode, str
29178
29179 lock_kernel();
29180
29181 - DRM_DEBUG("open_count = %d\n", dev->open_count);
29182 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
29183
29184 if (dev->driver->preclose)
29185 dev->driver->preclose(dev, file_priv);
29186 @@ -447,7 +447,7 @@ int drm_release(struct inode *inode, str
29187 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
29188 task_pid_nr(current),
29189 (long)old_encode_dev(file_priv->minor->device),
29190 - dev->open_count);
29191 + local_read(&dev->open_count));
29192
29193 /* if the master has gone away we can't do anything with the lock */
29194 if (file_priv->minor->master)
29195 @@ -524,9 +524,9 @@ int drm_release(struct inode *inode, str
29196 * End inline drm_release
29197 */
29198
29199 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
29200 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
29201 spin_lock(&dev->count_lock);
29202 - if (!--dev->open_count) {
29203 + if (local_dec_and_test(&dev->open_count)) {
29204 if (atomic_read(&dev->ioctl_count)) {
29205 DRM_ERROR("Device busy: %d\n",
29206 atomic_read(&dev->ioctl_count));
29207 diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_gem.c linux-2.6.32.45/drivers/gpu/drm/drm_gem.c
29208 --- linux-2.6.32.45/drivers/gpu/drm/drm_gem.c 2011-03-27 14:31:47.000000000 -0400
29209 +++ linux-2.6.32.45/drivers/gpu/drm/drm_gem.c 2011-04-17 15:56:46.000000000 -0400
29210 @@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
29211 spin_lock_init(&dev->object_name_lock);
29212 idr_init(&dev->object_name_idr);
29213 atomic_set(&dev->object_count, 0);
29214 - atomic_set(&dev->object_memory, 0);
29215 + atomic_set_unchecked(&dev->object_memory, 0);
29216 atomic_set(&dev->pin_count, 0);
29217 - atomic_set(&dev->pin_memory, 0);
29218 + atomic_set_unchecked(&dev->pin_memory, 0);
29219 atomic_set(&dev->gtt_count, 0);
29220 - atomic_set(&dev->gtt_memory, 0);
29221 + atomic_set_unchecked(&dev->gtt_memory, 0);
29222
29223 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
29224 if (!mm) {
29225 @@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *
29226 goto fput;
29227 }
29228 atomic_inc(&dev->object_count);
29229 - atomic_add(obj->size, &dev->object_memory);
29230 + atomic_add_unchecked(obj->size, &dev->object_memory);
29231 return obj;
29232 fput:
29233 fput(obj->filp);
29234 @@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
29235
29236 fput(obj->filp);
29237 atomic_dec(&dev->object_count);
29238 - atomic_sub(obj->size, &dev->object_memory);
29239 + atomic_sub_unchecked(obj->size, &dev->object_memory);
29240 kfree(obj);
29241 }
29242 EXPORT_SYMBOL(drm_gem_object_free);
29243 diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_info.c linux-2.6.32.45/drivers/gpu/drm/drm_info.c
29244 --- linux-2.6.32.45/drivers/gpu/drm/drm_info.c 2011-03-27 14:31:47.000000000 -0400
29245 +++ linux-2.6.32.45/drivers/gpu/drm/drm_info.c 2011-04-17 15:56:46.000000000 -0400
29246 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
29247 struct drm_local_map *map;
29248 struct drm_map_list *r_list;
29249
29250 - /* Hardcoded from _DRM_FRAME_BUFFER,
29251 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
29252 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
29253 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
29254 + static const char * const types[] = {
29255 + [_DRM_FRAME_BUFFER] = "FB",
29256 + [_DRM_REGISTERS] = "REG",
29257 + [_DRM_SHM] = "SHM",
29258 + [_DRM_AGP] = "AGP",
29259 + [_DRM_SCATTER_GATHER] = "SG",
29260 + [_DRM_CONSISTENT] = "PCI",
29261 + [_DRM_GEM] = "GEM" };
29262 const char *type;
29263 int i;
29264
29265 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
29266 map = r_list->map;
29267 if (!map)
29268 continue;
29269 - if (map->type < 0 || map->type > 5)
29270 + if (map->type >= ARRAY_SIZE(types))
29271 type = "??";
29272 else
29273 type = types[map->type];
29274 @@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file
29275 struct drm_device *dev = node->minor->dev;
29276
29277 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
29278 - seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
29279 + seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
29280 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
29281 - seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
29282 - seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
29283 + seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
29284 + seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
29285 seq_printf(m, "%d gtt total\n", dev->gtt_total);
29286 return 0;
29287 }
29288 @@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, voi
29289 mutex_lock(&dev->struct_mutex);
29290 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
29291 atomic_read(&dev->vma_count),
29292 +#ifdef CONFIG_GRKERNSEC_HIDESYM
29293 + NULL, 0);
29294 +#else
29295 high_memory, (u64)virt_to_phys(high_memory));
29296 +#endif
29297
29298 list_for_each_entry(pt, &dev->vmalist, head) {
29299 vma = pt->vma;
29300 @@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, voi
29301 continue;
29302 seq_printf(m,
29303 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
29304 - pt->pid, vma->vm_start, vma->vm_end,
29305 + pt->pid,
29306 +#ifdef CONFIG_GRKERNSEC_HIDESYM
29307 + 0, 0,
29308 +#else
29309 + vma->vm_start, vma->vm_end,
29310 +#endif
29311 vma->vm_flags & VM_READ ? 'r' : '-',
29312 vma->vm_flags & VM_WRITE ? 'w' : '-',
29313 vma->vm_flags & VM_EXEC ? 'x' : '-',
29314 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
29315 vma->vm_flags & VM_LOCKED ? 'l' : '-',
29316 vma->vm_flags & VM_IO ? 'i' : '-',
29317 +#ifdef CONFIG_GRKERNSEC_HIDESYM
29318 + 0);
29319 +#else
29320 vma->vm_pgoff);
29321 +#endif
29322
29323 #if defined(__i386__)
29324 pgprot = pgprot_val(vma->vm_page_prot);
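
The drm_vm_info() hunk above replaces a positional string table plus a hard-coded "type > 5" test with a designated-initializer array indexed by the map-type constants and bounded with ARRAY_SIZE, which is what lets _DRM_GEM be added without risking an out-of-bounds read. The same pattern in a standalone form, using a hypothetical enum:

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    enum demo_map_type { DEMO_FB, DEMO_REG, DEMO_SHM, DEMO_GEM };

    static const char * const demo_type_names[] = {
            [DEMO_FB]  = "FB",
            [DEMO_REG] = "REG",
            [DEMO_SHM] = "SHM",
            [DEMO_GEM] = "GEM",
    };

    static const char *demo_type_name(unsigned int type)
    {
            if (type >= ARRAY_SIZE(demo_type_names) || !demo_type_names[type])
                    return "??";
            return demo_type_names[type];
    }
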
29325 diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_ioctl.c linux-2.6.32.45/drivers/gpu/drm/drm_ioctl.c
29326 --- linux-2.6.32.45/drivers/gpu/drm/drm_ioctl.c 2011-03-27 14:31:47.000000000 -0400
29327 +++ linux-2.6.32.45/drivers/gpu/drm/drm_ioctl.c 2011-04-17 15:56:46.000000000 -0400
29328 @@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev,
29329 stats->data[i].value =
29330 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
29331 else
29332 - stats->data[i].value = atomic_read(&dev->counts[i]);
29333 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
29334 stats->data[i].type = dev->types[i];
29335 }
29336
29337 diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_lock.c linux-2.6.32.45/drivers/gpu/drm/drm_lock.c
29338 --- linux-2.6.32.45/drivers/gpu/drm/drm_lock.c 2011-03-27 14:31:47.000000000 -0400
29339 +++ linux-2.6.32.45/drivers/gpu/drm/drm_lock.c 2011-04-17 15:56:46.000000000 -0400
29340 @@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, voi
29341 if (drm_lock_take(&master->lock, lock->context)) {
29342 master->lock.file_priv = file_priv;
29343 master->lock.lock_time = jiffies;
29344 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
29345 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
29346 break; /* Got lock */
29347 }
29348
29349 @@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, v
29350 return -EINVAL;
29351 }
29352
29353 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
29354 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
29355
29356 /* kernel_context_switch isn't used by any of the x86 drm
29357 * modules but is required by the Sparc driver.
29358 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i810/i810_dma.c linux-2.6.32.45/drivers/gpu/drm/i810/i810_dma.c
29359 --- linux-2.6.32.45/drivers/gpu/drm/i810/i810_dma.c 2011-03-27 14:31:47.000000000 -0400
29360 +++ linux-2.6.32.45/drivers/gpu/drm/i810/i810_dma.c 2011-04-17 15:56:46.000000000 -0400
29361 @@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_de
29362 dma->buflist[vertex->idx],
29363 vertex->discard, vertex->used);
29364
29365 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
29366 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
29367 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
29368 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
29369 sarea_priv->last_enqueue = dev_priv->counter - 1;
29370 sarea_priv->last_dispatch = (int)hw_status[5];
29371
29372 @@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device
29373 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
29374 mc->last_render);
29375
29376 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
29377 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
29378 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
29379 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
29380 sarea_priv->last_enqueue = dev_priv->counter - 1;
29381 sarea_priv->last_dispatch = (int)hw_status[5];
29382
29383 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i810/i810_drv.h linux-2.6.32.45/drivers/gpu/drm/i810/i810_drv.h
29384 --- linux-2.6.32.45/drivers/gpu/drm/i810/i810_drv.h 2011-03-27 14:31:47.000000000 -0400
29385 +++ linux-2.6.32.45/drivers/gpu/drm/i810/i810_drv.h 2011-05-04 17:56:28.000000000 -0400
29386 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
29387 int page_flipping;
29388
29389 wait_queue_head_t irq_queue;
29390 - atomic_t irq_received;
29391 - atomic_t irq_emitted;
29392 + atomic_unchecked_t irq_received;
29393 + atomic_unchecked_t irq_emitted;
29394
29395 int front_offset;
29396 } drm_i810_private_t;
29397 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i830/i830_drv.h linux-2.6.32.45/drivers/gpu/drm/i830/i830_drv.h
29398 --- linux-2.6.32.45/drivers/gpu/drm/i830/i830_drv.h 2011-03-27 14:31:47.000000000 -0400
29399 +++ linux-2.6.32.45/drivers/gpu/drm/i830/i830_drv.h 2011-05-04 17:56:28.000000000 -0400
29400 @@ -115,8 +115,8 @@ typedef struct drm_i830_private {
29401 int page_flipping;
29402
29403 wait_queue_head_t irq_queue;
29404 - atomic_t irq_received;
29405 - atomic_t irq_emitted;
29406 + atomic_unchecked_t irq_received;
29407 + atomic_unchecked_t irq_emitted;
29408
29409 int use_mi_batchbuffer_start;
29410
29411 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i830/i830_irq.c linux-2.6.32.45/drivers/gpu/drm/i830/i830_irq.c
29412 --- linux-2.6.32.45/drivers/gpu/drm/i830/i830_irq.c 2011-03-27 14:31:47.000000000 -0400
29413 +++ linux-2.6.32.45/drivers/gpu/drm/i830/i830_irq.c 2011-05-04 17:56:28.000000000 -0400
29414 @@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_
29415
29416 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
29417
29418 - atomic_inc(&dev_priv->irq_received);
29419 + atomic_inc_unchecked(&dev_priv->irq_received);
29420 wake_up_interruptible(&dev_priv->irq_queue);
29421
29422 return IRQ_HANDLED;
29423 @@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_devi
29424
29425 DRM_DEBUG("%s\n", __func__);
29426
29427 - atomic_inc(&dev_priv->irq_emitted);
29428 + atomic_inc_unchecked(&dev_priv->irq_emitted);
29429
29430 BEGIN_LP_RING(2);
29431 OUT_RING(0);
29432 OUT_RING(GFX_OP_USER_INTERRUPT);
29433 ADVANCE_LP_RING();
29434
29435 - return atomic_read(&dev_priv->irq_emitted);
29436 + return atomic_read_unchecked(&dev_priv->irq_emitted);
29437 }
29438
29439 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
29440 @@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_devi
29441
29442 DRM_DEBUG("%s\n", __func__);
29443
29444 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
29445 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
29446 return 0;
29447
29448 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
29449 @@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_devi
29450
29451 for (;;) {
29452 __set_current_state(TASK_INTERRUPTIBLE);
29453 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
29454 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
29455 break;
29456 if ((signed)(end - jiffies) <= 0) {
29457 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
29458 @@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct d
29459 I830_WRITE16(I830REG_HWSTAM, 0xffff);
29460 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
29461 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
29462 - atomic_set(&dev_priv->irq_received, 0);
29463 - atomic_set(&dev_priv->irq_emitted, 0);
29464 + atomic_set_unchecked(&dev_priv->irq_received, 0);
29465 + atomic_set_unchecked(&dev_priv->irq_emitted, 0);
29466 init_waitqueue_head(&dev_priv->irq_queue);
29467 }
29468
29469 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7017.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7017.c
29470 --- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7017.c 2011-03-27 14:31:47.000000000 -0400
29471 +++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7017.c 2011-04-17 15:56:46.000000000 -0400
29472 @@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_
29473 }
29474 }
29475
29476 -struct intel_dvo_dev_ops ch7017_ops = {
29477 +const struct intel_dvo_dev_ops ch7017_ops = {
29478 .init = ch7017_init,
29479 .detect = ch7017_detect,
29480 .mode_valid = ch7017_mode_valid,
29481 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7xxx.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7xxx.c
29482 --- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-03-27 14:31:47.000000000 -0400
29483 +++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-04-17 15:56:46.000000000 -0400
29484 @@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_
29485 }
29486 }
29487
29488 -struct intel_dvo_dev_ops ch7xxx_ops = {
29489 +const struct intel_dvo_dev_ops ch7xxx_ops = {
29490 .init = ch7xxx_init,
29491 .detect = ch7xxx_detect,
29492 .mode_valid = ch7xxx_mode_valid,
29493 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo.h linux-2.6.32.45/drivers/gpu/drm/i915/dvo.h
29494 --- linux-2.6.32.45/drivers/gpu/drm/i915/dvo.h 2011-03-27 14:31:47.000000000 -0400
29495 +++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo.h 2011-04-17 15:56:46.000000000 -0400
29496 @@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
29497 *
29498 * \return singly-linked list of modes or NULL if no modes found.
29499 */
29500 - struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
29501 + struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
29502
29503 /**
29504 * Clean up driver-specific bits of the output
29505 */
29506 - void (*destroy) (struct intel_dvo_device *dvo);
29507 + void (* const destroy) (struct intel_dvo_device *dvo);
29508
29509 /**
29510 * Debugging hook to dump device registers to log file
29511 */
29512 - void (*dump_regs)(struct intel_dvo_device *dvo);
29513 + void (* const dump_regs)(struct intel_dvo_device *dvo);
29514 };
29515
29516 -extern struct intel_dvo_dev_ops sil164_ops;
29517 -extern struct intel_dvo_dev_ops ch7xxx_ops;
29518 -extern struct intel_dvo_dev_ops ivch_ops;
29519 -extern struct intel_dvo_dev_ops tfp410_ops;
29520 -extern struct intel_dvo_dev_ops ch7017_ops;
29521 +extern const struct intel_dvo_dev_ops sil164_ops;
29522 +extern const struct intel_dvo_dev_ops ch7xxx_ops;
29523 +extern const struct intel_dvo_dev_ops ivch_ops;
29524 +extern const struct intel_dvo_dev_ops tfp410_ops;
29525 +extern const struct intel_dvo_dev_ops ch7017_ops;
29526
29527 #endif /* _INTEL_DVO_H */
29528 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ivch.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ivch.c
29529 --- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ivch.c 2011-03-27 14:31:47.000000000 -0400
29530 +++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ivch.c 2011-04-17 15:56:46.000000000 -0400
29531 @@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dv
29532 }
29533 }
29534
29535 -struct intel_dvo_dev_ops ivch_ops= {
29536 +const struct intel_dvo_dev_ops ivch_ops= {
29537 .init = ivch_init,
29538 .dpms = ivch_dpms,
29539 .save = ivch_save,
29540 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_sil164.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_sil164.c
29541 --- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_sil164.c 2011-03-27 14:31:47.000000000 -0400
29542 +++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_sil164.c 2011-04-17 15:56:46.000000000 -0400
29543 @@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_
29544 }
29545 }
29546
29547 -struct intel_dvo_dev_ops sil164_ops = {
29548 +const struct intel_dvo_dev_ops sil164_ops = {
29549 .init = sil164_init,
29550 .detect = sil164_detect,
29551 .mode_valid = sil164_mode_valid,
29552 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_tfp410.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_tfp410.c
29553 --- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_tfp410.c 2011-03-27 14:31:47.000000000 -0400
29554 +++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_tfp410.c 2011-04-17 15:56:46.000000000 -0400
29555 @@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_
29556 }
29557 }
29558
29559 -struct intel_dvo_dev_ops tfp410_ops = {
29560 +const struct intel_dvo_dev_ops tfp410_ops = {
29561 .init = tfp410_init,
29562 .detect = tfp410_detect,
29563 .mode_valid = tfp410_mode_valid,
29564 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_debugfs.c linux-2.6.32.45/drivers/gpu/drm/i915/i915_debugfs.c
29565 --- linux-2.6.32.45/drivers/gpu/drm/i915/i915_debugfs.c 2011-03-27 14:31:47.000000000 -0400
29566 +++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_debugfs.c 2011-05-04 17:56:28.000000000 -0400
29567 @@ -192,7 +192,7 @@ static int i915_interrupt_info(struct se
29568 I915_READ(GTIMR));
29569 }
29570 seq_printf(m, "Interrupts received: %d\n",
29571 - atomic_read(&dev_priv->irq_received));
29572 + atomic_read_unchecked(&dev_priv->irq_received));
29573 if (dev_priv->hw_status_page != NULL) {
29574 seq_printf(m, "Current sequence: %d\n",
29575 i915_get_gem_seqno(dev));
29576 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.c linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.c
29577 --- linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.c 2011-03-27 14:31:47.000000000 -0400
29578 +++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.c 2011-04-17 15:56:46.000000000 -0400
29579 @@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
29580 return i915_resume(dev);
29581 }
29582
29583 -static struct vm_operations_struct i915_gem_vm_ops = {
29584 +static const struct vm_operations_struct i915_gem_vm_ops = {
29585 .fault = i915_gem_fault,
29586 .open = drm_gem_vm_open,
29587 .close = drm_gem_vm_close,
29588 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.h linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.h
29589 --- linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.h 2011-03-27 14:31:47.000000000 -0400
29590 +++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.h 2011-08-05 20:33:55.000000000 -0400
29591 @@ -168,7 +168,7 @@ struct drm_i915_display_funcs {
29592 /* display clock increase/decrease */
29593 /* pll clock increase/decrease */
29594 /* clock gating init */
29595 -};
29596 +} __no_const;
29597
29598 typedef struct drm_i915_private {
29599 struct drm_device *dev;
29600 @@ -197,7 +197,7 @@ typedef struct drm_i915_private {
29601 int page_flipping;
29602
29603 wait_queue_head_t irq_queue;
29604 - atomic_t irq_received;
29605 + atomic_unchecked_t irq_received;
29606 /** Protects user_irq_refcount and irq_mask_reg */
29607 spinlock_t user_irq_lock;
29608 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
29609 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_gem.c linux-2.6.32.45/drivers/gpu/drm/i915/i915_gem.c
29610 --- linux-2.6.32.45/drivers/gpu/drm/i915/i915_gem.c 2011-03-27 14:31:47.000000000 -0400
29611 +++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_gem.c 2011-04-17 15:56:46.000000000 -0400
29612 @@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_d
29613
29614 args->aper_size = dev->gtt_total;
29615 args->aper_available_size = (args->aper_size -
29616 - atomic_read(&dev->pin_memory));
29617 + atomic_read_unchecked(&dev->pin_memory));
29618
29619 return 0;
29620 }
29621 @@ -492,6 +492,11 @@ i915_gem_pread_ioctl(struct drm_device *
29622 return -EINVAL;
29623 }
29624
29625 + if (!access_ok(VERIFY_WRITE, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
29626 + drm_gem_object_unreference(obj);
29627 + return -EFAULT;
29628 + }
29629 +
29630 if (i915_gem_object_needs_bit17_swizzle(obj)) {
29631 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
29632 } else {
29633 @@ -965,6 +970,11 @@ i915_gem_pwrite_ioctl(struct drm_device
29634 return -EINVAL;
29635 }
29636
29637 + if (!access_ok(VERIFY_READ, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
29638 + drm_gem_object_unreference(obj);
29639 + return -EFAULT;
29640 + }
29641 +
29642 /* We can only do the GTT pwrite on untiled buffers, as otherwise
29643 * it would end up going through the fenced access, and we'll get
29644 * different detiling behavior between reading and writing.
29645 @@ -2054,7 +2064,7 @@ i915_gem_object_unbind(struct drm_gem_ob
29646
29647 if (obj_priv->gtt_space) {
29648 atomic_dec(&dev->gtt_count);
29649 - atomic_sub(obj->size, &dev->gtt_memory);
29650 + atomic_sub_unchecked(obj->size, &dev->gtt_memory);
29651
29652 drm_mm_put_block(obj_priv->gtt_space);
29653 obj_priv->gtt_space = NULL;
29654 @@ -2697,7 +2707,7 @@ i915_gem_object_bind_to_gtt(struct drm_g
29655 goto search_free;
29656 }
29657 atomic_inc(&dev->gtt_count);
29658 - atomic_add(obj->size, &dev->gtt_memory);
29659 + atomic_add_unchecked(obj->size, &dev->gtt_memory);
29660
29661 /* Assert that the object is not currently in any GPU domain. As it
29662 * wasn't in the GTT, there shouldn't be any way it could have been in
29663 @@ -3751,9 +3761,9 @@ i915_gem_execbuffer(struct drm_device *d
29664 "%d/%d gtt bytes\n",
29665 atomic_read(&dev->object_count),
29666 atomic_read(&dev->pin_count),
29667 - atomic_read(&dev->object_memory),
29668 - atomic_read(&dev->pin_memory),
29669 - atomic_read(&dev->gtt_memory),
29670 + atomic_read_unchecked(&dev->object_memory),
29671 + atomic_read_unchecked(&dev->pin_memory),
29672 + atomic_read_unchecked(&dev->gtt_memory),
29673 dev->gtt_total);
29674 }
29675 goto err;
29676 @@ -3985,7 +3995,7 @@ i915_gem_object_pin(struct drm_gem_objec
29677 */
29678 if (obj_priv->pin_count == 1) {
29679 atomic_inc(&dev->pin_count);
29680 - atomic_add(obj->size, &dev->pin_memory);
29681 + atomic_add_unchecked(obj->size, &dev->pin_memory);
29682 if (!obj_priv->active &&
29683 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
29684 !list_empty(&obj_priv->list))
29685 @@ -4018,7 +4028,7 @@ i915_gem_object_unpin(struct drm_gem_obj
29686 list_move_tail(&obj_priv->list,
29687 &dev_priv->mm.inactive_list);
29688 atomic_dec(&dev->pin_count);
29689 - atomic_sub(obj->size, &dev->pin_memory);
29690 + atomic_sub_unchecked(obj->size, &dev->pin_memory);
29691 }
29692 i915_verify_inactive(dev, __FILE__, __LINE__);
29693 }
29694 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_irq.c linux-2.6.32.45/drivers/gpu/drm/i915/i915_irq.c
29695 --- linux-2.6.32.45/drivers/gpu/drm/i915/i915_irq.c 2011-03-27 14:31:47.000000000 -0400
29696 +++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_irq.c 2011-05-04 17:56:28.000000000 -0400
29697 @@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_
29698 int irq_received;
29699 int ret = IRQ_NONE;
29700
29701 - atomic_inc(&dev_priv->irq_received);
29702 + atomic_inc_unchecked(&dev_priv->irq_received);
29703
29704 if (IS_IGDNG(dev))
29705 return igdng_irq_handler(dev);
29706 @@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct d
29707 {
29708 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
29709
29710 - atomic_set(&dev_priv->irq_received, 0);
29711 + atomic_set_unchecked(&dev_priv->irq_received, 0);
29712
29713 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
29714 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
29715 diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/intel_sdvo.c linux-2.6.32.45/drivers/gpu/drm/i915/intel_sdvo.c
29716 --- linux-2.6.32.45/drivers/gpu/drm/i915/intel_sdvo.c 2011-03-27 14:31:47.000000000 -0400
29717 +++ linux-2.6.32.45/drivers/gpu/drm/i915/intel_sdvo.c 2011-08-05 20:33:55.000000000 -0400
29718 @@ -2795,7 +2795,9 @@ bool intel_sdvo_init(struct drm_device *
29719 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
29720
29721 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
29722 - intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
29723 + pax_open_kernel();
29724 + *(void **)&intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
29725 + pax_close_kernel();
29726
29727 /* Read the regs to test if we can talk to the device */
29728 for (i = 0; i < 0x40; i++) {
29729 diff -urNp linux-2.6.32.45/drivers/gpu/drm/mga/mga_drv.h linux-2.6.32.45/drivers/gpu/drm/mga/mga_drv.h
29730 --- linux-2.6.32.45/drivers/gpu/drm/mga/mga_drv.h 2011-03-27 14:31:47.000000000 -0400
29731 +++ linux-2.6.32.45/drivers/gpu/drm/mga/mga_drv.h 2011-05-04 17:56:28.000000000 -0400
29732 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
29733 u32 clear_cmd;
29734 u32 maccess;
29735
29736 - atomic_t vbl_received; /**< Number of vblanks received. */
29737 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
29738 wait_queue_head_t fence_queue;
29739 - atomic_t last_fence_retired;
29740 + atomic_unchecked_t last_fence_retired;
29741 u32 next_fence_to_post;
29742
29743 unsigned int fb_cpp;
29744 diff -urNp linux-2.6.32.45/drivers/gpu/drm/mga/mga_irq.c linux-2.6.32.45/drivers/gpu/drm/mga/mga_irq.c
29745 --- linux-2.6.32.45/drivers/gpu/drm/mga/mga_irq.c 2011-03-27 14:31:47.000000000 -0400
29746 +++ linux-2.6.32.45/drivers/gpu/drm/mga/mga_irq.c 2011-05-04 17:56:28.000000000 -0400
29747 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
29748 if (crtc != 0)
29749 return 0;
29750
29751 - return atomic_read(&dev_priv->vbl_received);
29752 + return atomic_read_unchecked(&dev_priv->vbl_received);
29753 }
29754
29755
29756 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
29757 /* VBLANK interrupt */
29758 if (status & MGA_VLINEPEN) {
29759 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
29760 - atomic_inc(&dev_priv->vbl_received);
29761 + atomic_inc_unchecked(&dev_priv->vbl_received);
29762 drm_handle_vblank(dev, 0);
29763 handled = 1;
29764 }
29765 @@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
29766 MGA_WRITE(MGA_PRIMEND, prim_end);
29767 }
29768
29769 - atomic_inc(&dev_priv->last_fence_retired);
29770 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
29771 DRM_WAKEUP(&dev_priv->fence_queue);
29772 handled = 1;
29773 }
29774 @@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_dev
29775 * using fences.
29776 */
29777 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
29778 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
29779 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
29780 - *sequence) <= (1 << 23)));
29781
29782 *sequence = cur_fence;
29783 diff -urNp linux-2.6.32.45/drivers/gpu/drm/r128/r128_cce.c linux-2.6.32.45/drivers/gpu/drm/r128/r128_cce.c
29784 --- linux-2.6.32.45/drivers/gpu/drm/r128/r128_cce.c 2011-03-27 14:31:47.000000000 -0400
29785 +++ linux-2.6.32.45/drivers/gpu/drm/r128/r128_cce.c 2011-05-04 17:56:28.000000000 -0400
29786 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
29787
29788 /* GH: Simple idle check.
29789 */
29790 - atomic_set(&dev_priv->idle_count, 0);
29791 + atomic_set_unchecked(&dev_priv->idle_count, 0);
29792
29793 /* We don't support anything other than bus-mastering ring mode,
29794 * but the ring can be in either AGP or PCI space for the ring
29795 diff -urNp linux-2.6.32.45/drivers/gpu/drm/r128/r128_drv.h linux-2.6.32.45/drivers/gpu/drm/r128/r128_drv.h
29796 --- linux-2.6.32.45/drivers/gpu/drm/r128/r128_drv.h 2011-03-27 14:31:47.000000000 -0400
29797 +++ linux-2.6.32.45/drivers/gpu/drm/r128/r128_drv.h 2011-05-04 17:56:28.000000000 -0400
29798 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
29799 int is_pci;
29800 unsigned long cce_buffers_offset;
29801
29802 - atomic_t idle_count;
29803 + atomic_unchecked_t idle_count;
29804
29805 int page_flipping;
29806 int current_page;
29807 u32 crtc_offset;
29808 u32 crtc_offset_cntl;
29809
29810 - atomic_t vbl_received;
29811 + atomic_unchecked_t vbl_received;
29812
29813 u32 color_fmt;
29814 unsigned int front_offset;
29815 diff -urNp linux-2.6.32.45/drivers/gpu/drm/r128/r128_irq.c linux-2.6.32.45/drivers/gpu/drm/r128/r128_irq.c
29816 --- linux-2.6.32.45/drivers/gpu/drm/r128/r128_irq.c 2011-03-27 14:31:47.000000000 -0400
29817 +++ linux-2.6.32.45/drivers/gpu/drm/r128/r128_irq.c 2011-05-04 17:56:28.000000000 -0400
29818 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
29819 if (crtc != 0)
29820 return 0;
29821
29822 - return atomic_read(&dev_priv->vbl_received);
29823 + return atomic_read_unchecked(&dev_priv->vbl_received);
29824 }
29825
29826 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
29827 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
29828 /* VBLANK interrupt */
29829 if (status & R128_CRTC_VBLANK_INT) {
29830 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
29831 - atomic_inc(&dev_priv->vbl_received);
29832 + atomic_inc_unchecked(&dev_priv->vbl_received);
29833 drm_handle_vblank(dev, 0);
29834 return IRQ_HANDLED;
29835 }
29836 diff -urNp linux-2.6.32.45/drivers/gpu/drm/r128/r128_state.c linux-2.6.32.45/drivers/gpu/drm/r128/r128_state.c
29837 --- linux-2.6.32.45/drivers/gpu/drm/r128/r128_state.c 2011-03-27 14:31:47.000000000 -0400
29838 +++ linux-2.6.32.45/drivers/gpu/drm/r128/r128_state.c 2011-05-04 17:56:28.000000000 -0400
29839 @@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_priv
29840
29841 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
29842 {
29843 - if (atomic_read(&dev_priv->idle_count) == 0) {
29844 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
29845 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
29846 } else {
29847 - atomic_set(&dev_priv->idle_count, 0);
29848 + atomic_set_unchecked(&dev_priv->idle_count, 0);
29849 }
29850 }
29851
29852 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/atom.c linux-2.6.32.45/drivers/gpu/drm/radeon/atom.c
29853 --- linux-2.6.32.45/drivers/gpu/drm/radeon/atom.c 2011-05-10 22:12:01.000000000 -0400
29854 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/atom.c 2011-05-16 21:46:57.000000000 -0400
29855 @@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct c
29856 char name[512];
29857 int i;
29858
29859 + pax_track_stack();
29860 +
29861 ctx->card = card;
29862 ctx->bios = bios;
29863
29864 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/mkregtable.c linux-2.6.32.45/drivers/gpu/drm/radeon/mkregtable.c
29865 --- linux-2.6.32.45/drivers/gpu/drm/radeon/mkregtable.c 2011-03-27 14:31:47.000000000 -0400
29866 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/mkregtable.c 2011-04-17 15:56:46.000000000 -0400
29867 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
29868 regex_t mask_rex;
29869 regmatch_t match[4];
29870 char buf[1024];
29871 - size_t end;
29872 + long end;
29873 int len;
29874 int done = 0;
29875 int r;
29876 unsigned o;
29877 struct offset *offset;
29878 char last_reg_s[10];
29879 - int last_reg;
29880 + unsigned long last_reg;
29881
29882 if (regcomp
29883 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
29884 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_atombios.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_atombios.c
29885 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_atombios.c 2011-03-27 14:31:47.000000000 -0400
29886 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_atombios.c 2011-05-16 21:46:57.000000000 -0400
29887 @@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from
29888 bool linkb;
29889 struct radeon_i2c_bus_rec ddc_bus;
29890
29891 + pax_track_stack();
29892 +
29893 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
29894
29895 if (data_offset == 0)
29896 @@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_o
29897 }
29898 }
29899
29900 -struct bios_connector {
29901 +static struct bios_connector {
29902 bool valid;
29903 uint16_t line_mux;
29904 uint16_t devices;
29905 int connector_type;
29906 struct radeon_i2c_bus_rec ddc_bus;
29907 -};
29908 +} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
29909
29910 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
29911 drm_device
29912 @@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from
29913 uint8_t dac;
29914 union atom_supported_devices *supported_devices;
29915 int i, j;
29916 - struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
29917
29918 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
29919
29920 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_display.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_display.c
29921 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_display.c 2011-03-27 14:31:47.000000000 -0400
29922 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_display.c 2011-04-17 15:56:46.000000000 -0400
29923 @@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pl
29924
29925 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
29926 error = freq - current_freq;
29927 - error = error < 0 ? 0xffffffff : error;
29928 + error = (int32_t)error < 0 ? 0xffffffff : error;
29929 } else
29930 error = abs(current_freq - freq);
29931 vco_diff = abs(vco - best_vco);
29932 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_drv.h linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_drv.h
29933 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_drv.h 2011-03-27 14:31:47.000000000 -0400
29934 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_drv.h 2011-05-04 17:56:28.000000000 -0400
29935 @@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
29936
29937 /* SW interrupt */
29938 wait_queue_head_t swi_queue;
29939 - atomic_t swi_emitted;
29940 + atomic_unchecked_t swi_emitted;
29941 int vblank_crtc;
29942 uint32_t irq_enable_reg;
29943 uint32_t r500_disp_irq_reg;
29944 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_fence.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_fence.c
29945 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_fence.c 2011-03-27 14:31:47.000000000 -0400
29946 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_fence.c 2011-05-04 17:56:28.000000000 -0400
29947 @@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_devi
29948 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
29949 return 0;
29950 }
29951 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
29952 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
29953 if (!rdev->cp.ready) {
29954 /* FIXME: cp is not running assume everythings is done right
29955 * away
29956 @@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct rade
29957 return r;
29958 }
29959 WREG32(rdev->fence_drv.scratch_reg, 0);
29960 - atomic_set(&rdev->fence_drv.seq, 0);
29961 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
29962 INIT_LIST_HEAD(&rdev->fence_drv.created);
29963 INIT_LIST_HEAD(&rdev->fence_drv.emited);
29964 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
29965 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon.h linux-2.6.32.45/drivers/gpu/drm/radeon/radeon.h
29966 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon.h 2011-03-27 14:31:47.000000000 -0400
29967 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon.h 2011-08-05 20:33:55.000000000 -0400
29968 @@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device
29969 */
29970 struct radeon_fence_driver {
29971 uint32_t scratch_reg;
29972 - atomic_t seq;
29973 + atomic_unchecked_t seq;
29974 uint32_t last_seq;
29975 unsigned long count_timeout;
29976 wait_queue_head_t queue;
29977 @@ -640,7 +640,7 @@ struct radeon_asic {
29978 uint32_t offset, uint32_t obj_size);
29979 int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
29980 void (*bandwidth_update)(struct radeon_device *rdev);
29981 -};
29982 +} __no_const;
29983
29984 /*
29985 * Asic structures
29986 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ioc32.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ioc32.c
29987 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-03-27 14:31:47.000000000 -0400
29988 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-04-23 13:57:24.000000000 -0400
29989 @@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(str
29990 request = compat_alloc_user_space(sizeof(*request));
29991 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
29992 || __put_user(req32.param, &request->param)
29993 - || __put_user((void __user *)(unsigned long)req32.value,
29994 + || __put_user((unsigned long)req32.value,
29995 &request->value))
29996 return -EFAULT;
29997
29998 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_irq.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_irq.c
29999 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_irq.c 2011-03-27 14:31:47.000000000 -0400
30000 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_irq.c 2011-05-04 17:56:28.000000000 -0400
30001 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
30002 unsigned int ret;
30003 RING_LOCALS;
30004
30005 - atomic_inc(&dev_priv->swi_emitted);
30006 - ret = atomic_read(&dev_priv->swi_emitted);
30007 + atomic_inc_unchecked(&dev_priv->swi_emitted);
30008 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
30009
30010 BEGIN_RING(4);
30011 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
30012 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
30013 drm_radeon_private_t *dev_priv =
30014 (drm_radeon_private_t *) dev->dev_private;
30015
30016 - atomic_set(&dev_priv->swi_emitted, 0);
30017 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
30018 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
30019
30020 dev->max_vblank_count = 0x001fffff;
30021 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_state.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_state.c
30022 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_state.c 2011-03-27 14:31:47.000000000 -0400
30023 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_state.c 2011-04-17 15:56:46.000000000 -0400
30024 @@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm
30025 {
30026 drm_radeon_private_t *dev_priv = dev->dev_private;
30027 drm_radeon_getparam_t *param = data;
30028 - int value;
30029 + int value = 0;
30030
30031 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
30032
30033 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ttm.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ttm.c
30034 --- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ttm.c 2011-03-27 14:31:47.000000000 -0400
30035 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ttm.c 2011-04-17 15:56:46.000000000 -0400
30036 @@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_devic
30037 DRM_INFO("radeon: ttm finalized\n");
30038 }
30039
30040 -static struct vm_operations_struct radeon_ttm_vm_ops;
30041 -static const struct vm_operations_struct *ttm_vm_ops = NULL;
30042 -
30043 -static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
30044 -{
30045 - struct ttm_buffer_object *bo;
30046 - int r;
30047 -
30048 - bo = (struct ttm_buffer_object *)vma->vm_private_data;
30049 - if (bo == NULL) {
30050 - return VM_FAULT_NOPAGE;
30051 - }
30052 - r = ttm_vm_ops->fault(vma, vmf);
30053 - return r;
30054 -}
30055 -
30056 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
30057 {
30058 struct drm_file *file_priv;
30059 struct radeon_device *rdev;
30060 - int r;
30061
30062 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
30063 return drm_mmap(filp, vma);
30064 @@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struc
30065
30066 file_priv = (struct drm_file *)filp->private_data;
30067 rdev = file_priv->minor->dev->dev_private;
30068 - if (rdev == NULL) {
30069 + if (!rdev)
30070 return -EINVAL;
30071 - }
30072 - r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
30073 - if (unlikely(r != 0)) {
30074 - return r;
30075 - }
30076 - if (unlikely(ttm_vm_ops == NULL)) {
30077 - ttm_vm_ops = vma->vm_ops;
30078 - radeon_ttm_vm_ops = *ttm_vm_ops;
30079 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
30080 - }
30081 - vma->vm_ops = &radeon_ttm_vm_ops;
30082 - return 0;
30083 + return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
30084 }
30085
30086
30087 diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/rs690.c linux-2.6.32.45/drivers/gpu/drm/radeon/rs690.c
30088 --- linux-2.6.32.45/drivers/gpu/drm/radeon/rs690.c 2011-03-27 14:31:47.000000000 -0400
30089 +++ linux-2.6.32.45/drivers/gpu/drm/radeon/rs690.c 2011-04-17 15:56:46.000000000 -0400
30090 @@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct
30091 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
30092 rdev->pm.sideport_bandwidth.full)
30093 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
30094 - read_delay_latency.full = rfixed_const(370 * 800 * 1000);
30095 + read_delay_latency.full = rfixed_const(800 * 1000);
30096 read_delay_latency.full = rfixed_div(read_delay_latency,
30097 rdev->pm.igp_sideport_mclk);
30098 + a.full = rfixed_const(370);
30099 + read_delay_latency.full = rfixed_mul(read_delay_latency, a);
30100 } else {
30101 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
30102 rdev->pm.k8_bandwidth.full)
30103 diff -urNp linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo.c linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo.c
30104 --- linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo.c 2011-03-27 14:31:47.000000000 -0400
30105 +++ linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo.c 2011-04-23 12:56:11.000000000 -0400
30106 @@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_a
30107 NULL
30108 };
30109
30110 -static struct sysfs_ops ttm_bo_global_ops = {
30111 +static const struct sysfs_ops ttm_bo_global_ops = {
30112 .show = &ttm_bo_global_show
30113 };
30114
30115 diff -urNp linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo_vm.c linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo_vm.c
30116 --- linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-03-27 14:31:47.000000000 -0400
30117 +++ linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-04-17 15:56:46.000000000 -0400
30118 @@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_are
30119 {
30120 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
30121 vma->vm_private_data;
30122 - struct ttm_bo_device *bdev = bo->bdev;
30123 + struct ttm_bo_device *bdev;
30124 unsigned long bus_base;
30125 unsigned long bus_offset;
30126 unsigned long bus_size;
30127 @@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_are
30128 unsigned long address = (unsigned long)vmf->virtual_address;
30129 int retval = VM_FAULT_NOPAGE;
30130
30131 + if (!bo)
30132 + return VM_FAULT_NOPAGE;
30133 + bdev = bo->bdev;
30134 +
30135 /*
30136 * Work around locking order reversal in fault / nopfn
30137 * between mmap_sem and bo_reserve: Perform a trylock operation
30138 diff -urNp linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_global.c linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_global.c
30139 --- linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_global.c 2011-03-27 14:31:47.000000000 -0400
30140 +++ linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_global.c 2011-04-17 15:56:46.000000000 -0400
30141 @@ -36,7 +36,7 @@
30142 struct ttm_global_item {
30143 struct mutex mutex;
30144 void *object;
30145 - int refcount;
30146 + atomic_t refcount;
30147 };
30148
30149 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
30150 @@ -49,7 +49,7 @@ void ttm_global_init(void)
30151 struct ttm_global_item *item = &glob[i];
30152 mutex_init(&item->mutex);
30153 item->object = NULL;
30154 - item->refcount = 0;
30155 + atomic_set(&item->refcount, 0);
30156 }
30157 }
30158
30159 @@ -59,7 +59,7 @@ void ttm_global_release(void)
30160 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
30161 struct ttm_global_item *item = &glob[i];
30162 BUG_ON(item->object != NULL);
30163 - BUG_ON(item->refcount != 0);
30164 + BUG_ON(atomic_read(&item->refcount) != 0);
30165 }
30166 }
30167
30168 @@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_globa
30169 void *object;
30170
30171 mutex_lock(&item->mutex);
30172 - if (item->refcount == 0) {
30173 + if (atomic_read(&item->refcount) == 0) {
30174 item->object = kzalloc(ref->size, GFP_KERNEL);
30175 if (unlikely(item->object == NULL)) {
30176 ret = -ENOMEM;
30177 @@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_globa
30178 goto out_err;
30179
30180 }
30181 - ++item->refcount;
30182 + atomic_inc(&item->refcount);
30183 ref->object = item->object;
30184 object = item->object;
30185 mutex_unlock(&item->mutex);
30186 @@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_gl
30187 struct ttm_global_item *item = &glob[ref->global_type];
30188
30189 mutex_lock(&item->mutex);
30190 - BUG_ON(item->refcount == 0);
30191 + BUG_ON(atomic_read(&item->refcount) == 0);
30192 BUG_ON(ref->object != item->object);
30193 - if (--item->refcount == 0) {
30194 + if (atomic_dec_and_test(&item->refcount)) {
30195 ref->release(ref);
30196 item->object = NULL;
30197 }
30198 diff -urNp linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_memory.c linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_memory.c
30199 --- linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_memory.c 2011-03-27 14:31:47.000000000 -0400
30200 +++ linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_memory.c 2011-04-17 15:56:46.000000000 -0400
30201 @@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_at
30202 NULL
30203 };
30204
30205 -static struct sysfs_ops ttm_mem_zone_ops = {
30206 +static const struct sysfs_ops ttm_mem_zone_ops = {
30207 .show = &ttm_mem_zone_show,
30208 .store = &ttm_mem_zone_store
30209 };
30210 diff -urNp linux-2.6.32.45/drivers/gpu/drm/via/via_drv.h linux-2.6.32.45/drivers/gpu/drm/via/via_drv.h
30211 --- linux-2.6.32.45/drivers/gpu/drm/via/via_drv.h 2011-03-27 14:31:47.000000000 -0400
30212 +++ linux-2.6.32.45/drivers/gpu/drm/via/via_drv.h 2011-05-04 17:56:28.000000000 -0400
30213 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
30214 typedef uint32_t maskarray_t[5];
30215
30216 typedef struct drm_via_irq {
30217 - atomic_t irq_received;
30218 + atomic_unchecked_t irq_received;
30219 uint32_t pending_mask;
30220 uint32_t enable_mask;
30221 wait_queue_head_t irq_queue;
30222 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
30223 struct timeval last_vblank;
30224 int last_vblank_valid;
30225 unsigned usec_per_vblank;
30226 - atomic_t vbl_received;
30227 + atomic_unchecked_t vbl_received;
30228 drm_via_state_t hc_state;
30229 char pci_buf[VIA_PCI_BUF_SIZE];
30230 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
30231 diff -urNp linux-2.6.32.45/drivers/gpu/drm/via/via_irq.c linux-2.6.32.45/drivers/gpu/drm/via/via_irq.c
30232 --- linux-2.6.32.45/drivers/gpu/drm/via/via_irq.c 2011-03-27 14:31:47.000000000 -0400
30233 +++ linux-2.6.32.45/drivers/gpu/drm/via/via_irq.c 2011-05-04 17:56:28.000000000 -0400
30234 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
30235 if (crtc != 0)
30236 return 0;
30237
30238 - return atomic_read(&dev_priv->vbl_received);
30239 + return atomic_read_unchecked(&dev_priv->vbl_received);
30240 }
30241
30242 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30243 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
30244
30245 status = VIA_READ(VIA_REG_INTERRUPT);
30246 if (status & VIA_IRQ_VBLANK_PENDING) {
30247 - atomic_inc(&dev_priv->vbl_received);
30248 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
30249 + atomic_inc_unchecked(&dev_priv->vbl_received);
30250 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
30251 do_gettimeofday(&cur_vblank);
30252 if (dev_priv->last_vblank_valid) {
30253 dev_priv->usec_per_vblank =
30254 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
30255 dev_priv->last_vblank = cur_vblank;
30256 dev_priv->last_vblank_valid = 1;
30257 }
30258 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
30259 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
30260 DRM_DEBUG("US per vblank is: %u\n",
30261 dev_priv->usec_per_vblank);
30262 }
30263 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
30264
30265 for (i = 0; i < dev_priv->num_irqs; ++i) {
30266 if (status & cur_irq->pending_mask) {
30267 - atomic_inc(&cur_irq->irq_received);
30268 + atomic_inc_unchecked(&cur_irq->irq_received);
30269 DRM_WAKEUP(&cur_irq->irq_queue);
30270 handled = 1;
30271 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
30272 @@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device *
30273 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
30274 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
30275 masks[irq][4]));
30276 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
30277 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
30278 } else {
30279 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
30280 (((cur_irq_sequence =
30281 - atomic_read(&cur_irq->irq_received)) -
30282 + atomic_read_unchecked(&cur_irq->irq_received)) -
30283 *sequence) <= (1 << 23)));
30284 }
30285 *sequence = cur_irq_sequence;
30286 @@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct dr
30287 }
30288
30289 for (i = 0; i < dev_priv->num_irqs; ++i) {
30290 - atomic_set(&cur_irq->irq_received, 0);
30291 + atomic_set_unchecked(&cur_irq->irq_received, 0);
30292 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
30293 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
30294 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
30295 @@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev,
30296 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
30297 case VIA_IRQ_RELATIVE:
30298 irqwait->request.sequence +=
30299 - atomic_read(&cur_irq->irq_received);
30300 + atomic_read_unchecked(&cur_irq->irq_received);
30301 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
30302 case VIA_IRQ_ABSOLUTE:
30303 break;
30304 diff -urNp linux-2.6.32.45/drivers/hid/hid-core.c linux-2.6.32.45/drivers/hid/hid-core.c
30305 --- linux-2.6.32.45/drivers/hid/hid-core.c 2011-05-10 22:12:01.000000000 -0400
30306 +++ linux-2.6.32.45/drivers/hid/hid-core.c 2011-05-10 22:12:32.000000000 -0400
30307 @@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device
30308
30309 int hid_add_device(struct hid_device *hdev)
30310 {
30311 - static atomic_t id = ATOMIC_INIT(0);
30312 + static atomic_unchecked_t id = ATOMIC_INIT(0);
30313 int ret;
30314
30315 if (WARN_ON(hdev->status & HID_STAT_ADDED))
30316 @@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hd
30317 /* XXX hack, any other cleaner solution after the driver core
30318 * is converted to allow more than 20 bytes as the device name? */
30319 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
30320 - hdev->vendor, hdev->product, atomic_inc_return(&id));
30321 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
30322
30323 ret = device_add(&hdev->dev);
30324 if (!ret)
30325 diff -urNp linux-2.6.32.45/drivers/hid/usbhid/hiddev.c linux-2.6.32.45/drivers/hid/usbhid/hiddev.c
30326 --- linux-2.6.32.45/drivers/hid/usbhid/hiddev.c 2011-03-27 14:31:47.000000000 -0400
30327 +++ linux-2.6.32.45/drivers/hid/usbhid/hiddev.c 2011-04-17 15:56:46.000000000 -0400
30328 @@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *fi
30329 return put_user(HID_VERSION, (int __user *)arg);
30330
30331 case HIDIOCAPPLICATION:
30332 - if (arg < 0 || arg >= hid->maxapplication)
30333 + if (arg >= hid->maxapplication)
30334 return -EINVAL;
30335
30336 for (i = 0; i < hid->maxcollection; i++)
30337 diff -urNp linux-2.6.32.45/drivers/hwmon/lis3lv02d.c linux-2.6.32.45/drivers/hwmon/lis3lv02d.c
30338 --- linux-2.6.32.45/drivers/hwmon/lis3lv02d.c 2011-03-27 14:31:47.000000000 -0400
30339 +++ linux-2.6.32.45/drivers/hwmon/lis3lv02d.c 2011-05-04 17:56:28.000000000 -0400
30340 @@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(in
30341 * the lid is closed. This leads to interrupts as soon as a little move
30342 * is done.
30343 */
30344 - atomic_inc(&lis3_dev.count);
30345 + atomic_inc_unchecked(&lis3_dev.count);
30346
30347 wake_up_interruptible(&lis3_dev.misc_wait);
30348 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
30349 @@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct in
30350 if (test_and_set_bit(0, &lis3_dev.misc_opened))
30351 return -EBUSY; /* already open */
30352
30353 - atomic_set(&lis3_dev.count, 0);
30354 + atomic_set_unchecked(&lis3_dev.count, 0);
30355
30356 /*
30357 * The sensor can generate interrupts for free-fall and direction
30358 @@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struc
30359 add_wait_queue(&lis3_dev.misc_wait, &wait);
30360 while (true) {
30361 set_current_state(TASK_INTERRUPTIBLE);
30362 - data = atomic_xchg(&lis3_dev.count, 0);
30363 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
30364 if (data)
30365 break;
30366
30367 @@ -244,7 +244,7 @@ out:
30368 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
30369 {
30370 poll_wait(file, &lis3_dev.misc_wait, wait);
30371 - if (atomic_read(&lis3_dev.count))
30372 + if (atomic_read_unchecked(&lis3_dev.count))
30373 return POLLIN | POLLRDNORM;
30374 return 0;
30375 }
30376 diff -urNp linux-2.6.32.45/drivers/hwmon/lis3lv02d.h linux-2.6.32.45/drivers/hwmon/lis3lv02d.h
30377 --- linux-2.6.32.45/drivers/hwmon/lis3lv02d.h 2011-03-27 14:31:47.000000000 -0400
30378 +++ linux-2.6.32.45/drivers/hwmon/lis3lv02d.h 2011-05-04 17:56:28.000000000 -0400
30379 @@ -201,7 +201,7 @@ struct lis3lv02d {
30380
30381 struct input_polled_dev *idev; /* input device */
30382 struct platform_device *pdev; /* platform device */
30383 - atomic_t count; /* interrupt count after last read */
30384 + atomic_unchecked_t count; /* interrupt count after last read */
30385 int xcalib; /* calibrated null value for x */
30386 int ycalib; /* calibrated null value for y */
30387 int zcalib; /* calibrated null value for z */
30388 diff -urNp linux-2.6.32.45/drivers/hwmon/sht15.c linux-2.6.32.45/drivers/hwmon/sht15.c
30389 --- linux-2.6.32.45/drivers/hwmon/sht15.c 2011-03-27 14:31:47.000000000 -0400
30390 +++ linux-2.6.32.45/drivers/hwmon/sht15.c 2011-05-04 17:56:28.000000000 -0400
30391 @@ -112,7 +112,7 @@ struct sht15_data {
30392 int supply_uV;
30393 int supply_uV_valid;
30394 struct work_struct update_supply_work;
30395 - atomic_t interrupt_handled;
30396 + atomic_unchecked_t interrupt_handled;
30397 };
30398
30399 /**
30400 @@ -245,13 +245,13 @@ static inline int sht15_update_single_va
30401 return ret;
30402
30403 gpio_direction_input(data->pdata->gpio_data);
30404 - atomic_set(&data->interrupt_handled, 0);
30405 + atomic_set_unchecked(&data->interrupt_handled, 0);
30406
30407 enable_irq(gpio_to_irq(data->pdata->gpio_data));
30408 if (gpio_get_value(data->pdata->gpio_data) == 0) {
30409 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
30410 /* Only relevant if the interrupt hasn't occured. */
30411 - if (!atomic_read(&data->interrupt_handled))
30412 + if (!atomic_read_unchecked(&data->interrupt_handled))
30413 schedule_work(&data->read_work);
30414 }
30415 ret = wait_event_timeout(data->wait_queue,
30416 @@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired
30417 struct sht15_data *data = d;
30418 /* First disable the interrupt */
30419 disable_irq_nosync(irq);
30420 - atomic_inc(&data->interrupt_handled);
30421 + atomic_inc_unchecked(&data->interrupt_handled);
30422 /* Then schedule a reading work struct */
30423 if (data->flag != SHT15_READING_NOTHING)
30424 schedule_work(&data->read_work);
30425 @@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct wo
30426 here as could have gone low in meantime so verify
30427 it hasn't!
30428 */
30429 - atomic_set(&data->interrupt_handled, 0);
30430 + atomic_set_unchecked(&data->interrupt_handled, 0);
30431 enable_irq(gpio_to_irq(data->pdata->gpio_data));
30432 /* If still not occured or another handler has been scheduled */
30433 if (gpio_get_value(data->pdata->gpio_data)
30434 - || atomic_read(&data->interrupt_handled))
30435 + || atomic_read_unchecked(&data->interrupt_handled))
30436 return;
30437 }
30438 /* Read the data back from the device */
30439 diff -urNp linux-2.6.32.45/drivers/hwmon/w83791d.c linux-2.6.32.45/drivers/hwmon/w83791d.c
30440 --- linux-2.6.32.45/drivers/hwmon/w83791d.c 2011-03-27 14:31:47.000000000 -0400
30441 +++ linux-2.6.32.45/drivers/hwmon/w83791d.c 2011-04-17 15:56:46.000000000 -0400
30442 @@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_cli
30443 struct i2c_board_info *info);
30444 static int w83791d_remove(struct i2c_client *client);
30445
30446 -static int w83791d_read(struct i2c_client *client, u8 register);
30447 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
30448 +static int w83791d_read(struct i2c_client *client, u8 reg);
30449 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
30450 static struct w83791d_data *w83791d_update_device(struct device *dev);
30451
30452 #ifdef DEBUG
30453 diff -urNp linux-2.6.32.45/drivers/i2c/busses/i2c-amd756-s4882.c linux-2.6.32.45/drivers/i2c/busses/i2c-amd756-s4882.c
30454 --- linux-2.6.32.45/drivers/i2c/busses/i2c-amd756-s4882.c 2011-03-27 14:31:47.000000000 -0400
30455 +++ linux-2.6.32.45/drivers/i2c/busses/i2c-amd756-s4882.c 2011-08-23 21:22:32.000000000 -0400
30456 @@ -43,7 +43,7 @@
30457 extern struct i2c_adapter amd756_smbus;
30458
30459 static struct i2c_adapter *s4882_adapter;
30460 -static struct i2c_algorithm *s4882_algo;
30461 +static i2c_algorithm_no_const *s4882_algo;
30462
30463 /* Wrapper access functions for multiplexed SMBus */
30464 static DEFINE_MUTEX(amd756_lock);
30465 diff -urNp linux-2.6.32.45/drivers/i2c/busses/i2c-nforce2-s4985.c linux-2.6.32.45/drivers/i2c/busses/i2c-nforce2-s4985.c
30466 --- linux-2.6.32.45/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-03-27 14:31:47.000000000 -0400
30467 +++ linux-2.6.32.45/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-08-23 21:22:32.000000000 -0400
30468 @@ -41,7 +41,7 @@
30469 extern struct i2c_adapter *nforce2_smbus;
30470
30471 static struct i2c_adapter *s4985_adapter;
30472 -static struct i2c_algorithm *s4985_algo;
30473 +static i2c_algorithm_no_const *s4985_algo;
30474
30475 /* Wrapper access functions for multiplexed SMBus */
30476 static DEFINE_MUTEX(nforce2_lock);
30477 diff -urNp linux-2.6.32.45/drivers/ide/ide-cd.c linux-2.6.32.45/drivers/ide/ide-cd.c
30478 --- linux-2.6.32.45/drivers/ide/ide-cd.c 2011-03-27 14:31:47.000000000 -0400
30479 +++ linux-2.6.32.45/drivers/ide/ide-cd.c 2011-04-17 15:56:46.000000000 -0400
30480 @@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_
30481 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
30482 if ((unsigned long)buf & alignment
30483 || blk_rq_bytes(rq) & q->dma_pad_mask
30484 - || object_is_on_stack(buf))
30485 + || object_starts_on_stack(buf))
30486 drive->dma = 0;
30487 }
30488 }
30489 diff -urNp linux-2.6.32.45/drivers/ide/ide-floppy.c linux-2.6.32.45/drivers/ide/ide-floppy.c
30490 --- linux-2.6.32.45/drivers/ide/ide-floppy.c 2011-03-27 14:31:47.000000000 -0400
30491 +++ linux-2.6.32.45/drivers/ide/ide-floppy.c 2011-05-16 21:46:57.000000000 -0400
30492 @@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_d
30493 u8 pc_buf[256], header_len, desc_cnt;
30494 int i, rc = 1, blocks, length;
30495
30496 + pax_track_stack();
30497 +
30498 ide_debug_log(IDE_DBG_FUNC, "enter");
30499
30500 drive->bios_cyl = 0;
30501 diff -urNp linux-2.6.32.45/drivers/ide/setup-pci.c linux-2.6.32.45/drivers/ide/setup-pci.c
30502 --- linux-2.6.32.45/drivers/ide/setup-pci.c 2011-03-27 14:31:47.000000000 -0400
30503 +++ linux-2.6.32.45/drivers/ide/setup-pci.c 2011-05-16 21:46:57.000000000 -0400
30504 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
30505 int ret, i, n_ports = dev2 ? 4 : 2;
30506 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
30507
30508 + pax_track_stack();
30509 +
30510 for (i = 0; i < n_ports / 2; i++) {
30511 ret = ide_setup_pci_controller(pdev[i], d, !i);
30512 if (ret < 0)
30513 diff -urNp linux-2.6.32.45/drivers/ieee1394/dv1394.c linux-2.6.32.45/drivers/ieee1394/dv1394.c
30514 --- linux-2.6.32.45/drivers/ieee1394/dv1394.c 2011-03-27 14:31:47.000000000 -0400
30515 +++ linux-2.6.32.45/drivers/ieee1394/dv1394.c 2011-04-23 12:56:11.000000000 -0400
30516 @@ -739,7 +739,7 @@ static void frame_prepare(struct video_c
30517 based upon DIF section and sequence
30518 */
30519
30520 -static void inline
30521 +static inline void
30522 frame_put_packet (struct frame *f, struct packet *p)
30523 {
30524 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
30525 diff -urNp linux-2.6.32.45/drivers/ieee1394/hosts.c linux-2.6.32.45/drivers/ieee1394/hosts.c
30526 --- linux-2.6.32.45/drivers/ieee1394/hosts.c 2011-03-27 14:31:47.000000000 -0400
30527 +++ linux-2.6.32.45/drivers/ieee1394/hosts.c 2011-04-17 15:56:46.000000000 -0400
30528 @@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso
30529 }
30530
30531 static struct hpsb_host_driver dummy_driver = {
30532 + .name = "dummy",
30533 .transmit_packet = dummy_transmit_packet,
30534 .devctl = dummy_devctl,
30535 .isoctl = dummy_isoctl
30536 diff -urNp linux-2.6.32.45/drivers/ieee1394/init_ohci1394_dma.c linux-2.6.32.45/drivers/ieee1394/init_ohci1394_dma.c
30537 --- linux-2.6.32.45/drivers/ieee1394/init_ohci1394_dma.c 2011-03-27 14:31:47.000000000 -0400
30538 +++ linux-2.6.32.45/drivers/ieee1394/init_ohci1394_dma.c 2011-04-17 15:56:46.000000000 -0400
30539 @@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_con
30540 for (func = 0; func < 8; func++) {
30541 u32 class = read_pci_config(num,slot,func,
30542 PCI_CLASS_REVISION);
30543 - if ((class == 0xffffffff))
30544 + if (class == 0xffffffff)
30545 continue; /* No device at this func */
30546
30547 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
30548 diff -urNp linux-2.6.32.45/drivers/ieee1394/ohci1394.c linux-2.6.32.45/drivers/ieee1394/ohci1394.c
30549 --- linux-2.6.32.45/drivers/ieee1394/ohci1394.c 2011-03-27 14:31:47.000000000 -0400
30550 +++ linux-2.6.32.45/drivers/ieee1394/ohci1394.c 2011-04-23 12:56:11.000000000 -0400
30551 @@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_
30552 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
30553
30554 /* Module Parameters */
30555 -static int phys_dma = 1;
30556 +static int phys_dma;
30557 module_param(phys_dma, int, 0444);
30558 -MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
30559 +MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
30560
30561 static void dma_trm_tasklet(unsigned long data);
30562 static void dma_trm_reset(struct dma_trm_ctx *d);
30563 diff -urNp linux-2.6.32.45/drivers/ieee1394/sbp2.c linux-2.6.32.45/drivers/ieee1394/sbp2.c
30564 --- linux-2.6.32.45/drivers/ieee1394/sbp2.c 2011-03-27 14:31:47.000000000 -0400
30565 +++ linux-2.6.32.45/drivers/ieee1394/sbp2.c 2011-04-23 12:56:11.000000000 -0400
30566 @@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 prot
30567 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
30568 MODULE_LICENSE("GPL");
30569
30570 -static int sbp2_module_init(void)
30571 +static int __init sbp2_module_init(void)
30572 {
30573 int ret;
30574
30575 diff -urNp linux-2.6.32.45/drivers/infiniband/core/cm.c linux-2.6.32.45/drivers/infiniband/core/cm.c
30576 --- linux-2.6.32.45/drivers/infiniband/core/cm.c 2011-03-27 14:31:47.000000000 -0400
30577 +++ linux-2.6.32.45/drivers/infiniband/core/cm.c 2011-04-17 15:56:46.000000000 -0400
30578 @@ -112,7 +112,7 @@ static char const counter_group_names[CM
30579
30580 struct cm_counter_group {
30581 struct kobject obj;
30582 - atomic_long_t counter[CM_ATTR_COUNT];
30583 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
30584 };
30585
30586 struct cm_counter_attribute {
30587 @@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm
30588 struct ib_mad_send_buf *msg = NULL;
30589 int ret;
30590
30591 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30592 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30593 counter[CM_REQ_COUNTER]);
30594
30595 /* Quick state check to discard duplicate REQs. */
30596 @@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm
30597 if (!cm_id_priv)
30598 return;
30599
30600 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30601 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30602 counter[CM_REP_COUNTER]);
30603 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
30604 if (ret)
30605 @@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work
30606 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
30607 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
30608 spin_unlock_irq(&cm_id_priv->lock);
30609 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30610 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30611 counter[CM_RTU_COUNTER]);
30612 goto out;
30613 }
30614 @@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_wor
30615 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
30616 dreq_msg->local_comm_id);
30617 if (!cm_id_priv) {
30618 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30619 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30620 counter[CM_DREQ_COUNTER]);
30621 cm_issue_drep(work->port, work->mad_recv_wc);
30622 return -EINVAL;
30623 @@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_wor
30624 case IB_CM_MRA_REP_RCVD:
30625 break;
30626 case IB_CM_TIMEWAIT:
30627 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30628 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30629 counter[CM_DREQ_COUNTER]);
30630 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
30631 goto unlock;
30632 @@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_wor
30633 cm_free_msg(msg);
30634 goto deref;
30635 case IB_CM_DREQ_RCVD:
30636 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30637 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30638 counter[CM_DREQ_COUNTER]);
30639 goto unlock;
30640 default:
30641 @@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work
30642 ib_modify_mad(cm_id_priv->av.port->mad_agent,
30643 cm_id_priv->msg, timeout)) {
30644 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
30645 - atomic_long_inc(&work->port->
30646 + atomic_long_inc_unchecked(&work->port->
30647 counter_group[CM_RECV_DUPLICATES].
30648 counter[CM_MRA_COUNTER]);
30649 goto out;
30650 @@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work
30651 break;
30652 case IB_CM_MRA_REQ_RCVD:
30653 case IB_CM_MRA_REP_RCVD:
30654 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30655 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30656 counter[CM_MRA_COUNTER]);
30657 /* fall through */
30658 default:
30659 @@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work
30660 case IB_CM_LAP_IDLE:
30661 break;
30662 case IB_CM_MRA_LAP_SENT:
30663 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30664 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30665 counter[CM_LAP_COUNTER]);
30666 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
30667 goto unlock;
30668 @@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work
30669 cm_free_msg(msg);
30670 goto deref;
30671 case IB_CM_LAP_RCVD:
30672 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30673 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30674 counter[CM_LAP_COUNTER]);
30675 goto unlock;
30676 default:
30677 @@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm
30678 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
30679 if (cur_cm_id_priv) {
30680 spin_unlock_irq(&cm.lock);
30681 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30682 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30683 counter[CM_SIDR_REQ_COUNTER]);
30684 goto out; /* Duplicate message. */
30685 }
30686 @@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_ma
30687 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
30688 msg->retries = 1;
30689
30690 - atomic_long_add(1 + msg->retries,
30691 + atomic_long_add_unchecked(1 + msg->retries,
30692 &port->counter_group[CM_XMIT].counter[attr_index]);
30693 if (msg->retries)
30694 - atomic_long_add(msg->retries,
30695 + atomic_long_add_unchecked(msg->retries,
30696 &port->counter_group[CM_XMIT_RETRIES].
30697 counter[attr_index]);
30698
30699 @@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_ma
30700 }
30701
30702 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
30703 - atomic_long_inc(&port->counter_group[CM_RECV].
30704 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
30705 counter[attr_id - CM_ATTR_ID_OFFSET]);
30706
30707 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
30708 @@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct ko
30709 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
30710
30711 return sprintf(buf, "%ld\n",
30712 - atomic_long_read(&group->counter[cm_attr->index]));
30713 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
30714 }
30715
30716 -static struct sysfs_ops cm_counter_ops = {
30717 +static const struct sysfs_ops cm_counter_ops = {
30718 .show = cm_show_counter
30719 };
30720
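The conversions above follow a pattern that recurs throughout this patch: counters that only feed statistics (here the duplicate-message counts exported through sysfs) are moved to the _unchecked atomic variants so that the reference-count overflow protection added elsewhere in the patch (CONFIG_PAX_REFCOUNT) does not treat a wrapped statistic as an exploited refcount. A minimal sketch of the pattern, using the helper types this patch introduces and otherwise hypothetical names:

#include <asm/atomic.h>

struct example_stats {
        atomic_long_unchecked_t rx_duplicates;  /* benign counter, may wrap */
};

static void example_count_duplicate(struct example_stats *st)
{
        atomic_long_inc_unchecked(&st->rx_duplicates);
}

static long example_read_duplicates(struct example_stats *st)
{
        return atomic_long_read_unchecked(&st->rx_duplicates);
}

When CONFIG_PAX_REFCOUNT is off, the _unchecked types and helpers are intended to collapse back to the ordinary atomic API, so the conversion should be behaviour-neutral on unhardened configurations.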
30721 diff -urNp linux-2.6.32.45/drivers/infiniband/core/fmr_pool.c linux-2.6.32.45/drivers/infiniband/core/fmr_pool.c
30722 --- linux-2.6.32.45/drivers/infiniband/core/fmr_pool.c 2011-03-27 14:31:47.000000000 -0400
30723 +++ linux-2.6.32.45/drivers/infiniband/core/fmr_pool.c 2011-05-04 17:56:28.000000000 -0400
30724 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
30725
30726 struct task_struct *thread;
30727
30728 - atomic_t req_ser;
30729 - atomic_t flush_ser;
30730 + atomic_unchecked_t req_ser;
30731 + atomic_unchecked_t flush_ser;
30732
30733 wait_queue_head_t force_wait;
30734 };
30735 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
30736 struct ib_fmr_pool *pool = pool_ptr;
30737
30738 do {
30739 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
30740 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
30741 ib_fmr_batch_release(pool);
30742
30743 - atomic_inc(&pool->flush_ser);
30744 + atomic_inc_unchecked(&pool->flush_ser);
30745 wake_up_interruptible(&pool->force_wait);
30746
30747 if (pool->flush_function)
30748 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
30749 }
30750
30751 set_current_state(TASK_INTERRUPTIBLE);
30752 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
30753 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
30754 !kthread_should_stop())
30755 schedule();
30756 __set_current_state(TASK_RUNNING);
30757 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
30758 pool->dirty_watermark = params->dirty_watermark;
30759 pool->dirty_len = 0;
30760 spin_lock_init(&pool->pool_lock);
30761 - atomic_set(&pool->req_ser, 0);
30762 - atomic_set(&pool->flush_ser, 0);
30763 + atomic_set_unchecked(&pool->req_ser, 0);
30764 + atomic_set_unchecked(&pool->flush_ser, 0);
30765 init_waitqueue_head(&pool->force_wait);
30766
30767 pool->thread = kthread_run(ib_fmr_cleanup_thread,
30768 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
30769 }
30770 spin_unlock_irq(&pool->pool_lock);
30771
30772 - serial = atomic_inc_return(&pool->req_ser);
30773 + serial = atomic_inc_return_unchecked(&pool->req_ser);
30774 wake_up_process(pool->thread);
30775
30776 if (wait_event_interruptible(pool->force_wait,
30777 - atomic_read(&pool->flush_ser) - serial >= 0))
30778 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
30779 return -EINTR;
30780
30781 return 0;
30782 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
30783 } else {
30784 list_add_tail(&fmr->list, &pool->dirty_list);
30785 if (++pool->dirty_len >= pool->dirty_watermark) {
30786 - atomic_inc(&pool->req_ser);
30787 + atomic_inc_unchecked(&pool->req_ser);
30788 wake_up_process(pool->thread);
30789 }
30790 }
30791 diff -urNp linux-2.6.32.45/drivers/infiniband/core/sysfs.c linux-2.6.32.45/drivers/infiniband/core/sysfs.c
30792 --- linux-2.6.32.45/drivers/infiniband/core/sysfs.c 2011-03-27 14:31:47.000000000 -0400
30793 +++ linux-2.6.32.45/drivers/infiniband/core/sysfs.c 2011-04-17 15:56:46.000000000 -0400
30794 @@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kob
30795 return port_attr->show(p, port_attr, buf);
30796 }
30797
30798 -static struct sysfs_ops port_sysfs_ops = {
30799 +static const struct sysfs_ops port_sysfs_ops = {
30800 .show = port_attr_show
30801 };
30802
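Constifying sysfs_ops here (and backlight_ops, platform_suspend_ops and others further down) is the other recurring theme: operation tables that are only read after initialisation are declared const so they land in read-only data and their function pointers cannot be retargeted by a memory-corruption bug. A generic sketch with illustrative names:

#include <linux/types.h>
#include <linux/kernel.h>

struct example_ops {
        ssize_t (*show)(char *buf);
};

static ssize_t example_show(char *buf)
{
        return sprintf(buf, "example\n");
}

/* const => emitted into .rodata and write-protected at run time */
static const struct example_ops example_ops = {
        .show = example_show,
};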
30803 diff -urNp linux-2.6.32.45/drivers/infiniband/core/uverbs_marshall.c linux-2.6.32.45/drivers/infiniband/core/uverbs_marshall.c
30804 --- linux-2.6.32.45/drivers/infiniband/core/uverbs_marshall.c 2011-03-27 14:31:47.000000000 -0400
30805 +++ linux-2.6.32.45/drivers/infiniband/core/uverbs_marshall.c 2011-04-17 15:56:46.000000000 -0400
30806 @@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_u
30807 dst->grh.sgid_index = src->grh.sgid_index;
30808 dst->grh.hop_limit = src->grh.hop_limit;
30809 dst->grh.traffic_class = src->grh.traffic_class;
30810 + memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
30811 dst->dlid = src->dlid;
30812 dst->sl = src->sl;
30813 dst->src_path_bits = src->src_path_bits;
30814 dst->static_rate = src->static_rate;
30815 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
30816 dst->port_num = src->port_num;
30817 + dst->reserved = 0;
30818 }
30819 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
30820
30821 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
30822 struct ib_qp_attr *src)
30823 {
30824 + dst->qp_state = src->qp_state;
30825 dst->cur_qp_state = src->cur_qp_state;
30826 dst->path_mtu = src->path_mtu;
30827 dst->path_mig_state = src->path_mig_state;
30828 @@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_u
30829 dst->rnr_retry = src->rnr_retry;
30830 dst->alt_port_num = src->alt_port_num;
30831 dst->alt_timeout = src->alt_timeout;
30832 + memset(dst->reserved, 0, sizeof(dst->reserved));
30833 }
30834 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
30835
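The added memset() calls and the explicit dst->reserved = 0 close a small information leak: these structures are filled in field by field before being handed to user space, and reserved members or compiler padding left uninitialised would otherwise carry stale kernel data out with them. The mISDN devinfo ioctl receives the same treatment later in this patch. A minimal sketch of the idea, with hypothetical names:

#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

struct example_uattr {
        u32 value;
        u8  reserved[4];        /* never meaningfully written */
};

static int example_copy_attr_to_user(void __user *ubuf, u32 value)
{
        struct example_uattr tmp;

        memset(&tmp, 0, sizeof(tmp));   /* clears reserved[] and any padding */
        tmp.value = value;

        return copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}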
30836 diff -urNp linux-2.6.32.45/drivers/infiniband/hw/ipath/ipath_fs.c linux-2.6.32.45/drivers/infiniband/hw/ipath/ipath_fs.c
30837 --- linux-2.6.32.45/drivers/infiniband/hw/ipath/ipath_fs.c 2011-03-27 14:31:47.000000000 -0400
30838 +++ linux-2.6.32.45/drivers/infiniband/hw/ipath/ipath_fs.c 2011-05-16 21:46:57.000000000 -0400
30839 @@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(stru
30840 struct infinipath_counters counters;
30841 struct ipath_devdata *dd;
30842
30843 + pax_track_stack();
30844 +
30845 dd = file->f_path.dentry->d_inode->i_private;
30846 dd->ipath_f_read_counters(dd, &counters);
30847
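pax_track_stack() insertions like the one above are scattered through this patch in functions that put large objects on the kernel stack (here a struct infinipath_counters). The helper is supplied by the patch itself as part of its kernel-stack tracking and sanitising support and is expected to compile away on configurations without that feature. A sketch of where such an annotation lands, with hypothetical names:

#include <linux/kernel.h>

static ssize_t example_read_counters(char *out, size_t outlen)
{
        char scratch[1024];     /* unusually large on-stack buffer */

        pax_track_stack();      /* record stack usage before it grows further */

        scratch[0] = '\0';
        /* ... gather counter text into scratch[] ... */
        return snprintf(out, outlen, "%s", scratch);
}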
30848 diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes.c linux-2.6.32.45/drivers/infiniband/hw/nes/nes.c
30849 --- linux-2.6.32.45/drivers/infiniband/hw/nes/nes.c 2011-03-27 14:31:47.000000000 -0400
30850 +++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes.c 2011-05-04 17:56:28.000000000 -0400
30851 @@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
30852 LIST_HEAD(nes_adapter_list);
30853 static LIST_HEAD(nes_dev_list);
30854
30855 -atomic_t qps_destroyed;
30856 +atomic_unchecked_t qps_destroyed;
30857
30858 static unsigned int ee_flsh_adapter;
30859 static unsigned int sysfs_nonidx_addr;
30860 @@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(str
30861 struct nes_adapter *nesadapter = nesdev->nesadapter;
30862 u32 qp_id;
30863
30864 - atomic_inc(&qps_destroyed);
30865 + atomic_inc_unchecked(&qps_destroyed);
30866
30867 /* Free the control structures */
30868
30869 diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes_cm.c linux-2.6.32.45/drivers/infiniband/hw/nes/nes_cm.c
30870 --- linux-2.6.32.45/drivers/infiniband/hw/nes/nes_cm.c 2011-03-27 14:31:47.000000000 -0400
30871 +++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes_cm.c 2011-05-04 17:56:28.000000000 -0400
30872 @@ -69,11 +69,11 @@ u32 cm_packets_received;
30873 u32 cm_listens_created;
30874 u32 cm_listens_destroyed;
30875 u32 cm_backlog_drops;
30876 -atomic_t cm_loopbacks;
30877 -atomic_t cm_nodes_created;
30878 -atomic_t cm_nodes_destroyed;
30879 -atomic_t cm_accel_dropped_pkts;
30880 -atomic_t cm_resets_recvd;
30881 +atomic_unchecked_t cm_loopbacks;
30882 +atomic_unchecked_t cm_nodes_created;
30883 +atomic_unchecked_t cm_nodes_destroyed;
30884 +atomic_unchecked_t cm_accel_dropped_pkts;
30885 +atomic_unchecked_t cm_resets_recvd;
30886
30887 static inline int mini_cm_accelerated(struct nes_cm_core *,
30888 struct nes_cm_node *);
30889 @@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
30890
30891 static struct nes_cm_core *g_cm_core;
30892
30893 -atomic_t cm_connects;
30894 -atomic_t cm_accepts;
30895 -atomic_t cm_disconnects;
30896 -atomic_t cm_closes;
30897 -atomic_t cm_connecteds;
30898 -atomic_t cm_connect_reqs;
30899 -atomic_t cm_rejects;
30900 +atomic_unchecked_t cm_connects;
30901 +atomic_unchecked_t cm_accepts;
30902 +atomic_unchecked_t cm_disconnects;
30903 +atomic_unchecked_t cm_closes;
30904 +atomic_unchecked_t cm_connecteds;
30905 +atomic_unchecked_t cm_connect_reqs;
30906 +atomic_unchecked_t cm_rejects;
30907
30908
30909 /**
30910 @@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(
30911 cm_node->rem_mac);
30912
30913 add_hte_node(cm_core, cm_node);
30914 - atomic_inc(&cm_nodes_created);
30915 + atomic_inc_unchecked(&cm_nodes_created);
30916
30917 return cm_node;
30918 }
30919 @@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm
30920 }
30921
30922 atomic_dec(&cm_core->node_cnt);
30923 - atomic_inc(&cm_nodes_destroyed);
30924 + atomic_inc_unchecked(&cm_nodes_destroyed);
30925 nesqp = cm_node->nesqp;
30926 if (nesqp) {
30927 nesqp->cm_node = NULL;
30928 @@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm
30929
30930 static void drop_packet(struct sk_buff *skb)
30931 {
30932 - atomic_inc(&cm_accel_dropped_pkts);
30933 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
30934 dev_kfree_skb_any(skb);
30935 }
30936
30937 @@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm
30938
30939 int reset = 0; /* whether to send reset in case of err.. */
30940 int passive_state;
30941 - atomic_inc(&cm_resets_recvd);
30942 + atomic_inc_unchecked(&cm_resets_recvd);
30943 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
30944 " refcnt=%d\n", cm_node, cm_node->state,
30945 atomic_read(&cm_node->ref_count));
30946 @@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_conne
30947 rem_ref_cm_node(cm_node->cm_core, cm_node);
30948 return NULL;
30949 }
30950 - atomic_inc(&cm_loopbacks);
30951 + atomic_inc_unchecked(&cm_loopbacks);
30952 loopbackremotenode->loopbackpartner = cm_node;
30953 loopbackremotenode->tcp_cntxt.rcv_wscale =
30954 NES_CM_DEFAULT_RCV_WND_SCALE;
30955 @@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_c
30956 add_ref_cm_node(cm_node);
30957 } else if (cm_node->state == NES_CM_STATE_TSA) {
30958 rem_ref_cm_node(cm_core, cm_node);
30959 - atomic_inc(&cm_accel_dropped_pkts);
30960 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
30961 dev_kfree_skb_any(skb);
30962 break;
30963 }
30964 @@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct ne
30965
30966 if ((cm_id) && (cm_id->event_handler)) {
30967 if (issue_disconn) {
30968 - atomic_inc(&cm_disconnects);
30969 + atomic_inc_unchecked(&cm_disconnects);
30970 cm_event.event = IW_CM_EVENT_DISCONNECT;
30971 cm_event.status = disconn_status;
30972 cm_event.local_addr = cm_id->local_addr;
30973 @@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct ne
30974 }
30975
30976 if (issue_close) {
30977 - atomic_inc(&cm_closes);
30978 + atomic_inc_unchecked(&cm_closes);
30979 nes_disconnect(nesqp, 1);
30980
30981 cm_id->provider_data = nesqp;
30982 @@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
30983
30984 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
30985 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
30986 - atomic_inc(&cm_accepts);
30987 + atomic_inc_unchecked(&cm_accepts);
30988
30989 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
30990 atomic_read(&nesvnic->netdev->refcnt));
30991 @@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
30992
30993 struct nes_cm_core *cm_core;
30994
30995 - atomic_inc(&cm_rejects);
30996 + atomic_inc_unchecked(&cm_rejects);
30997 cm_node = (struct nes_cm_node *) cm_id->provider_data;
30998 loopback = cm_node->loopbackpartner;
30999 cm_core = cm_node->cm_core;
31000 @@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id,
31001 ntohl(cm_id->local_addr.sin_addr.s_addr),
31002 ntohs(cm_id->local_addr.sin_port));
31003
31004 - atomic_inc(&cm_connects);
31005 + atomic_inc_unchecked(&cm_connects);
31006 nesqp->active_conn = 1;
31007
31008 /* cache the cm_id in the qp */
31009 @@ -3195,7 +3195,7 @@ static void cm_event_connected(struct ne
31010 if (nesqp->destroyed) {
31011 return;
31012 }
31013 - atomic_inc(&cm_connecteds);
31014 + atomic_inc_unchecked(&cm_connecteds);
31015 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
31016 " local port 0x%04X. jiffies = %lu.\n",
31017 nesqp->hwqp.qp_id,
31018 @@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm
31019
31020 ret = cm_id->event_handler(cm_id, &cm_event);
31021 cm_id->add_ref(cm_id);
31022 - atomic_inc(&cm_closes);
31023 + atomic_inc_unchecked(&cm_closes);
31024 cm_event.event = IW_CM_EVENT_CLOSE;
31025 cm_event.status = IW_CM_EVENT_STATUS_OK;
31026 cm_event.provider_data = cm_id->provider_data;
31027 @@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_
31028 return;
31029 cm_id = cm_node->cm_id;
31030
31031 - atomic_inc(&cm_connect_reqs);
31032 + atomic_inc_unchecked(&cm_connect_reqs);
31033 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
31034 cm_node, cm_id, jiffies);
31035
31036 @@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct n
31037 return;
31038 cm_id = cm_node->cm_id;
31039
31040 - atomic_inc(&cm_connect_reqs);
31041 + atomic_inc_unchecked(&cm_connect_reqs);
31042 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
31043 cm_node, cm_id, jiffies);
31044
31045 diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes.h linux-2.6.32.45/drivers/infiniband/hw/nes/nes.h
31046 --- linux-2.6.32.45/drivers/infiniband/hw/nes/nes.h 2011-03-27 14:31:47.000000000 -0400
31047 +++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes.h 2011-05-04 17:56:28.000000000 -0400
31048 @@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
31049 extern unsigned int wqm_quanta;
31050 extern struct list_head nes_adapter_list;
31051
31052 -extern atomic_t cm_connects;
31053 -extern atomic_t cm_accepts;
31054 -extern atomic_t cm_disconnects;
31055 -extern atomic_t cm_closes;
31056 -extern atomic_t cm_connecteds;
31057 -extern atomic_t cm_connect_reqs;
31058 -extern atomic_t cm_rejects;
31059 -extern atomic_t mod_qp_timouts;
31060 -extern atomic_t qps_created;
31061 -extern atomic_t qps_destroyed;
31062 -extern atomic_t sw_qps_destroyed;
31063 +extern atomic_unchecked_t cm_connects;
31064 +extern atomic_unchecked_t cm_accepts;
31065 +extern atomic_unchecked_t cm_disconnects;
31066 +extern atomic_unchecked_t cm_closes;
31067 +extern atomic_unchecked_t cm_connecteds;
31068 +extern atomic_unchecked_t cm_connect_reqs;
31069 +extern atomic_unchecked_t cm_rejects;
31070 +extern atomic_unchecked_t mod_qp_timouts;
31071 +extern atomic_unchecked_t qps_created;
31072 +extern atomic_unchecked_t qps_destroyed;
31073 +extern atomic_unchecked_t sw_qps_destroyed;
31074 extern u32 mh_detected;
31075 extern u32 mh_pauses_sent;
31076 extern u32 cm_packets_sent;
31077 @@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
31078 extern u32 cm_listens_created;
31079 extern u32 cm_listens_destroyed;
31080 extern u32 cm_backlog_drops;
31081 -extern atomic_t cm_loopbacks;
31082 -extern atomic_t cm_nodes_created;
31083 -extern atomic_t cm_nodes_destroyed;
31084 -extern atomic_t cm_accel_dropped_pkts;
31085 -extern atomic_t cm_resets_recvd;
31086 +extern atomic_unchecked_t cm_loopbacks;
31087 +extern atomic_unchecked_t cm_nodes_created;
31088 +extern atomic_unchecked_t cm_nodes_destroyed;
31089 +extern atomic_unchecked_t cm_accel_dropped_pkts;
31090 +extern atomic_unchecked_t cm_resets_recvd;
31091
31092 extern u32 int_mod_timer_init;
31093 extern u32 int_mod_cq_depth_256;
31094 diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes_nic.c linux-2.6.32.45/drivers/infiniband/hw/nes/nes_nic.c
31095 --- linux-2.6.32.45/drivers/infiniband/hw/nes/nes_nic.c 2011-03-27 14:31:47.000000000 -0400
31096 +++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes_nic.c 2011-05-04 17:56:28.000000000 -0400
31097 @@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats
31098 target_stat_values[++index] = mh_detected;
31099 target_stat_values[++index] = mh_pauses_sent;
31100 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
31101 - target_stat_values[++index] = atomic_read(&cm_connects);
31102 - target_stat_values[++index] = atomic_read(&cm_accepts);
31103 - target_stat_values[++index] = atomic_read(&cm_disconnects);
31104 - target_stat_values[++index] = atomic_read(&cm_connecteds);
31105 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
31106 - target_stat_values[++index] = atomic_read(&cm_rejects);
31107 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
31108 - target_stat_values[++index] = atomic_read(&qps_created);
31109 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
31110 - target_stat_values[++index] = atomic_read(&qps_destroyed);
31111 - target_stat_values[++index] = atomic_read(&cm_closes);
31112 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
31113 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
31114 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
31115 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
31116 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
31117 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
31118 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
31119 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
31120 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
31121 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
31122 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
31123 target_stat_values[++index] = cm_packets_sent;
31124 target_stat_values[++index] = cm_packets_bounced;
31125 target_stat_values[++index] = cm_packets_created;
31126 @@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats
31127 target_stat_values[++index] = cm_listens_created;
31128 target_stat_values[++index] = cm_listens_destroyed;
31129 target_stat_values[++index] = cm_backlog_drops;
31130 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
31131 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
31132 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
31133 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
31134 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
31135 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
31136 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
31137 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
31138 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
31139 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
31140 target_stat_values[++index] = int_mod_timer_init;
31141 target_stat_values[++index] = int_mod_cq_depth_1;
31142 target_stat_values[++index] = int_mod_cq_depth_4;
31143 diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes_verbs.c linux-2.6.32.45/drivers/infiniband/hw/nes/nes_verbs.c
31144 --- linux-2.6.32.45/drivers/infiniband/hw/nes/nes_verbs.c 2011-03-27 14:31:47.000000000 -0400
31145 +++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes_verbs.c 2011-05-04 17:56:28.000000000 -0400
31146 @@ -45,9 +45,9 @@
31147
31148 #include <rdma/ib_umem.h>
31149
31150 -atomic_t mod_qp_timouts;
31151 -atomic_t qps_created;
31152 -atomic_t sw_qps_destroyed;
31153 +atomic_unchecked_t mod_qp_timouts;
31154 +atomic_unchecked_t qps_created;
31155 +atomic_unchecked_t sw_qps_destroyed;
31156
31157 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
31158
31159 @@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struc
31160 if (init_attr->create_flags)
31161 return ERR_PTR(-EINVAL);
31162
31163 - atomic_inc(&qps_created);
31164 + atomic_inc_unchecked(&qps_created);
31165 switch (init_attr->qp_type) {
31166 case IB_QPT_RC:
31167 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
31168 @@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *
31169 struct iw_cm_event cm_event;
31170 int ret;
31171
31172 - atomic_inc(&sw_qps_destroyed);
31173 + atomic_inc_unchecked(&sw_qps_destroyed);
31174 nesqp->destroyed = 1;
31175
31176 /* Blow away the connection if it exists. */
31177 diff -urNp linux-2.6.32.45/drivers/input/gameport/gameport.c linux-2.6.32.45/drivers/input/gameport/gameport.c
31178 --- linux-2.6.32.45/drivers/input/gameport/gameport.c 2011-03-27 14:31:47.000000000 -0400
31179 +++ linux-2.6.32.45/drivers/input/gameport/gameport.c 2011-05-04 17:56:28.000000000 -0400
31180 @@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
31181 */
31182 static void gameport_init_port(struct gameport *gameport)
31183 {
31184 - static atomic_t gameport_no = ATOMIC_INIT(0);
31185 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
31186
31187 __module_get(THIS_MODULE);
31188
31189 mutex_init(&gameport->drv_mutex);
31190 device_initialize(&gameport->dev);
31191 - dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
31192 + dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
31193 gameport->dev.bus = &gameport_bus;
31194 gameport->dev.release = gameport_release_port;
31195 if (gameport->parent)
31196 diff -urNp linux-2.6.32.45/drivers/input/input.c linux-2.6.32.45/drivers/input/input.c
31197 --- linux-2.6.32.45/drivers/input/input.c 2011-03-27 14:31:47.000000000 -0400
31198 +++ linux-2.6.32.45/drivers/input/input.c 2011-05-04 17:56:28.000000000 -0400
31199 @@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
31200 */
31201 int input_register_device(struct input_dev *dev)
31202 {
31203 - static atomic_t input_no = ATOMIC_INIT(0);
31204 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
31205 struct input_handler *handler;
31206 const char *path;
31207 int error;
31208 @@ -1585,7 +1585,7 @@ int input_register_device(struct input_d
31209 dev->setkeycode = input_default_setkeycode;
31210
31211 dev_set_name(&dev->dev, "input%ld",
31212 - (unsigned long) atomic_inc_return(&input_no) - 1);
31213 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
31214
31215 error = device_add(&dev->dev);
31216 if (error)
31217 diff -urNp linux-2.6.32.45/drivers/input/joystick/sidewinder.c linux-2.6.32.45/drivers/input/joystick/sidewinder.c
31218 --- linux-2.6.32.45/drivers/input/joystick/sidewinder.c 2011-03-27 14:31:47.000000000 -0400
31219 +++ linux-2.6.32.45/drivers/input/joystick/sidewinder.c 2011-05-18 20:09:36.000000000 -0400
31220 @@ -30,6 +30,7 @@
31221 #include <linux/kernel.h>
31222 #include <linux/module.h>
31223 #include <linux/slab.h>
31224 +#include <linux/sched.h>
31225 #include <linux/init.h>
31226 #include <linux/input.h>
31227 #include <linux/gameport.h>
31228 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
31229 unsigned char buf[SW_LENGTH];
31230 int i;
31231
31232 + pax_track_stack();
31233 +
31234 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
31235
31236 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
31237 diff -urNp linux-2.6.32.45/drivers/input/joystick/xpad.c linux-2.6.32.45/drivers/input/joystick/xpad.c
31238 --- linux-2.6.32.45/drivers/input/joystick/xpad.c 2011-03-27 14:31:47.000000000 -0400
31239 +++ linux-2.6.32.45/drivers/input/joystick/xpad.c 2011-05-04 17:56:28.000000000 -0400
31240 @@ -621,7 +621,7 @@ static void xpad_led_set(struct led_clas
31241
31242 static int xpad_led_probe(struct usb_xpad *xpad)
31243 {
31244 - static atomic_t led_seq = ATOMIC_INIT(0);
31245 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
31246 long led_no;
31247 struct xpad_led *led;
31248 struct led_classdev *led_cdev;
31249 @@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpa
31250 if (!led)
31251 return -ENOMEM;
31252
31253 - led_no = (long)atomic_inc_return(&led_seq) - 1;
31254 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
31255
31256 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
31257 led->xpad = xpad;
31258 diff -urNp linux-2.6.32.45/drivers/input/serio/serio.c linux-2.6.32.45/drivers/input/serio/serio.c
31259 --- linux-2.6.32.45/drivers/input/serio/serio.c 2011-03-27 14:31:47.000000000 -0400
31260 +++ linux-2.6.32.45/drivers/input/serio/serio.c 2011-05-04 17:56:28.000000000 -0400
31261 @@ -527,7 +527,7 @@ static void serio_release_port(struct de
31262 */
31263 static void serio_init_port(struct serio *serio)
31264 {
31265 - static atomic_t serio_no = ATOMIC_INIT(0);
31266 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
31267
31268 __module_get(THIS_MODULE);
31269
31270 @@ -536,7 +536,7 @@ static void serio_init_port(struct serio
31271 mutex_init(&serio->drv_mutex);
31272 device_initialize(&serio->dev);
31273 dev_set_name(&serio->dev, "serio%ld",
31274 - (long)atomic_inc_return(&serio_no) - 1);
31275 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
31276 serio->dev.bus = &serio_bus;
31277 serio->dev.release = serio_release_port;
31278 if (serio->parent) {
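gameport, input, xpad and serio all share the idiom touched above: a file-local atomic counter whose only purpose is to hand each registered instance a unique name. Overflow of such a counter is harmless, so it is switched to the _unchecked flavour and thereby exempted from PAX_REFCOUNT's overflow trap. Sketch with illustrative names:

#include <linux/device.h>
#include <asm/atomic.h>

static void example_init_port(struct device *dev)
{
        static atomic_unchecked_t port_no = ATOMIC_INIT(0);

        /* instance numbering starts at 0, hence the "- 1" after the increment */
        dev_set_name(dev, "export%ld",
                     (long)atomic_inc_return_unchecked(&port_no) - 1);
}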
31279 diff -urNp linux-2.6.32.45/drivers/isdn/gigaset/common.c linux-2.6.32.45/drivers/isdn/gigaset/common.c
31280 --- linux-2.6.32.45/drivers/isdn/gigaset/common.c 2011-03-27 14:31:47.000000000 -0400
31281 +++ linux-2.6.32.45/drivers/isdn/gigaset/common.c 2011-04-17 15:56:46.000000000 -0400
31282 @@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct
31283 cs->commands_pending = 0;
31284 cs->cur_at_seq = 0;
31285 cs->gotfwver = -1;
31286 - cs->open_count = 0;
31287 + local_set(&cs->open_count, 0);
31288 cs->dev = NULL;
31289 cs->tty = NULL;
31290 cs->tty_dev = NULL;
31291 diff -urNp linux-2.6.32.45/drivers/isdn/gigaset/gigaset.h linux-2.6.32.45/drivers/isdn/gigaset/gigaset.h
31292 --- linux-2.6.32.45/drivers/isdn/gigaset/gigaset.h 2011-03-27 14:31:47.000000000 -0400
31293 +++ linux-2.6.32.45/drivers/isdn/gigaset/gigaset.h 2011-04-17 15:56:46.000000000 -0400
31294 @@ -34,6 +34,7 @@
31295 #include <linux/tty_driver.h>
31296 #include <linux/list.h>
31297 #include <asm/atomic.h>
31298 +#include <asm/local.h>
31299
31300 #define GIG_VERSION {0,5,0,0}
31301 #define GIG_COMPAT {0,4,0,0}
31302 @@ -446,7 +447,7 @@ struct cardstate {
31303 spinlock_t cmdlock;
31304 unsigned curlen, cmdbytes;
31305
31306 - unsigned open_count;
31307 + local_t open_count;
31308 struct tty_struct *tty;
31309 struct tasklet_struct if_wake_tasklet;
31310 unsigned control_state;
31311 diff -urNp linux-2.6.32.45/drivers/isdn/gigaset/interface.c linux-2.6.32.45/drivers/isdn/gigaset/interface.c
31312 --- linux-2.6.32.45/drivers/isdn/gigaset/interface.c 2011-03-27 14:31:47.000000000 -0400
31313 +++ linux-2.6.32.45/drivers/isdn/gigaset/interface.c 2011-04-17 15:56:46.000000000 -0400
31314 @@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tt
31315 return -ERESTARTSYS; // FIXME -EINTR?
31316 tty->driver_data = cs;
31317
31318 - ++cs->open_count;
31319 -
31320 - if (cs->open_count == 1) {
31321 + if (local_inc_return(&cs->open_count) == 1) {
31322 spin_lock_irqsave(&cs->lock, flags);
31323 cs->tty = tty;
31324 spin_unlock_irqrestore(&cs->lock, flags);
31325 @@ -195,10 +193,10 @@ static void if_close(struct tty_struct *
31326
31327 if (!cs->connected)
31328 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
31329 - else if (!cs->open_count)
31330 + else if (!local_read(&cs->open_count))
31331 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31332 else {
31333 - if (!--cs->open_count) {
31334 + if (!local_dec_return(&cs->open_count)) {
31335 spin_lock_irqsave(&cs->lock, flags);
31336 cs->tty = NULL;
31337 spin_unlock_irqrestore(&cs->lock, flags);
31338 @@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *t
31339 if (!cs->connected) {
31340 gig_dbg(DEBUG_IF, "not connected");
31341 retval = -ENODEV;
31342 - } else if (!cs->open_count)
31343 + } else if (!local_read(&cs->open_count))
31344 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31345 else {
31346 retval = 0;
31347 @@ -361,7 +359,7 @@ static int if_write(struct tty_struct *t
31348 if (!cs->connected) {
31349 gig_dbg(DEBUG_IF, "not connected");
31350 retval = -ENODEV;
31351 - } else if (!cs->open_count)
31352 + } else if (!local_read(&cs->open_count))
31353 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31354 else if (cs->mstate != MS_LOCKED) {
31355 dev_warn(cs->dev, "can't write to unlocked device\n");
31356 @@ -395,7 +393,7 @@ static int if_write_room(struct tty_stru
31357 if (!cs->connected) {
31358 gig_dbg(DEBUG_IF, "not connected");
31359 retval = -ENODEV;
31360 - } else if (!cs->open_count)
31361 + } else if (!local_read(&cs->open_count))
31362 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31363 else if (cs->mstate != MS_LOCKED) {
31364 dev_warn(cs->dev, "can't write to unlocked device\n");
31365 @@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty
31366
31367 if (!cs->connected)
31368 gig_dbg(DEBUG_IF, "not connected");
31369 - else if (!cs->open_count)
31370 + else if (!local_read(&cs->open_count))
31371 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31372 else if (cs->mstate != MS_LOCKED)
31373 dev_warn(cs->dev, "can't write to unlocked device\n");
31374 @@ -453,7 +451,7 @@ static void if_throttle(struct tty_struc
31375
31376 if (!cs->connected)
31377 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
31378 - else if (!cs->open_count)
31379 + else if (!local_read(&cs->open_count))
31380 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31381 else {
31382 //FIXME
31383 @@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_str
31384
31385 if (!cs->connected)
31386 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
31387 - else if (!cs->open_count)
31388 + else if (!local_read(&cs->open_count))
31389 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31390 else {
31391 //FIXME
31392 @@ -510,7 +508,7 @@ static void if_set_termios(struct tty_st
31393 goto out;
31394 }
31395
31396 - if (!cs->open_count) {
31397 + if (!local_read(&cs->open_count)) {
31398 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31399 goto out;
31400 }
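The open_count rework above replaces bare ++/-- on a plain unsigned field with local_t operations, so the update and the "first open" / "last close" decisions become single read-modify-write steps rather than separate modify-then-test sequences. A stripped-down sketch of the same shape, with a hypothetical device type:

#include <asm/local.h>

struct example_dev {
        local_t open_count;
};

static int example_open(struct example_dev *d)
{
        if (local_inc_return(&d->open_count) == 1) {
                /* first opener: bring the device up */
        }
        return 0;
}

static void example_close(struct example_dev *d)
{
        if (!local_dec_return(&d->open_count)) {
                /* last closer: tear the device down */
        }
}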
31401 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/avm/b1.c linux-2.6.32.45/drivers/isdn/hardware/avm/b1.c
31402 --- linux-2.6.32.45/drivers/isdn/hardware/avm/b1.c 2011-03-27 14:31:47.000000000 -0400
31403 +++ linux-2.6.32.45/drivers/isdn/hardware/avm/b1.c 2011-04-17 15:56:46.000000000 -0400
31404 @@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capilo
31405 }
31406 if (left) {
31407 if (t4file->user) {
31408 - if (copy_from_user(buf, dp, left))
31409 + if (left > sizeof buf || copy_from_user(buf, dp, left))
31410 return -EFAULT;
31411 } else {
31412 memcpy(buf, dp, left);
31413 @@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capilo
31414 }
31415 if (left) {
31416 if (config->user) {
31417 - if (copy_from_user(buf, dp, left))
31418 + if (left > sizeof buf || copy_from_user(buf, dp, left))
31419 return -EFAULT;
31420 } else {
31421 memcpy(buf, dp, left);
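Both firmware-download paths above copy a user-supplied chunk into a fixed-size on-stack buffer; the added "left > sizeof buf" test rejects oversized requests before copy_from_user() can run past the end of the buffer. The icn driver gets the identical fix later in this patch. Generic form of the check, with illustrative names:

#include <linux/errno.h>
#include <linux/uaccess.h>

static int example_load_chunk(const void __user *src, size_t left)
{
        unsigned char buf[256];

        if (left > sizeof(buf) || copy_from_user(buf, src, left))
                return -EFAULT;

        /* ... hand buf[0..left) on to the hardware ... */
        return 0;
}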
31422 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/capidtmf.c linux-2.6.32.45/drivers/isdn/hardware/eicon/capidtmf.c
31423 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/capidtmf.c 2011-03-27 14:31:47.000000000 -0400
31424 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/capidtmf.c 2011-05-16 21:46:57.000000000 -0400
31425 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
31426 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
31427 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
31428
31429 + pax_track_stack();
31430
31431 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
31432 {
31433 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/capifunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/capifunc.c
31434 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/capifunc.c 2011-03-27 14:31:47.000000000 -0400
31435 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/capifunc.c 2011-05-16 21:46:57.000000000 -0400
31436 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
31437 IDI_SYNC_REQ req;
31438 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31439
31440 + pax_track_stack();
31441 +
31442 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31443
31444 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31445 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/diddfunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/diddfunc.c
31446 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/diddfunc.c 2011-03-27 14:31:47.000000000 -0400
31447 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/diddfunc.c 2011-05-16 21:46:57.000000000 -0400
31448 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
31449 IDI_SYNC_REQ req;
31450 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31451
31452 + pax_track_stack();
31453 +
31454 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31455
31456 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31457 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/divasfunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/divasfunc.c
31458 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/divasfunc.c 2011-03-27 14:31:47.000000000 -0400
31459 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/divasfunc.c 2011-05-16 21:46:57.000000000 -0400
31460 @@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_di
31461 IDI_SYNC_REQ req;
31462 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31463
31464 + pax_track_stack();
31465 +
31466 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31467
31468 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31469 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/divasync.h linux-2.6.32.45/drivers/isdn/hardware/eicon/divasync.h
31470 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/divasync.h 2011-03-27 14:31:47.000000000 -0400
31471 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/divasync.h 2011-08-05 20:33:55.000000000 -0400
31472 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
31473 } diva_didd_add_adapter_t;
31474 typedef struct _diva_didd_remove_adapter {
31475 IDI_CALL p_request;
31476 -} diva_didd_remove_adapter_t;
31477 +} __no_const diva_didd_remove_adapter_t;
31478 typedef struct _diva_didd_read_adapter_array {
31479 void * buffer;
31480 dword length;
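__no_const is the escape hatch for this patch's constification of function-pointer structures: types made up purely of function pointers are normally forced read-only, and the annotation marks the few, such as the adapter interface above and xdi_adapter.h below, that genuinely have to be written at run time. Sketch only, with illustrative names:

/* A callback table that must stay writable is tagged __no_const so the
 * constify pass leaves it alone. */
typedef struct _example_adapter_interface {
        int (*cleanup_proc)(void *adapter);
        int (*cmd_proc)(void *adapter, void *cmd);
} __no_const example_adapter_interface_t;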
31481 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/idifunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/idifunc.c
31482 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/idifunc.c 2011-03-27 14:31:47.000000000 -0400
31483 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/idifunc.c 2011-05-16 21:46:57.000000000 -0400
31484 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
31485 IDI_SYNC_REQ req;
31486 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31487
31488 + pax_track_stack();
31489 +
31490 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31491
31492 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31493 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/message.c linux-2.6.32.45/drivers/isdn/hardware/eicon/message.c
31494 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/message.c 2011-03-27 14:31:47.000000000 -0400
31495 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/message.c 2011-05-16 21:46:57.000000000 -0400
31496 @@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
31497 dword d;
31498 word w;
31499
31500 + pax_track_stack();
31501 +
31502 a = plci->adapter;
31503 Id = ((word)plci->Id<<8)|a->Id;
31504 PUT_WORD(&SS_Ind[4],0x0000);
31505 @@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE
31506 word j, n, w;
31507 dword d;
31508
31509 + pax_track_stack();
31510 +
31511
31512 for(i=0;i<8;i++) bp_parms[i].length = 0;
31513 for(i=0;i<2;i++) global_config[i].length = 0;
31514 @@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARS
31515 const byte llc3[] = {4,3,2,2,6,6,0};
31516 const byte header[] = {0,2,3,3,0,0,0};
31517
31518 + pax_track_stack();
31519 +
31520 for(i=0;i<8;i++) bp_parms[i].length = 0;
31521 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
31522 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
31523 @@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI
31524 word appl_number_group_type[MAX_APPL];
31525 PLCI *auxplci;
31526
31527 + pax_track_stack();
31528 +
31529 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
31530
31531 if(!a->group_optimization_enabled)
31532 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/mntfunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/mntfunc.c
31533 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/mntfunc.c 2011-03-27 14:31:47.000000000 -0400
31534 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/mntfunc.c 2011-05-16 21:46:57.000000000 -0400
31535 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
31536 IDI_SYNC_REQ req;
31537 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31538
31539 + pax_track_stack();
31540 +
31541 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31542
31543 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31544 diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/xdi_adapter.h linux-2.6.32.45/drivers/isdn/hardware/eicon/xdi_adapter.h
31545 --- linux-2.6.32.45/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-03-27 14:31:47.000000000 -0400
31546 +++ linux-2.6.32.45/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-08-05 20:33:55.000000000 -0400
31547 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
31548 typedef struct _diva_os_idi_adapter_interface {
31549 diva_init_card_proc_t cleanup_adapter_proc;
31550 diva_cmd_card_proc_t cmd_proc;
31551 -} diva_os_idi_adapter_interface_t;
31552 +} __no_const diva_os_idi_adapter_interface_t;
31553
31554 typedef struct _diva_os_xdi_adapter {
31555 struct list_head link;
31556 diff -urNp linux-2.6.32.45/drivers/isdn/i4l/isdn_common.c linux-2.6.32.45/drivers/isdn/i4l/isdn_common.c
31557 --- linux-2.6.32.45/drivers/isdn/i4l/isdn_common.c 2011-03-27 14:31:47.000000000 -0400
31558 +++ linux-2.6.32.45/drivers/isdn/i4l/isdn_common.c 2011-05-16 21:46:57.000000000 -0400
31559 @@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct f
31560 } iocpar;
31561 void __user *argp = (void __user *)arg;
31562
31563 + pax_track_stack();
31564 +
31565 #define name iocpar.name
31566 #define bname iocpar.bname
31567 #define iocts iocpar.iocts
31568 diff -urNp linux-2.6.32.45/drivers/isdn/icn/icn.c linux-2.6.32.45/drivers/isdn/icn/icn.c
31569 --- linux-2.6.32.45/drivers/isdn/icn/icn.c 2011-03-27 14:31:47.000000000 -0400
31570 +++ linux-2.6.32.45/drivers/isdn/icn/icn.c 2011-04-17 15:56:46.000000000 -0400
31571 @@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len
31572 if (count > len)
31573 count = len;
31574 if (user) {
31575 - if (copy_from_user(msg, buf, count))
31576 + if (count > sizeof msg || copy_from_user(msg, buf, count))
31577 return -EFAULT;
31578 } else
31579 memcpy(msg, buf, count);
31580 diff -urNp linux-2.6.32.45/drivers/isdn/mISDN/socket.c linux-2.6.32.45/drivers/isdn/mISDN/socket.c
31581 --- linux-2.6.32.45/drivers/isdn/mISDN/socket.c 2011-03-27 14:31:47.000000000 -0400
31582 +++ linux-2.6.32.45/drivers/isdn/mISDN/socket.c 2011-04-17 15:56:46.000000000 -0400
31583 @@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, uns
31584 if (dev) {
31585 struct mISDN_devinfo di;
31586
31587 + memset(&di, 0, sizeof(di));
31588 di.id = dev->id;
31589 di.Dprotocols = dev->Dprotocols;
31590 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
31591 @@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, uns
31592 if (dev) {
31593 struct mISDN_devinfo di;
31594
31595 + memset(&di, 0, sizeof(di));
31596 di.id = dev->id;
31597 di.Dprotocols = dev->Dprotocols;
31598 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
31599 diff -urNp linux-2.6.32.45/drivers/isdn/sc/interrupt.c linux-2.6.32.45/drivers/isdn/sc/interrupt.c
31600 --- linux-2.6.32.45/drivers/isdn/sc/interrupt.c 2011-03-27 14:31:47.000000000 -0400
31601 +++ linux-2.6.32.45/drivers/isdn/sc/interrupt.c 2011-04-17 15:56:46.000000000 -0400
31602 @@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy,
31603 }
31604 else if(callid>=0x0000 && callid<=0x7FFF)
31605 {
31606 + int len;
31607 +
31608 pr_debug("%s: Got Incoming Call\n",
31609 sc_adapter[card]->devicename);
31610 - strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
31611 - strcpy(setup.eazmsn,
31612 - sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
31613 + len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
31614 + sizeof(setup.phone));
31615 + if (len >= sizeof(setup.phone))
31616 + continue;
31617 + len = strlcpy(setup.eazmsn,
31618 + sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
31619 + sizeof(setup.eazmsn));
31620 + if (len >= sizeof(setup.eazmsn))
31621 + continue;
31622 setup.si1 = 7;
31623 setup.si2 = 0;
31624 setup.plan = 0;
31625 @@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy,
31626 * Handle a GetMyNumber Rsp
31627 */
31628 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
31629 - strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
31630 + strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
31631 + rcvmsg.msg_data.byte_array,
31632 + sizeof(rcvmsg.msg_data.byte_array));
31633 continue;
31634 }
31635
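The interrupt handler above used strcpy() into fixed-size fields; the replacement strlcpy() calls bound the copy and, because strlcpy() returns the length of the source string, let the handler detect truncation and drop the message instead of overflowing the destination. Minimal form of that check, with illustrative names:

#include <linux/errno.h>
#include <linux/string.h>

static int example_copy_number(char *dst, size_t dst_size, const char *src)
{
        if (strlcpy(dst, src, dst_size) >= dst_size)
                return -EINVAL; /* source did not fit; caller drops the message */
        return 0;
}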
31636 diff -urNp linux-2.6.32.45/drivers/lguest/core.c linux-2.6.32.45/drivers/lguest/core.c
31637 --- linux-2.6.32.45/drivers/lguest/core.c 2011-03-27 14:31:47.000000000 -0400
31638 +++ linux-2.6.32.45/drivers/lguest/core.c 2011-04-17 15:56:46.000000000 -0400
31639 @@ -91,9 +91,17 @@ static __init int map_switcher(void)
31640 * it's worked so far. The end address needs +1 because __get_vm_area
31641 * allocates an extra guard page, so we need space for that.
31642 */
31643 +
31644 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31645 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
31646 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
31647 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
31648 +#else
31649 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
31650 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
31651 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
31652 +#endif
31653 +
31654 if (!switcher_vma) {
31655 err = -ENOMEM;
31656 printk("lguest: could not map switcher pages high\n");
31657 @@ -118,7 +126,7 @@ static __init int map_switcher(void)
31658 * Now the Switcher is mapped at the right address, we can't fail!
31659 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
31660 */
31661 - memcpy(switcher_vma->addr, start_switcher_text,
31662 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
31663 end_switcher_text - start_switcher_text);
31664
31665 printk(KERN_INFO "lguest: mapped switcher at %p\n",
31666 diff -urNp linux-2.6.32.45/drivers/lguest/x86/core.c linux-2.6.32.45/drivers/lguest/x86/core.c
31667 --- linux-2.6.32.45/drivers/lguest/x86/core.c 2011-03-27 14:31:47.000000000 -0400
31668 +++ linux-2.6.32.45/drivers/lguest/x86/core.c 2011-04-17 15:56:46.000000000 -0400
31669 @@ -59,7 +59,7 @@ static struct {
31670 /* Offset from where switcher.S was compiled to where we've copied it */
31671 static unsigned long switcher_offset(void)
31672 {
31673 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
31674 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
31675 }
31676
31677 /* This cpu's struct lguest_pages. */
31678 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
31679 * These copies are pretty cheap, so we do them unconditionally: */
31680 /* Save the current Host top-level page directory.
31681 */
31682 +
31683 +#ifdef CONFIG_PAX_PER_CPU_PGD
31684 + pages->state.host_cr3 = read_cr3();
31685 +#else
31686 pages->state.host_cr3 = __pa(current->mm->pgd);
31687 +#endif
31688 +
31689 /*
31690 * Set up the Guest's page tables to see this CPU's pages (and no
31691 * other CPU's pages).
31692 @@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
31693 * compiled-in switcher code and the high-mapped copy we just made.
31694 */
31695 for (i = 0; i < IDT_ENTRIES; i++)
31696 - default_idt_entries[i] += switcher_offset();
31697 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
31698
31699 /*
31700 * Set up the Switcher's per-cpu areas.
31701 @@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
31702 * it will be undisturbed when we switch. To change %cs and jump we
31703 * need this structure to feed to Intel's "lcall" instruction.
31704 */
31705 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
31706 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
31707 lguest_entry.segment = LGUEST_CS;
31708
31709 /*
31710 diff -urNp linux-2.6.32.45/drivers/lguest/x86/switcher_32.S linux-2.6.32.45/drivers/lguest/x86/switcher_32.S
31711 --- linux-2.6.32.45/drivers/lguest/x86/switcher_32.S 2011-03-27 14:31:47.000000000 -0400
31712 +++ linux-2.6.32.45/drivers/lguest/x86/switcher_32.S 2011-04-17 15:56:46.000000000 -0400
31713 @@ -87,6 +87,7 @@
31714 #include <asm/page.h>
31715 #include <asm/segment.h>
31716 #include <asm/lguest.h>
31717 +#include <asm/processor-flags.h>
31718
31719 // We mark the start of the code to copy
31720 // It's placed in .text tho it's never run here
31721 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
31722 // Changes type when we load it: damn Intel!
31723 // For after we switch over our page tables
31724 // That entry will be read-only: we'd crash.
31725 +
31726 +#ifdef CONFIG_PAX_KERNEXEC
31727 + mov %cr0, %edx
31728 + xor $X86_CR0_WP, %edx
31729 + mov %edx, %cr0
31730 +#endif
31731 +
31732 movl $(GDT_ENTRY_TSS*8), %edx
31733 ltr %dx
31734
31735 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
31736 // Let's clear it again for our return.
31737 // The GDT descriptor of the Host
31738 // Points to the table after two "size" bytes
31739 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
31740 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
31741 // Clear "used" from type field (byte 5, bit 2)
31742 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
31743 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
31744 +
31745 +#ifdef CONFIG_PAX_KERNEXEC
31746 + mov %cr0, %eax
31747 + xor $X86_CR0_WP, %eax
31748 + mov %eax, %cr0
31749 +#endif
31750
31751 // Once our page table's switched, the Guest is live!
31752 // The Host fades as we run this final step.
31753 @@ -295,13 +309,12 @@ deliver_to_host:
31754 // I consulted gcc, and it gave
31755 // These instructions, which I gladly credit:
31756 leal (%edx,%ebx,8), %eax
31757 - movzwl (%eax),%edx
31758 - movl 4(%eax), %eax
31759 - xorw %ax, %ax
31760 - orl %eax, %edx
31761 + movl 4(%eax), %edx
31762 + movw (%eax), %dx
31763 // Now the address of the handler's in %edx
31764 // We call it now: its "iret" drops us home.
31765 - jmp *%edx
31766 + ljmp $__KERNEL_CS, $1f
31767 +1: jmp *%edx
31768
31769 // Every interrupt can come to us here
31770 // But we must truly tell each apart.
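Under CONFIG_PAX_KERNEXEC the GDT touched by the switcher's ltr and by the later busy-bit andb sits in read-only memory, so the added assembly clears CR0.WP around those writes and restores it afterwards. The same dance rendered as a C sketch, assuming interrupts and preemption are already off as they are in the switcher:

#include <asm/system.h>
#include <asm/processor-flags.h>

static void example_write_protected_byte(unsigned char *p, unsigned char val)
{
        unsigned long cr0 = read_cr0();

        write_cr0(cr0 & ~X86_CR0_WP);   /* temporarily allow writes to RO pages */
        *p = val;                       /* the one privileged write */
        write_cr0(cr0);                 /* restore write protection */
}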
31771 diff -urNp linux-2.6.32.45/drivers/macintosh/via-pmu-backlight.c linux-2.6.32.45/drivers/macintosh/via-pmu-backlight.c
31772 --- linux-2.6.32.45/drivers/macintosh/via-pmu-backlight.c 2011-03-27 14:31:47.000000000 -0400
31773 +++ linux-2.6.32.45/drivers/macintosh/via-pmu-backlight.c 2011-04-17 15:56:46.000000000 -0400
31774 @@ -15,7 +15,7 @@
31775
31776 #define MAX_PMU_LEVEL 0xFF
31777
31778 -static struct backlight_ops pmu_backlight_data;
31779 +static const struct backlight_ops pmu_backlight_data;
31780 static DEFINE_SPINLOCK(pmu_backlight_lock);
31781 static int sleeping, uses_pmu_bl;
31782 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
31783 @@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(
31784 return bd->props.brightness;
31785 }
31786
31787 -static struct backlight_ops pmu_backlight_data = {
31788 +static const struct backlight_ops pmu_backlight_data = {
31789 .get_brightness = pmu_backlight_get_brightness,
31790 .update_status = pmu_backlight_update_status,
31791
31792 diff -urNp linux-2.6.32.45/drivers/macintosh/via-pmu.c linux-2.6.32.45/drivers/macintosh/via-pmu.c
31793 --- linux-2.6.32.45/drivers/macintosh/via-pmu.c 2011-03-27 14:31:47.000000000 -0400
31794 +++ linux-2.6.32.45/drivers/macintosh/via-pmu.c 2011-04-17 15:56:46.000000000 -0400
31795 @@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state
31796 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
31797 }
31798
31799 -static struct platform_suspend_ops pmu_pm_ops = {
31800 +static const struct platform_suspend_ops pmu_pm_ops = {
31801 .enter = powerbook_sleep,
31802 .valid = pmu_sleep_valid,
31803 };
31804 diff -urNp linux-2.6.32.45/drivers/md/dm.c linux-2.6.32.45/drivers/md/dm.c
31805 --- linux-2.6.32.45/drivers/md/dm.c 2011-08-09 18:35:29.000000000 -0400
31806 +++ linux-2.6.32.45/drivers/md/dm.c 2011-08-09 18:33:59.000000000 -0400
31807 @@ -165,9 +165,9 @@ struct mapped_device {
31808 /*
31809 * Event handling.
31810 */
31811 - atomic_t event_nr;
31812 + atomic_unchecked_t event_nr;
31813 wait_queue_head_t eventq;
31814 - atomic_t uevent_seq;
31815 + atomic_unchecked_t uevent_seq;
31816 struct list_head uevent_list;
31817 spinlock_t uevent_lock; /* Protect access to uevent_list */
31818
31819 @@ -1776,8 +1776,8 @@ static struct mapped_device *alloc_dev(i
31820 rwlock_init(&md->map_lock);
31821 atomic_set(&md->holders, 1);
31822 atomic_set(&md->open_count, 0);
31823 - atomic_set(&md->event_nr, 0);
31824 - atomic_set(&md->uevent_seq, 0);
31825 + atomic_set_unchecked(&md->event_nr, 0);
31826 + atomic_set_unchecked(&md->uevent_seq, 0);
31827 INIT_LIST_HEAD(&md->uevent_list);
31828 spin_lock_init(&md->uevent_lock);
31829
31830 @@ -1927,7 +1927,7 @@ static void event_callback(void *context
31831
31832 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
31833
31834 - atomic_inc(&md->event_nr);
31835 + atomic_inc_unchecked(&md->event_nr);
31836 wake_up(&md->eventq);
31837 }
31838
31839 @@ -2562,18 +2562,18 @@ void dm_kobject_uevent(struct mapped_dev
31840
31841 uint32_t dm_next_uevent_seq(struct mapped_device *md)
31842 {
31843 - return atomic_add_return(1, &md->uevent_seq);
31844 + return atomic_add_return_unchecked(1, &md->uevent_seq);
31845 }
31846
31847 uint32_t dm_get_event_nr(struct mapped_device *md)
31848 {
31849 - return atomic_read(&md->event_nr);
31850 + return atomic_read_unchecked(&md->event_nr);
31851 }
31852
31853 int dm_wait_event(struct mapped_device *md, int event_nr)
31854 {
31855 return wait_event_interruptible(md->eventq,
31856 - (event_nr != atomic_read(&md->event_nr)));
31857 + (event_nr != atomic_read_unchecked(&md->event_nr)));
31858 }
31859
31860 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
31861 diff -urNp linux-2.6.32.45/drivers/md/dm-ioctl.c linux-2.6.32.45/drivers/md/dm-ioctl.c
31862 --- linux-2.6.32.45/drivers/md/dm-ioctl.c 2011-03-27 14:31:47.000000000 -0400
31863 +++ linux-2.6.32.45/drivers/md/dm-ioctl.c 2011-04-17 15:56:46.000000000 -0400
31864 @@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, str
31865 cmd == DM_LIST_VERSIONS_CMD)
31866 return 0;
31867
31868 - if ((cmd == DM_DEV_CREATE_CMD)) {
31869 + if (cmd == DM_DEV_CREATE_CMD) {
31870 if (!*param->name) {
31871 DMWARN("name not supplied when creating device");
31872 return -EINVAL;
31873 diff -urNp linux-2.6.32.45/drivers/md/dm-raid1.c linux-2.6.32.45/drivers/md/dm-raid1.c
31874 --- linux-2.6.32.45/drivers/md/dm-raid1.c 2011-03-27 14:31:47.000000000 -0400
31875 +++ linux-2.6.32.45/drivers/md/dm-raid1.c 2011-05-04 17:56:28.000000000 -0400
31876 @@ -41,7 +41,7 @@ enum dm_raid1_error {
31877
31878 struct mirror {
31879 struct mirror_set *ms;
31880 - atomic_t error_count;
31881 + atomic_unchecked_t error_count;
31882 unsigned long error_type;
31883 struct dm_dev *dev;
31884 sector_t offset;
31885 @@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m
31886 * simple way to tell if a device has encountered
31887 * errors.
31888 */
31889 - atomic_inc(&m->error_count);
31890 + atomic_inc_unchecked(&m->error_count);
31891
31892 if (test_and_set_bit(error_type, &m->error_type))
31893 return;
31894 @@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m
31895 }
31896
31897 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
31898 - if (!atomic_read(&new->error_count)) {
31899 + if (!atomic_read_unchecked(&new->error_count)) {
31900 set_default_mirror(new);
31901 break;
31902 }
31903 @@ -363,7 +363,7 @@ static struct mirror *choose_mirror(stru
31904 struct mirror *m = get_default_mirror(ms);
31905
31906 do {
31907 - if (likely(!atomic_read(&m->error_count)))
31908 + if (likely(!atomic_read_unchecked(&m->error_count)))
31909 return m;
31910
31911 if (m-- == ms->mirror)
31912 @@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
31913 {
31914 struct mirror *default_mirror = get_default_mirror(m->ms);
31915
31916 - return !atomic_read(&default_mirror->error_count);
31917 + return !atomic_read_unchecked(&default_mirror->error_count);
31918 }
31919
31920 static int mirror_available(struct mirror_set *ms, struct bio *bio)
31921 @@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *
31922 */
31923 if (likely(region_in_sync(ms, region, 1)))
31924 m = choose_mirror(ms, bio->bi_sector);
31925 - else if (m && atomic_read(&m->error_count))
31926 + else if (m && atomic_read_unchecked(&m->error_count))
31927 m = NULL;
31928
31929 if (likely(m))
31930 @@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set
31931 }
31932
31933 ms->mirror[mirror].ms = ms;
31934 - atomic_set(&(ms->mirror[mirror].error_count), 0);
31935 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
31936 ms->mirror[mirror].error_type = 0;
31937 ms->mirror[mirror].offset = offset;
31938
31939 @@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_targ
31940 */
31941 static char device_status_char(struct mirror *m)
31942 {
31943 - if (!atomic_read(&(m->error_count)))
31944 + if (!atomic_read_unchecked(&(m->error_count)))
31945 return 'A';
31946
31947 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
31948 diff -urNp linux-2.6.32.45/drivers/md/dm-stripe.c linux-2.6.32.45/drivers/md/dm-stripe.c
31949 --- linux-2.6.32.45/drivers/md/dm-stripe.c 2011-03-27 14:31:47.000000000 -0400
31950 +++ linux-2.6.32.45/drivers/md/dm-stripe.c 2011-05-04 17:56:28.000000000 -0400
31951 @@ -20,7 +20,7 @@ struct stripe {
31952 struct dm_dev *dev;
31953 sector_t physical_start;
31954
31955 - atomic_t error_count;
31956 + atomic_unchecked_t error_count;
31957 };
31958
31959 struct stripe_c {
31960 @@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *
31961 kfree(sc);
31962 return r;
31963 }
31964 - atomic_set(&(sc->stripe[i].error_count), 0);
31965 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
31966 }
31967
31968 ti->private = sc;
31969 @@ -257,7 +257,7 @@ static int stripe_status(struct dm_targe
31970 DMEMIT("%d ", sc->stripes);
31971 for (i = 0; i < sc->stripes; i++) {
31972 DMEMIT("%s ", sc->stripe[i].dev->name);
31973 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
31974 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
31975 'D' : 'A';
31976 }
31977 buffer[i] = '\0';
31978 @@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_targe
31979 */
31980 for (i = 0; i < sc->stripes; i++)
31981 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
31982 - atomic_inc(&(sc->stripe[i].error_count));
31983 - if (atomic_read(&(sc->stripe[i].error_count)) <
31984 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
31985 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
31986 DM_IO_ERROR_THRESHOLD)
31987 queue_work(kstriped, &sc->kstriped_ws);
31988 }
31989 diff -urNp linux-2.6.32.45/drivers/md/dm-sysfs.c linux-2.6.32.45/drivers/md/dm-sysfs.c
31990 --- linux-2.6.32.45/drivers/md/dm-sysfs.c 2011-03-27 14:31:47.000000000 -0400
31991 +++ linux-2.6.32.45/drivers/md/dm-sysfs.c 2011-04-17 15:56:46.000000000 -0400
31992 @@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
31993 NULL,
31994 };
31995
31996 -static struct sysfs_ops dm_sysfs_ops = {
31997 +static const struct sysfs_ops dm_sysfs_ops = {
31998 .show = dm_attr_show,
31999 };
32000
32001 diff -urNp linux-2.6.32.45/drivers/md/dm-table.c linux-2.6.32.45/drivers/md/dm-table.c
32002 --- linux-2.6.32.45/drivers/md/dm-table.c 2011-06-25 12:55:34.000000000 -0400
32003 +++ linux-2.6.32.45/drivers/md/dm-table.c 2011-06-25 12:56:37.000000000 -0400
32004 @@ -376,7 +376,7 @@ static int device_area_is_invalid(struct
32005 if (!dev_size)
32006 return 0;
32007
32008 - if ((start >= dev_size) || (start + len > dev_size)) {
32009 + if ((start >= dev_size) || (len > dev_size - start)) {
32010 DMWARN("%s: %s too small for target: "
32011 "start=%llu, len=%llu, dev_size=%llu",
32012 dm_device_name(ti->table->md), bdevname(bdev, b),
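The dm-table.c hunk above replaces an additive bounds check with a subtractive one because start + len is computed in 64-bit sector units supplied by the table and can wrap; once start < dev_size has been established, dev_size - start cannot underflow, so the comparison is overflow-free. A stand-alone sketch of the difference (the function names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Overflow-prone form: start + len may wrap around 2^64. */
    static int invalid_unsafe(uint64_t start, uint64_t len, uint64_t dev_size)
    {
            return (start >= dev_size) || (start + len > dev_size);
    }

    /* Overflow-safe form used by the patch: subtract instead of add. */
    static int invalid_safe(uint64_t start, uint64_t len, uint64_t dev_size)
    {
            return (start >= dev_size) || (len > dev_size - start);
    }

    int main(void)
    {
            uint64_t dev_size = 1000, start = 10, len = UINT64_MAX - 4;

            /* start + len wraps to 5, so the unsafe form wrongly accepts it. */
            printf("unsafe says %s\n", invalid_unsafe(start, len, dev_size) ? "invalid" : "valid");
            printf("safe   says %s\n", invalid_safe(start, len, dev_size) ? "invalid" : "valid");
            return 0;
    }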
32013 diff -urNp linux-2.6.32.45/drivers/md/md.c linux-2.6.32.45/drivers/md/md.c
32014 --- linux-2.6.32.45/drivers/md/md.c 2011-07-13 17:23:04.000000000 -0400
32015 +++ linux-2.6.32.45/drivers/md/md.c 2011-07-13 17:23:18.000000000 -0400
32016 @@ -153,10 +153,10 @@ static int start_readonly;
32017 * start build, activate spare
32018 */
32019 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
32020 -static atomic_t md_event_count;
32021 +static atomic_unchecked_t md_event_count;
32022 void md_new_event(mddev_t *mddev)
32023 {
32024 - atomic_inc(&md_event_count);
32025 + atomic_inc_unchecked(&md_event_count);
32026 wake_up(&md_event_waiters);
32027 }
32028 EXPORT_SYMBOL_GPL(md_new_event);
32029 @@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
32030 */
32031 static void md_new_event_inintr(mddev_t *mddev)
32032 {
32033 - atomic_inc(&md_event_count);
32034 + atomic_inc_unchecked(&md_event_count);
32035 wake_up(&md_event_waiters);
32036 }
32037
32038 @@ -1218,7 +1218,7 @@ static int super_1_load(mdk_rdev_t *rdev
32039
32040 rdev->preferred_minor = 0xffff;
32041 rdev->data_offset = le64_to_cpu(sb->data_offset);
32042 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
32043 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
32044
32045 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
32046 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
32047 @@ -1392,7 +1392,7 @@ static void super_1_sync(mddev_t *mddev,
32048 else
32049 sb->resync_offset = cpu_to_le64(0);
32050
32051 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
32052 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
32053
32054 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
32055 sb->size = cpu_to_le64(mddev->dev_sectors);
32056 @@ -2214,7 +2214,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
32057 static ssize_t
32058 errors_show(mdk_rdev_t *rdev, char *page)
32059 {
32060 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
32061 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
32062 }
32063
32064 static ssize_t
32065 @@ -2223,7 +2223,7 @@ errors_store(mdk_rdev_t *rdev, const cha
32066 char *e;
32067 unsigned long n = simple_strtoul(buf, &e, 10);
32068 if (*buf && (*e == 0 || *e == '\n')) {
32069 - atomic_set(&rdev->corrected_errors, n);
32070 + atomic_set_unchecked(&rdev->corrected_errors, n);
32071 return len;
32072 }
32073 return -EINVAL;
32074 @@ -2517,7 +2517,7 @@ static void rdev_free(struct kobject *ko
32075 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
32076 kfree(rdev);
32077 }
32078 -static struct sysfs_ops rdev_sysfs_ops = {
32079 +static const struct sysfs_ops rdev_sysfs_ops = {
32080 .show = rdev_attr_show,
32081 .store = rdev_attr_store,
32082 };
32083 @@ -2566,8 +2566,8 @@ static mdk_rdev_t *md_import_device(dev_
32084 rdev->data_offset = 0;
32085 rdev->sb_events = 0;
32086 atomic_set(&rdev->nr_pending, 0);
32087 - atomic_set(&rdev->read_errors, 0);
32088 - atomic_set(&rdev->corrected_errors, 0);
32089 + atomic_set_unchecked(&rdev->read_errors, 0);
32090 + atomic_set_unchecked(&rdev->corrected_errors, 0);
32091
32092 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
32093 if (!size) {
32094 @@ -3887,7 +3887,7 @@ static void md_free(struct kobject *ko)
32095 kfree(mddev);
32096 }
32097
32098 -static struct sysfs_ops md_sysfs_ops = {
32099 +static const struct sysfs_ops md_sysfs_ops = {
32100 .show = md_attr_show,
32101 .store = md_attr_store,
32102 };
32103 @@ -4474,7 +4474,8 @@ out:
32104 err = 0;
32105 blk_integrity_unregister(disk);
32106 md_new_event(mddev);
32107 - sysfs_notify_dirent(mddev->sysfs_state);
32108 + if (mddev->sysfs_state)
32109 + sysfs_notify_dirent(mddev->sysfs_state);
32110 return err;
32111 }
32112
32113 @@ -5954,7 +5955,7 @@ static int md_seq_show(struct seq_file *
32114
32115 spin_unlock(&pers_lock);
32116 seq_printf(seq, "\n");
32117 - mi->event = atomic_read(&md_event_count);
32118 + mi->event = atomic_read_unchecked(&md_event_count);
32119 return 0;
32120 }
32121 if (v == (void*)2) {
32122 @@ -6043,7 +6044,7 @@ static int md_seq_show(struct seq_file *
32123 chunk_kb ? "KB" : "B");
32124 if (bitmap->file) {
32125 seq_printf(seq, ", file: ");
32126 - seq_path(seq, &bitmap->file->f_path, " \t\n");
32127 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
32128 }
32129
32130 seq_printf(seq, "\n");
32131 @@ -6077,7 +6078,7 @@ static int md_seq_open(struct inode *ino
32132 else {
32133 struct seq_file *p = file->private_data;
32134 p->private = mi;
32135 - mi->event = atomic_read(&md_event_count);
32136 + mi->event = atomic_read_unchecked(&md_event_count);
32137 }
32138 return error;
32139 }
32140 @@ -6093,7 +6094,7 @@ static unsigned int mdstat_poll(struct f
32141 /* always allow read */
32142 mask = POLLIN | POLLRDNORM;
32143
32144 - if (mi->event != atomic_read(&md_event_count))
32145 + if (mi->event != atomic_read_unchecked(&md_event_count))
32146 mask |= POLLERR | POLLPRI;
32147 return mask;
32148 }
32149 @@ -6137,7 +6138,7 @@ static int is_mddev_idle(mddev_t *mddev,
32150 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
32151 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
32152 (int)part_stat_read(&disk->part0, sectors[1]) -
32153 - atomic_read(&disk->sync_io);
32154 + atomic_read_unchecked(&disk->sync_io);
32155 /* sync IO will cause sync_io to increase before the disk_stats
32156 * as sync_io is counted when a request starts, and
32157 * disk_stats is counted when it completes.
32158 diff -urNp linux-2.6.32.45/drivers/md/md.h linux-2.6.32.45/drivers/md/md.h
32159 --- linux-2.6.32.45/drivers/md/md.h 2011-03-27 14:31:47.000000000 -0400
32160 +++ linux-2.6.32.45/drivers/md/md.h 2011-05-04 17:56:20.000000000 -0400
32161 @@ -94,10 +94,10 @@ struct mdk_rdev_s
32162 * only maintained for arrays that
32163 * support hot removal
32164 */
32165 - atomic_t read_errors; /* number of consecutive read errors that
32166 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
32167 * we have tried to ignore.
32168 */
32169 - atomic_t corrected_errors; /* number of corrected read errors,
32170 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
32171 * for reporting to userspace and storing
32172 * in superblock.
32173 */
32174 @@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_
32175
32176 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
32177 {
32178 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
32179 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
32180 }
32181
32182 struct mdk_personality
32183 diff -urNp linux-2.6.32.45/drivers/md/raid10.c linux-2.6.32.45/drivers/md/raid10.c
32184 --- linux-2.6.32.45/drivers/md/raid10.c 2011-03-27 14:31:47.000000000 -0400
32185 +++ linux-2.6.32.45/drivers/md/raid10.c 2011-05-04 17:56:28.000000000 -0400
32186 @@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bi
32187 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
32188 set_bit(R10BIO_Uptodate, &r10_bio->state);
32189 else {
32190 - atomic_add(r10_bio->sectors,
32191 + atomic_add_unchecked(r10_bio->sectors,
32192 &conf->mirrors[d].rdev->corrected_errors);
32193 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
32194 md_error(r10_bio->mddev,
32195 @@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
32196 test_bit(In_sync, &rdev->flags)) {
32197 atomic_inc(&rdev->nr_pending);
32198 rcu_read_unlock();
32199 - atomic_add(s, &rdev->corrected_errors);
32200 + atomic_add_unchecked(s, &rdev->corrected_errors);
32201 if (sync_page_io(rdev->bdev,
32202 r10_bio->devs[sl].addr +
32203 sect + rdev->data_offset,
32204 diff -urNp linux-2.6.32.45/drivers/md/raid1.c linux-2.6.32.45/drivers/md/raid1.c
32205 --- linux-2.6.32.45/drivers/md/raid1.c 2011-03-27 14:31:47.000000000 -0400
32206 +++ linux-2.6.32.45/drivers/md/raid1.c 2011-05-04 17:56:28.000000000 -0400
32207 @@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *
32208 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
32209 continue;
32210 rdev = conf->mirrors[d].rdev;
32211 - atomic_add(s, &rdev->corrected_errors);
32212 + atomic_add_unchecked(s, &rdev->corrected_errors);
32213 if (sync_page_io(rdev->bdev,
32214 sect + rdev->data_offset,
32215 s<<9,
32216 @@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf,
32217 /* Well, this device is dead */
32218 md_error(mddev, rdev);
32219 else {
32220 - atomic_add(s, &rdev->corrected_errors);
32221 + atomic_add_unchecked(s, &rdev->corrected_errors);
32222 printk(KERN_INFO
32223 "raid1:%s: read error corrected "
32224 "(%d sectors at %llu on %s)\n",
32225 diff -urNp linux-2.6.32.45/drivers/md/raid5.c linux-2.6.32.45/drivers/md/raid5.c
32226 --- linux-2.6.32.45/drivers/md/raid5.c 2011-06-25 12:55:34.000000000 -0400
32227 +++ linux-2.6.32.45/drivers/md/raid5.c 2011-06-25 12:58:39.000000000 -0400
32228 @@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_hea
32229 bi->bi_next = NULL;
32230 if ((rw & WRITE) &&
32231 test_bit(R5_ReWrite, &sh->dev[i].flags))
32232 - atomic_add(STRIPE_SECTORS,
32233 + atomic_add_unchecked(STRIPE_SECTORS,
32234 &rdev->corrected_errors);
32235 generic_make_request(bi);
32236 } else {
32237 @@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struc
32238 clear_bit(R5_ReadError, &sh->dev[i].flags);
32239 clear_bit(R5_ReWrite, &sh->dev[i].flags);
32240 }
32241 - if (atomic_read(&conf->disks[i].rdev->read_errors))
32242 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
32243 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
32244 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
32245 } else {
32246 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
32247 int retry = 0;
32248 rdev = conf->disks[i].rdev;
32249
32250 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
32251 - atomic_inc(&rdev->read_errors);
32252 + atomic_inc_unchecked(&rdev->read_errors);
32253 if (conf->mddev->degraded >= conf->max_degraded)
32254 printk_rl(KERN_WARNING
32255 "raid5:%s: read error not correctable "
32256 @@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struc
32257 (unsigned long long)(sh->sector
32258 + rdev->data_offset),
32259 bdn);
32260 - else if (atomic_read(&rdev->read_errors)
32261 + else if (atomic_read_unchecked(&rdev->read_errors)
32262 > conf->max_nr_stripes)
32263 printk(KERN_WARNING
32264 "raid5:%s: Too many read errors, failing device %s.\n",
32265 @@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct s
32266 sector_t r_sector;
32267 struct stripe_head sh2;
32268
32269 + pax_track_stack();
32270
32271 chunk_offset = sector_div(new_sector, sectors_per_chunk);
32272 stripe = new_sector;
32273 diff -urNp linux-2.6.32.45/drivers/media/common/saa7146_hlp.c linux-2.6.32.45/drivers/media/common/saa7146_hlp.c
32274 --- linux-2.6.32.45/drivers/media/common/saa7146_hlp.c 2011-03-27 14:31:47.000000000 -0400
32275 +++ linux-2.6.32.45/drivers/media/common/saa7146_hlp.c 2011-05-16 21:46:57.000000000 -0400
32276 @@ -353,6 +353,8 @@ static void calculate_clipping_registers
32277
32278 int x[32], y[32], w[32], h[32];
32279
32280 + pax_track_stack();
32281 +
32282 /* clear out memory */
32283 memset(&line_list[0], 0x00, sizeof(u32)*32);
32284 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
32285 diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
32286 --- linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-03-27 14:31:47.000000000 -0400
32287 +++ linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-05-16 21:46:57.000000000 -0400
32288 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
32289 u8 buf[HOST_LINK_BUF_SIZE];
32290 int i;
32291
32292 + pax_track_stack();
32293 +
32294 dprintk("%s\n", __func__);
32295
32296 /* check if we have space for a link buf in the rx_buffer */
32297 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
32298 unsigned long timeout;
32299 int written;
32300
32301 + pax_track_stack();
32302 +
32303 dprintk("%s\n", __func__);
32304
32305 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
32306 diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_demux.h linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_demux.h
32307 --- linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_demux.h 2011-03-27 14:31:47.000000000 -0400
32308 +++ linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_demux.h 2011-08-05 20:33:55.000000000 -0400
32309 @@ -71,7 +71,7 @@ struct dvb_demux_feed {
32310 union {
32311 dmx_ts_cb ts;
32312 dmx_section_cb sec;
32313 - } cb;
32314 + } __no_const cb;
32315
32316 struct dvb_demux *demux;
32317 void *priv;
32318 diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-core/dvbdev.c linux-2.6.32.45/drivers/media/dvb/dvb-core/dvbdev.c
32319 --- linux-2.6.32.45/drivers/media/dvb/dvb-core/dvbdev.c 2011-03-27 14:31:47.000000000 -0400
32320 +++ linux-2.6.32.45/drivers/media/dvb/dvb-core/dvbdev.c 2011-08-23 21:22:32.000000000 -0400
32321 @@ -191,7 +191,7 @@ int dvb_register_device(struct dvb_adapt
32322 const struct dvb_device *template, void *priv, int type)
32323 {
32324 struct dvb_device *dvbdev;
32325 - struct file_operations *dvbdevfops;
32326 + file_operations_no_const *dvbdevfops;
32327 struct device *clsdev;
32328 int minor;
32329 int id;
32330 diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-usb/cxusb.c linux-2.6.32.45/drivers/media/dvb/dvb-usb/cxusb.c
32331 --- linux-2.6.32.45/drivers/media/dvb/dvb-usb/cxusb.c 2011-03-27 14:31:47.000000000 -0400
32332 +++ linux-2.6.32.45/drivers/media/dvb/dvb-usb/cxusb.c 2011-08-05 20:33:55.000000000 -0400
32333 @@ -1040,7 +1040,7 @@ static struct dib0070_config dib7070p_di
32334 struct dib0700_adapter_state {
32335 int (*set_param_save) (struct dvb_frontend *,
32336 struct dvb_frontend_parameters *);
32337 -};
32338 +} __no_const;
32339
32340 static int dib7070_set_param_override(struct dvb_frontend *fe,
32341 struct dvb_frontend_parameters *fep)
32342 diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_core.c linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_core.c
32343 --- linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-03-27 14:31:47.000000000 -0400
32344 +++ linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-05-16 21:46:57.000000000 -0400
32345 @@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb
32346
32347 u8 buf[260];
32348
32349 + pax_track_stack();
32350 +
32351 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
32352 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
32353
32354 diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_devices.c linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_devices.c
32355 --- linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_devices.c 2011-05-10 22:12:01.000000000 -0400
32356 +++ linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_devices.c 2011-08-05 20:33:55.000000000 -0400
32357 @@ -28,7 +28,7 @@ MODULE_PARM_DESC(force_lna_activation, "
32358
32359 struct dib0700_adapter_state {
32360 int (*set_param_save) (struct dvb_frontend *, struct dvb_frontend_parameters *);
32361 -};
32362 +} __no_const;
32363
32364 /* Hauppauge Nova-T 500 (aka Bristol)
32365 * has a LNA on GPIO0 which is enabled by setting 1 */
32366 diff -urNp linux-2.6.32.45/drivers/media/dvb/frontends/dib3000.h linux-2.6.32.45/drivers/media/dvb/frontends/dib3000.h
32367 --- linux-2.6.32.45/drivers/media/dvb/frontends/dib3000.h 2011-03-27 14:31:47.000000000 -0400
32368 +++ linux-2.6.32.45/drivers/media/dvb/frontends/dib3000.h 2011-08-05 20:33:55.000000000 -0400
32369 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
32370 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
32371 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
32372 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
32373 -};
32374 +} __no_const;
32375
32376 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
32377 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
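__no_const, here and in the cxusb/dib0700 hunks above, pairs with the grsecurity constify GCC plugin: structures consisting only of function pointers are made read-only by default, and ops tables that genuinely have to be written at runtime are annotated __no_const so they stay writable. The fragment below is a plain-C analogue of that trade-off, with the const written out by hand and no plugin involved; the types and callbacks are made up for illustration.

    #include <stdio.h>

    struct tuner_ops_ro {                 /* "constified": set once, never patched */
            int (* const set_params)(int);
    };

    struct tuner_ops_rw {                 /* "__no_const" analogue: assigned at runtime */
            int (*set_params)(int);
    };

    static int set_params_a(int v) { return v + 1; }
    static int set_params_b(int v) { return v + 2; }

    static const struct tuner_ops_ro ops_ro = { .set_params = set_params_a };

    int main(void)
    {
            struct tuner_ops_rw ops_rw;

            /* A read-only table must be fully known at build time... */
            printf("%d\n", ops_ro.set_params(1));

            /* ...while a driver that picks its callback while probing needs a
             * writable table, which is what __no_const preserves. */
            ops_rw.set_params = set_params_b;
            printf("%d\n", ops_rw.set_params(1));
            return 0;
    }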
32378 diff -urNp linux-2.6.32.45/drivers/media/dvb/frontends/or51211.c linux-2.6.32.45/drivers/media/dvb/frontends/or51211.c
32379 --- linux-2.6.32.45/drivers/media/dvb/frontends/or51211.c 2011-03-27 14:31:47.000000000 -0400
32380 +++ linux-2.6.32.45/drivers/media/dvb/frontends/or51211.c 2011-05-16 21:46:57.000000000 -0400
32381 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
32382 u8 tudata[585];
32383 int i;
32384
32385 + pax_track_stack();
32386 +
32387 dprintk("Firmware is %zd bytes\n",fw->size);
32388
32389 /* Get eprom data */
32390 diff -urNp linux-2.6.32.45/drivers/media/radio/radio-cadet.c linux-2.6.32.45/drivers/media/radio/radio-cadet.c
32391 --- linux-2.6.32.45/drivers/media/radio/radio-cadet.c 2011-03-27 14:31:47.000000000 -0400
32392 +++ linux-2.6.32.45/drivers/media/radio/radio-cadet.c 2011-04-17 15:56:46.000000000 -0400
32393 @@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *f
32394 while (i < count && dev->rdsin != dev->rdsout)
32395 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
32396
32397 - if (copy_to_user(data, readbuf, i))
32398 + if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
32399 return -EFAULT;
32400 return i;
32401 }
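The extra 'i > sizeof readbuf' test in radio-cadet.c is a defensive clamp: i is derived from the rdsin/rdsout ring indices, and if those were ever corrupted the copy_to_user() would read past the on-stack readbuf, so an oversized count is rejected before any copy happens. A user-space sketch of the same validate-then-copy pattern (copy_out and the buffer sizes are illustrative):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>

    /* Copy at most 'count' bytes from an internal buffer to the caller,
     * refusing outright if the computed length exceeds the buffer size. */
    static ssize_t copy_out(char *dst, const char *buf, size_t buf_size, size_t count)
    {
            if (count > buf_size)
                    return -EFAULT;   /* mirrors the patch: treat it as a hard error */
            memcpy(dst, buf, count);
            return (ssize_t)count;
    }

    int main(void)
    {
            char internal[32] = "RDS data";
            char user[64];

            printf("%zd\n", copy_out(user, internal, sizeof(internal), 8));   /* ok */
            printf("%zd\n", copy_out(user, internal, sizeof(internal), 999)); /* rejected */
            return 0;
    }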
32402 diff -urNp linux-2.6.32.45/drivers/media/video/cx18/cx18-driver.c linux-2.6.32.45/drivers/media/video/cx18/cx18-driver.c
32403 --- linux-2.6.32.45/drivers/media/video/cx18/cx18-driver.c 2011-03-27 14:31:47.000000000 -0400
32404 +++ linux-2.6.32.45/drivers/media/video/cx18/cx18-driver.c 2011-05-16 21:46:57.000000000 -0400
32405 @@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl
32406
32407 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
32408
32409 -static atomic_t cx18_instance = ATOMIC_INIT(0);
32410 +static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
32411
32412 /* Parameter declarations */
32413 static int cardtype[CX18_MAX_CARDS];
32414 @@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
32415 struct i2c_client c;
32416 u8 eedata[256];
32417
32418 + pax_track_stack();
32419 +
32420 memset(&c, 0, sizeof(c));
32421 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
32422 c.adapter = &cx->i2c_adap[0];
32423 @@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct p
32424 struct cx18 *cx;
32425
32426 /* FIXME - module parameter arrays constrain max instances */
32427 - i = atomic_inc_return(&cx18_instance) - 1;
32428 + i = atomic_inc_return_unchecked(&cx18_instance) - 1;
32429 if (i >= CX18_MAX_CARDS) {
32430 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
32431 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
32432 diff -urNp linux-2.6.32.45/drivers/media/video/ivtv/ivtv-driver.c linux-2.6.32.45/drivers/media/video/ivtv/ivtv-driver.c
32433 --- linux-2.6.32.45/drivers/media/video/ivtv/ivtv-driver.c 2011-03-27 14:31:47.000000000 -0400
32434 +++ linux-2.6.32.45/drivers/media/video/ivtv/ivtv-driver.c 2011-05-04 17:56:28.000000000 -0400
32435 @@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl
32436 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
32437
32438 /* ivtv instance counter */
32439 -static atomic_t ivtv_instance = ATOMIC_INIT(0);
32440 +static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
32441
32442 /* Parameter declarations */
32443 static int cardtype[IVTV_MAX_CARDS];
32444 diff -urNp linux-2.6.32.45/drivers/media/video/omap24xxcam.c linux-2.6.32.45/drivers/media/video/omap24xxcam.c
32445 --- linux-2.6.32.45/drivers/media/video/omap24xxcam.c 2011-03-27 14:31:47.000000000 -0400
32446 +++ linux-2.6.32.45/drivers/media/video/omap24xxcam.c 2011-05-04 17:56:28.000000000 -0400
32447 @@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(str
32448 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
32449
32450 do_gettimeofday(&vb->ts);
32451 - vb->field_count = atomic_add_return(2, &fh->field_count);
32452 + vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
32453 if (csr & csr_error) {
32454 vb->state = VIDEOBUF_ERROR;
32455 if (!atomic_read(&fh->cam->in_reset)) {
32456 diff -urNp linux-2.6.32.45/drivers/media/video/omap24xxcam.h linux-2.6.32.45/drivers/media/video/omap24xxcam.h
32457 --- linux-2.6.32.45/drivers/media/video/omap24xxcam.h 2011-03-27 14:31:47.000000000 -0400
32458 +++ linux-2.6.32.45/drivers/media/video/omap24xxcam.h 2011-05-04 17:56:28.000000000 -0400
32459 @@ -533,7 +533,7 @@ struct omap24xxcam_fh {
32460 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
32461 struct videobuf_queue vbq;
32462 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
32463 - atomic_t field_count; /* field counter for videobuf_buffer */
32464 + atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
32465 /* accessing cam here doesn't need serialisation: it's constant */
32466 struct omap24xxcam_device *cam;
32467 };
32468 diff -urNp linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
32469 --- linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-03-27 14:31:47.000000000 -0400
32470 +++ linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-05-16 21:46:57.000000000 -0400
32471 @@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
32472 u8 *eeprom;
32473 struct tveeprom tvdata;
32474
32475 + pax_track_stack();
32476 +
32477 memset(&tvdata,0,sizeof(tvdata));
32478
32479 eeprom = pvr2_eeprom_fetch(hdw);
32480 diff -urNp linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
32481 --- linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h 2011-03-27 14:31:47.000000000 -0400
32482 +++ linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h 2011-08-23 21:22:38.000000000 -0400
32483 @@ -195,7 +195,7 @@ struct pvr2_hdw {
32484
32485 /* I2C stuff */
32486 struct i2c_adapter i2c_adap;
32487 - struct i2c_algorithm i2c_algo;
32488 + i2c_algorithm_no_const i2c_algo;
32489 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
32490 int i2c_cx25840_hack_state;
32491 int i2c_linked;
32492 diff -urNp linux-2.6.32.45/drivers/media/video/saa7134/saa6752hs.c linux-2.6.32.45/drivers/media/video/saa7134/saa6752hs.c
32493 --- linux-2.6.32.45/drivers/media/video/saa7134/saa6752hs.c 2011-03-27 14:31:47.000000000 -0400
32494 +++ linux-2.6.32.45/drivers/media/video/saa7134/saa6752hs.c 2011-05-16 21:46:57.000000000 -0400
32495 @@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_su
32496 unsigned char localPAT[256];
32497 unsigned char localPMT[256];
32498
32499 + pax_track_stack();
32500 +
32501 /* Set video format - must be done first as it resets other settings */
32502 set_reg8(client, 0x41, h->video_format);
32503
32504 diff -urNp linux-2.6.32.45/drivers/media/video/saa7164/saa7164-cmd.c linux-2.6.32.45/drivers/media/video/saa7164/saa7164-cmd.c
32505 --- linux-2.6.32.45/drivers/media/video/saa7164/saa7164-cmd.c 2011-03-27 14:31:47.000000000 -0400
32506 +++ linux-2.6.32.45/drivers/media/video/saa7164/saa7164-cmd.c 2011-05-16 21:46:57.000000000 -0400
32507 @@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_d
32508 wait_queue_head_t *q = 0;
32509 dprintk(DBGLVL_CMD, "%s()\n", __func__);
32510
32511 + pax_track_stack();
32512 +
32513 /* While any outstand message on the bus exists... */
32514 do {
32515
32516 @@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
32517 u8 tmp[512];
32518 dprintk(DBGLVL_CMD, "%s()\n", __func__);
32519
32520 + pax_track_stack();
32521 +
32522 while (loop) {
32523
32524 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
32525 diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/ibmcam.c linux-2.6.32.45/drivers/media/video/usbvideo/ibmcam.c
32526 --- linux-2.6.32.45/drivers/media/video/usbvideo/ibmcam.c 2011-03-27 14:31:47.000000000 -0400
32527 +++ linux-2.6.32.45/drivers/media/video/usbvideo/ibmcam.c 2011-08-05 20:33:55.000000000 -0400
32528 @@ -3947,15 +3947,15 @@ static struct usb_device_id id_table[] =
32529 static int __init ibmcam_init(void)
32530 {
32531 struct usbvideo_cb cbTbl;
32532 - memset(&cbTbl, 0, sizeof(cbTbl));
32533 - cbTbl.probe = ibmcam_probe;
32534 - cbTbl.setupOnOpen = ibmcam_setup_on_open;
32535 - cbTbl.videoStart = ibmcam_video_start;
32536 - cbTbl.videoStop = ibmcam_video_stop;
32537 - cbTbl.processData = ibmcam_ProcessIsocData;
32538 - cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32539 - cbTbl.adjustPicture = ibmcam_adjust_picture;
32540 - cbTbl.getFPS = ibmcam_calculate_fps;
32541 + memset((void *)&cbTbl, 0, sizeof(cbTbl));
32542 + *(void **)&cbTbl.probe = ibmcam_probe;
32543 + *(void **)&cbTbl.setupOnOpen = ibmcam_setup_on_open;
32544 + *(void **)&cbTbl.videoStart = ibmcam_video_start;
32545 + *(void **)&cbTbl.videoStop = ibmcam_video_stop;
32546 + *(void **)&cbTbl.processData = ibmcam_ProcessIsocData;
32547 + *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32548 + *(void **)&cbTbl.adjustPicture = ibmcam_adjust_picture;
32549 + *(void **)&cbTbl.getFPS = ibmcam_calculate_fps;
32550 return usbvideo_register(
32551 &cams,
32552 MAX_IBMCAM,
32553 diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/konicawc.c linux-2.6.32.45/drivers/media/video/usbvideo/konicawc.c
32554 --- linux-2.6.32.45/drivers/media/video/usbvideo/konicawc.c 2011-03-27 14:31:47.000000000 -0400
32555 +++ linux-2.6.32.45/drivers/media/video/usbvideo/konicawc.c 2011-08-05 20:33:55.000000000 -0400
32556 @@ -225,7 +225,7 @@ static void konicawc_register_input(stru
32557 int error;
32558
32559 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
32560 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32561 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32562
32563 cam->input = input_dev = input_allocate_device();
32564 if (!input_dev) {
32565 @@ -935,16 +935,16 @@ static int __init konicawc_init(void)
32566 struct usbvideo_cb cbTbl;
32567 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
32568 DRIVER_DESC "\n");
32569 - memset(&cbTbl, 0, sizeof(cbTbl));
32570 - cbTbl.probe = konicawc_probe;
32571 - cbTbl.setupOnOpen = konicawc_setup_on_open;
32572 - cbTbl.processData = konicawc_process_isoc;
32573 - cbTbl.getFPS = konicawc_calculate_fps;
32574 - cbTbl.setVideoMode = konicawc_set_video_mode;
32575 - cbTbl.startDataPump = konicawc_start_data;
32576 - cbTbl.stopDataPump = konicawc_stop_data;
32577 - cbTbl.adjustPicture = konicawc_adjust_picture;
32578 - cbTbl.userFree = konicawc_free_uvd;
32579 + memset((void *)&cbTbl, 0, sizeof(cbTbl));
32580 + *(void **)&cbTbl.probe = konicawc_probe;
32581 + *(void **)&cbTbl.setupOnOpen = konicawc_setup_on_open;
32582 + *(void **)&cbTbl.processData = konicawc_process_isoc;
32583 + *(void **)&cbTbl.getFPS = konicawc_calculate_fps;
32584 + *(void **)&cbTbl.setVideoMode = konicawc_set_video_mode;
32585 + *(void **)&cbTbl.startDataPump = konicawc_start_data;
32586 + *(void **)&cbTbl.stopDataPump = konicawc_stop_data;
32587 + *(void **)&cbTbl.adjustPicture = konicawc_adjust_picture;
32588 + *(void **)&cbTbl.userFree = konicawc_free_uvd;
32589 return usbvideo_register(
32590 &cams,
32591 MAX_CAMERAS,
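The strncat -> strlcat change here (also applied to quickcam_messenger.c below) fixes a common size-argument mix-up: the third argument of strncat() limits how many bytes are appended, not the total size of the destination, so passing sizeof(cam->input_physname) can still overrun the buffer once usb_make_path() has nearly filled it, whereas strlcat() takes the full destination size and truncates. glibc does not ship strlcat, so the demo below carries a minimal local work-alike (my_strlcat) purely for illustration.

    #include <stdio.h>
    #include <string.h>

    /* Minimal strlcat work-alike: bounds the *total* size of dst. */
    static size_t my_strlcat(char *dst, const char *src, size_t size)
    {
            size_t dlen = strlen(dst);
            size_t slen = strlen(src);

            if (dlen >= size)
                    return size + slen;          /* dst already full/unterminated */
            if (slen >= size - dlen)
                    slen = size - dlen - 1;      /* truncate to fit */
            memcpy(dst + dlen, src, slen);
            dst[dlen + slen] = '\0';
            return dlen + slen;
    }

    int main(void)
    {
            char phys[24];

            /* Pretend usb_make_path() produced a long prefix. */
            snprintf(phys, sizeof(phys), "usb-0000:00:1d.7-1.4");

            /* Wrong: the limit is "bytes to append", so this could run four
             * bytes past a nearly-full 24-byte buffer:
             *     strncat(phys, "/input0", sizeof(phys));                    */

            /* Right: the limit is the whole destination, so it truncates safely. */
            my_strlcat(phys, "/input0", sizeof(phys));
            printf("%s\n", phys);
            return 0;
    }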
32592 diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/quickcam_messenger.c linux-2.6.32.45/drivers/media/video/usbvideo/quickcam_messenger.c
32593 --- linux-2.6.32.45/drivers/media/video/usbvideo/quickcam_messenger.c 2011-03-27 14:31:47.000000000 -0400
32594 +++ linux-2.6.32.45/drivers/media/video/usbvideo/quickcam_messenger.c 2011-04-17 15:56:46.000000000 -0400
32595 @@ -89,7 +89,7 @@ static void qcm_register_input(struct qc
32596 int error;
32597
32598 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
32599 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32600 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32601
32602 cam->input = input_dev = input_allocate_device();
32603 if (!input_dev) {
32604 diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/ultracam.c linux-2.6.32.45/drivers/media/video/usbvideo/ultracam.c
32605 --- linux-2.6.32.45/drivers/media/video/usbvideo/ultracam.c 2011-03-27 14:31:47.000000000 -0400
32606 +++ linux-2.6.32.45/drivers/media/video/usbvideo/ultracam.c 2011-08-05 20:33:55.000000000 -0400
32607 @@ -655,14 +655,14 @@ static int __init ultracam_init(void)
32608 {
32609 struct usbvideo_cb cbTbl;
32610 memset(&cbTbl, 0, sizeof(cbTbl));
32611 - cbTbl.probe = ultracam_probe;
32612 - cbTbl.setupOnOpen = ultracam_setup_on_open;
32613 - cbTbl.videoStart = ultracam_video_start;
32614 - cbTbl.videoStop = ultracam_video_stop;
32615 - cbTbl.processData = ultracam_ProcessIsocData;
32616 - cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32617 - cbTbl.adjustPicture = ultracam_adjust_picture;
32618 - cbTbl.getFPS = ultracam_calculate_fps;
32619 + *(void **)&cbTbl.probe = ultracam_probe;
32620 + *(void **)&cbTbl.setupOnOpen = ultracam_setup_on_open;
32621 + *(void **)&cbTbl.videoStart = ultracam_video_start;
32622 + *(void **)&cbTbl.videoStop = ultracam_video_stop;
32623 + *(void **)&cbTbl.processData = ultracam_ProcessIsocData;
32624 + *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32625 + *(void **)&cbTbl.adjustPicture = ultracam_adjust_picture;
32626 + *(void **)&cbTbl.getFPS = ultracam_calculate_fps;
32627 return usbvideo_register(
32628 &cams,
32629 MAX_CAMERAS,
32630 diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/usbvideo.c linux-2.6.32.45/drivers/media/video/usbvideo/usbvideo.c
32631 --- linux-2.6.32.45/drivers/media/video/usbvideo/usbvideo.c 2011-03-27 14:31:47.000000000 -0400
32632 +++ linux-2.6.32.45/drivers/media/video/usbvideo/usbvideo.c 2011-08-05 20:33:55.000000000 -0400
32633 @@ -697,15 +697,15 @@ int usbvideo_register(
32634 __func__, cams, base_size, num_cams);
32635
32636 /* Copy callbacks, apply defaults for those that are not set */
32637 - memmove(&cams->cb, cbTbl, sizeof(cams->cb));
32638 + memmove((void *)&cams->cb, cbTbl, sizeof(cams->cb));
32639 if (cams->cb.getFrame == NULL)
32640 - cams->cb.getFrame = usbvideo_GetFrame;
32641 + *(void **)&cams->cb.getFrame = usbvideo_GetFrame;
32642 if (cams->cb.disconnect == NULL)
32643 - cams->cb.disconnect = usbvideo_Disconnect;
32644 + *(void **)&cams->cb.disconnect = usbvideo_Disconnect;
32645 if (cams->cb.startDataPump == NULL)
32646 - cams->cb.startDataPump = usbvideo_StartDataPump;
32647 + *(void **)&cams->cb.startDataPump = usbvideo_StartDataPump;
32648 if (cams->cb.stopDataPump == NULL)
32649 - cams->cb.stopDataPump = usbvideo_StopDataPump;
32650 + *(void **)&cams->cb.stopDataPump = usbvideo_StopDataPump;
32651
32652 cams->num_cameras = num_cams;
32653 cams->cam = (struct uvd *) &cams[1];
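The rewritten callback-table setup in usbvideo.c, ibmcam.c, konicawc.c and ultracam.c is another consequence of the constify plugin: once the usbvideo_cb members are treated as const function pointers, direct assignments no longer compile, so the tables are filled through *(void **)& casts. That works here because the tables themselves live in writable memory; only the member type is const-qualified. The analogue below spells the const out by hand, with the caveat that writing through the cast (and storing a function pointer via void *) leans on Linux/GCC behaviour rather than strict ISO C.

    #include <stdio.h>
    #include <string.h>

    struct cb_table {
            void (* const probe)(void);      /* member type is const-qualified */
            void (* const start)(void);
    };

    static void my_probe(void) { puts("probe"); }
    static void my_start(void) { puts("start"); }

    int main(void)
    {
            struct cb_table tbl;             /* the object itself is writable */

            memset(&tbl, 0, sizeof(tbl));

            /* tbl.probe = my_probe;  would not compile: assignment to const member */
            *(void **)&tbl.probe = (void *)my_probe;
            *(void **)&tbl.start = (void *)my_start;

            tbl.probe();
            tbl.start();
            return 0;
    }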
32654 diff -urNp linux-2.6.32.45/drivers/media/video/usbvision/usbvision-core.c linux-2.6.32.45/drivers/media/video/usbvision/usbvision-core.c
32655 --- linux-2.6.32.45/drivers/media/video/usbvision/usbvision-core.c 2011-03-27 14:31:47.000000000 -0400
32656 +++ linux-2.6.32.45/drivers/media/video/usbvision/usbvision-core.c 2011-05-16 21:46:57.000000000 -0400
32657 @@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_c
32658 unsigned char rv, gv, bv;
32659 static unsigned char *Y, *U, *V;
32660
32661 + pax_track_stack();
32662 +
32663 frame = usbvision->curFrame;
32664 imageSize = frame->frmwidth * frame->frmheight;
32665 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
32666 diff -urNp linux-2.6.32.45/drivers/media/video/v4l2-device.c linux-2.6.32.45/drivers/media/video/v4l2-device.c
32667 --- linux-2.6.32.45/drivers/media/video/v4l2-device.c 2011-03-27 14:31:47.000000000 -0400
32668 +++ linux-2.6.32.45/drivers/media/video/v4l2-device.c 2011-05-04 17:56:28.000000000 -0400
32669 @@ -50,9 +50,9 @@ int v4l2_device_register(struct device *
32670 EXPORT_SYMBOL_GPL(v4l2_device_register);
32671
32672 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
32673 - atomic_t *instance)
32674 + atomic_unchecked_t *instance)
32675 {
32676 - int num = atomic_inc_return(instance) - 1;
32677 + int num = atomic_inc_return_unchecked(instance) - 1;
32678 int len = strlen(basename);
32679
32680 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
32681 diff -urNp linux-2.6.32.45/drivers/media/video/videobuf-dma-sg.c linux-2.6.32.45/drivers/media/video/videobuf-dma-sg.c
32682 --- linux-2.6.32.45/drivers/media/video/videobuf-dma-sg.c 2011-03-27 14:31:47.000000000 -0400
32683 +++ linux-2.6.32.45/drivers/media/video/videobuf-dma-sg.c 2011-05-16 21:46:57.000000000 -0400
32684 @@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
32685 {
32686 struct videobuf_queue q;
32687
32688 + pax_track_stack();
32689 +
32690 /* Required to make generic handler to call __videobuf_alloc */
32691 q.int_ops = &sg_ops;
32692
32693 diff -urNp linux-2.6.32.45/drivers/message/fusion/mptbase.c linux-2.6.32.45/drivers/message/fusion/mptbase.c
32694 --- linux-2.6.32.45/drivers/message/fusion/mptbase.c 2011-03-27 14:31:47.000000000 -0400
32695 +++ linux-2.6.32.45/drivers/message/fusion/mptbase.c 2011-04-17 15:56:46.000000000 -0400
32696 @@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **s
32697 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
32698 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
32699
32700 +#ifdef CONFIG_GRKERNSEC_HIDESYM
32701 + len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
32702 + NULL, NULL);
32703 +#else
32704 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
32705 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
32706 +#endif
32707 +
32708 /*
32709 * Rounding UP to nearest 4-kB boundary here...
32710 */
32711 diff -urNp linux-2.6.32.45/drivers/message/fusion/mptsas.c linux-2.6.32.45/drivers/message/fusion/mptsas.c
32712 --- linux-2.6.32.45/drivers/message/fusion/mptsas.c 2011-03-27 14:31:47.000000000 -0400
32713 +++ linux-2.6.32.45/drivers/message/fusion/mptsas.c 2011-04-17 15:56:46.000000000 -0400
32714 @@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devin
32715 return 0;
32716 }
32717
32718 +static inline void
32719 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
32720 +{
32721 + if (phy_info->port_details) {
32722 + phy_info->port_details->rphy = rphy;
32723 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
32724 + ioc->name, rphy));
32725 + }
32726 +
32727 + if (rphy) {
32728 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
32729 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
32730 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
32731 + ioc->name, rphy, rphy->dev.release));
32732 + }
32733 +}
32734 +
32735 /* no mutex */
32736 static void
32737 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
32738 @@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
32739 return NULL;
32740 }
32741
32742 -static inline void
32743 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
32744 -{
32745 - if (phy_info->port_details) {
32746 - phy_info->port_details->rphy = rphy;
32747 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
32748 - ioc->name, rphy));
32749 - }
32750 -
32751 - if (rphy) {
32752 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
32753 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
32754 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
32755 - ioc->name, rphy, rphy->dev.release));
32756 - }
32757 -}
32758 -
32759 static inline struct sas_port *
32760 mptsas_get_port(struct mptsas_phyinfo *phy_info)
32761 {
32762 diff -urNp linux-2.6.32.45/drivers/message/fusion/mptscsih.c linux-2.6.32.45/drivers/message/fusion/mptscsih.c
32763 --- linux-2.6.32.45/drivers/message/fusion/mptscsih.c 2011-03-27 14:31:47.000000000 -0400
32764 +++ linux-2.6.32.45/drivers/message/fusion/mptscsih.c 2011-04-17 15:56:46.000000000 -0400
32765 @@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
32766
32767 h = shost_priv(SChost);
32768
32769 - if (h) {
32770 - if (h->info_kbuf == NULL)
32771 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
32772 - return h->info_kbuf;
32773 - h->info_kbuf[0] = '\0';
32774 + if (!h)
32775 + return NULL;
32776
32777 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
32778 - h->info_kbuf[size-1] = '\0';
32779 - }
32780 + if (h->info_kbuf == NULL)
32781 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
32782 + return h->info_kbuf;
32783 + h->info_kbuf[0] = '\0';
32784 +
32785 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
32786 + h->info_kbuf[size-1] = '\0';
32787
32788 return h->info_kbuf;
32789 }
32790 diff -urNp linux-2.6.32.45/drivers/message/i2o/i2o_config.c linux-2.6.32.45/drivers/message/i2o/i2o_config.c
32791 --- linux-2.6.32.45/drivers/message/i2o/i2o_config.c 2011-03-27 14:31:47.000000000 -0400
32792 +++ linux-2.6.32.45/drivers/message/i2o/i2o_config.c 2011-05-16 21:46:57.000000000 -0400
32793 @@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned lon
32794 struct i2o_message *msg;
32795 unsigned int iop;
32796
32797 + pax_track_stack();
32798 +
32799 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
32800 return -EFAULT;
32801
32802 diff -urNp linux-2.6.32.45/drivers/message/i2o/i2o_proc.c linux-2.6.32.45/drivers/message/i2o/i2o_proc.c
32803 --- linux-2.6.32.45/drivers/message/i2o/i2o_proc.c 2011-03-27 14:31:47.000000000 -0400
32804 +++ linux-2.6.32.45/drivers/message/i2o/i2o_proc.c 2011-04-17 15:56:46.000000000 -0400
32805 @@ -259,13 +259,6 @@ static char *scsi_devices[] = {
32806 "Array Controller Device"
32807 };
32808
32809 -static char *chtostr(u8 * chars, int n)
32810 -{
32811 - char tmp[256];
32812 - tmp[0] = 0;
32813 - return strncat(tmp, (char *)chars, n);
32814 -}
32815 -
32816 static int i2o_report_query_status(struct seq_file *seq, int block_status,
32817 char *group)
32818 {
32819 @@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct
32820
32821 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
32822 seq_printf(seq, "%-#8x", ddm_table.module_id);
32823 - seq_printf(seq, "%-29s",
32824 - chtostr(ddm_table.module_name_version, 28));
32825 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
32826 seq_printf(seq, "%9d ", ddm_table.data_size);
32827 seq_printf(seq, "%8d", ddm_table.code_size);
32828
32829 @@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(s
32830
32831 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
32832 seq_printf(seq, "%-#8x", dst->module_id);
32833 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
32834 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
32835 + seq_printf(seq, "%-.28s", dst->module_name_version);
32836 + seq_printf(seq, "%-.8s", dst->date);
32837 seq_printf(seq, "%8d ", dst->module_size);
32838 seq_printf(seq, "%8d ", dst->mpb_size);
32839 seq_printf(seq, "0x%04x", dst->module_flags);
32840 @@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(str
32841 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
32842 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
32843 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
32844 - seq_printf(seq, "Vendor info : %s\n",
32845 - chtostr((u8 *) (work32 + 2), 16));
32846 - seq_printf(seq, "Product info : %s\n",
32847 - chtostr((u8 *) (work32 + 6), 16));
32848 - seq_printf(seq, "Description : %s\n",
32849 - chtostr((u8 *) (work32 + 10), 16));
32850 - seq_printf(seq, "Product rev. : %s\n",
32851 - chtostr((u8 *) (work32 + 14), 8));
32852 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
32853 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
32854 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
32855 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
32856
32857 seq_printf(seq, "Serial number : ");
32858 print_serial_number(seq, (u8 *) (work32 + 16),
32859 @@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(str
32860 }
32861
32862 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
32863 - seq_printf(seq, "Module name : %s\n",
32864 - chtostr(result.module_name, 24));
32865 - seq_printf(seq, "Module revision : %s\n",
32866 - chtostr(result.module_rev, 8));
32867 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
32868 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
32869
32870 seq_printf(seq, "Serial number : ");
32871 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
32872 @@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq
32873 return 0;
32874 }
32875
32876 - seq_printf(seq, "Device name : %s\n",
32877 - chtostr(result.device_name, 64));
32878 - seq_printf(seq, "Service name : %s\n",
32879 - chtostr(result.service_name, 64));
32880 - seq_printf(seq, "Physical name : %s\n",
32881 - chtostr(result.physical_location, 64));
32882 - seq_printf(seq, "Instance number : %s\n",
32883 - chtostr(result.instance_number, 4));
32884 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
32885 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
32886 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
32887 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
32888
32889 return 0;
32890 }
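Dropping chtostr() from i2o_proc.c removes two problems at once: the helper assembled its result with strncat() in a 256-byte stack temporary, on fields that are fixed-width and not guaranteed to be NUL-terminated, and it returned the address of that temporary, which is already dead when seq_printf() formats it. A "%.Ns" precision prints at most N bytes straight from the source field with no copy at all; a small user-space illustration:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            /* A fixed-width field that is NOT NUL-terminated, like the 28-byte
             * module_name_version field read out of the IOP tables. */
            char field[28];

            memset(field, ' ', sizeof(field));
            memcpy(field, "ddm-example-1.0", 15);

            /* The precision caps the read at 28 bytes even with no terminator,
             * so no stack temporary and no strncat() are needed. */
            printf("Module name : %.28s\n", field);
            return 0;
    }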
32891 diff -urNp linux-2.6.32.45/drivers/message/i2o/iop.c linux-2.6.32.45/drivers/message/i2o/iop.c
32892 --- linux-2.6.32.45/drivers/message/i2o/iop.c 2011-03-27 14:31:47.000000000 -0400
32893 +++ linux-2.6.32.45/drivers/message/i2o/iop.c 2011-05-04 17:56:28.000000000 -0400
32894 @@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
32895
32896 spin_lock_irqsave(&c->context_list_lock, flags);
32897
32898 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
32899 - atomic_inc(&c->context_list_counter);
32900 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
32901 + atomic_inc_unchecked(&c->context_list_counter);
32902
32903 - entry->context = atomic_read(&c->context_list_counter);
32904 + entry->context = atomic_read_unchecked(&c->context_list_counter);
32905
32906 list_add(&entry->list, &c->context_list);
32907
32908 @@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(voi
32909
32910 #if BITS_PER_LONG == 64
32911 spin_lock_init(&c->context_list_lock);
32912 - atomic_set(&c->context_list_counter, 0);
32913 + atomic_set_unchecked(&c->context_list_counter, 0);
32914 INIT_LIST_HEAD(&c->context_list);
32915 #endif
32916
32917 diff -urNp linux-2.6.32.45/drivers/mfd/wm8350-i2c.c linux-2.6.32.45/drivers/mfd/wm8350-i2c.c
32918 --- linux-2.6.32.45/drivers/mfd/wm8350-i2c.c 2011-03-27 14:31:47.000000000 -0400
32919 +++ linux-2.6.32.45/drivers/mfd/wm8350-i2c.c 2011-05-16 21:46:57.000000000 -0400
32920 @@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struc
32921 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
32922 int ret;
32923
32924 + pax_track_stack();
32925 +
32926 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
32927 return -EINVAL;
32928
32929 diff -urNp linux-2.6.32.45/drivers/misc/kgdbts.c linux-2.6.32.45/drivers/misc/kgdbts.c
32930 --- linux-2.6.32.45/drivers/misc/kgdbts.c 2011-03-27 14:31:47.000000000 -0400
32931 +++ linux-2.6.32.45/drivers/misc/kgdbts.c 2011-04-17 15:56:46.000000000 -0400
32932 @@ -118,7 +118,7 @@
32933 } while (0)
32934 #define MAX_CONFIG_LEN 40
32935
32936 -static struct kgdb_io kgdbts_io_ops;
32937 +static const struct kgdb_io kgdbts_io_ops;
32938 static char get_buf[BUFMAX];
32939 static int get_buf_cnt;
32940 static char put_buf[BUFMAX];
32941 @@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void
32942 module_put(THIS_MODULE);
32943 }
32944
32945 -static struct kgdb_io kgdbts_io_ops = {
32946 +static const struct kgdb_io kgdbts_io_ops = {
32947 .name = "kgdbts",
32948 .read_char = kgdbts_get_char,
32949 .write_char = kgdbts_put_char,
32950 diff -urNp linux-2.6.32.45/drivers/misc/sgi-gru/gruhandles.c linux-2.6.32.45/drivers/misc/sgi-gru/gruhandles.c
32951 --- linux-2.6.32.45/drivers/misc/sgi-gru/gruhandles.c 2011-03-27 14:31:47.000000000 -0400
32952 +++ linux-2.6.32.45/drivers/misc/sgi-gru/gruhandles.c 2011-04-17 15:56:46.000000000 -0400
32953 @@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistic
32954
32955 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
32956 {
32957 - atomic_long_inc(&mcs_op_statistics[op].count);
32958 - atomic_long_add(clks, &mcs_op_statistics[op].total);
32959 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
32960 + atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
32961 if (mcs_op_statistics[op].max < clks)
32962 mcs_op_statistics[op].max = clks;
32963 }
32964 diff -urNp linux-2.6.32.45/drivers/misc/sgi-gru/gruprocfs.c linux-2.6.32.45/drivers/misc/sgi-gru/gruprocfs.c
32965 --- linux-2.6.32.45/drivers/misc/sgi-gru/gruprocfs.c 2011-03-27 14:31:47.000000000 -0400
32966 +++ linux-2.6.32.45/drivers/misc/sgi-gru/gruprocfs.c 2011-04-17 15:56:46.000000000 -0400
32967 @@ -32,9 +32,9 @@
32968
32969 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
32970
32971 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
32972 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
32973 {
32974 - unsigned long val = atomic_long_read(v);
32975 + unsigned long val = atomic_long_read_unchecked(v);
32976
32977 if (val)
32978 seq_printf(s, "%16lu %s\n", val, id);
32979 @@ -136,8 +136,8 @@ static int mcs_statistics_show(struct se
32980 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
32981
32982 for (op = 0; op < mcsop_last; op++) {
32983 - count = atomic_long_read(&mcs_op_statistics[op].count);
32984 - total = atomic_long_read(&mcs_op_statistics[op].total);
32985 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
32986 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
32987 max = mcs_op_statistics[op].max;
32988 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
32989 count ? total / count : 0, max);
32990 diff -urNp linux-2.6.32.45/drivers/misc/sgi-gru/grutables.h linux-2.6.32.45/drivers/misc/sgi-gru/grutables.h
32991 --- linux-2.6.32.45/drivers/misc/sgi-gru/grutables.h 2011-03-27 14:31:47.000000000 -0400
32992 +++ linux-2.6.32.45/drivers/misc/sgi-gru/grutables.h 2011-04-17 15:56:46.000000000 -0400
32993 @@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
32994 * GRU statistics.
32995 */
32996 struct gru_stats_s {
32997 - atomic_long_t vdata_alloc;
32998 - atomic_long_t vdata_free;
32999 - atomic_long_t gts_alloc;
33000 - atomic_long_t gts_free;
33001 - atomic_long_t vdata_double_alloc;
33002 - atomic_long_t gts_double_allocate;
33003 - atomic_long_t assign_context;
33004 - atomic_long_t assign_context_failed;
33005 - atomic_long_t free_context;
33006 - atomic_long_t load_user_context;
33007 - atomic_long_t load_kernel_context;
33008 - atomic_long_t lock_kernel_context;
33009 - atomic_long_t unlock_kernel_context;
33010 - atomic_long_t steal_user_context;
33011 - atomic_long_t steal_kernel_context;
33012 - atomic_long_t steal_context_failed;
33013 - atomic_long_t nopfn;
33014 - atomic_long_t break_cow;
33015 - atomic_long_t asid_new;
33016 - atomic_long_t asid_next;
33017 - atomic_long_t asid_wrap;
33018 - atomic_long_t asid_reuse;
33019 - atomic_long_t intr;
33020 - atomic_long_t intr_mm_lock_failed;
33021 - atomic_long_t call_os;
33022 - atomic_long_t call_os_offnode_reference;
33023 - atomic_long_t call_os_check_for_bug;
33024 - atomic_long_t call_os_wait_queue;
33025 - atomic_long_t user_flush_tlb;
33026 - atomic_long_t user_unload_context;
33027 - atomic_long_t user_exception;
33028 - atomic_long_t set_context_option;
33029 - atomic_long_t migrate_check;
33030 - atomic_long_t migrated_retarget;
33031 - atomic_long_t migrated_unload;
33032 - atomic_long_t migrated_unload_delay;
33033 - atomic_long_t migrated_nopfn_retarget;
33034 - atomic_long_t migrated_nopfn_unload;
33035 - atomic_long_t tlb_dropin;
33036 - atomic_long_t tlb_dropin_fail_no_asid;
33037 - atomic_long_t tlb_dropin_fail_upm;
33038 - atomic_long_t tlb_dropin_fail_invalid;
33039 - atomic_long_t tlb_dropin_fail_range_active;
33040 - atomic_long_t tlb_dropin_fail_idle;
33041 - atomic_long_t tlb_dropin_fail_fmm;
33042 - atomic_long_t tlb_dropin_fail_no_exception;
33043 - atomic_long_t tlb_dropin_fail_no_exception_war;
33044 - atomic_long_t tfh_stale_on_fault;
33045 - atomic_long_t mmu_invalidate_range;
33046 - atomic_long_t mmu_invalidate_page;
33047 - atomic_long_t mmu_clear_flush_young;
33048 - atomic_long_t flush_tlb;
33049 - atomic_long_t flush_tlb_gru;
33050 - atomic_long_t flush_tlb_gru_tgh;
33051 - atomic_long_t flush_tlb_gru_zero_asid;
33052 -
33053 - atomic_long_t copy_gpa;
33054 -
33055 - atomic_long_t mesq_receive;
33056 - atomic_long_t mesq_receive_none;
33057 - atomic_long_t mesq_send;
33058 - atomic_long_t mesq_send_failed;
33059 - atomic_long_t mesq_noop;
33060 - atomic_long_t mesq_send_unexpected_error;
33061 - atomic_long_t mesq_send_lb_overflow;
33062 - atomic_long_t mesq_send_qlimit_reached;
33063 - atomic_long_t mesq_send_amo_nacked;
33064 - atomic_long_t mesq_send_put_nacked;
33065 - atomic_long_t mesq_qf_not_full;
33066 - atomic_long_t mesq_qf_locked;
33067 - atomic_long_t mesq_qf_noop_not_full;
33068 - atomic_long_t mesq_qf_switch_head_failed;
33069 - atomic_long_t mesq_qf_unexpected_error;
33070 - atomic_long_t mesq_noop_unexpected_error;
33071 - atomic_long_t mesq_noop_lb_overflow;
33072 - atomic_long_t mesq_noop_qlimit_reached;
33073 - atomic_long_t mesq_noop_amo_nacked;
33074 - atomic_long_t mesq_noop_put_nacked;
33075 + atomic_long_unchecked_t vdata_alloc;
33076 + atomic_long_unchecked_t vdata_free;
33077 + atomic_long_unchecked_t gts_alloc;
33078 + atomic_long_unchecked_t gts_free;
33079 + atomic_long_unchecked_t vdata_double_alloc;
33080 + atomic_long_unchecked_t gts_double_allocate;
33081 + atomic_long_unchecked_t assign_context;
33082 + atomic_long_unchecked_t assign_context_failed;
33083 + atomic_long_unchecked_t free_context;
33084 + atomic_long_unchecked_t load_user_context;
33085 + atomic_long_unchecked_t load_kernel_context;
33086 + atomic_long_unchecked_t lock_kernel_context;
33087 + atomic_long_unchecked_t unlock_kernel_context;
33088 + atomic_long_unchecked_t steal_user_context;
33089 + atomic_long_unchecked_t steal_kernel_context;
33090 + atomic_long_unchecked_t steal_context_failed;
33091 + atomic_long_unchecked_t nopfn;
33092 + atomic_long_unchecked_t break_cow;
33093 + atomic_long_unchecked_t asid_new;
33094 + atomic_long_unchecked_t asid_next;
33095 + atomic_long_unchecked_t asid_wrap;
33096 + atomic_long_unchecked_t asid_reuse;
33097 + atomic_long_unchecked_t intr;
33098 + atomic_long_unchecked_t intr_mm_lock_failed;
33099 + atomic_long_unchecked_t call_os;
33100 + atomic_long_unchecked_t call_os_offnode_reference;
33101 + atomic_long_unchecked_t call_os_check_for_bug;
33102 + atomic_long_unchecked_t call_os_wait_queue;
33103 + atomic_long_unchecked_t user_flush_tlb;
33104 + atomic_long_unchecked_t user_unload_context;
33105 + atomic_long_unchecked_t user_exception;
33106 + atomic_long_unchecked_t set_context_option;
33107 + atomic_long_unchecked_t migrate_check;
33108 + atomic_long_unchecked_t migrated_retarget;
33109 + atomic_long_unchecked_t migrated_unload;
33110 + atomic_long_unchecked_t migrated_unload_delay;
33111 + atomic_long_unchecked_t migrated_nopfn_retarget;
33112 + atomic_long_unchecked_t migrated_nopfn_unload;
33113 + atomic_long_unchecked_t tlb_dropin;
33114 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
33115 + atomic_long_unchecked_t tlb_dropin_fail_upm;
33116 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
33117 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
33118 + atomic_long_unchecked_t tlb_dropin_fail_idle;
33119 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
33120 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
33121 + atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
33122 + atomic_long_unchecked_t tfh_stale_on_fault;
33123 + atomic_long_unchecked_t mmu_invalidate_range;
33124 + atomic_long_unchecked_t mmu_invalidate_page;
33125 + atomic_long_unchecked_t mmu_clear_flush_young;
33126 + atomic_long_unchecked_t flush_tlb;
33127 + atomic_long_unchecked_t flush_tlb_gru;
33128 + atomic_long_unchecked_t flush_tlb_gru_tgh;
33129 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
33130 +
33131 + atomic_long_unchecked_t copy_gpa;
33132 +
33133 + atomic_long_unchecked_t mesq_receive;
33134 + atomic_long_unchecked_t mesq_receive_none;
33135 + atomic_long_unchecked_t mesq_send;
33136 + atomic_long_unchecked_t mesq_send_failed;
33137 + atomic_long_unchecked_t mesq_noop;
33138 + atomic_long_unchecked_t mesq_send_unexpected_error;
33139 + atomic_long_unchecked_t mesq_send_lb_overflow;
33140 + atomic_long_unchecked_t mesq_send_qlimit_reached;
33141 + atomic_long_unchecked_t mesq_send_amo_nacked;
33142 + atomic_long_unchecked_t mesq_send_put_nacked;
33143 + atomic_long_unchecked_t mesq_qf_not_full;
33144 + atomic_long_unchecked_t mesq_qf_locked;
33145 + atomic_long_unchecked_t mesq_qf_noop_not_full;
33146 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
33147 + atomic_long_unchecked_t mesq_qf_unexpected_error;
33148 + atomic_long_unchecked_t mesq_noop_unexpected_error;
33149 + atomic_long_unchecked_t mesq_noop_lb_overflow;
33150 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
33151 + atomic_long_unchecked_t mesq_noop_amo_nacked;
33152 + atomic_long_unchecked_t mesq_noop_put_nacked;
33153
33154 };
33155
33156 @@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start
33157 cchop_deallocate, tghop_invalidate, mcsop_last};
33158
33159 struct mcs_op_statistic {
33160 - atomic_long_t count;
33161 - atomic_long_t total;
33162 + atomic_long_unchecked_t count;
33163 + atomic_long_unchecked_t total;
33164 unsigned long max;
33165 };
33166
33167 @@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_st
33168
33169 #define STAT(id) do { \
33170 if (gru_options & OPT_STATS) \
33171 - atomic_long_inc(&gru_stats.id); \
33172 + atomic_long_inc_unchecked(&gru_stats.id); \
33173 } while (0)
33174
33175 #ifdef CONFIG_SGI_GRU_DEBUG
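The sgi-gru hunk above converts every statistics counter in struct gru_stats_s (and struct mcs_op_statistic) from atomic_long_t to atomic_long_unchecked_t, and switches the STAT() macro to atomic_long_inc_unchecked(). Under PaX REFCOUNT the ordinary atomic types trap on overflow to catch reference-count bugs; counters that only ever feed statistics are moved to the unchecked variants so a wrapping counter cannot raise a false alarm. A minimal sketch of the distinction, simplified from the per-arch definitions introduced elsewhere in this patch:

	/* sketch only -- the real definitions live in the per-arch atomic headers */
	typedef struct { long counter; } atomic_long_unchecked_t;

	static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *v)
	{
		/* plain, non-trapping increment; harmless for statistics if it wraps.
		 * illustration only -- the real helper uses the arch's atomic add. */
		v->counter++;
	}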
33176 diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xpc.h linux-2.6.32.45/drivers/misc/sgi-xp/xpc.h
33177 --- linux-2.6.32.45/drivers/misc/sgi-xp/xpc.h 2011-03-27 14:31:47.000000000 -0400
33178 +++ linux-2.6.32.45/drivers/misc/sgi-xp/xpc.h 2011-08-05 20:33:55.000000000 -0400
33179 @@ -876,7 +876,7 @@ extern struct xpc_registration xpc_regis
33180 /* found in xpc_main.c */
33181 extern struct device *xpc_part;
33182 extern struct device *xpc_chan;
33183 -extern struct xpc_arch_operations xpc_arch_ops;
33184 +extern const struct xpc_arch_operations xpc_arch_ops;
33185 extern int xpc_disengage_timelimit;
33186 extern int xpc_disengage_timedout;
33187 extern int xpc_activate_IRQ_rcvd;
33188 diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xpc_main.c linux-2.6.32.45/drivers/misc/sgi-xp/xpc_main.c
33189 --- linux-2.6.32.45/drivers/misc/sgi-xp/xpc_main.c 2011-03-27 14:31:47.000000000 -0400
33190 +++ linux-2.6.32.45/drivers/misc/sgi-xp/xpc_main.c 2011-08-05 20:33:55.000000000 -0400
33191 @@ -169,7 +169,7 @@ static struct notifier_block xpc_die_not
33192 .notifier_call = xpc_system_die,
33193 };
33194
33195 -struct xpc_arch_operations xpc_arch_ops;
33196 +const struct xpc_arch_operations xpc_arch_ops;
33197
33198 /*
33199 * Timer function to enforce the timelimit on the partition disengage.
33200 diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xpc_sn2.c linux-2.6.32.45/drivers/misc/sgi-xp/xpc_sn2.c
33201 --- linux-2.6.32.45/drivers/misc/sgi-xp/xpc_sn2.c 2011-03-27 14:31:47.000000000 -0400
33202 +++ linux-2.6.32.45/drivers/misc/sgi-xp/xpc_sn2.c 2011-08-05 20:33:55.000000000 -0400
33203 @@ -2350,7 +2350,7 @@ xpc_received_payload_sn2(struct xpc_chan
33204 xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
33205 }
33206
33207 -static struct xpc_arch_operations xpc_arch_ops_sn2 = {
33208 +static const struct xpc_arch_operations xpc_arch_ops_sn2 = {
33209 .setup_partitions = xpc_setup_partitions_sn2,
33210 .teardown_partitions = xpc_teardown_partitions_sn2,
33211 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
33212 @@ -2413,7 +2413,9 @@ xpc_init_sn2(void)
33213 int ret;
33214 size_t buf_size;
33215
33216 - xpc_arch_ops = xpc_arch_ops_sn2;
33217 + pax_open_kernel();
33218 + memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_sn2, sizeof(xpc_arch_ops_sn2));
33219 + pax_close_kernel();
33220
33221 if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
33222 dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
33223 diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xpc_uv.c linux-2.6.32.45/drivers/misc/sgi-xp/xpc_uv.c
33224 --- linux-2.6.32.45/drivers/misc/sgi-xp/xpc_uv.c 2011-03-27 14:31:47.000000000 -0400
33225 +++ linux-2.6.32.45/drivers/misc/sgi-xp/xpc_uv.c 2011-08-05 20:33:55.000000000 -0400
33226 @@ -1669,7 +1669,7 @@ xpc_received_payload_uv(struct xpc_chann
33227 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
33228 }
33229
33230 -static struct xpc_arch_operations xpc_arch_ops_uv = {
33231 +static const struct xpc_arch_operations xpc_arch_ops_uv = {
33232 .setup_partitions = xpc_setup_partitions_uv,
33233 .teardown_partitions = xpc_teardown_partitions_uv,
33234 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
33235 @@ -1729,7 +1729,9 @@ static struct xpc_arch_operations xpc_ar
33236 int
33237 xpc_init_uv(void)
33238 {
33239 - xpc_arch_ops = xpc_arch_ops_uv;
33240 + pax_open_kernel();
33241 + memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_uv, sizeof(xpc_arch_ops_uv));
33242 + pax_close_kernel();
33243
33244 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
33245 dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
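The three xpc hunks above make the global xpc_arch_ops table const so its function pointers end up in read-only data, and replace the one-time assignment in xpc_init_sn2()/xpc_init_uv() with a memcpy bracketed by pax_open_kernel()/pax_close_kernel(), the PaX helpers that briefly lift write protection on otherwise read-only kernel data. The shape of the pattern, with placeholder names:

	static const struct xpc_arch_operations my_arch_ops = {
		/* .setup_partitions = ..., .teardown_partitions = ..., ... */
	};

	pax_open_kernel();	/* temporarily allow writes to r/o kernel data */
	memcpy((void *)&xpc_arch_ops, &my_arch_ops, sizeof(my_arch_ops));
	pax_close_kernel();	/* restore the protection */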
33246 diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xp.h linux-2.6.32.45/drivers/misc/sgi-xp/xp.h
33247 --- linux-2.6.32.45/drivers/misc/sgi-xp/xp.h 2011-03-27 14:31:47.000000000 -0400
33248 +++ linux-2.6.32.45/drivers/misc/sgi-xp/xp.h 2011-08-05 20:33:55.000000000 -0400
33249 @@ -289,7 +289,7 @@ struct xpc_interface {
33250 xpc_notify_func, void *);
33251 void (*received) (short, int, void *);
33252 enum xp_retval (*partid_to_nasids) (short, void *);
33253 -};
33254 +} __no_const;
33255
33256 extern struct xpc_interface xpc_interface;
33257
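The __no_const annotation added to struct xpc_interface opts the structure out of the PaX constify plugin: structures consisting of function pointers are normally forced const at compile time, but this one is legitimately rewritten at runtime when the xp layer registers and clears its callbacks, so it must keep a writable layout. A sketch of how the marker is typically wired up (the exact config guard here is an assumption):

	#ifdef CONSTIFY_PLUGIN
	# define __no_const __attribute__((no_const))	/* plugin leaves this type writable */
	#else
	# define __no_const
	#endif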
33258 diff -urNp linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0001.c linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0001.c
33259 --- linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0001.c 2011-03-27 14:31:47.000000000 -0400
33260 +++ linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0001.c 2011-05-16 21:46:57.000000000 -0400
33261 @@ -743,6 +743,8 @@ static int chip_ready (struct map_info *
33262 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
33263 unsigned long timeo = jiffies + HZ;
33264
33265 + pax_track_stack();
33266 +
33267 /* Prevent setting state FL_SYNCING for chip in suspended state. */
33268 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
33269 goto sleep;
33270 @@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(stru
33271 unsigned long initial_adr;
33272 int initial_len = len;
33273
33274 + pax_track_stack();
33275 +
33276 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
33277 adr += chip->start;
33278 initial_adr = adr;
33279 @@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(st
33280 int retries = 3;
33281 int ret;
33282
33283 + pax_track_stack();
33284 +
33285 adr += chip->start;
33286
33287 retry:
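Here and in many hunks that follow, pax_track_stack() is inserted at the top of functions that place large buffers or structures on the kernel stack (the CFI command-set state machines, the ftl/inftl/nftl mount scans, several NIC probe and self-test paths, and so on). With the corresponding PaX stack instrumentation enabled, the call accounts for and sanitizes the deep stack usage of these functions; when the feature is off it compiles away. A plausible no-op fallback, shown only to make the pattern concrete:

	/* assumption: disabled builds stub the call out roughly like this */
	#ifndef CONFIG_PAX_MEMORY_STACKLEAK
	# define pax_track_stack() do { } while (0)
	#endif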
33288 diff -urNp linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0020.c linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0020.c
33289 --- linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0020.c 2011-03-27 14:31:47.000000000 -0400
33290 +++ linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0020.c 2011-05-16 21:46:57.000000000 -0400
33291 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
33292 unsigned long cmd_addr;
33293 struct cfi_private *cfi = map->fldrv_priv;
33294
33295 + pax_track_stack();
33296 +
33297 adr += chip->start;
33298
33299 /* Ensure cmd read/writes are aligned. */
33300 @@ -428,6 +430,8 @@ static inline int do_write_buffer(struct
33301 DECLARE_WAITQUEUE(wait, current);
33302 int wbufsize, z;
33303
33304 + pax_track_stack();
33305 +
33306 /* M58LW064A requires bus alignment for buffer wriets -- saw */
33307 if (adr & (map_bankwidth(map)-1))
33308 return -EINVAL;
33309 @@ -742,6 +746,8 @@ static inline int do_erase_oneblock(stru
33310 DECLARE_WAITQUEUE(wait, current);
33311 int ret = 0;
33312
33313 + pax_track_stack();
33314 +
33315 adr += chip->start;
33316
33317 /* Let's determine this according to the interleave only once */
33318 @@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struc
33319 unsigned long timeo = jiffies + HZ;
33320 DECLARE_WAITQUEUE(wait, current);
33321
33322 + pax_track_stack();
33323 +
33324 adr += chip->start;
33325
33326 /* Let's determine this according to the interleave only once */
33327 @@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(str
33328 unsigned long timeo = jiffies + HZ;
33329 DECLARE_WAITQUEUE(wait, current);
33330
33331 + pax_track_stack();
33332 +
33333 adr += chip->start;
33334
33335 /* Let's determine this according to the interleave only once */
33336 diff -urNp linux-2.6.32.45/drivers/mtd/devices/doc2000.c linux-2.6.32.45/drivers/mtd/devices/doc2000.c
33337 --- linux-2.6.32.45/drivers/mtd/devices/doc2000.c 2011-03-27 14:31:47.000000000 -0400
33338 +++ linux-2.6.32.45/drivers/mtd/devices/doc2000.c 2011-04-17 15:56:46.000000000 -0400
33339 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
33340
33341 /* The ECC will not be calculated correctly if less than 512 is written */
33342 /* DBB-
33343 - if (len != 0x200 && eccbuf)
33344 + if (len != 0x200)
33345 printk(KERN_WARNING
33346 "ECC needs a full sector write (adr: %lx size %lx)\n",
33347 (long) to, (long) len);
33348 diff -urNp linux-2.6.32.45/drivers/mtd/devices/doc2001.c linux-2.6.32.45/drivers/mtd/devices/doc2001.c
33349 --- linux-2.6.32.45/drivers/mtd/devices/doc2001.c 2011-03-27 14:31:47.000000000 -0400
33350 +++ linux-2.6.32.45/drivers/mtd/devices/doc2001.c 2011-04-17 15:56:46.000000000 -0400
33351 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
33352 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
33353
33354 /* Don't allow read past end of device */
33355 - if (from >= this->totlen)
33356 + if (from >= this->totlen || !len)
33357 return -EINVAL;
33358
33359 /* Don't allow a single read to cross a 512-byte block boundary */
33360 diff -urNp linux-2.6.32.45/drivers/mtd/ftl.c linux-2.6.32.45/drivers/mtd/ftl.c
33361 --- linux-2.6.32.45/drivers/mtd/ftl.c 2011-03-27 14:31:47.000000000 -0400
33362 +++ linux-2.6.32.45/drivers/mtd/ftl.c 2011-05-16 21:46:57.000000000 -0400
33363 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
33364 loff_t offset;
33365 uint16_t srcunitswap = cpu_to_le16(srcunit);
33366
33367 + pax_track_stack();
33368 +
33369 eun = &part->EUNInfo[srcunit];
33370 xfer = &part->XferInfo[xferunit];
33371 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
33372 diff -urNp linux-2.6.32.45/drivers/mtd/inftlcore.c linux-2.6.32.45/drivers/mtd/inftlcore.c
33373 --- linux-2.6.32.45/drivers/mtd/inftlcore.c 2011-03-27 14:31:47.000000000 -0400
33374 +++ linux-2.6.32.45/drivers/mtd/inftlcore.c 2011-05-16 21:46:57.000000000 -0400
33375 @@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLr
33376 struct inftl_oob oob;
33377 size_t retlen;
33378
33379 + pax_track_stack();
33380 +
33381 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
33382 "pending=%d)\n", inftl, thisVUC, pendingblock);
33383
33384 diff -urNp linux-2.6.32.45/drivers/mtd/inftlmount.c linux-2.6.32.45/drivers/mtd/inftlmount.c
33385 --- linux-2.6.32.45/drivers/mtd/inftlmount.c 2011-03-27 14:31:47.000000000 -0400
33386 +++ linux-2.6.32.45/drivers/mtd/inftlmount.c 2011-05-16 21:46:57.000000000 -0400
33387 @@ -54,6 +54,8 @@ static int find_boot_record(struct INFTL
33388 struct INFTLPartition *ip;
33389 size_t retlen;
33390
33391 + pax_track_stack();
33392 +
33393 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
33394
33395 /*
33396 diff -urNp linux-2.6.32.45/drivers/mtd/lpddr/qinfo_probe.c linux-2.6.32.45/drivers/mtd/lpddr/qinfo_probe.c
33397 --- linux-2.6.32.45/drivers/mtd/lpddr/qinfo_probe.c 2011-03-27 14:31:47.000000000 -0400
33398 +++ linux-2.6.32.45/drivers/mtd/lpddr/qinfo_probe.c 2011-05-16 21:46:57.000000000 -0400
33399 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
33400 {
33401 map_word pfow_val[4];
33402
33403 + pax_track_stack();
33404 +
33405 /* Check identification string */
33406 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
33407 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
33408 diff -urNp linux-2.6.32.45/drivers/mtd/mtdchar.c linux-2.6.32.45/drivers/mtd/mtdchar.c
33409 --- linux-2.6.32.45/drivers/mtd/mtdchar.c 2011-03-27 14:31:47.000000000 -0400
33410 +++ linux-2.6.32.45/drivers/mtd/mtdchar.c 2011-05-16 21:46:57.000000000 -0400
33411 @@ -460,6 +460,8 @@ static int mtd_ioctl(struct inode *inode
33412 u_long size;
33413 struct mtd_info_user info;
33414
33415 + pax_track_stack();
33416 +
33417 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
33418
33419 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
33420 diff -urNp linux-2.6.32.45/drivers/mtd/nftlcore.c linux-2.6.32.45/drivers/mtd/nftlcore.c
33421 --- linux-2.6.32.45/drivers/mtd/nftlcore.c 2011-03-27 14:31:47.000000000 -0400
33422 +++ linux-2.6.32.45/drivers/mtd/nftlcore.c 2011-05-16 21:46:57.000000000 -0400
33423 @@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLre
33424 int inplace = 1;
33425 size_t retlen;
33426
33427 + pax_track_stack();
33428 +
33429 memset(BlockMap, 0xff, sizeof(BlockMap));
33430 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
33431
33432 diff -urNp linux-2.6.32.45/drivers/mtd/nftlmount.c linux-2.6.32.45/drivers/mtd/nftlmount.c
33433 --- linux-2.6.32.45/drivers/mtd/nftlmount.c 2011-03-27 14:31:47.000000000 -0400
33434 +++ linux-2.6.32.45/drivers/mtd/nftlmount.c 2011-05-18 20:09:37.000000000 -0400
33435 @@ -23,6 +23,7 @@
33436 #include <asm/errno.h>
33437 #include <linux/delay.h>
33438 #include <linux/slab.h>
33439 +#include <linux/sched.h>
33440 #include <linux/mtd/mtd.h>
33441 #include <linux/mtd/nand.h>
33442 #include <linux/mtd/nftl.h>
33443 @@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLr
33444 struct mtd_info *mtd = nftl->mbd.mtd;
33445 unsigned int i;
33446
33447 + pax_track_stack();
33448 +
33449 /* Assume logical EraseSize == physical erasesize for starting the scan.
33450 We'll sort it out later if we find a MediaHeader which says otherwise */
33451 /* Actually, we won't. The new DiskOnChip driver has already scanned
33452 diff -urNp linux-2.6.32.45/drivers/mtd/ubi/build.c linux-2.6.32.45/drivers/mtd/ubi/build.c
33453 --- linux-2.6.32.45/drivers/mtd/ubi/build.c 2011-03-27 14:31:47.000000000 -0400
33454 +++ linux-2.6.32.45/drivers/mtd/ubi/build.c 2011-04-17 15:56:46.000000000 -0400
33455 @@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
33456 static int __init bytes_str_to_int(const char *str)
33457 {
33458 char *endp;
33459 - unsigned long result;
33460 + unsigned long result, scale = 1;
33461
33462 result = simple_strtoul(str, &endp, 0);
33463 if (str == endp || result >= INT_MAX) {
33464 @@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const
33465
33466 switch (*endp) {
33467 case 'G':
33468 - result *= 1024;
33469 + scale *= 1024;
33470 case 'M':
33471 - result *= 1024;
33472 + scale *= 1024;
33473 case 'K':
33474 - result *= 1024;
33475 + scale *= 1024;
33476 if (endp[1] == 'i' && endp[2] == 'B')
33477 endp += 2;
33478 case '\0':
33479 @@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const
33480 return -EINVAL;
33481 }
33482
33483 - return result;
33484 + if ((intoverflow_t)result*scale >= INT_MAX) {
33485 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
33486 + str);
33487 + return -EINVAL;
33488 + }
33489 +
33490 + return result*scale;
33491 }
33492
33493 /**
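The build.c hunk above hardens UBI's bytes_str_to_int() against integer overflow: instead of multiplying result in place as the switch deliberately falls through the 'G'/'M'/'K' cases, the suffix now accumulates into a separate scale factor, and the final product is validated in the wider intoverflow_t type before being returned, so an over-large module parameter yields -EINVAL rather than a silently wrapped int. The check behaves like this (illustrative values; intoverflow_t itself is defined elsewhere in this patch):

	unsigned long result = 3, scale = 1024UL * 1024 * 1024;	/* e.g. "3GiB" */
	if ((unsigned long long)result * scale >= INT_MAX)
		return -EINVAL;		/* too big for the int return type */
	return result * scale;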
33494 diff -urNp linux-2.6.32.45/drivers/net/bnx2.c linux-2.6.32.45/drivers/net/bnx2.c
33495 --- linux-2.6.32.45/drivers/net/bnx2.c 2011-03-27 14:31:47.000000000 -0400
33496 +++ linux-2.6.32.45/drivers/net/bnx2.c 2011-05-16 21:46:57.000000000 -0400
33497 @@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
33498 int rc = 0;
33499 u32 magic, csum;
33500
33501 + pax_track_stack();
33502 +
33503 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
33504 goto test_nvram_done;
33505
33506 diff -urNp linux-2.6.32.45/drivers/net/cxgb3/l2t.h linux-2.6.32.45/drivers/net/cxgb3/l2t.h
33507 --- linux-2.6.32.45/drivers/net/cxgb3/l2t.h 2011-03-27 14:31:47.000000000 -0400
33508 +++ linux-2.6.32.45/drivers/net/cxgb3/l2t.h 2011-08-05 20:33:55.000000000 -0400
33509 @@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)
33510 */
33511 struct l2t_skb_cb {
33512 arp_failure_handler_func arp_failure_handler;
33513 -};
33514 +} __no_const;
33515
33516 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
33517
33518 diff -urNp linux-2.6.32.45/drivers/net/cxgb3/t3_hw.c linux-2.6.32.45/drivers/net/cxgb3/t3_hw.c
33519 --- linux-2.6.32.45/drivers/net/cxgb3/t3_hw.c 2011-03-27 14:31:47.000000000 -0400
33520 +++ linux-2.6.32.45/drivers/net/cxgb3/t3_hw.c 2011-05-16 21:46:57.000000000 -0400
33521 @@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter
33522 int i, addr, ret;
33523 struct t3_vpd vpd;
33524
33525 + pax_track_stack();
33526 +
33527 /*
33528 * Card information is normally at VPD_BASE but some early cards had
33529 * it at 0.
33530 diff -urNp linux-2.6.32.45/drivers/net/e1000e/82571.c linux-2.6.32.45/drivers/net/e1000e/82571.c
33531 --- linux-2.6.32.45/drivers/net/e1000e/82571.c 2011-03-27 14:31:47.000000000 -0400
33532 +++ linux-2.6.32.45/drivers/net/e1000e/82571.c 2011-08-23 21:22:32.000000000 -0400
33533 @@ -212,7 +212,7 @@ static s32 e1000_init_mac_params_82571(s
33534 {
33535 struct e1000_hw *hw = &adapter->hw;
33536 struct e1000_mac_info *mac = &hw->mac;
33537 - struct e1000_mac_operations *func = &mac->ops;
33538 + e1000_mac_operations_no_const *func = &mac->ops;
33539 u32 swsm = 0;
33540 u32 swsm2 = 0;
33541 bool force_clear_smbi = false;
33542 @@ -1656,7 +1656,7 @@ static void e1000_clear_hw_cntrs_82571(s
33543 temp = er32(ICRXDMTC);
33544 }
33545
33546 -static struct e1000_mac_operations e82571_mac_ops = {
33547 +static const struct e1000_mac_operations e82571_mac_ops = {
33548 /* .check_mng_mode: mac type dependent */
33549 /* .check_for_link: media type dependent */
33550 .id_led_init = e1000e_id_led_init,
33551 @@ -1674,7 +1674,7 @@ static struct e1000_mac_operations e8257
33552 .setup_led = e1000e_setup_led_generic,
33553 };
33554
33555 -static struct e1000_phy_operations e82_phy_ops_igp = {
33556 +static const struct e1000_phy_operations e82_phy_ops_igp = {
33557 .acquire_phy = e1000_get_hw_semaphore_82571,
33558 .check_reset_block = e1000e_check_reset_block_generic,
33559 .commit_phy = NULL,
33560 @@ -1691,7 +1691,7 @@ static struct e1000_phy_operations e82_p
33561 .cfg_on_link_up = NULL,
33562 };
33563
33564 -static struct e1000_phy_operations e82_phy_ops_m88 = {
33565 +static const struct e1000_phy_operations e82_phy_ops_m88 = {
33566 .acquire_phy = e1000_get_hw_semaphore_82571,
33567 .check_reset_block = e1000e_check_reset_block_generic,
33568 .commit_phy = e1000e_phy_sw_reset,
33569 @@ -1708,7 +1708,7 @@ static struct e1000_phy_operations e82_p
33570 .cfg_on_link_up = NULL,
33571 };
33572
33573 -static struct e1000_phy_operations e82_phy_ops_bm = {
33574 +static const struct e1000_phy_operations e82_phy_ops_bm = {
33575 .acquire_phy = e1000_get_hw_semaphore_82571,
33576 .check_reset_block = e1000e_check_reset_block_generic,
33577 .commit_phy = e1000e_phy_sw_reset,
33578 @@ -1725,7 +1725,7 @@ static struct e1000_phy_operations e82_p
33579 .cfg_on_link_up = NULL,
33580 };
33581
33582 -static struct e1000_nvm_operations e82571_nvm_ops = {
33583 +static const struct e1000_nvm_operations e82571_nvm_ops = {
33584 .acquire_nvm = e1000_acquire_nvm_82571,
33585 .read_nvm = e1000e_read_nvm_eerd,
33586 .release_nvm = e1000_release_nvm_82571,
33587 diff -urNp linux-2.6.32.45/drivers/net/e1000e/e1000.h linux-2.6.32.45/drivers/net/e1000e/e1000.h
33588 --- linux-2.6.32.45/drivers/net/e1000e/e1000.h 2011-03-27 14:31:47.000000000 -0400
33589 +++ linux-2.6.32.45/drivers/net/e1000e/e1000.h 2011-04-17 15:56:46.000000000 -0400
33590 @@ -375,9 +375,9 @@ struct e1000_info {
33591 u32 pba;
33592 u32 max_hw_frame_size;
33593 s32 (*get_variants)(struct e1000_adapter *);
33594 - struct e1000_mac_operations *mac_ops;
33595 - struct e1000_phy_operations *phy_ops;
33596 - struct e1000_nvm_operations *nvm_ops;
33597 + const struct e1000_mac_operations *mac_ops;
33598 + const struct e1000_phy_operations *phy_ops;
33599 + const struct e1000_nvm_operations *nvm_ops;
33600 };
33601
33602 /* hardware capability, feature, and workaround flags */
33603 diff -urNp linux-2.6.32.45/drivers/net/e1000e/es2lan.c linux-2.6.32.45/drivers/net/e1000e/es2lan.c
33604 --- linux-2.6.32.45/drivers/net/e1000e/es2lan.c 2011-03-27 14:31:47.000000000 -0400
33605 +++ linux-2.6.32.45/drivers/net/e1000e/es2lan.c 2011-08-23 21:22:32.000000000 -0400
33606 @@ -207,7 +207,7 @@ static s32 e1000_init_mac_params_80003es
33607 {
33608 struct e1000_hw *hw = &adapter->hw;
33609 struct e1000_mac_info *mac = &hw->mac;
33610 - struct e1000_mac_operations *func = &mac->ops;
33611 + e1000_mac_operations_no_const *func = &mac->ops;
33612
33613 /* Set media type */
33614 switch (adapter->pdev->device) {
33615 @@ -1365,7 +1365,7 @@ static void e1000_clear_hw_cntrs_80003es
33616 temp = er32(ICRXDMTC);
33617 }
33618
33619 -static struct e1000_mac_operations es2_mac_ops = {
33620 +static const struct e1000_mac_operations es2_mac_ops = {
33621 .id_led_init = e1000e_id_led_init,
33622 .check_mng_mode = e1000e_check_mng_mode_generic,
33623 /* check_for_link dependent on media type */
33624 @@ -1383,7 +1383,7 @@ static struct e1000_mac_operations es2_m
33625 .setup_led = e1000e_setup_led_generic,
33626 };
33627
33628 -static struct e1000_phy_operations es2_phy_ops = {
33629 +static const struct e1000_phy_operations es2_phy_ops = {
33630 .acquire_phy = e1000_acquire_phy_80003es2lan,
33631 .check_reset_block = e1000e_check_reset_block_generic,
33632 .commit_phy = e1000e_phy_sw_reset,
33633 @@ -1400,7 +1400,7 @@ static struct e1000_phy_operations es2_p
33634 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
33635 };
33636
33637 -static struct e1000_nvm_operations es2_nvm_ops = {
33638 +static const struct e1000_nvm_operations es2_nvm_ops = {
33639 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
33640 .read_nvm = e1000e_read_nvm_eerd,
33641 .release_nvm = e1000_release_nvm_80003es2lan,
33642 diff -urNp linux-2.6.32.45/drivers/net/e1000e/hw.h linux-2.6.32.45/drivers/net/e1000e/hw.h
33643 --- linux-2.6.32.45/drivers/net/e1000e/hw.h 2011-03-27 14:31:47.000000000 -0400
33644 +++ linux-2.6.32.45/drivers/net/e1000e/hw.h 2011-08-23 21:27:38.000000000 -0400
33645 @@ -753,6 +753,7 @@ struct e1000_mac_operations {
33646 s32 (*setup_physical_interface)(struct e1000_hw *);
33647 s32 (*setup_led)(struct e1000_hw *);
33648 };
33649 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
33650
33651 /* Function pointers for the PHY. */
33652 struct e1000_phy_operations {
33653 @@ -774,6 +775,7 @@ struct e1000_phy_operations {
33654 s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
33655 s32 (*cfg_on_link_up)(struct e1000_hw *);
33656 };
33657 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
33658
33659 /* Function pointers for the NVM. */
33660 struct e1000_nvm_operations {
33661 @@ -785,9 +787,10 @@ struct e1000_nvm_operations {
33662 s32 (*validate_nvm)(struct e1000_hw *);
33663 s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
33664 };
33665 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
33666
33667 struct e1000_mac_info {
33668 - struct e1000_mac_operations ops;
33669 + e1000_mac_operations_no_const ops;
33670
33671 u8 addr[6];
33672 u8 perm_addr[6];
33673 @@ -823,7 +826,7 @@ struct e1000_mac_info {
33674 };
33675
33676 struct e1000_phy_info {
33677 - struct e1000_phy_operations ops;
33678 + e1000_phy_operations_no_const ops;
33679
33680 enum e1000_phy_type type;
33681
33682 @@ -857,7 +860,7 @@ struct e1000_phy_info {
33683 };
33684
33685 struct e1000_nvm_info {
33686 - struct e1000_nvm_operations ops;
33687 + e1000_nvm_operations_no_const ops;
33688
33689 enum e1000_nvm_type type;
33690 enum e1000_nvm_override override;
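The hw.h hunk above completes the constification scheme for e1000e (the igb, igbvf and ixgbe headers below repeat it): the standalone operation tables such as e82571_mac_ops become const, while the copies embedded in e1000_mac_info/e1000_phy_info/e1000_nvm_info are retyped through a *_no_const typedef, because the driver patches individual function pointers in those embedded copies at probe time. The typedef keeps one shared structure definition but exempts only the embedded instance from the constify plugin. In outline, with shortened placeholder names:

	struct hw;						/* placeholder */
	struct mac_ops { s32 (*init_hw)(struct hw *); };
	typedef struct mac_ops __no_const mac_ops_no_const;

	static s32 chip_init_hw(struct hw *hw) { (void)hw; return 0; }	/* stand-in */
	static const struct mac_ops chip_mac_ops = {		/* read-only table */
		.init_hw = chip_init_hw,
	};

	struct mac_info {
		mac_ops_no_const ops;	/* per-device copy, still written at probe time */
	};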
33691 diff -urNp linux-2.6.32.45/drivers/net/e1000e/ich8lan.c linux-2.6.32.45/drivers/net/e1000e/ich8lan.c
33692 --- linux-2.6.32.45/drivers/net/e1000e/ich8lan.c 2011-05-10 22:12:01.000000000 -0400
33693 +++ linux-2.6.32.45/drivers/net/e1000e/ich8lan.c 2011-08-23 21:22:32.000000000 -0400
33694 @@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan
33695 }
33696 }
33697
33698 -static struct e1000_mac_operations ich8_mac_ops = {
33699 +static const struct e1000_mac_operations ich8_mac_ops = {
33700 .id_led_init = e1000e_id_led_init,
33701 .check_mng_mode = e1000_check_mng_mode_ich8lan,
33702 .check_for_link = e1000_check_for_copper_link_ich8lan,
33703 @@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_
33704 /* id_led_init dependent on mac type */
33705 };
33706
33707 -static struct e1000_phy_operations ich8_phy_ops = {
33708 +static const struct e1000_phy_operations ich8_phy_ops = {
33709 .acquire_phy = e1000_acquire_swflag_ich8lan,
33710 .check_reset_block = e1000_check_reset_block_ich8lan,
33711 .commit_phy = NULL,
33712 @@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_
33713 .write_phy_reg = e1000e_write_phy_reg_igp,
33714 };
33715
33716 -static struct e1000_nvm_operations ich8_nvm_ops = {
33717 +static const struct e1000_nvm_operations ich8_nvm_ops = {
33718 .acquire_nvm = e1000_acquire_nvm_ich8lan,
33719 .read_nvm = e1000_read_nvm_ich8lan,
33720 .release_nvm = e1000_release_nvm_ich8lan,
33721 diff -urNp linux-2.6.32.45/drivers/net/hamradio/6pack.c linux-2.6.32.45/drivers/net/hamradio/6pack.c
33722 --- linux-2.6.32.45/drivers/net/hamradio/6pack.c 2011-07-13 17:23:04.000000000 -0400
33723 +++ linux-2.6.32.45/drivers/net/hamradio/6pack.c 2011-07-13 17:23:18.000000000 -0400
33724 @@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct t
33725 unsigned char buf[512];
33726 int count1;
33727
33728 + pax_track_stack();
33729 +
33730 if (!count)
33731 return;
33732
33733 diff -urNp linux-2.6.32.45/drivers/net/ibmveth.c linux-2.6.32.45/drivers/net/ibmveth.c
33734 --- linux-2.6.32.45/drivers/net/ibmveth.c 2011-03-27 14:31:47.000000000 -0400
33735 +++ linux-2.6.32.45/drivers/net/ibmveth.c 2011-04-17 15:56:46.000000000 -0400
33736 @@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attr
33737 NULL,
33738 };
33739
33740 -static struct sysfs_ops veth_pool_ops = {
33741 +static const struct sysfs_ops veth_pool_ops = {
33742 .show = veth_pool_show,
33743 .store = veth_pool_store,
33744 };
33745 diff -urNp linux-2.6.32.45/drivers/net/igb/e1000_82575.c linux-2.6.32.45/drivers/net/igb/e1000_82575.c
33746 --- linux-2.6.32.45/drivers/net/igb/e1000_82575.c 2011-03-27 14:31:47.000000000 -0400
33747 +++ linux-2.6.32.45/drivers/net/igb/e1000_82575.c 2011-08-23 21:22:32.000000000 -0400
33748 @@ -1410,7 +1410,7 @@ void igb_vmdq_set_replication_pf(struct
33749 wr32(E1000_VT_CTL, vt_ctl);
33750 }
33751
33752 -static struct e1000_mac_operations e1000_mac_ops_82575 = {
33753 +static const struct e1000_mac_operations e1000_mac_ops_82575 = {
33754 .reset_hw = igb_reset_hw_82575,
33755 .init_hw = igb_init_hw_82575,
33756 .check_for_link = igb_check_for_link_82575,
33757 @@ -1419,13 +1419,13 @@ static struct e1000_mac_operations e1000
33758 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
33759 };
33760
33761 -static struct e1000_phy_operations e1000_phy_ops_82575 = {
33762 +static const struct e1000_phy_operations e1000_phy_ops_82575 = {
33763 .acquire = igb_acquire_phy_82575,
33764 .get_cfg_done = igb_get_cfg_done_82575,
33765 .release = igb_release_phy_82575,
33766 };
33767
33768 -static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
33769 +static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
33770 .acquire = igb_acquire_nvm_82575,
33771 .read = igb_read_nvm_eerd,
33772 .release = igb_release_nvm_82575,
33773 diff -urNp linux-2.6.32.45/drivers/net/igb/e1000_hw.h linux-2.6.32.45/drivers/net/igb/e1000_hw.h
33774 --- linux-2.6.32.45/drivers/net/igb/e1000_hw.h 2011-03-27 14:31:47.000000000 -0400
33775 +++ linux-2.6.32.45/drivers/net/igb/e1000_hw.h 2011-08-23 21:28:01.000000000 -0400
33776 @@ -288,6 +288,7 @@ struct e1000_mac_operations {
33777 s32 (*read_mac_addr)(struct e1000_hw *);
33778 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
33779 };
33780 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
33781
33782 struct e1000_phy_operations {
33783 s32 (*acquire)(struct e1000_hw *);
33784 @@ -303,6 +304,7 @@ struct e1000_phy_operations {
33785 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
33786 s32 (*write_reg)(struct e1000_hw *, u32, u16);
33787 };
33788 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
33789
33790 struct e1000_nvm_operations {
33791 s32 (*acquire)(struct e1000_hw *);
33792 @@ -310,6 +312,7 @@ struct e1000_nvm_operations {
33793 void (*release)(struct e1000_hw *);
33794 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
33795 };
33796 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
33797
33798 struct e1000_info {
33799 s32 (*get_invariants)(struct e1000_hw *);
33800 @@ -321,7 +324,7 @@ struct e1000_info {
33801 extern const struct e1000_info e1000_82575_info;
33802
33803 struct e1000_mac_info {
33804 - struct e1000_mac_operations ops;
33805 + e1000_mac_operations_no_const ops;
33806
33807 u8 addr[6];
33808 u8 perm_addr[6];
33809 @@ -365,7 +368,7 @@ struct e1000_mac_info {
33810 };
33811
33812 struct e1000_phy_info {
33813 - struct e1000_phy_operations ops;
33814 + e1000_phy_operations_no_const ops;
33815
33816 enum e1000_phy_type type;
33817
33818 @@ -400,7 +403,7 @@ struct e1000_phy_info {
33819 };
33820
33821 struct e1000_nvm_info {
33822 - struct e1000_nvm_operations ops;
33823 + e1000_nvm_operations_no_const ops;
33824
33825 enum e1000_nvm_type type;
33826 enum e1000_nvm_override override;
33827 @@ -446,6 +449,7 @@ struct e1000_mbx_operations {
33828 s32 (*check_for_ack)(struct e1000_hw *, u16);
33829 s32 (*check_for_rst)(struct e1000_hw *, u16);
33830 };
33831 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
33832
33833 struct e1000_mbx_stats {
33834 u32 msgs_tx;
33835 @@ -457,7 +461,7 @@ struct e1000_mbx_stats {
33836 };
33837
33838 struct e1000_mbx_info {
33839 - struct e1000_mbx_operations ops;
33840 + e1000_mbx_operations_no_const ops;
33841 struct e1000_mbx_stats stats;
33842 u32 timeout;
33843 u32 usec_delay;
33844 diff -urNp linux-2.6.32.45/drivers/net/igbvf/vf.h linux-2.6.32.45/drivers/net/igbvf/vf.h
33845 --- linux-2.6.32.45/drivers/net/igbvf/vf.h 2011-03-27 14:31:47.000000000 -0400
33846 +++ linux-2.6.32.45/drivers/net/igbvf/vf.h 2011-08-23 21:22:38.000000000 -0400
33847 @@ -187,9 +187,10 @@ struct e1000_mac_operations {
33848 s32 (*read_mac_addr)(struct e1000_hw *);
33849 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
33850 };
33851 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
33852
33853 struct e1000_mac_info {
33854 - struct e1000_mac_operations ops;
33855 + e1000_mac_operations_no_const ops;
33856 u8 addr[6];
33857 u8 perm_addr[6];
33858
33859 @@ -211,6 +212,7 @@ struct e1000_mbx_operations {
33860 s32 (*check_for_ack)(struct e1000_hw *);
33861 s32 (*check_for_rst)(struct e1000_hw *);
33862 };
33863 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
33864
33865 struct e1000_mbx_stats {
33866 u32 msgs_tx;
33867 @@ -222,7 +224,7 @@ struct e1000_mbx_stats {
33868 };
33869
33870 struct e1000_mbx_info {
33871 - struct e1000_mbx_operations ops;
33872 + e1000_mbx_operations_no_const ops;
33873 struct e1000_mbx_stats stats;
33874 u32 timeout;
33875 u32 usec_delay;
33876 diff -urNp linux-2.6.32.45/drivers/net/iseries_veth.c linux-2.6.32.45/drivers/net/iseries_veth.c
33877 --- linux-2.6.32.45/drivers/net/iseries_veth.c 2011-03-27 14:31:47.000000000 -0400
33878 +++ linux-2.6.32.45/drivers/net/iseries_veth.c 2011-04-17 15:56:46.000000000 -0400
33879 @@ -384,7 +384,7 @@ static struct attribute *veth_cnx_defaul
33880 NULL
33881 };
33882
33883 -static struct sysfs_ops veth_cnx_sysfs_ops = {
33884 +static const struct sysfs_ops veth_cnx_sysfs_ops = {
33885 .show = veth_cnx_attribute_show
33886 };
33887
33888 @@ -441,7 +441,7 @@ static struct attribute *veth_port_defau
33889 NULL
33890 };
33891
33892 -static struct sysfs_ops veth_port_sysfs_ops = {
33893 +static const struct sysfs_ops veth_port_sysfs_ops = {
33894 .show = veth_port_attribute_show
33895 };
33896
33897 diff -urNp linux-2.6.32.45/drivers/net/ixgb/ixgb_main.c linux-2.6.32.45/drivers/net/ixgb/ixgb_main.c
33898 --- linux-2.6.32.45/drivers/net/ixgb/ixgb_main.c 2011-03-27 14:31:47.000000000 -0400
33899 +++ linux-2.6.32.45/drivers/net/ixgb/ixgb_main.c 2011-05-16 21:46:57.000000000 -0400
33900 @@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev
33901 u32 rctl;
33902 int i;
33903
33904 + pax_track_stack();
33905 +
33906 /* Check for Promiscuous and All Multicast modes */
33907
33908 rctl = IXGB_READ_REG(hw, RCTL);
33909 diff -urNp linux-2.6.32.45/drivers/net/ixgb/ixgb_param.c linux-2.6.32.45/drivers/net/ixgb/ixgb_param.c
33910 --- linux-2.6.32.45/drivers/net/ixgb/ixgb_param.c 2011-03-27 14:31:47.000000000 -0400
33911 +++ linux-2.6.32.45/drivers/net/ixgb/ixgb_param.c 2011-05-16 21:46:57.000000000 -0400
33912 @@ -260,6 +260,9 @@ void __devinit
33913 ixgb_check_options(struct ixgb_adapter *adapter)
33914 {
33915 int bd = adapter->bd_number;
33916 +
33917 + pax_track_stack();
33918 +
33919 if (bd >= IXGB_MAX_NIC) {
33920 printk(KERN_NOTICE
33921 "Warning: no configuration for board #%i\n", bd);
33922 diff -urNp linux-2.6.32.45/drivers/net/ixgbe/ixgbe_type.h linux-2.6.32.45/drivers/net/ixgbe/ixgbe_type.h
33923 --- linux-2.6.32.45/drivers/net/ixgbe/ixgbe_type.h 2011-03-27 14:31:47.000000000 -0400
33924 +++ linux-2.6.32.45/drivers/net/ixgbe/ixgbe_type.h 2011-08-23 21:22:38.000000000 -0400
33925 @@ -2327,6 +2327,7 @@ struct ixgbe_eeprom_operations {
33926 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
33927 s32 (*update_checksum)(struct ixgbe_hw *);
33928 };
33929 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
33930
33931 struct ixgbe_mac_operations {
33932 s32 (*init_hw)(struct ixgbe_hw *);
33933 @@ -2376,6 +2377,7 @@ struct ixgbe_mac_operations {
33934 /* Flow Control */
33935 s32 (*fc_enable)(struct ixgbe_hw *, s32);
33936 };
33937 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
33938
33939 struct ixgbe_phy_operations {
33940 s32 (*identify)(struct ixgbe_hw *);
33941 @@ -2394,9 +2396,10 @@ struct ixgbe_phy_operations {
33942 s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
33943 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
33944 };
33945 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
33946
33947 struct ixgbe_eeprom_info {
33948 - struct ixgbe_eeprom_operations ops;
33949 + ixgbe_eeprom_operations_no_const ops;
33950 enum ixgbe_eeprom_type type;
33951 u32 semaphore_delay;
33952 u16 word_size;
33953 @@ -2404,7 +2407,7 @@ struct ixgbe_eeprom_info {
33954 };
33955
33956 struct ixgbe_mac_info {
33957 - struct ixgbe_mac_operations ops;
33958 + ixgbe_mac_operations_no_const ops;
33959 enum ixgbe_mac_type type;
33960 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
33961 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
33962 @@ -2423,7 +2426,7 @@ struct ixgbe_mac_info {
33963 };
33964
33965 struct ixgbe_phy_info {
33966 - struct ixgbe_phy_operations ops;
33967 + ixgbe_phy_operations_no_const ops;
33968 struct mdio_if_info mdio;
33969 enum ixgbe_phy_type type;
33970 u32 id;
33971 diff -urNp linux-2.6.32.45/drivers/net/mlx4/main.c linux-2.6.32.45/drivers/net/mlx4/main.c
33972 --- linux-2.6.32.45/drivers/net/mlx4/main.c 2011-03-27 14:31:47.000000000 -0400
33973 +++ linux-2.6.32.45/drivers/net/mlx4/main.c 2011-05-18 20:09:37.000000000 -0400
33974 @@ -38,6 +38,7 @@
33975 #include <linux/errno.h>
33976 #include <linux/pci.h>
33977 #include <linux/dma-mapping.h>
33978 +#include <linux/sched.h>
33979
33980 #include <linux/mlx4/device.h>
33981 #include <linux/mlx4/doorbell.h>
33982 @@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev
33983 u64 icm_size;
33984 int err;
33985
33986 + pax_track_stack();
33987 +
33988 err = mlx4_QUERY_FW(dev);
33989 if (err) {
33990 if (err == -EACCES)
33991 diff -urNp linux-2.6.32.45/drivers/net/niu.c linux-2.6.32.45/drivers/net/niu.c
33992 --- linux-2.6.32.45/drivers/net/niu.c 2011-05-10 22:12:01.000000000 -0400
33993 +++ linux-2.6.32.45/drivers/net/niu.c 2011-05-16 21:46:57.000000000 -0400
33994 @@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struc
33995 int i, num_irqs, err;
33996 u8 first_ldg;
33997
33998 + pax_track_stack();
33999 +
34000 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
34001 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
34002 ldg_num_map[i] = first_ldg + i;
34003 diff -urNp linux-2.6.32.45/drivers/net/pcnet32.c linux-2.6.32.45/drivers/net/pcnet32.c
34004 --- linux-2.6.32.45/drivers/net/pcnet32.c 2011-03-27 14:31:47.000000000 -0400
34005 +++ linux-2.6.32.45/drivers/net/pcnet32.c 2011-08-05 20:33:55.000000000 -0400
34006 @@ -79,7 +79,7 @@ static int cards_found;
34007 /*
34008 * VLB I/O addresses
34009 */
34010 -static unsigned int pcnet32_portlist[] __initdata =
34011 +static unsigned int pcnet32_portlist[] __devinitdata =
34012 { 0x300, 0x320, 0x340, 0x360, 0 };
34013
34014 static int pcnet32_debug = 0;
34015 @@ -267,7 +267,7 @@ struct pcnet32_private {
34016 struct sk_buff **rx_skbuff;
34017 dma_addr_t *tx_dma_addr;
34018 dma_addr_t *rx_dma_addr;
34019 - struct pcnet32_access a;
34020 + struct pcnet32_access *a;
34021 spinlock_t lock; /* Guard lock */
34022 unsigned int cur_rx, cur_tx; /* The next free ring entry */
34023 unsigned int rx_ring_size; /* current rx ring size */
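The pcnet32 change above swaps the per-device copy of struct pcnet32_access for a pointer to the shared table; the long run of mechanical hunks that follows rewrites every lp->a.read_csr()/write_csr()/read_bcr()/write_bcr() call accordingly, and pcnet32_probe1() now stores the pointer (lp->a = a) instead of copying the structure. Keeping a single shared table is what later allows it to be treated as read-only like the other operations structures in this patch. In short:

	/* before: lp->a = *a;   val = lp->a.read_csr(ioaddr, CSR0);
	 * after:  lp->a =  a;   val = lp->a->read_csr(ioaddr, CSR0);
	 */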
34024 @@ -457,9 +457,9 @@ static void pcnet32_netif_start(struct n
34025 u16 val;
34026
34027 netif_wake_queue(dev);
34028 - val = lp->a.read_csr(ioaddr, CSR3);
34029 + val = lp->a->read_csr(ioaddr, CSR3);
34030 val &= 0x00ff;
34031 - lp->a.write_csr(ioaddr, CSR3, val);
34032 + lp->a->write_csr(ioaddr, CSR3, val);
34033 napi_enable(&lp->napi);
34034 }
34035
34036 @@ -744,7 +744,7 @@ static u32 pcnet32_get_link(struct net_d
34037 r = mii_link_ok(&lp->mii_if);
34038 } else if (lp->chip_version >= PCNET32_79C970A) {
34039 ulong ioaddr = dev->base_addr; /* card base I/O address */
34040 - r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
34041 + r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
34042 } else { /* can not detect link on really old chips */
34043 r = 1;
34044 }
34045 @@ -806,7 +806,7 @@ static int pcnet32_set_ringparam(struct
34046 pcnet32_netif_stop(dev);
34047
34048 spin_lock_irqsave(&lp->lock, flags);
34049 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34050 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34051
34052 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
34053
34054 @@ -886,7 +886,7 @@ static void pcnet32_ethtool_test(struct
34055 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
34056 {
34057 struct pcnet32_private *lp = netdev_priv(dev);
34058 - struct pcnet32_access *a = &lp->a; /* access to registers */
34059 + struct pcnet32_access *a = lp->a; /* access to registers */
34060 ulong ioaddr = dev->base_addr; /* card base I/O address */
34061 struct sk_buff *skb; /* sk buff */
34062 int x, i; /* counters */
34063 @@ -906,21 +906,21 @@ static int pcnet32_loopback_test(struct
34064 pcnet32_netif_stop(dev);
34065
34066 spin_lock_irqsave(&lp->lock, flags);
34067 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34068 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34069
34070 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
34071
34072 /* Reset the PCNET32 */
34073 - lp->a.reset(ioaddr);
34074 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34075 + lp->a->reset(ioaddr);
34076 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34077
34078 /* switch pcnet32 to 32bit mode */
34079 - lp->a.write_bcr(ioaddr, 20, 2);
34080 + lp->a->write_bcr(ioaddr, 20, 2);
34081
34082 /* purge & init rings but don't actually restart */
34083 pcnet32_restart(dev, 0x0000);
34084
34085 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34086 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34087
34088 /* Initialize Transmit buffers. */
34089 size = data_len + 15;
34090 @@ -966,10 +966,10 @@ static int pcnet32_loopback_test(struct
34091
34092 /* set int loopback in CSR15 */
34093 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
34094 - lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
34095 + lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
34096
34097 teststatus = cpu_to_le16(0x8000);
34098 - lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
34099 + lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
34100
34101 /* Check status of descriptors */
34102 for (x = 0; x < numbuffs; x++) {
34103 @@ -990,7 +990,7 @@ static int pcnet32_loopback_test(struct
34104 }
34105 }
34106
34107 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34108 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34109 wmb();
34110 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
34111 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
34112 @@ -1039,7 +1039,7 @@ static int pcnet32_loopback_test(struct
34113 pcnet32_restart(dev, CSR0_NORMAL);
34114 } else {
34115 pcnet32_purge_rx_ring(dev);
34116 - lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
34117 + lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
34118 }
34119 spin_unlock_irqrestore(&lp->lock, flags);
34120
34121 @@ -1049,7 +1049,7 @@ static int pcnet32_loopback_test(struct
34122 static void pcnet32_led_blink_callback(struct net_device *dev)
34123 {
34124 struct pcnet32_private *lp = netdev_priv(dev);
34125 - struct pcnet32_access *a = &lp->a;
34126 + struct pcnet32_access *a = lp->a;
34127 ulong ioaddr = dev->base_addr;
34128 unsigned long flags;
34129 int i;
34130 @@ -1066,7 +1066,7 @@ static void pcnet32_led_blink_callback(s
34131 static int pcnet32_phys_id(struct net_device *dev, u32 data)
34132 {
34133 struct pcnet32_private *lp = netdev_priv(dev);
34134 - struct pcnet32_access *a = &lp->a;
34135 + struct pcnet32_access *a = lp->a;
34136 ulong ioaddr = dev->base_addr;
34137 unsigned long flags;
34138 int i, regs[4];
34139 @@ -1112,7 +1112,7 @@ static int pcnet32_suspend(struct net_de
34140 {
34141 int csr5;
34142 struct pcnet32_private *lp = netdev_priv(dev);
34143 - struct pcnet32_access *a = &lp->a;
34144 + struct pcnet32_access *a = lp->a;
34145 ulong ioaddr = dev->base_addr;
34146 int ticks;
34147
34148 @@ -1388,8 +1388,8 @@ static int pcnet32_poll(struct napi_stru
34149 spin_lock_irqsave(&lp->lock, flags);
34150 if (pcnet32_tx(dev)) {
34151 /* reset the chip to clear the error condition, then restart */
34152 - lp->a.reset(ioaddr);
34153 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34154 + lp->a->reset(ioaddr);
34155 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34156 pcnet32_restart(dev, CSR0_START);
34157 netif_wake_queue(dev);
34158 }
34159 @@ -1401,12 +1401,12 @@ static int pcnet32_poll(struct napi_stru
34160 __napi_complete(napi);
34161
34162 /* clear interrupt masks */
34163 - val = lp->a.read_csr(ioaddr, CSR3);
34164 + val = lp->a->read_csr(ioaddr, CSR3);
34165 val &= 0x00ff;
34166 - lp->a.write_csr(ioaddr, CSR3, val);
34167 + lp->a->write_csr(ioaddr, CSR3, val);
34168
34169 /* Set interrupt enable. */
34170 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
34171 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
34172
34173 spin_unlock_irqrestore(&lp->lock, flags);
34174 }
34175 @@ -1429,7 +1429,7 @@ static void pcnet32_get_regs(struct net_
34176 int i, csr0;
34177 u16 *buff = ptr;
34178 struct pcnet32_private *lp = netdev_priv(dev);
34179 - struct pcnet32_access *a = &lp->a;
34180 + struct pcnet32_access *a = lp->a;
34181 ulong ioaddr = dev->base_addr;
34182 unsigned long flags;
34183
34184 @@ -1466,9 +1466,9 @@ static void pcnet32_get_regs(struct net_
34185 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
34186 if (lp->phymask & (1 << j)) {
34187 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
34188 - lp->a.write_bcr(ioaddr, 33,
34189 + lp->a->write_bcr(ioaddr, 33,
34190 (j << 5) | i);
34191 - *buff++ = lp->a.read_bcr(ioaddr, 34);
34192 + *buff++ = lp->a->read_bcr(ioaddr, 34);
34193 }
34194 }
34195 }
34196 @@ -1858,7 +1858,7 @@ pcnet32_probe1(unsigned long ioaddr, int
34197 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
34198 lp->options |= PCNET32_PORT_FD;
34199
34200 - lp->a = *a;
34201 + lp->a = a;
34202
34203 /* prior to register_netdev, dev->name is not yet correct */
34204 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
34205 @@ -1917,7 +1917,7 @@ pcnet32_probe1(unsigned long ioaddr, int
34206 if (lp->mii) {
34207 /* lp->phycount and lp->phymask are set to 0 by memset above */
34208
34209 - lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
34210 + lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
34211 /* scan for PHYs */
34212 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
34213 unsigned short id1, id2;
34214 @@ -1938,7 +1938,7 @@ pcnet32_probe1(unsigned long ioaddr, int
34215 "Found PHY %04x:%04x at address %d.\n",
34216 id1, id2, i);
34217 }
34218 - lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
34219 + lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
34220 if (lp->phycount > 1) {
34221 lp->options |= PCNET32_PORT_MII;
34222 }
34223 @@ -2109,10 +2109,10 @@ static int pcnet32_open(struct net_devic
34224 }
34225
34226 /* Reset the PCNET32 */
34227 - lp->a.reset(ioaddr);
34228 + lp->a->reset(ioaddr);
34229
34230 /* switch pcnet32 to 32bit mode */
34231 - lp->a.write_bcr(ioaddr, 20, 2);
34232 + lp->a->write_bcr(ioaddr, 20, 2);
34233
34234 if (netif_msg_ifup(lp))
34235 printk(KERN_DEBUG
34236 @@ -2122,14 +2122,14 @@ static int pcnet32_open(struct net_devic
34237 (u32) (lp->init_dma_addr));
34238
34239 /* set/reset autoselect bit */
34240 - val = lp->a.read_bcr(ioaddr, 2) & ~2;
34241 + val = lp->a->read_bcr(ioaddr, 2) & ~2;
34242 if (lp->options & PCNET32_PORT_ASEL)
34243 val |= 2;
34244 - lp->a.write_bcr(ioaddr, 2, val);
34245 + lp->a->write_bcr(ioaddr, 2, val);
34246
34247 /* handle full duplex setting */
34248 if (lp->mii_if.full_duplex) {
34249 - val = lp->a.read_bcr(ioaddr, 9) & ~3;
34250 + val = lp->a->read_bcr(ioaddr, 9) & ~3;
34251 if (lp->options & PCNET32_PORT_FD) {
34252 val |= 1;
34253 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
34254 @@ -2139,14 +2139,14 @@ static int pcnet32_open(struct net_devic
34255 if (lp->chip_version == 0x2627)
34256 val |= 3;
34257 }
34258 - lp->a.write_bcr(ioaddr, 9, val);
34259 + lp->a->write_bcr(ioaddr, 9, val);
34260 }
34261
34262 /* set/reset GPSI bit in test register */
34263 - val = lp->a.read_csr(ioaddr, 124) & ~0x10;
34264 + val = lp->a->read_csr(ioaddr, 124) & ~0x10;
34265 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
34266 val |= 0x10;
34267 - lp->a.write_csr(ioaddr, 124, val);
34268 + lp->a->write_csr(ioaddr, 124, val);
34269
34270 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
34271 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
34272 @@ -2167,24 +2167,24 @@ static int pcnet32_open(struct net_devic
34273 * duplex, and/or enable auto negotiation, and clear DANAS
34274 */
34275 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
34276 - lp->a.write_bcr(ioaddr, 32,
34277 - lp->a.read_bcr(ioaddr, 32) | 0x0080);
34278 + lp->a->write_bcr(ioaddr, 32,
34279 + lp->a->read_bcr(ioaddr, 32) | 0x0080);
34280 /* disable Auto Negotiation, set 10Mpbs, HD */
34281 - val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
34282 + val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
34283 if (lp->options & PCNET32_PORT_FD)
34284 val |= 0x10;
34285 if (lp->options & PCNET32_PORT_100)
34286 val |= 0x08;
34287 - lp->a.write_bcr(ioaddr, 32, val);
34288 + lp->a->write_bcr(ioaddr, 32, val);
34289 } else {
34290 if (lp->options & PCNET32_PORT_ASEL) {
34291 - lp->a.write_bcr(ioaddr, 32,
34292 - lp->a.read_bcr(ioaddr,
34293 + lp->a->write_bcr(ioaddr, 32,
34294 + lp->a->read_bcr(ioaddr,
34295 32) | 0x0080);
34296 /* enable auto negotiate, setup, disable fd */
34297 - val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
34298 + val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
34299 val |= 0x20;
34300 - lp->a.write_bcr(ioaddr, 32, val);
34301 + lp->a->write_bcr(ioaddr, 32, val);
34302 }
34303 }
34304 } else {
34305 @@ -2197,10 +2197,10 @@ static int pcnet32_open(struct net_devic
34306 * There is really no good other way to handle multiple PHYs
34307 * other than turning off all automatics
34308 */
34309 - val = lp->a.read_bcr(ioaddr, 2);
34310 - lp->a.write_bcr(ioaddr, 2, val & ~2);
34311 - val = lp->a.read_bcr(ioaddr, 32);
34312 - lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
34313 + val = lp->a->read_bcr(ioaddr, 2);
34314 + lp->a->write_bcr(ioaddr, 2, val & ~2);
34315 + val = lp->a->read_bcr(ioaddr, 32);
34316 + lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
34317
34318 if (!(lp->options & PCNET32_PORT_ASEL)) {
34319 /* setup ecmd */
34320 @@ -2210,7 +2210,7 @@ static int pcnet32_open(struct net_devic
34321 ecmd.speed =
34322 lp->
34323 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
34324 - bcr9 = lp->a.read_bcr(ioaddr, 9);
34325 + bcr9 = lp->a->read_bcr(ioaddr, 9);
34326
34327 if (lp->options & PCNET32_PORT_FD) {
34328 ecmd.duplex = DUPLEX_FULL;
34329 @@ -2219,7 +2219,7 @@ static int pcnet32_open(struct net_devic
34330 ecmd.duplex = DUPLEX_HALF;
34331 bcr9 |= ~(1 << 0);
34332 }
34333 - lp->a.write_bcr(ioaddr, 9, bcr9);
34334 + lp->a->write_bcr(ioaddr, 9, bcr9);
34335 }
34336
34337 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
34338 @@ -2252,9 +2252,9 @@ static int pcnet32_open(struct net_devic
34339
34340 #ifdef DO_DXSUFLO
34341 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
34342 - val = lp->a.read_csr(ioaddr, CSR3);
34343 + val = lp->a->read_csr(ioaddr, CSR3);
34344 val |= 0x40;
34345 - lp->a.write_csr(ioaddr, CSR3, val);
34346 + lp->a->write_csr(ioaddr, CSR3, val);
34347 }
34348 #endif
34349
34350 @@ -2270,11 +2270,11 @@ static int pcnet32_open(struct net_devic
34351 napi_enable(&lp->napi);
34352
34353 /* Re-initialize the PCNET32, and start it when done. */
34354 - lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
34355 - lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
34356 + lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
34357 + lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
34358
34359 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34360 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
34361 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34362 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
34363
34364 netif_start_queue(dev);
34365
34366 @@ -2286,20 +2286,20 @@ static int pcnet32_open(struct net_devic
34367
34368 i = 0;
34369 while (i++ < 100)
34370 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
34371 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
34372 break;
34373 /*
34374 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
34375 * reports that doing so triggers a bug in the '974.
34376 */
34377 - lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
34378 + lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
34379
34380 if (netif_msg_ifup(lp))
34381 printk(KERN_DEBUG
34382 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
34383 dev->name, i,
34384 (u32) (lp->init_dma_addr),
34385 - lp->a.read_csr(ioaddr, CSR0));
34386 + lp->a->read_csr(ioaddr, CSR0));
34387
34388 spin_unlock_irqrestore(&lp->lock, flags);
34389
34390 @@ -2313,7 +2313,7 @@ static int pcnet32_open(struct net_devic
34391 * Switch back to 16bit mode to avoid problems with dumb
34392 * DOS packet driver after a warm reboot
34393 */
34394 - lp->a.write_bcr(ioaddr, 20, 4);
34395 + lp->a->write_bcr(ioaddr, 20, 4);
34396
34397 err_free_irq:
34398 spin_unlock_irqrestore(&lp->lock, flags);
34399 @@ -2420,7 +2420,7 @@ static void pcnet32_restart(struct net_d
34400
34401 /* wait for stop */
34402 for (i = 0; i < 100; i++)
34403 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
34404 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
34405 break;
34406
34407 if (i >= 100 && netif_msg_drv(lp))
34408 @@ -2433,13 +2433,13 @@ static void pcnet32_restart(struct net_d
34409 return;
34410
34411 /* ReInit Ring */
34412 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
34413 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
34414 i = 0;
34415 while (i++ < 1000)
34416 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
34417 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
34418 break;
34419
34420 - lp->a.write_csr(ioaddr, CSR0, csr0_bits);
34421 + lp->a->write_csr(ioaddr, CSR0, csr0_bits);
34422 }
34423
34424 static void pcnet32_tx_timeout(struct net_device *dev)
34425 @@ -2452,8 +2452,8 @@ static void pcnet32_tx_timeout(struct ne
34426 if (pcnet32_debug & NETIF_MSG_DRV)
34427 printk(KERN_ERR
34428 "%s: transmit timed out, status %4.4x, resetting.\n",
34429 - dev->name, lp->a.read_csr(ioaddr, CSR0));
34430 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
34431 + dev->name, lp->a->read_csr(ioaddr, CSR0));
34432 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
34433 dev->stats.tx_errors++;
34434 if (netif_msg_tx_err(lp)) {
34435 int i;
34436 @@ -2497,7 +2497,7 @@ static netdev_tx_t pcnet32_start_xmit(st
34437 if (netif_msg_tx_queued(lp)) {
34438 printk(KERN_DEBUG
34439 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
34440 - dev->name, lp->a.read_csr(ioaddr, CSR0));
34441 + dev->name, lp->a->read_csr(ioaddr, CSR0));
34442 }
34443
34444 /* Default status -- will not enable Successful-TxDone
34445 @@ -2528,7 +2528,7 @@ static netdev_tx_t pcnet32_start_xmit(st
34446 dev->stats.tx_bytes += skb->len;
34447
34448 /* Trigger an immediate send poll. */
34449 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
34450 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
34451
34452 dev->trans_start = jiffies;
34453
34454 @@ -2555,18 +2555,18 @@ pcnet32_interrupt(int irq, void *dev_id)
34455
34456 spin_lock(&lp->lock);
34457
34458 - csr0 = lp->a.read_csr(ioaddr, CSR0);
34459 + csr0 = lp->a->read_csr(ioaddr, CSR0);
34460 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
34461 if (csr0 == 0xffff) {
34462 break; /* PCMCIA remove happened */
34463 }
34464 /* Acknowledge all of the current interrupt sources ASAP. */
34465 - lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
34466 + lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
34467
34468 if (netif_msg_intr(lp))
34469 printk(KERN_DEBUG
34470 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
34471 - dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
34472 + dev->name, csr0, lp->a->read_csr(ioaddr, CSR0));
34473
34474 /* Log misc errors. */
34475 if (csr0 & 0x4000)
34476 @@ -2595,19 +2595,19 @@ pcnet32_interrupt(int irq, void *dev_id)
34477 if (napi_schedule_prep(&lp->napi)) {
34478 u16 val;
34479 /* set interrupt masks */
34480 - val = lp->a.read_csr(ioaddr, CSR3);
34481 + val = lp->a->read_csr(ioaddr, CSR3);
34482 val |= 0x5f00;
34483 - lp->a.write_csr(ioaddr, CSR3, val);
34484 + lp->a->write_csr(ioaddr, CSR3, val);
34485
34486 __napi_schedule(&lp->napi);
34487 break;
34488 }
34489 - csr0 = lp->a.read_csr(ioaddr, CSR0);
34490 + csr0 = lp->a->read_csr(ioaddr, CSR0);
34491 }
34492
34493 if (netif_msg_intr(lp))
34494 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
34495 - dev->name, lp->a.read_csr(ioaddr, CSR0));
34496 + dev->name, lp->a->read_csr(ioaddr, CSR0));
34497
34498 spin_unlock(&lp->lock);
34499
34500 @@ -2627,21 +2627,21 @@ static int pcnet32_close(struct net_devi
34501
34502 spin_lock_irqsave(&lp->lock, flags);
34503
34504 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
34505 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
34506
34507 if (netif_msg_ifdown(lp))
34508 printk(KERN_DEBUG
34509 "%s: Shutting down ethercard, status was %2.2x.\n",
34510 - dev->name, lp->a.read_csr(ioaddr, CSR0));
34511 + dev->name, lp->a->read_csr(ioaddr, CSR0));
34512
34513 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
34514 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
34515 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
34516
34517 /*
34518 * Switch back to 16bit mode to avoid problems with dumb
34519 * DOS packet driver after a warm reboot
34520 */
34521 - lp->a.write_bcr(ioaddr, 20, 4);
34522 + lp->a->write_bcr(ioaddr, 20, 4);
34523
34524 spin_unlock_irqrestore(&lp->lock, flags);
34525
34526 @@ -2664,7 +2664,7 @@ static struct net_device_stats *pcnet32_
34527 unsigned long flags;
34528
34529 spin_lock_irqsave(&lp->lock, flags);
34530 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
34531 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
34532 spin_unlock_irqrestore(&lp->lock, flags);
34533
34534 return &dev->stats;
34535 @@ -2686,10 +2686,10 @@ static void pcnet32_load_multicast(struc
34536 if (dev->flags & IFF_ALLMULTI) {
34537 ib->filter[0] = cpu_to_le32(~0U);
34538 ib->filter[1] = cpu_to_le32(~0U);
34539 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
34540 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
34541 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
34542 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
34543 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
34544 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
34545 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
34546 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
34547 return;
34548 }
34549 /* clear the multicast filter */
34550 @@ -2710,7 +2710,7 @@ static void pcnet32_load_multicast(struc
34551 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
34552 }
34553 for (i = 0; i < 4; i++)
34554 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
34555 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
34556 le16_to_cpu(mcast_table[i]));
34557 return;
34558 }
34559 @@ -2726,7 +2726,7 @@ static void pcnet32_set_multicast_list(s
34560
34561 spin_lock_irqsave(&lp->lock, flags);
34562 suspended = pcnet32_suspend(dev, &flags, 0);
34563 - csr15 = lp->a.read_csr(ioaddr, CSR15);
34564 + csr15 = lp->a->read_csr(ioaddr, CSR15);
34565 if (dev->flags & IFF_PROMISC) {
34566 /* Log any net taps. */
34567 if (netif_msg_hw(lp))
34568 @@ -2735,21 +2735,21 @@ static void pcnet32_set_multicast_list(s
34569 lp->init_block->mode =
34570 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
34571 7);
34572 - lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
34573 + lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
34574 } else {
34575 lp->init_block->mode =
34576 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
34577 - lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
34578 + lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
34579 pcnet32_load_multicast(dev);
34580 }
34581
34582 if (suspended) {
34583 int csr5;
34584 /* clear SUSPEND (SPND) - CSR5 bit 0 */
34585 - csr5 = lp->a.read_csr(ioaddr, CSR5);
34586 - lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
34587 + csr5 = lp->a->read_csr(ioaddr, CSR5);
34588 + lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
34589 } else {
34590 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
34591 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
34592 pcnet32_restart(dev, CSR0_NORMAL);
34593 netif_wake_queue(dev);
34594 }
34595 @@ -2767,8 +2767,8 @@ static int mdio_read(struct net_device *
34596 if (!lp->mii)
34597 return 0;
34598
34599 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34600 - val_out = lp->a.read_bcr(ioaddr, 34);
34601 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34602 + val_out = lp->a->read_bcr(ioaddr, 34);
34603
34604 return val_out;
34605 }
34606 @@ -2782,8 +2782,8 @@ static void mdio_write(struct net_device
34607 if (!lp->mii)
34608 return;
34609
34610 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34611 - lp->a.write_bcr(ioaddr, 34, val);
34612 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34613 + lp->a->write_bcr(ioaddr, 34, val);
34614 }
34615
34616 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34617 @@ -2862,7 +2862,7 @@ static void pcnet32_check_media(struct n
34618 curr_link = mii_link_ok(&lp->mii_if);
34619 } else {
34620 ulong ioaddr = dev->base_addr; /* card base I/O address */
34621 - curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
34622 + curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
34623 }
34624 if (!curr_link) {
34625 if (prev_link || verbose) {
34626 @@ -2887,13 +2887,13 @@ static void pcnet32_check_media(struct n
34627 (ecmd.duplex ==
34628 DUPLEX_FULL) ? "full" : "half");
34629 }
34630 - bcr9 = lp->a.read_bcr(dev->base_addr, 9);
34631 + bcr9 = lp->a->read_bcr(dev->base_addr, 9);
34632 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
34633 if (lp->mii_if.full_duplex)
34634 bcr9 |= (1 << 0);
34635 else
34636 bcr9 &= ~(1 << 0);
34637 - lp->a.write_bcr(dev->base_addr, 9, bcr9);
34638 + lp->a->write_bcr(dev->base_addr, 9, bcr9);
34639 }
34640 } else {
34641 if (netif_msg_link(lp))
34642 diff -urNp linux-2.6.32.45/drivers/net/tg3.h linux-2.6.32.45/drivers/net/tg3.h
34643 --- linux-2.6.32.45/drivers/net/tg3.h 2011-03-27 14:31:47.000000000 -0400
34644 +++ linux-2.6.32.45/drivers/net/tg3.h 2011-04-17 15:56:46.000000000 -0400
34645 @@ -95,6 +95,7 @@
34646 #define CHIPREV_ID_5750_A0 0x4000
34647 #define CHIPREV_ID_5750_A1 0x4001
34648 #define CHIPREV_ID_5750_A3 0x4003
34649 +#define CHIPREV_ID_5750_C1 0x4201
34650 #define CHIPREV_ID_5750_C2 0x4202
34651 #define CHIPREV_ID_5752_A0_HW 0x5000
34652 #define CHIPREV_ID_5752_A0 0x6000
34653 diff -urNp linux-2.6.32.45/drivers/net/tokenring/abyss.c linux-2.6.32.45/drivers/net/tokenring/abyss.c
34654 --- linux-2.6.32.45/drivers/net/tokenring/abyss.c 2011-03-27 14:31:47.000000000 -0400
34655 +++ linux-2.6.32.45/drivers/net/tokenring/abyss.c 2011-08-05 20:33:55.000000000 -0400
34656 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
34657
34658 static int __init abyss_init (void)
34659 {
34660 - abyss_netdev_ops = tms380tr_netdev_ops;
34661 + pax_open_kernel();
34662 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34663
34664 - abyss_netdev_ops.ndo_open = abyss_open;
34665 - abyss_netdev_ops.ndo_stop = abyss_close;
34666 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
34667 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
34668 + pax_close_kernel();
34669
34670 return pci_register_driver(&abyss_driver);
34671 }
34672 diff -urNp linux-2.6.32.45/drivers/net/tokenring/madgemc.c linux-2.6.32.45/drivers/net/tokenring/madgemc.c
34673 --- linux-2.6.32.45/drivers/net/tokenring/madgemc.c 2011-03-27 14:31:47.000000000 -0400
34674 +++ linux-2.6.32.45/drivers/net/tokenring/madgemc.c 2011-08-05 20:33:55.000000000 -0400
34675 @@ -755,9 +755,11 @@ static struct mca_driver madgemc_driver
34676
34677 static int __init madgemc_init (void)
34678 {
34679 - madgemc_netdev_ops = tms380tr_netdev_ops;
34680 - madgemc_netdev_ops.ndo_open = madgemc_open;
34681 - madgemc_netdev_ops.ndo_stop = madgemc_close;
34682 + pax_open_kernel();
34683 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34684 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
34685 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
34686 + pax_close_kernel();
34687
34688 return mca_register_driver (&madgemc_driver);
34689 }
34690 diff -urNp linux-2.6.32.45/drivers/net/tokenring/proteon.c linux-2.6.32.45/drivers/net/tokenring/proteon.c
34691 --- linux-2.6.32.45/drivers/net/tokenring/proteon.c 2011-03-27 14:31:47.000000000 -0400
34692 +++ linux-2.6.32.45/drivers/net/tokenring/proteon.c 2011-08-05 20:33:55.000000000 -0400
34693 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
34694 struct platform_device *pdev;
34695 int i, num = 0, err = 0;
34696
34697 - proteon_netdev_ops = tms380tr_netdev_ops;
34698 - proteon_netdev_ops.ndo_open = proteon_open;
34699 - proteon_netdev_ops.ndo_stop = tms380tr_close;
34700 + pax_open_kernel();
34701 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34702 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
34703 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
34704 + pax_close_kernel();
34705
34706 err = platform_driver_register(&proteon_driver);
34707 if (err)
34708 diff -urNp linux-2.6.32.45/drivers/net/tokenring/skisa.c linux-2.6.32.45/drivers/net/tokenring/skisa.c
34709 --- linux-2.6.32.45/drivers/net/tokenring/skisa.c 2011-03-27 14:31:47.000000000 -0400
34710 +++ linux-2.6.32.45/drivers/net/tokenring/skisa.c 2011-08-05 20:33:55.000000000 -0400
34711 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
34712 struct platform_device *pdev;
34713 int i, num = 0, err = 0;
34714
34715 - sk_isa_netdev_ops = tms380tr_netdev_ops;
34716 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
34717 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34718 + pax_open_kernel();
34719 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34720 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
34721 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34722 + pax_close_kernel();
34723
34724 err = platform_driver_register(&sk_isa_driver);
34725 if (err)
34726 diff -urNp linux-2.6.32.45/drivers/net/tulip/de2104x.c linux-2.6.32.45/drivers/net/tulip/de2104x.c
34727 --- linux-2.6.32.45/drivers/net/tulip/de2104x.c 2011-03-27 14:31:47.000000000 -0400
34728 +++ linux-2.6.32.45/drivers/net/tulip/de2104x.c 2011-05-16 21:46:57.000000000 -0400
34729 @@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_i
34730 struct de_srom_info_leaf *il;
34731 void *bufp;
34732
34733 + pax_track_stack();
34734 +
34735 /* download entire eeprom */
34736 for (i = 0; i < DE_EEPROM_WORDS; i++)
34737 ((__le16 *)ee_data)[i] =
34738 diff -urNp linux-2.6.32.45/drivers/net/tulip/de4x5.c linux-2.6.32.45/drivers/net/tulip/de4x5.c
34739 --- linux-2.6.32.45/drivers/net/tulip/de4x5.c 2011-03-27 14:31:47.000000000 -0400
34740 +++ linux-2.6.32.45/drivers/net/tulip/de4x5.c 2011-04-17 15:56:46.000000000 -0400
34741 @@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, stru
34742 for (i=0; i<ETH_ALEN; i++) {
34743 tmp.addr[i] = dev->dev_addr[i];
34744 }
34745 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34746 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34747 break;
34748
34749 case DE4X5_SET_HWADDR: /* Set the hardware address */
34750 @@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, stru
34751 spin_lock_irqsave(&lp->lock, flags);
34752 memcpy(&statbuf, &lp->pktStats, ioc->len);
34753 spin_unlock_irqrestore(&lp->lock, flags);
34754 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
34755 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
34756 return -EFAULT;
34757 break;
34758 }
34759 diff -urNp linux-2.6.32.45/drivers/net/usb/hso.c linux-2.6.32.45/drivers/net/usb/hso.c
34760 --- linux-2.6.32.45/drivers/net/usb/hso.c 2011-03-27 14:31:47.000000000 -0400
34761 +++ linux-2.6.32.45/drivers/net/usb/hso.c 2011-04-17 15:56:46.000000000 -0400
34762 @@ -71,7 +71,7 @@
34763 #include <asm/byteorder.h>
34764 #include <linux/serial_core.h>
34765 #include <linux/serial.h>
34766 -
34767 +#include <asm/local.h>
34768
34769 #define DRIVER_VERSION "1.2"
34770 #define MOD_AUTHOR "Option Wireless"
34771 @@ -258,7 +258,7 @@ struct hso_serial {
34772
34773 /* from usb_serial_port */
34774 struct tty_struct *tty;
34775 - int open_count;
34776 + local_t open_count;
34777 spinlock_t serial_lock;
34778
34779 int (*write_data) (struct hso_serial *serial);
34780 @@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_
34781 struct urb *urb;
34782
34783 urb = serial->rx_urb[0];
34784 - if (serial->open_count > 0) {
34785 + if (local_read(&serial->open_count) > 0) {
34786 count = put_rxbuf_data(urb, serial);
34787 if (count == -1)
34788 return;
34789 @@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_cal
34790 DUMP1(urb->transfer_buffer, urb->actual_length);
34791
34792 /* Anyone listening? */
34793 - if (serial->open_count == 0)
34794 + if (local_read(&serial->open_count) == 0)
34795 return;
34796
34797 if (status == 0) {
34798 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
34799 spin_unlock_irq(&serial->serial_lock);
34800
34801 /* check for port already opened, if not set the termios */
34802 - serial->open_count++;
34803 - if (serial->open_count == 1) {
34804 + if (local_inc_return(&serial->open_count) == 1) {
34805 tty->low_latency = 1;
34806 serial->rx_state = RX_IDLE;
34807 /* Force default termio settings */
34808 @@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_st
34809 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
34810 if (result) {
34811 hso_stop_serial_device(serial->parent);
34812 - serial->open_count--;
34813 + local_dec(&serial->open_count);
34814 kref_put(&serial->parent->ref, hso_serial_ref_free);
34815 }
34816 } else {
34817 @@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_
34818
34819 /* reset the rts and dtr */
34820 /* do the actual close */
34821 - serial->open_count--;
34822 + local_dec(&serial->open_count);
34823
34824 - if (serial->open_count <= 0) {
34825 - serial->open_count = 0;
34826 + if (local_read(&serial->open_count) <= 0) {
34827 + local_set(&serial->open_count, 0);
34828 spin_lock_irq(&serial->serial_lock);
34829 if (serial->tty == tty) {
34830 serial->tty->driver_data = NULL;
34831 @@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struc
34832
34833 /* the actual setup */
34834 spin_lock_irqsave(&serial->serial_lock, flags);
34835 - if (serial->open_count)
34836 + if (local_read(&serial->open_count))
34837 _hso_serial_set_termios(tty, old);
34838 else
34839 tty->termios = old;
34840 @@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interfa
34841 /* Start all serial ports */
34842 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
34843 if (serial_table[i] && (serial_table[i]->interface == iface)) {
34844 - if (dev2ser(serial_table[i])->open_count) {
34845 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
34846 result =
34847 hso_start_serial_device(serial_table[i], GFP_NOIO);
34848 hso_kick_transmit(dev2ser(serial_table[i]));
34849 diff -urNp linux-2.6.32.45/drivers/net/vxge/vxge-config.h linux-2.6.32.45/drivers/net/vxge/vxge-config.h
34850 --- linux-2.6.32.45/drivers/net/vxge/vxge-config.h 2011-03-27 14:31:47.000000000 -0400
34851 +++ linux-2.6.32.45/drivers/net/vxge/vxge-config.h 2011-08-05 20:33:55.000000000 -0400
34852 @@ -474,7 +474,7 @@ struct vxge_hw_uld_cbs {
34853 void (*link_down)(struct __vxge_hw_device *devh);
34854 void (*crit_err)(struct __vxge_hw_device *devh,
34855 enum vxge_hw_event type, u64 ext_data);
34856 -};
34857 +} __no_const;
34858
34859 /*
34860 * struct __vxge_hw_blockpool_entry - Block private data structure
34861 diff -urNp linux-2.6.32.45/drivers/net/vxge/vxge-main.c linux-2.6.32.45/drivers/net/vxge/vxge-main.c
34862 --- linux-2.6.32.45/drivers/net/vxge/vxge-main.c 2011-03-27 14:31:47.000000000 -0400
34863 +++ linux-2.6.32.45/drivers/net/vxge/vxge-main.c 2011-05-16 21:46:57.000000000 -0400
34864 @@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_T
34865 struct sk_buff *completed[NR_SKB_COMPLETED];
34866 int more;
34867
34868 + pax_track_stack();
34869 +
34870 do {
34871 more = 0;
34872 skb_ptr = completed;
34873 @@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_conf
34874 u8 mtable[256] = {0}; /* CPU to vpath mapping */
34875 int index;
34876
34877 + pax_track_stack();
34878 +
34879 /*
34880 * Filling
34881 * - itable with bucket numbers
34882 diff -urNp linux-2.6.32.45/drivers/net/vxge/vxge-traffic.h linux-2.6.32.45/drivers/net/vxge/vxge-traffic.h
34883 --- linux-2.6.32.45/drivers/net/vxge/vxge-traffic.h 2011-03-27 14:31:47.000000000 -0400
34884 +++ linux-2.6.32.45/drivers/net/vxge/vxge-traffic.h 2011-08-05 20:33:55.000000000 -0400
34885 @@ -2123,7 +2123,7 @@ struct vxge_hw_mempool_cbs {
34886 struct vxge_hw_mempool_dma *dma_object,
34887 u32 index,
34888 u32 is_last);
34889 -};
34890 +} __no_const;
34891
34892 void
34893 __vxge_hw_mempool_destroy(
34894 diff -urNp linux-2.6.32.45/drivers/net/wan/cycx_x25.c linux-2.6.32.45/drivers/net/wan/cycx_x25.c
34895 --- linux-2.6.32.45/drivers/net/wan/cycx_x25.c 2011-03-27 14:31:47.000000000 -0400
34896 +++ linux-2.6.32.45/drivers/net/wan/cycx_x25.c 2011-05-16 21:46:57.000000000 -0400
34897 @@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned
34898 unsigned char hex[1024],
34899 * phex = hex;
34900
34901 + pax_track_stack();
34902 +
34903 if (len >= (sizeof(hex) / 2))
34904 len = (sizeof(hex) / 2) - 1;
34905
34906 diff -urNp linux-2.6.32.45/drivers/net/wan/hdlc_x25.c linux-2.6.32.45/drivers/net/wan/hdlc_x25.c
34907 --- linux-2.6.32.45/drivers/net/wan/hdlc_x25.c 2011-03-27 14:31:47.000000000 -0400
34908 +++ linux-2.6.32.45/drivers/net/wan/hdlc_x25.c 2011-08-05 20:33:55.000000000 -0400
34909 @@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
34910
34911 static int x25_open(struct net_device *dev)
34912 {
34913 - struct lapb_register_struct cb;
34914 + static struct lapb_register_struct cb = {
34915 + .connect_confirmation = x25_connected,
34916 + .connect_indication = x25_connected,
34917 + .disconnect_confirmation = x25_disconnected,
34918 + .disconnect_indication = x25_disconnected,
34919 + .data_indication = x25_data_indication,
34920 + .data_transmit = x25_data_transmit
34921 + };
34922 int result;
34923
34924 - cb.connect_confirmation = x25_connected;
34925 - cb.connect_indication = x25_connected;
34926 - cb.disconnect_confirmation = x25_disconnected;
34927 - cb.disconnect_indication = x25_disconnected;
34928 - cb.data_indication = x25_data_indication;
34929 - cb.data_transmit = x25_data_transmit;
34930 -
34931 result = lapb_register(dev, &cb);
34932 if (result != LAPB_OK)
34933 return result;
34934 diff -urNp linux-2.6.32.45/drivers/net/wimax/i2400m/usb-fw.c linux-2.6.32.45/drivers/net/wimax/i2400m/usb-fw.c
34935 --- linux-2.6.32.45/drivers/net/wimax/i2400m/usb-fw.c 2011-03-27 14:31:47.000000000 -0400
34936 +++ linux-2.6.32.45/drivers/net/wimax/i2400m/usb-fw.c 2011-05-16 21:46:57.000000000 -0400
34937 @@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
34938 int do_autopm = 1;
34939 DECLARE_COMPLETION_ONSTACK(notif_completion);
34940
34941 + pax_track_stack();
34942 +
34943 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
34944 i2400m, ack, ack_size);
34945 BUG_ON(_ack == i2400m->bm_ack_buf);
34946 diff -urNp linux-2.6.32.45/drivers/net/wireless/airo.c linux-2.6.32.45/drivers/net/wireless/airo.c
34947 --- linux-2.6.32.45/drivers/net/wireless/airo.c 2011-03-27 14:31:47.000000000 -0400
34948 +++ linux-2.6.32.45/drivers/net/wireless/airo.c 2011-05-16 21:46:57.000000000 -0400
34949 @@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
34950 BSSListElement * loop_net;
34951 BSSListElement * tmp_net;
34952
34953 + pax_track_stack();
34954 +
34955 /* Blow away current list of scan results */
34956 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
34957 list_move_tail (&loop_net->list, &ai->network_free_list);
34958 @@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *
34959 WepKeyRid wkr;
34960 int rc;
34961
34962 + pax_track_stack();
34963 +
34964 memset( &mySsid, 0, sizeof( mySsid ) );
34965 kfree (ai->flash);
34966 ai->flash = NULL;
34967 @@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct i
34968 __le32 *vals = stats.vals;
34969 int len;
34970
34971 + pax_track_stack();
34972 +
34973 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
34974 return -ENOMEM;
34975 data = (struct proc_data *)file->private_data;
34976 @@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct ino
34977 /* If doLoseSync is not 1, we won't do a Lose Sync */
34978 int doLoseSync = -1;
34979
34980 + pax_track_stack();
34981 +
34982 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
34983 return -ENOMEM;
34984 data = (struct proc_data *)file->private_data;
34985 @@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_de
34986 int i;
34987 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
34988
34989 + pax_track_stack();
34990 +
34991 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
34992 if (!qual)
34993 return -ENOMEM;
34994 @@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(str
34995 CapabilityRid cap_rid;
34996 __le32 *vals = stats_rid.vals;
34997
34998 + pax_track_stack();
34999 +
35000 /* Get stats out of the card */
35001 clear_bit(JOB_WSTATS, &local->jobs);
35002 if (local->power.event) {
35003 diff -urNp linux-2.6.32.45/drivers/net/wireless/ath/ath5k/debug.c linux-2.6.32.45/drivers/net/wireless/ath/ath5k/debug.c
35004 --- linux-2.6.32.45/drivers/net/wireless/ath/ath5k/debug.c 2011-03-27 14:31:47.000000000 -0400
35005 +++ linux-2.6.32.45/drivers/net/wireless/ath/ath5k/debug.c 2011-05-16 21:46:57.000000000 -0400
35006 @@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct f
35007 unsigned int v;
35008 u64 tsf;
35009
35010 + pax_track_stack();
35011 +
35012 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
35013 len += snprintf(buf+len, sizeof(buf)-len,
35014 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
35015 @@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct fi
35016 unsigned int len = 0;
35017 unsigned int i;
35018
35019 + pax_track_stack();
35020 +
35021 len += snprintf(buf+len, sizeof(buf)-len,
35022 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
35023
35024 diff -urNp linux-2.6.32.45/drivers/net/wireless/ath/ath9k/debug.c linux-2.6.32.45/drivers/net/wireless/ath/ath9k/debug.c
35025 --- linux-2.6.32.45/drivers/net/wireless/ath/ath9k/debug.c 2011-03-27 14:31:47.000000000 -0400
35026 +++ linux-2.6.32.45/drivers/net/wireless/ath/ath9k/debug.c 2011-05-16 21:46:57.000000000 -0400
35027 @@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struc
35028 char buf[512];
35029 unsigned int len = 0;
35030
35031 + pax_track_stack();
35032 +
35033 len += snprintf(buf + len, sizeof(buf) - len,
35034 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
35035 len += snprintf(buf + len, sizeof(buf) - len,
35036 @@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct fi
35037 int i;
35038 u8 addr[ETH_ALEN];
35039
35040 + pax_track_stack();
35041 +
35042 len += snprintf(buf + len, sizeof(buf) - len,
35043 "primary: %s (%s chan=%d ht=%d)\n",
35044 wiphy_name(sc->pri_wiphy->hw->wiphy),
35045 diff -urNp linux-2.6.32.45/drivers/net/wireless/b43/debugfs.c linux-2.6.32.45/drivers/net/wireless/b43/debugfs.c
35046 --- linux-2.6.32.45/drivers/net/wireless/b43/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35047 +++ linux-2.6.32.45/drivers/net/wireless/b43/debugfs.c 2011-04-17 15:56:46.000000000 -0400
35048 @@ -43,7 +43,7 @@ static struct dentry *rootdir;
35049 struct b43_debugfs_fops {
35050 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
35051 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
35052 - struct file_operations fops;
35053 + const struct file_operations fops;
35054 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
35055 size_t file_struct_offset;
35056 };
35057 diff -urNp linux-2.6.32.45/drivers/net/wireless/b43legacy/debugfs.c linux-2.6.32.45/drivers/net/wireless/b43legacy/debugfs.c
35058 --- linux-2.6.32.45/drivers/net/wireless/b43legacy/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35059 +++ linux-2.6.32.45/drivers/net/wireless/b43legacy/debugfs.c 2011-04-17 15:56:46.000000000 -0400
35060 @@ -44,7 +44,7 @@ static struct dentry *rootdir;
35061 struct b43legacy_debugfs_fops {
35062 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
35063 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
35064 - struct file_operations fops;
35065 + const struct file_operations fops;
35066 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
35067 size_t file_struct_offset;
35068 /* Take wl->irq_lock before calling read/write? */
35069 diff -urNp linux-2.6.32.45/drivers/net/wireless/ipw2x00/ipw2100.c linux-2.6.32.45/drivers/net/wireless/ipw2x00/ipw2100.c
35070 --- linux-2.6.32.45/drivers/net/wireless/ipw2x00/ipw2100.c 2011-03-27 14:31:47.000000000 -0400
35071 +++ linux-2.6.32.45/drivers/net/wireless/ipw2x00/ipw2100.c 2011-05-16 21:46:57.000000000 -0400
35072 @@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2
35073 int err;
35074 DECLARE_SSID_BUF(ssid);
35075
35076 + pax_track_stack();
35077 +
35078 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
35079
35080 if (ssid_len)
35081 @@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw210
35082 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
35083 int err;
35084
35085 + pax_track_stack();
35086 +
35087 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
35088 idx, keylen, len);
35089
35090 diff -urNp linux-2.6.32.45/drivers/net/wireless/ipw2x00/libipw_rx.c linux-2.6.32.45/drivers/net/wireless/ipw2x00/libipw_rx.c
35091 --- linux-2.6.32.45/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-03-27 14:31:47.000000000 -0400
35092 +++ linux-2.6.32.45/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-05-16 21:46:57.000000000 -0400
35093 @@ -1566,6 +1566,8 @@ static void libipw_process_probe_respons
35094 unsigned long flags;
35095 DECLARE_SSID_BUF(ssid);
35096
35097 + pax_track_stack();
35098 +
35099 LIBIPW_DEBUG_SCAN("'%s' (%pM"
35100 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
35101 print_ssid(ssid, info_element->data, info_element->len),
35102 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-1000.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-1000.c
35103 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-03-27 14:31:47.000000000 -0400
35104 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-04-17 15:56:46.000000000 -0400
35105 @@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib =
35106 },
35107 };
35108
35109 -static struct iwl_ops iwl1000_ops = {
35110 +static const struct iwl_ops iwl1000_ops = {
35111 .ucode = &iwl5000_ucode,
35112 .lib = &iwl1000_lib,
35113 .hcmd = &iwl5000_hcmd,
35114 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl3945-base.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl3945-base.c
35115 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl3945-base.c 2011-03-27 14:31:47.000000000 -0400
35116 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl3945-base.c 2011-08-05 20:33:55.000000000 -0400
35117 @@ -3927,7 +3927,9 @@ static int iwl3945_pci_probe(struct pci_
35118 */
35119 if (iwl3945_mod_params.disable_hw_scan) {
35120 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
35121 - iwl3945_hw_ops.hw_scan = NULL;
35122 + pax_open_kernel();
35123 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
35124 + pax_close_kernel();
35125 }
35126
35127
35128 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-3945.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-3945.c
35129 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-03-27 14:31:47.000000000 -0400
35130 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-04-17 15:56:46.000000000 -0400
35131 @@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945
35132 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
35133 };
35134
35135 -static struct iwl_ops iwl3945_ops = {
35136 +static const struct iwl_ops iwl3945_ops = {
35137 .ucode = &iwl3945_ucode,
35138 .lib = &iwl3945_lib,
35139 .hcmd = &iwl3945_hcmd,
35140 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-4965.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-4965.c
35141 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-03-27 14:31:47.000000000 -0400
35142 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-04-17 15:56:46.000000000 -0400
35143 @@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib =
35144 },
35145 };
35146
35147 -static struct iwl_ops iwl4965_ops = {
35148 +static const struct iwl_ops iwl4965_ops = {
35149 .ucode = &iwl4965_ucode,
35150 .lib = &iwl4965_lib,
35151 .hcmd = &iwl4965_hcmd,
35152 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-5000.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-5000.c
35153 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-06-25 12:55:34.000000000 -0400
35154 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-06-25 12:56:37.000000000 -0400
35155 @@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib =
35156 },
35157 };
35158
35159 -struct iwl_ops iwl5000_ops = {
35160 +const struct iwl_ops iwl5000_ops = {
35161 .ucode = &iwl5000_ucode,
35162 .lib = &iwl5000_lib,
35163 .hcmd = &iwl5000_hcmd,
35164 .utils = &iwl5000_hcmd_utils,
35165 };
35166
35167 -static struct iwl_ops iwl5150_ops = {
35168 +static const struct iwl_ops iwl5150_ops = {
35169 .ucode = &iwl5000_ucode,
35170 .lib = &iwl5150_lib,
35171 .hcmd = &iwl5000_hcmd,
35172 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-6000.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-6000.c
35173 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-03-27 14:31:47.000000000 -0400
35174 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-04-17 15:56:46.000000000 -0400
35175 @@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000
35176 .calc_rssi = iwl5000_calc_rssi,
35177 };
35178
35179 -static struct iwl_ops iwl6000_ops = {
35180 +static const struct iwl_ops iwl6000_ops = {
35181 .ucode = &iwl5000_ucode,
35182 .lib = &iwl6000_lib,
35183 .hcmd = &iwl5000_hcmd,
35184 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn.c
35185 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-03-27 14:31:47.000000000 -0400
35186 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-08-05 20:33:55.000000000 -0400
35187 @@ -2911,7 +2911,9 @@ static int iwl_pci_probe(struct pci_dev
35188 if (iwl_debug_level & IWL_DL_INFO)
35189 dev_printk(KERN_DEBUG, &(pdev->dev),
35190 "Disabling hw_scan\n");
35191 - iwl_hw_ops.hw_scan = NULL;
35192 + pax_open_kernel();
35193 + *(void **)&iwl_hw_ops.hw_scan = NULL;
35194 + pax_close_kernel();
35195 }
35196
35197 hw = iwl_alloc_all(cfg, &iwl_hw_ops);
35198 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
35199 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-03-27 14:31:47.000000000 -0400
35200 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-05-16 21:46:57.000000000 -0400
35201 @@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, s
35202 u8 active_index = 0;
35203 s32 tpt = 0;
35204
35205 + pax_track_stack();
35206 +
35207 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
35208
35209 if (!ieee80211_is_data(hdr->frame_control) ||
35210 @@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_
35211 u8 valid_tx_ant = 0;
35212 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
35213
35214 + pax_track_stack();
35215 +
35216 /* Override starting rate (index 0) if needed for debug purposes */
35217 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
35218
35219 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debugfs.c
35220 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-03-27 14:31:47.000000000 -0400
35221 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-05-16 21:46:57.000000000 -0400
35222 @@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(str
35223 int pos = 0;
35224 const size_t bufsz = sizeof(buf);
35225
35226 + pax_track_stack();
35227 +
35228 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
35229 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
35230 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
35231 @@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
35232 const size_t bufsz = sizeof(buf);
35233 ssize_t ret;
35234
35235 + pax_track_stack();
35236 +
35237 for (i = 0; i < AC_NUM; i++) {
35238 pos += scnprintf(buf + pos, bufsz - pos,
35239 "\tcw_min\tcw_max\taifsn\ttxop\n");
35240 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debug.h linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debug.h
35241 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-03-27 14:31:47.000000000 -0400
35242 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-04-17 15:56:46.000000000 -0400
35243 @@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_pri
35244 #endif
35245
35246 #else
35247 -#define IWL_DEBUG(__priv, level, fmt, args...)
35248 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
35249 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
35250 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
35251 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
35252 void *p, u32 len)
35253 {}
35254 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-dev.h linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-dev.h
35255 --- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-03-27 14:31:47.000000000 -0400
35256 +++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-04-17 15:56:46.000000000 -0400
35257 @@ -68,7 +68,7 @@ struct iwl_tx_queue;
35258
35259 /* shared structures from iwl-5000.c */
35260 extern struct iwl_mod_params iwl50_mod_params;
35261 -extern struct iwl_ops iwl5000_ops;
35262 +extern const struct iwl_ops iwl5000_ops;
35263 extern struct iwl_ucode_ops iwl5000_ucode;
35264 extern struct iwl_lib_ops iwl5000_lib;
35265 extern struct iwl_hcmd_ops iwl5000_hcmd;
35266 diff -urNp linux-2.6.32.45/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-2.6.32.45/drivers/net/wireless/iwmc3200wifi/debugfs.c
35267 --- linux-2.6.32.45/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35268 +++ linux-2.6.32.45/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-05-16 21:46:57.000000000 -0400
35269 @@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
35270 int buf_len = 512;
35271 size_t len = 0;
35272
35273 + pax_track_stack();
35274 +
35275 if (*ppos != 0)
35276 return 0;
35277 if (count < sizeof(buf))
35278 diff -urNp linux-2.6.32.45/drivers/net/wireless/libertas/debugfs.c linux-2.6.32.45/drivers/net/wireless/libertas/debugfs.c
35279 --- linux-2.6.32.45/drivers/net/wireless/libertas/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35280 +++ linux-2.6.32.45/drivers/net/wireless/libertas/debugfs.c 2011-04-17 15:56:46.000000000 -0400
35281 @@ -708,7 +708,7 @@ out_unlock:
35282 struct lbs_debugfs_files {
35283 const char *name;
35284 int perm;
35285 - struct file_operations fops;
35286 + const struct file_operations fops;
35287 };
35288
35289 static const struct lbs_debugfs_files debugfs_files[] = {
35290 diff -urNp linux-2.6.32.45/drivers/net/wireless/rndis_wlan.c linux-2.6.32.45/drivers/net/wireless/rndis_wlan.c
35291 --- linux-2.6.32.45/drivers/net/wireless/rndis_wlan.c 2011-03-27 14:31:47.000000000 -0400
35292 +++ linux-2.6.32.45/drivers/net/wireless/rndis_wlan.c 2011-04-17 15:56:46.000000000 -0400
35293 @@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbn
35294
35295 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
35296
35297 - if (rts_threshold < 0 || rts_threshold > 2347)
35298 + if (rts_threshold > 2347)
35299 rts_threshold = 2347;
35300
35301 tmp = cpu_to_le32(rts_threshold);
35302 diff -urNp linux-2.6.32.45/drivers/oprofile/buffer_sync.c linux-2.6.32.45/drivers/oprofile/buffer_sync.c
35303 --- linux-2.6.32.45/drivers/oprofile/buffer_sync.c 2011-03-27 14:31:47.000000000 -0400
35304 +++ linux-2.6.32.45/drivers/oprofile/buffer_sync.c 2011-04-17 15:56:46.000000000 -0400
35305 @@ -341,7 +341,7 @@ static void add_data(struct op_entry *en
35306 if (cookie == NO_COOKIE)
35307 offset = pc;
35308 if (cookie == INVALID_COOKIE) {
35309 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35310 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35311 offset = pc;
35312 }
35313 if (cookie != last_cookie) {
35314 @@ -385,14 +385,14 @@ add_sample(struct mm_struct *mm, struct
35315 /* add userspace sample */
35316
35317 if (!mm) {
35318 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
35319 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
35320 return 0;
35321 }
35322
35323 cookie = lookup_dcookie(mm, s->eip, &offset);
35324
35325 if (cookie == INVALID_COOKIE) {
35326 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35327 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35328 return 0;
35329 }
35330
35331 @@ -561,7 +561,7 @@ void sync_buffer(int cpu)
35332 /* ignore backtraces if failed to add a sample */
35333 if (state == sb_bt_start) {
35334 state = sb_bt_ignore;
35335 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
35336 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
35337 }
35338 }
35339 release_mm(mm);
35340 diff -urNp linux-2.6.32.45/drivers/oprofile/event_buffer.c linux-2.6.32.45/drivers/oprofile/event_buffer.c
35341 --- linux-2.6.32.45/drivers/oprofile/event_buffer.c 2011-03-27 14:31:47.000000000 -0400
35342 +++ linux-2.6.32.45/drivers/oprofile/event_buffer.c 2011-04-17 15:56:46.000000000 -0400
35343 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
35344 }
35345
35346 if (buffer_pos == buffer_size) {
35347 - atomic_inc(&oprofile_stats.event_lost_overflow);
35348 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
35349 return;
35350 }
35351
35352 diff -urNp linux-2.6.32.45/drivers/oprofile/oprof.c linux-2.6.32.45/drivers/oprofile/oprof.c
35353 --- linux-2.6.32.45/drivers/oprofile/oprof.c 2011-03-27 14:31:47.000000000 -0400
35354 +++ linux-2.6.32.45/drivers/oprofile/oprof.c 2011-04-17 15:56:46.000000000 -0400
35355 @@ -110,7 +110,7 @@ static void switch_worker(struct work_st
35356 if (oprofile_ops.switch_events())
35357 return;
35358
35359 - atomic_inc(&oprofile_stats.multiplex_counter);
35360 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
35361 start_switch_worker();
35362 }
35363
35364 diff -urNp linux-2.6.32.45/drivers/oprofile/oprofilefs.c linux-2.6.32.45/drivers/oprofile/oprofilefs.c
35365 --- linux-2.6.32.45/drivers/oprofile/oprofilefs.c 2011-03-27 14:31:47.000000000 -0400
35366 +++ linux-2.6.32.45/drivers/oprofile/oprofilefs.c 2011-04-17 15:56:46.000000000 -0400
35367 @@ -187,7 +187,7 @@ static const struct file_operations atom
35368
35369
35370 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
35371 - char const *name, atomic_t *val)
35372 + char const *name, atomic_unchecked_t *val)
35373 {
35374 struct dentry *d = __oprofilefs_create_file(sb, root, name,
35375 &atomic_ro_fops, 0444);
35376 diff -urNp linux-2.6.32.45/drivers/oprofile/oprofile_stats.c linux-2.6.32.45/drivers/oprofile/oprofile_stats.c
35377 --- linux-2.6.32.45/drivers/oprofile/oprofile_stats.c 2011-03-27 14:31:47.000000000 -0400
35378 +++ linux-2.6.32.45/drivers/oprofile/oprofile_stats.c 2011-04-17 15:56:46.000000000 -0400
35379 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
35380 cpu_buf->sample_invalid_eip = 0;
35381 }
35382
35383 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
35384 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
35385 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
35386 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
35387 - atomic_set(&oprofile_stats.multiplex_counter, 0);
35388 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
35389 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
35390 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
35391 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
35392 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
35393 }
35394
35395
35396 diff -urNp linux-2.6.32.45/drivers/oprofile/oprofile_stats.h linux-2.6.32.45/drivers/oprofile/oprofile_stats.h
35397 --- linux-2.6.32.45/drivers/oprofile/oprofile_stats.h 2011-03-27 14:31:47.000000000 -0400
35398 +++ linux-2.6.32.45/drivers/oprofile/oprofile_stats.h 2011-04-17 15:56:46.000000000 -0400
35399 @@ -13,11 +13,11 @@
35400 #include <asm/atomic.h>
35401
35402 struct oprofile_stat_struct {
35403 - atomic_t sample_lost_no_mm;
35404 - atomic_t sample_lost_no_mapping;
35405 - atomic_t bt_lost_no_mapping;
35406 - atomic_t event_lost_overflow;
35407 - atomic_t multiplex_counter;
35408 + atomic_unchecked_t sample_lost_no_mm;
35409 + atomic_unchecked_t sample_lost_no_mapping;
35410 + atomic_unchecked_t bt_lost_no_mapping;
35411 + atomic_unchecked_t event_lost_overflow;
35412 + atomic_unchecked_t multiplex_counter;
35413 };
35414
35415 extern struct oprofile_stat_struct oprofile_stats;
35416 diff -urNp linux-2.6.32.45/drivers/parisc/pdc_stable.c linux-2.6.32.45/drivers/parisc/pdc_stable.c
35417 --- linux-2.6.32.45/drivers/parisc/pdc_stable.c 2011-03-27 14:31:47.000000000 -0400
35418 +++ linux-2.6.32.45/drivers/parisc/pdc_stable.c 2011-04-17 15:56:46.000000000 -0400
35419 @@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj
35420 return ret;
35421 }
35422
35423 -static struct sysfs_ops pdcspath_attr_ops = {
35424 +static const struct sysfs_ops pdcspath_attr_ops = {
35425 .show = pdcspath_attr_show,
35426 .store = pdcspath_attr_store,
35427 };
35428 diff -urNp linux-2.6.32.45/drivers/parport/procfs.c linux-2.6.32.45/drivers/parport/procfs.c
35429 --- linux-2.6.32.45/drivers/parport/procfs.c 2011-03-27 14:31:47.000000000 -0400
35430 +++ linux-2.6.32.45/drivers/parport/procfs.c 2011-04-17 15:56:46.000000000 -0400
35431 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
35432
35433 *ppos += len;
35434
35435 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
35436 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
35437 }
35438
35439 #ifdef CONFIG_PARPORT_1284
35440 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
35441
35442 *ppos += len;
35443
35444 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
35445 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
35446 }
35447 #endif /* IEEE1284.3 support. */
35448
35449 diff -urNp linux-2.6.32.45/drivers/pci/hotplug/acpiphp_glue.c linux-2.6.32.45/drivers/pci/hotplug/acpiphp_glue.c
35450 --- linux-2.6.32.45/drivers/pci/hotplug/acpiphp_glue.c 2011-03-27 14:31:47.000000000 -0400
35451 +++ linux-2.6.32.45/drivers/pci/hotplug/acpiphp_glue.c 2011-04-17 15:56:46.000000000 -0400
35452 @@ -111,7 +111,7 @@ static int post_dock_fixups(struct notif
35453 }
35454
35455
35456 -static struct acpi_dock_ops acpiphp_dock_ops = {
35457 +static const struct acpi_dock_ops acpiphp_dock_ops = {
35458 .handler = handle_hotplug_event_func,
35459 };
35460
35461 diff -urNp linux-2.6.32.45/drivers/pci/hotplug/cpci_hotplug.h linux-2.6.32.45/drivers/pci/hotplug/cpci_hotplug.h
35462 --- linux-2.6.32.45/drivers/pci/hotplug/cpci_hotplug.h 2011-03-27 14:31:47.000000000 -0400
35463 +++ linux-2.6.32.45/drivers/pci/hotplug/cpci_hotplug.h 2011-08-05 20:33:55.000000000 -0400
35464 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
35465 int (*hardware_test) (struct slot* slot, u32 value);
35466 u8 (*get_power) (struct slot* slot);
35467 int (*set_power) (struct slot* slot, int value);
35468 -};
35469 +} __no_const;
35470
35471 struct cpci_hp_controller {
35472 unsigned int irq;
35473 diff -urNp linux-2.6.32.45/drivers/pci/hotplug/cpqphp_nvram.c linux-2.6.32.45/drivers/pci/hotplug/cpqphp_nvram.c
35474 --- linux-2.6.32.45/drivers/pci/hotplug/cpqphp_nvram.c 2011-03-27 14:31:47.000000000 -0400
35475 +++ linux-2.6.32.45/drivers/pci/hotplug/cpqphp_nvram.c 2011-04-17 15:56:46.000000000 -0400
35476 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
35477
35478 void compaq_nvram_init (void __iomem *rom_start)
35479 {
35480 +
35481 +#ifndef CONFIG_PAX_KERNEXEC
35482 if (rom_start) {
35483 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
35484 }
35485 +#endif
35486 +
35487 dbg("int15 entry = %p\n", compaq_int15_entry_point);
35488
35489 /* initialize our int15 lock */
35490 diff -urNp linux-2.6.32.45/drivers/pci/hotplug/fakephp.c linux-2.6.32.45/drivers/pci/hotplug/fakephp.c
35491 --- linux-2.6.32.45/drivers/pci/hotplug/fakephp.c 2011-03-27 14:31:47.000000000 -0400
35492 +++ linux-2.6.32.45/drivers/pci/hotplug/fakephp.c 2011-04-17 15:56:46.000000000 -0400
35493 @@ -73,7 +73,7 @@ static void legacy_release(struct kobjec
35494 }
35495
35496 static struct kobj_type legacy_ktype = {
35497 - .sysfs_ops = &(struct sysfs_ops){
35498 + .sysfs_ops = &(const struct sysfs_ops){
35499 .store = legacy_store, .show = legacy_show
35500 },
35501 .release = &legacy_release,
35502 diff -urNp linux-2.6.32.45/drivers/pci/intel-iommu.c linux-2.6.32.45/drivers/pci/intel-iommu.c
35503 --- linux-2.6.32.45/drivers/pci/intel-iommu.c 2011-05-10 22:12:01.000000000 -0400
35504 +++ linux-2.6.32.45/drivers/pci/intel-iommu.c 2011-05-10 22:12:33.000000000 -0400
35505 @@ -2643,7 +2643,7 @@ error:
35506 return 0;
35507 }
35508
35509 -static dma_addr_t intel_map_page(struct device *dev, struct page *page,
35510 +dma_addr_t intel_map_page(struct device *dev, struct page *page,
35511 unsigned long offset, size_t size,
35512 enum dma_data_direction dir,
35513 struct dma_attrs *attrs)
35514 @@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain
35515 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
35516 }
35517
35518 -static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
35519 +void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
35520 size_t size, enum dma_data_direction dir,
35521 struct dma_attrs *attrs)
35522 {
35523 @@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct devi
35524 }
35525 }
35526
35527 -static void *intel_alloc_coherent(struct device *hwdev, size_t size,
35528 +void *intel_alloc_coherent(struct device *hwdev, size_t size,
35529 dma_addr_t *dma_handle, gfp_t flags)
35530 {
35531 void *vaddr;
35532 @@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct
35533 return NULL;
35534 }
35535
35536 -static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
35537 +void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
35538 dma_addr_t dma_handle)
35539 {
35540 int order;
35541 @@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct d
35542 free_pages((unsigned long)vaddr, order);
35543 }
35544
35545 -static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
35546 +void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
35547 int nelems, enum dma_data_direction dir,
35548 struct dma_attrs *attrs)
35549 {
35550 @@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(str
35551 return nelems;
35552 }
35553
35554 -static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
35555 +int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
35556 enum dma_data_direction dir, struct dma_attrs *attrs)
35557 {
35558 int i;
35559 @@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *h
35560 return nelems;
35561 }
35562
35563 -static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
35564 +int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
35565 {
35566 return !dma_addr;
35567 }
35568
35569 -struct dma_map_ops intel_dma_ops = {
35570 +const struct dma_map_ops intel_dma_ops = {
35571 .alloc_coherent = intel_alloc_coherent,
35572 .free_coherent = intel_free_coherent,
35573 .map_sg = intel_map_sg,
35574 diff -urNp linux-2.6.32.45/drivers/pci/pcie/aspm.c linux-2.6.32.45/drivers/pci/pcie/aspm.c
35575 --- linux-2.6.32.45/drivers/pci/pcie/aspm.c 2011-03-27 14:31:47.000000000 -0400
35576 +++ linux-2.6.32.45/drivers/pci/pcie/aspm.c 2011-04-17 15:56:46.000000000 -0400
35577 @@ -27,9 +27,9 @@
35578 #define MODULE_PARAM_PREFIX "pcie_aspm."
35579
35580 /* Note: those are not register definitions */
35581 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
35582 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
35583 -#define ASPM_STATE_L1 (4) /* L1 state */
35584 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
35585 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
35586 +#define ASPM_STATE_L1 (4U) /* L1 state */
35587 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
35588 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
35589
35590 diff -urNp linux-2.6.32.45/drivers/pci/probe.c linux-2.6.32.45/drivers/pci/probe.c
35591 --- linux-2.6.32.45/drivers/pci/probe.c 2011-03-27 14:31:47.000000000 -0400
35592 +++ linux-2.6.32.45/drivers/pci/probe.c 2011-04-17 15:56:46.000000000 -0400
35593 @@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(
35594 return ret;
35595 }
35596
35597 -static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
35598 +static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
35599 struct device_attribute *attr,
35600 char *buf)
35601 {
35602 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
35603 }
35604
35605 -static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
35606 +static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
35607 struct device_attribute *attr,
35608 char *buf)
35609 {
35610 diff -urNp linux-2.6.32.45/drivers/pci/proc.c linux-2.6.32.45/drivers/pci/proc.c
35611 --- linux-2.6.32.45/drivers/pci/proc.c 2011-03-27 14:31:47.000000000 -0400
35612 +++ linux-2.6.32.45/drivers/pci/proc.c 2011-04-17 15:56:46.000000000 -0400
35613 @@ -480,7 +480,16 @@ static const struct file_operations proc
35614 static int __init pci_proc_init(void)
35615 {
35616 struct pci_dev *dev = NULL;
35617 +
35618 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
35619 +#ifdef CONFIG_GRKERNSEC_PROC_USER
35620 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
35621 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
35622 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
35623 +#endif
35624 +#else
35625 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
35626 +#endif
35627 proc_create("devices", 0, proc_bus_pci_dir,
35628 &proc_bus_pci_dev_operations);
35629 proc_initialized = 1;
35630 diff -urNp linux-2.6.32.45/drivers/pci/slot.c linux-2.6.32.45/drivers/pci/slot.c
35631 --- linux-2.6.32.45/drivers/pci/slot.c 2011-03-27 14:31:47.000000000 -0400
35632 +++ linux-2.6.32.45/drivers/pci/slot.c 2011-04-17 15:56:46.000000000 -0400
35633 @@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struc
35634 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
35635 }
35636
35637 -static struct sysfs_ops pci_slot_sysfs_ops = {
35638 +static const struct sysfs_ops pci_slot_sysfs_ops = {
35639 .show = pci_slot_attr_show,
35640 .store = pci_slot_attr_store,
35641 };
35642 diff -urNp linux-2.6.32.45/drivers/pcmcia/pcmcia_ioctl.c linux-2.6.32.45/drivers/pcmcia/pcmcia_ioctl.c
35643 --- linux-2.6.32.45/drivers/pcmcia/pcmcia_ioctl.c 2011-03-27 14:31:47.000000000 -0400
35644 +++ linux-2.6.32.45/drivers/pcmcia/pcmcia_ioctl.c 2011-04-17 15:56:46.000000000 -0400
35645 @@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode
35646 return -EFAULT;
35647 }
35648 }
35649 - buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
35650 + buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
35651 if (!buf)
35652 return -ENOMEM;
35653
35654 diff -urNp linux-2.6.32.45/drivers/platform/x86/acer-wmi.c linux-2.6.32.45/drivers/platform/x86/acer-wmi.c
35655 --- linux-2.6.32.45/drivers/platform/x86/acer-wmi.c 2011-03-27 14:31:47.000000000 -0400
35656 +++ linux-2.6.32.45/drivers/platform/x86/acer-wmi.c 2011-04-17 15:56:46.000000000 -0400
35657 @@ -918,7 +918,7 @@ static int update_bl_status(struct backl
35658 return 0;
35659 }
35660
35661 -static struct backlight_ops acer_bl_ops = {
35662 +static const struct backlight_ops acer_bl_ops = {
35663 .get_brightness = read_brightness,
35664 .update_status = update_bl_status,
35665 };
35666 diff -urNp linux-2.6.32.45/drivers/platform/x86/asus_acpi.c linux-2.6.32.45/drivers/platform/x86/asus_acpi.c
35667 --- linux-2.6.32.45/drivers/platform/x86/asus_acpi.c 2011-03-27 14:31:47.000000000 -0400
35668 +++ linux-2.6.32.45/drivers/platform/x86/asus_acpi.c 2011-04-17 15:56:46.000000000 -0400
35669 @@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_
35670 return 0;
35671 }
35672
35673 -static struct backlight_ops asus_backlight_data = {
35674 +static const struct backlight_ops asus_backlight_data = {
35675 .get_brightness = read_brightness,
35676 .update_status = set_brightness_status,
35677 };
35678 diff -urNp linux-2.6.32.45/drivers/platform/x86/asus-laptop.c linux-2.6.32.45/drivers/platform/x86/asus-laptop.c
35679 --- linux-2.6.32.45/drivers/platform/x86/asus-laptop.c 2011-03-27 14:31:47.000000000 -0400
35680 +++ linux-2.6.32.45/drivers/platform/x86/asus-laptop.c 2011-04-17 15:56:46.000000000 -0400
35681 @@ -250,7 +250,7 @@ static struct backlight_device *asus_bac
35682 */
35683 static int read_brightness(struct backlight_device *bd);
35684 static int update_bl_status(struct backlight_device *bd);
35685 -static struct backlight_ops asusbl_ops = {
35686 +static const struct backlight_ops asusbl_ops = {
35687 .get_brightness = read_brightness,
35688 .update_status = update_bl_status,
35689 };
35690 diff -urNp linux-2.6.32.45/drivers/platform/x86/compal-laptop.c linux-2.6.32.45/drivers/platform/x86/compal-laptop.c
35691 --- linux-2.6.32.45/drivers/platform/x86/compal-laptop.c 2011-03-27 14:31:47.000000000 -0400
35692 +++ linux-2.6.32.45/drivers/platform/x86/compal-laptop.c 2011-04-17 15:56:46.000000000 -0400
35693 @@ -163,7 +163,7 @@ static int bl_update_status(struct backl
35694 return set_lcd_level(b->props.brightness);
35695 }
35696
35697 -static struct backlight_ops compalbl_ops = {
35698 +static const struct backlight_ops compalbl_ops = {
35699 .get_brightness = bl_get_brightness,
35700 .update_status = bl_update_status,
35701 };
35702 diff -urNp linux-2.6.32.45/drivers/platform/x86/dell-laptop.c linux-2.6.32.45/drivers/platform/x86/dell-laptop.c
35703 --- linux-2.6.32.45/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:01.000000000 -0400
35704 +++ linux-2.6.32.45/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:33.000000000 -0400
35705 @@ -318,7 +318,7 @@ static int dell_get_intensity(struct bac
35706 return buffer.output[1];
35707 }
35708
35709 -static struct backlight_ops dell_ops = {
35710 +static const struct backlight_ops dell_ops = {
35711 .get_brightness = dell_get_intensity,
35712 .update_status = dell_send_intensity,
35713 };
35714 diff -urNp linux-2.6.32.45/drivers/platform/x86/eeepc-laptop.c linux-2.6.32.45/drivers/platform/x86/eeepc-laptop.c
35715 --- linux-2.6.32.45/drivers/platform/x86/eeepc-laptop.c 2011-03-27 14:31:47.000000000 -0400
35716 +++ linux-2.6.32.45/drivers/platform/x86/eeepc-laptop.c 2011-04-17 15:56:46.000000000 -0400
35717 @@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device
35718 */
35719 static int read_brightness(struct backlight_device *bd);
35720 static int update_bl_status(struct backlight_device *bd);
35721 -static struct backlight_ops eeepcbl_ops = {
35722 +static const struct backlight_ops eeepcbl_ops = {
35723 .get_brightness = read_brightness,
35724 .update_status = update_bl_status,
35725 };
35726 diff -urNp linux-2.6.32.45/drivers/platform/x86/fujitsu-laptop.c linux-2.6.32.45/drivers/platform/x86/fujitsu-laptop.c
35727 --- linux-2.6.32.45/drivers/platform/x86/fujitsu-laptop.c 2011-03-27 14:31:47.000000000 -0400
35728 +++ linux-2.6.32.45/drivers/platform/x86/fujitsu-laptop.c 2011-04-17 15:56:46.000000000 -0400
35729 @@ -436,7 +436,7 @@ static int bl_update_status(struct backl
35730 return ret;
35731 }
35732
35733 -static struct backlight_ops fujitsubl_ops = {
35734 +static const struct backlight_ops fujitsubl_ops = {
35735 .get_brightness = bl_get_brightness,
35736 .update_status = bl_update_status,
35737 };
35738 diff -urNp linux-2.6.32.45/drivers/platform/x86/msi-laptop.c linux-2.6.32.45/drivers/platform/x86/msi-laptop.c
35739 --- linux-2.6.32.45/drivers/platform/x86/msi-laptop.c 2011-03-27 14:31:47.000000000 -0400
35740 +++ linux-2.6.32.45/drivers/platform/x86/msi-laptop.c 2011-04-17 15:56:46.000000000 -0400
35741 @@ -161,7 +161,7 @@ static int bl_update_status(struct backl
35742 return set_lcd_level(b->props.brightness);
35743 }
35744
35745 -static struct backlight_ops msibl_ops = {
35746 +static const struct backlight_ops msibl_ops = {
35747 .get_brightness = bl_get_brightness,
35748 .update_status = bl_update_status,
35749 };
35750 diff -urNp linux-2.6.32.45/drivers/platform/x86/panasonic-laptop.c linux-2.6.32.45/drivers/platform/x86/panasonic-laptop.c
35751 --- linux-2.6.32.45/drivers/platform/x86/panasonic-laptop.c 2011-03-27 14:31:47.000000000 -0400
35752 +++ linux-2.6.32.45/drivers/platform/x86/panasonic-laptop.c 2011-04-17 15:56:46.000000000 -0400
35753 @@ -352,7 +352,7 @@ static int bl_set_status(struct backligh
35754 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
35755 }
35756
35757 -static struct backlight_ops pcc_backlight_ops = {
35758 +static const struct backlight_ops pcc_backlight_ops = {
35759 .get_brightness = bl_get,
35760 .update_status = bl_set_status,
35761 };
35762 diff -urNp linux-2.6.32.45/drivers/platform/x86/sony-laptop.c linux-2.6.32.45/drivers/platform/x86/sony-laptop.c
35763 --- linux-2.6.32.45/drivers/platform/x86/sony-laptop.c 2011-03-27 14:31:47.000000000 -0400
35764 +++ linux-2.6.32.45/drivers/platform/x86/sony-laptop.c 2011-04-17 15:56:46.000000000 -0400
35765 @@ -850,7 +850,7 @@ static int sony_backlight_get_brightness
35766 }
35767
35768 static struct backlight_device *sony_backlight_device;
35769 -static struct backlight_ops sony_backlight_ops = {
35770 +static const struct backlight_ops sony_backlight_ops = {
35771 .update_status = sony_backlight_update_status,
35772 .get_brightness = sony_backlight_get_brightness,
35773 };
35774 diff -urNp linux-2.6.32.45/drivers/platform/x86/thinkpad_acpi.c linux-2.6.32.45/drivers/platform/x86/thinkpad_acpi.c
35775 --- linux-2.6.32.45/drivers/platform/x86/thinkpad_acpi.c 2011-03-27 14:31:47.000000000 -0400
35776 +++ linux-2.6.32.45/drivers/platform/x86/thinkpad_acpi.c 2011-08-05 20:33:55.000000000 -0400
35777 @@ -2137,7 +2137,7 @@ static int hotkey_mask_get(void)
35778 return 0;
35779 }
35780
35781 -void static hotkey_mask_warn_incomplete_mask(void)
35782 +static void hotkey_mask_warn_incomplete_mask(void)
35783 {
35784 /* log only what the user can fix... */
35785 const u32 wantedmask = hotkey_driver_mask &
35786 @@ -6122,7 +6122,7 @@ static void tpacpi_brightness_notify_cha
35787 BACKLIGHT_UPDATE_HOTKEY);
35788 }
35789
35790 -static struct backlight_ops ibm_backlight_data = {
35791 +static const struct backlight_ops ibm_backlight_data = {
35792 .get_brightness = brightness_get,
35793 .update_status = brightness_update_status,
35794 };
35795 diff -urNp linux-2.6.32.45/drivers/platform/x86/toshiba_acpi.c linux-2.6.32.45/drivers/platform/x86/toshiba_acpi.c
35796 --- linux-2.6.32.45/drivers/platform/x86/toshiba_acpi.c 2011-03-27 14:31:47.000000000 -0400
35797 +++ linux-2.6.32.45/drivers/platform/x86/toshiba_acpi.c 2011-04-17 15:56:46.000000000 -0400
35798 @@ -671,7 +671,7 @@ static acpi_status remove_device(void)
35799 return AE_OK;
35800 }
35801
35802 -static struct backlight_ops toshiba_backlight_data = {
35803 +static const struct backlight_ops toshiba_backlight_data = {
35804 .get_brightness = get_lcd,
35805 .update_status = set_lcd_status,
35806 };
35807 diff -urNp linux-2.6.32.45/drivers/pnp/pnpbios/bioscalls.c linux-2.6.32.45/drivers/pnp/pnpbios/bioscalls.c
35808 --- linux-2.6.32.45/drivers/pnp/pnpbios/bioscalls.c 2011-03-27 14:31:47.000000000 -0400
35809 +++ linux-2.6.32.45/drivers/pnp/pnpbios/bioscalls.c 2011-04-17 15:56:46.000000000 -0400
35810 @@ -60,7 +60,7 @@ do { \
35811 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
35812 } while(0)
35813
35814 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
35815 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
35816 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
35817
35818 /*
35819 @@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func
35820
35821 cpu = get_cpu();
35822 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
35823 +
35824 + pax_open_kernel();
35825 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
35826 + pax_close_kernel();
35827
35828 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
35829 spin_lock_irqsave(&pnp_bios_lock, flags);
35830 @@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func
35831 :"memory");
35832 spin_unlock_irqrestore(&pnp_bios_lock, flags);
35833
35834 + pax_open_kernel();
35835 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
35836 + pax_close_kernel();
35837 +
35838 put_cpu();
35839
35840 /* If we get here and this is set then the PnP BIOS faulted on us. */
35841 @@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 n
35842 return status;
35843 }
35844
35845 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
35846 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
35847 {
35848 int i;
35849
35850 @@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_i
35851 pnp_bios_callpoint.offset = header->fields.pm16offset;
35852 pnp_bios_callpoint.segment = PNP_CS16;
35853
35854 + pax_open_kernel();
35855 +
35856 for_each_possible_cpu(i) {
35857 struct desc_struct *gdt = get_cpu_gdt_table(i);
35858 if (!gdt)
35859 @@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_i
35860 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
35861 (unsigned long)__va(header->fields.pm16dseg));
35862 }
35863 +
35864 + pax_close_kernel();
35865 }
35866 diff -urNp linux-2.6.32.45/drivers/pnp/resource.c linux-2.6.32.45/drivers/pnp/resource.c
35867 --- linux-2.6.32.45/drivers/pnp/resource.c 2011-03-27 14:31:47.000000000 -0400
35868 +++ linux-2.6.32.45/drivers/pnp/resource.c 2011-04-17 15:56:46.000000000 -0400
35869 @@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
35870 return 1;
35871
35872 /* check if the resource is valid */
35873 - if (*irq < 0 || *irq > 15)
35874 + if (*irq > 15)
35875 return 0;
35876
35877 /* check if the resource is reserved */
35878 @@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
35879 return 1;
35880
35881 /* check if the resource is valid */
35882 - if (*dma < 0 || *dma == 4 || *dma > 7)
35883 + if (*dma == 4 || *dma > 7)
35884 return 0;
35885
35886 /* check if the resource is reserved */
35887 diff -urNp linux-2.6.32.45/drivers/power/bq27x00_battery.c linux-2.6.32.45/drivers/power/bq27x00_battery.c
35888 --- linux-2.6.32.45/drivers/power/bq27x00_battery.c 2011-03-27 14:31:47.000000000 -0400
35889 +++ linux-2.6.32.45/drivers/power/bq27x00_battery.c 2011-08-05 20:33:55.000000000 -0400
35890 @@ -44,7 +44,7 @@ struct bq27x00_device_info;
35891 struct bq27x00_access_methods {
35892 int (*read)(u8 reg, int *rt_value, int b_single,
35893 struct bq27x00_device_info *di);
35894 -};
35895 +} __no_const;
35896
35897 struct bq27x00_device_info {
35898 struct device *dev;
35899 diff -urNp linux-2.6.32.45/drivers/rtc/rtc-dev.c linux-2.6.32.45/drivers/rtc/rtc-dev.c
35900 --- linux-2.6.32.45/drivers/rtc/rtc-dev.c 2011-03-27 14:31:47.000000000 -0400
35901 +++ linux-2.6.32.45/drivers/rtc/rtc-dev.c 2011-04-17 15:56:46.000000000 -0400
35902 @@ -14,6 +14,7 @@
35903 #include <linux/module.h>
35904 #include <linux/rtc.h>
35905 #include <linux/sched.h>
35906 +#include <linux/grsecurity.h>
35907 #include "rtc-core.h"
35908
35909 static dev_t rtc_devt;
35910 @@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *f
35911 if (copy_from_user(&tm, uarg, sizeof(tm)))
35912 return -EFAULT;
35913
35914 + gr_log_timechange();
35915 +
35916 return rtc_set_time(rtc, &tm);
35917
35918 case RTC_PIE_ON:
35919 diff -urNp linux-2.6.32.45/drivers/s390/cio/qdio_perf.c linux-2.6.32.45/drivers/s390/cio/qdio_perf.c
35920 --- linux-2.6.32.45/drivers/s390/cio/qdio_perf.c 2011-03-27 14:31:47.000000000 -0400
35921 +++ linux-2.6.32.45/drivers/s390/cio/qdio_perf.c 2011-04-17 15:56:46.000000000 -0400
35922 @@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_
35923 static int qdio_perf_proc_show(struct seq_file *m, void *v)
35924 {
35925 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
35926 - (long)atomic_long_read(&perf_stats.qdio_int));
35927 + (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
35928 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
35929 - (long)atomic_long_read(&perf_stats.pci_int));
35930 + (long)atomic_long_read_unchecked(&perf_stats.pci_int));
35931 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
35932 - (long)atomic_long_read(&perf_stats.thin_int));
35933 + (long)atomic_long_read_unchecked(&perf_stats.thin_int));
35934 seq_printf(m, "\n");
35935 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
35936 - (long)atomic_long_read(&perf_stats.tasklet_inbound));
35937 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
35938 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
35939 - (long)atomic_long_read(&perf_stats.tasklet_outbound));
35940 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
35941 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
35942 - (long)atomic_long_read(&perf_stats.tasklet_thinint),
35943 - (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
35944 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
35945 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
35946 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
35947 - (long)atomic_long_read(&perf_stats.thinint_inbound),
35948 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
35949 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
35950 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
35951 seq_printf(m, "\n");
35952 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
35953 - (long)atomic_long_read(&perf_stats.siga_in));
35954 + (long)atomic_long_read_unchecked(&perf_stats.siga_in));
35955 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
35956 - (long)atomic_long_read(&perf_stats.siga_out));
35957 + (long)atomic_long_read_unchecked(&perf_stats.siga_out));
35958 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
35959 - (long)atomic_long_read(&perf_stats.siga_sync));
35960 + (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
35961 seq_printf(m, "\n");
35962 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
35963 - (long)atomic_long_read(&perf_stats.inbound_handler));
35964 + (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
35965 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
35966 - (long)atomic_long_read(&perf_stats.outbound_handler));
35967 + (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
35968 seq_printf(m, "\n");
35969 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
35970 - (long)atomic_long_read(&perf_stats.fast_requeue));
35971 + (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
35972 seq_printf(m, "Number of outbound target full condition\t: %li\n",
35973 - (long)atomic_long_read(&perf_stats.outbound_target_full));
35974 + (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
35975 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
35976 - (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
35977 + (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
35978 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
35979 - (long)atomic_long_read(&perf_stats.debug_stop_polling));
35980 + (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
35981 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
35982 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
35983 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
35984 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
35985 - (long)atomic_long_read(&perf_stats.debug_eqbs_all),
35986 - (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
35987 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
35988 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
35989 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
35990 - (long)atomic_long_read(&perf_stats.debug_sqbs_all),
35991 - (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
35992 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
35993 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
35994 seq_printf(m, "\n");
35995 return 0;
35996 }
35997 diff -urNp linux-2.6.32.45/drivers/s390/cio/qdio_perf.h linux-2.6.32.45/drivers/s390/cio/qdio_perf.h
35998 --- linux-2.6.32.45/drivers/s390/cio/qdio_perf.h 2011-03-27 14:31:47.000000000 -0400
35999 +++ linux-2.6.32.45/drivers/s390/cio/qdio_perf.h 2011-04-17 15:56:46.000000000 -0400
36000 @@ -13,46 +13,46 @@
36001
36002 struct qdio_perf_stats {
36003 /* interrupt handler calls */
36004 - atomic_long_t qdio_int;
36005 - atomic_long_t pci_int;
36006 - atomic_long_t thin_int;
36007 + atomic_long_unchecked_t qdio_int;
36008 + atomic_long_unchecked_t pci_int;
36009 + atomic_long_unchecked_t thin_int;
36010
36011 /* tasklet runs */
36012 - atomic_long_t tasklet_inbound;
36013 - atomic_long_t tasklet_outbound;
36014 - atomic_long_t tasklet_thinint;
36015 - atomic_long_t tasklet_thinint_loop;
36016 - atomic_long_t thinint_inbound;
36017 - atomic_long_t thinint_inbound_loop;
36018 - atomic_long_t thinint_inbound_loop2;
36019 + atomic_long_unchecked_t tasklet_inbound;
36020 + atomic_long_unchecked_t tasklet_outbound;
36021 + atomic_long_unchecked_t tasklet_thinint;
36022 + atomic_long_unchecked_t tasklet_thinint_loop;
36023 + atomic_long_unchecked_t thinint_inbound;
36024 + atomic_long_unchecked_t thinint_inbound_loop;
36025 + atomic_long_unchecked_t thinint_inbound_loop2;
36026
36027 /* signal adapter calls */
36028 - atomic_long_t siga_out;
36029 - atomic_long_t siga_in;
36030 - atomic_long_t siga_sync;
36031 + atomic_long_unchecked_t siga_out;
36032 + atomic_long_unchecked_t siga_in;
36033 + atomic_long_unchecked_t siga_sync;
36034
36035 /* misc */
36036 - atomic_long_t inbound_handler;
36037 - atomic_long_t outbound_handler;
36038 - atomic_long_t fast_requeue;
36039 - atomic_long_t outbound_target_full;
36040 + atomic_long_unchecked_t inbound_handler;
36041 + atomic_long_unchecked_t outbound_handler;
36042 + atomic_long_unchecked_t fast_requeue;
36043 + atomic_long_unchecked_t outbound_target_full;
36044
36045 /* for debugging */
36046 - atomic_long_t debug_tl_out_timer;
36047 - atomic_long_t debug_stop_polling;
36048 - atomic_long_t debug_eqbs_all;
36049 - atomic_long_t debug_eqbs_incomplete;
36050 - atomic_long_t debug_sqbs_all;
36051 - atomic_long_t debug_sqbs_incomplete;
36052 + atomic_long_unchecked_t debug_tl_out_timer;
36053 + atomic_long_unchecked_t debug_stop_polling;
36054 + atomic_long_unchecked_t debug_eqbs_all;
36055 + atomic_long_unchecked_t debug_eqbs_incomplete;
36056 + atomic_long_unchecked_t debug_sqbs_all;
36057 + atomic_long_unchecked_t debug_sqbs_incomplete;
36058 };
36059
36060 extern struct qdio_perf_stats perf_stats;
36061 extern int qdio_performance_stats;
36062
36063 -static inline void qdio_perf_stat_inc(atomic_long_t *count)
36064 +static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
36065 {
36066 if (qdio_performance_stats)
36067 - atomic_long_inc(count);
36068 + atomic_long_inc_unchecked(count);
36069 }
36070
36071 int qdio_setup_perf_stats(void);
36072 diff -urNp linux-2.6.32.45/drivers/scsi/aacraid/aacraid.h linux-2.6.32.45/drivers/scsi/aacraid/aacraid.h
36073 --- linux-2.6.32.45/drivers/scsi/aacraid/aacraid.h 2011-03-27 14:31:47.000000000 -0400
36074 +++ linux-2.6.32.45/drivers/scsi/aacraid/aacraid.h 2011-08-05 20:33:55.000000000 -0400
36075 @@ -471,7 +471,7 @@ struct adapter_ops
36076 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
36077 /* Administrative operations */
36078 int (*adapter_comm)(struct aac_dev * dev, int comm);
36079 -};
36080 +} __no_const;
36081
36082 /*
36083 * Define which interrupt handler needs to be installed
36084 diff -urNp linux-2.6.32.45/drivers/scsi/aacraid/commctrl.c linux-2.6.32.45/drivers/scsi/aacraid/commctrl.c
36085 --- linux-2.6.32.45/drivers/scsi/aacraid/commctrl.c 2011-03-27 14:31:47.000000000 -0400
36086 +++ linux-2.6.32.45/drivers/scsi/aacraid/commctrl.c 2011-05-16 21:46:57.000000000 -0400
36087 @@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_d
36088 u32 actual_fibsize64, actual_fibsize = 0;
36089 int i;
36090
36091 + pax_track_stack();
36092
36093 if (dev->in_reset) {
36094 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
36095 diff -urNp linux-2.6.32.45/drivers/scsi/aic94xx/aic94xx_init.c linux-2.6.32.45/drivers/scsi/aic94xx/aic94xx_init.c
36096 --- linux-2.6.32.45/drivers/scsi/aic94xx/aic94xx_init.c 2011-03-27 14:31:47.000000000 -0400
36097 +++ linux-2.6.32.45/drivers/scsi/aic94xx/aic94xx_init.c 2011-04-17 15:56:46.000000000 -0400
36098 @@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(stru
36099 flash_error_table[i].reason);
36100 }
36101
36102 -static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
36103 +static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
36104 asd_show_update_bios, asd_store_update_bios);
36105
36106 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
36107 diff -urNp linux-2.6.32.45/drivers/scsi/bfa/bfa_iocfc.h linux-2.6.32.45/drivers/scsi/bfa/bfa_iocfc.h
36108 --- linux-2.6.32.45/drivers/scsi/bfa/bfa_iocfc.h 2011-03-27 14:31:47.000000000 -0400
36109 +++ linux-2.6.32.45/drivers/scsi/bfa/bfa_iocfc.h 2011-08-05 20:33:55.000000000 -0400
36110 @@ -61,7 +61,7 @@ struct bfa_hwif_s {
36111 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
36112 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
36113 u32 *nvecs, u32 *maxvec);
36114 -};
36115 +} __no_const;
36116 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
36117
36118 struct bfa_iocfc_s {
36119 diff -urNp linux-2.6.32.45/drivers/scsi/bfa/bfa_ioc.h linux-2.6.32.45/drivers/scsi/bfa/bfa_ioc.h
36120 --- linux-2.6.32.45/drivers/scsi/bfa/bfa_ioc.h 2011-03-27 14:31:47.000000000 -0400
36121 +++ linux-2.6.32.45/drivers/scsi/bfa/bfa_ioc.h 2011-08-05 20:33:55.000000000 -0400
36122 @@ -127,7 +127,7 @@ struct bfa_ioc_cbfn_s {
36123 bfa_ioc_disable_cbfn_t disable_cbfn;
36124 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
36125 bfa_ioc_reset_cbfn_t reset_cbfn;
36126 -};
36127 +} __no_const;
36128
36129 /**
36130 * Heartbeat failure notification queue element.
36131 diff -urNp linux-2.6.32.45/drivers/scsi/BusLogic.c linux-2.6.32.45/drivers/scsi/BusLogic.c
36132 --- linux-2.6.32.45/drivers/scsi/BusLogic.c 2011-03-27 14:31:47.000000000 -0400
36133 +++ linux-2.6.32.45/drivers/scsi/BusLogic.c 2011-05-16 21:46:57.000000000 -0400
36134 @@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFla
36135 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
36136 *PrototypeHostAdapter)
36137 {
36138 + pax_track_stack();
36139 +
36140 /*
36141 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
36142 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
36143 diff -urNp linux-2.6.32.45/drivers/scsi/dpt_i2o.c linux-2.6.32.45/drivers/scsi/dpt_i2o.c
36144 --- linux-2.6.32.45/drivers/scsi/dpt_i2o.c 2011-03-27 14:31:47.000000000 -0400
36145 +++ linux-2.6.32.45/drivers/scsi/dpt_i2o.c 2011-05-16 21:46:57.000000000 -0400
36146 @@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* p
36147 dma_addr_t addr;
36148 ulong flags = 0;
36149
36150 + pax_track_stack();
36151 +
36152 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
36153 // get user msg size in u32s
36154 if(get_user(size, &user_msg[0])){
36155 @@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
36156 s32 rcode;
36157 dma_addr_t addr;
36158
36159 + pax_track_stack();
36160 +
36161 memset(msg, 0 , sizeof(msg));
36162 len = scsi_bufflen(cmd);
36163 direction = 0x00000000;
36164 diff -urNp linux-2.6.32.45/drivers/scsi/eata.c linux-2.6.32.45/drivers/scsi/eata.c
36165 --- linux-2.6.32.45/drivers/scsi/eata.c 2011-03-27 14:31:47.000000000 -0400
36166 +++ linux-2.6.32.45/drivers/scsi/eata.c 2011-05-16 21:46:57.000000000 -0400
36167 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
36168 struct hostdata *ha;
36169 char name[16];
36170
36171 + pax_track_stack();
36172 +
36173 sprintf(name, "%s%d", driver_name, j);
36174
36175 if (!request_region(port_base, REGION_SIZE, driver_name)) {
36176 diff -urNp linux-2.6.32.45/drivers/scsi/fcoe/libfcoe.c linux-2.6.32.45/drivers/scsi/fcoe/libfcoe.c
36177 --- linux-2.6.32.45/drivers/scsi/fcoe/libfcoe.c 2011-03-27 14:31:47.000000000 -0400
36178 +++ linux-2.6.32.45/drivers/scsi/fcoe/libfcoe.c 2011-05-16 21:46:57.000000000 -0400
36179 @@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fc
36180 size_t rlen;
36181 size_t dlen;
36182
36183 + pax_track_stack();
36184 +
36185 fiph = (struct fip_header *)skb->data;
36186 sub = fiph->fip_subcode;
36187 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
36188 diff -urNp linux-2.6.32.45/drivers/scsi/fnic/fnic_main.c linux-2.6.32.45/drivers/scsi/fnic/fnic_main.c
36189 --- linux-2.6.32.45/drivers/scsi/fnic/fnic_main.c 2011-03-27 14:31:47.000000000 -0400
36190 +++ linux-2.6.32.45/drivers/scsi/fnic/fnic_main.c 2011-08-05 20:33:55.000000000 -0400
36191 @@ -669,7 +669,7 @@ static int __devinit fnic_probe(struct p
36192 /* Start local port initiatialization */
36193
36194 lp->link_up = 0;
36195 - lp->tt = fnic_transport_template;
36196 + memcpy((void *)&lp->tt, &fnic_transport_template, sizeof(fnic_transport_template));
36197
36198 lp->max_retry_count = fnic->config.flogi_retries;
36199 lp->max_rport_retry_count = fnic->config.plogi_retries;
36200 diff -urNp linux-2.6.32.45/drivers/scsi/gdth.c linux-2.6.32.45/drivers/scsi/gdth.c
36201 --- linux-2.6.32.45/drivers/scsi/gdth.c 2011-03-27 14:31:47.000000000 -0400
36202 +++ linux-2.6.32.45/drivers/scsi/gdth.c 2011-05-16 21:46:57.000000000 -0400
36203 @@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
36204 ulong flags;
36205 gdth_ha_str *ha;
36206
36207 + pax_track_stack();
36208 +
36209 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
36210 return -EFAULT;
36211 ha = gdth_find_ha(ldrv.ionode);
36212 @@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg
36213 gdth_ha_str *ha;
36214 int rval;
36215
36216 + pax_track_stack();
36217 +
36218 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
36219 res.number >= MAX_HDRIVES)
36220 return -EFAULT;
36221 @@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg,
36222 gdth_ha_str *ha;
36223 int rval;
36224
36225 + pax_track_stack();
36226 +
36227 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
36228 return -EFAULT;
36229 ha = gdth_find_ha(gen.ionode);
36230 @@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
36231 int i;
36232 gdth_cmd_str gdtcmd;
36233 char cmnd[MAX_COMMAND_SIZE];
36234 +
36235 + pax_track_stack();
36236 +
36237 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
36238
36239 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
36240 diff -urNp linux-2.6.32.45/drivers/scsi/gdth_proc.c linux-2.6.32.45/drivers/scsi/gdth_proc.c
36241 --- linux-2.6.32.45/drivers/scsi/gdth_proc.c 2011-03-27 14:31:47.000000000 -0400
36242 +++ linux-2.6.32.45/drivers/scsi/gdth_proc.c 2011-05-16 21:46:57.000000000 -0400
36243 @@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi
36244 ulong64 paddr;
36245
36246 char cmnd[MAX_COMMAND_SIZE];
36247 +
36248 + pax_track_stack();
36249 +
36250 memset(cmnd, 0xff, 12);
36251 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
36252
36253 @@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,ch
36254 gdth_hget_str *phg;
36255 char cmnd[MAX_COMMAND_SIZE];
36256
36257 + pax_track_stack();
36258 +
36259 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
36260 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
36261 if (!gdtcmd || !estr)
36262 diff -urNp linux-2.6.32.45/drivers/scsi/hosts.c linux-2.6.32.45/drivers/scsi/hosts.c
36263 --- linux-2.6.32.45/drivers/scsi/hosts.c 2011-03-27 14:31:47.000000000 -0400
36264 +++ linux-2.6.32.45/drivers/scsi/hosts.c 2011-05-04 17:56:28.000000000 -0400
36265 @@ -40,7 +40,7 @@
36266 #include "scsi_logging.h"
36267
36268
36269 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
36270 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
36271
36272
36273 static void scsi_host_cls_release(struct device *dev)
36274 @@ -344,7 +344,7 @@ struct Scsi_Host *scsi_host_alloc(struct
36275 * subtract one because we increment first then return, but we need to
36276 * know what the next host number was before increment
36277 */
36278 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
36279 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
36280 shost->dma_channel = 0xff;
36281
36282 /* These three are default values which can be overridden */
36283 diff -urNp linux-2.6.32.45/drivers/scsi/ipr.c linux-2.6.32.45/drivers/scsi/ipr.c
36284 --- linux-2.6.32.45/drivers/scsi/ipr.c 2011-03-27 14:31:47.000000000 -0400
36285 +++ linux-2.6.32.45/drivers/scsi/ipr.c 2011-04-17 15:56:46.000000000 -0400
36286 @@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_q
36287 return true;
36288 }
36289
36290 -static struct ata_port_operations ipr_sata_ops = {
36291 +static const struct ata_port_operations ipr_sata_ops = {
36292 .phy_reset = ipr_ata_phy_reset,
36293 .hardreset = ipr_sata_reset,
36294 .post_internal_cmd = ipr_ata_post_internal,
36295 diff -urNp linux-2.6.32.45/drivers/scsi/ips.h linux-2.6.32.45/drivers/scsi/ips.h
36296 --- linux-2.6.32.45/drivers/scsi/ips.h 2011-03-27 14:31:47.000000000 -0400
36297 +++ linux-2.6.32.45/drivers/scsi/ips.h 2011-08-05 20:33:55.000000000 -0400
36298 @@ -1027,7 +1027,7 @@ typedef struct {
36299 int (*intr)(struct ips_ha *);
36300 void (*enableint)(struct ips_ha *);
36301 uint32_t (*statupd)(struct ips_ha *);
36302 -} ips_hw_func_t;
36303 +} __no_const ips_hw_func_t;
36304
36305 typedef struct ips_ha {
36306 uint8_t ha_id[IPS_MAX_CHANNELS+1];
36307 diff -urNp linux-2.6.32.45/drivers/scsi/libfc/fc_exch.c linux-2.6.32.45/drivers/scsi/libfc/fc_exch.c
36308 --- linux-2.6.32.45/drivers/scsi/libfc/fc_exch.c 2011-03-27 14:31:47.000000000 -0400
36309 +++ linux-2.6.32.45/drivers/scsi/libfc/fc_exch.c 2011-08-23 21:22:32.000000000 -0400
36310 @@ -86,12 +86,12 @@ struct fc_exch_mgr {
36311 * all together if not used XXX
36312 */
36313 struct {
36314 - atomic_t no_free_exch;
36315 - atomic_t no_free_exch_xid;
36316 - atomic_t xid_not_found;
36317 - atomic_t xid_busy;
36318 - atomic_t seq_not_found;
36319 - atomic_t non_bls_resp;
36320 + atomic_unchecked_t no_free_exch;
36321 + atomic_unchecked_t no_free_exch_xid;
36322 + atomic_unchecked_t xid_not_found;
36323 + atomic_unchecked_t xid_busy;
36324 + atomic_unchecked_t seq_not_found;
36325 + atomic_unchecked_t non_bls_resp;
36326 } stats;
36327 };
36328 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
36329 @@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(
36330 /* allocate memory for exchange */
36331 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
36332 if (!ep) {
36333 - atomic_inc(&mp->stats.no_free_exch);
36334 + atomic_inc_unchecked(&mp->stats.no_free_exch);
36335 goto out;
36336 }
36337 memset(ep, 0, sizeof(*ep));
36338 @@ -557,7 +557,7 @@ out:
36339 return ep;
36340 err:
36341 spin_unlock_bh(&pool->lock);
36342 - atomic_inc(&mp->stats.no_free_exch_xid);
36343 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
36344 mempool_free(ep, mp->ep_pool);
36345 return NULL;
36346 }
36347 @@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36348 xid = ntohs(fh->fh_ox_id); /* we originated exch */
36349 ep = fc_exch_find(mp, xid);
36350 if (!ep) {
36351 - atomic_inc(&mp->stats.xid_not_found);
36352 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36353 reject = FC_RJT_OX_ID;
36354 goto out;
36355 }
36356 @@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36357 ep = fc_exch_find(mp, xid);
36358 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
36359 if (ep) {
36360 - atomic_inc(&mp->stats.xid_busy);
36361 + atomic_inc_unchecked(&mp->stats.xid_busy);
36362 reject = FC_RJT_RX_ID;
36363 goto rel;
36364 }
36365 @@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36366 }
36367 xid = ep->xid; /* get our XID */
36368 } else if (!ep) {
36369 - atomic_inc(&mp->stats.xid_not_found);
36370 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36371 reject = FC_RJT_RX_ID; /* XID not found */
36372 goto out;
36373 }
36374 @@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36375 } else {
36376 sp = &ep->seq;
36377 if (sp->id != fh->fh_seq_id) {
36378 - atomic_inc(&mp->stats.seq_not_found);
36379 + atomic_inc_unchecked(&mp->stats.seq_not_found);
36380 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
36381 goto rel;
36382 }
36383 @@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct
36384
36385 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
36386 if (!ep) {
36387 - atomic_inc(&mp->stats.xid_not_found);
36388 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36389 goto out;
36390 }
36391 if (ep->esb_stat & ESB_ST_COMPLETE) {
36392 - atomic_inc(&mp->stats.xid_not_found);
36393 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36394 goto out;
36395 }
36396 if (ep->rxid == FC_XID_UNKNOWN)
36397 ep->rxid = ntohs(fh->fh_rx_id);
36398 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
36399 - atomic_inc(&mp->stats.xid_not_found);
36400 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36401 goto rel;
36402 }
36403 if (ep->did != ntoh24(fh->fh_s_id) &&
36404 ep->did != FC_FID_FLOGI) {
36405 - atomic_inc(&mp->stats.xid_not_found);
36406 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36407 goto rel;
36408 }
36409 sof = fr_sof(fp);
36410 @@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct
36411 } else {
36412 sp = &ep->seq;
36413 if (sp->id != fh->fh_seq_id) {
36414 - atomic_inc(&mp->stats.seq_not_found);
36415 + atomic_inc_unchecked(&mp->stats.seq_not_found);
36416 goto rel;
36417 }
36418 }
36419 @@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_
36420 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
36421
36422 if (!sp)
36423 - atomic_inc(&mp->stats.xid_not_found);
36424 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36425 else
36426 - atomic_inc(&mp->stats.non_bls_resp);
36427 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
36428
36429 fc_frame_free(fp);
36430 }
36431 diff -urNp linux-2.6.32.45/drivers/scsi/libsas/sas_ata.c linux-2.6.32.45/drivers/scsi/libsas/sas_ata.c
36432 --- linux-2.6.32.45/drivers/scsi/libsas/sas_ata.c 2011-03-27 14:31:47.000000000 -0400
36433 +++ linux-2.6.32.45/drivers/scsi/libsas/sas_ata.c 2011-04-23 12:56:11.000000000 -0400
36434 @@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_l
36435 }
36436 }
36437
36438 -static struct ata_port_operations sas_sata_ops = {
36439 +static const struct ata_port_operations sas_sata_ops = {
36440 .phy_reset = sas_ata_phy_reset,
36441 .post_internal_cmd = sas_ata_post_internal,
36442 .qc_defer = ata_std_qc_defer,
36443 diff -urNp linux-2.6.32.45/drivers/scsi/lpfc/lpfc_debugfs.c linux-2.6.32.45/drivers/scsi/lpfc/lpfc_debugfs.c
36444 --- linux-2.6.32.45/drivers/scsi/lpfc/lpfc_debugfs.c 2011-03-27 14:31:47.000000000 -0400
36445 +++ linux-2.6.32.45/drivers/scsi/lpfc/lpfc_debugfs.c 2011-05-16 21:46:57.000000000 -0400
36446 @@ -124,7 +124,7 @@ struct lpfc_debug {
36447 int len;
36448 };
36449
36450 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36451 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36452 static unsigned long lpfc_debugfs_start_time = 0L;
36453
36454 /**
36455 @@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
36456 lpfc_debugfs_enable = 0;
36457
36458 len = 0;
36459 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
36460 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
36461 (lpfc_debugfs_max_disc_trc - 1);
36462 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
36463 dtp = vport->disc_trc + i;
36464 @@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
36465 lpfc_debugfs_enable = 0;
36466
36467 len = 0;
36468 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
36469 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
36470 (lpfc_debugfs_max_slow_ring_trc - 1);
36471 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
36472 dtp = phba->slow_ring_trc + i;
36473 @@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
36474 uint32_t *ptr;
36475 char buffer[1024];
36476
36477 + pax_track_stack();
36478 +
36479 off = 0;
36480 spin_lock_irq(&phba->hbalock);
36481
36482 @@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
36483 !vport || !vport->disc_trc)
36484 return;
36485
36486 - index = atomic_inc_return(&vport->disc_trc_cnt) &
36487 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
36488 (lpfc_debugfs_max_disc_trc - 1);
36489 dtp = vport->disc_trc + index;
36490 dtp->fmt = fmt;
36491 dtp->data1 = data1;
36492 dtp->data2 = data2;
36493 dtp->data3 = data3;
36494 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36495 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36496 dtp->jif = jiffies;
36497 #endif
36498 return;
36499 @@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
36500 !phba || !phba->slow_ring_trc)
36501 return;
36502
36503 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
36504 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
36505 (lpfc_debugfs_max_slow_ring_trc - 1);
36506 dtp = phba->slow_ring_trc + index;
36507 dtp->fmt = fmt;
36508 dtp->data1 = data1;
36509 dtp->data2 = data2;
36510 dtp->data3 = data3;
36511 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36512 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36513 dtp->jif = jiffies;
36514 #endif
36515 return;
36516 @@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
36517 "slow_ring buffer\n");
36518 goto debug_failed;
36519 }
36520 - atomic_set(&phba->slow_ring_trc_cnt, 0);
36521 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
36522 memset(phba->slow_ring_trc, 0,
36523 (sizeof(struct lpfc_debugfs_trc) *
36524 lpfc_debugfs_max_slow_ring_trc));
36525 @@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
36526 "buffer\n");
36527 goto debug_failed;
36528 }
36529 - atomic_set(&vport->disc_trc_cnt, 0);
36530 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
36531
36532 snprintf(name, sizeof(name), "discovery_trace");
36533 vport->debug_disc_trc =
36534 diff -urNp linux-2.6.32.45/drivers/scsi/lpfc/lpfc.h linux-2.6.32.45/drivers/scsi/lpfc/lpfc.h
36535 --- linux-2.6.32.45/drivers/scsi/lpfc/lpfc.h 2011-03-27 14:31:47.000000000 -0400
36536 +++ linux-2.6.32.45/drivers/scsi/lpfc/lpfc.h 2011-05-04 17:56:28.000000000 -0400
36537 @@ -400,7 +400,7 @@ struct lpfc_vport {
36538 struct dentry *debug_nodelist;
36539 struct dentry *vport_debugfs_root;
36540 struct lpfc_debugfs_trc *disc_trc;
36541 - atomic_t disc_trc_cnt;
36542 + atomic_unchecked_t disc_trc_cnt;
36543 #endif
36544 uint8_t stat_data_enabled;
36545 uint8_t stat_data_blocked;
36546 @@ -725,8 +725,8 @@ struct lpfc_hba {
36547 struct timer_list fabric_block_timer;
36548 unsigned long bit_flags;
36549 #define FABRIC_COMANDS_BLOCKED 0
36550 - atomic_t num_rsrc_err;
36551 - atomic_t num_cmd_success;
36552 + atomic_unchecked_t num_rsrc_err;
36553 + atomic_unchecked_t num_cmd_success;
36554 unsigned long last_rsrc_error_time;
36555 unsigned long last_ramp_down_time;
36556 unsigned long last_ramp_up_time;
36557 @@ -740,7 +740,7 @@ struct lpfc_hba {
36558 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
36559 struct dentry *debug_slow_ring_trc;
36560 struct lpfc_debugfs_trc *slow_ring_trc;
36561 - atomic_t slow_ring_trc_cnt;
36562 + atomic_unchecked_t slow_ring_trc_cnt;
36563 #endif
36564
36565 /* Used for deferred freeing of ELS data buffers */
36566 diff -urNp linux-2.6.32.45/drivers/scsi/lpfc/lpfc_init.c linux-2.6.32.45/drivers/scsi/lpfc/lpfc_init.c
36567 --- linux-2.6.32.45/drivers/scsi/lpfc/lpfc_init.c 2011-03-27 14:31:47.000000000 -0400
36568 +++ linux-2.6.32.45/drivers/scsi/lpfc/lpfc_init.c 2011-08-05 20:33:55.000000000 -0400
36569 @@ -8021,8 +8021,10 @@ lpfc_init(void)
36570 printk(LPFC_COPYRIGHT "\n");
36571
36572 if (lpfc_enable_npiv) {
36573 - lpfc_transport_functions.vport_create = lpfc_vport_create;
36574 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36575 + pax_open_kernel();
36576 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
36577 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36578 + pax_close_kernel();
36579 }
36580 lpfc_transport_template =
36581 fc_attach_transport(&lpfc_transport_functions);
36582 diff -urNp linux-2.6.32.45/drivers/scsi/lpfc/lpfc_scsi.c linux-2.6.32.45/drivers/scsi/lpfc/lpfc_scsi.c
36583 --- linux-2.6.32.45/drivers/scsi/lpfc/lpfc_scsi.c 2011-03-27 14:31:47.000000000 -0400
36584 +++ linux-2.6.32.45/drivers/scsi/lpfc/lpfc_scsi.c 2011-05-04 17:56:28.000000000 -0400
36585 @@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
36586 uint32_t evt_posted;
36587
36588 spin_lock_irqsave(&phba->hbalock, flags);
36589 - atomic_inc(&phba->num_rsrc_err);
36590 + atomic_inc_unchecked(&phba->num_rsrc_err);
36591 phba->last_rsrc_error_time = jiffies;
36592
36593 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
36594 @@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
36595 unsigned long flags;
36596 struct lpfc_hba *phba = vport->phba;
36597 uint32_t evt_posted;
36598 - atomic_inc(&phba->num_cmd_success);
36599 + atomic_inc_unchecked(&phba->num_cmd_success);
36600
36601 if (vport->cfg_lun_queue_depth <= queue_depth)
36602 return;
36603 @@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
36604 int i;
36605 struct lpfc_rport_data *rdata;
36606
36607 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
36608 - num_cmd_success = atomic_read(&phba->num_cmd_success);
36609 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
36610 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
36611
36612 vports = lpfc_create_vport_work_array(phba);
36613 if (vports != NULL)
36614 @@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
36615 }
36616 }
36617 lpfc_destroy_vport_work_array(phba, vports);
36618 - atomic_set(&phba->num_rsrc_err, 0);
36619 - atomic_set(&phba->num_cmd_success, 0);
36620 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
36621 + atomic_set_unchecked(&phba->num_cmd_success, 0);
36622 }
36623
36624 /**
36625 @@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
36626 }
36627 }
36628 lpfc_destroy_vport_work_array(phba, vports);
36629 - atomic_set(&phba->num_rsrc_err, 0);
36630 - atomic_set(&phba->num_cmd_success, 0);
36631 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
36632 + atomic_set_unchecked(&phba->num_cmd_success, 0);
36633 }
36634
36635 /**
36636 diff -urNp linux-2.6.32.45/drivers/scsi/megaraid/megaraid_mbox.c linux-2.6.32.45/drivers/scsi/megaraid/megaraid_mbox.c
36637 --- linux-2.6.32.45/drivers/scsi/megaraid/megaraid_mbox.c 2011-03-27 14:31:47.000000000 -0400
36638 +++ linux-2.6.32.45/drivers/scsi/megaraid/megaraid_mbox.c 2011-05-16 21:46:57.000000000 -0400
36639 @@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
36640 int rval;
36641 int i;
36642
36643 + pax_track_stack();
36644 +
36645 // Allocate memory for the base list of scb for management module.
36646 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
36647
36648 diff -urNp linux-2.6.32.45/drivers/scsi/osd/osd_initiator.c linux-2.6.32.45/drivers/scsi/osd/osd_initiator.c
36649 --- linux-2.6.32.45/drivers/scsi/osd/osd_initiator.c 2011-03-27 14:31:47.000000000 -0400
36650 +++ linux-2.6.32.45/drivers/scsi/osd/osd_initiator.c 2011-05-16 21:46:57.000000000 -0400
36651 @@ -94,6 +94,8 @@ static int _osd_print_system_info(struct
36652 int nelem = ARRAY_SIZE(get_attrs), a = 0;
36653 int ret;
36654
36655 + pax_track_stack();
36656 +
36657 or = osd_start_request(od, GFP_KERNEL);
36658 if (!or)
36659 return -ENOMEM;
36660 diff -urNp linux-2.6.32.45/drivers/scsi/pmcraid.c linux-2.6.32.45/drivers/scsi/pmcraid.c
36661 --- linux-2.6.32.45/drivers/scsi/pmcraid.c 2011-08-09 18:35:29.000000000 -0400
36662 +++ linux-2.6.32.45/drivers/scsi/pmcraid.c 2011-08-09 18:33:59.000000000 -0400
36663 @@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct sc
36664 res->scsi_dev = scsi_dev;
36665 scsi_dev->hostdata = res;
36666 res->change_detected = 0;
36667 - atomic_set(&res->read_failures, 0);
36668 - atomic_set(&res->write_failures, 0);
36669 + atomic_set_unchecked(&res->read_failures, 0);
36670 + atomic_set_unchecked(&res->write_failures, 0);
36671 rc = 0;
36672 }
36673 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
36674 @@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct
36675
36676 /* If this was a SCSI read/write command keep count of errors */
36677 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
36678 - atomic_inc(&res->read_failures);
36679 + atomic_inc_unchecked(&res->read_failures);
36680 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
36681 - atomic_inc(&res->write_failures);
36682 + atomic_inc_unchecked(&res->write_failures);
36683
36684 if (!RES_IS_GSCSI(res->cfg_entry) &&
36685 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
36686 @@ -4116,7 +4116,7 @@ static void pmcraid_worker_function(stru
36687
36688 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
36689 /* add resources only after host is added into system */
36690 - if (!atomic_read(&pinstance->expose_resources))
36691 + if (!atomic_read_unchecked(&pinstance->expose_resources))
36692 return;
36693
36694 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
36695 @@ -4850,7 +4850,7 @@ static int __devinit pmcraid_init_instan
36696 init_waitqueue_head(&pinstance->reset_wait_q);
36697
36698 atomic_set(&pinstance->outstanding_cmds, 0);
36699 - atomic_set(&pinstance->expose_resources, 0);
36700 + atomic_set_unchecked(&pinstance->expose_resources, 0);
36701
36702 INIT_LIST_HEAD(&pinstance->free_res_q);
36703 INIT_LIST_HEAD(&pinstance->used_res_q);
36704 @@ -5502,7 +5502,7 @@ static int __devinit pmcraid_probe(
36705 /* Schedule worker thread to handle CCN and take care of adding and
36706 * removing devices to OS
36707 */
36708 - atomic_set(&pinstance->expose_resources, 1);
36709 + atomic_set_unchecked(&pinstance->expose_resources, 1);
36710 schedule_work(&pinstance->worker_q);
36711 return rc;
36712
36713 diff -urNp linux-2.6.32.45/drivers/scsi/pmcraid.h linux-2.6.32.45/drivers/scsi/pmcraid.h
36714 --- linux-2.6.32.45/drivers/scsi/pmcraid.h 2011-03-27 14:31:47.000000000 -0400
36715 +++ linux-2.6.32.45/drivers/scsi/pmcraid.h 2011-05-04 17:56:28.000000000 -0400
36716 @@ -690,7 +690,7 @@ struct pmcraid_instance {
36717 atomic_t outstanding_cmds;
36718
36719 /* should add/delete resources to mid-layer now ?*/
36720 - atomic_t expose_resources;
36721 + atomic_unchecked_t expose_resources;
36722
36723 /* Tasklet to handle deferred processing */
36724 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
36725 @@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
36726 struct list_head queue; /* link to "to be exposed" resources */
36727 struct pmcraid_config_table_entry cfg_entry;
36728 struct scsi_device *scsi_dev; /* Link scsi_device structure */
36729 - atomic_t read_failures; /* count of failed READ commands */
36730 - atomic_t write_failures; /* count of failed WRITE commands */
36731 + atomic_unchecked_t read_failures; /* count of failed READ commands */
36732 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
36733
36734 /* To indicate add/delete/modify during CCN */
36735 u8 change_detected;
36736 diff -urNp linux-2.6.32.45/drivers/scsi/qla2xxx/qla_def.h linux-2.6.32.45/drivers/scsi/qla2xxx/qla_def.h
36737 --- linux-2.6.32.45/drivers/scsi/qla2xxx/qla_def.h 2011-03-27 14:31:47.000000000 -0400
36738 +++ linux-2.6.32.45/drivers/scsi/qla2xxx/qla_def.h 2011-08-05 20:33:55.000000000 -0400
36739 @@ -2089,7 +2089,7 @@ struct isp_operations {
36740
36741 int (*get_flash_version) (struct scsi_qla_host *, void *);
36742 int (*start_scsi) (srb_t *);
36743 -};
36744 +} __no_const;
36745
36746 /* MSI-X Support *************************************************************/
36747
36748 diff -urNp linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_def.h linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_def.h
36749 --- linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_def.h 2011-03-27 14:31:47.000000000 -0400
36750 +++ linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_def.h 2011-05-04 17:56:28.000000000 -0400
36751 @@ -240,7 +240,7 @@ struct ddb_entry {
36752 atomic_t retry_relogin_timer; /* Min Time between relogins
36753 * (4000 only) */
36754 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
36755 - atomic_t relogin_retry_count; /* Num of times relogin has been
36756 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
36757 * retried */
36758
36759 uint16_t port;
36760 diff -urNp linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_init.c linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_init.c
36761 --- linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_init.c 2011-03-27 14:31:47.000000000 -0400
36762 +++ linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_init.c 2011-05-04 17:56:28.000000000 -0400
36763 @@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_
36764 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
36765 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
36766 atomic_set(&ddb_entry->relogin_timer, 0);
36767 - atomic_set(&ddb_entry->relogin_retry_count, 0);
36768 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
36769 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
36770 list_add_tail(&ddb_entry->list, &ha->ddb_list);
36771 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
36772 @@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct s
36773 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
36774 atomic_set(&ddb_entry->port_down_timer,
36775 ha->port_down_retry_count);
36776 - atomic_set(&ddb_entry->relogin_retry_count, 0);
36777 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
36778 atomic_set(&ddb_entry->relogin_timer, 0);
36779 clear_bit(DF_RELOGIN, &ddb_entry->flags);
36780 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
36781 diff -urNp linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_os.c linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_os.c
36782 --- linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_os.c 2011-03-27 14:31:47.000000000 -0400
36783 +++ linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_os.c 2011-05-04 17:56:28.000000000 -0400
36784 @@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_ql
36785 ddb_entry->fw_ddb_device_state ==
36786 DDB_DS_SESSION_FAILED) {
36787 /* Reset retry relogin timer */
36788 - atomic_inc(&ddb_entry->relogin_retry_count);
36789 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
36790 DEBUG2(printk("scsi%ld: index[%d] relogin"
36791 " timed out-retrying"
36792 " relogin (%d)\n",
36793 ha->host_no,
36794 ddb_entry->fw_ddb_index,
36795 - atomic_read(&ddb_entry->
36796 + atomic_read_unchecked(&ddb_entry->
36797 relogin_retry_count))
36798 );
36799 start_dpc++;
36800 diff -urNp linux-2.6.32.45/drivers/scsi/scsi.c linux-2.6.32.45/drivers/scsi/scsi.c
36801 --- linux-2.6.32.45/drivers/scsi/scsi.c 2011-03-27 14:31:47.000000000 -0400
36802 +++ linux-2.6.32.45/drivers/scsi/scsi.c 2011-05-04 17:56:28.000000000 -0400
36803 @@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
36804 unsigned long timeout;
36805 int rtn = 0;
36806
36807 - atomic_inc(&cmd->device->iorequest_cnt);
36808 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36809
36810 /* check if the device is still usable */
36811 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
36812 diff -urNp linux-2.6.32.45/drivers/scsi/scsi_debug.c linux-2.6.32.45/drivers/scsi/scsi_debug.c
36813 --- linux-2.6.32.45/drivers/scsi/scsi_debug.c 2011-03-27 14:31:47.000000000 -0400
36814 +++ linux-2.6.32.45/drivers/scsi/scsi_debug.c 2011-05-16 21:46:57.000000000 -0400
36815 @@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_
36816 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
36817 unsigned char *cmd = (unsigned char *)scp->cmnd;
36818
36819 + pax_track_stack();
36820 +
36821 if ((errsts = check_readiness(scp, 1, devip)))
36822 return errsts;
36823 memset(arr, 0, sizeof(arr));
36824 @@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cm
36825 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
36826 unsigned char *cmd = (unsigned char *)scp->cmnd;
36827
36828 + pax_track_stack();
36829 +
36830 if ((errsts = check_readiness(scp, 1, devip)))
36831 return errsts;
36832 memset(arr, 0, sizeof(arr));
36833 diff -urNp linux-2.6.32.45/drivers/scsi/scsi_lib.c linux-2.6.32.45/drivers/scsi/scsi_lib.c
36834 --- linux-2.6.32.45/drivers/scsi/scsi_lib.c 2011-05-10 22:12:01.000000000 -0400
36835 +++ linux-2.6.32.45/drivers/scsi/scsi_lib.c 2011-05-10 22:12:33.000000000 -0400
36836 @@ -1384,7 +1384,7 @@ static void scsi_kill_request(struct req
36837
36838 scsi_init_cmd_errh(cmd);
36839 cmd->result = DID_NO_CONNECT << 16;
36840 - atomic_inc(&cmd->device->iorequest_cnt);
36841 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36842
36843 /*
36844 * SCSI request completion path will do scsi_device_unbusy(),
36845 @@ -1415,9 +1415,9 @@ static void scsi_softirq_done(struct req
36846 */
36847 cmd->serial_number = 0;
36848
36849 - atomic_inc(&cmd->device->iodone_cnt);
36850 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
36851 if (cmd->result)
36852 - atomic_inc(&cmd->device->ioerr_cnt);
36853 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
36854
36855 disposition = scsi_decide_disposition(cmd);
36856 if (disposition != SUCCESS &&
36857 diff -urNp linux-2.6.32.45/drivers/scsi/scsi_sysfs.c linux-2.6.32.45/drivers/scsi/scsi_sysfs.c
36858 --- linux-2.6.32.45/drivers/scsi/scsi_sysfs.c 2011-06-25 12:55:34.000000000 -0400
36859 +++ linux-2.6.32.45/drivers/scsi/scsi_sysfs.c 2011-06-25 12:56:37.000000000 -0400
36860 @@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev,
36861 char *buf) \
36862 { \
36863 struct scsi_device *sdev = to_scsi_device(dev); \
36864 - unsigned long long count = atomic_read(&sdev->field); \
36865 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
36866 return snprintf(buf, 20, "0x%llx\n", count); \
36867 } \
36868 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
36869 diff -urNp linux-2.6.32.45/drivers/scsi/scsi_transport_fc.c linux-2.6.32.45/drivers/scsi/scsi_transport_fc.c
36870 --- linux-2.6.32.45/drivers/scsi/scsi_transport_fc.c 2011-03-27 14:31:47.000000000 -0400
36871 +++ linux-2.6.32.45/drivers/scsi/scsi_transport_fc.c 2011-05-04 17:56:28.000000000 -0400
36872 @@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
36873 * Netlink Infrastructure
36874 */
36875
36876 -static atomic_t fc_event_seq;
36877 +static atomic_unchecked_t fc_event_seq;
36878
36879 /**
36880 * fc_get_event_number - Obtain the next sequential FC event number
36881 @@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
36882 u32
36883 fc_get_event_number(void)
36884 {
36885 - return atomic_add_return(1, &fc_event_seq);
36886 + return atomic_add_return_unchecked(1, &fc_event_seq);
36887 }
36888 EXPORT_SYMBOL(fc_get_event_number);
36889
36890 @@ -641,7 +641,7 @@ static __init int fc_transport_init(void
36891 {
36892 int error;
36893
36894 - atomic_set(&fc_event_seq, 0);
36895 + atomic_set_unchecked(&fc_event_seq, 0);
36896
36897 error = transport_class_register(&fc_host_class);
36898 if (error)
36899 diff -urNp linux-2.6.32.45/drivers/scsi/scsi_transport_iscsi.c linux-2.6.32.45/drivers/scsi/scsi_transport_iscsi.c
36900 --- linux-2.6.32.45/drivers/scsi/scsi_transport_iscsi.c 2011-03-27 14:31:47.000000000 -0400
36901 +++ linux-2.6.32.45/drivers/scsi/scsi_transport_iscsi.c 2011-05-04 17:56:28.000000000 -0400
36902 @@ -81,7 +81,7 @@ struct iscsi_internal {
36903 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
36904 };
36905
36906 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
36907 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
36908 static struct workqueue_struct *iscsi_eh_timer_workq;
36909
36910 /*
36911 @@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_s
36912 int err;
36913
36914 ihost = shost->shost_data;
36915 - session->sid = atomic_add_return(1, &iscsi_session_nr);
36916 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
36917
36918 if (id == ISCSI_MAX_TARGET) {
36919 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
36920 @@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(v
36921 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
36922 ISCSI_TRANSPORT_VERSION);
36923
36924 - atomic_set(&iscsi_session_nr, 0);
36925 + atomic_set_unchecked(&iscsi_session_nr, 0);
36926
36927 err = class_register(&iscsi_transport_class);
36928 if (err)
36929 diff -urNp linux-2.6.32.45/drivers/scsi/scsi_transport_srp.c linux-2.6.32.45/drivers/scsi/scsi_transport_srp.c
36930 --- linux-2.6.32.45/drivers/scsi/scsi_transport_srp.c 2011-03-27 14:31:47.000000000 -0400
36931 +++ linux-2.6.32.45/drivers/scsi/scsi_transport_srp.c 2011-05-04 17:56:28.000000000 -0400
36932 @@ -33,7 +33,7 @@
36933 #include "scsi_transport_srp_internal.h"
36934
36935 struct srp_host_attrs {
36936 - atomic_t next_port_id;
36937 + atomic_unchecked_t next_port_id;
36938 };
36939 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
36940
36941 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
36942 struct Scsi_Host *shost = dev_to_shost(dev);
36943 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
36944
36945 - atomic_set(&srp_host->next_port_id, 0);
36946 + atomic_set_unchecked(&srp_host->next_port_id, 0);
36947 return 0;
36948 }
36949
36950 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
36951 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
36952 rport->roles = ids->roles;
36953
36954 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
36955 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
36956 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
36957
36958 transport_setup_device(&rport->dev);
36959 diff -urNp linux-2.6.32.45/drivers/scsi/sg.c linux-2.6.32.45/drivers/scsi/sg.c
36960 --- linux-2.6.32.45/drivers/scsi/sg.c 2011-03-27 14:31:47.000000000 -0400
36961 +++ linux-2.6.32.45/drivers/scsi/sg.c 2011-04-17 15:56:46.000000000 -0400
36962 @@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
36963 const struct file_operations * fops;
36964 };
36965
36966 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
36967 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
36968 {"allow_dio", &adio_fops},
36969 {"debug", &debug_fops},
36970 {"def_reserved_size", &dressz_fops},
36971 @@ -2307,7 +2307,7 @@ sg_proc_init(void)
36972 {
36973 int k, mask;
36974 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
36975 - struct sg_proc_leaf * leaf;
36976 + const struct sg_proc_leaf * leaf;
36977
36978 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
36979 if (!sg_proc_sgp)
36980 diff -urNp linux-2.6.32.45/drivers/scsi/sym53c8xx_2/sym_glue.c linux-2.6.32.45/drivers/scsi/sym53c8xx_2/sym_glue.c
36981 --- linux-2.6.32.45/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-03-27 14:31:47.000000000 -0400
36982 +++ linux-2.6.32.45/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-05-16 21:46:57.000000000 -0400
36983 @@ -1754,6 +1754,8 @@ static int __devinit sym2_probe(struct p
36984 int do_iounmap = 0;
36985 int do_disable_device = 1;
36986
36987 + pax_track_stack();
36988 +
36989 memset(&sym_dev, 0, sizeof(sym_dev));
36990 memset(&nvram, 0, sizeof(nvram));
36991 sym_dev.pdev = pdev;
36992 diff -urNp linux-2.6.32.45/drivers/serial/kgdboc.c linux-2.6.32.45/drivers/serial/kgdboc.c
36993 --- linux-2.6.32.45/drivers/serial/kgdboc.c 2011-03-27 14:31:47.000000000 -0400
36994 +++ linux-2.6.32.45/drivers/serial/kgdboc.c 2011-04-17 15:56:46.000000000 -0400
36995 @@ -18,7 +18,7 @@
36996
36997 #define MAX_CONFIG_LEN 40
36998
36999 -static struct kgdb_io kgdboc_io_ops;
37000 +static const struct kgdb_io kgdboc_io_ops;
37001
37002 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
37003 static int configured = -1;
37004 @@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void
37005 module_put(THIS_MODULE);
37006 }
37007
37008 -static struct kgdb_io kgdboc_io_ops = {
37009 +static const struct kgdb_io kgdboc_io_ops = {
37010 .name = "kgdboc",
37011 .read_char = kgdboc_get_char,
37012 .write_char = kgdboc_put_char,
37013 diff -urNp linux-2.6.32.45/drivers/spi/spi.c linux-2.6.32.45/drivers/spi/spi.c
37014 --- linux-2.6.32.45/drivers/spi/spi.c 2011-03-27 14:31:47.000000000 -0400
37015 +++ linux-2.6.32.45/drivers/spi/spi.c 2011-05-04 17:56:28.000000000 -0400
37016 @@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, str
37017 EXPORT_SYMBOL_GPL(spi_sync);
37018
37019 /* portable code must never pass more than 32 bytes */
37020 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
37021 +#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
37022
37023 static u8 *buf;
37024
37025 diff -urNp linux-2.6.32.45/drivers/staging/android/binder.c linux-2.6.32.45/drivers/staging/android/binder.c
37026 --- linux-2.6.32.45/drivers/staging/android/binder.c 2011-03-27 14:31:47.000000000 -0400
37027 +++ linux-2.6.32.45/drivers/staging/android/binder.c 2011-04-17 15:56:46.000000000 -0400
37028 @@ -2756,7 +2756,7 @@ static void binder_vma_close(struct vm_a
37029 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
37030 }
37031
37032 -static struct vm_operations_struct binder_vm_ops = {
37033 +static const struct vm_operations_struct binder_vm_ops = {
37034 .open = binder_vma_open,
37035 .close = binder_vma_close,
37036 };
37037 diff -urNp linux-2.6.32.45/drivers/staging/b3dfg/b3dfg.c linux-2.6.32.45/drivers/staging/b3dfg/b3dfg.c
37038 --- linux-2.6.32.45/drivers/staging/b3dfg/b3dfg.c 2011-03-27 14:31:47.000000000 -0400
37039 +++ linux-2.6.32.45/drivers/staging/b3dfg/b3dfg.c 2011-04-17 15:56:46.000000000 -0400
37040 @@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_are
37041 return VM_FAULT_NOPAGE;
37042 }
37043
37044 -static struct vm_operations_struct b3dfg_vm_ops = {
37045 +static const struct vm_operations_struct b3dfg_vm_ops = {
37046 .fault = b3dfg_vma_fault,
37047 };
37048
37049 @@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp,
37050 return r;
37051 }
37052
37053 -static struct file_operations b3dfg_fops = {
37054 +static const struct file_operations b3dfg_fops = {
37055 .owner = THIS_MODULE,
37056 .open = b3dfg_open,
37057 .release = b3dfg_release,
37058 diff -urNp linux-2.6.32.45/drivers/staging/comedi/comedi_fops.c linux-2.6.32.45/drivers/staging/comedi/comedi_fops.c
37059 --- linux-2.6.32.45/drivers/staging/comedi/comedi_fops.c 2011-08-09 18:35:29.000000000 -0400
37060 +++ linux-2.6.32.45/drivers/staging/comedi/comedi_fops.c 2011-08-09 18:34:00.000000000 -0400
37061 @@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct
37062 mutex_unlock(&dev->mutex);
37063 }
37064
37065 -static struct vm_operations_struct comedi_vm_ops = {
37066 +static const struct vm_operations_struct comedi_vm_ops = {
37067 .close = comedi_unmap,
37068 };
37069
37070 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/adsp_driver.c linux-2.6.32.45/drivers/staging/dream/qdsp5/adsp_driver.c
37071 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/adsp_driver.c 2011-03-27 14:31:47.000000000 -0400
37072 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/adsp_driver.c 2011-04-17 15:56:46.000000000 -0400
37073 @@ -576,7 +576,7 @@ static struct adsp_device *inode_to_devi
37074 static dev_t adsp_devno;
37075 static struct class *adsp_class;
37076
37077 -static struct file_operations adsp_fops = {
37078 +static const struct file_operations adsp_fops = {
37079 .owner = THIS_MODULE,
37080 .open = adsp_open,
37081 .unlocked_ioctl = adsp_ioctl,
37082 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_aac.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_aac.c
37083 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_aac.c 2011-03-27 14:31:47.000000000 -0400
37084 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_aac.c 2011-04-17 15:56:46.000000000 -0400
37085 @@ -1022,7 +1022,7 @@ done:
37086 return rc;
37087 }
37088
37089 -static struct file_operations audio_aac_fops = {
37090 +static const struct file_operations audio_aac_fops = {
37091 .owner = THIS_MODULE,
37092 .open = audio_open,
37093 .release = audio_release,
37094 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_amrnb.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_amrnb.c
37095 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-03-27 14:31:47.000000000 -0400
37096 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-04-17 15:56:46.000000000 -0400
37097 @@ -833,7 +833,7 @@ done:
37098 return rc;
37099 }
37100
37101 -static struct file_operations audio_amrnb_fops = {
37102 +static const struct file_operations audio_amrnb_fops = {
37103 .owner = THIS_MODULE,
37104 .open = audamrnb_open,
37105 .release = audamrnb_release,
37106 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_evrc.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_evrc.c
37107 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_evrc.c 2011-03-27 14:31:47.000000000 -0400
37108 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_evrc.c 2011-04-17 15:56:46.000000000 -0400
37109 @@ -805,7 +805,7 @@ dma_fail:
37110 return rc;
37111 }
37112
37113 -static struct file_operations audio_evrc_fops = {
37114 +static const struct file_operations audio_evrc_fops = {
37115 .owner = THIS_MODULE,
37116 .open = audevrc_open,
37117 .release = audevrc_release,
37118 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_in.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_in.c
37119 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_in.c 2011-03-27 14:31:47.000000000 -0400
37120 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_in.c 2011-04-17 15:56:46.000000000 -0400
37121 @@ -913,7 +913,7 @@ static int audpre_open(struct inode *ino
37122 return 0;
37123 }
37124
37125 -static struct file_operations audio_fops = {
37126 +static const struct file_operations audio_fops = {
37127 .owner = THIS_MODULE,
37128 .open = audio_in_open,
37129 .release = audio_in_release,
37130 @@ -922,7 +922,7 @@ static struct file_operations audio_fops
37131 .unlocked_ioctl = audio_in_ioctl,
37132 };
37133
37134 -static struct file_operations audpre_fops = {
37135 +static const struct file_operations audpre_fops = {
37136 .owner = THIS_MODULE,
37137 .open = audpre_open,
37138 .unlocked_ioctl = audpre_ioctl,
37139 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_mp3.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_mp3.c
37140 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_mp3.c 2011-03-27 14:31:47.000000000 -0400
37141 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_mp3.c 2011-04-17 15:56:46.000000000 -0400
37142 @@ -941,7 +941,7 @@ done:
37143 return rc;
37144 }
37145
37146 -static struct file_operations audio_mp3_fops = {
37147 +static const struct file_operations audio_mp3_fops = {
37148 .owner = THIS_MODULE,
37149 .open = audio_open,
37150 .release = audio_release,
37151 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_out.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_out.c
37152 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_out.c 2011-03-27 14:31:47.000000000 -0400
37153 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_out.c 2011-04-17 15:56:46.000000000 -0400
37154 @@ -810,7 +810,7 @@ static int audpp_open(struct inode *inod
37155 return 0;
37156 }
37157
37158 -static struct file_operations audio_fops = {
37159 +static const struct file_operations audio_fops = {
37160 .owner = THIS_MODULE,
37161 .open = audio_open,
37162 .release = audio_release,
37163 @@ -819,7 +819,7 @@ static struct file_operations audio_fops
37164 .unlocked_ioctl = audio_ioctl,
37165 };
37166
37167 -static struct file_operations audpp_fops = {
37168 +static const struct file_operations audpp_fops = {
37169 .owner = THIS_MODULE,
37170 .open = audpp_open,
37171 .unlocked_ioctl = audpp_ioctl,
37172 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_qcelp.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_qcelp.c
37173 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-03-27 14:31:47.000000000 -0400
37174 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-04-17 15:56:46.000000000 -0400
37175 @@ -816,7 +816,7 @@ err:
37176 return rc;
37177 }
37178
37179 -static struct file_operations audio_qcelp_fops = {
37180 +static const struct file_operations audio_qcelp_fops = {
37181 .owner = THIS_MODULE,
37182 .open = audqcelp_open,
37183 .release = audqcelp_release,
37184 diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/snd.c linux-2.6.32.45/drivers/staging/dream/qdsp5/snd.c
37185 --- linux-2.6.32.45/drivers/staging/dream/qdsp5/snd.c 2011-03-27 14:31:47.000000000 -0400
37186 +++ linux-2.6.32.45/drivers/staging/dream/qdsp5/snd.c 2011-04-17 15:56:46.000000000 -0400
37187 @@ -242,7 +242,7 @@ err:
37188 return rc;
37189 }
37190
37191 -static struct file_operations snd_fops = {
37192 +static const struct file_operations snd_fops = {
37193 .owner = THIS_MODULE,
37194 .open = snd_open,
37195 .release = snd_release,
37196 diff -urNp linux-2.6.32.45/drivers/staging/dream/smd/smd_qmi.c linux-2.6.32.45/drivers/staging/dream/smd/smd_qmi.c
37197 --- linux-2.6.32.45/drivers/staging/dream/smd/smd_qmi.c 2011-03-27 14:31:47.000000000 -0400
37198 +++ linux-2.6.32.45/drivers/staging/dream/smd/smd_qmi.c 2011-04-17 15:56:46.000000000 -0400
37199 @@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip,
37200 return 0;
37201 }
37202
37203 -static struct file_operations qmi_fops = {
37204 +static const struct file_operations qmi_fops = {
37205 .owner = THIS_MODULE,
37206 .read = qmi_read,
37207 .write = qmi_write,
37208 diff -urNp linux-2.6.32.45/drivers/staging/dream/smd/smd_rpcrouter_device.c linux-2.6.32.45/drivers/staging/dream/smd/smd_rpcrouter_device.c
37209 --- linux-2.6.32.45/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-03-27 14:31:47.000000000 -0400
37210 +++ linux-2.6.32.45/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-04-17 15:56:46.000000000 -0400
37211 @@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file
37212 return rc;
37213 }
37214
37215 -static struct file_operations rpcrouter_server_fops = {
37216 +static const struct file_operations rpcrouter_server_fops = {
37217 .owner = THIS_MODULE,
37218 .open = rpcrouter_open,
37219 .release = rpcrouter_release,
37220 @@ -224,7 +224,7 @@ static struct file_operations rpcrouter_
37221 .unlocked_ioctl = rpcrouter_ioctl,
37222 };
37223
37224 -static struct file_operations rpcrouter_router_fops = {
37225 +static const struct file_operations rpcrouter_router_fops = {
37226 .owner = THIS_MODULE,
37227 .open = rpcrouter_open,
37228 .release = rpcrouter_release,
37229 diff -urNp linux-2.6.32.45/drivers/staging/dst/dcore.c linux-2.6.32.45/drivers/staging/dst/dcore.c
37230 --- linux-2.6.32.45/drivers/staging/dst/dcore.c 2011-03-27 14:31:47.000000000 -0400
37231 +++ linux-2.6.32.45/drivers/staging/dst/dcore.c 2011-04-17 15:56:46.000000000 -0400
37232 @@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendi
37233 return 0;
37234 }
37235
37236 -static struct block_device_operations dst_blk_ops = {
37237 +static const struct block_device_operations dst_blk_ops = {
37238 .open = dst_bdev_open,
37239 .release = dst_bdev_release,
37240 .owner = THIS_MODULE,
37241 @@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(s
37242 n->size = ctl->size;
37243
37244 atomic_set(&n->refcnt, 1);
37245 - atomic_long_set(&n->gen, 0);
37246 + atomic_long_set_unchecked(&n->gen, 0);
37247 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
37248
37249 err = dst_node_sysfs_init(n);
37250 diff -urNp linux-2.6.32.45/drivers/staging/dst/trans.c linux-2.6.32.45/drivers/staging/dst/trans.c
37251 --- linux-2.6.32.45/drivers/staging/dst/trans.c 2011-03-27 14:31:47.000000000 -0400
37252 +++ linux-2.6.32.45/drivers/staging/dst/trans.c 2011-04-17 15:56:46.000000000 -0400
37253 @@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n,
37254 t->error = 0;
37255 t->retries = 0;
37256 atomic_set(&t->refcnt, 1);
37257 - t->gen = atomic_long_inc_return(&n->gen);
37258 + t->gen = atomic_long_inc_return_unchecked(&n->gen);
37259
37260 t->enc = bio_data_dir(bio);
37261 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
37262 diff -urNp linux-2.6.32.45/drivers/staging/et131x/et1310_tx.c linux-2.6.32.45/drivers/staging/et131x/et1310_tx.c
37263 --- linux-2.6.32.45/drivers/staging/et131x/et1310_tx.c 2011-03-27 14:31:47.000000000 -0400
37264 +++ linux-2.6.32.45/drivers/staging/et131x/et1310_tx.c 2011-05-04 17:56:28.000000000 -0400
37265 @@ -710,11 +710,11 @@ inline void et131x_free_send_packet(stru
37266 struct net_device_stats *stats = &etdev->net_stats;
37267
37268 if (pMpTcb->Flags & fMP_DEST_BROAD)
37269 - atomic_inc(&etdev->Stats.brdcstxmt);
37270 + atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
37271 else if (pMpTcb->Flags & fMP_DEST_MULTI)
37272 - atomic_inc(&etdev->Stats.multixmt);
37273 + atomic_inc_unchecked(&etdev->Stats.multixmt);
37274 else
37275 - atomic_inc(&etdev->Stats.unixmt);
37276 + atomic_inc_unchecked(&etdev->Stats.unixmt);
37277
37278 if (pMpTcb->Packet) {
37279 stats->tx_bytes += pMpTcb->Packet->len;
37280 diff -urNp linux-2.6.32.45/drivers/staging/et131x/et131x_adapter.h linux-2.6.32.45/drivers/staging/et131x/et131x_adapter.h
37281 --- linux-2.6.32.45/drivers/staging/et131x/et131x_adapter.h 2011-03-27 14:31:47.000000000 -0400
37282 +++ linux-2.6.32.45/drivers/staging/et131x/et131x_adapter.h 2011-05-04 17:56:28.000000000 -0400
37283 @@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
37284 * operations
37285 */
37286 u32 unircv; /* # multicast packets received */
37287 - atomic_t unixmt; /* # multicast packets for Tx */
37288 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
37289 u32 multircv; /* # multicast packets received */
37290 - atomic_t multixmt; /* # multicast packets for Tx */
37291 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
37292 u32 brdcstrcv; /* # broadcast packets received */
37293 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
37294 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
37295 u32 norcvbuf; /* # Rx packets discarded */
37296 u32 noxmtbuf; /* # Tx packets discarded */
37297
37298 diff -urNp linux-2.6.32.45/drivers/staging/go7007/go7007-v4l2.c linux-2.6.32.45/drivers/staging/go7007/go7007-v4l2.c
37299 --- linux-2.6.32.45/drivers/staging/go7007/go7007-v4l2.c 2011-03-27 14:31:47.000000000 -0400
37300 +++ linux-2.6.32.45/drivers/staging/go7007/go7007-v4l2.c 2011-04-17 15:56:46.000000000 -0400
37301 @@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_are
37302 return 0;
37303 }
37304
37305 -static struct vm_operations_struct go7007_vm_ops = {
37306 +static const struct vm_operations_struct go7007_vm_ops = {
37307 .open = go7007_vm_open,
37308 .close = go7007_vm_close,
37309 .fault = go7007_vm_fault,
37310 diff -urNp linux-2.6.32.45/drivers/staging/hv/blkvsc_drv.c linux-2.6.32.45/drivers/staging/hv/blkvsc_drv.c
37311 --- linux-2.6.32.45/drivers/staging/hv/blkvsc_drv.c 2011-03-27 14:31:47.000000000 -0400
37312 +++ linux-2.6.32.45/drivers/staging/hv/blkvsc_drv.c 2011-04-17 15:56:46.000000000 -0400
37313 @@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKV
37314 /* The one and only one */
37315 static struct blkvsc_driver_context g_blkvsc_drv;
37316
37317 -static struct block_device_operations block_ops = {
37318 +static const struct block_device_operations block_ops = {
37319 .owner = THIS_MODULE,
37320 .open = blkvsc_open,
37321 .release = blkvsc_release,
37322 diff -urNp linux-2.6.32.45/drivers/staging/hv/Channel.c linux-2.6.32.45/drivers/staging/hv/Channel.c
37323 --- linux-2.6.32.45/drivers/staging/hv/Channel.c 2011-04-17 17:00:52.000000000 -0400
37324 +++ linux-2.6.32.45/drivers/staging/hv/Channel.c 2011-05-04 17:56:28.000000000 -0400
37325 @@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vm
37326
37327 DPRINT_ENTER(VMBUS);
37328
37329 - nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
37330 - atomic_inc(&gVmbusConnection.NextGpadlHandle);
37331 + nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
37332 + atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
37333
37334 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
37335 ASSERT(msgInfo != NULL);
37336 diff -urNp linux-2.6.32.45/drivers/staging/hv/Hv.c linux-2.6.32.45/drivers/staging/hv/Hv.c
37337 --- linux-2.6.32.45/drivers/staging/hv/Hv.c 2011-03-27 14:31:47.000000000 -0400
37338 +++ linux-2.6.32.45/drivers/staging/hv/Hv.c 2011-04-17 15:56:46.000000000 -0400
37339 @@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, vo
37340 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
37341 u32 outputAddressHi = outputAddress >> 32;
37342 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
37343 - volatile void *hypercallPage = gHvContext.HypercallPage;
37344 + volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
37345
37346 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
37347 Control, Input, Output);
37348 diff -urNp linux-2.6.32.45/drivers/staging/hv/vmbus_drv.c linux-2.6.32.45/drivers/staging/hv/vmbus_drv.c
37349 --- linux-2.6.32.45/drivers/staging/hv/vmbus_drv.c 2011-03-27 14:31:47.000000000 -0400
37350 +++ linux-2.6.32.45/drivers/staging/hv/vmbus_drv.c 2011-05-04 17:56:28.000000000 -0400
37351 @@ -532,7 +532,7 @@ static int vmbus_child_device_register(s
37352 to_device_context(root_device_obj);
37353 struct device_context *child_device_ctx =
37354 to_device_context(child_device_obj);
37355 - static atomic_t device_num = ATOMIC_INIT(0);
37356 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
37357
37358 DPRINT_ENTER(VMBUS_DRV);
37359
37360 @@ -541,7 +541,7 @@ static int vmbus_child_device_register(s
37361
37362 /* Set the device name. Otherwise, device_register() will fail. */
37363 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
37364 - atomic_inc_return(&device_num));
37365 + atomic_inc_return_unchecked(&device_num));
37366
37367 /* The new device belongs to this bus */
37368 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
37369 diff -urNp linux-2.6.32.45/drivers/staging/hv/VmbusPrivate.h linux-2.6.32.45/drivers/staging/hv/VmbusPrivate.h
37370 --- linux-2.6.32.45/drivers/staging/hv/VmbusPrivate.h 2011-04-17 17:00:52.000000000 -0400
37371 +++ linux-2.6.32.45/drivers/staging/hv/VmbusPrivate.h 2011-05-04 17:56:28.000000000 -0400
37372 @@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
37373 struct VMBUS_CONNECTION {
37374 enum VMBUS_CONNECT_STATE ConnectState;
37375
37376 - atomic_t NextGpadlHandle;
37377 + atomic_unchecked_t NextGpadlHandle;
37378
37379 /*
37380 * Represents channel interrupts. Each bit position represents a
37381 diff -urNp linux-2.6.32.45/drivers/staging/iio/ring_generic.h linux-2.6.32.45/drivers/staging/iio/ring_generic.h
37382 --- linux-2.6.32.45/drivers/staging/iio/ring_generic.h 2011-03-27 14:31:47.000000000 -0400
37383 +++ linux-2.6.32.45/drivers/staging/iio/ring_generic.h 2011-08-23 20:24:26.000000000 -0400
37384 @@ -87,7 +87,7 @@ struct iio_ring_access_funcs {
37385
37386 int (*is_enabled)(struct iio_ring_buffer *ring);
37387 int (*enable)(struct iio_ring_buffer *ring);
37388 -};
37389 +} __no_const;
37390
37391 /**
37392 * struct iio_ring_buffer - general ring buffer structure
37393 diff -urNp linux-2.6.32.45/drivers/staging/octeon/ethernet.c linux-2.6.32.45/drivers/staging/octeon/ethernet.c
37394 --- linux-2.6.32.45/drivers/staging/octeon/ethernet.c 2011-03-27 14:31:47.000000000 -0400
37395 +++ linux-2.6.32.45/drivers/staging/octeon/ethernet.c 2011-05-04 17:56:28.000000000 -0400
37396 @@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_
37397 * since the RX tasklet also increments it.
37398 */
37399 #ifdef CONFIG_64BIT
37400 - atomic64_add(rx_status.dropped_packets,
37401 - (atomic64_t *)&priv->stats.rx_dropped);
37402 + atomic64_add_unchecked(rx_status.dropped_packets,
37403 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37404 #else
37405 - atomic_add(rx_status.dropped_packets,
37406 - (atomic_t *)&priv->stats.rx_dropped);
37407 + atomic_add_unchecked(rx_status.dropped_packets,
37408 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
37409 #endif
37410 }
37411
37412 diff -urNp linux-2.6.32.45/drivers/staging/octeon/ethernet-rx.c linux-2.6.32.45/drivers/staging/octeon/ethernet-rx.c
37413 --- linux-2.6.32.45/drivers/staging/octeon/ethernet-rx.c 2011-03-27 14:31:47.000000000 -0400
37414 +++ linux-2.6.32.45/drivers/staging/octeon/ethernet-rx.c 2011-05-04 17:56:28.000000000 -0400
37415 @@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long un
37416 /* Increment RX stats for virtual ports */
37417 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
37418 #ifdef CONFIG_64BIT
37419 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
37420 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
37421 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
37422 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
37423 #else
37424 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
37425 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
37426 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
37427 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
37428 #endif
37429 }
37430 netif_receive_skb(skb);
37431 @@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long un
37432 dev->name);
37433 */
37434 #ifdef CONFIG_64BIT
37435 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
37436 + atomic64_add_unchecked(1, (atomic64_t *)&priv->stats.rx_dropped);
37437 #else
37438 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
37439 + atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped);
37440 #endif
37441 dev_kfree_skb_irq(skb);
37442 }
37443 diff -urNp linux-2.6.32.45/drivers/staging/panel/panel.c linux-2.6.32.45/drivers/staging/panel/panel.c
37444 --- linux-2.6.32.45/drivers/staging/panel/panel.c 2011-03-27 14:31:47.000000000 -0400
37445 +++ linux-2.6.32.45/drivers/staging/panel/panel.c 2011-04-17 15:56:46.000000000 -0400
37446 @@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *ino
37447 return 0;
37448 }
37449
37450 -static struct file_operations lcd_fops = {
37451 +static const struct file_operations lcd_fops = {
37452 .write = lcd_write,
37453 .open = lcd_open,
37454 .release = lcd_release,
37455 @@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *
37456 return 0;
37457 }
37458
37459 -static struct file_operations keypad_fops = {
37460 +static const struct file_operations keypad_fops = {
37461 .read = keypad_read, /* read */
37462 .open = keypad_open, /* open */
37463 .release = keypad_release, /* close */
37464 diff -urNp linux-2.6.32.45/drivers/staging/phison/phison.c linux-2.6.32.45/drivers/staging/phison/phison.c
37465 --- linux-2.6.32.45/drivers/staging/phison/phison.c 2011-03-27 14:31:47.000000000 -0400
37466 +++ linux-2.6.32.45/drivers/staging/phison/phison.c 2011-04-17 15:56:46.000000000 -0400
37467 @@ -43,7 +43,7 @@ static struct scsi_host_template phison_
37468 ATA_BMDMA_SHT(DRV_NAME),
37469 };
37470
37471 -static struct ata_port_operations phison_ops = {
37472 +static const struct ata_port_operations phison_ops = {
37473 .inherits = &ata_bmdma_port_ops,
37474 .prereset = phison_pre_reset,
37475 };
37476 diff -urNp linux-2.6.32.45/drivers/staging/poch/poch.c linux-2.6.32.45/drivers/staging/poch/poch.c
37477 --- linux-2.6.32.45/drivers/staging/poch/poch.c 2011-03-27 14:31:47.000000000 -0400
37478 +++ linux-2.6.32.45/drivers/staging/poch/poch.c 2011-04-17 15:56:46.000000000 -0400
37479 @@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inod
37480 return 0;
37481 }
37482
37483 -static struct file_operations poch_fops = {
37484 +static const struct file_operations poch_fops = {
37485 .owner = THIS_MODULE,
37486 .open = poch_open,
37487 .release = poch_release,
37488 diff -urNp linux-2.6.32.45/drivers/staging/pohmelfs/inode.c linux-2.6.32.45/drivers/staging/pohmelfs/inode.c
37489 --- linux-2.6.32.45/drivers/staging/pohmelfs/inode.c 2011-03-27 14:31:47.000000000 -0400
37490 +++ linux-2.6.32.45/drivers/staging/pohmelfs/inode.c 2011-05-04 17:56:20.000000000 -0400
37491 @@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct su
37492 mutex_init(&psb->mcache_lock);
37493 psb->mcache_root = RB_ROOT;
37494 psb->mcache_timeout = msecs_to_jiffies(5000);
37495 - atomic_long_set(&psb->mcache_gen, 0);
37496 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
37497
37498 psb->trans_max_pages = 100;
37499
37500 @@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct su
37501 INIT_LIST_HEAD(&psb->crypto_ready_list);
37502 INIT_LIST_HEAD(&psb->crypto_active_list);
37503
37504 - atomic_set(&psb->trans_gen, 1);
37505 + atomic_set_unchecked(&psb->trans_gen, 1);
37506 atomic_long_set(&psb->total_inodes, 0);
37507
37508 mutex_init(&psb->state_lock);
37509 diff -urNp linux-2.6.32.45/drivers/staging/pohmelfs/mcache.c linux-2.6.32.45/drivers/staging/pohmelfs/mcache.c
37510 --- linux-2.6.32.45/drivers/staging/pohmelfs/mcache.c 2011-03-27 14:31:47.000000000 -0400
37511 +++ linux-2.6.32.45/drivers/staging/pohmelfs/mcache.c 2011-04-17 15:56:46.000000000 -0400
37512 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
37513 m->data = data;
37514 m->start = start;
37515 m->size = size;
37516 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
37517 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
37518
37519 mutex_lock(&psb->mcache_lock);
37520 err = pohmelfs_mcache_insert(psb, m);
37521 diff -urNp linux-2.6.32.45/drivers/staging/pohmelfs/netfs.h linux-2.6.32.45/drivers/staging/pohmelfs/netfs.h
37522 --- linux-2.6.32.45/drivers/staging/pohmelfs/netfs.h 2011-03-27 14:31:47.000000000 -0400
37523 +++ linux-2.6.32.45/drivers/staging/pohmelfs/netfs.h 2011-05-04 17:56:20.000000000 -0400
37524 @@ -570,14 +570,14 @@ struct pohmelfs_config;
37525 struct pohmelfs_sb {
37526 struct rb_root mcache_root;
37527 struct mutex mcache_lock;
37528 - atomic_long_t mcache_gen;
37529 + atomic_long_unchecked_t mcache_gen;
37530 unsigned long mcache_timeout;
37531
37532 unsigned int idx;
37533
37534 unsigned int trans_retries;
37535
37536 - atomic_t trans_gen;
37537 + atomic_unchecked_t trans_gen;
37538
37539 unsigned int crypto_attached_size;
37540 unsigned int crypto_align_size;
37541 diff -urNp linux-2.6.32.45/drivers/staging/pohmelfs/trans.c linux-2.6.32.45/drivers/staging/pohmelfs/trans.c
37542 --- linux-2.6.32.45/drivers/staging/pohmelfs/trans.c 2011-03-27 14:31:47.000000000 -0400
37543 +++ linux-2.6.32.45/drivers/staging/pohmelfs/trans.c 2011-05-04 17:56:28.000000000 -0400
37544 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
37545 int err;
37546 struct netfs_cmd *cmd = t->iovec.iov_base;
37547
37548 - t->gen = atomic_inc_return(&psb->trans_gen);
37549 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
37550
37551 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
37552 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
37553 diff -urNp linux-2.6.32.45/drivers/staging/sep/sep_driver.c linux-2.6.32.45/drivers/staging/sep/sep_driver.c
37554 --- linux-2.6.32.45/drivers/staging/sep/sep_driver.c 2011-03-27 14:31:47.000000000 -0400
37555 +++ linux-2.6.32.45/drivers/staging/sep/sep_driver.c 2011-04-17 15:56:46.000000000 -0400
37556 @@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver
37557 static dev_t sep_devno;
37558
37559 /* the files operations structure of the driver */
37560 -static struct file_operations sep_file_operations = {
37561 +static const struct file_operations sep_file_operations = {
37562 .owner = THIS_MODULE,
37563 .ioctl = sep_ioctl,
37564 .poll = sep_poll,
37565 diff -urNp linux-2.6.32.45/drivers/staging/usbip/usbip_common.h linux-2.6.32.45/drivers/staging/usbip/usbip_common.h
37566 --- linux-2.6.32.45/drivers/staging/usbip/usbip_common.h 2011-04-17 17:00:52.000000000 -0400
37567 +++ linux-2.6.32.45/drivers/staging/usbip/usbip_common.h 2011-08-23 20:24:26.000000000 -0400
37568 @@ -374,7 +374,7 @@ struct usbip_device {
37569 void (*shutdown)(struct usbip_device *);
37570 void (*reset)(struct usbip_device *);
37571 void (*unusable)(struct usbip_device *);
37572 - } eh_ops;
37573 + } __no_const eh_ops;
37574 };
37575
37576
37577 diff -urNp linux-2.6.32.45/drivers/staging/usbip/vhci.h linux-2.6.32.45/drivers/staging/usbip/vhci.h
37578 --- linux-2.6.32.45/drivers/staging/usbip/vhci.h 2011-03-27 14:31:47.000000000 -0400
37579 +++ linux-2.6.32.45/drivers/staging/usbip/vhci.h 2011-05-04 17:56:28.000000000 -0400
37580 @@ -92,7 +92,7 @@ struct vhci_hcd {
37581 unsigned resuming:1;
37582 unsigned long re_timeout;
37583
37584 - atomic_t seqnum;
37585 + atomic_unchecked_t seqnum;
37586
37587 /*
37588 * NOTE:
37589 diff -urNp linux-2.6.32.45/drivers/staging/usbip/vhci_hcd.c linux-2.6.32.45/drivers/staging/usbip/vhci_hcd.c
37590 --- linux-2.6.32.45/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:01.000000000 -0400
37591 +++ linux-2.6.32.45/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:33.000000000 -0400
37592 @@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
37593 return;
37594 }
37595
37596 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
37597 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37598 if (priv->seqnum == 0xffff)
37599 usbip_uinfo("seqnum max\n");
37600
37601 @@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_h
37602 return -ENOMEM;
37603 }
37604
37605 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
37606 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37607 if (unlink->seqnum == 0xffff)
37608 usbip_uinfo("seqnum max\n");
37609
37610 @@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hc
37611 vdev->rhport = rhport;
37612 }
37613
37614 - atomic_set(&vhci->seqnum, 0);
37615 + atomic_set_unchecked(&vhci->seqnum, 0);
37616 spin_lock_init(&vhci->lock);
37617
37618
37619 diff -urNp linux-2.6.32.45/drivers/staging/usbip/vhci_rx.c linux-2.6.32.45/drivers/staging/usbip/vhci_rx.c
37620 --- linux-2.6.32.45/drivers/staging/usbip/vhci_rx.c 2011-04-17 17:00:52.000000000 -0400
37621 +++ linux-2.6.32.45/drivers/staging/usbip/vhci_rx.c 2011-05-04 17:56:28.000000000 -0400
37622 @@ -78,7 +78,7 @@ static void vhci_recv_ret_submit(struct
37623 usbip_uerr("cannot find a urb of seqnum %u\n",
37624 pdu->base.seqnum);
37625 usbip_uinfo("max seqnum %d\n",
37626 - atomic_read(&the_controller->seqnum));
37627 + atomic_read_unchecked(&the_controller->seqnum));
37628 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
37629 return;
37630 }
37631 diff -urNp linux-2.6.32.45/drivers/staging/vme/devices/vme_user.c linux-2.6.32.45/drivers/staging/vme/devices/vme_user.c
37632 --- linux-2.6.32.45/drivers/staging/vme/devices/vme_user.c 2011-03-27 14:31:47.000000000 -0400
37633 +++ linux-2.6.32.45/drivers/staging/vme/devices/vme_user.c 2011-04-17 15:56:46.000000000 -0400
37634 @@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *
37635 static int __init vme_user_probe(struct device *, int, int);
37636 static int __exit vme_user_remove(struct device *, int, int);
37637
37638 -static struct file_operations vme_user_fops = {
37639 +static const struct file_operations vme_user_fops = {
37640 .open = vme_user_open,
37641 .release = vme_user_release,
37642 .read = vme_user_read,
37643 diff -urNp linux-2.6.32.45/drivers/staging/wlan-ng/hfa384x_usb.c linux-2.6.32.45/drivers/staging/wlan-ng/hfa384x_usb.c
37644 --- linux-2.6.32.45/drivers/staging/wlan-ng/hfa384x_usb.c 2011-03-27 14:31:47.000000000 -0400
37645 +++ linux-2.6.32.45/drivers/staging/wlan-ng/hfa384x_usb.c 2011-08-23 20:24:26.000000000 -0400
37646 @@ -205,7 +205,7 @@ static void unlocked_usbctlx_complete(hf
37647
37648 struct usbctlx_completor {
37649 int (*complete) (struct usbctlx_completor *);
37650 -};
37651 +} __no_const;
37652 typedef struct usbctlx_completor usbctlx_completor_t;
37653
37654 static int
37655 diff -urNp linux-2.6.32.45/drivers/telephony/ixj.c linux-2.6.32.45/drivers/telephony/ixj.c
37656 --- linux-2.6.32.45/drivers/telephony/ixj.c 2011-03-27 14:31:47.000000000 -0400
37657 +++ linux-2.6.32.45/drivers/telephony/ixj.c 2011-05-16 21:46:57.000000000 -0400
37658 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
37659 bool mContinue;
37660 char *pIn, *pOut;
37661
37662 + pax_track_stack();
37663 +
37664 if (!SCI_Prepare(j))
37665 return 0;
37666
37667 diff -urNp linux-2.6.32.45/drivers/uio/uio.c linux-2.6.32.45/drivers/uio/uio.c
37668 --- linux-2.6.32.45/drivers/uio/uio.c 2011-03-27 14:31:47.000000000 -0400
37669 +++ linux-2.6.32.45/drivers/uio/uio.c 2011-05-04 17:56:20.000000000 -0400
37670 @@ -23,6 +23,7 @@
37671 #include <linux/string.h>
37672 #include <linux/kobject.h>
37673 #include <linux/uio_driver.h>
37674 +#include <asm/local.h>
37675
37676 #define UIO_MAX_DEVICES 255
37677
37678 @@ -30,10 +31,10 @@ struct uio_device {
37679 struct module *owner;
37680 struct device *dev;
37681 int minor;
37682 - atomic_t event;
37683 + atomic_unchecked_t event;
37684 struct fasync_struct *async_queue;
37685 wait_queue_head_t wait;
37686 - int vma_count;
37687 + local_t vma_count;
37688 struct uio_info *info;
37689 struct kobject *map_dir;
37690 struct kobject *portio_dir;
37691 @@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobj
37692 return entry->show(mem, buf);
37693 }
37694
37695 -static struct sysfs_ops map_sysfs_ops = {
37696 +static const struct sysfs_ops map_sysfs_ops = {
37697 .show = map_type_show,
37698 };
37699
37700 @@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct k
37701 return entry->show(port, buf);
37702 }
37703
37704 -static struct sysfs_ops portio_sysfs_ops = {
37705 +static const struct sysfs_ops portio_sysfs_ops = {
37706 .show = portio_type_show,
37707 };
37708
37709 @@ -255,7 +256,7 @@ static ssize_t show_event(struct device
37710 struct uio_device *idev = dev_get_drvdata(dev);
37711 if (idev)
37712 return sprintf(buf, "%u\n",
37713 - (unsigned int)atomic_read(&idev->event));
37714 + (unsigned int)atomic_read_unchecked(&idev->event));
37715 else
37716 return -ENODEV;
37717 }
37718 @@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *i
37719 {
37720 struct uio_device *idev = info->uio_dev;
37721
37722 - atomic_inc(&idev->event);
37723 + atomic_inc_unchecked(&idev->event);
37724 wake_up_interruptible(&idev->wait);
37725 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
37726 }
37727 @@ -477,7 +478,7 @@ static int uio_open(struct inode *inode,
37728 }
37729
37730 listener->dev = idev;
37731 - listener->event_count = atomic_read(&idev->event);
37732 + listener->event_count = atomic_read_unchecked(&idev->event);
37733 filep->private_data = listener;
37734
37735 if (idev->info->open) {
37736 @@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file
37737 return -EIO;
37738
37739 poll_wait(filep, &idev->wait, wait);
37740 - if (listener->event_count != atomic_read(&idev->event))
37741 + if (listener->event_count != atomic_read_unchecked(&idev->event))
37742 return POLLIN | POLLRDNORM;
37743 return 0;
37744 }
37745 @@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *fil
37746 do {
37747 set_current_state(TASK_INTERRUPTIBLE);
37748
37749 - event_count = atomic_read(&idev->event);
37750 + event_count = atomic_read_unchecked(&idev->event);
37751 if (event_count != listener->event_count) {
37752 if (copy_to_user(buf, &event_count, count))
37753 retval = -EFAULT;
37754 @@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_
37755 static void uio_vma_open(struct vm_area_struct *vma)
37756 {
37757 struct uio_device *idev = vma->vm_private_data;
37758 - idev->vma_count++;
37759 + local_inc(&idev->vma_count);
37760 }
37761
37762 static void uio_vma_close(struct vm_area_struct *vma)
37763 {
37764 struct uio_device *idev = vma->vm_private_data;
37765 - idev->vma_count--;
37766 + local_dec(&idev->vma_count);
37767 }
37768
37769 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
37770 @@ -840,7 +841,7 @@ int __uio_register_device(struct module
37771 idev->owner = owner;
37772 idev->info = info;
37773 init_waitqueue_head(&idev->wait);
37774 - atomic_set(&idev->event, 0);
37775 + atomic_set_unchecked(&idev->event, 0);
37776
37777 ret = uio_get_minor(idev);
37778 if (ret)
37779 diff -urNp linux-2.6.32.45/drivers/usb/atm/usbatm.c linux-2.6.32.45/drivers/usb/atm/usbatm.c
37780 --- linux-2.6.32.45/drivers/usb/atm/usbatm.c 2011-03-27 14:31:47.000000000 -0400
37781 +++ linux-2.6.32.45/drivers/usb/atm/usbatm.c 2011-04-17 15:56:46.000000000 -0400
37782 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(stru
37783 if (printk_ratelimit())
37784 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
37785 __func__, vpi, vci);
37786 - atomic_inc(&vcc->stats->rx_err);
37787 + atomic_inc_unchecked(&vcc->stats->rx_err);
37788 return;
37789 }
37790
37791 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(stru
37792 if (length > ATM_MAX_AAL5_PDU) {
37793 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
37794 __func__, length, vcc);
37795 - atomic_inc(&vcc->stats->rx_err);
37796 + atomic_inc_unchecked(&vcc->stats->rx_err);
37797 goto out;
37798 }
37799
37800 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(stru
37801 if (sarb->len < pdu_length) {
37802 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
37803 __func__, pdu_length, sarb->len, vcc);
37804 - atomic_inc(&vcc->stats->rx_err);
37805 + atomic_inc_unchecked(&vcc->stats->rx_err);
37806 goto out;
37807 }
37808
37809 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
37810 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
37811 __func__, vcc);
37812 - atomic_inc(&vcc->stats->rx_err);
37813 + atomic_inc_unchecked(&vcc->stats->rx_err);
37814 goto out;
37815 }
37816
37817 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(stru
37818 if (printk_ratelimit())
37819 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
37820 __func__, length);
37821 - atomic_inc(&vcc->stats->rx_drop);
37822 + atomic_inc_unchecked(&vcc->stats->rx_drop);
37823 goto out;
37824 }
37825
37826 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(stru
37827
37828 vcc->push(vcc, skb);
37829
37830 - atomic_inc(&vcc->stats->rx);
37831 + atomic_inc_unchecked(&vcc->stats->rx);
37832 out:
37833 skb_trim(sarb, 0);
37834 }
37835 @@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned l
37836 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
37837
37838 usbatm_pop(vcc, skb);
37839 - atomic_inc(&vcc->stats->tx);
37840 + atomic_inc_unchecked(&vcc->stats->tx);
37841
37842 skb = skb_dequeue(&instance->sndqueue);
37843 }
37844 @@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct a
37845 if (!left--)
37846 return sprintf(page,
37847 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
37848 - atomic_read(&atm_dev->stats.aal5.tx),
37849 - atomic_read(&atm_dev->stats.aal5.tx_err),
37850 - atomic_read(&atm_dev->stats.aal5.rx),
37851 - atomic_read(&atm_dev->stats.aal5.rx_err),
37852 - atomic_read(&atm_dev->stats.aal5.rx_drop));
37853 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
37854 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
37855 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
37856 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
37857 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
37858
37859 if (!left--) {
37860 if (instance->disconnected)
37861 diff -urNp linux-2.6.32.45/drivers/usb/class/cdc-wdm.c linux-2.6.32.45/drivers/usb/class/cdc-wdm.c
37862 --- linux-2.6.32.45/drivers/usb/class/cdc-wdm.c 2011-03-27 14:31:47.000000000 -0400
37863 +++ linux-2.6.32.45/drivers/usb/class/cdc-wdm.c 2011-04-17 15:56:46.000000000 -0400
37864 @@ -314,7 +314,7 @@ static ssize_t wdm_write
37865 if (r < 0)
37866 goto outnp;
37867
37868 - if (!file->f_flags && O_NONBLOCK)
37869 + if (!(file->f_flags & O_NONBLOCK))
37870 r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
37871 &desc->flags));
37872 else
37873 diff -urNp linux-2.6.32.45/drivers/usb/core/hcd.c linux-2.6.32.45/drivers/usb/core/hcd.c
37874 --- linux-2.6.32.45/drivers/usb/core/hcd.c 2011-03-27 14:31:47.000000000 -0400
37875 +++ linux-2.6.32.45/drivers/usb/core/hcd.c 2011-04-17 15:56:46.000000000 -0400
37876 @@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutd
37877
37878 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
37879
37880 -struct usb_mon_operations *mon_ops;
37881 +const struct usb_mon_operations *mon_ops;
37882
37883 /*
37884 * The registration is unlocked.
37885 @@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
37886 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
37887 */
37888
37889 -int usb_mon_register (struct usb_mon_operations *ops)
37890 +int usb_mon_register (const struct usb_mon_operations *ops)
37891 {
37892
37893 if (mon_ops)
37894 diff -urNp linux-2.6.32.45/drivers/usb/core/hcd.h linux-2.6.32.45/drivers/usb/core/hcd.h
37895 --- linux-2.6.32.45/drivers/usb/core/hcd.h 2011-03-27 14:31:47.000000000 -0400
37896 +++ linux-2.6.32.45/drivers/usb/core/hcd.h 2011-04-17 15:56:46.000000000 -0400
37897 @@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) {
37898 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
37899
37900 struct usb_mon_operations {
37901 - void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
37902 - void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
37903 - void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
37904 + void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
37905 + void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
37906 + void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
37907 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
37908 };
37909
37910 -extern struct usb_mon_operations *mon_ops;
37911 +extern const struct usb_mon_operations *mon_ops;
37912
37913 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
37914 {
37915 @@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(s
37916 (*mon_ops->urb_complete)(bus, urb, status);
37917 }
37918
37919 -int usb_mon_register(struct usb_mon_operations *ops);
37920 +int usb_mon_register(const struct usb_mon_operations *ops);
37921 void usb_mon_deregister(void);
37922
37923 #else
37924 diff -urNp linux-2.6.32.45/drivers/usb/core/message.c linux-2.6.32.45/drivers/usb/core/message.c
37925 --- linux-2.6.32.45/drivers/usb/core/message.c 2011-03-27 14:31:47.000000000 -0400
37926 +++ linux-2.6.32.45/drivers/usb/core/message.c 2011-04-17 15:56:46.000000000 -0400
37927 @@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device
37928 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
37929 if (buf) {
37930 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
37931 - if (len > 0) {
37932 - smallbuf = kmalloc(++len, GFP_NOIO);
37933 + if (len++ > 0) {
37934 + smallbuf = kmalloc(len, GFP_NOIO);
37935 if (!smallbuf)
37936 return buf;
37937 memcpy(smallbuf, buf, len);
37938 diff -urNp linux-2.6.32.45/drivers/usb/misc/appledisplay.c linux-2.6.32.45/drivers/usb/misc/appledisplay.c
37939 --- linux-2.6.32.45/drivers/usb/misc/appledisplay.c 2011-03-27 14:31:47.000000000 -0400
37940 +++ linux-2.6.32.45/drivers/usb/misc/appledisplay.c 2011-04-17 15:56:46.000000000 -0400
37941 @@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightnes
37942 return pdata->msgdata[1];
37943 }
37944
37945 -static struct backlight_ops appledisplay_bl_data = {
37946 +static const struct backlight_ops appledisplay_bl_data = {
37947 .get_brightness = appledisplay_bl_get_brightness,
37948 .update_status = appledisplay_bl_update_status,
37949 };
37950 diff -urNp linux-2.6.32.45/drivers/usb/mon/mon_main.c linux-2.6.32.45/drivers/usb/mon/mon_main.c
37951 --- linux-2.6.32.45/drivers/usb/mon/mon_main.c 2011-03-27 14:31:47.000000000 -0400
37952 +++ linux-2.6.32.45/drivers/usb/mon/mon_main.c 2011-04-17 15:56:46.000000000 -0400
37953 @@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
37954 /*
37955 * Ops
37956 */
37957 -static struct usb_mon_operations mon_ops_0 = {
37958 +static const struct usb_mon_operations mon_ops_0 = {
37959 .urb_submit = mon_submit,
37960 .urb_submit_error = mon_submit_error,
37961 .urb_complete = mon_complete,
37962 diff -urNp linux-2.6.32.45/drivers/usb/wusbcore/wa-hc.h linux-2.6.32.45/drivers/usb/wusbcore/wa-hc.h
37963 --- linux-2.6.32.45/drivers/usb/wusbcore/wa-hc.h 2011-03-27 14:31:47.000000000 -0400
37964 +++ linux-2.6.32.45/drivers/usb/wusbcore/wa-hc.h 2011-05-04 17:56:28.000000000 -0400
37965 @@ -192,7 +192,7 @@ struct wahc {
37966 struct list_head xfer_delayed_list;
37967 spinlock_t xfer_list_lock;
37968 struct work_struct xfer_work;
37969 - atomic_t xfer_id_count;
37970 + atomic_unchecked_t xfer_id_count;
37971 };
37972
37973
37974 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
37975 INIT_LIST_HEAD(&wa->xfer_delayed_list);
37976 spin_lock_init(&wa->xfer_list_lock);
37977 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
37978 - atomic_set(&wa->xfer_id_count, 1);
37979 + atomic_set_unchecked(&wa->xfer_id_count, 1);
37980 }
37981
37982 /**
37983 diff -urNp linux-2.6.32.45/drivers/usb/wusbcore/wa-xfer.c linux-2.6.32.45/drivers/usb/wusbcore/wa-xfer.c
37984 --- linux-2.6.32.45/drivers/usb/wusbcore/wa-xfer.c 2011-03-27 14:31:47.000000000 -0400
37985 +++ linux-2.6.32.45/drivers/usb/wusbcore/wa-xfer.c 2011-05-04 17:56:28.000000000 -0400
37986 @@ -293,7 +293,7 @@ out:
37987 */
37988 static void wa_xfer_id_init(struct wa_xfer *xfer)
37989 {
37990 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
37991 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
37992 }
37993
37994 /*
37995 diff -urNp linux-2.6.32.45/drivers/uwb/wlp/messages.c linux-2.6.32.45/drivers/uwb/wlp/messages.c
37996 --- linux-2.6.32.45/drivers/uwb/wlp/messages.c 2011-03-27 14:31:47.000000000 -0400
37997 +++ linux-2.6.32.45/drivers/uwb/wlp/messages.c 2011-04-17 15:56:46.000000000 -0400
37998 @@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct
37999 size_t len = skb->len;
38000 size_t used;
38001 ssize_t result;
38002 - struct wlp_nonce enonce, rnonce;
38003 + struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
38004 enum wlp_assc_error assc_err;
38005 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
38006 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
38007 diff -urNp linux-2.6.32.45/drivers/uwb/wlp/sysfs.c linux-2.6.32.45/drivers/uwb/wlp/sysfs.c
38008 --- linux-2.6.32.45/drivers/uwb/wlp/sysfs.c 2011-03-27 14:31:47.000000000 -0400
38009 +++ linux-2.6.32.45/drivers/uwb/wlp/sysfs.c 2011-04-17 15:56:46.000000000 -0400
38010 @@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobjec
38011 return ret;
38012 }
38013
38014 -static
38015 -struct sysfs_ops wss_sysfs_ops = {
38016 +static const struct sysfs_ops wss_sysfs_ops = {
38017 .show = wlp_wss_attr_show,
38018 .store = wlp_wss_attr_store,
38019 };
38020 diff -urNp linux-2.6.32.45/drivers/video/atmel_lcdfb.c linux-2.6.32.45/drivers/video/atmel_lcdfb.c
38021 --- linux-2.6.32.45/drivers/video/atmel_lcdfb.c 2011-03-27 14:31:47.000000000 -0400
38022 +++ linux-2.6.32.45/drivers/video/atmel_lcdfb.c 2011-04-17 15:56:46.000000000 -0400
38023 @@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struc
38024 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
38025 }
38026
38027 -static struct backlight_ops atmel_lcdc_bl_ops = {
38028 +static const struct backlight_ops atmel_lcdc_bl_ops = {
38029 .update_status = atmel_bl_update_status,
38030 .get_brightness = atmel_bl_get_brightness,
38031 };
38032 diff -urNp linux-2.6.32.45/drivers/video/aty/aty128fb.c linux-2.6.32.45/drivers/video/aty/aty128fb.c
38033 --- linux-2.6.32.45/drivers/video/aty/aty128fb.c 2011-03-27 14:31:47.000000000 -0400
38034 +++ linux-2.6.32.45/drivers/video/aty/aty128fb.c 2011-04-17 15:56:46.000000000 -0400
38035 @@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(stru
38036 return bd->props.brightness;
38037 }
38038
38039 -static struct backlight_ops aty128_bl_data = {
38040 +static const struct backlight_ops aty128_bl_data = {
38041 .get_brightness = aty128_bl_get_brightness,
38042 .update_status = aty128_bl_update_status,
38043 };
38044 diff -urNp linux-2.6.32.45/drivers/video/aty/atyfb_base.c linux-2.6.32.45/drivers/video/aty/atyfb_base.c
38045 --- linux-2.6.32.45/drivers/video/aty/atyfb_base.c 2011-03-27 14:31:47.000000000 -0400
38046 +++ linux-2.6.32.45/drivers/video/aty/atyfb_base.c 2011-04-17 15:56:46.000000000 -0400
38047 @@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct
38048 return bd->props.brightness;
38049 }
38050
38051 -static struct backlight_ops aty_bl_data = {
38052 +static const struct backlight_ops aty_bl_data = {
38053 .get_brightness = aty_bl_get_brightness,
38054 .update_status = aty_bl_update_status,
38055 };
38056 diff -urNp linux-2.6.32.45/drivers/video/aty/radeon_backlight.c linux-2.6.32.45/drivers/video/aty/radeon_backlight.c
38057 --- linux-2.6.32.45/drivers/video/aty/radeon_backlight.c 2011-03-27 14:31:47.000000000 -0400
38058 +++ linux-2.6.32.45/drivers/video/aty/radeon_backlight.c 2011-04-17 15:56:46.000000000 -0400
38059 @@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(stru
38060 return bd->props.brightness;
38061 }
38062
38063 -static struct backlight_ops radeon_bl_data = {
38064 +static const struct backlight_ops radeon_bl_data = {
38065 .get_brightness = radeon_bl_get_brightness,
38066 .update_status = radeon_bl_update_status,
38067 };
38068 diff -urNp linux-2.6.32.45/drivers/video/backlight/adp5520_bl.c linux-2.6.32.45/drivers/video/backlight/adp5520_bl.c
38069 --- linux-2.6.32.45/drivers/video/backlight/adp5520_bl.c 2011-03-27 14:31:47.000000000 -0400
38070 +++ linux-2.6.32.45/drivers/video/backlight/adp5520_bl.c 2011-04-17 15:56:46.000000000 -0400
38071 @@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(str
38072 return error ? data->current_brightness : reg_val;
38073 }
38074
38075 -static struct backlight_ops adp5520_bl_ops = {
38076 +static const struct backlight_ops adp5520_bl_ops = {
38077 .update_status = adp5520_bl_update_status,
38078 .get_brightness = adp5520_bl_get_brightness,
38079 };
38080 diff -urNp linux-2.6.32.45/drivers/video/backlight/adx_bl.c linux-2.6.32.45/drivers/video/backlight/adx_bl.c
38081 --- linux-2.6.32.45/drivers/video/backlight/adx_bl.c 2011-03-27 14:31:47.000000000 -0400
38082 +++ linux-2.6.32.45/drivers/video/backlight/adx_bl.c 2011-04-17 15:56:46.000000000 -0400
38083 @@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct
38084 return 1;
38085 }
38086
38087 -static struct backlight_ops adx_backlight_ops = {
38088 +static const struct backlight_ops adx_backlight_ops = {
38089 .options = 0,
38090 .update_status = adx_backlight_update_status,
38091 .get_brightness = adx_backlight_get_brightness,
38092 diff -urNp linux-2.6.32.45/drivers/video/backlight/atmel-pwm-bl.c linux-2.6.32.45/drivers/video/backlight/atmel-pwm-bl.c
38093 --- linux-2.6.32.45/drivers/video/backlight/atmel-pwm-bl.c 2011-03-27 14:31:47.000000000 -0400
38094 +++ linux-2.6.32.45/drivers/video/backlight/atmel-pwm-bl.c 2011-04-17 15:56:46.000000000 -0400
38095 @@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct
38096 return pwm_channel_enable(&pwmbl->pwmc);
38097 }
38098
38099 -static struct backlight_ops atmel_pwm_bl_ops = {
38100 +static const struct backlight_ops atmel_pwm_bl_ops = {
38101 .get_brightness = atmel_pwm_bl_get_intensity,
38102 .update_status = atmel_pwm_bl_set_intensity,
38103 };
38104 diff -urNp linux-2.6.32.45/drivers/video/backlight/backlight.c linux-2.6.32.45/drivers/video/backlight/backlight.c
38105 --- linux-2.6.32.45/drivers/video/backlight/backlight.c 2011-03-27 14:31:47.000000000 -0400
38106 +++ linux-2.6.32.45/drivers/video/backlight/backlight.c 2011-04-17 15:56:46.000000000 -0400
38107 @@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
38108 * ERR_PTR() or a pointer to the newly allocated device.
38109 */
38110 struct backlight_device *backlight_device_register(const char *name,
38111 - struct device *parent, void *devdata, struct backlight_ops *ops)
38112 + struct device *parent, void *devdata, const struct backlight_ops *ops)
38113 {
38114 struct backlight_device *new_bd;
38115 int rc;
38116 diff -urNp linux-2.6.32.45/drivers/video/backlight/corgi_lcd.c linux-2.6.32.45/drivers/video/backlight/corgi_lcd.c
38117 --- linux-2.6.32.45/drivers/video/backlight/corgi_lcd.c 2011-03-27 14:31:47.000000000 -0400
38118 +++ linux-2.6.32.45/drivers/video/backlight/corgi_lcd.c 2011-04-17 15:56:46.000000000 -0400
38119 @@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit
38120 }
38121 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
38122
38123 -static struct backlight_ops corgi_bl_ops = {
38124 +static const struct backlight_ops corgi_bl_ops = {
38125 .get_brightness = corgi_bl_get_intensity,
38126 .update_status = corgi_bl_update_status,
38127 };
38128 diff -urNp linux-2.6.32.45/drivers/video/backlight/cr_bllcd.c linux-2.6.32.45/drivers/video/backlight/cr_bllcd.c
38129 --- linux-2.6.32.45/drivers/video/backlight/cr_bllcd.c 2011-03-27 14:31:47.000000000 -0400
38130 +++ linux-2.6.32.45/drivers/video/backlight/cr_bllcd.c 2011-04-17 15:56:46.000000000 -0400
38131 @@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(st
38132 return intensity;
38133 }
38134
38135 -static struct backlight_ops cr_backlight_ops = {
38136 +static const struct backlight_ops cr_backlight_ops = {
38137 .get_brightness = cr_backlight_get_intensity,
38138 .update_status = cr_backlight_set_intensity,
38139 };
38140 diff -urNp linux-2.6.32.45/drivers/video/backlight/da903x_bl.c linux-2.6.32.45/drivers/video/backlight/da903x_bl.c
38141 --- linux-2.6.32.45/drivers/video/backlight/da903x_bl.c 2011-03-27 14:31:47.000000000 -0400
38142 +++ linux-2.6.32.45/drivers/video/backlight/da903x_bl.c 2011-04-17 15:56:46.000000000 -0400
38143 @@ -94,7 +94,7 @@ static int da903x_backlight_get_brightne
38144 return data->current_brightness;
38145 }
38146
38147 -static struct backlight_ops da903x_backlight_ops = {
38148 +static const struct backlight_ops da903x_backlight_ops = {
38149 .update_status = da903x_backlight_update_status,
38150 .get_brightness = da903x_backlight_get_brightness,
38151 };
38152 diff -urNp linux-2.6.32.45/drivers/video/backlight/generic_bl.c linux-2.6.32.45/drivers/video/backlight/generic_bl.c
38153 --- linux-2.6.32.45/drivers/video/backlight/generic_bl.c 2011-03-27 14:31:47.000000000 -0400
38154 +++ linux-2.6.32.45/drivers/video/backlight/generic_bl.c 2011-04-17 15:56:46.000000000 -0400
38155 @@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
38156 }
38157 EXPORT_SYMBOL(corgibl_limit_intensity);
38158
38159 -static struct backlight_ops genericbl_ops = {
38160 +static const struct backlight_ops genericbl_ops = {
38161 .options = BL_CORE_SUSPENDRESUME,
38162 .get_brightness = genericbl_get_intensity,
38163 .update_status = genericbl_send_intensity,
38164 diff -urNp linux-2.6.32.45/drivers/video/backlight/hp680_bl.c linux-2.6.32.45/drivers/video/backlight/hp680_bl.c
38165 --- linux-2.6.32.45/drivers/video/backlight/hp680_bl.c 2011-03-27 14:31:47.000000000 -0400
38166 +++ linux-2.6.32.45/drivers/video/backlight/hp680_bl.c 2011-04-17 15:56:46.000000000 -0400
38167 @@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct
38168 return current_intensity;
38169 }
38170
38171 -static struct backlight_ops hp680bl_ops = {
38172 +static const struct backlight_ops hp680bl_ops = {
38173 .get_brightness = hp680bl_get_intensity,
38174 .update_status = hp680bl_set_intensity,
38175 };
38176 diff -urNp linux-2.6.32.45/drivers/video/backlight/jornada720_bl.c linux-2.6.32.45/drivers/video/backlight/jornada720_bl.c
38177 --- linux-2.6.32.45/drivers/video/backlight/jornada720_bl.c 2011-03-27 14:31:47.000000000 -0400
38178 +++ linux-2.6.32.45/drivers/video/backlight/jornada720_bl.c 2011-04-17 15:56:46.000000000 -0400
38179 @@ -93,7 +93,7 @@ out:
38180 return ret;
38181 }
38182
38183 -static struct backlight_ops jornada_bl_ops = {
38184 +static const struct backlight_ops jornada_bl_ops = {
38185 .get_brightness = jornada_bl_get_brightness,
38186 .update_status = jornada_bl_update_status,
38187 .options = BL_CORE_SUSPENDRESUME,
38188 diff -urNp linux-2.6.32.45/drivers/video/backlight/kb3886_bl.c linux-2.6.32.45/drivers/video/backlight/kb3886_bl.c
38189 --- linux-2.6.32.45/drivers/video/backlight/kb3886_bl.c 2011-03-27 14:31:47.000000000 -0400
38190 +++ linux-2.6.32.45/drivers/video/backlight/kb3886_bl.c 2011-04-17 15:56:46.000000000 -0400
38191 @@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct
38192 return kb3886bl_intensity;
38193 }
38194
38195 -static struct backlight_ops kb3886bl_ops = {
38196 +static const struct backlight_ops kb3886bl_ops = {
38197 .get_brightness = kb3886bl_get_intensity,
38198 .update_status = kb3886bl_send_intensity,
38199 };
38200 diff -urNp linux-2.6.32.45/drivers/video/backlight/locomolcd.c linux-2.6.32.45/drivers/video/backlight/locomolcd.c
38201 --- linux-2.6.32.45/drivers/video/backlight/locomolcd.c 2011-03-27 14:31:47.000000000 -0400
38202 +++ linux-2.6.32.45/drivers/video/backlight/locomolcd.c 2011-04-17 15:56:46.000000000 -0400
38203 @@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struc
38204 return current_intensity;
38205 }
38206
38207 -static struct backlight_ops locomobl_data = {
38208 +static const struct backlight_ops locomobl_data = {
38209 .get_brightness = locomolcd_get_intensity,
38210 .update_status = locomolcd_set_intensity,
38211 };
38212 diff -urNp linux-2.6.32.45/drivers/video/backlight/mbp_nvidia_bl.c linux-2.6.32.45/drivers/video/backlight/mbp_nvidia_bl.c
38213 --- linux-2.6.32.45/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:01.000000000 -0400
38214 +++ linux-2.6.32.45/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:33.000000000 -0400
38215 @@ -33,7 +33,7 @@ struct dmi_match_data {
38216 unsigned long iostart;
38217 unsigned long iolen;
38218 /* Backlight operations structure. */
38219 - struct backlight_ops backlight_ops;
38220 + const struct backlight_ops backlight_ops;
38221 };
38222
38223 /* Module parameters. */
38224 diff -urNp linux-2.6.32.45/drivers/video/backlight/omap1_bl.c linux-2.6.32.45/drivers/video/backlight/omap1_bl.c
38225 --- linux-2.6.32.45/drivers/video/backlight/omap1_bl.c 2011-03-27 14:31:47.000000000 -0400
38226 +++ linux-2.6.32.45/drivers/video/backlight/omap1_bl.c 2011-04-17 15:56:46.000000000 -0400
38227 @@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct b
38228 return bl->current_intensity;
38229 }
38230
38231 -static struct backlight_ops omapbl_ops = {
38232 +static const struct backlight_ops omapbl_ops = {
38233 .get_brightness = omapbl_get_intensity,
38234 .update_status = omapbl_update_status,
38235 };
38236 diff -urNp linux-2.6.32.45/drivers/video/backlight/progear_bl.c linux-2.6.32.45/drivers/video/backlight/progear_bl.c
38237 --- linux-2.6.32.45/drivers/video/backlight/progear_bl.c 2011-03-27 14:31:47.000000000 -0400
38238 +++ linux-2.6.32.45/drivers/video/backlight/progear_bl.c 2011-04-17 15:56:46.000000000 -0400
38239 @@ -54,7 +54,7 @@ static int progearbl_get_intensity(struc
38240 return intensity - HW_LEVEL_MIN;
38241 }
38242
38243 -static struct backlight_ops progearbl_ops = {
38244 +static const struct backlight_ops progearbl_ops = {
38245 .get_brightness = progearbl_get_intensity,
38246 .update_status = progearbl_set_intensity,
38247 };
38248 diff -urNp linux-2.6.32.45/drivers/video/backlight/pwm_bl.c linux-2.6.32.45/drivers/video/backlight/pwm_bl.c
38249 --- linux-2.6.32.45/drivers/video/backlight/pwm_bl.c 2011-03-27 14:31:47.000000000 -0400
38250 +++ linux-2.6.32.45/drivers/video/backlight/pwm_bl.c 2011-04-17 15:56:46.000000000 -0400
38251 @@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(
38252 return bl->props.brightness;
38253 }
38254
38255 -static struct backlight_ops pwm_backlight_ops = {
38256 +static const struct backlight_ops pwm_backlight_ops = {
38257 .update_status = pwm_backlight_update_status,
38258 .get_brightness = pwm_backlight_get_brightness,
38259 };
38260 diff -urNp linux-2.6.32.45/drivers/video/backlight/tosa_bl.c linux-2.6.32.45/drivers/video/backlight/tosa_bl.c
38261 --- linux-2.6.32.45/drivers/video/backlight/tosa_bl.c 2011-03-27 14:31:47.000000000 -0400
38262 +++ linux-2.6.32.45/drivers/video/backlight/tosa_bl.c 2011-04-17 15:56:46.000000000 -0400
38263 @@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct
38264 return props->brightness;
38265 }
38266
38267 -static struct backlight_ops bl_ops = {
38268 +static const struct backlight_ops bl_ops = {
38269 .get_brightness = tosa_bl_get_brightness,
38270 .update_status = tosa_bl_update_status,
38271 };
38272 diff -urNp linux-2.6.32.45/drivers/video/backlight/wm831x_bl.c linux-2.6.32.45/drivers/video/backlight/wm831x_bl.c
38273 --- linux-2.6.32.45/drivers/video/backlight/wm831x_bl.c 2011-03-27 14:31:47.000000000 -0400
38274 +++ linux-2.6.32.45/drivers/video/backlight/wm831x_bl.c 2011-04-17 15:56:46.000000000 -0400
38275 @@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightne
38276 return data->current_brightness;
38277 }
38278
38279 -static struct backlight_ops wm831x_backlight_ops = {
38280 +static const struct backlight_ops wm831x_backlight_ops = {
38281 .options = BL_CORE_SUSPENDRESUME,
38282 .update_status = wm831x_backlight_update_status,
38283 .get_brightness = wm831x_backlight_get_brightness,
38284 diff -urNp linux-2.6.32.45/drivers/video/bf54x-lq043fb.c linux-2.6.32.45/drivers/video/bf54x-lq043fb.c
38285 --- linux-2.6.32.45/drivers/video/bf54x-lq043fb.c 2011-03-27 14:31:47.000000000 -0400
38286 +++ linux-2.6.32.45/drivers/video/bf54x-lq043fb.c 2011-04-17 15:56:46.000000000 -0400
38287 @@ -463,7 +463,7 @@ static int bl_get_brightness(struct back
38288 return 0;
38289 }
38290
38291 -static struct backlight_ops bfin_lq043fb_bl_ops = {
38292 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
38293 .get_brightness = bl_get_brightness,
38294 };
38295
38296 diff -urNp linux-2.6.32.45/drivers/video/bfin-t350mcqb-fb.c linux-2.6.32.45/drivers/video/bfin-t350mcqb-fb.c
38297 --- linux-2.6.32.45/drivers/video/bfin-t350mcqb-fb.c 2011-03-27 14:31:47.000000000 -0400
38298 +++ linux-2.6.32.45/drivers/video/bfin-t350mcqb-fb.c 2011-04-17 15:56:46.000000000 -0400
38299 @@ -381,7 +381,7 @@ static int bl_get_brightness(struct back
38300 return 0;
38301 }
38302
38303 -static struct backlight_ops bfin_lq043fb_bl_ops = {
38304 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
38305 .get_brightness = bl_get_brightness,
38306 };
38307
38308 diff -urNp linux-2.6.32.45/drivers/video/fbcmap.c linux-2.6.32.45/drivers/video/fbcmap.c
38309 --- linux-2.6.32.45/drivers/video/fbcmap.c 2011-03-27 14:31:47.000000000 -0400
38310 +++ linux-2.6.32.45/drivers/video/fbcmap.c 2011-04-17 15:56:46.000000000 -0400
38311 @@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user
38312 rc = -ENODEV;
38313 goto out;
38314 }
38315 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
38316 - !info->fbops->fb_setcmap)) {
38317 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
38318 rc = -EINVAL;
38319 goto out1;
38320 }
38321 diff -urNp linux-2.6.32.45/drivers/video/fbmem.c linux-2.6.32.45/drivers/video/fbmem.c
38322 --- linux-2.6.32.45/drivers/video/fbmem.c 2011-03-27 14:31:47.000000000 -0400
38323 +++ linux-2.6.32.45/drivers/video/fbmem.c 2011-05-16 21:46:57.000000000 -0400
38324 @@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_in
38325 image->dx += image->width + 8;
38326 }
38327 } else if (rotate == FB_ROTATE_UD) {
38328 - for (x = 0; x < num && image->dx >= 0; x++) {
38329 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
38330 info->fbops->fb_imageblit(info, image);
38331 image->dx -= image->width + 8;
38332 }
38333 @@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_in
38334 image->dy += image->height + 8;
38335 }
38336 } else if (rotate == FB_ROTATE_CCW) {
38337 - for (x = 0; x < num && image->dy >= 0; x++) {
38338 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
38339 info->fbops->fb_imageblit(info, image);
38340 image->dy -= image->height + 8;
38341 }
38342 @@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct
38343 int flags = info->flags;
38344 int ret = 0;
38345
38346 + pax_track_stack();
38347 +
38348 if (var->activate & FB_ACTIVATE_INV_MODE) {
38349 struct fb_videomode mode1, mode2;
38350
38351 @@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *
38352 void __user *argp = (void __user *)arg;
38353 long ret = 0;
38354
38355 + pax_track_stack();
38356 +
38357 switch (cmd) {
38358 case FBIOGET_VSCREENINFO:
38359 if (!lock_fb_info(info))
38360 @@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *
38361 return -EFAULT;
38362 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
38363 return -EINVAL;
38364 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
38365 + if (con2fb.framebuffer >= FB_MAX)
38366 return -EINVAL;
38367 if (!registered_fb[con2fb.framebuffer])
38368 request_module("fb%d", con2fb.framebuffer);
38369 diff -urNp linux-2.6.32.45/drivers/video/i810/i810_accel.c linux-2.6.32.45/drivers/video/i810/i810_accel.c
38370 --- linux-2.6.32.45/drivers/video/i810/i810_accel.c 2011-03-27 14:31:47.000000000 -0400
38371 +++ linux-2.6.32.45/drivers/video/i810/i810_accel.c 2011-04-17 15:56:46.000000000 -0400
38372 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct
38373 }
38374 }
38375 printk("ringbuffer lockup!!!\n");
38376 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
38377 i810_report_error(mmio);
38378 par->dev_flags |= LOCKUP;
38379 info->pixmap.scan_align = 1;
38380 diff -urNp linux-2.6.32.45/drivers/video/nvidia/nv_backlight.c linux-2.6.32.45/drivers/video/nvidia/nv_backlight.c
38381 --- linux-2.6.32.45/drivers/video/nvidia/nv_backlight.c 2011-03-27 14:31:47.000000000 -0400
38382 +++ linux-2.6.32.45/drivers/video/nvidia/nv_backlight.c 2011-04-17 15:56:46.000000000 -0400
38383 @@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(stru
38384 return bd->props.brightness;
38385 }
38386
38387 -static struct backlight_ops nvidia_bl_ops = {
38388 +static const struct backlight_ops nvidia_bl_ops = {
38389 .get_brightness = nvidia_bl_get_brightness,
38390 .update_status = nvidia_bl_update_status,
38391 };
38392 diff -urNp linux-2.6.32.45/drivers/video/riva/fbdev.c linux-2.6.32.45/drivers/video/riva/fbdev.c
38393 --- linux-2.6.32.45/drivers/video/riva/fbdev.c 2011-03-27 14:31:47.000000000 -0400
38394 +++ linux-2.6.32.45/drivers/video/riva/fbdev.c 2011-04-17 15:56:46.000000000 -0400
38395 @@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct
38396 return bd->props.brightness;
38397 }
38398
38399 -static struct backlight_ops riva_bl_ops = {
38400 +static const struct backlight_ops riva_bl_ops = {
38401 .get_brightness = riva_bl_get_brightness,
38402 .update_status = riva_bl_update_status,
38403 };
38404 diff -urNp linux-2.6.32.45/drivers/video/uvesafb.c linux-2.6.32.45/drivers/video/uvesafb.c
38405 --- linux-2.6.32.45/drivers/video/uvesafb.c 2011-03-27 14:31:47.000000000 -0400
38406 +++ linux-2.6.32.45/drivers/video/uvesafb.c 2011-04-17 15:56:46.000000000 -0400
38407 @@ -18,6 +18,7 @@
38408 #include <linux/fb.h>
38409 #include <linux/io.h>
38410 #include <linux/mutex.h>
38411 +#include <linux/moduleloader.h>
38412 #include <video/edid.h>
38413 #include <video/uvesafb.h>
38414 #ifdef CONFIG_X86
38415 @@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
38416 NULL,
38417 };
38418
38419 - return call_usermodehelper(v86d_path, argv, envp, 1);
38420 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
38421 }
38422
38423 /*
38424 @@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(
38425 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
38426 par->pmi_setpal = par->ypan = 0;
38427 } else {
38428 +
38429 +#ifdef CONFIG_PAX_KERNEXEC
38430 +#ifdef CONFIG_MODULES
38431 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
38432 +#endif
38433 + if (!par->pmi_code) {
38434 + par->pmi_setpal = par->ypan = 0;
38435 + return 0;
38436 + }
38437 +#endif
38438 +
38439 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
38440 + task->t.regs.edi);
38441 +
38442 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38443 + pax_open_kernel();
38444 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
38445 + pax_close_kernel();
38446 +
38447 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
38448 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
38449 +#else
38450 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
38451 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
38452 +#endif
38453 +
38454 printk(KERN_INFO "uvesafb: protected mode interface info at "
38455 "%04x:%04x\n",
38456 (u16)task->t.regs.es, (u16)task->t.regs.edi);
38457 @@ -1799,6 +1822,11 @@ out:
38458 if (par->vbe_modes)
38459 kfree(par->vbe_modes);
38460
38461 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38462 + if (par->pmi_code)
38463 + module_free_exec(NULL, par->pmi_code);
38464 +#endif
38465 +
38466 framebuffer_release(info);
38467 return err;
38468 }
38469 @@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platfor
38470 kfree(par->vbe_state_orig);
38471 if (par->vbe_state_saved)
38472 kfree(par->vbe_state_saved);
38473 +
38474 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38475 + if (par->pmi_code)
38476 + module_free_exec(NULL, par->pmi_code);
38477 +#endif
38478 +
38479 }
38480
38481 framebuffer_release(info);
38482 diff -urNp linux-2.6.32.45/drivers/video/vesafb.c linux-2.6.32.45/drivers/video/vesafb.c
38483 --- linux-2.6.32.45/drivers/video/vesafb.c 2011-03-27 14:31:47.000000000 -0400
38484 +++ linux-2.6.32.45/drivers/video/vesafb.c 2011-08-05 20:33:55.000000000 -0400
38485 @@ -9,6 +9,7 @@
38486 */
38487
38488 #include <linux/module.h>
38489 +#include <linux/moduleloader.h>
38490 #include <linux/kernel.h>
38491 #include <linux/errno.h>
38492 #include <linux/string.h>
38493 @@ -53,8 +54,8 @@ static int vram_remap __initdata; /*
38494 static int vram_total __initdata; /* Set total amount of memory */
38495 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
38496 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
38497 -static void (*pmi_start)(void) __read_mostly;
38498 -static void (*pmi_pal) (void) __read_mostly;
38499 +static void (*pmi_start)(void) __read_only;
38500 +static void (*pmi_pal) (void) __read_only;
38501 static int depth __read_mostly;
38502 static int vga_compat __read_mostly;
38503 /* --------------------------------------------------------------------- */
38504 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
38505 unsigned int size_vmode;
38506 unsigned int size_remap;
38507 unsigned int size_total;
38508 + void *pmi_code = NULL;
38509
38510 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
38511 return -ENODEV;
38512 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
38513 size_remap = size_total;
38514 vesafb_fix.smem_len = size_remap;
38515
38516 -#ifndef __i386__
38517 - screen_info.vesapm_seg = 0;
38518 -#endif
38519 -
38520 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
38521 printk(KERN_WARNING
38522 "vesafb: cannot reserve video memory at 0x%lx\n",
38523 @@ -315,9 +313,21 @@ static int __init vesafb_probe(struct pl
38524 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
38525 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
38526
38527 +#ifdef __i386__
38528 +
38529 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38530 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
38531 + if (!pmi_code)
38532 +#elif !defined(CONFIG_PAX_KERNEXEC)
38533 + if (0)
38534 +#endif
38535 +
38536 +#endif
38537 + screen_info.vesapm_seg = 0;
38538 +
38539 if (screen_info.vesapm_seg) {
38540 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
38541 - screen_info.vesapm_seg,screen_info.vesapm_off);
38542 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
38543 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
38544 }
38545
38546 if (screen_info.vesapm_seg < 0xc000)
38547 @@ -325,9 +335,25 @@ static int __init vesafb_probe(struct pl
38548
38549 if (ypan || pmi_setpal) {
38550 unsigned short *pmi_base;
38551 +
38552 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
38553 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
38554 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
38555 +
38556 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38557 + pax_open_kernel();
38558 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
38559 +#else
38560 + pmi_code = pmi_base;
38561 +#endif
38562 +
38563 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
38564 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
38565 +
38566 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38567 + pmi_start = ktva_ktla(pmi_start);
38568 + pmi_pal = ktva_ktla(pmi_pal);
38569 + pax_close_kernel();
38570 +#endif
38571 +
38572 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
38573 if (pmi_base[3]) {
38574 printk(KERN_INFO "vesafb: pmi: ports = ");
38575 @@ -469,6 +495,11 @@ static int __init vesafb_probe(struct pl
38576 info->node, info->fix.id);
38577 return 0;
38578 err:
38579 +
38580 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38581 + module_free_exec(NULL, pmi_code);
38582 +#endif
38583 +
38584 if (info->screen_base)
38585 iounmap(info->screen_base);
38586 framebuffer_release(info);
38587 diff -urNp linux-2.6.32.45/drivers/xen/sys-hypervisor.c linux-2.6.32.45/drivers/xen/sys-hypervisor.c
38588 --- linux-2.6.32.45/drivers/xen/sys-hypervisor.c 2011-03-27 14:31:47.000000000 -0400
38589 +++ linux-2.6.32.45/drivers/xen/sys-hypervisor.c 2011-04-17 15:56:46.000000000 -0400
38590 @@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct ko
38591 return 0;
38592 }
38593
38594 -static struct sysfs_ops hyp_sysfs_ops = {
38595 +static const struct sysfs_ops hyp_sysfs_ops = {
38596 .show = hyp_sysfs_show,
38597 .store = hyp_sysfs_store,
38598 };
38599 diff -urNp linux-2.6.32.45/fs/9p/vfs_inode.c linux-2.6.32.45/fs/9p/vfs_inode.c
38600 --- linux-2.6.32.45/fs/9p/vfs_inode.c 2011-03-27 14:31:47.000000000 -0400
38601 +++ linux-2.6.32.45/fs/9p/vfs_inode.c 2011-04-17 15:56:46.000000000 -0400
38602 @@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct
38603 static void
38604 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
38605 {
38606 - char *s = nd_get_link(nd);
38607 + const char *s = nd_get_link(nd);
38608
38609 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
38610 IS_ERR(s) ? "<error>" : s);
38611 diff -urNp linux-2.6.32.45/fs/aio.c linux-2.6.32.45/fs/aio.c
38612 --- linux-2.6.32.45/fs/aio.c 2011-03-27 14:31:47.000000000 -0400
38613 +++ linux-2.6.32.45/fs/aio.c 2011-06-04 20:40:21.000000000 -0400
38614 @@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx
38615 size += sizeof(struct io_event) * nr_events;
38616 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
38617
38618 - if (nr_pages < 0)
38619 + if (nr_pages <= 0)
38620 return -EINVAL;
38621
38622 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
38623 @@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ct
38624 struct aio_timeout to;
38625 int retry = 0;
38626
38627 + pax_track_stack();
38628 +
38629 /* needed to zero any padding within an entry (there shouldn't be
38630 * any, but C is fun!
38631 */
38632 @@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *i
38633 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
38634 {
38635 ssize_t ret;
38636 + struct iovec iovstack;
38637
38638 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
38639 kiocb->ki_nbytes, 1,
38640 - &kiocb->ki_inline_vec, &kiocb->ki_iovec);
38641 + &iovstack, &kiocb->ki_iovec);
38642 if (ret < 0)
38643 goto out;
38644
38645 + if (kiocb->ki_iovec == &iovstack) {
38646 + kiocb->ki_inline_vec = iovstack;
38647 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
38648 + }
38649 kiocb->ki_nr_segs = kiocb->ki_nbytes;
38650 kiocb->ki_cur_seg = 0;
38651 /* ki_nbytes/left now reflect bytes instead of segs */
38652 diff -urNp linux-2.6.32.45/fs/attr.c linux-2.6.32.45/fs/attr.c
38653 --- linux-2.6.32.45/fs/attr.c 2011-03-27 14:31:47.000000000 -0400
38654 +++ linux-2.6.32.45/fs/attr.c 2011-04-17 15:56:46.000000000 -0400
38655 @@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode
38656 unsigned long limit;
38657
38658 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
38659 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
38660 if (limit != RLIM_INFINITY && offset > limit)
38661 goto out_sig;
38662 if (offset > inode->i_sb->s_maxbytes)
38663 diff -urNp linux-2.6.32.45/fs/autofs/root.c linux-2.6.32.45/fs/autofs/root.c
38664 --- linux-2.6.32.45/fs/autofs/root.c 2011-03-27 14:31:47.000000000 -0400
38665 +++ linux-2.6.32.45/fs/autofs/root.c 2011-04-17 15:56:46.000000000 -0400
38666 @@ -299,7 +299,8 @@ static int autofs_root_symlink(struct in
38667 set_bit(n,sbi->symlink_bitmap);
38668 sl = &sbi->symlink[n];
38669 sl->len = strlen(symname);
38670 - sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
38671 + slsize = sl->len+1;
38672 + sl->data = kmalloc(slsize, GFP_KERNEL);
38673 if (!sl->data) {
38674 clear_bit(n,sbi->symlink_bitmap);
38675 unlock_kernel();
38676 diff -urNp linux-2.6.32.45/fs/autofs4/symlink.c linux-2.6.32.45/fs/autofs4/symlink.c
38677 --- linux-2.6.32.45/fs/autofs4/symlink.c 2011-03-27 14:31:47.000000000 -0400
38678 +++ linux-2.6.32.45/fs/autofs4/symlink.c 2011-04-17 15:56:46.000000000 -0400
38679 @@ -15,7 +15,7 @@
38680 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
38681 {
38682 struct autofs_info *ino = autofs4_dentry_ino(dentry);
38683 - nd_set_link(nd, (char *)ino->u.symlink);
38684 + nd_set_link(nd, ino->u.symlink);
38685 return NULL;
38686 }
38687
38688 diff -urNp linux-2.6.32.45/fs/befs/linuxvfs.c linux-2.6.32.45/fs/befs/linuxvfs.c
38689 --- linux-2.6.32.45/fs/befs/linuxvfs.c 2011-03-27 14:31:47.000000000 -0400
38690 +++ linux-2.6.32.45/fs/befs/linuxvfs.c 2011-04-17 15:56:46.000000000 -0400
38691 @@ -493,7 +493,7 @@ static void befs_put_link(struct dentry
38692 {
38693 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
38694 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
38695 - char *link = nd_get_link(nd);
38696 + const char *link = nd_get_link(nd);
38697 if (!IS_ERR(link))
38698 kfree(link);
38699 }
38700 diff -urNp linux-2.6.32.45/fs/binfmt_aout.c linux-2.6.32.45/fs/binfmt_aout.c
38701 --- linux-2.6.32.45/fs/binfmt_aout.c 2011-03-27 14:31:47.000000000 -0400
38702 +++ linux-2.6.32.45/fs/binfmt_aout.c 2011-04-17 15:56:46.000000000 -0400
38703 @@ -16,6 +16,7 @@
38704 #include <linux/string.h>
38705 #include <linux/fs.h>
38706 #include <linux/file.h>
38707 +#include <linux/security.h>
38708 #include <linux/stat.h>
38709 #include <linux/fcntl.h>
38710 #include <linux/ptrace.h>
38711 @@ -102,6 +103,8 @@ static int aout_core_dump(long signr, st
38712 #endif
38713 # define START_STACK(u) (u.start_stack)
38714
38715 + memset(&dump, 0, sizeof(dump));
38716 +
38717 fs = get_fs();
38718 set_fs(KERNEL_DS);
38719 has_dumped = 1;
38720 @@ -113,10 +116,12 @@ static int aout_core_dump(long signr, st
38721
38722 /* If the size of the dump file exceeds the rlimit, then see what would happen
38723 if we wrote the stack, but not the data area. */
38724 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
38725 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
38726 dump.u_dsize = 0;
38727
38728 /* Make sure we have enough room to write the stack and data areas. */
38729 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
38730 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
38731 dump.u_ssize = 0;
38732
38733 @@ -146,9 +151,7 @@ static int aout_core_dump(long signr, st
38734 dump_size = dump.u_ssize << PAGE_SHIFT;
38735 DUMP_WRITE(dump_start,dump_size);
38736 }
38737 -/* Finally dump the task struct. Not be used by gdb, but could be useful */
38738 - set_fs(KERNEL_DS);
38739 - DUMP_WRITE(current,sizeof(*current));
38740 +/* Finally, let's not dump the task struct. It isn't used by gdb, but could be useful to an attacker */
38741 end_coredump:
38742 set_fs(fs);
38743 return has_dumped;
38744 @@ -249,6 +252,8 @@ static int load_aout_binary(struct linux
38745 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
38746 if (rlim >= RLIM_INFINITY)
38747 rlim = ~0;
38748 +
38749 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
38750 if (ex.a_data + ex.a_bss > rlim)
38751 return -ENOMEM;
38752
38753 @@ -277,6 +282,27 @@ static int load_aout_binary(struct linux
38754 install_exec_creds(bprm);
38755 current->flags &= ~PF_FORKNOEXEC;
38756
38757 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
38758 + current->mm->pax_flags = 0UL;
38759 +#endif
38760 +
38761 +#ifdef CONFIG_PAX_PAGEEXEC
38762 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
38763 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
38764 +
38765 +#ifdef CONFIG_PAX_EMUTRAMP
38766 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
38767 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
38768 +#endif
38769 +
38770 +#ifdef CONFIG_PAX_MPROTECT
38771 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
38772 + current->mm->pax_flags |= MF_PAX_MPROTECT;
38773 +#endif
38774 +
38775 + }
38776 +#endif
38777 +
38778 if (N_MAGIC(ex) == OMAGIC) {
38779 unsigned long text_addr, map_size;
38780 loff_t pos;
38781 @@ -349,7 +375,7 @@ static int load_aout_binary(struct linux
38782
38783 down_write(&current->mm->mmap_sem);
38784 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
38785 - PROT_READ | PROT_WRITE | PROT_EXEC,
38786 + PROT_READ | PROT_WRITE,
38787 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
38788 fd_offset + ex.a_text);
38789 up_write(&current->mm->mmap_sem);
38790 diff -urNp linux-2.6.32.45/fs/binfmt_elf.c linux-2.6.32.45/fs/binfmt_elf.c
38791 --- linux-2.6.32.45/fs/binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
38792 +++ linux-2.6.32.45/fs/binfmt_elf.c 2011-05-16 21:46:57.000000000 -0400
38793 @@ -50,6 +50,10 @@ static int elf_core_dump(long signr, str
38794 #define elf_core_dump NULL
38795 #endif
38796
38797 +#ifdef CONFIG_PAX_MPROTECT
38798 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
38799 +#endif
38800 +
38801 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
38802 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
38803 #else
38804 @@ -69,6 +73,11 @@ static struct linux_binfmt elf_format =
38805 .load_binary = load_elf_binary,
38806 .load_shlib = load_elf_library,
38807 .core_dump = elf_core_dump,
38808 +
38809 +#ifdef CONFIG_PAX_MPROTECT
38810 + .handle_mprotect= elf_handle_mprotect,
38811 +#endif
38812 +
38813 .min_coredump = ELF_EXEC_PAGESIZE,
38814 .hasvdso = 1
38815 };
38816 @@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
38817
38818 static int set_brk(unsigned long start, unsigned long end)
38819 {
38820 + unsigned long e = end;
38821 +
38822 start = ELF_PAGEALIGN(start);
38823 end = ELF_PAGEALIGN(end);
38824 if (end > start) {
38825 @@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
38826 if (BAD_ADDR(addr))
38827 return addr;
38828 }
38829 - current->mm->start_brk = current->mm->brk = end;
38830 + current->mm->start_brk = current->mm->brk = e;
38831 return 0;
38832 }
38833
38834 @@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
38835 elf_addr_t __user *u_rand_bytes;
38836 const char *k_platform = ELF_PLATFORM;
38837 const char *k_base_platform = ELF_BASE_PLATFORM;
38838 - unsigned char k_rand_bytes[16];
38839 + u32 k_rand_bytes[4];
38840 int items;
38841 elf_addr_t *elf_info;
38842 int ei_index = 0;
38843 const struct cred *cred = current_cred();
38844 struct vm_area_struct *vma;
38845 + unsigned long saved_auxv[AT_VECTOR_SIZE];
38846 +
38847 + pax_track_stack();
38848
38849 /*
38850 * In some cases (e.g. Hyper-Threading), we want to avoid L1
38851 @@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
38852 * Generate 16 random bytes for userspace PRNG seeding.
38853 */
38854 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
38855 - u_rand_bytes = (elf_addr_t __user *)
38856 - STACK_ALLOC(p, sizeof(k_rand_bytes));
38857 + srandom32(k_rand_bytes[0] ^ random32());
38858 + srandom32(k_rand_bytes[1] ^ random32());
38859 + srandom32(k_rand_bytes[2] ^ random32());
38860 + srandom32(k_rand_bytes[3] ^ random32());
38861 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
38862 + u_rand_bytes = (elf_addr_t __user *) p;
38863 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
38864 return -EFAULT;
38865
38866 @@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
38867 return -EFAULT;
38868 current->mm->env_end = p;
38869
38870 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
38871 +
38872 /* Put the elf_info on the stack in the right place. */
38873 sp = (elf_addr_t __user *)envp + 1;
38874 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
38875 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
38876 return -EFAULT;
38877 return 0;
38878 }
38879 @@ -385,10 +405,10 @@ static unsigned long load_elf_interp(str
38880 {
38881 struct elf_phdr *elf_phdata;
38882 struct elf_phdr *eppnt;
38883 - unsigned long load_addr = 0;
38884 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
38885 int load_addr_set = 0;
38886 unsigned long last_bss = 0, elf_bss = 0;
38887 - unsigned long error = ~0UL;
38888 + unsigned long error = -EINVAL;
38889 unsigned long total_size;
38890 int retval, i, size;
38891
38892 @@ -434,6 +454,11 @@ static unsigned long load_elf_interp(str
38893 goto out_close;
38894 }
38895
38896 +#ifdef CONFIG_PAX_SEGMEXEC
38897 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
38898 + pax_task_size = SEGMEXEC_TASK_SIZE;
38899 +#endif
38900 +
38901 eppnt = elf_phdata;
38902 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
38903 if (eppnt->p_type == PT_LOAD) {
38904 @@ -477,8 +502,8 @@ static unsigned long load_elf_interp(str
38905 k = load_addr + eppnt->p_vaddr;
38906 if (BAD_ADDR(k) ||
38907 eppnt->p_filesz > eppnt->p_memsz ||
38908 - eppnt->p_memsz > TASK_SIZE ||
38909 - TASK_SIZE - eppnt->p_memsz < k) {
38910 + eppnt->p_memsz > pax_task_size ||
38911 + pax_task_size - eppnt->p_memsz < k) {
38912 error = -ENOMEM;
38913 goto out_close;
38914 }
38915 @@ -532,6 +557,194 @@ out:
38916 return error;
38917 }
38918
38919 +#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
38920 +static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
38921 +{
38922 + unsigned long pax_flags = 0UL;
38923 +
38924 +#ifdef CONFIG_PAX_PAGEEXEC
38925 + if (elf_phdata->p_flags & PF_PAGEEXEC)
38926 + pax_flags |= MF_PAX_PAGEEXEC;
38927 +#endif
38928 +
38929 +#ifdef CONFIG_PAX_SEGMEXEC
38930 + if (elf_phdata->p_flags & PF_SEGMEXEC)
38931 + pax_flags |= MF_PAX_SEGMEXEC;
38932 +#endif
38933 +
38934 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
38935 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
38936 + if (nx_enabled)
38937 + pax_flags &= ~MF_PAX_SEGMEXEC;
38938 + else
38939 + pax_flags &= ~MF_PAX_PAGEEXEC;
38940 + }
38941 +#endif
38942 +
38943 +#ifdef CONFIG_PAX_EMUTRAMP
38944 + if (elf_phdata->p_flags & PF_EMUTRAMP)
38945 + pax_flags |= MF_PAX_EMUTRAMP;
38946 +#endif
38947 +
38948 +#ifdef CONFIG_PAX_MPROTECT
38949 + if (elf_phdata->p_flags & PF_MPROTECT)
38950 + pax_flags |= MF_PAX_MPROTECT;
38951 +#endif
38952 +
38953 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
38954 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
38955 + pax_flags |= MF_PAX_RANDMMAP;
38956 +#endif
38957 +
38958 + return pax_flags;
38959 +}
38960 +#endif
38961 +
38962 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
38963 +static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
38964 +{
38965 + unsigned long pax_flags = 0UL;
38966 +
38967 +#ifdef CONFIG_PAX_PAGEEXEC
38968 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
38969 + pax_flags |= MF_PAX_PAGEEXEC;
38970 +#endif
38971 +
38972 +#ifdef CONFIG_PAX_SEGMEXEC
38973 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
38974 + pax_flags |= MF_PAX_SEGMEXEC;
38975 +#endif
38976 +
38977 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
38978 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
38979 + if (nx_enabled)
38980 + pax_flags &= ~MF_PAX_SEGMEXEC;
38981 + else
38982 + pax_flags &= ~MF_PAX_PAGEEXEC;
38983 + }
38984 +#endif
38985 +
38986 +#ifdef CONFIG_PAX_EMUTRAMP
38987 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
38988 + pax_flags |= MF_PAX_EMUTRAMP;
38989 +#endif
38990 +
38991 +#ifdef CONFIG_PAX_MPROTECT
38992 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
38993 + pax_flags |= MF_PAX_MPROTECT;
38994 +#endif
38995 +
38996 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
38997 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
38998 + pax_flags |= MF_PAX_RANDMMAP;
38999 +#endif
39000 +
39001 + return pax_flags;
39002 +}
39003 +#endif
39004 +
39005 +#ifdef CONFIG_PAX_EI_PAX
39006 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
39007 +{
39008 + unsigned long pax_flags = 0UL;
39009 +
39010 +#ifdef CONFIG_PAX_PAGEEXEC
39011 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
39012 + pax_flags |= MF_PAX_PAGEEXEC;
39013 +#endif
39014 +
39015 +#ifdef CONFIG_PAX_SEGMEXEC
39016 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
39017 + pax_flags |= MF_PAX_SEGMEXEC;
39018 +#endif
39019 +
39020 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39021 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39022 + if (nx_enabled)
39023 + pax_flags &= ~MF_PAX_SEGMEXEC;
39024 + else
39025 + pax_flags &= ~MF_PAX_PAGEEXEC;
39026 + }
39027 +#endif
39028 +
39029 +#ifdef CONFIG_PAX_EMUTRAMP
39030 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
39031 + pax_flags |= MF_PAX_EMUTRAMP;
39032 +#endif
39033 +
39034 +#ifdef CONFIG_PAX_MPROTECT
39035 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
39036 + pax_flags |= MF_PAX_MPROTECT;
39037 +#endif
39038 +
39039 +#ifdef CONFIG_PAX_ASLR
39040 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
39041 + pax_flags |= MF_PAX_RANDMMAP;
39042 +#endif
39043 +
39044 + return pax_flags;
39045 +}
39046 +#endif
39047 +
39048 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
39049 +static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
39050 +{
39051 + unsigned long pax_flags = 0UL;
39052 +
39053 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
39054 + unsigned long i;
39055 + int found_flags = 0;
39056 +#endif
39057 +
39058 +#ifdef CONFIG_PAX_EI_PAX
39059 + pax_flags = pax_parse_ei_pax(elf_ex);
39060 +#endif
39061 +
39062 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
39063 + for (i = 0UL; i < elf_ex->e_phnum; i++)
39064 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
39065 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
39066 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
39067 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
39068 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
39069 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
39070 + return -EINVAL;
39071 +
39072 +#ifdef CONFIG_PAX_SOFTMODE
39073 + if (pax_softmode)
39074 + pax_flags = pax_parse_softmode(&elf_phdata[i]);
39075 + else
39076 +#endif
39077 +
39078 + pax_flags = pax_parse_hardmode(&elf_phdata[i]);
39079 + found_flags = 1;
39080 + break;
39081 + }
39082 +#endif
39083 +
39084 +#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
39085 + if (found_flags == 0) {
39086 + struct elf_phdr phdr;
39087 + memset(&phdr, 0, sizeof(phdr));
39088 + phdr.p_flags = PF_NOEMUTRAMP;
39089 +#ifdef CONFIG_PAX_SOFTMODE
39090 + if (pax_softmode)
39091 + pax_flags = pax_parse_softmode(&phdr);
39092 + else
39093 +#endif
39094 + pax_flags = pax_parse_hardmode(&phdr);
39095 + }
39096 +#endif
39097 +
39098 +
39099 + if (0 > pax_check_flags(&pax_flags))
39100 + return -EINVAL;
39101 +
39102 + current->mm->pax_flags = pax_flags;
39103 + return 0;
39104 +}
39105 +#endif
39106 +
39107 /*
39108 * These are the functions used to load ELF style executables and shared
39109 * libraries. There is no binary dependent code anywhere else.
39110 @@ -548,6 +761,11 @@ static unsigned long randomize_stack_top
39111 {
39112 unsigned int random_variable = 0;
39113
39114 +#ifdef CONFIG_PAX_RANDUSTACK
39115 + if (randomize_va_space)
39116 + return stack_top - current->mm->delta_stack;
39117 +#endif
39118 +
39119 if ((current->flags & PF_RANDOMIZE) &&
39120 !(current->personality & ADDR_NO_RANDOMIZE)) {
39121 random_variable = get_random_int() & STACK_RND_MASK;
39122 @@ -566,7 +784,7 @@ static int load_elf_binary(struct linux_
39123 unsigned long load_addr = 0, load_bias = 0;
39124 int load_addr_set = 0;
39125 char * elf_interpreter = NULL;
39126 - unsigned long error;
39127 + unsigned long error = 0;
39128 struct elf_phdr *elf_ppnt, *elf_phdata;
39129 unsigned long elf_bss, elf_brk;
39130 int retval, i;
39131 @@ -576,11 +794,11 @@ static int load_elf_binary(struct linux_
39132 unsigned long start_code, end_code, start_data, end_data;
39133 unsigned long reloc_func_desc = 0;
39134 int executable_stack = EXSTACK_DEFAULT;
39135 - unsigned long def_flags = 0;
39136 struct {
39137 struct elfhdr elf_ex;
39138 struct elfhdr interp_elf_ex;
39139 } *loc;
39140 + unsigned long pax_task_size = TASK_SIZE;
39141
39142 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
39143 if (!loc) {
39144 @@ -718,11 +936,80 @@ static int load_elf_binary(struct linux_
39145
39146 /* OK, This is the point of no return */
39147 current->flags &= ~PF_FORKNOEXEC;
39148 - current->mm->def_flags = def_flags;
39149 +
39150 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39151 + current->mm->pax_flags = 0UL;
39152 +#endif
39153 +
39154 +#ifdef CONFIG_PAX_DLRESOLVE
39155 + current->mm->call_dl_resolve = 0UL;
39156 +#endif
39157 +
39158 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
39159 + current->mm->call_syscall = 0UL;
39160 +#endif
39161 +
39162 +#ifdef CONFIG_PAX_ASLR
39163 + current->mm->delta_mmap = 0UL;
39164 + current->mm->delta_stack = 0UL;
39165 +#endif
39166 +
39167 + current->mm->def_flags = 0;
39168 +
39169 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
39170 + if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
39171 + send_sig(SIGKILL, current, 0);
39172 + goto out_free_dentry;
39173 + }
39174 +#endif
39175 +
39176 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
39177 + pax_set_initial_flags(bprm);
39178 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
39179 + if (pax_set_initial_flags_func)
39180 + (pax_set_initial_flags_func)(bprm);
39181 +#endif
39182 +
39183 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
39184 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
39185 + current->mm->context.user_cs_limit = PAGE_SIZE;
39186 + current->mm->def_flags |= VM_PAGEEXEC;
39187 + }
39188 +#endif
39189 +
39190 +#ifdef CONFIG_PAX_SEGMEXEC
39191 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
39192 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
39193 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
39194 + pax_task_size = SEGMEXEC_TASK_SIZE;
39195 + }
39196 +#endif
39197 +
39198 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
39199 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39200 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
39201 + put_cpu();
39202 + }
39203 +#endif
39204
39205 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
39206 may depend on the personality. */
39207 SET_PERSONALITY(loc->elf_ex);
39208 +
39209 +#ifdef CONFIG_PAX_ASLR
39210 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
39211 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
39212 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
39213 + }
39214 +#endif
39215 +
39216 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
39217 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39218 + executable_stack = EXSTACK_DISABLE_X;
39219 + current->personality &= ~READ_IMPLIES_EXEC;
39220 + } else
39221 +#endif
39222 +
39223 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
39224 current->personality |= READ_IMPLIES_EXEC;
39225
39226 @@ -804,6 +1091,20 @@ static int load_elf_binary(struct linux_
39227 #else
39228 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
39229 #endif
39230 +
39231 +#ifdef CONFIG_PAX_RANDMMAP
39232 + /* PaX: randomize base address at the default exe base if requested */
39233 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
39234 +#ifdef CONFIG_SPARC64
39235 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
39236 +#else
39237 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
39238 +#endif
39239 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
39240 + elf_flags |= MAP_FIXED;
39241 + }
39242 +#endif
39243 +
39244 }
39245
39246 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
39247 @@ -836,9 +1137,9 @@ static int load_elf_binary(struct linux_
39248 * allowed task size. Note that p_filesz must always be
39249 * <= p_memsz so it is only necessary to check p_memsz.
39250 */
39251 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
39252 - elf_ppnt->p_memsz > TASK_SIZE ||
39253 - TASK_SIZE - elf_ppnt->p_memsz < k) {
39254 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
39255 + elf_ppnt->p_memsz > pax_task_size ||
39256 + pax_task_size - elf_ppnt->p_memsz < k) {
39257 /* set_brk can never work. Avoid overflows. */
39258 send_sig(SIGKILL, current, 0);
39259 retval = -EINVAL;
39260 @@ -866,6 +1167,11 @@ static int load_elf_binary(struct linux_
39261 start_data += load_bias;
39262 end_data += load_bias;
39263
39264 +#ifdef CONFIG_PAX_RANDMMAP
39265 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
39266 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
39267 +#endif
39268 +
39269 /* Calling set_brk effectively mmaps the pages that we need
39270 * for the bss and break sections. We must do this before
39271 * mapping in the interpreter, to make sure it doesn't wind
39272 @@ -877,9 +1183,11 @@ static int load_elf_binary(struct linux_
39273 goto out_free_dentry;
39274 }
39275 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
39276 - send_sig(SIGSEGV, current, 0);
39277 - retval = -EFAULT; /* Nobody gets to see this, but.. */
39278 - goto out_free_dentry;
39279 + /*
39280 + * This bss-zeroing can fail if the ELF
39281 + * file specifies odd protections. So
39282 + * we don't check the return value
39283 + */
39284 }
39285
39286 if (elf_interpreter) {
39287 @@ -1112,8 +1420,10 @@ static int dump_seek(struct file *file,
39288 unsigned long n = off;
39289 if (n > PAGE_SIZE)
39290 n = PAGE_SIZE;
39291 - if (!dump_write(file, buf, n))
39292 + if (!dump_write(file, buf, n)) {
39293 + free_page((unsigned long)buf);
39294 return 0;
39295 + }
39296 off -= n;
39297 }
39298 free_page((unsigned long)buf);
39299 @@ -1125,7 +1435,7 @@ static int dump_seek(struct file *file,
39300 * Decide what to dump of a segment, part, all or none.
39301 */
39302 static unsigned long vma_dump_size(struct vm_area_struct *vma,
39303 - unsigned long mm_flags)
39304 + unsigned long mm_flags, long signr)
39305 {
39306 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
39307
39308 @@ -1159,7 +1469,7 @@ static unsigned long vma_dump_size(struc
39309 if (vma->vm_file == NULL)
39310 return 0;
39311
39312 - if (FILTER(MAPPED_PRIVATE))
39313 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
39314 goto whole;
39315
39316 /*
39317 @@ -1255,8 +1565,11 @@ static int writenote(struct memelfnote *
39318 #undef DUMP_WRITE
39319
39320 #define DUMP_WRITE(addr, nr) \
39321 + do { \
39322 + gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
39323 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
39324 - goto end_coredump;
39325 + goto end_coredump; \
39326 + } while (0);
39327
39328 static void fill_elf_header(struct elfhdr *elf, int segs,
39329 u16 machine, u32 flags, u8 osabi)
39330 @@ -1385,9 +1698,9 @@ static void fill_auxv_note(struct memelf
39331 {
39332 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
39333 int i = 0;
39334 - do
39335 + do {
39336 i += 2;
39337 - while (auxv[i - 2] != AT_NULL);
39338 + } while (auxv[i - 2] != AT_NULL);
39339 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
39340 }
39341
39342 @@ -1973,7 +2286,7 @@ static int elf_core_dump(long signr, str
39343 phdr.p_offset = offset;
39344 phdr.p_vaddr = vma->vm_start;
39345 phdr.p_paddr = 0;
39346 - phdr.p_filesz = vma_dump_size(vma, mm_flags);
39347 + phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
39348 phdr.p_memsz = vma->vm_end - vma->vm_start;
39349 offset += phdr.p_filesz;
39350 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
39351 @@ -2006,7 +2319,7 @@ static int elf_core_dump(long signr, str
39352 unsigned long addr;
39353 unsigned long end;
39354
39355 - end = vma->vm_start + vma_dump_size(vma, mm_flags);
39356 + end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
39357
39358 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
39359 struct page *page;
39360 @@ -2015,6 +2328,7 @@ static int elf_core_dump(long signr, str
39361 page = get_dump_page(addr);
39362 if (page) {
39363 void *kaddr = kmap(page);
39364 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
39365 stop = ((size += PAGE_SIZE) > limit) ||
39366 !dump_write(file, kaddr, PAGE_SIZE);
39367 kunmap(page);
39368 @@ -2042,6 +2356,97 @@ out:
39369
39370 #endif /* USE_ELF_CORE_DUMP */
39371
39372 +#ifdef CONFIG_PAX_MPROTECT
39373 +/* PaX: non-PIC ELF libraries need relocations on their executable segments;
39374 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
39375 + * we'll remove VM_MAYWRITE for good on RELRO segments.
39376 + *
39377 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
39378 + * basis because we want to allow the common case and not the special ones.
39379 + */
39380 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
39381 +{
39382 + struct elfhdr elf_h;
39383 + struct elf_phdr elf_p;
39384 + unsigned long i;
39385 + unsigned long oldflags;
39386 + bool is_textrel_rw, is_textrel_rx, is_relro;
39387 +
39388 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
39389 + return;
39390 +
39391 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
39392 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
39393 +
39394 +#ifdef CONFIG_PAX_ELFRELOCS
39395 + /* possible TEXTREL */
39396 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
39397 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
39398 +#else
39399 + is_textrel_rw = false;
39400 + is_textrel_rx = false;
39401 +#endif
39402 +
39403 + /* possible RELRO */
39404 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
39405 +
39406 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
39407 + return;
39408 +
39409 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
39410 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
39411 +
39412 +#ifdef CONFIG_PAX_ETEXECRELOCS
39413 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
39414 +#else
39415 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
39416 +#endif
39417 +
39418 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
39419 + !elf_check_arch(&elf_h) ||
39420 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
39421 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
39422 + return;
39423 +
39424 + for (i = 0UL; i < elf_h.e_phnum; i++) {
39425 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
39426 + return;
39427 + switch (elf_p.p_type) {
39428 + case PT_DYNAMIC:
39429 + if (!is_textrel_rw && !is_textrel_rx)
39430 + continue;
39431 + i = 0UL;
39432 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
39433 + elf_dyn dyn;
39434 +
39435 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
39436 + return;
39437 + if (dyn.d_tag == DT_NULL)
39438 + return;
39439 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
39440 + gr_log_textrel(vma);
39441 + if (is_textrel_rw)
39442 + vma->vm_flags |= VM_MAYWRITE;
39443 + else
39444 + /* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
39445 + vma->vm_flags &= ~VM_MAYWRITE;
39446 + return;
39447 + }
39448 + i++;
39449 + }
39450 + return;
39451 +
39452 + case PT_GNU_RELRO:
39453 + if (!is_relro)
39454 + continue;
39455 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
39456 + vma->vm_flags &= ~VM_MAYWRITE;
39457 + return;
39458 + }
39459 + }
39460 +}
39461 +#endif
39462 +
39463 static int __init init_elf_binfmt(void)
39464 {
39465 return register_binfmt(&elf_format);
39466 diff -urNp linux-2.6.32.45/fs/binfmt_flat.c linux-2.6.32.45/fs/binfmt_flat.c
39467 --- linux-2.6.32.45/fs/binfmt_flat.c 2011-03-27 14:31:47.000000000 -0400
39468 +++ linux-2.6.32.45/fs/binfmt_flat.c 2011-04-17 15:56:46.000000000 -0400
39469 @@ -564,7 +564,9 @@ static int load_flat_file(struct linux_b
39470 realdatastart = (unsigned long) -ENOMEM;
39471 printk("Unable to allocate RAM for process data, errno %d\n",
39472 (int)-realdatastart);
39473 + down_write(&current->mm->mmap_sem);
39474 do_munmap(current->mm, textpos, text_len);
39475 + up_write(&current->mm->mmap_sem);
39476 ret = realdatastart;
39477 goto err;
39478 }
39479 @@ -588,8 +590,10 @@ static int load_flat_file(struct linux_b
39480 }
39481 if (IS_ERR_VALUE(result)) {
39482 printk("Unable to read data+bss, errno %d\n", (int)-result);
39483 + down_write(&current->mm->mmap_sem);
39484 do_munmap(current->mm, textpos, text_len);
39485 do_munmap(current->mm, realdatastart, data_len + extra);
39486 + up_write(&current->mm->mmap_sem);
39487 ret = result;
39488 goto err;
39489 }
39490 @@ -658,8 +662,10 @@ static int load_flat_file(struct linux_b
39491 }
39492 if (IS_ERR_VALUE(result)) {
39493 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
39494 + down_write(&current->mm->mmap_sem);
39495 do_munmap(current->mm, textpos, text_len + data_len + extra +
39496 MAX_SHARED_LIBS * sizeof(unsigned long));
39497 + up_write(&current->mm->mmap_sem);
39498 ret = result;
39499 goto err;
39500 }
39501 diff -urNp linux-2.6.32.45/fs/bio.c linux-2.6.32.45/fs/bio.c
39502 --- linux-2.6.32.45/fs/bio.c 2011-03-27 14:31:47.000000000 -0400
39503 +++ linux-2.6.32.45/fs/bio.c 2011-04-17 15:56:46.000000000 -0400
39504 @@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_cr
39505
39506 i = 0;
39507 while (i < bio_slab_nr) {
39508 - struct bio_slab *bslab = &bio_slabs[i];
39509 + bslab = &bio_slabs[i];
39510
39511 if (!bslab->slab && entry == -1)
39512 entry = i;
39513 @@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct b
39514 const int read = bio_data_dir(bio) == READ;
39515 struct bio_map_data *bmd = bio->bi_private;
39516 int i;
39517 - char *p = bmd->sgvecs[0].iov_base;
39518 + char *p = (__force char *)bmd->sgvecs[0].iov_base;
39519
39520 __bio_for_each_segment(bvec, bio, i, 0) {
39521 char *addr = page_address(bvec->bv_page);
39522 diff -urNp linux-2.6.32.45/fs/block_dev.c linux-2.6.32.45/fs/block_dev.c
39523 --- linux-2.6.32.45/fs/block_dev.c 2011-08-09 18:35:29.000000000 -0400
39524 +++ linux-2.6.32.45/fs/block_dev.c 2011-08-09 18:34:00.000000000 -0400
39525 @@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev,
39526 else if (bdev->bd_contains == bdev)
39527 res = 0; /* is a whole device which isn't held */
39528
39529 - else if (bdev->bd_contains->bd_holder == bd_claim)
39530 + else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
39531 res = 0; /* is a partition of a device that is being partitioned */
39532 else if (bdev->bd_contains->bd_holder != NULL)
39533 res = -EBUSY; /* is a partition of a held device */
39534 diff -urNp linux-2.6.32.45/fs/btrfs/ctree.c linux-2.6.32.45/fs/btrfs/ctree.c
39535 --- linux-2.6.32.45/fs/btrfs/ctree.c 2011-03-27 14:31:47.000000000 -0400
39536 +++ linux-2.6.32.45/fs/btrfs/ctree.c 2011-04-17 15:56:46.000000000 -0400
39537 @@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(st
39538 free_extent_buffer(buf);
39539 add_root_to_dirty_list(root);
39540 } else {
39541 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
39542 - parent_start = parent->start;
39543 - else
39544 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
39545 + if (parent)
39546 + parent_start = parent->start;
39547 + else
39548 + parent_start = 0;
39549 + } else
39550 parent_start = 0;
39551
39552 WARN_ON(trans->transid != btrfs_header_generation(parent));
39553 @@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_tran
39554
39555 ret = 0;
39556 if (slot == 0) {
39557 - struct btrfs_disk_key disk_key;
39558 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
39559 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
39560 }
39561 diff -urNp linux-2.6.32.45/fs/btrfs/disk-io.c linux-2.6.32.45/fs/btrfs/disk-io.c
39562 --- linux-2.6.32.45/fs/btrfs/disk-io.c 2011-04-17 17:00:52.000000000 -0400
39563 +++ linux-2.6.32.45/fs/btrfs/disk-io.c 2011-04-17 17:03:11.000000000 -0400
39564 @@ -39,7 +39,7 @@
39565 #include "tree-log.h"
39566 #include "free-space-cache.h"
39567
39568 -static struct extent_io_ops btree_extent_io_ops;
39569 +static const struct extent_io_ops btree_extent_io_ops;
39570 static void end_workqueue_fn(struct btrfs_work *work);
39571 static void free_fs_root(struct btrfs_root *root);
39572
39573 @@ -2607,7 +2607,7 @@ out:
39574 return 0;
39575 }
39576
39577 -static struct extent_io_ops btree_extent_io_ops = {
39578 +static const struct extent_io_ops btree_extent_io_ops = {
39579 .write_cache_pages_lock_hook = btree_lock_page_hook,
39580 .readpage_end_io_hook = btree_readpage_end_io_hook,
39581 .submit_bio_hook = btree_submit_bio_hook,
39582 diff -urNp linux-2.6.32.45/fs/btrfs/extent_io.h linux-2.6.32.45/fs/btrfs/extent_io.h
39583 --- linux-2.6.32.45/fs/btrfs/extent_io.h 2011-03-27 14:31:47.000000000 -0400
39584 +++ linux-2.6.32.45/fs/btrfs/extent_io.h 2011-04-17 15:56:46.000000000 -0400
39585 @@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(s
39586 struct bio *bio, int mirror_num,
39587 unsigned long bio_flags);
39588 struct extent_io_ops {
39589 - int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
39590 + int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
39591 u64 start, u64 end, int *page_started,
39592 unsigned long *nr_written);
39593 - int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
39594 - int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
39595 + int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
39596 + int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
39597 extent_submit_bio_hook_t *submit_bio_hook;
39598 - int (*merge_bio_hook)(struct page *page, unsigned long offset,
39599 + int (* const merge_bio_hook)(struct page *page, unsigned long offset,
39600 size_t size, struct bio *bio,
39601 unsigned long bio_flags);
39602 - int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
39603 - int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
39604 + int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
39605 + int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
39606 u64 start, u64 end,
39607 struct extent_state *state);
39608 - int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
39609 + int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
39610 u64 start, u64 end,
39611 struct extent_state *state);
39612 - int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
39613 + int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
39614 struct extent_state *state);
39615 - int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
39616 + int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
39617 struct extent_state *state, int uptodate);
39618 - int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
39619 + int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
39620 unsigned long old, unsigned long bits);
39621 - int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
39622 + int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
39623 unsigned long bits);
39624 - int (*merge_extent_hook)(struct inode *inode,
39625 + int (* const merge_extent_hook)(struct inode *inode,
39626 struct extent_state *new,
39627 struct extent_state *other);
39628 - int (*split_extent_hook)(struct inode *inode,
39629 + int (* const split_extent_hook)(struct inode *inode,
39630 struct extent_state *orig, u64 split);
39631 - int (*write_cache_pages_lock_hook)(struct page *page);
39632 + int (* const write_cache_pages_lock_hook)(struct page *page);
39633 };
39634
39635 struct extent_io_tree {
39636 @@ -88,7 +88,7 @@ struct extent_io_tree {
39637 u64 dirty_bytes;
39638 spinlock_t lock;
39639 spinlock_t buffer_lock;
39640 - struct extent_io_ops *ops;
39641 + const struct extent_io_ops *ops;
39642 };
39643
39644 struct extent_state {
39645 diff -urNp linux-2.6.32.45/fs/btrfs/extent-tree.c linux-2.6.32.45/fs/btrfs/extent-tree.c
39646 --- linux-2.6.32.45/fs/btrfs/extent-tree.c 2011-03-27 14:31:47.000000000 -0400
39647 +++ linux-2.6.32.45/fs/btrfs/extent-tree.c 2011-06-12 06:39:08.000000000 -0400
39648 @@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(
39649 u64 group_start = group->key.objectid;
39650 new_extents = kmalloc(sizeof(*new_extents),
39651 GFP_NOFS);
39652 + if (!new_extents) {
39653 + ret = -ENOMEM;
39654 + goto out;
39655 + }
39656 nr_extents = 1;
39657 ret = get_new_locations(reloc_inode,
39658 extent_key,
39659 diff -urNp linux-2.6.32.45/fs/btrfs/free-space-cache.c linux-2.6.32.45/fs/btrfs/free-space-cache.c
39660 --- linux-2.6.32.45/fs/btrfs/free-space-cache.c 2011-03-27 14:31:47.000000000 -0400
39661 +++ linux-2.6.32.45/fs/btrfs/free-space-cache.c 2011-04-17 15:56:46.000000000 -0400
39662 @@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
39663
39664 while(1) {
39665 if (entry->bytes < bytes || entry->offset < min_start) {
39666 - struct rb_node *node;
39667 -
39668 node = rb_next(&entry->offset_index);
39669 if (!node)
39670 break;
39671 @@ -1226,7 +1224,7 @@ again:
39672 */
39673 while (entry->bitmap || found_bitmap ||
39674 (!entry->bitmap && entry->bytes < min_bytes)) {
39675 - struct rb_node *node = rb_next(&entry->offset_index);
39676 + node = rb_next(&entry->offset_index);
39677
39678 if (entry->bitmap && entry->bytes > bytes + empty_size) {
39679 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
39680 diff -urNp linux-2.6.32.45/fs/btrfs/inode.c linux-2.6.32.45/fs/btrfs/inode.c
39681 --- linux-2.6.32.45/fs/btrfs/inode.c 2011-03-27 14:31:47.000000000 -0400
39682 +++ linux-2.6.32.45/fs/btrfs/inode.c 2011-06-12 06:39:58.000000000 -0400
39683 @@ -63,7 +63,7 @@ static const struct inode_operations btr
39684 static const struct address_space_operations btrfs_aops;
39685 static const struct address_space_operations btrfs_symlink_aops;
39686 static const struct file_operations btrfs_dir_file_operations;
39687 -static struct extent_io_ops btrfs_extent_io_ops;
39688 +static const struct extent_io_ops btrfs_extent_io_ops;
39689
39690 static struct kmem_cache *btrfs_inode_cachep;
39691 struct kmem_cache *btrfs_trans_handle_cachep;
39692 @@ -925,6 +925,7 @@ static int cow_file_range_async(struct i
39693 1, 0, NULL, GFP_NOFS);
39694 while (start < end) {
39695 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
39696 + BUG_ON(!async_cow);
39697 async_cow->inode = inode;
39698 async_cow->root = root;
39699 async_cow->locked_page = locked_page;
39700 @@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(st
39701 inline_size = btrfs_file_extent_inline_item_len(leaf,
39702 btrfs_item_nr(leaf, path->slots[0]));
39703 tmp = kmalloc(inline_size, GFP_NOFS);
39704 + if (!tmp)
39705 + return -ENOMEM;
39706 ptr = btrfs_file_extent_inline_start(item);
39707
39708 read_extent_buffer(leaf, tmp, ptr, inline_size);
39709 @@ -5410,7 +5413,7 @@ fail:
39710 return -ENOMEM;
39711 }
39712
39713 -static int btrfs_getattr(struct vfsmount *mnt,
39714 +int btrfs_getattr(struct vfsmount *mnt,
39715 struct dentry *dentry, struct kstat *stat)
39716 {
39717 struct inode *inode = dentry->d_inode;
39718 @@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount
39719 return 0;
39720 }
39721
39722 +EXPORT_SYMBOL(btrfs_getattr);
39723 +
39724 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
39725 +{
39726 + return BTRFS_I(inode)->root->anon_super.s_dev;
39727 +}
39728 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
39729 +
39730 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
39731 struct inode *new_dir, struct dentry *new_dentry)
39732 {
39733 @@ -5972,7 +5983,7 @@ static const struct file_operations btrf
39734 .fsync = btrfs_sync_file,
39735 };
39736
39737 -static struct extent_io_ops btrfs_extent_io_ops = {
39738 +static const struct extent_io_ops btrfs_extent_io_ops = {
39739 .fill_delalloc = run_delalloc_range,
39740 .submit_bio_hook = btrfs_submit_bio_hook,
39741 .merge_bio_hook = btrfs_merge_bio_hook,
39742 diff -urNp linux-2.6.32.45/fs/btrfs/relocation.c linux-2.6.32.45/fs/btrfs/relocation.c
39743 --- linux-2.6.32.45/fs/btrfs/relocation.c 2011-03-27 14:31:47.000000000 -0400
39744 +++ linux-2.6.32.45/fs/btrfs/relocation.c 2011-04-17 15:56:46.000000000 -0400
39745 @@ -884,7 +884,7 @@ static int __update_reloc_root(struct bt
39746 }
39747 spin_unlock(&rc->reloc_root_tree.lock);
39748
39749 - BUG_ON((struct btrfs_root *)node->data != root);
39750 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
39751
39752 if (!del) {
39753 spin_lock(&rc->reloc_root_tree.lock);
39754 diff -urNp linux-2.6.32.45/fs/btrfs/sysfs.c linux-2.6.32.45/fs/btrfs/sysfs.c
39755 --- linux-2.6.32.45/fs/btrfs/sysfs.c 2011-03-27 14:31:47.000000000 -0400
39756 +++ linux-2.6.32.45/fs/btrfs/sysfs.c 2011-04-17 15:56:46.000000000 -0400
39757 @@ -164,12 +164,12 @@ static void btrfs_root_release(struct ko
39758 complete(&root->kobj_unregister);
39759 }
39760
39761 -static struct sysfs_ops btrfs_super_attr_ops = {
39762 +static const struct sysfs_ops btrfs_super_attr_ops = {
39763 .show = btrfs_super_attr_show,
39764 .store = btrfs_super_attr_store,
39765 };
39766
39767 -static struct sysfs_ops btrfs_root_attr_ops = {
39768 +static const struct sysfs_ops btrfs_root_attr_ops = {
39769 .show = btrfs_root_attr_show,
39770 .store = btrfs_root_attr_store,
39771 };
39772 diff -urNp linux-2.6.32.45/fs/buffer.c linux-2.6.32.45/fs/buffer.c
39773 --- linux-2.6.32.45/fs/buffer.c 2011-03-27 14:31:47.000000000 -0400
39774 +++ linux-2.6.32.45/fs/buffer.c 2011-04-17 15:56:46.000000000 -0400
39775 @@ -25,6 +25,7 @@
39776 #include <linux/percpu.h>
39777 #include <linux/slab.h>
39778 #include <linux/capability.h>
39779 +#include <linux/security.h>
39780 #include <linux/blkdev.h>
39781 #include <linux/file.h>
39782 #include <linux/quotaops.h>
39783 diff -urNp linux-2.6.32.45/fs/cachefiles/bind.c linux-2.6.32.45/fs/cachefiles/bind.c
39784 --- linux-2.6.32.45/fs/cachefiles/bind.c 2011-03-27 14:31:47.000000000 -0400
39785 +++ linux-2.6.32.45/fs/cachefiles/bind.c 2011-04-17 15:56:46.000000000 -0400
39786 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
39787 args);
39788
39789 /* start by checking things over */
39790 - ASSERT(cache->fstop_percent >= 0 &&
39791 - cache->fstop_percent < cache->fcull_percent &&
39792 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
39793 cache->fcull_percent < cache->frun_percent &&
39794 cache->frun_percent < 100);
39795
39796 - ASSERT(cache->bstop_percent >= 0 &&
39797 - cache->bstop_percent < cache->bcull_percent &&
39798 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
39799 cache->bcull_percent < cache->brun_percent &&
39800 cache->brun_percent < 100);
39801
39802 diff -urNp linux-2.6.32.45/fs/cachefiles/daemon.c linux-2.6.32.45/fs/cachefiles/daemon.c
39803 --- linux-2.6.32.45/fs/cachefiles/daemon.c 2011-03-27 14:31:47.000000000 -0400
39804 +++ linux-2.6.32.45/fs/cachefiles/daemon.c 2011-04-17 15:56:46.000000000 -0400
39805 @@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(s
39806 if (test_bit(CACHEFILES_DEAD, &cache->flags))
39807 return -EIO;
39808
39809 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
39810 + if (datalen > PAGE_SIZE - 1)
39811 return -EOPNOTSUPP;
39812
39813 /* drag the command string into the kernel so we can parse it */
39814 @@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struc
39815 if (args[0] != '%' || args[1] != '\0')
39816 return -EINVAL;
39817
39818 - if (fstop < 0 || fstop >= cache->fcull_percent)
39819 + if (fstop >= cache->fcull_percent)
39820 return cachefiles_daemon_range_error(cache, args);
39821
39822 cache->fstop_percent = fstop;
39823 @@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struc
39824 if (args[0] != '%' || args[1] != '\0')
39825 return -EINVAL;
39826
39827 - if (bstop < 0 || bstop >= cache->bcull_percent)
39828 + if (bstop >= cache->bcull_percent)
39829 return cachefiles_daemon_range_error(cache, args);
39830
39831 cache->bstop_percent = bstop;
39832 diff -urNp linux-2.6.32.45/fs/cachefiles/internal.h linux-2.6.32.45/fs/cachefiles/internal.h
39833 --- linux-2.6.32.45/fs/cachefiles/internal.h 2011-03-27 14:31:47.000000000 -0400
39834 +++ linux-2.6.32.45/fs/cachefiles/internal.h 2011-05-04 17:56:28.000000000 -0400
39835 @@ -56,7 +56,7 @@ struct cachefiles_cache {
39836 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
39837 struct rb_root active_nodes; /* active nodes (can't be culled) */
39838 rwlock_t active_lock; /* lock for active_nodes */
39839 - atomic_t gravecounter; /* graveyard uniquifier */
39840 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
39841 unsigned frun_percent; /* when to stop culling (% files) */
39842 unsigned fcull_percent; /* when to start culling (% files) */
39843 unsigned fstop_percent; /* when to stop allocating (% files) */
39844 @@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struc
39845 * proc.c
39846 */
39847 #ifdef CONFIG_CACHEFILES_HISTOGRAM
39848 -extern atomic_t cachefiles_lookup_histogram[HZ];
39849 -extern atomic_t cachefiles_mkdir_histogram[HZ];
39850 -extern atomic_t cachefiles_create_histogram[HZ];
39851 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
39852 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
39853 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
39854
39855 extern int __init cachefiles_proc_init(void);
39856 extern void cachefiles_proc_cleanup(void);
39857 static inline
39858 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
39859 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
39860 {
39861 unsigned long jif = jiffies - start_jif;
39862 if (jif >= HZ)
39863 jif = HZ - 1;
39864 - atomic_inc(&histogram[jif]);
39865 + atomic_inc_unchecked(&histogram[jif]);
39866 }
39867
39868 #else
39869 diff -urNp linux-2.6.32.45/fs/cachefiles/namei.c linux-2.6.32.45/fs/cachefiles/namei.c
39870 --- linux-2.6.32.45/fs/cachefiles/namei.c 2011-03-27 14:31:47.000000000 -0400
39871 +++ linux-2.6.32.45/fs/cachefiles/namei.c 2011-05-04 17:56:28.000000000 -0400
39872 @@ -250,7 +250,7 @@ try_again:
39873 /* first step is to make up a grave dentry in the graveyard */
39874 sprintf(nbuffer, "%08x%08x",
39875 (uint32_t) get_seconds(),
39876 - (uint32_t) atomic_inc_return(&cache->gravecounter));
39877 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
39878
39879 /* do the multiway lock magic */
39880 trap = lock_rename(cache->graveyard, dir);
39881 diff -urNp linux-2.6.32.45/fs/cachefiles/proc.c linux-2.6.32.45/fs/cachefiles/proc.c
39882 --- linux-2.6.32.45/fs/cachefiles/proc.c 2011-03-27 14:31:47.000000000 -0400
39883 +++ linux-2.6.32.45/fs/cachefiles/proc.c 2011-05-04 17:56:28.000000000 -0400
39884 @@ -14,9 +14,9 @@
39885 #include <linux/seq_file.h>
39886 #include "internal.h"
39887
39888 -atomic_t cachefiles_lookup_histogram[HZ];
39889 -atomic_t cachefiles_mkdir_histogram[HZ];
39890 -atomic_t cachefiles_create_histogram[HZ];
39891 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
39892 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
39893 +atomic_unchecked_t cachefiles_create_histogram[HZ];
39894
39895 /*
39896 * display the latency histogram
39897 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
39898 return 0;
39899 default:
39900 index = (unsigned long) v - 3;
39901 - x = atomic_read(&cachefiles_lookup_histogram[index]);
39902 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
39903 - z = atomic_read(&cachefiles_create_histogram[index]);
39904 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
39905 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
39906 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
39907 if (x == 0 && y == 0 && z == 0)
39908 return 0;
39909
39910 diff -urNp linux-2.6.32.45/fs/cachefiles/rdwr.c linux-2.6.32.45/fs/cachefiles/rdwr.c
39911 --- linux-2.6.32.45/fs/cachefiles/rdwr.c 2011-03-27 14:31:47.000000000 -0400
39912 +++ linux-2.6.32.45/fs/cachefiles/rdwr.c 2011-04-17 15:56:46.000000000 -0400
39913 @@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache
39914 old_fs = get_fs();
39915 set_fs(KERNEL_DS);
39916 ret = file->f_op->write(
39917 - file, (const void __user *) data, len, &pos);
39918 + file, (__force const void __user *) data, len, &pos);
39919 set_fs(old_fs);
39920 kunmap(page);
39921 if (ret != len)
39922 diff -urNp linux-2.6.32.45/fs/cifs/cifs_debug.c linux-2.6.32.45/fs/cifs/cifs_debug.c
39923 --- linux-2.6.32.45/fs/cifs/cifs_debug.c 2011-03-27 14:31:47.000000000 -0400
39924 +++ linux-2.6.32.45/fs/cifs/cifs_debug.c 2011-05-04 17:56:28.000000000 -0400
39925 @@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(str
39926 tcon = list_entry(tmp3,
39927 struct cifsTconInfo,
39928 tcon_list);
39929 - atomic_set(&tcon->num_smbs_sent, 0);
39930 - atomic_set(&tcon->num_writes, 0);
39931 - atomic_set(&tcon->num_reads, 0);
39932 - atomic_set(&tcon->num_oplock_brks, 0);
39933 - atomic_set(&tcon->num_opens, 0);
39934 - atomic_set(&tcon->num_posixopens, 0);
39935 - atomic_set(&tcon->num_posixmkdirs, 0);
39936 - atomic_set(&tcon->num_closes, 0);
39937 - atomic_set(&tcon->num_deletes, 0);
39938 - atomic_set(&tcon->num_mkdirs, 0);
39939 - atomic_set(&tcon->num_rmdirs, 0);
39940 - atomic_set(&tcon->num_renames, 0);
39941 - atomic_set(&tcon->num_t2renames, 0);
39942 - atomic_set(&tcon->num_ffirst, 0);
39943 - atomic_set(&tcon->num_fnext, 0);
39944 - atomic_set(&tcon->num_fclose, 0);
39945 - atomic_set(&tcon->num_hardlinks, 0);
39946 - atomic_set(&tcon->num_symlinks, 0);
39947 - atomic_set(&tcon->num_locks, 0);
39948 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
39949 + atomic_set_unchecked(&tcon->num_writes, 0);
39950 + atomic_set_unchecked(&tcon->num_reads, 0);
39951 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
39952 + atomic_set_unchecked(&tcon->num_opens, 0);
39953 + atomic_set_unchecked(&tcon->num_posixopens, 0);
39954 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
39955 + atomic_set_unchecked(&tcon->num_closes, 0);
39956 + atomic_set_unchecked(&tcon->num_deletes, 0);
39957 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
39958 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
39959 + atomic_set_unchecked(&tcon->num_renames, 0);
39960 + atomic_set_unchecked(&tcon->num_t2renames, 0);
39961 + atomic_set_unchecked(&tcon->num_ffirst, 0);
39962 + atomic_set_unchecked(&tcon->num_fnext, 0);
39963 + atomic_set_unchecked(&tcon->num_fclose, 0);
39964 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
39965 + atomic_set_unchecked(&tcon->num_symlinks, 0);
39966 + atomic_set_unchecked(&tcon->num_locks, 0);
39967 }
39968 }
39969 }
39970 @@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct s
39971 if (tcon->need_reconnect)
39972 seq_puts(m, "\tDISCONNECTED ");
39973 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
39974 - atomic_read(&tcon->num_smbs_sent),
39975 - atomic_read(&tcon->num_oplock_brks));
39976 + atomic_read_unchecked(&tcon->num_smbs_sent),
39977 + atomic_read_unchecked(&tcon->num_oplock_brks));
39978 seq_printf(m, "\nReads: %d Bytes: %lld",
39979 - atomic_read(&tcon->num_reads),
39980 + atomic_read_unchecked(&tcon->num_reads),
39981 (long long)(tcon->bytes_read));
39982 seq_printf(m, "\nWrites: %d Bytes: %lld",
39983 - atomic_read(&tcon->num_writes),
39984 + atomic_read_unchecked(&tcon->num_writes),
39985 (long long)(tcon->bytes_written));
39986 seq_printf(m, "\nFlushes: %d",
39987 - atomic_read(&tcon->num_flushes));
39988 + atomic_read_unchecked(&tcon->num_flushes));
39989 seq_printf(m, "\nLocks: %d HardLinks: %d "
39990 "Symlinks: %d",
39991 - atomic_read(&tcon->num_locks),
39992 - atomic_read(&tcon->num_hardlinks),
39993 - atomic_read(&tcon->num_symlinks));
39994 + atomic_read_unchecked(&tcon->num_locks),
39995 + atomic_read_unchecked(&tcon->num_hardlinks),
39996 + atomic_read_unchecked(&tcon->num_symlinks));
39997 seq_printf(m, "\nOpens: %d Closes: %d "
39998 "Deletes: %d",
39999 - atomic_read(&tcon->num_opens),
40000 - atomic_read(&tcon->num_closes),
40001 - atomic_read(&tcon->num_deletes));
40002 + atomic_read_unchecked(&tcon->num_opens),
40003 + atomic_read_unchecked(&tcon->num_closes),
40004 + atomic_read_unchecked(&tcon->num_deletes));
40005 seq_printf(m, "\nPosix Opens: %d "
40006 "Posix Mkdirs: %d",
40007 - atomic_read(&tcon->num_posixopens),
40008 - atomic_read(&tcon->num_posixmkdirs));
40009 + atomic_read_unchecked(&tcon->num_posixopens),
40010 + atomic_read_unchecked(&tcon->num_posixmkdirs));
40011 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
40012 - atomic_read(&tcon->num_mkdirs),
40013 - atomic_read(&tcon->num_rmdirs));
40014 + atomic_read_unchecked(&tcon->num_mkdirs),
40015 + atomic_read_unchecked(&tcon->num_rmdirs));
40016 seq_printf(m, "\nRenames: %d T2 Renames %d",
40017 - atomic_read(&tcon->num_renames),
40018 - atomic_read(&tcon->num_t2renames));
40019 + atomic_read_unchecked(&tcon->num_renames),
40020 + atomic_read_unchecked(&tcon->num_t2renames));
40021 seq_printf(m, "\nFindFirst: %d FNext %d "
40022 "FClose %d",
40023 - atomic_read(&tcon->num_ffirst),
40024 - atomic_read(&tcon->num_fnext),
40025 - atomic_read(&tcon->num_fclose));
40026 + atomic_read_unchecked(&tcon->num_ffirst),
40027 + atomic_read_unchecked(&tcon->num_fnext),
40028 + atomic_read_unchecked(&tcon->num_fclose));
40029 }
40030 }
40031 }
40032 diff -urNp linux-2.6.32.45/fs/cifs/cifsfs.c linux-2.6.32.45/fs/cifs/cifsfs.c
40033 --- linux-2.6.32.45/fs/cifs/cifsfs.c 2011-03-27 14:31:47.000000000 -0400
40034 +++ linux-2.6.32.45/fs/cifs/cifsfs.c 2011-08-25 17:17:57.000000000 -0400
40035 @@ -869,7 +869,7 @@ cifs_init_request_bufs(void)
40036 cifs_req_cachep = kmem_cache_create("cifs_request",
40037 CIFSMaxBufSize +
40038 MAX_CIFS_HDR_SIZE, 0,
40039 - SLAB_HWCACHE_ALIGN, NULL);
40040 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
40041 if (cifs_req_cachep == NULL)
40042 return -ENOMEM;
40043
40044 @@ -896,7 +896,7 @@ cifs_init_request_bufs(void)
40045 efficient to alloc 1 per page off the slab compared to 17K (5page)
40046 alloc of large cifs buffers even when page debugging is on */
40047 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
40048 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
40049 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
40050 NULL);
40051 if (cifs_sm_req_cachep == NULL) {
40052 mempool_destroy(cifs_req_poolp);
40053 @@ -991,8 +991,8 @@ init_cifs(void)
40054 atomic_set(&bufAllocCount, 0);
40055 atomic_set(&smBufAllocCount, 0);
40056 #ifdef CONFIG_CIFS_STATS2
40057 - atomic_set(&totBufAllocCount, 0);
40058 - atomic_set(&totSmBufAllocCount, 0);
40059 + atomic_set_unchecked(&totBufAllocCount, 0);
40060 + atomic_set_unchecked(&totSmBufAllocCount, 0);
40061 #endif /* CONFIG_CIFS_STATS2 */
40062
40063 atomic_set(&midCount, 0);
40064 diff -urNp linux-2.6.32.45/fs/cifs/cifsglob.h linux-2.6.32.45/fs/cifs/cifsglob.h
40065 --- linux-2.6.32.45/fs/cifs/cifsglob.h 2011-08-09 18:35:29.000000000 -0400
40066 +++ linux-2.6.32.45/fs/cifs/cifsglob.h 2011-08-25 17:17:57.000000000 -0400
40067 @@ -252,28 +252,28 @@ struct cifsTconInfo {
40068 __u16 Flags; /* optional support bits */
40069 enum statusEnum tidStatus;
40070 #ifdef CONFIG_CIFS_STATS
40071 - atomic_t num_smbs_sent;
40072 - atomic_t num_writes;
40073 - atomic_t num_reads;
40074 - atomic_t num_flushes;
40075 - atomic_t num_oplock_brks;
40076 - atomic_t num_opens;
40077 - atomic_t num_closes;
40078 - atomic_t num_deletes;
40079 - atomic_t num_mkdirs;
40080 - atomic_t num_posixopens;
40081 - atomic_t num_posixmkdirs;
40082 - atomic_t num_rmdirs;
40083 - atomic_t num_renames;
40084 - atomic_t num_t2renames;
40085 - atomic_t num_ffirst;
40086 - atomic_t num_fnext;
40087 - atomic_t num_fclose;
40088 - atomic_t num_hardlinks;
40089 - atomic_t num_symlinks;
40090 - atomic_t num_locks;
40091 - atomic_t num_acl_get;
40092 - atomic_t num_acl_set;
40093 + atomic_unchecked_t num_smbs_sent;
40094 + atomic_unchecked_t num_writes;
40095 + atomic_unchecked_t num_reads;
40096 + atomic_unchecked_t num_flushes;
40097 + atomic_unchecked_t num_oplock_brks;
40098 + atomic_unchecked_t num_opens;
40099 + atomic_unchecked_t num_closes;
40100 + atomic_unchecked_t num_deletes;
40101 + atomic_unchecked_t num_mkdirs;
40102 + atomic_unchecked_t num_posixopens;
40103 + atomic_unchecked_t num_posixmkdirs;
40104 + atomic_unchecked_t num_rmdirs;
40105 + atomic_unchecked_t num_renames;
40106 + atomic_unchecked_t num_t2renames;
40107 + atomic_unchecked_t num_ffirst;
40108 + atomic_unchecked_t num_fnext;
40109 + atomic_unchecked_t num_fclose;
40110 + atomic_unchecked_t num_hardlinks;
40111 + atomic_unchecked_t num_symlinks;
40112 + atomic_unchecked_t num_locks;
40113 + atomic_unchecked_t num_acl_get;
40114 + atomic_unchecked_t num_acl_set;
40115 #ifdef CONFIG_CIFS_STATS2
40116 unsigned long long time_writes;
40117 unsigned long long time_reads;
40118 @@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const st
40119 }
40120
40121 #ifdef CONFIG_CIFS_STATS
40122 -#define cifs_stats_inc atomic_inc
40123 +#define cifs_stats_inc atomic_inc_unchecked
40124
40125 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
40126 unsigned int bytes)
40127 @@ -701,8 +701,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnect
40128 /* Various Debug counters */
40129 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
40130 #ifdef CONFIG_CIFS_STATS2
40131 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
40132 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
40133 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
40134 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
40135 #endif
40136 GLOBAL_EXTERN atomic_t smBufAllocCount;
40137 GLOBAL_EXTERN atomic_t midCount;
40138 diff -urNp linux-2.6.32.45/fs/cifs/link.c linux-2.6.32.45/fs/cifs/link.c
40139 --- linux-2.6.32.45/fs/cifs/link.c 2011-03-27 14:31:47.000000000 -0400
40140 +++ linux-2.6.32.45/fs/cifs/link.c 2011-04-17 15:56:46.000000000 -0400
40141 @@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct
40142
40143 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
40144 {
40145 - char *p = nd_get_link(nd);
40146 + const char *p = nd_get_link(nd);
40147 if (!IS_ERR(p))
40148 kfree(p);
40149 }
40150 diff -urNp linux-2.6.32.45/fs/cifs/misc.c linux-2.6.32.45/fs/cifs/misc.c
40151 --- linux-2.6.32.45/fs/cifs/misc.c 2011-03-27 14:31:47.000000000 -0400
40152 +++ linux-2.6.32.45/fs/cifs/misc.c 2011-08-25 17:17:57.000000000 -0400
40153 @@ -155,7 +155,7 @@ cifs_buf_get(void)
40154 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
40155 atomic_inc(&bufAllocCount);
40156 #ifdef CONFIG_CIFS_STATS2
40157 - atomic_inc(&totBufAllocCount);
40158 + atomic_inc_unchecked(&totBufAllocCount);
40159 #endif /* CONFIG_CIFS_STATS2 */
40160 }
40161
40162 @@ -190,7 +190,7 @@ cifs_small_buf_get(void)
40163 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
40164 atomic_inc(&smBufAllocCount);
40165 #ifdef CONFIG_CIFS_STATS2
40166 - atomic_inc(&totSmBufAllocCount);
40167 + atomic_inc_unchecked(&totSmBufAllocCount);
40168 #endif /* CONFIG_CIFS_STATS2 */
40169
40170 }
40171 diff -urNp linux-2.6.32.45/fs/coda/cache.c linux-2.6.32.45/fs/coda/cache.c
40172 --- linux-2.6.32.45/fs/coda/cache.c 2011-03-27 14:31:47.000000000 -0400
40173 +++ linux-2.6.32.45/fs/coda/cache.c 2011-05-04 17:56:28.000000000 -0400
40174 @@ -24,14 +24,14 @@
40175 #include <linux/coda_fs_i.h>
40176 #include <linux/coda_cache.h>
40177
40178 -static atomic_t permission_epoch = ATOMIC_INIT(0);
40179 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
40180
40181 /* replace or extend an acl cache hit */
40182 void coda_cache_enter(struct inode *inode, int mask)
40183 {
40184 struct coda_inode_info *cii = ITOC(inode);
40185
40186 - cii->c_cached_epoch = atomic_read(&permission_epoch);
40187 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
40188 if (cii->c_uid != current_fsuid()) {
40189 cii->c_uid = current_fsuid();
40190 cii->c_cached_perm = mask;
40191 @@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inod
40192 void coda_cache_clear_inode(struct inode *inode)
40193 {
40194 struct coda_inode_info *cii = ITOC(inode);
40195 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
40196 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
40197 }
40198
40199 /* remove all acl caches */
40200 void coda_cache_clear_all(struct super_block *sb)
40201 {
40202 - atomic_inc(&permission_epoch);
40203 + atomic_inc_unchecked(&permission_epoch);
40204 }
40205
40206
40207 @@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode
40208
40209 hit = (mask & cii->c_cached_perm) == mask &&
40210 cii->c_uid == current_fsuid() &&
40211 - cii->c_cached_epoch == atomic_read(&permission_epoch);
40212 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
40213
40214 return hit;
40215 }
40216 diff -urNp linux-2.6.32.45/fs/compat_binfmt_elf.c linux-2.6.32.45/fs/compat_binfmt_elf.c
40217 --- linux-2.6.32.45/fs/compat_binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
40218 +++ linux-2.6.32.45/fs/compat_binfmt_elf.c 2011-04-17 15:56:46.000000000 -0400
40219 @@ -29,10 +29,12 @@
40220 #undef elfhdr
40221 #undef elf_phdr
40222 #undef elf_note
40223 +#undef elf_dyn
40224 #undef elf_addr_t
40225 #define elfhdr elf32_hdr
40226 #define elf_phdr elf32_phdr
40227 #define elf_note elf32_note
40228 +#define elf_dyn Elf32_Dyn
40229 #define elf_addr_t Elf32_Addr
40230
40231 /*
40232 diff -urNp linux-2.6.32.45/fs/compat.c linux-2.6.32.45/fs/compat.c
40233 --- linux-2.6.32.45/fs/compat.c 2011-04-17 17:00:52.000000000 -0400
40234 +++ linux-2.6.32.45/fs/compat.c 2011-08-11 19:56:56.000000000 -0400
40235 @@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
40236
40237 struct compat_readdir_callback {
40238 struct compat_old_linux_dirent __user *dirent;
40239 + struct file * file;
40240 int result;
40241 };
40242
40243 @@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf
40244 buf->result = -EOVERFLOW;
40245 return -EOVERFLOW;
40246 }
40247 +
40248 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40249 + return 0;
40250 +
40251 buf->result++;
40252 dirent = buf->dirent;
40253 if (!access_ok(VERIFY_WRITE, dirent,
40254 @@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(u
40255
40256 buf.result = 0;
40257 buf.dirent = dirent;
40258 + buf.file = file;
40259
40260 error = vfs_readdir(file, compat_fillonedir, &buf);
40261 if (buf.result)
40262 @@ -899,6 +905,7 @@ struct compat_linux_dirent {
40263 struct compat_getdents_callback {
40264 struct compat_linux_dirent __user *current_dir;
40265 struct compat_linux_dirent __user *previous;
40266 + struct file * file;
40267 int count;
40268 int error;
40269 };
40270 @@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, c
40271 buf->error = -EOVERFLOW;
40272 return -EOVERFLOW;
40273 }
40274 +
40275 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40276 + return 0;
40277 +
40278 dirent = buf->previous;
40279 if (dirent) {
40280 if (__put_user(offset, &dirent->d_off))
40281 @@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsi
40282 buf.previous = NULL;
40283 buf.count = count;
40284 buf.error = 0;
40285 + buf.file = file;
40286
40287 error = vfs_readdir(file, compat_filldir, &buf);
40288 if (error >= 0)
40289 @@ -987,6 +999,7 @@ out:
40290 struct compat_getdents_callback64 {
40291 struct linux_dirent64 __user *current_dir;
40292 struct linux_dirent64 __user *previous;
40293 + struct file * file;
40294 int count;
40295 int error;
40296 };
40297 @@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf
40298 buf->error = -EINVAL; /* only used if we fail.. */
40299 if (reclen > buf->count)
40300 return -EINVAL;
40301 +
40302 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40303 + return 0;
40304 +
40305 dirent = buf->previous;
40306
40307 if (dirent) {
40308 @@ -1054,6 +1071,7 @@ asmlinkage long compat_sys_getdents64(un
40309 buf.previous = NULL;
40310 buf.count = count;
40311 buf.error = 0;
40312 + buf.file = file;
40313
40314 error = vfs_readdir(file, compat_filldir64, &buf);
40315 if (error >= 0)
40316 @@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(in
40317 * verify all the pointers
40318 */
40319 ret = -EINVAL;
40320 - if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
40321 + if (nr_segs > UIO_MAXIOV)
40322 goto out;
40323 if (!file->f_op)
40324 goto out;
40325 @@ -1463,11 +1481,35 @@ int compat_do_execve(char * filename,
40326 compat_uptr_t __user *envp,
40327 struct pt_regs * regs)
40328 {
40329 +#ifdef CONFIG_GRKERNSEC
40330 + struct file *old_exec_file;
40331 + struct acl_subject_label *old_acl;
40332 + struct rlimit old_rlim[RLIM_NLIMITS];
40333 +#endif
40334 struct linux_binprm *bprm;
40335 struct file *file;
40336 struct files_struct *displaced;
40337 bool clear_in_exec;
40338 int retval;
40339 + const struct cred *cred = current_cred();
40340 +
40341 + /*
40342 + * We move the actual failure in case of RLIMIT_NPROC excess from
40343 + * set*uid() to execve() because too many poorly written programs
40344 + * don't check setuid() return code. Here we additionally recheck
40345 + * whether NPROC limit is still exceeded.
40346 + */
40347 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
40348 +
40349 + if ((current->flags & PF_NPROC_EXCEEDED) &&
40350 + atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
40351 + retval = -EAGAIN;
40352 + goto out_ret;
40353 + }
40354 +
40355 + /* We're below the limit (still or again), so we don't want to make
40356 + * further execve() calls fail. */
40357 + current->flags &= ~PF_NPROC_EXCEEDED;
40358
40359 retval = unshare_files(&displaced);
40360 if (retval)
40361 @@ -1499,6 +1541,15 @@ int compat_do_execve(char * filename,
40362 bprm->filename = filename;
40363 bprm->interp = filename;
40364
40365 + if (gr_process_user_ban()) {
40366 + retval = -EPERM;
40367 + goto out_file;
40368 + }
40369 +
40370 + retval = -EACCES;
40371 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
40372 + goto out_file;
40373 +
40374 retval = bprm_mm_init(bprm);
40375 if (retval)
40376 goto out_file;
40377 @@ -1528,9 +1579,40 @@ int compat_do_execve(char * filename,
40378 if (retval < 0)
40379 goto out;
40380
40381 + if (!gr_tpe_allow(file)) {
40382 + retval = -EACCES;
40383 + goto out;
40384 + }
40385 +
40386 + if (gr_check_crash_exec(file)) {
40387 + retval = -EACCES;
40388 + goto out;
40389 + }
40390 +
40391 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
40392 +
40393 + gr_handle_exec_args_compat(bprm, argv);
40394 +
40395 +#ifdef CONFIG_GRKERNSEC
40396 + old_acl = current->acl;
40397 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
40398 + old_exec_file = current->exec_file;
40399 + get_file(file);
40400 + current->exec_file = file;
40401 +#endif
40402 +
40403 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
40404 + bprm->unsafe & LSM_UNSAFE_SHARE);
40405 + if (retval < 0)
40406 + goto out_fail;
40407 +
40408 retval = search_binary_handler(bprm, regs);
40409 if (retval < 0)
40410 - goto out;
40411 + goto out_fail;
40412 +#ifdef CONFIG_GRKERNSEC
40413 + if (old_exec_file)
40414 + fput(old_exec_file);
40415 +#endif
40416
40417 /* execve succeeded */
40418 current->fs->in_exec = 0;
40419 @@ -1541,6 +1623,14 @@ int compat_do_execve(char * filename,
40420 put_files_struct(displaced);
40421 return retval;
40422
40423 +out_fail:
40424 +#ifdef CONFIG_GRKERNSEC
40425 + current->acl = old_acl;
40426 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
40427 + fput(current->exec_file);
40428 + current->exec_file = old_exec_file;
40429 +#endif
40430 +
40431 out:
40432 if (bprm->mm) {
40433 acct_arg_size(bprm, 0);
40434 @@ -1711,6 +1801,8 @@ int compat_core_sys_select(int n, compat
40435 struct fdtable *fdt;
40436 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
40437
40438 + pax_track_stack();
40439 +
40440 if (n < 0)
40441 goto out_nofds;
40442
40443 diff -urNp linux-2.6.32.45/fs/compat_ioctl.c linux-2.6.32.45/fs/compat_ioctl.c
40444 --- linux-2.6.32.45/fs/compat_ioctl.c 2011-03-27 14:31:47.000000000 -0400
40445 +++ linux-2.6.32.45/fs/compat_ioctl.c 2011-04-23 12:56:11.000000000 -0400
40446 @@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsi
40447 up = (struct compat_video_spu_palette __user *) arg;
40448 err = get_user(palp, &up->palette);
40449 err |= get_user(length, &up->length);
40450 + if (err)
40451 + return -EFAULT;
40452
40453 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
40454 err = put_user(compat_ptr(palp), &up_native->palette);
40455 diff -urNp linux-2.6.32.45/fs/configfs/dir.c linux-2.6.32.45/fs/configfs/dir.c
40456 --- linux-2.6.32.45/fs/configfs/dir.c 2011-03-27 14:31:47.000000000 -0400
40457 +++ linux-2.6.32.45/fs/configfs/dir.c 2011-05-11 18:25:15.000000000 -0400
40458 @@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file
40459 }
40460 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
40461 struct configfs_dirent *next;
40462 - const char * name;
40463 + const unsigned char * name;
40464 + char d_name[sizeof(next->s_dentry->d_iname)];
40465 int len;
40466
40467 next = list_entry(p, struct configfs_dirent,
40468 @@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file
40469 continue;
40470
40471 name = configfs_get_name(next);
40472 - len = strlen(name);
40473 + if (next->s_dentry && name == next->s_dentry->d_iname) {
40474 + len = next->s_dentry->d_name.len;
40475 + memcpy(d_name, name, len);
40476 + name = d_name;
40477 + } else
40478 + len = strlen(name);
40479 if (next->s_dentry)
40480 ino = next->s_dentry->d_inode->i_ino;
40481 else
40482 diff -urNp linux-2.6.32.45/fs/dcache.c linux-2.6.32.45/fs/dcache.c
40483 --- linux-2.6.32.45/fs/dcache.c 2011-03-27 14:31:47.000000000 -0400
40484 +++ linux-2.6.32.45/fs/dcache.c 2011-04-23 13:32:21.000000000 -0400
40485 @@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
40486
40487 static struct kmem_cache *dentry_cache __read_mostly;
40488
40489 -#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
40490 -
40491 /*
40492 * This is the single most critical data structure when it comes
40493 * to the dcache: the hashtable for lookups. Somebody should try
40494 @@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned lon
40495 mempages -= reserve;
40496
40497 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
40498 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
40499 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
40500
40501 dcache_init();
40502 inode_init();
40503 diff -urNp linux-2.6.32.45/fs/dlm/lockspace.c linux-2.6.32.45/fs/dlm/lockspace.c
40504 --- linux-2.6.32.45/fs/dlm/lockspace.c 2011-03-27 14:31:47.000000000 -0400
40505 +++ linux-2.6.32.45/fs/dlm/lockspace.c 2011-04-17 15:56:46.000000000 -0400
40506 @@ -148,7 +148,7 @@ static void lockspace_kobj_release(struc
40507 kfree(ls);
40508 }
40509
40510 -static struct sysfs_ops dlm_attr_ops = {
40511 +static const struct sysfs_ops dlm_attr_ops = {
40512 .show = dlm_attr_show,
40513 .store = dlm_attr_store,
40514 };
40515 diff -urNp linux-2.6.32.45/fs/ecryptfs/inode.c linux-2.6.32.45/fs/ecryptfs/inode.c
40516 --- linux-2.6.32.45/fs/ecryptfs/inode.c 2011-03-27 14:31:47.000000000 -0400
40517 +++ linux-2.6.32.45/fs/ecryptfs/inode.c 2011-04-17 15:56:46.000000000 -0400
40518 @@ -660,7 +660,7 @@ static int ecryptfs_readlink_lower(struc
40519 old_fs = get_fs();
40520 set_fs(get_ds());
40521 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
40522 - (char __user *)lower_buf,
40523 + (__force char __user *)lower_buf,
40524 lower_bufsiz);
40525 set_fs(old_fs);
40526 if (rc < 0)
40527 @@ -706,7 +706,7 @@ static void *ecryptfs_follow_link(struct
40528 }
40529 old_fs = get_fs();
40530 set_fs(get_ds());
40531 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
40532 + rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
40533 set_fs(old_fs);
40534 if (rc < 0)
40535 goto out_free;
40536 diff -urNp linux-2.6.32.45/fs/exec.c linux-2.6.32.45/fs/exec.c
40537 --- linux-2.6.32.45/fs/exec.c 2011-06-25 12:55:34.000000000 -0400
40538 +++ linux-2.6.32.45/fs/exec.c 2011-08-11 19:56:19.000000000 -0400
40539 @@ -56,12 +56,24 @@
40540 #include <linux/fsnotify.h>
40541 #include <linux/fs_struct.h>
40542 #include <linux/pipe_fs_i.h>
40543 +#include <linux/random.h>
40544 +#include <linux/seq_file.h>
40545 +
40546 +#ifdef CONFIG_PAX_REFCOUNT
40547 +#include <linux/kallsyms.h>
40548 +#include <linux/kdebug.h>
40549 +#endif
40550
40551 #include <asm/uaccess.h>
40552 #include <asm/mmu_context.h>
40553 #include <asm/tlb.h>
40554 #include "internal.h"
40555
40556 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
40557 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
40558 +EXPORT_SYMBOL(pax_set_initial_flags_func);
40559 +#endif
40560 +
40561 int core_uses_pid;
40562 char core_pattern[CORENAME_MAX_SIZE] = "core";
40563 unsigned int core_pipe_limit;
40564 @@ -115,7 +127,7 @@ SYSCALL_DEFINE1(uselib, const char __use
40565 goto out;
40566
40567 file = do_filp_open(AT_FDCWD, tmp,
40568 - O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
40569 + O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
40570 MAY_READ | MAY_EXEC | MAY_OPEN);
40571 putname(tmp);
40572 error = PTR_ERR(file);
40573 @@ -178,18 +190,10 @@ struct page *get_arg_page(struct linux_b
40574 int write)
40575 {
40576 struct page *page;
40577 - int ret;
40578
40579 -#ifdef CONFIG_STACK_GROWSUP
40580 - if (write) {
40581 - ret = expand_stack_downwards(bprm->vma, pos);
40582 - if (ret < 0)
40583 - return NULL;
40584 - }
40585 -#endif
40586 - ret = get_user_pages(current, bprm->mm, pos,
40587 - 1, write, 1, &page, NULL);
40588 - if (ret <= 0)
40589 + if (0 > expand_stack_downwards(bprm->vma, pos))
40590 + return NULL;
40591 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
40592 return NULL;
40593
40594 if (write) {
40595 @@ -263,6 +267,11 @@ static int __bprm_mm_init(struct linux_b
40596 vma->vm_end = STACK_TOP_MAX;
40597 vma->vm_start = vma->vm_end - PAGE_SIZE;
40598 vma->vm_flags = VM_STACK_FLAGS;
40599 +
40600 +#ifdef CONFIG_PAX_SEGMEXEC
40601 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
40602 +#endif
40603 +
40604 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
40605
40606 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
40607 @@ -276,6 +285,12 @@ static int __bprm_mm_init(struct linux_b
40608 mm->stack_vm = mm->total_vm = 1;
40609 up_write(&mm->mmap_sem);
40610 bprm->p = vma->vm_end - sizeof(void *);
40611 +
40612 +#ifdef CONFIG_PAX_RANDUSTACK
40613 + if (randomize_va_space)
40614 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
40615 +#endif
40616 +
40617 return 0;
40618 err:
40619 up_write(&mm->mmap_sem);
40620 @@ -510,7 +525,7 @@ int copy_strings_kernel(int argc,char **
40621 int r;
40622 mm_segment_t oldfs = get_fs();
40623 set_fs(KERNEL_DS);
40624 - r = copy_strings(argc, (char __user * __user *)argv, bprm);
40625 + r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
40626 set_fs(oldfs);
40627 return r;
40628 }
40629 @@ -540,7 +555,8 @@ static int shift_arg_pages(struct vm_are
40630 unsigned long new_end = old_end - shift;
40631 struct mmu_gather *tlb;
40632
40633 - BUG_ON(new_start > new_end);
40634 + if (new_start >= new_end || new_start < mmap_min_addr)
40635 + return -ENOMEM;
40636
40637 /*
40638 * ensure there are no vmas between where we want to go
40639 @@ -549,6 +565,10 @@ static int shift_arg_pages(struct vm_are
40640 if (vma != find_vma(mm, new_start))
40641 return -EFAULT;
40642
40643 +#ifdef CONFIG_PAX_SEGMEXEC
40644 + BUG_ON(pax_find_mirror_vma(vma));
40645 +#endif
40646 +
40647 /*
40648 * cover the whole range: [new_start, old_end)
40649 */
40650 @@ -630,10 +650,6 @@ int setup_arg_pages(struct linux_binprm
40651 stack_top = arch_align_stack(stack_top);
40652 stack_top = PAGE_ALIGN(stack_top);
40653
40654 - if (unlikely(stack_top < mmap_min_addr) ||
40655 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
40656 - return -ENOMEM;
40657 -
40658 stack_shift = vma->vm_end - stack_top;
40659
40660 bprm->p -= stack_shift;
40661 @@ -645,6 +661,14 @@ int setup_arg_pages(struct linux_binprm
40662 bprm->exec -= stack_shift;
40663
40664 down_write(&mm->mmap_sem);
40665 +
40666 + /* Move stack pages down in memory. */
40667 + if (stack_shift) {
40668 + ret = shift_arg_pages(vma, stack_shift);
40669 + if (ret)
40670 + goto out_unlock;
40671 + }
40672 +
40673 vm_flags = VM_STACK_FLAGS;
40674
40675 /*
40676 @@ -658,19 +682,24 @@ int setup_arg_pages(struct linux_binprm
40677 vm_flags &= ~VM_EXEC;
40678 vm_flags |= mm->def_flags;
40679
40680 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
40681 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40682 + vm_flags &= ~VM_EXEC;
40683 +
40684 +#ifdef CONFIG_PAX_MPROTECT
40685 + if (mm->pax_flags & MF_PAX_MPROTECT)
40686 + vm_flags &= ~VM_MAYEXEC;
40687 +#endif
40688 +
40689 + }
40690 +#endif
40691 +
40692 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
40693 vm_flags);
40694 if (ret)
40695 goto out_unlock;
40696 BUG_ON(prev != vma);
40697
40698 - /* Move stack pages down in memory. */
40699 - if (stack_shift) {
40700 - ret = shift_arg_pages(vma, stack_shift);
40701 - if (ret)
40702 - goto out_unlock;
40703 - }
40704 -
40705 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
40706 stack_size = vma->vm_end - vma->vm_start;
40707 /*
40708 @@ -707,7 +736,7 @@ struct file *open_exec(const char *name)
40709 int err;
40710
40711 file = do_filp_open(AT_FDCWD, name,
40712 - O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
40713 + O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
40714 MAY_EXEC | MAY_OPEN);
40715 if (IS_ERR(file))
40716 goto out;
40717 @@ -744,7 +773,7 @@ int kernel_read(struct file *file, loff_
40718 old_fs = get_fs();
40719 set_fs(get_ds());
40720 /* The cast to a user pointer is valid due to the set_fs() */
40721 - result = vfs_read(file, (void __user *)addr, count, &pos);
40722 + result = vfs_read(file, (__force void __user *)addr, count, &pos);
40723 set_fs(old_fs);
40724 return result;
40725 }
40726 @@ -1152,7 +1181,7 @@ int check_unsafe_exec(struct linux_binpr
40727 }
40728 rcu_read_unlock();
40729
40730 - if (p->fs->users > n_fs) {
40731 + if (atomic_read(&p->fs->users) > n_fs) {
40732 bprm->unsafe |= LSM_UNSAFE_SHARE;
40733 } else {
40734 res = -EAGAIN;
40735 @@ -1347,11 +1376,35 @@ int do_execve(char * filename,
40736 char __user *__user *envp,
40737 struct pt_regs * regs)
40738 {
40739 +#ifdef CONFIG_GRKERNSEC
40740 + struct file *old_exec_file;
40741 + struct acl_subject_label *old_acl;
40742 + struct rlimit old_rlim[RLIM_NLIMITS];
40743 +#endif
40744 struct linux_binprm *bprm;
40745 struct file *file;
40746 struct files_struct *displaced;
40747 bool clear_in_exec;
40748 int retval;
40749 + const struct cred *cred = current_cred();
40750 +
40751 + /*
40752 + * We move the actual failure in case of RLIMIT_NPROC excess from
40753 + * set*uid() to execve() because too many poorly written programs
40754 + * don't check setuid() return code. Here we additionally recheck
40755 + * whether NPROC limit is still exceeded.
40756 + */
40757 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
40758 +
40759 + if ((current->flags & PF_NPROC_EXCEEDED) &&
40760 + atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
40761 + retval = -EAGAIN;
40762 + goto out_ret;
40763 + }
40764 +
40765 + /* We're below the limit (still or again), so we don't want to make
40766 + * further execve() calls fail. */
40767 + current->flags &= ~PF_NPROC_EXCEEDED;
40768
40769 retval = unshare_files(&displaced);
40770 if (retval)
40771 @@ -1383,6 +1436,16 @@ int do_execve(char * filename,
40772 bprm->filename = filename;
40773 bprm->interp = filename;
40774
40775 + if (gr_process_user_ban()) {
40776 + retval = -EPERM;
40777 + goto out_file;
40778 + }
40779 +
40780 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
40781 + retval = -EACCES;
40782 + goto out_file;
40783 + }
40784 +
40785 retval = bprm_mm_init(bprm);
40786 if (retval)
40787 goto out_file;
40788 @@ -1412,10 +1475,41 @@ int do_execve(char * filename,
40789 if (retval < 0)
40790 goto out;
40791
40792 + if (!gr_tpe_allow(file)) {
40793 + retval = -EACCES;
40794 + goto out;
40795 + }
40796 +
40797 + if (gr_check_crash_exec(file)) {
40798 + retval = -EACCES;
40799 + goto out;
40800 + }
40801 +
40802 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
40803 +
40804 + gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
40805 +
40806 +#ifdef CONFIG_GRKERNSEC
40807 + old_acl = current->acl;
40808 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
40809 + old_exec_file = current->exec_file;
40810 + get_file(file);
40811 + current->exec_file = file;
40812 +#endif
40813 +
40814 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
40815 + bprm->unsafe & LSM_UNSAFE_SHARE);
40816 + if (retval < 0)
40817 + goto out_fail;
40818 +
40819 current->flags &= ~PF_KTHREAD;
40820 retval = search_binary_handler(bprm,regs);
40821 if (retval < 0)
40822 - goto out;
40823 + goto out_fail;
40824 +#ifdef CONFIG_GRKERNSEC
40825 + if (old_exec_file)
40826 + fput(old_exec_file);
40827 +#endif
40828
40829 /* execve succeeded */
40830 current->fs->in_exec = 0;
40831 @@ -1426,6 +1520,14 @@ int do_execve(char * filename,
40832 put_files_struct(displaced);
40833 return retval;
40834
40835 +out_fail:
40836 +#ifdef CONFIG_GRKERNSEC
40837 + current->acl = old_acl;
40838 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
40839 + fput(current->exec_file);
40840 + current->exec_file = old_exec_file;
40841 +#endif
40842 +
40843 out:
40844 if (bprm->mm) {
40845 acct_arg_size(bprm, 0);
40846 @@ -1591,6 +1693,220 @@ out:
40847 return ispipe;
40848 }
40849
40850 +int pax_check_flags(unsigned long *flags)
40851 +{
40852 + int retval = 0;
40853 +
40854 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
40855 + if (*flags & MF_PAX_SEGMEXEC)
40856 + {
40857 + *flags &= ~MF_PAX_SEGMEXEC;
40858 + retval = -EINVAL;
40859 + }
40860 +#endif
40861 +
40862 + if ((*flags & MF_PAX_PAGEEXEC)
40863 +
40864 +#ifdef CONFIG_PAX_PAGEEXEC
40865 + && (*flags & MF_PAX_SEGMEXEC)
40866 +#endif
40867 +
40868 + )
40869 + {
40870 + *flags &= ~MF_PAX_PAGEEXEC;
40871 + retval = -EINVAL;
40872 + }
40873 +
40874 + if ((*flags & MF_PAX_MPROTECT)
40875 +
40876 +#ifdef CONFIG_PAX_MPROTECT
40877 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
40878 +#endif
40879 +
40880 + )
40881 + {
40882 + *flags &= ~MF_PAX_MPROTECT;
40883 + retval = -EINVAL;
40884 + }
40885 +
40886 + if ((*flags & MF_PAX_EMUTRAMP)
40887 +
40888 +#ifdef CONFIG_PAX_EMUTRAMP
40889 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
40890 +#endif
40891 +
40892 + )
40893 + {
40894 + *flags &= ~MF_PAX_EMUTRAMP;
40895 + retval = -EINVAL;
40896 + }
40897 +
40898 + return retval;
40899 +}
40900 +
40901 +EXPORT_SYMBOL(pax_check_flags);
40902 +
40903 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
40904 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
40905 +{
40906 + struct task_struct *tsk = current;
40907 + struct mm_struct *mm = current->mm;
40908 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
40909 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
40910 + char *path_exec = NULL;
40911 + char *path_fault = NULL;
40912 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
40913 +
40914 + if (buffer_exec && buffer_fault) {
40915 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
40916 +
40917 + down_read(&mm->mmap_sem);
40918 + vma = mm->mmap;
40919 + while (vma && (!vma_exec || !vma_fault)) {
40920 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
40921 + vma_exec = vma;
40922 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
40923 + vma_fault = vma;
40924 + vma = vma->vm_next;
40925 + }
40926 + if (vma_exec) {
40927 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
40928 + if (IS_ERR(path_exec))
40929 + path_exec = "<path too long>";
40930 + else {
40931 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
40932 + if (path_exec) {
40933 + *path_exec = 0;
40934 + path_exec = buffer_exec;
40935 + } else
40936 + path_exec = "<path too long>";
40937 + }
40938 + }
40939 + if (vma_fault) {
40940 + start = vma_fault->vm_start;
40941 + end = vma_fault->vm_end;
40942 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
40943 + if (vma_fault->vm_file) {
40944 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
40945 + if (IS_ERR(path_fault))
40946 + path_fault = "<path too long>";
40947 + else {
40948 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
40949 + if (path_fault) {
40950 + *path_fault = 0;
40951 + path_fault = buffer_fault;
40952 + } else
40953 + path_fault = "<path too long>";
40954 + }
40955 + } else
40956 + path_fault = "<anonymous mapping>";
40957 + }
40958 + up_read(&mm->mmap_sem);
40959 + }
40960 + if (tsk->signal->curr_ip)
40961 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
40962 + else
40963 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
40964 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
40965 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
40966 + task_uid(tsk), task_euid(tsk), pc, sp);
40967 + free_page((unsigned long)buffer_exec);
40968 + free_page((unsigned long)buffer_fault);
40969 + pax_report_insns(pc, sp);
40970 + do_coredump(SIGKILL, SIGKILL, regs);
40971 +}
40972 +#endif
40973 +
40974 +#ifdef CONFIG_PAX_REFCOUNT
40975 +void pax_report_refcount_overflow(struct pt_regs *regs)
40976 +{
40977 + if (current->signal->curr_ip)
40978 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
40979 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
40980 + else
40981 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
40982 + current->comm, task_pid_nr(current), current_uid(), current_euid());
40983 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
40984 + show_regs(regs);
40985 + force_sig_specific(SIGKILL, current);
40986 +}
40987 +#endif
40988 +
40989 +#ifdef CONFIG_PAX_USERCOPY
40990 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
40991 +int object_is_on_stack(const void *obj, unsigned long len)
40992 +{
40993 + const void * const stack = task_stack_page(current);
40994 + const void * const stackend = stack + THREAD_SIZE;
40995 +
40996 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
40997 + const void *frame = NULL;
40998 + const void *oldframe;
40999 +#endif
41000 +
41001 + if (obj + len < obj)
41002 + return -1;
41003 +
41004 + if (obj + len <= stack || stackend <= obj)
41005 + return 0;
41006 +
41007 + if (obj < stack || stackend < obj + len)
41008 + return -1;
41009 +
41010 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41011 + oldframe = __builtin_frame_address(1);
41012 + if (oldframe)
41013 + frame = __builtin_frame_address(2);
41014 + /*
41015 + low ----------------------------------------------> high
41016 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
41017 + ^----------------^
41018 + allow copies only within here
41019 + */
41020 + while (stack <= frame && frame < stackend) {
41021 + /* if obj + len extends past the last frame, this
41022 + check won't pass and the next frame will be 0,
41023 + causing us to bail out and correctly report
41024 + the copy as invalid
41025 + */
41026 + if (obj + len <= frame)
41027 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
41028 + oldframe = frame;
41029 + frame = *(const void * const *)frame;
41030 + }
41031 + return -1;
41032 +#else
41033 + return 1;
41034 +#endif
41035 +}
41036 +
41037 +
41038 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
41039 +{
41040 + if (current->signal->curr_ip)
41041 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41042 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41043 + else
41044 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41045 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41046 +
41047 + dump_stack();
41048 + gr_handle_kernel_exploit();
41049 + do_group_exit(SIGKILL);
41050 +}
41051 +#endif
41052 +
41053 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
41054 +void pax_track_stack(void)
41055 +{
41056 + unsigned long sp = (unsigned long)&sp;
41057 + if (sp < current_thread_info()->lowest_stack &&
41058 + sp > (unsigned long)task_stack_page(current))
41059 + current_thread_info()->lowest_stack = sp;
41060 +}
41061 +EXPORT_SYMBOL(pax_track_stack);
41062 +#endif
41063 +
41064 static int zap_process(struct task_struct *start)
41065 {
41066 struct task_struct *t;
41067 @@ -1793,17 +2109,17 @@ static void wait_for_dump_helpers(struct
41068 pipe = file->f_path.dentry->d_inode->i_pipe;
41069
41070 pipe_lock(pipe);
41071 - pipe->readers++;
41072 - pipe->writers--;
41073 + atomic_inc(&pipe->readers);
41074 + atomic_dec(&pipe->writers);
41075
41076 - while ((pipe->readers > 1) && (!signal_pending(current))) {
41077 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
41078 wake_up_interruptible_sync(&pipe->wait);
41079 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
41080 pipe_wait(pipe);
41081 }
41082
41083 - pipe->readers--;
41084 - pipe->writers++;
41085 + atomic_dec(&pipe->readers);
41086 + atomic_inc(&pipe->writers);
41087 pipe_unlock(pipe);
41088
41089 }
41090 @@ -1826,10 +2142,13 @@ void do_coredump(long signr, int exit_co
41091 char **helper_argv = NULL;
41092 int helper_argc = 0;
41093 int dump_count = 0;
41094 - static atomic_t core_dump_count = ATOMIC_INIT(0);
41095 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
41096
41097 audit_core_dumps(signr);
41098
41099 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
41100 + gr_handle_brute_attach(current, mm->flags);
41101 +
41102 binfmt = mm->binfmt;
41103 if (!binfmt || !binfmt->core_dump)
41104 goto fail;
41105 @@ -1874,6 +2193,8 @@ void do_coredump(long signr, int exit_co
41106 */
41107 clear_thread_flag(TIF_SIGPENDING);
41108
41109 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
41110 +
41111 /*
41112 * lock_kernel() because format_corename() is controlled by sysctl, which
41113 * uses lock_kernel()
41114 @@ -1908,7 +2229,7 @@ void do_coredump(long signr, int exit_co
41115 goto fail_unlock;
41116 }
41117
41118 - dump_count = atomic_inc_return(&core_dump_count);
41119 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
41120 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
41121 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
41122 task_tgid_vnr(current), current->comm);
41123 @@ -1972,7 +2293,7 @@ close_fail:
41124 filp_close(file, NULL);
41125 fail_dropcount:
41126 if (dump_count)
41127 - atomic_dec(&core_dump_count);
41128 + atomic_dec_unchecked(&core_dump_count);
41129 fail_unlock:
41130 if (helper_argv)
41131 argv_free(helper_argv);
41132 diff -urNp linux-2.6.32.45/fs/ext2/balloc.c linux-2.6.32.45/fs/ext2/balloc.c
41133 --- linux-2.6.32.45/fs/ext2/balloc.c 2011-03-27 14:31:47.000000000 -0400
41134 +++ linux-2.6.32.45/fs/ext2/balloc.c 2011-04-17 15:56:46.000000000 -0400
41135 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
41136
41137 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
41138 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
41139 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
41140 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
41141 sbi->s_resuid != current_fsuid() &&
41142 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
41143 return 0;
41144 diff -urNp linux-2.6.32.45/fs/ext3/balloc.c linux-2.6.32.45/fs/ext3/balloc.c
41145 --- linux-2.6.32.45/fs/ext3/balloc.c 2011-03-27 14:31:47.000000000 -0400
41146 +++ linux-2.6.32.45/fs/ext3/balloc.c 2011-04-17 15:56:46.000000000 -0400
41147 @@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct e
41148
41149 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
41150 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
41151 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
41152 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
41153 sbi->s_resuid != current_fsuid() &&
41154 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
41155 return 0;
41156 diff -urNp linux-2.6.32.45/fs/ext4/balloc.c linux-2.6.32.45/fs/ext4/balloc.c
41157 --- linux-2.6.32.45/fs/ext4/balloc.c 2011-03-27 14:31:47.000000000 -0400
41158 +++ linux-2.6.32.45/fs/ext4/balloc.c 2011-04-17 15:56:46.000000000 -0400
41159 @@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_
41160 /* Hm, nope. Are (enough) root reserved blocks available? */
41161 if (sbi->s_resuid == current_fsuid() ||
41162 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
41163 - capable(CAP_SYS_RESOURCE)) {
41164 + capable_nolog(CAP_SYS_RESOURCE)) {
41165 if (free_blocks >= (nblocks + dirty_blocks))
41166 return 1;
41167 }
41168 diff -urNp linux-2.6.32.45/fs/ext4/ext4.h linux-2.6.32.45/fs/ext4/ext4.h
41169 --- linux-2.6.32.45/fs/ext4/ext4.h 2011-03-27 14:31:47.000000000 -0400
41170 +++ linux-2.6.32.45/fs/ext4/ext4.h 2011-04-17 15:56:46.000000000 -0400
41171 @@ -1078,19 +1078,19 @@ struct ext4_sb_info {
41172
41173 /* stats for buddy allocator */
41174 spinlock_t s_mb_pa_lock;
41175 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
41176 - atomic_t s_bal_success; /* we found long enough chunks */
41177 - atomic_t s_bal_allocated; /* in blocks */
41178 - atomic_t s_bal_ex_scanned; /* total extents scanned */
41179 - atomic_t s_bal_goals; /* goal hits */
41180 - atomic_t s_bal_breaks; /* too long searches */
41181 - atomic_t s_bal_2orders; /* 2^order hits */
41182 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
41183 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
41184 + atomic_unchecked_t s_bal_allocated; /* in blocks */
41185 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
41186 + atomic_unchecked_t s_bal_goals; /* goal hits */
41187 + atomic_unchecked_t s_bal_breaks; /* too long searches */
41188 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
41189 spinlock_t s_bal_lock;
41190 unsigned long s_mb_buddies_generated;
41191 unsigned long long s_mb_generation_time;
41192 - atomic_t s_mb_lost_chunks;
41193 - atomic_t s_mb_preallocated;
41194 - atomic_t s_mb_discarded;
41195 + atomic_unchecked_t s_mb_lost_chunks;
41196 + atomic_unchecked_t s_mb_preallocated;
41197 + atomic_unchecked_t s_mb_discarded;
41198 atomic_t s_lock_busy;
41199
41200 /* locality groups */
41201 diff -urNp linux-2.6.32.45/fs/ext4/mballoc.c linux-2.6.32.45/fs/ext4/mballoc.c
41202 --- linux-2.6.32.45/fs/ext4/mballoc.c 2011-06-25 12:55:34.000000000 -0400
41203 +++ linux-2.6.32.45/fs/ext4/mballoc.c 2011-06-25 12:56:37.000000000 -0400
41204 @@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ex
41205 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
41206
41207 if (EXT4_SB(sb)->s_mb_stats)
41208 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
41209 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
41210
41211 break;
41212 }
41213 @@ -2131,7 +2131,7 @@ repeat:
41214 ac->ac_status = AC_STATUS_CONTINUE;
41215 ac->ac_flags |= EXT4_MB_HINT_FIRST;
41216 cr = 3;
41217 - atomic_inc(&sbi->s_mb_lost_chunks);
41218 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
41219 goto repeat;
41220 }
41221 }
41222 @@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struc
41223 ext4_grpblk_t counters[16];
41224 } sg;
41225
41226 + pax_track_stack();
41227 +
41228 group--;
41229 if (group == 0)
41230 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
41231 @@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *
41232 if (sbi->s_mb_stats) {
41233 printk(KERN_INFO
41234 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
41235 - atomic_read(&sbi->s_bal_allocated),
41236 - atomic_read(&sbi->s_bal_reqs),
41237 - atomic_read(&sbi->s_bal_success));
41238 + atomic_read_unchecked(&sbi->s_bal_allocated),
41239 + atomic_read_unchecked(&sbi->s_bal_reqs),
41240 + atomic_read_unchecked(&sbi->s_bal_success));
41241 printk(KERN_INFO
41242 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
41243 "%u 2^N hits, %u breaks, %u lost\n",
41244 - atomic_read(&sbi->s_bal_ex_scanned),
41245 - atomic_read(&sbi->s_bal_goals),
41246 - atomic_read(&sbi->s_bal_2orders),
41247 - atomic_read(&sbi->s_bal_breaks),
41248 - atomic_read(&sbi->s_mb_lost_chunks));
41249 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
41250 + atomic_read_unchecked(&sbi->s_bal_goals),
41251 + atomic_read_unchecked(&sbi->s_bal_2orders),
41252 + atomic_read_unchecked(&sbi->s_bal_breaks),
41253 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
41254 printk(KERN_INFO
41255 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
41256 sbi->s_mb_buddies_generated++,
41257 sbi->s_mb_generation_time);
41258 printk(KERN_INFO
41259 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
41260 - atomic_read(&sbi->s_mb_preallocated),
41261 - atomic_read(&sbi->s_mb_discarded));
41262 + atomic_read_unchecked(&sbi->s_mb_preallocated),
41263 + atomic_read_unchecked(&sbi->s_mb_discarded));
41264 }
41265
41266 free_percpu(sbi->s_locality_groups);
41267 @@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct
41268 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
41269
41270 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
41271 - atomic_inc(&sbi->s_bal_reqs);
41272 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
41273 + atomic_inc_unchecked(&sbi->s_bal_reqs);
41274 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
41275 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
41276 - atomic_inc(&sbi->s_bal_success);
41277 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
41278 + atomic_inc_unchecked(&sbi->s_bal_success);
41279 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
41280 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
41281 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
41282 - atomic_inc(&sbi->s_bal_goals);
41283 + atomic_inc_unchecked(&sbi->s_bal_goals);
41284 if (ac->ac_found > sbi->s_mb_max_to_scan)
41285 - atomic_inc(&sbi->s_bal_breaks);
41286 + atomic_inc_unchecked(&sbi->s_bal_breaks);
41287 }
41288
41289 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
41290 @@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
41291 trace_ext4_mb_new_inode_pa(ac, pa);
41292
41293 ext4_mb_use_inode_pa(ac, pa);
41294 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41295 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41296
41297 ei = EXT4_I(ac->ac_inode);
41298 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
41299 @@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
41300 trace_ext4_mb_new_group_pa(ac, pa);
41301
41302 ext4_mb_use_group_pa(ac, pa);
41303 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41304 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41305
41306 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
41307 lg = ac->ac_lg;
41308 @@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
41309 * from the bitmap and continue.
41310 */
41311 }
41312 - atomic_add(free, &sbi->s_mb_discarded);
41313 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
41314
41315 return err;
41316 }
41317 @@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_bud
41318 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
41319 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
41320 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
41321 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
41322 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
41323
41324 if (ac) {
41325 ac->ac_sb = sb;
41326 diff -urNp linux-2.6.32.45/fs/ext4/super.c linux-2.6.32.45/fs/ext4/super.c
41327 --- linux-2.6.32.45/fs/ext4/super.c 2011-03-27 14:31:47.000000000 -0400
41328 +++ linux-2.6.32.45/fs/ext4/super.c 2011-04-17 15:56:46.000000000 -0400
41329 @@ -2287,7 +2287,7 @@ static void ext4_sb_release(struct kobje
41330 }
41331
41332
41333 -static struct sysfs_ops ext4_attr_ops = {
41334 +static const struct sysfs_ops ext4_attr_ops = {
41335 .show = ext4_attr_show,
41336 .store = ext4_attr_store,
41337 };
41338 diff -urNp linux-2.6.32.45/fs/fcntl.c linux-2.6.32.45/fs/fcntl.c
41339 --- linux-2.6.32.45/fs/fcntl.c 2011-03-27 14:31:47.000000000 -0400
41340 +++ linux-2.6.32.45/fs/fcntl.c 2011-04-17 15:56:46.000000000 -0400
41341 @@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct
41342 if (err)
41343 return err;
41344
41345 + if (gr_handle_chroot_fowner(pid, type))
41346 + return -ENOENT;
41347 + if (gr_check_protected_task_fowner(pid, type))
41348 + return -EACCES;
41349 +
41350 f_modown(filp, pid, type, force);
41351 return 0;
41352 }
41353 @@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned in
41354 switch (cmd) {
41355 case F_DUPFD:
41356 case F_DUPFD_CLOEXEC:
41357 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
41358 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
41359 break;
41360 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
41361 diff -urNp linux-2.6.32.45/fs/fifo.c linux-2.6.32.45/fs/fifo.c
41362 --- linux-2.6.32.45/fs/fifo.c 2011-03-27 14:31:47.000000000 -0400
41363 +++ linux-2.6.32.45/fs/fifo.c 2011-04-17 15:56:46.000000000 -0400
41364 @@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode
41365 */
41366 filp->f_op = &read_pipefifo_fops;
41367 pipe->r_counter++;
41368 - if (pipe->readers++ == 0)
41369 + if (atomic_inc_return(&pipe->readers) == 1)
41370 wake_up_partner(inode);
41371
41372 - if (!pipe->writers) {
41373 + if (!atomic_read(&pipe->writers)) {
41374 if ((filp->f_flags & O_NONBLOCK)) {
41375 /* suppress POLLHUP until we have
41376 * seen a writer */
41377 @@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode
41378 * errno=ENXIO when there is no process reading the FIFO.
41379 */
41380 ret = -ENXIO;
41381 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
41382 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
41383 goto err;
41384
41385 filp->f_op = &write_pipefifo_fops;
41386 pipe->w_counter++;
41387 - if (!pipe->writers++)
41388 + if (atomic_inc_return(&pipe->writers) == 1)
41389 wake_up_partner(inode);
41390
41391 - if (!pipe->readers) {
41392 + if (!atomic_read(&pipe->readers)) {
41393 wait_for_partner(inode, &pipe->r_counter);
41394 if (signal_pending(current))
41395 goto err_wr;
41396 @@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode
41397 */
41398 filp->f_op = &rdwr_pipefifo_fops;
41399
41400 - pipe->readers++;
41401 - pipe->writers++;
41402 + atomic_inc(&pipe->readers);
41403 + atomic_inc(&pipe->writers);
41404 pipe->r_counter++;
41405 pipe->w_counter++;
41406 - if (pipe->readers == 1 || pipe->writers == 1)
41407 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
41408 wake_up_partner(inode);
41409 break;
41410
41411 @@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode
41412 return 0;
41413
41414 err_rd:
41415 - if (!--pipe->readers)
41416 + if (atomic_dec_and_test(&pipe->readers))
41417 wake_up_interruptible(&pipe->wait);
41418 ret = -ERESTARTSYS;
41419 goto err;
41420
41421 err_wr:
41422 - if (!--pipe->writers)
41423 + if (atomic_dec_and_test(&pipe->writers))
41424 wake_up_interruptible(&pipe->wait);
41425 ret = -ERESTARTSYS;
41426 goto err;
41427
41428 err:
41429 - if (!pipe->readers && !pipe->writers)
41430 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
41431 free_pipe_info(inode);
41432
41433 err_nocleanup:
41434 diff -urNp linux-2.6.32.45/fs/file.c linux-2.6.32.45/fs/file.c
41435 --- linux-2.6.32.45/fs/file.c 2011-03-27 14:31:47.000000000 -0400
41436 +++ linux-2.6.32.45/fs/file.c 2011-04-17 15:56:46.000000000 -0400
41437 @@ -14,6 +14,7 @@
41438 #include <linux/slab.h>
41439 #include <linux/vmalloc.h>
41440 #include <linux/file.h>
41441 +#include <linux/security.h>
41442 #include <linux/fdtable.h>
41443 #include <linux/bitops.h>
41444 #include <linux/interrupt.h>
41445 @@ -257,6 +258,8 @@ int expand_files(struct files_struct *fi
41446 * N.B. For clone tasks sharing a files structure, this test
41447 * will limit the total number of files that can be opened.
41448 */
41449 +
41450 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
41451 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
41452 return -EMFILE;
41453
41454 diff -urNp linux-2.6.32.45/fs/filesystems.c linux-2.6.32.45/fs/filesystems.c
41455 --- linux-2.6.32.45/fs/filesystems.c 2011-03-27 14:31:47.000000000 -0400
41456 +++ linux-2.6.32.45/fs/filesystems.c 2011-04-17 15:56:46.000000000 -0400
41457 @@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(con
41458 int len = dot ? dot - name : strlen(name);
41459
41460 fs = __get_fs_type(name, len);
41461 +
41462 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
41463 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
41464 +#else
41465 if (!fs && (request_module("%.*s", len, name) == 0))
41466 +#endif
41467 fs = __get_fs_type(name, len);
41468
41469 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
41470 diff -urNp linux-2.6.32.45/fs/fscache/cookie.c linux-2.6.32.45/fs/fscache/cookie.c
41471 --- linux-2.6.32.45/fs/fscache/cookie.c 2011-03-27 14:31:47.000000000 -0400
41472 +++ linux-2.6.32.45/fs/fscache/cookie.c 2011-05-04 17:56:28.000000000 -0400
41473 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
41474 parent ? (char *) parent->def->name : "<no-parent>",
41475 def->name, netfs_data);
41476
41477 - fscache_stat(&fscache_n_acquires);
41478 + fscache_stat_unchecked(&fscache_n_acquires);
41479
41480 /* if there's no parent cookie, then we don't create one here either */
41481 if (!parent) {
41482 - fscache_stat(&fscache_n_acquires_null);
41483 + fscache_stat_unchecked(&fscache_n_acquires_null);
41484 _leave(" [no parent]");
41485 return NULL;
41486 }
41487 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
41488 /* allocate and initialise a cookie */
41489 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
41490 if (!cookie) {
41491 - fscache_stat(&fscache_n_acquires_oom);
41492 + fscache_stat_unchecked(&fscache_n_acquires_oom);
41493 _leave(" [ENOMEM]");
41494 return NULL;
41495 }
41496 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
41497
41498 switch (cookie->def->type) {
41499 case FSCACHE_COOKIE_TYPE_INDEX:
41500 - fscache_stat(&fscache_n_cookie_index);
41501 + fscache_stat_unchecked(&fscache_n_cookie_index);
41502 break;
41503 case FSCACHE_COOKIE_TYPE_DATAFILE:
41504 - fscache_stat(&fscache_n_cookie_data);
41505 + fscache_stat_unchecked(&fscache_n_cookie_data);
41506 break;
41507 default:
41508 - fscache_stat(&fscache_n_cookie_special);
41509 + fscache_stat_unchecked(&fscache_n_cookie_special);
41510 break;
41511 }
41512
41513 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
41514 if (fscache_acquire_non_index_cookie(cookie) < 0) {
41515 atomic_dec(&parent->n_children);
41516 __fscache_cookie_put(cookie);
41517 - fscache_stat(&fscache_n_acquires_nobufs);
41518 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
41519 _leave(" = NULL");
41520 return NULL;
41521 }
41522 }
41523
41524 - fscache_stat(&fscache_n_acquires_ok);
41525 + fscache_stat_unchecked(&fscache_n_acquires_ok);
41526 _leave(" = %p", cookie);
41527 return cookie;
41528 }
41529 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
41530 cache = fscache_select_cache_for_object(cookie->parent);
41531 if (!cache) {
41532 up_read(&fscache_addremove_sem);
41533 - fscache_stat(&fscache_n_acquires_no_cache);
41534 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
41535 _leave(" = -ENOMEDIUM [no cache]");
41536 return -ENOMEDIUM;
41537 }
41538 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
41539 object = cache->ops->alloc_object(cache, cookie);
41540 fscache_stat_d(&fscache_n_cop_alloc_object);
41541 if (IS_ERR(object)) {
41542 - fscache_stat(&fscache_n_object_no_alloc);
41543 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
41544 ret = PTR_ERR(object);
41545 goto error;
41546 }
41547
41548 - fscache_stat(&fscache_n_object_alloc);
41549 + fscache_stat_unchecked(&fscache_n_object_alloc);
41550
41551 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
41552
41553 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
41554 struct fscache_object *object;
41555 struct hlist_node *_p;
41556
41557 - fscache_stat(&fscache_n_updates);
41558 + fscache_stat_unchecked(&fscache_n_updates);
41559
41560 if (!cookie) {
41561 - fscache_stat(&fscache_n_updates_null);
41562 + fscache_stat_unchecked(&fscache_n_updates_null);
41563 _leave(" [no cookie]");
41564 return;
41565 }
41566 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
41567 struct fscache_object *object;
41568 unsigned long event;
41569
41570 - fscache_stat(&fscache_n_relinquishes);
41571 + fscache_stat_unchecked(&fscache_n_relinquishes);
41572 if (retire)
41573 - fscache_stat(&fscache_n_relinquishes_retire);
41574 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
41575
41576 if (!cookie) {
41577 - fscache_stat(&fscache_n_relinquishes_null);
41578 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
41579 _leave(" [no cookie]");
41580 return;
41581 }
41582 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
41583
41584 /* wait for the cookie to finish being instantiated (or to fail) */
41585 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
41586 - fscache_stat(&fscache_n_relinquishes_waitcrt);
41587 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
41588 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
41589 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
41590 }
41591 diff -urNp linux-2.6.32.45/fs/fscache/internal.h linux-2.6.32.45/fs/fscache/internal.h
41592 --- linux-2.6.32.45/fs/fscache/internal.h 2011-03-27 14:31:47.000000000 -0400
41593 +++ linux-2.6.32.45/fs/fscache/internal.h 2011-05-04 17:56:28.000000000 -0400
41594 @@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
41595 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
41596 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
41597
41598 -extern atomic_t fscache_n_op_pend;
41599 -extern atomic_t fscache_n_op_run;
41600 -extern atomic_t fscache_n_op_enqueue;
41601 -extern atomic_t fscache_n_op_deferred_release;
41602 -extern atomic_t fscache_n_op_release;
41603 -extern atomic_t fscache_n_op_gc;
41604 -extern atomic_t fscache_n_op_cancelled;
41605 -extern atomic_t fscache_n_op_rejected;
41606 -
41607 -extern atomic_t fscache_n_attr_changed;
41608 -extern atomic_t fscache_n_attr_changed_ok;
41609 -extern atomic_t fscache_n_attr_changed_nobufs;
41610 -extern atomic_t fscache_n_attr_changed_nomem;
41611 -extern atomic_t fscache_n_attr_changed_calls;
41612 -
41613 -extern atomic_t fscache_n_allocs;
41614 -extern atomic_t fscache_n_allocs_ok;
41615 -extern atomic_t fscache_n_allocs_wait;
41616 -extern atomic_t fscache_n_allocs_nobufs;
41617 -extern atomic_t fscache_n_allocs_intr;
41618 -extern atomic_t fscache_n_allocs_object_dead;
41619 -extern atomic_t fscache_n_alloc_ops;
41620 -extern atomic_t fscache_n_alloc_op_waits;
41621 -
41622 -extern atomic_t fscache_n_retrievals;
41623 -extern atomic_t fscache_n_retrievals_ok;
41624 -extern atomic_t fscache_n_retrievals_wait;
41625 -extern atomic_t fscache_n_retrievals_nodata;
41626 -extern atomic_t fscache_n_retrievals_nobufs;
41627 -extern atomic_t fscache_n_retrievals_intr;
41628 -extern atomic_t fscache_n_retrievals_nomem;
41629 -extern atomic_t fscache_n_retrievals_object_dead;
41630 -extern atomic_t fscache_n_retrieval_ops;
41631 -extern atomic_t fscache_n_retrieval_op_waits;
41632 -
41633 -extern atomic_t fscache_n_stores;
41634 -extern atomic_t fscache_n_stores_ok;
41635 -extern atomic_t fscache_n_stores_again;
41636 -extern atomic_t fscache_n_stores_nobufs;
41637 -extern atomic_t fscache_n_stores_oom;
41638 -extern atomic_t fscache_n_store_ops;
41639 -extern atomic_t fscache_n_store_calls;
41640 -extern atomic_t fscache_n_store_pages;
41641 -extern atomic_t fscache_n_store_radix_deletes;
41642 -extern atomic_t fscache_n_store_pages_over_limit;
41643 -
41644 -extern atomic_t fscache_n_store_vmscan_not_storing;
41645 -extern atomic_t fscache_n_store_vmscan_gone;
41646 -extern atomic_t fscache_n_store_vmscan_busy;
41647 -extern atomic_t fscache_n_store_vmscan_cancelled;
41648 -
41649 -extern atomic_t fscache_n_marks;
41650 -extern atomic_t fscache_n_uncaches;
41651 -
41652 -extern atomic_t fscache_n_acquires;
41653 -extern atomic_t fscache_n_acquires_null;
41654 -extern atomic_t fscache_n_acquires_no_cache;
41655 -extern atomic_t fscache_n_acquires_ok;
41656 -extern atomic_t fscache_n_acquires_nobufs;
41657 -extern atomic_t fscache_n_acquires_oom;
41658 -
41659 -extern atomic_t fscache_n_updates;
41660 -extern atomic_t fscache_n_updates_null;
41661 -extern atomic_t fscache_n_updates_run;
41662 -
41663 -extern atomic_t fscache_n_relinquishes;
41664 -extern atomic_t fscache_n_relinquishes_null;
41665 -extern atomic_t fscache_n_relinquishes_waitcrt;
41666 -extern atomic_t fscache_n_relinquishes_retire;
41667 -
41668 -extern atomic_t fscache_n_cookie_index;
41669 -extern atomic_t fscache_n_cookie_data;
41670 -extern atomic_t fscache_n_cookie_special;
41671 -
41672 -extern atomic_t fscache_n_object_alloc;
41673 -extern atomic_t fscache_n_object_no_alloc;
41674 -extern atomic_t fscache_n_object_lookups;
41675 -extern atomic_t fscache_n_object_lookups_negative;
41676 -extern atomic_t fscache_n_object_lookups_positive;
41677 -extern atomic_t fscache_n_object_lookups_timed_out;
41678 -extern atomic_t fscache_n_object_created;
41679 -extern atomic_t fscache_n_object_avail;
41680 -extern atomic_t fscache_n_object_dead;
41681 -
41682 -extern atomic_t fscache_n_checkaux_none;
41683 -extern atomic_t fscache_n_checkaux_okay;
41684 -extern atomic_t fscache_n_checkaux_update;
41685 -extern atomic_t fscache_n_checkaux_obsolete;
41686 +extern atomic_unchecked_t fscache_n_op_pend;
41687 +extern atomic_unchecked_t fscache_n_op_run;
41688 +extern atomic_unchecked_t fscache_n_op_enqueue;
41689 +extern atomic_unchecked_t fscache_n_op_deferred_release;
41690 +extern atomic_unchecked_t fscache_n_op_release;
41691 +extern atomic_unchecked_t fscache_n_op_gc;
41692 +extern atomic_unchecked_t fscache_n_op_cancelled;
41693 +extern atomic_unchecked_t fscache_n_op_rejected;
41694 +
41695 +extern atomic_unchecked_t fscache_n_attr_changed;
41696 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
41697 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
41698 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
41699 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
41700 +
41701 +extern atomic_unchecked_t fscache_n_allocs;
41702 +extern atomic_unchecked_t fscache_n_allocs_ok;
41703 +extern atomic_unchecked_t fscache_n_allocs_wait;
41704 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
41705 +extern atomic_unchecked_t fscache_n_allocs_intr;
41706 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
41707 +extern atomic_unchecked_t fscache_n_alloc_ops;
41708 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
41709 +
41710 +extern atomic_unchecked_t fscache_n_retrievals;
41711 +extern atomic_unchecked_t fscache_n_retrievals_ok;
41712 +extern atomic_unchecked_t fscache_n_retrievals_wait;
41713 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
41714 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
41715 +extern atomic_unchecked_t fscache_n_retrievals_intr;
41716 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
41717 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
41718 +extern atomic_unchecked_t fscache_n_retrieval_ops;
41719 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
41720 +
41721 +extern atomic_unchecked_t fscache_n_stores;
41722 +extern atomic_unchecked_t fscache_n_stores_ok;
41723 +extern atomic_unchecked_t fscache_n_stores_again;
41724 +extern atomic_unchecked_t fscache_n_stores_nobufs;
41725 +extern atomic_unchecked_t fscache_n_stores_oom;
41726 +extern atomic_unchecked_t fscache_n_store_ops;
41727 +extern atomic_unchecked_t fscache_n_store_calls;
41728 +extern atomic_unchecked_t fscache_n_store_pages;
41729 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
41730 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
41731 +
41732 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
41733 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
41734 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
41735 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
41736 +
41737 +extern atomic_unchecked_t fscache_n_marks;
41738 +extern atomic_unchecked_t fscache_n_uncaches;
41739 +
41740 +extern atomic_unchecked_t fscache_n_acquires;
41741 +extern atomic_unchecked_t fscache_n_acquires_null;
41742 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
41743 +extern atomic_unchecked_t fscache_n_acquires_ok;
41744 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
41745 +extern atomic_unchecked_t fscache_n_acquires_oom;
41746 +
41747 +extern atomic_unchecked_t fscache_n_updates;
41748 +extern atomic_unchecked_t fscache_n_updates_null;
41749 +extern atomic_unchecked_t fscache_n_updates_run;
41750 +
41751 +extern atomic_unchecked_t fscache_n_relinquishes;
41752 +extern atomic_unchecked_t fscache_n_relinquishes_null;
41753 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
41754 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
41755 +
41756 +extern atomic_unchecked_t fscache_n_cookie_index;
41757 +extern atomic_unchecked_t fscache_n_cookie_data;
41758 +extern atomic_unchecked_t fscache_n_cookie_special;
41759 +
41760 +extern atomic_unchecked_t fscache_n_object_alloc;
41761 +extern atomic_unchecked_t fscache_n_object_no_alloc;
41762 +extern atomic_unchecked_t fscache_n_object_lookups;
41763 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
41764 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
41765 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
41766 +extern atomic_unchecked_t fscache_n_object_created;
41767 +extern atomic_unchecked_t fscache_n_object_avail;
41768 +extern atomic_unchecked_t fscache_n_object_dead;
41769 +
41770 +extern atomic_unchecked_t fscache_n_checkaux_none;
41771 +extern atomic_unchecked_t fscache_n_checkaux_okay;
41772 +extern atomic_unchecked_t fscache_n_checkaux_update;
41773 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
41774
41775 extern atomic_t fscache_n_cop_alloc_object;
41776 extern atomic_t fscache_n_cop_lookup_object;
41777 @@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t
41778 atomic_inc(stat);
41779 }
41780
41781 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
41782 +{
41783 + atomic_inc_unchecked(stat);
41784 +}
41785 +
41786 static inline void fscache_stat_d(atomic_t *stat)
41787 {
41788 atomic_dec(stat);
41789 @@ -259,6 +264,7 @@ extern const struct file_operations fsca
41790
41791 #define __fscache_stat(stat) (NULL)
41792 #define fscache_stat(stat) do {} while (0)
41793 +#define fscache_stat_unchecked(stat) do {} while (0)
41794 #define fscache_stat_d(stat) do {} while (0)
41795 #endif
41796
41797 diff -urNp linux-2.6.32.45/fs/fscache/object.c linux-2.6.32.45/fs/fscache/object.c
41798 --- linux-2.6.32.45/fs/fscache/object.c 2011-03-27 14:31:47.000000000 -0400
41799 +++ linux-2.6.32.45/fs/fscache/object.c 2011-05-04 17:56:28.000000000 -0400
41800 @@ -144,7 +144,7 @@ static void fscache_object_state_machine
41801 /* update the object metadata on disk */
41802 case FSCACHE_OBJECT_UPDATING:
41803 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
41804 - fscache_stat(&fscache_n_updates_run);
41805 + fscache_stat_unchecked(&fscache_n_updates_run);
41806 fscache_stat(&fscache_n_cop_update_object);
41807 object->cache->ops->update_object(object);
41808 fscache_stat_d(&fscache_n_cop_update_object);
41809 @@ -233,7 +233,7 @@ static void fscache_object_state_machine
41810 spin_lock(&object->lock);
41811 object->state = FSCACHE_OBJECT_DEAD;
41812 spin_unlock(&object->lock);
41813 - fscache_stat(&fscache_n_object_dead);
41814 + fscache_stat_unchecked(&fscache_n_object_dead);
41815 goto terminal_transit;
41816
41817 /* handle the parent cache of this object being withdrawn from
41818 @@ -248,7 +248,7 @@ static void fscache_object_state_machine
41819 spin_lock(&object->lock);
41820 object->state = FSCACHE_OBJECT_DEAD;
41821 spin_unlock(&object->lock);
41822 - fscache_stat(&fscache_n_object_dead);
41823 + fscache_stat_unchecked(&fscache_n_object_dead);
41824 goto terminal_transit;
41825
41826 /* complain about the object being woken up once it is
41827 @@ -492,7 +492,7 @@ static void fscache_lookup_object(struct
41828 parent->cookie->def->name, cookie->def->name,
41829 object->cache->tag->name);
41830
41831 - fscache_stat(&fscache_n_object_lookups);
41832 + fscache_stat_unchecked(&fscache_n_object_lookups);
41833 fscache_stat(&fscache_n_cop_lookup_object);
41834 ret = object->cache->ops->lookup_object(object);
41835 fscache_stat_d(&fscache_n_cop_lookup_object);
41836 @@ -503,7 +503,7 @@ static void fscache_lookup_object(struct
41837 if (ret == -ETIMEDOUT) {
41838 /* probably stuck behind another object, so move this one to
41839 * the back of the queue */
41840 - fscache_stat(&fscache_n_object_lookups_timed_out);
41841 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
41842 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
41843 }
41844
41845 @@ -526,7 +526,7 @@ void fscache_object_lookup_negative(stru
41846
41847 spin_lock(&object->lock);
41848 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
41849 - fscache_stat(&fscache_n_object_lookups_negative);
41850 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
41851
41852 /* transit here to allow write requests to begin stacking up
41853 * and read requests to begin returning ENODATA */
41854 @@ -572,7 +572,7 @@ void fscache_obtained_object(struct fsca
41855 * result, in which case there may be data available */
41856 spin_lock(&object->lock);
41857 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
41858 - fscache_stat(&fscache_n_object_lookups_positive);
41859 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
41860
41861 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
41862
41863 @@ -586,7 +586,7 @@ void fscache_obtained_object(struct fsca
41864 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
41865 } else {
41866 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
41867 - fscache_stat(&fscache_n_object_created);
41868 + fscache_stat_unchecked(&fscache_n_object_created);
41869
41870 object->state = FSCACHE_OBJECT_AVAILABLE;
41871 spin_unlock(&object->lock);
41872 @@ -633,7 +633,7 @@ static void fscache_object_available(str
41873 fscache_enqueue_dependents(object);
41874
41875 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
41876 - fscache_stat(&fscache_n_object_avail);
41877 + fscache_stat_unchecked(&fscache_n_object_avail);
41878
41879 _leave("");
41880 }
41881 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
41882 enum fscache_checkaux result;
41883
41884 if (!object->cookie->def->check_aux) {
41885 - fscache_stat(&fscache_n_checkaux_none);
41886 + fscache_stat_unchecked(&fscache_n_checkaux_none);
41887 return FSCACHE_CHECKAUX_OKAY;
41888 }
41889
41890 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
41891 switch (result) {
41892 /* entry okay as is */
41893 case FSCACHE_CHECKAUX_OKAY:
41894 - fscache_stat(&fscache_n_checkaux_okay);
41895 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
41896 break;
41897
41898 /* entry requires update */
41899 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
41900 - fscache_stat(&fscache_n_checkaux_update);
41901 + fscache_stat_unchecked(&fscache_n_checkaux_update);
41902 break;
41903
41904 /* entry requires deletion */
41905 case FSCACHE_CHECKAUX_OBSOLETE:
41906 - fscache_stat(&fscache_n_checkaux_obsolete);
41907 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
41908 break;
41909
41910 default:
41911 diff -urNp linux-2.6.32.45/fs/fscache/operation.c linux-2.6.32.45/fs/fscache/operation.c
41912 --- linux-2.6.32.45/fs/fscache/operation.c 2011-03-27 14:31:47.000000000 -0400
41913 +++ linux-2.6.32.45/fs/fscache/operation.c 2011-05-04 17:56:28.000000000 -0400
41914 @@ -16,7 +16,7 @@
41915 #include <linux/seq_file.h>
41916 #include "internal.h"
41917
41918 -atomic_t fscache_op_debug_id;
41919 +atomic_unchecked_t fscache_op_debug_id;
41920 EXPORT_SYMBOL(fscache_op_debug_id);
41921
41922 /**
41923 @@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fs
41924 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
41925 ASSERTCMP(atomic_read(&op->usage), >, 0);
41926
41927 - fscache_stat(&fscache_n_op_enqueue);
41928 + fscache_stat_unchecked(&fscache_n_op_enqueue);
41929 switch (op->flags & FSCACHE_OP_TYPE) {
41930 case FSCACHE_OP_FAST:
41931 _debug("queue fast");
41932 @@ -76,7 +76,7 @@ static void fscache_run_op(struct fscach
41933 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
41934 if (op->processor)
41935 fscache_enqueue_operation(op);
41936 - fscache_stat(&fscache_n_op_run);
41937 + fscache_stat_unchecked(&fscache_n_op_run);
41938 }
41939
41940 /*
41941 @@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct f
41942 if (object->n_ops > 0) {
41943 atomic_inc(&op->usage);
41944 list_add_tail(&op->pend_link, &object->pending_ops);
41945 - fscache_stat(&fscache_n_op_pend);
41946 + fscache_stat_unchecked(&fscache_n_op_pend);
41947 } else if (!list_empty(&object->pending_ops)) {
41948 atomic_inc(&op->usage);
41949 list_add_tail(&op->pend_link, &object->pending_ops);
41950 - fscache_stat(&fscache_n_op_pend);
41951 + fscache_stat_unchecked(&fscache_n_op_pend);
41952 fscache_start_operations(object);
41953 } else {
41954 ASSERTCMP(object->n_in_progress, ==, 0);
41955 @@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct f
41956 object->n_exclusive++; /* reads and writes must wait */
41957 atomic_inc(&op->usage);
41958 list_add_tail(&op->pend_link, &object->pending_ops);
41959 - fscache_stat(&fscache_n_op_pend);
41960 + fscache_stat_unchecked(&fscache_n_op_pend);
41961 ret = 0;
41962 } else {
41963 /* not allowed to submit ops in any other state */
41964 @@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_obj
41965 if (object->n_exclusive > 0) {
41966 atomic_inc(&op->usage);
41967 list_add_tail(&op->pend_link, &object->pending_ops);
41968 - fscache_stat(&fscache_n_op_pend);
41969 + fscache_stat_unchecked(&fscache_n_op_pend);
41970 } else if (!list_empty(&object->pending_ops)) {
41971 atomic_inc(&op->usage);
41972 list_add_tail(&op->pend_link, &object->pending_ops);
41973 - fscache_stat(&fscache_n_op_pend);
41974 + fscache_stat_unchecked(&fscache_n_op_pend);
41975 fscache_start_operations(object);
41976 } else {
41977 ASSERTCMP(object->n_exclusive, ==, 0);
41978 @@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_obj
41979 object->n_ops++;
41980 atomic_inc(&op->usage);
41981 list_add_tail(&op->pend_link, &object->pending_ops);
41982 - fscache_stat(&fscache_n_op_pend);
41983 + fscache_stat_unchecked(&fscache_n_op_pend);
41984 ret = 0;
41985 } else if (object->state == FSCACHE_OBJECT_DYING ||
41986 object->state == FSCACHE_OBJECT_LC_DYING ||
41987 object->state == FSCACHE_OBJECT_WITHDRAWING) {
41988 - fscache_stat(&fscache_n_op_rejected);
41989 + fscache_stat_unchecked(&fscache_n_op_rejected);
41990 ret = -ENOBUFS;
41991 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
41992 fscache_report_unexpected_submission(object, op, ostate);
41993 @@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_ope
41994
41995 ret = -EBUSY;
41996 if (!list_empty(&op->pend_link)) {
41997 - fscache_stat(&fscache_n_op_cancelled);
41998 + fscache_stat_unchecked(&fscache_n_op_cancelled);
41999 list_del_init(&op->pend_link);
42000 object->n_ops--;
42001 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
42002 @@ -344,7 +344,7 @@ void fscache_put_operation(struct fscach
42003 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
42004 BUG();
42005
42006 - fscache_stat(&fscache_n_op_release);
42007 + fscache_stat_unchecked(&fscache_n_op_release);
42008
42009 if (op->release) {
42010 op->release(op);
42011 @@ -361,7 +361,7 @@ void fscache_put_operation(struct fscach
42012 * lock, and defer it otherwise */
42013 if (!spin_trylock(&object->lock)) {
42014 _debug("defer put");
42015 - fscache_stat(&fscache_n_op_deferred_release);
42016 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
42017
42018 cache = object->cache;
42019 spin_lock(&cache->op_gc_list_lock);
42020 @@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_st
42021
42022 _debug("GC DEFERRED REL OBJ%x OP%x",
42023 object->debug_id, op->debug_id);
42024 - fscache_stat(&fscache_n_op_gc);
42025 + fscache_stat_unchecked(&fscache_n_op_gc);
42026
42027 ASSERTCMP(atomic_read(&op->usage), ==, 0);
42028
42029 diff -urNp linux-2.6.32.45/fs/fscache/page.c linux-2.6.32.45/fs/fscache/page.c
42030 --- linux-2.6.32.45/fs/fscache/page.c 2011-03-27 14:31:47.000000000 -0400
42031 +++ linux-2.6.32.45/fs/fscache/page.c 2011-05-04 17:56:28.000000000 -0400
42032 @@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct
42033 val = radix_tree_lookup(&cookie->stores, page->index);
42034 if (!val) {
42035 rcu_read_unlock();
42036 - fscache_stat(&fscache_n_store_vmscan_not_storing);
42037 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
42038 __fscache_uncache_page(cookie, page);
42039 return true;
42040 }
42041 @@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct
42042 spin_unlock(&cookie->stores_lock);
42043
42044 if (xpage) {
42045 - fscache_stat(&fscache_n_store_vmscan_cancelled);
42046 - fscache_stat(&fscache_n_store_radix_deletes);
42047 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
42048 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
42049 ASSERTCMP(xpage, ==, page);
42050 } else {
42051 - fscache_stat(&fscache_n_store_vmscan_gone);
42052 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
42053 }
42054
42055 wake_up_bit(&cookie->flags, 0);
42056 @@ -106,7 +106,7 @@ page_busy:
42057 /* we might want to wait here, but that could deadlock the allocator as
42058 * the slow-work threads writing to the cache may all end up sleeping
42059 * on memory allocation */
42060 - fscache_stat(&fscache_n_store_vmscan_busy);
42061 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
42062 return false;
42063 }
42064 EXPORT_SYMBOL(__fscache_maybe_release_page);
42065 @@ -130,7 +130,7 @@ static void fscache_end_page_write(struc
42066 FSCACHE_COOKIE_STORING_TAG);
42067 if (!radix_tree_tag_get(&cookie->stores, page->index,
42068 FSCACHE_COOKIE_PENDING_TAG)) {
42069 - fscache_stat(&fscache_n_store_radix_deletes);
42070 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
42071 xpage = radix_tree_delete(&cookie->stores, page->index);
42072 }
42073 spin_unlock(&cookie->stores_lock);
42074 @@ -151,7 +151,7 @@ static void fscache_attr_changed_op(stru
42075
42076 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
42077
42078 - fscache_stat(&fscache_n_attr_changed_calls);
42079 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
42080
42081 if (fscache_object_is_active(object)) {
42082 fscache_set_op_state(op, "CallFS");
42083 @@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscach
42084
42085 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42086
42087 - fscache_stat(&fscache_n_attr_changed);
42088 + fscache_stat_unchecked(&fscache_n_attr_changed);
42089
42090 op = kzalloc(sizeof(*op), GFP_KERNEL);
42091 if (!op) {
42092 - fscache_stat(&fscache_n_attr_changed_nomem);
42093 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
42094 _leave(" = -ENOMEM");
42095 return -ENOMEM;
42096 }
42097 @@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscach
42098 if (fscache_submit_exclusive_op(object, op) < 0)
42099 goto nobufs;
42100 spin_unlock(&cookie->lock);
42101 - fscache_stat(&fscache_n_attr_changed_ok);
42102 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
42103 fscache_put_operation(op);
42104 _leave(" = 0");
42105 return 0;
42106 @@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscach
42107 nobufs:
42108 spin_unlock(&cookie->lock);
42109 kfree(op);
42110 - fscache_stat(&fscache_n_attr_changed_nobufs);
42111 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
42112 _leave(" = %d", -ENOBUFS);
42113 return -ENOBUFS;
42114 }
42115 @@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache
42116 /* allocate a retrieval operation and attempt to submit it */
42117 op = kzalloc(sizeof(*op), GFP_NOIO);
42118 if (!op) {
42119 - fscache_stat(&fscache_n_retrievals_nomem);
42120 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42121 return NULL;
42122 }
42123
42124 @@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_loo
42125 return 0;
42126 }
42127
42128 - fscache_stat(&fscache_n_retrievals_wait);
42129 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
42130
42131 jif = jiffies;
42132 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
42133 fscache_wait_bit_interruptible,
42134 TASK_INTERRUPTIBLE) != 0) {
42135 - fscache_stat(&fscache_n_retrievals_intr);
42136 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
42137 _leave(" = -ERESTARTSYS");
42138 return -ERESTARTSYS;
42139 }
42140 @@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_loo
42141 */
42142 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
42143 struct fscache_retrieval *op,
42144 - atomic_t *stat_op_waits,
42145 - atomic_t *stat_object_dead)
42146 + atomic_unchecked_t *stat_op_waits,
42147 + atomic_unchecked_t *stat_object_dead)
42148 {
42149 int ret;
42150
42151 @@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_ac
42152 goto check_if_dead;
42153
42154 _debug(">>> WT");
42155 - fscache_stat(stat_op_waits);
42156 + fscache_stat_unchecked(stat_op_waits);
42157 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
42158 fscache_wait_bit_interruptible,
42159 TASK_INTERRUPTIBLE) < 0) {
42160 @@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_ac
42161
42162 check_if_dead:
42163 if (unlikely(fscache_object_is_dead(object))) {
42164 - fscache_stat(stat_object_dead);
42165 + fscache_stat_unchecked(stat_object_dead);
42166 return -ENOBUFS;
42167 }
42168 return 0;
42169 @@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct
42170
42171 _enter("%p,%p,,,", cookie, page);
42172
42173 - fscache_stat(&fscache_n_retrievals);
42174 + fscache_stat_unchecked(&fscache_n_retrievals);
42175
42176 if (hlist_empty(&cookie->backing_objects))
42177 goto nobufs;
42178 @@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct
42179 goto nobufs_unlock;
42180 spin_unlock(&cookie->lock);
42181
42182 - fscache_stat(&fscache_n_retrieval_ops);
42183 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
42184
42185 /* pin the netfs read context in case we need to do the actual netfs
42186 * read because we've encountered a cache read failure */
42187 @@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct
42188
42189 error:
42190 if (ret == -ENOMEM)
42191 - fscache_stat(&fscache_n_retrievals_nomem);
42192 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42193 else if (ret == -ERESTARTSYS)
42194 - fscache_stat(&fscache_n_retrievals_intr);
42195 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
42196 else if (ret == -ENODATA)
42197 - fscache_stat(&fscache_n_retrievals_nodata);
42198 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
42199 else if (ret < 0)
42200 - fscache_stat(&fscache_n_retrievals_nobufs);
42201 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42202 else
42203 - fscache_stat(&fscache_n_retrievals_ok);
42204 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
42205
42206 fscache_put_retrieval(op);
42207 _leave(" = %d", ret);
42208 @@ -453,7 +453,7 @@ nobufs_unlock:
42209 spin_unlock(&cookie->lock);
42210 kfree(op);
42211 nobufs:
42212 - fscache_stat(&fscache_n_retrievals_nobufs);
42213 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42214 _leave(" = -ENOBUFS");
42215 return -ENOBUFS;
42216 }
42217 @@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct
42218
42219 _enter("%p,,%d,,,", cookie, *nr_pages);
42220
42221 - fscache_stat(&fscache_n_retrievals);
42222 + fscache_stat_unchecked(&fscache_n_retrievals);
42223
42224 if (hlist_empty(&cookie->backing_objects))
42225 goto nobufs;
42226 @@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct
42227 goto nobufs_unlock;
42228 spin_unlock(&cookie->lock);
42229
42230 - fscache_stat(&fscache_n_retrieval_ops);
42231 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
42232
42233 /* pin the netfs read context in case we need to do the actual netfs
42234 * read because we've encountered a cache read failure */
42235 @@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct
42236
42237 error:
42238 if (ret == -ENOMEM)
42239 - fscache_stat(&fscache_n_retrievals_nomem);
42240 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42241 else if (ret == -ERESTARTSYS)
42242 - fscache_stat(&fscache_n_retrievals_intr);
42243 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
42244 else if (ret == -ENODATA)
42245 - fscache_stat(&fscache_n_retrievals_nodata);
42246 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
42247 else if (ret < 0)
42248 - fscache_stat(&fscache_n_retrievals_nobufs);
42249 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42250 else
42251 - fscache_stat(&fscache_n_retrievals_ok);
42252 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
42253
42254 fscache_put_retrieval(op);
42255 _leave(" = %d", ret);
42256 @@ -570,7 +570,7 @@ nobufs_unlock:
42257 spin_unlock(&cookie->lock);
42258 kfree(op);
42259 nobufs:
42260 - fscache_stat(&fscache_n_retrievals_nobufs);
42261 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42262 _leave(" = -ENOBUFS");
42263 return -ENOBUFS;
42264 }
42265 @@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_
42266
42267 _enter("%p,%p,,,", cookie, page);
42268
42269 - fscache_stat(&fscache_n_allocs);
42270 + fscache_stat_unchecked(&fscache_n_allocs);
42271
42272 if (hlist_empty(&cookie->backing_objects))
42273 goto nobufs;
42274 @@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_
42275 goto nobufs_unlock;
42276 spin_unlock(&cookie->lock);
42277
42278 - fscache_stat(&fscache_n_alloc_ops);
42279 + fscache_stat_unchecked(&fscache_n_alloc_ops);
42280
42281 ret = fscache_wait_for_retrieval_activation(
42282 object, op,
42283 @@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_
42284
42285 error:
42286 if (ret == -ERESTARTSYS)
42287 - fscache_stat(&fscache_n_allocs_intr);
42288 + fscache_stat_unchecked(&fscache_n_allocs_intr);
42289 else if (ret < 0)
42290 - fscache_stat(&fscache_n_allocs_nobufs);
42291 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
42292 else
42293 - fscache_stat(&fscache_n_allocs_ok);
42294 + fscache_stat_unchecked(&fscache_n_allocs_ok);
42295
42296 fscache_put_retrieval(op);
42297 _leave(" = %d", ret);
42298 @@ -651,7 +651,7 @@ nobufs_unlock:
42299 spin_unlock(&cookie->lock);
42300 kfree(op);
42301 nobufs:
42302 - fscache_stat(&fscache_n_allocs_nobufs);
42303 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
42304 _leave(" = -ENOBUFS");
42305 return -ENOBUFS;
42306 }
42307 @@ -694,7 +694,7 @@ static void fscache_write_op(struct fsca
42308
42309 spin_lock(&cookie->stores_lock);
42310
42311 - fscache_stat(&fscache_n_store_calls);
42312 + fscache_stat_unchecked(&fscache_n_store_calls);
42313
42314 /* find a page to store */
42315 page = NULL;
42316 @@ -705,7 +705,7 @@ static void fscache_write_op(struct fsca
42317 page = results[0];
42318 _debug("gang %d [%lx]", n, page->index);
42319 if (page->index > op->store_limit) {
42320 - fscache_stat(&fscache_n_store_pages_over_limit);
42321 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
42322 goto superseded;
42323 }
42324
42325 @@ -721,7 +721,7 @@ static void fscache_write_op(struct fsca
42326
42327 if (page) {
42328 fscache_set_op_state(&op->op, "Store");
42329 - fscache_stat(&fscache_n_store_pages);
42330 + fscache_stat_unchecked(&fscache_n_store_pages);
42331 fscache_stat(&fscache_n_cop_write_page);
42332 ret = object->cache->ops->write_page(op, page);
42333 fscache_stat_d(&fscache_n_cop_write_page);
42334 @@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_
42335 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42336 ASSERT(PageFsCache(page));
42337
42338 - fscache_stat(&fscache_n_stores);
42339 + fscache_stat_unchecked(&fscache_n_stores);
42340
42341 op = kzalloc(sizeof(*op), GFP_NOIO);
42342 if (!op)
42343 @@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_
42344 spin_unlock(&cookie->stores_lock);
42345 spin_unlock(&object->lock);
42346
42347 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
42348 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
42349 op->store_limit = object->store_limit;
42350
42351 if (fscache_submit_op(object, &op->op) < 0)
42352 @@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_
42353
42354 spin_unlock(&cookie->lock);
42355 radix_tree_preload_end();
42356 - fscache_stat(&fscache_n_store_ops);
42357 - fscache_stat(&fscache_n_stores_ok);
42358 + fscache_stat_unchecked(&fscache_n_store_ops);
42359 + fscache_stat_unchecked(&fscache_n_stores_ok);
42360
42361 /* the slow work queue now carries its own ref on the object */
42362 fscache_put_operation(&op->op);
42363 @@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_
42364 return 0;
42365
42366 already_queued:
42367 - fscache_stat(&fscache_n_stores_again);
42368 + fscache_stat_unchecked(&fscache_n_stores_again);
42369 already_pending:
42370 spin_unlock(&cookie->stores_lock);
42371 spin_unlock(&object->lock);
42372 spin_unlock(&cookie->lock);
42373 radix_tree_preload_end();
42374 kfree(op);
42375 - fscache_stat(&fscache_n_stores_ok);
42376 + fscache_stat_unchecked(&fscache_n_stores_ok);
42377 _leave(" = 0");
42378 return 0;
42379
42380 @@ -886,14 +886,14 @@ nobufs:
42381 spin_unlock(&cookie->lock);
42382 radix_tree_preload_end();
42383 kfree(op);
42384 - fscache_stat(&fscache_n_stores_nobufs);
42385 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
42386 _leave(" = -ENOBUFS");
42387 return -ENOBUFS;
42388
42389 nomem_free:
42390 kfree(op);
42391 nomem:
42392 - fscache_stat(&fscache_n_stores_oom);
42393 + fscache_stat_unchecked(&fscache_n_stores_oom);
42394 _leave(" = -ENOMEM");
42395 return -ENOMEM;
42396 }
42397 @@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscac
42398 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42399 ASSERTCMP(page, !=, NULL);
42400
42401 - fscache_stat(&fscache_n_uncaches);
42402 + fscache_stat_unchecked(&fscache_n_uncaches);
42403
42404 /* cache withdrawal may beat us to it */
42405 if (!PageFsCache(page))
42406 @@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fs
42407 unsigned long loop;
42408
42409 #ifdef CONFIG_FSCACHE_STATS
42410 - atomic_add(pagevec->nr, &fscache_n_marks);
42411 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
42412 #endif
42413
42414 for (loop = 0; loop < pagevec->nr; loop++) {
42415 diff -urNp linux-2.6.32.45/fs/fscache/stats.c linux-2.6.32.45/fs/fscache/stats.c
42416 --- linux-2.6.32.45/fs/fscache/stats.c 2011-03-27 14:31:47.000000000 -0400
42417 +++ linux-2.6.32.45/fs/fscache/stats.c 2011-05-04 17:56:28.000000000 -0400
42418 @@ -18,95 +18,95 @@
42419 /*
42420 * operation counters
42421 */
42422 -atomic_t fscache_n_op_pend;
42423 -atomic_t fscache_n_op_run;
42424 -atomic_t fscache_n_op_enqueue;
42425 -atomic_t fscache_n_op_requeue;
42426 -atomic_t fscache_n_op_deferred_release;
42427 -atomic_t fscache_n_op_release;
42428 -atomic_t fscache_n_op_gc;
42429 -atomic_t fscache_n_op_cancelled;
42430 -atomic_t fscache_n_op_rejected;
42431 -
42432 -atomic_t fscache_n_attr_changed;
42433 -atomic_t fscache_n_attr_changed_ok;
42434 -atomic_t fscache_n_attr_changed_nobufs;
42435 -atomic_t fscache_n_attr_changed_nomem;
42436 -atomic_t fscache_n_attr_changed_calls;
42437 -
42438 -atomic_t fscache_n_allocs;
42439 -atomic_t fscache_n_allocs_ok;
42440 -atomic_t fscache_n_allocs_wait;
42441 -atomic_t fscache_n_allocs_nobufs;
42442 -atomic_t fscache_n_allocs_intr;
42443 -atomic_t fscache_n_allocs_object_dead;
42444 -atomic_t fscache_n_alloc_ops;
42445 -atomic_t fscache_n_alloc_op_waits;
42446 -
42447 -atomic_t fscache_n_retrievals;
42448 -atomic_t fscache_n_retrievals_ok;
42449 -atomic_t fscache_n_retrievals_wait;
42450 -atomic_t fscache_n_retrievals_nodata;
42451 -atomic_t fscache_n_retrievals_nobufs;
42452 -atomic_t fscache_n_retrievals_intr;
42453 -atomic_t fscache_n_retrievals_nomem;
42454 -atomic_t fscache_n_retrievals_object_dead;
42455 -atomic_t fscache_n_retrieval_ops;
42456 -atomic_t fscache_n_retrieval_op_waits;
42457 -
42458 -atomic_t fscache_n_stores;
42459 -atomic_t fscache_n_stores_ok;
42460 -atomic_t fscache_n_stores_again;
42461 -atomic_t fscache_n_stores_nobufs;
42462 -atomic_t fscache_n_stores_oom;
42463 -atomic_t fscache_n_store_ops;
42464 -atomic_t fscache_n_store_calls;
42465 -atomic_t fscache_n_store_pages;
42466 -atomic_t fscache_n_store_radix_deletes;
42467 -atomic_t fscache_n_store_pages_over_limit;
42468 -
42469 -atomic_t fscache_n_store_vmscan_not_storing;
42470 -atomic_t fscache_n_store_vmscan_gone;
42471 -atomic_t fscache_n_store_vmscan_busy;
42472 -atomic_t fscache_n_store_vmscan_cancelled;
42473 -
42474 -atomic_t fscache_n_marks;
42475 -atomic_t fscache_n_uncaches;
42476 -
42477 -atomic_t fscache_n_acquires;
42478 -atomic_t fscache_n_acquires_null;
42479 -atomic_t fscache_n_acquires_no_cache;
42480 -atomic_t fscache_n_acquires_ok;
42481 -atomic_t fscache_n_acquires_nobufs;
42482 -atomic_t fscache_n_acquires_oom;
42483 -
42484 -atomic_t fscache_n_updates;
42485 -atomic_t fscache_n_updates_null;
42486 -atomic_t fscache_n_updates_run;
42487 -
42488 -atomic_t fscache_n_relinquishes;
42489 -atomic_t fscache_n_relinquishes_null;
42490 -atomic_t fscache_n_relinquishes_waitcrt;
42491 -atomic_t fscache_n_relinquishes_retire;
42492 -
42493 -atomic_t fscache_n_cookie_index;
42494 -atomic_t fscache_n_cookie_data;
42495 -atomic_t fscache_n_cookie_special;
42496 -
42497 -atomic_t fscache_n_object_alloc;
42498 -atomic_t fscache_n_object_no_alloc;
42499 -atomic_t fscache_n_object_lookups;
42500 -atomic_t fscache_n_object_lookups_negative;
42501 -atomic_t fscache_n_object_lookups_positive;
42502 -atomic_t fscache_n_object_lookups_timed_out;
42503 -atomic_t fscache_n_object_created;
42504 -atomic_t fscache_n_object_avail;
42505 -atomic_t fscache_n_object_dead;
42506 -
42507 -atomic_t fscache_n_checkaux_none;
42508 -atomic_t fscache_n_checkaux_okay;
42509 -atomic_t fscache_n_checkaux_update;
42510 -atomic_t fscache_n_checkaux_obsolete;
42511 +atomic_unchecked_t fscache_n_op_pend;
42512 +atomic_unchecked_t fscache_n_op_run;
42513 +atomic_unchecked_t fscache_n_op_enqueue;
42514 +atomic_unchecked_t fscache_n_op_requeue;
42515 +atomic_unchecked_t fscache_n_op_deferred_release;
42516 +atomic_unchecked_t fscache_n_op_release;
42517 +atomic_unchecked_t fscache_n_op_gc;
42518 +atomic_unchecked_t fscache_n_op_cancelled;
42519 +atomic_unchecked_t fscache_n_op_rejected;
42520 +
42521 +atomic_unchecked_t fscache_n_attr_changed;
42522 +atomic_unchecked_t fscache_n_attr_changed_ok;
42523 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
42524 +atomic_unchecked_t fscache_n_attr_changed_nomem;
42525 +atomic_unchecked_t fscache_n_attr_changed_calls;
42526 +
42527 +atomic_unchecked_t fscache_n_allocs;
42528 +atomic_unchecked_t fscache_n_allocs_ok;
42529 +atomic_unchecked_t fscache_n_allocs_wait;
42530 +atomic_unchecked_t fscache_n_allocs_nobufs;
42531 +atomic_unchecked_t fscache_n_allocs_intr;
42532 +atomic_unchecked_t fscache_n_allocs_object_dead;
42533 +atomic_unchecked_t fscache_n_alloc_ops;
42534 +atomic_unchecked_t fscache_n_alloc_op_waits;
42535 +
42536 +atomic_unchecked_t fscache_n_retrievals;
42537 +atomic_unchecked_t fscache_n_retrievals_ok;
42538 +atomic_unchecked_t fscache_n_retrievals_wait;
42539 +atomic_unchecked_t fscache_n_retrievals_nodata;
42540 +atomic_unchecked_t fscache_n_retrievals_nobufs;
42541 +atomic_unchecked_t fscache_n_retrievals_intr;
42542 +atomic_unchecked_t fscache_n_retrievals_nomem;
42543 +atomic_unchecked_t fscache_n_retrievals_object_dead;
42544 +atomic_unchecked_t fscache_n_retrieval_ops;
42545 +atomic_unchecked_t fscache_n_retrieval_op_waits;
42546 +
42547 +atomic_unchecked_t fscache_n_stores;
42548 +atomic_unchecked_t fscache_n_stores_ok;
42549 +atomic_unchecked_t fscache_n_stores_again;
42550 +atomic_unchecked_t fscache_n_stores_nobufs;
42551 +atomic_unchecked_t fscache_n_stores_oom;
42552 +atomic_unchecked_t fscache_n_store_ops;
42553 +atomic_unchecked_t fscache_n_store_calls;
42554 +atomic_unchecked_t fscache_n_store_pages;
42555 +atomic_unchecked_t fscache_n_store_radix_deletes;
42556 +atomic_unchecked_t fscache_n_store_pages_over_limit;
42557 +
42558 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
42559 +atomic_unchecked_t fscache_n_store_vmscan_gone;
42560 +atomic_unchecked_t fscache_n_store_vmscan_busy;
42561 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
42562 +
42563 +atomic_unchecked_t fscache_n_marks;
42564 +atomic_unchecked_t fscache_n_uncaches;
42565 +
42566 +atomic_unchecked_t fscache_n_acquires;
42567 +atomic_unchecked_t fscache_n_acquires_null;
42568 +atomic_unchecked_t fscache_n_acquires_no_cache;
42569 +atomic_unchecked_t fscache_n_acquires_ok;
42570 +atomic_unchecked_t fscache_n_acquires_nobufs;
42571 +atomic_unchecked_t fscache_n_acquires_oom;
42572 +
42573 +atomic_unchecked_t fscache_n_updates;
42574 +atomic_unchecked_t fscache_n_updates_null;
42575 +atomic_unchecked_t fscache_n_updates_run;
42576 +
42577 +atomic_unchecked_t fscache_n_relinquishes;
42578 +atomic_unchecked_t fscache_n_relinquishes_null;
42579 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
42580 +atomic_unchecked_t fscache_n_relinquishes_retire;
42581 +
42582 +atomic_unchecked_t fscache_n_cookie_index;
42583 +atomic_unchecked_t fscache_n_cookie_data;
42584 +atomic_unchecked_t fscache_n_cookie_special;
42585 +
42586 +atomic_unchecked_t fscache_n_object_alloc;
42587 +atomic_unchecked_t fscache_n_object_no_alloc;
42588 +atomic_unchecked_t fscache_n_object_lookups;
42589 +atomic_unchecked_t fscache_n_object_lookups_negative;
42590 +atomic_unchecked_t fscache_n_object_lookups_positive;
42591 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
42592 +atomic_unchecked_t fscache_n_object_created;
42593 +atomic_unchecked_t fscache_n_object_avail;
42594 +atomic_unchecked_t fscache_n_object_dead;
42595 +
42596 +atomic_unchecked_t fscache_n_checkaux_none;
42597 +atomic_unchecked_t fscache_n_checkaux_okay;
42598 +atomic_unchecked_t fscache_n_checkaux_update;
42599 +atomic_unchecked_t fscache_n_checkaux_obsolete;
42600
42601 atomic_t fscache_n_cop_alloc_object;
42602 atomic_t fscache_n_cop_lookup_object;
42603 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
42604 seq_puts(m, "FS-Cache statistics\n");
42605
42606 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
42607 - atomic_read(&fscache_n_cookie_index),
42608 - atomic_read(&fscache_n_cookie_data),
42609 - atomic_read(&fscache_n_cookie_special));
42610 + atomic_read_unchecked(&fscache_n_cookie_index),
42611 + atomic_read_unchecked(&fscache_n_cookie_data),
42612 + atomic_read_unchecked(&fscache_n_cookie_special));
42613
42614 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
42615 - atomic_read(&fscache_n_object_alloc),
42616 - atomic_read(&fscache_n_object_no_alloc),
42617 - atomic_read(&fscache_n_object_avail),
42618 - atomic_read(&fscache_n_object_dead));
42619 + atomic_read_unchecked(&fscache_n_object_alloc),
42620 + atomic_read_unchecked(&fscache_n_object_no_alloc),
42621 + atomic_read_unchecked(&fscache_n_object_avail),
42622 + atomic_read_unchecked(&fscache_n_object_dead));
42623 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
42624 - atomic_read(&fscache_n_checkaux_none),
42625 - atomic_read(&fscache_n_checkaux_okay),
42626 - atomic_read(&fscache_n_checkaux_update),
42627 - atomic_read(&fscache_n_checkaux_obsolete));
42628 + atomic_read_unchecked(&fscache_n_checkaux_none),
42629 + atomic_read_unchecked(&fscache_n_checkaux_okay),
42630 + atomic_read_unchecked(&fscache_n_checkaux_update),
42631 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
42632
42633 seq_printf(m, "Pages : mrk=%u unc=%u\n",
42634 - atomic_read(&fscache_n_marks),
42635 - atomic_read(&fscache_n_uncaches));
42636 + atomic_read_unchecked(&fscache_n_marks),
42637 + atomic_read_unchecked(&fscache_n_uncaches));
42638
42639 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
42640 " oom=%u\n",
42641 - atomic_read(&fscache_n_acquires),
42642 - atomic_read(&fscache_n_acquires_null),
42643 - atomic_read(&fscache_n_acquires_no_cache),
42644 - atomic_read(&fscache_n_acquires_ok),
42645 - atomic_read(&fscache_n_acquires_nobufs),
42646 - atomic_read(&fscache_n_acquires_oom));
42647 + atomic_read_unchecked(&fscache_n_acquires),
42648 + atomic_read_unchecked(&fscache_n_acquires_null),
42649 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
42650 + atomic_read_unchecked(&fscache_n_acquires_ok),
42651 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
42652 + atomic_read_unchecked(&fscache_n_acquires_oom));
42653
42654 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
42655 - atomic_read(&fscache_n_object_lookups),
42656 - atomic_read(&fscache_n_object_lookups_negative),
42657 - atomic_read(&fscache_n_object_lookups_positive),
42658 - atomic_read(&fscache_n_object_lookups_timed_out),
42659 - atomic_read(&fscache_n_object_created));
42660 + atomic_read_unchecked(&fscache_n_object_lookups),
42661 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
42662 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
42663 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
42664 + atomic_read_unchecked(&fscache_n_object_created));
42665
42666 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
42667 - atomic_read(&fscache_n_updates),
42668 - atomic_read(&fscache_n_updates_null),
42669 - atomic_read(&fscache_n_updates_run));
42670 + atomic_read_unchecked(&fscache_n_updates),
42671 + atomic_read_unchecked(&fscache_n_updates_null),
42672 + atomic_read_unchecked(&fscache_n_updates_run));
42673
42674 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
42675 - atomic_read(&fscache_n_relinquishes),
42676 - atomic_read(&fscache_n_relinquishes_null),
42677 - atomic_read(&fscache_n_relinquishes_waitcrt),
42678 - atomic_read(&fscache_n_relinquishes_retire));
42679 + atomic_read_unchecked(&fscache_n_relinquishes),
42680 + atomic_read_unchecked(&fscache_n_relinquishes_null),
42681 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
42682 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
42683
42684 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
42685 - atomic_read(&fscache_n_attr_changed),
42686 - atomic_read(&fscache_n_attr_changed_ok),
42687 - atomic_read(&fscache_n_attr_changed_nobufs),
42688 - atomic_read(&fscache_n_attr_changed_nomem),
42689 - atomic_read(&fscache_n_attr_changed_calls));
42690 + atomic_read_unchecked(&fscache_n_attr_changed),
42691 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
42692 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
42693 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
42694 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
42695
42696 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
42697 - atomic_read(&fscache_n_allocs),
42698 - atomic_read(&fscache_n_allocs_ok),
42699 - atomic_read(&fscache_n_allocs_wait),
42700 - atomic_read(&fscache_n_allocs_nobufs),
42701 - atomic_read(&fscache_n_allocs_intr));
42702 + atomic_read_unchecked(&fscache_n_allocs),
42703 + atomic_read_unchecked(&fscache_n_allocs_ok),
42704 + atomic_read_unchecked(&fscache_n_allocs_wait),
42705 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
42706 + atomic_read_unchecked(&fscache_n_allocs_intr));
42707 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
42708 - atomic_read(&fscache_n_alloc_ops),
42709 - atomic_read(&fscache_n_alloc_op_waits),
42710 - atomic_read(&fscache_n_allocs_object_dead));
42711 + atomic_read_unchecked(&fscache_n_alloc_ops),
42712 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
42713 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
42714
42715 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
42716 " int=%u oom=%u\n",
42717 - atomic_read(&fscache_n_retrievals),
42718 - atomic_read(&fscache_n_retrievals_ok),
42719 - atomic_read(&fscache_n_retrievals_wait),
42720 - atomic_read(&fscache_n_retrievals_nodata),
42721 - atomic_read(&fscache_n_retrievals_nobufs),
42722 - atomic_read(&fscache_n_retrievals_intr),
42723 - atomic_read(&fscache_n_retrievals_nomem));
42724 + atomic_read_unchecked(&fscache_n_retrievals),
42725 + atomic_read_unchecked(&fscache_n_retrievals_ok),
42726 + atomic_read_unchecked(&fscache_n_retrievals_wait),
42727 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
42728 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
42729 + atomic_read_unchecked(&fscache_n_retrievals_intr),
42730 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
42731 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
42732 - atomic_read(&fscache_n_retrieval_ops),
42733 - atomic_read(&fscache_n_retrieval_op_waits),
42734 - atomic_read(&fscache_n_retrievals_object_dead));
42735 + atomic_read_unchecked(&fscache_n_retrieval_ops),
42736 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
42737 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
42738
42739 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
42740 - atomic_read(&fscache_n_stores),
42741 - atomic_read(&fscache_n_stores_ok),
42742 - atomic_read(&fscache_n_stores_again),
42743 - atomic_read(&fscache_n_stores_nobufs),
42744 - atomic_read(&fscache_n_stores_oom));
42745 + atomic_read_unchecked(&fscache_n_stores),
42746 + atomic_read_unchecked(&fscache_n_stores_ok),
42747 + atomic_read_unchecked(&fscache_n_stores_again),
42748 + atomic_read_unchecked(&fscache_n_stores_nobufs),
42749 + atomic_read_unchecked(&fscache_n_stores_oom));
42750 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
42751 - atomic_read(&fscache_n_store_ops),
42752 - atomic_read(&fscache_n_store_calls),
42753 - atomic_read(&fscache_n_store_pages),
42754 - atomic_read(&fscache_n_store_radix_deletes),
42755 - atomic_read(&fscache_n_store_pages_over_limit));
42756 + atomic_read_unchecked(&fscache_n_store_ops),
42757 + atomic_read_unchecked(&fscache_n_store_calls),
42758 + atomic_read_unchecked(&fscache_n_store_pages),
42759 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
42760 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
42761
42762 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
42763 - atomic_read(&fscache_n_store_vmscan_not_storing),
42764 - atomic_read(&fscache_n_store_vmscan_gone),
42765 - atomic_read(&fscache_n_store_vmscan_busy),
42766 - atomic_read(&fscache_n_store_vmscan_cancelled));
42767 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
42768 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
42769 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
42770 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
42771
42772 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
42773 - atomic_read(&fscache_n_op_pend),
42774 - atomic_read(&fscache_n_op_run),
42775 - atomic_read(&fscache_n_op_enqueue),
42776 - atomic_read(&fscache_n_op_cancelled),
42777 - atomic_read(&fscache_n_op_rejected));
42778 + atomic_read_unchecked(&fscache_n_op_pend),
42779 + atomic_read_unchecked(&fscache_n_op_run),
42780 + atomic_read_unchecked(&fscache_n_op_enqueue),
42781 + atomic_read_unchecked(&fscache_n_op_cancelled),
42782 + atomic_read_unchecked(&fscache_n_op_rejected));
42783 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
42784 - atomic_read(&fscache_n_op_deferred_release),
42785 - atomic_read(&fscache_n_op_release),
42786 - atomic_read(&fscache_n_op_gc));
42787 + atomic_read_unchecked(&fscache_n_op_deferred_release),
42788 + atomic_read_unchecked(&fscache_n_op_release),
42789 + atomic_read_unchecked(&fscache_n_op_gc));
42790
42791 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
42792 atomic_read(&fscache_n_cop_alloc_object),
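
The fs/fscache/stats.c hunks above convert the pure statistics counters from atomic_t to atomic_unchecked_t and their readers to atomic_read_unchecked(). Under the PAX_REFCOUNT hardening carried elsewhere in this patch, ordinary atomic_t arithmetic is instrumented to trap on overflow so that reference-count wraps are caught; event counters may legitimately wrap, so they move to the unchecked variants and the overflow checking stays focused on real reference counts. A minimal sketch of that split, assuming the atomic_unchecked_t helpers this patch introduces (the demo_* names are hypothetical):

#include <asm/atomic.h>

/* statistic: allowed to wrap, so it uses the unchecked type */
static atomic_unchecked_t demo_n_events = ATOMIC_INIT(0);
/* real reference count: stays atomic_t and keeps overflow detection */
static atomic_t demo_refcount = ATOMIC_INIT(1);

static void demo_event(void)
{
        atomic_inc_unchecked(&demo_n_events);   /* not overflow-instrumented */
}

static unsigned int demo_events_seen(void)
{
        return atomic_read_unchecked(&demo_n_events);
}
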
42793 diff -urNp linux-2.6.32.45/fs/fs_struct.c linux-2.6.32.45/fs/fs_struct.c
42794 --- linux-2.6.32.45/fs/fs_struct.c 2011-03-27 14:31:47.000000000 -0400
42795 +++ linux-2.6.32.45/fs/fs_struct.c 2011-04-17 15:56:46.000000000 -0400
42796 @@ -4,6 +4,7 @@
42797 #include <linux/path.h>
42798 #include <linux/slab.h>
42799 #include <linux/fs_struct.h>
42800 +#include <linux/grsecurity.h>
42801
42802 /*
42803 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
42804 @@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, s
42805 old_root = fs->root;
42806 fs->root = *path;
42807 path_get(path);
42808 + gr_set_chroot_entries(current, path);
42809 write_unlock(&fs->lock);
42810 if (old_root.dentry)
42811 path_put(&old_root);
42812 @@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_roo
42813 && fs->root.mnt == old_root->mnt) {
42814 path_get(new_root);
42815 fs->root = *new_root;
42816 + gr_set_chroot_entries(p, new_root);
42817 count++;
42818 }
42819 if (fs->pwd.dentry == old_root->dentry
42820 @@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
42821 task_lock(tsk);
42822 write_lock(&fs->lock);
42823 tsk->fs = NULL;
42824 - kill = !--fs->users;
42825 + gr_clear_chroot_entries(tsk);
42826 + kill = !atomic_dec_return(&fs->users);
42827 write_unlock(&fs->lock);
42828 task_unlock(tsk);
42829 if (kill)
42830 @@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct
42831 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
42832 /* We don't need to lock fs - think why ;-) */
42833 if (fs) {
42834 - fs->users = 1;
42835 + atomic_set(&fs->users, 1);
42836 fs->in_exec = 0;
42837 rwlock_init(&fs->lock);
42838 fs->umask = old->umask;
42839 @@ -127,8 +131,9 @@ int unshare_fs_struct(void)
42840
42841 task_lock(current);
42842 write_lock(&fs->lock);
42843 - kill = !--fs->users;
42844 + kill = !atomic_dec_return(&fs->users);
42845 current->fs = new_fs;
42846 + gr_set_chroot_entries(current, &new_fs->root);
42847 write_unlock(&fs->lock);
42848 task_unlock(current);
42849
42850 @@ -147,7 +152,7 @@ EXPORT_SYMBOL(current_umask);
42851
42852 /* to be mentioned only in INIT_TASK */
42853 struct fs_struct init_fs = {
42854 - .users = 1,
42855 + .users = ATOMIC_INIT(1),
42856 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
42857 .umask = 0022,
42858 };
42859 @@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
42860 task_lock(current);
42861
42862 write_lock(&init_fs.lock);
42863 - init_fs.users++;
42864 + atomic_inc(&init_fs.users);
42865 write_unlock(&init_fs.lock);
42866
42867 write_lock(&fs->lock);
42868 current->fs = &init_fs;
42869 - kill = !--fs->users;
42870 + gr_set_chroot_entries(current, &current->fs->root);
42871 + kill = !atomic_dec_return(&fs->users);
42872 write_unlock(&fs->lock);
42873
42874 task_unlock(current);
42875 diff -urNp linux-2.6.32.45/fs/fuse/cuse.c linux-2.6.32.45/fs/fuse/cuse.c
42876 --- linux-2.6.32.45/fs/fuse/cuse.c 2011-03-27 14:31:47.000000000 -0400
42877 +++ linux-2.6.32.45/fs/fuse/cuse.c 2011-08-05 20:33:55.000000000 -0400
42878 @@ -576,10 +576,12 @@ static int __init cuse_init(void)
42879 INIT_LIST_HEAD(&cuse_conntbl[i]);
42880
42881 /* inherit and extend fuse_dev_operations */
42882 - cuse_channel_fops = fuse_dev_operations;
42883 - cuse_channel_fops.owner = THIS_MODULE;
42884 - cuse_channel_fops.open = cuse_channel_open;
42885 - cuse_channel_fops.release = cuse_channel_release;
42886 + pax_open_kernel();
42887 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
42888 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
42889 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
42890 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
42891 + pax_close_kernel();
42892
42893 cuse_class = class_create(THIS_MODULE, "cuse");
42894 if (IS_ERR(cuse_class))
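
In the fs/fuse/cuse.c hunk above, cuse_channel_fops can no longer be filled in by plain member assignment: with the structure constification done elsewhere in this patch it is treated as read-only at run time, so cuse_init() opens a short writable window with pax_open_kernel(), memcpy()s the fuse_dev_operations template, patches the individual callbacks through void-pointer casts, and seals the window again with pax_close_kernel(). A rough sketch of the idiom, assuming those two PaX helpers and a hypothetical demo_open() callback:

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/string.h>

static int demo_open(struct inode *inode, struct file *file)
{
        return 0;
}

static const struct file_operations demo_template = {
        .open = demo_open,
};

/* under constification this object would live in read-only memory */
static struct file_operations demo_fops;

static int __init demo_init(void)
{
        pax_open_kernel();              /* temporarily allow writes to protected data */
        memcpy((void *)&demo_fops, &demo_template, sizeof(demo_fops));
        /* members written through casts, mirroring how the patch updates
         * the constified structure */
        *(void **)&demo_fops.owner = THIS_MODULE;
        pax_close_kernel();             /* restore the protection */
        return 0;
}
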
42895 diff -urNp linux-2.6.32.45/fs/fuse/dev.c linux-2.6.32.45/fs/fuse/dev.c
42896 --- linux-2.6.32.45/fs/fuse/dev.c 2011-03-27 14:31:47.000000000 -0400
42897 +++ linux-2.6.32.45/fs/fuse/dev.c 2011-08-05 20:33:55.000000000 -0400
42898 @@ -885,7 +885,7 @@ static int fuse_notify_inval_entry(struc
42899 {
42900 struct fuse_notify_inval_entry_out outarg;
42901 int err = -EINVAL;
42902 - char buf[FUSE_NAME_MAX+1];
42903 + char *buf = NULL;
42904 struct qstr name;
42905
42906 if (size < sizeof(outarg))
42907 @@ -899,6 +899,11 @@ static int fuse_notify_inval_entry(struc
42908 if (outarg.namelen > FUSE_NAME_MAX)
42909 goto err;
42910
42911 + err = -ENOMEM;
42912 + buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
42913 + if (!buf)
42914 + goto err;
42915 +
42916 name.name = buf;
42917 name.len = outarg.namelen;
42918 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
42919 @@ -910,17 +915,15 @@ static int fuse_notify_inval_entry(struc
42920
42921 down_read(&fc->killsb);
42922 err = -ENOENT;
42923 - if (!fc->sb)
42924 - goto err_unlock;
42925 -
42926 - err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
42927 -
42928 -err_unlock:
42929 + if (fc->sb)
42930 + err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
42931 up_read(&fc->killsb);
42932 + kfree(buf);
42933 return err;
42934
42935 err:
42936 fuse_copy_finish(cs);
42937 + kfree(buf);
42938 return err;
42939 }
42940
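
The fs/fuse/dev.c change above removes a FUSE_NAME_MAX+1 byte array (a little over 1 KiB) from fuse_notify_inval_entry()'s stack frame and replaces it with a kmalloc() buffer that every exit path frees, flattening the error unwinding at the same time. The general shape of that conversion, sketched with hypothetical demo_* names:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>

#define DEMO_NAME_MAX 1024

static int demo_handle_name(const char *src, size_t len)
{
        char *buf;                      /* was: char buf[DEMO_NAME_MAX + 1]; on the stack */
        int err = -ENAMETOOLONG;

        if (len > DEMO_NAME_MAX)
                goto out;

        err = -ENOMEM;
        buf = kmalloc(DEMO_NAME_MAX + 1, GFP_KERNEL);
        if (!buf)
                goto out;

        memcpy(buf, src, len);
        buf[len] = '\0';
        err = 0;                        /* ... use buf here ... */

        kfree(buf);                     /* freed on every path that allocated it */
out:
        return err;
}
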
42941 diff -urNp linux-2.6.32.45/fs/fuse/dir.c linux-2.6.32.45/fs/fuse/dir.c
42942 --- linux-2.6.32.45/fs/fuse/dir.c 2011-03-27 14:31:47.000000000 -0400
42943 +++ linux-2.6.32.45/fs/fuse/dir.c 2011-04-17 15:56:46.000000000 -0400
42944 @@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *de
42945 return link;
42946 }
42947
42948 -static void free_link(char *link)
42949 +static void free_link(const char *link)
42950 {
42951 if (!IS_ERR(link))
42952 free_page((unsigned long) link);
42953 diff -urNp linux-2.6.32.45/fs/gfs2/ops_inode.c linux-2.6.32.45/fs/gfs2/ops_inode.c
42954 --- linux-2.6.32.45/fs/gfs2/ops_inode.c 2011-03-27 14:31:47.000000000 -0400
42955 +++ linux-2.6.32.45/fs/gfs2/ops_inode.c 2011-05-16 21:46:57.000000000 -0400
42956 @@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odi
42957 unsigned int x;
42958 int error;
42959
42960 + pax_track_stack();
42961 +
42962 if (ndentry->d_inode) {
42963 nip = GFS2_I(ndentry->d_inode);
42964 if (ip == nip)
42965 diff -urNp linux-2.6.32.45/fs/gfs2/sys.c linux-2.6.32.45/fs/gfs2/sys.c
42966 --- linux-2.6.32.45/fs/gfs2/sys.c 2011-03-27 14:31:47.000000000 -0400
42967 +++ linux-2.6.32.45/fs/gfs2/sys.c 2011-04-17 15:56:46.000000000 -0400
42968 @@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct ko
42969 return a->store ? a->store(sdp, buf, len) : len;
42970 }
42971
42972 -static struct sysfs_ops gfs2_attr_ops = {
42973 +static const struct sysfs_ops gfs2_attr_ops = {
42974 .show = gfs2_attr_show,
42975 .store = gfs2_attr_store,
42976 };
42977 @@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset
42978 return 0;
42979 }
42980
42981 -static struct kset_uevent_ops gfs2_uevent_ops = {
42982 +static const struct kset_uevent_ops gfs2_uevent_ops = {
42983 .uevent = gfs2_uevent,
42984 };
42985
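
gfs2_rename() above, and many of the hfsplus, jbd, jffs2, lockd, ncpfs and nfsd routines in the hunks that follow, gain a bare pax_track_stack() call near the top. That is the manual annotation used by the PaX STACKLEAK feature on this kernel: functions with large stack frames record how deep the kernel stack has grown so the used region can be cleared before returning to userland, keeping stale stack contents from leaking or being sprayed. The annotation is simply the first statement after the local declarations, as in this hypothetical example (it assumes the pax_track_stack() declaration this patch adds):

#include <linux/string.h>

static int demo_large_frame(const char *name)
{
        char scratch[1024];             /* large on-stack buffer */

        pax_track_stack();              /* record this frame's depth for STACKLEAK */

        strlcpy(scratch, name, sizeof(scratch));
        return 0;
}
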
42986 diff -urNp linux-2.6.32.45/fs/hfsplus/catalog.c linux-2.6.32.45/fs/hfsplus/catalog.c
42987 --- linux-2.6.32.45/fs/hfsplus/catalog.c 2011-03-27 14:31:47.000000000 -0400
42988 +++ linux-2.6.32.45/fs/hfsplus/catalog.c 2011-05-16 21:46:57.000000000 -0400
42989 @@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block
42990 int err;
42991 u16 type;
42992
42993 + pax_track_stack();
42994 +
42995 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
42996 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
42997 if (err)
42998 @@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct
42999 int entry_size;
43000 int err;
43001
43002 + pax_track_stack();
43003 +
43004 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
43005 sb = dir->i_sb;
43006 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
43007 @@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
43008 int entry_size, type;
43009 int err = 0;
43010
43011 + pax_track_stack();
43012 +
43013 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
43014 dst_dir->i_ino, dst_name->name);
43015 sb = src_dir->i_sb;
43016 diff -urNp linux-2.6.32.45/fs/hfsplus/dir.c linux-2.6.32.45/fs/hfsplus/dir.c
43017 --- linux-2.6.32.45/fs/hfsplus/dir.c 2011-03-27 14:31:47.000000000 -0400
43018 +++ linux-2.6.32.45/fs/hfsplus/dir.c 2011-05-16 21:46:57.000000000 -0400
43019 @@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *
43020 struct hfsplus_readdir_data *rd;
43021 u16 type;
43022
43023 + pax_track_stack();
43024 +
43025 if (filp->f_pos >= inode->i_size)
43026 return 0;
43027
43028 diff -urNp linux-2.6.32.45/fs/hfsplus/inode.c linux-2.6.32.45/fs/hfsplus/inode.c
43029 --- linux-2.6.32.45/fs/hfsplus/inode.c 2011-03-27 14:31:47.000000000 -0400
43030 +++ linux-2.6.32.45/fs/hfsplus/inode.c 2011-05-16 21:46:57.000000000 -0400
43031 @@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode
43032 int res = 0;
43033 u16 type;
43034
43035 + pax_track_stack();
43036 +
43037 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
43038
43039 HFSPLUS_I(inode).dev = 0;
43040 @@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode
43041 struct hfs_find_data fd;
43042 hfsplus_cat_entry entry;
43043
43044 + pax_track_stack();
43045 +
43046 if (HFSPLUS_IS_RSRC(inode))
43047 main_inode = HFSPLUS_I(inode).rsrc_inode;
43048
43049 diff -urNp linux-2.6.32.45/fs/hfsplus/ioctl.c linux-2.6.32.45/fs/hfsplus/ioctl.c
43050 --- linux-2.6.32.45/fs/hfsplus/ioctl.c 2011-03-27 14:31:47.000000000 -0400
43051 +++ linux-2.6.32.45/fs/hfsplus/ioctl.c 2011-05-16 21:46:57.000000000 -0400
43052 @@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dent
43053 struct hfsplus_cat_file *file;
43054 int res;
43055
43056 + pax_track_stack();
43057 +
43058 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
43059 return -EOPNOTSUPP;
43060
43061 @@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *
43062 struct hfsplus_cat_file *file;
43063 ssize_t res = 0;
43064
43065 + pax_track_stack();
43066 +
43067 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
43068 return -EOPNOTSUPP;
43069
43070 diff -urNp linux-2.6.32.45/fs/hfsplus/super.c linux-2.6.32.45/fs/hfsplus/super.c
43071 --- linux-2.6.32.45/fs/hfsplus/super.c 2011-03-27 14:31:47.000000000 -0400
43072 +++ linux-2.6.32.45/fs/hfsplus/super.c 2011-05-16 21:46:57.000000000 -0400
43073 @@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct sup
43074 struct nls_table *nls = NULL;
43075 int err = -EINVAL;
43076
43077 + pax_track_stack();
43078 +
43079 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
43080 if (!sbi)
43081 return -ENOMEM;
43082 diff -urNp linux-2.6.32.45/fs/hugetlbfs/inode.c linux-2.6.32.45/fs/hugetlbfs/inode.c
43083 --- linux-2.6.32.45/fs/hugetlbfs/inode.c 2011-03-27 14:31:47.000000000 -0400
43084 +++ linux-2.6.32.45/fs/hugetlbfs/inode.c 2011-04-17 15:56:46.000000000 -0400
43085 @@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs
43086 .kill_sb = kill_litter_super,
43087 };
43088
43089 -static struct vfsmount *hugetlbfs_vfsmount;
43090 +struct vfsmount *hugetlbfs_vfsmount;
43091
43092 static int can_do_hugetlb_shm(void)
43093 {
43094 diff -urNp linux-2.6.32.45/fs/ioctl.c linux-2.6.32.45/fs/ioctl.c
43095 --- linux-2.6.32.45/fs/ioctl.c 2011-03-27 14:31:47.000000000 -0400
43096 +++ linux-2.6.32.45/fs/ioctl.c 2011-04-17 15:56:46.000000000 -0400
43097 @@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiema
43098 u64 phys, u64 len, u32 flags)
43099 {
43100 struct fiemap_extent extent;
43101 - struct fiemap_extent *dest = fieinfo->fi_extents_start;
43102 + struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
43103
43104 /* only count the extents */
43105 if (fieinfo->fi_extents_max == 0) {
43106 @@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *fil
43107
43108 fieinfo.fi_flags = fiemap.fm_flags;
43109 fieinfo.fi_extents_max = fiemap.fm_extent_count;
43110 - fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
43111 + fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
43112
43113 if (fiemap.fm_extent_count != 0 &&
43114 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
43115 @@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *fil
43116 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
43117 fiemap.fm_flags = fieinfo.fi_flags;
43118 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
43119 - if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
43120 + if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
43121 error = -EFAULT;
43122
43123 return error;
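
The fs/ioctl.c hunk is purely an annotation fix: fi_extents_start is a userspace pointer, so it is declared and cast with __user (plus a __force cast where the raw ioctl argument is converted), which lets sparse verify that it is only reached through access_ok()/copy_to_user(); the generated code does not change. Illustrative use of the annotation on a hypothetical ioctl reply:

#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

struct demo_reply {
        __u32 flags;
        __u64 size;
};

static int demo_copy_reply(unsigned long arg, const struct demo_reply *r)
{
        /* sparse now knows dest lives in the user address space */
        struct demo_reply __user *dest = (struct demo_reply __user *)arg;

        if (copy_to_user(dest, r, sizeof(*r)))
                return -EFAULT;
        return 0;
}
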
43124 diff -urNp linux-2.6.32.45/fs/jbd/checkpoint.c linux-2.6.32.45/fs/jbd/checkpoint.c
43125 --- linux-2.6.32.45/fs/jbd/checkpoint.c 2011-03-27 14:31:47.000000000 -0400
43126 +++ linux-2.6.32.45/fs/jbd/checkpoint.c 2011-05-16 21:46:57.000000000 -0400
43127 @@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal
43128 tid_t this_tid;
43129 int result;
43130
43131 + pax_track_stack();
43132 +
43133 jbd_debug(1, "Start checkpoint\n");
43134
43135 /*
43136 diff -urNp linux-2.6.32.45/fs/jffs2/compr_rtime.c linux-2.6.32.45/fs/jffs2/compr_rtime.c
43137 --- linux-2.6.32.45/fs/jffs2/compr_rtime.c 2011-03-27 14:31:47.000000000 -0400
43138 +++ linux-2.6.32.45/fs/jffs2/compr_rtime.c 2011-05-16 21:46:57.000000000 -0400
43139 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
43140 int outpos = 0;
43141 int pos=0;
43142
43143 + pax_track_stack();
43144 +
43145 memset(positions,0,sizeof(positions));
43146
43147 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
43148 @@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsign
43149 int outpos = 0;
43150 int pos=0;
43151
43152 + pax_track_stack();
43153 +
43154 memset(positions,0,sizeof(positions));
43155
43156 while (outpos<destlen) {
43157 diff -urNp linux-2.6.32.45/fs/jffs2/compr_rubin.c linux-2.6.32.45/fs/jffs2/compr_rubin.c
43158 --- linux-2.6.32.45/fs/jffs2/compr_rubin.c 2011-03-27 14:31:47.000000000 -0400
43159 +++ linux-2.6.32.45/fs/jffs2/compr_rubin.c 2011-05-16 21:46:57.000000000 -0400
43160 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
43161 int ret;
43162 uint32_t mysrclen, mydstlen;
43163
43164 + pax_track_stack();
43165 +
43166 mysrclen = *sourcelen;
43167 mydstlen = *dstlen - 8;
43168
43169 diff -urNp linux-2.6.32.45/fs/jffs2/erase.c linux-2.6.32.45/fs/jffs2/erase.c
43170 --- linux-2.6.32.45/fs/jffs2/erase.c 2011-03-27 14:31:47.000000000 -0400
43171 +++ linux-2.6.32.45/fs/jffs2/erase.c 2011-04-17 15:56:46.000000000 -0400
43172 @@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(stru
43173 struct jffs2_unknown_node marker = {
43174 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
43175 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
43176 - .totlen = cpu_to_je32(c->cleanmarker_size)
43177 + .totlen = cpu_to_je32(c->cleanmarker_size),
43178 + .hdr_crc = cpu_to_je32(0)
43179 };
43180
43181 jffs2_prealloc_raw_node_refs(c, jeb, 1);
43182 diff -urNp linux-2.6.32.45/fs/jffs2/wbuf.c linux-2.6.32.45/fs/jffs2/wbuf.c
43183 --- linux-2.6.32.45/fs/jffs2/wbuf.c 2011-03-27 14:31:47.000000000 -0400
43184 +++ linux-2.6.32.45/fs/jffs2/wbuf.c 2011-04-17 15:56:46.000000000 -0400
43185 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
43186 {
43187 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
43188 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
43189 - .totlen = constant_cpu_to_je32(8)
43190 + .totlen = constant_cpu_to_je32(8),
43191 + .hdr_crc = constant_cpu_to_je32(0)
43192 };
43193
43194 /*
43195 diff -urNp linux-2.6.32.45/fs/jffs2/xattr.c linux-2.6.32.45/fs/jffs2/xattr.c
43196 --- linux-2.6.32.45/fs/jffs2/xattr.c 2011-03-27 14:31:47.000000000 -0400
43197 +++ linux-2.6.32.45/fs/jffs2/xattr.c 2011-05-16 21:46:57.000000000 -0400
43198 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
43199
43200 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
43201
43202 + pax_track_stack();
43203 +
43204 /* Phase.1 : Merge same xref */
43205 for (i=0; i < XREF_TMPHASH_SIZE; i++)
43206 xref_tmphash[i] = NULL;
43207 diff -urNp linux-2.6.32.45/fs/jfs/super.c linux-2.6.32.45/fs/jfs/super.c
43208 --- linux-2.6.32.45/fs/jfs/super.c 2011-03-27 14:31:47.000000000 -0400
43209 +++ linux-2.6.32.45/fs/jfs/super.c 2011-06-07 18:06:04.000000000 -0400
43210 @@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
43211
43212 jfs_inode_cachep =
43213 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
43214 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
43215 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
43216 init_once);
43217 if (jfs_inode_cachep == NULL)
43218 return -ENOMEM;
43219 diff -urNp linux-2.6.32.45/fs/Kconfig.binfmt linux-2.6.32.45/fs/Kconfig.binfmt
43220 --- linux-2.6.32.45/fs/Kconfig.binfmt 2011-03-27 14:31:47.000000000 -0400
43221 +++ linux-2.6.32.45/fs/Kconfig.binfmt 2011-04-17 15:56:46.000000000 -0400
43222 @@ -86,7 +86,7 @@ config HAVE_AOUT
43223
43224 config BINFMT_AOUT
43225 tristate "Kernel support for a.out and ECOFF binaries"
43226 - depends on HAVE_AOUT
43227 + depends on HAVE_AOUT && BROKEN
43228 ---help---
43229 A.out (Assembler.OUTput) is a set of formats for libraries and
43230 executables used in the earliest versions of UNIX. Linux used
43231 diff -urNp linux-2.6.32.45/fs/libfs.c linux-2.6.32.45/fs/libfs.c
43232 --- linux-2.6.32.45/fs/libfs.c 2011-03-27 14:31:47.000000000 -0400
43233 +++ linux-2.6.32.45/fs/libfs.c 2011-05-11 18:25:15.000000000 -0400
43234 @@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, v
43235
43236 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
43237 struct dentry *next;
43238 + char d_name[sizeof(next->d_iname)];
43239 + const unsigned char *name;
43240 +
43241 next = list_entry(p, struct dentry, d_u.d_child);
43242 if (d_unhashed(next) || !next->d_inode)
43243 continue;
43244
43245 spin_unlock(&dcache_lock);
43246 - if (filldir(dirent, next->d_name.name,
43247 + name = next->d_name.name;
43248 + if (name == next->d_iname) {
43249 + memcpy(d_name, name, next->d_name.len);
43250 + name = d_name;
43251 + }
43252 + if (filldir(dirent, name,
43253 next->d_name.len, filp->f_pos,
43254 next->d_inode->i_ino,
43255 dt_type(next->d_inode)) < 0)
43256 diff -urNp linux-2.6.32.45/fs/lockd/clntproc.c linux-2.6.32.45/fs/lockd/clntproc.c
43257 --- linux-2.6.32.45/fs/lockd/clntproc.c 2011-03-27 14:31:47.000000000 -0400
43258 +++ linux-2.6.32.45/fs/lockd/clntproc.c 2011-05-16 21:46:57.000000000 -0400
43259 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
43260 /*
43261 * Cookie counter for NLM requests
43262 */
43263 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
43264 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
43265
43266 void nlmclnt_next_cookie(struct nlm_cookie *c)
43267 {
43268 - u32 cookie = atomic_inc_return(&nlm_cookie);
43269 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
43270
43271 memcpy(c->data, &cookie, 4);
43272 c->len=4;
43273 @@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
43274 struct nlm_rqst reqst, *req;
43275 int status;
43276
43277 + pax_track_stack();
43278 +
43279 req = &reqst;
43280 memset(req, 0, sizeof(*req));
43281 locks_init_lock(&req->a_args.lock.fl);
43282 diff -urNp linux-2.6.32.45/fs/lockd/svc.c linux-2.6.32.45/fs/lockd/svc.c
43283 --- linux-2.6.32.45/fs/lockd/svc.c 2011-03-27 14:31:47.000000000 -0400
43284 +++ linux-2.6.32.45/fs/lockd/svc.c 2011-04-17 15:56:46.000000000 -0400
43285 @@ -43,7 +43,7 @@
43286
43287 static struct svc_program nlmsvc_program;
43288
43289 -struct nlmsvc_binding * nlmsvc_ops;
43290 +const struct nlmsvc_binding * nlmsvc_ops;
43291 EXPORT_SYMBOL_GPL(nlmsvc_ops);
43292
43293 static DEFINE_MUTEX(nlmsvc_mutex);
43294 diff -urNp linux-2.6.32.45/fs/locks.c linux-2.6.32.45/fs/locks.c
43295 --- linux-2.6.32.45/fs/locks.c 2011-03-27 14:31:47.000000000 -0400
43296 +++ linux-2.6.32.45/fs/locks.c 2011-07-06 19:47:11.000000000 -0400
43297 @@ -145,10 +145,28 @@ static LIST_HEAD(blocked_list);
43298
43299 static struct kmem_cache *filelock_cache __read_mostly;
43300
43301 +static void locks_init_lock_always(struct file_lock *fl)
43302 +{
43303 + fl->fl_next = NULL;
43304 + fl->fl_fasync = NULL;
43305 + fl->fl_owner = NULL;
43306 + fl->fl_pid = 0;
43307 + fl->fl_nspid = NULL;
43308 + fl->fl_file = NULL;
43309 + fl->fl_flags = 0;
43310 + fl->fl_type = 0;
43311 + fl->fl_start = fl->fl_end = 0;
43312 +}
43313 +
43314 /* Allocate an empty lock structure. */
43315 static struct file_lock *locks_alloc_lock(void)
43316 {
43317 - return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
43318 + struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
43319 +
43320 + if (fl)
43321 + locks_init_lock_always(fl);
43322 +
43323 + return fl;
43324 }
43325
43326 void locks_release_private(struct file_lock *fl)
43327 @@ -183,17 +201,9 @@ void locks_init_lock(struct file_lock *f
43328 INIT_LIST_HEAD(&fl->fl_link);
43329 INIT_LIST_HEAD(&fl->fl_block);
43330 init_waitqueue_head(&fl->fl_wait);
43331 - fl->fl_next = NULL;
43332 - fl->fl_fasync = NULL;
43333 - fl->fl_owner = NULL;
43334 - fl->fl_pid = 0;
43335 - fl->fl_nspid = NULL;
43336 - fl->fl_file = NULL;
43337 - fl->fl_flags = 0;
43338 - fl->fl_type = 0;
43339 - fl->fl_start = fl->fl_end = 0;
43340 fl->fl_ops = NULL;
43341 fl->fl_lmops = NULL;
43342 + locks_init_lock_always(fl);
43343 }
43344
43345 EXPORT_SYMBOL(locks_init_lock);
43346 @@ -2007,16 +2017,16 @@ void locks_remove_flock(struct file *fil
43347 return;
43348
43349 if (filp->f_op && filp->f_op->flock) {
43350 - struct file_lock fl = {
43351 + struct file_lock flock = {
43352 .fl_pid = current->tgid,
43353 .fl_file = filp,
43354 .fl_flags = FL_FLOCK,
43355 .fl_type = F_UNLCK,
43356 .fl_end = OFFSET_MAX,
43357 };
43358 - filp->f_op->flock(filp, F_SETLKW, &fl);
43359 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
43360 - fl.fl_ops->fl_release_private(&fl);
43361 + filp->f_op->flock(filp, F_SETLKW, &flock);
43362 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
43363 + flock.fl_ops->fl_release_private(&flock);
43364 }
43365
43366 lock_kernel();
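
The fs/locks.c change above pulls the per-field resets out of locks_init_lock() into a locks_init_lock_always() helper and also runs it from locks_alloc_lock(), because kmem_cache_alloc() without a constructor hands back uninitialised memory and a freshly allocated file_lock could otherwise carry stale field contents. The shape of that refactoring, reduced to a hypothetical structure:

#include <linux/slab.h>
#include <linux/list.h>

struct demo_lock {
        struct list_head link;          /* set up only by the full init */
        void *owner;                    /* must never be stale garbage  */
        int type;
};

/* created with kmem_cache_create() at init time (omitted in this sketch) */
static struct kmem_cache *demo_cache;

static void demo_init_always(struct demo_lock *l)
{
        /* fields that must be sane even for a "raw" allocation */
        l->owner = NULL;
        l->type = 0;
}

static void demo_init(struct demo_lock *l)
{
        INIT_LIST_HEAD(&l->link);
        demo_init_always(l);
}

static struct demo_lock *demo_alloc(void)
{
        struct demo_lock *l = kmem_cache_alloc(demo_cache, GFP_KERNEL);

        if (l)
                demo_init_always(l);    /* never hand out stale field contents */
        return l;
}
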
43367 diff -urNp linux-2.6.32.45/fs/mbcache.c linux-2.6.32.45/fs/mbcache.c
43368 --- linux-2.6.32.45/fs/mbcache.c 2011-03-27 14:31:47.000000000 -0400
43369 +++ linux-2.6.32.45/fs/mbcache.c 2011-08-05 20:33:55.000000000 -0400
43370 @@ -266,9 +266,9 @@ mb_cache_create(const char *name, struct
43371 if (!cache)
43372 goto fail;
43373 cache->c_name = name;
43374 - cache->c_op.free = NULL;
43375 + *(void **)&cache->c_op.free = NULL;
43376 if (cache_op)
43377 - cache->c_op.free = cache_op->free;
43378 + *(void **)&cache->c_op.free = cache_op->free;
43379 atomic_set(&cache->c_entry_count, 0);
43380 cache->c_bucket_bits = bucket_bits;
43381 #ifdef MB_CACHE_INDEXES_COUNT
43382 diff -urNp linux-2.6.32.45/fs/namei.c linux-2.6.32.45/fs/namei.c
43383 --- linux-2.6.32.45/fs/namei.c 2011-03-27 14:31:47.000000000 -0400
43384 +++ linux-2.6.32.45/fs/namei.c 2011-05-16 21:46:57.000000000 -0400
43385 @@ -224,14 +224,6 @@ int generic_permission(struct inode *ino
43386 return ret;
43387
43388 /*
43389 - * Read/write DACs are always overridable.
43390 - * Executable DACs are overridable if at least one exec bit is set.
43391 - */
43392 - if (!(mask & MAY_EXEC) || execute_ok(inode))
43393 - if (capable(CAP_DAC_OVERRIDE))
43394 - return 0;
43395 -
43396 - /*
43397 * Searching includes executable on directories, else just read.
43398 */
43399 mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
43400 @@ -239,6 +231,14 @@ int generic_permission(struct inode *ino
43401 if (capable(CAP_DAC_READ_SEARCH))
43402 return 0;
43403
43404 + /*
43405 + * Read/write DACs are always overridable.
43406 + * Executable DACs are overridable if at least one exec bit is set.
43407 + */
43408 + if (!(mask & MAY_EXEC) || execute_ok(inode))
43409 + if (capable(CAP_DAC_OVERRIDE))
43410 + return 0;
43411 +
43412 return -EACCES;
43413 }
43414
43415 @@ -458,7 +458,8 @@ static int exec_permission_lite(struct i
43416 if (!ret)
43417 goto ok;
43418
43419 - if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
43420 + if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
43421 + capable(CAP_DAC_OVERRIDE))
43422 goto ok;
43423
43424 return ret;
43425 @@ -638,7 +639,7 @@ static __always_inline int __do_follow_l
43426 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
43427 error = PTR_ERR(cookie);
43428 if (!IS_ERR(cookie)) {
43429 - char *s = nd_get_link(nd);
43430 + const char *s = nd_get_link(nd);
43431 error = 0;
43432 if (s)
43433 error = __vfs_follow_link(nd, s);
43434 @@ -669,6 +670,13 @@ static inline int do_follow_link(struct
43435 err = security_inode_follow_link(path->dentry, nd);
43436 if (err)
43437 goto loop;
43438 +
43439 + if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
43440 + path->dentry->d_inode, path->dentry, nd->path.mnt)) {
43441 + err = -EACCES;
43442 + goto loop;
43443 + }
43444 +
43445 current->link_count++;
43446 current->total_link_count++;
43447 nd->depth++;
43448 @@ -1016,11 +1024,18 @@ return_reval:
43449 break;
43450 }
43451 return_base:
43452 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
43453 + path_put(&nd->path);
43454 + return -ENOENT;
43455 + }
43456 return 0;
43457 out_dput:
43458 path_put_conditional(&next, nd);
43459 break;
43460 }
43461 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
43462 + err = -ENOENT;
43463 +
43464 path_put(&nd->path);
43465 return_err:
43466 return err;
43467 @@ -1091,13 +1106,20 @@ static int do_path_lookup(int dfd, const
43468 int retval = path_init(dfd, name, flags, nd);
43469 if (!retval)
43470 retval = path_walk(name, nd);
43471 - if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
43472 - nd->path.dentry->d_inode))
43473 - audit_inode(name, nd->path.dentry);
43474 +
43475 + if (likely(!retval)) {
43476 + if (nd->path.dentry && nd->path.dentry->d_inode) {
43477 + if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
43478 + retval = -ENOENT;
43479 + if (!audit_dummy_context())
43480 + audit_inode(name, nd->path.dentry);
43481 + }
43482 + }
43483 if (nd->root.mnt) {
43484 path_put(&nd->root);
43485 nd->root.mnt = NULL;
43486 }
43487 +
43488 return retval;
43489 }
43490
43491 @@ -1576,6 +1598,20 @@ int may_open(struct path *path, int acc_
43492 if (error)
43493 goto err_out;
43494
43495 +
43496 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
43497 + error = -EPERM;
43498 + goto err_out;
43499 + }
43500 + if (gr_handle_rawio(inode)) {
43501 + error = -EPERM;
43502 + goto err_out;
43503 + }
43504 + if (!gr_acl_handle_open(dentry, path->mnt, flag)) {
43505 + error = -EACCES;
43506 + goto err_out;
43507 + }
43508 +
43509 if (flag & O_TRUNC) {
43510 error = get_write_access(inode);
43511 if (error)
43512 @@ -1621,12 +1657,19 @@ static int __open_namei_create(struct na
43513 int error;
43514 struct dentry *dir = nd->path.dentry;
43515
43516 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, nd->path.mnt, flag, mode)) {
43517 + error = -EACCES;
43518 + goto out_unlock;
43519 + }
43520 +
43521 if (!IS_POSIXACL(dir->d_inode))
43522 mode &= ~current_umask();
43523 error = security_path_mknod(&nd->path, path->dentry, mode, 0);
43524 if (error)
43525 goto out_unlock;
43526 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
43527 + if (!error)
43528 + gr_handle_create(path->dentry, nd->path.mnt);
43529 out_unlock:
43530 mutex_unlock(&dir->d_inode->i_mutex);
43531 dput(nd->path.dentry);
43532 @@ -1709,6 +1752,22 @@ struct file *do_filp_open(int dfd, const
43533 &nd, flag);
43534 if (error)
43535 return ERR_PTR(error);
43536 +
43537 + if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
43538 + error = -EPERM;
43539 + goto exit;
43540 + }
43541 +
43542 + if (gr_handle_rawio(nd.path.dentry->d_inode)) {
43543 + error = -EPERM;
43544 + goto exit;
43545 + }
43546 +
43547 + if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, flag)) {
43548 + error = -EACCES;
43549 + goto exit;
43550 + }
43551 +
43552 goto ok;
43553 }
43554
43555 @@ -1795,6 +1854,14 @@ do_last:
43556 /*
43557 * It already exists.
43558 */
43559 +
43560 + /* only check if O_CREAT is specified, all other checks need
43561 + to go into may_open */
43562 + if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
43563 + error = -EACCES;
43564 + goto exit_mutex_unlock;
43565 + }
43566 +
43567 mutex_unlock(&dir->d_inode->i_mutex);
43568 audit_inode(pathname, path.dentry);
43569
43570 @@ -1887,6 +1954,13 @@ do_link:
43571 error = security_inode_follow_link(path.dentry, &nd);
43572 if (error)
43573 goto exit_dput;
43574 +
43575 + if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
43576 + path.dentry, nd.path.mnt)) {
43577 + error = -EACCES;
43578 + goto exit_dput;
43579 + }
43580 +
43581 error = __do_follow_link(&path, &nd);
43582 if (error) {
43583 /* Does someone understand code flow here? Or it is only
43584 @@ -2061,6 +2135,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
43585 error = may_mknod(mode);
43586 if (error)
43587 goto out_dput;
43588 +
43589 + if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
43590 + error = -EPERM;
43591 + goto out_dput;
43592 + }
43593 +
43594 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
43595 + error = -EACCES;
43596 + goto out_dput;
43597 + }
43598 +
43599 error = mnt_want_write(nd.path.mnt);
43600 if (error)
43601 goto out_dput;
43602 @@ -2081,6 +2166,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
43603 }
43604 out_drop_write:
43605 mnt_drop_write(nd.path.mnt);
43606 +
43607 + if (!error)
43608 + gr_handle_create(dentry, nd.path.mnt);
43609 out_dput:
43610 dput(dentry);
43611 out_unlock:
43612 @@ -2134,6 +2222,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
43613 if (IS_ERR(dentry))
43614 goto out_unlock;
43615
43616 + if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
43617 + error = -EACCES;
43618 + goto out_dput;
43619 + }
43620 +
43621 if (!IS_POSIXACL(nd.path.dentry->d_inode))
43622 mode &= ~current_umask();
43623 error = mnt_want_write(nd.path.mnt);
43624 @@ -2145,6 +2238,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
43625 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
43626 out_drop_write:
43627 mnt_drop_write(nd.path.mnt);
43628 +
43629 + if (!error)
43630 + gr_handle_create(dentry, nd.path.mnt);
43631 +
43632 out_dput:
43633 dput(dentry);
43634 out_unlock:
43635 @@ -2226,6 +2323,8 @@ static long do_rmdir(int dfd, const char
43636 char * name;
43637 struct dentry *dentry;
43638 struct nameidata nd;
43639 + ino_t saved_ino = 0;
43640 + dev_t saved_dev = 0;
43641
43642 error = user_path_parent(dfd, pathname, &nd, &name);
43643 if (error)
43644 @@ -2250,6 +2349,19 @@ static long do_rmdir(int dfd, const char
43645 error = PTR_ERR(dentry);
43646 if (IS_ERR(dentry))
43647 goto exit2;
43648 +
43649 + if (dentry->d_inode != NULL) {
43650 + if (dentry->d_inode->i_nlink <= 1) {
43651 + saved_ino = dentry->d_inode->i_ino;
43652 + saved_dev = gr_get_dev_from_dentry(dentry);
43653 + }
43654 +
43655 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
43656 + error = -EACCES;
43657 + goto exit3;
43658 + }
43659 + }
43660 +
43661 error = mnt_want_write(nd.path.mnt);
43662 if (error)
43663 goto exit3;
43664 @@ -2257,6 +2369,8 @@ static long do_rmdir(int dfd, const char
43665 if (error)
43666 goto exit4;
43667 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
43668 + if (!error && (saved_dev || saved_ino))
43669 + gr_handle_delete(saved_ino, saved_dev);
43670 exit4:
43671 mnt_drop_write(nd.path.mnt);
43672 exit3:
43673 @@ -2318,6 +2432,8 @@ static long do_unlinkat(int dfd, const c
43674 struct dentry *dentry;
43675 struct nameidata nd;
43676 struct inode *inode = NULL;
43677 + ino_t saved_ino = 0;
43678 + dev_t saved_dev = 0;
43679
43680 error = user_path_parent(dfd, pathname, &nd, &name);
43681 if (error)
43682 @@ -2337,8 +2453,19 @@ static long do_unlinkat(int dfd, const c
43683 if (nd.last.name[nd.last.len])
43684 goto slashes;
43685 inode = dentry->d_inode;
43686 - if (inode)
43687 + if (inode) {
43688 + if (inode->i_nlink <= 1) {
43689 + saved_ino = inode->i_ino;
43690 + saved_dev = gr_get_dev_from_dentry(dentry);
43691 + }
43692 +
43693 atomic_inc(&inode->i_count);
43694 +
43695 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
43696 + error = -EACCES;
43697 + goto exit2;
43698 + }
43699 + }
43700 error = mnt_want_write(nd.path.mnt);
43701 if (error)
43702 goto exit2;
43703 @@ -2346,6 +2473,8 @@ static long do_unlinkat(int dfd, const c
43704 if (error)
43705 goto exit3;
43706 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
43707 + if (!error && (saved_ino || saved_dev))
43708 + gr_handle_delete(saved_ino, saved_dev);
43709 exit3:
43710 mnt_drop_write(nd.path.mnt);
43711 exit2:
43712 @@ -2424,6 +2553,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
43713 if (IS_ERR(dentry))
43714 goto out_unlock;
43715
43716 + if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
43717 + error = -EACCES;
43718 + goto out_dput;
43719 + }
43720 +
43721 error = mnt_want_write(nd.path.mnt);
43722 if (error)
43723 goto out_dput;
43724 @@ -2431,6 +2565,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
43725 if (error)
43726 goto out_drop_write;
43727 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
43728 + if (!error)
43729 + gr_handle_create(dentry, nd.path.mnt);
43730 out_drop_write:
43731 mnt_drop_write(nd.path.mnt);
43732 out_dput:
43733 @@ -2524,6 +2660,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
43734 error = PTR_ERR(new_dentry);
43735 if (IS_ERR(new_dentry))
43736 goto out_unlock;
43737 +
43738 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
43739 + old_path.dentry->d_inode,
43740 + old_path.dentry->d_inode->i_mode, to)) {
43741 + error = -EACCES;
43742 + goto out_dput;
43743 + }
43744 +
43745 + if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
43746 + old_path.dentry, old_path.mnt, to)) {
43747 + error = -EACCES;
43748 + goto out_dput;
43749 + }
43750 +
43751 error = mnt_want_write(nd.path.mnt);
43752 if (error)
43753 goto out_dput;
43754 @@ -2531,6 +2681,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
43755 if (error)
43756 goto out_drop_write;
43757 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
43758 + if (!error)
43759 + gr_handle_create(new_dentry, nd.path.mnt);
43760 out_drop_write:
43761 mnt_drop_write(nd.path.mnt);
43762 out_dput:
43763 @@ -2708,6 +2860,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
43764 char *to;
43765 int error;
43766
43767 + pax_track_stack();
43768 +
43769 error = user_path_parent(olddfd, oldname, &oldnd, &from);
43770 if (error)
43771 goto exit;
43772 @@ -2764,6 +2918,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
43773 if (new_dentry == trap)
43774 goto exit5;
43775
43776 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
43777 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
43778 + to);
43779 + if (error)
43780 + goto exit5;
43781 +
43782 error = mnt_want_write(oldnd.path.mnt);
43783 if (error)
43784 goto exit5;
43785 @@ -2773,6 +2933,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
43786 goto exit6;
43787 error = vfs_rename(old_dir->d_inode, old_dentry,
43788 new_dir->d_inode, new_dentry);
43789 + if (!error)
43790 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
43791 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
43792 exit6:
43793 mnt_drop_write(oldnd.path.mnt);
43794 exit5:
43795 @@ -2798,6 +2961,8 @@ SYSCALL_DEFINE2(rename, const char __use
43796
43797 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
43798 {
43799 + char tmpbuf[64];
43800 + const char *newlink;
43801 int len;
43802
43803 len = PTR_ERR(link);
43804 @@ -2807,7 +2972,14 @@ int vfs_readlink(struct dentry *dentry,
43805 len = strlen(link);
43806 if (len > (unsigned) buflen)
43807 len = buflen;
43808 - if (copy_to_user(buffer, link, len))
43809 +
43810 + if (len < sizeof(tmpbuf)) {
43811 + memcpy(tmpbuf, link, len);
43812 + newlink = tmpbuf;
43813 + } else
43814 + newlink = link;
43815 +
43816 + if (copy_to_user(buffer, newlink, len))
43817 len = -EFAULT;
43818 out:
43819 return len;
43820 diff -urNp linux-2.6.32.45/fs/namespace.c linux-2.6.32.45/fs/namespace.c
43821 --- linux-2.6.32.45/fs/namespace.c 2011-03-27 14:31:47.000000000 -0400
43822 +++ linux-2.6.32.45/fs/namespace.c 2011-04-17 15:56:46.000000000 -0400
43823 @@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mn
43824 if (!(sb->s_flags & MS_RDONLY))
43825 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
43826 up_write(&sb->s_umount);
43827 +
43828 + gr_log_remount(mnt->mnt_devname, retval);
43829 +
43830 return retval;
43831 }
43832
43833 @@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mn
43834 security_sb_umount_busy(mnt);
43835 up_write(&namespace_sem);
43836 release_mounts(&umount_list);
43837 +
43838 + gr_log_unmount(mnt->mnt_devname, retval);
43839 +
43840 return retval;
43841 }
43842
43843 @@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_
43844 if (retval)
43845 goto dput_out;
43846
43847 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
43848 + retval = -EPERM;
43849 + goto dput_out;
43850 + }
43851 +
43852 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
43853 + retval = -EPERM;
43854 + goto dput_out;
43855 + }
43856 +
43857 if (flags & MS_REMOUNT)
43858 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
43859 data_page);
43860 @@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_
43861 dev_name, data_page);
43862 dput_out:
43863 path_put(&path);
43864 +
43865 + gr_log_mount(dev_name, dir_name, retval);
43866 +
43867 return retval;
43868 }
43869
43870 @@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char _
43871 goto out1;
43872 }
43873
43874 + if (gr_handle_chroot_pivot()) {
43875 + error = -EPERM;
43876 + path_put(&old);
43877 + goto out1;
43878 + }
43879 +
43880 read_lock(&current->fs->lock);
43881 root = current->fs->root;
43882 path_get(&current->fs->root);
43883 diff -urNp linux-2.6.32.45/fs/ncpfs/dir.c linux-2.6.32.45/fs/ncpfs/dir.c
43884 --- linux-2.6.32.45/fs/ncpfs/dir.c 2011-03-27 14:31:47.000000000 -0400
43885 +++ linux-2.6.32.45/fs/ncpfs/dir.c 2011-05-16 21:46:57.000000000 -0400
43886 @@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *den
43887 int res, val = 0, len;
43888 __u8 __name[NCP_MAXPATHLEN + 1];
43889
43890 + pax_track_stack();
43891 +
43892 parent = dget_parent(dentry);
43893 dir = parent->d_inode;
43894
43895 @@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct
43896 int error, res, len;
43897 __u8 __name[NCP_MAXPATHLEN + 1];
43898
43899 + pax_track_stack();
43900 +
43901 lock_kernel();
43902 error = -EIO;
43903 if (!ncp_conn_valid(server))
43904 @@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, st
43905 int error, result, len;
43906 int opmode;
43907 __u8 __name[NCP_MAXPATHLEN + 1];
43908 -
43909 +
43910 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
43911 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
43912
43913 + pax_track_stack();
43914 +
43915 error = -EIO;
43916 lock_kernel();
43917 if (!ncp_conn_valid(server))
43918 @@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir,
43919 int error, len;
43920 __u8 __name[NCP_MAXPATHLEN + 1];
43921
43922 + pax_track_stack();
43923 +
43924 DPRINTK("ncp_mkdir: making %s/%s\n",
43925 dentry->d_parent->d_name.name, dentry->d_name.name);
43926
43927 @@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir,
43928 if (!ncp_conn_valid(server))
43929 goto out;
43930
43931 + pax_track_stack();
43932 +
43933 ncp_age_dentry(server, dentry);
43934 len = sizeof(__name);
43935 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
43936 @@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_
43937 int old_len, new_len;
43938 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
43939
43940 + pax_track_stack();
43941 +
43942 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
43943 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
43944 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
43945 diff -urNp linux-2.6.32.45/fs/ncpfs/inode.c linux-2.6.32.45/fs/ncpfs/inode.c
43946 --- linux-2.6.32.45/fs/ncpfs/inode.c 2011-03-27 14:31:47.000000000 -0400
43947 +++ linux-2.6.32.45/fs/ncpfs/inode.c 2011-05-16 21:46:57.000000000 -0400
43948 @@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_b
43949 #endif
43950 struct ncp_entry_info finfo;
43951
43952 + pax_track_stack();
43953 +
43954 data.wdog_pid = NULL;
43955 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
43956 if (!server)
43957 diff -urNp linux-2.6.32.45/fs/nfs/inode.c linux-2.6.32.45/fs/nfs/inode.c
43958 --- linux-2.6.32.45/fs/nfs/inode.c 2011-05-10 22:12:01.000000000 -0400
43959 +++ linux-2.6.32.45/fs/nfs/inode.c 2011-07-06 19:53:33.000000000 -0400
43960 @@ -156,7 +156,7 @@ static void nfs_zap_caches_locked(struct
43961 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
43962 nfsi->attrtimeo_timestamp = jiffies;
43963
43964 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
43965 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
43966 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
43967 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
43968 else
43969 @@ -973,16 +973,16 @@ static int nfs_size_need_update(const st
43970 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
43971 }
43972
43973 -static atomic_long_t nfs_attr_generation_counter;
43974 +static atomic_long_unchecked_t nfs_attr_generation_counter;
43975
43976 static unsigned long nfs_read_attr_generation_counter(void)
43977 {
43978 - return atomic_long_read(&nfs_attr_generation_counter);
43979 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
43980 }
43981
43982 unsigned long nfs_inc_attr_generation_counter(void)
43983 {
43984 - return atomic_long_inc_return(&nfs_attr_generation_counter);
43985 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
43986 }
43987
43988 void nfs_fattr_init(struct nfs_fattr *fattr)
43989 diff -urNp linux-2.6.32.45/fs/nfsd/lockd.c linux-2.6.32.45/fs/nfsd/lockd.c
43990 --- linux-2.6.32.45/fs/nfsd/lockd.c 2011-04-17 17:00:52.000000000 -0400
43991 +++ linux-2.6.32.45/fs/nfsd/lockd.c 2011-04-17 17:03:15.000000000 -0400
43992 @@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
43993 fput(filp);
43994 }
43995
43996 -static struct nlmsvc_binding nfsd_nlm_ops = {
43997 +static const struct nlmsvc_binding nfsd_nlm_ops = {
43998 .fopen = nlm_fopen, /* open file for locking */
43999 .fclose = nlm_fclose, /* close file */
44000 };
44001 diff -urNp linux-2.6.32.45/fs/nfsd/nfs4state.c linux-2.6.32.45/fs/nfsd/nfs4state.c
44002 --- linux-2.6.32.45/fs/nfsd/nfs4state.c 2011-03-27 14:31:47.000000000 -0400
44003 +++ linux-2.6.32.45/fs/nfsd/nfs4state.c 2011-05-16 21:46:57.000000000 -0400
44004 @@ -3457,6 +3457,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
44005 unsigned int cmd;
44006 int err;
44007
44008 + pax_track_stack();
44009 +
44010 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
44011 (long long) lock->lk_offset,
44012 (long long) lock->lk_length);
44013 diff -urNp linux-2.6.32.45/fs/nfsd/nfs4xdr.c linux-2.6.32.45/fs/nfsd/nfs4xdr.c
44014 --- linux-2.6.32.45/fs/nfsd/nfs4xdr.c 2011-03-27 14:31:47.000000000 -0400
44015 +++ linux-2.6.32.45/fs/nfsd/nfs4xdr.c 2011-05-16 21:46:57.000000000 -0400
44016 @@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
44017 struct nfsd4_compoundres *resp = rqstp->rq_resp;
44018 u32 minorversion = resp->cstate.minorversion;
44019
44020 + pax_track_stack();
44021 +
44022 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
44023 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
44024 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
44025 diff -urNp linux-2.6.32.45/fs/nfsd/vfs.c linux-2.6.32.45/fs/nfsd/vfs.c
44026 --- linux-2.6.32.45/fs/nfsd/vfs.c 2011-05-10 22:12:01.000000000 -0400
44027 +++ linux-2.6.32.45/fs/nfsd/vfs.c 2011-05-10 22:12:33.000000000 -0400
44028 @@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
44029 } else {
44030 oldfs = get_fs();
44031 set_fs(KERNEL_DS);
44032 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
44033 + host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
44034 set_fs(oldfs);
44035 }
44036
44037 @@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
44038
44039 /* Write the data. */
44040 oldfs = get_fs(); set_fs(KERNEL_DS);
44041 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
44042 + host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
44043 set_fs(oldfs);
44044 if (host_err < 0)
44045 goto out_nfserr;
44046 @@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
44047 */
44048
44049 oldfs = get_fs(); set_fs(KERNEL_DS);
44050 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
44051 + host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
44052 set_fs(oldfs);
44053
44054 if (host_err < 0)
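
[Illustrative note, not part of the patch] The nfsd hunks above only add __force to casts that were already crossing the kernel/user address-space boundary under set_fs(KERNEL_DS); the generated code is unchanged, the annotation just tells sparse the cast is intentional. A hedged sketch of the idiom; demo_kernel_readv is a hypothetical helper, not something the patch adds:

#include <linux/fs.h>
#include <linux/uio.h>
#include <asm/uaccess.h>

static ssize_t demo_kernel_readv(struct file *file, struct iovec *vec,
				 unsigned long vlen, loff_t *pos)
{
	mm_segment_t oldfs = get_fs();
	ssize_t ret;

	set_fs(KERNEL_DS);		/* vec points into kernel memory */
	ret = vfs_readv(file, (__force struct iovec __user *)vec, vlen, pos);
	set_fs(oldfs);
	return ret;
}
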
44055 diff -urNp linux-2.6.32.45/fs/nilfs2/ioctl.c linux-2.6.32.45/fs/nilfs2/ioctl.c
44056 --- linux-2.6.32.45/fs/nilfs2/ioctl.c 2011-03-27 14:31:47.000000000 -0400
44057 +++ linux-2.6.32.45/fs/nilfs2/ioctl.c 2011-05-04 17:56:28.000000000 -0400
44058 @@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(st
44059 unsigned int cmd, void __user *argp)
44060 {
44061 struct nilfs_argv argv[5];
44062 - const static size_t argsz[5] = {
44063 + static const size_t argsz[5] = {
44064 sizeof(struct nilfs_vdesc),
44065 sizeof(struct nilfs_period),
44066 sizeof(__u64),
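
[Illustrative note, not part of the patch] The nilfs2 change is purely declarative: `const static` and `static const` mean the same thing in C, but putting the storage-class specifier first is the conventional order and avoids gcc's "'static' is not at beginning of declaration" warning. A standalone example of the corrected form; demo_argsz is an arbitrary name:

#include <stddef.h>

/* storage class first, then qualifiers, then the type */
static const size_t demo_argsz[3] = {
	sizeof(int),
	sizeof(long),
	sizeof(void *),
};
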
44067 diff -urNp linux-2.6.32.45/fs/notify/dnotify/dnotify.c linux-2.6.32.45/fs/notify/dnotify/dnotify.c
44068 --- linux-2.6.32.45/fs/notify/dnotify/dnotify.c 2011-03-27 14:31:47.000000000 -0400
44069 +++ linux-2.6.32.45/fs/notify/dnotify/dnotify.c 2011-04-17 15:56:46.000000000 -0400
44070 @@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsn
44071 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
44072 }
44073
44074 -static struct fsnotify_ops dnotify_fsnotify_ops = {
44075 +static const struct fsnotify_ops dnotify_fsnotify_ops = {
44076 .handle_event = dnotify_handle_event,
44077 .should_send_event = dnotify_should_send_event,
44078 .free_group_priv = NULL,
44079 diff -urNp linux-2.6.32.45/fs/notify/notification.c linux-2.6.32.45/fs/notify/notification.c
44080 --- linux-2.6.32.45/fs/notify/notification.c 2011-03-27 14:31:47.000000000 -0400
44081 +++ linux-2.6.32.45/fs/notify/notification.c 2011-05-04 17:56:28.000000000 -0400
44082 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
44083 * get set to 0 so it will never get 'freed'
44084 */
44085 static struct fsnotify_event q_overflow_event;
44086 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44087 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44088
44089 /**
44090 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
44091 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
44092 */
44093 u32 fsnotify_get_cookie(void)
44094 {
44095 - return atomic_inc_return(&fsnotify_sync_cookie);
44096 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
44097 }
44098 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
44099
44100 diff -urNp linux-2.6.32.45/fs/ntfs/dir.c linux-2.6.32.45/fs/ntfs/dir.c
44101 --- linux-2.6.32.45/fs/ntfs/dir.c 2011-03-27 14:31:47.000000000 -0400
44102 +++ linux-2.6.32.45/fs/ntfs/dir.c 2011-04-17 15:56:46.000000000 -0400
44103 @@ -1328,7 +1328,7 @@ find_next_index_buffer:
44104 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
44105 ~(s64)(ndir->itype.index.block_size - 1)));
44106 /* Bounds checks. */
44107 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44108 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44109 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
44110 "inode 0x%lx or driver bug.", vdir->i_ino);
44111 goto err_out;
44112 diff -urNp linux-2.6.32.45/fs/ntfs/file.c linux-2.6.32.45/fs/ntfs/file.c
44113 --- linux-2.6.32.45/fs/ntfs/file.c 2011-03-27 14:31:47.000000000 -0400
44114 +++ linux-2.6.32.45/fs/ntfs/file.c 2011-04-17 15:56:46.000000000 -0400
44115 @@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_
44116 #endif /* NTFS_RW */
44117 };
44118
44119 -const struct file_operations ntfs_empty_file_ops = {};
44120 +const struct file_operations ntfs_empty_file_ops __read_only;
44121
44122 -const struct inode_operations ntfs_empty_inode_ops = {};
44123 +const struct inode_operations ntfs_empty_inode_ops __read_only;
44124 diff -urNp linux-2.6.32.45/fs/ocfs2/cluster/masklog.c linux-2.6.32.45/fs/ocfs2/cluster/masklog.c
44125 --- linux-2.6.32.45/fs/ocfs2/cluster/masklog.c 2011-03-27 14:31:47.000000000 -0400
44126 +++ linux-2.6.32.45/fs/ocfs2/cluster/masklog.c 2011-04-17 15:56:46.000000000 -0400
44127 @@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject
44128 return mlog_mask_store(mlog_attr->mask, buf, count);
44129 }
44130
44131 -static struct sysfs_ops mlog_attr_ops = {
44132 +static const struct sysfs_ops mlog_attr_ops = {
44133 .show = mlog_show,
44134 .store = mlog_store,
44135 };
44136 diff -urNp linux-2.6.32.45/fs/ocfs2/localalloc.c linux-2.6.32.45/fs/ocfs2/localalloc.c
44137 --- linux-2.6.32.45/fs/ocfs2/localalloc.c 2011-03-27 14:31:47.000000000 -0400
44138 +++ linux-2.6.32.45/fs/ocfs2/localalloc.c 2011-04-17 15:56:46.000000000 -0400
44139 @@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_windo
44140 goto bail;
44141 }
44142
44143 - atomic_inc(&osb->alloc_stats.moves);
44144 + atomic_inc_unchecked(&osb->alloc_stats.moves);
44145
44146 status = 0;
44147 bail:
44148 diff -urNp linux-2.6.32.45/fs/ocfs2/namei.c linux-2.6.32.45/fs/ocfs2/namei.c
44149 --- linux-2.6.32.45/fs/ocfs2/namei.c 2011-03-27 14:31:47.000000000 -0400
44150 +++ linux-2.6.32.45/fs/ocfs2/namei.c 2011-05-16 21:46:57.000000000 -0400
44151 @@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *ol
44152 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
44153 struct ocfs2_dir_lookup_result target_insert = { NULL, };
44154
44155 + pax_track_stack();
44156 +
44157 /* At some point it might be nice to break this function up a
44158 * bit. */
44159
44160 diff -urNp linux-2.6.32.45/fs/ocfs2/ocfs2.h linux-2.6.32.45/fs/ocfs2/ocfs2.h
44161 --- linux-2.6.32.45/fs/ocfs2/ocfs2.h 2011-03-27 14:31:47.000000000 -0400
44162 +++ linux-2.6.32.45/fs/ocfs2/ocfs2.h 2011-04-17 15:56:46.000000000 -0400
44163 @@ -217,11 +217,11 @@ enum ocfs2_vol_state
44164
44165 struct ocfs2_alloc_stats
44166 {
44167 - atomic_t moves;
44168 - atomic_t local_data;
44169 - atomic_t bitmap_data;
44170 - atomic_t bg_allocs;
44171 - atomic_t bg_extends;
44172 + atomic_unchecked_t moves;
44173 + atomic_unchecked_t local_data;
44174 + atomic_unchecked_t bitmap_data;
44175 + atomic_unchecked_t bg_allocs;
44176 + atomic_unchecked_t bg_extends;
44177 };
44178
44179 enum ocfs2_local_alloc_state
44180 diff -urNp linux-2.6.32.45/fs/ocfs2/suballoc.c linux-2.6.32.45/fs/ocfs2/suballoc.c
44181 --- linux-2.6.32.45/fs/ocfs2/suballoc.c 2011-03-27 14:31:47.000000000 -0400
44182 +++ linux-2.6.32.45/fs/ocfs2/suballoc.c 2011-04-17 15:56:46.000000000 -0400
44183 @@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(s
44184 mlog_errno(status);
44185 goto bail;
44186 }
44187 - atomic_inc(&osb->alloc_stats.bg_extends);
44188 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
44189
44190 /* You should never ask for this much metadata */
44191 BUG_ON(bits_wanted >
44192 @@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_su
44193 mlog_errno(status);
44194 goto bail;
44195 }
44196 - atomic_inc(&osb->alloc_stats.bg_allocs);
44197 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
44198
44199 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
44200 ac->ac_bits_given += (*num_bits);
44201 @@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_s
44202 mlog_errno(status);
44203 goto bail;
44204 }
44205 - atomic_inc(&osb->alloc_stats.bg_allocs);
44206 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
44207
44208 BUG_ON(num_bits != 1);
44209
44210 @@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
44211 cluster_start,
44212 num_clusters);
44213 if (!status)
44214 - atomic_inc(&osb->alloc_stats.local_data);
44215 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
44216 } else {
44217 if (min_clusters > (osb->bitmap_cpg - 1)) {
44218 /* The only paths asking for contiguousness
44219 @@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
44220 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
44221 bg_blkno,
44222 bg_bit_off);
44223 - atomic_inc(&osb->alloc_stats.bitmap_data);
44224 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
44225 }
44226 }
44227 if (status < 0) {
44228 diff -urNp linux-2.6.32.45/fs/ocfs2/super.c linux-2.6.32.45/fs/ocfs2/super.c
44229 --- linux-2.6.32.45/fs/ocfs2/super.c 2011-03-27 14:31:47.000000000 -0400
44230 +++ linux-2.6.32.45/fs/ocfs2/super.c 2011-04-17 15:56:46.000000000 -0400
44231 @@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
44232 "%10s => GlobalAllocs: %d LocalAllocs: %d "
44233 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
44234 "Stats",
44235 - atomic_read(&osb->alloc_stats.bitmap_data),
44236 - atomic_read(&osb->alloc_stats.local_data),
44237 - atomic_read(&osb->alloc_stats.bg_allocs),
44238 - atomic_read(&osb->alloc_stats.moves),
44239 - atomic_read(&osb->alloc_stats.bg_extends));
44240 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
44241 + atomic_read_unchecked(&osb->alloc_stats.local_data),
44242 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
44243 + atomic_read_unchecked(&osb->alloc_stats.moves),
44244 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
44245
44246 out += snprintf(buf + out, len - out,
44247 "%10s => State: %u Descriptor: %llu Size: %u bits "
44248 @@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct
44249 spin_lock_init(&osb->osb_xattr_lock);
44250 ocfs2_init_inode_steal_slot(osb);
44251
44252 - atomic_set(&osb->alloc_stats.moves, 0);
44253 - atomic_set(&osb->alloc_stats.local_data, 0);
44254 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
44255 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
44256 - atomic_set(&osb->alloc_stats.bg_extends, 0);
44257 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
44258 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
44259 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
44260 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
44261 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
44262
44263 /* Copy the blockcheck stats from the superblock probe */
44264 osb->osb_ecc_stats = *stats;
44265 diff -urNp linux-2.6.32.45/fs/open.c linux-2.6.32.45/fs/open.c
44266 --- linux-2.6.32.45/fs/open.c 2011-03-27 14:31:47.000000000 -0400
44267 +++ linux-2.6.32.45/fs/open.c 2011-04-17 15:56:46.000000000 -0400
44268 @@ -275,6 +275,10 @@ static long do_sys_truncate(const char _
44269 error = locks_verify_truncate(inode, NULL, length);
44270 if (!error)
44271 error = security_path_truncate(&path, length, 0);
44272 +
44273 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
44274 + error = -EACCES;
44275 +
44276 if (!error) {
44277 vfs_dq_init(inode);
44278 error = do_truncate(path.dentry, length, 0, NULL);
44279 @@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
44280 if (__mnt_is_readonly(path.mnt))
44281 res = -EROFS;
44282
44283 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
44284 + res = -EACCES;
44285 +
44286 out_path_release:
44287 path_put(&path);
44288 out:
44289 @@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user
44290 if (error)
44291 goto dput_and_out;
44292
44293 + gr_log_chdir(path.dentry, path.mnt);
44294 +
44295 set_fs_pwd(current->fs, &path);
44296
44297 dput_and_out:
44298 @@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
44299 goto out_putf;
44300
44301 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
44302 +
44303 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
44304 + error = -EPERM;
44305 +
44306 + if (!error)
44307 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
44308 +
44309 if (!error)
44310 set_fs_pwd(current->fs, &file->f_path);
44311 out_putf:
44312 @@ -588,7 +604,18 @@ SYSCALL_DEFINE1(chroot, const char __use
44313 if (!capable(CAP_SYS_CHROOT))
44314 goto dput_and_out;
44315
44316 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
44317 + goto dput_and_out;
44318 +
44319 + if (gr_handle_chroot_caps(&path)) {
44320 + error = -ENOMEM;
44321 + goto dput_and_out;
44322 + }
44323 +
44324 set_fs_root(current->fs, &path);
44325 +
44326 + gr_handle_chroot_chdir(&path);
44327 +
44328 error = 0;
44329 dput_and_out:
44330 path_put(&path);
44331 @@ -616,12 +643,27 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
44332 err = mnt_want_write_file(file);
44333 if (err)
44334 goto out_putf;
44335 +
44336 mutex_lock(&inode->i_mutex);
44337 +
44338 + if (!gr_acl_handle_fchmod(dentry, file->f_path.mnt, mode)) {
44339 + err = -EACCES;
44340 + goto out_unlock;
44341 + }
44342 +
44343 if (mode == (mode_t) -1)
44344 mode = inode->i_mode;
44345 +
44346 + if (gr_handle_chroot_chmod(dentry, file->f_path.mnt, mode)) {
44347 + err = -EPERM;
44348 + goto out_unlock;
44349 + }
44350 +
44351 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
44352 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
44353 err = notify_change(dentry, &newattrs);
44354 +
44355 +out_unlock:
44356 mutex_unlock(&inode->i_mutex);
44357 mnt_drop_write(file->f_path.mnt);
44358 out_putf:
44359 @@ -645,12 +687,27 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
44360 error = mnt_want_write(path.mnt);
44361 if (error)
44362 goto dput_and_out;
44363 +
44364 mutex_lock(&inode->i_mutex);
44365 +
44366 + if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
44367 + error = -EACCES;
44368 + goto out_unlock;
44369 + }
44370 +
44371 if (mode == (mode_t) -1)
44372 mode = inode->i_mode;
44373 +
44374 + if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
44375 + error = -EACCES;
44376 + goto out_unlock;
44377 + }
44378 +
44379 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
44380 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
44381 error = notify_change(path.dentry, &newattrs);
44382 +
44383 +out_unlock:
44384 mutex_unlock(&inode->i_mutex);
44385 mnt_drop_write(path.mnt);
44386 dput_and_out:
44387 @@ -664,12 +721,15 @@ SYSCALL_DEFINE2(chmod, const char __user
44388 return sys_fchmodat(AT_FDCWD, filename, mode);
44389 }
44390
44391 -static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
44392 +static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
44393 {
44394 struct inode *inode = dentry->d_inode;
44395 int error;
44396 struct iattr newattrs;
44397
44398 + if (!gr_acl_handle_chown(dentry, mnt))
44399 + return -EACCES;
44400 +
44401 newattrs.ia_valid = ATTR_CTIME;
44402 if (user != (uid_t) -1) {
44403 newattrs.ia_valid |= ATTR_UID;
44404 @@ -700,7 +760,7 @@ SYSCALL_DEFINE3(chown, const char __user
44405 error = mnt_want_write(path.mnt);
44406 if (error)
44407 goto out_release;
44408 - error = chown_common(path.dentry, user, group);
44409 + error = chown_common(path.dentry, user, group, path.mnt);
44410 mnt_drop_write(path.mnt);
44411 out_release:
44412 path_put(&path);
44413 @@ -725,7 +785,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, cons
44414 error = mnt_want_write(path.mnt);
44415 if (error)
44416 goto out_release;
44417 - error = chown_common(path.dentry, user, group);
44418 + error = chown_common(path.dentry, user, group, path.mnt);
44419 mnt_drop_write(path.mnt);
44420 out_release:
44421 path_put(&path);
44422 @@ -744,7 +804,7 @@ SYSCALL_DEFINE3(lchown, const char __use
44423 error = mnt_want_write(path.mnt);
44424 if (error)
44425 goto out_release;
44426 - error = chown_common(path.dentry, user, group);
44427 + error = chown_common(path.dentry, user, group, path.mnt);
44428 mnt_drop_write(path.mnt);
44429 out_release:
44430 path_put(&path);
44431 @@ -767,7 +827,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd
44432 goto out_fput;
44433 dentry = file->f_path.dentry;
44434 audit_inode(NULL, dentry);
44435 - error = chown_common(dentry, user, group);
44436 + error = chown_common(dentry, user, group, file->f_path.mnt);
44437 mnt_drop_write(file->f_path.mnt);
44438 out_fput:
44439 fput(file);
44440 @@ -1036,7 +1096,10 @@ long do_sys_open(int dfd, const char __u
44441 if (!IS_ERR(tmp)) {
44442 fd = get_unused_fd_flags(flags);
44443 if (fd >= 0) {
44444 - struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
44445 + struct file *f;
44446 + /* don't allow to be set by userland */
44447 + flags &= ~FMODE_GREXEC;
44448 + f = do_filp_open(dfd, tmp, flags, mode, 0);
44449 if (IS_ERR(f)) {
44450 put_unused_fd(fd);
44451 fd = PTR_ERR(f);
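
[Illustrative note, not part of the patch] The fs/open.c hunks wire grsecurity checks into truncate, access, chdir, chroot, chmod and chown; when a gr_* hook refuses an operation the syscall fails with an ordinary -EACCES or -EPERM, as the fchmod/fchmodat hunks show. A small runnable userspace demo of what a caller sees; /tmp/demo is just an example path:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>

int main(void)
{
	/* a chmod denied by RBAC or the chroot checks looks like a normal
	 * permission failure, so the usual errno handling is enough */
	if (chmod("/tmp/demo", 04755) != 0)
		fprintf(stderr, "chmod: %s\n", strerror(errno));
	return 0;
}
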
44452 diff -urNp linux-2.6.32.45/fs/partitions/ldm.c linux-2.6.32.45/fs/partitions/ldm.c
44453 --- linux-2.6.32.45/fs/partitions/ldm.c 2011-06-25 12:55:34.000000000 -0400
44454 +++ linux-2.6.32.45/fs/partitions/ldm.c 2011-06-25 12:56:37.000000000 -0400
44455 @@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
44456 ldm_error ("A VBLK claims to have %d parts.", num);
44457 return false;
44458 }
44459 +
44460 if (rec >= num) {
44461 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
44462 return false;
44463 @@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
44464 goto found;
44465 }
44466
44467 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
44468 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
44469 if (!f) {
44470 ldm_crit ("Out of memory.");
44471 return false;
44472 diff -urNp linux-2.6.32.45/fs/partitions/mac.c linux-2.6.32.45/fs/partitions/mac.c
44473 --- linux-2.6.32.45/fs/partitions/mac.c 2011-03-27 14:31:47.000000000 -0400
44474 +++ linux-2.6.32.45/fs/partitions/mac.c 2011-04-17 15:56:46.000000000 -0400
44475 @@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitio
44476 return 0; /* not a MacOS disk */
44477 }
44478 blocks_in_map = be32_to_cpu(part->map_count);
44479 + printk(" [mac]");
44480 if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
44481 put_dev_sector(sect);
44482 return 0;
44483 }
44484 - printk(" [mac]");
44485 for (slot = 1; slot <= blocks_in_map; ++slot) {
44486 int pos = slot * secsize;
44487 put_dev_sector(sect);
44488 diff -urNp linux-2.6.32.45/fs/pipe.c linux-2.6.32.45/fs/pipe.c
44489 --- linux-2.6.32.45/fs/pipe.c 2011-03-27 14:31:47.000000000 -0400
44490 +++ linux-2.6.32.45/fs/pipe.c 2011-04-23 13:37:17.000000000 -0400
44491 @@ -401,9 +401,9 @@ redo:
44492 }
44493 if (bufs) /* More to do? */
44494 continue;
44495 - if (!pipe->writers)
44496 + if (!atomic_read(&pipe->writers))
44497 break;
44498 - if (!pipe->waiting_writers) {
44499 + if (!atomic_read(&pipe->waiting_writers)) {
44500 /* syscall merging: Usually we must not sleep
44501 * if O_NONBLOCK is set, or if we got some data.
44502 * But if a writer sleeps in kernel space, then
44503 @@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const str
44504 mutex_lock(&inode->i_mutex);
44505 pipe = inode->i_pipe;
44506
44507 - if (!pipe->readers) {
44508 + if (!atomic_read(&pipe->readers)) {
44509 send_sig(SIGPIPE, current, 0);
44510 ret = -EPIPE;
44511 goto out;
44512 @@ -511,7 +511,7 @@ redo1:
44513 for (;;) {
44514 int bufs;
44515
44516 - if (!pipe->readers) {
44517 + if (!atomic_read(&pipe->readers)) {
44518 send_sig(SIGPIPE, current, 0);
44519 if (!ret)
44520 ret = -EPIPE;
44521 @@ -597,9 +597,9 @@ redo2:
44522 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
44523 do_wakeup = 0;
44524 }
44525 - pipe->waiting_writers++;
44526 + atomic_inc(&pipe->waiting_writers);
44527 pipe_wait(pipe);
44528 - pipe->waiting_writers--;
44529 + atomic_dec(&pipe->waiting_writers);
44530 }
44531 out:
44532 mutex_unlock(&inode->i_mutex);
44533 @@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table
44534 mask = 0;
44535 if (filp->f_mode & FMODE_READ) {
44536 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
44537 - if (!pipe->writers && filp->f_version != pipe->w_counter)
44538 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
44539 mask |= POLLHUP;
44540 }
44541
44542 @@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table
44543 * Most Unices do not set POLLERR for FIFOs but on Linux they
44544 * behave exactly like pipes for poll().
44545 */
44546 - if (!pipe->readers)
44547 + if (!atomic_read(&pipe->readers))
44548 mask |= POLLERR;
44549 }
44550
44551 @@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int de
44552
44553 mutex_lock(&inode->i_mutex);
44554 pipe = inode->i_pipe;
44555 - pipe->readers -= decr;
44556 - pipe->writers -= decw;
44557 + atomic_sub(decr, &pipe->readers);
44558 + atomic_sub(decw, &pipe->writers);
44559
44560 - if (!pipe->readers && !pipe->writers) {
44561 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
44562 free_pipe_info(inode);
44563 } else {
44564 wake_up_interruptible_sync(&pipe->wait);
44565 @@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, stru
44566
44567 if (inode->i_pipe) {
44568 ret = 0;
44569 - inode->i_pipe->readers++;
44570 + atomic_inc(&inode->i_pipe->readers);
44571 }
44572
44573 mutex_unlock(&inode->i_mutex);
44574 @@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, str
44575
44576 if (inode->i_pipe) {
44577 ret = 0;
44578 - inode->i_pipe->writers++;
44579 + atomic_inc(&inode->i_pipe->writers);
44580 }
44581
44582 mutex_unlock(&inode->i_mutex);
44583 @@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, stru
44584 if (inode->i_pipe) {
44585 ret = 0;
44586 if (filp->f_mode & FMODE_READ)
44587 - inode->i_pipe->readers++;
44588 + atomic_inc(&inode->i_pipe->readers);
44589 if (filp->f_mode & FMODE_WRITE)
44590 - inode->i_pipe->writers++;
44591 + atomic_inc(&inode->i_pipe->writers);
44592 }
44593
44594 mutex_unlock(&inode->i_mutex);
44595 @@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
44596 inode->i_pipe = NULL;
44597 }
44598
44599 -static struct vfsmount *pipe_mnt __read_mostly;
44600 +struct vfsmount *pipe_mnt __read_mostly;
44601 static int pipefs_delete_dentry(struct dentry *dentry)
44602 {
44603 /*
44604 @@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(voi
44605 goto fail_iput;
44606 inode->i_pipe = pipe;
44607
44608 - pipe->readers = pipe->writers = 1;
44609 + atomic_set(&pipe->readers, 1);
44610 + atomic_set(&pipe->writers, 1);
44611 inode->i_fop = &rdwr_pipefifo_fops;
44612
44613 /*
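
[Illustrative note, not part of the patch] The pipe hunks are a mechanical int-to-atomic_t conversion of the readers, writers and waiting_writers counters: ++/-- become atomic_inc()/atomic_dec(), -= becomes atomic_sub(), and zero tests go through atomic_read(), which under PaX REFCOUNT also gives these counts overflow detection. A condensed sketch of the pattern; demo_pipe is hypothetical, not the real struct pipe_inode_info:

#include <asm/atomic.h>

struct demo_pipe {
	atomic_t readers;
	atomic_t writers;
};

static void demo_pipe_read_open(struct demo_pipe *pipe)
{
	atomic_inc(&pipe->readers);			/* was: pipe->readers++ */
}

static int demo_pipe_has_writers(struct demo_pipe *pipe)
{
	return atomic_read(&pipe->writers) != 0;	/* was: pipe->writers != 0 */
}
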
44614 diff -urNp linux-2.6.32.45/fs/proc/array.c linux-2.6.32.45/fs/proc/array.c
44615 --- linux-2.6.32.45/fs/proc/array.c 2011-03-27 14:31:47.000000000 -0400
44616 +++ linux-2.6.32.45/fs/proc/array.c 2011-05-16 21:46:57.000000000 -0400
44617 @@ -60,6 +60,7 @@
44618 #include <linux/tty.h>
44619 #include <linux/string.h>
44620 #include <linux/mman.h>
44621 +#include <linux/grsecurity.h>
44622 #include <linux/proc_fs.h>
44623 #include <linux/ioport.h>
44624 #include <linux/uaccess.h>
44625 @@ -321,6 +322,21 @@ static inline void task_context_switch_c
44626 p->nivcsw);
44627 }
44628
44629 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44630 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
44631 +{
44632 + if (p->mm)
44633 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
44634 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
44635 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
44636 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
44637 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
44638 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
44639 + else
44640 + seq_printf(m, "PaX:\t-----\n");
44641 +}
44642 +#endif
44643 +
44644 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
44645 struct pid *pid, struct task_struct *task)
44646 {
44647 @@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m,
44648 task_cap(m, task);
44649 cpuset_task_status_allowed(m, task);
44650 task_context_switch_counts(m, task);
44651 +
44652 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44653 + task_pax(m, task);
44654 +#endif
44655 +
44656 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
44657 + task_grsec_rbac(m, task);
44658 +#endif
44659 +
44660 return 0;
44661 }
44662
44663 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44664 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
44665 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
44666 + _mm->pax_flags & MF_PAX_SEGMEXEC))
44667 +#endif
44668 +
44669 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
44670 struct pid *pid, struct task_struct *task, int whole)
44671 {
44672 @@ -358,9 +389,11 @@ static int do_task_stat(struct seq_file
44673 cputime_t cutime, cstime, utime, stime;
44674 cputime_t cgtime, gtime;
44675 unsigned long rsslim = 0;
44676 - char tcomm[sizeof(task->comm)];
44677 + char tcomm[sizeof(task->comm)] = { 0 };
44678 unsigned long flags;
44679
44680 + pax_track_stack();
44681 +
44682 state = *get_task_state(task);
44683 vsize = eip = esp = 0;
44684 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
44685 @@ -433,6 +466,19 @@ static int do_task_stat(struct seq_file
44686 gtime = task_gtime(task);
44687 }
44688
44689 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44690 + if (PAX_RAND_FLAGS(mm)) {
44691 + eip = 0;
44692 + esp = 0;
44693 + wchan = 0;
44694 + }
44695 +#endif
44696 +#ifdef CONFIG_GRKERNSEC_HIDESYM
44697 + wchan = 0;
44698 + eip =0;
44699 + esp =0;
44700 +#endif
44701 +
44702 /* scale priority and nice values from timeslices to -20..20 */
44703 /* to make it look like a "normal" Unix priority/nice value */
44704 priority = task_prio(task);
44705 @@ -473,9 +519,15 @@ static int do_task_stat(struct seq_file
44706 vsize,
44707 mm ? get_mm_rss(mm) : 0,
44708 rsslim,
44709 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44710 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
44711 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
44712 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
44713 +#else
44714 mm ? (permitted ? mm->start_code : 1) : 0,
44715 mm ? (permitted ? mm->end_code : 1) : 0,
44716 (permitted && mm) ? mm->start_stack : 0,
44717 +#endif
44718 esp,
44719 eip,
44720 /* The signal information here is obsolete.
44721 @@ -528,3 +580,18 @@ int proc_pid_statm(struct seq_file *m, s
44722
44723 return 0;
44724 }
44725 +
44726 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
44727 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
44728 +{
44729 + u32 curr_ip = 0;
44730 + unsigned long flags;
44731 +
44732 + if (lock_task_sighand(task, &flags)) {
44733 + curr_ip = task->signal->curr_ip;
44734 + unlock_task_sighand(task, &flags);
44735 + }
44736 +
44737 + return sprintf(buffer, "%pI4\n", &curr_ip);
44738 +}
44739 +#endif
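
[Illustrative note, not part of the patch] With task_pax() added to proc_pid_status() above, every /proc/<pid>/status gains a "PaX:" line of five flag characters (upper case means the flag is set, lower case clear, dashes for tasks without an mm). A runnable userspace reader, assuming a kernel built with this patch:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/status", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (strncmp(line, "PaX:", 4) == 0)
			fputs(line, stdout);	/* e.g. "PaX:  PeMRs" */
	fclose(f);
	return 0;
}
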
44740 diff -urNp linux-2.6.32.45/fs/proc/base.c linux-2.6.32.45/fs/proc/base.c
44741 --- linux-2.6.32.45/fs/proc/base.c 2011-08-09 18:35:30.000000000 -0400
44742 +++ linux-2.6.32.45/fs/proc/base.c 2011-08-09 18:34:33.000000000 -0400
44743 @@ -102,6 +102,22 @@ struct pid_entry {
44744 union proc_op op;
44745 };
44746
44747 +struct getdents_callback {
44748 + struct linux_dirent __user * current_dir;
44749 + struct linux_dirent __user * previous;
44750 + struct file * file;
44751 + int count;
44752 + int error;
44753 +};
44754 +
44755 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
44756 + loff_t offset, u64 ino, unsigned int d_type)
44757 +{
44758 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
44759 + buf->error = -EINVAL;
44760 + return 0;
44761 +}
44762 +
44763 #define NOD(NAME, MODE, IOP, FOP, OP) { \
44764 .name = (NAME), \
44765 .len = sizeof(NAME) - 1, \
44766 @@ -213,6 +229,9 @@ static int check_mem_permission(struct t
44767 if (task == current)
44768 return 0;
44769
44770 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
44771 + return -EPERM;
44772 +
44773 /*
44774 * If current is actively ptrace'ing, and would also be
44775 * permitted to freshly attach with ptrace now, permit it.
44776 @@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_
44777 if (!mm->arg_end)
44778 goto out_mm; /* Shh! No looking before we're done */
44779
44780 + if (gr_acl_handle_procpidmem(task))
44781 + goto out_mm;
44782 +
44783 len = mm->arg_end - mm->arg_start;
44784
44785 if (len > PAGE_SIZE)
44786 @@ -287,12 +309,28 @@ out:
44787 return res;
44788 }
44789
44790 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44791 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
44792 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
44793 + _mm->pax_flags & MF_PAX_SEGMEXEC))
44794 +#endif
44795 +
44796 static int proc_pid_auxv(struct task_struct *task, char *buffer)
44797 {
44798 int res = 0;
44799 struct mm_struct *mm = get_task_mm(task);
44800 if (mm) {
44801 unsigned int nwords = 0;
44802 +
44803 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44804 + /* allow if we're currently ptracing this task */
44805 + if (PAX_RAND_FLAGS(mm) &&
44806 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
44807 + mmput(mm);
44808 + return res;
44809 + }
44810 +#endif
44811 +
44812 do {
44813 nwords += 2;
44814 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
44815 @@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_str
44816 }
44817
44818
44819 -#ifdef CONFIG_KALLSYMS
44820 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
44821 /*
44822 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
44823 * Returns the resolved symbol. If that fails, simply return the address.
44824 @@ -328,7 +366,7 @@ static int proc_pid_wchan(struct task_st
44825 }
44826 #endif /* CONFIG_KALLSYMS */
44827
44828 -#ifdef CONFIG_STACKTRACE
44829 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
44830
44831 #define MAX_STACK_TRACE_DEPTH 64
44832
44833 @@ -522,7 +560,7 @@ static int proc_pid_limits(struct task_s
44834 return count;
44835 }
44836
44837 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
44838 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
44839 static int proc_pid_syscall(struct task_struct *task, char *buffer)
44840 {
44841 long nr;
44842 @@ -547,7 +585,7 @@ static int proc_pid_syscall(struct task_
44843 /************************************************************************/
44844
44845 /* permission checks */
44846 -static int proc_fd_access_allowed(struct inode *inode)
44847 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
44848 {
44849 struct task_struct *task;
44850 int allowed = 0;
44851 @@ -557,7 +595,10 @@ static int proc_fd_access_allowed(struct
44852 */
44853 task = get_proc_task(inode);
44854 if (task) {
44855 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
44856 + if (log)
44857 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
44858 + else
44859 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
44860 put_task_struct(task);
44861 }
44862 return allowed;
44863 @@ -936,6 +977,9 @@ static ssize_t environ_read(struct file
44864 if (!task)
44865 goto out_no_task;
44866
44867 + if (gr_acl_handle_procpidmem(task))
44868 + goto out;
44869 +
44870 if (!ptrace_may_access(task, PTRACE_MODE_READ))
44871 goto out;
44872
44873 @@ -1350,7 +1394,7 @@ static void *proc_pid_follow_link(struct
44874 path_put(&nd->path);
44875
44876 /* Are we allowed to snoop on the tasks file descriptors? */
44877 - if (!proc_fd_access_allowed(inode))
44878 + if (!proc_fd_access_allowed(inode,0))
44879 goto out;
44880
44881 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
44882 @@ -1390,8 +1434,18 @@ static int proc_pid_readlink(struct dent
44883 struct path path;
44884
44885 /* Are we allowed to snoop on the tasks file descriptors? */
44886 - if (!proc_fd_access_allowed(inode))
44887 - goto out;
44888 + /* logging this is needed for learning on chromium to work properly,
44889 + but we don't want to flood the logs from 'ps' which does a readlink
44890 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
44891 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
44892 + */
44893 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
44894 + if (!proc_fd_access_allowed(inode,0))
44895 + goto out;
44896 + } else {
44897 + if (!proc_fd_access_allowed(inode,1))
44898 + goto out;
44899 + }
44900
44901 error = PROC_I(inode)->op.proc_get_link(inode, &path);
44902 if (error)
44903 @@ -1456,7 +1510,11 @@ static struct inode *proc_pid_make_inode
44904 rcu_read_lock();
44905 cred = __task_cred(task);
44906 inode->i_uid = cred->euid;
44907 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
44908 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
44909 +#else
44910 inode->i_gid = cred->egid;
44911 +#endif
44912 rcu_read_unlock();
44913 }
44914 security_task_to_inode(task, inode);
44915 @@ -1474,6 +1532,9 @@ static int pid_getattr(struct vfsmount *
44916 struct inode *inode = dentry->d_inode;
44917 struct task_struct *task;
44918 const struct cred *cred;
44919 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44920 + const struct cred *tmpcred = current_cred();
44921 +#endif
44922
44923 generic_fillattr(inode, stat);
44924
44925 @@ -1481,13 +1542,41 @@ static int pid_getattr(struct vfsmount *
44926 stat->uid = 0;
44927 stat->gid = 0;
44928 task = pid_task(proc_pid(inode), PIDTYPE_PID);
44929 +
44930 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
44931 + rcu_read_unlock();
44932 + return -ENOENT;
44933 + }
44934 +
44935 if (task) {
44936 + cred = __task_cred(task);
44937 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44938 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
44939 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
44940 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
44941 +#endif
44942 + ) {
44943 +#endif
44944 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
44945 +#ifdef CONFIG_GRKERNSEC_PROC_USER
44946 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
44947 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44948 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
44949 +#endif
44950 task_dumpable(task)) {
44951 - cred = __task_cred(task);
44952 stat->uid = cred->euid;
44953 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
44954 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
44955 +#else
44956 stat->gid = cred->egid;
44957 +#endif
44958 }
44959 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44960 + } else {
44961 + rcu_read_unlock();
44962 + return -ENOENT;
44963 + }
44964 +#endif
44965 }
44966 rcu_read_unlock();
44967 return 0;
44968 @@ -1518,11 +1607,20 @@ static int pid_revalidate(struct dentry
44969
44970 if (task) {
44971 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
44972 +#ifdef CONFIG_GRKERNSEC_PROC_USER
44973 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
44974 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44975 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
44976 +#endif
44977 task_dumpable(task)) {
44978 rcu_read_lock();
44979 cred = __task_cred(task);
44980 inode->i_uid = cred->euid;
44981 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
44982 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
44983 +#else
44984 inode->i_gid = cred->egid;
44985 +#endif
44986 rcu_read_unlock();
44987 } else {
44988 inode->i_uid = 0;
44989 @@ -1643,7 +1741,8 @@ static int proc_fd_info(struct inode *in
44990 int fd = proc_fd(inode);
44991
44992 if (task) {
44993 - files = get_files_struct(task);
44994 + if (!gr_acl_handle_procpidmem(task))
44995 + files = get_files_struct(task);
44996 put_task_struct(task);
44997 }
44998 if (files) {
44999 @@ -1895,12 +1994,22 @@ static const struct file_operations proc
45000 static int proc_fd_permission(struct inode *inode, int mask)
45001 {
45002 int rv;
45003 + struct task_struct *task;
45004
45005 rv = generic_permission(inode, mask, NULL);
45006 - if (rv == 0)
45007 - return 0;
45008 +
45009 if (task_pid(current) == proc_pid(inode))
45010 rv = 0;
45011 +
45012 + task = get_proc_task(inode);
45013 + if (task == NULL)
45014 + return rv;
45015 +
45016 + if (gr_acl_handle_procpidmem(task))
45017 + rv = -EACCES;
45018 +
45019 + put_task_struct(task);
45020 +
45021 return rv;
45022 }
45023
45024 @@ -2009,6 +2118,9 @@ static struct dentry *proc_pident_lookup
45025 if (!task)
45026 goto out_no_task;
45027
45028 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45029 + goto out;
45030 +
45031 /*
45032 * Yes, it does not scale. And it should not. Don't add
45033 * new entries into /proc/<tgid>/ without very good reasons.
45034 @@ -2053,6 +2165,9 @@ static int proc_pident_readdir(struct fi
45035 if (!task)
45036 goto out_no_task;
45037
45038 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45039 + goto out;
45040 +
45041 ret = 0;
45042 i = filp->f_pos;
45043 switch (i) {
45044 @@ -2320,7 +2435,7 @@ static void *proc_self_follow_link(struc
45045 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
45046 void *cookie)
45047 {
45048 - char *s = nd_get_link(nd);
45049 + const char *s = nd_get_link(nd);
45050 if (!IS_ERR(s))
45051 __putname(s);
45052 }
45053 @@ -2522,7 +2637,7 @@ static const struct pid_entry tgid_base_
45054 #ifdef CONFIG_SCHED_DEBUG
45055 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
45056 #endif
45057 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45058 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45059 INF("syscall", S_IRUSR, proc_pid_syscall),
45060 #endif
45061 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45062 @@ -2547,10 +2662,10 @@ static const struct pid_entry tgid_base_
45063 #ifdef CONFIG_SECURITY
45064 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45065 #endif
45066 -#ifdef CONFIG_KALLSYMS
45067 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45068 INF("wchan", S_IRUGO, proc_pid_wchan),
45069 #endif
45070 -#ifdef CONFIG_STACKTRACE
45071 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45072 ONE("stack", S_IRUSR, proc_pid_stack),
45073 #endif
45074 #ifdef CONFIG_SCHEDSTATS
45075 @@ -2580,6 +2695,9 @@ static const struct pid_entry tgid_base_
45076 #ifdef CONFIG_TASK_IO_ACCOUNTING
45077 INF("io", S_IRUSR, proc_tgid_io_accounting),
45078 #endif
45079 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45080 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
45081 +#endif
45082 };
45083
45084 static int proc_tgid_base_readdir(struct file * filp,
45085 @@ -2704,7 +2822,14 @@ static struct dentry *proc_pid_instantia
45086 if (!inode)
45087 goto out;
45088
45089 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45090 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
45091 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45092 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45093 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
45094 +#else
45095 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
45096 +#endif
45097 inode->i_op = &proc_tgid_base_inode_operations;
45098 inode->i_fop = &proc_tgid_base_operations;
45099 inode->i_flags|=S_IMMUTABLE;
45100 @@ -2746,7 +2871,11 @@ struct dentry *proc_pid_lookup(struct in
45101 if (!task)
45102 goto out;
45103
45104 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45105 + goto out_put_task;
45106 +
45107 result = proc_pid_instantiate(dir, dentry, task, NULL);
45108 +out_put_task:
45109 put_task_struct(task);
45110 out:
45111 return result;
45112 @@ -2811,6 +2940,11 @@ int proc_pid_readdir(struct file * filp,
45113 {
45114 unsigned int nr;
45115 struct task_struct *reaper;
45116 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45117 + const struct cred *tmpcred = current_cred();
45118 + const struct cred *itercred;
45119 +#endif
45120 + filldir_t __filldir = filldir;
45121 struct tgid_iter iter;
45122 struct pid_namespace *ns;
45123
45124 @@ -2834,8 +2968,27 @@ int proc_pid_readdir(struct file * filp,
45125 for (iter = next_tgid(ns, iter);
45126 iter.task;
45127 iter.tgid += 1, iter = next_tgid(ns, iter)) {
45128 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45129 + rcu_read_lock();
45130 + itercred = __task_cred(iter.task);
45131 +#endif
45132 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
45133 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45134 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
45135 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45136 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45137 +#endif
45138 + )
45139 +#endif
45140 + )
45141 + __filldir = &gr_fake_filldir;
45142 + else
45143 + __filldir = filldir;
45144 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45145 + rcu_read_unlock();
45146 +#endif
45147 filp->f_pos = iter.tgid + TGID_OFFSET;
45148 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
45149 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
45150 put_task_struct(iter.task);
45151 goto out;
45152 }
45153 @@ -2861,7 +3014,7 @@ static const struct pid_entry tid_base_s
45154 #ifdef CONFIG_SCHED_DEBUG
45155 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
45156 #endif
45157 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45158 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45159 INF("syscall", S_IRUSR, proc_pid_syscall),
45160 #endif
45161 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45162 @@ -2885,10 +3038,10 @@ static const struct pid_entry tid_base_s
45163 #ifdef CONFIG_SECURITY
45164 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45165 #endif
45166 -#ifdef CONFIG_KALLSYMS
45167 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45168 INF("wchan", S_IRUGO, proc_pid_wchan),
45169 #endif
45170 -#ifdef CONFIG_STACKTRACE
45171 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45172 ONE("stack", S_IRUSR, proc_pid_stack),
45173 #endif
45174 #ifdef CONFIG_SCHEDSTATS
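
[Illustrative note, not part of the patch] A recurring pattern in the fs/proc/base.c hunks is gr_fake_filldir: when a task is chrooted, hidden, or owned by another user under CONFIG_GRKERNSEC_PROC_USER/USERGROUP, proc_pid_readdir() swaps in the fake callback so the entry never reaches userspace, and an unprivileged /proc listing then only shows the caller's own PIDs. A runnable demo of such a listing:

#include <ctype.h>
#include <dirent.h>
#include <stdio.h>

int main(void)
{
	struct dirent *de;
	DIR *d = opendir("/proc");

	if (!d)
		return 1;
	while ((de = readdir(d)) != NULL)
		if (isdigit((unsigned char)de->d_name[0]))
			printf("%s\n", de->d_name);	/* only PIDs the kernel chose to expose */
	closedir(d);
	return 0;
}
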
45175 diff -urNp linux-2.6.32.45/fs/proc/cmdline.c linux-2.6.32.45/fs/proc/cmdline.c
45176 --- linux-2.6.32.45/fs/proc/cmdline.c 2011-03-27 14:31:47.000000000 -0400
45177 +++ linux-2.6.32.45/fs/proc/cmdline.c 2011-04-17 15:56:46.000000000 -0400
45178 @@ -23,7 +23,11 @@ static const struct file_operations cmdl
45179
45180 static int __init proc_cmdline_init(void)
45181 {
45182 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
45183 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
45184 +#else
45185 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
45186 +#endif
45187 return 0;
45188 }
45189 module_init(proc_cmdline_init);
45190 diff -urNp linux-2.6.32.45/fs/proc/devices.c linux-2.6.32.45/fs/proc/devices.c
45191 --- linux-2.6.32.45/fs/proc/devices.c 2011-03-27 14:31:47.000000000 -0400
45192 +++ linux-2.6.32.45/fs/proc/devices.c 2011-04-17 15:56:46.000000000 -0400
45193 @@ -64,7 +64,11 @@ static const struct file_operations proc
45194
45195 static int __init proc_devices_init(void)
45196 {
45197 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
45198 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
45199 +#else
45200 proc_create("devices", 0, NULL, &proc_devinfo_operations);
45201 +#endif
45202 return 0;
45203 }
45204 module_init(proc_devices_init);
45205 diff -urNp linux-2.6.32.45/fs/proc/inode.c linux-2.6.32.45/fs/proc/inode.c
45206 --- linux-2.6.32.45/fs/proc/inode.c 2011-03-27 14:31:47.000000000 -0400
45207 +++ linux-2.6.32.45/fs/proc/inode.c 2011-04-17 15:56:46.000000000 -0400
45208 @@ -457,7 +457,11 @@ struct inode *proc_get_inode(struct supe
45209 if (de->mode) {
45210 inode->i_mode = de->mode;
45211 inode->i_uid = de->uid;
45212 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45213 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45214 +#else
45215 inode->i_gid = de->gid;
45216 +#endif
45217 }
45218 if (de->size)
45219 inode->i_size = de->size;
45220 diff -urNp linux-2.6.32.45/fs/proc/internal.h linux-2.6.32.45/fs/proc/internal.h
45221 --- linux-2.6.32.45/fs/proc/internal.h 2011-03-27 14:31:47.000000000 -0400
45222 +++ linux-2.6.32.45/fs/proc/internal.h 2011-04-17 15:56:46.000000000 -0400
45223 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
45224 struct pid *pid, struct task_struct *task);
45225 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45226 struct pid *pid, struct task_struct *task);
45227 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45228 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
45229 +#endif
45230 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
45231
45232 extern const struct file_operations proc_maps_operations;
45233 diff -urNp linux-2.6.32.45/fs/proc/Kconfig linux-2.6.32.45/fs/proc/Kconfig
45234 --- linux-2.6.32.45/fs/proc/Kconfig 2011-03-27 14:31:47.000000000 -0400
45235 +++ linux-2.6.32.45/fs/proc/Kconfig 2011-04-17 15:56:46.000000000 -0400
45236 @@ -30,12 +30,12 @@ config PROC_FS
45237
45238 config PROC_KCORE
45239 bool "/proc/kcore support" if !ARM
45240 - depends on PROC_FS && MMU
45241 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
45242
45243 config PROC_VMCORE
45244 bool "/proc/vmcore support (EXPERIMENTAL)"
45245 - depends on PROC_FS && CRASH_DUMP
45246 - default y
45247 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
45248 + default n
45249 help
45250 Exports the dump image of crashed kernel in ELF format.
45251
45252 @@ -59,8 +59,8 @@ config PROC_SYSCTL
45253 limited in memory.
45254
45255 config PROC_PAGE_MONITOR
45256 - default y
45257 - depends on PROC_FS && MMU
45258 + default n
45259 + depends on PROC_FS && MMU && !GRKERNSEC
45260 bool "Enable /proc page monitoring" if EMBEDDED
45261 help
45262 Various /proc files exist to monitor process memory utilization:
45263 diff -urNp linux-2.6.32.45/fs/proc/kcore.c linux-2.6.32.45/fs/proc/kcore.c
45264 --- linux-2.6.32.45/fs/proc/kcore.c 2011-03-27 14:31:47.000000000 -0400
45265 +++ linux-2.6.32.45/fs/proc/kcore.c 2011-05-16 21:46:57.000000000 -0400
45266 @@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bu
45267 off_t offset = 0;
45268 struct kcore_list *m;
45269
45270 + pax_track_stack();
45271 +
45272 /* setup ELF header */
45273 elf = (struct elfhdr *) bufp;
45274 bufp += sizeof(struct elfhdr);
45275 @@ -477,9 +479,10 @@ read_kcore(struct file *file, char __use
45276 * the addresses in the elf_phdr on our list.
45277 */
45278 start = kc_offset_to_vaddr(*fpos - elf_buflen);
45279 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
45280 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
45281 + if (tsz > buflen)
45282 tsz = buflen;
45283 -
45284 +
45285 while (buflen) {
45286 struct kcore_list *m;
45287
45288 @@ -508,20 +511,23 @@ read_kcore(struct file *file, char __use
45289 kfree(elf_buf);
45290 } else {
45291 if (kern_addr_valid(start)) {
45292 - unsigned long n;
45293 + char *elf_buf;
45294 + mm_segment_t oldfs;
45295
45296 - n = copy_to_user(buffer, (char *)start, tsz);
45297 - /*
45298 - * We cannot distingush between fault on source
45299 - * and fault on destination. When this happens
45300 - * we clear too and hope it will trigger the
45301 - * EFAULT again.
45302 - */
45303 - if (n) {
45304 - if (clear_user(buffer + tsz - n,
45305 - n))
45306 + elf_buf = kmalloc(tsz, GFP_KERNEL);
45307 + if (!elf_buf)
45308 + return -ENOMEM;
45309 + oldfs = get_fs();
45310 + set_fs(KERNEL_DS);
45311 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
45312 + set_fs(oldfs);
45313 + if (copy_to_user(buffer, elf_buf, tsz)) {
45314 + kfree(elf_buf);
45315 return -EFAULT;
45316 + }
45317 }
45318 + set_fs(oldfs);
45319 + kfree(elf_buf);
45320 } else {
45321 if (clear_user(buffer, tsz))
45322 return -EFAULT;
45323 @@ -541,6 +547,9 @@ read_kcore(struct file *file, char __use
45324
45325 static int open_kcore(struct inode *inode, struct file *filp)
45326 {
45327 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
45328 + return -EPERM;
45329 +#endif
45330 if (!capable(CAP_SYS_RAWIO))
45331 return -EPERM;
45332 if (kcore_need_update)
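
[Illustrative note, not part of the patch] The read_kcore() rewrite above stops copying straight from a raw kernel virtual address to userspace; it goes through a kmalloc'd bounce buffer under set_fs(KERNEL_DS), so a fault on the source address is caught by __copy_from_user() rather than being conflated with a fault on the user destination. A condensed, hedged sketch of that bounce-buffer pattern; demo_copy_kernel_range is hypothetical and simplifies the error handling of the real hunk:

#include <linux/errno.h>
#include <linux/slab.h>
#include <asm/uaccess.h>

static int demo_copy_kernel_range(char __user *dst, unsigned long src, size_t len)
{
	mm_segment_t oldfs;
	char *buf = kmalloc(len, GFP_KERNEL);
	int ret = 0;

	if (!buf)
		return -ENOMEM;
	oldfs = get_fs();
	set_fs(KERNEL_DS);
	if (__copy_from_user(buf, (const void __user *)src, len))
		ret = -EFAULT;			/* fault on the kernel-side source */
	set_fs(oldfs);
	if (!ret && copy_to_user(dst, buf, len))
		ret = -EFAULT;			/* fault on the user destination */
	kfree(buf);
	return ret;
}
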
45333 diff -urNp linux-2.6.32.45/fs/proc/meminfo.c linux-2.6.32.45/fs/proc/meminfo.c
45334 --- linux-2.6.32.45/fs/proc/meminfo.c 2011-03-27 14:31:47.000000000 -0400
45335 +++ linux-2.6.32.45/fs/proc/meminfo.c 2011-05-16 21:46:57.000000000 -0400
45336 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
45337 unsigned long pages[NR_LRU_LISTS];
45338 int lru;
45339
45340 + pax_track_stack();
45341 +
45342 /*
45343 * display in kilobytes.
45344 */
45345 @@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_
45346 vmi.used >> 10,
45347 vmi.largest_chunk >> 10
45348 #ifdef CONFIG_MEMORY_FAILURE
45349 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
45350 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
45351 #endif
45352 );
45353
45354 diff -urNp linux-2.6.32.45/fs/proc/nommu.c linux-2.6.32.45/fs/proc/nommu.c
45355 --- linux-2.6.32.45/fs/proc/nommu.c 2011-03-27 14:31:47.000000000 -0400
45356 +++ linux-2.6.32.45/fs/proc/nommu.c 2011-04-17 15:56:46.000000000 -0400
45357 @@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_
45358 if (len < 1)
45359 len = 1;
45360 seq_printf(m, "%*c", len, ' ');
45361 - seq_path(m, &file->f_path, "");
45362 + seq_path(m, &file->f_path, "\n\\");
45363 }
45364
45365 seq_putc(m, '\n');
45366 diff -urNp linux-2.6.32.45/fs/proc/proc_net.c linux-2.6.32.45/fs/proc/proc_net.c
45367 --- linux-2.6.32.45/fs/proc/proc_net.c 2011-03-27 14:31:47.000000000 -0400
45368 +++ linux-2.6.32.45/fs/proc/proc_net.c 2011-04-17 15:56:46.000000000 -0400
45369 @@ -104,6 +104,17 @@ static struct net *get_proc_task_net(str
45370 struct task_struct *task;
45371 struct nsproxy *ns;
45372 struct net *net = NULL;
45373 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45374 + const struct cred *cred = current_cred();
45375 +#endif
45376 +
45377 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45378 + if (cred->fsuid)
45379 + return net;
45380 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45381 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
45382 + return net;
45383 +#endif
45384
45385 rcu_read_lock();
45386 task = pid_task(proc_pid(dir), PIDTYPE_PID);
45387 diff -urNp linux-2.6.32.45/fs/proc/proc_sysctl.c linux-2.6.32.45/fs/proc/proc_sysctl.c
45388 --- linux-2.6.32.45/fs/proc/proc_sysctl.c 2011-03-27 14:31:47.000000000 -0400
45389 +++ linux-2.6.32.45/fs/proc/proc_sysctl.c 2011-04-17 15:56:46.000000000 -0400
45390 @@ -7,6 +7,8 @@
45391 #include <linux/security.h>
45392 #include "internal.h"
45393
45394 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
45395 +
45396 static const struct dentry_operations proc_sys_dentry_operations;
45397 static const struct file_operations proc_sys_file_operations;
45398 static const struct inode_operations proc_sys_inode_operations;
45399 @@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(st
45400 if (!p)
45401 goto out;
45402
45403 + if (gr_handle_sysctl(p, MAY_EXEC))
45404 + goto out;
45405 +
45406 err = ERR_PTR(-ENOMEM);
45407 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
45408 if (h)
45409 @@ -228,6 +233,9 @@ static int scan(struct ctl_table_header
45410 if (*pos < file->f_pos)
45411 continue;
45412
45413 + if (gr_handle_sysctl(table, 0))
45414 + continue;
45415 +
45416 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
45417 if (res)
45418 return res;
45419 @@ -344,6 +352,9 @@ static int proc_sys_getattr(struct vfsmo
45420 if (IS_ERR(head))
45421 return PTR_ERR(head);
45422
45423 + if (table && gr_handle_sysctl(table, MAY_EXEC))
45424 + return -ENOENT;
45425 +
45426 generic_fillattr(inode, stat);
45427 if (table)
45428 stat->mode = (stat->mode & S_IFMT) | table->mode;
45429 diff -urNp linux-2.6.32.45/fs/proc/root.c linux-2.6.32.45/fs/proc/root.c
45430 --- linux-2.6.32.45/fs/proc/root.c 2011-03-27 14:31:47.000000000 -0400
45431 +++ linux-2.6.32.45/fs/proc/root.c 2011-04-17 15:56:46.000000000 -0400
45432 @@ -134,7 +134,15 @@ void __init proc_root_init(void)
45433 #ifdef CONFIG_PROC_DEVICETREE
45434 proc_device_tree_init();
45435 #endif
45436 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
45437 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45438 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
45439 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45440 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
45441 +#endif
45442 +#else
45443 proc_mkdir("bus", NULL);
45444 +#endif
45445 proc_sys_init();
45446 }
45447
45448 diff -urNp linux-2.6.32.45/fs/proc/task_mmu.c linux-2.6.32.45/fs/proc/task_mmu.c
45449 --- linux-2.6.32.45/fs/proc/task_mmu.c 2011-03-27 14:31:47.000000000 -0400
45450 +++ linux-2.6.32.45/fs/proc/task_mmu.c 2011-04-23 13:38:09.000000000 -0400
45451 @@ -46,15 +46,26 @@ void task_mem(struct seq_file *m, struct
45452 "VmStk:\t%8lu kB\n"
45453 "VmExe:\t%8lu kB\n"
45454 "VmLib:\t%8lu kB\n"
45455 - "VmPTE:\t%8lu kB\n",
45456 - hiwater_vm << (PAGE_SHIFT-10),
45457 + "VmPTE:\t%8lu kB\n"
45458 +
45459 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
45460 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
45461 +#endif
45462 +
45463 + ,hiwater_vm << (PAGE_SHIFT-10),
45464 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
45465 mm->locked_vm << (PAGE_SHIFT-10),
45466 hiwater_rss << (PAGE_SHIFT-10),
45467 total_rss << (PAGE_SHIFT-10),
45468 data << (PAGE_SHIFT-10),
45469 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
45470 - (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
45471 + (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
45472 +
45473 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
45474 + , mm->context.user_cs_base, mm->context.user_cs_limit
45475 +#endif
45476 +
45477 + );
45478 }
45479
45480 unsigned long task_vsize(struct mm_struct *mm)
45481 @@ -175,7 +186,8 @@ static void m_stop(struct seq_file *m, v
45482 struct proc_maps_private *priv = m->private;
45483 struct vm_area_struct *vma = v;
45484
45485 - vma_stop(priv, vma);
45486 + if (!IS_ERR(vma))
45487 + vma_stop(priv, vma);
45488 if (priv->task)
45489 put_task_struct(priv->task);
45490 }
45491 @@ -199,6 +211,12 @@ static int do_maps_open(struct inode *in
45492 return ret;
45493 }
45494
45495 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45496 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45497 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
45498 + _mm->pax_flags & MF_PAX_SEGMEXEC))
45499 +#endif
45500 +
45501 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
45502 {
45503 struct mm_struct *mm = vma->vm_mm;
45504 @@ -206,7 +224,6 @@ static void show_map_vma(struct seq_file
45505 int flags = vma->vm_flags;
45506 unsigned long ino = 0;
45507 unsigned long long pgoff = 0;
45508 - unsigned long start;
45509 dev_t dev = 0;
45510 int len;
45511
45512 @@ -217,20 +234,23 @@ static void show_map_vma(struct seq_file
45513 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
45514 }
45515
45516 - /* We don't show the stack guard page in /proc/maps */
45517 - start = vma->vm_start;
45518 - if (vma->vm_flags & VM_GROWSDOWN)
45519 - if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
45520 - start += PAGE_SIZE;
45521 -
45522 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
45523 - start,
45524 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45525 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
45526 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
45527 +#else
45528 + vma->vm_start,
45529 vma->vm_end,
45530 +#endif
45531 flags & VM_READ ? 'r' : '-',
45532 flags & VM_WRITE ? 'w' : '-',
45533 flags & VM_EXEC ? 'x' : '-',
45534 flags & VM_MAYSHARE ? 's' : 'p',
45535 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45536 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
45537 +#else
45538 pgoff,
45539 +#endif
45540 MAJOR(dev), MINOR(dev), ino, &len);
45541
45542 /*
45543 @@ -239,7 +259,7 @@ static void show_map_vma(struct seq_file
45544 */
45545 if (file) {
45546 pad_len_spaces(m, len);
45547 - seq_path(m, &file->f_path, "\n");
45548 + seq_path(m, &file->f_path, "\n\\");
45549 } else {
45550 const char *name = arch_vma_name(vma);
45551 if (!name) {
45552 @@ -247,8 +267,9 @@ static void show_map_vma(struct seq_file
45553 if (vma->vm_start <= mm->brk &&
45554 vma->vm_end >= mm->start_brk) {
45555 name = "[heap]";
45556 - } else if (vma->vm_start <= mm->start_stack &&
45557 - vma->vm_end >= mm->start_stack) {
45558 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
45559 + (vma->vm_start <= mm->start_stack &&
45560 + vma->vm_end >= mm->start_stack)) {
45561 name = "[stack]";
45562 }
45563 } else {
45564 @@ -391,9 +412,16 @@ static int show_smap(struct seq_file *m,
45565 };
45566
45567 memset(&mss, 0, sizeof mss);
45568 - mss.vma = vma;
45569 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
45570 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
45571 +
45572 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45573 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
45574 +#endif
45575 + mss.vma = vma;
45576 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
45577 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
45578 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45579 + }
45580 +#endif
45581
45582 show_map_vma(m, vma);
45583
45584 @@ -409,7 +437,11 @@ static int show_smap(struct seq_file *m,
45585 "Swap: %8lu kB\n"
45586 "KernelPageSize: %8lu kB\n"
45587 "MMUPageSize: %8lu kB\n",
45588 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45589 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
45590 +#else
45591 (vma->vm_end - vma->vm_start) >> 10,
45592 +#endif
45593 mss.resident >> 10,
45594 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
45595 mss.shared_clean >> 10,
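The task_mmu.c hunks above gate every address, offset and size printed for /proc/<pid>/maps and smaps behind PAX_RAND_FLAGS(), so a process whose layout is randomized is shown as zeros to anyone other than itself. Below is a minimal userspace model of that check; the flag values and the struct standing in for mm_struct are assumptions for the sketch, not the kernel's definitions.

#include <stdio.h>
#include <stdbool.h>

#define MF_PAX_RANDMMAP  0x01U   /* assumed values for this sketch */
#define MF_PAX_SEGMEXEC  0x02U

struct mm_view {
	unsigned long pax_flags;
	bool is_current;         /* stands in for "_mm == current->mm" */
};

static bool pax_rand_flags(const struct mm_view *mm)
{
	return mm && !mm->is_current &&
	       ((mm->pax_flags & MF_PAX_RANDMMAP) ||
	        (mm->pax_flags & MF_PAX_SEGMEXEC));
}

static void show_map_vma(const struct mm_view *mm,
			 unsigned long start, unsigned long end)
{
	/* same pattern as the patched seq_printf(): zero out the range
	 * whenever the randomization check fires */
	printf("%08lx-%08lx\n",
	       pax_rand_flags(mm) ? 0UL : start,
	       pax_rand_flags(mm) ? 0UL : end);
}

int main(void)
{
	struct mm_view other = { MF_PAX_RANDMMAP, false };
	struct mm_view self  = { MF_PAX_RANDMMAP, true };

	show_map_vma(&other, 0x400000UL, 0x452000UL);  /* 00000000-00000000 */
	show_map_vma(&self,  0x400000UL, 0x452000UL);  /* 00400000-00452000 */
	return 0;
}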
45596 diff -urNp linux-2.6.32.45/fs/proc/task_nommu.c linux-2.6.32.45/fs/proc/task_nommu.c
45597 --- linux-2.6.32.45/fs/proc/task_nommu.c 2011-03-27 14:31:47.000000000 -0400
45598 +++ linux-2.6.32.45/fs/proc/task_nommu.c 2011-04-17 15:56:46.000000000 -0400
45599 @@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct
45600 else
45601 bytes += kobjsize(mm);
45602
45603 - if (current->fs && current->fs->users > 1)
45604 + if (current->fs && atomic_read(&current->fs->users) > 1)
45605 sbytes += kobjsize(current->fs);
45606 else
45607 bytes += kobjsize(current->fs);
45608 @@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_fil
45609 if (len < 1)
45610 len = 1;
45611 seq_printf(m, "%*c", len, ' ');
45612 - seq_path(m, &file->f_path, "");
45613 + seq_path(m, &file->f_path, "\n\\");
45614 }
45615
45616 seq_putc(m, '\n');
45617 diff -urNp linux-2.6.32.45/fs/readdir.c linux-2.6.32.45/fs/readdir.c
45618 --- linux-2.6.32.45/fs/readdir.c 2011-03-27 14:31:47.000000000 -0400
45619 +++ linux-2.6.32.45/fs/readdir.c 2011-04-17 15:56:46.000000000 -0400
45620 @@ -16,6 +16,7 @@
45621 #include <linux/security.h>
45622 #include <linux/syscalls.h>
45623 #include <linux/unistd.h>
45624 +#include <linux/namei.h>
45625
45626 #include <asm/uaccess.h>
45627
45628 @@ -67,6 +68,7 @@ struct old_linux_dirent {
45629
45630 struct readdir_callback {
45631 struct old_linux_dirent __user * dirent;
45632 + struct file * file;
45633 int result;
45634 };
45635
45636 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
45637 buf->result = -EOVERFLOW;
45638 return -EOVERFLOW;
45639 }
45640 +
45641 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
45642 + return 0;
45643 +
45644 buf->result++;
45645 dirent = buf->dirent;
45646 if (!access_ok(VERIFY_WRITE, dirent,
45647 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
45648
45649 buf.result = 0;
45650 buf.dirent = dirent;
45651 + buf.file = file;
45652
45653 error = vfs_readdir(file, fillonedir, &buf);
45654 if (buf.result)
45655 @@ -142,6 +149,7 @@ struct linux_dirent {
45656 struct getdents_callback {
45657 struct linux_dirent __user * current_dir;
45658 struct linux_dirent __user * previous;
45659 + struct file * file;
45660 int count;
45661 int error;
45662 };
45663 @@ -162,6 +170,10 @@ static int filldir(void * __buf, const c
45664 buf->error = -EOVERFLOW;
45665 return -EOVERFLOW;
45666 }
45667 +
45668 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
45669 + return 0;
45670 +
45671 dirent = buf->previous;
45672 if (dirent) {
45673 if (__put_user(offset, &dirent->d_off))
45674 @@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
45675 buf.previous = NULL;
45676 buf.count = count;
45677 buf.error = 0;
45678 + buf.file = file;
45679
45680 error = vfs_readdir(file, filldir, &buf);
45681 if (error >= 0)
45682 @@ -228,6 +241,7 @@ out:
45683 struct getdents_callback64 {
45684 struct linux_dirent64 __user * current_dir;
45685 struct linux_dirent64 __user * previous;
45686 + struct file *file;
45687 int count;
45688 int error;
45689 };
45690 @@ -242,6 +256,10 @@ static int filldir64(void * __buf, const
45691 buf->error = -EINVAL; /* only used if we fail.. */
45692 if (reclen > buf->count)
45693 return -EINVAL;
45694 +
45695 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
45696 + return 0;
45697 +
45698 dirent = buf->previous;
45699 if (dirent) {
45700 if (__put_user(offset, &dirent->d_off))
45701 @@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
45702
45703 buf.current_dir = dirent;
45704 buf.previous = NULL;
45705 + buf.file = file;
45706 buf.count = count;
45707 buf.error = 0;
45708
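The readdir.c changes above thread a struct file pointer into each callback context (readdir_callback, getdents_callback, getdents_callback64) so gr_acl_handle_filldir() can drop individual entries before they are copied to the user buffer. A small userspace sketch of that pattern follows; the policy_allows() stand-in is hypothetical and only illustrates the "return 0 to skip silently" behaviour.

#include <stdio.h>
#include <string.h>

struct file_ctx { const char *hidden_prefix; };

struct filldir_buf {
	struct file_ctx *file;   /* extra state threaded through, as in the patch */
	int count;
};

/* stand-in for gr_acl_handle_filldir(): nonzero means "show the entry" */
static int policy_allows(const struct file_ctx *f, const char *name)
{
	return strncmp(name, f->hidden_prefix, strlen(f->hidden_prefix)) != 0;
}

static int filldir(struct filldir_buf *buf, const char *name)
{
	if (!policy_allows(buf->file, name))
		return 0;            /* silently skipped, like the early return 0 */
	printf("%s\n", name);
	buf->count++;
	return 0;
}

int main(void)
{
	struct file_ctx dir = { ".secret" };
	struct filldir_buf buf = { &dir, 0 };
	const char *entries[] = { "a.txt", ".secret-key", "b.txt" };

	for (unsigned i = 0; i < sizeof(entries) / sizeof(entries[0]); i++)
		filldir(&buf, entries[i]);
	return 0;
}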
45709 diff -urNp linux-2.6.32.45/fs/reiserfs/dir.c linux-2.6.32.45/fs/reiserfs/dir.c
45710 --- linux-2.6.32.45/fs/reiserfs/dir.c 2011-03-27 14:31:47.000000000 -0400
45711 +++ linux-2.6.32.45/fs/reiserfs/dir.c 2011-05-16 21:46:57.000000000 -0400
45712 @@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
45713 struct reiserfs_dir_entry de;
45714 int ret = 0;
45715
45716 + pax_track_stack();
45717 +
45718 reiserfs_write_lock(inode->i_sb);
45719
45720 reiserfs_check_lock_depth(inode->i_sb, "readdir");
45721 diff -urNp linux-2.6.32.45/fs/reiserfs/do_balan.c linux-2.6.32.45/fs/reiserfs/do_balan.c
45722 --- linux-2.6.32.45/fs/reiserfs/do_balan.c 2011-03-27 14:31:47.000000000 -0400
45723 +++ linux-2.6.32.45/fs/reiserfs/do_balan.c 2011-04-17 15:56:46.000000000 -0400
45724 @@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb,
45725 return;
45726 }
45727
45728 - atomic_inc(&(fs_generation(tb->tb_sb)));
45729 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
45730 do_balance_starts(tb);
45731
45732 /* balance leaf returns 0 except if combining L R and S into
45733 diff -urNp linux-2.6.32.45/fs/reiserfs/item_ops.c linux-2.6.32.45/fs/reiserfs/item_ops.c
45734 --- linux-2.6.32.45/fs/reiserfs/item_ops.c 2011-03-27 14:31:47.000000000 -0400
45735 +++ linux-2.6.32.45/fs/reiserfs/item_ops.c 2011-04-17 15:56:46.000000000 -0400
45736 @@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_i
45737 vi->vi_index, vi->vi_type, vi->vi_ih);
45738 }
45739
45740 -static struct item_operations stat_data_ops = {
45741 +static const struct item_operations stat_data_ops = {
45742 .bytes_number = sd_bytes_number,
45743 .decrement_key = sd_decrement_key,
45744 .is_left_mergeable = sd_is_left_mergeable,
45745 @@ -196,7 +196,7 @@ static void direct_print_vi(struct virtu
45746 vi->vi_index, vi->vi_type, vi->vi_ih);
45747 }
45748
45749 -static struct item_operations direct_ops = {
45750 +static const struct item_operations direct_ops = {
45751 .bytes_number = direct_bytes_number,
45752 .decrement_key = direct_decrement_key,
45753 .is_left_mergeable = direct_is_left_mergeable,
45754 @@ -341,7 +341,7 @@ static void indirect_print_vi(struct vir
45755 vi->vi_index, vi->vi_type, vi->vi_ih);
45756 }
45757
45758 -static struct item_operations indirect_ops = {
45759 +static const struct item_operations indirect_ops = {
45760 .bytes_number = indirect_bytes_number,
45761 .decrement_key = indirect_decrement_key,
45762 .is_left_mergeable = indirect_is_left_mergeable,
45763 @@ -628,7 +628,7 @@ static void direntry_print_vi(struct vir
45764 printk("\n");
45765 }
45766
45767 -static struct item_operations direntry_ops = {
45768 +static const struct item_operations direntry_ops = {
45769 .bytes_number = direntry_bytes_number,
45770 .decrement_key = direntry_decrement_key,
45771 .is_left_mergeable = direntry_is_left_mergeable,
45772 @@ -724,7 +724,7 @@ static void errcatch_print_vi(struct vir
45773 "Invalid item type observed, run fsck ASAP");
45774 }
45775
45776 -static struct item_operations errcatch_ops = {
45777 +static const struct item_operations errcatch_ops = {
45778 errcatch_bytes_number,
45779 errcatch_decrement_key,
45780 errcatch_is_left_mergeable,
45781 @@ -746,7 +746,7 @@ static struct item_operations errcatch_o
45782 #error Item types must use disk-format assigned values.
45783 #endif
45784
45785 -struct item_operations *item_ops[TYPE_ANY + 1] = {
45786 +const struct item_operations * const item_ops[TYPE_ANY + 1] = {
45787 &stat_data_ops,
45788 &indirect_ops,
45789 &direct_ops,
45790 diff -urNp linux-2.6.32.45/fs/reiserfs/journal.c linux-2.6.32.45/fs/reiserfs/journal.c
45791 --- linux-2.6.32.45/fs/reiserfs/journal.c 2011-03-27 14:31:47.000000000 -0400
45792 +++ linux-2.6.32.45/fs/reiserfs/journal.c 2011-05-16 21:46:57.000000000 -0400
45793 @@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_brea
45794 struct buffer_head *bh;
45795 int i, j;
45796
45797 + pax_track_stack();
45798 +
45799 bh = __getblk(dev, block, bufsize);
45800 if (buffer_uptodate(bh))
45801 return (bh);
45802 diff -urNp linux-2.6.32.45/fs/reiserfs/namei.c linux-2.6.32.45/fs/reiserfs/namei.c
45803 --- linux-2.6.32.45/fs/reiserfs/namei.c 2011-03-27 14:31:47.000000000 -0400
45804 +++ linux-2.6.32.45/fs/reiserfs/namei.c 2011-05-16 21:46:57.000000000 -0400
45805 @@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode
45806 unsigned long savelink = 1;
45807 struct timespec ctime;
45808
45809 + pax_track_stack();
45810 +
45811 /* three balancings: (1) old name removal, (2) new name insertion
45812 and (3) maybe "save" link insertion
45813 stat data updates: (1) old directory,
45814 diff -urNp linux-2.6.32.45/fs/reiserfs/procfs.c linux-2.6.32.45/fs/reiserfs/procfs.c
45815 --- linux-2.6.32.45/fs/reiserfs/procfs.c 2011-03-27 14:31:47.000000000 -0400
45816 +++ linux-2.6.32.45/fs/reiserfs/procfs.c 2011-05-16 21:46:57.000000000 -0400
45817 @@ -123,7 +123,7 @@ static int show_super(struct seq_file *m
45818 "SMALL_TAILS " : "NO_TAILS ",
45819 replay_only(sb) ? "REPLAY_ONLY " : "",
45820 convert_reiserfs(sb) ? "CONV " : "",
45821 - atomic_read(&r->s_generation_counter),
45822 + atomic_read_unchecked(&r->s_generation_counter),
45823 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
45824 SF(s_do_balance), SF(s_unneeded_left_neighbor),
45825 SF(s_good_search_by_key_reada), SF(s_bmaps),
45826 @@ -309,6 +309,8 @@ static int show_journal(struct seq_file
45827 struct journal_params *jp = &rs->s_v1.s_journal;
45828 char b[BDEVNAME_SIZE];
45829
45830 + pax_track_stack();
45831 +
45832 seq_printf(m, /* on-disk fields */
45833 "jp_journal_1st_block: \t%i\n"
45834 "jp_journal_dev: \t%s[%x]\n"
45835 diff -urNp linux-2.6.32.45/fs/reiserfs/stree.c linux-2.6.32.45/fs/reiserfs/stree.c
45836 --- linux-2.6.32.45/fs/reiserfs/stree.c 2011-03-27 14:31:47.000000000 -0400
45837 +++ linux-2.6.32.45/fs/reiserfs/stree.c 2011-05-16 21:46:57.000000000 -0400
45838 @@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs
45839 int iter = 0;
45840 #endif
45841
45842 + pax_track_stack();
45843 +
45844 BUG_ON(!th->t_trans_id);
45845
45846 init_tb_struct(th, &s_del_balance, sb, path,
45847 @@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct r
45848 int retval;
45849 int quota_cut_bytes = 0;
45850
45851 + pax_track_stack();
45852 +
45853 BUG_ON(!th->t_trans_id);
45854
45855 le_key2cpu_key(&cpu_key, key);
45856 @@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiser
45857 int quota_cut_bytes;
45858 loff_t tail_pos = 0;
45859
45860 + pax_track_stack();
45861 +
45862 BUG_ON(!th->t_trans_id);
45863
45864 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
45865 @@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reis
45866 int retval;
45867 int fs_gen;
45868
45869 + pax_track_stack();
45870 +
45871 BUG_ON(!th->t_trans_id);
45872
45873 fs_gen = get_generation(inode->i_sb);
45874 @@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs
45875 int fs_gen = 0;
45876 int quota_bytes = 0;
45877
45878 + pax_track_stack();
45879 +
45880 BUG_ON(!th->t_trans_id);
45881
45882 if (inode) { /* Do we count quotas for item? */
45883 diff -urNp linux-2.6.32.45/fs/reiserfs/super.c linux-2.6.32.45/fs/reiserfs/super.c
45884 --- linux-2.6.32.45/fs/reiserfs/super.c 2011-03-27 14:31:47.000000000 -0400
45885 +++ linux-2.6.32.45/fs/reiserfs/super.c 2011-05-16 21:46:57.000000000 -0400
45886 @@ -912,6 +912,8 @@ static int reiserfs_parse_options(struct
45887 {.option_name = NULL}
45888 };
45889
45890 + pax_track_stack();
45891 +
45892 *blocks = 0;
45893 if (!options || !*options)
45894 /* use default configuration: create tails, journaling on, no
45895 diff -urNp linux-2.6.32.45/fs/select.c linux-2.6.32.45/fs/select.c
45896 --- linux-2.6.32.45/fs/select.c 2011-03-27 14:31:47.000000000 -0400
45897 +++ linux-2.6.32.45/fs/select.c 2011-05-16 21:46:57.000000000 -0400
45898 @@ -20,6 +20,7 @@
45899 #include <linux/module.h>
45900 #include <linux/slab.h>
45901 #include <linux/poll.h>
45902 +#include <linux/security.h>
45903 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
45904 #include <linux/file.h>
45905 #include <linux/fdtable.h>
45906 @@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, s
45907 int retval, i, timed_out = 0;
45908 unsigned long slack = 0;
45909
45910 + pax_track_stack();
45911 +
45912 rcu_read_lock();
45913 retval = max_select_fd(n, fds);
45914 rcu_read_unlock();
45915 @@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user
45916 /* Allocate small arguments on the stack to save memory and be faster */
45917 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
45918
45919 + pax_track_stack();
45920 +
45921 ret = -EINVAL;
45922 if (n < 0)
45923 goto out_nofds;
45924 @@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *uf
45925 struct poll_list *walk = head;
45926 unsigned long todo = nfds;
45927
45928 + pax_track_stack();
45929 +
45930 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
45931 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
45932 return -EINVAL;
45933
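Several hunks in this region (the reiserfs ones above and the select/poll ones here) only insert pax_track_stack() at the top of functions that keep large objects on the stack, such as the stack_fds array in core_sys_select(). The call itself is a PaX hook; the sketch below only shows the placement pattern, with a no-op stand-in and an assumed SELECT_STACK_ALLOC value.

#include <stdio.h>
#include <string.h>

#define SELECT_STACK_ALLOC 256          /* assumed value, for illustration */

static void pax_track_stack(void)
{
	/* no-op stand-in for the PaX stack-tracking hook */
}

static int core_sys_select_like(int n)
{
	/* large local buffer: the kind of frame the hook is placed in */
	long stack_fds[SELECT_STACK_ALLOC / sizeof(long)];

	pax_track_stack();

	if (n < 0)
		return -1;
	memset(stack_fds, 0, sizeof(stack_fds));
	return (int)stack_fds[0];
}

int main(void)
{
	printf("%d\n", core_sys_select_like(8));
	return 0;
}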
45934 diff -urNp linux-2.6.32.45/fs/seq_file.c linux-2.6.32.45/fs/seq_file.c
45935 --- linux-2.6.32.45/fs/seq_file.c 2011-03-27 14:31:47.000000000 -0400
45936 +++ linux-2.6.32.45/fs/seq_file.c 2011-08-23 21:22:32.000000000 -0400
45937 @@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
45938 return 0;
45939 }
45940 if (!m->buf) {
45941 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
45942 + m->size = PAGE_SIZE;
45943 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
45944 if (!m->buf)
45945 return -ENOMEM;
45946 }
45947 @@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
45948 Eoverflow:
45949 m->op->stop(m, p);
45950 kfree(m->buf);
45951 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
45952 + m->size <<= 1;
45953 + m->buf = kmalloc(m->size, GFP_KERNEL);
45954 return !m->buf ? -ENOMEM : -EAGAIN;
45955 }
45956
45957 @@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
45958 m->version = file->f_version;
45959 /* grab buffer if we didn't have one */
45960 if (!m->buf) {
45961 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
45962 + m->size = PAGE_SIZE;
45963 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
45964 if (!m->buf)
45965 goto Enomem;
45966 }
45967 @@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
45968 goto Fill;
45969 m->op->stop(m, p);
45970 kfree(m->buf);
45971 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
45972 + m->size <<= 1;
45973 + m->buf = kmalloc(m->size, GFP_KERNEL);
45974 if (!m->buf)
45975 goto Enomem;
45976 m->count = 0;
45977 @@ -551,7 +555,7 @@ static void single_stop(struct seq_file
45978 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
45979 void *data)
45980 {
45981 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
45982 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
45983 int res = -ENOMEM;
45984
45985 if (op) {
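The seq_file.c changes above split the size update out of the allocation call, turning "m->buf = kmalloc(m->size <<= 1, GFP_KERNEL)" into two statements; the behaviour is unchanged, the side effect is simply no longer hidden inside an argument list. A userspace model of the overflow/retry path, with malloc standing in for kmalloc:

#include <stdlib.h>
#include <stdio.h>

struct seq_buf {
	char  *buf;
	size_t size;
};

/* Overflow path: drop the old buffer, double the size, allocate a fresh
 * one and ask the caller to retry (the kernel path returns -EAGAIN). */
static int seq_buf_overflow(struct seq_buf *m)
{
	free(m->buf);
	m->size <<= 1;                 /* size update as its own statement */
	m->buf = malloc(m->size);
	return m->buf ? 1 /* retry */ : -1 /* out of memory */;
}

int main(void)
{
	struct seq_buf m = { malloc(4096), 4096 };

	if (!m.buf || seq_buf_overflow(&m) < 0)
		return 1;
	printf("buffer now %zu bytes\n", m.size);   /* 8192 */
	free(m.buf);
	return 0;
}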
45986 diff -urNp linux-2.6.32.45/fs/smbfs/proc.c linux-2.6.32.45/fs/smbfs/proc.c
45987 --- linux-2.6.32.45/fs/smbfs/proc.c 2011-03-27 14:31:47.000000000 -0400
45988 +++ linux-2.6.32.45/fs/smbfs/proc.c 2011-08-05 20:33:55.000000000 -0400
45989 @@ -266,9 +266,9 @@ int smb_setcodepage(struct smb_sb_info *
45990
45991 out:
45992 if (server->local_nls != NULL && server->remote_nls != NULL)
45993 - server->ops->convert = convert_cp;
45994 + *(void **)&server->ops->convert = convert_cp;
45995 else
45996 - server->ops->convert = convert_memcpy;
45997 + *(void **)&server->ops->convert = convert_memcpy;
45998
45999 smb_unlock_server(server);
46000 return n;
46001 @@ -933,9 +933,9 @@ smb_newconn(struct smb_sb_info *server,
46002
46003 /* FIXME: the win9x code wants to modify these ... (seek/trunc bug) */
46004 if (server->mnt->flags & SMB_MOUNT_OLDATTR) {
46005 - server->ops->getattr = smb_proc_getattr_core;
46006 + *(void **)&server->ops->getattr = smb_proc_getattr_core;
46007 } else if (server->mnt->flags & SMB_MOUNT_DIRATTR) {
46008 - server->ops->getattr = smb_proc_getattr_ff;
46009 + *(void **)&server->ops->getattr = smb_proc_getattr_ff;
46010 }
46011
46012 /* Decode server capabilities */
46013 @@ -3439,7 +3439,7 @@ out:
46014 static void
46015 install_ops(struct smb_ops *dst, struct smb_ops *src)
46016 {
46017 - memcpy(dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
46018 + memcpy((void *)dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
46019 }
46020
46021 /* < LANMAN2 */
46022 diff -urNp linux-2.6.32.45/fs/smbfs/symlink.c linux-2.6.32.45/fs/smbfs/symlink.c
46023 --- linux-2.6.32.45/fs/smbfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
46024 +++ linux-2.6.32.45/fs/smbfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
46025 @@ -55,7 +55,7 @@ static void *smb_follow_link(struct dent
46026
46027 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46028 {
46029 - char *s = nd_get_link(nd);
46030 + const char *s = nd_get_link(nd);
46031 if (!IS_ERR(s))
46032 __putname(s);
46033 }
46034 diff -urNp linux-2.6.32.45/fs/splice.c linux-2.6.32.45/fs/splice.c
46035 --- linux-2.6.32.45/fs/splice.c 2011-03-27 14:31:47.000000000 -0400
46036 +++ linux-2.6.32.45/fs/splice.c 2011-05-16 21:46:57.000000000 -0400
46037 @@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode
46038 pipe_lock(pipe);
46039
46040 for (;;) {
46041 - if (!pipe->readers) {
46042 + if (!atomic_read(&pipe->readers)) {
46043 send_sig(SIGPIPE, current, 0);
46044 if (!ret)
46045 ret = -EPIPE;
46046 @@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode
46047 do_wakeup = 0;
46048 }
46049
46050 - pipe->waiting_writers++;
46051 + atomic_inc(&pipe->waiting_writers);
46052 pipe_wait(pipe);
46053 - pipe->waiting_writers--;
46054 + atomic_dec(&pipe->waiting_writers);
46055 }
46056
46057 pipe_unlock(pipe);
46058 @@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *
46059 .spd_release = spd_release_page,
46060 };
46061
46062 + pax_track_stack();
46063 +
46064 index = *ppos >> PAGE_CACHE_SHIFT;
46065 loff = *ppos & ~PAGE_CACHE_MASK;
46066 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
46067 @@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file
46068 old_fs = get_fs();
46069 set_fs(get_ds());
46070 /* The cast to a user pointer is valid due to the set_fs() */
46071 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
46072 + res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
46073 set_fs(old_fs);
46074
46075 return res;
46076 @@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file
46077 old_fs = get_fs();
46078 set_fs(get_ds());
46079 /* The cast to a user pointer is valid due to the set_fs() */
46080 - res = vfs_write(file, (const char __user *)buf, count, &pos);
46081 + res = vfs_write(file, (__force const char __user *)buf, count, &pos);
46082 set_fs(old_fs);
46083
46084 return res;
46085 @@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct
46086 .spd_release = spd_release_page,
46087 };
46088
46089 + pax_track_stack();
46090 +
46091 index = *ppos >> PAGE_CACHE_SHIFT;
46092 offset = *ppos & ~PAGE_CACHE_MASK;
46093 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
46094 @@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct
46095 goto err;
46096
46097 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
46098 - vec[i].iov_base = (void __user *) page_address(page);
46099 + vec[i].iov_base = (__force void __user *) page_address(page);
46100 vec[i].iov_len = this_len;
46101 pages[i] = page;
46102 spd.nr_pages++;
46103 @@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
46104 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
46105 {
46106 while (!pipe->nrbufs) {
46107 - if (!pipe->writers)
46108 + if (!atomic_read(&pipe->writers))
46109 return 0;
46110
46111 - if (!pipe->waiting_writers && sd->num_spliced)
46112 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
46113 return 0;
46114
46115 if (sd->flags & SPLICE_F_NONBLOCK)
46116 @@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct fi
46117 * out of the pipe right after the splice_to_pipe(). So set
46118 * PIPE_READERS appropriately.
46119 */
46120 - pipe->readers = 1;
46121 + atomic_set(&pipe->readers, 1);
46122
46123 current->splice_pipe = pipe;
46124 }
46125 @@ -1592,6 +1596,8 @@ static long vmsplice_to_pipe(struct file
46126 .spd_release = spd_release_page,
46127 };
46128
46129 + pax_track_stack();
46130 +
46131 pipe = pipe_info(file->f_path.dentry->d_inode);
46132 if (!pipe)
46133 return -EBADF;
46134 @@ -1700,9 +1706,9 @@ static int ipipe_prep(struct pipe_inode_
46135 ret = -ERESTARTSYS;
46136 break;
46137 }
46138 - if (!pipe->writers)
46139 + if (!atomic_read(&pipe->writers))
46140 break;
46141 - if (!pipe->waiting_writers) {
46142 + if (!atomic_read(&pipe->waiting_writers)) {
46143 if (flags & SPLICE_F_NONBLOCK) {
46144 ret = -EAGAIN;
46145 break;
46146 @@ -1734,7 +1740,7 @@ static int opipe_prep(struct pipe_inode_
46147 pipe_lock(pipe);
46148
46149 while (pipe->nrbufs >= PIPE_BUFFERS) {
46150 - if (!pipe->readers) {
46151 + if (!atomic_read(&pipe->readers)) {
46152 send_sig(SIGPIPE, current, 0);
46153 ret = -EPIPE;
46154 break;
46155 @@ -1747,9 +1753,9 @@ static int opipe_prep(struct pipe_inode_
46156 ret = -ERESTARTSYS;
46157 break;
46158 }
46159 - pipe->waiting_writers++;
46160 + atomic_inc(&pipe->waiting_writers);
46161 pipe_wait(pipe);
46162 - pipe->waiting_writers--;
46163 + atomic_dec(&pipe->waiting_writers);
46164 }
46165
46166 pipe_unlock(pipe);
46167 @@ -1785,14 +1791,14 @@ retry:
46168 pipe_double_lock(ipipe, opipe);
46169
46170 do {
46171 - if (!opipe->readers) {
46172 + if (!atomic_read(&opipe->readers)) {
46173 send_sig(SIGPIPE, current, 0);
46174 if (!ret)
46175 ret = -EPIPE;
46176 break;
46177 }
46178
46179 - if (!ipipe->nrbufs && !ipipe->writers)
46180 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
46181 break;
46182
46183 /*
46184 @@ -1892,7 +1898,7 @@ static int link_pipe(struct pipe_inode_i
46185 pipe_double_lock(ipipe, opipe);
46186
46187 do {
46188 - if (!opipe->readers) {
46189 + if (!atomic_read(&opipe->readers)) {
46190 send_sig(SIGPIPE, current, 0);
46191 if (!ret)
46192 ret = -EPIPE;
46193 @@ -1937,7 +1943,7 @@ static int link_pipe(struct pipe_inode_i
46194 * return EAGAIN if we have the potential of some data in the
46195 * future, otherwise just return 0
46196 */
46197 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
46198 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
46199 ret = -EAGAIN;
46200
46201 pipe_unlock(ipipe);
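The splice.c hunks above convert the pipe's plain integer reader/writer counts into atomics, so every test becomes atomic_read() and the waiting-writers bookkeeping becomes atomic_inc()/atomic_dec(). A C11 userspace sketch of the same pattern, with <stdatomic.h> standing in for the kernel's atomic_t:

#include <stdatomic.h>
#include <stdio.h>

struct pipe_like {
	atomic_int readers;
	atomic_int waiting_writers;
};

static int pipe_has_readers(struct pipe_like *p)
{
	/* mirrors "if (!atomic_read(&pipe->readers)) send_sig(SIGPIPE, ...)" */
	return atomic_load(&p->readers) != 0;
}

static void writer_waits(struct pipe_like *p)
{
	atomic_fetch_add(&p->waiting_writers, 1);   /* atomic_inc() */
	/* ... pipe_wait(pipe) would sleep here ... */
	atomic_fetch_sub(&p->waiting_writers, 1);   /* atomic_dec() */
}

int main(void)
{
	struct pipe_like p;

	atomic_init(&p.readers, 1);
	atomic_init(&p.waiting_writers, 0);

	writer_waits(&p);
	printf("readers present: %d\n", pipe_has_readers(&p));
	return 0;
}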
46202 diff -urNp linux-2.6.32.45/fs/sysfs/file.c linux-2.6.32.45/fs/sysfs/file.c
46203 --- linux-2.6.32.45/fs/sysfs/file.c 2011-03-27 14:31:47.000000000 -0400
46204 +++ linux-2.6.32.45/fs/sysfs/file.c 2011-05-04 17:56:20.000000000 -0400
46205 @@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
46206
46207 struct sysfs_open_dirent {
46208 atomic_t refcnt;
46209 - atomic_t event;
46210 + atomic_unchecked_t event;
46211 wait_queue_head_t poll;
46212 struct list_head buffers; /* goes through sysfs_buffer.list */
46213 };
46214 @@ -53,7 +53,7 @@ struct sysfs_buffer {
46215 size_t count;
46216 loff_t pos;
46217 char * page;
46218 - struct sysfs_ops * ops;
46219 + const struct sysfs_ops * ops;
46220 struct mutex mutex;
46221 int needs_read_fill;
46222 int event;
46223 @@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentr
46224 {
46225 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
46226 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
46227 - struct sysfs_ops * ops = buffer->ops;
46228 + const struct sysfs_ops * ops = buffer->ops;
46229 int ret = 0;
46230 ssize_t count;
46231
46232 @@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentr
46233 if (!sysfs_get_active_two(attr_sd))
46234 return -ENODEV;
46235
46236 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
46237 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
46238 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
46239
46240 sysfs_put_active_two(attr_sd);
46241 @@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentr
46242 {
46243 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
46244 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
46245 - struct sysfs_ops * ops = buffer->ops;
46246 + const struct sysfs_ops * ops = buffer->ops;
46247 int rc;
46248
46249 /* need attr_sd for attr and ops, its parent for kobj */
46250 @@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct
46251 return -ENOMEM;
46252
46253 atomic_set(&new_od->refcnt, 0);
46254 - atomic_set(&new_od->event, 1);
46255 + atomic_set_unchecked(&new_od->event, 1);
46256 init_waitqueue_head(&new_od->poll);
46257 INIT_LIST_HEAD(&new_od->buffers);
46258 goto retry;
46259 @@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode
46260 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
46261 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
46262 struct sysfs_buffer *buffer;
46263 - struct sysfs_ops *ops;
46264 + const struct sysfs_ops *ops;
46265 int error = -EACCES;
46266 char *p;
46267
46268 @@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct fi
46269
46270 sysfs_put_active_two(attr_sd);
46271
46272 - if (buffer->event != atomic_read(&od->event))
46273 + if (buffer->event != atomic_read_unchecked(&od->event))
46274 goto trigger;
46275
46276 return DEFAULT_POLLMASK;
46277 @@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_di
46278
46279 od = sd->s_attr.open;
46280 if (od) {
46281 - atomic_inc(&od->event);
46282 + atomic_inc_unchecked(&od->event);
46283 wake_up_interruptible(&od->poll);
46284 }
46285
46286 diff -urNp linux-2.6.32.45/fs/sysfs/mount.c linux-2.6.32.45/fs/sysfs/mount.c
46287 --- linux-2.6.32.45/fs/sysfs/mount.c 2011-03-27 14:31:47.000000000 -0400
46288 +++ linux-2.6.32.45/fs/sysfs/mount.c 2011-04-17 15:56:46.000000000 -0400
46289 @@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
46290 .s_name = "",
46291 .s_count = ATOMIC_INIT(1),
46292 .s_flags = SYSFS_DIR,
46293 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
46294 + .s_mode = S_IFDIR | S_IRWXU,
46295 +#else
46296 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
46297 +#endif
46298 .s_ino = 1,
46299 };
46300
46301 diff -urNp linux-2.6.32.45/fs/sysfs/symlink.c linux-2.6.32.45/fs/sysfs/symlink.c
46302 --- linux-2.6.32.45/fs/sysfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
46303 +++ linux-2.6.32.45/fs/sysfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
46304 @@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct de
46305
46306 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
46307 {
46308 - char *page = nd_get_link(nd);
46309 + const char *page = nd_get_link(nd);
46310 if (!IS_ERR(page))
46311 free_page((unsigned long)page);
46312 }
46313 diff -urNp linux-2.6.32.45/fs/udf/balloc.c linux-2.6.32.45/fs/udf/balloc.c
46314 --- linux-2.6.32.45/fs/udf/balloc.c 2011-03-27 14:31:47.000000000 -0400
46315 +++ linux-2.6.32.45/fs/udf/balloc.c 2011-04-17 15:56:46.000000000 -0400
46316 @@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struc
46317
46318 mutex_lock(&sbi->s_alloc_mutex);
46319 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
46320 - if (bloc->logicalBlockNum < 0 ||
46321 - (bloc->logicalBlockNum + count) >
46322 - partmap->s_partition_len) {
46323 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
46324 udf_debug("%d < %d || %d + %d > %d\n",
46325 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
46326 count, partmap->s_partition_len);
46327 @@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct
46328
46329 mutex_lock(&sbi->s_alloc_mutex);
46330 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
46331 - if (bloc->logicalBlockNum < 0 ||
46332 - (bloc->logicalBlockNum + count) >
46333 - partmap->s_partition_len) {
46334 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
46335 udf_debug("%d < %d || %d + %d > %d\n",
46336 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
46337 partmap->s_partition_len);
46338 diff -urNp linux-2.6.32.45/fs/udf/inode.c linux-2.6.32.45/fs/udf/inode.c
46339 --- linux-2.6.32.45/fs/udf/inode.c 2011-03-27 14:31:47.000000000 -0400
46340 +++ linux-2.6.32.45/fs/udf/inode.c 2011-05-16 21:46:57.000000000 -0400
46341 @@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(
46342 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
46343 int lastblock = 0;
46344
46345 + pax_track_stack();
46346 +
46347 prev_epos.offset = udf_file_entry_alloc_offset(inode);
46348 prev_epos.block = iinfo->i_location;
46349 prev_epos.bh = NULL;
46350 diff -urNp linux-2.6.32.45/fs/udf/misc.c linux-2.6.32.45/fs/udf/misc.c
46351 --- linux-2.6.32.45/fs/udf/misc.c 2011-03-27 14:31:47.000000000 -0400
46352 +++ linux-2.6.32.45/fs/udf/misc.c 2011-04-23 12:56:11.000000000 -0400
46353 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
46354
46355 u8 udf_tag_checksum(const struct tag *t)
46356 {
46357 - u8 *data = (u8 *)t;
46358 + const u8 *data = (const u8 *)t;
46359 u8 checksum = 0;
46360 int i;
46361 for (i = 0; i < sizeof(struct tag); ++i)
46362 diff -urNp linux-2.6.32.45/fs/utimes.c linux-2.6.32.45/fs/utimes.c
46363 --- linux-2.6.32.45/fs/utimes.c 2011-03-27 14:31:47.000000000 -0400
46364 +++ linux-2.6.32.45/fs/utimes.c 2011-04-17 15:56:46.000000000 -0400
46365 @@ -1,6 +1,7 @@
46366 #include <linux/compiler.h>
46367 #include <linux/file.h>
46368 #include <linux/fs.h>
46369 +#include <linux/security.h>
46370 #include <linux/linkage.h>
46371 #include <linux/mount.h>
46372 #include <linux/namei.h>
46373 @@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
46374 goto mnt_drop_write_and_out;
46375 }
46376 }
46377 +
46378 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
46379 + error = -EACCES;
46380 + goto mnt_drop_write_and_out;
46381 + }
46382 +
46383 mutex_lock(&inode->i_mutex);
46384 error = notify_change(path->dentry, &newattrs);
46385 mutex_unlock(&inode->i_mutex);
46386 diff -urNp linux-2.6.32.45/fs/xattr_acl.c linux-2.6.32.45/fs/xattr_acl.c
46387 --- linux-2.6.32.45/fs/xattr_acl.c 2011-03-27 14:31:47.000000000 -0400
46388 +++ linux-2.6.32.45/fs/xattr_acl.c 2011-04-17 15:56:46.000000000 -0400
46389 @@ -17,8 +17,8 @@
46390 struct posix_acl *
46391 posix_acl_from_xattr(const void *value, size_t size)
46392 {
46393 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
46394 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
46395 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
46396 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
46397 int count;
46398 struct posix_acl *acl;
46399 struct posix_acl_entry *acl_e;
46400 diff -urNp linux-2.6.32.45/fs/xattr.c linux-2.6.32.45/fs/xattr.c
46401 --- linux-2.6.32.45/fs/xattr.c 2011-03-27 14:31:47.000000000 -0400
46402 +++ linux-2.6.32.45/fs/xattr.c 2011-04-17 15:56:46.000000000 -0400
46403 @@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
46404 * Extended attribute SET operations
46405 */
46406 static long
46407 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
46408 +setxattr(struct path *path, const char __user *name, const void __user *value,
46409 size_t size, int flags)
46410 {
46411 int error;
46412 @@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __
46413 return PTR_ERR(kvalue);
46414 }
46415
46416 - error = vfs_setxattr(d, kname, kvalue, size, flags);
46417 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
46418 + error = -EACCES;
46419 + goto out;
46420 + }
46421 +
46422 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
46423 +out:
46424 kfree(kvalue);
46425 return error;
46426 }
46427 @@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
46428 return error;
46429 error = mnt_want_write(path.mnt);
46430 if (!error) {
46431 - error = setxattr(path.dentry, name, value, size, flags);
46432 + error = setxattr(&path, name, value, size, flags);
46433 mnt_drop_write(path.mnt);
46434 }
46435 path_put(&path);
46436 @@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
46437 return error;
46438 error = mnt_want_write(path.mnt);
46439 if (!error) {
46440 - error = setxattr(path.dentry, name, value, size, flags);
46441 + error = setxattr(&path, name, value, size, flags);
46442 mnt_drop_write(path.mnt);
46443 }
46444 path_put(&path);
46445 @@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
46446 const void __user *,value, size_t, size, int, flags)
46447 {
46448 struct file *f;
46449 - struct dentry *dentry;
46450 int error = -EBADF;
46451
46452 f = fget(fd);
46453 if (!f)
46454 return error;
46455 - dentry = f->f_path.dentry;
46456 - audit_inode(NULL, dentry);
46457 + audit_inode(NULL, f->f_path.dentry);
46458 error = mnt_want_write_file(f);
46459 if (!error) {
46460 - error = setxattr(dentry, name, value, size, flags);
46461 + error = setxattr(&f->f_path, name, value, size, flags);
46462 mnt_drop_write(f->f_path.mnt);
46463 }
46464 fput(f);
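The xattr.c changes above widen setxattr()'s first argument from a dentry to a struct path so the permission hook can see both the dentry and the mount it was reached through. A compact userspace sketch of that refactor; the structs and the policy_check() stand-in are illustrative only, not the grsecurity hook.

#include <stdio.h>

struct dentry { const char *name; };
struct mount  { int read_only; };
struct path   { struct dentry *dentry; struct mount *mnt; };

/* stand-in for gr_acl_handle_setxattr(dentry, mnt): nonzero means allowed */
static int policy_check(const struct dentry *d, const struct mount *m)
{
	return !m->read_only;
}

static int setxattr_like(const struct path *path, const char *name)
{
	if (!policy_check(path->dentry, path->mnt))
		return -13;                     /* -EACCES, as in the patch */
	printf("set %s on %s\n", name, path->dentry->name);
	return 0;
}

int main(void)
{
	struct dentry d = { "file.txt" };
	struct mount  m = { 0 };
	struct path   p = { &d, &m };

	return setxattr_like(&p, "user.comment") ? 1 : 0;
}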
46465 diff -urNp linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl32.c linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl32.c
46466 --- linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-03-27 14:31:47.000000000 -0400
46467 +++ linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-04-17 15:56:46.000000000 -0400
46468 @@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
46469 xfs_fsop_geom_t fsgeo;
46470 int error;
46471
46472 + memset(&fsgeo, 0, sizeof(fsgeo));
46473 error = xfs_fs_geometry(mp, &fsgeo, 3);
46474 if (error)
46475 return -error;
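The one-line memset added above zeroes the on-stack xfs_fsop_geom_t before xfs_fs_geometry() fills it and before it is copied to user space, so struct padding and any fields the fill routine leaves untouched cannot leak old kernel stack bytes. A userspace model of the same defensive pattern, with a made-up struct:

#include <string.h>
#include <stdio.h>

struct geom {           /* illustrative layout with implicit padding */
	unsigned char flag;
	/* a few bytes of padding typically live here */
	unsigned int  blocks;
};

static void fill_geom(struct geom *g)
{
	g->flag = 1;
	g->blocks = 4096;   /* padding bytes keep whatever they already held */
}

int main(void)
{
	struct geom g;

	memset(&g, 0, sizeof(g));   /* the defensive zeroing added by the patch */
	fill_geom(&g);

	/* every byte of g is now defined, safe to copy out verbatim */
	printf("%u blocks\n", g.blocks);
	return 0;
}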
46476 diff -urNp linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl.c linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl.c
46477 --- linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 17:00:52.000000000 -0400
46478 +++ linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 20:07:09.000000000 -0400
46479 @@ -134,7 +134,7 @@ xfs_find_handle(
46480 }
46481
46482 error = -EFAULT;
46483 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
46484 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
46485 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
46486 goto out_put;
46487
46488 @@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
46489 if (IS_ERR(dentry))
46490 return PTR_ERR(dentry);
46491
46492 - kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
46493 + kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
46494 if (!kbuf)
46495 goto out_dput;
46496
46497 @@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
46498 xfs_mount_t *mp,
46499 void __user *arg)
46500 {
46501 - xfs_fsop_geom_t fsgeo;
46502 + xfs_fsop_geom_t fsgeo;
46503 int error;
46504
46505 error = xfs_fs_geometry(mp, &fsgeo, 3);
46506 diff -urNp linux-2.6.32.45/fs/xfs/linux-2.6/xfs_iops.c linux-2.6.32.45/fs/xfs/linux-2.6/xfs_iops.c
46507 --- linux-2.6.32.45/fs/xfs/linux-2.6/xfs_iops.c 2011-03-27 14:31:47.000000000 -0400
46508 +++ linux-2.6.32.45/fs/xfs/linux-2.6/xfs_iops.c 2011-04-17 15:56:46.000000000 -0400
46509 @@ -468,7 +468,7 @@ xfs_vn_put_link(
46510 struct nameidata *nd,
46511 void *p)
46512 {
46513 - char *s = nd_get_link(nd);
46514 + const char *s = nd_get_link(nd);
46515
46516 if (!IS_ERR(s))
46517 kfree(s);
46518 diff -urNp linux-2.6.32.45/fs/xfs/xfs_bmap.c linux-2.6.32.45/fs/xfs/xfs_bmap.c
46519 --- linux-2.6.32.45/fs/xfs/xfs_bmap.c 2011-03-27 14:31:47.000000000 -0400
46520 +++ linux-2.6.32.45/fs/xfs/xfs_bmap.c 2011-04-17 15:56:46.000000000 -0400
46521 @@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
46522 int nmap,
46523 int ret_nmap);
46524 #else
46525 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
46526 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
46527 #endif /* DEBUG */
46528
46529 #if defined(XFS_RW_TRACE)
46530 diff -urNp linux-2.6.32.45/fs/xfs/xfs_dir2_sf.c linux-2.6.32.45/fs/xfs/xfs_dir2_sf.c
46531 --- linux-2.6.32.45/fs/xfs/xfs_dir2_sf.c 2011-03-27 14:31:47.000000000 -0400
46532 +++ linux-2.6.32.45/fs/xfs/xfs_dir2_sf.c 2011-04-18 22:07:30.000000000 -0400
46533 @@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
46534 }
46535
46536 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
46537 - if (filldir(dirent, sfep->name, sfep->namelen,
46538 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
46539 + char name[sfep->namelen];
46540 + memcpy(name, sfep->name, sfep->namelen);
46541 + if (filldir(dirent, name, sfep->namelen,
46542 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
46543 + *offset = off & 0x7fffffff;
46544 + return 0;
46545 + }
46546 + } else if (filldir(dirent, sfep->name, sfep->namelen,
46547 off & 0x7fffffff, ino, DT_UNKNOWN)) {
46548 *offset = off & 0x7fffffff;
46549 return 0;
46550 diff -urNp linux-2.6.32.45/grsecurity/gracl_alloc.c linux-2.6.32.45/grsecurity/gracl_alloc.c
46551 --- linux-2.6.32.45/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
46552 +++ linux-2.6.32.45/grsecurity/gracl_alloc.c 2011-04-17 15:56:46.000000000 -0400
46553 @@ -0,0 +1,105 @@
46554 +#include <linux/kernel.h>
46555 +#include <linux/mm.h>
46556 +#include <linux/slab.h>
46557 +#include <linux/vmalloc.h>
46558 +#include <linux/gracl.h>
46559 +#include <linux/grsecurity.h>
46560 +
46561 +static unsigned long alloc_stack_next = 1;
46562 +static unsigned long alloc_stack_size = 1;
46563 +static void **alloc_stack;
46564 +
46565 +static __inline__ int
46566 +alloc_pop(void)
46567 +{
46568 + if (alloc_stack_next == 1)
46569 + return 0;
46570 +
46571 + kfree(alloc_stack[alloc_stack_next - 2]);
46572 +
46573 + alloc_stack_next--;
46574 +
46575 + return 1;
46576 +}
46577 +
46578 +static __inline__ int
46579 +alloc_push(void *buf)
46580 +{
46581 + if (alloc_stack_next >= alloc_stack_size)
46582 + return 1;
46583 +
46584 + alloc_stack[alloc_stack_next - 1] = buf;
46585 +
46586 + alloc_stack_next++;
46587 +
46588 + return 0;
46589 +}
46590 +
46591 +void *
46592 +acl_alloc(unsigned long len)
46593 +{
46594 + void *ret = NULL;
46595 +
46596 + if (!len || len > PAGE_SIZE)
46597 + goto out;
46598 +
46599 + ret = kmalloc(len, GFP_KERNEL);
46600 +
46601 + if (ret) {
46602 + if (alloc_push(ret)) {
46603 + kfree(ret);
46604 + ret = NULL;
46605 + }
46606 + }
46607 +
46608 +out:
46609 + return ret;
46610 +}
46611 +
46612 +void *
46613 +acl_alloc_num(unsigned long num, unsigned long len)
46614 +{
46615 + if (!len || (num > (PAGE_SIZE / len)))
46616 + return NULL;
46617 +
46618 + return acl_alloc(num * len);
46619 +}
46620 +
46621 +void
46622 +acl_free_all(void)
46623 +{
46624 + if (gr_acl_is_enabled() || !alloc_stack)
46625 + return;
46626 +
46627 + while (alloc_pop()) ;
46628 +
46629 + if (alloc_stack) {
46630 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
46631 + kfree(alloc_stack);
46632 + else
46633 + vfree(alloc_stack);
46634 + }
46635 +
46636 + alloc_stack = NULL;
46637 + alloc_stack_size = 1;
46638 + alloc_stack_next = 1;
46639 +
46640 + return;
46641 +}
46642 +
46643 +int
46644 +acl_alloc_stack_init(unsigned long size)
46645 +{
46646 + if ((size * sizeof (void *)) <= PAGE_SIZE)
46647 + alloc_stack =
46648 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
46649 + else
46650 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
46651 +
46652 + alloc_stack_size = size;
46653 +
46654 + if (!alloc_stack)
46655 + return 0;
46656 + else
46657 + return 1;
46658 +}
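gracl_alloc.c above is a small allocator that records every policy object it hands out on a pointer stack so the whole policy can be torn down in one sweep by acl_free_all(). A userspace model of the same push/free-all pattern; the names and the fixed slot count here are ours, not grsecurity's.

#include <stdlib.h>
#include <stdio.h>

static void **alloc_stack;
static unsigned long alloc_next = 1, alloc_size = 1;

static int stack_init(unsigned long slots)
{
	alloc_stack = malloc(slots * sizeof(void *));
	alloc_size = slots;
	alloc_next = 1;
	return alloc_stack != NULL;
}

static void *tracked_alloc(size_t len)
{
	void *p;

	if (alloc_next >= alloc_size)   /* stack full: refuse, like alloc_push() */
		return NULL;
	p = malloc(len);
	if (p)
		alloc_stack[alloc_next++ - 1] = p;
	return p;
}

static void free_all(void)
{
	while (alloc_next > 1)
		free(alloc_stack[--alloc_next - 1]);
	free(alloc_stack);
	alloc_stack = NULL;
	alloc_size = alloc_next = 1;
}

int main(void)
{
	if (!stack_init(8))
		return 1;
	tracked_alloc(32);
	tracked_alloc(64);
	free_all();                     /* everything released in one sweep */
	puts("done");
	return 0;
}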
46659 diff -urNp linux-2.6.32.45/grsecurity/gracl.c linux-2.6.32.45/grsecurity/gracl.c
46660 --- linux-2.6.32.45/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
46661 +++ linux-2.6.32.45/grsecurity/gracl.c 2011-07-14 20:02:48.000000000 -0400
46662 @@ -0,0 +1,4082 @@
46663 +#include <linux/kernel.h>
46664 +#include <linux/module.h>
46665 +#include <linux/sched.h>
46666 +#include <linux/mm.h>
46667 +#include <linux/file.h>
46668 +#include <linux/fs.h>
46669 +#include <linux/namei.h>
46670 +#include <linux/mount.h>
46671 +#include <linux/tty.h>
46672 +#include <linux/proc_fs.h>
46673 +#include <linux/smp_lock.h>
46674 +#include <linux/slab.h>
46675 +#include <linux/vmalloc.h>
46676 +#include <linux/types.h>
46677 +#include <linux/sysctl.h>
46678 +#include <linux/netdevice.h>
46679 +#include <linux/ptrace.h>
46680 +#include <linux/gracl.h>
46681 +#include <linux/gralloc.h>
46682 +#include <linux/grsecurity.h>
46683 +#include <linux/grinternal.h>
46684 +#include <linux/pid_namespace.h>
46685 +#include <linux/fdtable.h>
46686 +#include <linux/percpu.h>
46687 +
46688 +#include <asm/uaccess.h>
46689 +#include <asm/errno.h>
46690 +#include <asm/mman.h>
46691 +
46692 +static struct acl_role_db acl_role_set;
46693 +static struct name_db name_set;
46694 +static struct inodev_db inodev_set;
46695 +
46696 +/* for keeping track of userspace pointers used for subjects, so we
46697 + can share references in the kernel as well
46698 +*/
46699 +
46700 +static struct dentry *real_root;
46701 +static struct vfsmount *real_root_mnt;
46702 +
46703 +static struct acl_subj_map_db subj_map_set;
46704 +
46705 +static struct acl_role_label *default_role;
46706 +
46707 +static struct acl_role_label *role_list;
46708 +
46709 +static u16 acl_sp_role_value;
46710 +
46711 +extern char *gr_shared_page[4];
46712 +static DEFINE_MUTEX(gr_dev_mutex);
46713 +DEFINE_RWLOCK(gr_inode_lock);
46714 +
46715 +struct gr_arg *gr_usermode;
46716 +
46717 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
46718 +
46719 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
46720 +extern void gr_clear_learn_entries(void);
46721 +
46722 +#ifdef CONFIG_GRKERNSEC_RESLOG
46723 +extern void gr_log_resource(const struct task_struct *task,
46724 + const int res, const unsigned long wanted, const int gt);
46725 +#endif
46726 +
46727 +unsigned char *gr_system_salt;
46728 +unsigned char *gr_system_sum;
46729 +
46730 +static struct sprole_pw **acl_special_roles = NULL;
46731 +static __u16 num_sprole_pws = 0;
46732 +
46733 +static struct acl_role_label *kernel_role = NULL;
46734 +
46735 +static unsigned int gr_auth_attempts = 0;
46736 +static unsigned long gr_auth_expires = 0UL;
46737 +
46738 +#ifdef CONFIG_NET
46739 +extern struct vfsmount *sock_mnt;
46740 +#endif
46741 +extern struct vfsmount *pipe_mnt;
46742 +extern struct vfsmount *shm_mnt;
46743 +#ifdef CONFIG_HUGETLBFS
46744 +extern struct vfsmount *hugetlbfs_vfsmount;
46745 +#endif
46746 +
46747 +static struct acl_object_label *fakefs_obj_rw;
46748 +static struct acl_object_label *fakefs_obj_rwx;
46749 +
46750 +extern int gr_init_uidset(void);
46751 +extern void gr_free_uidset(void);
46752 +extern void gr_remove_uid(uid_t uid);
46753 +extern int gr_find_uid(uid_t uid);
46754 +
46755 +__inline__ int
46756 +gr_acl_is_enabled(void)
46757 +{
46758 + return (gr_status & GR_READY);
46759 +}
46760 +
46761 +#ifdef CONFIG_BTRFS_FS
46762 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
46763 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
46764 +#endif
46765 +
46766 +static inline dev_t __get_dev(const struct dentry *dentry)
46767 +{
46768 +#ifdef CONFIG_BTRFS_FS
46769 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
46770 + return get_btrfs_dev_from_inode(dentry->d_inode);
46771 + else
46772 +#endif
46773 + return dentry->d_inode->i_sb->s_dev;
46774 +}
46775 +
46776 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
46777 +{
46778 + return __get_dev(dentry);
46779 +}
46780 +
46781 +static char gr_task_roletype_to_char(struct task_struct *task)
46782 +{
46783 + switch (task->role->roletype &
46784 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
46785 + GR_ROLE_SPECIAL)) {
46786 + case GR_ROLE_DEFAULT:
46787 + return 'D';
46788 + case GR_ROLE_USER:
46789 + return 'U';
46790 + case GR_ROLE_GROUP:
46791 + return 'G';
46792 + case GR_ROLE_SPECIAL:
46793 + return 'S';
46794 + }
46795 +
46796 + return 'X';
46797 +}
46798 +
46799 +char gr_roletype_to_char(void)
46800 +{
46801 + return gr_task_roletype_to_char(current);
46802 +}
46803 +
46804 +__inline__ int
46805 +gr_acl_tpe_check(void)
46806 +{
46807 + if (unlikely(!(gr_status & GR_READY)))
46808 + return 0;
46809 + if (current->role->roletype & GR_ROLE_TPE)
46810 + return 1;
46811 + else
46812 + return 0;
46813 +}
46814 +
46815 +int
46816 +gr_handle_rawio(const struct inode *inode)
46817 +{
46818 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
46819 + if (inode && S_ISBLK(inode->i_mode) &&
46820 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
46821 + !capable(CAP_SYS_RAWIO))
46822 + return 1;
46823 +#endif
46824 + return 0;
46825 +}
46826 +
46827 +static int
46828 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
46829 +{
46830 + if (likely(lena != lenb))
46831 + return 0;
46832 +
46833 + return !memcmp(a, b, lena);
46834 +}
46835 +
46836 +/* this must be called with vfsmount_lock and dcache_lock held */
46837 +
46838 +static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
46839 + struct dentry *root, struct vfsmount *rootmnt,
46840 + char *buffer, int buflen)
46841 +{
46842 + char * end = buffer+buflen;
46843 + char * retval;
46844 + int namelen;
46845 +
46846 + *--end = '\0';
46847 + buflen--;
46848 +
46849 + if (buflen < 1)
46850 + goto Elong;
46851 + /* Get '/' right */
46852 + retval = end-1;
46853 + *retval = '/';
46854 +
46855 + for (;;) {
46856 + struct dentry * parent;
46857 +
46858 + if (dentry == root && vfsmnt == rootmnt)
46859 + break;
46860 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
46861 + /* Global root? */
46862 + if (vfsmnt->mnt_parent == vfsmnt)
46863 + goto global_root;
46864 + dentry = vfsmnt->mnt_mountpoint;
46865 + vfsmnt = vfsmnt->mnt_parent;
46866 + continue;
46867 + }
46868 + parent = dentry->d_parent;
46869 + prefetch(parent);
46870 + namelen = dentry->d_name.len;
46871 + buflen -= namelen + 1;
46872 + if (buflen < 0)
46873 + goto Elong;
46874 + end -= namelen;
46875 + memcpy(end, dentry->d_name.name, namelen);
46876 + *--end = '/';
46877 + retval = end;
46878 + dentry = parent;
46879 + }
46880 +
46881 +out:
46882 + return retval;
46883 +
46884 +global_root:
46885 + namelen = dentry->d_name.len;
46886 + buflen -= namelen;
46887 + if (buflen < 0)
46888 + goto Elong;
46889 + retval -= namelen-1; /* hit the slash */
46890 + memcpy(retval, dentry->d_name.name, namelen);
46891 + goto out;
46892 +Elong:
46893 + retval = ERR_PTR(-ENAMETOOLONG);
46894 + goto out;
46895 +}
46896 +
46897 +static char *
46898 +gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
46899 + struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
46900 +{
46901 + char *retval;
46902 +
46903 + retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
46904 + if (unlikely(IS_ERR(retval)))
46905 + retval = strcpy(buf, "<path too long>");
46906 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
46907 + retval[1] = '\0';
46908 +
46909 + return retval;
46910 +}
46911 +
46912 +static char *
46913 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
46914 + char *buf, int buflen)
46915 +{
46916 + char *res;
46917 +
46918 + /* we can use real_root, real_root_mnt, because this is only called
46919 + by the RBAC system */
46920 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
46921 +
46922 + return res;
46923 +}
46924 +
46925 +static char *
46926 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
46927 + char *buf, int buflen)
46928 +{
46929 + char *res;
46930 + struct dentry *root;
46931 + struct vfsmount *rootmnt;
46932 + struct task_struct *reaper = &init_task;
46933 +
46934 + /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
46935 + read_lock(&reaper->fs->lock);
46936 + root = dget(reaper->fs->root.dentry);
46937 + rootmnt = mntget(reaper->fs->root.mnt);
46938 + read_unlock(&reaper->fs->lock);
46939 +
46940 + spin_lock(&dcache_lock);
46941 + spin_lock(&vfsmount_lock);
46942 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
46943 + spin_unlock(&vfsmount_lock);
46944 + spin_unlock(&dcache_lock);
46945 +
46946 + dput(root);
46947 + mntput(rootmnt);
46948 + return res;
46949 +}
46950 +
46951 +static char *
46952 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
46953 +{
46954 + char *ret;
46955 + spin_lock(&dcache_lock);
46956 + spin_lock(&vfsmount_lock);
46957 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
46958 + PAGE_SIZE);
46959 + spin_unlock(&vfsmount_lock);
46960 + spin_unlock(&dcache_lock);
46961 + return ret;
46962 +}
46963 +
46964 +char *
46965 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
46966 +{
46967 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
46968 + PAGE_SIZE);
46969 +}
46970 +
46971 +char *
46972 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
46973 +{
46974 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
46975 + PAGE_SIZE);
46976 +}
46977 +
46978 +char *
46979 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
46980 +{
46981 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
46982 + PAGE_SIZE);
46983 +}
46984 +
46985 +char *
46986 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
46987 +{
46988 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
46989 + PAGE_SIZE);
46990 +}
46991 +
46992 +char *
46993 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
46994 +{
46995 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
46996 + PAGE_SIZE);
46997 +}
46998 +
46999 +__inline__ __u32
47000 +to_gr_audit(const __u32 reqmode)
47001 +{
47002 + /* masks off auditable permission flags, then shifts them to create
47003 + auditing flags, and adds the special case of append auditing if
47004 + we're requesting write */
47005 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
47006 +}
47007 +
47008 +struct acl_subject_label *
47009 +lookup_subject_map(const struct acl_subject_label *userp)
47010 +{
47011 + unsigned int index = shash(userp, subj_map_set.s_size);
47012 + struct subject_map *match;
47013 +
47014 + match = subj_map_set.s_hash[index];
47015 +
47016 + while (match && match->user != userp)
47017 + match = match->next;
47018 +
47019 + if (match != NULL)
47020 + return match->kernel;
47021 + else
47022 + return NULL;
47023 +}
47024 +
47025 +static void
47026 +insert_subj_map_entry(struct subject_map *subjmap)
47027 +{
47028 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
47029 + struct subject_map **curr;
47030 +
47031 + subjmap->prev = NULL;
47032 +
47033 + curr = &subj_map_set.s_hash[index];
47034 + if (*curr != NULL)
47035 + (*curr)->prev = subjmap;
47036 +
47037 + subjmap->next = *curr;
47038 + *curr = subjmap;
47039 +
47040 + return;
47041 +}
47042 +
47043 +static struct acl_role_label *
47044 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
47045 + const gid_t gid)
47046 +{
47047 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
47048 + struct acl_role_label *match;
47049 + struct role_allowed_ip *ipp;
47050 + unsigned int x;
47051 + u32 curr_ip = task->signal->curr_ip;
47052 +
47053 + task->signal->saved_ip = curr_ip;
47054 +
47055 + match = acl_role_set.r_hash[index];
47056 +
47057 + while (match) {
47058 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
47059 + for (x = 0; x < match->domain_child_num; x++) {
47060 + if (match->domain_children[x] == uid)
47061 + goto found;
47062 + }
47063 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
47064 + break;
47065 + match = match->next;
47066 + }
47067 +found:
47068 + if (match == NULL) {
47069 + try_group:
47070 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
47071 + match = acl_role_set.r_hash[index];
47072 +
47073 + while (match) {
47074 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
47075 + for (x = 0; x < match->domain_child_num; x++) {
47076 + if (match->domain_children[x] == gid)
47077 + goto found2;
47078 + }
47079 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
47080 + break;
47081 + match = match->next;
47082 + }
47083 +found2:
47084 + if (match == NULL)
47085 + match = default_role;
47086 + if (match->allowed_ips == NULL)
47087 + return match;
47088 + else {
47089 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47090 + if (likely
47091 + ((ntohl(curr_ip) & ipp->netmask) ==
47092 + (ntohl(ipp->addr) & ipp->netmask)))
47093 + return match;
47094 + }
47095 + match = default_role;
47096 + }
47097 + } else if (match->allowed_ips == NULL) {
47098 + return match;
47099 + } else {
47100 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47101 + if (likely
47102 + ((ntohl(curr_ip) & ipp->netmask) ==
47103 + (ntohl(ipp->addr) & ipp->netmask)))
47104 + return match;
47105 + }
47106 + goto try_group;
47107 + }
47108 +
47109 + return match;
47110 +}
47111 +
47112 +struct acl_subject_label *
47113 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
47114 + const struct acl_role_label *role)
47115 +{
47116 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
47117 + struct acl_subject_label *match;
47118 +
47119 + match = role->subj_hash[index];
47120 +
47121 + while (match && (match->inode != ino || match->device != dev ||
47122 + (match->mode & GR_DELETED))) {
47123 + match = match->next;
47124 + }
47125 +
47126 + if (match && !(match->mode & GR_DELETED))
47127 + return match;
47128 + else
47129 + return NULL;
47130 +}
47131 +
47132 +struct acl_subject_label *
47133 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
47134 + const struct acl_role_label *role)
47135 +{
47136 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
47137 + struct acl_subject_label *match;
47138 +
47139 + match = role->subj_hash[index];
47140 +
47141 + while (match && (match->inode != ino || match->device != dev ||
47142 + !(match->mode & GR_DELETED))) {
47143 + match = match->next;
47144 + }
47145 +
47146 + if (match && (match->mode & GR_DELETED))
47147 + return match;
47148 + else
47149 + return NULL;
47150 +}
47151 +
47152 +static struct acl_object_label *
47153 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
47154 + const struct acl_subject_label *subj)
47155 +{
47156 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47157 + struct acl_object_label *match;
47158 +
47159 + match = subj->obj_hash[index];
47160 +
47161 + while (match && (match->inode != ino || match->device != dev ||
47162 + (match->mode & GR_DELETED))) {
47163 + match = match->next;
47164 + }
47165 +
47166 + if (match && !(match->mode & GR_DELETED))
47167 + return match;
47168 + else
47169 + return NULL;
47170 +}
47171 +
47172 +static struct acl_object_label *
47173 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
47174 + const struct acl_subject_label *subj)
47175 +{
47176 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47177 + struct acl_object_label *match;
47178 +
47179 + match = subj->obj_hash[index];
47180 +
47181 + while (match && (match->inode != ino || match->device != dev ||
47182 + !(match->mode & GR_DELETED))) {
47183 + match = match->next;
47184 + }
47185 +
47186 + if (match && (match->mode & GR_DELETED))
47187 + return match;
47188 +
47189 + match = subj->obj_hash[index];
47190 +
47191 + while (match && (match->inode != ino || match->device != dev ||
47192 + (match->mode & GR_DELETED))) {
47193 + match = match->next;
47194 + }
47195 +
47196 + if (match && !(match->mode & GR_DELETED))
47197 + return match;
47198 + else
47199 + return NULL;
47200 +}
47201 +
47202 +static struct name_entry *
47203 +lookup_name_entry(const char *name)
47204 +{
47205 + unsigned int len = strlen(name);
47206 + unsigned int key = full_name_hash(name, len);
47207 + unsigned int index = key % name_set.n_size;
47208 + struct name_entry *match;
47209 +
47210 + match = name_set.n_hash[index];
47211 +
47212 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
47213 + match = match->next;
47214 +
47215 + return match;
47216 +}
47217 +
47218 +static struct name_entry *
47219 +lookup_name_entry_create(const char *name)
47220 +{
47221 + unsigned int len = strlen(name);
47222 + unsigned int key = full_name_hash(name, len);
47223 + unsigned int index = key % name_set.n_size;
47224 + struct name_entry *match;
47225 +
47226 + match = name_set.n_hash[index];
47227 +
47228 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
47229 + !match->deleted))
47230 + match = match->next;
47231 +
47232 + if (match && match->deleted)
47233 + return match;
47234 +
47235 + match = name_set.n_hash[index];
47236 +
47237 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
47238 + match->deleted))
47239 + match = match->next;
47240 +
47241 + if (match && !match->deleted)
47242 + return match;
47243 + else
47244 + return NULL;
47245 +}
47246 +
47247 +static struct inodev_entry *
47248 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
47249 +{
47250 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
47251 + struct inodev_entry *match;
47252 +
47253 + match = inodev_set.i_hash[index];
47254 +
47255 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
47256 + match = match->next;
47257 +
47258 + return match;
47259 +}
47260 +
47261 +static void
47262 +insert_inodev_entry(struct inodev_entry *entry)
47263 +{
47264 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
47265 + inodev_set.i_size);
47266 + struct inodev_entry **curr;
47267 +
47268 + entry->prev = NULL;
47269 +
47270 + curr = &inodev_set.i_hash[index];
47271 + if (*curr != NULL)
47272 + (*curr)->prev = entry;
47273 +
47274 + entry->next = *curr;
47275 + *curr = entry;
47276 +
47277 + return;
47278 +}
47279 +
47280 +static void
47281 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
47282 +{
47283 + unsigned int index =
47284 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
47285 + struct acl_role_label **curr;
47286 + struct acl_role_label *tmp;
47287 +
47288 + curr = &acl_role_set.r_hash[index];
47289 +
47290 +	/* if the role was already inserted due to domains and already has
47291 +	   another role attached in the same bucket, then we need to
47292 +	   combine these two chains
47293 +	 */
47294 + if (role->next) {
47295 + tmp = role->next;
47296 + while (tmp->next)
47297 + tmp = tmp->next;
47298 + tmp->next = *curr;
47299 + } else
47300 + role->next = *curr;
47301 + *curr = role;
47302 +
47303 + return;
47304 +}
47305 +
47306 +static void
47307 +insert_acl_role_label(struct acl_role_label *role)
47308 +{
47309 + int i;
47310 +
47311 + if (role_list == NULL) {
47312 + role_list = role;
47313 + role->prev = NULL;
47314 + } else {
47315 + role->prev = role_list;
47316 + role_list = role;
47317 + }
47318 +
47319 + /* used for hash chains */
47320 + role->next = NULL;
47321 +
47322 + if (role->roletype & GR_ROLE_DOMAIN) {
47323 + for (i = 0; i < role->domain_child_num; i++)
47324 + __insert_acl_role_label(role, role->domain_children[i]);
47325 + } else
47326 + __insert_acl_role_label(role, role->uidgid);
47327 +}
47328 +
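+/* add a pathname to the name hash table (keyed by full_name_hash) and
+   mirror it into the inode/dev table; returns 0 only on allocation failure */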
47329 +static int
47330 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
47331 +{
47332 + struct name_entry **curr, *nentry;
47333 + struct inodev_entry *ientry;
47334 + unsigned int len = strlen(name);
47335 + unsigned int key = full_name_hash(name, len);
47336 + unsigned int index = key % name_set.n_size;
47337 +
47338 + curr = &name_set.n_hash[index];
47339 +
47340 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
47341 + curr = &((*curr)->next);
47342 +
47343 + if (*curr != NULL)
47344 + return 1;
47345 +
47346 + nentry = acl_alloc(sizeof (struct name_entry));
47347 + if (nentry == NULL)
47348 + return 0;
47349 + ientry = acl_alloc(sizeof (struct inodev_entry));
47350 + if (ientry == NULL)
47351 + return 0;
47352 + ientry->nentry = nentry;
47353 +
47354 + nentry->key = key;
47355 + nentry->name = name;
47356 + nentry->inode = inode;
47357 + nentry->device = device;
47358 + nentry->len = len;
47359 + nentry->deleted = deleted;
47360 +
47361 + nentry->prev = NULL;
47362 + curr = &name_set.n_hash[index];
47363 + if (*curr != NULL)
47364 + (*curr)->prev = nentry;
47365 + nentry->next = *curr;
47366 + *curr = nentry;
47367 +
47368 + /* insert us into the table searchable by inode/dev */
47369 + insert_inodev_entry(ientry);
47370 +
47371 + return 1;
47372 +}
47373 +
47374 +static void
47375 +insert_acl_obj_label(struct acl_object_label *obj,
47376 + struct acl_subject_label *subj)
47377 +{
47378 + unsigned int index =
47379 + fhash(obj->inode, obj->device, subj->obj_hash_size);
47380 + struct acl_object_label **curr;
47381 +
47382 +
47383 + obj->prev = NULL;
47384 +
47385 + curr = &subj->obj_hash[index];
47386 + if (*curr != NULL)
47387 + (*curr)->prev = obj;
47388 +
47389 + obj->next = *curr;
47390 + *curr = obj;
47391 +
47392 + return;
47393 +}
47394 +
47395 +static void
47396 +insert_acl_subj_label(struct acl_subject_label *obj,
47397 + struct acl_role_label *role)
47398 +{
47399 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
47400 + struct acl_subject_label **curr;
47401 +
47402 + obj->prev = NULL;
47403 +
47404 + curr = &role->subj_hash[index];
47405 + if (*curr != NULL)
47406 + (*curr)->prev = obj;
47407 +
47408 + obj->next = *curr;
47409 + *curr = obj;
47410 +
47411 + return;
47412 +}
47413 +
47414 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
47415 +
47416 +static void *
47417 +create_table(__u32 * len, int elementsize)
47418 +{
47419 + unsigned int table_sizes[] = {
47420 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
47421 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
47422 + 4194301, 8388593, 16777213, 33554393, 67108859
47423 + };
47424 + void *newtable = NULL;
47425 + unsigned int pwr = 0;
47426 +
47427 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
47428 + table_sizes[pwr] <= *len)
47429 + pwr++;
47430 +
47431 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
47432 + return newtable;
47433 +
47434 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
47435 + newtable =
47436 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
47437 + else
47438 + newtable = vmalloc(table_sizes[pwr] * elementsize);
47439 +
47440 + *len = table_sizes[pwr];
47441 +
47442 + return newtable;
47443 +}
47444 +
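+/* size and allocate the global role/subject/name/inodev hash tables from the
+   counts supplied by userspace, set up the allocation stack and fake objects,
+   and take references on the real root dentry/vfsmount; returns 1 on failure */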
47445 +static int
47446 +init_variables(const struct gr_arg *arg)
47447 +{
47448 + struct task_struct *reaper = &init_task;
47449 + unsigned int stacksize;
47450 +
47451 + subj_map_set.s_size = arg->role_db.num_subjects;
47452 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
47453 + name_set.n_size = arg->role_db.num_objects;
47454 + inodev_set.i_size = arg->role_db.num_objects;
47455 +
47456 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
47457 + !name_set.n_size || !inodev_set.i_size)
47458 + return 1;
47459 +
47460 + if (!gr_init_uidset())
47461 + return 1;
47462 +
47463 + /* set up the stack that holds allocation info */
47464 +
47465 + stacksize = arg->role_db.num_pointers + 5;
47466 +
47467 + if (!acl_alloc_stack_init(stacksize))
47468 + return 1;
47469 +
47470 + /* grab reference for the real root dentry and vfsmount */
47471 + read_lock(&reaper->fs->lock);
47472 + real_root = dget(reaper->fs->root.dentry);
47473 + real_root_mnt = mntget(reaper->fs->root.mnt);
47474 + read_unlock(&reaper->fs->lock);
47475 +
47476 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47477 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
47478 +#endif
47479 +
47480 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
47481 + if (fakefs_obj_rw == NULL)
47482 + return 1;
47483 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
47484 +
47485 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
47486 + if (fakefs_obj_rwx == NULL)
47487 + return 1;
47488 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
47489 +
47490 + subj_map_set.s_hash =
47491 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
47492 + acl_role_set.r_hash =
47493 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
47494 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
47495 + inodev_set.i_hash =
47496 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
47497 +
47498 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
47499 + !name_set.n_hash || !inodev_set.i_hash)
47500 + return 1;
47501 +
47502 + memset(subj_map_set.s_hash, 0,
47503 + sizeof(struct subject_map *) * subj_map_set.s_size);
47504 + memset(acl_role_set.r_hash, 0,
47505 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
47506 + memset(name_set.n_hash, 0,
47507 + sizeof (struct name_entry *) * name_set.n_size);
47508 + memset(inodev_set.i_hash, 0,
47509 + sizeof (struct inodev_entry *) * inodev_set.i_size);
47510 +
47511 + return 0;
47512 +}
47513 +
47514 +/* free information not needed after startup;
47515 +   currently this contains the user->kernel pointer mappings for subjects
47516 +*/
47517 +
47518 +static void
47519 +free_init_variables(void)
47520 +{
47521 + __u32 i;
47522 +
47523 + if (subj_map_set.s_hash) {
47524 + for (i = 0; i < subj_map_set.s_size; i++) {
47525 + if (subj_map_set.s_hash[i]) {
47526 + kfree(subj_map_set.s_hash[i]);
47527 + subj_map_set.s_hash[i] = NULL;
47528 + }
47529 + }
47530 +
47531 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
47532 + PAGE_SIZE)
47533 + kfree(subj_map_set.s_hash);
47534 + else
47535 + vfree(subj_map_set.s_hash);
47536 + }
47537 +
47538 + return;
47539 +}
47540 +
47541 +static void
47542 +free_variables(void)
47543 +{
47544 + struct acl_subject_label *s;
47545 + struct acl_role_label *r;
47546 + struct task_struct *task, *task2;
47547 + unsigned int x;
47548 +
47549 + gr_clear_learn_entries();
47550 +
47551 + read_lock(&tasklist_lock);
47552 + do_each_thread(task2, task) {
47553 + task->acl_sp_role = 0;
47554 + task->acl_role_id = 0;
47555 + task->acl = NULL;
47556 + task->role = NULL;
47557 + } while_each_thread(task2, task);
47558 + read_unlock(&tasklist_lock);
47559 +
47560 + /* release the reference to the real root dentry and vfsmount */
47561 + if (real_root)
47562 + dput(real_root);
47563 + real_root = NULL;
47564 + if (real_root_mnt)
47565 + mntput(real_root_mnt);
47566 + real_root_mnt = NULL;
47567 +
47568 + /* free all object hash tables */
47569 +
47570 + FOR_EACH_ROLE_START(r)
47571 + if (r->subj_hash == NULL)
47572 + goto next_role;
47573 + FOR_EACH_SUBJECT_START(r, s, x)
47574 + if (s->obj_hash == NULL)
47575 + break;
47576 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
47577 + kfree(s->obj_hash);
47578 + else
47579 + vfree(s->obj_hash);
47580 + FOR_EACH_SUBJECT_END(s, x)
47581 + FOR_EACH_NESTED_SUBJECT_START(r, s)
47582 + if (s->obj_hash == NULL)
47583 + break;
47584 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
47585 + kfree(s->obj_hash);
47586 + else
47587 + vfree(s->obj_hash);
47588 + FOR_EACH_NESTED_SUBJECT_END(s)
47589 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
47590 + kfree(r->subj_hash);
47591 + else
47592 + vfree(r->subj_hash);
47593 + r->subj_hash = NULL;
47594 +next_role:
47595 + FOR_EACH_ROLE_END(r)
47596 +
47597 + acl_free_all();
47598 +
47599 + if (acl_role_set.r_hash) {
47600 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
47601 + PAGE_SIZE)
47602 + kfree(acl_role_set.r_hash);
47603 + else
47604 + vfree(acl_role_set.r_hash);
47605 + }
47606 + if (name_set.n_hash) {
47607 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
47608 + PAGE_SIZE)
47609 + kfree(name_set.n_hash);
47610 + else
47611 + vfree(name_set.n_hash);
47612 + }
47613 +
47614 + if (inodev_set.i_hash) {
47615 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
47616 + PAGE_SIZE)
47617 + kfree(inodev_set.i_hash);
47618 + else
47619 + vfree(inodev_set.i_hash);
47620 + }
47621 +
47622 + gr_free_uidset();
47623 +
47624 + memset(&name_set, 0, sizeof (struct name_db));
47625 + memset(&inodev_set, 0, sizeof (struct inodev_db));
47626 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
47627 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
47628 +
47629 + default_role = NULL;
47630 + role_list = NULL;
47631 +
47632 + return;
47633 +}
47634 +
47635 +static __u32
47636 +count_user_objs(struct acl_object_label *userp)
47637 +{
47638 + struct acl_object_label o_tmp;
47639 + __u32 num = 0;
47640 +
47641 + while (userp) {
47642 + if (copy_from_user(&o_tmp, userp,
47643 + sizeof (struct acl_object_label)))
47644 + break;
47645 +
47646 + userp = o_tmp.prev;
47647 + num++;
47648 + }
47649 +
47650 + return num;
47651 +}
47652 +
47653 +static struct acl_subject_label *
47654 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
47655 +
47656 +static int
47657 +copy_user_glob(struct acl_object_label *obj)
47658 +{
47659 + struct acl_object_label *g_tmp, **guser;
47660 + unsigned int len;
47661 + char *tmp;
47662 +
47663 + if (obj->globbed == NULL)
47664 + return 0;
47665 +
47666 + guser = &obj->globbed;
47667 + while (*guser) {
47668 + g_tmp = (struct acl_object_label *)
47669 + acl_alloc(sizeof (struct acl_object_label));
47670 + if (g_tmp == NULL)
47671 + return -ENOMEM;
47672 +
47673 + if (copy_from_user(g_tmp, *guser,
47674 + sizeof (struct acl_object_label)))
47675 + return -EFAULT;
47676 +
47677 + len = strnlen_user(g_tmp->filename, PATH_MAX);
47678 +
47679 + if (!len || len >= PATH_MAX)
47680 + return -EINVAL;
47681 +
47682 + if ((tmp = (char *) acl_alloc(len)) == NULL)
47683 + return -ENOMEM;
47684 +
47685 + if (copy_from_user(tmp, g_tmp->filename, len))
47686 + return -EFAULT;
47687 + tmp[len-1] = '\0';
47688 + g_tmp->filename = tmp;
47689 +
47690 + *guser = g_tmp;
47691 + guser = &(g_tmp->next);
47692 + }
47693 +
47694 + return 0;
47695 +}
47696 +
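+/* copy each object label (with its filename, glob list, and any nested
+   subject) from userspace into the subject's object hash and the name table */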
47697 +static int
47698 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
47699 + struct acl_role_label *role)
47700 +{
47701 + struct acl_object_label *o_tmp;
47702 + unsigned int len;
47703 + int ret;
47704 + char *tmp;
47705 +
47706 + while (userp) {
47707 + if ((o_tmp = (struct acl_object_label *)
47708 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
47709 + return -ENOMEM;
47710 +
47711 + if (copy_from_user(o_tmp, userp,
47712 + sizeof (struct acl_object_label)))
47713 + return -EFAULT;
47714 +
47715 + userp = o_tmp->prev;
47716 +
47717 + len = strnlen_user(o_tmp->filename, PATH_MAX);
47718 +
47719 + if (!len || len >= PATH_MAX)
47720 + return -EINVAL;
47721 +
47722 + if ((tmp = (char *) acl_alloc(len)) == NULL)
47723 + return -ENOMEM;
47724 +
47725 + if (copy_from_user(tmp, o_tmp->filename, len))
47726 + return -EFAULT;
47727 + tmp[len-1] = '\0';
47728 + o_tmp->filename = tmp;
47729 +
47730 + insert_acl_obj_label(o_tmp, subj);
47731 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
47732 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
47733 + return -ENOMEM;
47734 +
47735 + ret = copy_user_glob(o_tmp);
47736 + if (ret)
47737 + return ret;
47738 +
47739 + if (o_tmp->nested) {
47740 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
47741 + if (IS_ERR(o_tmp->nested))
47742 + return PTR_ERR(o_tmp->nested);
47743 +
47744 + /* insert into nested subject list */
47745 + o_tmp->nested->next = role->hash->first;
47746 + role->hash->first = o_tmp->nested;
47747 + }
47748 + }
47749 +
47750 + return 0;
47751 +}
47752 +
47753 +static __u32
47754 +count_user_subjs(struct acl_subject_label *userp)
47755 +{
47756 + struct acl_subject_label s_tmp;
47757 + __u32 num = 0;
47758 +
47759 + while (userp) {
47760 + if (copy_from_user(&s_tmp, userp,
47761 + sizeof (struct acl_subject_label)))
47762 + break;
47763 +
47764 + userp = s_tmp.prev;
47765 + /* do not count nested subjects against this count, since
47766 + they are not included in the hash table, but are
47767 + attached to objects. We have already counted
47768 + the subjects in userspace for the allocation
47769 + stack
47770 + */
47771 + if (!(s_tmp.mode & GR_NESTED))
47772 + num++;
47773 + }
47774 +
47775 + return num;
47776 +}
47777 +
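+/* rebuild the role's allowed-IP list in kernel memory from the userspace chain */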
47778 +static int
47779 +copy_user_allowedips(struct acl_role_label *rolep)
47780 +{
47781 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
47782 +
47783 + ruserip = rolep->allowed_ips;
47784 +
47785 + while (ruserip) {
47786 + rlast = rtmp;
47787 +
47788 + if ((rtmp = (struct role_allowed_ip *)
47789 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
47790 + return -ENOMEM;
47791 +
47792 + if (copy_from_user(rtmp, ruserip,
47793 + sizeof (struct role_allowed_ip)))
47794 + return -EFAULT;
47795 +
47796 + ruserip = rtmp->prev;
47797 +
47798 + if (!rlast) {
47799 + rtmp->prev = NULL;
47800 + rolep->allowed_ips = rtmp;
47801 + } else {
47802 + rlast->next = rtmp;
47803 + rtmp->prev = rlast;
47804 + }
47805 +
47806 + if (!ruserip)
47807 + rtmp->next = NULL;
47808 + }
47809 +
47810 + return 0;
47811 +}
47812 +
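+/* copy the role's transition list from userspace, duplicating each target role name */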
47813 +static int
47814 +copy_user_transitions(struct acl_role_label *rolep)
47815 +{
47816 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
47817 +
47818 + unsigned int len;
47819 + char *tmp;
47820 +
47821 + rusertp = rolep->transitions;
47822 +
47823 + while (rusertp) {
47824 + rlast = rtmp;
47825 +
47826 + if ((rtmp = (struct role_transition *)
47827 + acl_alloc(sizeof (struct role_transition))) == NULL)
47828 + return -ENOMEM;
47829 +
47830 + if (copy_from_user(rtmp, rusertp,
47831 + sizeof (struct role_transition)))
47832 + return -EFAULT;
47833 +
47834 + rusertp = rtmp->prev;
47835 +
47836 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
47837 +
47838 + if (!len || len >= GR_SPROLE_LEN)
47839 + return -EINVAL;
47840 +
47841 + if ((tmp = (char *) acl_alloc(len)) == NULL)
47842 + return -ENOMEM;
47843 +
47844 + if (copy_from_user(tmp, rtmp->rolename, len))
47845 + return -EFAULT;
47846 + tmp[len-1] = '\0';
47847 + rtmp->rolename = tmp;
47848 +
47849 + if (!rlast) {
47850 + rtmp->prev = NULL;
47851 + rolep->transitions = rtmp;
47852 + } else {
47853 + rlast->next = rtmp;
47854 + rtmp->prev = rlast;
47855 + }
47856 +
47857 + if (!rusertp)
47858 + rtmp->next = NULL;
47859 + }
47860 +
47861 + return 0;
47862 +}
47863 +
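+/* copy a single subject from userspace; the subject map memoizes the copy so a
+   subject referenced more than once (e.g. as a parent subject) is only brought
+   into the kernel once.  also copies its filename, uid/gid transition tables,
+   object hash, and ip acls */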
47864 +static struct acl_subject_label *
47865 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
47866 +{
47867 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
47868 + unsigned int len;
47869 + char *tmp;
47870 + __u32 num_objs;
47871 + struct acl_ip_label **i_tmp, *i_utmp2;
47872 + struct gr_hash_struct ghash;
47873 + struct subject_map *subjmap;
47874 + unsigned int i_num;
47875 + int err;
47876 +
47877 + s_tmp = lookup_subject_map(userp);
47878 +
47879 + /* we've already copied this subject into the kernel, just return
47880 + the reference to it, and don't copy it over again
47881 + */
47882 + if (s_tmp)
47883 + return(s_tmp);
47884 +
47885 + if ((s_tmp = (struct acl_subject_label *)
47886 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
47887 + return ERR_PTR(-ENOMEM);
47888 +
47889 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
47890 + if (subjmap == NULL)
47891 + return ERR_PTR(-ENOMEM);
47892 +
47893 + subjmap->user = userp;
47894 + subjmap->kernel = s_tmp;
47895 + insert_subj_map_entry(subjmap);
47896 +
47897 + if (copy_from_user(s_tmp, userp,
47898 + sizeof (struct acl_subject_label)))
47899 + return ERR_PTR(-EFAULT);
47900 +
47901 + len = strnlen_user(s_tmp->filename, PATH_MAX);
47902 +
47903 + if (!len || len >= PATH_MAX)
47904 + return ERR_PTR(-EINVAL);
47905 +
47906 + if ((tmp = (char *) acl_alloc(len)) == NULL)
47907 + return ERR_PTR(-ENOMEM);
47908 +
47909 + if (copy_from_user(tmp, s_tmp->filename, len))
47910 + return ERR_PTR(-EFAULT);
47911 + tmp[len-1] = '\0';
47912 + s_tmp->filename = tmp;
47913 +
47914 + if (!strcmp(s_tmp->filename, "/"))
47915 + role->root_label = s_tmp;
47916 +
47917 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
47918 + return ERR_PTR(-EFAULT);
47919 +
47920 + /* copy user and group transition tables */
47921 +
47922 + if (s_tmp->user_trans_num) {
47923 + uid_t *uidlist;
47924 +
47925 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
47926 + if (uidlist == NULL)
47927 + return ERR_PTR(-ENOMEM);
47928 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
47929 + return ERR_PTR(-EFAULT);
47930 +
47931 + s_tmp->user_transitions = uidlist;
47932 + }
47933 +
47934 + if (s_tmp->group_trans_num) {
47935 + gid_t *gidlist;
47936 +
47937 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
47938 + if (gidlist == NULL)
47939 + return ERR_PTR(-ENOMEM);
47940 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
47941 + return ERR_PTR(-EFAULT);
47942 +
47943 + s_tmp->group_transitions = gidlist;
47944 + }
47945 +
47946 + /* set up object hash table */
47947 + num_objs = count_user_objs(ghash.first);
47948 +
47949 + s_tmp->obj_hash_size = num_objs;
47950 + s_tmp->obj_hash =
47951 + (struct acl_object_label **)
47952 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
47953 +
47954 + if (!s_tmp->obj_hash)
47955 + return ERR_PTR(-ENOMEM);
47956 +
47957 + memset(s_tmp->obj_hash, 0,
47958 + s_tmp->obj_hash_size *
47959 + sizeof (struct acl_object_label *));
47960 +
47961 + /* add in objects */
47962 + err = copy_user_objs(ghash.first, s_tmp, role);
47963 +
47964 + if (err)
47965 + return ERR_PTR(err);
47966 +
47967 + /* set pointer for parent subject */
47968 + if (s_tmp->parent_subject) {
47969 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
47970 +
47971 + if (IS_ERR(s_tmp2))
47972 + return s_tmp2;
47973 +
47974 + s_tmp->parent_subject = s_tmp2;
47975 + }
47976 +
47977 + /* add in ip acls */
47978 +
47979 + if (!s_tmp->ip_num) {
47980 + s_tmp->ips = NULL;
47981 + goto insert;
47982 + }
47983 +
47984 + i_tmp =
47985 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
47986 + sizeof (struct acl_ip_label *));
47987 +
47988 + if (!i_tmp)
47989 + return ERR_PTR(-ENOMEM);
47990 +
47991 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
47992 + *(i_tmp + i_num) =
47993 + (struct acl_ip_label *)
47994 + acl_alloc(sizeof (struct acl_ip_label));
47995 + if (!*(i_tmp + i_num))
47996 + return ERR_PTR(-ENOMEM);
47997 +
47998 + if (copy_from_user
47999 + (&i_utmp2, s_tmp->ips + i_num,
48000 + sizeof (struct acl_ip_label *)))
48001 + return ERR_PTR(-EFAULT);
48002 +
48003 + if (copy_from_user
48004 + (*(i_tmp + i_num), i_utmp2,
48005 + sizeof (struct acl_ip_label)))
48006 + return ERR_PTR(-EFAULT);
48007 +
48008 + if ((*(i_tmp + i_num))->iface == NULL)
48009 + continue;
48010 +
48011 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
48012 + if (!len || len >= IFNAMSIZ)
48013 + return ERR_PTR(-EINVAL);
48014 + tmp = acl_alloc(len);
48015 + if (tmp == NULL)
48016 + return ERR_PTR(-ENOMEM);
48017 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
48018 + return ERR_PTR(-EFAULT);
48019 + (*(i_tmp + i_num))->iface = tmp;
48020 + }
48021 +
48022 + s_tmp->ips = i_tmp;
48023 +
48024 +insert:
48025 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
48026 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
48027 + return ERR_PTR(-ENOMEM);
48028 +
48029 + return s_tmp;
48030 +}
48031 +
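+/* walk the role's userspace subject list, copying each non-nested subject and
+   inserting it into the role's subject hash table */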
48032 +static int
48033 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
48034 +{
48035 + struct acl_subject_label s_pre;
48036 + struct acl_subject_label * ret;
48037 + int err;
48038 +
48039 + while (userp) {
48040 + if (copy_from_user(&s_pre, userp,
48041 + sizeof (struct acl_subject_label)))
48042 + return -EFAULT;
48043 +
48044 +		/* do not add nested subjects here; add them
48045 +		   while parsing objects
48046 +		*/
48047 +
48048 + if (s_pre.mode & GR_NESTED) {
48049 + userp = s_pre.prev;
48050 + continue;
48051 + }
48052 +
48053 + ret = do_copy_user_subj(userp, role);
48054 +
48055 + err = PTR_ERR(ret);
48056 + if (IS_ERR(ret))
48057 + return err;
48058 +
48059 + insert_acl_subj_label(ret, role);
48060 +
48061 + userp = s_pre.prev;
48062 + }
48063 +
48064 + return 0;
48065 +}
48066 +
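+/* top-level policy copy: special role passwords first, then every role along
+   with its allowed ips, domain members, transitions, and subject/object trees */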
48067 +static int
48068 +copy_user_acl(struct gr_arg *arg)
48069 +{
48070 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
48071 + struct sprole_pw *sptmp;
48072 + struct gr_hash_struct *ghash;
48073 + uid_t *domainlist;
48074 + unsigned int r_num;
48075 + unsigned int len;
48076 + char *tmp;
48077 + int err = 0;
48078 + __u16 i;
48079 + __u32 num_subjs;
48080 +
48081 + /* we need a default and kernel role */
48082 + if (arg->role_db.num_roles < 2)
48083 + return -EINVAL;
48084 +
48085 + /* copy special role authentication info from userspace */
48086 +
48087 + num_sprole_pws = arg->num_sprole_pws;
48088 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
48089 +
48090 + if (!acl_special_roles) {
48091 + err = -ENOMEM;
48092 + goto cleanup;
48093 + }
48094 +
48095 + for (i = 0; i < num_sprole_pws; i++) {
48096 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
48097 + if (!sptmp) {
48098 + err = -ENOMEM;
48099 + goto cleanup;
48100 + }
48101 + if (copy_from_user(sptmp, arg->sprole_pws + i,
48102 + sizeof (struct sprole_pw))) {
48103 + err = -EFAULT;
48104 + goto cleanup;
48105 + }
48106 +
48107 + len =
48108 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
48109 +
48110 + if (!len || len >= GR_SPROLE_LEN) {
48111 + err = -EINVAL;
48112 + goto cleanup;
48113 + }
48114 +
48115 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
48116 + err = -ENOMEM;
48117 + goto cleanup;
48118 + }
48119 +
48120 + if (copy_from_user(tmp, sptmp->rolename, len)) {
48121 + err = -EFAULT;
48122 + goto cleanup;
48123 + }
48124 + tmp[len-1] = '\0';
48125 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
48126 + printk(KERN_ALERT "Copying special role %s\n", tmp);
48127 +#endif
48128 + sptmp->rolename = tmp;
48129 + acl_special_roles[i] = sptmp;
48130 + }
48131 +
48132 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
48133 +
48134 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
48135 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
48136 +
48137 + if (!r_tmp) {
48138 + err = -ENOMEM;
48139 + goto cleanup;
48140 + }
48141 +
48142 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
48143 + sizeof (struct acl_role_label *))) {
48144 + err = -EFAULT;
48145 + goto cleanup;
48146 + }
48147 +
48148 + if (copy_from_user(r_tmp, r_utmp2,
48149 + sizeof (struct acl_role_label))) {
48150 + err = -EFAULT;
48151 + goto cleanup;
48152 + }
48153 +
48154 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
48155 +
48156 +		if (!len || len >= GR_SPROLE_LEN) {
48157 + err = -EINVAL;
48158 + goto cleanup;
48159 + }
48160 +
48161 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
48162 + err = -ENOMEM;
48163 + goto cleanup;
48164 + }
48165 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
48166 + err = -EFAULT;
48167 + goto cleanup;
48168 + }
48169 + tmp[len-1] = '\0';
48170 + r_tmp->rolename = tmp;
48171 +
48172 + if (!strcmp(r_tmp->rolename, "default")
48173 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
48174 + default_role = r_tmp;
48175 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
48176 + kernel_role = r_tmp;
48177 + }
48178 +
48179 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
48180 + err = -ENOMEM;
48181 + goto cleanup;
48182 + }
48183 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
48184 + err = -EFAULT;
48185 + goto cleanup;
48186 + }
48187 +
48188 + r_tmp->hash = ghash;
48189 +
48190 + num_subjs = count_user_subjs(r_tmp->hash->first);
48191 +
48192 + r_tmp->subj_hash_size = num_subjs;
48193 + r_tmp->subj_hash =
48194 + (struct acl_subject_label **)
48195 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
48196 +
48197 + if (!r_tmp->subj_hash) {
48198 + err = -ENOMEM;
48199 + goto cleanup;
48200 + }
48201 +
48202 + err = copy_user_allowedips(r_tmp);
48203 + if (err)
48204 + goto cleanup;
48205 +
48206 + /* copy domain info */
48207 + if (r_tmp->domain_children != NULL) {
48208 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
48209 + if (domainlist == NULL) {
48210 + err = -ENOMEM;
48211 + goto cleanup;
48212 + }
48213 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
48214 + err = -EFAULT;
48215 + goto cleanup;
48216 + }
48217 + r_tmp->domain_children = domainlist;
48218 + }
48219 +
48220 + err = copy_user_transitions(r_tmp);
48221 + if (err)
48222 + goto cleanup;
48223 +
48224 + memset(r_tmp->subj_hash, 0,
48225 + r_tmp->subj_hash_size *
48226 + sizeof (struct acl_subject_label *));
48227 +
48228 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
48229 +
48230 + if (err)
48231 + goto cleanup;
48232 +
48233 + /* set nested subject list to null */
48234 + r_tmp->hash->first = NULL;
48235 +
48236 + insert_acl_role_label(r_tmp);
48237 + }
48238 +
48239 + goto return_err;
48240 + cleanup:
48241 + free_variables();
48242 + return_err:
48243 + return err;
48244 +
48245 +}
48246 +
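+/* load a new policy: initialize the tables, copy the userspace acl structures,
+   apply the acls to existing tasks, and finally mark the rbac system ready */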
48247 +static int
48248 +gracl_init(struct gr_arg *args)
48249 +{
48250 + int error = 0;
48251 +
48252 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
48253 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
48254 +
48255 + if (init_variables(args)) {
48256 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
48257 + error = -ENOMEM;
48258 + free_variables();
48259 + goto out;
48260 + }
48261 +
48262 + error = copy_user_acl(args);
48263 + free_init_variables();
48264 + if (error) {
48265 + free_variables();
48266 + goto out;
48267 + }
48268 +
48269 + if ((error = gr_set_acls(0))) {
48270 + free_variables();
48271 + goto out;
48272 + }
48273 +
48274 + pax_open_kernel();
48275 + gr_status |= GR_READY;
48276 + pax_close_kernel();
48277 +
48278 + out:
48279 + return error;
48280 +}
48281 +
48282 +/* derived from glibc fnmatch(); 0: match, 1: no match */
48283 +
48284 +static int
48285 +glob_match(const char *p, const char *n)
48286 +{
48287 + char c;
48288 +
48289 + while ((c = *p++) != '\0') {
48290 + switch (c) {
48291 + case '?':
48292 + if (*n == '\0')
48293 + return 1;
48294 + else if (*n == '/')
48295 + return 1;
48296 + break;
48297 + case '\\':
48298 + if (*n != c)
48299 + return 1;
48300 + break;
48301 + case '*':
48302 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
48303 + if (*n == '/')
48304 + return 1;
48305 + else if (c == '?') {
48306 + if (*n == '\0')
48307 + return 1;
48308 + else
48309 + ++n;
48310 + }
48311 + }
48312 + if (c == '\0') {
48313 + return 0;
48314 + } else {
48315 + const char *endp;
48316 +
48317 + if ((endp = strchr(n, '/')) == NULL)
48318 + endp = n + strlen(n);
48319 +
48320 + if (c == '[') {
48321 + for (--p; n < endp; ++n)
48322 + if (!glob_match(p, n))
48323 + return 0;
48324 + } else if (c == '/') {
48325 + while (*n != '\0' && *n != '/')
48326 + ++n;
48327 + if (*n == '/' && !glob_match(p, n + 1))
48328 + return 0;
48329 + } else {
48330 + for (--p; n < endp; ++n)
48331 + if (*n == c && !glob_match(p, n))
48332 + return 0;
48333 + }
48334 +
48335 + return 1;
48336 + }
48337 + case '[':
48338 + {
48339 + int not;
48340 + char cold;
48341 +
48342 + if (*n == '\0' || *n == '/')
48343 + return 1;
48344 +
48345 + not = (*p == '!' || *p == '^');
48346 + if (not)
48347 + ++p;
48348 +
48349 + c = *p++;
48350 + for (;;) {
48351 + unsigned char fn = (unsigned char)*n;
48352 +
48353 + if (c == '\0')
48354 + return 1;
48355 + else {
48356 + if (c == fn)
48357 + goto matched;
48358 + cold = c;
48359 + c = *p++;
48360 +
48361 + if (c == '-' && *p != ']') {
48362 + unsigned char cend = *p++;
48363 +
48364 + if (cend == '\0')
48365 + return 1;
48366 +
48367 + if (cold <= fn && fn <= cend)
48368 + goto matched;
48369 +
48370 + c = *p++;
48371 + }
48372 + }
48373 +
48374 + if (c == ']')
48375 + break;
48376 + }
48377 + if (!not)
48378 + return 1;
48379 + break;
48380 + matched:
48381 + while (c != ']') {
48382 + if (c == '\0')
48383 + return 1;
48384 +
48385 + c = *p++;
48386 + }
48387 + if (not)
48388 + return 1;
48389 + }
48390 + break;
48391 + default:
48392 + if (c != *n)
48393 + return 1;
48394 + }
48395 +
48396 + ++n;
48397 + }
48398 +
48399 + if (*n == '\0')
48400 + return 0;
48401 +
48402 + if (*n == '/')
48403 + return 0;
48404 +
48405 + return 1;
48406 +}
48407 +
48408 +static struct acl_object_label *
48409 +chk_glob_label(struct acl_object_label *globbed,
48410 + struct dentry *dentry, struct vfsmount *mnt, char **path)
48411 +{
48412 + struct acl_object_label *tmp;
48413 +
48414 + if (*path == NULL)
48415 + *path = gr_to_filename_nolock(dentry, mnt);
48416 +
48417 + tmp = globbed;
48418 +
48419 + while (tmp) {
48420 + if (!glob_match(tmp->filename, *path))
48421 + return tmp;
48422 + tmp = tmp->next;
48423 + }
48424 +
48425 + return NULL;
48426 +}
48427 +
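+/* look up the object label for an inode/dev in the given subject, falling back
+   through its parent subjects; apply glob refinement when requested */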
48428 +static struct acl_object_label *
48429 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
48430 + const ino_t curr_ino, const dev_t curr_dev,
48431 + const struct acl_subject_label *subj, char **path, const int checkglob)
48432 +{
48433 + struct acl_subject_label *tmpsubj;
48434 + struct acl_object_label *retval;
48435 + struct acl_object_label *retval2;
48436 +
48437 + tmpsubj = (struct acl_subject_label *) subj;
48438 + read_lock(&gr_inode_lock);
48439 + do {
48440 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
48441 + if (retval) {
48442 + if (checkglob && retval->globbed) {
48443 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
48444 + (struct vfsmount *)orig_mnt, path);
48445 + if (retval2)
48446 + retval = retval2;
48447 + }
48448 + break;
48449 + }
48450 + } while ((tmpsubj = tmpsubj->parent_subject));
48451 + read_unlock(&gr_inode_lock);
48452 +
48453 + return retval;
48454 +}
48455 +
48456 +static __inline__ struct acl_object_label *
48457 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
48458 + const struct dentry *curr_dentry,
48459 + const struct acl_subject_label *subj, char **path, const int checkglob)
48460 +{
48461 + int newglob = checkglob;
48462 +
48463 +	/* if we aren't checking a subdirectory of the original path yet, don't do glob checking,
48464 +	   as we don't want a / * rule to match instead of the / object.
48465 +	   don't do this for create lookups that call this function, though, since they're looking up
48466 +	   on the parent and thus need globbing checks on all paths
48467 +	 */
48468 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
48469 + newglob = GR_NO_GLOB;
48470 +
48471 + return __full_lookup(orig_dentry, orig_mnt,
48472 + curr_dentry->d_inode->i_ino,
48473 + __get_dev(curr_dentry), subj, path, newglob);
48474 +}
48475 +
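+/* walk the dentry/vfsmount chain from the given path toward the real root,
+   returning the first matching object label; pseudo filesystems (shm, pipe,
+   sockets, hugetlbfs) are short-circuited to a fake read/write object */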
48476 +static struct acl_object_label *
48477 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48478 + const struct acl_subject_label *subj, char *path, const int checkglob)
48479 +{
48480 + struct dentry *dentry = (struct dentry *) l_dentry;
48481 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
48482 + struct acl_object_label *retval;
48483 +
48484 + spin_lock(&dcache_lock);
48485 + spin_lock(&vfsmount_lock);
48486 +
48487 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
48488 +#ifdef CONFIG_NET
48489 + mnt == sock_mnt ||
48490 +#endif
48491 +#ifdef CONFIG_HUGETLBFS
48492 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
48493 +#endif
48494 + /* ignore Eric Biederman */
48495 + IS_PRIVATE(l_dentry->d_inode))) {
48496 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
48497 + goto out;
48498 + }
48499 +
48500 + for (;;) {
48501 + if (dentry == real_root && mnt == real_root_mnt)
48502 + break;
48503 +
48504 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
48505 + if (mnt->mnt_parent == mnt)
48506 + break;
48507 +
48508 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48509 + if (retval != NULL)
48510 + goto out;
48511 +
48512 + dentry = mnt->mnt_mountpoint;
48513 + mnt = mnt->mnt_parent;
48514 + continue;
48515 + }
48516 +
48517 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48518 + if (retval != NULL)
48519 + goto out;
48520 +
48521 + dentry = dentry->d_parent;
48522 + }
48523 +
48524 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48525 +
48526 + if (retval == NULL)
48527 + retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
48528 +out:
48529 + spin_unlock(&vfsmount_lock);
48530 + spin_unlock(&dcache_lock);
48531 +
48532 + BUG_ON(retval == NULL);
48533 +
48534 + return retval;
48535 +}
48536 +
48537 +static __inline__ struct acl_object_label *
48538 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48539 + const struct acl_subject_label *subj)
48540 +{
48541 + char *path = NULL;
48542 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
48543 +}
48544 +
48545 +static __inline__ struct acl_object_label *
48546 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48547 + const struct acl_subject_label *subj)
48548 +{
48549 + char *path = NULL;
48550 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
48551 +}
48552 +
48553 +static __inline__ struct acl_object_label *
48554 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48555 + const struct acl_subject_label *subj, char *path)
48556 +{
48557 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
48558 +}
48559 +
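+/* same walk as __chk_obj_label, but resolving a subject label within a role;
+   falls back to the label of the real root if no ancestor matches */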
48560 +static struct acl_subject_label *
48561 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48562 + const struct acl_role_label *role)
48563 +{
48564 + struct dentry *dentry = (struct dentry *) l_dentry;
48565 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
48566 + struct acl_subject_label *retval;
48567 +
48568 + spin_lock(&dcache_lock);
48569 + spin_lock(&vfsmount_lock);
48570 +
48571 + for (;;) {
48572 + if (dentry == real_root && mnt == real_root_mnt)
48573 + break;
48574 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
48575 + if (mnt->mnt_parent == mnt)
48576 + break;
48577 +
48578 + read_lock(&gr_inode_lock);
48579 + retval =
48580 + lookup_acl_subj_label(dentry->d_inode->i_ino,
48581 + __get_dev(dentry), role);
48582 + read_unlock(&gr_inode_lock);
48583 + if (retval != NULL)
48584 + goto out;
48585 +
48586 + dentry = mnt->mnt_mountpoint;
48587 + mnt = mnt->mnt_parent;
48588 + continue;
48589 + }
48590 +
48591 + read_lock(&gr_inode_lock);
48592 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
48593 + __get_dev(dentry), role);
48594 + read_unlock(&gr_inode_lock);
48595 + if (retval != NULL)
48596 + goto out;
48597 +
48598 + dentry = dentry->d_parent;
48599 + }
48600 +
48601 + read_lock(&gr_inode_lock);
48602 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
48603 + __get_dev(dentry), role);
48604 + read_unlock(&gr_inode_lock);
48605 +
48606 + if (unlikely(retval == NULL)) {
48607 + read_lock(&gr_inode_lock);
48608 + retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
48609 + __get_dev(real_root), role);
48610 + read_unlock(&gr_inode_lock);
48611 + }
48612 +out:
48613 + spin_unlock(&vfsmount_lock);
48614 + spin_unlock(&dcache_lock);
48615 +
48616 + BUG_ON(retval == NULL);
48617 +
48618 + return retval;
48619 +}
48620 +
48621 +static void
48622 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
48623 +{
48624 + struct task_struct *task = current;
48625 + const struct cred *cred = current_cred();
48626 +
48627 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
48628 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
48629 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
48630 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
48631 +
48632 + return;
48633 +}
48634 +
48635 +static void
48636 +gr_log_learn_sysctl(const char *path, const __u32 mode)
48637 +{
48638 + struct task_struct *task = current;
48639 + const struct cred *cred = current_cred();
48640 +
48641 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
48642 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
48643 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
48644 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
48645 +
48646 + return;
48647 +}
48648 +
48649 +static void
48650 +gr_log_learn_id_change(const char type, const unsigned int real,
48651 + const unsigned int effective, const unsigned int fs)
48652 +{
48653 + struct task_struct *task = current;
48654 + const struct cred *cred = current_cred();
48655 +
48656 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
48657 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
48658 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
48659 + type, real, effective, fs, &task->signal->saved_ip);
48660 +
48661 + return;
48662 +}
48663 +
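+/* hard link check: the link may only be created if the subject already has at
+   least the access to the existing file that the new name would grant */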
48664 +__u32
48665 +gr_check_link(const struct dentry * new_dentry,
48666 + const struct dentry * parent_dentry,
48667 + const struct vfsmount * parent_mnt,
48668 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
48669 +{
48670 + struct acl_object_label *obj;
48671 + __u32 oldmode, newmode;
48672 + __u32 needmode;
48673 +
48674 + if (unlikely(!(gr_status & GR_READY)))
48675 + return (GR_CREATE | GR_LINK);
48676 +
48677 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
48678 + oldmode = obj->mode;
48679 +
48680 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
48681 + oldmode |= (GR_CREATE | GR_LINK);
48682 +
48683 + needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
48684 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
48685 + needmode |= GR_SETID | GR_AUDIT_SETID;
48686 +
48687 + newmode =
48688 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
48689 + oldmode | needmode);
48690 +
48691 + needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
48692 + GR_SETID | GR_READ | GR_FIND | GR_DELETE |
48693 + GR_INHERIT | GR_AUDIT_INHERIT);
48694 +
48695 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
48696 + goto bad;
48697 +
48698 + if ((oldmode & needmode) != needmode)
48699 + goto bad;
48700 +
48701 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
48702 + if ((newmode & needmode) != needmode)
48703 + goto bad;
48704 +
48705 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
48706 + return newmode;
48707 +bad:
48708 + needmode = oldmode;
48709 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
48710 + needmode |= GR_SETID;
48711 +
48712 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
48713 + gr_log_learn(old_dentry, old_mnt, needmode);
48714 + return (GR_CREATE | GR_LINK);
48715 + } else if (newmode & GR_SUPPRESS)
48716 + return GR_SUPPRESS;
48717 + else
48718 + return 0;
48719 +}
48720 +
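+/* main file access check: returns the subset of the requested mode bits granted
+   by the matching object label, handling learn mode and the init role transfer */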
48721 +__u32
48722 +gr_search_file(const struct dentry * dentry, const __u32 mode,
48723 + const struct vfsmount * mnt)
48724 +{
48725 + __u32 retval = mode;
48726 + struct acl_subject_label *curracl;
48727 + struct acl_object_label *currobj;
48728 +
48729 + if (unlikely(!(gr_status & GR_READY)))
48730 + return (mode & ~GR_AUDITS);
48731 +
48732 + curracl = current->acl;
48733 +
48734 + currobj = chk_obj_label(dentry, mnt, curracl);
48735 + retval = currobj->mode & mode;
48736 +
48737 + /* if we're opening a specified transfer file for writing
48738 + (e.g. /dev/initctl), then transfer our role to init
48739 + */
48740 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
48741 + current->role->roletype & GR_ROLE_PERSIST)) {
48742 + struct task_struct *task = init_pid_ns.child_reaper;
48743 +
48744 + if (task->role != current->role) {
48745 + task->acl_sp_role = 0;
48746 + task->acl_role_id = current->acl_role_id;
48747 + task->role = current->role;
48748 + rcu_read_lock();
48749 + read_lock(&grsec_exec_file_lock);
48750 + gr_apply_subject_to_task(task);
48751 + read_unlock(&grsec_exec_file_lock);
48752 + rcu_read_unlock();
48753 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
48754 + }
48755 + }
48756 +
48757 + if (unlikely
48758 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
48759 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
48760 + __u32 new_mode = mode;
48761 +
48762 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
48763 +
48764 + retval = new_mode;
48765 +
48766 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
48767 + new_mode |= GR_INHERIT;
48768 +
48769 + if (!(mode & GR_NOLEARN))
48770 + gr_log_learn(dentry, mnt, new_mode);
48771 + }
48772 +
48773 + return retval;
48774 +}
48775 +
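+/* access check for a path about to be created: prefer an exact object match on
+   the new name, otherwise fall back to the parent directory's object label */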
48776 +__u32
48777 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
48778 + const struct vfsmount * mnt, const __u32 mode)
48779 +{
48780 + struct name_entry *match;
48781 + struct acl_object_label *matchpo;
48782 + struct acl_subject_label *curracl;
48783 + char *path;
48784 + __u32 retval;
48785 +
48786 + if (unlikely(!(gr_status & GR_READY)))
48787 + return (mode & ~GR_AUDITS);
48788 +
48789 + preempt_disable();
48790 + path = gr_to_filename_rbac(new_dentry, mnt);
48791 + match = lookup_name_entry_create(path);
48792 +
48793 + if (!match)
48794 + goto check_parent;
48795 +
48796 + curracl = current->acl;
48797 +
48798 + read_lock(&gr_inode_lock);
48799 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
48800 + read_unlock(&gr_inode_lock);
48801 +
48802 + if (matchpo) {
48803 + if ((matchpo->mode & mode) !=
48804 + (mode & ~(GR_AUDITS | GR_SUPPRESS))
48805 + && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
48806 + __u32 new_mode = mode;
48807 +
48808 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
48809 +
48810 + gr_log_learn(new_dentry, mnt, new_mode);
48811 +
48812 + preempt_enable();
48813 + return new_mode;
48814 + }
48815 + preempt_enable();
48816 + return (matchpo->mode & mode);
48817 + }
48818 +
48819 + check_parent:
48820 + curracl = current->acl;
48821 +
48822 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
48823 + retval = matchpo->mode & mode;
48824 +
48825 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
48826 + && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
48827 + __u32 new_mode = mode;
48828 +
48829 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
48830 +
48831 + gr_log_learn(new_dentry, mnt, new_mode);
48832 + preempt_enable();
48833 + return new_mode;
48834 + }
48835 +
48836 + preempt_enable();
48837 + return retval;
48838 +}
48839 +
48840 +int
48841 +gr_check_hidden_task(const struct task_struct *task)
48842 +{
48843 + if (unlikely(!(gr_status & GR_READY)))
48844 + return 0;
48845 +
48846 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
48847 + return 1;
48848 +
48849 + return 0;
48850 +}
48851 +
48852 +int
48853 +gr_check_protected_task(const struct task_struct *task)
48854 +{
48855 + if (unlikely(!(gr_status & GR_READY) || !task))
48856 + return 0;
48857 +
48858 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
48859 + task->acl != current->acl)
48860 + return 1;
48861 +
48862 + return 0;
48863 +}
48864 +
48865 +int
48866 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
48867 +{
48868 + struct task_struct *p;
48869 + int ret = 0;
48870 +
48871 + if (unlikely(!(gr_status & GR_READY) || !pid))
48872 + return ret;
48873 +
48874 + read_lock(&tasklist_lock);
48875 + do_each_pid_task(pid, type, p) {
48876 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
48877 + p->acl != current->acl) {
48878 + ret = 1;
48879 + goto out;
48880 + }
48881 + } while_each_pid_task(pid, type, p);
48882 +out:
48883 + read_unlock(&tasklist_lock);
48884 +
48885 + return ret;
48886 +}
48887 +
48888 +void
48889 +gr_copy_label(struct task_struct *tsk)
48890 +{
48891 + tsk->signal->used_accept = 0;
48892 + tsk->acl_sp_role = 0;
48893 + tsk->acl_role_id = current->acl_role_id;
48894 + tsk->acl = current->acl;
48895 + tsk->role = current->role;
48896 + tsk->signal->curr_ip = current->signal->curr_ip;
48897 + tsk->signal->saved_ip = current->signal->saved_ip;
48898 + if (current->exec_file)
48899 + get_file(current->exec_file);
48900 + tsk->exec_file = current->exec_file;
48901 + tsk->is_writable = current->is_writable;
48902 + if (unlikely(current->signal->used_accept)) {
48903 + current->signal->curr_ip = 0;
48904 + current->signal->saved_ip = 0;
48905 + }
48906 +
48907 + return;
48908 +}
48909 +
48910 +static void
48911 +gr_set_proc_res(struct task_struct *task)
48912 +{
48913 + struct acl_subject_label *proc;
48914 + unsigned short i;
48915 +
48916 + proc = task->acl;
48917 +
48918 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
48919 + return;
48920 +
48921 + for (i = 0; i < RLIM_NLIMITS; i++) {
48922 + if (!(proc->resmask & (1 << i)))
48923 + continue;
48924 +
48925 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
48926 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
48927 + }
48928 +
48929 + return;
48930 +}
48931 +
48932 +extern int __gr_process_user_ban(struct user_struct *user);
48933 +
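+/* enforce the subject's uid transition allow/deny list (and any active user ban);
+   returns 1 to reject the id change, 0 to permit it */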
48934 +int
48935 +gr_check_user_change(int real, int effective, int fs)
48936 +{
48937 + unsigned int i;
48938 + __u16 num;
48939 + uid_t *uidlist;
48940 + int curuid;
48941 + int realok = 0;
48942 + int effectiveok = 0;
48943 + int fsok = 0;
48944 +
48945 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
48946 + struct user_struct *user;
48947 +
48948 + if (real == -1)
48949 + goto skipit;
48950 +
48951 + user = find_user(real);
48952 + if (user == NULL)
48953 + goto skipit;
48954 +
48955 + if (__gr_process_user_ban(user)) {
48956 + /* for find_user */
48957 + free_uid(user);
48958 + return 1;
48959 + }
48960 +
48961 + /* for find_user */
48962 + free_uid(user);
48963 +
48964 +skipit:
48965 +#endif
48966 +
48967 + if (unlikely(!(gr_status & GR_READY)))
48968 + return 0;
48969 +
48970 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
48971 + gr_log_learn_id_change('u', real, effective, fs);
48972 +
48973 + num = current->acl->user_trans_num;
48974 + uidlist = current->acl->user_transitions;
48975 +
48976 + if (uidlist == NULL)
48977 + return 0;
48978 +
48979 + if (real == -1)
48980 + realok = 1;
48981 + if (effective == -1)
48982 + effectiveok = 1;
48983 + if (fs == -1)
48984 + fsok = 1;
48985 +
48986 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
48987 + for (i = 0; i < num; i++) {
48988 + curuid = (int)uidlist[i];
48989 + if (real == curuid)
48990 + realok = 1;
48991 + if (effective == curuid)
48992 + effectiveok = 1;
48993 + if (fs == curuid)
48994 + fsok = 1;
48995 + }
48996 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
48997 + for (i = 0; i < num; i++) {
48998 + curuid = (int)uidlist[i];
48999 + if (real == curuid)
49000 + break;
49001 + if (effective == curuid)
49002 + break;
49003 + if (fs == curuid)
49004 + break;
49005 + }
49006 + /* not in deny list */
49007 + if (i == num) {
49008 + realok = 1;
49009 + effectiveok = 1;
49010 + fsok = 1;
49011 + }
49012 + }
49013 +
49014 + if (realok && effectiveok && fsok)
49015 + return 0;
49016 + else {
49017 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
49018 + return 1;
49019 + }
49020 +}
49021 +
49022 +int
49023 +gr_check_group_change(int real, int effective, int fs)
49024 +{
49025 + unsigned int i;
49026 + __u16 num;
49027 + gid_t *gidlist;
49028 + int curgid;
49029 + int realok = 0;
49030 + int effectiveok = 0;
49031 + int fsok = 0;
49032 +
49033 + if (unlikely(!(gr_status & GR_READY)))
49034 + return 0;
49035 +
49036 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49037 + gr_log_learn_id_change('g', real, effective, fs);
49038 +
49039 + num = current->acl->group_trans_num;
49040 + gidlist = current->acl->group_transitions;
49041 +
49042 + if (gidlist == NULL)
49043 + return 0;
49044 +
49045 + if (real == -1)
49046 + realok = 1;
49047 + if (effective == -1)
49048 + effectiveok = 1;
49049 + if (fs == -1)
49050 + fsok = 1;
49051 +
49052 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
49053 + for (i = 0; i < num; i++) {
49054 + curgid = (int)gidlist[i];
49055 + if (real == curgid)
49056 + realok = 1;
49057 + if (effective == curgid)
49058 + effectiveok = 1;
49059 + if (fs == curgid)
49060 + fsok = 1;
49061 + }
49062 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
49063 + for (i = 0; i < num; i++) {
49064 + curgid = (int)gidlist[i];
49065 + if (real == curgid)
49066 + break;
49067 + if (effective == curgid)
49068 + break;
49069 + if (fs == curgid)
49070 + break;
49071 + }
49072 + /* not in deny list */
49073 + if (i == num) {
49074 + realok = 1;
49075 + effectiveok = 1;
49076 + fsok = 1;
49077 + }
49078 + }
49079 +
49080 + if (realok && effectiveok && fsok)
49081 + return 0;
49082 + else {
49083 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
49084 + return 1;
49085 + }
49086 +}
49087 +
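+/* re-evaluate a task's role and subject after a uid/gid change, preserving an
+   inherited subject where appropriate and reapplying resource limits */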
49088 +void
49089 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
49090 +{
49091 + struct acl_role_label *role = task->role;
49092 + struct acl_subject_label *subj = NULL;
49093 + struct acl_object_label *obj;
49094 + struct file *filp;
49095 +
49096 + if (unlikely(!(gr_status & GR_READY)))
49097 + return;
49098 +
49099 + filp = task->exec_file;
49100 +
49101 + /* kernel process, we'll give them the kernel role */
49102 + if (unlikely(!filp)) {
49103 + task->role = kernel_role;
49104 + task->acl = kernel_role->root_label;
49105 + return;
49106 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
49107 + role = lookup_acl_role_label(task, uid, gid);
49108 +
49109 +	/* perform subject lookup in the possibly new role;
49110 +	   we can use this result below in the case where role == task->role
49111 +	 */
49112 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
49113 +
49114 +	/* if we changed uid/gid but ended up in the same role
49115 +	   and are using inheritance, don't lose the inherited subject;
49116 +	   if the current subject is other than what a normal lookup
49117 +	   would result in, we arrived via inheritance, so don't
49118 +	   lose that subject
49119 +	 */
49120 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
49121 + (subj == task->acl)))
49122 + task->acl = subj;
49123 +
49124 + task->role = role;
49125 +
49126 + task->is_writable = 0;
49127 +
49128 + /* ignore additional mmap checks for processes that are writable
49129 + by the default ACL */
49130 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49131 + if (unlikely(obj->mode & GR_WRITE))
49132 + task->is_writable = 1;
49133 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
49134 + if (unlikely(obj->mode & GR_WRITE))
49135 + task->is_writable = 1;
49136 +
49137 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49138 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49139 +#endif
49140 +
49141 + gr_set_proc_res(task);
49142 +
49143 + return;
49144 +}
49145 +
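+/* assign the subject label for a newly exec'd binary, refusing the exec when a
+   ptraced or unsafely shared task would change subject without GR_PTRACERD */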
49146 +int
49147 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
49148 + const int unsafe_share)
49149 +{
49150 + struct task_struct *task = current;
49151 + struct acl_subject_label *newacl;
49152 + struct acl_object_label *obj;
49153 + __u32 retmode;
49154 +
49155 + if (unlikely(!(gr_status & GR_READY)))
49156 + return 0;
49157 +
49158 + newacl = chk_subj_label(dentry, mnt, task->role);
49159 +
49160 + task_lock(task);
49161 + if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
49162 + !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
49163 + !(task->role->roletype & GR_ROLE_GOD) &&
49164 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
49165 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
49166 + task_unlock(task);
49167 + if (unsafe_share)
49168 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
49169 + else
49170 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
49171 + return -EACCES;
49172 + }
49173 + task_unlock(task);
49174 +
49175 + obj = chk_obj_label(dentry, mnt, task->acl);
49176 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
49177 +
49178 + if (!(task->acl->mode & GR_INHERITLEARN) &&
49179 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
49180 + if (obj->nested)
49181 + task->acl = obj->nested;
49182 + else
49183 + task->acl = newacl;
49184 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
49185 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
49186 +
49187 + task->is_writable = 0;
49188 +
49189 + /* ignore additional mmap checks for processes that are writable
49190 + by the default ACL */
49191 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
49192 + if (unlikely(obj->mode & GR_WRITE))
49193 + task->is_writable = 1;
49194 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
49195 + if (unlikely(obj->mode & GR_WRITE))
49196 + task->is_writable = 1;
49197 +
49198 + gr_set_proc_res(task);
49199 +
49200 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49201 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49202 +#endif
49203 + return 0;
49204 +}
49205 +
49206 +/* always called with valid inodev ptr */
49207 +static void
49208 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
49209 +{
49210 + struct acl_object_label *matchpo;
49211 + struct acl_subject_label *matchps;
49212 + struct acl_subject_label *subj;
49213 + struct acl_role_label *role;
49214 + unsigned int x;
49215 +
49216 + FOR_EACH_ROLE_START(role)
49217 + FOR_EACH_SUBJECT_START(role, subj, x)
49218 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
49219 + matchpo->mode |= GR_DELETED;
49220 + FOR_EACH_SUBJECT_END(subj,x)
49221 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
49222 + if (subj->inode == ino && subj->device == dev)
49223 + subj->mode |= GR_DELETED;
49224 + FOR_EACH_NESTED_SUBJECT_END(subj)
49225 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
49226 + matchps->mode |= GR_DELETED;
49227 + FOR_EACH_ROLE_END(role)
49228 +
49229 + inodev->nentry->deleted = 1;
49230 +
49231 + return;
49232 +}
49233 +
49234 +void
49235 +gr_handle_delete(const ino_t ino, const dev_t dev)
49236 +{
49237 + struct inodev_entry *inodev;
49238 +
49239 + if (unlikely(!(gr_status & GR_READY)))
49240 + return;
49241 +
49242 + write_lock(&gr_inode_lock);
49243 + inodev = lookup_inodev_entry(ino, dev);
49244 + if (inodev != NULL)
49245 + do_handle_delete(inodev, ino, dev);
49246 + write_unlock(&gr_inode_lock);
49247 +
49248 + return;
49249 +}
49250 +
49251 +static void
49252 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
49253 + const ino_t newinode, const dev_t newdevice,
49254 + struct acl_subject_label *subj)
49255 +{
49256 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
49257 + struct acl_object_label *match;
49258 +
49259 + match = subj->obj_hash[index];
49260 +
49261 + while (match && (match->inode != oldinode ||
49262 + match->device != olddevice ||
49263 + !(match->mode & GR_DELETED)))
49264 + match = match->next;
49265 +
49266 + if (match && (match->inode == oldinode)
49267 + && (match->device == olddevice)
49268 + && (match->mode & GR_DELETED)) {
49269 + if (match->prev == NULL) {
49270 + subj->obj_hash[index] = match->next;
49271 + if (match->next != NULL)
49272 + match->next->prev = NULL;
49273 + } else {
49274 + match->prev->next = match->next;
49275 + if (match->next != NULL)
49276 + match->next->prev = match->prev;
49277 + }
49278 + match->prev = NULL;
49279 + match->next = NULL;
49280 + match->inode = newinode;
49281 + match->device = newdevice;
49282 + match->mode &= ~GR_DELETED;
49283 +
49284 + insert_acl_obj_label(match, subj);
49285 + }
49286 +
49287 + return;
49288 +}
49289 +
49290 +static void
49291 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
49292 + const ino_t newinode, const dev_t newdevice,
49293 + struct acl_role_label *role)
49294 +{
49295 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
49296 + struct acl_subject_label *match;
49297 +
49298 + match = role->subj_hash[index];
49299 +
49300 + while (match && (match->inode != oldinode ||
49301 + match->device != olddevice ||
49302 + !(match->mode & GR_DELETED)))
49303 + match = match->next;
49304 +
49305 + if (match && (match->inode == oldinode)
49306 + && (match->device == olddevice)
49307 + && (match->mode & GR_DELETED)) {
49308 + if (match->prev == NULL) {
49309 + role->subj_hash[index] = match->next;
49310 + if (match->next != NULL)
49311 + match->next->prev = NULL;
49312 + } else {
49313 + match->prev->next = match->next;
49314 + if (match->next != NULL)
49315 + match->next->prev = match->prev;
49316 + }
49317 + match->prev = NULL;
49318 + match->next = NULL;
49319 + match->inode = newinode;
49320 + match->device = newdevice;
49321 + match->mode &= ~GR_DELETED;
49322 +
49323 + insert_acl_subj_label(match, role);
49324 + }
49325 +
49326 + return;
49327 +}
49328 +
49329 +static void
49330 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
49331 + const ino_t newinode, const dev_t newdevice)
49332 +{
49333 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
49334 + struct inodev_entry *match;
49335 +
49336 + match = inodev_set.i_hash[index];
49337 +
49338 + while (match && (match->nentry->inode != oldinode ||
49339 + match->nentry->device != olddevice || !match->nentry->deleted))
49340 + match = match->next;
49341 +
49342 + if (match && (match->nentry->inode == oldinode)
49343 + && (match->nentry->device == olddevice) &&
49344 + match->nentry->deleted) {
49345 + if (match->prev == NULL) {
49346 + inodev_set.i_hash[index] = match->next;
49347 + if (match->next != NULL)
49348 + match->next->prev = NULL;
49349 + } else {
49350 + match->prev->next = match->next;
49351 + if (match->next != NULL)
49352 + match->next->prev = match->prev;
49353 + }
49354 + match->prev = NULL;
49355 + match->next = NULL;
49356 + match->nentry->inode = newinode;
49357 + match->nentry->device = newdevice;
49358 + match->nentry->deleted = 0;
49359 +
49360 + insert_inodev_entry(match);
49361 + }
49362 +
49363 + return;
49364 +}
49365 +
49366 +static void
49367 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
49368 + const struct vfsmount *mnt)
49369 +{
49370 + struct acl_subject_label *subj;
49371 + struct acl_role_label *role;
49372 + unsigned int x;
49373 + ino_t inode = dentry->d_inode->i_ino;
49374 + dev_t dev = __get_dev(dentry);
49375 +
49376 + FOR_EACH_ROLE_START(role)
49377 + update_acl_subj_label(matchn->inode, matchn->device,
49378 + inode, dev, role);
49379 +
49380 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
49381 + if ((subj->inode == inode) && (subj->device == dev)) {
49382 + subj->inode = inode;
49383 + subj->device = dev;
49384 + }
49385 + FOR_EACH_NESTED_SUBJECT_END(subj)
49386 + FOR_EACH_SUBJECT_START(role, subj, x)
49387 + update_acl_obj_label(matchn->inode, matchn->device,
49388 + inode, dev, subj);
49389 + FOR_EACH_SUBJECT_END(subj,x)
49390 + FOR_EACH_ROLE_END(role)
49391 +
49392 + update_inodev_entry(matchn->inode, matchn->device, inode, dev);
49393 +
49394 + return;
49395 +}
49396 +
49397 +void
49398 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
49399 +{
49400 + struct name_entry *matchn;
49401 +
49402 + if (unlikely(!(gr_status & GR_READY)))
49403 + return;
49404 +
49405 + preempt_disable();
49406 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
49407 +
49408 + if (unlikely((unsigned long)matchn)) {
49409 + write_lock(&gr_inode_lock);
49410 + do_handle_create(matchn, dentry, mnt);
49411 + write_unlock(&gr_inode_lock);
49412 + }
49413 + preempt_enable();
49414 +
49415 + return;
49416 +}
49417 +
49418 +void
49419 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
49420 + struct dentry *old_dentry,
49421 + struct dentry *new_dentry,
49422 + struct vfsmount *mnt, const __u8 replace)
49423 +{
49424 + struct name_entry *matchn;
49425 + struct inodev_entry *inodev;
49426 + ino_t oldinode = old_dentry->d_inode->i_ino;
49427 + dev_t olddev = __get_dev(old_dentry);
49428 +
49429 + /* vfs_rename swaps the name and parent link for old_dentry and
49430 + new_dentry
49431 + at this point, old_dentry has the new name, parent link, and inode
49432 + for the renamed file
49433 + if a file is being replaced by a rename, new_dentry has the inode
49434 + and name for the replaced file
49435 + */
49436 +
49437 + if (unlikely(!(gr_status & GR_READY)))
49438 + return;
49439 +
49440 + preempt_disable();
49441 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
49442 +
49443 + /* we wouldn't have to check d_inode if it weren't for
49444 + NFS silly-renaming
49445 + */
49446 +
49447 + write_lock(&gr_inode_lock);
49448 + if (unlikely(replace && new_dentry->d_inode)) {
49449 + ino_t newinode = new_dentry->d_inode->i_ino;
49450 + dev_t newdev = __get_dev(new_dentry);
49451 + inodev = lookup_inodev_entry(newinode, newdev);
49452 + if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
49453 + do_handle_delete(inodev, newinode, newdev);
49454 + }
49455 +
49456 + inodev = lookup_inodev_entry(oldinode, olddev);
49457 + if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
49458 + do_handle_delete(inodev, oldinode, olddev);
49459 +
49460 + if (unlikely((unsigned long)matchn))
49461 + do_handle_create(matchn, old_dentry, mnt);
49462 +
49463 + write_unlock(&gr_inode_lock);
49464 + preempt_enable();
49465 +
49466 + return;
49467 +}
49468 +
49469 +static int
49470 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
49471 + unsigned char **sum)
49472 +{
49473 + struct acl_role_label *r;
49474 + struct role_allowed_ip *ipp;
49475 + struct role_transition *trans;
49476 + unsigned int i;
49477 + int found = 0;
49478 + u32 curr_ip = current->signal->curr_ip;
49479 +
49480 + current->signal->saved_ip = curr_ip;
49481 +
49482 + /* check transition table */
49483 +
49484 + for (trans = current->role->transitions; trans; trans = trans->next) {
49485 + if (!strcmp(rolename, trans->rolename)) {
49486 + found = 1;
49487 + break;
49488 + }
49489 + }
49490 +
49491 + if (!found)
49492 + return 0;
49493 +
49494 + /* handle special roles that do not require authentication
49495 + and check ip */
49496 +
49497 + FOR_EACH_ROLE_START(r)
49498 + if (!strcmp(rolename, r->rolename) &&
49499 + (r->roletype & GR_ROLE_SPECIAL)) {
49500 + found = 0;
49501 + if (r->allowed_ips != NULL) {
49502 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
49503 + if ((ntohl(curr_ip) & ipp->netmask) ==
49504 + (ntohl(ipp->addr) & ipp->netmask))
49505 + found = 1;
49506 + }
49507 + } else
49508 + found = 2;
49509 + if (!found)
49510 + return 0;
49511 +
49512 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
49513 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
49514 + *salt = NULL;
49515 + *sum = NULL;
49516 + return 1;
49517 + }
49518 + }
49519 + FOR_EACH_ROLE_END(r)
49520 +
49521 + for (i = 0; i < num_sprole_pws; i++) {
49522 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
49523 + *salt = acl_special_roles[i]->salt;
49524 + *sum = acl_special_roles[i]->sum;
49525 + return 1;
49526 + }
49527 + }
49528 +
49529 + return 0;
49530 +}
49531 +
49532 +static void
49533 +assign_special_role(char *rolename)
49534 +{
49535 + struct acl_object_label *obj;
49536 + struct acl_role_label *r;
49537 + struct acl_role_label *assigned = NULL;
49538 + struct task_struct *tsk;
49539 + struct file *filp;
49540 +
49541 + FOR_EACH_ROLE_START(r)
49542 + if (!strcmp(rolename, r->rolename) &&
49543 + (r->roletype & GR_ROLE_SPECIAL)) {
49544 + assigned = r;
49545 + break;
49546 + }
49547 + FOR_EACH_ROLE_END(r)
49548 +
49549 + if (!assigned)
49550 + return;
49551 +
49552 + read_lock(&tasklist_lock);
49553 + read_lock(&grsec_exec_file_lock);
49554 +
49555 + tsk = current->real_parent;
49556 + if (tsk == NULL)
49557 + goto out_unlock;
49558 +
49559 + filp = tsk->exec_file;
49560 + if (filp == NULL)
49561 + goto out_unlock;
49562 +
49563 + tsk->is_writable = 0;
49564 +
49565 + tsk->acl_sp_role = 1;
49566 + tsk->acl_role_id = ++acl_sp_role_value;
49567 + tsk->role = assigned;
49568 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
49569 +
49570 + /* ignore additional mmap checks for processes that are writable
49571 + by the default ACL */
49572 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49573 + if (unlikely(obj->mode & GR_WRITE))
49574 + tsk->is_writable = 1;
49575 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
49576 + if (unlikely(obj->mode & GR_WRITE))
49577 + tsk->is_writable = 1;
49578 +
49579 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49580 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
49581 +#endif
49582 +
49583 +out_unlock:
49584 + read_unlock(&grsec_exec_file_lock);
49585 + read_unlock(&tasklist_lock);
49586 + return;
49587 +}
49588 +
49589 +int gr_check_secure_terminal(struct task_struct *task)
49590 +{
49591 + struct task_struct *p, *p2, *p3;
49592 + struct files_struct *files;
49593 + struct fdtable *fdt;
49594 + struct file *our_file = NULL, *file;
49595 + int i;
49596 +
49597 + if (task->signal->tty == NULL)
49598 + return 1;
49599 +
49600 + files = get_files_struct(task);
49601 + if (files != NULL) {
49602 + rcu_read_lock();
49603 + fdt = files_fdtable(files);
49604 + for (i=0; i < fdt->max_fds; i++) {
49605 + file = fcheck_files(files, i);
49606 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
49607 + get_file(file);
49608 + our_file = file;
49609 + }
49610 + }
49611 + rcu_read_unlock();
49612 + put_files_struct(files);
49613 + }
49614 +
49615 + if (our_file == NULL)
49616 + return 1;
49617 +
49618 + read_lock(&tasklist_lock);
49619 + do_each_thread(p2, p) {
49620 + files = get_files_struct(p);
49621 + if (files == NULL ||
49622 + (p->signal && p->signal->tty == task->signal->tty)) {
49623 + if (files != NULL)
49624 + put_files_struct(files);
49625 + continue;
49626 + }
49627 + rcu_read_lock();
49628 + fdt = files_fdtable(files);
49629 + for (i=0; i < fdt->max_fds; i++) {
49630 + file = fcheck_files(files, i);
49631 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
49632 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
49633 + p3 = task;
49634 + while (p3->pid > 0) {
49635 + if (p3 == p)
49636 + break;
49637 + p3 = p3->real_parent;
49638 + }
49639 + if (p3 == p)
49640 + break;
49641 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
49642 + gr_handle_alertkill(p);
49643 + rcu_read_unlock();
49644 + put_files_struct(files);
49645 + read_unlock(&tasklist_lock);
49646 + fput(our_file);
49647 + return 0;
49648 + }
49649 + }
49650 + rcu_read_unlock();
49651 + put_files_struct(files);
49652 + } while_each_thread(p2, p);
49653 + read_unlock(&tasklist_lock);
49654 +
49655 + fput(our_file);
49656 + return 1;
49657 +}
49658 +
49659 +ssize_t
49660 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
49661 +{
49662 + struct gr_arg_wrapper uwrap;
49663 + unsigned char *sprole_salt = NULL;
49664 + unsigned char *sprole_sum = NULL;
49665 + int error = sizeof (struct gr_arg_wrapper);
49666 + int error2 = 0;
49667 +
49668 + mutex_lock(&gr_dev_mutex);
49669 +
49670 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
49671 + error = -EPERM;
49672 + goto out;
49673 + }
49674 +
49675 + if (count != sizeof (struct gr_arg_wrapper)) {
49676 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
49677 + error = -EINVAL;
49678 + goto out;
49679 + }
49680 +
49681 +
49682 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
49683 + gr_auth_expires = 0;
49684 + gr_auth_attempts = 0;
49685 + }
49686 +
49687 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
49688 + error = -EFAULT;
49689 + goto out;
49690 + }
49691 +
49692 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
49693 + error = -EINVAL;
49694 + goto out;
49695 + }
49696 +
49697 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
49698 + error = -EFAULT;
49699 + goto out;
49700 + }
49701 +
49702 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
49703 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
49704 + time_after(gr_auth_expires, get_seconds())) {
49705 + error = -EBUSY;
49706 + goto out;
49707 + }
49708 +
49709 + /* if non-root trying to do anything other than use a special role,
49710 + do not attempt authentication, do not count towards authentication
49711 + locking
49712 + */
49713 +
49714 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
49715 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
49716 + current_uid()) {
49717 + error = -EPERM;
49718 + goto out;
49719 + }
49720 +
49721 + /* ensure pw and special role name are null terminated */
49722 +
49723 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
49724 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
49725 +
49726 +	/* Okay.
49727 +	 * We now have enough of the argument structure (we have yet
49728 +	 * to copy_from_user the tables themselves).  Copy the tables
49729 +	 * only if we need them, i.e. for loading operations. */
49730 +
49731 + switch (gr_usermode->mode) {
49732 + case GR_STATUS:
49733 + if (gr_status & GR_READY) {
49734 + error = 1;
49735 + if (!gr_check_secure_terminal(current))
49736 + error = 3;
49737 + } else
49738 + error = 2;
49739 + goto out;
49740 + case GR_SHUTDOWN:
49741 + if ((gr_status & GR_READY)
49742 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
49743 + pax_open_kernel();
49744 + gr_status &= ~GR_READY;
49745 + pax_close_kernel();
49746 +
49747 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
49748 + free_variables();
49749 + memset(gr_usermode, 0, sizeof (struct gr_arg));
49750 + memset(gr_system_salt, 0, GR_SALT_LEN);
49751 + memset(gr_system_sum, 0, GR_SHA_LEN);
49752 + } else if (gr_status & GR_READY) {
49753 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
49754 + error = -EPERM;
49755 + } else {
49756 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
49757 + error = -EAGAIN;
49758 + }
49759 + break;
49760 + case GR_ENABLE:
49761 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
49762 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
49763 + else {
49764 + if (gr_status & GR_READY)
49765 + error = -EAGAIN;
49766 + else
49767 + error = error2;
49768 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
49769 + }
49770 + break;
49771 + case GR_RELOAD:
49772 + if (!(gr_status & GR_READY)) {
49773 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
49774 + error = -EAGAIN;
49775 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
49776 + lock_kernel();
49777 +
49778 + pax_open_kernel();
49779 + gr_status &= ~GR_READY;
49780 + pax_close_kernel();
49781 +
49782 + free_variables();
49783 + if (!(error2 = gracl_init(gr_usermode))) {
49784 + unlock_kernel();
49785 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
49786 + } else {
49787 + unlock_kernel();
49788 + error = error2;
49789 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
49790 + }
49791 + } else {
49792 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
49793 + error = -EPERM;
49794 + }
49795 + break;
49796 + case GR_SEGVMOD:
49797 + if (unlikely(!(gr_status & GR_READY))) {
49798 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
49799 + error = -EAGAIN;
49800 + break;
49801 + }
49802 +
49803 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
49804 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
49805 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
49806 + struct acl_subject_label *segvacl;
49807 + segvacl =
49808 + lookup_acl_subj_label(gr_usermode->segv_inode,
49809 + gr_usermode->segv_device,
49810 + current->role);
49811 + if (segvacl) {
49812 + segvacl->crashes = 0;
49813 + segvacl->expires = 0;
49814 + }
49815 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
49816 + gr_remove_uid(gr_usermode->segv_uid);
49817 + }
49818 + } else {
49819 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
49820 + error = -EPERM;
49821 + }
49822 + break;
49823 + case GR_SPROLE:
49824 + case GR_SPROLEPAM:
49825 + if (unlikely(!(gr_status & GR_READY))) {
49826 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
49827 + error = -EAGAIN;
49828 + break;
49829 + }
49830 +
49831 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
49832 + current->role->expires = 0;
49833 + current->role->auth_attempts = 0;
49834 + }
49835 +
49836 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
49837 + time_after(current->role->expires, get_seconds())) {
49838 + error = -EBUSY;
49839 + goto out;
49840 + }
49841 +
49842 + if (lookup_special_role_auth
49843 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
49844 + && ((!sprole_salt && !sprole_sum)
49845 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
49846 + char *p = "";
49847 + assign_special_role(gr_usermode->sp_role);
49848 + read_lock(&tasklist_lock);
49849 + if (current->real_parent)
49850 + p = current->real_parent->role->rolename;
49851 + read_unlock(&tasklist_lock);
49852 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
49853 + p, acl_sp_role_value);
49854 + } else {
49855 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
49856 + error = -EPERM;
49857 + if(!(current->role->auth_attempts++))
49858 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
49859 +
49860 + goto out;
49861 + }
49862 + break;
49863 + case GR_UNSPROLE:
49864 + if (unlikely(!(gr_status & GR_READY))) {
49865 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
49866 + error = -EAGAIN;
49867 + break;
49868 + }
49869 +
49870 + if (current->role->roletype & GR_ROLE_SPECIAL) {
49871 + char *p = "";
49872 + int i = 0;
49873 +
49874 + read_lock(&tasklist_lock);
49875 + if (current->real_parent) {
49876 + p = current->real_parent->role->rolename;
49877 + i = current->real_parent->acl_role_id;
49878 + }
49879 + read_unlock(&tasklist_lock);
49880 +
49881 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
49882 + gr_set_acls(1);
49883 + } else {
49884 + error = -EPERM;
49885 + goto out;
49886 + }
49887 + break;
49888 + default:
49889 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
49890 + error = -EINVAL;
49891 + break;
49892 + }
49893 +
49894 + if (error != -EPERM)
49895 + goto out;
49896 +
49897 + if(!(gr_auth_attempts++))
49898 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
49899 +
49900 + out:
49901 + mutex_unlock(&gr_dev_mutex);
49902 + return error;
49903 +}
49904 +
49905 +/* must be called with
49906 + rcu_read_lock();
49907 + read_lock(&tasklist_lock);
49908 + read_lock(&grsec_exec_file_lock);
49909 +*/
49910 +int gr_apply_subject_to_task(struct task_struct *task)
49911 +{
49912 + struct acl_object_label *obj;
49913 + char *tmpname;
49914 + struct acl_subject_label *tmpsubj;
49915 + struct file *filp;
49916 + struct name_entry *nmatch;
49917 +
49918 + filp = task->exec_file;
49919 + if (filp == NULL)
49920 + return 0;
49921 +
49922 + /* the following is to apply the correct subject
49923 + on binaries running when the RBAC system
49924 + is enabled, when the binaries have been
49925 + replaced or deleted since their execution
49926 + -----
49927 + when the RBAC system starts, the inode/dev
49928 + from exec_file will be one the RBAC system
49929 + is unaware of. It only knows the inode/dev
49930 + of the present file on disk, or the absence
49931 + of it.
49932 + */
49933 + preempt_disable();
49934 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
49935 +
49936 + nmatch = lookup_name_entry(tmpname);
49937 + preempt_enable();
49938 + tmpsubj = NULL;
49939 + if (nmatch) {
49940 + if (nmatch->deleted)
49941 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
49942 + else
49943 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
49944 + if (tmpsubj != NULL)
49945 + task->acl = tmpsubj;
49946 + }
49947 + if (tmpsubj == NULL)
49948 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
49949 + task->role);
49950 + if (task->acl) {
49951 + task->is_writable = 0;
49952 + /* ignore additional mmap checks for processes that are writable
49953 + by the default ACL */
49954 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49955 + if (unlikely(obj->mode & GR_WRITE))
49956 + task->is_writable = 1;
49957 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
49958 + if (unlikely(obj->mode & GR_WRITE))
49959 + task->is_writable = 1;
49960 +
49961 + gr_set_proc_res(task);
49962 +
49963 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49964 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49965 +#endif
49966 + } else {
49967 + return 1;
49968 + }
49969 +
49970 + return 0;
49971 +}
49972 +
49973 +int
49974 +gr_set_acls(const int type)
49975 +{
49976 + struct task_struct *task, *task2;
49977 + struct acl_role_label *role = current->role;
49978 + __u16 acl_role_id = current->acl_role_id;
49979 + const struct cred *cred;
49980 + int ret;
49981 +
49982 + rcu_read_lock();
49983 + read_lock(&tasklist_lock);
49984 + read_lock(&grsec_exec_file_lock);
49985 + do_each_thread(task2, task) {
49986 + /* check to see if we're called from the exit handler,
49987 + if so, only replace ACLs that have inherited the admin
49988 + ACL */
49989 +
49990 + if (type && (task->role != role ||
49991 + task->acl_role_id != acl_role_id))
49992 + continue;
49993 +
49994 + task->acl_role_id = 0;
49995 + task->acl_sp_role = 0;
49996 +
49997 + if (task->exec_file) {
49998 + cred = __task_cred(task);
49999 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
50000 +
50001 + ret = gr_apply_subject_to_task(task);
50002 + if (ret) {
50003 + read_unlock(&grsec_exec_file_lock);
50004 + read_unlock(&tasklist_lock);
50005 + rcu_read_unlock();
50006 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
50007 + return ret;
50008 + }
50009 + } else {
50010 + // it's a kernel process
50011 + task->role = kernel_role;
50012 + task->acl = kernel_role->root_label;
50013 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
50014 + task->acl->mode &= ~GR_PROCFIND;
50015 +#endif
50016 + }
50017 + } while_each_thread(task2, task);
50018 + read_unlock(&grsec_exec_file_lock);
50019 + read_unlock(&tasklist_lock);
50020 + rcu_read_unlock();
50021 +
50022 + return 0;
50023 +}
50024 +
50025 +void
50026 +gr_learn_resource(const struct task_struct *task,
50027 + const int res, const unsigned long wanted, const int gt)
50028 +{
50029 + struct acl_subject_label *acl;
50030 + const struct cred *cred;
50031 +
50032 + if (unlikely((gr_status & GR_READY) &&
50033 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
50034 + goto skip_reslog;
50035 +
50036 +#ifdef CONFIG_GRKERNSEC_RESLOG
50037 + gr_log_resource(task, res, wanted, gt);
50038 +#endif
50039 + skip_reslog:
50040 +
50041 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
50042 + return;
50043 +
50044 + acl = task->acl;
50045 +
50046 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
50047 + !(acl->resmask & (1 << (unsigned short) res))))
50048 + return;
50049 +
50050 + if (wanted >= acl->res[res].rlim_cur) {
50051 + unsigned long res_add;
50052 +
50053 + res_add = wanted;
50054 + switch (res) {
50055 + case RLIMIT_CPU:
50056 + res_add += GR_RLIM_CPU_BUMP;
50057 + break;
50058 + case RLIMIT_FSIZE:
50059 + res_add += GR_RLIM_FSIZE_BUMP;
50060 + break;
50061 + case RLIMIT_DATA:
50062 + res_add += GR_RLIM_DATA_BUMP;
50063 + break;
50064 + case RLIMIT_STACK:
50065 + res_add += GR_RLIM_STACK_BUMP;
50066 + break;
50067 + case RLIMIT_CORE:
50068 + res_add += GR_RLIM_CORE_BUMP;
50069 + break;
50070 + case RLIMIT_RSS:
50071 + res_add += GR_RLIM_RSS_BUMP;
50072 + break;
50073 + case RLIMIT_NPROC:
50074 + res_add += GR_RLIM_NPROC_BUMP;
50075 + break;
50076 + case RLIMIT_NOFILE:
50077 + res_add += GR_RLIM_NOFILE_BUMP;
50078 + break;
50079 + case RLIMIT_MEMLOCK:
50080 + res_add += GR_RLIM_MEMLOCK_BUMP;
50081 + break;
50082 + case RLIMIT_AS:
50083 + res_add += GR_RLIM_AS_BUMP;
50084 + break;
50085 + case RLIMIT_LOCKS:
50086 + res_add += GR_RLIM_LOCKS_BUMP;
50087 + break;
50088 + case RLIMIT_SIGPENDING:
50089 + res_add += GR_RLIM_SIGPENDING_BUMP;
50090 + break;
50091 + case RLIMIT_MSGQUEUE:
50092 + res_add += GR_RLIM_MSGQUEUE_BUMP;
50093 + break;
50094 + case RLIMIT_NICE:
50095 + res_add += GR_RLIM_NICE_BUMP;
50096 + break;
50097 + case RLIMIT_RTPRIO:
50098 + res_add += GR_RLIM_RTPRIO_BUMP;
50099 + break;
50100 + case RLIMIT_RTTIME:
50101 + res_add += GR_RLIM_RTTIME_BUMP;
50102 + break;
50103 + }
50104 +
50105 + acl->res[res].rlim_cur = res_add;
50106 +
50107 + if (wanted > acl->res[res].rlim_max)
50108 + acl->res[res].rlim_max = res_add;
50109 +
50110 + /* only log the subject filename, since resource logging is supported for
50111 + single-subject learning only */
50112 + rcu_read_lock();
50113 + cred = __task_cred(task);
50114 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
50115 + task->role->roletype, cred->uid, cred->gid, acl->filename,
50116 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
50117 + "", (unsigned long) res, &task->signal->saved_ip);
50118 + rcu_read_unlock();
50119 + }
50120 +
50121 + return;
50122 +}
50123 +
50124 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
50125 +void
50126 +pax_set_initial_flags(struct linux_binprm *bprm)
50127 +{
50128 + struct task_struct *task = current;
50129 + struct acl_subject_label *proc;
50130 + unsigned long flags;
50131 +
50132 + if (unlikely(!(gr_status & GR_READY)))
50133 + return;
50134 +
50135 + flags = pax_get_flags(task);
50136 +
50137 + proc = task->acl;
50138 +
50139 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
50140 + flags &= ~MF_PAX_PAGEEXEC;
50141 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
50142 + flags &= ~MF_PAX_SEGMEXEC;
50143 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
50144 + flags &= ~MF_PAX_RANDMMAP;
50145 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
50146 + flags &= ~MF_PAX_EMUTRAMP;
50147 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
50148 + flags &= ~MF_PAX_MPROTECT;
50149 +
50150 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
50151 + flags |= MF_PAX_PAGEEXEC;
50152 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
50153 + flags |= MF_PAX_SEGMEXEC;
50154 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
50155 + flags |= MF_PAX_RANDMMAP;
50156 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
50157 + flags |= MF_PAX_EMUTRAMP;
50158 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
50159 + flags |= MF_PAX_MPROTECT;
50160 +
50161 + pax_set_flags(task, flags);
50162 +
50163 + return;
50164 +}
50165 +#endif
50166 +
50167 +#ifdef CONFIG_SYSCTL
50168 +/* Eric Biederman likes breaking userland ABI and every inode-based security
50169 + system to save 35kb of memory */
50170 +
50171 +/* we modify the passed in filename, but adjust it back before returning */
50172 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
50173 +{
50174 + struct name_entry *nmatch;
50175 + char *p, *lastp = NULL;
50176 + struct acl_object_label *obj = NULL, *tmp;
50177 + struct acl_subject_label *tmpsubj;
50178 + char c = '\0';
50179 +
50180 + read_lock(&gr_inode_lock);
50181 +
50182 + p = name + len - 1;
50183 + do {
50184 + nmatch = lookup_name_entry(name);
50185 + if (lastp != NULL)
50186 + *lastp = c;
50187 +
50188 + if (nmatch == NULL)
50189 + goto next_component;
50190 + tmpsubj = current->acl;
50191 + do {
50192 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
50193 + if (obj != NULL) {
50194 + tmp = obj->globbed;
50195 + while (tmp) {
50196 + if (!glob_match(tmp->filename, name)) {
50197 + obj = tmp;
50198 + goto found_obj;
50199 + }
50200 + tmp = tmp->next;
50201 + }
50202 + goto found_obj;
50203 + }
50204 + } while ((tmpsubj = tmpsubj->parent_subject));
50205 +next_component:
50206 + /* end case */
50207 + if (p == name)
50208 + break;
50209 +
50210 + while (*p != '/')
50211 + p--;
50212 + if (p == name)
50213 + lastp = p + 1;
50214 + else {
50215 + lastp = p;
50216 + p--;
50217 + }
50218 + c = *lastp;
50219 + *lastp = '\0';
50220 + } while (1);
50221 +found_obj:
50222 + read_unlock(&gr_inode_lock);
50223 + /* obj returned will always be non-null */
50224 + return obj;
50225 +}
50226 +
50227 +/* returns 0 when allowing, non-zero on error
50228 + op of 0 is used for readdir, so we don't log the names of hidden files
50229 +*/
50230 +__u32
50231 +gr_handle_sysctl(const struct ctl_table *table, const int op)
50232 +{
50233 + ctl_table *tmp;
50234 + const char *proc_sys = "/proc/sys";
50235 + char *path;
50236 + struct acl_object_label *obj;
50237 + unsigned short len = 0, pos = 0, depth = 0, i;
50238 + __u32 err = 0;
50239 + __u32 mode = 0;
50240 +
50241 + if (unlikely(!(gr_status & GR_READY)))
50242 + return 0;
50243 +
50244 + /* for now, ignore operations on non-sysctl entries if it's not a
50245 + readdir*/
50246 + if (table->child != NULL && op != 0)
50247 + return 0;
50248 +
50249 + mode |= GR_FIND;
50250 + /* it's only a read if it's an entry, read on dirs is for readdir */
50251 + if (op & MAY_READ)
50252 + mode |= GR_READ;
50253 + if (op & MAY_WRITE)
50254 + mode |= GR_WRITE;
50255 +
50256 + preempt_disable();
50257 +
50258 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
50259 +
50260 + /* it's only a read/write if it's an actual entry, not a dir
50261 + (which are opened for readdir)
50262 + */
50263 +
50264 + /* convert the requested sysctl entry into a pathname */
50265 +
50266 + for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
50267 + len += strlen(tmp->procname);
50268 + len++;
50269 + depth++;
50270 + }
50271 +
50272 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
50273 + /* deny */
50274 + goto out;
50275 + }
50276 +
50277 + memset(path, 0, PAGE_SIZE);
50278 +
50279 + memcpy(path, proc_sys, strlen(proc_sys));
50280 +
50281 + pos += strlen(proc_sys);
50282 +
50283 + for (; depth > 0; depth--) {
50284 + path[pos] = '/';
50285 + pos++;
50286 + for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
50287 + if (depth == i) {
50288 + memcpy(path + pos, tmp->procname,
50289 + strlen(tmp->procname));
50290 + pos += strlen(tmp->procname);
50291 + }
50292 + i++;
50293 + }
50294 + }
50295 +
50296 + obj = gr_lookup_by_name(path, pos);
50297 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
50298 +
50299 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
50300 + ((err & mode) != mode))) {
50301 + __u32 new_mode = mode;
50302 +
50303 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50304 +
50305 + err = 0;
50306 + gr_log_learn_sysctl(path, new_mode);
50307 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
50308 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
50309 + err = -ENOENT;
50310 + } else if (!(err & GR_FIND)) {
50311 + err = -ENOENT;
50312 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
50313 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
50314 + path, (mode & GR_READ) ? " reading" : "",
50315 + (mode & GR_WRITE) ? " writing" : "");
50316 + err = -EACCES;
50317 + } else if ((err & mode) != mode) {
50318 + err = -EACCES;
50319 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
50320 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
50321 + path, (mode & GR_READ) ? " reading" : "",
50322 + (mode & GR_WRITE) ? " writing" : "");
50323 + err = 0;
50324 + } else
50325 + err = 0;
50326 +
50327 + out:
50328 + preempt_enable();
50329 +
50330 + return err;
50331 +}
50332 +#endif
50333 +
50334 +int
50335 +gr_handle_proc_ptrace(struct task_struct *task)
50336 +{
50337 + struct file *filp;
50338 + struct task_struct *tmp = task;
50339 + struct task_struct *curtemp = current;
50340 + __u32 retmode;
50341 +
50342 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
50343 + if (unlikely(!(gr_status & GR_READY)))
50344 + return 0;
50345 +#endif
50346 +
50347 + read_lock(&tasklist_lock);
50348 + read_lock(&grsec_exec_file_lock);
50349 + filp = task->exec_file;
50350 +
50351 + while (tmp->pid > 0) {
50352 + if (tmp == curtemp)
50353 + break;
50354 + tmp = tmp->real_parent;
50355 + }
50356 +
50357 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
50358 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
50359 + read_unlock(&grsec_exec_file_lock);
50360 + read_unlock(&tasklist_lock);
50361 + return 1;
50362 + }
50363 +
50364 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
50365 + if (!(gr_status & GR_READY)) {
50366 + read_unlock(&grsec_exec_file_lock);
50367 + read_unlock(&tasklist_lock);
50368 + return 0;
50369 + }
50370 +#endif
50371 +
50372 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
50373 + read_unlock(&grsec_exec_file_lock);
50374 + read_unlock(&tasklist_lock);
50375 +
50376 + if (retmode & GR_NOPTRACE)
50377 + return 1;
50378 +
50379 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
50380 + && (current->acl != task->acl || (current->acl != current->role->root_label
50381 + && current->pid != task->pid)))
50382 + return 1;
50383 +
50384 + return 0;
50385 +}
50386 +
50387 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
50388 +{
50389 + if (unlikely(!(gr_status & GR_READY)))
50390 + return;
50391 +
50392 + if (!(current->role->roletype & GR_ROLE_GOD))
50393 + return;
50394 +
50395 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
50396 + p->role->rolename, gr_task_roletype_to_char(p),
50397 + p->acl->filename);
50398 +}
50399 +
50400 +int
50401 +gr_handle_ptrace(struct task_struct *task, const long request)
50402 +{
50403 + struct task_struct *tmp = task;
50404 + struct task_struct *curtemp = current;
50405 + __u32 retmode;
50406 +
50407 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
50408 + if (unlikely(!(gr_status & GR_READY)))
50409 + return 0;
50410 +#endif
50411 +
50412 + read_lock(&tasklist_lock);
50413 + while (tmp->pid > 0) {
50414 + if (tmp == curtemp)
50415 + break;
50416 + tmp = tmp->real_parent;
50417 + }
50418 +
50419 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
50420 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
50421 + read_unlock(&tasklist_lock);
50422 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50423 + return 1;
50424 + }
50425 + read_unlock(&tasklist_lock);
50426 +
50427 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
50428 + if (!(gr_status & GR_READY))
50429 + return 0;
50430 +#endif
50431 +
50432 + read_lock(&grsec_exec_file_lock);
50433 + if (unlikely(!task->exec_file)) {
50434 + read_unlock(&grsec_exec_file_lock);
50435 + return 0;
50436 + }
50437 +
50438 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
50439 + read_unlock(&grsec_exec_file_lock);
50440 +
50441 + if (retmode & GR_NOPTRACE) {
50442 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50443 + return 1;
50444 + }
50445 +
50446 + if (retmode & GR_PTRACERD) {
50447 + switch (request) {
50448 + case PTRACE_POKETEXT:
50449 + case PTRACE_POKEDATA:
50450 + case PTRACE_POKEUSR:
50451 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
50452 + case PTRACE_SETREGS:
50453 + case PTRACE_SETFPREGS:
50454 +#endif
50455 +#ifdef CONFIG_X86
50456 + case PTRACE_SETFPXREGS:
50457 +#endif
50458 +#ifdef CONFIG_ALTIVEC
50459 + case PTRACE_SETVRREGS:
50460 +#endif
50461 + return 1;
50462 + default:
50463 + return 0;
50464 + }
50465 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
50466 + !(current->role->roletype & GR_ROLE_GOD) &&
50467 + (current->acl != task->acl)) {
50468 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50469 + return 1;
50470 + }
50471 +
50472 + return 0;
50473 +}
50474 +
50475 +static int is_writable_mmap(const struct file *filp)
50476 +{
50477 + struct task_struct *task = current;
50478 + struct acl_object_label *obj, *obj2;
50479 +
50480 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
50481 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
50482 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50483 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
50484 + task->role->root_label);
50485 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
50486 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
50487 + return 1;
50488 + }
50489 + }
50490 + return 0;
50491 +}
50492 +
50493 +int
50494 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
50495 +{
50496 + __u32 mode;
50497 +
50498 + if (unlikely(!file || !(prot & PROT_EXEC)))
50499 + return 1;
50500 +
50501 + if (is_writable_mmap(file))
50502 + return 0;
50503 +
50504 + mode =
50505 + gr_search_file(file->f_path.dentry,
50506 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
50507 + file->f_path.mnt);
50508 +
50509 + if (!gr_tpe_allow(file))
50510 + return 0;
50511 +
50512 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
50513 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50514 + return 0;
50515 + } else if (unlikely(!(mode & GR_EXEC))) {
50516 + return 0;
50517 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
50518 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50519 + return 1;
50520 + }
50521 +
50522 + return 1;
50523 +}
50524 +
50525 +int
50526 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
50527 +{
50528 + __u32 mode;
50529 +
50530 + if (unlikely(!file || !(prot & PROT_EXEC)))
50531 + return 1;
50532 +
50533 + if (is_writable_mmap(file))
50534 + return 0;
50535 +
50536 + mode =
50537 + gr_search_file(file->f_path.dentry,
50538 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
50539 + file->f_path.mnt);
50540 +
50541 + if (!gr_tpe_allow(file))
50542 + return 0;
50543 +
50544 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
50545 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50546 + return 0;
50547 + } else if (unlikely(!(mode & GR_EXEC))) {
50548 + return 0;
50549 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
50550 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50551 + return 1;
50552 + }
50553 +
50554 + return 1;
50555 +}
50556 +
50557 +void
50558 +gr_acl_handle_psacct(struct task_struct *task, const long code)
50559 +{
50560 + unsigned long runtime;
50561 + unsigned long cputime;
50562 + unsigned int wday, cday;
50563 + __u8 whr, chr;
50564 + __u8 wmin, cmin;
50565 + __u8 wsec, csec;
50566 + struct timespec timeval;
50567 +
50568 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
50569 + !(task->acl->mode & GR_PROCACCT)))
50570 + return;
50571 +
50572 + do_posix_clock_monotonic_gettime(&timeval);
50573 + runtime = timeval.tv_sec - task->start_time.tv_sec;
50574 + wday = runtime / (3600 * 24);
50575 + runtime -= wday * (3600 * 24);
50576 + whr = runtime / 3600;
50577 + runtime -= whr * 3600;
50578 + wmin = runtime / 60;
50579 + runtime -= wmin * 60;
50580 + wsec = runtime;
50581 +
50582 + cputime = (task->utime + task->stime) / HZ;
50583 + cday = cputime / (3600 * 24);
50584 + cputime -= cday * (3600 * 24);
50585 + chr = cputime / 3600;
50586 + cputime -= chr * 3600;
50587 + cmin = cputime / 60;
50588 + cputime -= cmin * 60;
50589 + csec = cputime;
50590 +
50591 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
50592 +
50593 + return;
50594 +}
50595 +
50596 +void gr_set_kernel_label(struct task_struct *task)
50597 +{
50598 + if (gr_status & GR_READY) {
50599 + task->role = kernel_role;
50600 + task->acl = kernel_role->root_label;
50601 + }
50602 + return;
50603 +}
50604 +
50605 +#ifdef CONFIG_TASKSTATS
50606 +int gr_is_taskstats_denied(int pid)
50607 +{
50608 + struct task_struct *task;
50609 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50610 + const struct cred *cred;
50611 +#endif
50612 + int ret = 0;
50613 +
50614 + /* restrict taskstats viewing to un-chrooted root users
50615 + who have the 'view' subject flag if the RBAC system is enabled
50616 + */
50617 +
50618 + rcu_read_lock();
50619 + read_lock(&tasklist_lock);
50620 + task = find_task_by_vpid(pid);
50621 + if (task) {
50622 +#ifdef CONFIG_GRKERNSEC_CHROOT
50623 + if (proc_is_chrooted(task))
50624 + ret = -EACCES;
50625 +#endif
50626 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50627 + cred = __task_cred(task);
50628 +#ifdef CONFIG_GRKERNSEC_PROC_USER
50629 + if (cred->uid != 0)
50630 + ret = -EACCES;
50631 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50632 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
50633 + ret = -EACCES;
50634 +#endif
50635 +#endif
50636 + if (gr_status & GR_READY) {
50637 + if (!(task->acl->mode & GR_VIEW))
50638 + ret = -EACCES;
50639 + }
50640 + } else
50641 + ret = -ENOENT;
50642 +
50643 + read_unlock(&tasklist_lock);
50644 + rcu_read_unlock();
50645 +
50646 + return ret;
50647 +}
50648 +#endif
50649 +
50650 +/* AUXV entries are filled via a descendant of search_binary_handler
50651 + after we've already applied the subject for the target
50652 +*/
50653 +int gr_acl_enable_at_secure(void)
50654 +{
50655 + if (unlikely(!(gr_status & GR_READY)))
50656 + return 0;
50657 +
50658 + if (current->acl->mode & GR_ATSECURE)
50659 + return 1;
50660 +
50661 + return 0;
50662 +}
50663 +
50664 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
50665 +{
50666 + struct task_struct *task = current;
50667 + struct dentry *dentry = file->f_path.dentry;
50668 + struct vfsmount *mnt = file->f_path.mnt;
50669 + struct acl_object_label *obj, *tmp;
50670 + struct acl_subject_label *subj;
50671 + unsigned int bufsize;
50672 + int is_not_root;
50673 + char *path;
50674 + dev_t dev = __get_dev(dentry);
50675 +
50676 + if (unlikely(!(gr_status & GR_READY)))
50677 + return 1;
50678 +
50679 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
50680 + return 1;
50681 +
50682 + /* ignore Eric Biederman */
50683 + if (IS_PRIVATE(dentry->d_inode))
50684 + return 1;
50685 +
50686 + subj = task->acl;
50687 + do {
50688 + obj = lookup_acl_obj_label(ino, dev, subj);
50689 + if (obj != NULL)
50690 + return (obj->mode & GR_FIND) ? 1 : 0;
50691 + } while ((subj = subj->parent_subject));
50692 +
50693 + /* this is purely an optimization since we're looking for an object
50694 + for the directory we're doing a readdir on
50695 + if it's possible for any globbed object to match the entry we're
50696 + filling into the directory, then the object we find here will be
50697 + an anchor point with attached globbed objects
50698 + */
50699 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
50700 + if (obj->globbed == NULL)
50701 + return (obj->mode & GR_FIND) ? 1 : 0;
50702 +
50703 + is_not_root = ((obj->filename[0] == '/') &&
50704 + (obj->filename[1] == '\0')) ? 0 : 1;
50705 + bufsize = PAGE_SIZE - namelen - is_not_root;
50706 +
50707 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
50708 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
50709 + return 1;
50710 +
50711 + preempt_disable();
50712 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
50713 + bufsize);
50714 +
50715 + bufsize = strlen(path);
50716 +
50717 + /* if base is "/", don't append an additional slash */
50718 + if (is_not_root)
50719 + *(path + bufsize) = '/';
50720 + memcpy(path + bufsize + is_not_root, name, namelen);
50721 + *(path + bufsize + namelen + is_not_root) = '\0';
50722 +
50723 + tmp = obj->globbed;
50724 + while (tmp) {
50725 + if (!glob_match(tmp->filename, path)) {
50726 + preempt_enable();
50727 + return (tmp->mode & GR_FIND) ? 1 : 0;
50728 + }
50729 + tmp = tmp->next;
50730 + }
50731 + preempt_enable();
50732 + return (obj->mode & GR_FIND) ? 1 : 0;
50733 +}
50734 +
50735 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
50736 +EXPORT_SYMBOL(gr_acl_is_enabled);
50737 +#endif
50738 +EXPORT_SYMBOL(gr_learn_resource);
50739 +EXPORT_SYMBOL(gr_set_kernel_label);
50740 +#ifdef CONFIG_SECURITY
50741 +EXPORT_SYMBOL(gr_check_user_change);
50742 +EXPORT_SYMBOL(gr_check_group_change);
50743 +#endif
50744 +
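The write_grsec_handler() interface above is driven from userspace by the RBAC administration tool, which packages a struct gr_arg_wrapper and write()s it to the grsecurity control device. A minimal caller sketch follows; it assumes the control device is a /dev/grsec node created by the userland tool, and the struct layouts and constant values below are simplified stand-ins for the real definitions in include/linux/gracl.h elsewhere in this patch, shown only to illustrate the version/size checks and the GR_STATUS return convention.

/* illustrative userspace sketch, not part of the patch */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define GR_STATUS          4       /* placeholder value; the real constant is in gracl.h */
#define GRSECURITY_VERSION 0x2202  /* placeholder; must match the kernel's value */

struct gr_arg {                    /* simplified stand-in for the kernel's struct gr_arg */
	unsigned short mode;
	/* password, role name and table pointers omitted */
};

struct gr_arg_wrapper {            /* fields checked by write_grsec_handler(); order illustrative */
	struct gr_arg *arg;
	unsigned int version;
	unsigned int size;
};

int main(void)
{
	struct gr_arg arg;
	struct gr_arg_wrapper wrap;
	ssize_t ret;
	int fd;

	memset(&arg, 0, sizeof(arg));
	arg.mode = GR_STATUS;

	wrap.arg = &arg;
	wrap.version = GRSECURITY_VERSION;
	wrap.size = sizeof(struct gr_arg);

	fd = open("/dev/grsec", O_WRONLY);
	if (fd < 0) {
		perror("open /dev/grsec");
		return 1;
	}

	/* write_grsec_handler() encodes the answer in the write() return value:
	   1 = RBAC enabled, 2 = disabled, 3 = enabled but the terminal is not secure */
	ret = write(fd, &wrap, sizeof(wrap));
	printf("GR_STATUS returned %zd\n", ret);
	close(fd);
	return 0;
}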
50745 diff -urNp linux-2.6.32.45/grsecurity/gracl_cap.c linux-2.6.32.45/grsecurity/gracl_cap.c
50746 --- linux-2.6.32.45/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
50747 +++ linux-2.6.32.45/grsecurity/gracl_cap.c 2011-04-17 15:56:46.000000000 -0400
50748 @@ -0,0 +1,138 @@
50749 +#include <linux/kernel.h>
50750 +#include <linux/module.h>
50751 +#include <linux/sched.h>
50752 +#include <linux/gracl.h>
50753 +#include <linux/grsecurity.h>
50754 +#include <linux/grinternal.h>
50755 +
50756 +static const char *captab_log[] = {
50757 + "CAP_CHOWN",
50758 + "CAP_DAC_OVERRIDE",
50759 + "CAP_DAC_READ_SEARCH",
50760 + "CAP_FOWNER",
50761 + "CAP_FSETID",
50762 + "CAP_KILL",
50763 + "CAP_SETGID",
50764 + "CAP_SETUID",
50765 + "CAP_SETPCAP",
50766 + "CAP_LINUX_IMMUTABLE",
50767 + "CAP_NET_BIND_SERVICE",
50768 + "CAP_NET_BROADCAST",
50769 + "CAP_NET_ADMIN",
50770 + "CAP_NET_RAW",
50771 + "CAP_IPC_LOCK",
50772 + "CAP_IPC_OWNER",
50773 + "CAP_SYS_MODULE",
50774 + "CAP_SYS_RAWIO",
50775 + "CAP_SYS_CHROOT",
50776 + "CAP_SYS_PTRACE",
50777 + "CAP_SYS_PACCT",
50778 + "CAP_SYS_ADMIN",
50779 + "CAP_SYS_BOOT",
50780 + "CAP_SYS_NICE",
50781 + "CAP_SYS_RESOURCE",
50782 + "CAP_SYS_TIME",
50783 + "CAP_SYS_TTY_CONFIG",
50784 + "CAP_MKNOD",
50785 + "CAP_LEASE",
50786 + "CAP_AUDIT_WRITE",
50787 + "CAP_AUDIT_CONTROL",
50788 + "CAP_SETFCAP",
50789 + "CAP_MAC_OVERRIDE",
50790 + "CAP_MAC_ADMIN"
50791 +};
50792 +
50793 +EXPORT_SYMBOL(gr_is_capable);
50794 +EXPORT_SYMBOL(gr_is_capable_nolog);
50795 +
50796 +int
50797 +gr_is_capable(const int cap)
50798 +{
50799 + struct task_struct *task = current;
50800 + const struct cred *cred = current_cred();
50801 + struct acl_subject_label *curracl;
50802 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
50803 + kernel_cap_t cap_audit = __cap_empty_set;
50804 +
50805 + if (!gr_acl_is_enabled())
50806 + return 1;
50807 +
50808 + curracl = task->acl;
50809 +
50810 + cap_drop = curracl->cap_lower;
50811 + cap_mask = curracl->cap_mask;
50812 + cap_audit = curracl->cap_invert_audit;
50813 +
50814 + while ((curracl = curracl->parent_subject)) {
50815 +	/* if the cap isn't specified in the current computed mask but is specified in the
50816 +	   current level subject, and is lowered in the current level subject, then add
50817 +	   it to the set of dropped capabilities;
50818 +	   otherwise, add the current level subject's mask to the current computed mask
50819 +	 */
50820 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
50821 + cap_raise(cap_mask, cap);
50822 + if (cap_raised(curracl->cap_lower, cap))
50823 + cap_raise(cap_drop, cap);
50824 + if (cap_raised(curracl->cap_invert_audit, cap))
50825 + cap_raise(cap_audit, cap);
50826 + }
50827 + }
50828 +
50829 + if (!cap_raised(cap_drop, cap)) {
50830 + if (cap_raised(cap_audit, cap))
50831 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
50832 + return 1;
50833 + }
50834 +
50835 + curracl = task->acl;
50836 +
50837 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
50838 + && cap_raised(cred->cap_effective, cap)) {
50839 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
50840 + task->role->roletype, cred->uid,
50841 + cred->gid, task->exec_file ?
50842 + gr_to_filename(task->exec_file->f_path.dentry,
50843 + task->exec_file->f_path.mnt) : curracl->filename,
50844 + curracl->filename, 0UL,
50845 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
50846 + return 1;
50847 + }
50848 +
50849 + if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
50850 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
50851 + return 0;
50852 +}
50853 +
50854 +int
50855 +gr_is_capable_nolog(const int cap)
50856 +{
50857 + struct acl_subject_label *curracl;
50858 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
50859 +
50860 + if (!gr_acl_is_enabled())
50861 + return 1;
50862 +
50863 + curracl = current->acl;
50864 +
50865 + cap_drop = curracl->cap_lower;
50866 + cap_mask = curracl->cap_mask;
50867 +
50868 + while ((curracl = curracl->parent_subject)) {
50869 +	/* if the cap isn't specified in the current computed mask but is specified in the
50870 +	   current level subject, and is lowered in the current level subject, then add
50871 +	   it to the set of dropped capabilities;
50872 +	   otherwise, add the current level subject's mask to the current computed mask
50873 +	 */
50874 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
50875 + cap_raise(cap_mask, cap);
50876 + if (cap_raised(curracl->cap_lower, cap))
50877 + cap_raise(cap_drop, cap);
50878 + }
50879 + }
50880 +
50881 + if (!cap_raised(cap_drop, cap))
50882 + return 1;
50883 +
50884 + return 0;
50885 +}
50886 +
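Both gr_is_capable() and gr_is_capable_nolog() above resolve a capability by walking the subject's parent_subject chain: the nearest subject whose cap_mask covers the capability decides whether it is dropped, and ancestors only contribute bits the computed mask does not cover yet. A standalone sketch of that walk, using simplified stand-in types rather than the kernel's kernel_cap_t helpers, might look like:

/* illustrative sketch, not part of the patch */
#include <stdint.h>
#include <stdio.h>

struct subject {
	uint64_t cap_mask;   /* capabilities this subject has an opinion about */
	uint64_t cap_lower;  /* capabilities this subject drops */
	struct subject *parent_subject;
};

static int subject_allows_cap(const struct subject *s, int cap)
{
	uint64_t bit  = 1ULL << cap;
	uint64_t mask = s->cap_mask;
	uint64_t drop = s->cap_lower;

	/* ancestors may only add bits the computed mask does not cover yet */
	for (s = s->parent_subject; s; s = s->parent_subject) {
		if (!(mask & bit) && (s->cap_mask & bit)) {
			mask |= bit;
			if (s->cap_lower & bit)
				drop |= bit;
		}
	}
	return !(drop & bit);
}

int main(void)
{
	/* the parent drops capability 21 (CAP_SYS_ADMIN); the child has no opinion about it */
	struct subject parent = { .cap_mask = 1ULL << 21, .cap_lower = 1ULL << 21, .parent_subject = NULL };
	struct subject child  = { .cap_mask = 0, .cap_lower = 0, .parent_subject = &parent };

	printf("cap 21 allowed: %d\n", subject_allows_cap(&child, 21)); /* 0: drop inherited from parent */
	printf("cap  7 allowed: %d\n", subject_allows_cap(&child, 7));  /* 1: no subject drops it */
	return 0;
}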
50887 diff -urNp linux-2.6.32.45/grsecurity/gracl_fs.c linux-2.6.32.45/grsecurity/gracl_fs.c
50888 --- linux-2.6.32.45/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
50889 +++ linux-2.6.32.45/grsecurity/gracl_fs.c 2011-04-17 15:56:46.000000000 -0400
50890 @@ -0,0 +1,431 @@
50891 +#include <linux/kernel.h>
50892 +#include <linux/sched.h>
50893 +#include <linux/types.h>
50894 +#include <linux/fs.h>
50895 +#include <linux/file.h>
50896 +#include <linux/stat.h>
50897 +#include <linux/grsecurity.h>
50898 +#include <linux/grinternal.h>
50899 +#include <linux/gracl.h>
50900 +
50901 +__u32
50902 +gr_acl_handle_hidden_file(const struct dentry * dentry,
50903 + const struct vfsmount * mnt)
50904 +{
50905 + __u32 mode;
50906 +
50907 + if (unlikely(!dentry->d_inode))
50908 + return GR_FIND;
50909 +
50910 + mode =
50911 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
50912 +
50913 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
50914 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
50915 + return mode;
50916 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
50917 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
50918 + return 0;
50919 + } else if (unlikely(!(mode & GR_FIND)))
50920 + return 0;
50921 +
50922 + return GR_FIND;
50923 +}
50924 +
50925 +__u32
50926 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
50927 + const int fmode)
50928 +{
50929 + __u32 reqmode = GR_FIND;
50930 + __u32 mode;
50931 +
50932 + if (unlikely(!dentry->d_inode))
50933 + return reqmode;
50934 +
50935 + if (unlikely(fmode & O_APPEND))
50936 + reqmode |= GR_APPEND;
50937 + else if (unlikely(fmode & FMODE_WRITE))
50938 + reqmode |= GR_WRITE;
50939 + if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
50940 + reqmode |= GR_READ;
50941 + if ((fmode & FMODE_GREXEC) && (fmode & FMODE_EXEC))
50942 + reqmode &= ~GR_READ;
50943 + mode =
50944 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
50945 + mnt);
50946 +
50947 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
50948 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
50949 + reqmode & GR_READ ? " reading" : "",
50950 + reqmode & GR_WRITE ? " writing" : reqmode &
50951 + GR_APPEND ? " appending" : "");
50952 + return reqmode;
50953 + } else
50954 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
50955 + {
50956 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
50957 + reqmode & GR_READ ? " reading" : "",
50958 + reqmode & GR_WRITE ? " writing" : reqmode &
50959 + GR_APPEND ? " appending" : "");
50960 + return 0;
50961 + } else if (unlikely((mode & reqmode) != reqmode))
50962 + return 0;
50963 +
50964 + return reqmode;
50965 +}
50966 +
50967 +__u32
50968 +gr_acl_handle_creat(const struct dentry * dentry,
50969 + const struct dentry * p_dentry,
50970 + const struct vfsmount * p_mnt, const int fmode,
50971 + const int imode)
50972 +{
50973 + __u32 reqmode = GR_WRITE | GR_CREATE;
50974 + __u32 mode;
50975 +
50976 + if (unlikely(fmode & O_APPEND))
50977 + reqmode |= GR_APPEND;
50978 + if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
50979 + reqmode |= GR_READ;
50980 + if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
50981 + reqmode |= GR_SETID;
50982 +
50983 + mode =
50984 + gr_check_create(dentry, p_dentry, p_mnt,
50985 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
50986 +
50987 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
50988 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
50989 + reqmode & GR_READ ? " reading" : "",
50990 + reqmode & GR_WRITE ? " writing" : reqmode &
50991 + GR_APPEND ? " appending" : "");
50992 + return reqmode;
50993 + } else
50994 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
50995 + {
50996 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
50997 + reqmode & GR_READ ? " reading" : "",
50998 + reqmode & GR_WRITE ? " writing" : reqmode &
50999 + GR_APPEND ? " appending" : "");
51000 + return 0;
51001 + } else if (unlikely((mode & reqmode) != reqmode))
51002 + return 0;
51003 +
51004 + return reqmode;
51005 +}
51006 +
51007 +__u32
51008 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
51009 + const int fmode)
51010 +{
51011 + __u32 mode, reqmode = GR_FIND;
51012 +
51013 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
51014 + reqmode |= GR_EXEC;
51015 + if (fmode & S_IWOTH)
51016 + reqmode |= GR_WRITE;
51017 + if (fmode & S_IROTH)
51018 + reqmode |= GR_READ;
51019 +
51020 + mode =
51021 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
51022 + mnt);
51023 +
51024 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51025 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
51026 + reqmode & GR_READ ? " reading" : "",
51027 + reqmode & GR_WRITE ? " writing" : "",
51028 + reqmode & GR_EXEC ? " executing" : "");
51029 + return reqmode;
51030 + } else
51031 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51032 + {
51033 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
51034 + reqmode & GR_READ ? " reading" : "",
51035 + reqmode & GR_WRITE ? " writing" : "",
51036 + reqmode & GR_EXEC ? " executing" : "");
51037 + return 0;
51038 + } else if (unlikely((mode & reqmode) != reqmode))
51039 + return 0;
51040 +
51041 + return reqmode;
51042 +}
51043 +
51044 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
51045 +{
51046 + __u32 mode;
51047 +
51048 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
51049 +
51050 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
51051 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
51052 + return mode;
51053 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
51054 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
51055 + return 0;
51056 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
51057 + return 0;
51058 +
51059 + return (reqmode);
51060 +}
51061 +
51062 +__u32
51063 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
51064 +{
51065 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
51066 +}
51067 +
51068 +__u32
51069 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
51070 +{
51071 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
51072 +}
51073 +
51074 +__u32
51075 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
51076 +{
51077 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
51078 +}
51079 +
51080 +__u32
51081 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
51082 +{
51083 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
51084 +}
51085 +
51086 +__u32
51087 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
51088 + mode_t mode)
51089 +{
51090 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
51091 + return 1;
51092 +
51093 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51094 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51095 + GR_FCHMOD_ACL_MSG);
51096 + } else {
51097 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
51098 + }
51099 +}
51100 +
51101 +__u32
51102 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
51103 + mode_t mode)
51104 +{
51105 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51106 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51107 + GR_CHMOD_ACL_MSG);
51108 + } else {
51109 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
51110 + }
51111 +}
51112 +
51113 +__u32
51114 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
51115 +{
51116 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
51117 +}
51118 +
51119 +__u32
51120 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
51121 +{
51122 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
51123 +}
51124 +
51125 +__u32
51126 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
51127 +{
51128 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
51129 +}
51130 +
51131 +__u32
51132 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
51133 +{
51134 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
51135 + GR_UNIXCONNECT_ACL_MSG);
51136 +}
51137 +
51138 +/* hardlinks require at minimum create permission,
51139 + any additional privilege required is based on the
51140 + privilege of the file being linked to
51141 +*/
51142 +__u32
51143 +gr_acl_handle_link(const struct dentry * new_dentry,
51144 + const struct dentry * parent_dentry,
51145 + const struct vfsmount * parent_mnt,
51146 + const struct dentry * old_dentry,
51147 + const struct vfsmount * old_mnt, const char *to)
51148 +{
51149 + __u32 mode;
51150 + __u32 needmode = GR_CREATE | GR_LINK;
51151 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
51152 +
51153 + mode =
51154 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
51155 + old_mnt);
51156 +
51157 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
51158 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51159 + return mode;
51160 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51161 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51162 + return 0;
51163 + } else if (unlikely((mode & needmode) != needmode))
51164 + return 0;
51165 +
51166 + return 1;
51167 +}
51168 +
51169 +__u32
51170 +gr_acl_handle_symlink(const struct dentry * new_dentry,
51171 + const struct dentry * parent_dentry,
51172 + const struct vfsmount * parent_mnt, const char *from)
51173 +{
51174 + __u32 needmode = GR_WRITE | GR_CREATE;
51175 + __u32 mode;
51176 +
51177 + mode =
51178 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
51179 + GR_CREATE | GR_AUDIT_CREATE |
51180 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
51181 +
51182 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
51183 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
51184 + return mode;
51185 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51186 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
51187 + return 0;
51188 + } else if (unlikely((mode & needmode) != needmode))
51189 + return 0;
51190 +
51191 + return (GR_WRITE | GR_CREATE);
51192 +}
51193 +
51194 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
51195 +{
51196 + __u32 mode;
51197 +
51198 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
51199 +
51200 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
51201 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
51202 + return mode;
51203 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
51204 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
51205 + return 0;
51206 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
51207 + return 0;
51208 +
51209 + return (reqmode);
51210 +}
51211 +
51212 +__u32
51213 +gr_acl_handle_mknod(const struct dentry * new_dentry,
51214 + const struct dentry * parent_dentry,
51215 + const struct vfsmount * parent_mnt,
51216 + const int mode)
51217 +{
51218 + __u32 reqmode = GR_WRITE | GR_CREATE;
51219 + if (unlikely(mode & (S_ISUID | S_ISGID)))
51220 + reqmode |= GR_SETID;
51221 +
51222 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
51223 + reqmode, GR_MKNOD_ACL_MSG);
51224 +}
51225 +
51226 +__u32
51227 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
51228 + const struct dentry *parent_dentry,
51229 + const struct vfsmount *parent_mnt)
51230 +{
51231 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
51232 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
51233 +}
51234 +
51235 +#define RENAME_CHECK_SUCCESS(old, new) \
51236 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
51237 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
51238 +
51239 +int
51240 +gr_acl_handle_rename(struct dentry *new_dentry,
51241 + struct dentry *parent_dentry,
51242 + const struct vfsmount *parent_mnt,
51243 + struct dentry *old_dentry,
51244 + struct inode *old_parent_inode,
51245 + struct vfsmount *old_mnt, const char *newname)
51246 +{
51247 + __u32 comp1, comp2;
51248 + int error = 0;
51249 +
51250 + if (unlikely(!gr_acl_is_enabled()))
51251 + return 0;
51252 +
51253 + if (!new_dentry->d_inode) {
51254 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
51255 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
51256 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
51257 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
51258 + GR_DELETE | GR_AUDIT_DELETE |
51259 + GR_AUDIT_READ | GR_AUDIT_WRITE |
51260 + GR_SUPPRESS, old_mnt);
51261 + } else {
51262 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
51263 + GR_CREATE | GR_DELETE |
51264 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
51265 + GR_AUDIT_READ | GR_AUDIT_WRITE |
51266 + GR_SUPPRESS, parent_mnt);
51267 + comp2 =
51268 + gr_search_file(old_dentry,
51269 + GR_READ | GR_WRITE | GR_AUDIT_READ |
51270 + GR_DELETE | GR_AUDIT_DELETE |
51271 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
51272 + }
51273 +
51274 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
51275 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
51276 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
51277 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
51278 + && !(comp2 & GR_SUPPRESS)) {
51279 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
51280 + error = -EACCES;
51281 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
51282 + error = -EACCES;
51283 +
51284 + return error;
51285 +}
51286 +
51287 +void
51288 +gr_acl_handle_exit(void)
51289 +{
51290 + u16 id;
51291 + char *rolename;
51292 + struct file *exec_file;
51293 +
51294 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
51295 + !(current->role->roletype & GR_ROLE_PERSIST))) {
51296 + id = current->acl_role_id;
51297 + rolename = current->role->rolename;
51298 + gr_set_acls(1);
51299 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
51300 + }
51301 +
51302 + write_lock(&grsec_exec_file_lock);
51303 + exec_file = current->exec_file;
51304 + current->exec_file = NULL;
51305 + write_unlock(&grsec_exec_file_lock);
51306 +
51307 + if (exec_file)
51308 + fput(exec_file);
51309 +}
51310 +
51311 +int
51312 +gr_acl_handle_procpidmem(const struct task_struct *task)
51313 +{
51314 + if (unlikely(!gr_acl_is_enabled()))
51315 + return 0;
51316 +
51317 + if (task != current && task->acl->mode & GR_PROTPROCFD)
51318 + return -EACCES;
51319 +
51320 + return 0;
51321 +}
51322 diff -urNp linux-2.6.32.45/grsecurity/gracl_ip.c linux-2.6.32.45/grsecurity/gracl_ip.c
51323 --- linux-2.6.32.45/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
51324 +++ linux-2.6.32.45/grsecurity/gracl_ip.c 2011-04-17 15:56:46.000000000 -0400
51325 @@ -0,0 +1,382 @@
51326 +#include <linux/kernel.h>
51327 +#include <asm/uaccess.h>
51328 +#include <asm/errno.h>
51329 +#include <net/sock.h>
51330 +#include <linux/file.h>
51331 +#include <linux/fs.h>
51332 +#include <linux/net.h>
51333 +#include <linux/in.h>
51334 +#include <linux/skbuff.h>
51335 +#include <linux/ip.h>
51336 +#include <linux/udp.h>
51337 +#include <linux/smp_lock.h>
51338 +#include <linux/types.h>
51339 +#include <linux/sched.h>
51340 +#include <linux/netdevice.h>
51341 +#include <linux/inetdevice.h>
51342 +#include <linux/gracl.h>
51343 +#include <linux/grsecurity.h>
51344 +#include <linux/grinternal.h>
51345 +
51346 +#define GR_BIND 0x01
51347 +#define GR_CONNECT 0x02
51348 +#define GR_INVERT 0x04
51349 +#define GR_BINDOVERRIDE 0x08
51350 +#define GR_CONNECTOVERRIDE 0x10
51351 +#define GR_SOCK_FAMILY 0x20
51352 +
51353 +static const char * gr_protocols[IPPROTO_MAX] = {
51354 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
51355 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
51356 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
51357 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
51358 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
51359 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
51360 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
51361 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
51362 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
51363 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
51364 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
51365 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
51366 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
51367 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
51368 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
51369 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
51370 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
51371 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
51372 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
51373 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
51374 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
51375 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
51376 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
51377 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
51378 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
51379 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
51380 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
51381 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
51382 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
51383 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
51384 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
51385 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
51386 + };
51387 +
51388 +static const char * gr_socktypes[SOCK_MAX] = {
51389 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
51390 + "unknown:7", "unknown:8", "unknown:9", "packet"
51391 + };
51392 +
51393 +static const char * gr_sockfamilies[AF_MAX+1] = {
51394 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
51395 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
51396 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
51397 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
51398 + };
51399 +
51400 +const char *
51401 +gr_proto_to_name(unsigned char proto)
51402 +{
51403 + return gr_protocols[proto];
51404 +}
51405 +
51406 +const char *
51407 +gr_socktype_to_name(unsigned char type)
51408 +{
51409 + return gr_socktypes[type];
51410 +}
51411 +
51412 +const char *
51413 +gr_sockfamily_to_name(unsigned char family)
51414 +{
51415 + return gr_sockfamilies[family];
51416 +}
51417 +
51418 +int
51419 +gr_search_socket(const int domain, const int type, const int protocol)
51420 +{
51421 + struct acl_subject_label *curr;
51422 + const struct cred *cred = current_cred();
51423 +
51424 + if (unlikely(!gr_acl_is_enabled()))
51425 + goto exit;
51426 +
51427 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
51428 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
51429 + goto exit; // let the kernel handle it
51430 +
51431 + curr = current->acl;
51432 +
51433 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
51434 + /* the family is allowed, if this is PF_INET allow it only if
51435 + the extra sock type/protocol checks pass */
51436 + if (domain == PF_INET)
51437 + goto inet_check;
51438 + goto exit;
51439 + } else {
51440 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
51441 + __u32 fakeip = 0;
51442 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51443 + current->role->roletype, cred->uid,
51444 + cred->gid, current->exec_file ?
51445 + gr_to_filename(current->exec_file->f_path.dentry,
51446 + current->exec_file->f_path.mnt) :
51447 + curr->filename, curr->filename,
51448 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
51449 + &current->signal->saved_ip);
51450 + goto exit;
51451 + }
51452 + goto exit_fail;
51453 + }
51454 +
51455 +inet_check:
51456 + /* the rest of this checking is for IPv4 only */
51457 + if (!curr->ips)
51458 + goto exit;
51459 +
51460 + if ((curr->ip_type & (1 << type)) &&
51461 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
51462 + goto exit;
51463 +
51464 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
51465 + /* we don't place acls on raw sockets, and sometimes
51466 + dgram/ip sockets are opened for ioctl and not
51467 + bind/connect, so we'll fake a bind learn log */
51468 + if (type == SOCK_RAW || type == SOCK_PACKET) {
51469 + __u32 fakeip = 0;
51470 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51471 + current->role->roletype, cred->uid,
51472 + cred->gid, current->exec_file ?
51473 + gr_to_filename(current->exec_file->f_path.dentry,
51474 + current->exec_file->f_path.mnt) :
51475 + curr->filename, curr->filename,
51476 + &fakeip, 0, type,
51477 + protocol, GR_CONNECT, &current->signal->saved_ip);
51478 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
51479 + __u32 fakeip = 0;
51480 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51481 + current->role->roletype, cred->uid,
51482 + cred->gid, current->exec_file ?
51483 + gr_to_filename(current->exec_file->f_path.dentry,
51484 + current->exec_file->f_path.mnt) :
51485 + curr->filename, curr->filename,
51486 + &fakeip, 0, type,
51487 + protocol, GR_BIND, &current->signal->saved_ip);
51488 + }
51489 + /* we'll log when they use connect or bind */
51490 + goto exit;
51491 + }
51492 +
51493 +exit_fail:
51494 + if (domain == PF_INET)
51495 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
51496 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
51497 + else
51498 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
51499 + gr_socktype_to_name(type), protocol);
51500 +
51501 + return 0;
51502 +exit:
51503 + return 1;
51504 +}
51505 +
51506 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
51507 +{
51508 + if ((ip->mode & mode) &&
51509 + (ip_port >= ip->low) &&
51510 + (ip_port <= ip->high) &&
51511 + ((ntohl(ip_addr) & our_netmask) ==
51512 + (ntohl(our_addr) & our_netmask))
51513 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
51514 + && (ip->type & (1 << type))) {
51515 + if (ip->mode & GR_INVERT)
51516 + return 2; // specifically denied
51517 + else
51518 + return 1; // allowed
51519 + }
51520 +
51521 + return 0; // not specifically allowed, may continue parsing
51522 +}
51523 +
51524 +static int
51525 +gr_search_connectbind(const int full_mode, struct sock *sk,
51526 + struct sockaddr_in *addr, const int type)
51527 +{
51528 + char iface[IFNAMSIZ] = {0};
51529 + struct acl_subject_label *curr;
51530 + struct acl_ip_label *ip;
51531 + struct inet_sock *isk;
51532 + struct net_device *dev;
51533 + struct in_device *idev;
51534 + unsigned long i;
51535 + int ret;
51536 + int mode = full_mode & (GR_BIND | GR_CONNECT);
51537 + __u32 ip_addr = 0;
51538 + __u32 our_addr;
51539 + __u32 our_netmask;
51540 + char *p;
51541 + __u16 ip_port = 0;
51542 + const struct cred *cred = current_cred();
51543 +
51544 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
51545 + return 0;
51546 +
51547 + curr = current->acl;
51548 + isk = inet_sk(sk);
51549 +
51550 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
51551 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
51552 + addr->sin_addr.s_addr = curr->inaddr_any_override;
51553 + if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
51554 + struct sockaddr_in saddr;
51555 + int err;
51556 +
51557 + saddr.sin_family = AF_INET;
51558 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
51559 + saddr.sin_port = isk->sport;
51560 +
51561 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
51562 + if (err)
51563 + return err;
51564 +
51565 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
51566 + if (err)
51567 + return err;
51568 + }
51569 +
51570 + if (!curr->ips)
51571 + return 0;
51572 +
51573 + ip_addr = addr->sin_addr.s_addr;
51574 + ip_port = ntohs(addr->sin_port);
51575 +
51576 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
51577 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51578 + current->role->roletype, cred->uid,
51579 + cred->gid, current->exec_file ?
51580 + gr_to_filename(current->exec_file->f_path.dentry,
51581 + current->exec_file->f_path.mnt) :
51582 + curr->filename, curr->filename,
51583 + &ip_addr, ip_port, type,
51584 + sk->sk_protocol, mode, &current->signal->saved_ip);
51585 + return 0;
51586 + }
51587 +
51588 + for (i = 0; i < curr->ip_num; i++) {
51589 + ip = *(curr->ips + i);
51590 + if (ip->iface != NULL) {
51591 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
51592 + p = strchr(iface, ':');
51593 + if (p != NULL)
51594 + *p = '\0';
51595 + dev = dev_get_by_name(sock_net(sk), iface);
51596 + if (dev == NULL)
51597 + continue;
51598 + idev = in_dev_get(dev);
51599 + if (idev == NULL) {
51600 + dev_put(dev);
51601 + continue;
51602 + }
51603 + rcu_read_lock();
51604 + for_ifa(idev) {
51605 + if (!strcmp(ip->iface, ifa->ifa_label)) {
51606 + our_addr = ifa->ifa_address;
51607 + our_netmask = 0xffffffff;
51608 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
51609 + if (ret == 1) {
51610 + rcu_read_unlock();
51611 + in_dev_put(idev);
51612 + dev_put(dev);
51613 + return 0;
51614 + } else if (ret == 2) {
51615 + rcu_read_unlock();
51616 + in_dev_put(idev);
51617 + dev_put(dev);
51618 + goto denied;
51619 + }
51620 + }
51621 + } endfor_ifa(idev);
51622 + rcu_read_unlock();
51623 + in_dev_put(idev);
51624 + dev_put(dev);
51625 + } else {
51626 + our_addr = ip->addr;
51627 + our_netmask = ip->netmask;
51628 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
51629 + if (ret == 1)
51630 + return 0;
51631 + else if (ret == 2)
51632 + goto denied;
51633 + }
51634 + }
51635 +
51636 +denied:
51637 + if (mode == GR_BIND)
51638 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
51639 + else if (mode == GR_CONNECT)
51640 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
51641 +
51642 + return -EACCES;
51643 +}
51644 +
51645 +int
51646 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
51647 +{
51648 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
51649 +}
51650 +
51651 +int
51652 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
51653 +{
51654 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
51655 +}
51656 +
51657 +int gr_search_listen(struct socket *sock)
51658 +{
51659 + struct sock *sk = sock->sk;
51660 + struct sockaddr_in addr;
51661 +
51662 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
51663 + addr.sin_port = inet_sk(sk)->sport;
51664 +
51665 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
51666 +}
51667 +
51668 +int gr_search_accept(struct socket *sock)
51669 +{
51670 + struct sock *sk = sock->sk;
51671 + struct sockaddr_in addr;
51672 +
51673 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
51674 + addr.sin_port = inet_sk(sk)->sport;
51675 +
51676 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
51677 +}
51678 +
51679 +int
51680 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
51681 +{
51682 + if (addr)
51683 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
51684 + else {
51685 + struct sockaddr_in sin;
51686 + const struct inet_sock *inet = inet_sk(sk);
51687 +
51688 + sin.sin_addr.s_addr = inet->daddr;
51689 + sin.sin_port = inet->dport;
51690 +
51691 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
51692 + }
51693 +}
51694 +
51695 +int
51696 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
51697 +{
51698 + struct sockaddr_in sin;
51699 +
51700 + if (unlikely(skb->len < sizeof (struct udphdr)))
51701 + return 0; // skip this packet
51702 +
51703 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
51704 + sin.sin_port = udp_hdr(skb)->source;
51705 +
51706 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
51707 +}
51708 diff -urNp linux-2.6.32.45/grsecurity/gracl_learn.c linux-2.6.32.45/grsecurity/gracl_learn.c
51709 --- linux-2.6.32.45/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
51710 +++ linux-2.6.32.45/grsecurity/gracl_learn.c 2011-07-14 21:02:03.000000000 -0400
51711 @@ -0,0 +1,208 @@
51712 +#include <linux/kernel.h>
51713 +#include <linux/mm.h>
51714 +#include <linux/sched.h>
51715 +#include <linux/poll.h>
51716 +#include <linux/smp_lock.h>
51717 +#include <linux/string.h>
51718 +#include <linux/file.h>
51719 +#include <linux/types.h>
51720 +#include <linux/vmalloc.h>
51721 +#include <linux/grinternal.h>
51722 +
51723 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
51724 + size_t count, loff_t *ppos);
51725 +extern int gr_acl_is_enabled(void);
51726 +
51727 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
51728 +static int gr_learn_attached;
51729 +
51730 +/* use a 512k buffer */
51731 +#define LEARN_BUFFER_SIZE (512 * 1024)
51732 +
51733 +static DEFINE_SPINLOCK(gr_learn_lock);
51734 +static DEFINE_MUTEX(gr_learn_user_mutex);
51735 +
51736 +/* we need to maintain two buffers, so that the kernel context of grlearn
51737 + uses a semaphore around the userspace copying, and the other kernel contexts
51738 + use a spinlock when copying into the buffer, since they cannot sleep
51739 +*/
51740 +static char *learn_buffer;
51741 +static char *learn_buffer_user;
51742 +static int learn_buffer_len;
51743 +static int learn_buffer_user_len;
51744 +
51745 +static ssize_t
51746 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
51747 +{
51748 + DECLARE_WAITQUEUE(wait, current);
51749 + ssize_t retval = 0;
51750 +
51751 + add_wait_queue(&learn_wait, &wait);
51752 + set_current_state(TASK_INTERRUPTIBLE);
51753 + do {
51754 + mutex_lock(&gr_learn_user_mutex);
51755 + spin_lock(&gr_learn_lock);
51756 + if (learn_buffer_len)
51757 + break;
51758 + spin_unlock(&gr_learn_lock);
51759 + mutex_unlock(&gr_learn_user_mutex);
51760 + if (file->f_flags & O_NONBLOCK) {
51761 + retval = -EAGAIN;
51762 + goto out;
51763 + }
51764 + if (signal_pending(current)) {
51765 + retval = -ERESTARTSYS;
51766 + goto out;
51767 + }
51768 +
51769 + schedule();
51770 + } while (1);
51771 +
51772 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
51773 + learn_buffer_user_len = learn_buffer_len;
51774 + retval = learn_buffer_len;
51775 + learn_buffer_len = 0;
51776 +
51777 + spin_unlock(&gr_learn_lock);
51778 +
51779 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
51780 + retval = -EFAULT;
51781 +
51782 + mutex_unlock(&gr_learn_user_mutex);
51783 +out:
51784 + set_current_state(TASK_RUNNING);
51785 + remove_wait_queue(&learn_wait, &wait);
51786 + return retval;
51787 +}
51788 +
51789 +static unsigned int
51790 +poll_learn(struct file * file, poll_table * wait)
51791 +{
51792 + poll_wait(file, &learn_wait, wait);
51793 +
51794 + if (learn_buffer_len)
51795 + return (POLLIN | POLLRDNORM);
51796 +
51797 + return 0;
51798 +}
51799 +
51800 +void
51801 +gr_clear_learn_entries(void)
51802 +{
51803 + char *tmp;
51804 +
51805 + mutex_lock(&gr_learn_user_mutex);
51806 + spin_lock(&gr_learn_lock);
51807 + tmp = learn_buffer;
51808 + learn_buffer = NULL;
51809 + spin_unlock(&gr_learn_lock);
51810 + if (tmp)
51811 + vfree(tmp);
51812 + if (learn_buffer_user != NULL) {
51813 + vfree(learn_buffer_user);
51814 + learn_buffer_user = NULL;
51815 + }
51816 + learn_buffer_len = 0;
51817 + mutex_unlock(&gr_learn_user_mutex);
51818 +
51819 + return;
51820 +}
51821 +
51822 +void
51823 +gr_add_learn_entry(const char *fmt, ...)
51824 +{
51825 + va_list args;
51826 + unsigned int len;
51827 +
51828 + if (!gr_learn_attached)
51829 + return;
51830 +
51831 + spin_lock(&gr_learn_lock);
51832 +
51833 + /* leave a gap at the end so we know when it's "full" but don't have to
51834 + compute the exact length of the string we're trying to append
51835 + */
51836 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
51837 + spin_unlock(&gr_learn_lock);
51838 + wake_up_interruptible(&learn_wait);
51839 + return;
51840 + }
51841 + if (learn_buffer == NULL) {
51842 + spin_unlock(&gr_learn_lock);
51843 + return;
51844 + }
51845 +
51846 + va_start(args, fmt);
51847 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
51848 + va_end(args);
51849 +
51850 + learn_buffer_len += len + 1;
51851 +
51852 + spin_unlock(&gr_learn_lock);
51853 + wake_up_interruptible(&learn_wait);
51854 +
51855 + return;
51856 +}
51857 +
51858 +static int
51859 +open_learn(struct inode *inode, struct file *file)
51860 +{
51861 + if (file->f_mode & FMODE_READ && gr_learn_attached)
51862 + return -EBUSY;
51863 + if (file->f_mode & FMODE_READ) {
51864 + int retval = 0;
51865 + mutex_lock(&gr_learn_user_mutex);
51866 + if (learn_buffer == NULL)
51867 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
51868 + if (learn_buffer_user == NULL)
51869 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
51870 + if (learn_buffer == NULL) {
51871 + retval = -ENOMEM;
51872 + goto out_error;
51873 + }
51874 + if (learn_buffer_user == NULL) {
51875 + retval = -ENOMEM;
51876 + goto out_error;
51877 + }
51878 + learn_buffer_len = 0;
51879 + learn_buffer_user_len = 0;
51880 + gr_learn_attached = 1;
51881 +out_error:
51882 + mutex_unlock(&gr_learn_user_mutex);
51883 + return retval;
51884 + }
51885 + return 0;
51886 +}
51887 +
51888 +static int
51889 +close_learn(struct inode *inode, struct file *file)
51890 +{
51891 + if (file->f_mode & FMODE_READ) {
51892 + char *tmp = NULL;
51893 + mutex_lock(&gr_learn_user_mutex);
51894 + spin_lock(&gr_learn_lock);
51895 + tmp = learn_buffer;
51896 + learn_buffer = NULL;
51897 + spin_unlock(&gr_learn_lock);
51898 + if (tmp)
51899 + vfree(tmp);
51900 + if (learn_buffer_user != NULL) {
51901 + vfree(learn_buffer_user);
51902 + learn_buffer_user = NULL;
51903 + }
51904 + learn_buffer_len = 0;
51905 + learn_buffer_user_len = 0;
51906 + gr_learn_attached = 0;
51907 + mutex_unlock(&gr_learn_user_mutex);
51908 + }
51909 +
51910 + return 0;
51911 +}
51912 +
51913 +const struct file_operations grsec_fops = {
51914 + .read = read_learn,
51915 + .write = write_grsec_handler,
51916 + .open = open_learn,
51917 + .release = close_learn,
51918 + .poll = poll_learn,
51919 +};
51920 diff -urNp linux-2.6.32.45/grsecurity/gracl_res.c linux-2.6.32.45/grsecurity/gracl_res.c
51921 --- linux-2.6.32.45/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
51922 +++ linux-2.6.32.45/grsecurity/gracl_res.c 2011-04-17 15:56:46.000000000 -0400
51923 @@ -0,0 +1,67 @@
51924 +#include <linux/kernel.h>
51925 +#include <linux/sched.h>
51926 +#include <linux/gracl.h>
51927 +#include <linux/grinternal.h>
51928 +
51929 +static const char *restab_log[] = {
51930 + [RLIMIT_CPU] = "RLIMIT_CPU",
51931 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
51932 + [RLIMIT_DATA] = "RLIMIT_DATA",
51933 + [RLIMIT_STACK] = "RLIMIT_STACK",
51934 + [RLIMIT_CORE] = "RLIMIT_CORE",
51935 + [RLIMIT_RSS] = "RLIMIT_RSS",
51936 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
51937 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
51938 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
51939 + [RLIMIT_AS] = "RLIMIT_AS",
51940 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
51941 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
51942 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
51943 + [RLIMIT_NICE] = "RLIMIT_NICE",
51944 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
51945 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
51946 + [GR_CRASH_RES] = "RLIMIT_CRASH"
51947 +};
51948 +
51949 +void
51950 +gr_log_resource(const struct task_struct *task,
51951 + const int res, const unsigned long wanted, const int gt)
51952 +{
51953 + const struct cred *cred;
51954 + unsigned long rlim;
51955 +
51956 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
51957 + return;
51958 +
51959 + // not yet supported resource
51960 + if (unlikely(!restab_log[res]))
51961 + return;
51962 +
51963 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
51964 + rlim = task->signal->rlim[res].rlim_max;
51965 + else
51966 + rlim = task->signal->rlim[res].rlim_cur;
51967 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
51968 + return;
51969 +
51970 + rcu_read_lock();
51971 + cred = __task_cred(task);
51972 +
51973 + if (res == RLIMIT_NPROC &&
51974 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
51975 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
51976 + goto out_rcu_unlock;
51977 + else if (res == RLIMIT_MEMLOCK &&
51978 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
51979 + goto out_rcu_unlock;
51980 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
51981 + goto out_rcu_unlock;
51982 + rcu_read_unlock();
51983 +
51984 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
51985 +
51986 + return;
51987 +out_rcu_unlock:
51988 + rcu_read_unlock();
51989 + return;
51990 +}
51991 diff -urNp linux-2.6.32.45/grsecurity/gracl_segv.c linux-2.6.32.45/grsecurity/gracl_segv.c
51992 --- linux-2.6.32.45/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
51993 +++ linux-2.6.32.45/grsecurity/gracl_segv.c 2011-04-17 15:56:46.000000000 -0400
51994 @@ -0,0 +1,284 @@
51995 +#include <linux/kernel.h>
51996 +#include <linux/mm.h>
51997 +#include <asm/uaccess.h>
51998 +#include <asm/errno.h>
51999 +#include <asm/mman.h>
52000 +#include <net/sock.h>
52001 +#include <linux/file.h>
52002 +#include <linux/fs.h>
52003 +#include <linux/net.h>
52004 +#include <linux/in.h>
52005 +#include <linux/smp_lock.h>
52006 +#include <linux/slab.h>
52007 +#include <linux/types.h>
52008 +#include <linux/sched.h>
52009 +#include <linux/timer.h>
52010 +#include <linux/gracl.h>
52011 +#include <linux/grsecurity.h>
52012 +#include <linux/grinternal.h>
52013 +
52014 +static struct crash_uid *uid_set;
52015 +static unsigned short uid_used;
52016 +static DEFINE_SPINLOCK(gr_uid_lock);
52017 +extern rwlock_t gr_inode_lock;
52018 +extern struct acl_subject_label *
52019 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
52020 + struct acl_role_label *role);
52021 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
52022 +
52023 +int
52024 +gr_init_uidset(void)
52025 +{
52026 + uid_set =
52027 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
52028 + uid_used = 0;
52029 +
52030 + return uid_set ? 1 : 0;
52031 +}
52032 +
52033 +void
52034 +gr_free_uidset(void)
52035 +{
52036 + if (uid_set)
52037 + kfree(uid_set);
52038 +
52039 + return;
52040 +}
52041 +
52042 +int
52043 +gr_find_uid(const uid_t uid)
52044 +{
52045 + struct crash_uid *tmp = uid_set;
52046 + uid_t buid;
52047 + int low = 0, high = uid_used - 1, mid;
52048 +
52049 + while (high >= low) {
52050 + mid = (low + high) >> 1;
52051 + buid = tmp[mid].uid;
52052 + if (buid == uid)
52053 + return mid;
52054 + if (buid > uid)
52055 + high = mid - 1;
52056 + if (buid < uid)
52057 + low = mid + 1;
52058 + }
52059 +
52060 + return -1;
52061 +}
52062 +
52063 +static __inline__ void
52064 +gr_insertsort(void)
52065 +{
52066 + unsigned short i, j;
52067 + struct crash_uid index;
52068 +
52069 + for (i = 1; i < uid_used; i++) {
52070 + index = uid_set[i];
52071 + j = i;
52072 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
52073 + uid_set[j] = uid_set[j - 1];
52074 + j--;
52075 + }
52076 + uid_set[j] = index;
52077 + }
52078 +
52079 + return;
52080 +}
52081 +
52082 +static __inline__ void
52083 +gr_insert_uid(const uid_t uid, const unsigned long expires)
52084 +{
52085 + int loc;
52086 +
52087 + if (uid_used == GR_UIDTABLE_MAX)
52088 + return;
52089 +
52090 + loc = gr_find_uid(uid);
52091 +
52092 + if (loc >= 0) {
52093 + uid_set[loc].expires = expires;
52094 + return;
52095 + }
52096 +
52097 + uid_set[uid_used].uid = uid;
52098 + uid_set[uid_used].expires = expires;
52099 + uid_used++;
52100 +
52101 + gr_insertsort();
52102 +
52103 + return;
52104 +}
52105 +
52106 +void
52107 +gr_remove_uid(const unsigned short loc)
52108 +{
52109 + unsigned short i;
52110 +
52111 + for (i = loc + 1; i < uid_used; i++)
52112 + uid_set[i - 1] = uid_set[i];
52113 +
52114 + uid_used--;
52115 +
52116 + return;
52117 +}
52118 +
52119 +int
52120 +gr_check_crash_uid(const uid_t uid)
52121 +{
52122 + int loc;
52123 + int ret = 0;
52124 +
52125 + if (unlikely(!gr_acl_is_enabled()))
52126 + return 0;
52127 +
52128 + spin_lock(&gr_uid_lock);
52129 + loc = gr_find_uid(uid);
52130 +
52131 + if (loc < 0)
52132 + goto out_unlock;
52133 +
52134 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
52135 + gr_remove_uid(loc);
52136 + else
52137 + ret = 1;
52138 +
52139 +out_unlock:
52140 + spin_unlock(&gr_uid_lock);
52141 + return ret;
52142 +}
52143 +
52144 +static __inline__ int
52145 +proc_is_setxid(const struct cred *cred)
52146 +{
52147 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
52148 + cred->uid != cred->fsuid)
52149 + return 1;
52150 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
52151 + cred->gid != cred->fsgid)
52152 + return 1;
52153 +
52154 + return 0;
52155 +}
52156 +
52157 +void
52158 +gr_handle_crash(struct task_struct *task, const int sig)
52159 +{
52160 + struct acl_subject_label *curr;
52161 + struct acl_subject_label *curr2;
52162 + struct task_struct *tsk, *tsk2;
52163 + const struct cred *cred;
52164 + const struct cred *cred2;
52165 +
52166 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
52167 + return;
52168 +
52169 + if (unlikely(!gr_acl_is_enabled()))
52170 + return;
52171 +
52172 + curr = task->acl;
52173 +
52174 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
52175 + return;
52176 +
52177 + if (time_before_eq(curr->expires, get_seconds())) {
52178 + curr->expires = 0;
52179 + curr->crashes = 0;
52180 + }
52181 +
52182 + curr->crashes++;
52183 +
52184 + if (!curr->expires)
52185 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
52186 +
52187 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
52188 + time_after(curr->expires, get_seconds())) {
52189 + rcu_read_lock();
52190 + cred = __task_cred(task);
52191 + if (cred->uid && proc_is_setxid(cred)) {
52192 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
52193 + spin_lock(&gr_uid_lock);
52194 + gr_insert_uid(cred->uid, curr->expires);
52195 + spin_unlock(&gr_uid_lock);
52196 + curr->expires = 0;
52197 + curr->crashes = 0;
52198 + read_lock(&tasklist_lock);
52199 + do_each_thread(tsk2, tsk) {
52200 + cred2 = __task_cred(tsk);
52201 + if (tsk != task && cred2->uid == cred->uid)
52202 + gr_fake_force_sig(SIGKILL, tsk);
52203 + } while_each_thread(tsk2, tsk);
52204 + read_unlock(&tasklist_lock);
52205 + } else {
52206 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
52207 + read_lock(&tasklist_lock);
52208 + do_each_thread(tsk2, tsk) {
52209 + if (likely(tsk != task)) {
52210 + curr2 = tsk->acl;
52211 +
52212 + if (curr2->device == curr->device &&
52213 + curr2->inode == curr->inode)
52214 + gr_fake_force_sig(SIGKILL, tsk);
52215 + }
52216 + } while_each_thread(tsk2, tsk);
52217 + read_unlock(&tasklist_lock);
52218 + }
52219 + rcu_read_unlock();
52220 + }
52221 +
52222 + return;
52223 +}
52224 +
52225 +int
52226 +gr_check_crash_exec(const struct file *filp)
52227 +{
52228 + struct acl_subject_label *curr;
52229 +
52230 + if (unlikely(!gr_acl_is_enabled()))
52231 + return 0;
52232 +
52233 + read_lock(&gr_inode_lock);
52234 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
52235 + filp->f_path.dentry->d_inode->i_sb->s_dev,
52236 + current->role);
52237 + read_unlock(&gr_inode_lock);
52238 +
52239 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
52240 + (!curr->crashes && !curr->expires))
52241 + return 0;
52242 +
52243 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
52244 + time_after(curr->expires, get_seconds()))
52245 + return 1;
52246 + else if (time_before_eq(curr->expires, get_seconds())) {
52247 + curr->crashes = 0;
52248 + curr->expires = 0;
52249 + }
52250 +
52251 + return 0;
52252 +}
52253 +
52254 +void
52255 +gr_handle_alertkill(struct task_struct *task)
52256 +{
52257 + struct acl_subject_label *curracl;
52258 + __u32 curr_ip;
52259 + struct task_struct *p, *p2;
52260 +
52261 + if (unlikely(!gr_acl_is_enabled()))
52262 + return;
52263 +
52264 + curracl = task->acl;
52265 + curr_ip = task->signal->curr_ip;
52266 +
52267 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
52268 + read_lock(&tasklist_lock);
52269 + do_each_thread(p2, p) {
52270 + if (p->signal->curr_ip == curr_ip)
52271 + gr_fake_force_sig(SIGKILL, p);
52272 + } while_each_thread(p2, p);
52273 + read_unlock(&tasklist_lock);
52274 + } else if (curracl->mode & GR_KILLPROC)
52275 + gr_fake_force_sig(SIGKILL, task);
52276 +
52277 + return;
52278 +}
52279 diff -urNp linux-2.6.32.45/grsecurity/gracl_shm.c linux-2.6.32.45/grsecurity/gracl_shm.c
52280 --- linux-2.6.32.45/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
52281 +++ linux-2.6.32.45/grsecurity/gracl_shm.c 2011-04-17 15:56:46.000000000 -0400
52282 @@ -0,0 +1,40 @@
52283 +#include <linux/kernel.h>
52284 +#include <linux/mm.h>
52285 +#include <linux/sched.h>
52286 +#include <linux/file.h>
52287 +#include <linux/ipc.h>
52288 +#include <linux/gracl.h>
52289 +#include <linux/grsecurity.h>
52290 +#include <linux/grinternal.h>
52291 +
52292 +int
52293 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
52294 + const time_t shm_createtime, const uid_t cuid, const int shmid)
52295 +{
52296 + struct task_struct *task;
52297 +
52298 + if (!gr_acl_is_enabled())
52299 + return 1;
52300 +
52301 + rcu_read_lock();
52302 + read_lock(&tasklist_lock);
52303 +
52304 + task = find_task_by_vpid(shm_cprid);
52305 +
52306 + if (unlikely(!task))
52307 + task = find_task_by_vpid(shm_lapid);
52308 +
52309 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
52310 + (task->pid == shm_lapid)) &&
52311 + (task->acl->mode & GR_PROTSHM) &&
52312 + (task->acl != current->acl))) {
52313 + read_unlock(&tasklist_lock);
52314 + rcu_read_unlock();
52315 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
52316 + return 0;
52317 + }
52318 + read_unlock(&tasklist_lock);
52319 + rcu_read_unlock();
52320 +
52321 + return 1;
52322 +}
52323 diff -urNp linux-2.6.32.45/grsecurity/grsec_chdir.c linux-2.6.32.45/grsecurity/grsec_chdir.c
52324 --- linux-2.6.32.45/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
52325 +++ linux-2.6.32.45/grsecurity/grsec_chdir.c 2011-04-17 15:56:46.000000000 -0400
52326 @@ -0,0 +1,19 @@
52327 +#include <linux/kernel.h>
52328 +#include <linux/sched.h>
52329 +#include <linux/fs.h>
52330 +#include <linux/file.h>
52331 +#include <linux/grsecurity.h>
52332 +#include <linux/grinternal.h>
52333 +
52334 +void
52335 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
52336 +{
52337 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
52338 + if ((grsec_enable_chdir && grsec_enable_group &&
52339 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
52340 + !grsec_enable_group)) {
52341 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
52342 + }
52343 +#endif
52344 + return;
52345 +}
52346 diff -urNp linux-2.6.32.45/grsecurity/grsec_chroot.c linux-2.6.32.45/grsecurity/grsec_chroot.c
52347 --- linux-2.6.32.45/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
52348 +++ linux-2.6.32.45/grsecurity/grsec_chroot.c 2011-07-18 17:14:10.000000000 -0400
52349 @@ -0,0 +1,384 @@
52350 +#include <linux/kernel.h>
52351 +#include <linux/module.h>
52352 +#include <linux/sched.h>
52353 +#include <linux/file.h>
52354 +#include <linux/fs.h>
52355 +#include <linux/mount.h>
52356 +#include <linux/types.h>
52357 +#include <linux/pid_namespace.h>
52358 +#include <linux/grsecurity.h>
52359 +#include <linux/grinternal.h>
52360 +
52361 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
52362 +{
52363 +#ifdef CONFIG_GRKERNSEC
52364 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
52365 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
52366 + task->gr_is_chrooted = 1;
52367 + else
52368 + task->gr_is_chrooted = 0;
52369 +
52370 + task->gr_chroot_dentry = path->dentry;
52371 +#endif
52372 + return;
52373 +}
52374 +
52375 +void gr_clear_chroot_entries(struct task_struct *task)
52376 +{
52377 +#ifdef CONFIG_GRKERNSEC
52378 + task->gr_is_chrooted = 0;
52379 + task->gr_chroot_dentry = NULL;
52380 +#endif
52381 + return;
52382 +}
52383 +
52384 +int
52385 +gr_handle_chroot_unix(const pid_t pid)
52386 +{
52387 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
52388 + struct task_struct *p;
52389 +
52390 + if (unlikely(!grsec_enable_chroot_unix))
52391 + return 1;
52392 +
52393 + if (likely(!proc_is_chrooted(current)))
52394 + return 1;
52395 +
52396 + rcu_read_lock();
52397 + read_lock(&tasklist_lock);
52398 +
52399 + p = find_task_by_vpid_unrestricted(pid);
52400 + if (unlikely(p && !have_same_root(current, p))) {
52401 + read_unlock(&tasklist_lock);
52402 + rcu_read_unlock();
52403 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
52404 + return 0;
52405 + }
52406 + read_unlock(&tasklist_lock);
52407 + rcu_read_unlock();
52408 +#endif
52409 + return 1;
52410 +}
52411 +
52412 +int
52413 +gr_handle_chroot_nice(void)
52414 +{
52415 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52416 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
52417 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
52418 + return -EPERM;
52419 + }
52420 +#endif
52421 + return 0;
52422 +}
52423 +
52424 +int
52425 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
52426 +{
52427 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52428 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
52429 + && proc_is_chrooted(current)) {
52430 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
52431 + return -EACCES;
52432 + }
52433 +#endif
52434 + return 0;
52435 +}
52436 +
52437 +int
52438 +gr_handle_chroot_rawio(const struct inode *inode)
52439 +{
52440 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52441 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
52442 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
52443 + return 1;
52444 +#endif
52445 + return 0;
52446 +}
52447 +
52448 +int
52449 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
52450 +{
52451 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52452 + struct task_struct *p;
52453 + int ret = 0;
52454 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
52455 + return ret;
52456 +
52457 + read_lock(&tasklist_lock);
52458 + do_each_pid_task(pid, type, p) {
52459 + if (!have_same_root(current, p)) {
52460 + ret = 1;
52461 + goto out;
52462 + }
52463 + } while_each_pid_task(pid, type, p);
52464 +out:
52465 + read_unlock(&tasklist_lock);
52466 + return ret;
52467 +#endif
52468 + return 0;
52469 +}
52470 +
52471 +int
52472 +gr_pid_is_chrooted(struct task_struct *p)
52473 +{
52474 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52475 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
52476 + return 0;
52477 +
52478 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
52479 + !have_same_root(current, p)) {
52480 + return 1;
52481 + }
52482 +#endif
52483 + return 0;
52484 +}
52485 +
52486 +EXPORT_SYMBOL(gr_pid_is_chrooted);
52487 +
52488 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
52489 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
52490 +{
52491 + struct dentry *dentry = (struct dentry *)u_dentry;
52492 + struct vfsmount *mnt = (struct vfsmount *)u_mnt;
52493 + struct dentry *realroot;
52494 + struct vfsmount *realrootmnt;
52495 + struct dentry *currentroot;
52496 + struct vfsmount *currentmnt;
52497 + struct task_struct *reaper = &init_task;
52498 + int ret = 1;
52499 +
52500 + read_lock(&reaper->fs->lock);
52501 + realrootmnt = mntget(reaper->fs->root.mnt);
52502 + realroot = dget(reaper->fs->root.dentry);
52503 + read_unlock(&reaper->fs->lock);
52504 +
52505 + read_lock(&current->fs->lock);
52506 + currentmnt = mntget(current->fs->root.mnt);
52507 + currentroot = dget(current->fs->root.dentry);
52508 + read_unlock(&current->fs->lock);
52509 +
52510 + spin_lock(&dcache_lock);
52511 + for (;;) {
52512 + if (unlikely((dentry == realroot && mnt == realrootmnt)
52513 + || (dentry == currentroot && mnt == currentmnt)))
52514 + break;
52515 + if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
52516 + if (mnt->mnt_parent == mnt)
52517 + break;
52518 + dentry = mnt->mnt_mountpoint;
52519 + mnt = mnt->mnt_parent;
52520 + continue;
52521 + }
52522 + dentry = dentry->d_parent;
52523 + }
52524 + spin_unlock(&dcache_lock);
52525 +
52526 + dput(currentroot);
52527 + mntput(currentmnt);
52528 +
52529 + /* access is outside of chroot */
52530 + if (dentry == realroot && mnt == realrootmnt)
52531 + ret = 0;
52532 +
52533 + dput(realroot);
52534 + mntput(realrootmnt);
52535 + return ret;
52536 +}
52537 +#endif
52538 +
52539 +int
52540 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
52541 +{
52542 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
52543 + if (!grsec_enable_chroot_fchdir)
52544 + return 1;
52545 +
52546 + if (!proc_is_chrooted(current))
52547 + return 1;
52548 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
52549 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
52550 + return 0;
52551 + }
52552 +#endif
52553 + return 1;
52554 +}
52555 +
52556 +int
52557 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
52558 + const time_t shm_createtime)
52559 +{
52560 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
52561 + struct task_struct *p;
52562 + time_t starttime;
52563 +
52564 + if (unlikely(!grsec_enable_chroot_shmat))
52565 + return 1;
52566 +
52567 + if (likely(!proc_is_chrooted(current)))
52568 + return 1;
52569 +
52570 + rcu_read_lock();
52571 + read_lock(&tasklist_lock);
52572 +
52573 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
52574 + starttime = p->start_time.tv_sec;
52575 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
52576 + if (have_same_root(current, p)) {
52577 + goto allow;
52578 + } else {
52579 + read_unlock(&tasklist_lock);
52580 + rcu_read_unlock();
52581 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
52582 + return 0;
52583 + }
52584 + }
52585 + /* creator exited, pid reuse, fall through to next check */
52586 + }
52587 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
52588 + if (unlikely(!have_same_root(current, p))) {
52589 + read_unlock(&tasklist_lock);
52590 + rcu_read_unlock();
52591 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
52592 + return 0;
52593 + }
52594 + }
52595 +
52596 +allow:
52597 + read_unlock(&tasklist_lock);
52598 + rcu_read_unlock();
52599 +#endif
52600 + return 1;
52601 +}
52602 +
52603 +void
52604 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
52605 +{
52606 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
52607 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
52608 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
52609 +#endif
52610 + return;
52611 +}
52612 +
52613 +int
52614 +gr_handle_chroot_mknod(const struct dentry *dentry,
52615 + const struct vfsmount *mnt, const int mode)
52616 +{
52617 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
52618 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
52619 + proc_is_chrooted(current)) {
52620 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
52621 + return -EPERM;
52622 + }
52623 +#endif
52624 + return 0;
52625 +}
52626 +
52627 +int
52628 +gr_handle_chroot_mount(const struct dentry *dentry,
52629 + const struct vfsmount *mnt, const char *dev_name)
52630 +{
52631 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
52632 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
52633 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
52634 + return -EPERM;
52635 + }
52636 +#endif
52637 + return 0;
52638 +}
52639 +
52640 +int
52641 +gr_handle_chroot_pivot(void)
52642 +{
52643 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
52644 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
52645 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
52646 + return -EPERM;
52647 + }
52648 +#endif
52649 + return 0;
52650 +}
52651 +
52652 +int
52653 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
52654 +{
52655 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
52656 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
52657 + !gr_is_outside_chroot(dentry, mnt)) {
52658 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
52659 + return -EPERM;
52660 + }
52661 +#endif
52662 + return 0;
52663 +}
52664 +
52665 +int
52666 +gr_handle_chroot_caps(struct path *path)
52667 +{
52668 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52669 + if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
52670 + (init_task.fs->root.dentry != path->dentry) &&
52671 + (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
52672 +
52673 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
52674 + const struct cred *old = current_cred();
52675 + struct cred *new = prepare_creds();
52676 + if (new == NULL)
52677 + return 1;
52678 +
52679 + new->cap_permitted = cap_drop(old->cap_permitted,
52680 + chroot_caps);
52681 + new->cap_inheritable = cap_drop(old->cap_inheritable,
52682 + chroot_caps);
52683 + new->cap_effective = cap_drop(old->cap_effective,
52684 + chroot_caps);
52685 +
52686 + commit_creds(new);
52687 +
52688 + return 0;
52689 + }
52690 +#endif
52691 + return 0;
52692 +}
52693 +
52694 +int
52695 +gr_handle_chroot_sysctl(const int op)
52696 +{
52697 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
52698 + if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
52699 + && (op & MAY_WRITE))
52700 + return -EACCES;
52701 +#endif
52702 + return 0;
52703 +}
52704 +
52705 +void
52706 +gr_handle_chroot_chdir(struct path *path)
52707 +{
52708 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
52709 + if (grsec_enable_chroot_chdir)
52710 + set_fs_pwd(current->fs, path);
52711 +#endif
52712 + return;
52713 +}
52714 +
52715 +int
52716 +gr_handle_chroot_chmod(const struct dentry *dentry,
52717 + const struct vfsmount *mnt, const int mode)
52718 +{
52719 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
52720 + /* allow chmod +s on directories, but not on files */
52721 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
52722 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
52723 + proc_is_chrooted(current)) {
52724 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
52725 + return -EPERM;
52726 + }
52727 +#endif
52728 + return 0;
52729 +}
52730 +
52731 +#ifdef CONFIG_SECURITY
52732 +EXPORT_SYMBOL(gr_handle_chroot_caps);
52733 +#endif
52734 diff -urNp linux-2.6.32.45/grsecurity/grsec_disabled.c linux-2.6.32.45/grsecurity/grsec_disabled.c
52735 --- linux-2.6.32.45/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
52736 +++ linux-2.6.32.45/grsecurity/grsec_disabled.c 2011-04-17 15:56:46.000000000 -0400
52737 @@ -0,0 +1,447 @@
52738 +#include <linux/kernel.h>
52739 +#include <linux/module.h>
52740 +#include <linux/sched.h>
52741 +#include <linux/file.h>
52742 +#include <linux/fs.h>
52743 +#include <linux/kdev_t.h>
52744 +#include <linux/net.h>
52745 +#include <linux/in.h>
52746 +#include <linux/ip.h>
52747 +#include <linux/skbuff.h>
52748 +#include <linux/sysctl.h>
52749 +
52750 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
52751 +void
52752 +pax_set_initial_flags(struct linux_binprm *bprm)
52753 +{
52754 + return;
52755 +}
52756 +#endif
52757 +
52758 +#ifdef CONFIG_SYSCTL
52759 +__u32
52760 +gr_handle_sysctl(const struct ctl_table * table, const int op)
52761 +{
52762 + return 0;
52763 +}
52764 +#endif
52765 +
52766 +#ifdef CONFIG_TASKSTATS
52767 +int gr_is_taskstats_denied(int pid)
52768 +{
52769 + return 0;
52770 +}
52771 +#endif
52772 +
52773 +int
52774 +gr_acl_is_enabled(void)
52775 +{
52776 + return 0;
52777 +}
52778 +
52779 +int
52780 +gr_handle_rawio(const struct inode *inode)
52781 +{
52782 + return 0;
52783 +}
52784 +
52785 +void
52786 +gr_acl_handle_psacct(struct task_struct *task, const long code)
52787 +{
52788 + return;
52789 +}
52790 +
52791 +int
52792 +gr_handle_ptrace(struct task_struct *task, const long request)
52793 +{
52794 + return 0;
52795 +}
52796 +
52797 +int
52798 +gr_handle_proc_ptrace(struct task_struct *task)
52799 +{
52800 + return 0;
52801 +}
52802 +
52803 +void
52804 +gr_learn_resource(const struct task_struct *task,
52805 + const int res, const unsigned long wanted, const int gt)
52806 +{
52807 + return;
52808 +}
52809 +
52810 +int
52811 +gr_set_acls(const int type)
52812 +{
52813 + return 0;
52814 +}
52815 +
52816 +int
52817 +gr_check_hidden_task(const struct task_struct *tsk)
52818 +{
52819 + return 0;
52820 +}
52821 +
52822 +int
52823 +gr_check_protected_task(const struct task_struct *task)
52824 +{
52825 + return 0;
52826 +}
52827 +
52828 +int
52829 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
52830 +{
52831 + return 0;
52832 +}
52833 +
52834 +void
52835 +gr_copy_label(struct task_struct *tsk)
52836 +{
52837 + return;
52838 +}
52839 +
52840 +void
52841 +gr_set_pax_flags(struct task_struct *task)
52842 +{
52843 + return;
52844 +}
52845 +
52846 +int
52847 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
52848 + const int unsafe_share)
52849 +{
52850 + return 0;
52851 +}
52852 +
52853 +void
52854 +gr_handle_delete(const ino_t ino, const dev_t dev)
52855 +{
52856 + return;
52857 +}
52858 +
52859 +void
52860 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
52861 +{
52862 + return;
52863 +}
52864 +
52865 +void
52866 +gr_handle_crash(struct task_struct *task, const int sig)
52867 +{
52868 + return;
52869 +}
52870 +
52871 +int
52872 +gr_check_crash_exec(const struct file *filp)
52873 +{
52874 + return 0;
52875 +}
52876 +
52877 +int
52878 +gr_check_crash_uid(const uid_t uid)
52879 +{
52880 + return 0;
52881 +}
52882 +
52883 +void
52884 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
52885 + struct dentry *old_dentry,
52886 + struct dentry *new_dentry,
52887 + struct vfsmount *mnt, const __u8 replace)
52888 +{
52889 + return;
52890 +}
52891 +
52892 +int
52893 +gr_search_socket(const int family, const int type, const int protocol)
52894 +{
52895 + return 1;
52896 +}
52897 +
52898 +int
52899 +gr_search_connectbind(const int mode, const struct socket *sock,
52900 + const struct sockaddr_in *addr)
52901 +{
52902 + return 0;
52903 +}
52904 +
52905 +int
52906 +gr_is_capable(const int cap)
52907 +{
52908 + return 1;
52909 +}
52910 +
52911 +int
52912 +gr_is_capable_nolog(const int cap)
52913 +{
52914 + return 1;
52915 +}
52916 +
52917 +void
52918 +gr_handle_alertkill(struct task_struct *task)
52919 +{
52920 + return;
52921 +}
52922 +
52923 +__u32
52924 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
52925 +{
52926 + return 1;
52927 +}
52928 +
52929 +__u32
52930 +gr_acl_handle_hidden_file(const struct dentry * dentry,
52931 + const struct vfsmount * mnt)
52932 +{
52933 + return 1;
52934 +}
52935 +
52936 +__u32
52937 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
52938 + const int fmode)
52939 +{
52940 + return 1;
52941 +}
52942 +
52943 +__u32
52944 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
52945 +{
52946 + return 1;
52947 +}
52948 +
52949 +__u32
52950 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
52951 +{
52952 + return 1;
52953 +}
52954 +
52955 +int
52956 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
52957 + unsigned int *vm_flags)
52958 +{
52959 + return 1;
52960 +}
52961 +
52962 +__u32
52963 +gr_acl_handle_truncate(const struct dentry * dentry,
52964 + const struct vfsmount * mnt)
52965 +{
52966 + return 1;
52967 +}
52968 +
52969 +__u32
52970 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
52971 +{
52972 + return 1;
52973 +}
52974 +
52975 +__u32
52976 +gr_acl_handle_access(const struct dentry * dentry,
52977 + const struct vfsmount * mnt, const int fmode)
52978 +{
52979 + return 1;
52980 +}
52981 +
52982 +__u32
52983 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
52984 + mode_t mode)
52985 +{
52986 + return 1;
52987 +}
52988 +
52989 +__u32
52990 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
52991 + mode_t mode)
52992 +{
52993 + return 1;
52994 +}
52995 +
52996 +__u32
52997 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
52998 +{
52999 + return 1;
53000 +}
53001 +
53002 +__u32
53003 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
53004 +{
53005 + return 1;
53006 +}
53007 +
53008 +void
53009 +grsecurity_init(void)
53010 +{
53011 + return;
53012 +}
53013 +
53014 +__u32
53015 +gr_acl_handle_mknod(const struct dentry * new_dentry,
53016 + const struct dentry * parent_dentry,
53017 + const struct vfsmount * parent_mnt,
53018 + const int mode)
53019 +{
53020 + return 1;
53021 +}
53022 +
53023 +__u32
53024 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
53025 + const struct dentry * parent_dentry,
53026 + const struct vfsmount * parent_mnt)
53027 +{
53028 + return 1;
53029 +}
53030 +
53031 +__u32
53032 +gr_acl_handle_symlink(const struct dentry * new_dentry,
53033 + const struct dentry * parent_dentry,
53034 + const struct vfsmount * parent_mnt, const char *from)
53035 +{
53036 + return 1;
53037 +}
53038 +
53039 +__u32
53040 +gr_acl_handle_link(const struct dentry * new_dentry,
53041 + const struct dentry * parent_dentry,
53042 + const struct vfsmount * parent_mnt,
53043 + const struct dentry * old_dentry,
53044 + const struct vfsmount * old_mnt, const char *to)
53045 +{
53046 + return 1;
53047 +}
53048 +
53049 +int
53050 +gr_acl_handle_rename(const struct dentry *new_dentry,
53051 + const struct dentry *parent_dentry,
53052 + const struct vfsmount *parent_mnt,
53053 + const struct dentry *old_dentry,
53054 + const struct inode *old_parent_inode,
53055 + const struct vfsmount *old_mnt, const char *newname)
53056 +{
53057 + return 0;
53058 +}
53059 +
53060 +int
53061 +gr_acl_handle_filldir(const struct file *file, const char *name,
53062 + const int namelen, const ino_t ino)
53063 +{
53064 + return 1;
53065 +}
53066 +
53067 +int
53068 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
53069 + const time_t shm_createtime, const uid_t cuid, const int shmid)
53070 +{
53071 + return 1;
53072 +}
53073 +
53074 +int
53075 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
53076 +{
53077 + return 0;
53078 +}
53079 +
53080 +int
53081 +gr_search_accept(const struct socket *sock)
53082 +{
53083 + return 0;
53084 +}
53085 +
53086 +int
53087 +gr_search_listen(const struct socket *sock)
53088 +{
53089 + return 0;
53090 +}
53091 +
53092 +int
53093 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
53094 +{
53095 + return 0;
53096 +}
53097 +
53098 +__u32
53099 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
53100 +{
53101 + return 1;
53102 +}
53103 +
53104 +__u32
53105 +gr_acl_handle_creat(const struct dentry * dentry,
53106 + const struct dentry * p_dentry,
53107 + const struct vfsmount * p_mnt, const int fmode,
53108 + const int imode)
53109 +{
53110 + return 1;
53111 +}
53112 +
53113 +void
53114 +gr_acl_handle_exit(void)
53115 +{
53116 + return;
53117 +}
53118 +
53119 +int
53120 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
53121 +{
53122 + return 1;
53123 +}
53124 +
53125 +void
53126 +gr_set_role_label(const uid_t uid, const gid_t gid)
53127 +{
53128 + return;
53129 +}
53130 +
53131 +int
53132 +gr_acl_handle_procpidmem(const struct task_struct *task)
53133 +{
53134 + return 0;
53135 +}
53136 +
53137 +int
53138 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
53139 +{
53140 + return 0;
53141 +}
53142 +
53143 +int
53144 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
53145 +{
53146 + return 0;
53147 +}
53148 +
53149 +void
53150 +gr_set_kernel_label(struct task_struct *task)
53151 +{
53152 + return;
53153 +}
53154 +
53155 +int
53156 +gr_check_user_change(int real, int effective, int fs)
53157 +{
53158 + return 0;
53159 +}
53160 +
53161 +int
53162 +gr_check_group_change(int real, int effective, int fs)
53163 +{
53164 + return 0;
53165 +}
53166 +
53167 +int gr_acl_enable_at_secure(void)
53168 +{
53169 + return 0;
53170 +}
53171 +
53172 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
53173 +{
53174 + return dentry->d_inode->i_sb->s_dev;
53175 +}
53176 +
53177 +EXPORT_SYMBOL(gr_is_capable);
53178 +EXPORT_SYMBOL(gr_is_capable_nolog);
53179 +EXPORT_SYMBOL(gr_learn_resource);
53180 +EXPORT_SYMBOL(gr_set_kernel_label);
53181 +#ifdef CONFIG_SECURITY
53182 +EXPORT_SYMBOL(gr_check_user_change);
53183 +EXPORT_SYMBOL(gr_check_group_change);
53184 +#endif
53185 diff -urNp linux-2.6.32.45/grsecurity/grsec_exec.c linux-2.6.32.45/grsecurity/grsec_exec.c
53186 --- linux-2.6.32.45/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
53187 +++ linux-2.6.32.45/grsecurity/grsec_exec.c 2011-08-11 19:57:19.000000000 -0400
53188 @@ -0,0 +1,132 @@
53189 +#include <linux/kernel.h>
53190 +#include <linux/sched.h>
53191 +#include <linux/file.h>
53192 +#include <linux/binfmts.h>
53193 +#include <linux/smp_lock.h>
53194 +#include <linux/fs.h>
53195 +#include <linux/types.h>
53196 +#include <linux/grdefs.h>
53197 +#include <linux/grinternal.h>
53198 +#include <linux/capability.h>
53199 +#include <linux/compat.h>
53200 +
53201 +#include <asm/uaccess.h>
53202 +
53203 +#ifdef CONFIG_GRKERNSEC_EXECLOG
53204 +static char gr_exec_arg_buf[132];
53205 +static DEFINE_MUTEX(gr_exec_arg_mutex);
53206 +#endif
53207 +
53208 +void
53209 +gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
53210 +{
53211 +#ifdef CONFIG_GRKERNSEC_EXECLOG
53212 + char *grarg = gr_exec_arg_buf;
53213 + unsigned int i, x, execlen = 0;
53214 + char c;
53215 +
53216 + if (!((grsec_enable_execlog && grsec_enable_group &&
53217 + in_group_p(grsec_audit_gid))
53218 + || (grsec_enable_execlog && !grsec_enable_group)))
53219 + return;
53220 +
53221 + mutex_lock(&gr_exec_arg_mutex);
53222 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
53223 +
53224 + if (unlikely(argv == NULL))
53225 + goto log;
53226 +
53227 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
53228 + const char __user *p;
53229 + unsigned int len;
53230 +
53231 + if (copy_from_user(&p, argv + i, sizeof(p)))
53232 + goto log;
53233 + if (!p)
53234 + goto log;
53235 + len = strnlen_user(p, 128 - execlen);
53236 + if (len > 128 - execlen)
53237 + len = 128 - execlen;
53238 + else if (len > 0)
53239 + len--;
53240 + if (copy_from_user(grarg + execlen, p, len))
53241 + goto log;
53242 +
53243 + /* rewrite unprintable characters */
53244 + for (x = 0; x < len; x++) {
53245 + c = *(grarg + execlen + x);
53246 + if (c < 32 || c > 126)
53247 + *(grarg + execlen + x) = ' ';
53248 + }
53249 +
53250 + execlen += len;
53251 + *(grarg + execlen) = ' ';
53252 + *(grarg + execlen + 1) = '\0';
53253 + execlen++;
53254 + }
53255 +
53256 + log:
53257 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
53258 + bprm->file->f_path.mnt, grarg);
53259 + mutex_unlock(&gr_exec_arg_mutex);
53260 +#endif
53261 + return;
53262 +}
53263 +
53264 +#ifdef CONFIG_COMPAT
53265 +void
53266 +gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
53267 +{
53268 +#ifdef CONFIG_GRKERNSEC_EXECLOG
53269 + char *grarg = gr_exec_arg_buf;
53270 + unsigned int i, x, execlen = 0;
53271 + char c;
53272 +
53273 + if (!((grsec_enable_execlog && grsec_enable_group &&
53274 + in_group_p(grsec_audit_gid))
53275 + || (grsec_enable_execlog && !grsec_enable_group)))
53276 + return;
53277 +
53278 + mutex_lock(&gr_exec_arg_mutex);
53279 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
53280 +
53281 + if (unlikely(argv == NULL))
53282 + goto log;
53283 +
53284 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
53285 + compat_uptr_t p;
53286 + unsigned int len;
53287 +
53288 + if (get_user(p, argv + i))
53289 + goto log;
53290 + len = strnlen_user(compat_ptr(p), 128 - execlen);
53291 + if (len > 128 - execlen)
53292 + len = 128 - execlen;
53293 + else if (len > 0)
53294 + len--;
53295 + else
53296 + goto log;
53297 + if (copy_from_user(grarg + execlen, compat_ptr(p), len))
53298 + goto log;
53299 +
53300 + /* rewrite unprintable characters */
53301 + for (x = 0; x < len; x++) {
53302 + c = *(grarg + execlen + x);
53303 + if (c < 32 || c > 126)
53304 + *(grarg + execlen + x) = ' ';
53305 + }
53306 +
53307 + execlen += len;
53308 + *(grarg + execlen) = ' ';
53309 + *(grarg + execlen + 1) = '\0';
53310 + execlen++;
53311 + }
53312 +
53313 + log:
53314 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
53315 + bprm->file->f_path.mnt, grarg);
53316 + mutex_unlock(&gr_exec_arg_mutex);
53317 +#endif
53318 + return;
53319 +}
53320 +#endif
53321 diff -urNp linux-2.6.32.45/grsecurity/grsec_fifo.c linux-2.6.32.45/grsecurity/grsec_fifo.c
53322 --- linux-2.6.32.45/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
53323 +++ linux-2.6.32.45/grsecurity/grsec_fifo.c 2011-04-17 15:56:46.000000000 -0400
53324 @@ -0,0 +1,24 @@
53325 +#include <linux/kernel.h>
53326 +#include <linux/sched.h>
53327 +#include <linux/fs.h>
53328 +#include <linux/file.h>
53329 +#include <linux/grinternal.h>
53330 +
53331 +int
53332 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
53333 + const struct dentry *dir, const int flag, const int acc_mode)
53334 +{
53335 +#ifdef CONFIG_GRKERNSEC_FIFO
53336 + const struct cred *cred = current_cred();
53337 +
53338 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
53339 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
53340 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
53341 + (cred->fsuid != dentry->d_inode->i_uid)) {
53342 + if (!inode_permission(dentry->d_inode, acc_mode))
53343 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
53344 + return -EACCES;
53345 + }
53346 +#endif
53347 + return 0;
53348 +}
53349 diff -urNp linux-2.6.32.45/grsecurity/grsec_fork.c linux-2.6.32.45/grsecurity/grsec_fork.c
53350 --- linux-2.6.32.45/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
53351 +++ linux-2.6.32.45/grsecurity/grsec_fork.c 2011-04-17 15:56:46.000000000 -0400
53352 @@ -0,0 +1,23 @@
53353 +#include <linux/kernel.h>
53354 +#include <linux/sched.h>
53355 +#include <linux/grsecurity.h>
53356 +#include <linux/grinternal.h>
53357 +#include <linux/errno.h>
53358 +
53359 +void
53360 +gr_log_forkfail(const int retval)
53361 +{
53362 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
53363 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
53364 + switch (retval) {
53365 + case -EAGAIN:
53366 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
53367 + break;
53368 + case -ENOMEM:
53369 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
53370 + break;
53371 + }
53372 + }
53373 +#endif
53374 + return;
53375 +}
53376 diff -urNp linux-2.6.32.45/grsecurity/grsec_init.c linux-2.6.32.45/grsecurity/grsec_init.c
53377 --- linux-2.6.32.45/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
53378 +++ linux-2.6.32.45/grsecurity/grsec_init.c 2011-08-11 19:57:42.000000000 -0400
53379 @@ -0,0 +1,270 @@
53380 +#include <linux/kernel.h>
53381 +#include <linux/sched.h>
53382 +#include <linux/mm.h>
53383 +#include <linux/smp_lock.h>
53384 +#include <linux/gracl.h>
53385 +#include <linux/slab.h>
53386 +#include <linux/vmalloc.h>
53387 +#include <linux/percpu.h>
53388 +#include <linux/module.h>
53389 +
53390 +int grsec_enable_brute;
53391 +int grsec_enable_link;
53392 +int grsec_enable_dmesg;
53393 +int grsec_enable_harden_ptrace;
53394 +int grsec_enable_fifo;
53395 +int grsec_enable_execlog;
53396 +int grsec_enable_signal;
53397 +int grsec_enable_forkfail;
53398 +int grsec_enable_audit_ptrace;
53399 +int grsec_enable_time;
53400 +int grsec_enable_audit_textrel;
53401 +int grsec_enable_group;
53402 +int grsec_audit_gid;
53403 +int grsec_enable_chdir;
53404 +int grsec_enable_mount;
53405 +int grsec_enable_rofs;
53406 +int grsec_enable_chroot_findtask;
53407 +int grsec_enable_chroot_mount;
53408 +int grsec_enable_chroot_shmat;
53409 +int grsec_enable_chroot_fchdir;
53410 +int grsec_enable_chroot_double;
53411 +int grsec_enable_chroot_pivot;
53412 +int grsec_enable_chroot_chdir;
53413 +int grsec_enable_chroot_chmod;
53414 +int grsec_enable_chroot_mknod;
53415 +int grsec_enable_chroot_nice;
53416 +int grsec_enable_chroot_execlog;
53417 +int grsec_enable_chroot_caps;
53418 +int grsec_enable_chroot_sysctl;
53419 +int grsec_enable_chroot_unix;
53420 +int grsec_enable_tpe;
53421 +int grsec_tpe_gid;
53422 +int grsec_enable_blackhole;
53423 +#ifdef CONFIG_IPV6_MODULE
53424 +EXPORT_SYMBOL(grsec_enable_blackhole);
53425 +#endif
53426 +int grsec_lastack_retries;
53427 +int grsec_enable_tpe_all;
53428 +int grsec_enable_tpe_invert;
53429 +int grsec_enable_socket_all;
53430 +int grsec_socket_all_gid;
53431 +int grsec_enable_socket_client;
53432 +int grsec_socket_client_gid;
53433 +int grsec_enable_socket_server;
53434 +int grsec_socket_server_gid;
53435 +int grsec_resource_logging;
53436 +int grsec_disable_privio;
53437 +int grsec_enable_log_rwxmaps;
53438 +int grsec_lock;
53439 +
53440 +DEFINE_SPINLOCK(grsec_alert_lock);
53441 +unsigned long grsec_alert_wtime = 0;
53442 +unsigned long grsec_alert_fyet = 0;
53443 +
53444 +DEFINE_SPINLOCK(grsec_audit_lock);
53445 +
53446 +DEFINE_RWLOCK(grsec_exec_file_lock);
53447 +
53448 +char *gr_shared_page[4];
53449 +
53450 +char *gr_alert_log_fmt;
53451 +char *gr_audit_log_fmt;
53452 +char *gr_alert_log_buf;
53453 +char *gr_audit_log_buf;
53454 +
53455 +extern struct gr_arg *gr_usermode;
53456 +extern unsigned char *gr_system_salt;
53457 +extern unsigned char *gr_system_sum;
53458 +
53459 +void __init
53460 +grsecurity_init(void)
53461 +{
53462 + int j;
53463 + /* create the per-cpu shared pages */
53464 +
53465 +#ifdef CONFIG_X86
53466 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
53467 +#endif
53468 +
53469 + for (j = 0; j < 4; j++) {
53470 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
53471 + if (gr_shared_page[j] == NULL) {
53472 + panic("Unable to allocate grsecurity shared page");
53473 + return;
53474 + }
53475 + }
53476 +
53477 + /* allocate log buffers */
53478 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
53479 + if (!gr_alert_log_fmt) {
53480 + panic("Unable to allocate grsecurity alert log format buffer");
53481 + return;
53482 + }
53483 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
53484 + if (!gr_audit_log_fmt) {
53485 + panic("Unable to allocate grsecurity audit log format buffer");
53486 + return;
53487 + }
53488 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
53489 + if (!gr_alert_log_buf) {
53490 + panic("Unable to allocate grsecurity alert log buffer");
53491 + return;
53492 + }
53493 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
53494 + if (!gr_audit_log_buf) {
53495 + panic("Unable to allocate grsecurity audit log buffer");
53496 + return;
53497 + }
53498 +
53499 + /* allocate memory for authentication structure */
53500 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
53501 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
53502 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
53503 +
53504 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
53505 + panic("Unable to allocate grsecurity authentication structure");
53506 + return;
53507 + }
53508 +
53509 +
53510 +#ifdef CONFIG_GRKERNSEC_IO
53511 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
53512 + grsec_disable_privio = 1;
53513 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
53514 + grsec_disable_privio = 1;
53515 +#else
53516 + grsec_disable_privio = 0;
53517 +#endif
53518 +#endif
53519 +
53520 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
53521 + /* for backward compatibility, tpe_invert always defaults to on if
53522 + enabled in the kernel
53523 + */
53524 + grsec_enable_tpe_invert = 1;
53525 +#endif
53526 +
53527 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
53528 +#ifndef CONFIG_GRKERNSEC_SYSCTL
53529 + grsec_lock = 1;
53530 +#endif
53531 +
53532 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
53533 + grsec_enable_audit_textrel = 1;
53534 +#endif
53535 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
53536 + grsec_enable_log_rwxmaps = 1;
53537 +#endif
53538 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
53539 + grsec_enable_group = 1;
53540 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
53541 +#endif
53542 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
53543 + grsec_enable_chdir = 1;
53544 +#endif
53545 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
53546 + grsec_enable_harden_ptrace = 1;
53547 +#endif
53548 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
53549 + grsec_enable_mount = 1;
53550 +#endif
53551 +#ifdef CONFIG_GRKERNSEC_LINK
53552 + grsec_enable_link = 1;
53553 +#endif
53554 +#ifdef CONFIG_GRKERNSEC_BRUTE
53555 + grsec_enable_brute = 1;
53556 +#endif
53557 +#ifdef CONFIG_GRKERNSEC_DMESG
53558 + grsec_enable_dmesg = 1;
53559 +#endif
53560 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
53561 + grsec_enable_blackhole = 1;
53562 + grsec_lastack_retries = 4;
53563 +#endif
53564 +#ifdef CONFIG_GRKERNSEC_FIFO
53565 + grsec_enable_fifo = 1;
53566 +#endif
53567 +#ifdef CONFIG_GRKERNSEC_EXECLOG
53568 + grsec_enable_execlog = 1;
53569 +#endif
53570 +#ifdef CONFIG_GRKERNSEC_SIGNAL
53571 + grsec_enable_signal = 1;
53572 +#endif
53573 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
53574 + grsec_enable_forkfail = 1;
53575 +#endif
53576 +#ifdef CONFIG_GRKERNSEC_TIME
53577 + grsec_enable_time = 1;
53578 +#endif
53579 +#ifdef CONFIG_GRKERNSEC_RESLOG
53580 + grsec_resource_logging = 1;
53581 +#endif
53582 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53583 + grsec_enable_chroot_findtask = 1;
53584 +#endif
53585 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
53586 + grsec_enable_chroot_unix = 1;
53587 +#endif
53588 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
53589 + grsec_enable_chroot_mount = 1;
53590 +#endif
53591 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
53592 + grsec_enable_chroot_fchdir = 1;
53593 +#endif
53594 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
53595 + grsec_enable_chroot_shmat = 1;
53596 +#endif
53597 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
53598 + grsec_enable_audit_ptrace = 1;
53599 +#endif
53600 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
53601 + grsec_enable_chroot_double = 1;
53602 +#endif
53603 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
53604 + grsec_enable_chroot_pivot = 1;
53605 +#endif
53606 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
53607 + grsec_enable_chroot_chdir = 1;
53608 +#endif
53609 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
53610 + grsec_enable_chroot_chmod = 1;
53611 +#endif
53612 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
53613 + grsec_enable_chroot_mknod = 1;
53614 +#endif
53615 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
53616 + grsec_enable_chroot_nice = 1;
53617 +#endif
53618 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
53619 + grsec_enable_chroot_execlog = 1;
53620 +#endif
53621 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53622 + grsec_enable_chroot_caps = 1;
53623 +#endif
53624 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
53625 + grsec_enable_chroot_sysctl = 1;
53626 +#endif
53627 +#ifdef CONFIG_GRKERNSEC_TPE
53628 + grsec_enable_tpe = 1;
53629 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
53630 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
53631 + grsec_enable_tpe_all = 1;
53632 +#endif
53633 +#endif
53634 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
53635 + grsec_enable_socket_all = 1;
53636 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
53637 +#endif
53638 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
53639 + grsec_enable_socket_client = 1;
53640 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
53641 +#endif
53642 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
53643 + grsec_enable_socket_server = 1;
53644 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
53645 +#endif
53646 +#endif
53647 +
53648 + return;
53649 +}
53650 diff -urNp linux-2.6.32.45/grsecurity/grsec_link.c linux-2.6.32.45/grsecurity/grsec_link.c
53651 --- linux-2.6.32.45/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
53652 +++ linux-2.6.32.45/grsecurity/grsec_link.c 2011-04-17 15:56:46.000000000 -0400
53653 @@ -0,0 +1,43 @@
53654 +#include <linux/kernel.h>
53655 +#include <linux/sched.h>
53656 +#include <linux/fs.h>
53657 +#include <linux/file.h>
53658 +#include <linux/grinternal.h>
53659 +
53660 +int
53661 +gr_handle_follow_link(const struct inode *parent,
53662 + const struct inode *inode,
53663 + const struct dentry *dentry, const struct vfsmount *mnt)
53664 +{
53665 +#ifdef CONFIG_GRKERNSEC_LINK
53666 + const struct cred *cred = current_cred();
53667 +
53668 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
53669 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
53670 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
53671 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
53672 + return -EACCES;
53673 + }
53674 +#endif
53675 + return 0;
53676 +}
53677 +
53678 +int
53679 +gr_handle_hardlink(const struct dentry *dentry,
53680 + const struct vfsmount *mnt,
53681 + struct inode *inode, const int mode, const char *to)
53682 +{
53683 +#ifdef CONFIG_GRKERNSEC_LINK
53684 + const struct cred *cred = current_cred();
53685 +
53686 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
53687 + (!S_ISREG(mode) || (mode & S_ISUID) ||
53688 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
53689 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
53690 + !capable(CAP_FOWNER) && cred->uid) {
53691 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
53692 + return -EPERM;
53693 + }
53694 +#endif
53695 + return 0;
53696 +}
53697 diff -urNp linux-2.6.32.45/grsecurity/grsec_log.c linux-2.6.32.45/grsecurity/grsec_log.c
53698 --- linux-2.6.32.45/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
53699 +++ linux-2.6.32.45/grsecurity/grsec_log.c 2011-05-10 21:58:49.000000000 -0400
53700 @@ -0,0 +1,310 @@
53701 +#include <linux/kernel.h>
53702 +#include <linux/sched.h>
53703 +#include <linux/file.h>
53704 +#include <linux/tty.h>
53705 +#include <linux/fs.h>
53706 +#include <linux/grinternal.h>
53707 +
53708 +#ifdef CONFIG_TREE_PREEMPT_RCU
53709 +#define DISABLE_PREEMPT() preempt_disable()
53710 +#define ENABLE_PREEMPT() preempt_enable()
53711 +#else
53712 +#define DISABLE_PREEMPT()
53713 +#define ENABLE_PREEMPT()
53714 +#endif
53715 +
53716 +#define BEGIN_LOCKS(x) \
53717 + DISABLE_PREEMPT(); \
53718 + rcu_read_lock(); \
53719 + read_lock(&tasklist_lock); \
53720 + read_lock(&grsec_exec_file_lock); \
53721 + if (x != GR_DO_AUDIT) \
53722 + spin_lock(&grsec_alert_lock); \
53723 + else \
53724 + spin_lock(&grsec_audit_lock)
53725 +
53726 +#define END_LOCKS(x) \
53727 + if (x != GR_DO_AUDIT) \
53728 + spin_unlock(&grsec_alert_lock); \
53729 + else \
53730 + spin_unlock(&grsec_audit_lock); \
53731 + read_unlock(&grsec_exec_file_lock); \
53732 + read_unlock(&tasklist_lock); \
53733 + rcu_read_unlock(); \
53734 + ENABLE_PREEMPT(); \
53735 + if (x == GR_DONT_AUDIT) \
53736 + gr_handle_alertkill(current)
53737 +
53738 +enum {
53739 + FLOODING,
53740 + NO_FLOODING
53741 +};
53742 +
53743 +extern char *gr_alert_log_fmt;
53744 +extern char *gr_audit_log_fmt;
53745 +extern char *gr_alert_log_buf;
53746 +extern char *gr_audit_log_buf;
53747 +
53748 +static int gr_log_start(int audit)
53749 +{
53750 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
53751 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
53752 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
53753 +
53754 + if (audit == GR_DO_AUDIT)
53755 + goto set_fmt;
53756 +
53757 + if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
53758 + grsec_alert_wtime = jiffies;
53759 + grsec_alert_fyet = 0;
53760 + } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
53761 + grsec_alert_fyet++;
53762 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
53763 + grsec_alert_wtime = jiffies;
53764 + grsec_alert_fyet++;
53765 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
53766 + return FLOODING;
53767 + } else return FLOODING;
53768 +
53769 +set_fmt:
53770 + memset(buf, 0, PAGE_SIZE);
53771 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
53772 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
53773 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
53774 + } else if (current->signal->curr_ip) {
53775 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
53776 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
53777 + } else if (gr_acl_is_enabled()) {
53778 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
53779 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
53780 + } else {
53781 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
53782 + strcpy(buf, fmt);
53783 + }
53784 +
53785 + return NO_FLOODING;
53786 +}
53787 +
53788 +static void gr_log_middle(int audit, const char *msg, va_list ap)
53789 + __attribute__ ((format (printf, 2, 0)));
53790 +
53791 +static void gr_log_middle(int audit, const char *msg, va_list ap)
53792 +{
53793 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
53794 + unsigned int len = strlen(buf);
53795 +
53796 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
53797 +
53798 + return;
53799 +}
53800 +
53801 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
53802 + __attribute__ ((format (printf, 2, 3)));
53803 +
53804 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
53805 +{
53806 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
53807 + unsigned int len = strlen(buf);
53808 + va_list ap;
53809 +
53810 + va_start(ap, msg);
53811 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
53812 + va_end(ap);
53813 +
53814 + return;
53815 +}
53816 +
53817 +static void gr_log_end(int audit)
53818 +{
53819 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
53820 + unsigned int len = strlen(buf);
53821 +
53822 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
53823 + printk("%s\n", buf);
53824 +
53825 + return;
53826 +}
53827 +
53828 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
53829 +{
53830 + int logtype;
53831 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
53832 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
53833 + void *voidptr = NULL;
53834 + int num1 = 0, num2 = 0;
53835 + unsigned long ulong1 = 0, ulong2 = 0;
53836 + struct dentry *dentry = NULL;
53837 + struct vfsmount *mnt = NULL;
53838 + struct file *file = NULL;
53839 + struct task_struct *task = NULL;
53840 + const struct cred *cred, *pcred;
53841 + va_list ap;
53842 +
53843 + BEGIN_LOCKS(audit);
53844 + logtype = gr_log_start(audit);
53845 + if (logtype == FLOODING) {
53846 + END_LOCKS(audit);
53847 + return;
53848 + }
53849 + va_start(ap, argtypes);
53850 + switch (argtypes) {
53851 + case GR_TTYSNIFF:
53852 + task = va_arg(ap, struct task_struct *);
53853 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
53854 + break;
53855 + case GR_SYSCTL_HIDDEN:
53856 + str1 = va_arg(ap, char *);
53857 + gr_log_middle_varargs(audit, msg, result, str1);
53858 + break;
53859 + case GR_RBAC:
53860 + dentry = va_arg(ap, struct dentry *);
53861 + mnt = va_arg(ap, struct vfsmount *);
53862 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
53863 + break;
53864 + case GR_RBAC_STR:
53865 + dentry = va_arg(ap, struct dentry *);
53866 + mnt = va_arg(ap, struct vfsmount *);
53867 + str1 = va_arg(ap, char *);
53868 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
53869 + break;
53870 + case GR_STR_RBAC:
53871 + str1 = va_arg(ap, char *);
53872 + dentry = va_arg(ap, struct dentry *);
53873 + mnt = va_arg(ap, struct vfsmount *);
53874 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
53875 + break;
53876 + case GR_RBAC_MODE2:
53877 + dentry = va_arg(ap, struct dentry *);
53878 + mnt = va_arg(ap, struct vfsmount *);
53879 + str1 = va_arg(ap, char *);
53880 + str2 = va_arg(ap, char *);
53881 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
53882 + break;
53883 + case GR_RBAC_MODE3:
53884 + dentry = va_arg(ap, struct dentry *);
53885 + mnt = va_arg(ap, struct vfsmount *);
53886 + str1 = va_arg(ap, char *);
53887 + str2 = va_arg(ap, char *);
53888 + str3 = va_arg(ap, char *);
53889 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
53890 + break;
53891 + case GR_FILENAME:
53892 + dentry = va_arg(ap, struct dentry *);
53893 + mnt = va_arg(ap, struct vfsmount *);
53894 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
53895 + break;
53896 + case GR_STR_FILENAME:
53897 + str1 = va_arg(ap, char *);
53898 + dentry = va_arg(ap, struct dentry *);
53899 + mnt = va_arg(ap, struct vfsmount *);
53900 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
53901 + break;
53902 + case GR_FILENAME_STR:
53903 + dentry = va_arg(ap, struct dentry *);
53904 + mnt = va_arg(ap, struct vfsmount *);
53905 + str1 = va_arg(ap, char *);
53906 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
53907 + break;
53908 + case GR_FILENAME_TWO_INT:
53909 + dentry = va_arg(ap, struct dentry *);
53910 + mnt = va_arg(ap, struct vfsmount *);
53911 + num1 = va_arg(ap, int);
53912 + num2 = va_arg(ap, int);
53913 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
53914 + break;
53915 + case GR_FILENAME_TWO_INT_STR:
53916 + dentry = va_arg(ap, struct dentry *);
53917 + mnt = va_arg(ap, struct vfsmount *);
53918 + num1 = va_arg(ap, int);
53919 + num2 = va_arg(ap, int);
53920 + str1 = va_arg(ap, char *);
53921 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
53922 + break;
53923 + case GR_TEXTREL:
53924 + file = va_arg(ap, struct file *);
53925 + ulong1 = va_arg(ap, unsigned long);
53926 + ulong2 = va_arg(ap, unsigned long);
53927 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
53928 + break;
53929 + case GR_PTRACE:
53930 + task = va_arg(ap, struct task_struct *);
53931 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
53932 + break;
53933 + case GR_RESOURCE:
53934 + task = va_arg(ap, struct task_struct *);
53935 + cred = __task_cred(task);
53936 + pcred = __task_cred(task->real_parent);
53937 + ulong1 = va_arg(ap, unsigned long);
53938 + str1 = va_arg(ap, char *);
53939 + ulong2 = va_arg(ap, unsigned long);
53940 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
53941 + break;
53942 + case GR_CAP:
53943 + task = va_arg(ap, struct task_struct *);
53944 + cred = __task_cred(task);
53945 + pcred = __task_cred(task->real_parent);
53946 + str1 = va_arg(ap, char *);
53947 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
53948 + break;
53949 + case GR_SIG:
53950 + str1 = va_arg(ap, char *);
53951 + voidptr = va_arg(ap, void *);
53952 + gr_log_middle_varargs(audit, msg, str1, voidptr);
53953 + break;
53954 + case GR_SIG2:
53955 + task = va_arg(ap, struct task_struct *);
53956 + cred = __task_cred(task);
53957 + pcred = __task_cred(task->real_parent);
53958 + num1 = va_arg(ap, int);
53959 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
53960 + break;
53961 + case GR_CRASH1:
53962 + task = va_arg(ap, struct task_struct *);
53963 + cred = __task_cred(task);
53964 + pcred = __task_cred(task->real_parent);
53965 + ulong1 = va_arg(ap, unsigned long);
53966 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
53967 + break;
53968 + case GR_CRASH2:
53969 + task = va_arg(ap, struct task_struct *);
53970 + cred = __task_cred(task);
53971 + pcred = __task_cred(task->real_parent);
53972 + ulong1 = va_arg(ap, unsigned long);
53973 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
53974 + break;
53975 + case GR_RWXMAP:
53976 + file = va_arg(ap, struct file *);
53977 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
53978 + break;
53979 + case GR_PSACCT:
53980 + {
53981 + unsigned int wday, cday;
53982 + __u8 whr, chr;
53983 + __u8 wmin, cmin;
53984 + __u8 wsec, csec;
53985 + char cur_tty[64] = { 0 };
53986 + char parent_tty[64] = { 0 };
53987 +
53988 + task = va_arg(ap, struct task_struct *);
53989 + wday = va_arg(ap, unsigned int);
53990 + cday = va_arg(ap, unsigned int);
53991 + whr = va_arg(ap, int);
53992 + chr = va_arg(ap, int);
53993 + wmin = va_arg(ap, int);
53994 + cmin = va_arg(ap, int);
53995 + wsec = va_arg(ap, int);
53996 + csec = va_arg(ap, int);
53997 + ulong1 = va_arg(ap, unsigned long);
53998 + cred = __task_cred(task);
53999 + pcred = __task_cred(task->real_parent);
54000 +
54001 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54002 + }
54003 + break;
54004 + default:
54005 + gr_log_middle(audit, msg, ap);
54006 + }
54007 + va_end(ap);
54008 + gr_log_end(audit);
54009 + END_LOCKS(audit);
54010 +}
54011 diff -urNp linux-2.6.32.45/grsecurity/grsec_mem.c linux-2.6.32.45/grsecurity/grsec_mem.c
54012 --- linux-2.6.32.45/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
54013 +++ linux-2.6.32.45/grsecurity/grsec_mem.c 2011-04-17 15:56:46.000000000 -0400
54014 @@ -0,0 +1,33 @@
54015 +#include <linux/kernel.h>
54016 +#include <linux/sched.h>
54017 +#include <linux/mm.h>
54018 +#include <linux/mman.h>
54019 +#include <linux/grinternal.h>
54020 +
54021 +void
54022 +gr_handle_ioperm(void)
54023 +{
54024 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
54025 + return;
54026 +}
54027 +
54028 +void
54029 +gr_handle_iopl(void)
54030 +{
54031 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
54032 + return;
54033 +}
54034 +
54035 +void
54036 +gr_handle_mem_readwrite(u64 from, u64 to)
54037 +{
54038 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
54039 + return;
54040 +}
54041 +
54042 +void
54043 +gr_handle_vm86(void)
54044 +{
54045 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
54046 + return;
54047 +}
54048 diff -urNp linux-2.6.32.45/grsecurity/grsec_mount.c linux-2.6.32.45/grsecurity/grsec_mount.c
54049 --- linux-2.6.32.45/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
54050 +++ linux-2.6.32.45/grsecurity/grsec_mount.c 2011-06-20 19:47:03.000000000 -0400
54051 @@ -0,0 +1,62 @@
54052 +#include <linux/kernel.h>
54053 +#include <linux/sched.h>
54054 +#include <linux/mount.h>
54055 +#include <linux/grsecurity.h>
54056 +#include <linux/grinternal.h>
54057 +
54058 +void
54059 +gr_log_remount(const char *devname, const int retval)
54060 +{
54061 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54062 + if (grsec_enable_mount && (retval >= 0))
54063 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
54064 +#endif
54065 + return;
54066 +}
54067 +
54068 +void
54069 +gr_log_unmount(const char *devname, const int retval)
54070 +{
54071 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54072 + if (grsec_enable_mount && (retval >= 0))
54073 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
54074 +#endif
54075 + return;
54076 +}
54077 +
54078 +void
54079 +gr_log_mount(const char *from, const char *to, const int retval)
54080 +{
54081 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54082 + if (grsec_enable_mount && (retval >= 0))
54083 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
54084 +#endif
54085 + return;
54086 +}
54087 +
54088 +int
54089 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
54090 +{
54091 +#ifdef CONFIG_GRKERNSEC_ROFS
54092 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
54093 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
54094 + return -EPERM;
54095 + } else
54096 + return 0;
54097 +#endif
54098 + return 0;
54099 +}
54100 +
54101 +int
54102 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
54103 +{
54104 +#ifdef CONFIG_GRKERNSEC_ROFS
54105 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
54106 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
54107 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
54108 + return -EPERM;
54109 + } else
54110 + return 0;
54111 +#endif
54112 + return 0;
54113 +}
54114 diff -urNp linux-2.6.32.45/grsecurity/grsec_pax.c linux-2.6.32.45/grsecurity/grsec_pax.c
54115 --- linux-2.6.32.45/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
54116 +++ linux-2.6.32.45/grsecurity/grsec_pax.c 2011-04-17 15:56:46.000000000 -0400
54117 @@ -0,0 +1,36 @@
54118 +#include <linux/kernel.h>
54119 +#include <linux/sched.h>
54120 +#include <linux/mm.h>
54121 +#include <linux/file.h>
54122 +#include <linux/grinternal.h>
54123 +#include <linux/grsecurity.h>
54124 +
54125 +void
54126 +gr_log_textrel(struct vm_area_struct * vma)
54127 +{
54128 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
54129 + if (grsec_enable_audit_textrel)
54130 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
54131 +#endif
54132 + return;
54133 +}
54134 +
54135 +void
54136 +gr_log_rwxmmap(struct file *file)
54137 +{
54138 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54139 + if (grsec_enable_log_rwxmaps)
54140 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
54141 +#endif
54142 + return;
54143 +}
54144 +
54145 +void
54146 +gr_log_rwxmprotect(struct file *file)
54147 +{
54148 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54149 + if (grsec_enable_log_rwxmaps)
54150 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
54151 +#endif
54152 + return;
54153 +}
54154 diff -urNp linux-2.6.32.45/grsecurity/grsec_ptrace.c linux-2.6.32.45/grsecurity/grsec_ptrace.c
54155 --- linux-2.6.32.45/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
54156 +++ linux-2.6.32.45/grsecurity/grsec_ptrace.c 2011-04-17 15:56:46.000000000 -0400
54157 @@ -0,0 +1,14 @@
54158 +#include <linux/kernel.h>
54159 +#include <linux/sched.h>
54160 +#include <linux/grinternal.h>
54161 +#include <linux/grsecurity.h>
54162 +
54163 +void
54164 +gr_audit_ptrace(struct task_struct *task)
54165 +{
54166 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
54167 + if (grsec_enable_audit_ptrace)
54168 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
54169 +#endif
54170 + return;
54171 +}
54172 diff -urNp linux-2.6.32.45/grsecurity/grsec_sig.c linux-2.6.32.45/grsecurity/grsec_sig.c
54173 --- linux-2.6.32.45/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
54174 +++ linux-2.6.32.45/grsecurity/grsec_sig.c 2011-06-29 19:40:31.000000000 -0400
54175 @@ -0,0 +1,205 @@
54176 +#include <linux/kernel.h>
54177 +#include <linux/sched.h>
54178 +#include <linux/delay.h>
54179 +#include <linux/grsecurity.h>
54180 +#include <linux/grinternal.h>
54181 +#include <linux/hardirq.h>
54182 +
54183 +char *signames[] = {
54184 + [SIGSEGV] = "Segmentation fault",
54185 + [SIGILL] = "Illegal instruction",
54186 + [SIGABRT] = "Abort",
54187 + [SIGBUS] = "Invalid alignment/Bus error"
54188 +};
54189 +
54190 +void
54191 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
54192 +{
54193 +#ifdef CONFIG_GRKERNSEC_SIGNAL
54194 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
54195 + (sig == SIGABRT) || (sig == SIGBUS))) {
54196 + if (t->pid == current->pid) {
54197 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
54198 + } else {
54199 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
54200 + }
54201 + }
54202 +#endif
54203 + return;
54204 +}
54205 +
54206 +int
54207 +gr_handle_signal(const struct task_struct *p, const int sig)
54208 +{
54209 +#ifdef CONFIG_GRKERNSEC
54210 + if (current->pid > 1 && gr_check_protected_task(p)) {
54211 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
54212 + return -EPERM;
54213 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
54214 + return -EPERM;
54215 + }
54216 +#endif
54217 + return 0;
54218 +}
54219 +
54220 +#ifdef CONFIG_GRKERNSEC
54221 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
54222 +
54223 +int gr_fake_force_sig(int sig, struct task_struct *t)
54224 +{
54225 + unsigned long int flags;
54226 + int ret, blocked, ignored;
54227 + struct k_sigaction *action;
54228 +
54229 + spin_lock_irqsave(&t->sighand->siglock, flags);
54230 + action = &t->sighand->action[sig-1];
54231 + ignored = action->sa.sa_handler == SIG_IGN;
54232 + blocked = sigismember(&t->blocked, sig);
54233 + if (blocked || ignored) {
54234 + action->sa.sa_handler = SIG_DFL;
54235 + if (blocked) {
54236 + sigdelset(&t->blocked, sig);
54237 + recalc_sigpending_and_wake(t);
54238 + }
54239 + }
54240 + if (action->sa.sa_handler == SIG_DFL)
54241 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
54242 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
54243 +
54244 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
54245 +
54246 + return ret;
54247 +}
54248 +#endif
54249 +
54250 +#ifdef CONFIG_GRKERNSEC_BRUTE
54251 +#define GR_USER_BAN_TIME (15 * 60)
54252 +
54253 +static int __get_dumpable(unsigned long mm_flags)
54254 +{
54255 + int ret;
54256 +
54257 + ret = mm_flags & MMF_DUMPABLE_MASK;
54258 + return (ret >= 2) ? 2 : ret;
54259 +}
54260 +#endif
54261 +
54262 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
54263 +{
54264 +#ifdef CONFIG_GRKERNSEC_BRUTE
54265 + uid_t uid = 0;
54266 +
54267 + if (!grsec_enable_brute)
54268 + return;
54269 +
54270 + rcu_read_lock();
54271 + read_lock(&tasklist_lock);
54272 + read_lock(&grsec_exec_file_lock);
54273 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
54274 + p->real_parent->brute = 1;
54275 + else {
54276 + const struct cred *cred = __task_cred(p), *cred2;
54277 + struct task_struct *tsk, *tsk2;
54278 +
54279 + if (!__get_dumpable(mm_flags) && cred->uid) {
54280 + struct user_struct *user;
54281 +
54282 + uid = cred->uid;
54283 +
54284 + /* this is put upon execution past expiration */
54285 + user = find_user(uid);
54286 + if (user == NULL)
54287 + goto unlock;
54288 + user->banned = 1;
54289 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
54290 + if (user->ban_expires == ~0UL)
54291 + user->ban_expires--;
54292 +
54293 + do_each_thread(tsk2, tsk) {
54294 + cred2 = __task_cred(tsk);
54295 + if (tsk != p && cred2->uid == uid)
54296 + gr_fake_force_sig(SIGKILL, tsk);
54297 + } while_each_thread(tsk2, tsk);
54298 + }
54299 + }
54300 +unlock:
54301 + read_unlock(&grsec_exec_file_lock);
54302 + read_unlock(&tasklist_lock);
54303 + rcu_read_unlock();
54304 +
54305 + if (uid)
54306 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
54307 +#endif
54308 + return;
54309 +}
54310 +
54311 +void gr_handle_brute_check(void)
54312 +{
54313 +#ifdef CONFIG_GRKERNSEC_BRUTE
54314 + if (current->brute)
54315 + msleep(30 * 1000);
54316 +#endif
54317 + return;
54318 +}
54319 +
54320 +void gr_handle_kernel_exploit(void)
54321 +{
54322 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
54323 + const struct cred *cred;
54324 + struct task_struct *tsk, *tsk2;
54325 + struct user_struct *user;
54326 + uid_t uid;
54327 +
54328 + if (in_irq() || in_serving_softirq() || in_nmi())
54329 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
54330 +
54331 + uid = current_uid();
54332 +
54333 + if (uid == 0)
54334 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
54335 + else {
54336 + /* kill all the processes of this user, hold a reference
54337 + to their creds struct, and prevent them from creating
54338 + another process until system reset
54339 + */
54340 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
54341 + /* we intentionally leak this ref */
54342 + user = get_uid(current->cred->user);
54343 + if (user) {
54344 + user->banned = 1;
54345 + user->ban_expires = ~0UL;
54346 + }
54347 +
54348 + read_lock(&tasklist_lock);
54349 + do_each_thread(tsk2, tsk) {
54350 + cred = __task_cred(tsk);
54351 + if (cred->uid == uid)
54352 + gr_fake_force_sig(SIGKILL, tsk);
54353 + } while_each_thread(tsk2, tsk);
54354 + read_unlock(&tasklist_lock);
54355 + }
54356 +#endif
54357 +}
54358 +
54359 +int __gr_process_user_ban(struct user_struct *user)
54360 +{
54361 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54362 + if (unlikely(user->banned)) {
54363 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
54364 + user->banned = 0;
54365 + user->ban_expires = 0;
54366 + free_uid(user);
54367 + } else
54368 + return -EPERM;
54369 + }
54370 +#endif
54371 + return 0;
54372 +}
54373 +
54374 +int gr_process_user_ban(void)
54375 +{
54376 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54377 + return __gr_process_user_ban(current->cred->user);
54378 +#endif
54379 + return 0;
54380 +}
54381 diff -urNp linux-2.6.32.45/grsecurity/grsec_sock.c linux-2.6.32.45/grsecurity/grsec_sock.c
54382 --- linux-2.6.32.45/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
54383 +++ linux-2.6.32.45/grsecurity/grsec_sock.c 2011-04-17 15:56:46.000000000 -0400
54384 @@ -0,0 +1,275 @@
54385 +#include <linux/kernel.h>
54386 +#include <linux/module.h>
54387 +#include <linux/sched.h>
54388 +#include <linux/file.h>
54389 +#include <linux/net.h>
54390 +#include <linux/in.h>
54391 +#include <linux/ip.h>
54392 +#include <net/sock.h>
54393 +#include <net/inet_sock.h>
54394 +#include <linux/grsecurity.h>
54395 +#include <linux/grinternal.h>
54396 +#include <linux/gracl.h>
54397 +
54398 +kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
54399 +EXPORT_SYMBOL(gr_cap_rtnetlink);
54400 +
54401 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
54402 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
54403 +
54404 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
54405 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
54406 +
54407 +#ifdef CONFIG_UNIX_MODULE
54408 +EXPORT_SYMBOL(gr_acl_handle_unix);
54409 +EXPORT_SYMBOL(gr_acl_handle_mknod);
54410 +EXPORT_SYMBOL(gr_handle_chroot_unix);
54411 +EXPORT_SYMBOL(gr_handle_create);
54412 +#endif
54413 +
54414 +#ifdef CONFIG_GRKERNSEC
54415 +#define gr_conn_table_size 32749
54416 +struct conn_table_entry {
54417 + struct conn_table_entry *next;
54418 + struct signal_struct *sig;
54419 +};
54420 +
54421 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
54422 +DEFINE_SPINLOCK(gr_conn_table_lock);
54423 +
54424 +extern const char * gr_socktype_to_name(unsigned char type);
54425 +extern const char * gr_proto_to_name(unsigned char proto);
54426 +extern const char * gr_sockfamily_to_name(unsigned char family);
54427 +
54428 +static __inline__ int
54429 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
54430 +{
54431 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
54432 +}
54433 +
54434 +static __inline__ int
54435 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
54436 + __u16 sport, __u16 dport)
54437 +{
54438 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
54439 + sig->gr_sport == sport && sig->gr_dport == dport))
54440 + return 1;
54441 + else
54442 + return 0;
54443 +}
54444 +
54445 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
54446 +{
54447 + struct conn_table_entry **match;
54448 + unsigned int index;
54449 +
54450 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
54451 + sig->gr_sport, sig->gr_dport,
54452 + gr_conn_table_size);
54453 +
54454 + newent->sig = sig;
54455 +
54456 + match = &gr_conn_table[index];
54457 + newent->next = *match;
54458 + *match = newent;
54459 +
54460 + return;
54461 +}
54462 +
54463 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
54464 +{
54465 + struct conn_table_entry *match, *last = NULL;
54466 + unsigned int index;
54467 +
54468 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
54469 + sig->gr_sport, sig->gr_dport,
54470 + gr_conn_table_size);
54471 +
54472 + match = gr_conn_table[index];
54473 + while (match && !conn_match(match->sig,
54474 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
54475 + sig->gr_dport)) {
54476 + last = match;
54477 + match = match->next;
54478 + }
54479 +
54480 + if (match) {
54481 + if (last)
54482 + last->next = match->next;
54483 + else
54484 + gr_conn_table[index] = match->next;
54485 + kfree(match);
54486 + }
54487 +
54488 + return;
54489 +}
54490 +
54491 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
54492 + __u16 sport, __u16 dport)
54493 +{
54494 + struct conn_table_entry *match;
54495 + unsigned int index;
54496 +
54497 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
54498 +
54499 + match = gr_conn_table[index];
54500 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
54501 + match = match->next;
54502 +
54503 + if (match)
54504 + return match->sig;
54505 + else
54506 + return NULL;
54507 +}
54508 +
54509 +#endif
54510 +
54511 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
54512 +{
54513 +#ifdef CONFIG_GRKERNSEC
54514 + struct signal_struct *sig = task->signal;
54515 + struct conn_table_entry *newent;
54516 +
54517 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
54518 + if (newent == NULL)
54519 + return;
54520 + /* no bh lock needed since we are called with bh disabled */
54521 + spin_lock(&gr_conn_table_lock);
54522 + gr_del_task_from_ip_table_nolock(sig);
54523 + sig->gr_saddr = inet->rcv_saddr;
54524 + sig->gr_daddr = inet->daddr;
54525 + sig->gr_sport = inet->sport;
54526 + sig->gr_dport = inet->dport;
54527 + gr_add_to_task_ip_table_nolock(sig, newent);
54528 + spin_unlock(&gr_conn_table_lock);
54529 +#endif
54530 + return;
54531 +}
54532 +
54533 +void gr_del_task_from_ip_table(struct task_struct *task)
54534 +{
54535 +#ifdef CONFIG_GRKERNSEC
54536 + spin_lock_bh(&gr_conn_table_lock);
54537 + gr_del_task_from_ip_table_nolock(task->signal);
54538 + spin_unlock_bh(&gr_conn_table_lock);
54539 +#endif
54540 + return;
54541 +}
54542 +
54543 +void
54544 +gr_attach_curr_ip(const struct sock *sk)
54545 +{
54546 +#ifdef CONFIG_GRKERNSEC
54547 + struct signal_struct *p, *set;
54548 + const struct inet_sock *inet = inet_sk(sk);
54549 +
54550 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
54551 + return;
54552 +
54553 + set = current->signal;
54554 +
54555 + spin_lock_bh(&gr_conn_table_lock);
54556 + p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
54557 + inet->dport, inet->sport);
54558 + if (unlikely(p != NULL)) {
54559 + set->curr_ip = p->curr_ip;
54560 + set->used_accept = 1;
54561 + gr_del_task_from_ip_table_nolock(p);
54562 + spin_unlock_bh(&gr_conn_table_lock);
54563 + return;
54564 + }
54565 + spin_unlock_bh(&gr_conn_table_lock);
54566 +
54567 + set->curr_ip = inet->daddr;
54568 + set->used_accept = 1;
54569 +#endif
54570 + return;
54571 +}
54572 +
54573 +int
54574 +gr_handle_sock_all(const int family, const int type, const int protocol)
54575 +{
54576 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
54577 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
54578 + (family != AF_UNIX)) {
54579 + if (family == AF_INET)
54580 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
54581 + else
54582 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
54583 + return -EACCES;
54584 + }
54585 +#endif
54586 + return 0;
54587 +}
54588 +
54589 +int
54590 +gr_handle_sock_server(const struct sockaddr *sck)
54591 +{
54592 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
54593 + if (grsec_enable_socket_server &&
54594 + in_group_p(grsec_socket_server_gid) &&
54595 + sck && (sck->sa_family != AF_UNIX) &&
54596 + (sck->sa_family != AF_LOCAL)) {
54597 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
54598 + return -EACCES;
54599 + }
54600 +#endif
54601 + return 0;
54602 +}
54603 +
54604 +int
54605 +gr_handle_sock_server_other(const struct sock *sck)
54606 +{
54607 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
54608 + if (grsec_enable_socket_server &&
54609 + in_group_p(grsec_socket_server_gid) &&
54610 + sck && (sck->sk_family != AF_UNIX) &&
54611 + (sck->sk_family != AF_LOCAL)) {
54612 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
54613 + return -EACCES;
54614 + }
54615 +#endif
54616 + return 0;
54617 +}
54618 +
54619 +int
54620 +gr_handle_sock_client(const struct sockaddr *sck)
54621 +{
54622 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
54623 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
54624 + sck && (sck->sa_family != AF_UNIX) &&
54625 + (sck->sa_family != AF_LOCAL)) {
54626 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
54627 + return -EACCES;
54628 + }
54629 +#endif
54630 + return 0;
54631 +}
54632 +
54633 +kernel_cap_t
54634 +gr_cap_rtnetlink(struct sock *sock)
54635 +{
54636 +#ifdef CONFIG_GRKERNSEC
54637 + if (!gr_acl_is_enabled())
54638 + return current_cap();
54639 + else if (sock->sk_protocol == NETLINK_ISCSI &&
54640 + cap_raised(current_cap(), CAP_SYS_ADMIN) &&
54641 + gr_is_capable(CAP_SYS_ADMIN))
54642 + return current_cap();
54643 + else if (sock->sk_protocol == NETLINK_AUDIT &&
54644 + cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
54645 + gr_is_capable(CAP_AUDIT_WRITE) &&
54646 + cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
54647 + gr_is_capable(CAP_AUDIT_CONTROL))
54648 + return current_cap();
54649 + else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
54650 + ((sock->sk_protocol == NETLINK_ROUTE) ?
54651 + gr_is_capable_nolog(CAP_NET_ADMIN) :
54652 + gr_is_capable(CAP_NET_ADMIN)))
54653 + return current_cap();
54654 + else
54655 + return __cap_empty_set;
54656 +#else
54657 + return current_cap();
54658 +#endif
54659 +}
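
One detail worth calling out in grsec_sock.c above: gr_update_task_in_ip_table() files the task under the 4-tuple as the connecting side sees it (rcv_saddr, daddr, sport, dport), while gr_attach_curr_ip() probes with the source and destination fields swapped, because the accepting socket sees the mirrored tuple of the same connection. A minimal userspace model of the chained hash it relies on is sketched below; all names are hypothetical and it is not part of the patch.

/* Illustrative userspace model of the conn_hash()/lookup scheme above;
 * hypothetical names, not part of the patch. */
#include <stdint.h>
#include <stddef.h>

#define TABLE_SIZE 32749        /* same prime bucket count as gr_conn_table_size */

struct entry {
        struct entry *next;
        uint32_t saddr, daddr;
        uint16_t sport, dport;
};

static struct entry *table[TABLE_SIZE];

static unsigned int conn_hash(uint32_t saddr, uint32_t daddr, uint16_t sport, uint16_t dport)
{
        /* same mixing as the kernel-side conn_hash() */
        return (daddr + saddr + ((uint32_t)sport << 8) + ((uint32_t)dport << 16)) % TABLE_SIZE;
}

static struct entry *lookup(uint32_t saddr, uint32_t daddr, uint16_t sport, uint16_t dport)
{
        struct entry *e = table[conn_hash(saddr, daddr, sport, dport)];

        /* walk the bucket; the accepting side would call this with the
         * address and port pairs swapped relative to how they were inserted */
        while (e && !(e->saddr == saddr && e->daddr == daddr &&
                      e->sport == sport && e->dport == dport))
                e = e->next;
        return e;
}
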
54660 diff -urNp linux-2.6.32.45/grsecurity/grsec_sysctl.c linux-2.6.32.45/grsecurity/grsec_sysctl.c
54661 --- linux-2.6.32.45/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
54662 +++ linux-2.6.32.45/grsecurity/grsec_sysctl.c 2011-08-11 19:57:54.000000000 -0400
54663 @@ -0,0 +1,479 @@
54664 +#include <linux/kernel.h>
54665 +#include <linux/sched.h>
54666 +#include <linux/sysctl.h>
54667 +#include <linux/grsecurity.h>
54668 +#include <linux/grinternal.h>
54669 +
54670 +int
54671 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
54672 +{
54673 +#ifdef CONFIG_GRKERNSEC_SYSCTL
54674 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
54675 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
54676 + return -EACCES;
54677 + }
54678 +#endif
54679 + return 0;
54680 +}
54681 +
54682 +#ifdef CONFIG_GRKERNSEC_ROFS
54683 +static int __maybe_unused one = 1;
54684 +#endif
54685 +
54686 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
54687 +ctl_table grsecurity_table[] = {
54688 +#ifdef CONFIG_GRKERNSEC_SYSCTL
54689 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
54690 +#ifdef CONFIG_GRKERNSEC_IO
54691 + {
54692 + .ctl_name = CTL_UNNUMBERED,
54693 + .procname = "disable_priv_io",
54694 + .data = &grsec_disable_privio,
54695 + .maxlen = sizeof(int),
54696 + .mode = 0600,
54697 + .proc_handler = &proc_dointvec,
54698 + },
54699 +#endif
54700 +#endif
54701 +#ifdef CONFIG_GRKERNSEC_LINK
54702 + {
54703 + .ctl_name = CTL_UNNUMBERED,
54704 + .procname = "linking_restrictions",
54705 + .data = &grsec_enable_link,
54706 + .maxlen = sizeof(int),
54707 + .mode = 0600,
54708 + .proc_handler = &proc_dointvec,
54709 + },
54710 +#endif
54711 +#ifdef CONFIG_GRKERNSEC_BRUTE
54712 + {
54713 + .ctl_name = CTL_UNNUMBERED,
54714 + .procname = "deter_bruteforce",
54715 + .data = &grsec_enable_brute,
54716 + .maxlen = sizeof(int),
54717 + .mode = 0600,
54718 + .proc_handler = &proc_dointvec,
54719 + },
54720 +#endif
54721 +#ifdef CONFIG_GRKERNSEC_FIFO
54722 + {
54723 + .ctl_name = CTL_UNNUMBERED,
54724 + .procname = "fifo_restrictions",
54725 + .data = &grsec_enable_fifo,
54726 + .maxlen = sizeof(int),
54727 + .mode = 0600,
54728 + .proc_handler = &proc_dointvec,
54729 + },
54730 +#endif
54731 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
54732 + {
54733 + .ctl_name = CTL_UNNUMBERED,
54734 + .procname = "ip_blackhole",
54735 + .data = &grsec_enable_blackhole,
54736 + .maxlen = sizeof(int),
54737 + .mode = 0600,
54738 + .proc_handler = &proc_dointvec,
54739 + },
54740 + {
54741 + .ctl_name = CTL_UNNUMBERED,
54742 + .procname = "lastack_retries",
54743 + .data = &grsec_lastack_retries,
54744 + .maxlen = sizeof(int),
54745 + .mode = 0600,
54746 + .proc_handler = &proc_dointvec,
54747 + },
54748 +#endif
54749 +#ifdef CONFIG_GRKERNSEC_EXECLOG
54750 + {
54751 + .ctl_name = CTL_UNNUMBERED,
54752 + .procname = "exec_logging",
54753 + .data = &grsec_enable_execlog,
54754 + .maxlen = sizeof(int),
54755 + .mode = 0600,
54756 + .proc_handler = &proc_dointvec,
54757 + },
54758 +#endif
54759 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54760 + {
54761 + .ctl_name = CTL_UNNUMBERED,
54762 + .procname = "rwxmap_logging",
54763 + .data = &grsec_enable_log_rwxmaps,
54764 + .maxlen = sizeof(int),
54765 + .mode = 0600,
54766 + .proc_handler = &proc_dointvec,
54767 + },
54768 +#endif
54769 +#ifdef CONFIG_GRKERNSEC_SIGNAL
54770 + {
54771 + .ctl_name = CTL_UNNUMBERED,
54772 + .procname = "signal_logging",
54773 + .data = &grsec_enable_signal,
54774 + .maxlen = sizeof(int),
54775 + .mode = 0600,
54776 + .proc_handler = &proc_dointvec,
54777 + },
54778 +#endif
54779 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
54780 + {
54781 + .ctl_name = CTL_UNNUMBERED,
54782 + .procname = "forkfail_logging",
54783 + .data = &grsec_enable_forkfail,
54784 + .maxlen = sizeof(int),
54785 + .mode = 0600,
54786 + .proc_handler = &proc_dointvec,
54787 + },
54788 +#endif
54789 +#ifdef CONFIG_GRKERNSEC_TIME
54790 + {
54791 + .ctl_name = CTL_UNNUMBERED,
54792 + .procname = "timechange_logging",
54793 + .data = &grsec_enable_time,
54794 + .maxlen = sizeof(int),
54795 + .mode = 0600,
54796 + .proc_handler = &proc_dointvec,
54797 + },
54798 +#endif
54799 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
54800 + {
54801 + .ctl_name = CTL_UNNUMBERED,
54802 + .procname = "chroot_deny_shmat",
54803 + .data = &grsec_enable_chroot_shmat,
54804 + .maxlen = sizeof(int),
54805 + .mode = 0600,
54806 + .proc_handler = &proc_dointvec,
54807 + },
54808 +#endif
54809 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
54810 + {
54811 + .ctl_name = CTL_UNNUMBERED,
54812 + .procname = "chroot_deny_unix",
54813 + .data = &grsec_enable_chroot_unix,
54814 + .maxlen = sizeof(int),
54815 + .mode = 0600,
54816 + .proc_handler = &proc_dointvec,
54817 + },
54818 +#endif
54819 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
54820 + {
54821 + .ctl_name = CTL_UNNUMBERED,
54822 + .procname = "chroot_deny_mount",
54823 + .data = &grsec_enable_chroot_mount,
54824 + .maxlen = sizeof(int),
54825 + .mode = 0600,
54826 + .proc_handler = &proc_dointvec,
54827 + },
54828 +#endif
54829 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
54830 + {
54831 + .ctl_name = CTL_UNNUMBERED,
54832 + .procname = "chroot_deny_fchdir",
54833 + .data = &grsec_enable_chroot_fchdir,
54834 + .maxlen = sizeof(int),
54835 + .mode = 0600,
54836 + .proc_handler = &proc_dointvec,
54837 + },
54838 +#endif
54839 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
54840 + {
54841 + .ctl_name = CTL_UNNUMBERED,
54842 + .procname = "chroot_deny_chroot",
54843 + .data = &grsec_enable_chroot_double,
54844 + .maxlen = sizeof(int),
54845 + .mode = 0600,
54846 + .proc_handler = &proc_dointvec,
54847 + },
54848 +#endif
54849 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
54850 + {
54851 + .ctl_name = CTL_UNNUMBERED,
54852 + .procname = "chroot_deny_pivot",
54853 + .data = &grsec_enable_chroot_pivot,
54854 + .maxlen = sizeof(int),
54855 + .mode = 0600,
54856 + .proc_handler = &proc_dointvec,
54857 + },
54858 +#endif
54859 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
54860 + {
54861 + .ctl_name = CTL_UNNUMBERED,
54862 + .procname = "chroot_enforce_chdir",
54863 + .data = &grsec_enable_chroot_chdir,
54864 + .maxlen = sizeof(int),
54865 + .mode = 0600,
54866 + .proc_handler = &proc_dointvec,
54867 + },
54868 +#endif
54869 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
54870 + {
54871 + .ctl_name = CTL_UNNUMBERED,
54872 + .procname = "chroot_deny_chmod",
54873 + .data = &grsec_enable_chroot_chmod,
54874 + .maxlen = sizeof(int),
54875 + .mode = 0600,
54876 + .proc_handler = &proc_dointvec,
54877 + },
54878 +#endif
54879 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
54880 + {
54881 + .ctl_name = CTL_UNNUMBERED,
54882 + .procname = "chroot_deny_mknod",
54883 + .data = &grsec_enable_chroot_mknod,
54884 + .maxlen = sizeof(int),
54885 + .mode = 0600,
54886 + .proc_handler = &proc_dointvec,
54887 + },
54888 +#endif
54889 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54890 + {
54891 + .ctl_name = CTL_UNNUMBERED,
54892 + .procname = "chroot_restrict_nice",
54893 + .data = &grsec_enable_chroot_nice,
54894 + .maxlen = sizeof(int),
54895 + .mode = 0600,
54896 + .proc_handler = &proc_dointvec,
54897 + },
54898 +#endif
54899 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
54900 + {
54901 + .ctl_name = CTL_UNNUMBERED,
54902 + .procname = "chroot_execlog",
54903 + .data = &grsec_enable_chroot_execlog,
54904 + .maxlen = sizeof(int),
54905 + .mode = 0600,
54906 + .proc_handler = &proc_dointvec,
54907 + },
54908 +#endif
54909 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54910 + {
54911 + .ctl_name = CTL_UNNUMBERED,
54912 + .procname = "chroot_caps",
54913 + .data = &grsec_enable_chroot_caps,
54914 + .maxlen = sizeof(int),
54915 + .mode = 0600,
54916 + .proc_handler = &proc_dointvec,
54917 + },
54918 +#endif
54919 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
54920 + {
54921 + .ctl_name = CTL_UNNUMBERED,
54922 + .procname = "chroot_deny_sysctl",
54923 + .data = &grsec_enable_chroot_sysctl,
54924 + .maxlen = sizeof(int),
54925 + .mode = 0600,
54926 + .proc_handler = &proc_dointvec,
54927 + },
54928 +#endif
54929 +#ifdef CONFIG_GRKERNSEC_TPE
54930 + {
54931 + .ctl_name = CTL_UNNUMBERED,
54932 + .procname = "tpe",
54933 + .data = &grsec_enable_tpe,
54934 + .maxlen = sizeof(int),
54935 + .mode = 0600,
54936 + .proc_handler = &proc_dointvec,
54937 + },
54938 + {
54939 + .ctl_name = CTL_UNNUMBERED,
54940 + .procname = "tpe_gid",
54941 + .data = &grsec_tpe_gid,
54942 + .maxlen = sizeof(int),
54943 + .mode = 0600,
54944 + .proc_handler = &proc_dointvec,
54945 + },
54946 +#endif
54947 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
54948 + {
54949 + .ctl_name = CTL_UNNUMBERED,
54950 + .procname = "tpe_invert",
54951 + .data = &grsec_enable_tpe_invert,
54952 + .maxlen = sizeof(int),
54953 + .mode = 0600,
54954 + .proc_handler = &proc_dointvec,
54955 + },
54956 +#endif
54957 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
54958 + {
54959 + .ctl_name = CTL_UNNUMBERED,
54960 + .procname = "tpe_restrict_all",
54961 + .data = &grsec_enable_tpe_all,
54962 + .maxlen = sizeof(int),
54963 + .mode = 0600,
54964 + .proc_handler = &proc_dointvec,
54965 + },
54966 +#endif
54967 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
54968 + {
54969 + .ctl_name = CTL_UNNUMBERED,
54970 + .procname = "socket_all",
54971 + .data = &grsec_enable_socket_all,
54972 + .maxlen = sizeof(int),
54973 + .mode = 0600,
54974 + .proc_handler = &proc_dointvec,
54975 + },
54976 + {
54977 + .ctl_name = CTL_UNNUMBERED,
54978 + .procname = "socket_all_gid",
54979 + .data = &grsec_socket_all_gid,
54980 + .maxlen = sizeof(int),
54981 + .mode = 0600,
54982 + .proc_handler = &proc_dointvec,
54983 + },
54984 +#endif
54985 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
54986 + {
54987 + .ctl_name = CTL_UNNUMBERED,
54988 + .procname = "socket_client",
54989 + .data = &grsec_enable_socket_client,
54990 + .maxlen = sizeof(int),
54991 + .mode = 0600,
54992 + .proc_handler = &proc_dointvec,
54993 + },
54994 + {
54995 + .ctl_name = CTL_UNNUMBERED,
54996 + .procname = "socket_client_gid",
54997 + .data = &grsec_socket_client_gid,
54998 + .maxlen = sizeof(int),
54999 + .mode = 0600,
55000 + .proc_handler = &proc_dointvec,
55001 + },
55002 +#endif
55003 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55004 + {
55005 + .ctl_name = CTL_UNNUMBERED,
55006 + .procname = "socket_server",
55007 + .data = &grsec_enable_socket_server,
55008 + .maxlen = sizeof(int),
55009 + .mode = 0600,
55010 + .proc_handler = &proc_dointvec,
55011 + },
55012 + {
55013 + .ctl_name = CTL_UNNUMBERED,
55014 + .procname = "socket_server_gid",
55015 + .data = &grsec_socket_server_gid,
55016 + .maxlen = sizeof(int),
55017 + .mode = 0600,
55018 + .proc_handler = &proc_dointvec,
55019 + },
55020 +#endif
55021 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
55022 + {
55023 + .ctl_name = CTL_UNNUMBERED,
55024 + .procname = "audit_group",
55025 + .data = &grsec_enable_group,
55026 + .maxlen = sizeof(int),
55027 + .mode = 0600,
55028 + .proc_handler = &proc_dointvec,
55029 + },
55030 + {
55031 + .ctl_name = CTL_UNNUMBERED,
55032 + .procname = "audit_gid",
55033 + .data = &grsec_audit_gid,
55034 + .maxlen = sizeof(int),
55035 + .mode = 0600,
55036 + .proc_handler = &proc_dointvec,
55037 + },
55038 +#endif
55039 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55040 + {
55041 + .ctl_name = CTL_UNNUMBERED,
55042 + .procname = "audit_chdir",
55043 + .data = &grsec_enable_chdir,
55044 + .maxlen = sizeof(int),
55045 + .mode = 0600,
55046 + .proc_handler = &proc_dointvec,
55047 + },
55048 +#endif
55049 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55050 + {
55051 + .ctl_name = CTL_UNNUMBERED,
55052 + .procname = "audit_mount",
55053 + .data = &grsec_enable_mount,
55054 + .maxlen = sizeof(int),
55055 + .mode = 0600,
55056 + .proc_handler = &proc_dointvec,
55057 + },
55058 +#endif
55059 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55060 + {
55061 + .ctl_name = CTL_UNNUMBERED,
55062 + .procname = "audit_textrel",
55063 + .data = &grsec_enable_audit_textrel,
55064 + .maxlen = sizeof(int),
55065 + .mode = 0600,
55066 + .proc_handler = &proc_dointvec,
55067 + },
55068 +#endif
55069 +#ifdef CONFIG_GRKERNSEC_DMESG
55070 + {
55071 + .ctl_name = CTL_UNNUMBERED,
55072 + .procname = "dmesg",
55073 + .data = &grsec_enable_dmesg,
55074 + .maxlen = sizeof(int),
55075 + .mode = 0600,
55076 + .proc_handler = &proc_dointvec,
55077 + },
55078 +#endif
55079 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55080 + {
55081 + .ctl_name = CTL_UNNUMBERED,
55082 + .procname = "chroot_findtask",
55083 + .data = &grsec_enable_chroot_findtask,
55084 + .maxlen = sizeof(int),
55085 + .mode = 0600,
55086 + .proc_handler = &proc_dointvec,
55087 + },
55088 +#endif
55089 +#ifdef CONFIG_GRKERNSEC_RESLOG
55090 + {
55091 + .ctl_name = CTL_UNNUMBERED,
55092 + .procname = "resource_logging",
55093 + .data = &grsec_resource_logging,
55094 + .maxlen = sizeof(int),
55095 + .mode = 0600,
55096 + .proc_handler = &proc_dointvec,
55097 + },
55098 +#endif
55099 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55100 + {
55101 + .ctl_name = CTL_UNNUMBERED,
55102 + .procname = "audit_ptrace",
55103 + .data = &grsec_enable_audit_ptrace,
55104 + .maxlen = sizeof(int),
55105 + .mode = 0600,
55106 + .proc_handler = &proc_dointvec,
55107 + },
55108 +#endif
55109 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55110 + {
55111 + .ctl_name = CTL_UNNUMBERED,
55112 + .procname = "harden_ptrace",
55113 + .data = &grsec_enable_harden_ptrace,
55114 + .maxlen = sizeof(int),
55115 + .mode = 0600,
55116 + .proc_handler = &proc_dointvec,
55117 + },
55118 +#endif
55119 + {
55120 + .ctl_name = CTL_UNNUMBERED,
55121 + .procname = "grsec_lock",
55122 + .data = &grsec_lock,
55123 + .maxlen = sizeof(int),
55124 + .mode = 0600,
55125 + .proc_handler = &proc_dointvec,
55126 + },
55127 +#endif
55128 +#ifdef CONFIG_GRKERNSEC_ROFS
55129 + {
55130 + .ctl_name = CTL_UNNUMBERED,
55131 + .procname = "romount_protect",
55132 + .data = &grsec_enable_rofs,
55133 + .maxlen = sizeof(int),
55134 + .mode = 0600,
55135 + .proc_handler = &proc_dointvec_minmax,
55136 + .extra1 = &one,
55137 + .extra2 = &one,
55138 + },
55139 +#endif
55140 + { .ctl_name = 0 }
55141 +};
55142 +#endif
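
Each entry above becomes a root-only (mode 0600) file in a sysctl directory named "grsecurity", the same directory name checked by gr_handle_sysctl_mod() at the top of this file; once the grsec_lock entry is set, that handler refuses any further writes underneath it. A rough sketch of how an init script's helper might drive these knobs from userspace follows; the /proc/sys/kernel/grsecurity path is an assumption (the table's registration point is outside this hunk), and the code is illustrative only.

/* Illustrative only: toggling grsecurity sysctl knobs and then sealing them.
 * The /proc/sys/kernel/grsecurity/ path is assumed, not taken from this hunk. */
#include <stdio.h>

static int write_knob(const char *name, const char *val)
{
        char path[128];
        FILE *f;

        snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
        f = fopen(path, "w");
        if (!f)
                return -1;
        fputs(val, f);
        return fclose(f);
}

int main(void)
{
        write_knob("tpe", "1");
        write_knob("chroot_deny_chroot", "1");
        /* after this, gr_handle_sysctl_mod() rejects writes to the directory */
        return write_knob("grsec_lock", "1");
}
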
55143 diff -urNp linux-2.6.32.45/grsecurity/grsec_time.c linux-2.6.32.45/grsecurity/grsec_time.c
55144 --- linux-2.6.32.45/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
55145 +++ linux-2.6.32.45/grsecurity/grsec_time.c 2011-04-17 15:56:46.000000000 -0400
55146 @@ -0,0 +1,16 @@
55147 +#include <linux/kernel.h>
55148 +#include <linux/sched.h>
55149 +#include <linux/grinternal.h>
55150 +#include <linux/module.h>
55151 +
55152 +void
55153 +gr_log_timechange(void)
55154 +{
55155 +#ifdef CONFIG_GRKERNSEC_TIME
55156 + if (grsec_enable_time)
55157 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
55158 +#endif
55159 + return;
55160 +}
55161 +
55162 +EXPORT_SYMBOL(gr_log_timechange);
55163 diff -urNp linux-2.6.32.45/grsecurity/grsec_tpe.c linux-2.6.32.45/grsecurity/grsec_tpe.c
55164 --- linux-2.6.32.45/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
55165 +++ linux-2.6.32.45/grsecurity/grsec_tpe.c 2011-04-17 15:56:46.000000000 -0400
55166 @@ -0,0 +1,39 @@
55167 +#include <linux/kernel.h>
55168 +#include <linux/sched.h>
55169 +#include <linux/file.h>
55170 +#include <linux/fs.h>
55171 +#include <linux/grinternal.h>
55172 +
55173 +extern int gr_acl_tpe_check(void);
55174 +
55175 +int
55176 +gr_tpe_allow(const struct file *file)
55177 +{
55178 +#ifdef CONFIG_GRKERNSEC
55179 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
55180 + const struct cred *cred = current_cred();
55181 +
55182 + if (cred->uid && ((grsec_enable_tpe &&
55183 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55184 + ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
55185 + (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
55186 +#else
55187 + in_group_p(grsec_tpe_gid)
55188 +#endif
55189 + ) || gr_acl_tpe_check()) &&
55190 + (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
55191 + (inode->i_mode & S_IWOTH))))) {
55192 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55193 + return 0;
55194 + }
55195 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
55196 + if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
55197 + ((inode->i_uid && (inode->i_uid != cred->uid)) ||
55198 + (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
55199 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55200 + return 0;
55201 + }
55202 +#endif
55203 +#endif
55204 + return 1;
55205 +}
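
The compound conditional in gr_tpe_allow() above reduces to: a non-root user to whom TPE applies (via group membership, optionally inverted, or an RBAC flag) may not execute a file whose parent directory is not owned by root or is group- or world-writable; with GRKERNSEC_TPE_ALL the same denial additionally applies to every non-root user whenever the directory is owned by neither root nor that user, or is group- or world-writable. A small standalone predicate restating the base test is sketched below with hypothetical names; it is illustrative only, not part of the patch.

/* Illustrative restatement of the base TPE check above; hypothetical helper,
 * not part of the patch. */
#include <stdbool.h>
#include <sys/types.h>
#include <sys/stat.h>

static bool tpe_denies_exec(uid_t uid, bool tpe_applies_to_user,
                            uid_t dir_uid, mode_t dir_mode)
{
        /* untrusted directory: not root-owned, or writable by group/other */
        bool dir_untrusted = dir_uid != 0 || (dir_mode & (S_IWGRP | S_IWOTH));

        return uid != 0 && tpe_applies_to_user && dir_untrusted;
}
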
55206 diff -urNp linux-2.6.32.45/grsecurity/grsum.c linux-2.6.32.45/grsecurity/grsum.c
55207 --- linux-2.6.32.45/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
55208 +++ linux-2.6.32.45/grsecurity/grsum.c 2011-04-17 15:56:46.000000000 -0400
55209 @@ -0,0 +1,61 @@
55210 +#include <linux/err.h>
55211 +#include <linux/kernel.h>
55212 +#include <linux/sched.h>
55213 +#include <linux/mm.h>
55214 +#include <linux/scatterlist.h>
55215 +#include <linux/crypto.h>
55216 +#include <linux/gracl.h>
55217 +
55218 +
55219 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
55220 +#error "crypto and sha256 must be built into the kernel"
55221 +#endif
55222 +
55223 +int
55224 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
55225 +{
55226 + char *p;
55227 + struct crypto_hash *tfm;
55228 + struct hash_desc desc;
55229 + struct scatterlist sg;
55230 + unsigned char temp_sum[GR_SHA_LEN];
55231 + volatile int retval = 0;
55232 + volatile int dummy = 0;
55233 + unsigned int i;
55234 +
55235 + sg_init_table(&sg, 1);
55236 +
55237 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
55238 + if (IS_ERR(tfm)) {
55239 + /* should never happen, since sha256 should be built in */
55240 + return 1;
55241 + }
55242 +
55243 + desc.tfm = tfm;
55244 + desc.flags = 0;
55245 +
55246 + crypto_hash_init(&desc);
55247 +
55248 + p = salt;
55249 + sg_set_buf(&sg, p, GR_SALT_LEN);
55250 + crypto_hash_update(&desc, &sg, sg.length);
55251 +
55252 + p = entry->pw;
55253 + sg_set_buf(&sg, p, strlen(p));
55254 +
55255 + crypto_hash_update(&desc, &sg, sg.length);
55256 +
55257 + crypto_hash_final(&desc, temp_sum);
55258 +
55259 + memset(entry->pw, 0, GR_PW_LEN);
55260 +
55261 + for (i = 0; i < GR_SHA_LEN; i++)
55262 + if (sum[i] != temp_sum[i])
55263 + retval = 1;
55264 + else
55265 + dummy = 1; // waste a cycle
55266 +
55267 + crypto_free_hash(tfm);
55268 +
55269 + return retval;
55270 +}
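
chkpw() above hashes the salt followed by the supplied password with SHA-256 and then compares every byte of the digest, using the volatile dummy assignment so that a mismatch does not shorten the loop; the goal is a comparison whose timing does not reveal where the digests first differ. The more conventional way to write the same fixed-time pattern is sketched below; it is illustrative only and not part of the patch.

/* Illustrative only: the usual accumulate-then-test form of the no-early-exit
 * comparison performed at the end of chkpw(). */
#include <stddef.h>

static int fixed_time_memcmp(const unsigned char *a, const unsigned char *b, size_t n)
{
        unsigned char diff = 0;
        size_t i;

        for (i = 0; i < n; i++)
                diff |= a[i] ^ b[i];    /* every byte is visited regardless of mismatches */

        return diff != 0;               /* 1 on mismatch, 0 on match, like chkpw() */
}
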
55271 diff -urNp linux-2.6.32.45/grsecurity/Kconfig linux-2.6.32.45/grsecurity/Kconfig
55272 --- linux-2.6.32.45/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
55273 +++ linux-2.6.32.45/grsecurity/Kconfig 2011-08-17 19:04:25.000000000 -0400
55274 @@ -0,0 +1,1037 @@
55275 +#
55276 +# grsecurity configuration
55277 +#
55278 +
55279 +menu "Grsecurity"
55280 +
55281 +config GRKERNSEC
55282 + bool "Grsecurity"
55283 + select CRYPTO
55284 + select CRYPTO_SHA256
55285 + help
55286 + If you say Y here, you will be able to configure many features
55287 + that will enhance the security of your system. It is highly
55288 + recommended that you say Y here and read through the help
55289 + for each option so that you fully understand the features and
55290 + can evaluate their usefulness for your machine.
55291 +
55292 +choice
55293 + prompt "Security Level"
55294 + depends on GRKERNSEC
55295 + default GRKERNSEC_CUSTOM
55296 +
55297 +config GRKERNSEC_LOW
55298 + bool "Low"
55299 + select GRKERNSEC_LINK
55300 + select GRKERNSEC_FIFO
55301 + select GRKERNSEC_RANDNET
55302 + select GRKERNSEC_DMESG
55303 + select GRKERNSEC_CHROOT
55304 + select GRKERNSEC_CHROOT_CHDIR
55305 +
55306 + help
55307 + If you choose this option, several of the grsecurity options will
55308 + be enabled that will give you greater protection against a number
55309 + of attacks, while assuring that none of your software will have any
55310 + conflicts with the additional security measures. If you run a lot
55311 + of unusual software, or you are having problems with the higher
55312 + security levels, you should say Y here. With this option, the
55313 + following features are enabled:
55314 +
55315 + - Linking restrictions
55316 + - FIFO restrictions
55317 + - Restricted dmesg
55318 + - Enforced chdir("/") on chroot
55319 + - Runtime module disabling
55320 +
55321 +config GRKERNSEC_MEDIUM
55322 + bool "Medium"
55323 + select PAX
55324 + select PAX_EI_PAX
55325 + select PAX_PT_PAX_FLAGS
55326 + select PAX_HAVE_ACL_FLAGS
55327 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55328 + select GRKERNSEC_CHROOT
55329 + select GRKERNSEC_CHROOT_SYSCTL
55330 + select GRKERNSEC_LINK
55331 + select GRKERNSEC_FIFO
55332 + select GRKERNSEC_DMESG
55333 + select GRKERNSEC_RANDNET
55334 + select GRKERNSEC_FORKFAIL
55335 + select GRKERNSEC_TIME
55336 + select GRKERNSEC_SIGNAL
55337 + select GRKERNSEC_CHROOT
55338 + select GRKERNSEC_CHROOT_UNIX
55339 + select GRKERNSEC_CHROOT_MOUNT
55340 + select GRKERNSEC_CHROOT_PIVOT
55341 + select GRKERNSEC_CHROOT_DOUBLE
55342 + select GRKERNSEC_CHROOT_CHDIR
55343 + select GRKERNSEC_CHROOT_MKNOD
55344 + select GRKERNSEC_PROC
55345 + select GRKERNSEC_PROC_USERGROUP
55346 + select PAX_RANDUSTACK
55347 + select PAX_ASLR
55348 + select PAX_RANDMMAP
55349 + select PAX_REFCOUNT if (X86 || SPARC64)
55350 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55351 +
55352 + help
55353 + If you say Y here, several features in addition to those included
55354 + in the low additional security level will be enabled. These
55355 + features provide even more security to your system, though in rare
55356 + cases they may be incompatible with very old or poorly written
55357 + software. If you enable this option, make sure that your auth
55358 + service (identd) is running as gid 1001. With this option,
55359 + the following features (in addition to those provided in the
55360 + low additional security level) will be enabled:
55361 +
55362 + - Failed fork logging
55363 + - Time change logging
55364 + - Signal logging
55365 + - Deny mounts in chroot
55366 + - Deny double chrooting
55367 + - Deny sysctl writes in chroot
55368 + - Deny mknod in chroot
55369 + - Deny access to abstract AF_UNIX sockets out of chroot
55370 + - Deny pivot_root in chroot
55371 + - Denied writes of /dev/kmem, /dev/mem, and /dev/port
55372 + - /proc restrictions with special GID set to 10 (usually wheel)
55373 + - Address Space Layout Randomization (ASLR)
55374 + - Prevent exploitation of most refcount overflows
55375 + - Bounds checking of copying between the kernel and userland
55376 +
55377 +config GRKERNSEC_HIGH
55378 + bool "High"
55379 + select GRKERNSEC_LINK
55380 + select GRKERNSEC_FIFO
55381 + select GRKERNSEC_DMESG
55382 + select GRKERNSEC_FORKFAIL
55383 + select GRKERNSEC_TIME
55384 + select GRKERNSEC_SIGNAL
55385 + select GRKERNSEC_CHROOT
55386 + select GRKERNSEC_CHROOT_SHMAT
55387 + select GRKERNSEC_CHROOT_UNIX
55388 + select GRKERNSEC_CHROOT_MOUNT
55389 + select GRKERNSEC_CHROOT_FCHDIR
55390 + select GRKERNSEC_CHROOT_PIVOT
55391 + select GRKERNSEC_CHROOT_DOUBLE
55392 + select GRKERNSEC_CHROOT_CHDIR
55393 + select GRKERNSEC_CHROOT_MKNOD
55394 + select GRKERNSEC_CHROOT_CAPS
55395 + select GRKERNSEC_CHROOT_SYSCTL
55396 + select GRKERNSEC_CHROOT_FINDTASK
55397 + select GRKERNSEC_SYSFS_RESTRICT
55398 + select GRKERNSEC_PROC
55399 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55400 + select GRKERNSEC_HIDESYM
55401 + select GRKERNSEC_BRUTE
55402 + select GRKERNSEC_PROC_USERGROUP
55403 + select GRKERNSEC_KMEM
55404 + select GRKERNSEC_RESLOG
55405 + select GRKERNSEC_RANDNET
55406 + select GRKERNSEC_PROC_ADD
55407 + select GRKERNSEC_CHROOT_CHMOD
55408 + select GRKERNSEC_CHROOT_NICE
55409 + select GRKERNSEC_AUDIT_MOUNT
55410 + select GRKERNSEC_MODHARDEN if (MODULES)
55411 + select GRKERNSEC_HARDEN_PTRACE
55412 + select GRKERNSEC_VM86 if (X86_32)
55413 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
55414 + select PAX
55415 + select PAX_RANDUSTACK
55416 + select PAX_ASLR
55417 + select PAX_RANDMMAP
55418 + select PAX_NOEXEC
55419 + select PAX_MPROTECT
55420 + select PAX_EI_PAX
55421 + select PAX_PT_PAX_FLAGS
55422 + select PAX_HAVE_ACL_FLAGS
55423 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
55424 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
55425 + select PAX_RANDKSTACK if (X86_TSC && X86)
55426 + select PAX_SEGMEXEC if (X86_32)
55427 + select PAX_PAGEEXEC
55428 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
55429 + select PAX_EMUTRAMP if (PARISC)
55430 + select PAX_EMUSIGRT if (PARISC)
55431 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
55432 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
55433 + select PAX_REFCOUNT if (X86 || SPARC64)
55434 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55435 + help
55436 + If you say Y here, many of the features of grsecurity will be
55437 + enabled, which will protect you against many kinds of attacks
55438 + against your system. The heightened security comes at a cost
55439 + of an increased chance of incompatibilities with rare software
55440 + on your machine. Since this security level enables PaX, you should
55441 + view <http://pax.grsecurity.net> and read about the PaX
55442 + project. While you are there, download chpax and run it on
55443 + binaries that cause problems with PaX. Also remember that
55444 + since the /proc restrictions are enabled, you must run your
55445 + identd as gid 1001. This security level enables the following
55446 + features in addition to those listed in the low and medium
55447 + security levels:
55448 +
55449 + - Additional /proc restrictions
55450 + - Chmod restrictions in chroot
55451 + - No signals, ptrace, or viewing of processes outside of chroot
55452 + - Capability restrictions in chroot
55453 + - Deny fchdir out of chroot
55454 + - Priority restrictions in chroot
55455 + - Segmentation-based implementation of PaX
55456 + - Mprotect restrictions
55457 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
55458 + - Kernel stack randomization
55459 + - Mount/unmount/remount logging
55460 + - Kernel symbol hiding
55461 + - Prevention of memory exhaustion-based exploits
55462 + - Hardening of module auto-loading
55463 + - Ptrace restrictions
55464 + - Restricted vm86 mode
55465 + - Restricted sysfs/debugfs
55466 + - Active kernel exploit response
55467 +
55468 +config GRKERNSEC_CUSTOM
55469 + bool "Custom"
55470 + help
55471 + If you say Y here, you will be able to configure every grsecurity
55472 + option, which allows you to enable many more features that aren't
55473 + covered in the basic security levels. These additional features
55474 + include TPE, socket restrictions, and the sysctl system for
55475 + grsecurity. It is advised that you read through the help for
55476 + each option to determine its usefulness in your situation.
55477 +
55478 +endchoice
55479 +
55480 +menu "Address Space Protection"
55481 +depends on GRKERNSEC
55482 +
55483 +config GRKERNSEC_KMEM
55484 + bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
55485 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
55486 + help
55487 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
55488 + be written to via mmap or otherwise to modify the running kernel.
55489 + /dev/port will also not be allowed to be opened. If you have module
55490 + support disabled, enabling this will close up four ways that are
55491 + currently used to insert malicious code into the running kernel.
55492 + Even with all these features enabled, we still highly recommend that
55493 + you use the RBAC system, as it is still possible for an attacker to
55494 + modify the running kernel through privileged I/O granted by ioperm/iopl.
55495 + If you are not using XFree86, you may be able to stop this additional
55496 + case by enabling the 'Disable privileged I/O' option. Though nothing
55497 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
55498 + but only to video memory, which is the only writing we allow in this
55499 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
55500 + not be allowed to be mprotected with PROT_WRITE later.
55501 + It is highly recommended that you say Y here if you meet all the
55502 + conditions above.
55503 +
55504 +config GRKERNSEC_VM86
55505 + bool "Restrict VM86 mode"
55506 + depends on X86_32
55507 +
55508 + help
55509 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
55510 + make use of a special execution mode on 32bit x86 processors called
55511 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
55512 + video cards and will still work with this option enabled. The purpose
55513 + of the option is to prevent exploitation of emulation errors in
55514 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
55515 + Nearly all users should be able to enable this option.
55516 +
55517 +config GRKERNSEC_IO
55518 + bool "Disable privileged I/O"
55519 + depends on X86
55520 + select RTC_CLASS
55521 + select RTC_INTF_DEV
55522 + select RTC_DRV_CMOS
55523 +
55524 + help
55525 + If you say Y here, all ioperm and iopl calls will return an error.
55526 + Ioperm and iopl can be used to modify the running kernel.
55527 + Unfortunately, some programs need this access to operate properly,
55528 + the most notable of which are XFree86 and hwclock. hwclock can be
55529 + remedied by having RTC support in the kernel, so real-time
55530 + clock support is enabled if this option is enabled, to ensure
55531 + that hwclock operates correctly. XFree86 still will not
55532 + operate correctly with this option enabled, so DO NOT CHOOSE Y
55533 + IF YOU USE XFree86. If you use XFree86 and you still want to
55534 + protect your kernel against modification, use the RBAC system.
55535 +
55536 +config GRKERNSEC_PROC_MEMMAP
55537 + bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
55538 + default y if (PAX_NOEXEC || PAX_ASLR)
55539 + depends on PAX_NOEXEC || PAX_ASLR
55540 + help
55541 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
55542 + give no information about the addresses of its mappings if
55543 + PaX features that rely on random addresses are enabled on the task.
55544 + If you use PaX it is greatly recommended that you say Y here as it
55545 + closes up a hole that makes the full ASLR useless for suid
55546 + binaries.
55547 +
55548 +config GRKERNSEC_BRUTE
55549 + bool "Deter exploit bruteforcing"
55550 + help
55551 + If you say Y here, attempts to bruteforce exploits against forking
55552 + daemons such as apache or sshd, as well as against suid/sgid binaries
55553 + will be deterred. When a child of a forking daemon is killed by PaX
55554 + or crashes due to an illegal instruction or other suspicious signal,
55555 + the parent process will be delayed 30 seconds upon every subsequent
55556 + fork until the administrator is able to assess the situation and
55557 + restart the daemon.
55558 + In the suid/sgid case, the attempt is logged, the user has all their
55559 + processes terminated, and they are prevented from executing any further
55560 + processes for 15 minutes.
55561 + It is recommended that you also enable signal logging in the auditing
55562 + section so that logs are generated when a process triggers a suspicious
55563 + signal.
55564 + If the sysctl option is enabled, a sysctl option with name
55565 + "deter_bruteforce" is created.
55566 +
55567 +config GRKERNSEC_MODHARDEN
55568 + bool "Harden module auto-loading"
55569 + depends on MODULES
55570 + help
55571 + If you say Y here, module auto-loading in response to use of some
55572 + feature implemented by an unloaded module will be restricted to
55573 + root users. Enabling this option helps defend against attacks
55574 + by unprivileged users who abuse the auto-loading behavior to
55575 + cause a vulnerable module to load that is then exploited.
55576 +
55577 + If this option prevents a legitimate use of auto-loading for a
55578 + non-root user, the administrator can execute modprobe manually
55579 + with the exact name of the module mentioned in the alert log.
55580 + Alternatively, the administrator can add the module to the list
55581 + of modules loaded at boot by modifying init scripts.
55582 +
55583 + Modification of init scripts will most likely be needed on
55584 + Ubuntu servers with encrypted home directory support enabled,
55585 + as the first non-root user logging in will cause the ecb(aes),
55586 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
55587 +
55588 +config GRKERNSEC_HIDESYM
55589 + bool "Hide kernel symbols"
55590 + help
55591 + If you say Y here, getting information on loaded modules, and
55592 + displaying all kernel symbols through a syscall will be restricted
55593 + to users with CAP_SYS_MODULE. For software compatibility reasons,
55594 + /proc/kallsyms will be restricted to the root user. The RBAC
55595 + system can hide that entry even from root.
55596 +
55597 + This option also prevents leaking of kernel addresses through
55598 + several /proc entries.
55599 +
55600 + Note that this option is only effective provided the following
55601 + conditions are met:
55602 + 1) The kernel using grsecurity is not precompiled by some distribution
55603 + 2) You have also enabled GRKERNSEC_DMESG
55604 + 3) You are using the RBAC system and hiding other files such as your
55605 + kernel image and System.map. Alternatively, enabling this option
55606 + causes the permissions on /boot, /lib/modules, and the kernel
55607 + source directory to change at compile time to prevent
55608 + reading by non-root users.
55609 + If the above conditions are met, this option will aid in providing a
55610 + useful protection against local kernel exploitation of overflows
55611 + and arbitrary read/write vulnerabilities.
55612 +
55613 +config GRKERNSEC_KERN_LOCKOUT
55614 + bool "Active kernel exploit response"
55615 + depends on X86 || ARM || PPC || SPARC
55616 + help
55617 + If you say Y here, when a PaX alert is triggered due to suspicious
55618 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
55619 + or an OOPS occurs due to bad memory accesses, instead of just
55620 + terminating the offending process (and potentially allowing
55621 + a subsequent exploit from the same user), we will take one of two
55622 + actions:
55623 + If the user was root, we will panic the system
55624 + If the user was non-root, we will log the attempt, terminate
55625 + all processes owned by the user, then prevent them from creating
55626 + any new processes until the system is restarted
55627 + This deters repeated kernel exploitation/bruteforcing attempts
55628 + and is useful for later forensics.
55629 +
55630 +endmenu
55631 +menu "Role Based Access Control Options"
55632 +depends on GRKERNSEC
55633 +
55634 +config GRKERNSEC_RBAC_DEBUG
55635 + bool
55636 +
55637 +config GRKERNSEC_NO_RBAC
55638 + bool "Disable RBAC system"
55639 + help
55640 + If you say Y here, the /dev/grsec device will be removed from the kernel,
55641 + preventing the RBAC system from being enabled. You should only say Y
55642 + here if you have no intention of using the RBAC system, so as to prevent
55643 + an attacker with root access from misusing the RBAC system to hide files
55644 + and processes when loadable module support and /dev/[k]mem have been
55645 + locked down.
55646 +
55647 +config GRKERNSEC_ACL_HIDEKERN
55648 + bool "Hide kernel processes"
55649 + help
55650 + If you say Y here, all kernel threads will be hidden to all
55651 + processes but those whose subject has the "view hidden processes"
55652 + flag.
55653 +
55654 +config GRKERNSEC_ACL_MAXTRIES
55655 + int "Maximum tries before password lockout"
55656 + default 3
55657 + help
55658 + This option enforces the maximum number of times a user can attempt
55659 + to authorize themselves with the grsecurity RBAC system before being
55660 + denied the ability to attempt authorization again for a specified time.
55661 + The lower the number, the harder it will be to brute-force a password.
55662 +
55663 +config GRKERNSEC_ACL_TIMEOUT
55664 + int "Time to wait after max password tries, in seconds"
55665 + default 30
55666 + help
55667 + This option specifies the time the user must wait after attempting to
55668 + authorize to the RBAC system with the maximum number of invalid
55669 + passwords. The higher the number, the harder it will be to brute-force
55670 + a password.
55671 +
55672 +endmenu
55673 +menu "Filesystem Protections"
55674 +depends on GRKERNSEC
55675 +
55676 +config GRKERNSEC_PROC
55677 + bool "Proc restrictions"
55678 + help
55679 + If you say Y here, the permissions of the /proc filesystem
55680 + will be altered to enhance system security and privacy. You MUST
55681 + choose either a user only restriction or a user and group restriction.
55682 + Depending upon the option you choose, you can either restrict users to
55683 + see only the processes they themselves run, or choose a group that can
55684 + view all processes and files normally restricted to root if you choose
55685 + the "restrict to user only" option. NOTE: If you're running identd as
55686 + a non-root user, you will have to run it as the group you specify here.
55687 +
55688 +config GRKERNSEC_PROC_USER
55689 + bool "Restrict /proc to user only"
55690 + depends on GRKERNSEC_PROC
55691 + help
55692 + If you say Y here, non-root users will only be able to view their own
55693 + processes, and will be restricted from viewing network-related
55694 + information and kernel symbol and module information.
55695 +
55696 +config GRKERNSEC_PROC_USERGROUP
55697 + bool "Allow special group"
55698 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
55699 + help
55700 + If you say Y here, you will be able to select a group that will be
55701 + able to view all processes and network-related information. If you've
55702 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
55703 + remain hidden. This option is useful if you want to run identd as
55704 + a non-root user.
55705 +
55706 +config GRKERNSEC_PROC_GID
55707 + int "GID for special group"
55708 + depends on GRKERNSEC_PROC_USERGROUP
55709 + default 1001
55710 +
55711 +config GRKERNSEC_PROC_ADD
55712 + bool "Additional restrictions"
55713 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
55714 + help
55715 + If you say Y here, additional restrictions will be placed on
55716 + /proc that keep normal users from viewing device information and
55717 + slabinfo information that could be useful for exploits.
55718 +
55719 +config GRKERNSEC_LINK
55720 + bool "Linking restrictions"
55721 + help
55722 + If you say Y here, /tmp race exploits will be prevented, since users
55723 + will no longer be able to follow symlinks owned by other users in
55724 + world-writable +t directories (e.g. /tmp), unless the owner of the
55725 + symlink is the owner of the directory. Users will also not be
55726 + able to hardlink to files they do not own. If the sysctl option is
55727 + enabled, a sysctl option with name "linking_restrictions" is created.
55728 +
55729 +config GRKERNSEC_FIFO
55730 + bool "FIFO restrictions"
55731 + help
55732 + If you say Y here, users will not be able to write to FIFOs they don't
55733 + own in world-writable +t directories (e.g. /tmp), unless the owner of
55734 + the FIFO is the same as the owner of the directory it's held in. If the sysctl
55735 + option is enabled, a sysctl option with name "fifo_restrictions" is
55736 + created.
55737 +
55738 +config GRKERNSEC_SYSFS_RESTRICT
55739 + bool "Sysfs/debugfs restriction"
55740 + depends on SYSFS
55741 + help
55742 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
55743 + any filesystem normally mounted under it (e.g. debugfs) will only
55744 + be accessible by root. These filesystems generally provide access
55745 + to hardware and debug information that isn't appropriate for unprivileged
55746 + users of the system. Sysfs and debugfs have also become a large source
55747 + of new vulnerabilities, ranging from infoleaks to local compromise.
55748 + There has been very little oversight with an eye toward security involved
55749 + in adding new exporters of information to these filesystems, so their
55750 + use is discouraged.
55751 + This option is equivalent to a chmod 0700 of the mount paths.
55752 +
55753 +config GRKERNSEC_ROFS
55754 + bool "Runtime read-only mount protection"
55755 + help
55756 + If you say Y here, a sysctl option with name "romount_protect" will
55757 + be created. By setting this option to 1 at runtime, filesystems
55758 + will be protected in the following ways:
55759 + * No new writable mounts will be allowed
55760 + * Existing read-only mounts won't be able to be remounted read/write
55761 + * Write operations will be denied on all block devices
55762 + This option acts independently of grsec_lock: once it is set to 1,
55763 + it cannot be turned off. Therefore, please be mindful of the resulting
55764 + behavior if this option is enabled in an init script on a read-only
55765 + filesystem. This feature is mainly intended for secure embedded systems.
55766 +
55767 +config GRKERNSEC_CHROOT
55768 + bool "Chroot jail restrictions"
55769 + help
55770 + If you say Y here, you will be able to choose several options that will
55771 + make breaking out of a chrooted jail much more difficult. If you
55772 + encounter no software incompatibilities with the following options, it
55773 + is recommended that you enable each one.
55774 +
55775 +config GRKERNSEC_CHROOT_MOUNT
55776 + bool "Deny mounts"
55777 + depends on GRKERNSEC_CHROOT
55778 + help
55779 + If you say Y here, processes inside a chroot will not be able to
55780 + mount or remount filesystems. If the sysctl option is enabled, a
55781 + sysctl option with name "chroot_deny_mount" is created.
55782 +
55783 +config GRKERNSEC_CHROOT_DOUBLE
55784 + bool "Deny double-chroots"
55785 + depends on GRKERNSEC_CHROOT
55786 + help
55787 + If you say Y here, processes inside a chroot will not be able to chroot
55788 + again outside the chroot. This is a widely used method of breaking
55789 + out of a chroot jail and should not be allowed. If the sysctl
55790 + option is enabled, a sysctl option with name
55791 + "chroot_deny_chroot" is created.
55792 +
55793 +config GRKERNSEC_CHROOT_PIVOT
55794 + bool "Deny pivot_root in chroot"
55795 + depends on GRKERNSEC_CHROOT
55796 + help
55797 + If you say Y here, processes inside a chroot will not be able to use
55798 + a function called pivot_root() that was introduced in Linux 2.3.41. It
55799 + works similarly to chroot in that it changes the root filesystem. This
55800 + function could be misused in a chrooted process to attempt to break out
55801 + of the chroot, and therefore should not be allowed. If the sysctl
55802 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
55803 + created.
55804 +
55805 +config GRKERNSEC_CHROOT_CHDIR
55806 + bool "Enforce chdir(\"/\") on all chroots"
55807 + depends on GRKERNSEC_CHROOT
55808 + help
55809 + If you say Y here, the current working directory of all newly-chrooted
55810 + applications will be set to the root directory of the chroot.
55811 + The man page on chroot(2) states:
55812 + Note that this call does not change the current working
55813 + directory, so that `.' can be outside the tree rooted at
55814 + `/'. In particular, the super-user can escape from a
55815 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
55816 +
55817 + It is recommended that you say Y here, since it's not known to break
55818 + any software. If the sysctl option is enabled, a sysctl option with
55819 + name "chroot_enforce_chdir" is created.
55820 +
55821 +config GRKERNSEC_CHROOT_CHMOD
55822 + bool "Deny (f)chmod +s"
55823 + depends on GRKERNSEC_CHROOT
55824 + help
55825 + If you say Y here, processes inside a chroot will not be able to chmod
55826 + or fchmod files to make them have suid or sgid bits. This protects
55827 + against another published method of breaking a chroot. If the sysctl
55828 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
55829 + created.
55830 +
55831 +config GRKERNSEC_CHROOT_FCHDIR
55832 + bool "Deny fchdir out of chroot"
55833 + depends on GRKERNSEC_CHROOT
55834 + help
55835 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
55836 + to a file descriptor of the chrooting process that points to a directory
55837 + outside the filesystem will be stopped. If the sysctl option
55838 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
55839 +
55840 +config GRKERNSEC_CHROOT_MKNOD
55841 + bool "Deny mknod"
55842 + depends on GRKERNSEC_CHROOT
55843 + help
55844 + If you say Y here, processes inside a chroot will not be allowed to
55845 + mknod. The problem with using mknod inside a chroot is that it
55846 + would allow an attacker to create a device entry that is the same
55847 + as one on the physical root of your system, which could be anything
55848 + from the console device to a device for your hard drive (which
55849 + they could then use to wipe the drive or steal data). It is recommended
55850 + that you say Y here, unless you run into software incompatibilities.
55851 + If the sysctl option is enabled, a sysctl option with name
55852 + "chroot_deny_mknod" is created.
55853 +
55854 +config GRKERNSEC_CHROOT_SHMAT
55855 + bool "Deny shmat() out of chroot"
55856 + depends on GRKERNSEC_CHROOT
55857 + help
55858 + If you say Y here, processes inside a chroot will not be able to attach
55859 + to shared memory segments that were created outside of the chroot jail.
55860 + It is recommended that you say Y here. If the sysctl option is enabled,
55861 + a sysctl option with name "chroot_deny_shmat" is created.
55862 +
55863 +config GRKERNSEC_CHROOT_UNIX
55864 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
55865 + depends on GRKERNSEC_CHROOT
55866 + help
55867 + If you say Y here, processes inside a chroot will not be able to
55868 + connect to abstract (meaning not belonging to a filesystem) Unix
55869 + domain sockets that were bound outside of a chroot. It is recommended
55870 + that you say Y here. If the sysctl option is enabled, a sysctl option
55871 + with name "chroot_deny_unix" is created.
55872 +
55873 +config GRKERNSEC_CHROOT_FINDTASK
55874 + bool "Protect outside processes"
55875 + depends on GRKERNSEC_CHROOT
55876 + help
55877 + If you say Y here, processes inside a chroot will not be able to
55878 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
55879 + getsid, or view any process outside of the chroot. If the sysctl
55880 + option is enabled, a sysctl option with name "chroot_findtask" is
55881 + created.
55882 +
55883 +config GRKERNSEC_CHROOT_NICE
55884 + bool "Restrict priority changes"
55885 + depends on GRKERNSEC_CHROOT
55886 + help
55887 + If you say Y here, processes inside a chroot will not be able to raise
55888 + the priority of processes in the chroot, or alter the priority of
55889 + processes outside the chroot. This provides more security than simply
55890 + removing CAP_SYS_NICE from the process' capability set. If the
55891 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
55892 + is created.
55893 +
55894 +config GRKERNSEC_CHROOT_SYSCTL
55895 + bool "Deny sysctl writes"
55896 + depends on GRKERNSEC_CHROOT
55897 + help
55898 + If you say Y here, an attacker in a chroot will not be able to
55899 + write to sysctl entries, either by sysctl(2) or through a /proc
55900 + interface. It is strongly recommended that you say Y here. If the
55901 + sysctl option is enabled, a sysctl option with name
55902 + "chroot_deny_sysctl" is created.
55903 +
55904 +config GRKERNSEC_CHROOT_CAPS
55905 + bool "Capability restrictions"
55906 + depends on GRKERNSEC_CHROOT
55907 + help
55908 + If you say Y here, the capabilities on all root processes within a
55909 + chroot jail will be lowered to stop module insertion, raw i/o,
55910 + system and net admin tasks, rebooting the system, modifying immutable
55911 + files, modifying IPC owned by another, and changing the system time.
55912 + This is left an option because it can break some apps. Disable this
55913 + if your chrooted apps are having problems performing those kinds of
55914 + tasks. If the sysctl option is enabled, a sysctl option with
55915 + name "chroot_caps" is created.
55916 +
55917 +endmenu
55918 +menu "Kernel Auditing"
55919 +depends on GRKERNSEC
55920 +
55921 +config GRKERNSEC_AUDIT_GROUP
55922 + bool "Single group for auditing"
55923 + help
55924 + If you say Y here, the exec, chdir, and (un)mount logging features
55925 + will only operate on a group you specify. This option is recommended
55926 + if you only want to watch certain users instead of having a large
55927 + amount of logs from the entire system. If the sysctl option is enabled,
55928 + a sysctl option with name "audit_group" is created.
55929 +
55930 +config GRKERNSEC_AUDIT_GID
55931 + int "GID for auditing"
55932 + depends on GRKERNSEC_AUDIT_GROUP
55933 + default 1007
55934 +
55935 +config GRKERNSEC_EXECLOG
55936 + bool "Exec logging"
55937 + help
55938 + If you say Y here, all execve() calls will be logged (since the
55939 + other exec*() calls are frontends to execve(), all execution
55940 + will be logged). Useful for shell-servers that like to keep track
55941 + of their users. If the sysctl option is enabled, a sysctl option with
55942 + name "exec_logging" is created.
55943 + WARNING: When enabled, this option will produce a LOT of logs, especially
55944 + on an active system.
55945 +
55946 +config GRKERNSEC_RESLOG
55947 + bool "Resource logging"
55948 + help
55949 + If you say Y here, all attempts to overstep resource limits will
55950 + be logged with the resource name, the requested size, and the current
55951 + limit. It is highly recommended that you say Y here. If the sysctl
55952 + option is enabled, a sysctl option with name "resource_logging" is
55953 + created. If the RBAC system is enabled, the sysctl value is ignored.
55954 +
55955 +config GRKERNSEC_CHROOT_EXECLOG
55956 + bool "Log execs within chroot"
55957 + help
55958 + If you say Y here, all executions inside a chroot jail will be logged
55959 + to syslog. This can cause a large amount of logs if certain
55960 + applications (eg. djb's daemontools) are installed on the system, and
55961 + is therefore left as an option. If the sysctl option is enabled, a
55962 + sysctl option with name "chroot_execlog" is created.
55963 +
55964 +config GRKERNSEC_AUDIT_PTRACE
55965 + bool "Ptrace logging"
55966 + help
55967 + If you say Y here, all attempts to attach to a process via ptrace
55968 + will be logged. If the sysctl option is enabled, a sysctl option
55969 + with name "audit_ptrace" is created.
55970 +
55971 +config GRKERNSEC_AUDIT_CHDIR
55972 + bool "Chdir logging"
55973 + help
55974 + If you say Y here, all chdir() calls will be logged. If the sysctl
55975 + option is enabled, a sysctl option with name "audit_chdir" is created.
55976 +
55977 +config GRKERNSEC_AUDIT_MOUNT
55978 + bool "(Un)Mount logging"
55979 + help
55980 + If you say Y here, all mounts and unmounts will be logged. If the
55981 + sysctl option is enabled, a sysctl option with name "audit_mount" is
55982 + created.
55983 +
55984 +config GRKERNSEC_SIGNAL
55985 + bool "Signal logging"
55986 + help
55987 + If you say Y here, certain important signals will be logged, such as
55988 + SIGSEGV, which will inform you when an error occurred in a program,
55989 + which in some cases could indicate a possible exploit attempt.
55990 + If the sysctl option is enabled, a sysctl option with name
55991 + "signal_logging" is created.
55992 +
55993 +config GRKERNSEC_FORKFAIL
55994 + bool "Fork failure logging"
55995 + help
55996 + If you say Y here, all failed fork() attempts will be logged.
55997 + This could suggest a fork bomb, or someone attempting to overstep
55998 + their process limit. If the sysctl option is enabled, a sysctl option
55999 + with name "forkfail_logging" is created.
56000 +
56001 +config GRKERNSEC_TIME
56002 + bool "Time change logging"
56003 + help
56004 + If you say Y here, any changes of the system clock will be logged.
56005 + If the sysctl option is enabled, a sysctl option with name
56006 + "timechange_logging" is created.
56007 +
56008 +config GRKERNSEC_PROC_IPADDR
56009 + bool "/proc/<pid>/ipaddr support"
56010 + help
56011 + If you say Y here, a new entry will be added to each /proc/<pid>
56012 + directory that contains the IP address of the user associated with the task.
56013 + The IP is carried across local TCP and AF_UNIX stream sockets.
56014 + This information can be useful for IDS/IPSes to perform remote response
56015 + to a local attack. The entry is readable by only the owner of the
56016 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
56017 + the RBAC system), and thus does not create privacy concerns.
56018 +
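As a concrete illustration of the entry described above, the short userspace reader below prints the ipaddr value for a given PID. It is a hypothetical sketch based only on the help text (the path layout and one-line contents are assumptions), not code taken from this patch.

    #include <stdio.h>

    /* Hypothetical reader for /proc/<pid>/ipaddr as described in the
     * GRKERNSEC_PROC_IPADDR help text above. */
    int main(int argc, char **argv)
    {
            char path[64], addr[64];
            FILE *f;

            snprintf(path, sizeof(path), "/proc/%s/ipaddr",
                     argc > 1 ? argv[1] : "self");
            f = fopen(path, "r");
            if (!f)
                    return 1;       /* no entry, or not the task owner */
            if (fgets(addr, sizeof(addr), f))
                    printf("%s", addr);
            fclose(f);
            return 0;
    }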
56019 +config GRKERNSEC_RWXMAP_LOG
56020 + bool 'Denied RWX mmap/mprotect logging'
56021 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
56022 + help
56023 + If you say Y here, calls to mmap() and mprotect() with explicit
56024 + usage of PROT_WRITE and PROT_EXEC together will be logged when
56025 + denied by the PAX_MPROTECT feature. If the sysctl option is
56026 + enabled, a sysctl option with name "rwxmap_logging" is created.
56027 +
56028 +config GRKERNSEC_AUDIT_TEXTREL
56029 + bool 'ELF text relocations logging (READ HELP)'
56030 + depends on PAX_MPROTECT
56031 + help
56032 + If you say Y here, text relocations will be logged with the filename
56033 + of the offending library or binary. The purpose of the feature is
56034 + to help Linux distribution developers get rid of libraries and
56035 + binaries that need text relocations which hinder the future progress
56036 + of PaX. Only Linux distribution developers should say Y here, and
56037 + never on a production machine, as this option creates an information
56038 + leak that could aid an attacker in defeating the randomization of
56039 + a single memory region. If the sysctl option is enabled, a sysctl
56040 + option with name "audit_textrel" is created.
56041 +
56042 +endmenu
56043 +
56044 +menu "Executable Protections"
56045 +depends on GRKERNSEC
56046 +
56047 +config GRKERNSEC_DMESG
56048 + bool "Dmesg(8) restriction"
56049 + help
56050 + If you say Y here, non-root users will not be able to use dmesg(8)
56051 + to view up to the last 4kb of messages in the kernel's log buffer.
56052 + The kernel's log buffer often contains kernel addresses and other
56053 + identifying information useful to an attacker in fingerprinting a
56054 + system for a targeted exploit.
56055 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
56056 + created.
56057 +
56058 +config GRKERNSEC_HARDEN_PTRACE
56059 + bool "Deter ptrace-based process snooping"
56060 + help
56061 + If you say Y here, TTY sniffers and other malicious monitoring
56062 + programs implemented through ptrace will be defeated. If you
56063 + have been using the RBAC system, this option has already been
56064 + enabled for several years for all users, with the ability to make
56065 + fine-grained exceptions.
56066 +
56067 + This option only affects the ability of non-root users to ptrace
56068 + processes that are not a descendant of the ptracing process.
56069 + This means that strace ./binary and gdb ./binary will still work,
56070 + but attaching to arbitrary processes will not. If the sysctl
56071 + option is enabled, a sysctl option with name "harden_ptrace" is
56072 + created.
56073 +
56074 +config GRKERNSEC_TPE
56075 + bool "Trusted Path Execution (TPE)"
56076 + help
56077 + If you say Y here, you will be able to choose a gid to add to the
56078 + supplementary groups of users you want to mark as "untrusted."
56079 + These users will not be able to execute any files that are not in
56080 + root-owned directories writable only by root. If the sysctl option
56081 + is enabled, a sysctl option with name "tpe" is created.
56082 +
56083 +config GRKERNSEC_TPE_ALL
56084 + bool "Partially restrict all non-root users"
56085 + depends on GRKERNSEC_TPE
56086 + help
56087 + If you say Y here, all non-root users will be covered under
56088 + a weaker TPE restriction. This is separate from, and in addition to,
56089 + the main TPE options that you have selected elsewhere. Thus, if a
56090 + "trusted" GID is chosen, this restriction applies to even that GID.
56091 + Under this restriction, all non-root users will only be allowed to
56092 + execute files in directories they own that are not group or
56093 + world-writable, or in directories owned by root and writable only by
56094 + root. If the sysctl option is enabled, a sysctl option with name
56095 + "tpe_restrict_all" is created.
56096 +
56097 +config GRKERNSEC_TPE_INVERT
56098 + bool "Invert GID option"
56099 + depends on GRKERNSEC_TPE
56100 + help
56101 + If you say Y here, the group you specify in the TPE configuration will
56102 + decide what group TPE restrictions will be *disabled* for. This
56103 + option is useful if you want TPE restrictions to be applied to most
56104 + users on the system. If the sysctl option is enabled, a sysctl option
56105 + with name "tpe_invert" is created. Unlike other sysctl options, this
56106 + entry will default to on for backward-compatibility.
56107 +
56108 +config GRKERNSEC_TPE_GID
56109 + int "GID for untrusted users"
56110 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
56111 + default 1005
56112 + help
56113 + Setting this GID determines what group TPE restrictions will be
56114 + *enabled* for. If the sysctl option is enabled, a sysctl option
56115 + with name "tpe_gid" is created.
56116 +
56117 +config GRKERNSEC_TPE_GID
56118 + int "GID for trusted users"
56119 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
56120 + default 1005
56121 + help
56122 + Setting this GID determines what group TPE restrictions will be
56123 + *disabled* for. If the sysctl option is enabled, a sysctl option
56124 + with name "tpe_gid" is created.
56125 +
56126 +endmenu
56127 +menu "Network Protections"
56128 +depends on GRKERNSEC
56129 +
56130 +config GRKERNSEC_RANDNET
56131 + bool "Larger entropy pools"
56132 + help
56133 + If you say Y here, the entropy pools used for many features of Linux
56134 + and grsecurity will be doubled in size. Since several grsecurity
56135 + features use additional randomness, it is recommended that you say Y
56136 + here. Saying Y here has a similar effect as modifying
56137 + /proc/sys/kernel/random/poolsize.
56138 +
56139 +config GRKERNSEC_BLACKHOLE
56140 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
56141 + depends on NET
56142 + help
56143 + If you say Y here, neither TCP resets nor ICMP
56144 + destination-unreachable packets will be sent in response to packets
56145 + sent to ports for which no associated listening process exists.
56146 + This feature supports both IPv4 and IPv6 and exempts the
56147 + loopback interface from blackholing. Enabling this feature
56148 + makes a host more resilient to DoS attacks and reduces network
56149 + visibility against scanners.
56150 +
56151 + The blackhole feature as-implemented is equivalent to the FreeBSD
56152 + blackhole feature, as it prevents RST responses to all packets, not
56153 + just SYNs. Under most application behavior this causes no
56154 + problems, but applications (like haproxy) may not close certain
56155 + connections in a way that cleanly terminates them on the remote
56156 + end, leaving the remote host in LAST_ACK state. Because of this
56157 + side-effect and to prevent intentional LAST_ACK DoSes, this
56158 + feature also adds automatic mitigation against such attacks.
56159 + The mitigation drastically reduces the amount of time a socket
56160 + can spend in LAST_ACK state. If you're using haproxy and not
56161 + all servers it connects to have this option enabled, consider
56162 + disabling this feature on the haproxy host.
56163 +
56164 + If the sysctl option is enabled, two sysctl options with names
56165 + "ip_blackhole" and "lastack_retries" will be created.
56166 + While "ip_blackhole" takes the standard zero/non-zero on/off
56167 + toggle, "lastack_retries" uses the same kinds of values as
56168 + "tcp_retries1" and "tcp_retries2". The default value of 4
56169 + prevents a socket from lasting more than 45 seconds in LAST_ACK
56170 + state.
56171 +
56172 +config GRKERNSEC_SOCKET
56173 + bool "Socket restrictions"
56174 + depends on NET
56175 + help
56176 + If you say Y here, you will be able to choose from several options.
56177 + If you assign a GID on your system and add it to the supplementary
56178 + groups of users you want to restrict socket access to, this patch
56179 + will apply up to three restrictions, based on the option(s) you choose.
56180 +
56181 +config GRKERNSEC_SOCKET_ALL
56182 + bool "Deny any sockets to group"
56183 + depends on GRKERNSEC_SOCKET
56184 + help
56185 + If you say Y here, you will be able to choose a GID whose users will
56186 + be unable to connect to other hosts from your machine or run server
56187 + applications from your machine. If the sysctl option is enabled, a
56188 + sysctl option with name "socket_all" is created.
56189 +
56190 +config GRKERNSEC_SOCKET_ALL_GID
56191 + int "GID to deny all sockets for"
56192 + depends on GRKERNSEC_SOCKET_ALL
56193 + default 1004
56194 + help
56195 + Here you can choose the GID to disable socket access for. Remember to
56196 + add the users you want socket access disabled for to the GID
56197 + specified here. If the sysctl option is enabled, a sysctl option
56198 + with name "socket_all_gid" is created.
56199 +
56200 +config GRKERNSEC_SOCKET_CLIENT
56201 + bool "Deny client sockets to group"
56202 + depends on GRKERNSEC_SOCKET
56203 + help
56204 + If you say Y here, you will be able to choose a GID whose users will
56205 + be unable to connect to other hosts from your machine, but will be
56206 + able to run servers. If this option is enabled, all users in the group
56207 + you specify will have to use passive mode when initiating ftp transfers
56208 + from the shell on your machine. If the sysctl option is enabled, a
56209 + sysctl option with name "socket_client" is created.
56210 +
56211 +config GRKERNSEC_SOCKET_CLIENT_GID
56212 + int "GID to deny client sockets for"
56213 + depends on GRKERNSEC_SOCKET_CLIENT
56214 + default 1003
56215 + help
56216 + Here you can choose the GID to disable client socket access for.
56217 + Remember to add the users you want client socket access disabled for to
56218 + the GID specified here. If the sysctl option is enabled, a sysctl
56219 + option with name "socket_client_gid" is created.
56220 +
56221 +config GRKERNSEC_SOCKET_SERVER
56222 + bool "Deny server sockets to group"
56223 + depends on GRKERNSEC_SOCKET
56224 + help
56225 + If you say Y here, you will be able to choose a GID whose users will
56226 + be unable to run server applications from your machine. If the sysctl
56227 + option is enabled, a sysctl option with name "socket_server" is created.
56228 +
56229 +config GRKERNSEC_SOCKET_SERVER_GID
56230 + int "GID to deny server sockets for"
56231 + depends on GRKERNSEC_SOCKET_SERVER
56232 + default 1002
56233 + help
56234 + Here you can choose the GID to disable server socket access for.
56235 + Remember to add the users you want server socket access disabled for to
56236 + the GID specified here. If the sysctl option is enabled, a sysctl
56237 + option with name "socket_server_gid" is created.
56238 +
56239 +endmenu
56240 +menu "Sysctl support"
56241 +depends on GRKERNSEC && SYSCTL
56242 +
56243 +config GRKERNSEC_SYSCTL
56244 + bool "Sysctl support"
56245 + help
56246 + If you say Y here, you will be able to change the options that
56247 + grsecurity runs with at bootup, without having to recompile your
56248 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
56249 + to enable (1) or disable (0) various features. All the sysctl entries
56250 + are mutable until the "grsec_lock" entry is set to a non-zero value.
56251 + All features enabled in the kernel configuration are disabled at boot
56252 + if you do not say Y to the "Turn on features by default" option.
56253 + All options should be set at startup, and the grsec_lock entry should
56254 + be set to a non-zero value after all the options are set.
56255 + *THIS IS EXTREMELY IMPORTANT*
56256 +
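A minimal sketch of the boot-time sequence the help text above recommends, assuming GRKERNSEC_SYSCTL and GRKERNSEC_EXECLOG were compiled in; the file names come from the help texts, while the helper and its error handling are illustrative only.

    #include <stdio.h>

    static int write_sysctl(const char *path, const char *val)
    {
            FILE *f = fopen(path, "w");

            if (!f)
                    return -1;
            fputs(val, f);
            return fclose(f);
    }

    int main(void)
    {
            /* enable (1) or disable (0) individual features first ... */
            write_sysctl("/proc/sys/kernel/grsecurity/exec_logging", "1");
            /* ... then lock the interface; no further changes are accepted
             * once grsec_lock is non-zero */
            return write_sysctl("/proc/sys/kernel/grsecurity/grsec_lock", "1");
    }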
56257 +config GRKERNSEC_SYSCTL_DISTRO
56258 + bool "Extra sysctl support for distro makers (READ HELP)"
56259 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
56260 + help
56261 + If you say Y here, additional sysctl options will be created
56262 + for features that affect processes running as root. Therefore,
56263 + it is critical when using this option that the grsec_lock entry be
56264 + enabled after boot. Only distros that ship prebuilt kernel packages
56265 + with this option enabled and that can ensure grsec_lock is enabled
56266 + after boot should use this option.
56267 + *Failure to set grsec_lock after boot makes all grsec features
56268 + this option covers useless*
56269 +
56270 + Currently this option creates the following sysctl entries:
56271 + "Disable Privileged I/O": "disable_priv_io"
56272 +
56273 +config GRKERNSEC_SYSCTL_ON
56274 + bool "Turn on features by default"
56275 + depends on GRKERNSEC_SYSCTL
56276 + help
56277 + If you say Y here, the features enabled in your kernel configuration
56278 + will also be enabled at boot time, rather than starting out disabled
56279 + until toggled via sysctl. It is recommended you say Y here unless
56280 + there is some reason you would want all sysctl-tunable features to
56281 + be disabled by default. As mentioned elsewhere, it is important
56282 + to enable the grsec_lock entry once you have finished modifying
56283 + the sysctl entries.
56284 +
56285 +endmenu
56286 +menu "Logging Options"
56287 +depends on GRKERNSEC
56288 +
56289 +config GRKERNSEC_FLOODTIME
56290 + int "Seconds in between log messages (minimum)"
56291 + default 10
56292 + help
56293 + This option allows you to enforce a minimum number of seconds between
56294 + grsecurity log messages. The default should be suitable for most
56295 + people; however, if you choose to change it, pick a value small enough
56296 + to allow informative logs to be produced, but large enough to
56297 + prevent flooding.
56298 +
56299 +config GRKERNSEC_FLOODBURST
56300 + int "Number of messages in a burst (maximum)"
56301 + default 4
56302 + help
56303 + This option allows you to choose the maximum number of messages allowed
56304 + within the flood time interval you chose in a separate option. The
56305 + default should be suitable for most people; however, if you find that
56306 + many of your logs are being interpreted as flooding, you may want to
56307 + raise this value.
56308 +
56309 +endmenu
56310 +
56311 +endmenu
56312 diff -urNp linux-2.6.32.45/grsecurity/Makefile linux-2.6.32.45/grsecurity/Makefile
56313 --- linux-2.6.32.45/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
56314 +++ linux-2.6.32.45/grsecurity/Makefile 2011-08-21 18:54:34.000000000 -0400
56315 @@ -0,0 +1,34 @@
56316 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
56317 +# during 2001-2009 it was completely redesigned by Brad Spengler
56318 +# into an RBAC system
56319 +#
56320 +# All code in this directory and various hooks inserted throughout the kernel
56321 +# are copyright Brad Spengler - Open Source Security, Inc., and released
56322 +# under the GPL v2 or higher
56323 +
56324 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
56325 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
56326 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
56327 +
56328 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
56329 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
56330 + gracl_learn.o grsec_log.o
56331 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
56332 +
56333 +ifdef CONFIG_NET
56334 +obj-y += grsec_sock.o
56335 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
56336 +endif
56337 +
56338 +ifndef CONFIG_GRKERNSEC
56339 +obj-y += grsec_disabled.o
56340 +endif
56341 +
56342 +ifdef CONFIG_GRKERNSEC_HIDESYM
56343 +extra-y := grsec_hidesym.o
56344 +$(obj)/grsec_hidesym.o:
56345 + @-chmod -f 500 /boot
56346 + @-chmod -f 500 /lib/modules
56347 + @-chmod -f 700 .
56348 + @echo ' grsec: protected kernel image paths'
56349 +endif
56350 diff -urNp linux-2.6.32.45/include/acpi/acpi_bus.h linux-2.6.32.45/include/acpi/acpi_bus.h
56351 --- linux-2.6.32.45/include/acpi/acpi_bus.h 2011-03-27 14:31:47.000000000 -0400
56352 +++ linux-2.6.32.45/include/acpi/acpi_bus.h 2011-08-05 20:33:55.000000000 -0400
56353 @@ -107,7 +107,7 @@ struct acpi_device_ops {
56354 acpi_op_bind bind;
56355 acpi_op_unbind unbind;
56356 acpi_op_notify notify;
56357 -};
56358 +} __no_const;
56359
56360 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
56361
56362 diff -urNp linux-2.6.32.45/include/acpi/acpi_drivers.h linux-2.6.32.45/include/acpi/acpi_drivers.h
56363 --- linux-2.6.32.45/include/acpi/acpi_drivers.h 2011-03-27 14:31:47.000000000 -0400
56364 +++ linux-2.6.32.45/include/acpi/acpi_drivers.h 2011-04-17 15:56:46.000000000 -0400
56365 @@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acp
56366 Dock Station
56367 -------------------------------------------------------------------------- */
56368 struct acpi_dock_ops {
56369 - acpi_notify_handler handler;
56370 - acpi_notify_handler uevent;
56371 + const acpi_notify_handler handler;
56372 + const acpi_notify_handler uevent;
56373 };
56374
56375 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
56376 @@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle ha
56377 extern int register_dock_notifier(struct notifier_block *nb);
56378 extern void unregister_dock_notifier(struct notifier_block *nb);
56379 extern int register_hotplug_dock_device(acpi_handle handle,
56380 - struct acpi_dock_ops *ops,
56381 + const struct acpi_dock_ops *ops,
56382 void *context);
56383 extern void unregister_hotplug_dock_device(acpi_handle handle);
56384 #else
56385 @@ -144,7 +144,7 @@ static inline void unregister_dock_notif
56386 {
56387 }
56388 static inline int register_hotplug_dock_device(acpi_handle handle,
56389 - struct acpi_dock_ops *ops,
56390 + const struct acpi_dock_ops *ops,
56391 void *context)
56392 {
56393 return -ENODEV;
56394 diff -urNp linux-2.6.32.45/include/asm-generic/atomic-long.h linux-2.6.32.45/include/asm-generic/atomic-long.h
56395 --- linux-2.6.32.45/include/asm-generic/atomic-long.h 2011-03-27 14:31:47.000000000 -0400
56396 +++ linux-2.6.32.45/include/asm-generic/atomic-long.h 2011-07-13 22:21:25.000000000 -0400
56397 @@ -22,6 +22,12 @@
56398
56399 typedef atomic64_t atomic_long_t;
56400
56401 +#ifdef CONFIG_PAX_REFCOUNT
56402 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
56403 +#else
56404 +typedef atomic64_t atomic_long_unchecked_t;
56405 +#endif
56406 +
56407 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
56408
56409 static inline long atomic_long_read(atomic_long_t *l)
56410 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
56411 return (long)atomic64_read(v);
56412 }
56413
56414 +#ifdef CONFIG_PAX_REFCOUNT
56415 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
56416 +{
56417 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56418 +
56419 + return (long)atomic64_read_unchecked(v);
56420 +}
56421 +#endif
56422 +
56423 static inline void atomic_long_set(atomic_long_t *l, long i)
56424 {
56425 atomic64_t *v = (atomic64_t *)l;
56426 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
56427 atomic64_set(v, i);
56428 }
56429
56430 +#ifdef CONFIG_PAX_REFCOUNT
56431 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
56432 +{
56433 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56434 +
56435 + atomic64_set_unchecked(v, i);
56436 +}
56437 +#endif
56438 +
56439 static inline void atomic_long_inc(atomic_long_t *l)
56440 {
56441 atomic64_t *v = (atomic64_t *)l;
56442 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
56443 atomic64_inc(v);
56444 }
56445
56446 +#ifdef CONFIG_PAX_REFCOUNT
56447 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
56448 +{
56449 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56450 +
56451 + atomic64_inc_unchecked(v);
56452 +}
56453 +#endif
56454 +
56455 static inline void atomic_long_dec(atomic_long_t *l)
56456 {
56457 atomic64_t *v = (atomic64_t *)l;
56458 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
56459 atomic64_dec(v);
56460 }
56461
56462 +#ifdef CONFIG_PAX_REFCOUNT
56463 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
56464 +{
56465 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56466 +
56467 + atomic64_dec_unchecked(v);
56468 +}
56469 +#endif
56470 +
56471 static inline void atomic_long_add(long i, atomic_long_t *l)
56472 {
56473 atomic64_t *v = (atomic64_t *)l;
56474 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long
56475 atomic64_add(i, v);
56476 }
56477
56478 +#ifdef CONFIG_PAX_REFCOUNT
56479 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
56480 +{
56481 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56482 +
56483 + atomic64_add_unchecked(i, v);
56484 +}
56485 +#endif
56486 +
56487 static inline void atomic_long_sub(long i, atomic_long_t *l)
56488 {
56489 atomic64_t *v = (atomic64_t *)l;
56490 @@ -115,6 +166,15 @@ static inline long atomic_long_inc_retur
56491 return (long)atomic64_inc_return(v);
56492 }
56493
56494 +#ifdef CONFIG_PAX_REFCOUNT
56495 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
56496 +{
56497 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56498 +
56499 + return (long)atomic64_inc_return_unchecked(v);
56500 +}
56501 +#endif
56502 +
56503 static inline long atomic_long_dec_return(atomic_long_t *l)
56504 {
56505 atomic64_t *v = (atomic64_t *)l;
56506 @@ -140,6 +200,12 @@ static inline long atomic_long_add_unles
56507
56508 typedef atomic_t atomic_long_t;
56509
56510 +#ifdef CONFIG_PAX_REFCOUNT
56511 +typedef atomic_unchecked_t atomic_long_unchecked_t;
56512 +#else
56513 +typedef atomic_t atomic_long_unchecked_t;
56514 +#endif
56515 +
56516 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
56517 static inline long atomic_long_read(atomic_long_t *l)
56518 {
56519 @@ -148,6 +214,15 @@ static inline long atomic_long_read(atom
56520 return (long)atomic_read(v);
56521 }
56522
56523 +#ifdef CONFIG_PAX_REFCOUNT
56524 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
56525 +{
56526 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56527 +
56528 + return (long)atomic_read_unchecked(v);
56529 +}
56530 +#endif
56531 +
56532 static inline void atomic_long_set(atomic_long_t *l, long i)
56533 {
56534 atomic_t *v = (atomic_t *)l;
56535 @@ -155,6 +230,15 @@ static inline void atomic_long_set(atomi
56536 atomic_set(v, i);
56537 }
56538
56539 +#ifdef CONFIG_PAX_REFCOUNT
56540 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
56541 +{
56542 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56543 +
56544 + atomic_set_unchecked(v, i);
56545 +}
56546 +#endif
56547 +
56548 static inline void atomic_long_inc(atomic_long_t *l)
56549 {
56550 atomic_t *v = (atomic_t *)l;
56551 @@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomi
56552 atomic_inc(v);
56553 }
56554
56555 +#ifdef CONFIG_PAX_REFCOUNT
56556 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
56557 +{
56558 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56559 +
56560 + atomic_inc_unchecked(v);
56561 +}
56562 +#endif
56563 +
56564 static inline void atomic_long_dec(atomic_long_t *l)
56565 {
56566 atomic_t *v = (atomic_t *)l;
56567 @@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomi
56568 atomic_dec(v);
56569 }
56570
56571 +#ifdef CONFIG_PAX_REFCOUNT
56572 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
56573 +{
56574 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56575 +
56576 + atomic_dec_unchecked(v);
56577 +}
56578 +#endif
56579 +
56580 static inline void atomic_long_add(long i, atomic_long_t *l)
56581 {
56582 atomic_t *v = (atomic_t *)l;
56583 @@ -176,6 +278,15 @@ static inline void atomic_long_add(long
56584 atomic_add(i, v);
56585 }
56586
56587 +#ifdef CONFIG_PAX_REFCOUNT
56588 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
56589 +{
56590 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56591 +
56592 + atomic_add_unchecked(i, v);
56593 +}
56594 +#endif
56595 +
56596 static inline void atomic_long_sub(long i, atomic_long_t *l)
56597 {
56598 atomic_t *v = (atomic_t *)l;
56599 @@ -232,6 +343,15 @@ static inline long atomic_long_inc_retur
56600 return (long)atomic_inc_return(v);
56601 }
56602
56603 +#ifdef CONFIG_PAX_REFCOUNT
56604 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
56605 +{
56606 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56607 +
56608 + return (long)atomic_inc_return_unchecked(v);
56609 +}
56610 +#endif
56611 +
56612 static inline long atomic_long_dec_return(atomic_long_t *l)
56613 {
56614 atomic_t *v = (atomic_t *)l;
56615 @@ -255,4 +375,47 @@ static inline long atomic_long_add_unles
56616
56617 #endif /* BITS_PER_LONG == 64 */
56618
56619 +#ifdef CONFIG_PAX_REFCOUNT
56620 +static inline void pax_refcount_needs_these_functions(void)
56621 +{
56622 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
56623 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
56624 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
56625 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
56626 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
56627 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
56628 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
56629 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
56630 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
56631 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
56632 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
56633 +
56634 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
56635 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
56636 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
56637 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
56638 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
56639 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
56640 +}
56641 +#else
56642 +#define atomic_read_unchecked(v) atomic_read(v)
56643 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
56644 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
56645 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
56646 +#define atomic_inc_unchecked(v) atomic_inc(v)
56647 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
56648 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
56649 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
56650 +#define atomic_dec_unchecked(v) atomic_dec(v)
56651 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
56652 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
56653 +
56654 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
56655 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
56656 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
56657 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
56658 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
56659 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
56660 +#endif
56661 +
56662 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
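The *_unchecked types and helpers added above exist so that, when PAX_REFCOUNT is enabled, ordinary atomic_t operations can be hardened against reference-count overflow while counters that are allowed to wrap (statistics and the like, as in the atomic_unchecked_t conversions later in this patch) keep plain semantics. The userspace model below is only an illustration of that split, not the kernel implementation; the overflow handling shown is a stand-in for the real detection logic.

    #include <limits.h>
    #include <stdio.h>

    typedef struct { int counter; } atomic_t;
    typedef struct { int counter; } atomic_unchecked_t;

    static void atomic_inc(atomic_t *v)
    {
            if (v->counter == INT_MAX) {
                    /* stand-in for the PAX_REFCOUNT overflow report */
                    fprintf(stderr, "refcount overflow caught\n");
                    return;
            }
            v->counter++;
    }

    static void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
            /* statistics counter: wrapping is tolerated */
            v->counter = (int)((unsigned int)v->counter + 1);
    }

    int main(void)
    {
            atomic_t ref = { INT_MAX };
            atomic_unchecked_t stat = { INT_MAX };

            atomic_inc(&ref);               /* detected, value preserved */
            atomic_inc_unchecked(&stat);    /* wraps silently */
            printf("ref=%d stat=%d\n", ref.counter, stat.counter);
            return 0;
    }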
56663 diff -urNp linux-2.6.32.45/include/asm-generic/bug.h linux-2.6.32.45/include/asm-generic/bug.h
56664 --- linux-2.6.32.45/include/asm-generic/bug.h 2011-07-13 17:23:04.000000000 -0400
56665 +++ linux-2.6.32.45/include/asm-generic/bug.h 2011-08-21 17:56:07.000000000 -0400
56666 @@ -105,11 +105,11 @@ extern void warn_slowpath_null(const cha
56667
56668 #else /* !CONFIG_BUG */
56669 #ifndef HAVE_ARCH_BUG
56670 -#define BUG() do {} while(0)
56671 +#define BUG() do { for (;;) ; } while(0)
56672 #endif
56673
56674 #ifndef HAVE_ARCH_BUG_ON
56675 -#define BUG_ON(condition) do { if (condition) ; } while(0)
56676 +#define BUG_ON(condition) do { if (condition) for (;;) ; } while(0)
56677 #endif
56678
56679 #ifndef HAVE_ARCH_WARN_ON
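The point of replacing the empty !CONFIG_BUG definitions with infinite loops is that callers treat BUG() as something that never returns; with the old no-op definition, control could fall off the end of a non-void function. A small self-contained illustration (pick() is hypothetical, not from the patch):

    #define BUG() do { for (;;) ; } while (0)   /* the patched !CONFIG_BUG form */

    static int pick(int x)
    {
            switch (x) {
            case 0:
                    return 10;
            case 1:
                    return 20;
            }
            BUG();  /* with the old "do {} while(0)" definition, execution
                     * would fall off the end here and return garbage */
    }

    int main(void)
    {
            return pick(0) == 10 ? 0 : 1;
    }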
56680 diff -urNp linux-2.6.32.45/include/asm-generic/cache.h linux-2.6.32.45/include/asm-generic/cache.h
56681 --- linux-2.6.32.45/include/asm-generic/cache.h 2011-03-27 14:31:47.000000000 -0400
56682 +++ linux-2.6.32.45/include/asm-generic/cache.h 2011-07-06 19:53:33.000000000 -0400
56683 @@ -6,7 +6,7 @@
56684 * cache lines need to provide their own cache.h.
56685 */
56686
56687 -#define L1_CACHE_SHIFT 5
56688 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
56689 +#define L1_CACHE_SHIFT 5UL
56690 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
56691
56692 #endif /* __ASM_GENERIC_CACHE_H */
56693 diff -urNp linux-2.6.32.45/include/asm-generic/dma-mapping-common.h linux-2.6.32.45/include/asm-generic/dma-mapping-common.h
56694 --- linux-2.6.32.45/include/asm-generic/dma-mapping-common.h 2011-03-27 14:31:47.000000000 -0400
56695 +++ linux-2.6.32.45/include/asm-generic/dma-mapping-common.h 2011-04-17 15:56:46.000000000 -0400
56696 @@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_
56697 enum dma_data_direction dir,
56698 struct dma_attrs *attrs)
56699 {
56700 - struct dma_map_ops *ops = get_dma_ops(dev);
56701 + const struct dma_map_ops *ops = get_dma_ops(dev);
56702 dma_addr_t addr;
56703
56704 kmemcheck_mark_initialized(ptr, size);
56705 @@ -30,7 +30,7 @@ static inline void dma_unmap_single_attr
56706 enum dma_data_direction dir,
56707 struct dma_attrs *attrs)
56708 {
56709 - struct dma_map_ops *ops = get_dma_ops(dev);
56710 + const struct dma_map_ops *ops = get_dma_ops(dev);
56711
56712 BUG_ON(!valid_dma_direction(dir));
56713 if (ops->unmap_page)
56714 @@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struc
56715 int nents, enum dma_data_direction dir,
56716 struct dma_attrs *attrs)
56717 {
56718 - struct dma_map_ops *ops = get_dma_ops(dev);
56719 + const struct dma_map_ops *ops = get_dma_ops(dev);
56720 int i, ents;
56721 struct scatterlist *s;
56722
56723 @@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(st
56724 int nents, enum dma_data_direction dir,
56725 struct dma_attrs *attrs)
56726 {
56727 - struct dma_map_ops *ops = get_dma_ops(dev);
56728 + const struct dma_map_ops *ops = get_dma_ops(dev);
56729
56730 BUG_ON(!valid_dma_direction(dir));
56731 debug_dma_unmap_sg(dev, sg, nents, dir);
56732 @@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(st
56733 size_t offset, size_t size,
56734 enum dma_data_direction dir)
56735 {
56736 - struct dma_map_ops *ops = get_dma_ops(dev);
56737 + const struct dma_map_ops *ops = get_dma_ops(dev);
56738 dma_addr_t addr;
56739
56740 kmemcheck_mark_initialized(page_address(page) + offset, size);
56741 @@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(st
56742 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
56743 size_t size, enum dma_data_direction dir)
56744 {
56745 - struct dma_map_ops *ops = get_dma_ops(dev);
56746 + const struct dma_map_ops *ops = get_dma_ops(dev);
56747
56748 BUG_ON(!valid_dma_direction(dir));
56749 if (ops->unmap_page)
56750 @@ -97,7 +97,7 @@ static inline void dma_sync_single_for_c
56751 size_t size,
56752 enum dma_data_direction dir)
56753 {
56754 - struct dma_map_ops *ops = get_dma_ops(dev);
56755 + const struct dma_map_ops *ops = get_dma_ops(dev);
56756
56757 BUG_ON(!valid_dma_direction(dir));
56758 if (ops->sync_single_for_cpu)
56759 @@ -109,7 +109,7 @@ static inline void dma_sync_single_for_d
56760 dma_addr_t addr, size_t size,
56761 enum dma_data_direction dir)
56762 {
56763 - struct dma_map_ops *ops = get_dma_ops(dev);
56764 + const struct dma_map_ops *ops = get_dma_ops(dev);
56765
56766 BUG_ON(!valid_dma_direction(dir));
56767 if (ops->sync_single_for_device)
56768 @@ -123,7 +123,7 @@ static inline void dma_sync_single_range
56769 size_t size,
56770 enum dma_data_direction dir)
56771 {
56772 - struct dma_map_ops *ops = get_dma_ops(dev);
56773 + const struct dma_map_ops *ops = get_dma_ops(dev);
56774
56775 BUG_ON(!valid_dma_direction(dir));
56776 if (ops->sync_single_range_for_cpu) {
56777 @@ -140,7 +140,7 @@ static inline void dma_sync_single_range
56778 size_t size,
56779 enum dma_data_direction dir)
56780 {
56781 - struct dma_map_ops *ops = get_dma_ops(dev);
56782 + const struct dma_map_ops *ops = get_dma_ops(dev);
56783
56784 BUG_ON(!valid_dma_direction(dir));
56785 if (ops->sync_single_range_for_device) {
56786 @@ -155,7 +155,7 @@ static inline void
56787 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
56788 int nelems, enum dma_data_direction dir)
56789 {
56790 - struct dma_map_ops *ops = get_dma_ops(dev);
56791 + const struct dma_map_ops *ops = get_dma_ops(dev);
56792
56793 BUG_ON(!valid_dma_direction(dir));
56794 if (ops->sync_sg_for_cpu)
56795 @@ -167,7 +167,7 @@ static inline void
56796 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
56797 int nelems, enum dma_data_direction dir)
56798 {
56799 - struct dma_map_ops *ops = get_dma_ops(dev);
56800 + const struct dma_map_ops *ops = get_dma_ops(dev);
56801
56802 BUG_ON(!valid_dma_direction(dir));
56803 if (ops->sync_sg_for_device)
56804 diff -urNp linux-2.6.32.45/include/asm-generic/emergency-restart.h linux-2.6.32.45/include/asm-generic/emergency-restart.h
56805 --- linux-2.6.32.45/include/asm-generic/emergency-restart.h 2011-03-27 14:31:47.000000000 -0400
56806 +++ linux-2.6.32.45/include/asm-generic/emergency-restart.h 2011-08-21 19:17:17.000000000 -0400
56807 @@ -1,7 +1,7 @@
56808 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
56809 #define _ASM_GENERIC_EMERGENCY_RESTART_H
56810
56811 -static inline void machine_emergency_restart(void)
56812 +static inline __noreturn void machine_emergency_restart(void)
56813 {
56814 machine_restart(NULL);
56815 }
56816 diff -urNp linux-2.6.32.45/include/asm-generic/futex.h linux-2.6.32.45/include/asm-generic/futex.h
56817 --- linux-2.6.32.45/include/asm-generic/futex.h 2011-03-27 14:31:47.000000000 -0400
56818 +++ linux-2.6.32.45/include/asm-generic/futex.h 2011-04-17 15:56:46.000000000 -0400
56819 @@ -6,7 +6,7 @@
56820 #include <asm/errno.h>
56821
56822 static inline int
56823 -futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
56824 +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
56825 {
56826 int op = (encoded_op >> 28) & 7;
56827 int cmp = (encoded_op >> 24) & 15;
56828 @@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op,
56829 }
56830
56831 static inline int
56832 -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
56833 +futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
56834 {
56835 return -ENOSYS;
56836 }
56837 diff -urNp linux-2.6.32.45/include/asm-generic/int-l64.h linux-2.6.32.45/include/asm-generic/int-l64.h
56838 --- linux-2.6.32.45/include/asm-generic/int-l64.h 2011-03-27 14:31:47.000000000 -0400
56839 +++ linux-2.6.32.45/include/asm-generic/int-l64.h 2011-04-17 15:56:46.000000000 -0400
56840 @@ -46,6 +46,8 @@ typedef unsigned int u32;
56841 typedef signed long s64;
56842 typedef unsigned long u64;
56843
56844 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
56845 +
56846 #define S8_C(x) x
56847 #define U8_C(x) x ## U
56848 #define S16_C(x) x
56849 diff -urNp linux-2.6.32.45/include/asm-generic/int-ll64.h linux-2.6.32.45/include/asm-generic/int-ll64.h
56850 --- linux-2.6.32.45/include/asm-generic/int-ll64.h 2011-03-27 14:31:47.000000000 -0400
56851 +++ linux-2.6.32.45/include/asm-generic/int-ll64.h 2011-04-17 15:56:46.000000000 -0400
56852 @@ -51,6 +51,8 @@ typedef unsigned int u32;
56853 typedef signed long long s64;
56854 typedef unsigned long long u64;
56855
56856 +typedef unsigned long long intoverflow_t;
56857 +
56858 #define S8_C(x) x
56859 #define U8_C(x) x ## U
56860 #define S16_C(x) x
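intoverflow_t gives the size-overflow checks elsewhere in this patch a type wide enough to notice when a size computation wrapped (a 128-bit TI-mode type in the int-l64.h hunk above, unsigned long long here). A hypothetical sketch of the idea, deliberately using unsigned int as the narrow type so the demonstration works on any host; checked_alloc() is not a function from the patch.

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef unsigned long long intoverflow_t;

    /* reject n * size if the product does not fit the narrow type */
    static void *checked_alloc(unsigned int n, unsigned int size)
    {
            if ((intoverflow_t)n * size > UINT_MAX)
                    return NULL;
            return malloc((size_t)n * size);
    }

    int main(void)
    {
            void *ok = checked_alloc(16, 4);

            printf("%p %p\n", checked_alloc(UINT_MAX, 2), ok);  /* NULL, non-NULL */
            free(ok);
            return 0;
    }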
56861 diff -urNp linux-2.6.32.45/include/asm-generic/kmap_types.h linux-2.6.32.45/include/asm-generic/kmap_types.h
56862 --- linux-2.6.32.45/include/asm-generic/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
56863 +++ linux-2.6.32.45/include/asm-generic/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
56864 @@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
56865 KMAP_D(16) KM_IRQ_PTE,
56866 KMAP_D(17) KM_NMI,
56867 KMAP_D(18) KM_NMI_PTE,
56868 -KMAP_D(19) KM_TYPE_NR
56869 +KMAP_D(19) KM_CLEARPAGE,
56870 +KMAP_D(20) KM_TYPE_NR
56871 };
56872
56873 #undef KMAP_D
56874 diff -urNp linux-2.6.32.45/include/asm-generic/pgtable.h linux-2.6.32.45/include/asm-generic/pgtable.h
56875 --- linux-2.6.32.45/include/asm-generic/pgtable.h 2011-03-27 14:31:47.000000000 -0400
56876 +++ linux-2.6.32.45/include/asm-generic/pgtable.h 2011-04-17 15:56:46.000000000 -0400
56877 @@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_ar
56878 unsigned long size);
56879 #endif
56880
56881 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
56882 +static inline unsigned long pax_open_kernel(void) { return 0; }
56883 +#endif
56884 +
56885 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
56886 +static inline unsigned long pax_close_kernel(void) { return 0; }
56887 +#endif
56888 +
56889 #endif /* !__ASSEMBLY__ */
56890
56891 #endif /* _ASM_GENERIC_PGTABLE_H */
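pax_open_kernel()/pax_close_kernel() are the hooks the rest of the patch uses to bracket writes to data that is normally mapped read-only; the generic versions above are no-ops for architectures that do not implement the protection. A toy model of the calling pattern (the protected_value variable is purely illustrative):

    #include <stdio.h>

    /* the generic no-op stubs, as added above */
    static inline unsigned long pax_open_kernel(void)  { return 0; }
    static inline unsigned long pax_close_kernel(void) { return 0; }

    static int protected_value;     /* stands in for normally read-only data */

    int main(void)
    {
            pax_open_kernel();      /* real implementations lift write protection */
            protected_value = 42;
            pax_close_kernel();     /* ... and restore it here */
            printf("%d\n", protected_value);
            return 0;
    }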
56892 diff -urNp linux-2.6.32.45/include/asm-generic/pgtable-nopmd.h linux-2.6.32.45/include/asm-generic/pgtable-nopmd.h
56893 --- linux-2.6.32.45/include/asm-generic/pgtable-nopmd.h 2011-03-27 14:31:47.000000000 -0400
56894 +++ linux-2.6.32.45/include/asm-generic/pgtable-nopmd.h 2011-04-17 15:56:46.000000000 -0400
56895 @@ -1,14 +1,19 @@
56896 #ifndef _PGTABLE_NOPMD_H
56897 #define _PGTABLE_NOPMD_H
56898
56899 -#ifndef __ASSEMBLY__
56900 -
56901 #include <asm-generic/pgtable-nopud.h>
56902
56903 -struct mm_struct;
56904 -
56905 #define __PAGETABLE_PMD_FOLDED
56906
56907 +#define PMD_SHIFT PUD_SHIFT
56908 +#define PTRS_PER_PMD 1
56909 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
56910 +#define PMD_MASK (~(PMD_SIZE-1))
56911 +
56912 +#ifndef __ASSEMBLY__
56913 +
56914 +struct mm_struct;
56915 +
56916 /*
56917 * Having the pmd type consist of a pud gets the size right, and allows
56918 * us to conceptually access the pud entry that this pmd is folded into
56919 @@ -16,11 +21,6 @@ struct mm_struct;
56920 */
56921 typedef struct { pud_t pud; } pmd_t;
56922
56923 -#define PMD_SHIFT PUD_SHIFT
56924 -#define PTRS_PER_PMD 1
56925 -#define PMD_SIZE (1UL << PMD_SHIFT)
56926 -#define PMD_MASK (~(PMD_SIZE-1))
56927 -
56928 /*
56929 * The "pud_xxx()" functions here are trivial for a folded two-level
56930 * setup: the pmd is never bad, and a pmd always exists (as it's folded
56931 diff -urNp linux-2.6.32.45/include/asm-generic/pgtable-nopud.h linux-2.6.32.45/include/asm-generic/pgtable-nopud.h
56932 --- linux-2.6.32.45/include/asm-generic/pgtable-nopud.h 2011-03-27 14:31:47.000000000 -0400
56933 +++ linux-2.6.32.45/include/asm-generic/pgtable-nopud.h 2011-04-17 15:56:46.000000000 -0400
56934 @@ -1,10 +1,15 @@
56935 #ifndef _PGTABLE_NOPUD_H
56936 #define _PGTABLE_NOPUD_H
56937
56938 -#ifndef __ASSEMBLY__
56939 -
56940 #define __PAGETABLE_PUD_FOLDED
56941
56942 +#define PUD_SHIFT PGDIR_SHIFT
56943 +#define PTRS_PER_PUD 1
56944 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
56945 +#define PUD_MASK (~(PUD_SIZE-1))
56946 +
56947 +#ifndef __ASSEMBLY__
56948 +
56949 /*
56950 * Having the pud type consist of a pgd gets the size right, and allows
56951 * us to conceptually access the pgd entry that this pud is folded into
56952 @@ -12,11 +17,6 @@
56953 */
56954 typedef struct { pgd_t pgd; } pud_t;
56955
56956 -#define PUD_SHIFT PGDIR_SHIFT
56957 -#define PTRS_PER_PUD 1
56958 -#define PUD_SIZE (1UL << PUD_SHIFT)
56959 -#define PUD_MASK (~(PUD_SIZE-1))
56960 -
56961 /*
56962 * The "pgd_xxx()" functions here are trivial for a folded two-level
56963 * setup: the pud is never bad, and a pud always exists (as it's folded
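Moving the PMD_*/PUD_* definitions in front of the __ASSEMBLY__ guard only works because _AC() (from include/linux/const.h) drops the UL suffix when the header is pulled into assembly code. In essence:

    #include <stdio.h>

    #ifdef __ASSEMBLY__
    # define _AC(X, Y)      X               /* assembler sees a bare constant */
    #else
    # define __AC(X, Y)     (X##Y)
    # define _AC(X, Y)      __AC(X, Y)      /* C sees the suffixed constant */
    #endif

    #define EXAMPLE_SHIFT   21
    #define EXAMPLE_SIZE    (_AC(1, UL) << EXAMPLE_SHIFT)   /* (1UL << 21) in C */

    int main(void)
    {
            printf("%lu\n", EXAMPLE_SIZE);  /* 2097152 */
            return 0;
    }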
56964 diff -urNp linux-2.6.32.45/include/asm-generic/vmlinux.lds.h linux-2.6.32.45/include/asm-generic/vmlinux.lds.h
56965 --- linux-2.6.32.45/include/asm-generic/vmlinux.lds.h 2011-03-27 14:31:47.000000000 -0400
56966 +++ linux-2.6.32.45/include/asm-generic/vmlinux.lds.h 2011-04-17 15:56:46.000000000 -0400
56967 @@ -199,6 +199,7 @@
56968 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
56969 VMLINUX_SYMBOL(__start_rodata) = .; \
56970 *(.rodata) *(.rodata.*) \
56971 + *(.data.read_only) \
56972 *(__vermagic) /* Kernel version magic */ \
56973 *(__markers_strings) /* Markers: strings */ \
56974 *(__tracepoints_strings)/* Tracepoints: strings */ \
56975 @@ -656,22 +657,24 @@
56976 * section in the linker script will go there too. @phdr should have
56977 * a leading colon.
56978 *
56979 - * Note that this macros defines __per_cpu_load as an absolute symbol.
56980 + * Note that this macros defines per_cpu_load as an absolute symbol.
56981 * If there is no need to put the percpu section at a predetermined
56982 * address, use PERCPU().
56983 */
56984 #define PERCPU_VADDR(vaddr, phdr) \
56985 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
56986 - .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
56987 + per_cpu_load = .; \
56988 + .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
56989 - LOAD_OFFSET) { \
56990 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
56991 VMLINUX_SYMBOL(__per_cpu_start) = .; \
56992 *(.data.percpu.first) \
56993 - *(.data.percpu.page_aligned) \
56994 *(.data.percpu) \
56995 + . = ALIGN(PAGE_SIZE); \
56996 + *(.data.percpu.page_aligned) \
56997 *(.data.percpu.shared_aligned) \
56998 VMLINUX_SYMBOL(__per_cpu_end) = .; \
56999 } phdr \
57000 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
57001 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
57002
57003 /**
57004 * PERCPU - define output section for percpu area, simple version
57005 diff -urNp linux-2.6.32.45/include/drm/drm_crtc_helper.h linux-2.6.32.45/include/drm/drm_crtc_helper.h
57006 --- linux-2.6.32.45/include/drm/drm_crtc_helper.h 2011-03-27 14:31:47.000000000 -0400
57007 +++ linux-2.6.32.45/include/drm/drm_crtc_helper.h 2011-08-05 20:33:55.000000000 -0400
57008 @@ -64,7 +64,7 @@ struct drm_crtc_helper_funcs {
57009
57010 /* reload the current crtc LUT */
57011 void (*load_lut)(struct drm_crtc *crtc);
57012 -};
57013 +} __no_const;
57014
57015 struct drm_encoder_helper_funcs {
57016 void (*dpms)(struct drm_encoder *encoder, int mode);
57017 @@ -85,7 +85,7 @@ struct drm_encoder_helper_funcs {
57018 struct drm_connector *connector);
57019 /* disable encoder when not in use - more explicit than dpms off */
57020 void (*disable)(struct drm_encoder *encoder);
57021 -};
57022 +} __no_const;
57023
57024 struct drm_connector_helper_funcs {
57025 int (*get_modes)(struct drm_connector *connector);
57026 diff -urNp linux-2.6.32.45/include/drm/drmP.h linux-2.6.32.45/include/drm/drmP.h
57027 --- linux-2.6.32.45/include/drm/drmP.h 2011-03-27 14:31:47.000000000 -0400
57028 +++ linux-2.6.32.45/include/drm/drmP.h 2011-04-17 15:56:46.000000000 -0400
57029 @@ -71,6 +71,7 @@
57030 #include <linux/workqueue.h>
57031 #include <linux/poll.h>
57032 #include <asm/pgalloc.h>
57033 +#include <asm/local.h>
57034 #include "drm.h"
57035
57036 #include <linux/idr.h>
57037 @@ -814,7 +815,7 @@ struct drm_driver {
57038 void (*vgaarb_irq)(struct drm_device *dev, bool state);
57039
57040 /* Driver private ops for this object */
57041 - struct vm_operations_struct *gem_vm_ops;
57042 + const struct vm_operations_struct *gem_vm_ops;
57043
57044 int major;
57045 int minor;
57046 @@ -917,7 +918,7 @@ struct drm_device {
57047
57048 /** \name Usage Counters */
57049 /*@{ */
57050 - int open_count; /**< Outstanding files open */
57051 + local_t open_count; /**< Outstanding files open */
57052 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
57053 atomic_t vma_count; /**< Outstanding vma areas open */
57054 int buf_use; /**< Buffers in use -- cannot alloc */
57055 @@ -928,7 +929,7 @@ struct drm_device {
57056 /*@{ */
57057 unsigned long counters;
57058 enum drm_stat_type types[15];
57059 - atomic_t counts[15];
57060 + atomic_unchecked_t counts[15];
57061 /*@} */
57062
57063 struct list_head filelist;
57064 @@ -1016,7 +1017,7 @@ struct drm_device {
57065 struct pci_controller *hose;
57066 #endif
57067 struct drm_sg_mem *sg; /**< Scatter gather memory */
57068 - unsigned int num_crtcs; /**< Number of CRTCs on this device */
57069 + unsigned int num_crtcs; /**< Number of CRTCs on this device */
57070 void *dev_private; /**< device private data */
57071 void *mm_private;
57072 struct address_space *dev_mapping;
57073 @@ -1042,11 +1043,11 @@ struct drm_device {
57074 spinlock_t object_name_lock;
57075 struct idr object_name_idr;
57076 atomic_t object_count;
57077 - atomic_t object_memory;
57078 + atomic_unchecked_t object_memory;
57079 atomic_t pin_count;
57080 - atomic_t pin_memory;
57081 + atomic_unchecked_t pin_memory;
57082 atomic_t gtt_count;
57083 - atomic_t gtt_memory;
57084 + atomic_unchecked_t gtt_memory;
57085 uint32_t gtt_total;
57086 uint32_t invalidate_domains; /* domains pending invalidation */
57087 uint32_t flush_domains; /* domains pending flush */
57088 diff -urNp linux-2.6.32.45/include/drm/ttm/ttm_memory.h linux-2.6.32.45/include/drm/ttm/ttm_memory.h
57089 --- linux-2.6.32.45/include/drm/ttm/ttm_memory.h 2011-03-27 14:31:47.000000000 -0400
57090 +++ linux-2.6.32.45/include/drm/ttm/ttm_memory.h 2011-08-05 20:33:55.000000000 -0400
57091 @@ -47,7 +47,7 @@
57092
57093 struct ttm_mem_shrink {
57094 int (*do_shrink) (struct ttm_mem_shrink *);
57095 -};
57096 +} __no_const;
57097
57098 /**
57099 * struct ttm_mem_global - Global memory accounting structure.
57100 diff -urNp linux-2.6.32.45/include/linux/a.out.h linux-2.6.32.45/include/linux/a.out.h
57101 --- linux-2.6.32.45/include/linux/a.out.h 2011-03-27 14:31:47.000000000 -0400
57102 +++ linux-2.6.32.45/include/linux/a.out.h 2011-04-17 15:56:46.000000000 -0400
57103 @@ -39,6 +39,14 @@ enum machine_type {
57104 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
57105 };
57106
57107 +/* Constants for the N_FLAGS field */
57108 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57109 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
57110 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
57111 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
57112 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57113 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57114 +
57115 #if !defined (N_MAGIC)
57116 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
57117 #endif
57118 diff -urNp linux-2.6.32.45/include/linux/atmdev.h linux-2.6.32.45/include/linux/atmdev.h
57119 --- linux-2.6.32.45/include/linux/atmdev.h 2011-03-27 14:31:47.000000000 -0400
57120 +++ linux-2.6.32.45/include/linux/atmdev.h 2011-04-17 15:56:46.000000000 -0400
57121 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
57122 #endif
57123
57124 struct k_atm_aal_stats {
57125 -#define __HANDLE_ITEM(i) atomic_t i
57126 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
57127 __AAL_STAT_ITEMS
57128 #undef __HANDLE_ITEM
57129 };
57130 diff -urNp linux-2.6.32.45/include/linux/backlight.h linux-2.6.32.45/include/linux/backlight.h
57131 --- linux-2.6.32.45/include/linux/backlight.h 2011-03-27 14:31:47.000000000 -0400
57132 +++ linux-2.6.32.45/include/linux/backlight.h 2011-04-17 15:56:46.000000000 -0400
57133 @@ -36,18 +36,18 @@ struct backlight_device;
57134 struct fb_info;
57135
57136 struct backlight_ops {
57137 - unsigned int options;
57138 + const unsigned int options;
57139
57140 #define BL_CORE_SUSPENDRESUME (1 << 0)
57141
57142 /* Notify the backlight driver some property has changed */
57143 - int (*update_status)(struct backlight_device *);
57144 + int (* const update_status)(struct backlight_device *);
57145 /* Return the current backlight brightness (accounting for power,
57146 fb_blank etc.) */
57147 - int (*get_brightness)(struct backlight_device *);
57148 + int (* const get_brightness)(struct backlight_device *);
57149 /* Check if given framebuffer device is the one bound to this backlight;
57150 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
57151 - int (*check_fb)(struct fb_info *);
57152 + int (* const check_fb)(struct fb_info *);
57153 };
57154
57155 /* This structure defines all the properties of a backlight */
57156 @@ -86,7 +86,7 @@ struct backlight_device {
57157 registered this device has been unloaded, and if class_get_devdata()
57158 points to something in the body of that driver, it is also invalid. */
57159 struct mutex ops_lock;
57160 - struct backlight_ops *ops;
57161 + const struct backlight_ops *ops;
57162
57163 /* The framebuffer notifier block */
57164 struct notifier_block fb_notif;
57165 @@ -103,7 +103,7 @@ static inline void backlight_update_stat
57166 }
57167
57168 extern struct backlight_device *backlight_device_register(const char *name,
57169 - struct device *dev, void *devdata, struct backlight_ops *ops);
57170 + struct device *dev, void *devdata, const struct backlight_ops *ops);
57171 extern void backlight_device_unregister(struct backlight_device *bd);
57172 extern void backlight_force_update(struct backlight_device *bd,
57173 enum backlight_update_reason reason);
57174 diff -urNp linux-2.6.32.45/include/linux/binfmts.h linux-2.6.32.45/include/linux/binfmts.h
57175 --- linux-2.6.32.45/include/linux/binfmts.h 2011-04-17 17:00:52.000000000 -0400
57176 +++ linux-2.6.32.45/include/linux/binfmts.h 2011-04-17 15:56:46.000000000 -0400
57177 @@ -83,6 +83,7 @@ struct linux_binfmt {
57178 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
57179 int (*load_shlib)(struct file *);
57180 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
57181 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
57182 unsigned long min_coredump; /* minimal dump size */
57183 int hasvdso;
57184 };
57185 diff -urNp linux-2.6.32.45/include/linux/blkdev.h linux-2.6.32.45/include/linux/blkdev.h
57186 --- linux-2.6.32.45/include/linux/blkdev.h 2011-03-27 14:31:47.000000000 -0400
57187 +++ linux-2.6.32.45/include/linux/blkdev.h 2011-08-26 20:27:21.000000000 -0400
57188 @@ -1278,7 +1278,7 @@ struct block_device_operations {
57189 int (*revalidate_disk) (struct gendisk *);
57190 int (*getgeo)(struct block_device *, struct hd_geometry *);
57191 struct module *owner;
57192 -};
57193 +} __do_const;
57194
57195 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
57196 unsigned long);
57197 diff -urNp linux-2.6.32.45/include/linux/blktrace_api.h linux-2.6.32.45/include/linux/blktrace_api.h
57198 --- linux-2.6.32.45/include/linux/blktrace_api.h 2011-03-27 14:31:47.000000000 -0400
57199 +++ linux-2.6.32.45/include/linux/blktrace_api.h 2011-05-04 17:56:28.000000000 -0400
57200 @@ -160,7 +160,7 @@ struct blk_trace {
57201 struct dentry *dir;
57202 struct dentry *dropped_file;
57203 struct dentry *msg_file;
57204 - atomic_t dropped;
57205 + atomic_unchecked_t dropped;
57206 };
57207
57208 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
57209 diff -urNp linux-2.6.32.45/include/linux/byteorder/little_endian.h linux-2.6.32.45/include/linux/byteorder/little_endian.h
57210 --- linux-2.6.32.45/include/linux/byteorder/little_endian.h 2011-03-27 14:31:47.000000000 -0400
57211 +++ linux-2.6.32.45/include/linux/byteorder/little_endian.h 2011-04-17 15:56:46.000000000 -0400
57212 @@ -42,51 +42,51 @@
57213
57214 static inline __le64 __cpu_to_le64p(const __u64 *p)
57215 {
57216 - return (__force __le64)*p;
57217 + return (__force const __le64)*p;
57218 }
57219 static inline __u64 __le64_to_cpup(const __le64 *p)
57220 {
57221 - return (__force __u64)*p;
57222 + return (__force const __u64)*p;
57223 }
57224 static inline __le32 __cpu_to_le32p(const __u32 *p)
57225 {
57226 - return (__force __le32)*p;
57227 + return (__force const __le32)*p;
57228 }
57229 static inline __u32 __le32_to_cpup(const __le32 *p)
57230 {
57231 - return (__force __u32)*p;
57232 + return (__force const __u32)*p;
57233 }
57234 static inline __le16 __cpu_to_le16p(const __u16 *p)
57235 {
57236 - return (__force __le16)*p;
57237 + return (__force const __le16)*p;
57238 }
57239 static inline __u16 __le16_to_cpup(const __le16 *p)
57240 {
57241 - return (__force __u16)*p;
57242 + return (__force const __u16)*p;
57243 }
57244 static inline __be64 __cpu_to_be64p(const __u64 *p)
57245 {
57246 - return (__force __be64)__swab64p(p);
57247 + return (__force const __be64)__swab64p(p);
57248 }
57249 static inline __u64 __be64_to_cpup(const __be64 *p)
57250 {
57251 - return __swab64p((__u64 *)p);
57252 + return __swab64p((const __u64 *)p);
57253 }
57254 static inline __be32 __cpu_to_be32p(const __u32 *p)
57255 {
57256 - return (__force __be32)__swab32p(p);
57257 + return (__force const __be32)__swab32p(p);
57258 }
57259 static inline __u32 __be32_to_cpup(const __be32 *p)
57260 {
57261 - return __swab32p((__u32 *)p);
57262 + return __swab32p((const __u32 *)p);
57263 }
57264 static inline __be16 __cpu_to_be16p(const __u16 *p)
57265 {
57266 - return (__force __be16)__swab16p(p);
57267 + return (__force const __be16)__swab16p(p);
57268 }
57269 static inline __u16 __be16_to_cpup(const __be16 *p)
57270 {
57271 - return __swab16p((__u16 *)p);
57272 + return __swab16p((const __u16 *)p);
57273 }
57274 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
57275 #define __le64_to_cpus(x) do { (void)(x); } while (0)
57276 diff -urNp linux-2.6.32.45/include/linux/cache.h linux-2.6.32.45/include/linux/cache.h
57277 --- linux-2.6.32.45/include/linux/cache.h 2011-03-27 14:31:47.000000000 -0400
57278 +++ linux-2.6.32.45/include/linux/cache.h 2011-04-17 15:56:46.000000000 -0400
57279 @@ -16,6 +16,10 @@
57280 #define __read_mostly
57281 #endif
57282
57283 +#ifndef __read_only
57284 +#define __read_only __read_mostly
57285 +#endif
57286 +
57287 #ifndef ____cacheline_aligned
57288 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
57289 #endif
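`__read_only` is used by the PaX parts of this patch for variables that are written during early init and never afterwards; on KERNEXEC configurations the real definition places them in a section that is mapped read-only, and this hunk only supplies a harmless `__read_mostly` fallback for everyone else. A small sketch of the intended usage pattern (kernel-style, identifiers are illustrative):

    /* Set once while booting, then effectively immutable. */
    static unsigned long example_boot_limit __read_only = 128;

    static int __init example_setup(char *str)
    {
            /* Still writable this early in boot; after init the PaX variant
             * of __read_only makes stray writes fault instead of silently
             * corrupting policy-relevant state. */
            example_boot_limit = simple_strtoul(str, NULL, 0);
            return 1;
    }
    __setup("example_limit=", example_setup);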
57290 diff -urNp linux-2.6.32.45/include/linux/capability.h linux-2.6.32.45/include/linux/capability.h
57291 --- linux-2.6.32.45/include/linux/capability.h 2011-03-27 14:31:47.000000000 -0400
57292 +++ linux-2.6.32.45/include/linux/capability.h 2011-04-17 15:56:46.000000000 -0400
57293 @@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff
57294 (security_real_capable_noaudit((t), (cap)) == 0)
57295
57296 extern int capable(int cap);
57297 +int capable_nolog(int cap);
57298
57299 /* audit system wants to get cap info from files as well */
57300 struct dentry;
57301 diff -urNp linux-2.6.32.45/include/linux/compiler-gcc4.h linux-2.6.32.45/include/linux/compiler-gcc4.h
57302 --- linux-2.6.32.45/include/linux/compiler-gcc4.h 2011-03-27 14:31:47.000000000 -0400
57303 +++ linux-2.6.32.45/include/linux/compiler-gcc4.h 2011-08-26 20:19:09.000000000 -0400
57304 @@ -36,4 +36,16 @@
57305 the kernel context */
57306 #define __cold __attribute__((__cold__))
57307
57308 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
57309 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
57310 +#define __bos0(ptr) __bos((ptr), 0)
57311 +#define __bos1(ptr) __bos((ptr), 1)
57312 +
57313 +#if __GNUC_MINOR__ >= 5
57314 +#ifdef CONSTIFY_PLUGIN
57315 +#define __no_const __attribute__((no_const))
57316 +#define __do_const __attribute__((do_const))
57317 +#endif
57318 +#endif
57319 +
57320 #endif
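Two separate facilities are wired up here: `__alloc_size`/`__bos*` feed gcc's `__builtin_object_size()` so size-checked copy helpers can see through allocator calls, and `__do_const`/`__no_const` are only defined when the constify plugin is in use on gcc 4.5 or newer. A user-space sketch of the alloc_size idea; the function and variable names are made up, and the compile-time size resolution generally requires optimization to be enabled:

    #include <stdlib.h>
    #include <stdio.h>

    /* Tell gcc that argument 1 is the size of the returned object. */
    void *example_alloc(size_t n) __attribute__((alloc_size(1)));
    void *example_alloc(size_t n) { return malloc(n); }

    int main(void)
    {
            void *p = example_alloc(32);
            /* With optimization enabled, gcc can usually resolve this to 32,
             * which is what lets fortified copy routines reject oversized
             * writes; without that information it reports "unknown" (-1). */
            printf("%zu\n", __builtin_object_size(p, 0));
            free(p);
            return 0;
    }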
57321 diff -urNp linux-2.6.32.45/include/linux/compiler.h linux-2.6.32.45/include/linux/compiler.h
57322 --- linux-2.6.32.45/include/linux/compiler.h 2011-03-27 14:31:47.000000000 -0400
57323 +++ linux-2.6.32.45/include/linux/compiler.h 2011-08-26 20:19:09.000000000 -0400
57324 @@ -247,6 +247,14 @@ void ftrace_likely_update(struct ftrace_
57325 # define __attribute_const__ /* unimplemented */
57326 #endif
57327
57328 +#ifndef __no_const
57329 +# define __no_const
57330 +#endif
57331 +
57332 +#ifndef __do_const
57333 +# define __do_const
57334 +#endif
57335 +
57336 /*
57337 * Tell gcc if a function is cold. The compiler will assume any path
57338 * directly leading to the call is unlikely.
57339 @@ -256,6 +264,22 @@ void ftrace_likely_update(struct ftrace_
57340 #define __cold
57341 #endif
57342
57343 +#ifndef __alloc_size
57344 +#define __alloc_size(...)
57345 +#endif
57346 +
57347 +#ifndef __bos
57348 +#define __bos(ptr, arg)
57349 +#endif
57350 +
57351 +#ifndef __bos0
57352 +#define __bos0(ptr)
57353 +#endif
57354 +
57355 +#ifndef __bos1
57356 +#define __bos1(ptr)
57357 +#endif
57358 +
57359 /* Simple shorthand for a section definition */
57360 #ifndef __section
57361 # define __section(S) __attribute__ ((__section__(#S)))
57362 @@ -278,6 +302,7 @@ void ftrace_likely_update(struct ftrace_
57363 * use is to mediate communication between process-level code and irq/NMI
57364 * handlers, all running on the same CPU.
57365 */
57366 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
57367 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
57368 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
57369
57370 #endif /* __LINUX_COMPILER_H */
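Making `ACCESS_ONCE()` expand through a `volatile const` lvalue means existing read sites keep working, while any code that used the macro on the left-hand side of an assignment no longer compiles and has to be converted to the new `ACCESS_ONCE_RW()`; the rest of the patch performs exactly that conversion. A tiny illustration, not taken from the patch:

    static int example_flag;

    static int example_reader(void)
    {
            return ACCESS_ONCE(example_flag);        /* still fine: read-only use */
    }

    static void example_writer(void)
    {
            /* ACCESS_ONCE(example_flag) = 1;  would no longer compile:
             * assignment to a const-qualified lvalue. */
            ACCESS_ONCE_RW(example_flag) = 1;        /* explicit writable access */
    }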
57371 diff -urNp linux-2.6.32.45/include/linux/crypto.h linux-2.6.32.45/include/linux/crypto.h
57372 --- linux-2.6.32.45/include/linux/crypto.h 2011-03-27 14:31:47.000000000 -0400
57373 +++ linux-2.6.32.45/include/linux/crypto.h 2011-08-05 20:33:55.000000000 -0400
57374 @@ -394,7 +394,7 @@ struct cipher_tfm {
57375 const u8 *key, unsigned int keylen);
57376 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57377 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57378 -};
57379 +} __no_const;
57380
57381 struct hash_tfm {
57382 int (*init)(struct hash_desc *desc);
57383 @@ -415,13 +415,13 @@ struct compress_tfm {
57384 int (*cot_decompress)(struct crypto_tfm *tfm,
57385 const u8 *src, unsigned int slen,
57386 u8 *dst, unsigned int *dlen);
57387 -};
57388 +} __no_const;
57389
57390 struct rng_tfm {
57391 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
57392 unsigned int dlen);
57393 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
57394 -};
57395 +} __no_const;
57396
57397 #define crt_ablkcipher crt_u.ablkcipher
57398 #define crt_aead crt_u.aead
57399 diff -urNp linux-2.6.32.45/include/linux/dcache.h linux-2.6.32.45/include/linux/dcache.h
57400 --- linux-2.6.32.45/include/linux/dcache.h 2011-03-27 14:31:47.000000000 -0400
57401 +++ linux-2.6.32.45/include/linux/dcache.h 2011-04-23 13:34:46.000000000 -0400
57402 @@ -119,6 +119,8 @@ struct dentry {
57403 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
57404 };
57405
57406 +#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
57407 +
57408 /*
57409 * dentry->d_lock spinlock nesting subclasses:
57410 *
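The new `DNAME_INLINE_LEN` computes how many bytes of short-name storage actually fit in a `struct dentry` by measuring from the offset of `d_iname`, the last member, to the end of the structure, so any tail padding becomes usable too. The `sizeof`/`offsetof` idiom in isolation (the structure and values below are purely illustrative):

    #include <stddef.h>
    #include <stdio.h>

    struct example {
            long  header;
            short flags;
            char  name[12];       /* trailing array, like d_iname */
    };

    /* Space from the start of the trailing array to the end of the struct,
     * i.e. the declared array plus whatever tail padding the ABI adds. */
    #define EXAMPLE_INLINE_LEN \
            (sizeof(struct example) - offsetof(struct example, name))

    int main(void)
    {
            printf("%zu\n", EXAMPLE_INLINE_LEN);   /* >= 12, depending on padding */
            return 0;
    }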
57411 diff -urNp linux-2.6.32.45/include/linux/decompress/mm.h linux-2.6.32.45/include/linux/decompress/mm.h
57412 --- linux-2.6.32.45/include/linux/decompress/mm.h 2011-03-27 14:31:47.000000000 -0400
57413 +++ linux-2.6.32.45/include/linux/decompress/mm.h 2011-04-17 15:56:46.000000000 -0400
57414 @@ -78,7 +78,7 @@ static void free(void *where)
57415 * warnings when not needed (indeed large_malloc / large_free are not
57416 * needed by inflate */
57417
57418 -#define malloc(a) kmalloc(a, GFP_KERNEL)
57419 +#define malloc(a) kmalloc((a), GFP_KERNEL)
57420 #define free(a) kfree(a)
57421
57422 #define large_malloc(a) vmalloc(a)
57423 diff -urNp linux-2.6.32.45/include/linux/dma-mapping.h linux-2.6.32.45/include/linux/dma-mapping.h
57424 --- linux-2.6.32.45/include/linux/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
57425 +++ linux-2.6.32.45/include/linux/dma-mapping.h 2011-08-26 20:19:09.000000000 -0400
57426 @@ -16,51 +16,51 @@ enum dma_data_direction {
57427 };
57428
57429 struct dma_map_ops {
57430 - void* (*alloc_coherent)(struct device *dev, size_t size,
57431 + void* (* const alloc_coherent)(struct device *dev, size_t size,
57432 dma_addr_t *dma_handle, gfp_t gfp);
57433 - void (*free_coherent)(struct device *dev, size_t size,
57434 + void (* const free_coherent)(struct device *dev, size_t size,
57435 void *vaddr, dma_addr_t dma_handle);
57436 - dma_addr_t (*map_page)(struct device *dev, struct page *page,
57437 + dma_addr_t (* const map_page)(struct device *dev, struct page *page,
57438 unsigned long offset, size_t size,
57439 enum dma_data_direction dir,
57440 struct dma_attrs *attrs);
57441 - void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
57442 + void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
57443 size_t size, enum dma_data_direction dir,
57444 struct dma_attrs *attrs);
57445 - int (*map_sg)(struct device *dev, struct scatterlist *sg,
57446 + int (* const map_sg)(struct device *dev, struct scatterlist *sg,
57447 int nents, enum dma_data_direction dir,
57448 struct dma_attrs *attrs);
57449 - void (*unmap_sg)(struct device *dev,
57450 + void (* const unmap_sg)(struct device *dev,
57451 struct scatterlist *sg, int nents,
57452 enum dma_data_direction dir,
57453 struct dma_attrs *attrs);
57454 - void (*sync_single_for_cpu)(struct device *dev,
57455 + void (* const sync_single_for_cpu)(struct device *dev,
57456 dma_addr_t dma_handle, size_t size,
57457 enum dma_data_direction dir);
57458 - void (*sync_single_for_device)(struct device *dev,
57459 + void (* const sync_single_for_device)(struct device *dev,
57460 dma_addr_t dma_handle, size_t size,
57461 enum dma_data_direction dir);
57462 - void (*sync_single_range_for_cpu)(struct device *dev,
57463 + void (* const sync_single_range_for_cpu)(struct device *dev,
57464 dma_addr_t dma_handle,
57465 unsigned long offset,
57466 size_t size,
57467 enum dma_data_direction dir);
57468 - void (*sync_single_range_for_device)(struct device *dev,
57469 + void (* const sync_single_range_for_device)(struct device *dev,
57470 dma_addr_t dma_handle,
57471 unsigned long offset,
57472 size_t size,
57473 enum dma_data_direction dir);
57474 - void (*sync_sg_for_cpu)(struct device *dev,
57475 + void (* const sync_sg_for_cpu)(struct device *dev,
57476 struct scatterlist *sg, int nents,
57477 enum dma_data_direction dir);
57478 - void (*sync_sg_for_device)(struct device *dev,
57479 + void (* const sync_sg_for_device)(struct device *dev,
57480 struct scatterlist *sg, int nents,
57481 enum dma_data_direction dir);
57482 - int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
57483 - int (*dma_supported)(struct device *dev, u64 mask);
57484 + int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
57485 + int (* const dma_supported)(struct device *dev, u64 mask);
57486 int (*set_dma_mask)(struct device *dev, u64 mask);
57487 int is_phys;
57488 -};
57489 +} __do_const;
57490
57491 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
57492
57493 diff -urNp linux-2.6.32.45/include/linux/dst.h linux-2.6.32.45/include/linux/dst.h
57494 --- linux-2.6.32.45/include/linux/dst.h 2011-03-27 14:31:47.000000000 -0400
57495 +++ linux-2.6.32.45/include/linux/dst.h 2011-04-17 15:56:46.000000000 -0400
57496 @@ -380,7 +380,7 @@ struct dst_node
57497 struct thread_pool *pool;
57498
57499 /* Transaction IDs live here */
57500 - atomic_long_t gen;
57501 + atomic_long_unchecked_t gen;
57502
57503 /*
57504 * How frequently and how many times transaction
57505 diff -urNp linux-2.6.32.45/include/linux/elf.h linux-2.6.32.45/include/linux/elf.h
57506 --- linux-2.6.32.45/include/linux/elf.h 2011-03-27 14:31:47.000000000 -0400
57507 +++ linux-2.6.32.45/include/linux/elf.h 2011-04-17 15:56:46.000000000 -0400
57508 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
57509 #define PT_GNU_EH_FRAME 0x6474e550
57510
57511 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
57512 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
57513 +
57514 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
57515 +
57516 +/* Constants for the e_flags field */
57517 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57518 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
57519 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
57520 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
57521 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57522 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57523
57524 /* These constants define the different elf file types */
57525 #define ET_NONE 0
57526 @@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
57527 #define DT_DEBUG 21
57528 #define DT_TEXTREL 22
57529 #define DT_JMPREL 23
57530 +#define DT_FLAGS 30
57531 + #define DF_TEXTREL 0x00000004
57532 #define DT_ENCODING 32
57533 #define OLD_DT_LOOS 0x60000000
57534 #define DT_LOOS 0x6000000d
57535 @@ -230,6 +243,19 @@ typedef struct elf64_hdr {
57536 #define PF_W 0x2
57537 #define PF_X 0x1
57538
57539 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
57540 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
57541 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
57542 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
57543 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
57544 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
57545 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
57546 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
57547 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
57548 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
57549 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
57550 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
57551 +
57552 typedef struct elf32_phdr{
57553 Elf32_Word p_type;
57554 Elf32_Off p_offset;
57555 @@ -322,6 +348,8 @@ typedef struct elf64_shdr {
57556 #define EI_OSABI 7
57557 #define EI_PAD 8
57558
57559 +#define EI_PAX 14
57560 +
57561 #define ELFMAG0 0x7f /* EI_MAG */
57562 #define ELFMAG1 'E'
57563 #define ELFMAG2 'L'
57564 @@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
57565 #define elf_phdr elf32_phdr
57566 #define elf_note elf32_note
57567 #define elf_addr_t Elf32_Off
57568 +#define elf_dyn Elf32_Dyn
57569
57570 #else
57571
57572 @@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
57573 #define elf_phdr elf64_phdr
57574 #define elf_note elf64_note
57575 #define elf_addr_t Elf64_Off
57576 +#define elf_dyn Elf64_Dyn
57577
57578 #endif
57579
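These additions define the two PaX marking channels: a legacy flag byte at `e_ident[EI_PAX]` carrying the `EF_PAX_*` bits, and the preferred `PT_PAX_FLAGS` program header whose `p_flags` hold explicit enable/disable pairs (`PF_PAGEEXEC`/`PF_NOPAGEEXEC` and so on), with neither bit set meaning "use the kernel's default policy". A sketch of how such paired bits are typically interpreted; this is an illustration, not the loader code from the patch, and the precedence when both bits are set is simplified here:

    /* Tri-state decision from an explicit enable/disable flag pair. */
    enum pax_decision { PAX_DEFAULT, PAX_ON, PAX_OFF };

    static enum pax_decision example_decide(unsigned int p_flags,
                                            unsigned int on_bit,
                                            unsigned int off_bit)
    {
            if (p_flags & off_bit)
                    return PAX_OFF;      /* explicitly disabled for this binary */
            if (p_flags & on_bit)
                    return PAX_ON;       /* explicitly enabled */
            return PAX_DEFAULT;          /* unmarked: fall back to system policy */
    }

    /* e.g. example_decide(phdr->p_flags, PF_PAGEEXEC, PF_NOPAGEEXEC) */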
57580 diff -urNp linux-2.6.32.45/include/linux/fscache-cache.h linux-2.6.32.45/include/linux/fscache-cache.h
57581 --- linux-2.6.32.45/include/linux/fscache-cache.h 2011-03-27 14:31:47.000000000 -0400
57582 +++ linux-2.6.32.45/include/linux/fscache-cache.h 2011-05-04 17:56:28.000000000 -0400
57583 @@ -116,7 +116,7 @@ struct fscache_operation {
57584 #endif
57585 };
57586
57587 -extern atomic_t fscache_op_debug_id;
57588 +extern atomic_unchecked_t fscache_op_debug_id;
57589 extern const struct slow_work_ops fscache_op_slow_work_ops;
57590
57591 extern void fscache_enqueue_operation(struct fscache_operation *);
57592 @@ -134,7 +134,7 @@ static inline void fscache_operation_ini
57593 fscache_operation_release_t release)
57594 {
57595 atomic_set(&op->usage, 1);
57596 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
57597 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
57598 op->release = release;
57599 INIT_LIST_HEAD(&op->pend_link);
57600 fscache_set_op_state(op, "Init");
57601 diff -urNp linux-2.6.32.45/include/linux/fs.h linux-2.6.32.45/include/linux/fs.h
57602 --- linux-2.6.32.45/include/linux/fs.h 2011-07-13 17:23:04.000000000 -0400
57603 +++ linux-2.6.32.45/include/linux/fs.h 2011-08-26 20:19:09.000000000 -0400
57604 @@ -90,6 +90,11 @@ struct inodes_stat_t {
57605 /* Expect random access pattern */
57606 #define FMODE_RANDOM ((__force fmode_t)4096)
57607
57608 +/* Hack for grsec so as not to require read permission simply to execute
57609 + * a binary
57610 + */
57611 +#define FMODE_GREXEC ((__force fmode_t)0x2000000)
57612 +
57613 /*
57614 * The below are the various read and write types that we support. Some of
57615 * them include behavioral modifiers that send information down to the
57616 @@ -568,41 +573,41 @@ typedef int (*read_actor_t)(read_descrip
57617 unsigned long, unsigned long);
57618
57619 struct address_space_operations {
57620 - int (*writepage)(struct page *page, struct writeback_control *wbc);
57621 - int (*readpage)(struct file *, struct page *);
57622 - void (*sync_page)(struct page *);
57623 + int (* const writepage)(struct page *page, struct writeback_control *wbc);
57624 + int (* const readpage)(struct file *, struct page *);
57625 + void (* const sync_page)(struct page *);
57626
57627 /* Write back some dirty pages from this mapping. */
57628 - int (*writepages)(struct address_space *, struct writeback_control *);
57629 + int (* const writepages)(struct address_space *, struct writeback_control *);
57630
57631 /* Set a page dirty. Return true if this dirtied it */
57632 - int (*set_page_dirty)(struct page *page);
57633 + int (* const set_page_dirty)(struct page *page);
57634
57635 - int (*readpages)(struct file *filp, struct address_space *mapping,
57636 + int (* const readpages)(struct file *filp, struct address_space *mapping,
57637 struct list_head *pages, unsigned nr_pages);
57638
57639 - int (*write_begin)(struct file *, struct address_space *mapping,
57640 + int (* const write_begin)(struct file *, struct address_space *mapping,
57641 loff_t pos, unsigned len, unsigned flags,
57642 struct page **pagep, void **fsdata);
57643 - int (*write_end)(struct file *, struct address_space *mapping,
57644 + int (* const write_end)(struct file *, struct address_space *mapping,
57645 loff_t pos, unsigned len, unsigned copied,
57646 struct page *page, void *fsdata);
57647
57648 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
57649 - sector_t (*bmap)(struct address_space *, sector_t);
57650 - void (*invalidatepage) (struct page *, unsigned long);
57651 - int (*releasepage) (struct page *, gfp_t);
57652 - ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
57653 + sector_t (* const bmap)(struct address_space *, sector_t);
57654 + void (* const invalidatepage) (struct page *, unsigned long);
57655 + int (* const releasepage) (struct page *, gfp_t);
57656 + ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
57657 loff_t offset, unsigned long nr_segs);
57658 - int (*get_xip_mem)(struct address_space *, pgoff_t, int,
57659 + int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
57660 void **, unsigned long *);
57661 /* migrate the contents of a page to the specified target */
57662 - int (*migratepage) (struct address_space *,
57663 + int (* const migratepage) (struct address_space *,
57664 struct page *, struct page *);
57665 - int (*launder_page) (struct page *);
57666 - int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
57667 + int (* const launder_page) (struct page *);
57668 + int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
57669 unsigned long);
57670 - int (*error_remove_page)(struct address_space *, struct page *);
57671 + int (* const error_remove_page)(struct address_space *, struct page *);
57672 };
57673
57674 /*
57675 @@ -1031,19 +1036,19 @@ static inline int file_check_writeable(s
57676 typedef struct files_struct *fl_owner_t;
57677
57678 struct file_lock_operations {
57679 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
57680 - void (*fl_release_private)(struct file_lock *);
57681 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
57682 + void (* const fl_release_private)(struct file_lock *);
57683 };
57684
57685 struct lock_manager_operations {
57686 - int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
57687 - void (*fl_notify)(struct file_lock *); /* unblock callback */
57688 - int (*fl_grant)(struct file_lock *, struct file_lock *, int);
57689 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
57690 - void (*fl_release_private)(struct file_lock *);
57691 - void (*fl_break)(struct file_lock *);
57692 - int (*fl_mylease)(struct file_lock *, struct file_lock *);
57693 - int (*fl_change)(struct file_lock **, int);
57694 + int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
57695 + void (* const fl_notify)(struct file_lock *); /* unblock callback */
57696 + int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
57697 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
57698 + void (* const fl_release_private)(struct file_lock *);
57699 + void (* const fl_break)(struct file_lock *);
57700 + int (* const fl_mylease)(struct file_lock *, struct file_lock *);
57701 + int (* const fl_change)(struct file_lock **, int);
57702 };
57703
57704 struct lock_manager {
57705 @@ -1442,7 +1447,7 @@ struct fiemap_extent_info {
57706 unsigned int fi_flags; /* Flags as passed from user */
57707 unsigned int fi_extents_mapped; /* Number of mapped extents */
57708 unsigned int fi_extents_max; /* Size of fiemap_extent array */
57709 - struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
57710 + struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
57711 * array */
57712 };
57713 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
57714 @@ -1512,7 +1517,8 @@ struct file_operations {
57715 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
57716 ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
57717 int (*setlease)(struct file *, long, struct file_lock **);
57718 -};
57719 +} __do_const;
57720 +typedef struct file_operations __no_const file_operations_no_const;
57721
57722 struct inode_operations {
57723 int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
57724 @@ -1559,30 +1565,30 @@ extern ssize_t vfs_writev(struct file *,
57725 unsigned long, loff_t *);
57726
57727 struct super_operations {
57728 - struct inode *(*alloc_inode)(struct super_block *sb);
57729 - void (*destroy_inode)(struct inode *);
57730 + struct inode *(* const alloc_inode)(struct super_block *sb);
57731 + void (* const destroy_inode)(struct inode *);
57732
57733 - void (*dirty_inode) (struct inode *);
57734 - int (*write_inode) (struct inode *, int);
57735 - void (*drop_inode) (struct inode *);
57736 - void (*delete_inode) (struct inode *);
57737 - void (*put_super) (struct super_block *);
57738 - void (*write_super) (struct super_block *);
57739 - int (*sync_fs)(struct super_block *sb, int wait);
57740 - int (*freeze_fs) (struct super_block *);
57741 - int (*unfreeze_fs) (struct super_block *);
57742 - int (*statfs) (struct dentry *, struct kstatfs *);
57743 - int (*remount_fs) (struct super_block *, int *, char *);
57744 - void (*clear_inode) (struct inode *);
57745 - void (*umount_begin) (struct super_block *);
57746 + void (* const dirty_inode) (struct inode *);
57747 + int (* const write_inode) (struct inode *, int);
57748 + void (* const drop_inode) (struct inode *);
57749 + void (* const delete_inode) (struct inode *);
57750 + void (* const put_super) (struct super_block *);
57751 + void (* const write_super) (struct super_block *);
57752 + int (* const sync_fs)(struct super_block *sb, int wait);
57753 + int (* const freeze_fs) (struct super_block *);
57754 + int (* const unfreeze_fs) (struct super_block *);
57755 + int (* const statfs) (struct dentry *, struct kstatfs *);
57756 + int (* const remount_fs) (struct super_block *, int *, char *);
57757 + void (* const clear_inode) (struct inode *);
57758 + void (* const umount_begin) (struct super_block *);
57759
57760 - int (*show_options)(struct seq_file *, struct vfsmount *);
57761 - int (*show_stats)(struct seq_file *, struct vfsmount *);
57762 + int (* const show_options)(struct seq_file *, struct vfsmount *);
57763 + int (* const show_stats)(struct seq_file *, struct vfsmount *);
57764 #ifdef CONFIG_QUOTA
57765 - ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
57766 - ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
57767 + ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
57768 + ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
57769 #endif
57770 - int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
57771 + int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
57772 };
57773
57774 /*
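The interesting part of this hunk is the pairing of `__do_const` on `struct file_operations` with the `file_operations_no_const` typedef: the vast majority of fops tables are statically initialized and become read-only, while the few places that genuinely build a `file_operations` at runtime are switched to the typedef by later hunks. A sketch of the pattern, with names invented for illustration:

    static int example_open(struct inode *inode, struct file *file)
    {
            return 0;
    }

    /* Normal case: fully static table, read-only under constify. */
    static const struct file_operations example_static_fops = {
            .owner = THIS_MODULE,
            .open  = example_open,
    };

    /* Exceptional case: code that fills in its fops at runtime uses the
     * non-const typedef so the structure stays writable. */
    static file_operations_no_const example_dynamic_fops;

    static void example_init_dynamic(void)
    {
            example_dynamic_fops.owner = THIS_MODULE;
            example_dynamic_fops.open  = example_open;   /* assigned at runtime */
    }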
57775 diff -urNp linux-2.6.32.45/include/linux/fs_struct.h linux-2.6.32.45/include/linux/fs_struct.h
57776 --- linux-2.6.32.45/include/linux/fs_struct.h 2011-03-27 14:31:47.000000000 -0400
57777 +++ linux-2.6.32.45/include/linux/fs_struct.h 2011-04-17 15:56:46.000000000 -0400
57778 @@ -4,7 +4,7 @@
57779 #include <linux/path.h>
57780
57781 struct fs_struct {
57782 - int users;
57783 + atomic_t users;
57784 rwlock_t lock;
57785 int umask;
57786 int in_exec;
57787 diff -urNp linux-2.6.32.45/include/linux/ftrace_event.h linux-2.6.32.45/include/linux/ftrace_event.h
57788 --- linux-2.6.32.45/include/linux/ftrace_event.h 2011-03-27 14:31:47.000000000 -0400
57789 +++ linux-2.6.32.45/include/linux/ftrace_event.h 2011-05-04 17:56:28.000000000 -0400
57790 @@ -163,7 +163,7 @@ extern int trace_define_field(struct ftr
57791 int filter_type);
57792 extern int trace_define_common_fields(struct ftrace_event_call *call);
57793
57794 -#define is_signed_type(type) (((type)(-1)) < 0)
57795 +#define is_signed_type(type) (((type)(-1)) < (type)1)
57796
57797 int trace_set_clr_event(const char *system, const char *event, int set);
57798
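Comparing `(type)(-1)` against `(type)1` instead of `0` gives the same truth value for every integer type, but avoids gcc's "comparison of unsigned expression < 0 is always false" warning when the macro is instantiated with an unsigned type. A quick user-space check of the equivalence:

    #include <stdio.h>

    #define is_signed_type(type)    (((type)(-1)) < (type)1)

    int main(void)
    {
            /* signed types:   (type)-1 == -1  <  1  -> true  */
            /* unsigned types: (type)-1 == MAX >= 1  -> false */
            printf("int:          %d\n", is_signed_type(int));           /* 1 */
            printf("unsigned int: %d\n", is_signed_type(unsigned int));  /* 0 */
            printf("char:         %d\n", is_signed_type(char));          /* ABI-dependent */
            return 0;
    }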
57799 diff -urNp linux-2.6.32.45/include/linux/genhd.h linux-2.6.32.45/include/linux/genhd.h
57800 --- linux-2.6.32.45/include/linux/genhd.h 2011-03-27 14:31:47.000000000 -0400
57801 +++ linux-2.6.32.45/include/linux/genhd.h 2011-04-17 15:56:46.000000000 -0400
57802 @@ -161,7 +161,7 @@ struct gendisk {
57803
57804 struct timer_rand_state *random;
57805
57806 - atomic_t sync_io; /* RAID */
57807 + atomic_unchecked_t sync_io; /* RAID */
57808 struct work_struct async_notify;
57809 #ifdef CONFIG_BLK_DEV_INTEGRITY
57810 struct blk_integrity *integrity;
57811 diff -urNp linux-2.6.32.45/include/linux/gracl.h linux-2.6.32.45/include/linux/gracl.h
57812 --- linux-2.6.32.45/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
57813 +++ linux-2.6.32.45/include/linux/gracl.h 2011-04-17 15:56:46.000000000 -0400
57814 @@ -0,0 +1,317 @@
57815 +#ifndef GR_ACL_H
57816 +#define GR_ACL_H
57817 +
57818 +#include <linux/grdefs.h>
57819 +#include <linux/resource.h>
57820 +#include <linux/capability.h>
57821 +#include <linux/dcache.h>
57822 +#include <asm/resource.h>
57823 +
57824 +/* Major status information */
57825 +
57826 +#define GR_VERSION "grsecurity 2.2.2"
57827 +#define GRSECURITY_VERSION 0x2202
57828 +
57829 +enum {
57830 + GR_SHUTDOWN = 0,
57831 + GR_ENABLE = 1,
57832 + GR_SPROLE = 2,
57833 + GR_RELOAD = 3,
57834 + GR_SEGVMOD = 4,
57835 + GR_STATUS = 5,
57836 + GR_UNSPROLE = 6,
57837 + GR_PASSSET = 7,
57838 + GR_SPROLEPAM = 8,
57839 +};
57840 +
57841 +/* Password setup definitions
57842 + * kernel/grhash.c */
57843 +enum {
57844 + GR_PW_LEN = 128,
57845 + GR_SALT_LEN = 16,
57846 + GR_SHA_LEN = 32,
57847 +};
57848 +
57849 +enum {
57850 + GR_SPROLE_LEN = 64,
57851 +};
57852 +
57853 +enum {
57854 + GR_NO_GLOB = 0,
57855 + GR_REG_GLOB,
57856 + GR_CREATE_GLOB
57857 +};
57858 +
57859 +#define GR_NLIMITS 32
57860 +
57861 +/* Begin Data Structures */
57862 +
57863 +struct sprole_pw {
57864 + unsigned char *rolename;
57865 + unsigned char salt[GR_SALT_LEN];
57866 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
57867 +};
57868 +
57869 +struct name_entry {
57870 + __u32 key;
57871 + ino_t inode;
57872 + dev_t device;
57873 + char *name;
57874 + __u16 len;
57875 + __u8 deleted;
57876 + struct name_entry *prev;
57877 + struct name_entry *next;
57878 +};
57879 +
57880 +struct inodev_entry {
57881 + struct name_entry *nentry;
57882 + struct inodev_entry *prev;
57883 + struct inodev_entry *next;
57884 +};
57885 +
57886 +struct acl_role_db {
57887 + struct acl_role_label **r_hash;
57888 + __u32 r_size;
57889 +};
57890 +
57891 +struct inodev_db {
57892 + struct inodev_entry **i_hash;
57893 + __u32 i_size;
57894 +};
57895 +
57896 +struct name_db {
57897 + struct name_entry **n_hash;
57898 + __u32 n_size;
57899 +};
57900 +
57901 +struct crash_uid {
57902 + uid_t uid;
57903 + unsigned long expires;
57904 +};
57905 +
57906 +struct gr_hash_struct {
57907 + void **table;
57908 + void **nametable;
57909 + void *first;
57910 + __u32 table_size;
57911 + __u32 used_size;
57912 + int type;
57913 +};
57914 +
57915 +/* Userspace Grsecurity ACL data structures */
57916 +
57917 +struct acl_subject_label {
57918 + char *filename;
57919 + ino_t inode;
57920 + dev_t device;
57921 + __u32 mode;
57922 + kernel_cap_t cap_mask;
57923 + kernel_cap_t cap_lower;
57924 + kernel_cap_t cap_invert_audit;
57925 +
57926 + struct rlimit res[GR_NLIMITS];
57927 + __u32 resmask;
57928 +
57929 + __u8 user_trans_type;
57930 + __u8 group_trans_type;
57931 + uid_t *user_transitions;
57932 + gid_t *group_transitions;
57933 + __u16 user_trans_num;
57934 + __u16 group_trans_num;
57935 +
57936 + __u32 sock_families[2];
57937 + __u32 ip_proto[8];
57938 + __u32 ip_type;
57939 + struct acl_ip_label **ips;
57940 + __u32 ip_num;
57941 + __u32 inaddr_any_override;
57942 +
57943 + __u32 crashes;
57944 + unsigned long expires;
57945 +
57946 + struct acl_subject_label *parent_subject;
57947 + struct gr_hash_struct *hash;
57948 + struct acl_subject_label *prev;
57949 + struct acl_subject_label *next;
57950 +
57951 + struct acl_object_label **obj_hash;
57952 + __u32 obj_hash_size;
57953 + __u16 pax_flags;
57954 +};
57955 +
57956 +struct role_allowed_ip {
57957 + __u32 addr;
57958 + __u32 netmask;
57959 +
57960 + struct role_allowed_ip *prev;
57961 + struct role_allowed_ip *next;
57962 +};
57963 +
57964 +struct role_transition {
57965 + char *rolename;
57966 +
57967 + struct role_transition *prev;
57968 + struct role_transition *next;
57969 +};
57970 +
57971 +struct acl_role_label {
57972 + char *rolename;
57973 + uid_t uidgid;
57974 + __u16 roletype;
57975 +
57976 + __u16 auth_attempts;
57977 + unsigned long expires;
57978 +
57979 + struct acl_subject_label *root_label;
57980 + struct gr_hash_struct *hash;
57981 +
57982 + struct acl_role_label *prev;
57983 + struct acl_role_label *next;
57984 +
57985 + struct role_transition *transitions;
57986 + struct role_allowed_ip *allowed_ips;
57987 + uid_t *domain_children;
57988 + __u16 domain_child_num;
57989 +
57990 + struct acl_subject_label **subj_hash;
57991 + __u32 subj_hash_size;
57992 +};
57993 +
57994 +struct user_acl_role_db {
57995 + struct acl_role_label **r_table;
57996 + __u32 num_pointers; /* Number of allocations to track */
57997 + __u32 num_roles; /* Number of roles */
57998 + __u32 num_domain_children; /* Number of domain children */
57999 + __u32 num_subjects; /* Number of subjects */
58000 + __u32 num_objects; /* Number of objects */
58001 +};
58002 +
58003 +struct acl_object_label {
58004 + char *filename;
58005 + ino_t inode;
58006 + dev_t device;
58007 + __u32 mode;
58008 +
58009 + struct acl_subject_label *nested;
58010 + struct acl_object_label *globbed;
58011 +
58012 + /* next two structures not used */
58013 +
58014 + struct acl_object_label *prev;
58015 + struct acl_object_label *next;
58016 +};
58017 +
58018 +struct acl_ip_label {
58019 + char *iface;
58020 + __u32 addr;
58021 + __u32 netmask;
58022 + __u16 low, high;
58023 + __u8 mode;
58024 + __u32 type;
58025 + __u32 proto[8];
58026 +
58027 + /* next two structures not used */
58028 +
58029 + struct acl_ip_label *prev;
58030 + struct acl_ip_label *next;
58031 +};
58032 +
58033 +struct gr_arg {
58034 + struct user_acl_role_db role_db;
58035 + unsigned char pw[GR_PW_LEN];
58036 + unsigned char salt[GR_SALT_LEN];
58037 + unsigned char sum[GR_SHA_LEN];
58038 + unsigned char sp_role[GR_SPROLE_LEN];
58039 + struct sprole_pw *sprole_pws;
58040 + dev_t segv_device;
58041 + ino_t segv_inode;
58042 + uid_t segv_uid;
58043 + __u16 num_sprole_pws;
58044 + __u16 mode;
58045 +};
58046 +
58047 +struct gr_arg_wrapper {
58048 + struct gr_arg *arg;
58049 + __u32 version;
58050 + __u32 size;
58051 +};
58052 +
58053 +struct subject_map {
58054 + struct acl_subject_label *user;
58055 + struct acl_subject_label *kernel;
58056 + struct subject_map *prev;
58057 + struct subject_map *next;
58058 +};
58059 +
58060 +struct acl_subj_map_db {
58061 + struct subject_map **s_hash;
58062 + __u32 s_size;
58063 +};
58064 +
58065 +/* End Data Structures Section */
58066 +
58067 +/* Hash functions generated by empirical testing by Brad Spengler
58068 + Makes good use of the low bits of the inode. Generally 0-1 times
58069 + in loop for successful match. 0-3 for unsuccessful match.
58070 + Shift/add algorithm with modulus of table size and an XOR*/
58071 +
58072 +static __inline__ unsigned int
58073 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
58074 +{
58075 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
58076 +}
58077 +
58078 + static __inline__ unsigned int
58079 +shash(const struct acl_subject_label *userp, const unsigned int sz)
58080 +{
58081 + return ((const unsigned long)userp % sz);
58082 +}
58083 +
58084 +static __inline__ unsigned int
58085 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
58086 +{
58087 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
58088 +}
58089 +
58090 +static __inline__ unsigned int
58091 +nhash(const char *name, const __u16 len, const unsigned int sz)
58092 +{
58093 + return full_name_hash((const unsigned char *)name, len) % sz;
58094 +}
58095 +
58096 +#define FOR_EACH_ROLE_START(role) \
58097 + role = role_list; \
58098 + while (role) {
58099 +
58100 +#define FOR_EACH_ROLE_END(role) \
58101 + role = role->prev; \
58102 + }
58103 +
58104 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
58105 + subj = NULL; \
58106 + iter = 0; \
58107 + while (iter < role->subj_hash_size) { \
58108 + if (subj == NULL) \
58109 + subj = role->subj_hash[iter]; \
58110 + if (subj == NULL) { \
58111 + iter++; \
58112 + continue; \
58113 + }
58114 +
58115 +#define FOR_EACH_SUBJECT_END(subj,iter) \
58116 + subj = subj->next; \
58117 + if (subj == NULL) \
58118 + iter++; \
58119 + }
58120 +
58121 +
58122 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
58123 + subj = role->hash->first; \
58124 + while (subj != NULL) {
58125 +
58126 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
58127 + subj = subj->next; \
58128 + }
58129 +
58130 +#endif
58131 +
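The inline hash helpers at the bottom of this header (rhash, shash, fhash, nhash) simply reduce a key to an index modulo the table size; the RBAC code then walks a short collision chain at that slot. A simplified illustration of a lookup over a table indexed with fhash(); this is not the actual lookup routine from the patch, and the chain walk shown here is reduced to the minimum:

    /* Simplified illustration: find an object label for (inode, device) in a
     * subject's object hash table. The real code in the patch differs in
     * detail (mode checks, deleted-entry handling, and so on). */
    static struct acl_object_label *
    example_lookup_object(const struct acl_subject_label *subj,
                          const ino_t ino, const dev_t dev)
    {
            unsigned int index = fhash(ino, dev, subj->obj_hash_size);
            struct acl_object_label *match = subj->obj_hash[index];

            while (match && (match->inode != ino || match->device != dev))
                    match = match->prev;   /* collision chain in this sketch */

            return match;
    }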
58132 diff -urNp linux-2.6.32.45/include/linux/gralloc.h linux-2.6.32.45/include/linux/gralloc.h
58133 --- linux-2.6.32.45/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
58134 +++ linux-2.6.32.45/include/linux/gralloc.h 2011-04-17 15:56:46.000000000 -0400
58135 @@ -0,0 +1,9 @@
58136 +#ifndef __GRALLOC_H
58137 +#define __GRALLOC_H
58138 +
58139 +void acl_free_all(void);
58140 +int acl_alloc_stack_init(unsigned long size);
58141 +void *acl_alloc(unsigned long len);
58142 +void *acl_alloc_num(unsigned long num, unsigned long len);
58143 +
58144 +#endif
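These four functions form a small arena-style allocator used while an RBAC policy is loaded from userspace: `acl_alloc_stack_init()` sizes a stack that records every allocation, `acl_alloc()`/`acl_alloc_num()` hand out individual objects, and `acl_free_all()` tears the whole set down in one call when the policy is unloaded or loading fails. A usage sketch under those assumptions; error handling and the surrounding policy code are omitted and the local names are illustrative:

    /* Sketch: allocate a role table, with single-shot cleanup on failure. */
    static int example_load_policy(unsigned long num_roles)
    {
            struct acl_role_label **table;

            if (!acl_alloc_stack_init(num_roles))
                    return -ENOMEM;

            table = acl_alloc_num(num_roles, sizeof(*table));
            if (table == NULL)
                    goto fail;

            /* ... fill in roles, subjects, objects with further acl_alloc() calls ... */

            return 0;
    fail:
            acl_free_all();        /* releases every allocation made above */
            return -ENOMEM;
    }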
58145 diff -urNp linux-2.6.32.45/include/linux/grdefs.h linux-2.6.32.45/include/linux/grdefs.h
58146 --- linux-2.6.32.45/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
58147 +++ linux-2.6.32.45/include/linux/grdefs.h 2011-06-11 16:20:26.000000000 -0400
58148 @@ -0,0 +1,140 @@
58149 +#ifndef GRDEFS_H
58150 +#define GRDEFS_H
58151 +
58152 +/* Begin grsecurity status declarations */
58153 +
58154 +enum {
58155 + GR_READY = 0x01,
58156 + GR_STATUS_INIT = 0x00 // disabled state
58157 +};
58158 +
58159 +/* Begin ACL declarations */
58160 +
58161 +/* Role flags */
58162 +
58163 +enum {
58164 + GR_ROLE_USER = 0x0001,
58165 + GR_ROLE_GROUP = 0x0002,
58166 + GR_ROLE_DEFAULT = 0x0004,
58167 + GR_ROLE_SPECIAL = 0x0008,
58168 + GR_ROLE_AUTH = 0x0010,
58169 + GR_ROLE_NOPW = 0x0020,
58170 + GR_ROLE_GOD = 0x0040,
58171 + GR_ROLE_LEARN = 0x0080,
58172 + GR_ROLE_TPE = 0x0100,
58173 + GR_ROLE_DOMAIN = 0x0200,
58174 + GR_ROLE_PAM = 0x0400,
58175 + GR_ROLE_PERSIST = 0x800
58176 +};
58177 +
58178 +/* ACL Subject and Object mode flags */
58179 +enum {
58180 + GR_DELETED = 0x80000000
58181 +};
58182 +
58183 +/* ACL Object-only mode flags */
58184 +enum {
58185 + GR_READ = 0x00000001,
58186 + GR_APPEND = 0x00000002,
58187 + GR_WRITE = 0x00000004,
58188 + GR_EXEC = 0x00000008,
58189 + GR_FIND = 0x00000010,
58190 + GR_INHERIT = 0x00000020,
58191 + GR_SETID = 0x00000040,
58192 + GR_CREATE = 0x00000080,
58193 + GR_DELETE = 0x00000100,
58194 + GR_LINK = 0x00000200,
58195 + GR_AUDIT_READ = 0x00000400,
58196 + GR_AUDIT_APPEND = 0x00000800,
58197 + GR_AUDIT_WRITE = 0x00001000,
58198 + GR_AUDIT_EXEC = 0x00002000,
58199 + GR_AUDIT_FIND = 0x00004000,
58200 + GR_AUDIT_INHERIT= 0x00008000,
58201 + GR_AUDIT_SETID = 0x00010000,
58202 + GR_AUDIT_CREATE = 0x00020000,
58203 + GR_AUDIT_DELETE = 0x00040000,
58204 + GR_AUDIT_LINK = 0x00080000,
58205 + GR_PTRACERD = 0x00100000,
58206 + GR_NOPTRACE = 0x00200000,
58207 + GR_SUPPRESS = 0x00400000,
58208 + GR_NOLEARN = 0x00800000,
58209 + GR_INIT_TRANSFER= 0x01000000
58210 +};
58211 +
58212 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
58213 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
58214 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
58215 +
58216 +/* ACL subject-only mode flags */
58217 +enum {
58218 + GR_KILL = 0x00000001,
58219 + GR_VIEW = 0x00000002,
58220 + GR_PROTECTED = 0x00000004,
58221 + GR_LEARN = 0x00000008,
58222 + GR_OVERRIDE = 0x00000010,
58223 + /* just a placeholder, this mode is only used in userspace */
58224 + GR_DUMMY = 0x00000020,
58225 + GR_PROTSHM = 0x00000040,
58226 + GR_KILLPROC = 0x00000080,
58227 + GR_KILLIPPROC = 0x00000100,
58228 + /* just a placeholder, this mode is only used in userspace */
58229 + GR_NOTROJAN = 0x00000200,
58230 + GR_PROTPROCFD = 0x00000400,
58231 + GR_PROCACCT = 0x00000800,
58232 + GR_RELAXPTRACE = 0x00001000,
58233 + GR_NESTED = 0x00002000,
58234 + GR_INHERITLEARN = 0x00004000,
58235 + GR_PROCFIND = 0x00008000,
58236 + GR_POVERRIDE = 0x00010000,
58237 + GR_KERNELAUTH = 0x00020000,
58238 + GR_ATSECURE = 0x00040000,
58239 + GR_SHMEXEC = 0x00080000
58240 +};
58241 +
58242 +enum {
58243 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
58244 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
58245 + GR_PAX_ENABLE_MPROTECT = 0x0004,
58246 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
58247 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
58248 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
58249 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
58250 + GR_PAX_DISABLE_MPROTECT = 0x0400,
58251 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
58252 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
58253 +};
58254 +
58255 +enum {
58256 + GR_ID_USER = 0x01,
58257 + GR_ID_GROUP = 0x02,
58258 +};
58259 +
58260 +enum {
58261 + GR_ID_ALLOW = 0x01,
58262 + GR_ID_DENY = 0x02,
58263 +};
58264 +
58265 +#define GR_CRASH_RES 31
58266 +#define GR_UIDTABLE_MAX 500
58267 +
58268 +/* begin resource learning section */
58269 +enum {
58270 + GR_RLIM_CPU_BUMP = 60,
58271 + GR_RLIM_FSIZE_BUMP = 50000,
58272 + GR_RLIM_DATA_BUMP = 10000,
58273 + GR_RLIM_STACK_BUMP = 1000,
58274 + GR_RLIM_CORE_BUMP = 10000,
58275 + GR_RLIM_RSS_BUMP = 500000,
58276 + GR_RLIM_NPROC_BUMP = 1,
58277 + GR_RLIM_NOFILE_BUMP = 5,
58278 + GR_RLIM_MEMLOCK_BUMP = 50000,
58279 + GR_RLIM_AS_BUMP = 500000,
58280 + GR_RLIM_LOCKS_BUMP = 2,
58281 + GR_RLIM_SIGPENDING_BUMP = 5,
58282 + GR_RLIM_MSGQUEUE_BUMP = 10000,
58283 + GR_RLIM_NICE_BUMP = 1,
58284 + GR_RLIM_RTPRIO_BUMP = 1,
58285 + GR_RLIM_RTTIME_BUMP = 1000000
58286 +};
58287 +
58288 +#endif
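Worth noting about the object-mode flags above: each base permission bit (GR_READ through GR_LINK) has a matching GR_AUDIT_* bit exactly ten positions higher, which is what makes a compact permission-to-audit conversion possible (the real conversion helper, to_gr_audit(), is only declared in grinternal.h below). A hypothetical illustration of that relationship, not the patch's own helper:

    /* GR_READ (0x1) -> GR_AUDIT_READ (0x400), GR_WRITE (0x4) -> GR_AUDIT_WRITE
     * (0x1000), and so on: the audit flag is the access flag shifted left by 10. */
    #define EXAMPLE_AUDIT_SHIFT 10

    static __u32 example_to_audit(const __u32 reqmode)
    {
            /* Shift the requested access bits into the audit range and keep
             * only the bits that are defined as audit flags. */
            return (reqmode << EXAMPLE_AUDIT_SHIFT) & GR_AUDITS;
    }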
58289 diff -urNp linux-2.6.32.45/include/linux/grinternal.h linux-2.6.32.45/include/linux/grinternal.h
58290 --- linux-2.6.32.45/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
58291 +++ linux-2.6.32.45/include/linux/grinternal.h 2011-08-11 19:58:37.000000000 -0400
58292 @@ -0,0 +1,217 @@
58293 +#ifndef __GRINTERNAL_H
58294 +#define __GRINTERNAL_H
58295 +
58296 +#ifdef CONFIG_GRKERNSEC
58297 +
58298 +#include <linux/fs.h>
58299 +#include <linux/mnt_namespace.h>
58300 +#include <linux/nsproxy.h>
58301 +#include <linux/gracl.h>
58302 +#include <linux/grdefs.h>
58303 +#include <linux/grmsg.h>
58304 +
58305 +void gr_add_learn_entry(const char *fmt, ...)
58306 + __attribute__ ((format (printf, 1, 2)));
58307 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
58308 + const struct vfsmount *mnt);
58309 +__u32 gr_check_create(const struct dentry *new_dentry,
58310 + const struct dentry *parent,
58311 + const struct vfsmount *mnt, const __u32 mode);
58312 +int gr_check_protected_task(const struct task_struct *task);
58313 +__u32 to_gr_audit(const __u32 reqmode);
58314 +int gr_set_acls(const int type);
58315 +int gr_apply_subject_to_task(struct task_struct *task);
58316 +int gr_acl_is_enabled(void);
58317 +char gr_roletype_to_char(void);
58318 +
58319 +void gr_handle_alertkill(struct task_struct *task);
58320 +char *gr_to_filename(const struct dentry *dentry,
58321 + const struct vfsmount *mnt);
58322 +char *gr_to_filename1(const struct dentry *dentry,
58323 + const struct vfsmount *mnt);
58324 +char *gr_to_filename2(const struct dentry *dentry,
58325 + const struct vfsmount *mnt);
58326 +char *gr_to_filename3(const struct dentry *dentry,
58327 + const struct vfsmount *mnt);
58328 +
58329 +extern int grsec_enable_harden_ptrace;
58330 +extern int grsec_enable_link;
58331 +extern int grsec_enable_fifo;
58332 +extern int grsec_enable_shm;
58333 +extern int grsec_enable_execlog;
58334 +extern int grsec_enable_signal;
58335 +extern int grsec_enable_audit_ptrace;
58336 +extern int grsec_enable_forkfail;
58337 +extern int grsec_enable_time;
58338 +extern int grsec_enable_rofs;
58339 +extern int grsec_enable_chroot_shmat;
58340 +extern int grsec_enable_chroot_mount;
58341 +extern int grsec_enable_chroot_double;
58342 +extern int grsec_enable_chroot_pivot;
58343 +extern int grsec_enable_chroot_chdir;
58344 +extern int grsec_enable_chroot_chmod;
58345 +extern int grsec_enable_chroot_mknod;
58346 +extern int grsec_enable_chroot_fchdir;
58347 +extern int grsec_enable_chroot_nice;
58348 +extern int grsec_enable_chroot_execlog;
58349 +extern int grsec_enable_chroot_caps;
58350 +extern int grsec_enable_chroot_sysctl;
58351 +extern int grsec_enable_chroot_unix;
58352 +extern int grsec_enable_tpe;
58353 +extern int grsec_tpe_gid;
58354 +extern int grsec_enable_tpe_all;
58355 +extern int grsec_enable_tpe_invert;
58356 +extern int grsec_enable_socket_all;
58357 +extern int grsec_socket_all_gid;
58358 +extern int grsec_enable_socket_client;
58359 +extern int grsec_socket_client_gid;
58360 +extern int grsec_enable_socket_server;
58361 +extern int grsec_socket_server_gid;
58362 +extern int grsec_audit_gid;
58363 +extern int grsec_enable_group;
58364 +extern int grsec_enable_audit_textrel;
58365 +extern int grsec_enable_log_rwxmaps;
58366 +extern int grsec_enable_mount;
58367 +extern int grsec_enable_chdir;
58368 +extern int grsec_resource_logging;
58369 +extern int grsec_enable_blackhole;
58370 +extern int grsec_lastack_retries;
58371 +extern int grsec_enable_brute;
58372 +extern int grsec_lock;
58373 +
58374 +extern spinlock_t grsec_alert_lock;
58375 +extern unsigned long grsec_alert_wtime;
58376 +extern unsigned long grsec_alert_fyet;
58377 +
58378 +extern spinlock_t grsec_audit_lock;
58379 +
58380 +extern rwlock_t grsec_exec_file_lock;
58381 +
58382 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
58383 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
58384 + (tsk)->exec_file->f_vfsmnt) : "/")
58385 +
58386 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
58387 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
58388 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58389 +
58390 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
58391 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
58392 + (tsk)->exec_file->f_vfsmnt) : "/")
58393 +
58394 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
58395 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
58396 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58397 +
58398 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
58399 +
58400 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
58401 +
58402 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
58403 + (task)->pid, (cred)->uid, \
58404 + (cred)->euid, (cred)->gid, (cred)->egid, \
58405 + gr_parent_task_fullpath(task), \
58406 + (task)->real_parent->comm, (task)->real_parent->pid, \
58407 + (pcred)->uid, (pcred)->euid, \
58408 + (pcred)->gid, (pcred)->egid
58409 +
58410 +#define GR_CHROOT_CAPS {{ \
58411 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
58412 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
58413 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
58414 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
58415 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
58416 + CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
58417 +
58418 +#define security_learn(normal_msg,args...) \
58419 +({ \
58420 + read_lock(&grsec_exec_file_lock); \
58421 + gr_add_learn_entry(normal_msg "\n", ## args); \
58422 + read_unlock(&grsec_exec_file_lock); \
58423 +})
58424 +
58425 +enum {
58426 + GR_DO_AUDIT,
58427 + GR_DONT_AUDIT,
58428 + GR_DONT_AUDIT_GOOD
58429 +};
58430 +
58431 +enum {
58432 + GR_TTYSNIFF,
58433 + GR_RBAC,
58434 + GR_RBAC_STR,
58435 + GR_STR_RBAC,
58436 + GR_RBAC_MODE2,
58437 + GR_RBAC_MODE3,
58438 + GR_FILENAME,
58439 + GR_SYSCTL_HIDDEN,
58440 + GR_NOARGS,
58441 + GR_ONE_INT,
58442 + GR_ONE_INT_TWO_STR,
58443 + GR_ONE_STR,
58444 + GR_STR_INT,
58445 + GR_TWO_STR_INT,
58446 + GR_TWO_INT,
58447 + GR_TWO_U64,
58448 + GR_THREE_INT,
58449 + GR_FIVE_INT_TWO_STR,
58450 + GR_TWO_STR,
58451 + GR_THREE_STR,
58452 + GR_FOUR_STR,
58453 + GR_STR_FILENAME,
58454 + GR_FILENAME_STR,
58455 + GR_FILENAME_TWO_INT,
58456 + GR_FILENAME_TWO_INT_STR,
58457 + GR_TEXTREL,
58458 + GR_PTRACE,
58459 + GR_RESOURCE,
58460 + GR_CAP,
58461 + GR_SIG,
58462 + GR_SIG2,
58463 + GR_CRASH1,
58464 + GR_CRASH2,
58465 + GR_PSACCT,
58466 + GR_RWXMAP
58467 +};
58468 +
58469 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
58470 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
58471 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
58472 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
58473 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
58474 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
58475 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
58476 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
58477 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
58478 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
58479 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
58480 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
58481 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
58482 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
58483 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
58484 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
58485 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
58486 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
58487 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
58488 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
58489 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
58490 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
58491 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
58492 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
58493 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
58494 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
58495 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
58496 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
58497 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
58498 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
58499 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
58500 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
58501 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
58502 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
58503 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
58504 +
58505 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
58506 +
58507 +#endif
58508 +
58509 +#endif
58510 diff -urNp linux-2.6.32.45/include/linux/grmsg.h linux-2.6.32.45/include/linux/grmsg.h
58511 --- linux-2.6.32.45/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
58512 +++ linux-2.6.32.45/include/linux/grmsg.h 2011-08-25 17:28:11.000000000 -0400
58513 @@ -0,0 +1,107 @@
58514 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
58515 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
58516 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
58517 +#define GR_STOPMOD_MSG "denied modification of module state by "
58518 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
58519 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
58520 +#define GR_IOPERM_MSG "denied use of ioperm() by "
58521 +#define GR_IOPL_MSG "denied use of iopl() by "
58522 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
58523 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
58524 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
58525 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
58526 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
58527 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
58528 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
58529 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
58530 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
58531 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
58532 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
58533 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
58534 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
58535 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
58536 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
58537 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
58538 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
58539 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
58540 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
58541 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
58542 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
58543 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
58544 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
58545 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
58546 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
58547 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
58548 +#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
58549 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
58550 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
58551 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
58552 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
58553 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
58554 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
58555 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
58556 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
58557 +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
58558 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
58559 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
58560 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
58561 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
58562 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
58563 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
58564 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
58565 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
58566 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
58567 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
58568 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
58569 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
58570 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
58571 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
58572 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
58573 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
58574 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
58575 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
58576 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
58577 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
58578 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
58579 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
58580 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
58581 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
58582 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
58583 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
58584 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
58585 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
58586 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
58587 +#define GR_NICE_CHROOT_MSG "denied priority change by "
58588 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
58589 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
58590 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
58591 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
58592 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
58593 +#define GR_TIME_MSG "time set by "
58594 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
58595 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
58596 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
58597 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
58598 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
58599 +#define GR_BIND_MSG "denied bind() by "
58600 +#define GR_CONNECT_MSG "denied connect() by "
58601 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
58602 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
58603 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
58604 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
58605 +#define GR_CAP_ACL_MSG "use of %s denied for "
58606 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
58607 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
58608 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
58609 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
58610 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
58611 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
58612 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
58613 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
58614 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
58615 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
58616 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
58617 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
58618 +#define GR_VM86_MSG "denied use of vm86 by "
58619 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
58620 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
58621 diff -urNp linux-2.6.32.45/include/linux/grsecurity.h linux-2.6.32.45/include/linux/grsecurity.h
58622 --- linux-2.6.32.45/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
58623 +++ linux-2.6.32.45/include/linux/grsecurity.h 2011-08-11 19:58:57.000000000 -0400
58624 @@ -0,0 +1,217 @@
58625 +#ifndef GR_SECURITY_H
58626 +#define GR_SECURITY_H
58627 +#include <linux/fs.h>
58628 +#include <linux/fs_struct.h>
58629 +#include <linux/binfmts.h>
58630 +#include <linux/gracl.h>
58631 +#include <linux/compat.h>
58632 +
58633 +/* notify of brain-dead configs */
58634 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58635 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
58636 +#endif
58637 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
58638 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
58639 +#endif
58640 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
58641 +#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
58642 +#endif
58643 +#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
58644 +#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
58645 +#endif
58646 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
58647 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
58648 +#endif
58649 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
58650 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
58651 +#endif
58652 +
58653 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
58654 +void gr_handle_brute_check(void);
58655 +void gr_handle_kernel_exploit(void);
58656 +int gr_process_user_ban(void);
58657 +
58658 +char gr_roletype_to_char(void);
58659 +
58660 +int gr_acl_enable_at_secure(void);
58661 +
58662 +int gr_check_user_change(int real, int effective, int fs);
58663 +int gr_check_group_change(int real, int effective, int fs);
58664 +
58665 +void gr_del_task_from_ip_table(struct task_struct *p);
58666 +
58667 +int gr_pid_is_chrooted(struct task_struct *p);
58668 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
58669 +int gr_handle_chroot_nice(void);
58670 +int gr_handle_chroot_sysctl(const int op);
58671 +int gr_handle_chroot_setpriority(struct task_struct *p,
58672 + const int niceval);
58673 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
58674 +int gr_handle_chroot_chroot(const struct dentry *dentry,
58675 + const struct vfsmount *mnt);
58676 +int gr_handle_chroot_caps(struct path *path);
58677 +void gr_handle_chroot_chdir(struct path *path);
58678 +int gr_handle_chroot_chmod(const struct dentry *dentry,
58679 + const struct vfsmount *mnt, const int mode);
58680 +int gr_handle_chroot_mknod(const struct dentry *dentry,
58681 + const struct vfsmount *mnt, const int mode);
58682 +int gr_handle_chroot_mount(const struct dentry *dentry,
58683 + const struct vfsmount *mnt,
58684 + const char *dev_name);
58685 +int gr_handle_chroot_pivot(void);
58686 +int gr_handle_chroot_unix(const pid_t pid);
58687 +
58688 +int gr_handle_rawio(const struct inode *inode);
58689 +
58690 +void gr_handle_ioperm(void);
58691 +void gr_handle_iopl(void);
58692 +
58693 +int gr_tpe_allow(const struct file *file);
58694 +
58695 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
58696 +void gr_clear_chroot_entries(struct task_struct *task);
58697 +
58698 +void gr_log_forkfail(const int retval);
58699 +void gr_log_timechange(void);
58700 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
58701 +void gr_log_chdir(const struct dentry *dentry,
58702 + const struct vfsmount *mnt);
58703 +void gr_log_chroot_exec(const struct dentry *dentry,
58704 + const struct vfsmount *mnt);
58705 +void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
58706 +#ifdef CONFIG_COMPAT
58707 +void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
58708 +#endif
58709 +void gr_log_remount(const char *devname, const int retval);
58710 +void gr_log_unmount(const char *devname, const int retval);
58711 +void gr_log_mount(const char *from, const char *to, const int retval);
58712 +void gr_log_textrel(struct vm_area_struct *vma);
58713 +void gr_log_rwxmmap(struct file *file);
58714 +void gr_log_rwxmprotect(struct file *file);
58715 +
58716 +int gr_handle_follow_link(const struct inode *parent,
58717 + const struct inode *inode,
58718 + const struct dentry *dentry,
58719 + const struct vfsmount *mnt);
58720 +int gr_handle_fifo(const struct dentry *dentry,
58721 + const struct vfsmount *mnt,
58722 + const struct dentry *dir, const int flag,
58723 + const int acc_mode);
58724 +int gr_handle_hardlink(const struct dentry *dentry,
58725 + const struct vfsmount *mnt,
58726 + struct inode *inode,
58727 + const int mode, const char *to);
58728 +
58729 +int gr_is_capable(const int cap);
58730 +int gr_is_capable_nolog(const int cap);
58731 +void gr_learn_resource(const struct task_struct *task, const int limit,
58732 + const unsigned long wanted, const int gt);
58733 +void gr_copy_label(struct task_struct *tsk);
58734 +void gr_handle_crash(struct task_struct *task, const int sig);
58735 +int gr_handle_signal(const struct task_struct *p, const int sig);
58736 +int gr_check_crash_uid(const uid_t uid);
58737 +int gr_check_protected_task(const struct task_struct *task);
58738 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
58739 +int gr_acl_handle_mmap(const struct file *file,
58740 + const unsigned long prot);
58741 +int gr_acl_handle_mprotect(const struct file *file,
58742 + const unsigned long prot);
58743 +int gr_check_hidden_task(const struct task_struct *tsk);
58744 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
58745 + const struct vfsmount *mnt);
58746 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
58747 + const struct vfsmount *mnt);
58748 +__u32 gr_acl_handle_access(const struct dentry *dentry,
58749 + const struct vfsmount *mnt, const int fmode);
58750 +__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
58751 + const struct vfsmount *mnt, mode_t mode);
58752 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
58753 + const struct vfsmount *mnt, mode_t mode);
58754 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
58755 + const struct vfsmount *mnt);
58756 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
58757 + const struct vfsmount *mnt);
58758 +int gr_handle_ptrace(struct task_struct *task, const long request);
58759 +int gr_handle_proc_ptrace(struct task_struct *task);
58760 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
58761 + const struct vfsmount *mnt);
58762 +int gr_check_crash_exec(const struct file *filp);
58763 +int gr_acl_is_enabled(void);
58764 +void gr_set_kernel_label(struct task_struct *task);
58765 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
58766 + const gid_t gid);
58767 +int gr_set_proc_label(const struct dentry *dentry,
58768 + const struct vfsmount *mnt,
58769 + const int unsafe_share);
58770 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
58771 + const struct vfsmount *mnt);
58772 +__u32 gr_acl_handle_open(const struct dentry *dentry,
58773 + const struct vfsmount *mnt, const int fmode);
58774 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
58775 + const struct dentry *p_dentry,
58776 + const struct vfsmount *p_mnt, const int fmode,
58777 + const int imode);
58778 +void gr_handle_create(const struct dentry *dentry,
58779 + const struct vfsmount *mnt);
58780 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
58781 + const struct dentry *parent_dentry,
58782 + const struct vfsmount *parent_mnt,
58783 + const int mode);
58784 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
58785 + const struct dentry *parent_dentry,
58786 + const struct vfsmount *parent_mnt);
58787 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
58788 + const struct vfsmount *mnt);
58789 +void gr_handle_delete(const ino_t ino, const dev_t dev);
58790 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
58791 + const struct vfsmount *mnt);
58792 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
58793 + const struct dentry *parent_dentry,
58794 + const struct vfsmount *parent_mnt,
58795 + const char *from);
58796 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
58797 + const struct dentry *parent_dentry,
58798 + const struct vfsmount *parent_mnt,
58799 + const struct dentry *old_dentry,
58800 + const struct vfsmount *old_mnt, const char *to);
58801 +int gr_acl_handle_rename(struct dentry *new_dentry,
58802 + struct dentry *parent_dentry,
58803 + const struct vfsmount *parent_mnt,
58804 + struct dentry *old_dentry,
58805 + struct inode *old_parent_inode,
58806 + struct vfsmount *old_mnt, const char *newname);
58807 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
58808 + struct dentry *old_dentry,
58809 + struct dentry *new_dentry,
58810 + struct vfsmount *mnt, const __u8 replace);
58811 +__u32 gr_check_link(const struct dentry *new_dentry,
58812 + const struct dentry *parent_dentry,
58813 + const struct vfsmount *parent_mnt,
58814 + const struct dentry *old_dentry,
58815 + const struct vfsmount *old_mnt);
58816 +int gr_acl_handle_filldir(const struct file *file, const char *name,
58817 + const unsigned int namelen, const ino_t ino);
58818 +
58819 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
58820 + const struct vfsmount *mnt);
58821 +void gr_acl_handle_exit(void);
58822 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
58823 +int gr_acl_handle_procpidmem(const struct task_struct *task);
58824 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
58825 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
58826 +void gr_audit_ptrace(struct task_struct *task);
58827 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
58828 +
58829 +#ifdef CONFIG_GRKERNSEC
58830 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
58831 +void gr_handle_vm86(void);
58832 +void gr_handle_mem_readwrite(u64 from, u64 to);
58833 +
58834 +extern int grsec_enable_dmesg;
58835 +extern int grsec_disable_privio;
58836 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
58837 +extern int grsec_enable_chroot_findtask;
58838 +#endif
58839 +#endif
58840 +
58841 +#endif
58842 diff -urNp linux-2.6.32.45/include/linux/hdpu_features.h linux-2.6.32.45/include/linux/hdpu_features.h
58843 --- linux-2.6.32.45/include/linux/hdpu_features.h 2011-03-27 14:31:47.000000000 -0400
58844 +++ linux-2.6.32.45/include/linux/hdpu_features.h 2011-04-17 15:56:46.000000000 -0400
58845 @@ -3,7 +3,7 @@
58846 struct cpustate_t {
58847 spinlock_t lock;
58848 int excl;
58849 - int open_count;
58850 + atomic_t open_count;
58851 unsigned char cached_val;
58852 int inited;
58853 unsigned long *set_addr;
58854 diff -urNp linux-2.6.32.45/include/linux/highmem.h linux-2.6.32.45/include/linux/highmem.h
58855 --- linux-2.6.32.45/include/linux/highmem.h 2011-03-27 14:31:47.000000000 -0400
58856 +++ linux-2.6.32.45/include/linux/highmem.h 2011-04-17 15:56:46.000000000 -0400
58857 @@ -137,6 +137,18 @@ static inline void clear_highpage(struct
58858 kunmap_atomic(kaddr, KM_USER0);
58859 }
58860
58861 +static inline void sanitize_highpage(struct page *page)
58862 +{
58863 + void *kaddr;
58864 + unsigned long flags;
58865 +
58866 + local_irq_save(flags);
58867 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
58868 + clear_page(kaddr);
58869 + kunmap_atomic(kaddr, KM_CLEARPAGE);
58870 + local_irq_restore(flags);
58871 +}
58872 +
58873 static inline void zero_user_segments(struct page *page,
58874 unsigned start1, unsigned end1,
58875 unsigned start2, unsigned end2)
58876 diff -urNp linux-2.6.32.45/include/linux/i2c.h linux-2.6.32.45/include/linux/i2c.h
58877 --- linux-2.6.32.45/include/linux/i2c.h 2011-03-27 14:31:47.000000000 -0400
58878 +++ linux-2.6.32.45/include/linux/i2c.h 2011-08-23 21:22:38.000000000 -0400
58879 @@ -325,6 +325,7 @@ struct i2c_algorithm {
58880 /* To determine what the adapter supports */
58881 u32 (*functionality) (struct i2c_adapter *);
58882 };
58883 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
58884
58885 /*
58886 * i2c_adapter is the structure used to identify a physical i2c bus along
58887 diff -urNp linux-2.6.32.45/include/linux/i2o.h linux-2.6.32.45/include/linux/i2o.h
58888 --- linux-2.6.32.45/include/linux/i2o.h 2011-03-27 14:31:47.000000000 -0400
58889 +++ linux-2.6.32.45/include/linux/i2o.h 2011-05-04 17:56:28.000000000 -0400
58890 @@ -564,7 +564,7 @@ struct i2o_controller {
58891 struct i2o_device *exec; /* Executive */
58892 #if BITS_PER_LONG == 64
58893 spinlock_t context_list_lock; /* lock for context_list */
58894 - atomic_t context_list_counter; /* needed for unique contexts */
58895 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
58896 struct list_head context_list; /* list of context id's
58897 and pointers */
58898 #endif
58899 diff -urNp linux-2.6.32.45/include/linux/init_task.h linux-2.6.32.45/include/linux/init_task.h
58900 --- linux-2.6.32.45/include/linux/init_task.h 2011-03-27 14:31:47.000000000 -0400
58901 +++ linux-2.6.32.45/include/linux/init_task.h 2011-05-18 20:44:59.000000000 -0400
58902 @@ -83,6 +83,12 @@ extern struct group_info init_groups;
58903 #define INIT_IDS
58904 #endif
58905
58906 +#ifdef CONFIG_X86
58907 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
58908 +#else
58909 +#define INIT_TASK_THREAD_INFO
58910 +#endif
58911 +
58912 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
58913 /*
58914 * Because of the reduced scope of CAP_SETPCAP when filesystem
58915 @@ -156,6 +162,7 @@ extern struct cred init_cred;
58916 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
58917 .comm = "swapper", \
58918 .thread = INIT_THREAD, \
58919 + INIT_TASK_THREAD_INFO \
58920 .fs = &init_fs, \
58921 .files = &init_files, \
58922 .signal = &init_signals, \
58923 diff -urNp linux-2.6.32.45/include/linux/intel-iommu.h linux-2.6.32.45/include/linux/intel-iommu.h
58924 --- linux-2.6.32.45/include/linux/intel-iommu.h 2011-03-27 14:31:47.000000000 -0400
58925 +++ linux-2.6.32.45/include/linux/intel-iommu.h 2011-08-05 20:33:55.000000000 -0400
58926 @@ -296,7 +296,7 @@ struct iommu_flush {
58927 u8 fm, u64 type);
58928 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
58929 unsigned int size_order, u64 type);
58930 -};
58931 +} __no_const;
58932
58933 enum {
58934 SR_DMAR_FECTL_REG,
58935 diff -urNp linux-2.6.32.45/include/linux/interrupt.h linux-2.6.32.45/include/linux/interrupt.h
58936 --- linux-2.6.32.45/include/linux/interrupt.h 2011-06-25 12:55:35.000000000 -0400
58937 +++ linux-2.6.32.45/include/linux/interrupt.h 2011-06-25 12:56:37.000000000 -0400
58938 @@ -363,7 +363,7 @@ enum
58939 /* map softirq index to softirq name. update 'softirq_to_name' in
58940 * kernel/softirq.c when adding a new softirq.
58941 */
58942 -extern char *softirq_to_name[NR_SOFTIRQS];
58943 +extern const char * const softirq_to_name[NR_SOFTIRQS];
58944
58945 /* softirq mask and active fields moved to irq_cpustat_t in
58946 * asm/hardirq.h to get better cache usage. KAO
58947 @@ -371,12 +371,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
58948
58949 struct softirq_action
58950 {
58951 - void (*action)(struct softirq_action *);
58952 + void (*action)(void);
58953 };
58954
58955 asmlinkage void do_softirq(void);
58956 asmlinkage void __do_softirq(void);
58957 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
58958 +extern void open_softirq(int nr, void (*action)(void));
58959 extern void softirq_init(void);
58960 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
58961 extern void raise_softirq_irqoff(unsigned int nr);
58962 diff -urNp linux-2.6.32.45/include/linux/irq.h linux-2.6.32.45/include/linux/irq.h
58963 --- linux-2.6.32.45/include/linux/irq.h 2011-03-27 14:31:47.000000000 -0400
58964 +++ linux-2.6.32.45/include/linux/irq.h 2011-04-17 15:56:46.000000000 -0400
58965 @@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq,
58966 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
58967 bool boot)
58968 {
58969 +#ifdef CONFIG_CPUMASK_OFFSTACK
58970 gfp_t gfp = GFP_ATOMIC;
58971
58972 if (boot)
58973 gfp = GFP_NOWAIT;
58974
58975 -#ifdef CONFIG_CPUMASK_OFFSTACK
58976 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
58977 return false;
58978
58979 diff -urNp linux-2.6.32.45/include/linux/kallsyms.h linux-2.6.32.45/include/linux/kallsyms.h
58980 --- linux-2.6.32.45/include/linux/kallsyms.h 2011-03-27 14:31:47.000000000 -0400
58981 +++ linux-2.6.32.45/include/linux/kallsyms.h 2011-04-17 15:56:46.000000000 -0400
58982 @@ -15,7 +15,8 @@
58983
58984 struct module;
58985
58986 -#ifdef CONFIG_KALLSYMS
58987 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
58988 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
58989 /* Lookup the address for a symbol. Returns 0 if not found. */
58990 unsigned long kallsyms_lookup_name(const char *name);
58991
58992 @@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(un
58993 /* Stupid that this does nothing, but I didn't create this mess. */
58994 #define __print_symbol(fmt, addr)
58995 #endif /*CONFIG_KALLSYMS*/
58996 +#else /* when included by kallsyms.c, vsnprintf.c, or
58997 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
58998 +extern void __print_symbol(const char *fmt, unsigned long address);
58999 +extern int sprint_symbol(char *buffer, unsigned long address);
59000 +const char *kallsyms_lookup(unsigned long addr,
59001 + unsigned long *symbolsize,
59002 + unsigned long *offset,
59003 + char **modname, char *namebuf);
59004 +#endif
59005
59006 /* This macro allows us to keep printk typechecking */
59007 static void __check_printsym_format(const char *fmt, ...)
59008 diff -urNp linux-2.6.32.45/include/linux/kgdb.h linux-2.6.32.45/include/linux/kgdb.h
59009 --- linux-2.6.32.45/include/linux/kgdb.h 2011-03-27 14:31:47.000000000 -0400
59010 +++ linux-2.6.32.45/include/linux/kgdb.h 2011-08-26 20:25:20.000000000 -0400
59011 @@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
59012
59013 extern int kgdb_connected;
59014
59015 -extern atomic_t kgdb_setting_breakpoint;
59016 -extern atomic_t kgdb_cpu_doing_single_step;
59017 +extern atomic_unchecked_t kgdb_setting_breakpoint;
59018 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
59019
59020 extern struct task_struct *kgdb_usethread;
59021 extern struct task_struct *kgdb_contthread;
59022 @@ -235,7 +235,7 @@ struct kgdb_arch {
59023 int (*remove_hw_breakpoint)(unsigned long, int, enum kgdb_bptype);
59024 void (*remove_all_hw_break)(void);
59025 void (*correct_hw_break)(void);
59026 -};
59027 +} __do_const;
59028
59029 /**
59030 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
59031 @@ -257,14 +257,14 @@ struct kgdb_io {
59032 int (*init) (void);
59033 void (*pre_exception) (void);
59034 void (*post_exception) (void);
59035 -};
59036 +} __do_const;
59037
59038 -extern struct kgdb_arch arch_kgdb_ops;
59039 +extern const struct kgdb_arch arch_kgdb_ops;
59040
59041 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
59042
59043 -extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
59044 -extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
59045 +extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
59046 +extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
59047
59048 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
59049 extern int kgdb_mem2hex(char *mem, char *buf, int count);
59050 diff -urNp linux-2.6.32.45/include/linux/kmod.h linux-2.6.32.45/include/linux/kmod.h
59051 --- linux-2.6.32.45/include/linux/kmod.h 2011-03-27 14:31:47.000000000 -0400
59052 +++ linux-2.6.32.45/include/linux/kmod.h 2011-04-17 15:56:46.000000000 -0400
59053 @@ -31,6 +31,8 @@
59054 * usually useless though. */
59055 extern int __request_module(bool wait, const char *name, ...) \
59056 __attribute__((format(printf, 2, 3)));
59057 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
59058 + __attribute__((format(printf, 3, 4)));
59059 #define request_module(mod...) __request_module(true, mod)
59060 #define request_module_nowait(mod...) __request_module(false, mod)
59061 #define try_then_request_module(x, mod...) \
59062 diff -urNp linux-2.6.32.45/include/linux/kobject.h linux-2.6.32.45/include/linux/kobject.h
59063 --- linux-2.6.32.45/include/linux/kobject.h 2011-03-27 14:31:47.000000000 -0400
59064 +++ linux-2.6.32.45/include/linux/kobject.h 2011-04-17 15:56:46.000000000 -0400
59065 @@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kob
59066
59067 struct kobj_type {
59068 void (*release)(struct kobject *kobj);
59069 - struct sysfs_ops *sysfs_ops;
59070 + const struct sysfs_ops *sysfs_ops;
59071 struct attribute **default_attrs;
59072 };
59073
59074 @@ -118,9 +118,9 @@ struct kobj_uevent_env {
59075 };
59076
59077 struct kset_uevent_ops {
59078 - int (*filter)(struct kset *kset, struct kobject *kobj);
59079 - const char *(*name)(struct kset *kset, struct kobject *kobj);
59080 - int (*uevent)(struct kset *kset, struct kobject *kobj,
59081 + int (* const filter)(struct kset *kset, struct kobject *kobj);
59082 + const char *(* const name)(struct kset *kset, struct kobject *kobj);
59083 + int (* const uevent)(struct kset *kset, struct kobject *kobj,
59084 struct kobj_uevent_env *env);
59085 };
59086
59087 @@ -132,7 +132,7 @@ struct kobj_attribute {
59088 const char *buf, size_t count);
59089 };
59090
59091 -extern struct sysfs_ops kobj_sysfs_ops;
59092 +extern const struct sysfs_ops kobj_sysfs_ops;
59093
59094 /**
59095 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
59096 @@ -155,14 +155,14 @@ struct kset {
59097 struct list_head list;
59098 spinlock_t list_lock;
59099 struct kobject kobj;
59100 - struct kset_uevent_ops *uevent_ops;
59101 + const struct kset_uevent_ops *uevent_ops;
59102 };
59103
59104 extern void kset_init(struct kset *kset);
59105 extern int __must_check kset_register(struct kset *kset);
59106 extern void kset_unregister(struct kset *kset);
59107 extern struct kset * __must_check kset_create_and_add(const char *name,
59108 - struct kset_uevent_ops *u,
59109 + const struct kset_uevent_ops *u,
59110 struct kobject *parent_kobj);
59111
59112 static inline struct kset *to_kset(struct kobject *kobj)
59113 diff -urNp linux-2.6.32.45/include/linux/kvm_host.h linux-2.6.32.45/include/linux/kvm_host.h
59114 --- linux-2.6.32.45/include/linux/kvm_host.h 2011-03-27 14:31:47.000000000 -0400
59115 +++ linux-2.6.32.45/include/linux/kvm_host.h 2011-04-17 15:56:46.000000000 -0400
59116 @@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
59117 void vcpu_load(struct kvm_vcpu *vcpu);
59118 void vcpu_put(struct kvm_vcpu *vcpu);
59119
59120 -int kvm_init(void *opaque, unsigned int vcpu_size,
59121 +int kvm_init(const void *opaque, unsigned int vcpu_size,
59122 struct module *module);
59123 void kvm_exit(void);
59124
59125 @@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
59126 struct kvm_guest_debug *dbg);
59127 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
59128
59129 -int kvm_arch_init(void *opaque);
59130 +int kvm_arch_init(const void *opaque);
59131 void kvm_arch_exit(void);
59132
59133 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
59134 diff -urNp linux-2.6.32.45/include/linux/libata.h linux-2.6.32.45/include/linux/libata.h
59135 --- linux-2.6.32.45/include/linux/libata.h 2011-03-27 14:31:47.000000000 -0400
59136 +++ linux-2.6.32.45/include/linux/libata.h 2011-08-26 20:19:09.000000000 -0400
59137 @@ -525,11 +525,11 @@ struct ata_ioports {
59138
59139 struct ata_host {
59140 spinlock_t lock;
59141 - struct device *dev;
59142 + struct device *dev;
59143 void __iomem * const *iomap;
59144 unsigned int n_ports;
59145 void *private_data;
59146 - struct ata_port_operations *ops;
59147 + const struct ata_port_operations *ops;
59148 unsigned long flags;
59149 #ifdef CONFIG_ATA_ACPI
59150 acpi_handle acpi_handle;
59151 @@ -710,7 +710,7 @@ struct ata_link {
59152
59153 struct ata_port {
59154 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
59155 - struct ata_port_operations *ops;
59156 + const struct ata_port_operations *ops;
59157 spinlock_t *lock;
59158 /* Flags owned by the EH context. Only EH should touch these once the
59159 port is active */
59160 @@ -884,7 +884,7 @@ struct ata_port_operations {
59161 * fields must be pointers.
59162 */
59163 const struct ata_port_operations *inherits;
59164 -};
59165 +} __do_const;
59166
59167 struct ata_port_info {
59168 unsigned long flags;
59169 @@ -892,7 +892,7 @@ struct ata_port_info {
59170 unsigned long pio_mask;
59171 unsigned long mwdma_mask;
59172 unsigned long udma_mask;
59173 - struct ata_port_operations *port_ops;
59174 + const struct ata_port_operations *port_ops;
59175 void *private_data;
59176 };
59177
59178 @@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timi
59179 extern const unsigned long sata_deb_timing_hotplug[];
59180 extern const unsigned long sata_deb_timing_long[];
59181
59182 -extern struct ata_port_operations ata_dummy_port_ops;
59183 +extern const struct ata_port_operations ata_dummy_port_ops;
59184 extern const struct ata_port_info ata_dummy_port_info;
59185
59186 static inline const unsigned long *
59187 @@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_
59188 struct scsi_host_template *sht);
59189 extern void ata_host_detach(struct ata_host *host);
59190 extern void ata_host_init(struct ata_host *, struct device *,
59191 - unsigned long, struct ata_port_operations *);
59192 + unsigned long, const struct ata_port_operations *);
59193 extern int ata_scsi_detect(struct scsi_host_template *sht);
59194 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
59195 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
59196 diff -urNp linux-2.6.32.45/include/linux/lockd/bind.h linux-2.6.32.45/include/linux/lockd/bind.h
59197 --- linux-2.6.32.45/include/linux/lockd/bind.h 2011-03-27 14:31:47.000000000 -0400
59198 +++ linux-2.6.32.45/include/linux/lockd/bind.h 2011-04-17 15:56:46.000000000 -0400
59199 @@ -23,13 +23,13 @@ struct svc_rqst;
59200 * This is the set of functions for lockd->nfsd communication
59201 */
59202 struct nlmsvc_binding {
59203 - __be32 (*fopen)(struct svc_rqst *,
59204 + __be32 (* const fopen)(struct svc_rqst *,
59205 struct nfs_fh *,
59206 struct file **);
59207 - void (*fclose)(struct file *);
59208 + void (* const fclose)(struct file *);
59209 };
59210
59211 -extern struct nlmsvc_binding * nlmsvc_ops;
59212 +extern const struct nlmsvc_binding * nlmsvc_ops;
59213
59214 /*
59215 * Similar to nfs_client_initdata, but without the NFS-specific
59216 diff -urNp linux-2.6.32.45/include/linux/mca.h linux-2.6.32.45/include/linux/mca.h
59217 --- linux-2.6.32.45/include/linux/mca.h 2011-03-27 14:31:47.000000000 -0400
59218 +++ linux-2.6.32.45/include/linux/mca.h 2011-08-05 20:33:55.000000000 -0400
59219 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
59220 int region);
59221 void * (*mca_transform_memory)(struct mca_device *,
59222 void *memory);
59223 -};
59224 +} __no_const;
59225
59226 struct mca_bus {
59227 u64 default_dma_mask;
59228 diff -urNp linux-2.6.32.45/include/linux/memory.h linux-2.6.32.45/include/linux/memory.h
59229 --- linux-2.6.32.45/include/linux/memory.h 2011-03-27 14:31:47.000000000 -0400
59230 +++ linux-2.6.32.45/include/linux/memory.h 2011-08-05 20:33:55.000000000 -0400
59231 @@ -108,7 +108,7 @@ struct memory_accessor {
59232 size_t count);
59233 ssize_t (*write)(struct memory_accessor *, const char *buf,
59234 off_t offset, size_t count);
59235 -};
59236 +} __no_const;
59237
59238 /*
59239 * Kernel text modification mutex, used for code patching. Users of this lock
59240 diff -urNp linux-2.6.32.45/include/linux/mm.h linux-2.6.32.45/include/linux/mm.h
59241 --- linux-2.6.32.45/include/linux/mm.h 2011-03-27 14:31:47.000000000 -0400
59242 +++ linux-2.6.32.45/include/linux/mm.h 2011-04-17 15:56:46.000000000 -0400
59243 @@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void
59244
59245 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
59246 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
59247 +
59248 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
59249 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
59250 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
59251 +#else
59252 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
59253 +#endif
59254 +
59255 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
59256 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
59257
59258 @@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
59259 int set_page_dirty_lock(struct page *page);
59260 int clear_page_dirty_for_io(struct page *page);
59261
59262 -/* Is the vma a continuation of the stack vma above it? */
59263 -static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
59264 -{
59265 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
59266 -}
59267 -
59268 extern unsigned long move_page_tables(struct vm_area_struct *vma,
59269 unsigned long old_addr, struct vm_area_struct *new_vma,
59270 unsigned long new_addr, unsigned long len);
59271 @@ -890,6 +891,8 @@ struct shrinker {
59272 extern void register_shrinker(struct shrinker *);
59273 extern void unregister_shrinker(struct shrinker *);
59274
59275 +pgprot_t vm_get_page_prot(unsigned long vm_flags);
59276 +
59277 int vma_wants_writenotify(struct vm_area_struct *vma);
59278
59279 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
59280 @@ -1162,6 +1165,7 @@ out:
59281 }
59282
59283 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
59284 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
59285
59286 extern unsigned long do_brk(unsigned long, unsigned long);
59287
59288 @@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(
59289 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
59290 struct vm_area_struct **pprev);
59291
59292 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
59293 +extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
59294 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
59295 +
59296 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
59297 NULL if none. Assume start_addr < end_addr. */
59298 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
59299 @@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(st
59300 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
59301 }
59302
59303 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
59304 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
59305 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
59306 unsigned long pfn, unsigned long size, pgprot_t);
59307 @@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long
59308 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
59309 extern int sysctl_memory_failure_early_kill;
59310 extern int sysctl_memory_failure_recovery;
59311 -extern atomic_long_t mce_bad_pages;
59312 +extern atomic_long_unchecked_t mce_bad_pages;
59313 +
59314 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59315 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
59316 +#else
59317 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
59318 +#endif
59319
59320 #endif /* __KERNEL__ */
59321 #endif /* _LINUX_MM_H */
59322 diff -urNp linux-2.6.32.45/include/linux/mm_types.h linux-2.6.32.45/include/linux/mm_types.h
59323 --- linux-2.6.32.45/include/linux/mm_types.h 2011-03-27 14:31:47.000000000 -0400
59324 +++ linux-2.6.32.45/include/linux/mm_types.h 2011-04-17 15:56:46.000000000 -0400
59325 @@ -186,6 +186,8 @@ struct vm_area_struct {
59326 #ifdef CONFIG_NUMA
59327 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
59328 #endif
59329 +
59330 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
59331 };
59332
59333 struct core_thread {
59334 @@ -287,6 +289,24 @@ struct mm_struct {
59335 #ifdef CONFIG_MMU_NOTIFIER
59336 struct mmu_notifier_mm *mmu_notifier_mm;
59337 #endif
59338 +
59339 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59340 + unsigned long pax_flags;
59341 +#endif
59342 +
59343 +#ifdef CONFIG_PAX_DLRESOLVE
59344 + unsigned long call_dl_resolve;
59345 +#endif
59346 +
59347 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
59348 + unsigned long call_syscall;
59349 +#endif
59350 +
59351 +#ifdef CONFIG_PAX_ASLR
59352 + unsigned long delta_mmap; /* randomized offset */
59353 + unsigned long delta_stack; /* randomized offset */
59354 +#endif
59355 +
59356 };
59357
59358 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
59359 diff -urNp linux-2.6.32.45/include/linux/mmu_notifier.h linux-2.6.32.45/include/linux/mmu_notifier.h
59360 --- linux-2.6.32.45/include/linux/mmu_notifier.h 2011-03-27 14:31:47.000000000 -0400
59361 +++ linux-2.6.32.45/include/linux/mmu_notifier.h 2011-04-17 15:56:46.000000000 -0400
59362 @@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destr
59363 */
59364 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
59365 ({ \
59366 - pte_t __pte; \
59367 + pte_t ___pte; \
59368 struct vm_area_struct *___vma = __vma; \
59369 unsigned long ___address = __address; \
59370 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
59371 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
59372 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
59373 - __pte; \
59374 + ___pte; \
59375 })
59376
59377 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
59378 diff -urNp linux-2.6.32.45/include/linux/mmzone.h linux-2.6.32.45/include/linux/mmzone.h
59379 --- linux-2.6.32.45/include/linux/mmzone.h 2011-03-27 14:31:47.000000000 -0400
59380 +++ linux-2.6.32.45/include/linux/mmzone.h 2011-04-17 15:56:46.000000000 -0400
59381 @@ -350,7 +350,7 @@ struct zone {
59382 unsigned long flags; /* zone flags, see below */
59383
59384 /* Zone statistics */
59385 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59386 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59387
59388 /*
59389 * prev_priority holds the scanning priority for this zone. It is
59390 diff -urNp linux-2.6.32.45/include/linux/mod_devicetable.h linux-2.6.32.45/include/linux/mod_devicetable.h
59391 --- linux-2.6.32.45/include/linux/mod_devicetable.h 2011-03-27 14:31:47.000000000 -0400
59392 +++ linux-2.6.32.45/include/linux/mod_devicetable.h 2011-04-17 15:56:46.000000000 -0400
59393 @@ -12,7 +12,7 @@
59394 typedef unsigned long kernel_ulong_t;
59395 #endif
59396
59397 -#define PCI_ANY_ID (~0)
59398 +#define PCI_ANY_ID ((__u16)~0)
59399
59400 struct pci_device_id {
59401 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
59402 @@ -131,7 +131,7 @@ struct usb_device_id {
59403 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
59404 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
59405
59406 -#define HID_ANY_ID (~0)
59407 +#define HID_ANY_ID (~0U)
59408
59409 struct hid_device_id {
59410 __u16 bus;
59411 diff -urNp linux-2.6.32.45/include/linux/module.h linux-2.6.32.45/include/linux/module.h
59412 --- linux-2.6.32.45/include/linux/module.h 2011-03-27 14:31:47.000000000 -0400
59413 +++ linux-2.6.32.45/include/linux/module.h 2011-08-05 20:33:55.000000000 -0400
59414 @@ -16,6 +16,7 @@
59415 #include <linux/kobject.h>
59416 #include <linux/moduleparam.h>
59417 #include <linux/tracepoint.h>
59418 +#include <linux/fs.h>
59419
59420 #include <asm/local.h>
59421 #include <asm/module.h>
59422 @@ -287,16 +288,16 @@ struct module
59423 int (*init)(void);
59424
59425 /* If this is non-NULL, vfree after init() returns */
59426 - void *module_init;
59427 + void *module_init_rx, *module_init_rw;
59428
59429 /* Here is the actual code + data, vfree'd on unload. */
59430 - void *module_core;
59431 + void *module_core_rx, *module_core_rw;
59432
59433 /* Here are the sizes of the init and core sections */
59434 - unsigned int init_size, core_size;
59435 + unsigned int init_size_rw, core_size_rw;
59436
59437 /* The size of the executable code in each section. */
59438 - unsigned int init_text_size, core_text_size;
59439 + unsigned int init_size_rx, core_size_rx;
59440
59441 /* Arch-specific module values */
59442 struct mod_arch_specific arch;
59443 @@ -345,6 +346,10 @@ struct module
59444 #ifdef CONFIG_EVENT_TRACING
59445 struct ftrace_event_call *trace_events;
59446 unsigned int num_trace_events;
59447 + struct file_operations trace_id;
59448 + struct file_operations trace_enable;
59449 + struct file_operations trace_format;
59450 + struct file_operations trace_filter;
59451 #endif
59452 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
59453 unsigned long *ftrace_callsites;
59454 @@ -393,16 +398,46 @@ struct module *__module_address(unsigned
59455 bool is_module_address(unsigned long addr);
59456 bool is_module_text_address(unsigned long addr);
59457
59458 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
59459 +{
59460 +
59461 +#ifdef CONFIG_PAX_KERNEXEC
59462 + if (ktla_ktva(addr) >= (unsigned long)start &&
59463 + ktla_ktva(addr) < (unsigned long)start + size)
59464 + return 1;
59465 +#endif
59466 +
59467 + return ((void *)addr >= start && (void *)addr < start + size);
59468 +}
59469 +
59470 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
59471 +{
59472 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
59473 +}
59474 +
59475 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
59476 +{
59477 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
59478 +}
59479 +
59480 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
59481 +{
59482 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
59483 +}
59484 +
59485 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
59486 +{
59487 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
59488 +}
59489 +
59490 static inline int within_module_core(unsigned long addr, struct module *mod)
59491 {
59492 - return (unsigned long)mod->module_core <= addr &&
59493 - addr < (unsigned long)mod->module_core + mod->core_size;
59494 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
59495 }
59496
59497 static inline int within_module_init(unsigned long addr, struct module *mod)
59498 {
59499 - return (unsigned long)mod->module_init <= addr &&
59500 - addr < (unsigned long)mod->module_init + mod->init_size;
59501 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
59502 }
59503
59504 /* Search for module by name: must hold module_mutex. */
59505 diff -urNp linux-2.6.32.45/include/linux/moduleloader.h linux-2.6.32.45/include/linux/moduleloader.h
59506 --- linux-2.6.32.45/include/linux/moduleloader.h 2011-03-27 14:31:47.000000000 -0400
59507 +++ linux-2.6.32.45/include/linux/moduleloader.h 2011-04-17 15:56:46.000000000 -0400
59508 @@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
59509 sections. Returns NULL on failure. */
59510 void *module_alloc(unsigned long size);
59511
59512 +#ifdef CONFIG_PAX_KERNEXEC
59513 +void *module_alloc_exec(unsigned long size);
59514 +#else
59515 +#define module_alloc_exec(x) module_alloc(x)
59516 +#endif
59517 +
59518 /* Free memory returned from module_alloc. */
59519 void module_free(struct module *mod, void *module_region);
59520
59521 +#ifdef CONFIG_PAX_KERNEXEC
59522 +void module_free_exec(struct module *mod, void *module_region);
59523 +#else
59524 +#define module_free_exec(x, y) module_free((x), (y))
59525 +#endif
59526 +
59527 /* Apply the given relocation to the (simplified) ELF. Return -error
59528 or 0. */
59529 int apply_relocate(Elf_Shdr *sechdrs,
59530 diff -urNp linux-2.6.32.45/include/linux/moduleparam.h linux-2.6.32.45/include/linux/moduleparam.h
59531 --- linux-2.6.32.45/include/linux/moduleparam.h 2011-03-27 14:31:47.000000000 -0400
59532 +++ linux-2.6.32.45/include/linux/moduleparam.h 2011-04-17 15:56:46.000000000 -0400
59533 @@ -132,7 +132,7 @@ struct kparam_array
59534
59535 /* Actually copy string: maxlen param is usually sizeof(string). */
59536 #define module_param_string(name, string, len, perm) \
59537 - static const struct kparam_string __param_string_##name \
59538 + static const struct kparam_string __param_string_##name __used \
59539 = { len, string }; \
59540 __module_param_call(MODULE_PARAM_PREFIX, name, \
59541 param_set_copystring, param_get_string, \
59542 @@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffe
59543
59544 /* Comma-separated array: *nump is set to number they actually specified. */
59545 #define module_param_array_named(name, array, type, nump, perm) \
59546 - static const struct kparam_array __param_arr_##name \
59547 + static const struct kparam_array __param_arr_##name __used \
59548 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
59549 sizeof(array[0]), array }; \
59550 __module_param_call(MODULE_PARAM_PREFIX, name, \
59551 diff -urNp linux-2.6.32.45/include/linux/mutex.h linux-2.6.32.45/include/linux/mutex.h
59552 --- linux-2.6.32.45/include/linux/mutex.h 2011-03-27 14:31:47.000000000 -0400
59553 +++ linux-2.6.32.45/include/linux/mutex.h 2011-04-17 15:56:46.000000000 -0400
59554 @@ -51,7 +51,7 @@ struct mutex {
59555 spinlock_t wait_lock;
59556 struct list_head wait_list;
59557 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
59558 - struct thread_info *owner;
59559 + struct task_struct *owner;
59560 #endif
59561 #ifdef CONFIG_DEBUG_MUTEXES
59562 const char *name;
59563 diff -urNp linux-2.6.32.45/include/linux/namei.h linux-2.6.32.45/include/linux/namei.h
59564 --- linux-2.6.32.45/include/linux/namei.h 2011-03-27 14:31:47.000000000 -0400
59565 +++ linux-2.6.32.45/include/linux/namei.h 2011-04-17 15:56:46.000000000 -0400
59566 @@ -22,7 +22,7 @@ struct nameidata {
59567 unsigned int flags;
59568 int last_type;
59569 unsigned depth;
59570 - char *saved_names[MAX_NESTED_LINKS + 1];
59571 + const char *saved_names[MAX_NESTED_LINKS + 1];
59572
59573 /* Intent data */
59574 union {
59575 @@ -84,12 +84,12 @@ extern int follow_up(struct path *);
59576 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
59577 extern void unlock_rename(struct dentry *, struct dentry *);
59578
59579 -static inline void nd_set_link(struct nameidata *nd, char *path)
59580 +static inline void nd_set_link(struct nameidata *nd, const char *path)
59581 {
59582 nd->saved_names[nd->depth] = path;
59583 }
59584
59585 -static inline char *nd_get_link(struct nameidata *nd)
59586 +static inline const char *nd_get_link(const struct nameidata *nd)
59587 {
59588 return nd->saved_names[nd->depth];
59589 }
59590 diff -urNp linux-2.6.32.45/include/linux/netdevice.h linux-2.6.32.45/include/linux/netdevice.h
59591 --- linux-2.6.32.45/include/linux/netdevice.h 2011-08-09 18:35:30.000000000 -0400
59592 +++ linux-2.6.32.45/include/linux/netdevice.h 2011-08-23 21:22:38.000000000 -0400
59593 @@ -637,6 +637,7 @@ struct net_device_ops {
59594 u16 xid);
59595 #endif
59596 };
59597 +typedef struct net_device_ops __no_const net_device_ops_no_const;
59598
59599 /*
59600 * The DEVICE structure.
59601 diff -urNp linux-2.6.32.45/include/linux/netfilter/xt_gradm.h linux-2.6.32.45/include/linux/netfilter/xt_gradm.h
59602 --- linux-2.6.32.45/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
59603 +++ linux-2.6.32.45/include/linux/netfilter/xt_gradm.h 2011-04-17 15:56:46.000000000 -0400
59604 @@ -0,0 +1,9 @@
59605 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
59606 +#define _LINUX_NETFILTER_XT_GRADM_H 1
59607 +
59608 +struct xt_gradm_mtinfo {
59609 + __u16 flags;
59610 + __u16 invflags;
59611 +};
59612 +
59613 +#endif
59614 diff -urNp linux-2.6.32.45/include/linux/nodemask.h linux-2.6.32.45/include/linux/nodemask.h
59615 --- linux-2.6.32.45/include/linux/nodemask.h 2011-03-27 14:31:47.000000000 -0400
59616 +++ linux-2.6.32.45/include/linux/nodemask.h 2011-04-17 15:56:46.000000000 -0400
59617 @@ -464,11 +464,11 @@ static inline int num_node_state(enum no
59618
59619 #define any_online_node(mask) \
59620 ({ \
59621 - int node; \
59622 - for_each_node_mask(node, (mask)) \
59623 - if (node_online(node)) \
59624 + int __node; \
59625 + for_each_node_mask(__node, (mask)) \
59626 + if (node_online(__node)) \
59627 break; \
59628 - node; \
59629 + __node; \
59630 })
59631
59632 #define num_online_nodes() num_node_state(N_ONLINE)
59633 diff -urNp linux-2.6.32.45/include/linux/oprofile.h linux-2.6.32.45/include/linux/oprofile.h
59634 --- linux-2.6.32.45/include/linux/oprofile.h 2011-03-27 14:31:47.000000000 -0400
59635 +++ linux-2.6.32.45/include/linux/oprofile.h 2011-04-17 15:56:46.000000000 -0400
59636 @@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super
59637 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
59638 char const * name, ulong * val);
59639
59640 -/** Create a file for read-only access to an atomic_t. */
59641 +/** Create a file for read-only access to an atomic_unchecked_t. */
59642 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
59643 - char const * name, atomic_t * val);
59644 + char const * name, atomic_unchecked_t * val);
59645
59646 /** create a directory */
59647 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
59648 diff -urNp linux-2.6.32.45/include/linux/pagemap.h linux-2.6.32.45/include/linux/pagemap.h
59649 --- linux-2.6.32.45/include/linux/pagemap.h 2011-03-27 14:31:47.000000000 -0400
59650 +++ linux-2.6.32.45/include/linux/pagemap.h 2011-08-17 19:36:28.000000000 -0400
59651 @@ -425,6 +425,7 @@ static inline int fault_in_pages_readabl
59652 if (((unsigned long)uaddr & PAGE_MASK) !=
59653 ((unsigned long)end & PAGE_MASK))
59654 ret = __get_user(c, end);
59655 + (void)c;
59656 }
59657 return ret;
59658 }
59659 diff -urNp linux-2.6.32.45/include/linux/perf_event.h linux-2.6.32.45/include/linux/perf_event.h
59660 --- linux-2.6.32.45/include/linux/perf_event.h 2011-03-27 14:31:47.000000000 -0400
59661 +++ linux-2.6.32.45/include/linux/perf_event.h 2011-05-04 17:56:28.000000000 -0400
59662 @@ -476,7 +476,7 @@ struct hw_perf_event {
59663 struct hrtimer hrtimer;
59664 };
59665 };
59666 - atomic64_t prev_count;
59667 + atomic64_unchecked_t prev_count;
59668 u64 sample_period;
59669 u64 last_period;
59670 atomic64_t period_left;
59671 @@ -557,7 +557,7 @@ struct perf_event {
59672 const struct pmu *pmu;
59673
59674 enum perf_event_active_state state;
59675 - atomic64_t count;
59676 + atomic64_unchecked_t count;
59677
59678 /*
59679 * These are the total time in nanoseconds that the event
59680 @@ -595,8 +595,8 @@ struct perf_event {
59681 * These accumulate total time (in nanoseconds) that children
59682 * events have been enabled and running, respectively.
59683 */
59684 - atomic64_t child_total_time_enabled;
59685 - atomic64_t child_total_time_running;
59686 + atomic64_unchecked_t child_total_time_enabled;
59687 + atomic64_unchecked_t child_total_time_running;
59688
59689 /*
59690 * Protect attach/detach and child_list:
59691 diff -urNp linux-2.6.32.45/include/linux/pipe_fs_i.h linux-2.6.32.45/include/linux/pipe_fs_i.h
59692 --- linux-2.6.32.45/include/linux/pipe_fs_i.h 2011-03-27 14:31:47.000000000 -0400
59693 +++ linux-2.6.32.45/include/linux/pipe_fs_i.h 2011-04-17 15:56:46.000000000 -0400
59694 @@ -46,9 +46,9 @@ struct pipe_inode_info {
59695 wait_queue_head_t wait;
59696 unsigned int nrbufs, curbuf;
59697 struct page *tmp_page;
59698 - unsigned int readers;
59699 - unsigned int writers;
59700 - unsigned int waiting_writers;
59701 + atomic_t readers;
59702 + atomic_t writers;
59703 + atomic_t waiting_writers;
59704 unsigned int r_counter;
59705 unsigned int w_counter;
59706 struct fasync_struct *fasync_readers;
59707 diff -urNp linux-2.6.32.45/include/linux/poison.h linux-2.6.32.45/include/linux/poison.h
59708 --- linux-2.6.32.45/include/linux/poison.h 2011-03-27 14:31:47.000000000 -0400
59709 +++ linux-2.6.32.45/include/linux/poison.h 2011-04-17 15:56:46.000000000 -0400
59710 @@ -19,8 +19,8 @@
59711 * under normal circumstances, used to verify that nobody uses
59712 * non-initialized list entries.
59713 */
59714 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
59715 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
59716 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
59717 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
59718
59719 /********** include/linux/timer.h **********/
59720 /*
59721 diff -urNp linux-2.6.32.45/include/linux/posix-timers.h linux-2.6.32.45/include/linux/posix-timers.h
59722 --- linux-2.6.32.45/include/linux/posix-timers.h 2011-03-27 14:31:47.000000000 -0400
59723 +++ linux-2.6.32.45/include/linux/posix-timers.h 2011-08-05 20:33:55.000000000 -0400
59724 @@ -67,7 +67,7 @@ struct k_itimer {
59725 };
59726
59727 struct k_clock {
59728 - int res; /* in nanoseconds */
59729 + const int res; /* in nanoseconds */
59730 int (*clock_getres) (const clockid_t which_clock, struct timespec *tp);
59731 int (*clock_set) (const clockid_t which_clock, struct timespec * tp);
59732 int (*clock_get) (const clockid_t which_clock, struct timespec * tp);
59733 diff -urNp linux-2.6.32.45/include/linux/preempt.h linux-2.6.32.45/include/linux/preempt.h
59734 --- linux-2.6.32.45/include/linux/preempt.h 2011-03-27 14:31:47.000000000 -0400
59735 +++ linux-2.6.32.45/include/linux/preempt.h 2011-08-05 20:33:55.000000000 -0400
59736 @@ -110,7 +110,7 @@ struct preempt_ops {
59737 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
59738 void (*sched_out)(struct preempt_notifier *notifier,
59739 struct task_struct *next);
59740 -};
59741 +} __no_const;
59742
59743 /**
59744 * preempt_notifier - key for installing preemption notifiers
59745 diff -urNp linux-2.6.32.45/include/linux/proc_fs.h linux-2.6.32.45/include/linux/proc_fs.h
59746 --- linux-2.6.32.45/include/linux/proc_fs.h 2011-03-27 14:31:47.000000000 -0400
59747 +++ linux-2.6.32.45/include/linux/proc_fs.h 2011-08-05 20:33:55.000000000 -0400
59748 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
59749 return proc_create_data(name, mode, parent, proc_fops, NULL);
59750 }
59751
59752 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
59753 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
59754 +{
59755 +#ifdef CONFIG_GRKERNSEC_PROC_USER
59756 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
59757 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59758 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
59759 +#else
59760 + return proc_create_data(name, mode, parent, proc_fops, NULL);
59761 +#endif
59762 +}
59763 +
59764 +
59765 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
59766 mode_t mode, struct proc_dir_entry *base,
59767 read_proc_t *read_proc, void * data)
59768 @@ -256,7 +269,7 @@ union proc_op {
59769 int (*proc_show)(struct seq_file *m,
59770 struct pid_namespace *ns, struct pid *pid,
59771 struct task_struct *task);
59772 -};
59773 +} __no_const;
59774
59775 struct ctl_table_header;
59776 struct ctl_table;
59777 diff -urNp linux-2.6.32.45/include/linux/ptrace.h linux-2.6.32.45/include/linux/ptrace.h
59778 --- linux-2.6.32.45/include/linux/ptrace.h 2011-03-27 14:31:47.000000000 -0400
59779 +++ linux-2.6.32.45/include/linux/ptrace.h 2011-04-17 15:56:46.000000000 -0400
59780 @@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_
59781 extern void exit_ptrace(struct task_struct *tracer);
59782 #define PTRACE_MODE_READ 1
59783 #define PTRACE_MODE_ATTACH 2
59784 -/* Returns 0 on success, -errno on denial. */
59785 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
59786 /* Returns true on success, false on denial. */
59787 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
59788 +/* Returns true on success, false on denial. */
59789 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
59790
59791 static inline int ptrace_reparented(struct task_struct *child)
59792 {
59793 diff -urNp linux-2.6.32.45/include/linux/random.h linux-2.6.32.45/include/linux/random.h
59794 --- linux-2.6.32.45/include/linux/random.h 2011-08-16 20:37:25.000000000 -0400
59795 +++ linux-2.6.32.45/include/linux/random.h 2011-08-07 19:48:09.000000000 -0400
59796 @@ -63,6 +63,11 @@ unsigned long randomize_range(unsigned l
59797 u32 random32(void);
59798 void srandom32(u32 seed);
59799
59800 +static inline unsigned long pax_get_random_long(void)
59801 +{
59802 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
59803 +}
59804 +
59805 #endif /* __KERNEL___ */
59806
59807 #endif /* _LINUX_RANDOM_H */
59808 diff -urNp linux-2.6.32.45/include/linux/reboot.h linux-2.6.32.45/include/linux/reboot.h
59809 --- linux-2.6.32.45/include/linux/reboot.h 2011-03-27 14:31:47.000000000 -0400
59810 +++ linux-2.6.32.45/include/linux/reboot.h 2011-05-22 23:02:06.000000000 -0400
59811 @@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
59812 * Architecture-specific implementations of sys_reboot commands.
59813 */
59814
59815 -extern void machine_restart(char *cmd);
59816 -extern void machine_halt(void);
59817 -extern void machine_power_off(void);
59818 +extern void machine_restart(char *cmd) __noreturn;
59819 +extern void machine_halt(void) __noreturn;
59820 +extern void machine_power_off(void) __noreturn;
59821
59822 extern void machine_shutdown(void);
59823 struct pt_regs;
59824 @@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
59825 */
59826
59827 extern void kernel_restart_prepare(char *cmd);
59828 -extern void kernel_restart(char *cmd);
59829 -extern void kernel_halt(void);
59830 -extern void kernel_power_off(void);
59831 +extern void kernel_restart(char *cmd) __noreturn;
59832 +extern void kernel_halt(void) __noreturn;
59833 +extern void kernel_power_off(void) __noreturn;
59834
59835 void ctrl_alt_del(void);
59836
59837 @@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
59838 * Emergency restart, callable from an interrupt handler.
59839 */
59840
59841 -extern void emergency_restart(void);
59842 +extern void emergency_restart(void) __noreturn;
59843 #include <asm/emergency-restart.h>
59844
59845 #endif
59846 diff -urNp linux-2.6.32.45/include/linux/reiserfs_fs.h linux-2.6.32.45/include/linux/reiserfs_fs.h
59847 --- linux-2.6.32.45/include/linux/reiserfs_fs.h 2011-03-27 14:31:47.000000000 -0400
59848 +++ linux-2.6.32.45/include/linux/reiserfs_fs.h 2011-04-17 15:56:46.000000000 -0400
59849 @@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset
59850 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
59851
59852 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
59853 -#define get_generation(s) atomic_read (&fs_generation(s))
59854 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
59855 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
59856 #define __fs_changed(gen,s) (gen != get_generation (s))
59857 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
59858 @@ -1534,24 +1534,24 @@ static inline struct super_block *sb_fro
59859 */
59860
59861 struct item_operations {
59862 - int (*bytes_number) (struct item_head * ih, int block_size);
59863 - void (*decrement_key) (struct cpu_key *);
59864 - int (*is_left_mergeable) (struct reiserfs_key * ih,
59865 + int (* const bytes_number) (struct item_head * ih, int block_size);
59866 + void (* const decrement_key) (struct cpu_key *);
59867 + int (* const is_left_mergeable) (struct reiserfs_key * ih,
59868 unsigned long bsize);
59869 - void (*print_item) (struct item_head *, char *item);
59870 - void (*check_item) (struct item_head *, char *item);
59871 + void (* const print_item) (struct item_head *, char *item);
59872 + void (* const check_item) (struct item_head *, char *item);
59873
59874 - int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
59875 + int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
59876 int is_affected, int insert_size);
59877 - int (*check_left) (struct virtual_item * vi, int free,
59878 + int (* const check_left) (struct virtual_item * vi, int free,
59879 int start_skip, int end_skip);
59880 - int (*check_right) (struct virtual_item * vi, int free);
59881 - int (*part_size) (struct virtual_item * vi, int from, int to);
59882 - int (*unit_num) (struct virtual_item * vi);
59883 - void (*print_vi) (struct virtual_item * vi);
59884 + int (* const check_right) (struct virtual_item * vi, int free);
59885 + int (* const part_size) (struct virtual_item * vi, int from, int to);
59886 + int (* const unit_num) (struct virtual_item * vi);
59887 + void (* const print_vi) (struct virtual_item * vi);
59888 };
59889
59890 -extern struct item_operations *item_ops[TYPE_ANY + 1];
59891 +extern const struct item_operations * const item_ops[TYPE_ANY + 1];
59892
59893 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
59894 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
59895 diff -urNp linux-2.6.32.45/include/linux/reiserfs_fs_sb.h linux-2.6.32.45/include/linux/reiserfs_fs_sb.h
59896 --- linux-2.6.32.45/include/linux/reiserfs_fs_sb.h 2011-03-27 14:31:47.000000000 -0400
59897 +++ linux-2.6.32.45/include/linux/reiserfs_fs_sb.h 2011-04-17 15:56:46.000000000 -0400
59898 @@ -377,7 +377,7 @@ struct reiserfs_sb_info {
59899 /* Comment? -Hans */
59900 wait_queue_head_t s_wait;
59901 /* To be obsoleted soon by per buffer seals.. -Hans */
59902 - atomic_t s_generation_counter; // increased by one every time the
59903 + atomic_unchecked_t s_generation_counter; // increased by one every time the
59904 // tree gets re-balanced
59905 unsigned long s_properties; /* File system properties. Currently holds
59906 on-disk FS format */
59907 diff -urNp linux-2.6.32.45/include/linux/relay.h linux-2.6.32.45/include/linux/relay.h
59908 --- linux-2.6.32.45/include/linux/relay.h 2011-03-27 14:31:47.000000000 -0400
59909 +++ linux-2.6.32.45/include/linux/relay.h 2011-08-05 20:33:55.000000000 -0400
59910 @@ -159,7 +159,7 @@ struct rchan_callbacks
59911 * The callback should return 0 if successful, negative if not.
59912 */
59913 int (*remove_buf_file)(struct dentry *dentry);
59914 -};
59915 +} __no_const;
59916
59917 /*
59918 * CONFIG_RELAY kernel API, kernel/relay.c
59919 diff -urNp linux-2.6.32.45/include/linux/rfkill.h linux-2.6.32.45/include/linux/rfkill.h
59920 --- linux-2.6.32.45/include/linux/rfkill.h 2011-03-27 14:31:47.000000000 -0400
59921 +++ linux-2.6.32.45/include/linux/rfkill.h 2011-08-23 21:22:38.000000000 -0400
59922 @@ -144,6 +144,7 @@ struct rfkill_ops {
59923 void (*query)(struct rfkill *rfkill, void *data);
59924 int (*set_block)(void *data, bool blocked);
59925 };
59926 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
59927
59928 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
59929 /**
59930 diff -urNp linux-2.6.32.45/include/linux/sched.h linux-2.6.32.45/include/linux/sched.h
59931 --- linux-2.6.32.45/include/linux/sched.h 2011-03-27 14:31:47.000000000 -0400
59932 +++ linux-2.6.32.45/include/linux/sched.h 2011-08-11 19:48:55.000000000 -0400
59933 @@ -101,6 +101,7 @@ struct bio;
59934 struct fs_struct;
59935 struct bts_context;
59936 struct perf_event_context;
59937 +struct linux_binprm;
59938
59939 /*
59940 * List of flags we want to share for kernel threads,
59941 @@ -350,7 +351,7 @@ extern signed long schedule_timeout_kill
59942 extern signed long schedule_timeout_uninterruptible(signed long timeout);
59943 asmlinkage void __schedule(void);
59944 asmlinkage void schedule(void);
59945 -extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
59946 +extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
59947
59948 struct nsproxy;
59949 struct user_namespace;
59950 @@ -371,9 +372,12 @@ struct user_namespace;
59951 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
59952
59953 extern int sysctl_max_map_count;
59954 +extern unsigned long sysctl_heap_stack_gap;
59955
59956 #include <linux/aio.h>
59957
59958 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
59959 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
59960 extern unsigned long
59961 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
59962 unsigned long, unsigned long);
59963 @@ -666,6 +670,16 @@ struct signal_struct {
59964 struct tty_audit_buf *tty_audit_buf;
59965 #endif
59966
59967 +#ifdef CONFIG_GRKERNSEC
59968 + u32 curr_ip;
59969 + u32 saved_ip;
59970 + u32 gr_saddr;
59971 + u32 gr_daddr;
59972 + u16 gr_sport;
59973 + u16 gr_dport;
59974 + u8 used_accept:1;
59975 +#endif
59976 +
59977 int oom_adj; /* OOM kill score adjustment (bit shift) */
59978 };
59979
59980 @@ -723,6 +737,11 @@ struct user_struct {
59981 struct key *session_keyring; /* UID's default session keyring */
59982 #endif
59983
59984 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
59985 + unsigned int banned;
59986 + unsigned long ban_expires;
59987 +#endif
59988 +
59989 /* Hash table maintenance information */
59990 struct hlist_node uidhash_node;
59991 uid_t uid;
59992 @@ -1328,8 +1347,8 @@ struct task_struct {
59993 struct list_head thread_group;
59994
59995 struct completion *vfork_done; /* for vfork() */
59996 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
59997 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
59998 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
59999 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60000
60001 cputime_t utime, stime, utimescaled, stimescaled;
60002 cputime_t gtime;
60003 @@ -1343,16 +1362,6 @@ struct task_struct {
60004 struct task_cputime cputime_expires;
60005 struct list_head cpu_timers[3];
60006
60007 -/* process credentials */
60008 - const struct cred *real_cred; /* objective and real subjective task
60009 - * credentials (COW) */
60010 - const struct cred *cred; /* effective (overridable) subjective task
60011 - * credentials (COW) */
60012 - struct mutex cred_guard_mutex; /* guard against foreign influences on
60013 - * credential calculations
60014 - * (notably. ptrace) */
60015 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60016 -
60017 char comm[TASK_COMM_LEN]; /* executable name excluding path
60018 - access with [gs]et_task_comm (which lock
60019 it with task_lock())
60020 @@ -1369,6 +1378,10 @@ struct task_struct {
60021 #endif
60022 /* CPU-specific state of this task */
60023 struct thread_struct thread;
60024 +/* thread_info moved to task_struct */
60025 +#ifdef CONFIG_X86
60026 + struct thread_info tinfo;
60027 +#endif
60028 /* filesystem information */
60029 struct fs_struct *fs;
60030 /* open file information */
60031 @@ -1436,6 +1449,15 @@ struct task_struct {
60032 int hardirq_context;
60033 int softirq_context;
60034 #endif
60035 +
60036 +/* process credentials */
60037 + const struct cred *real_cred; /* objective and real subjective task
60038 + * credentials (COW) */
60039 + struct mutex cred_guard_mutex; /* guard against foreign influences on
60040 + * credential calculations
60041 + * (notably. ptrace) */
60042 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60043 +
60044 #ifdef CONFIG_LOCKDEP
60045 # define MAX_LOCK_DEPTH 48UL
60046 u64 curr_chain_key;
60047 @@ -1456,6 +1478,9 @@ struct task_struct {
60048
60049 struct backing_dev_info *backing_dev_info;
60050
60051 + const struct cred *cred; /* effective (overridable) subjective task
60052 + * credentials (COW) */
60053 +
60054 struct io_context *io_context;
60055
60056 unsigned long ptrace_message;
60057 @@ -1519,6 +1544,21 @@ struct task_struct {
60058 unsigned long default_timer_slack_ns;
60059
60060 struct list_head *scm_work_list;
60061 +
60062 +#ifdef CONFIG_GRKERNSEC
60063 + /* grsecurity */
60064 + struct dentry *gr_chroot_dentry;
60065 + struct acl_subject_label *acl;
60066 + struct acl_role_label *role;
60067 + struct file *exec_file;
60068 + u16 acl_role_id;
60069 + /* is this the task that authenticated to the special role */
60070 + u8 acl_sp_role;
60071 + u8 is_writable;
60072 + u8 brute;
60073 + u8 gr_is_chrooted;
60074 +#endif
60075 +
60076 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
60077 /* Index of current stored adress in ret_stack */
60078 int curr_ret_stack;
60079 @@ -1542,6 +1582,57 @@ struct task_struct {
60080 #endif /* CONFIG_TRACING */
60081 };
60082
60083 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
60084 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
60085 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
60086 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
60087 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
60088 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
60089 +
60090 +#ifdef CONFIG_PAX_SOFTMODE
60091 +extern int pax_softmode;
60092 +#endif
60093 +
60094 +extern int pax_check_flags(unsigned long *);
60095 +
60096 +/* if tsk != current then task_lock must be held on it */
60097 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60098 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
60099 +{
60100 + if (likely(tsk->mm))
60101 + return tsk->mm->pax_flags;
60102 + else
60103 + return 0UL;
60104 +}
60105 +
60106 +/* if tsk != current then task_lock must be held on it */
60107 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
60108 +{
60109 + if (likely(tsk->mm)) {
60110 + tsk->mm->pax_flags = flags;
60111 + return 0;
60112 + }
60113 + return -EINVAL;
60114 +}
60115 +#endif
60116 +
60117 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60118 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
60119 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
60120 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60121 +#endif
60122 +
60123 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
60124 +extern void pax_report_insns(void *pc, void *sp);
60125 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
60126 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
60127 +
60128 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
60129 +extern void pax_track_stack(void);
60130 +#else
60131 +static inline void pax_track_stack(void) {}
60132 +#endif
60133 +
60134 /* Future-safe accessor for struct task_struct's cpus_allowed. */
60135 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
60136
60137 @@ -1740,7 +1831,7 @@ extern void thread_group_times(struct ta
60138 #define PF_DUMPCORE 0x00000200 /* dumped core */
60139 #define PF_SIGNALED 0x00000400 /* killed by a signal */
60140 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
60141 -#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
60142 +#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
60143 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
60144 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
60145 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
60146 @@ -1978,7 +2069,9 @@ void yield(void);
60147 extern struct exec_domain default_exec_domain;
60148
60149 union thread_union {
60150 +#ifndef CONFIG_X86
60151 struct thread_info thread_info;
60152 +#endif
60153 unsigned long stack[THREAD_SIZE/sizeof(long)];
60154 };
60155
60156 @@ -2011,6 +2104,7 @@ extern struct pid_namespace init_pid_ns;
60157 */
60158
60159 extern struct task_struct *find_task_by_vpid(pid_t nr);
60160 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
60161 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
60162 struct pid_namespace *ns);
60163
60164 @@ -2155,7 +2249,7 @@ extern void __cleanup_sighand(struct sig
60165 extern void exit_itimers(struct signal_struct *);
60166 extern void flush_itimer_signals(void);
60167
60168 -extern NORET_TYPE void do_group_exit(int);
60169 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
60170
60171 extern void daemonize(const char *, ...);
60172 extern int allow_signal(int);
60173 @@ -2284,13 +2378,17 @@ static inline unsigned long *end_of_stac
60174
60175 #endif
60176
60177 -static inline int object_is_on_stack(void *obj)
60178 +static inline int object_starts_on_stack(void *obj)
60179 {
60180 - void *stack = task_stack_page(current);
60181 + const void *stack = task_stack_page(current);
60182
60183 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
60184 }
60185
60186 +#ifdef CONFIG_PAX_USERCOPY
60187 +extern int object_is_on_stack(const void *obj, unsigned long len);
60188 +#endif
60189 +
60190 extern void thread_info_cache_init(void);
60191
60192 #ifdef CONFIG_DEBUG_STACK_USAGE
60193 diff -urNp linux-2.6.32.45/include/linux/screen_info.h linux-2.6.32.45/include/linux/screen_info.h
60194 --- linux-2.6.32.45/include/linux/screen_info.h 2011-03-27 14:31:47.000000000 -0400
60195 +++ linux-2.6.32.45/include/linux/screen_info.h 2011-04-17 15:56:46.000000000 -0400
60196 @@ -42,7 +42,8 @@ struct screen_info {
60197 __u16 pages; /* 0x32 */
60198 __u16 vesa_attributes; /* 0x34 */
60199 __u32 capabilities; /* 0x36 */
60200 - __u8 _reserved[6]; /* 0x3a */
60201 + __u16 vesapm_size; /* 0x3a */
60202 + __u8 _reserved[4]; /* 0x3c */
60203 } __attribute__((packed));
60204
60205 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
60206 diff -urNp linux-2.6.32.45/include/linux/security.h linux-2.6.32.45/include/linux/security.h
60207 --- linux-2.6.32.45/include/linux/security.h 2011-03-27 14:31:47.000000000 -0400
60208 +++ linux-2.6.32.45/include/linux/security.h 2011-04-17 15:56:46.000000000 -0400
60209 @@ -34,6 +34,7 @@
60210 #include <linux/key.h>
60211 #include <linux/xfrm.h>
60212 #include <linux/gfp.h>
60213 +#include <linux/grsecurity.h>
60214 #include <net/flow.h>
60215
60216 /* Maximum number of letters for an LSM name string */
60217 diff -urNp linux-2.6.32.45/include/linux/seq_file.h linux-2.6.32.45/include/linux/seq_file.h
60218 --- linux-2.6.32.45/include/linux/seq_file.h 2011-03-27 14:31:47.000000000 -0400
60219 +++ linux-2.6.32.45/include/linux/seq_file.h 2011-08-23 21:22:38.000000000 -0400
60220 @@ -32,6 +32,7 @@ struct seq_operations {
60221 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
60222 int (*show) (struct seq_file *m, void *v);
60223 };
60224 +typedef struct seq_operations __no_const seq_operations_no_const;
60225
60226 #define SEQ_SKIP 1
60227
60228 diff -urNp linux-2.6.32.45/include/linux/shm.h linux-2.6.32.45/include/linux/shm.h
60229 --- linux-2.6.32.45/include/linux/shm.h 2011-03-27 14:31:47.000000000 -0400
60230 +++ linux-2.6.32.45/include/linux/shm.h 2011-04-17 15:56:46.000000000 -0400
60231 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
60232 pid_t shm_cprid;
60233 pid_t shm_lprid;
60234 struct user_struct *mlock_user;
60235 +#ifdef CONFIG_GRKERNSEC
60236 + time_t shm_createtime;
60237 + pid_t shm_lapid;
60238 +#endif
60239 };
60240
60241 /* shm_mode upper byte flags */
60242 diff -urNp linux-2.6.32.45/include/linux/skbuff.h linux-2.6.32.45/include/linux/skbuff.h
60243 --- linux-2.6.32.45/include/linux/skbuff.h 2011-03-27 14:31:47.000000000 -0400
60244 +++ linux-2.6.32.45/include/linux/skbuff.h 2011-08-21 15:27:56.000000000 -0400
60245 @@ -14,6 +14,7 @@
60246 #ifndef _LINUX_SKBUFF_H
60247 #define _LINUX_SKBUFF_H
60248
60249 +#include <linux/const.h>
60250 #include <linux/kernel.h>
60251 #include <linux/kmemcheck.h>
60252 #include <linux/compiler.h>
60253 @@ -544,7 +545,7 @@ static inline union skb_shared_tx *skb_t
60254 */
60255 static inline int skb_queue_empty(const struct sk_buff_head *list)
60256 {
60257 - return list->next == (struct sk_buff *)list;
60258 + return list->next == (const struct sk_buff *)list;
60259 }
60260
60261 /**
60262 @@ -557,7 +558,7 @@ static inline int skb_queue_empty(const
60263 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
60264 const struct sk_buff *skb)
60265 {
60266 - return (skb->next == (struct sk_buff *) list);
60267 + return (skb->next == (const struct sk_buff *) list);
60268 }
60269
60270 /**
60271 @@ -570,7 +571,7 @@ static inline bool skb_queue_is_last(con
60272 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
60273 const struct sk_buff *skb)
60274 {
60275 - return (skb->prev == (struct sk_buff *) list);
60276 + return (skb->prev == (const struct sk_buff *) list);
60277 }
60278
60279 /**
60280 @@ -1367,7 +1368,7 @@ static inline int skb_network_offset(con
60281 * headroom, you should not reduce this.
60282 */
60283 #ifndef NET_SKB_PAD
60284 -#define NET_SKB_PAD 32
60285 +#define NET_SKB_PAD (_AC(32,UL))
60286 #endif
60287
60288 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
60289 diff -urNp linux-2.6.32.45/include/linux/slab_def.h linux-2.6.32.45/include/linux/slab_def.h
60290 --- linux-2.6.32.45/include/linux/slab_def.h 2011-03-27 14:31:47.000000000 -0400
60291 +++ linux-2.6.32.45/include/linux/slab_def.h 2011-05-04 17:56:28.000000000 -0400
60292 @@ -69,10 +69,10 @@ struct kmem_cache {
60293 unsigned long node_allocs;
60294 unsigned long node_frees;
60295 unsigned long node_overflow;
60296 - atomic_t allochit;
60297 - atomic_t allocmiss;
60298 - atomic_t freehit;
60299 - atomic_t freemiss;
60300 + atomic_unchecked_t allochit;
60301 + atomic_unchecked_t allocmiss;
60302 + atomic_unchecked_t freehit;
60303 + atomic_unchecked_t freemiss;
60304
60305 /*
60306 * If debugging is enabled, then the allocator can add additional
60307 diff -urNp linux-2.6.32.45/include/linux/slab.h linux-2.6.32.45/include/linux/slab.h
60308 --- linux-2.6.32.45/include/linux/slab.h 2011-03-27 14:31:47.000000000 -0400
60309 +++ linux-2.6.32.45/include/linux/slab.h 2011-04-17 15:56:46.000000000 -0400
60310 @@ -11,12 +11,20 @@
60311
60312 #include <linux/gfp.h>
60313 #include <linux/types.h>
60314 +#include <linux/err.h>
60315
60316 /*
60317 * Flags to pass to kmem_cache_create().
60318 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
60319 */
60320 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
60321 +
60322 +#ifdef CONFIG_PAX_USERCOPY
60323 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
60324 +#else
60325 +#define SLAB_USERCOPY 0x00000000UL
60326 +#endif
60327 +
60328 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
60329 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
60330 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
60331 @@ -82,10 +90,13 @@
60332 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
60333 * Both make kfree a no-op.
60334 */
60335 -#define ZERO_SIZE_PTR ((void *)16)
60336 +#define ZERO_SIZE_PTR \
60337 +({ \
60338 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
60339 + (void *)(-MAX_ERRNO-1L); \
60340 +})
60341
60342 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
60343 - (unsigned long)ZERO_SIZE_PTR)
60344 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
60345
60346 /*
60347 * struct kmem_cache related prototypes
60348 @@ -138,6 +149,7 @@ void * __must_check krealloc(const void
60349 void kfree(const void *);
60350 void kzfree(const void *);
60351 size_t ksize(const void *);
60352 +void check_object_size(const void *ptr, unsigned long n, bool to);
60353
60354 /*
60355 * Allocator specific definitions. These are mainly used to establish optimized
60356 @@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t
60357
60358 void __init kmem_cache_init_late(void);
60359
60360 +#define kmalloc(x, y) \
60361 +({ \
60362 + void *___retval; \
60363 + intoverflow_t ___x = (intoverflow_t)x; \
60364 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
60365 + ___retval = NULL; \
60366 + else \
60367 + ___retval = kmalloc((size_t)___x, (y)); \
60368 + ___retval; \
60369 +})
60370 +
60371 +#define kmalloc_node(x, y, z) \
60372 +({ \
60373 + void *___retval; \
60374 + intoverflow_t ___x = (intoverflow_t)x; \
60375 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
60376 + ___retval = NULL; \
60377 + else \
60378 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
60379 + ___retval; \
60380 +})
60381 +
60382 +#define kzalloc(x, y) \
60383 +({ \
60384 + void *___retval; \
60385 + intoverflow_t ___x = (intoverflow_t)x; \
60386 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
60387 + ___retval = NULL; \
60388 + else \
60389 + ___retval = kzalloc((size_t)___x, (y)); \
60390 + ___retval; \
60391 +})
60392 +
60393 #endif /* _LINUX_SLAB_H */
60394 diff -urNp linux-2.6.32.45/include/linux/slub_def.h linux-2.6.32.45/include/linux/slub_def.h
60395 --- linux-2.6.32.45/include/linux/slub_def.h 2011-03-27 14:31:47.000000000 -0400
60396 +++ linux-2.6.32.45/include/linux/slub_def.h 2011-08-05 20:33:55.000000000 -0400
60397 @@ -86,7 +86,7 @@ struct kmem_cache {
60398 struct kmem_cache_order_objects max;
60399 struct kmem_cache_order_objects min;
60400 gfp_t allocflags; /* gfp flags to use on each alloc */
60401 - int refcount; /* Refcount for slab cache destroy */
60402 + atomic_t refcount; /* Refcount for slab cache destroy */
60403 void (*ctor)(void *);
60404 int inuse; /* Offset to metadata */
60405 int align; /* Alignment */
60406 @@ -215,7 +215,7 @@ static __always_inline struct kmem_cache
60407 #endif
60408
60409 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
60410 -void *__kmalloc(size_t size, gfp_t flags);
60411 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
60412
60413 #ifdef CONFIG_KMEMTRACE
60414 extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
60415 diff -urNp linux-2.6.32.45/include/linux/sonet.h linux-2.6.32.45/include/linux/sonet.h
60416 --- linux-2.6.32.45/include/linux/sonet.h 2011-03-27 14:31:47.000000000 -0400
60417 +++ linux-2.6.32.45/include/linux/sonet.h 2011-04-17 15:56:46.000000000 -0400
60418 @@ -61,7 +61,7 @@ struct sonet_stats {
60419 #include <asm/atomic.h>
60420
60421 struct k_sonet_stats {
60422 -#define __HANDLE_ITEM(i) atomic_t i
60423 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
60424 __SONET_ITEMS
60425 #undef __HANDLE_ITEM
60426 };
60427 diff -urNp linux-2.6.32.45/include/linux/sunrpc/cache.h linux-2.6.32.45/include/linux/sunrpc/cache.h
60428 --- linux-2.6.32.45/include/linux/sunrpc/cache.h 2011-03-27 14:31:47.000000000 -0400
60429 +++ linux-2.6.32.45/include/linux/sunrpc/cache.h 2011-08-05 20:33:55.000000000 -0400
60430 @@ -125,7 +125,7 @@ struct cache_detail {
60431 */
60432 struct cache_req {
60433 struct cache_deferred_req *(*defer)(struct cache_req *req);
60434 -};
60435 +} __no_const;
60436 /* this must be embedded in a deferred_request that is being
60437 * delayed awaiting cache-fill
60438 */
60439 diff -urNp linux-2.6.32.45/include/linux/sunrpc/clnt.h linux-2.6.32.45/include/linux/sunrpc/clnt.h
60440 --- linux-2.6.32.45/include/linux/sunrpc/clnt.h 2011-03-27 14:31:47.000000000 -0400
60441 +++ linux-2.6.32.45/include/linux/sunrpc/clnt.h 2011-04-17 15:56:46.000000000 -0400
60442 @@ -167,9 +167,9 @@ static inline unsigned short rpc_get_por
60443 {
60444 switch (sap->sa_family) {
60445 case AF_INET:
60446 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
60447 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
60448 case AF_INET6:
60449 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
60450 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
60451 }
60452 return 0;
60453 }
60454 @@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const
60455 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
60456 const struct sockaddr *src)
60457 {
60458 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
60459 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
60460 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
60461
60462 dsin->sin_family = ssin->sin_family;
60463 @@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const
60464 if (sa->sa_family != AF_INET6)
60465 return 0;
60466
60467 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
60468 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
60469 }
60470
60471 #endif /* __KERNEL__ */
60472 diff -urNp linux-2.6.32.45/include/linux/sunrpc/svc_rdma.h linux-2.6.32.45/include/linux/sunrpc/svc_rdma.h
60473 --- linux-2.6.32.45/include/linux/sunrpc/svc_rdma.h 2011-03-27 14:31:47.000000000 -0400
60474 +++ linux-2.6.32.45/include/linux/sunrpc/svc_rdma.h 2011-05-04 17:56:28.000000000 -0400
60475 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
60476 extern unsigned int svcrdma_max_requests;
60477 extern unsigned int svcrdma_max_req_size;
60478
60479 -extern atomic_t rdma_stat_recv;
60480 -extern atomic_t rdma_stat_read;
60481 -extern atomic_t rdma_stat_write;
60482 -extern atomic_t rdma_stat_sq_starve;
60483 -extern atomic_t rdma_stat_rq_starve;
60484 -extern atomic_t rdma_stat_rq_poll;
60485 -extern atomic_t rdma_stat_rq_prod;
60486 -extern atomic_t rdma_stat_sq_poll;
60487 -extern atomic_t rdma_stat_sq_prod;
60488 +extern atomic_unchecked_t rdma_stat_recv;
60489 +extern atomic_unchecked_t rdma_stat_read;
60490 +extern atomic_unchecked_t rdma_stat_write;
60491 +extern atomic_unchecked_t rdma_stat_sq_starve;
60492 +extern atomic_unchecked_t rdma_stat_rq_starve;
60493 +extern atomic_unchecked_t rdma_stat_rq_poll;
60494 +extern atomic_unchecked_t rdma_stat_rq_prod;
60495 +extern atomic_unchecked_t rdma_stat_sq_poll;
60496 +extern atomic_unchecked_t rdma_stat_sq_prod;
60497
60498 #define RPCRDMA_VERSION 1
60499
60500 diff -urNp linux-2.6.32.45/include/linux/suspend.h linux-2.6.32.45/include/linux/suspend.h
60501 --- linux-2.6.32.45/include/linux/suspend.h 2011-03-27 14:31:47.000000000 -0400
60502 +++ linux-2.6.32.45/include/linux/suspend.h 2011-04-17 15:56:46.000000000 -0400
60503 @@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
60504 * which require special recovery actions in that situation.
60505 */
60506 struct platform_suspend_ops {
60507 - int (*valid)(suspend_state_t state);
60508 - int (*begin)(suspend_state_t state);
60509 - int (*prepare)(void);
60510 - int (*prepare_late)(void);
60511 - int (*enter)(suspend_state_t state);
60512 - void (*wake)(void);
60513 - void (*finish)(void);
60514 - void (*end)(void);
60515 - void (*recover)(void);
60516 + int (* const valid)(suspend_state_t state);
60517 + int (* const begin)(suspend_state_t state);
60518 + int (* const prepare)(void);
60519 + int (* const prepare_late)(void);
60520 + int (* const enter)(suspend_state_t state);
60521 + void (* const wake)(void);
60522 + void (* const finish)(void);
60523 + void (* const end)(void);
60524 + void (* const recover)(void);
60525 };
60526
60527 #ifdef CONFIG_SUSPEND
60528 @@ -120,7 +120,7 @@ struct platform_suspend_ops {
60529 * suspend_set_ops - set platform dependent suspend operations
60530 * @ops: The new suspend operations to set.
60531 */
60532 -extern void suspend_set_ops(struct platform_suspend_ops *ops);
60533 +extern void suspend_set_ops(const struct platform_suspend_ops *ops);
60534 extern int suspend_valid_only_mem(suspend_state_t state);
60535
60536 /**
60537 @@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t st
60538 #else /* !CONFIG_SUSPEND */
60539 #define suspend_valid_only_mem NULL
60540
60541 -static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
60542 +static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
60543 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
60544 #endif /* !CONFIG_SUSPEND */
60545
60546 @@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone
60547 * platforms which require special recovery actions in that situation.
60548 */
60549 struct platform_hibernation_ops {
60550 - int (*begin)(void);
60551 - void (*end)(void);
60552 - int (*pre_snapshot)(void);
60553 - void (*finish)(void);
60554 - int (*prepare)(void);
60555 - int (*enter)(void);
60556 - void (*leave)(void);
60557 - int (*pre_restore)(void);
60558 - void (*restore_cleanup)(void);
60559 - void (*recover)(void);
60560 + int (* const begin)(void);
60561 + void (* const end)(void);
60562 + int (* const pre_snapshot)(void);
60563 + void (* const finish)(void);
60564 + int (* const prepare)(void);
60565 + int (* const enter)(void);
60566 + void (* const leave)(void);
60567 + int (* const pre_restore)(void);
60568 + void (* const restore_cleanup)(void);
60569 + void (* const recover)(void);
60570 };
60571
60572 #ifdef CONFIG_HIBERNATION
60573 @@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct
60574 extern void swsusp_unset_page_free(struct page *);
60575 extern unsigned long get_safe_page(gfp_t gfp_mask);
60576
60577 -extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
60578 +extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
60579 extern int hibernate(void);
60580 extern bool system_entering_hibernation(void);
60581 #else /* CONFIG_HIBERNATION */
60582 @@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidd
60583 static inline void swsusp_set_page_free(struct page *p) {}
60584 static inline void swsusp_unset_page_free(struct page *p) {}
60585
60586 -static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
60587 +static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
60588 static inline int hibernate(void) { return -ENOSYS; }
60589 static inline bool system_entering_hibernation(void) { return false; }
60590 #endif /* CONFIG_HIBERNATION */
60591 diff -urNp linux-2.6.32.45/include/linux/sysctl.h linux-2.6.32.45/include/linux/sysctl.h
60592 --- linux-2.6.32.45/include/linux/sysctl.h 2011-03-27 14:31:47.000000000 -0400
60593 +++ linux-2.6.32.45/include/linux/sysctl.h 2011-04-17 15:56:46.000000000 -0400
60594 @@ -164,7 +164,11 @@ enum
60595 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
60596 };
60597
60598 -
60599 +#ifdef CONFIG_PAX_SOFTMODE
60600 +enum {
60601 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
60602 +};
60603 +#endif
60604
60605 /* CTL_VM names: */
60606 enum
60607 @@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_tab
60608
60609 extern int proc_dostring(struct ctl_table *, int,
60610 void __user *, size_t *, loff_t *);
60611 +extern int proc_dostring_modpriv(struct ctl_table *, int,
60612 + void __user *, size_t *, loff_t *);
60613 extern int proc_dointvec(struct ctl_table *, int,
60614 void __user *, size_t *, loff_t *);
60615 extern int proc_dointvec_minmax(struct ctl_table *, int,
60616 @@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name,
60617
60618 extern ctl_handler sysctl_data;
60619 extern ctl_handler sysctl_string;
60620 +extern ctl_handler sysctl_string_modpriv;
60621 extern ctl_handler sysctl_intvec;
60622 extern ctl_handler sysctl_jiffies;
60623 extern ctl_handler sysctl_ms_jiffies;
60624 diff -urNp linux-2.6.32.45/include/linux/sysfs.h linux-2.6.32.45/include/linux/sysfs.h
60625 --- linux-2.6.32.45/include/linux/sysfs.h 2011-03-27 14:31:47.000000000 -0400
60626 +++ linux-2.6.32.45/include/linux/sysfs.h 2011-04-17 15:56:46.000000000 -0400
60627 @@ -75,8 +75,8 @@ struct bin_attribute {
60628 };
60629
60630 struct sysfs_ops {
60631 - ssize_t (*show)(struct kobject *, struct attribute *,char *);
60632 - ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
60633 + ssize_t (* const show)(struct kobject *, struct attribute *,char *);
60634 + ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
60635 };
60636
60637 struct sysfs_dirent;
60638 diff -urNp linux-2.6.32.45/include/linux/thread_info.h linux-2.6.32.45/include/linux/thread_info.h
60639 --- linux-2.6.32.45/include/linux/thread_info.h 2011-03-27 14:31:47.000000000 -0400
60640 +++ linux-2.6.32.45/include/linux/thread_info.h 2011-04-17 15:56:46.000000000 -0400
60641 @@ -23,7 +23,7 @@ struct restart_block {
60642 };
60643 /* For futex_wait and futex_wait_requeue_pi */
60644 struct {
60645 - u32 *uaddr;
60646 + u32 __user *uaddr;
60647 u32 val;
60648 u32 flags;
60649 u32 bitset;
60650 diff -urNp linux-2.6.32.45/include/linux/tty.h linux-2.6.32.45/include/linux/tty.h
60651 --- linux-2.6.32.45/include/linux/tty.h 2011-03-27 14:31:47.000000000 -0400
60652 +++ linux-2.6.32.45/include/linux/tty.h 2011-08-05 20:33:55.000000000 -0400
60653 @@ -493,7 +493,6 @@ extern void tty_ldisc_begin(void);
60654 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
60655 extern void tty_ldisc_enable(struct tty_struct *tty);
60656
60657 -
60658 /* n_tty.c */
60659 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
60660
60661 diff -urNp linux-2.6.32.45/include/linux/tty_ldisc.h linux-2.6.32.45/include/linux/tty_ldisc.h
60662 --- linux-2.6.32.45/include/linux/tty_ldisc.h 2011-03-27 14:31:47.000000000 -0400
60663 +++ linux-2.6.32.45/include/linux/tty_ldisc.h 2011-04-17 15:56:46.000000000 -0400
60664 @@ -139,7 +139,7 @@ struct tty_ldisc_ops {
60665
60666 struct module *owner;
60667
60668 - int refcount;
60669 + atomic_t refcount;
60670 };
60671
60672 struct tty_ldisc {
60673 diff -urNp linux-2.6.32.45/include/linux/types.h linux-2.6.32.45/include/linux/types.h
60674 --- linux-2.6.32.45/include/linux/types.h 2011-03-27 14:31:47.000000000 -0400
60675 +++ linux-2.6.32.45/include/linux/types.h 2011-04-17 15:56:46.000000000 -0400
60676 @@ -191,10 +191,26 @@ typedef struct {
60677 volatile int counter;
60678 } atomic_t;
60679
60680 +#ifdef CONFIG_PAX_REFCOUNT
60681 +typedef struct {
60682 + volatile int counter;
60683 +} atomic_unchecked_t;
60684 +#else
60685 +typedef atomic_t atomic_unchecked_t;
60686 +#endif
60687 +
60688 #ifdef CONFIG_64BIT
60689 typedef struct {
60690 volatile long counter;
60691 } atomic64_t;
60692 +
60693 +#ifdef CONFIG_PAX_REFCOUNT
60694 +typedef struct {
60695 + volatile long counter;
60696 +} atomic64_unchecked_t;
60697 +#else
60698 +typedef atomic64_t atomic64_unchecked_t;
60699 +#endif
60700 #endif
60701
60702 struct ustat {
60703 diff -urNp linux-2.6.32.45/include/linux/uaccess.h linux-2.6.32.45/include/linux/uaccess.h
60704 --- linux-2.6.32.45/include/linux/uaccess.h 2011-03-27 14:31:47.000000000 -0400
60705 +++ linux-2.6.32.45/include/linux/uaccess.h 2011-04-17 15:56:46.000000000 -0400
60706 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
60707 long ret; \
60708 mm_segment_t old_fs = get_fs(); \
60709 \
60710 - set_fs(KERNEL_DS); \
60711 pagefault_disable(); \
60712 + set_fs(KERNEL_DS); \
60713 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
60714 - pagefault_enable(); \
60715 set_fs(old_fs); \
60716 + pagefault_enable(); \
60717 ret; \
60718 })
60719
60720 @@ -93,7 +93,7 @@ static inline unsigned long __copy_from_
60721 * Safely read from address @src to the buffer at @dst. If a kernel fault
60722 * happens, handle that and return -EFAULT.
60723 */
60724 -extern long probe_kernel_read(void *dst, void *src, size_t size);
60725 +extern long probe_kernel_read(void *dst, const void *src, size_t size);
60726
60727 /*
60728 * probe_kernel_write(): safely attempt to write to a location
60729 @@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst,
60730 * Safely write to address @dst from the buffer at @src. If a kernel fault
60731 * happens, handle that and return -EFAULT.
60732 */
60733 -extern long probe_kernel_write(void *dst, void *src, size_t size);
60734 +extern long probe_kernel_write(void *dst, const void *src, size_t size);
60735
60736 #endif /* __LINUX_UACCESS_H__ */
60737 diff -urNp linux-2.6.32.45/include/linux/unaligned/access_ok.h linux-2.6.32.45/include/linux/unaligned/access_ok.h
60738 --- linux-2.6.32.45/include/linux/unaligned/access_ok.h 2011-03-27 14:31:47.000000000 -0400
60739 +++ linux-2.6.32.45/include/linux/unaligned/access_ok.h 2011-04-17 15:56:46.000000000 -0400
60740 @@ -6,32 +6,32 @@
60741
60742 static inline u16 get_unaligned_le16(const void *p)
60743 {
60744 - return le16_to_cpup((__le16 *)p);
60745 + return le16_to_cpup((const __le16 *)p);
60746 }
60747
60748 static inline u32 get_unaligned_le32(const void *p)
60749 {
60750 - return le32_to_cpup((__le32 *)p);
60751 + return le32_to_cpup((const __le32 *)p);
60752 }
60753
60754 static inline u64 get_unaligned_le64(const void *p)
60755 {
60756 - return le64_to_cpup((__le64 *)p);
60757 + return le64_to_cpup((const __le64 *)p);
60758 }
60759
60760 static inline u16 get_unaligned_be16(const void *p)
60761 {
60762 - return be16_to_cpup((__be16 *)p);
60763 + return be16_to_cpup((const __be16 *)p);
60764 }
60765
60766 static inline u32 get_unaligned_be32(const void *p)
60767 {
60768 - return be32_to_cpup((__be32 *)p);
60769 + return be32_to_cpup((const __be32 *)p);
60770 }
60771
60772 static inline u64 get_unaligned_be64(const void *p)
60773 {
60774 - return be64_to_cpup((__be64 *)p);
60775 + return be64_to_cpup((const __be64 *)p);
60776 }
60777
60778 static inline void put_unaligned_le16(u16 val, void *p)
60779 diff -urNp linux-2.6.32.45/include/linux/vmalloc.h linux-2.6.32.45/include/linux/vmalloc.h
60780 --- linux-2.6.32.45/include/linux/vmalloc.h 2011-03-27 14:31:47.000000000 -0400
60781 +++ linux-2.6.32.45/include/linux/vmalloc.h 2011-04-17 15:56:46.000000000 -0400
60782 @@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
60783 #define VM_MAP 0x00000004 /* vmap()ed pages */
60784 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
60785 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
60786 +
60787 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
60788 +#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
60789 +#endif
60790 +
60791 /* bits [20..32] reserved for arch specific ioremap internals */
60792
60793 /*
60794 @@ -123,4 +128,81 @@ struct vm_struct **pcpu_get_vm_areas(con
60795
60796 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
60797
60798 +#define vmalloc(x) \
60799 +({ \
60800 + void *___retval; \
60801 + intoverflow_t ___x = (intoverflow_t)x; \
60802 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
60803 + ___retval = NULL; \
60804 + else \
60805 + ___retval = vmalloc((unsigned long)___x); \
60806 + ___retval; \
60807 +})
60808 +
60809 +#define __vmalloc(x, y, z) \
60810 +({ \
60811 + void *___retval; \
60812 + intoverflow_t ___x = (intoverflow_t)x; \
60813 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
60814 + ___retval = NULL; \
60815 + else \
60816 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
60817 + ___retval; \
60818 +})
60819 +
60820 +#define vmalloc_user(x) \
60821 +({ \
60822 + void *___retval; \
60823 + intoverflow_t ___x = (intoverflow_t)x; \
60824 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
60825 + ___retval = NULL; \
60826 + else \
60827 + ___retval = vmalloc_user((unsigned long)___x); \
60828 + ___retval; \
60829 +})
60830 +
60831 +#define vmalloc_exec(x) \
60832 +({ \
60833 + void *___retval; \
60834 + intoverflow_t ___x = (intoverflow_t)x; \
60835 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
60836 + ___retval = NULL; \
60837 + else \
60838 + ___retval = vmalloc_exec((unsigned long)___x); \
60839 + ___retval; \
60840 +})
60841 +
60842 +#define vmalloc_node(x, y) \
60843 +({ \
60844 + void *___retval; \
60845 + intoverflow_t ___x = (intoverflow_t)x; \
60846 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
60847 + ___retval = NULL; \
60848 + else \
60849 + ___retval = vmalloc_node((unsigned long)___x, (y));\
60850 + ___retval; \
60851 +})
60852 +
60853 +#define vmalloc_32(x) \
60854 +({ \
60855 + void *___retval; \
60856 + intoverflow_t ___x = (intoverflow_t)x; \
60857 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
60858 + ___retval = NULL; \
60859 + else \
60860 + ___retval = vmalloc_32((unsigned long)___x); \
60861 + ___retval; \
60862 +})
60863 +
60864 +#define vmalloc_32_user(x) \
60865 +({ \
60866 + void *___retval; \
60867 + intoverflow_t ___x = (intoverflow_t)x; \
60868 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
60869 + ___retval = NULL; \
60870 + else \
60871 + ___retval = vmalloc_32_user((unsigned long)___x);\
60872 + ___retval; \
60873 +})
60874 +
60875 #endif /* _LINUX_VMALLOC_H */
60876 diff -urNp linux-2.6.32.45/include/linux/vmstat.h linux-2.6.32.45/include/linux/vmstat.h
60877 --- linux-2.6.32.45/include/linux/vmstat.h 2011-03-27 14:31:47.000000000 -0400
60878 +++ linux-2.6.32.45/include/linux/vmstat.h 2011-04-17 15:56:46.000000000 -0400
60879 @@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(in
60880 /*
60881 * Zone based page accounting with per cpu differentials.
60882 */
60883 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60884 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60885
60886 static inline void zone_page_state_add(long x, struct zone *zone,
60887 enum zone_stat_item item)
60888 {
60889 - atomic_long_add(x, &zone->vm_stat[item]);
60890 - atomic_long_add(x, &vm_stat[item]);
60891 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
60892 + atomic_long_add_unchecked(x, &vm_stat[item]);
60893 }
60894
60895 static inline unsigned long global_page_state(enum zone_stat_item item)
60896 {
60897 - long x = atomic_long_read(&vm_stat[item]);
60898 + long x = atomic_long_read_unchecked(&vm_stat[item]);
60899 #ifdef CONFIG_SMP
60900 if (x < 0)
60901 x = 0;
60902 @@ -158,7 +158,7 @@ static inline unsigned long global_page_
60903 static inline unsigned long zone_page_state(struct zone *zone,
60904 enum zone_stat_item item)
60905 {
60906 - long x = atomic_long_read(&zone->vm_stat[item]);
60907 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
60908 #ifdef CONFIG_SMP
60909 if (x < 0)
60910 x = 0;
60911 @@ -175,7 +175,7 @@ static inline unsigned long zone_page_st
60912 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
60913 enum zone_stat_item item)
60914 {
60915 - long x = atomic_long_read(&zone->vm_stat[item]);
60916 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
60917
60918 #ifdef CONFIG_SMP
60919 int cpu;
60920 @@ -264,8 +264,8 @@ static inline void __mod_zone_page_state
60921
60922 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
60923 {
60924 - atomic_long_inc(&zone->vm_stat[item]);
60925 - atomic_long_inc(&vm_stat[item]);
60926 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
60927 + atomic_long_inc_unchecked(&vm_stat[item]);
60928 }
60929
60930 static inline void __inc_zone_page_state(struct page *page,
60931 @@ -276,8 +276,8 @@ static inline void __inc_zone_page_state
60932
60933 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
60934 {
60935 - atomic_long_dec(&zone->vm_stat[item]);
60936 - atomic_long_dec(&vm_stat[item]);
60937 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
60938 + atomic_long_dec_unchecked(&vm_stat[item]);
60939 }
60940
60941 static inline void __dec_zone_page_state(struct page *page,
60942 diff -urNp linux-2.6.32.45/include/media/saa7146_vv.h linux-2.6.32.45/include/media/saa7146_vv.h
60943 --- linux-2.6.32.45/include/media/saa7146_vv.h 2011-03-27 14:31:47.000000000 -0400
60944 +++ linux-2.6.32.45/include/media/saa7146_vv.h 2011-08-23 21:22:38.000000000 -0400
60945 @@ -167,7 +167,7 @@ struct saa7146_ext_vv
60946 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
60947
60948 /* the extension can override this */
60949 - struct v4l2_ioctl_ops ops;
60950 + v4l2_ioctl_ops_no_const ops;
60951 /* pointer to the saa7146 core ops */
60952 const struct v4l2_ioctl_ops *core_ops;
60953
60954 diff -urNp linux-2.6.32.45/include/media/v4l2-dev.h linux-2.6.32.45/include/media/v4l2-dev.h
60955 --- linux-2.6.32.45/include/media/v4l2-dev.h 2011-03-27 14:31:47.000000000 -0400
60956 +++ linux-2.6.32.45/include/media/v4l2-dev.h 2011-08-05 20:33:55.000000000 -0400
60957 @@ -34,7 +34,7 @@ struct v4l2_device;
60958 #define V4L2_FL_UNREGISTERED (0)
60959
60960 struct v4l2_file_operations {
60961 - struct module *owner;
60962 + struct module * const owner;
60963 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
60964 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
60965 unsigned int (*poll) (struct file *, struct poll_table_struct *);
60966 diff -urNp linux-2.6.32.45/include/media/v4l2-device.h linux-2.6.32.45/include/media/v4l2-device.h
60967 --- linux-2.6.32.45/include/media/v4l2-device.h 2011-03-27 14:31:47.000000000 -0400
60968 +++ linux-2.6.32.45/include/media/v4l2-device.h 2011-05-04 17:56:28.000000000 -0400
60969 @@ -71,7 +71,7 @@ int __must_check v4l2_device_register(st
60970 this function returns 0. If the name ends with a digit (e.g. cx18),
60971 then the name will be set to cx18-0 since cx180 looks really odd. */
60972 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
60973 - atomic_t *instance);
60974 + atomic_unchecked_t *instance);
60975
60976 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
60977 Since the parent disappears this ensures that v4l2_dev doesn't have an
60978 diff -urNp linux-2.6.32.45/include/media/v4l2-ioctl.h linux-2.6.32.45/include/media/v4l2-ioctl.h
60979 --- linux-2.6.32.45/include/media/v4l2-ioctl.h 2011-03-27 14:31:47.000000000 -0400
60980 +++ linux-2.6.32.45/include/media/v4l2-ioctl.h 2011-08-23 21:22:38.000000000 -0400
60981 @@ -243,6 +243,7 @@ struct v4l2_ioctl_ops {
60982 long (*vidioc_default) (struct file *file, void *fh,
60983 int cmd, void *arg);
60984 };
60985 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
60986
60987
60988 /* v4l debugging and diagnostics */
60989 diff -urNp linux-2.6.32.45/include/net/flow.h linux-2.6.32.45/include/net/flow.h
60990 --- linux-2.6.32.45/include/net/flow.h 2011-03-27 14:31:47.000000000 -0400
60991 +++ linux-2.6.32.45/include/net/flow.h 2011-05-04 17:56:28.000000000 -0400
60992 @@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net
60993 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
60994 u8 dir, flow_resolve_t resolver);
60995 extern void flow_cache_flush(void);
60996 -extern atomic_t flow_cache_genid;
60997 +extern atomic_unchecked_t flow_cache_genid;
60998
60999 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
61000 {
61001 diff -urNp linux-2.6.32.45/include/net/inetpeer.h linux-2.6.32.45/include/net/inetpeer.h
61002 --- linux-2.6.32.45/include/net/inetpeer.h 2011-03-27 14:31:47.000000000 -0400
61003 +++ linux-2.6.32.45/include/net/inetpeer.h 2011-04-17 15:56:46.000000000 -0400
61004 @@ -24,7 +24,7 @@ struct inet_peer
61005 __u32 dtime; /* the time of last use of not
61006 * referenced entries */
61007 atomic_t refcnt;
61008 - atomic_t rid; /* Frag reception counter */
61009 + atomic_unchecked_t rid; /* Frag reception counter */
61010 __u32 tcp_ts;
61011 unsigned long tcp_ts_stamp;
61012 };
61013 diff -urNp linux-2.6.32.45/include/net/ip_vs.h linux-2.6.32.45/include/net/ip_vs.h
61014 --- linux-2.6.32.45/include/net/ip_vs.h 2011-03-27 14:31:47.000000000 -0400
61015 +++ linux-2.6.32.45/include/net/ip_vs.h 2011-05-04 17:56:28.000000000 -0400
61016 @@ -365,7 +365,7 @@ struct ip_vs_conn {
61017 struct ip_vs_conn *control; /* Master control connection */
61018 atomic_t n_control; /* Number of controlled ones */
61019 struct ip_vs_dest *dest; /* real server */
61020 - atomic_t in_pkts; /* incoming packet counter */
61021 + atomic_unchecked_t in_pkts; /* incoming packet counter */
61022
61023 /* packet transmitter for different forwarding methods. If it
61024 mangles the packet, it must return NF_DROP or better NF_STOLEN,
61025 @@ -466,7 +466,7 @@ struct ip_vs_dest {
61026 union nf_inet_addr addr; /* IP address of the server */
61027 __be16 port; /* port number of the server */
61028 volatile unsigned flags; /* dest status flags */
61029 - atomic_t conn_flags; /* flags to copy to conn */
61030 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
61031 atomic_t weight; /* server weight */
61032
61033 atomic_t refcnt; /* reference counter */
61034 diff -urNp linux-2.6.32.45/include/net/irda/ircomm_core.h linux-2.6.32.45/include/net/irda/ircomm_core.h
61035 --- linux-2.6.32.45/include/net/irda/ircomm_core.h 2011-03-27 14:31:47.000000000 -0400
61036 +++ linux-2.6.32.45/include/net/irda/ircomm_core.h 2011-08-05 20:33:55.000000000 -0400
61037 @@ -51,7 +51,7 @@ typedef struct {
61038 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
61039 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
61040 struct ircomm_info *);
61041 -} call_t;
61042 +} __no_const call_t;
61043
61044 struct ircomm_cb {
61045 irda_queue_t queue;
61046 diff -urNp linux-2.6.32.45/include/net/irda/ircomm_tty.h linux-2.6.32.45/include/net/irda/ircomm_tty.h
61047 --- linux-2.6.32.45/include/net/irda/ircomm_tty.h 2011-03-27 14:31:47.000000000 -0400
61048 +++ linux-2.6.32.45/include/net/irda/ircomm_tty.h 2011-04-17 15:56:46.000000000 -0400
61049 @@ -35,6 +35,7 @@
61050 #include <linux/termios.h>
61051 #include <linux/timer.h>
61052 #include <linux/tty.h> /* struct tty_struct */
61053 +#include <asm/local.h>
61054
61055 #include <net/irda/irias_object.h>
61056 #include <net/irda/ircomm_core.h>
61057 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
61058 unsigned short close_delay;
61059 unsigned short closing_wait; /* time to wait before closing */
61060
61061 - int open_count;
61062 - int blocked_open; /* # of blocked opens */
61063 + local_t open_count;
61064 + local_t blocked_open; /* # of blocked opens */
61065
61066 /* Protect concurent access to :
61067 * o self->open_count
61068 diff -urNp linux-2.6.32.45/include/net/iucv/af_iucv.h linux-2.6.32.45/include/net/iucv/af_iucv.h
61069 --- linux-2.6.32.45/include/net/iucv/af_iucv.h 2011-03-27 14:31:47.000000000 -0400
61070 +++ linux-2.6.32.45/include/net/iucv/af_iucv.h 2011-05-04 17:56:28.000000000 -0400
61071 @@ -87,7 +87,7 @@ struct iucv_sock {
61072 struct iucv_sock_list {
61073 struct hlist_head head;
61074 rwlock_t lock;
61075 - atomic_t autobind_name;
61076 + atomic_unchecked_t autobind_name;
61077 };
61078
61079 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
61080 diff -urNp linux-2.6.32.45/include/net/lapb.h linux-2.6.32.45/include/net/lapb.h
61081 --- linux-2.6.32.45/include/net/lapb.h 2011-03-27 14:31:47.000000000 -0400
61082 +++ linux-2.6.32.45/include/net/lapb.h 2011-08-05 20:33:55.000000000 -0400
61083 @@ -95,7 +95,7 @@ struct lapb_cb {
61084 struct sk_buff_head write_queue;
61085 struct sk_buff_head ack_queue;
61086 unsigned char window;
61087 - struct lapb_register_struct callbacks;
61088 + struct lapb_register_struct *callbacks;
61089
61090 /* FRMR control information */
61091 struct lapb_frame frmr_data;
61092 diff -urNp linux-2.6.32.45/include/net/neighbour.h linux-2.6.32.45/include/net/neighbour.h
61093 --- linux-2.6.32.45/include/net/neighbour.h 2011-03-27 14:31:47.000000000 -0400
61094 +++ linux-2.6.32.45/include/net/neighbour.h 2011-08-26 20:29:08.000000000 -0400
61095 @@ -131,7 +131,7 @@ struct neigh_ops
61096 int (*connected_output)(struct sk_buff*);
61097 int (*hh_output)(struct sk_buff*);
61098 int (*queue_xmit)(struct sk_buff*);
61099 -};
61100 +} __do_const;
61101
61102 struct pneigh_entry
61103 {
61104 diff -urNp linux-2.6.32.45/include/net/netlink.h linux-2.6.32.45/include/net/netlink.h
61105 --- linux-2.6.32.45/include/net/netlink.h 2011-07-13 17:23:04.000000000 -0400
61106 +++ linux-2.6.32.45/include/net/netlink.h 2011-08-21 18:08:11.000000000 -0400
61107 @@ -335,7 +335,7 @@ static inline int nlmsg_ok(const struct
61108 {
61109 return (remaining >= (int) sizeof(struct nlmsghdr) &&
61110 nlh->nlmsg_len >= sizeof(struct nlmsghdr) &&
61111 - nlh->nlmsg_len <= remaining);
61112 + nlh->nlmsg_len <= (unsigned int)remaining);
61113 }
61114
61115 /**
61116 @@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct
61117 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
61118 {
61119 if (mark)
61120 - skb_trim(skb, (unsigned char *) mark - skb->data);
61121 + skb_trim(skb, (const unsigned char *) mark - skb->data);
61122 }
61123
61124 /**
61125 diff -urNp linux-2.6.32.45/include/net/netns/ipv4.h linux-2.6.32.45/include/net/netns/ipv4.h
61126 --- linux-2.6.32.45/include/net/netns/ipv4.h 2011-03-27 14:31:47.000000000 -0400
61127 +++ linux-2.6.32.45/include/net/netns/ipv4.h 2011-05-04 17:56:28.000000000 -0400
61128 @@ -54,7 +54,7 @@ struct netns_ipv4 {
61129 int current_rt_cache_rebuild_count;
61130
61131 struct timer_list rt_secret_timer;
61132 - atomic_t rt_genid;
61133 + atomic_unchecked_t rt_genid;
61134
61135 #ifdef CONFIG_IP_MROUTE
61136 struct sock *mroute_sk;
61137 diff -urNp linux-2.6.32.45/include/net/sctp/sctp.h linux-2.6.32.45/include/net/sctp/sctp.h
61138 --- linux-2.6.32.45/include/net/sctp/sctp.h 2011-03-27 14:31:47.000000000 -0400
61139 +++ linux-2.6.32.45/include/net/sctp/sctp.h 2011-04-17 15:56:46.000000000 -0400
61140 @@ -305,8 +305,8 @@ extern int sctp_debug_flag;
61141
61142 #else /* SCTP_DEBUG */
61143
61144 -#define SCTP_DEBUG_PRINTK(whatever...)
61145 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
61146 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
61147 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
61148 #define SCTP_ENABLE_DEBUG
61149 #define SCTP_DISABLE_DEBUG
61150 #define SCTP_ASSERT(expr, str, func)
61151 diff -urNp linux-2.6.32.45/include/net/secure_seq.h linux-2.6.32.45/include/net/secure_seq.h
61152 --- linux-2.6.32.45/include/net/secure_seq.h 2011-08-16 20:37:25.000000000 -0400
61153 +++ linux-2.6.32.45/include/net/secure_seq.h 2011-08-07 19:48:09.000000000 -0400
61154 @@ -7,14 +7,14 @@ extern __u32 secure_ip_id(__be32 daddr);
61155 extern __u32 secure_ipv6_id(const __be32 daddr[4]);
61156 extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
61157 extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
61158 - __be16 dport);
61159 + __be16 dport);
61160 extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
61161 __be16 sport, __be16 dport);
61162 extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
61163 - __be16 sport, __be16 dport);
61164 + __be16 sport, __be16 dport);
61165 extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
61166 - __be16 sport, __be16 dport);
61167 + __be16 sport, __be16 dport);
61168 extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
61169 - __be16 sport, __be16 dport);
61170 + __be16 sport, __be16 dport);
61171
61172 #endif /* _NET_SECURE_SEQ */
61173 diff -urNp linux-2.6.32.45/include/net/sock.h linux-2.6.32.45/include/net/sock.h
61174 --- linux-2.6.32.45/include/net/sock.h 2011-03-27 14:31:47.000000000 -0400
61175 +++ linux-2.6.32.45/include/net/sock.h 2011-08-21 17:24:37.000000000 -0400
61176 @@ -272,7 +272,7 @@ struct sock {
61177 rwlock_t sk_callback_lock;
61178 int sk_err,
61179 sk_err_soft;
61180 - atomic_t sk_drops;
61181 + atomic_unchecked_t sk_drops;
61182 unsigned short sk_ack_backlog;
61183 unsigned short sk_max_ack_backlog;
61184 __u32 sk_priority;
61185 @@ -737,7 +737,7 @@ static inline void sk_refcnt_debug_relea
61186 extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
61187 extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
61188 #else
61189 -static void inline sock_prot_inuse_add(struct net *net, struct proto *prot,
61190 +static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
61191 int inc)
61192 {
61193 }
61194 diff -urNp linux-2.6.32.45/include/net/tcp.h linux-2.6.32.45/include/net/tcp.h
61195 --- linux-2.6.32.45/include/net/tcp.h 2011-03-27 14:31:47.000000000 -0400
61196 +++ linux-2.6.32.45/include/net/tcp.h 2011-08-23 21:29:10.000000000 -0400
61197 @@ -1444,8 +1444,8 @@ enum tcp_seq_states {
61198 struct tcp_seq_afinfo {
61199 char *name;
61200 sa_family_t family;
61201 - struct file_operations seq_fops;
61202 - struct seq_operations seq_ops;
61203 + file_operations_no_const seq_fops;
61204 + seq_operations_no_const seq_ops;
61205 };
61206
61207 struct tcp_iter_state {
61208 diff -urNp linux-2.6.32.45/include/net/udp.h linux-2.6.32.45/include/net/udp.h
61209 --- linux-2.6.32.45/include/net/udp.h 2011-03-27 14:31:47.000000000 -0400
61210 +++ linux-2.6.32.45/include/net/udp.h 2011-08-23 21:29:34.000000000 -0400
61211 @@ -187,8 +187,8 @@ struct udp_seq_afinfo {
61212 char *name;
61213 sa_family_t family;
61214 struct udp_table *udp_table;
61215 - struct file_operations seq_fops;
61216 - struct seq_operations seq_ops;
61217 + file_operations_no_const seq_fops;
61218 + seq_operations_no_const seq_ops;
61219 };
61220
61221 struct udp_iter_state {
61222 diff -urNp linux-2.6.32.45/include/rdma/iw_cm.h linux-2.6.32.45/include/rdma/iw_cm.h
61223 --- linux-2.6.32.45/include/rdma/iw_cm.h 2011-03-27 14:31:47.000000000 -0400
61224 +++ linux-2.6.32.45/include/rdma/iw_cm.h 2011-08-05 20:33:55.000000000 -0400
61225 @@ -129,7 +129,7 @@ struct iw_cm_verbs {
61226 int backlog);
61227
61228 int (*destroy_listen)(struct iw_cm_id *cm_id);
61229 -};
61230 +} __no_const;
61231
61232 /**
61233 * iw_create_cm_id - Create an IW CM identifier.
61234 diff -urNp linux-2.6.32.45/include/scsi/libfc.h linux-2.6.32.45/include/scsi/libfc.h
61235 --- linux-2.6.32.45/include/scsi/libfc.h 2011-03-27 14:31:47.000000000 -0400
61236 +++ linux-2.6.32.45/include/scsi/libfc.h 2011-08-23 21:22:38.000000000 -0400
61237 @@ -675,6 +675,7 @@ struct libfc_function_template {
61238 */
61239 void (*disc_stop_final) (struct fc_lport *);
61240 };
61241 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
61242
61243 /* information used by the discovery layer */
61244 struct fc_disc {
61245 @@ -707,7 +708,7 @@ struct fc_lport {
61246 struct fc_disc disc;
61247
61248 /* Operational Information */
61249 - struct libfc_function_template tt;
61250 + libfc_function_template_no_const tt;
61251 u8 link_up;
61252 u8 qfull;
61253 enum fc_lport_state state;
61254 diff -urNp linux-2.6.32.45/include/scsi/scsi_device.h linux-2.6.32.45/include/scsi/scsi_device.h
61255 --- linux-2.6.32.45/include/scsi/scsi_device.h 2011-04-17 17:00:52.000000000 -0400
61256 +++ linux-2.6.32.45/include/scsi/scsi_device.h 2011-05-04 17:56:28.000000000 -0400
61257 @@ -156,9 +156,9 @@ struct scsi_device {
61258 unsigned int max_device_blocked; /* what device_blocked counts down from */
61259 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
61260
61261 - atomic_t iorequest_cnt;
61262 - atomic_t iodone_cnt;
61263 - atomic_t ioerr_cnt;
61264 + atomic_unchecked_t iorequest_cnt;
61265 + atomic_unchecked_t iodone_cnt;
61266 + atomic_unchecked_t ioerr_cnt;
61267
61268 struct device sdev_gendev,
61269 sdev_dev;
61270 diff -urNp linux-2.6.32.45/include/scsi/scsi_transport_fc.h linux-2.6.32.45/include/scsi/scsi_transport_fc.h
61271 --- linux-2.6.32.45/include/scsi/scsi_transport_fc.h 2011-03-27 14:31:47.000000000 -0400
61272 +++ linux-2.6.32.45/include/scsi/scsi_transport_fc.h 2011-08-26 20:19:09.000000000 -0400
61273 @@ -708,7 +708,7 @@ struct fc_function_template {
61274 unsigned long show_host_system_hostname:1;
61275
61276 unsigned long disable_target_scan:1;
61277 -};
61278 +} __do_const;
61279
61280
61281 /**
61282 diff -urNp linux-2.6.32.45/include/sound/ac97_codec.h linux-2.6.32.45/include/sound/ac97_codec.h
61283 --- linux-2.6.32.45/include/sound/ac97_codec.h 2011-03-27 14:31:47.000000000 -0400
61284 +++ linux-2.6.32.45/include/sound/ac97_codec.h 2011-04-17 15:56:46.000000000 -0400
61285 @@ -419,15 +419,15 @@
61286 struct snd_ac97;
61287
61288 struct snd_ac97_build_ops {
61289 - int (*build_3d) (struct snd_ac97 *ac97);
61290 - int (*build_specific) (struct snd_ac97 *ac97);
61291 - int (*build_spdif) (struct snd_ac97 *ac97);
61292 - int (*build_post_spdif) (struct snd_ac97 *ac97);
61293 + int (* const build_3d) (struct snd_ac97 *ac97);
61294 + int (* const build_specific) (struct snd_ac97 *ac97);
61295 + int (* const build_spdif) (struct snd_ac97 *ac97);
61296 + int (* const build_post_spdif) (struct snd_ac97 *ac97);
61297 #ifdef CONFIG_PM
61298 - void (*suspend) (struct snd_ac97 *ac97);
61299 - void (*resume) (struct snd_ac97 *ac97);
61300 + void (* const suspend) (struct snd_ac97 *ac97);
61301 + void (* const resume) (struct snd_ac97 *ac97);
61302 #endif
61303 - void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
61304 + void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
61305 };
61306
61307 struct snd_ac97_bus_ops {
61308 @@ -477,7 +477,7 @@ struct snd_ac97_template {
61309
61310 struct snd_ac97 {
61311 /* -- lowlevel (hardware) driver specific -- */
61312 - struct snd_ac97_build_ops * build_ops;
61313 + const struct snd_ac97_build_ops * build_ops;
61314 void *private_data;
61315 void (*private_free) (struct snd_ac97 *ac97);
61316 /* --- */
61317 diff -urNp linux-2.6.32.45/include/sound/ak4xxx-adda.h linux-2.6.32.45/include/sound/ak4xxx-adda.h
61318 --- linux-2.6.32.45/include/sound/ak4xxx-adda.h 2011-03-27 14:31:47.000000000 -0400
61319 +++ linux-2.6.32.45/include/sound/ak4xxx-adda.h 2011-08-05 20:33:55.000000000 -0400
61320 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
61321 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
61322 unsigned char val);
61323 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
61324 -};
61325 +} __no_const;
61326
61327 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
61328
61329 diff -urNp linux-2.6.32.45/include/sound/hwdep.h linux-2.6.32.45/include/sound/hwdep.h
61330 --- linux-2.6.32.45/include/sound/hwdep.h 2011-03-27 14:31:47.000000000 -0400
61331 +++ linux-2.6.32.45/include/sound/hwdep.h 2011-08-05 20:33:55.000000000 -0400
61332 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
61333 struct snd_hwdep_dsp_status *status);
61334 int (*dsp_load)(struct snd_hwdep *hw,
61335 struct snd_hwdep_dsp_image *image);
61336 -};
61337 +} __no_const;
61338
61339 struct snd_hwdep {
61340 struct snd_card *card;
61341 diff -urNp linux-2.6.32.45/include/sound/info.h linux-2.6.32.45/include/sound/info.h
61342 --- linux-2.6.32.45/include/sound/info.h 2011-03-27 14:31:47.000000000 -0400
61343 +++ linux-2.6.32.45/include/sound/info.h 2011-08-05 20:33:55.000000000 -0400
61344 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
61345 struct snd_info_buffer *buffer);
61346 void (*write)(struct snd_info_entry *entry,
61347 struct snd_info_buffer *buffer);
61348 -};
61349 +} __no_const;
61350
61351 struct snd_info_entry_ops {
61352 int (*open)(struct snd_info_entry *entry,
61353 diff -urNp linux-2.6.32.45/include/sound/pcm.h linux-2.6.32.45/include/sound/pcm.h
61354 --- linux-2.6.32.45/include/sound/pcm.h 2011-03-27 14:31:47.000000000 -0400
61355 +++ linux-2.6.32.45/include/sound/pcm.h 2011-08-23 21:22:38.000000000 -0400
61356 @@ -80,6 +80,7 @@ struct snd_pcm_ops {
61357 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
61358 int (*ack)(struct snd_pcm_substream *substream);
61359 };
61360 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
61361
61362 /*
61363 *
61364 diff -urNp linux-2.6.32.45/include/sound/sb16_csp.h linux-2.6.32.45/include/sound/sb16_csp.h
61365 --- linux-2.6.32.45/include/sound/sb16_csp.h 2011-03-27 14:31:47.000000000 -0400
61366 +++ linux-2.6.32.45/include/sound/sb16_csp.h 2011-08-05 20:33:55.000000000 -0400
61367 @@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
61368 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
61369 int (*csp_stop) (struct snd_sb_csp * p);
61370 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
61371 -};
61372 +} __no_const;
61373
61374 /*
61375 * CSP private data
61376 diff -urNp linux-2.6.32.45/include/sound/ymfpci.h linux-2.6.32.45/include/sound/ymfpci.h
61377 --- linux-2.6.32.45/include/sound/ymfpci.h 2011-03-27 14:31:47.000000000 -0400
61378 +++ linux-2.6.32.45/include/sound/ymfpci.h 2011-05-04 17:56:28.000000000 -0400
61379 @@ -358,7 +358,7 @@ struct snd_ymfpci {
61380 spinlock_t reg_lock;
61381 spinlock_t voice_lock;
61382 wait_queue_head_t interrupt_sleep;
61383 - atomic_t interrupt_sleep_count;
61384 + atomic_unchecked_t interrupt_sleep_count;
61385 struct snd_info_entry *proc_entry;
61386 const struct firmware *dsp_microcode;
61387 const struct firmware *controller_microcode;
61388 diff -urNp linux-2.6.32.45/include/trace/events/irq.h linux-2.6.32.45/include/trace/events/irq.h
61389 --- linux-2.6.32.45/include/trace/events/irq.h 2011-03-27 14:31:47.000000000 -0400
61390 +++ linux-2.6.32.45/include/trace/events/irq.h 2011-04-17 15:56:46.000000000 -0400
61391 @@ -34,7 +34,7 @@
61392 */
61393 TRACE_EVENT(irq_handler_entry,
61394
61395 - TP_PROTO(int irq, struct irqaction *action),
61396 + TP_PROTO(int irq, const struct irqaction *action),
61397
61398 TP_ARGS(irq, action),
61399
61400 @@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
61401 */
61402 TRACE_EVENT(irq_handler_exit,
61403
61404 - TP_PROTO(int irq, struct irqaction *action, int ret),
61405 + TP_PROTO(int irq, const struct irqaction *action, int ret),
61406
61407 TP_ARGS(irq, action, ret),
61408
61409 @@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
61410 */
61411 TRACE_EVENT(softirq_entry,
61412
61413 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
61414 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
61415
61416 TP_ARGS(h, vec),
61417
61418 @@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
61419 */
61420 TRACE_EVENT(softirq_exit,
61421
61422 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
61423 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
61424
61425 TP_ARGS(h, vec),
61426
61427 diff -urNp linux-2.6.32.45/include/video/uvesafb.h linux-2.6.32.45/include/video/uvesafb.h
61428 --- linux-2.6.32.45/include/video/uvesafb.h 2011-03-27 14:31:47.000000000 -0400
61429 +++ linux-2.6.32.45/include/video/uvesafb.h 2011-04-17 15:56:46.000000000 -0400
61430 @@ -177,6 +177,7 @@ struct uvesafb_par {
61431 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
61432 u8 pmi_setpal; /* PMI for palette changes */
61433 u16 *pmi_base; /* protected mode interface location */
61434 + u8 *pmi_code; /* protected mode code location */
61435 void *pmi_start;
61436 void *pmi_pal;
61437 u8 *vbe_state_orig; /*
61438 diff -urNp linux-2.6.32.45/init/do_mounts.c linux-2.6.32.45/init/do_mounts.c
61439 --- linux-2.6.32.45/init/do_mounts.c 2011-03-27 14:31:47.000000000 -0400
61440 +++ linux-2.6.32.45/init/do_mounts.c 2011-04-17 15:56:46.000000000 -0400
61441 @@ -216,11 +216,11 @@ static void __init get_fs_names(char *pa
61442
61443 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
61444 {
61445 - int err = sys_mount(name, "/root", fs, flags, data);
61446 + int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
61447 if (err)
61448 return err;
61449
61450 - sys_chdir("/root");
61451 + sys_chdir((__force const char __user *)"/root");
61452 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
61453 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
61454 current->fs->pwd.mnt->mnt_sb->s_type->name,
61455 @@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...
61456 va_start(args, fmt);
61457 vsprintf(buf, fmt, args);
61458 va_end(args);
61459 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
61460 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
61461 if (fd >= 0) {
61462 sys_ioctl(fd, FDEJECT, 0);
61463 sys_close(fd);
61464 }
61465 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
61466 - fd = sys_open("/dev/console", O_RDWR, 0);
61467 + fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
61468 if (fd >= 0) {
61469 sys_ioctl(fd, TCGETS, (long)&termios);
61470 termios.c_lflag &= ~ICANON;
61471 sys_ioctl(fd, TCSETSF, (long)&termios);
61472 - sys_read(fd, &c, 1);
61473 + sys_read(fd, (char __user *)&c, 1);
61474 termios.c_lflag |= ICANON;
61475 sys_ioctl(fd, TCSETSF, (long)&termios);
61476 sys_close(fd);
61477 @@ -416,6 +416,6 @@ void __init prepare_namespace(void)
61478 mount_root();
61479 out:
61480 devtmpfs_mount("dev");
61481 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
61482 - sys_chroot(".");
61483 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
61484 + sys_chroot((__force char __user *)".");
61485 }
61486 diff -urNp linux-2.6.32.45/init/do_mounts.h linux-2.6.32.45/init/do_mounts.h
61487 --- linux-2.6.32.45/init/do_mounts.h 2011-03-27 14:31:47.000000000 -0400
61488 +++ linux-2.6.32.45/init/do_mounts.h 2011-04-17 15:56:46.000000000 -0400
61489 @@ -15,15 +15,15 @@ extern int root_mountflags;
61490
61491 static inline int create_dev(char *name, dev_t dev)
61492 {
61493 - sys_unlink(name);
61494 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
61495 + sys_unlink((__force char __user *)name);
61496 + return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
61497 }
61498
61499 #if BITS_PER_LONG == 32
61500 static inline u32 bstat(char *name)
61501 {
61502 struct stat64 stat;
61503 - if (sys_stat64(name, &stat) != 0)
61504 + if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
61505 return 0;
61506 if (!S_ISBLK(stat.st_mode))
61507 return 0;
61508 diff -urNp linux-2.6.32.45/init/do_mounts_initrd.c linux-2.6.32.45/init/do_mounts_initrd.c
61509 --- linux-2.6.32.45/init/do_mounts_initrd.c 2011-03-27 14:31:47.000000000 -0400
61510 +++ linux-2.6.32.45/init/do_mounts_initrd.c 2011-04-17 15:56:46.000000000 -0400
61511 @@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shel
61512 sys_close(old_fd);sys_close(root_fd);
61513 sys_close(0);sys_close(1);sys_close(2);
61514 sys_setsid();
61515 - (void) sys_open("/dev/console",O_RDWR,0);
61516 + (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
61517 (void) sys_dup(0);
61518 (void) sys_dup(0);
61519 return kernel_execve(shell, argv, envp_init);
61520 @@ -47,13 +47,13 @@ static void __init handle_initrd(void)
61521 create_dev("/dev/root.old", Root_RAM0);
61522 /* mount initrd on rootfs' /root */
61523 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
61524 - sys_mkdir("/old", 0700);
61525 - root_fd = sys_open("/", 0, 0);
61526 - old_fd = sys_open("/old", 0, 0);
61527 + sys_mkdir((__force const char __user *)"/old", 0700);
61528 + root_fd = sys_open((__force const char __user *)"/", 0, 0);
61529 + old_fd = sys_open((__force const char __user *)"/old", 0, 0);
61530 /* move initrd over / and chdir/chroot in initrd root */
61531 - sys_chdir("/root");
61532 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
61533 - sys_chroot(".");
61534 + sys_chdir((__force const char __user *)"/root");
61535 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
61536 + sys_chroot((__force const char __user *)".");
61537
61538 /*
61539 * In case that a resume from disk is carried out by linuxrc or one of
61540 @@ -70,15 +70,15 @@ static void __init handle_initrd(void)
61541
61542 /* move initrd to rootfs' /old */
61543 sys_fchdir(old_fd);
61544 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
61545 + sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
61546 /* switch root and cwd back to / of rootfs */
61547 sys_fchdir(root_fd);
61548 - sys_chroot(".");
61549 + sys_chroot((__force const char __user *)".");
61550 sys_close(old_fd);
61551 sys_close(root_fd);
61552
61553 if (new_decode_dev(real_root_dev) == Root_RAM0) {
61554 - sys_chdir("/old");
61555 + sys_chdir((__force const char __user *)"/old");
61556 return;
61557 }
61558
61559 @@ -86,17 +86,17 @@ static void __init handle_initrd(void)
61560 mount_root();
61561
61562 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
61563 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
61564 + error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
61565 if (!error)
61566 printk("okay\n");
61567 else {
61568 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
61569 + int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
61570 if (error == -ENOENT)
61571 printk("/initrd does not exist. Ignored.\n");
61572 else
61573 printk("failed\n");
61574 printk(KERN_NOTICE "Unmounting old root\n");
61575 - sys_umount("/old", MNT_DETACH);
61576 + sys_umount((__force char __user *)"/old", MNT_DETACH);
61577 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
61578 if (fd < 0) {
61579 error = fd;
61580 @@ -119,11 +119,11 @@ int __init initrd_load(void)
61581 * mounted in the normal path.
61582 */
61583 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
61584 - sys_unlink("/initrd.image");
61585 + sys_unlink((__force const char __user *)"/initrd.image");
61586 handle_initrd();
61587 return 1;
61588 }
61589 }
61590 - sys_unlink("/initrd.image");
61591 + sys_unlink((__force const char __user *)"/initrd.image");
61592 return 0;
61593 }
61594 diff -urNp linux-2.6.32.45/init/do_mounts_md.c linux-2.6.32.45/init/do_mounts_md.c
61595 --- linux-2.6.32.45/init/do_mounts_md.c 2011-03-27 14:31:47.000000000 -0400
61596 +++ linux-2.6.32.45/init/do_mounts_md.c 2011-04-17 15:56:46.000000000 -0400
61597 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
61598 partitioned ? "_d" : "", minor,
61599 md_setup_args[ent].device_names);
61600
61601 - fd = sys_open(name, 0, 0);
61602 + fd = sys_open((__force char __user *)name, 0, 0);
61603 if (fd < 0) {
61604 printk(KERN_ERR "md: open failed - cannot start "
61605 "array %s\n", name);
61606 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
61607 * array without it
61608 */
61609 sys_close(fd);
61610 - fd = sys_open(name, 0, 0);
61611 + fd = sys_open((__force char __user *)name, 0, 0);
61612 sys_ioctl(fd, BLKRRPART, 0);
61613 }
61614 sys_close(fd);
61615 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
61616
61617 wait_for_device_probe();
61618
61619 - fd = sys_open("/dev/md0", 0, 0);
61620 + fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
61621 if (fd >= 0) {
61622 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
61623 sys_close(fd);
61624 diff -urNp linux-2.6.32.45/init/initramfs.c linux-2.6.32.45/init/initramfs.c
61625 --- linux-2.6.32.45/init/initramfs.c 2011-03-27 14:31:47.000000000 -0400
61626 +++ linux-2.6.32.45/init/initramfs.c 2011-04-17 15:56:46.000000000 -0400
61627 @@ -74,7 +74,7 @@ static void __init free_hash(void)
61628 }
61629 }
61630
61631 -static long __init do_utime(char __user *filename, time_t mtime)
61632 +static long __init do_utime(__force char __user *filename, time_t mtime)
61633 {
61634 struct timespec t[2];
61635
61636 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
61637 struct dir_entry *de, *tmp;
61638 list_for_each_entry_safe(de, tmp, &dir_list, list) {
61639 list_del(&de->list);
61640 - do_utime(de->name, de->mtime);
61641 + do_utime((__force char __user *)de->name, de->mtime);
61642 kfree(de->name);
61643 kfree(de);
61644 }
61645 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
61646 if (nlink >= 2) {
61647 char *old = find_link(major, minor, ino, mode, collected);
61648 if (old)
61649 - return (sys_link(old, collected) < 0) ? -1 : 1;
61650 + return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
61651 }
61652 return 0;
61653 }
61654 @@ -280,11 +280,11 @@ static void __init clean_path(char *path
61655 {
61656 struct stat st;
61657
61658 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
61659 + if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
61660 if (S_ISDIR(st.st_mode))
61661 - sys_rmdir(path);
61662 + sys_rmdir((__force char __user *)path);
61663 else
61664 - sys_unlink(path);
61665 + sys_unlink((__force char __user *)path);
61666 }
61667 }
61668
61669 @@ -305,7 +305,7 @@ static int __init do_name(void)
61670 int openflags = O_WRONLY|O_CREAT;
61671 if (ml != 1)
61672 openflags |= O_TRUNC;
61673 - wfd = sys_open(collected, openflags, mode);
61674 + wfd = sys_open((__force char __user *)collected, openflags, mode);
61675
61676 if (wfd >= 0) {
61677 sys_fchown(wfd, uid, gid);
61678 @@ -317,17 +317,17 @@ static int __init do_name(void)
61679 }
61680 }
61681 } else if (S_ISDIR(mode)) {
61682 - sys_mkdir(collected, mode);
61683 - sys_chown(collected, uid, gid);
61684 - sys_chmod(collected, mode);
61685 + sys_mkdir((__force char __user *)collected, mode);
61686 + sys_chown((__force char __user *)collected, uid, gid);
61687 + sys_chmod((__force char __user *)collected, mode);
61688 dir_add(collected, mtime);
61689 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
61690 S_ISFIFO(mode) || S_ISSOCK(mode)) {
61691 if (maybe_link() == 0) {
61692 - sys_mknod(collected, mode, rdev);
61693 - sys_chown(collected, uid, gid);
61694 - sys_chmod(collected, mode);
61695 - do_utime(collected, mtime);
61696 + sys_mknod((__force char __user *)collected, mode, rdev);
61697 + sys_chown((__force char __user *)collected, uid, gid);
61698 + sys_chmod((__force char __user *)collected, mode);
61699 + do_utime((__force char __user *)collected, mtime);
61700 }
61701 }
61702 return 0;
61703 @@ -336,15 +336,15 @@ static int __init do_name(void)
61704 static int __init do_copy(void)
61705 {
61706 if (count >= body_len) {
61707 - sys_write(wfd, victim, body_len);
61708 + sys_write(wfd, (__force char __user *)victim, body_len);
61709 sys_close(wfd);
61710 - do_utime(vcollected, mtime);
61711 + do_utime((__force char __user *)vcollected, mtime);
61712 kfree(vcollected);
61713 eat(body_len);
61714 state = SkipIt;
61715 return 0;
61716 } else {
61717 - sys_write(wfd, victim, count);
61718 + sys_write(wfd, (__force char __user *)victim, count);
61719 body_len -= count;
61720 eat(count);
61721 return 1;
61722 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
61723 {
61724 collected[N_ALIGN(name_len) + body_len] = '\0';
61725 clean_path(collected, 0);
61726 - sys_symlink(collected + N_ALIGN(name_len), collected);
61727 - sys_lchown(collected, uid, gid);
61728 - do_utime(collected, mtime);
61729 + sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
61730 + sys_lchown((__force char __user *)collected, uid, gid);
61731 + do_utime((__force char __user *)collected, mtime);
61732 state = SkipIt;
61733 next_state = Reset;
61734 return 0;
61735 diff -urNp linux-2.6.32.45/init/Kconfig linux-2.6.32.45/init/Kconfig
61736 --- linux-2.6.32.45/init/Kconfig 2011-05-10 22:12:01.000000000 -0400
61737 +++ linux-2.6.32.45/init/Kconfig 2011-05-10 22:12:34.000000000 -0400
61738 @@ -1004,7 +1004,7 @@ config SLUB_DEBUG
61739
61740 config COMPAT_BRK
61741 bool "Disable heap randomization"
61742 - default y
61743 + default n
61744 help
61745 Randomizing heap placement makes heap exploits harder, but it
61746 also breaks ancient binaries (including anything libc5 based).
61747 diff -urNp linux-2.6.32.45/init/main.c linux-2.6.32.45/init/main.c
61748 --- linux-2.6.32.45/init/main.c 2011-05-10 22:12:01.000000000 -0400
61749 +++ linux-2.6.32.45/init/main.c 2011-08-05 20:33:55.000000000 -0400
61750 @@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void)
61751 #ifdef CONFIG_TC
61752 extern void tc_init(void);
61753 #endif
61754 +extern void grsecurity_init(void);
61755
61756 enum system_states system_state __read_mostly;
61757 EXPORT_SYMBOL(system_state);
61758 @@ -183,6 +184,49 @@ static int __init set_reset_devices(char
61759
61760 __setup("reset_devices", set_reset_devices);
61761
61762 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
61763 +extern char pax_enter_kernel_user[];
61764 +extern char pax_exit_kernel_user[];
61765 +extern pgdval_t clone_pgd_mask;
61766 +#endif
61767 +
61768 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
61769 +static int __init setup_pax_nouderef(char *str)
61770 +{
61771 +#ifdef CONFIG_X86_32
61772 + unsigned int cpu;
61773 + struct desc_struct *gdt;
61774 +
61775 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
61776 + gdt = get_cpu_gdt_table(cpu);
61777 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
61778 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
61779 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
61780 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
61781 + }
61782 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
61783 +#else
61784 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
61785 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
61786 + clone_pgd_mask = ~(pgdval_t)0UL;
61787 +#endif
61788 +
61789 + return 0;
61790 +}
61791 +early_param("pax_nouderef", setup_pax_nouderef);
61792 +#endif
61793 +
61794 +#ifdef CONFIG_PAX_SOFTMODE
61795 +int pax_softmode;
61796 +
61797 +static int __init setup_pax_softmode(char *str)
61798 +{
61799 + get_option(&str, &pax_softmode);
61800 + return 1;
61801 +}
61802 +__setup("pax_softmode=", setup_pax_softmode);
61803 +#endif
61804 +
61805 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
61806 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
61807 static const char *panic_later, *panic_param;
61808 @@ -705,52 +749,53 @@ int initcall_debug;
61809 core_param(initcall_debug, initcall_debug, bool, 0644);
61810
61811 static char msgbuf[64];
61812 -static struct boot_trace_call call;
61813 -static struct boot_trace_ret ret;
61814 +static struct boot_trace_call trace_call;
61815 +static struct boot_trace_ret trace_ret;
61816
61817 int do_one_initcall(initcall_t fn)
61818 {
61819 int count = preempt_count();
61820 ktime_t calltime, delta, rettime;
61821 + const char *msg1 = "", *msg2 = "";
61822
61823 if (initcall_debug) {
61824 - call.caller = task_pid_nr(current);
61825 - printk("calling %pF @ %i\n", fn, call.caller);
61826 + trace_call.caller = task_pid_nr(current);
61827 + printk("calling %pF @ %i\n", fn, trace_call.caller);
61828 calltime = ktime_get();
61829 - trace_boot_call(&call, fn);
61830 + trace_boot_call(&trace_call, fn);
61831 enable_boot_trace();
61832 }
61833
61834 - ret.result = fn();
61835 + trace_ret.result = fn();
61836
61837 if (initcall_debug) {
61838 disable_boot_trace();
61839 rettime = ktime_get();
61840 delta = ktime_sub(rettime, calltime);
61841 - ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
61842 - trace_boot_ret(&ret, fn);
61843 + trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
61844 + trace_boot_ret(&trace_ret, fn);
61845 printk("initcall %pF returned %d after %Ld usecs\n", fn,
61846 - ret.result, ret.duration);
61847 + trace_ret.result, trace_ret.duration);
61848 }
61849
61850 msgbuf[0] = 0;
61851
61852 - if (ret.result && ret.result != -ENODEV && initcall_debug)
61853 - sprintf(msgbuf, "error code %d ", ret.result);
61854 + if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
61855 + sprintf(msgbuf, "error code %d ", trace_ret.result);
61856
61857 if (preempt_count() != count) {
61858 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
61859 + msg1 = " preemption imbalance";
61860 preempt_count() = count;
61861 }
61862 if (irqs_disabled()) {
61863 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
61864 + msg2 = " disabled interrupts";
61865 local_irq_enable();
61866 }
61867 - if (msgbuf[0]) {
61868 - printk("initcall %pF returned with %s\n", fn, msgbuf);
61869 + if (msgbuf[0] || *msg1 || *msg2) {
61870 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
61871 }
61872
61873 - return ret.result;
61874 + return trace_ret.result;
61875 }
61876
61877
61878 @@ -893,11 +938,13 @@ static int __init kernel_init(void * unu
61879 if (!ramdisk_execute_command)
61880 ramdisk_execute_command = "/init";
61881
61882 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
61883 + if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
61884 ramdisk_execute_command = NULL;
61885 prepare_namespace();
61886 }
61887
61888 + grsecurity_init();
61889 +
61890 /*
61891 * Ok, we have completed the initial bootup, and
61892 * we're essentially up and running. Get rid of the
61893 diff -urNp linux-2.6.32.45/init/noinitramfs.c linux-2.6.32.45/init/noinitramfs.c
61894 --- linux-2.6.32.45/init/noinitramfs.c 2011-03-27 14:31:47.000000000 -0400
61895 +++ linux-2.6.32.45/init/noinitramfs.c 2011-04-17 15:56:46.000000000 -0400
61896 @@ -29,7 +29,7 @@ static int __init default_rootfs(void)
61897 {
61898 int err;
61899
61900 - err = sys_mkdir("/dev", 0755);
61901 + err = sys_mkdir((const char __user *)"/dev", 0755);
61902 if (err < 0)
61903 goto out;
61904
61905 @@ -39,7 +39,7 @@ static int __init default_rootfs(void)
61906 if (err < 0)
61907 goto out;
61908
61909 - err = sys_mkdir("/root", 0700);
61910 + err = sys_mkdir((const char __user *)"/root", 0700);
61911 if (err < 0)
61912 goto out;
61913
61914 diff -urNp linux-2.6.32.45/ipc/mqueue.c linux-2.6.32.45/ipc/mqueue.c
61915 --- linux-2.6.32.45/ipc/mqueue.c 2011-03-27 14:31:47.000000000 -0400
61916 +++ linux-2.6.32.45/ipc/mqueue.c 2011-04-17 15:56:46.000000000 -0400
61917 @@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(st
61918 mq_bytes = (mq_msg_tblsz +
61919 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
61920
61921 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
61922 spin_lock(&mq_lock);
61923 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
61924 u->mq_bytes + mq_bytes >
61925 diff -urNp linux-2.6.32.45/ipc/msg.c linux-2.6.32.45/ipc/msg.c
61926 --- linux-2.6.32.45/ipc/msg.c 2011-03-27 14:31:47.000000000 -0400
61927 +++ linux-2.6.32.45/ipc/msg.c 2011-08-05 20:33:55.000000000 -0400
61928 @@ -310,18 +310,19 @@ static inline int msg_security(struct ke
61929 return security_msg_queue_associate(msq, msgflg);
61930 }
61931
61932 +static struct ipc_ops msg_ops = {
61933 + .getnew = newque,
61934 + .associate = msg_security,
61935 + .more_checks = NULL
61936 +};
61937 +
61938 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
61939 {
61940 struct ipc_namespace *ns;
61941 - struct ipc_ops msg_ops;
61942 struct ipc_params msg_params;
61943
61944 ns = current->nsproxy->ipc_ns;
61945
61946 - msg_ops.getnew = newque;
61947 - msg_ops.associate = msg_security;
61948 - msg_ops.more_checks = NULL;
61949 -
61950 msg_params.key = key;
61951 msg_params.flg = msgflg;
61952
61953 diff -urNp linux-2.6.32.45/ipc/sem.c linux-2.6.32.45/ipc/sem.c
61954 --- linux-2.6.32.45/ipc/sem.c 2011-03-27 14:31:47.000000000 -0400
61955 +++ linux-2.6.32.45/ipc/sem.c 2011-08-05 20:33:55.000000000 -0400
61956 @@ -309,10 +309,15 @@ static inline int sem_more_checks(struct
61957 return 0;
61958 }
61959
61960 +static struct ipc_ops sem_ops = {
61961 + .getnew = newary,
61962 + .associate = sem_security,
61963 + .more_checks = sem_more_checks
61964 +};
61965 +
61966 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
61967 {
61968 struct ipc_namespace *ns;
61969 - struct ipc_ops sem_ops;
61970 struct ipc_params sem_params;
61971
61972 ns = current->nsproxy->ipc_ns;
61973 @@ -320,10 +325,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
61974 if (nsems < 0 || nsems > ns->sc_semmsl)
61975 return -EINVAL;
61976
61977 - sem_ops.getnew = newary;
61978 - sem_ops.associate = sem_security;
61979 - sem_ops.more_checks = sem_more_checks;
61980 -
61981 sem_params.key = key;
61982 sem_params.flg = semflg;
61983 sem_params.u.nsems = nsems;
61984 @@ -671,6 +672,8 @@ static int semctl_main(struct ipc_namesp
61985 ushort* sem_io = fast_sem_io;
61986 int nsems;
61987
61988 + pax_track_stack();
61989 +
61990 sma = sem_lock_check(ns, semid);
61991 if (IS_ERR(sma))
61992 return PTR_ERR(sma);
61993 @@ -1071,6 +1074,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
61994 unsigned long jiffies_left = 0;
61995 struct ipc_namespace *ns;
61996
61997 + pax_track_stack();
61998 +
61999 ns = current->nsproxy->ipc_ns;
62000
62001 if (nsops < 1 || semid < 0)
62002 diff -urNp linux-2.6.32.45/ipc/shm.c linux-2.6.32.45/ipc/shm.c
62003 --- linux-2.6.32.45/ipc/shm.c 2011-03-27 14:31:47.000000000 -0400
62004 +++ linux-2.6.32.45/ipc/shm.c 2011-08-05 20:33:55.000000000 -0400
62005 @@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_name
62006 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
62007 #endif
62008
62009 +#ifdef CONFIG_GRKERNSEC
62010 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62011 + const time_t shm_createtime, const uid_t cuid,
62012 + const int shmid);
62013 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62014 + const time_t shm_createtime);
62015 +#endif
62016 +
62017 void shm_init_ns(struct ipc_namespace *ns)
62018 {
62019 ns->shm_ctlmax = SHMMAX;
62020 @@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *
62021 shp->shm_lprid = 0;
62022 shp->shm_atim = shp->shm_dtim = 0;
62023 shp->shm_ctim = get_seconds();
62024 +#ifdef CONFIG_GRKERNSEC
62025 + {
62026 + struct timespec timeval;
62027 + do_posix_clock_monotonic_gettime(&timeval);
62028 +
62029 + shp->shm_createtime = timeval.tv_sec;
62030 + }
62031 +#endif
62032 shp->shm_segsz = size;
62033 shp->shm_nattch = 0;
62034 shp->shm_file = file;
62035 @@ -446,18 +462,19 @@ static inline int shm_more_checks(struct
62036 return 0;
62037 }
62038
62039 +static struct ipc_ops shm_ops = {
62040 + .getnew = newseg,
62041 + .associate = shm_security,
62042 + .more_checks = shm_more_checks
62043 +};
62044 +
62045 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
62046 {
62047 struct ipc_namespace *ns;
62048 - struct ipc_ops shm_ops;
62049 struct ipc_params shm_params;
62050
62051 ns = current->nsproxy->ipc_ns;
62052
62053 - shm_ops.getnew = newseg;
62054 - shm_ops.associate = shm_security;
62055 - shm_ops.more_checks = shm_more_checks;
62056 -
62057 shm_params.key = key;
62058 shm_params.flg = shmflg;
62059 shm_params.u.size = size;
62060 @@ -880,9 +897,21 @@ long do_shmat(int shmid, char __user *sh
62061 if (err)
62062 goto out_unlock;
62063
62064 +#ifdef CONFIG_GRKERNSEC
62065 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
62066 + shp->shm_perm.cuid, shmid) ||
62067 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
62068 + err = -EACCES;
62069 + goto out_unlock;
62070 + }
62071 +#endif
62072 +
62073 path.dentry = dget(shp->shm_file->f_path.dentry);
62074 path.mnt = shp->shm_file->f_path.mnt;
62075 shp->shm_nattch++;
62076 +#ifdef CONFIG_GRKERNSEC
62077 + shp->shm_lapid = current->pid;
62078 +#endif
62079 size = i_size_read(path.dentry->d_inode);
62080 shm_unlock(shp);
62081
62082 diff -urNp linux-2.6.32.45/kernel/acct.c linux-2.6.32.45/kernel/acct.c
62083 --- linux-2.6.32.45/kernel/acct.c 2011-03-27 14:31:47.000000000 -0400
62084 +++ linux-2.6.32.45/kernel/acct.c 2011-04-17 15:56:46.000000000 -0400
62085 @@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_a
62086 */
62087 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
62088 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
62089 - file->f_op->write(file, (char *)&ac,
62090 + file->f_op->write(file, (__force char __user *)&ac,
62091 sizeof(acct_t), &file->f_pos);
62092 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
62093 set_fs(fs);
62094 diff -urNp linux-2.6.32.45/kernel/audit.c linux-2.6.32.45/kernel/audit.c
62095 --- linux-2.6.32.45/kernel/audit.c 2011-03-27 14:31:47.000000000 -0400
62096 +++ linux-2.6.32.45/kernel/audit.c 2011-05-04 17:56:28.000000000 -0400
62097 @@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
62098 3) suppressed due to audit_rate_limit
62099 4) suppressed due to audit_backlog_limit
62100 */
62101 -static atomic_t audit_lost = ATOMIC_INIT(0);
62102 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
62103
62104 /* The netlink socket. */
62105 static struct sock *audit_sock;
62106 @@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
62107 unsigned long now;
62108 int print;
62109
62110 - atomic_inc(&audit_lost);
62111 + atomic_inc_unchecked(&audit_lost);
62112
62113 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
62114
62115 @@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
62116 printk(KERN_WARNING
62117 "audit: audit_lost=%d audit_rate_limit=%d "
62118 "audit_backlog_limit=%d\n",
62119 - atomic_read(&audit_lost),
62120 + atomic_read_unchecked(&audit_lost),
62121 audit_rate_limit,
62122 audit_backlog_limit);
62123 audit_panic(message);
62124 @@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_b
62125 status_set.pid = audit_pid;
62126 status_set.rate_limit = audit_rate_limit;
62127 status_set.backlog_limit = audit_backlog_limit;
62128 - status_set.lost = atomic_read(&audit_lost);
62129 + status_set.lost = atomic_read_unchecked(&audit_lost);
62130 status_set.backlog = skb_queue_len(&audit_skb_queue);
62131 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
62132 &status_set, sizeof(status_set));
62133 @@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_b
62134 spin_unlock_irq(&tsk->sighand->siglock);
62135 }
62136 read_unlock(&tasklist_lock);
62137 - audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
62138 - &s, sizeof(s));
62139 +
62140 + if (!err)
62141 + audit_send_reply(NETLINK_CB(skb).pid, seq,
62142 + AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
62143 break;
62144 }
62145 case AUDIT_TTY_SET: {
62146 diff -urNp linux-2.6.32.45/kernel/auditsc.c linux-2.6.32.45/kernel/auditsc.c
62147 --- linux-2.6.32.45/kernel/auditsc.c 2011-03-27 14:31:47.000000000 -0400
62148 +++ linux-2.6.32.45/kernel/auditsc.c 2011-05-04 17:56:28.000000000 -0400
62149 @@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_conte
62150 }
62151
62152 /* global counter which is incremented every time something logs in */
62153 -static atomic_t session_id = ATOMIC_INIT(0);
62154 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
62155
62156 /**
62157 * audit_set_loginuid - set a task's audit_context loginuid
62158 @@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT
62159 */
62160 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
62161 {
62162 - unsigned int sessionid = atomic_inc_return(&session_id);
62163 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
62164 struct audit_context *context = task->audit_context;
62165
62166 if (context && context->in_syscall) {
62167 diff -urNp linux-2.6.32.45/kernel/capability.c linux-2.6.32.45/kernel/capability.c
62168 --- linux-2.6.32.45/kernel/capability.c 2011-03-27 14:31:47.000000000 -0400
62169 +++ linux-2.6.32.45/kernel/capability.c 2011-04-17 15:56:46.000000000 -0400
62170 @@ -305,10 +305,26 @@ int capable(int cap)
62171 BUG();
62172 }
62173
62174 - if (security_capable(cap) == 0) {
62175 + if (security_capable(cap) == 0 && gr_is_capable(cap)) {
62176 current->flags |= PF_SUPERPRIV;
62177 return 1;
62178 }
62179 return 0;
62180 }
62181 +
62182 +int capable_nolog(int cap)
62183 +{
62184 + if (unlikely(!cap_valid(cap))) {
62185 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
62186 + BUG();
62187 + }
62188 +
62189 + if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
62190 + current->flags |= PF_SUPERPRIV;
62191 + return 1;
62192 + }
62193 + return 0;
62194 +}
62195 +
62196 EXPORT_SYMBOL(capable);
62197 +EXPORT_SYMBOL(capable_nolog);
62198 diff -urNp linux-2.6.32.45/kernel/cgroup.c linux-2.6.32.45/kernel/cgroup.c
62199 --- linux-2.6.32.45/kernel/cgroup.c 2011-03-27 14:31:47.000000000 -0400
62200 +++ linux-2.6.32.45/kernel/cgroup.c 2011-05-16 21:46:57.000000000 -0400
62201 @@ -536,6 +536,8 @@ static struct css_set *find_css_set(
62202 struct hlist_head *hhead;
62203 struct cg_cgroup_link *link;
62204
62205 + pax_track_stack();
62206 +
62207 /* First see if we already have a cgroup group that matches
62208 * the desired set */
62209 read_lock(&css_set_lock);
62210 diff -urNp linux-2.6.32.45/kernel/configs.c linux-2.6.32.45/kernel/configs.c
62211 --- linux-2.6.32.45/kernel/configs.c 2011-03-27 14:31:47.000000000 -0400
62212 +++ linux-2.6.32.45/kernel/configs.c 2011-04-17 15:56:46.000000000 -0400
62213 @@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
62214 struct proc_dir_entry *entry;
62215
62216 /* create the current config file */
62217 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
62218 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
62219 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
62220 + &ikconfig_file_ops);
62221 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62222 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
62223 + &ikconfig_file_ops);
62224 +#endif
62225 +#else
62226 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
62227 &ikconfig_file_ops);
62228 +#endif
62229 +
62230 if (!entry)
62231 return -ENOMEM;
62232
62233 diff -urNp linux-2.6.32.45/kernel/cpu.c linux-2.6.32.45/kernel/cpu.c
62234 --- linux-2.6.32.45/kernel/cpu.c 2011-03-27 14:31:47.000000000 -0400
62235 +++ linux-2.6.32.45/kernel/cpu.c 2011-04-17 15:56:46.000000000 -0400
62236 @@ -19,7 +19,7 @@
62237 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
62238 static DEFINE_MUTEX(cpu_add_remove_lock);
62239
62240 -static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
62241 +static RAW_NOTIFIER_HEAD(cpu_chain);
62242
62243 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
62244 * Should always be manipulated under cpu_add_remove_lock
62245 diff -urNp linux-2.6.32.45/kernel/cred.c linux-2.6.32.45/kernel/cred.c
62246 --- linux-2.6.32.45/kernel/cred.c 2011-03-27 14:31:47.000000000 -0400
62247 +++ linux-2.6.32.45/kernel/cred.c 2011-08-11 19:49:38.000000000 -0400
62248 @@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head
62249 */
62250 void __put_cred(struct cred *cred)
62251 {
62252 + pax_track_stack();
62253 +
62254 kdebug("__put_cred(%p{%d,%d})", cred,
62255 atomic_read(&cred->usage),
62256 read_cred_subscribers(cred));
62257 @@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
62258 {
62259 struct cred *cred;
62260
62261 + pax_track_stack();
62262 +
62263 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
62264 atomic_read(&tsk->cred->usage),
62265 read_cred_subscribers(tsk->cred));
62266 @@ -222,6 +226,8 @@ const struct cred *get_task_cred(struct
62267 {
62268 const struct cred *cred;
62269
62270 + pax_track_stack();
62271 +
62272 rcu_read_lock();
62273
62274 do {
62275 @@ -241,6 +247,8 @@ struct cred *cred_alloc_blank(void)
62276 {
62277 struct cred *new;
62278
62279 + pax_track_stack();
62280 +
62281 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
62282 if (!new)
62283 return NULL;
62284 @@ -289,6 +297,8 @@ struct cred *prepare_creds(void)
62285 const struct cred *old;
62286 struct cred *new;
62287
62288 + pax_track_stack();
62289 +
62290 validate_process_creds();
62291
62292 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62293 @@ -335,6 +345,8 @@ struct cred *prepare_exec_creds(void)
62294 struct thread_group_cred *tgcred = NULL;
62295 struct cred *new;
62296
62297 + pax_track_stack();
62298 +
62299 #ifdef CONFIG_KEYS
62300 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
62301 if (!tgcred)
62302 @@ -441,6 +453,8 @@ int copy_creds(struct task_struct *p, un
62303 struct cred *new;
62304 int ret;
62305
62306 + pax_track_stack();
62307 +
62308 mutex_init(&p->cred_guard_mutex);
62309
62310 if (
62311 @@ -528,6 +542,8 @@ int commit_creds(struct cred *new)
62312 struct task_struct *task = current;
62313 const struct cred *old = task->real_cred;
62314
62315 + pax_track_stack();
62316 +
62317 kdebug("commit_creds(%p{%d,%d})", new,
62318 atomic_read(&new->usage),
62319 read_cred_subscribers(new));
62320 @@ -544,6 +560,8 @@ int commit_creds(struct cred *new)
62321
62322 get_cred(new); /* we will require a ref for the subj creds too */
62323
62324 + gr_set_role_label(task, new->uid, new->gid);
62325 +
62326 /* dumpability changes */
62327 if (old->euid != new->euid ||
62328 old->egid != new->egid ||
62329 @@ -563,10 +581,8 @@ int commit_creds(struct cred *new)
62330 key_fsgid_changed(task);
62331
62332 /* do it
62333 - * - What if a process setreuid()'s and this brings the
62334 - * new uid over his NPROC rlimit? We can check this now
62335 - * cheaply with the new uid cache, so if it matters
62336 - * we should be checking for it. -DaveM
62337 + * RLIMIT_NPROC limits on user->processes have already been checked
62338 + * in set_user().
62339 */
62340 alter_cred_subscribers(new, 2);
62341 if (new->user != old->user)
62342 @@ -606,6 +622,8 @@ EXPORT_SYMBOL(commit_creds);
62343 */
62344 void abort_creds(struct cred *new)
62345 {
62346 + pax_track_stack();
62347 +
62348 kdebug("abort_creds(%p{%d,%d})", new,
62349 atomic_read(&new->usage),
62350 read_cred_subscribers(new));
62351 @@ -629,6 +647,8 @@ const struct cred *override_creds(const
62352 {
62353 const struct cred *old = current->cred;
62354
62355 + pax_track_stack();
62356 +
62357 kdebug("override_creds(%p{%d,%d})", new,
62358 atomic_read(&new->usage),
62359 read_cred_subscribers(new));
62360 @@ -658,6 +678,8 @@ void revert_creds(const struct cred *old
62361 {
62362 const struct cred *override = current->cred;
62363
62364 + pax_track_stack();
62365 +
62366 kdebug("revert_creds(%p{%d,%d})", old,
62367 atomic_read(&old->usage),
62368 read_cred_subscribers(old));
62369 @@ -704,6 +726,8 @@ struct cred *prepare_kernel_cred(struct
62370 const struct cred *old;
62371 struct cred *new;
62372
62373 + pax_track_stack();
62374 +
62375 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62376 if (!new)
62377 return NULL;
62378 @@ -758,6 +782,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
62379 */
62380 int set_security_override(struct cred *new, u32 secid)
62381 {
62382 + pax_track_stack();
62383 +
62384 return security_kernel_act_as(new, secid);
62385 }
62386 EXPORT_SYMBOL(set_security_override);
62387 @@ -777,6 +803,8 @@ int set_security_override_from_ctx(struc
62388 u32 secid;
62389 int ret;
62390
62391 + pax_track_stack();
62392 +
62393 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
62394 if (ret < 0)
62395 return ret;
62396 diff -urNp linux-2.6.32.45/kernel/exit.c linux-2.6.32.45/kernel/exit.c
62397 --- linux-2.6.32.45/kernel/exit.c 2011-03-27 14:31:47.000000000 -0400
62398 +++ linux-2.6.32.45/kernel/exit.c 2011-08-17 19:19:50.000000000 -0400
62399 @@ -55,6 +55,10 @@
62400 #include <asm/pgtable.h>
62401 #include <asm/mmu_context.h>
62402
62403 +#ifdef CONFIG_GRKERNSEC
62404 +extern rwlock_t grsec_exec_file_lock;
62405 +#endif
62406 +
62407 static void exit_mm(struct task_struct * tsk);
62408
62409 static void __unhash_process(struct task_struct *p)
62410 @@ -174,6 +178,10 @@ void release_task(struct task_struct * p
62411 struct task_struct *leader;
62412 int zap_leader;
62413 repeat:
62414 +#ifdef CONFIG_NET
62415 + gr_del_task_from_ip_table(p);
62416 +#endif
62417 +
62418 tracehook_prepare_release_task(p);
62419 /* don't need to get the RCU readlock here - the process is dead and
62420 * can't be modifying its own credentials */
62421 @@ -341,11 +349,22 @@ static void reparent_to_kthreadd(void)
62422 {
62423 write_lock_irq(&tasklist_lock);
62424
62425 +#ifdef CONFIG_GRKERNSEC
62426 + write_lock(&grsec_exec_file_lock);
62427 + if (current->exec_file) {
62428 + fput(current->exec_file);
62429 + current->exec_file = NULL;
62430 + }
62431 + write_unlock(&grsec_exec_file_lock);
62432 +#endif
62433 +
62434 ptrace_unlink(current);
62435 /* Reparent to init */
62436 current->real_parent = current->parent = kthreadd_task;
62437 list_move_tail(&current->sibling, &current->real_parent->children);
62438
62439 + gr_set_kernel_label(current);
62440 +
62441 /* Set the exit signal to SIGCHLD so we signal init on exit */
62442 current->exit_signal = SIGCHLD;
62443
62444 @@ -397,7 +416,7 @@ int allow_signal(int sig)
62445 * know it'll be handled, so that they don't get converted to
62446 * SIGKILL or just silently dropped.
62447 */
62448 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
62449 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
62450 recalc_sigpending();
62451 spin_unlock_irq(&current->sighand->siglock);
62452 return 0;
62453 @@ -433,6 +452,17 @@ void daemonize(const char *name, ...)
62454 vsnprintf(current->comm, sizeof(current->comm), name, args);
62455 va_end(args);
62456
62457 +#ifdef CONFIG_GRKERNSEC
62458 + write_lock(&grsec_exec_file_lock);
62459 + if (current->exec_file) {
62460 + fput(current->exec_file);
62461 + current->exec_file = NULL;
62462 + }
62463 + write_unlock(&grsec_exec_file_lock);
62464 +#endif
62465 +
62466 + gr_set_kernel_label(current);
62467 +
62468 /*
62469 * If we were started as result of loading a module, close all of the
62470 * user space pages. We don't need them, and if we didn't close them
62471 @@ -897,17 +927,17 @@ NORET_TYPE void do_exit(long code)
62472 struct task_struct *tsk = current;
62473 int group_dead;
62474
62475 - profile_task_exit(tsk);
62476 -
62477 - WARN_ON(atomic_read(&tsk->fs_excl));
62478 -
62479 + /*
62480 + * Check this first since set_fs() below depends on
62481 + * current_thread_info(), which we better not access when we're in
62482 + * interrupt context. Other than that, we want to do the set_fs()
62483 + * as early as possible.
62484 + */
62485 if (unlikely(in_interrupt()))
62486 panic("Aiee, killing interrupt handler!");
62487 - if (unlikely(!tsk->pid))
62488 - panic("Attempted to kill the idle task!");
62489
62490 /*
62491 - * If do_exit is called because this processes oopsed, it's possible
62492 + * If do_exit is called because this processes Oops'ed, it's possible
62493 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
62494 * continuing. Amongst other possible reasons, this is to prevent
62495 * mm_release()->clear_child_tid() from writing to a user-controlled
62496 @@ -915,6 +945,13 @@ NORET_TYPE void do_exit(long code)
62497 */
62498 set_fs(USER_DS);
62499
62500 + profile_task_exit(tsk);
62501 +
62502 + WARN_ON(atomic_read(&tsk->fs_excl));
62503 +
62504 + if (unlikely(!tsk->pid))
62505 + panic("Attempted to kill the idle task!");
62506 +
62507 tracehook_report_exit(&code);
62508
62509 validate_creds_for_do_exit(tsk);
62510 @@ -973,6 +1010,9 @@ NORET_TYPE void do_exit(long code)
62511 tsk->exit_code = code;
62512 taskstats_exit(tsk, group_dead);
62513
62514 + gr_acl_handle_psacct(tsk, code);
62515 + gr_acl_handle_exit();
62516 +
62517 exit_mm(tsk);
62518
62519 if (group_dead)
62520 @@ -1188,7 +1228,7 @@ static int wait_task_zombie(struct wait_
62521
62522 if (unlikely(wo->wo_flags & WNOWAIT)) {
62523 int exit_code = p->exit_code;
62524 - int why, status;
62525 + int why;
62526
62527 get_task_struct(p);
62528 read_unlock(&tasklist_lock);
62529 diff -urNp linux-2.6.32.45/kernel/fork.c linux-2.6.32.45/kernel/fork.c
62530 --- linux-2.6.32.45/kernel/fork.c 2011-03-27 14:31:47.000000000 -0400
62531 +++ linux-2.6.32.45/kernel/fork.c 2011-08-11 19:50:07.000000000 -0400
62532 @@ -253,7 +253,7 @@ static struct task_struct *dup_task_stru
62533 *stackend = STACK_END_MAGIC; /* for overflow detection */
62534
62535 #ifdef CONFIG_CC_STACKPROTECTOR
62536 - tsk->stack_canary = get_random_int();
62537 + tsk->stack_canary = pax_get_random_long();
62538 #endif
62539
62540 /* One for us, one for whoever does the "release_task()" (usually parent) */
62541 @@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm
62542 mm->locked_vm = 0;
62543 mm->mmap = NULL;
62544 mm->mmap_cache = NULL;
62545 - mm->free_area_cache = oldmm->mmap_base;
62546 - mm->cached_hole_size = ~0UL;
62547 + mm->free_area_cache = oldmm->free_area_cache;
62548 + mm->cached_hole_size = oldmm->cached_hole_size;
62549 mm->map_count = 0;
62550 cpumask_clear(mm_cpumask(mm));
62551 mm->mm_rb = RB_ROOT;
62552 @@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm
62553 tmp->vm_flags &= ~VM_LOCKED;
62554 tmp->vm_mm = mm;
62555 tmp->vm_next = tmp->vm_prev = NULL;
62556 + tmp->vm_mirror = NULL;
62557 anon_vma_link(tmp);
62558 file = tmp->vm_file;
62559 if (file) {
62560 @@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm
62561 if (retval)
62562 goto out;
62563 }
62564 +
62565 +#ifdef CONFIG_PAX_SEGMEXEC
62566 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
62567 + struct vm_area_struct *mpnt_m;
62568 +
62569 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
62570 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
62571 +
62572 + if (!mpnt->vm_mirror)
62573 + continue;
62574 +
62575 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
62576 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
62577 + mpnt->vm_mirror = mpnt_m;
62578 + } else {
62579 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
62580 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
62581 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
62582 + mpnt->vm_mirror->vm_mirror = mpnt;
62583 + }
62584 + }
62585 + BUG_ON(mpnt_m);
62586 + }
62587 +#endif
62588 +
62589 /* a new mm has just been created */
62590 arch_dup_mmap(oldmm, mm);
62591 retval = 0;
62592 @@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_f
62593 write_unlock(&fs->lock);
62594 return -EAGAIN;
62595 }
62596 - fs->users++;
62597 + atomic_inc(&fs->users);
62598 write_unlock(&fs->lock);
62599 return 0;
62600 }
62601 tsk->fs = copy_fs_struct(fs);
62602 if (!tsk->fs)
62603 return -ENOMEM;
62604 + gr_set_chroot_entries(tsk, &tsk->fs->root);
62605 return 0;
62606 }
62607
62608 @@ -1033,12 +1060,16 @@ static struct task_struct *copy_process(
62609 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
62610 #endif
62611 retval = -EAGAIN;
62612 +
62613 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
62614 +
62615 if (atomic_read(&p->real_cred->user->processes) >=
62616 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
62617 - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
62618 - p->real_cred->user != INIT_USER)
62619 + if (p->real_cred->user != INIT_USER &&
62620 + !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
62621 goto bad_fork_free;
62622 }
62623 + current->flags &= ~PF_NPROC_EXCEEDED;
62624
62625 retval = copy_creds(p, clone_flags);
62626 if (retval < 0)
62627 @@ -1183,6 +1214,8 @@ static struct task_struct *copy_process(
62628 goto bad_fork_free_pid;
62629 }
62630
62631 + gr_copy_label(p);
62632 +
62633 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
62634 /*
62635 * Clear TID on mm_release()?
62636 @@ -1333,6 +1366,8 @@ bad_fork_cleanup_count:
62637 bad_fork_free:
62638 free_task(p);
62639 fork_out:
62640 + gr_log_forkfail(retval);
62641 +
62642 return ERR_PTR(retval);
62643 }
62644
62645 @@ -1426,6 +1461,8 @@ long do_fork(unsigned long clone_flags,
62646 if (clone_flags & CLONE_PARENT_SETTID)
62647 put_user(nr, parent_tidptr);
62648
62649 + gr_handle_brute_check();
62650 +
62651 if (clone_flags & CLONE_VFORK) {
62652 p->vfork_done = &vfork;
62653 init_completion(&vfork);
62654 @@ -1558,7 +1595,7 @@ static int unshare_fs(unsigned long unsh
62655 return 0;
62656
62657 /* don't need lock here; in the worst case we'll do useless copy */
62658 - if (fs->users == 1)
62659 + if (atomic_read(&fs->users) == 1)
62660 return 0;
62661
62662 *new_fsp = copy_fs_struct(fs);
62663 @@ -1681,7 +1718,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
62664 fs = current->fs;
62665 write_lock(&fs->lock);
62666 current->fs = new_fs;
62667 - if (--fs->users)
62668 + gr_set_chroot_entries(current, &current->fs->root);
62669 + if (atomic_dec_return(&fs->users))
62670 new_fs = NULL;
62671 else
62672 new_fs = fs;
62673 diff -urNp linux-2.6.32.45/kernel/futex.c linux-2.6.32.45/kernel/futex.c
62674 --- linux-2.6.32.45/kernel/futex.c 2011-03-27 14:31:47.000000000 -0400
62675 +++ linux-2.6.32.45/kernel/futex.c 2011-05-16 21:46:57.000000000 -0400
62676 @@ -54,6 +54,7 @@
62677 #include <linux/mount.h>
62678 #include <linux/pagemap.h>
62679 #include <linux/syscalls.h>
62680 +#include <linux/ptrace.h>
62681 #include <linux/signal.h>
62682 #include <linux/module.h>
62683 #include <linux/magic.h>
62684 @@ -221,6 +222,11 @@ get_futex_key(u32 __user *uaddr, int fsh
62685 struct page *page;
62686 int err;
62687
62688 +#ifdef CONFIG_PAX_SEGMEXEC
62689 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
62690 + return -EFAULT;
62691 +#endif
62692 +
62693 /*
62694 * The futex address must be "naturally" aligned.
62695 */
62696 @@ -1789,6 +1795,8 @@ static int futex_wait(u32 __user *uaddr,
62697 struct futex_q q;
62698 int ret;
62699
62700 + pax_track_stack();
62701 +
62702 if (!bitset)
62703 return -EINVAL;
62704
62705 @@ -1841,7 +1849,7 @@ retry:
62706
62707 restart = &current_thread_info()->restart_block;
62708 restart->fn = futex_wait_restart;
62709 - restart->futex.uaddr = (u32 *)uaddr;
62710 + restart->futex.uaddr = uaddr;
62711 restart->futex.val = val;
62712 restart->futex.time = abs_time->tv64;
62713 restart->futex.bitset = bitset;
62714 @@ -2203,6 +2211,8 @@ static int futex_wait_requeue_pi(u32 __u
62715 struct futex_q q;
62716 int res, ret;
62717
62718 + pax_track_stack();
62719 +
62720 if (!bitset)
62721 return -EINVAL;
62722
62723 @@ -2377,7 +2387,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
62724 {
62725 struct robust_list_head __user *head;
62726 unsigned long ret;
62727 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
62728 const struct cred *cred = current_cred(), *pcred;
62729 +#endif
62730
62731 if (!futex_cmpxchg_enabled)
62732 return -ENOSYS;
62733 @@ -2393,11 +2405,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
62734 if (!p)
62735 goto err_unlock;
62736 ret = -EPERM;
62737 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62738 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
62739 + goto err_unlock;
62740 +#else
62741 pcred = __task_cred(p);
62742 if (cred->euid != pcred->euid &&
62743 cred->euid != pcred->uid &&
62744 !capable(CAP_SYS_PTRACE))
62745 goto err_unlock;
62746 +#endif
62747 head = p->robust_list;
62748 rcu_read_unlock();
62749 }
62750 @@ -2459,7 +2476,7 @@ retry:
62751 */
62752 static inline int fetch_robust_entry(struct robust_list __user **entry,
62753 struct robust_list __user * __user *head,
62754 - int *pi)
62755 + unsigned int *pi)
62756 {
62757 unsigned long uentry;
62758
62759 @@ -2640,6 +2657,7 @@ static int __init futex_init(void)
62760 {
62761 u32 curval;
62762 int i;
62763 + mm_segment_t oldfs;
62764
62765 /*
62766 * This will fail and we want it. Some arch implementations do
62767 @@ -2651,7 +2669,10 @@ static int __init futex_init(void)
62768 * implementation, the non functional ones will return
62769 * -ENOSYS.
62770 */
62771 + oldfs = get_fs();
62772 + set_fs(USER_DS);
62773 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
62774 + set_fs(oldfs);
62775 if (curval == -EFAULT)
62776 futex_cmpxchg_enabled = 1;
62777
62778 diff -urNp linux-2.6.32.45/kernel/futex_compat.c linux-2.6.32.45/kernel/futex_compat.c
62779 --- linux-2.6.32.45/kernel/futex_compat.c 2011-03-27 14:31:47.000000000 -0400
62780 +++ linux-2.6.32.45/kernel/futex_compat.c 2011-04-17 15:56:46.000000000 -0400
62781 @@ -10,6 +10,7 @@
62782 #include <linux/compat.h>
62783 #include <linux/nsproxy.h>
62784 #include <linux/futex.h>
62785 +#include <linux/ptrace.h>
62786
62787 #include <asm/uaccess.h>
62788
62789 @@ -135,7 +136,10 @@ compat_sys_get_robust_list(int pid, comp
62790 {
62791 struct compat_robust_list_head __user *head;
62792 unsigned long ret;
62793 - const struct cred *cred = current_cred(), *pcred;
62794 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
62795 + const struct cred *cred = current_cred();
62796 + const struct cred *pcred;
62797 +#endif
62798
62799 if (!futex_cmpxchg_enabled)
62800 return -ENOSYS;
62801 @@ -151,11 +155,16 @@ compat_sys_get_robust_list(int pid, comp
62802 if (!p)
62803 goto err_unlock;
62804 ret = -EPERM;
62805 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62806 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
62807 + goto err_unlock;
62808 +#else
62809 pcred = __task_cred(p);
62810 if (cred->euid != pcred->euid &&
62811 cred->euid != pcred->uid &&
62812 !capable(CAP_SYS_PTRACE))
62813 goto err_unlock;
62814 +#endif
62815 head = p->compat_robust_list;
62816 read_unlock(&tasklist_lock);
62817 }
62818 diff -urNp linux-2.6.32.45/kernel/gcov/base.c linux-2.6.32.45/kernel/gcov/base.c
62819 --- linux-2.6.32.45/kernel/gcov/base.c 2011-03-27 14:31:47.000000000 -0400
62820 +++ linux-2.6.32.45/kernel/gcov/base.c 2011-04-17 15:56:46.000000000 -0400
62821 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
62822 }
62823
62824 #ifdef CONFIG_MODULES
62825 -static inline int within(void *addr, void *start, unsigned long size)
62826 -{
62827 - return ((addr >= start) && (addr < start + size));
62828 -}
62829 -
62830 /* Update list and generate events when modules are unloaded. */
62831 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
62832 void *data)
62833 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
62834 prev = NULL;
62835 /* Remove entries located in module from linked list. */
62836 for (info = gcov_info_head; info; info = info->next) {
62837 - if (within(info, mod->module_core, mod->core_size)) {
62838 + if (within_module_core_rw((unsigned long)info, mod)) {
62839 if (prev)
62840 prev->next = info->next;
62841 else
62842 diff -urNp linux-2.6.32.45/kernel/hrtimer.c linux-2.6.32.45/kernel/hrtimer.c
62843 --- linux-2.6.32.45/kernel/hrtimer.c 2011-03-27 14:31:47.000000000 -0400
62844 +++ linux-2.6.32.45/kernel/hrtimer.c 2011-04-17 15:56:46.000000000 -0400
62845 @@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
62846 local_irq_restore(flags);
62847 }
62848
62849 -static void run_hrtimer_softirq(struct softirq_action *h)
62850 +static void run_hrtimer_softirq(void)
62851 {
62852 hrtimer_peek_ahead_timers();
62853 }
62854 diff -urNp linux-2.6.32.45/kernel/kallsyms.c linux-2.6.32.45/kernel/kallsyms.c
62855 --- linux-2.6.32.45/kernel/kallsyms.c 2011-03-27 14:31:47.000000000 -0400
62856 +++ linux-2.6.32.45/kernel/kallsyms.c 2011-04-17 15:56:46.000000000 -0400
62857 @@ -11,6 +11,9 @@
62858 * Changed the compression method from stem compression to "table lookup"
62859 * compression (see scripts/kallsyms.c for a more complete description)
62860 */
62861 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62862 +#define __INCLUDED_BY_HIDESYM 1
62863 +#endif
62864 #include <linux/kallsyms.h>
62865 #include <linux/module.h>
62866 #include <linux/init.h>
62867 @@ -51,12 +54,33 @@ extern const unsigned long kallsyms_mark
62868
62869 static inline int is_kernel_inittext(unsigned long addr)
62870 {
62871 + if (system_state != SYSTEM_BOOTING)
62872 + return 0;
62873 +
62874 if (addr >= (unsigned long)_sinittext
62875 && addr <= (unsigned long)_einittext)
62876 return 1;
62877 return 0;
62878 }
62879
62880 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
62881 +#ifdef CONFIG_MODULES
62882 +static inline int is_module_text(unsigned long addr)
62883 +{
62884 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
62885 + return 1;
62886 +
62887 + addr = ktla_ktva(addr);
62888 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
62889 +}
62890 +#else
62891 +static inline int is_module_text(unsigned long addr)
62892 +{
62893 + return 0;
62894 +}
62895 +#endif
62896 +#endif
62897 +
62898 static inline int is_kernel_text(unsigned long addr)
62899 {
62900 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
62901 @@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigne
62902
62903 static inline int is_kernel(unsigned long addr)
62904 {
62905 +
62906 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
62907 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
62908 + return 1;
62909 +
62910 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
62911 +#else
62912 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
62913 +#endif
62914 +
62915 return 1;
62916 return in_gate_area_no_task(addr);
62917 }
62918
62919 static int is_ksym_addr(unsigned long addr)
62920 {
62921 +
62922 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
62923 + if (is_module_text(addr))
62924 + return 0;
62925 +#endif
62926 +
62927 if (all_var)
62928 return is_kernel(addr);
62929
62930 @@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(st
62931
62932 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
62933 {
62934 - iter->name[0] = '\0';
62935 iter->nameoff = get_symbol_offset(new_pos);
62936 iter->pos = new_pos;
62937 }
62938 @@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, vo
62939 {
62940 struct kallsym_iter *iter = m->private;
62941
62942 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62943 + if (current_uid())
62944 + return 0;
62945 +#endif
62946 +
62947 /* Some debugging symbols have no name. Ignore them. */
62948 if (!iter->name[0])
62949 return 0;
62950 @@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *i
62951 struct kallsym_iter *iter;
62952 int ret;
62953
62954 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
62955 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
62956 if (!iter)
62957 return -ENOMEM;
62958 reset_iter(iter, 0);
62959 diff -urNp linux-2.6.32.45/kernel/kgdb.c linux-2.6.32.45/kernel/kgdb.c
62960 --- linux-2.6.32.45/kernel/kgdb.c 2011-04-17 17:00:52.000000000 -0400
62961 +++ linux-2.6.32.45/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
62962 @@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
62963 /* Guard for recursive entry */
62964 static int exception_level;
62965
62966 -static struct kgdb_io *kgdb_io_ops;
62967 +static const struct kgdb_io *kgdb_io_ops;
62968 static DEFINE_SPINLOCK(kgdb_registration_lock);
62969
62970 /* kgdb console driver is loaded */
62971 @@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1)
62972 */
62973 static atomic_t passive_cpu_wait[NR_CPUS];
62974 static atomic_t cpu_in_kgdb[NR_CPUS];
62975 -atomic_t kgdb_setting_breakpoint;
62976 +atomic_unchecked_t kgdb_setting_breakpoint;
62977
62978 struct task_struct *kgdb_usethread;
62979 struct task_struct *kgdb_contthread;
62980 @@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBY
62981 sizeof(unsigned long)];
62982
62983 /* to keep track of the CPU which is doing the single stepping*/
62984 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
62985 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
62986
62987 /*
62988 * If you are debugging a problem where roundup (the collection of
62989 @@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
62990 return 0;
62991 if (kgdb_connected)
62992 return 1;
62993 - if (atomic_read(&kgdb_setting_breakpoint))
62994 + if (atomic_read_unchecked(&kgdb_setting_breakpoint))
62995 return 1;
62996 if (print_wait)
62997 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
62998 @@ -1426,8 +1426,8 @@ acquirelock:
62999 * instance of the exception handler wanted to come into the
63000 * debugger on a different CPU via a single step
63001 */
63002 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
63003 - atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
63004 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
63005 + atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
63006
63007 atomic_set(&kgdb_active, -1);
63008 touch_softlockup_watchdog();
63009 @@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void
63010 *
63011 * Register it with the KGDB core.
63012 */
63013 -int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
63014 +int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
63015 {
63016 int err;
63017
63018 @@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_modul
63019 *
63020 * Unregister it with the KGDB core.
63021 */
63022 -void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
63023 +void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
63024 {
63025 BUG_ON(kgdb_connected);
63026
63027 @@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_mod
63028 */
63029 void kgdb_breakpoint(void)
63030 {
63031 - atomic_set(&kgdb_setting_breakpoint, 1);
63032 + atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
63033 wmb(); /* Sync point before breakpoint */
63034 arch_kgdb_breakpoint();
63035 wmb(); /* Sync point after breakpoint */
63036 - atomic_set(&kgdb_setting_breakpoint, 0);
63037 + atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
63038 }
63039 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
63040
63041 diff -urNp linux-2.6.32.45/kernel/kmod.c linux-2.6.32.45/kernel/kmod.c
63042 --- linux-2.6.32.45/kernel/kmod.c 2011-03-27 14:31:47.000000000 -0400
63043 +++ linux-2.6.32.45/kernel/kmod.c 2011-04-17 15:56:46.000000000 -0400
63044 @@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
63045 * If module auto-loading support is disabled then this function
63046 * becomes a no-operation.
63047 */
63048 -int __request_module(bool wait, const char *fmt, ...)
63049 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
63050 {
63051 - va_list args;
63052 char module_name[MODULE_NAME_LEN];
63053 unsigned int max_modprobes;
63054 int ret;
63055 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
63056 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
63057 static char *envp[] = { "HOME=/",
63058 "TERM=linux",
63059 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
63060 @@ -84,12 +83,24 @@ int __request_module(bool wait, const ch
63061 if (ret)
63062 return ret;
63063
63064 - va_start(args, fmt);
63065 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
63066 - va_end(args);
63067 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
63068 if (ret >= MODULE_NAME_LEN)
63069 return -ENAMETOOLONG;
63070
63071 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63072 + if (!current_uid()) {
63073 + /* hack to workaround consolekit/udisks stupidity */
63074 + read_lock(&tasklist_lock);
63075 + if (!strcmp(current->comm, "mount") &&
63076 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
63077 + read_unlock(&tasklist_lock);
63078 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
63079 + return -EPERM;
63080 + }
63081 + read_unlock(&tasklist_lock);
63082 + }
63083 +#endif
63084 +
63085 /* If modprobe needs a service that is in a module, we get a recursive
63086 * loop. Limit the number of running kmod threads to max_threads/2 or
63087 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
63088 @@ -121,6 +132,48 @@ int __request_module(bool wait, const ch
63089 atomic_dec(&kmod_concurrent);
63090 return ret;
63091 }
63092 +
63093 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
63094 +{
63095 + va_list args;
63096 + int ret;
63097 +
63098 + va_start(args, fmt);
63099 + ret = ____request_module(wait, module_param, fmt, args);
63100 + va_end(args);
63101 +
63102 + return ret;
63103 +}
63104 +
63105 +int __request_module(bool wait, const char *fmt, ...)
63106 +{
63107 + va_list args;
63108 + int ret;
63109 +
63110 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63111 + if (current_uid()) {
63112 + char module_param[MODULE_NAME_LEN];
63113 +
63114 + memset(module_param, 0, sizeof(module_param));
63115 +
63116 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
63117 +
63118 + va_start(args, fmt);
63119 + ret = ____request_module(wait, module_param, fmt, args);
63120 + va_end(args);
63121 +
63122 + return ret;
63123 + }
63124 +#endif
63125 +
63126 + va_start(args, fmt);
63127 + ret = ____request_module(wait, NULL, fmt, args);
63128 + va_end(args);
63129 +
63130 + return ret;
63131 +}
63132 +
63133 +
63134 EXPORT_SYMBOL(__request_module);
63135 #endif /* CONFIG_MODULES */
63136
63137 diff -urNp linux-2.6.32.45/kernel/kprobes.c linux-2.6.32.45/kernel/kprobes.c
63138 --- linux-2.6.32.45/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
63139 +++ linux-2.6.32.45/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
63140 @@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_
63141 * kernel image and loaded module images reside. This is required
63142 * so x86_64 can correctly handle the %rip-relative fixups.
63143 */
63144 - kip->insns = module_alloc(PAGE_SIZE);
63145 + kip->insns = module_alloc_exec(PAGE_SIZE);
63146 if (!kip->insns) {
63147 kfree(kip);
63148 return NULL;
63149 @@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(st
63150 */
63151 if (!list_is_singular(&kprobe_insn_pages)) {
63152 list_del(&kip->list);
63153 - module_free(NULL, kip->insns);
63154 + module_free_exec(NULL, kip->insns);
63155 kfree(kip);
63156 }
63157 return 1;
63158 @@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
63159 {
63160 int i, err = 0;
63161 unsigned long offset = 0, size = 0;
63162 - char *modname, namebuf[128];
63163 + char *modname, namebuf[KSYM_NAME_LEN];
63164 const char *symbol_name;
63165 void *addr;
63166 struct kprobe_blackpoint *kb;
63167 @@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(st
63168 const char *sym = NULL;
63169 unsigned int i = *(loff_t *) v;
63170 unsigned long offset = 0;
63171 - char *modname, namebuf[128];
63172 + char *modname, namebuf[KSYM_NAME_LEN];
63173
63174 head = &kprobe_table[i];
63175 preempt_disable();
63176 diff -urNp linux-2.6.32.45/kernel/lockdep.c linux-2.6.32.45/kernel/lockdep.c
63177 --- linux-2.6.32.45/kernel/lockdep.c 2011-06-25 12:55:35.000000000 -0400
63178 +++ linux-2.6.32.45/kernel/lockdep.c 2011-06-25 12:56:37.000000000 -0400
63179 @@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_t
63180 /*
63181 * Various lockdep statistics:
63182 */
63183 -atomic_t chain_lookup_hits;
63184 -atomic_t chain_lookup_misses;
63185 -atomic_t hardirqs_on_events;
63186 -atomic_t hardirqs_off_events;
63187 -atomic_t redundant_hardirqs_on;
63188 -atomic_t redundant_hardirqs_off;
63189 -atomic_t softirqs_on_events;
63190 -atomic_t softirqs_off_events;
63191 -atomic_t redundant_softirqs_on;
63192 -atomic_t redundant_softirqs_off;
63193 -atomic_t nr_unused_locks;
63194 -atomic_t nr_cyclic_checks;
63195 -atomic_t nr_find_usage_forwards_checks;
63196 -atomic_t nr_find_usage_backwards_checks;
63197 +atomic_unchecked_t chain_lookup_hits;
63198 +atomic_unchecked_t chain_lookup_misses;
63199 +atomic_unchecked_t hardirqs_on_events;
63200 +atomic_unchecked_t hardirqs_off_events;
63201 +atomic_unchecked_t redundant_hardirqs_on;
63202 +atomic_unchecked_t redundant_hardirqs_off;
63203 +atomic_unchecked_t softirqs_on_events;
63204 +atomic_unchecked_t softirqs_off_events;
63205 +atomic_unchecked_t redundant_softirqs_on;
63206 +atomic_unchecked_t redundant_softirqs_off;
63207 +atomic_unchecked_t nr_unused_locks;
63208 +atomic_unchecked_t nr_cyclic_checks;
63209 +atomic_unchecked_t nr_find_usage_forwards_checks;
63210 +atomic_unchecked_t nr_find_usage_backwards_checks;
63211 #endif
63212
63213 /*
63214 @@ -577,6 +577,10 @@ static int static_obj(void *obj)
63215 int i;
63216 #endif
63217
63218 +#ifdef CONFIG_PAX_KERNEXEC
63219 + start = ktla_ktva(start);
63220 +#endif
63221 +
63222 /*
63223 * static variable?
63224 */
63225 @@ -592,8 +596,7 @@ static int static_obj(void *obj)
63226 */
63227 for_each_possible_cpu(i) {
63228 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
63229 - end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
63230 - + per_cpu_offset(i);
63231 + end = start + PERCPU_ENOUGH_ROOM;
63232
63233 if ((addr >= start) && (addr < end))
63234 return 1;
63235 @@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *
63236 if (!static_obj(lock->key)) {
63237 debug_locks_off();
63238 printk("INFO: trying to register non-static key.\n");
63239 + printk("lock:%pS key:%pS.\n", lock, lock->key);
63240 printk("the code is fine but needs lockdep annotation.\n");
63241 printk("turning off the locking correctness validator.\n");
63242 dump_stack();
63243 @@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep
63244 if (!class)
63245 return 0;
63246 }
63247 - debug_atomic_inc((atomic_t *)&class->ops);
63248 + debug_atomic_inc((atomic_unchecked_t *)&class->ops);
63249 if (very_verbose(class)) {
63250 printk("\nacquire class [%p] %s", class->key, class->name);
63251 if (class->name_version > 1)
63252 diff -urNp linux-2.6.32.45/kernel/lockdep_internals.h linux-2.6.32.45/kernel/lockdep_internals.h
63253 --- linux-2.6.32.45/kernel/lockdep_internals.h 2011-03-27 14:31:47.000000000 -0400
63254 +++ linux-2.6.32.45/kernel/lockdep_internals.h 2011-04-17 15:56:46.000000000 -0400
63255 @@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_
63256 /*
63257 * Various lockdep statistics:
63258 */
63259 -extern atomic_t chain_lookup_hits;
63260 -extern atomic_t chain_lookup_misses;
63261 -extern atomic_t hardirqs_on_events;
63262 -extern atomic_t hardirqs_off_events;
63263 -extern atomic_t redundant_hardirqs_on;
63264 -extern atomic_t redundant_hardirqs_off;
63265 -extern atomic_t softirqs_on_events;
63266 -extern atomic_t softirqs_off_events;
63267 -extern atomic_t redundant_softirqs_on;
63268 -extern atomic_t redundant_softirqs_off;
63269 -extern atomic_t nr_unused_locks;
63270 -extern atomic_t nr_cyclic_checks;
63271 -extern atomic_t nr_cyclic_check_recursions;
63272 -extern atomic_t nr_find_usage_forwards_checks;
63273 -extern atomic_t nr_find_usage_forwards_recursions;
63274 -extern atomic_t nr_find_usage_backwards_checks;
63275 -extern atomic_t nr_find_usage_backwards_recursions;
63276 -# define debug_atomic_inc(ptr) atomic_inc(ptr)
63277 -# define debug_atomic_dec(ptr) atomic_dec(ptr)
63278 -# define debug_atomic_read(ptr) atomic_read(ptr)
63279 +extern atomic_unchecked_t chain_lookup_hits;
63280 +extern atomic_unchecked_t chain_lookup_misses;
63281 +extern atomic_unchecked_t hardirqs_on_events;
63282 +extern atomic_unchecked_t hardirqs_off_events;
63283 +extern atomic_unchecked_t redundant_hardirqs_on;
63284 +extern atomic_unchecked_t redundant_hardirqs_off;
63285 +extern atomic_unchecked_t softirqs_on_events;
63286 +extern atomic_unchecked_t softirqs_off_events;
63287 +extern atomic_unchecked_t redundant_softirqs_on;
63288 +extern atomic_unchecked_t redundant_softirqs_off;
63289 +extern atomic_unchecked_t nr_unused_locks;
63290 +extern atomic_unchecked_t nr_cyclic_checks;
63291 +extern atomic_unchecked_t nr_cyclic_check_recursions;
63292 +extern atomic_unchecked_t nr_find_usage_forwards_checks;
63293 +extern atomic_unchecked_t nr_find_usage_forwards_recursions;
63294 +extern atomic_unchecked_t nr_find_usage_backwards_checks;
63295 +extern atomic_unchecked_t nr_find_usage_backwards_recursions;
63296 +# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
63297 +# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
63298 +# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
63299 #else
63300 # define debug_atomic_inc(ptr) do { } while (0)
63301 # define debug_atomic_dec(ptr) do { } while (0)
63302 diff -urNp linux-2.6.32.45/kernel/lockdep_proc.c linux-2.6.32.45/kernel/lockdep_proc.c
63303 --- linux-2.6.32.45/kernel/lockdep_proc.c 2011-03-27 14:31:47.000000000 -0400
63304 +++ linux-2.6.32.45/kernel/lockdep_proc.c 2011-04-17 15:56:46.000000000 -0400
63305 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
63306
63307 static void print_name(struct seq_file *m, struct lock_class *class)
63308 {
63309 - char str[128];
63310 + char str[KSYM_NAME_LEN];
63311 const char *name = class->name;
63312
63313 if (!name) {
63314 diff -urNp linux-2.6.32.45/kernel/module.c linux-2.6.32.45/kernel/module.c
63315 --- linux-2.6.32.45/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
63316 +++ linux-2.6.32.45/kernel/module.c 2011-04-29 18:52:40.000000000 -0400
63317 @@ -55,6 +55,7 @@
63318 #include <linux/async.h>
63319 #include <linux/percpu.h>
63320 #include <linux/kmemleak.h>
63321 +#include <linux/grsecurity.h>
63322
63323 #define CREATE_TRACE_POINTS
63324 #include <trace/events/module.h>
63325 @@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq
63326 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
63327
63328 /* Bounds of module allocation, for speeding __module_address */
63329 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
63330 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
63331 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
63332
63333 int register_module_notifier(struct notifier_block * nb)
63334 {
63335 @@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct
63336 return true;
63337
63338 list_for_each_entry_rcu(mod, &modules, list) {
63339 - struct symsearch arr[] = {
63340 + struct symsearch modarr[] = {
63341 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
63342 NOT_GPL_ONLY, false },
63343 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
63344 @@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct
63345 #endif
63346 };
63347
63348 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
63349 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
63350 return true;
63351 }
63352 return false;
63353 @@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned lo
63354 void *ptr;
63355 int cpu;
63356
63357 - if (align > PAGE_SIZE) {
63358 + if (align-1 >= PAGE_SIZE) {
63359 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
63360 name, align, PAGE_SIZE);
63361 align = PAGE_SIZE;
63362 @@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resol
63363 * /sys/module/foo/sections stuff
63364 * J. Corbet <corbet@lwn.net>
63365 */
63366 -#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
63367 +#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
63368
63369 static inline bool sect_empty(const Elf_Shdr *sect)
63370 {
63371 @@ -1545,7 +1547,8 @@ static void free_module(struct module *m
63372 destroy_params(mod->kp, mod->num_kp);
63373
63374 /* This may be NULL, but that's OK */
63375 - module_free(mod, mod->module_init);
63376 + module_free(mod, mod->module_init_rw);
63377 + module_free_exec(mod, mod->module_init_rx);
63378 kfree(mod->args);
63379 if (mod->percpu)
63380 percpu_modfree(mod->percpu);
63381 @@ -1554,10 +1557,12 @@ static void free_module(struct module *m
63382 percpu_modfree(mod->refptr);
63383 #endif
63384 /* Free lock-classes: */
63385 - lockdep_free_key_range(mod->module_core, mod->core_size);
63386 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
63387 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
63388
63389 /* Finally, free the core (containing the module structure) */
63390 - module_free(mod, mod->module_core);
63391 + module_free_exec(mod, mod->module_core_rx);
63392 + module_free(mod, mod->module_core_rw);
63393
63394 #ifdef CONFIG_MPU
63395 update_protections(current->mm);
63396 @@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *se
63397 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
63398 int ret = 0;
63399 const struct kernel_symbol *ksym;
63400 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63401 + int is_fs_load = 0;
63402 + int register_filesystem_found = 0;
63403 + char *p;
63404 +
63405 + p = strstr(mod->args, "grsec_modharden_fs");
63406 +
63407 + if (p) {
63408 + char *endptr = p + strlen("grsec_modharden_fs");
63409 + /* copy \0 as well */
63410 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
63411 + is_fs_load = 1;
63412 + }
63413 +#endif
63414 +
63415
63416 for (i = 1; i < n; i++) {
63417 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63418 + const char *name = strtab + sym[i].st_name;
63419 +
63420 + /* it's a real shame this will never get ripped and copied
63421 + upstream! ;(
63422 + */
63423 + if (is_fs_load && !strcmp(name, "register_filesystem"))
63424 + register_filesystem_found = 1;
63425 +#endif
63426 switch (sym[i].st_shndx) {
63427 case SHN_COMMON:
63428 /* We compiled with -fno-common. These are not
63429 @@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *se
63430 strtab + sym[i].st_name, mod);
63431 /* Ok if resolved. */
63432 if (ksym) {
63433 + pax_open_kernel();
63434 sym[i].st_value = ksym->value;
63435 + pax_close_kernel();
63436 break;
63437 }
63438
63439 @@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *se
63440 secbase = (unsigned long)mod->percpu;
63441 else
63442 secbase = sechdrs[sym[i].st_shndx].sh_addr;
63443 + pax_open_kernel();
63444 sym[i].st_value += secbase;
63445 + pax_close_kernel();
63446 break;
63447 }
63448 }
63449
63450 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63451 + if (is_fs_load && !register_filesystem_found) {
63452 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
63453 + ret = -EPERM;
63454 + }
63455 +#endif
63456 +
63457 return ret;
63458 }
63459
63460 @@ -1731,11 +1771,12 @@ static void layout_sections(struct modul
63461 || s->sh_entsize != ~0UL
63462 || strstarts(secstrings + s->sh_name, ".init"))
63463 continue;
63464 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
63465 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
63466 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
63467 + else
63468 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
63469 DEBUGP("\t%s\n", secstrings + s->sh_name);
63470 }
63471 - if (m == 0)
63472 - mod->core_text_size = mod->core_size;
63473 }
63474
63475 DEBUGP("Init section allocation order:\n");
63476 @@ -1748,12 +1789,13 @@ static void layout_sections(struct modul
63477 || s->sh_entsize != ~0UL
63478 || !strstarts(secstrings + s->sh_name, ".init"))
63479 continue;
63480 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
63481 - | INIT_OFFSET_MASK);
63482 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
63483 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
63484 + else
63485 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
63486 + s->sh_entsize |= INIT_OFFSET_MASK;
63487 DEBUGP("\t%s\n", secstrings + s->sh_name);
63488 }
63489 - if (m == 0)
63490 - mod->init_text_size = mod->init_size;
63491 }
63492 }
63493
63494 @@ -1857,9 +1899,8 @@ static int is_exported(const char *name,
63495
63496 /* As per nm */
63497 static char elf_type(const Elf_Sym *sym,
63498 - Elf_Shdr *sechdrs,
63499 - const char *secstrings,
63500 - struct module *mod)
63501 + const Elf_Shdr *sechdrs,
63502 + const char *secstrings)
63503 {
63504 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
63505 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
63506 @@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struc
63507
63508 /* Put symbol section at end of init part of module. */
63509 symsect->sh_flags |= SHF_ALLOC;
63510 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
63511 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
63512 symindex) | INIT_OFFSET_MASK;
63513 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
63514
63515 @@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struc
63516 }
63517
63518 /* Append room for core symbols at end of core part. */
63519 - symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
63520 - mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
63521 + symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
63522 + mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
63523
63524 /* Put string table section at end of init part of module. */
63525 strsect->sh_flags |= SHF_ALLOC;
63526 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
63527 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
63528 strindex) | INIT_OFFSET_MASK;
63529 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
63530
63531 /* Append room for core symbols' strings at end of core part. */
63532 - *pstroffs = mod->core_size;
63533 + *pstroffs = mod->core_size_rx;
63534 __set_bit(0, strmap);
63535 - mod->core_size += bitmap_weight(strmap, strsect->sh_size);
63536 + mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
63537
63538 return symoffs;
63539 }
63540 @@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *
63541 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
63542 mod->strtab = (void *)sechdrs[strindex].sh_addr;
63543
63544 + pax_open_kernel();
63545 +
63546 /* Set types up while we still have access to sections. */
63547 for (i = 0; i < mod->num_symtab; i++)
63548 mod->symtab[i].st_info
63549 - = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
63550 + = elf_type(&mod->symtab[i], sechdrs, secstrings);
63551
63552 - mod->core_symtab = dst = mod->module_core + symoffs;
63553 + mod->core_symtab = dst = mod->module_core_rx + symoffs;
63554 src = mod->symtab;
63555 *dst = *src;
63556 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
63557 @@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *
63558 }
63559 mod->core_num_syms = ndst;
63560
63561 - mod->core_strtab = s = mod->module_core + stroffs;
63562 + mod->core_strtab = s = mod->module_core_rx + stroffs;
63563 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
63564 if (test_bit(i, strmap))
63565 *++s = mod->strtab[i];
63566 +
63567 + pax_close_kernel();
63568 }
63569 #else
63570 static inline unsigned long layout_symtab(struct module *mod,
63571 @@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _
63572 #endif
63573 }
63574
63575 -static void *module_alloc_update_bounds(unsigned long size)
63576 +static void *module_alloc_update_bounds_rw(unsigned long size)
63577 {
63578 void *ret = module_alloc(size);
63579
63580 if (ret) {
63581 /* Update module bounds. */
63582 - if ((unsigned long)ret < module_addr_min)
63583 - module_addr_min = (unsigned long)ret;
63584 - if ((unsigned long)ret + size > module_addr_max)
63585 - module_addr_max = (unsigned long)ret + size;
63586 + if ((unsigned long)ret < module_addr_min_rw)
63587 + module_addr_min_rw = (unsigned long)ret;
63588 + if ((unsigned long)ret + size > module_addr_max_rw)
63589 + module_addr_max_rw = (unsigned long)ret + size;
63590 + }
63591 + return ret;
63592 +}
63593 +
63594 +static void *module_alloc_update_bounds_rx(unsigned long size)
63595 +{
63596 + void *ret = module_alloc_exec(size);
63597 +
63598 + if (ret) {
63599 + /* Update module bounds. */
63600 + if ((unsigned long)ret < module_addr_min_rx)
63601 + module_addr_min_rx = (unsigned long)ret;
63602 + if ((unsigned long)ret + size > module_addr_max_rx)
63603 + module_addr_max_rx = (unsigned long)ret + size;
63604 }
63605 return ret;
63606 }
63607 @@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct
63608 unsigned int i;
63609
63610 /* only scan the sections containing data */
63611 - kmemleak_scan_area(mod->module_core, (unsigned long)mod -
63612 - (unsigned long)mod->module_core,
63613 + kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
63614 + (unsigned long)mod->module_core_rw,
63615 sizeof(struct module), GFP_KERNEL);
63616
63617 for (i = 1; i < hdr->e_shnum; i++) {
63618 @@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct
63619 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
63620 continue;
63621
63622 - kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
63623 - (unsigned long)mod->module_core,
63624 + kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
63625 + (unsigned long)mod->module_core_rw,
63626 sechdrs[i].sh_size, GFP_KERNEL);
63627 }
63628 }
63629 @@ -2263,7 +2322,7 @@ static noinline struct module *load_modu
63630 secstrings, &stroffs, strmap);
63631
63632 /* Do the allocs. */
63633 - ptr = module_alloc_update_bounds(mod->core_size);
63634 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
63635 /*
63636 * The pointer to this block is stored in the module structure
63637 * which is inside the block. Just mark it as not being a
63638 @@ -2274,23 +2333,47 @@ static noinline struct module *load_modu
63639 err = -ENOMEM;
63640 goto free_percpu;
63641 }
63642 - memset(ptr, 0, mod->core_size);
63643 - mod->module_core = ptr;
63644 + memset(ptr, 0, mod->core_size_rw);
63645 + mod->module_core_rw = ptr;
63646
63647 - ptr = module_alloc_update_bounds(mod->init_size);
63648 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
63649 /*
63650 * The pointer to this block is stored in the module structure
63651 * which is inside the block. This block doesn't need to be
63652 * scanned as it contains data and code that will be freed
63653 * after the module is initialized.
63654 */
63655 - kmemleak_ignore(ptr);
63656 - if (!ptr && mod->init_size) {
63657 + kmemleak_not_leak(ptr);
63658 + if (!ptr && mod->init_size_rw) {
63659 + err = -ENOMEM;
63660 + goto free_core_rw;
63661 + }
63662 + memset(ptr, 0, mod->init_size_rw);
63663 + mod->module_init_rw = ptr;
63664 +
63665 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
63666 + kmemleak_not_leak(ptr);
63667 + if (!ptr) {
63668 err = -ENOMEM;
63669 - goto free_core;
63670 + goto free_init_rw;
63671 }
63672 - memset(ptr, 0, mod->init_size);
63673 - mod->module_init = ptr;
63674 +
63675 + pax_open_kernel();
63676 + memset(ptr, 0, mod->core_size_rx);
63677 + pax_close_kernel();
63678 + mod->module_core_rx = ptr;
63679 +
63680 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
63681 + kmemleak_not_leak(ptr);
63682 + if (!ptr && mod->init_size_rx) {
63683 + err = -ENOMEM;
63684 + goto free_core_rx;
63685 + }
63686 +
63687 + pax_open_kernel();
63688 + memset(ptr, 0, mod->init_size_rx);
63689 + pax_close_kernel();
63690 + mod->module_init_rx = ptr;
63691
63692 /* Transfer each section which specifies SHF_ALLOC */
63693 DEBUGP("final section addresses:\n");
63694 @@ -2300,17 +2383,45 @@ static noinline struct module *load_modu
63695 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
63696 continue;
63697
63698 - if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
63699 - dest = mod->module_init
63700 - + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
63701 - else
63702 - dest = mod->module_core + sechdrs[i].sh_entsize;
63703 + if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
63704 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
63705 + dest = mod->module_init_rw
63706 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
63707 + else
63708 + dest = mod->module_init_rx
63709 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
63710 + } else {
63711 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
63712 + dest = mod->module_core_rw + sechdrs[i].sh_entsize;
63713 + else
63714 + dest = mod->module_core_rx + sechdrs[i].sh_entsize;
63715 + }
63716 +
63717 + if (sechdrs[i].sh_type != SHT_NOBITS) {
63718
63719 - if (sechdrs[i].sh_type != SHT_NOBITS)
63720 - memcpy(dest, (void *)sechdrs[i].sh_addr,
63721 - sechdrs[i].sh_size);
63722 +#ifdef CONFIG_PAX_KERNEXEC
63723 +#ifdef CONFIG_X86_64
63724 + if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
63725 + set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
63726 +#endif
63727 + if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
63728 + pax_open_kernel();
63729 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
63730 + pax_close_kernel();
63731 + } else
63732 +#endif
63733 +
63734 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
63735 + }
63736 /* Update sh_addr to point to copy in image. */
63737 - sechdrs[i].sh_addr = (unsigned long)dest;
63738 +
63739 +#ifdef CONFIG_PAX_KERNEXEC
63740 + if (sechdrs[i].sh_flags & SHF_EXECINSTR)
63741 + sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
63742 + else
63743 +#endif
63744 +
63745 + sechdrs[i].sh_addr = (unsigned long)dest;
63746 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
63747 }
63748 /* Module has been moved. */
63749 @@ -2322,7 +2433,7 @@ static noinline struct module *load_modu
63750 mod->name);
63751 if (!mod->refptr) {
63752 err = -ENOMEM;
63753 - goto free_init;
63754 + goto free_init_rx;
63755 }
63756 #endif
63757 /* Now we've moved module, initialize linked lists, etc. */
63758 @@ -2351,6 +2462,31 @@ static noinline struct module *load_modu
63759 /* Set up MODINFO_ATTR fields */
63760 setup_modinfo(mod, sechdrs, infoindex);
63761
63762 + mod->args = args;
63763 +
63764 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63765 + {
63766 + char *p, *p2;
63767 +
63768 + if (strstr(mod->args, "grsec_modharden_netdev")) {
63769 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
63770 + err = -EPERM;
63771 + goto cleanup;
63772 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
63773 + p += strlen("grsec_modharden_normal");
63774 + p2 = strstr(p, "_");
63775 + if (p2) {
63776 + *p2 = '\0';
63777 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
63778 + *p2 = '_';
63779 + }
63780 + err = -EPERM;
63781 + goto cleanup;
63782 + }
63783 + }
63784 +#endif
63785 +
63786 +
63787 /* Fix up syms, so that st_value is a pointer to location. */
63788 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
63789 mod);
63790 @@ -2431,8 +2567,8 @@ static noinline struct module *load_modu
63791
63792 /* Now do relocations. */
63793 for (i = 1; i < hdr->e_shnum; i++) {
63794 - const char *strtab = (char *)sechdrs[strindex].sh_addr;
63795 unsigned int info = sechdrs[i].sh_info;
63796 + strtab = (char *)sechdrs[strindex].sh_addr;
63797
63798 /* Not a valid relocation section? */
63799 if (info >= hdr->e_shnum)
63800 @@ -2493,16 +2629,15 @@ static noinline struct module *load_modu
63801 * Do it before processing of module parameters, so the module
63802 * can provide parameter accessor functions of its own.
63803 */
63804 - if (mod->module_init)
63805 - flush_icache_range((unsigned long)mod->module_init,
63806 - (unsigned long)mod->module_init
63807 - + mod->init_size);
63808 - flush_icache_range((unsigned long)mod->module_core,
63809 - (unsigned long)mod->module_core + mod->core_size);
63810 + if (mod->module_init_rx)
63811 + flush_icache_range((unsigned long)mod->module_init_rx,
63812 + (unsigned long)mod->module_init_rx
63813 + + mod->init_size_rx);
63814 + flush_icache_range((unsigned long)mod->module_core_rx,
63815 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
63816
63817 set_fs(old_fs);
63818
63819 - mod->args = args;
63820 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
63821 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
63822 mod->name);
63823 @@ -2546,12 +2681,16 @@ static noinline struct module *load_modu
63824 free_unload:
63825 module_unload_free(mod);
63826 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
63827 + free_init_rx:
63828 percpu_modfree(mod->refptr);
63829 - free_init:
63830 #endif
63831 - module_free(mod, mod->module_init);
63832 - free_core:
63833 - module_free(mod, mod->module_core);
63834 + module_free_exec(mod, mod->module_init_rx);
63835 + free_core_rx:
63836 + module_free_exec(mod, mod->module_core_rx);
63837 + free_init_rw:
63838 + module_free(mod, mod->module_init_rw);
63839 + free_core_rw:
63840 + module_free(mod, mod->module_core_rw);
63841 /* mod will be freed with core. Don't access it beyond this line! */
63842 free_percpu:
63843 if (percpu)
63844 @@ -2653,10 +2792,12 @@ SYSCALL_DEFINE3(init_module, void __user
63845 mod->symtab = mod->core_symtab;
63846 mod->strtab = mod->core_strtab;
63847 #endif
63848 - module_free(mod, mod->module_init);
63849 - mod->module_init = NULL;
63850 - mod->init_size = 0;
63851 - mod->init_text_size = 0;
63852 + module_free(mod, mod->module_init_rw);
63853 + module_free_exec(mod, mod->module_init_rx);
63854 + mod->module_init_rw = NULL;
63855 + mod->module_init_rx = NULL;
63856 + mod->init_size_rw = 0;
63857 + mod->init_size_rx = 0;
63858 mutex_unlock(&module_mutex);
63859
63860 return 0;
63861 @@ -2687,10 +2828,16 @@ static const char *get_ksymbol(struct mo
63862 unsigned long nextval;
63863
63864 /* At worse, next value is at end of module */
63865 - if (within_module_init(addr, mod))
63866 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
63867 + if (within_module_init_rx(addr, mod))
63868 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
63869 + else if (within_module_init_rw(addr, mod))
63870 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
63871 + else if (within_module_core_rx(addr, mod))
63872 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
63873 + else if (within_module_core_rw(addr, mod))
63874 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
63875 else
63876 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
63877 + return NULL;
63878
63879 /* Scan for closest preceeding symbol, and next symbol. (ELF
63880 starts real symbols at 1). */
63881 @@ -2936,7 +3083,7 @@ static int m_show(struct seq_file *m, vo
63882 char buf[8];
63883
63884 seq_printf(m, "%s %u",
63885 - mod->name, mod->init_size + mod->core_size);
63886 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
63887 print_unload_info(m, mod);
63888
63889 /* Informative for users. */
63890 @@ -2945,7 +3092,7 @@ static int m_show(struct seq_file *m, vo
63891 mod->state == MODULE_STATE_COMING ? "Loading":
63892 "Live");
63893 /* Used by oprofile and other similar tools. */
63894 - seq_printf(m, " 0x%p", mod->module_core);
63895 + seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
63896
63897 /* Taints info */
63898 if (mod->taints)
63899 @@ -2981,7 +3128,17 @@ static const struct file_operations proc
63900
63901 static int __init proc_modules_init(void)
63902 {
63903 +#ifndef CONFIG_GRKERNSEC_HIDESYM
63904 +#ifdef CONFIG_GRKERNSEC_PROC_USER
63905 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
63906 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63907 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
63908 +#else
63909 proc_create("modules", 0, NULL, &proc_modules_operations);
63910 +#endif
63911 +#else
63912 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
63913 +#endif
63914 return 0;
63915 }
63916 module_init(proc_modules_init);
63917 @@ -3040,12 +3197,12 @@ struct module *__module_address(unsigned
63918 {
63919 struct module *mod;
63920
63921 - if (addr < module_addr_min || addr > module_addr_max)
63922 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
63923 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
63924 return NULL;
63925
63926 list_for_each_entry_rcu(mod, &modules, list)
63927 - if (within_module_core(addr, mod)
63928 - || within_module_init(addr, mod))
63929 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
63930 return mod;
63931 return NULL;
63932 }
63933 @@ -3079,11 +3236,20 @@ bool is_module_text_address(unsigned lon
63934 */
63935 struct module *__module_text_address(unsigned long addr)
63936 {
63937 - struct module *mod = __module_address(addr);
63938 + struct module *mod;
63939 +
63940 +#ifdef CONFIG_X86_32
63941 + addr = ktla_ktva(addr);
63942 +#endif
63943 +
63944 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
63945 + return NULL;
63946 +
63947 + mod = __module_address(addr);
63948 +
63949 if (mod) {
63950 /* Make sure it's within the text section. */
63951 - if (!within(addr, mod->module_init, mod->init_text_size)
63952 - && !within(addr, mod->module_core, mod->core_text_size))
63953 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
63954 mod = NULL;
63955 }
63956 return mod;
63957 diff -urNp linux-2.6.32.45/kernel/mutex.c linux-2.6.32.45/kernel/mutex.c
63958 --- linux-2.6.32.45/kernel/mutex.c 2011-03-27 14:31:47.000000000 -0400
63959 +++ linux-2.6.32.45/kernel/mutex.c 2011-04-17 15:56:46.000000000 -0400
63960 @@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock,
63961 */
63962
63963 for (;;) {
63964 - struct thread_info *owner;
63965 + struct task_struct *owner;
63966
63967 /*
63968 * If we own the BKL, then don't spin. The owner of
63969 @@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock,
63970 spin_lock_mutex(&lock->wait_lock, flags);
63971
63972 debug_mutex_lock_common(lock, &waiter);
63973 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
63974 + debug_mutex_add_waiter(lock, &waiter, task);
63975
63976 /* add waiting tasks to the end of the waitqueue (FIFO): */
63977 list_add_tail(&waiter.list, &lock->wait_list);
63978 @@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock,
63979 * TASK_UNINTERRUPTIBLE case.)
63980 */
63981 if (unlikely(signal_pending_state(state, task))) {
63982 - mutex_remove_waiter(lock, &waiter,
63983 - task_thread_info(task));
63984 + mutex_remove_waiter(lock, &waiter, task);
63985 mutex_release(&lock->dep_map, 1, ip);
63986 spin_unlock_mutex(&lock->wait_lock, flags);
63987
63988 @@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock,
63989 done:
63990 lock_acquired(&lock->dep_map, ip);
63991 /* got the lock - rejoice! */
63992 - mutex_remove_waiter(lock, &waiter, current_thread_info());
63993 + mutex_remove_waiter(lock, &waiter, task);
63994 mutex_set_owner(lock);
63995
63996 /* set it to 0 if there are no waiters left: */
63997 diff -urNp linux-2.6.32.45/kernel/mutex-debug.c linux-2.6.32.45/kernel/mutex-debug.c
63998 --- linux-2.6.32.45/kernel/mutex-debug.c 2011-03-27 14:31:47.000000000 -0400
63999 +++ linux-2.6.32.45/kernel/mutex-debug.c 2011-04-17 15:56:46.000000000 -0400
64000 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
64001 }
64002
64003 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64004 - struct thread_info *ti)
64005 + struct task_struct *task)
64006 {
64007 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
64008
64009 /* Mark the current thread as blocked on the lock: */
64010 - ti->task->blocked_on = waiter;
64011 + task->blocked_on = waiter;
64012 }
64013
64014 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64015 - struct thread_info *ti)
64016 + struct task_struct *task)
64017 {
64018 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
64019 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
64020 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
64021 - ti->task->blocked_on = NULL;
64022 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
64023 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
64024 + task->blocked_on = NULL;
64025
64026 list_del_init(&waiter->list);
64027 waiter->task = NULL;
64028 @@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lo
64029 return;
64030
64031 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
64032 - DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
64033 + DEBUG_LOCKS_WARN_ON(lock->owner != current);
64034 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
64035 mutex_clear_owner(lock);
64036 }
64037 diff -urNp linux-2.6.32.45/kernel/mutex-debug.h linux-2.6.32.45/kernel/mutex-debug.h
64038 --- linux-2.6.32.45/kernel/mutex-debug.h 2011-03-27 14:31:47.000000000 -0400
64039 +++ linux-2.6.32.45/kernel/mutex-debug.h 2011-04-17 15:56:46.000000000 -0400
64040 @@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(stru
64041 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
64042 extern void debug_mutex_add_waiter(struct mutex *lock,
64043 struct mutex_waiter *waiter,
64044 - struct thread_info *ti);
64045 + struct task_struct *task);
64046 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64047 - struct thread_info *ti);
64048 + struct task_struct *task);
64049 extern void debug_mutex_unlock(struct mutex *lock);
64050 extern void debug_mutex_init(struct mutex *lock, const char *name,
64051 struct lock_class_key *key);
64052
64053 static inline void mutex_set_owner(struct mutex *lock)
64054 {
64055 - lock->owner = current_thread_info();
64056 + lock->owner = current;
64057 }
64058
64059 static inline void mutex_clear_owner(struct mutex *lock)
64060 diff -urNp linux-2.6.32.45/kernel/mutex.h linux-2.6.32.45/kernel/mutex.h
64061 --- linux-2.6.32.45/kernel/mutex.h 2011-03-27 14:31:47.000000000 -0400
64062 +++ linux-2.6.32.45/kernel/mutex.h 2011-04-17 15:56:46.000000000 -0400
64063 @@ -19,7 +19,7 @@
64064 #ifdef CONFIG_SMP
64065 static inline void mutex_set_owner(struct mutex *lock)
64066 {
64067 - lock->owner = current_thread_info();
64068 + lock->owner = current;
64069 }
64070
64071 static inline void mutex_clear_owner(struct mutex *lock)
64072 diff -urNp linux-2.6.32.45/kernel/panic.c linux-2.6.32.45/kernel/panic.c
64073 --- linux-2.6.32.45/kernel/panic.c 2011-03-27 14:31:47.000000000 -0400
64074 +++ linux-2.6.32.45/kernel/panic.c 2011-04-17 15:56:46.000000000 -0400
64075 @@ -352,7 +352,7 @@ static void warn_slowpath_common(const c
64076 const char *board;
64077
64078 printk(KERN_WARNING "------------[ cut here ]------------\n");
64079 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
64080 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
64081 board = dmi_get_system_info(DMI_PRODUCT_NAME);
64082 if (board)
64083 printk(KERN_WARNING "Hardware name: %s\n", board);
64084 @@ -392,7 +392,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
64085 */
64086 void __stack_chk_fail(void)
64087 {
64088 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
64089 + dump_stack();
64090 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
64091 __builtin_return_address(0));
64092 }
64093 EXPORT_SYMBOL(__stack_chk_fail);
64094 diff -urNp linux-2.6.32.45/kernel/params.c linux-2.6.32.45/kernel/params.c
64095 --- linux-2.6.32.45/kernel/params.c 2011-03-27 14:31:47.000000000 -0400
64096 +++ linux-2.6.32.45/kernel/params.c 2011-04-17 15:56:46.000000000 -0400
64097 @@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct
64098 return ret;
64099 }
64100
64101 -static struct sysfs_ops module_sysfs_ops = {
64102 +static const struct sysfs_ops module_sysfs_ops = {
64103 .show = module_attr_show,
64104 .store = module_attr_store,
64105 };
64106 @@ -739,7 +739,7 @@ static int uevent_filter(struct kset *ks
64107 return 0;
64108 }
64109
64110 -static struct kset_uevent_ops module_uevent_ops = {
64111 +static const struct kset_uevent_ops module_uevent_ops = {
64112 .filter = uevent_filter,
64113 };
64114
64115 diff -urNp linux-2.6.32.45/kernel/perf_event.c linux-2.6.32.45/kernel/perf_event.c
64116 --- linux-2.6.32.45/kernel/perf_event.c 2011-08-09 18:35:30.000000000 -0400
64117 +++ linux-2.6.32.45/kernel/perf_event.c 2011-08-09 18:34:01.000000000 -0400
64118 @@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostl
64119 */
64120 int sysctl_perf_event_sample_rate __read_mostly = 100000;
64121
64122 -static atomic64_t perf_event_id;
64123 +static atomic64_unchecked_t perf_event_id;
64124
64125 /*
64126 * Lock for (sysadmin-configurable) event reservations:
64127 @@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struc
64128 * In order to keep per-task stats reliable we need to flip the event
64129 * values when we flip the contexts.
64130 */
64131 - value = atomic64_read(&next_event->count);
64132 - value = atomic64_xchg(&event->count, value);
64133 - atomic64_set(&next_event->count, value);
64134 + value = atomic64_read_unchecked(&next_event->count);
64135 + value = atomic64_xchg_unchecked(&event->count, value);
64136 + atomic64_set_unchecked(&next_event->count, value);
64137
64138 swap(event->total_time_enabled, next_event->total_time_enabled);
64139 swap(event->total_time_running, next_event->total_time_running);
64140 @@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_e
64141 update_event_times(event);
64142 }
64143
64144 - return atomic64_read(&event->count);
64145 + return atomic64_read_unchecked(&event->count);
64146 }
64147
64148 /*
64149 @@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct
64150 values[n++] = 1 + leader->nr_siblings;
64151 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64152 values[n++] = leader->total_time_enabled +
64153 - atomic64_read(&leader->child_total_time_enabled);
64154 + atomic64_read_unchecked(&leader->child_total_time_enabled);
64155 }
64156 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64157 values[n++] = leader->total_time_running +
64158 - atomic64_read(&leader->child_total_time_running);
64159 + atomic64_read_unchecked(&leader->child_total_time_running);
64160 }
64161
64162 size = n * sizeof(u64);
64163 @@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct pe
64164 values[n++] = perf_event_read_value(event);
64165 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64166 values[n++] = event->total_time_enabled +
64167 - atomic64_read(&event->child_total_time_enabled);
64168 + atomic64_read_unchecked(&event->child_total_time_enabled);
64169 }
64170 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64171 values[n++] = event->total_time_running +
64172 - atomic64_read(&event->child_total_time_running);
64173 + atomic64_read_unchecked(&event->child_total_time_running);
64174 }
64175 if (read_format & PERF_FORMAT_ID)
64176 values[n++] = primary_event_id(event);
64177 @@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct fil
64178 static void perf_event_reset(struct perf_event *event)
64179 {
64180 (void)perf_event_read(event);
64181 - atomic64_set(&event->count, 0);
64182 + atomic64_set_unchecked(&event->count, 0);
64183 perf_event_update_userpage(event);
64184 }
64185
64186 @@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct p
64187 ++userpg->lock;
64188 barrier();
64189 userpg->index = perf_event_index(event);
64190 - userpg->offset = atomic64_read(&event->count);
64191 + userpg->offset = atomic64_read_unchecked(&event->count);
64192 if (event->state == PERF_EVENT_STATE_ACTIVE)
64193 - userpg->offset -= atomic64_read(&event->hw.prev_count);
64194 + userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
64195
64196 userpg->time_enabled = event->total_time_enabled +
64197 - atomic64_read(&event->child_total_time_enabled);
64198 + atomic64_read_unchecked(&event->child_total_time_enabled);
64199
64200 userpg->time_running = event->total_time_running +
64201 - atomic64_read(&event->child_total_time_running);
64202 + atomic64_read_unchecked(&event->child_total_time_running);
64203
64204 barrier();
64205 ++userpg->lock;
64206 @@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct
64207 u64 values[4];
64208 int n = 0;
64209
64210 - values[n++] = atomic64_read(&event->count);
64211 + values[n++] = atomic64_read_unchecked(&event->count);
64212 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64213 values[n++] = event->total_time_enabled +
64214 - atomic64_read(&event->child_total_time_enabled);
64215 + atomic64_read_unchecked(&event->child_total_time_enabled);
64216 }
64217 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64218 values[n++] = event->total_time_running +
64219 - atomic64_read(&event->child_total_time_running);
64220 + atomic64_read_unchecked(&event->child_total_time_running);
64221 }
64222 if (read_format & PERF_FORMAT_ID)
64223 values[n++] = primary_event_id(event);
64224 @@ -2940,7 +2940,7 @@ static void perf_output_read_group(struc
64225 if (leader != event)
64226 leader->pmu->read(leader);
64227
64228 - values[n++] = atomic64_read(&leader->count);
64229 + values[n++] = atomic64_read_unchecked(&leader->count);
64230 if (read_format & PERF_FORMAT_ID)
64231 values[n++] = primary_event_id(leader);
64232
64233 @@ -2952,7 +2952,7 @@ static void perf_output_read_group(struc
64234 if (sub != event)
64235 sub->pmu->read(sub);
64236
64237 - values[n++] = atomic64_read(&sub->count);
64238 + values[n++] = atomic64_read_unchecked(&sub->count);
64239 if (read_format & PERF_FORMAT_ID)
64240 values[n++] = primary_event_id(sub);
64241
64242 @@ -3783,7 +3783,7 @@ static void perf_swevent_add(struct perf
64243 {
64244 struct hw_perf_event *hwc = &event->hw;
64245
64246 - atomic64_add(nr, &event->count);
64247 + atomic64_add_unchecked(nr, &event->count);
64248
64249 if (!hwc->sample_period)
64250 return;
64251 @@ -4040,9 +4040,9 @@ static void cpu_clock_perf_event_update(
64252 u64 now;
64253
64254 now = cpu_clock(cpu);
64255 - prev = atomic64_read(&event->hw.prev_count);
64256 - atomic64_set(&event->hw.prev_count, now);
64257 - atomic64_add(now - prev, &event->count);
64258 + prev = atomic64_read_unchecked(&event->hw.prev_count);
64259 + atomic64_set_unchecked(&event->hw.prev_count, now);
64260 + atomic64_add_unchecked(now - prev, &event->count);
64261 }
64262
64263 static int cpu_clock_perf_event_enable(struct perf_event *event)
64264 @@ -4050,7 +4050,7 @@ static int cpu_clock_perf_event_enable(s
64265 struct hw_perf_event *hwc = &event->hw;
64266 int cpu = raw_smp_processor_id();
64267
64268 - atomic64_set(&hwc->prev_count, cpu_clock(cpu));
64269 + atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
64270 perf_swevent_start_hrtimer(event);
64271
64272 return 0;
64273 @@ -4082,9 +4082,9 @@ static void task_clock_perf_event_update
64274 u64 prev;
64275 s64 delta;
64276
64277 - prev = atomic64_xchg(&event->hw.prev_count, now);
64278 + prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
64279 delta = now - prev;
64280 - atomic64_add(delta, &event->count);
64281 + atomic64_add_unchecked(delta, &event->count);
64282 }
64283
64284 static int task_clock_perf_event_enable(struct perf_event *event)
64285 @@ -4094,7 +4094,7 @@ static int task_clock_perf_event_enable(
64286
64287 now = event->ctx->time;
64288
64289 - atomic64_set(&hwc->prev_count, now);
64290 + atomic64_set_unchecked(&hwc->prev_count, now);
64291
64292 perf_swevent_start_hrtimer(event);
64293
64294 @@ -4289,7 +4289,7 @@ perf_event_alloc(struct perf_event_attr
64295 event->parent = parent_event;
64296
64297 event->ns = get_pid_ns(current->nsproxy->pid_ns);
64298 - event->id = atomic64_inc_return(&perf_event_id);
64299 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
64300
64301 event->state = PERF_EVENT_STATE_INACTIVE;
64302
64303 @@ -4720,15 +4720,15 @@ static void sync_child_event(struct perf
64304 if (child_event->attr.inherit_stat)
64305 perf_event_read_event(child_event, child);
64306
64307 - child_val = atomic64_read(&child_event->count);
64308 + child_val = atomic64_read_unchecked(&child_event->count);
64309
64310 /*
64311 * Add back the child's count to the parent's count:
64312 */
64313 - atomic64_add(child_val, &parent_event->count);
64314 - atomic64_add(child_event->total_time_enabled,
64315 + atomic64_add_unchecked(child_val, &parent_event->count);
64316 + atomic64_add_unchecked(child_event->total_time_enabled,
64317 &parent_event->child_total_time_enabled);
64318 - atomic64_add(child_event->total_time_running,
64319 + atomic64_add_unchecked(child_event->total_time_running,
64320 &parent_event->child_total_time_running);
64321
64322 /*
64323 diff -urNp linux-2.6.32.45/kernel/pid.c linux-2.6.32.45/kernel/pid.c
64324 --- linux-2.6.32.45/kernel/pid.c 2011-04-22 19:16:29.000000000 -0400
64325 +++ linux-2.6.32.45/kernel/pid.c 2011-08-21 19:11:29.000000000 -0400
64326 @@ -33,6 +33,7 @@
64327 #include <linux/rculist.h>
64328 #include <linux/bootmem.h>
64329 #include <linux/hash.h>
64330 +#include <linux/security.h>
64331 #include <linux/pid_namespace.h>
64332 #include <linux/init_task.h>
64333 #include <linux/syscalls.h>
64334 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
64335
64336 int pid_max = PID_MAX_DEFAULT;
64337
64338 -#define RESERVED_PIDS 300
64339 +#define RESERVED_PIDS 500
64340
64341 int pid_max_min = RESERVED_PIDS + 1;
64342 int pid_max_max = PID_MAX_LIMIT;
64343 @@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
64344 */
64345 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
64346 {
64347 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64348 + struct task_struct *task;
64349 +
64350 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64351 +
64352 + if (gr_pid_is_chrooted(task))
64353 + return NULL;
64354 +
64355 + return task;
64356 }
64357
64358 struct task_struct *find_task_by_vpid(pid_t vnr)
64359 @@ -391,6 +399,11 @@ struct task_struct *find_task_by_vpid(pi
64360 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
64361 }
64362
64363 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
64364 +{
64365 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
64366 +}
64367 +
64368 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
64369 {
64370 struct pid *pid;
64371 diff -urNp linux-2.6.32.45/kernel/posix-cpu-timers.c linux-2.6.32.45/kernel/posix-cpu-timers.c
64372 --- linux-2.6.32.45/kernel/posix-cpu-timers.c 2011-03-27 14:31:47.000000000 -0400
64373 +++ linux-2.6.32.45/kernel/posix-cpu-timers.c 2011-08-06 09:33:44.000000000 -0400
64374 @@ -6,6 +6,7 @@
64375 #include <linux/posix-timers.h>
64376 #include <linux/errno.h>
64377 #include <linux/math64.h>
64378 +#include <linux/security.h>
64379 #include <asm/uaccess.h>
64380 #include <linux/kernel_stat.h>
64381 #include <trace/events/timer.h>
64382 @@ -1697,7 +1698,7 @@ static long thread_cpu_nsleep_restart(st
64383
64384 static __init int init_posix_cpu_timers(void)
64385 {
64386 - struct k_clock process = {
64387 + static struct k_clock process = {
64388 .clock_getres = process_cpu_clock_getres,
64389 .clock_get = process_cpu_clock_get,
64390 .clock_set = do_posix_clock_nosettime,
64391 @@ -1705,7 +1706,7 @@ static __init int init_posix_cpu_timers(
64392 .nsleep = process_cpu_nsleep,
64393 .nsleep_restart = process_cpu_nsleep_restart,
64394 };
64395 - struct k_clock thread = {
64396 + static struct k_clock thread = {
64397 .clock_getres = thread_cpu_clock_getres,
64398 .clock_get = thread_cpu_clock_get,
64399 .clock_set = do_posix_clock_nosettime,
64400 diff -urNp linux-2.6.32.45/kernel/posix-timers.c linux-2.6.32.45/kernel/posix-timers.c
64401 --- linux-2.6.32.45/kernel/posix-timers.c 2011-03-27 14:31:47.000000000 -0400
64402 +++ linux-2.6.32.45/kernel/posix-timers.c 2011-08-23 20:22:38.000000000 -0400
64403 @@ -42,6 +42,7 @@
64404 #include <linux/compiler.h>
64405 #include <linux/idr.h>
64406 #include <linux/posix-timers.h>
64407 +#include <linux/grsecurity.h>
64408 #include <linux/syscalls.h>
64409 #include <linux/wait.h>
64410 #include <linux/workqueue.h>
64411 @@ -131,7 +132,7 @@ static DEFINE_SPINLOCK(idr_lock);
64412 * which we beg off on and pass to do_sys_settimeofday().
64413 */
64414
64415 -static struct k_clock posix_clocks[MAX_CLOCKS];
64416 +static struct k_clock *posix_clocks[MAX_CLOCKS];
64417
64418 /*
64419 * These ones are defined below.
64420 @@ -157,8 +158,8 @@ static inline void unlock_timer(struct k
64421 */
64422 #define CLOCK_DISPATCH(clock, call, arglist) \
64423 ((clock) < 0 ? posix_cpu_##call arglist : \
64424 - (posix_clocks[clock].call != NULL \
64425 - ? (*posix_clocks[clock].call) arglist : common_##call arglist))
64426 + (posix_clocks[clock]->call != NULL \
64427 + ? (*posix_clocks[clock]->call) arglist : common_##call arglist))
64428
64429 /*
64430 * Default clock hook functions when the struct k_clock passed
64431 @@ -172,7 +173,7 @@ static inline int common_clock_getres(co
64432 struct timespec *tp)
64433 {
64434 tp->tv_sec = 0;
64435 - tp->tv_nsec = posix_clocks[which_clock].res;
64436 + tp->tv_nsec = posix_clocks[which_clock]->res;
64437 return 0;
64438 }
64439
64440 @@ -217,9 +218,11 @@ static inline int invalid_clockid(const
64441 return 0;
64442 if ((unsigned) which_clock >= MAX_CLOCKS)
64443 return 1;
64444 - if (posix_clocks[which_clock].clock_getres != NULL)
64445 + if (posix_clocks[which_clock] == NULL)
64446 return 0;
64447 - if (posix_clocks[which_clock].res != 0)
64448 + if (posix_clocks[which_clock]->clock_getres != NULL)
64449 + return 0;
64450 + if (posix_clocks[which_clock]->res != 0)
64451 return 0;
64452 return 1;
64453 }
64454 @@ -266,29 +269,29 @@ int posix_get_coarse_res(const clockid_t
64455 */
64456 static __init int init_posix_timers(void)
64457 {
64458 - struct k_clock clock_realtime = {
64459 + static struct k_clock clock_realtime = {
64460 .clock_getres = hrtimer_get_res,
64461 };
64462 - struct k_clock clock_monotonic = {
64463 + static struct k_clock clock_monotonic = {
64464 .clock_getres = hrtimer_get_res,
64465 .clock_get = posix_ktime_get_ts,
64466 .clock_set = do_posix_clock_nosettime,
64467 };
64468 - struct k_clock clock_monotonic_raw = {
64469 + static struct k_clock clock_monotonic_raw = {
64470 .clock_getres = hrtimer_get_res,
64471 .clock_get = posix_get_monotonic_raw,
64472 .clock_set = do_posix_clock_nosettime,
64473 .timer_create = no_timer_create,
64474 .nsleep = no_nsleep,
64475 };
64476 - struct k_clock clock_realtime_coarse = {
64477 + static struct k_clock clock_realtime_coarse = {
64478 .clock_getres = posix_get_coarse_res,
64479 .clock_get = posix_get_realtime_coarse,
64480 .clock_set = do_posix_clock_nosettime,
64481 .timer_create = no_timer_create,
64482 .nsleep = no_nsleep,
64483 };
64484 - struct k_clock clock_monotonic_coarse = {
64485 + static struct k_clock clock_monotonic_coarse = {
64486 .clock_getres = posix_get_coarse_res,
64487 .clock_get = posix_get_monotonic_coarse,
64488 .clock_set = do_posix_clock_nosettime,
64489 @@ -296,6 +299,8 @@ static __init int init_posix_timers(void
64490 .nsleep = no_nsleep,
64491 };
64492
64493 + pax_track_stack();
64494 +
64495 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
64496 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
64497 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
64498 @@ -484,7 +489,7 @@ void register_posix_clock(const clockid_
64499 return;
64500 }
64501
64502 - posix_clocks[clock_id] = *new_clock;
64503 + posix_clocks[clock_id] = new_clock;
64504 }
64505 EXPORT_SYMBOL_GPL(register_posix_clock);
64506
64507 @@ -948,6 +953,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
64508 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
64509 return -EFAULT;
64510
64511 + /* only the CLOCK_REALTIME clock can be set; all other clocks
64512 + have their clock_set fptr set to a nosettime dummy function.
64513 + CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
64514 + call common_clock_set, which calls do_sys_settimeofday, which
64515 + we hook
64516 + */
64517 +
64518 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
64519 }
64520
64521 diff -urNp linux-2.6.32.45/kernel/power/hibernate.c linux-2.6.32.45/kernel/power/hibernate.c
64522 --- linux-2.6.32.45/kernel/power/hibernate.c 2011-03-27 14:31:47.000000000 -0400
64523 +++ linux-2.6.32.45/kernel/power/hibernate.c 2011-04-17 15:56:46.000000000 -0400
64524 @@ -48,14 +48,14 @@ enum {
64525
64526 static int hibernation_mode = HIBERNATION_SHUTDOWN;
64527
64528 -static struct platform_hibernation_ops *hibernation_ops;
64529 +static const struct platform_hibernation_ops *hibernation_ops;
64530
64531 /**
64532 * hibernation_set_ops - set the global hibernate operations
64533 * @ops: the hibernation operations to use in subsequent hibernation transitions
64534 */
64535
64536 -void hibernation_set_ops(struct platform_hibernation_ops *ops)
64537 +void hibernation_set_ops(const struct platform_hibernation_ops *ops)
64538 {
64539 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
64540 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
64541 diff -urNp linux-2.6.32.45/kernel/power/poweroff.c linux-2.6.32.45/kernel/power/poweroff.c
64542 --- linux-2.6.32.45/kernel/power/poweroff.c 2011-03-27 14:31:47.000000000 -0400
64543 +++ linux-2.6.32.45/kernel/power/poweroff.c 2011-04-17 15:56:46.000000000 -0400
64544 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
64545 .enable_mask = SYSRQ_ENABLE_BOOT,
64546 };
64547
64548 -static int pm_sysrq_init(void)
64549 +static int __init pm_sysrq_init(void)
64550 {
64551 register_sysrq_key('o', &sysrq_poweroff_op);
64552 return 0;
64553 diff -urNp linux-2.6.32.45/kernel/power/process.c linux-2.6.32.45/kernel/power/process.c
64554 --- linux-2.6.32.45/kernel/power/process.c 2011-03-27 14:31:47.000000000 -0400
64555 +++ linux-2.6.32.45/kernel/power/process.c 2011-04-17 15:56:46.000000000 -0400
64556 @@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_
64557 struct timeval start, end;
64558 u64 elapsed_csecs64;
64559 unsigned int elapsed_csecs;
64560 + bool timedout = false;
64561
64562 do_gettimeofday(&start);
64563
64564 end_time = jiffies + TIMEOUT;
64565 do {
64566 todo = 0;
64567 + if (time_after(jiffies, end_time))
64568 + timedout = true;
64569 read_lock(&tasklist_lock);
64570 do_each_thread(g, p) {
64571 if (frozen(p) || !freezeable(p))
64572 @@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_
64573 * It is "frozen enough". If the task does wake
64574 * up, it will immediately call try_to_freeze.
64575 */
64576 - if (!task_is_stopped_or_traced(p) &&
64577 - !freezer_should_skip(p))
64578 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
64579 todo++;
64580 + if (timedout) {
64581 + printk(KERN_ERR "Task refusing to freeze:\n");
64582 + sched_show_task(p);
64583 + }
64584 + }
64585 } while_each_thread(g, p);
64586 read_unlock(&tasklist_lock);
64587 yield(); /* Yield is okay here */
64588 - if (time_after(jiffies, end_time))
64589 - break;
64590 - } while (todo);
64591 + } while (todo && !timedout);
64592
64593 do_gettimeofday(&end);
64594 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
64595 diff -urNp linux-2.6.32.45/kernel/power/suspend.c linux-2.6.32.45/kernel/power/suspend.c
64596 --- linux-2.6.32.45/kernel/power/suspend.c 2011-03-27 14:31:47.000000000 -0400
64597 +++ linux-2.6.32.45/kernel/power/suspend.c 2011-04-17 15:56:46.000000000 -0400
64598 @@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_M
64599 [PM_SUSPEND_MEM] = "mem",
64600 };
64601
64602 -static struct platform_suspend_ops *suspend_ops;
64603 +static const struct platform_suspend_ops *suspend_ops;
64604
64605 /**
64606 * suspend_set_ops - Set the global suspend method table.
64607 * @ops: Pointer to ops structure.
64608 */
64609 -void suspend_set_ops(struct platform_suspend_ops *ops)
64610 +void suspend_set_ops(const struct platform_suspend_ops *ops)
64611 {
64612 mutex_lock(&pm_mutex);
64613 suspend_ops = ops;
64614 diff -urNp linux-2.6.32.45/kernel/printk.c linux-2.6.32.45/kernel/printk.c
64615 --- linux-2.6.32.45/kernel/printk.c 2011-03-27 14:31:47.000000000 -0400
64616 +++ linux-2.6.32.45/kernel/printk.c 2011-04-17 15:56:46.000000000 -0400
64617 @@ -278,6 +278,11 @@ int do_syslog(int type, char __user *buf
64618 char c;
64619 int error = 0;
64620
64621 +#ifdef CONFIG_GRKERNSEC_DMESG
64622 + if (grsec_enable_dmesg && !capable(CAP_SYS_ADMIN))
64623 + return -EPERM;
64624 +#endif
64625 +
64626 error = security_syslog(type);
64627 if (error)
64628 return error;
64629 diff -urNp linux-2.6.32.45/kernel/profile.c linux-2.6.32.45/kernel/profile.c
64630 --- linux-2.6.32.45/kernel/profile.c 2011-03-27 14:31:47.000000000 -0400
64631 +++ linux-2.6.32.45/kernel/profile.c 2011-05-04 17:56:28.000000000 -0400
64632 @@ -39,7 +39,7 @@ struct profile_hit {
64633 /* Oprofile timer tick hook */
64634 static int (*timer_hook)(struct pt_regs *) __read_mostly;
64635
64636 -static atomic_t *prof_buffer;
64637 +static atomic_unchecked_t *prof_buffer;
64638 static unsigned long prof_len, prof_shift;
64639
64640 int prof_on __read_mostly;
64641 @@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
64642 hits[i].pc = 0;
64643 continue;
64644 }
64645 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
64646 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
64647 hits[i].hits = hits[i].pc = 0;
64648 }
64649 }
64650 @@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc,
64651 * Add the current hit(s) and flush the write-queue out
64652 * to the global buffer:
64653 */
64654 - atomic_add(nr_hits, &prof_buffer[pc]);
64655 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
64656 for (i = 0; i < NR_PROFILE_HIT; ++i) {
64657 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
64658 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
64659 hits[i].pc = hits[i].hits = 0;
64660 }
64661 out:
64662 @@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc,
64663 if (prof_on != type || !prof_buffer)
64664 return;
64665 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
64666 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
64667 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
64668 }
64669 #endif /* !CONFIG_SMP */
64670 EXPORT_SYMBOL_GPL(profile_hits);
64671 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
64672 return -EFAULT;
64673 buf++; p++; count--; read++;
64674 }
64675 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
64676 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
64677 if (copy_to_user(buf, (void *)pnt, count))
64678 return -EFAULT;
64679 read += count;
64680 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
64681 }
64682 #endif
64683 profile_discard_flip_buffers();
64684 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
64685 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
64686 return count;
64687 }
64688
64689 diff -urNp linux-2.6.32.45/kernel/ptrace.c linux-2.6.32.45/kernel/ptrace.c
64690 --- linux-2.6.32.45/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
64691 +++ linux-2.6.32.45/kernel/ptrace.c 2011-05-22 23:02:06.000000000 -0400
64692 @@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_stru
64693 return ret;
64694 }
64695
64696 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
64697 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
64698 + unsigned int log)
64699 {
64700 const struct cred *cred = current_cred(), *tcred;
64701
64702 @@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_stru
64703 cred->gid != tcred->egid ||
64704 cred->gid != tcred->sgid ||
64705 cred->gid != tcred->gid) &&
64706 - !capable(CAP_SYS_PTRACE)) {
64707 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
64708 + (log && !capable(CAP_SYS_PTRACE)))
64709 + ) {
64710 rcu_read_unlock();
64711 return -EPERM;
64712 }
64713 @@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_stru
64714 smp_rmb();
64715 if (task->mm)
64716 dumpable = get_dumpable(task->mm);
64717 - if (!dumpable && !capable(CAP_SYS_PTRACE))
64718 + if (!dumpable &&
64719 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
64720 + (log && !capable(CAP_SYS_PTRACE))))
64721 return -EPERM;
64722
64723 return security_ptrace_access_check(task, mode);
64724 @@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struc
64725 {
64726 int err;
64727 task_lock(task);
64728 - err = __ptrace_may_access(task, mode);
64729 + err = __ptrace_may_access(task, mode, 0);
64730 + task_unlock(task);
64731 + return !err;
64732 +}
64733 +
64734 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
64735 +{
64736 + int err;
64737 + task_lock(task);
64738 + err = __ptrace_may_access(task, mode, 1);
64739 task_unlock(task);
64740 return !err;
64741 }
64742 @@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *ta
64743 goto out;
64744
64745 task_lock(task);
64746 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
64747 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
64748 task_unlock(task);
64749 if (retval)
64750 goto unlock_creds;
64751 @@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *ta
64752 goto unlock_tasklist;
64753
64754 task->ptrace = PT_PTRACED;
64755 - if (capable(CAP_SYS_PTRACE))
64756 + if (capable_nolog(CAP_SYS_PTRACE))
64757 task->ptrace |= PT_PTRACE_CAP;
64758
64759 __ptrace_link(task, current);
64760 @@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *
64761 {
64762 int copied = 0;
64763
64764 + pax_track_stack();
64765 +
64766 while (len > 0) {
64767 char buf[128];
64768 int this_len, retval;
64769 @@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct
64770 {
64771 int copied = 0;
64772
64773 + pax_track_stack();
64774 +
64775 while (len > 0) {
64776 char buf[128];
64777 int this_len, retval;
64778 @@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *c
64779 int ret = -EIO;
64780 siginfo_t siginfo;
64781
64782 + pax_track_stack();
64783 +
64784 switch (request) {
64785 case PTRACE_PEEKTEXT:
64786 case PTRACE_PEEKDATA:
64787 @@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *c
64788 ret = ptrace_setoptions(child, data);
64789 break;
64790 case PTRACE_GETEVENTMSG:
64791 - ret = put_user(child->ptrace_message, (unsigned long __user *) data);
64792 + ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
64793 break;
64794
64795 case PTRACE_GETSIGINFO:
64796 ret = ptrace_getsiginfo(child, &siginfo);
64797 if (!ret)
64798 - ret = copy_siginfo_to_user((siginfo_t __user *) data,
64799 + ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
64800 &siginfo);
64801 break;
64802
64803 case PTRACE_SETSIGINFO:
64804 - if (copy_from_user(&siginfo, (siginfo_t __user *) data,
64805 + if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
64806 sizeof siginfo))
64807 ret = -EFAULT;
64808 else
64809 @@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
64810 goto out;
64811 }
64812
64813 + if (gr_handle_ptrace(child, request)) {
64814 + ret = -EPERM;
64815 + goto out_put_task_struct;
64816 + }
64817 +
64818 if (request == PTRACE_ATTACH) {
64819 ret = ptrace_attach(child);
64820 /*
64821 * Some architectures need to do book-keeping after
64822 * a ptrace attach.
64823 */
64824 - if (!ret)
64825 + if (!ret) {
64826 arch_ptrace_attach(child);
64827 + gr_audit_ptrace(child);
64828 + }
64829 goto out_put_task_struct;
64830 }
64831
64832 @@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_
64833 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
64834 if (copied != sizeof(tmp))
64835 return -EIO;
64836 - return put_user(tmp, (unsigned long __user *)data);
64837 + return put_user(tmp, (__force unsigned long __user *)data);
64838 }
64839
64840 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
64841 @@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_st
64842 siginfo_t siginfo;
64843 int ret;
64844
64845 + pax_track_stack();
64846 +
64847 switch (request) {
64848 case PTRACE_PEEKTEXT:
64849 case PTRACE_PEEKDATA:
64850 @@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat
64851 goto out;
64852 }
64853
64854 + if (gr_handle_ptrace(child, request)) {
64855 + ret = -EPERM;
64856 + goto out_put_task_struct;
64857 + }
64858 +
64859 if (request == PTRACE_ATTACH) {
64860 ret = ptrace_attach(child);
64861 /*
64862 * Some architectures need to do book-keeping after
64863 * a ptrace attach.
64864 */
64865 - if (!ret)
64866 + if (!ret) {
64867 arch_ptrace_attach(child);
64868 + gr_audit_ptrace(child);
64869 + }
64870 goto out_put_task_struct;
64871 }
64872
64873 diff -urNp linux-2.6.32.45/kernel/rcutorture.c linux-2.6.32.45/kernel/rcutorture.c
64874 --- linux-2.6.32.45/kernel/rcutorture.c 2011-03-27 14:31:47.000000000 -0400
64875 +++ linux-2.6.32.45/kernel/rcutorture.c 2011-05-04 17:56:28.000000000 -0400
64876 @@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
64877 { 0 };
64878 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
64879 { 0 };
64880 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
64881 -static atomic_t n_rcu_torture_alloc;
64882 -static atomic_t n_rcu_torture_alloc_fail;
64883 -static atomic_t n_rcu_torture_free;
64884 -static atomic_t n_rcu_torture_mberror;
64885 -static atomic_t n_rcu_torture_error;
64886 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
64887 +static atomic_unchecked_t n_rcu_torture_alloc;
64888 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
64889 +static atomic_unchecked_t n_rcu_torture_free;
64890 +static atomic_unchecked_t n_rcu_torture_mberror;
64891 +static atomic_unchecked_t n_rcu_torture_error;
64892 static long n_rcu_torture_timers;
64893 static struct list_head rcu_torture_removed;
64894 static cpumask_var_t shuffle_tmp_mask;
64895 @@ -187,11 +187,11 @@ rcu_torture_alloc(void)
64896
64897 spin_lock_bh(&rcu_torture_lock);
64898 if (list_empty(&rcu_torture_freelist)) {
64899 - atomic_inc(&n_rcu_torture_alloc_fail);
64900 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
64901 spin_unlock_bh(&rcu_torture_lock);
64902 return NULL;
64903 }
64904 - atomic_inc(&n_rcu_torture_alloc);
64905 + atomic_inc_unchecked(&n_rcu_torture_alloc);
64906 p = rcu_torture_freelist.next;
64907 list_del_init(p);
64908 spin_unlock_bh(&rcu_torture_lock);
64909 @@ -204,7 +204,7 @@ rcu_torture_alloc(void)
64910 static void
64911 rcu_torture_free(struct rcu_torture *p)
64912 {
64913 - atomic_inc(&n_rcu_torture_free);
64914 + atomic_inc_unchecked(&n_rcu_torture_free);
64915 spin_lock_bh(&rcu_torture_lock);
64916 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
64917 spin_unlock_bh(&rcu_torture_lock);
64918 @@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
64919 i = rp->rtort_pipe_count;
64920 if (i > RCU_TORTURE_PIPE_LEN)
64921 i = RCU_TORTURE_PIPE_LEN;
64922 - atomic_inc(&rcu_torture_wcount[i]);
64923 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
64924 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
64925 rp->rtort_mbtest = 0;
64926 rcu_torture_free(rp);
64927 @@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_fr
64928 i = rp->rtort_pipe_count;
64929 if (i > RCU_TORTURE_PIPE_LEN)
64930 i = RCU_TORTURE_PIPE_LEN;
64931 - atomic_inc(&rcu_torture_wcount[i]);
64932 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
64933 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
64934 rp->rtort_mbtest = 0;
64935 list_del(&rp->rtort_free);
64936 @@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
64937 i = old_rp->rtort_pipe_count;
64938 if (i > RCU_TORTURE_PIPE_LEN)
64939 i = RCU_TORTURE_PIPE_LEN;
64940 - atomic_inc(&rcu_torture_wcount[i]);
64941 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
64942 old_rp->rtort_pipe_count++;
64943 cur_ops->deferred_free(old_rp);
64944 }
64945 @@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned l
64946 return;
64947 }
64948 if (p->rtort_mbtest == 0)
64949 - atomic_inc(&n_rcu_torture_mberror);
64950 + atomic_inc_unchecked(&n_rcu_torture_mberror);
64951 spin_lock(&rand_lock);
64952 cur_ops->read_delay(&rand);
64953 n_rcu_torture_timers++;
64954 @@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
64955 continue;
64956 }
64957 if (p->rtort_mbtest == 0)
64958 - atomic_inc(&n_rcu_torture_mberror);
64959 + atomic_inc_unchecked(&n_rcu_torture_mberror);
64960 cur_ops->read_delay(&rand);
64961 preempt_disable();
64962 pipe_count = p->rtort_pipe_count;
64963 @@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
64964 rcu_torture_current,
64965 rcu_torture_current_version,
64966 list_empty(&rcu_torture_freelist),
64967 - atomic_read(&n_rcu_torture_alloc),
64968 - atomic_read(&n_rcu_torture_alloc_fail),
64969 - atomic_read(&n_rcu_torture_free),
64970 - atomic_read(&n_rcu_torture_mberror),
64971 + atomic_read_unchecked(&n_rcu_torture_alloc),
64972 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
64973 + atomic_read_unchecked(&n_rcu_torture_free),
64974 + atomic_read_unchecked(&n_rcu_torture_mberror),
64975 n_rcu_torture_timers);
64976 - if (atomic_read(&n_rcu_torture_mberror) != 0)
64977 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
64978 cnt += sprintf(&page[cnt], " !!!");
64979 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
64980 if (i > 1) {
64981 cnt += sprintf(&page[cnt], "!!! ");
64982 - atomic_inc(&n_rcu_torture_error);
64983 + atomic_inc_unchecked(&n_rcu_torture_error);
64984 WARN_ON_ONCE(1);
64985 }
64986 cnt += sprintf(&page[cnt], "Reader Pipe: ");
64987 @@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
64988 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
64989 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
64990 cnt += sprintf(&page[cnt], " %d",
64991 - atomic_read(&rcu_torture_wcount[i]));
64992 + atomic_read_unchecked(&rcu_torture_wcount[i]));
64993 }
64994 cnt += sprintf(&page[cnt], "\n");
64995 if (cur_ops->stats)
64996 @@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
64997
64998 if (cur_ops->cleanup)
64999 cur_ops->cleanup();
65000 - if (atomic_read(&n_rcu_torture_error))
65001 + if (atomic_read_unchecked(&n_rcu_torture_error))
65002 rcu_torture_print_module_parms("End of test: FAILURE");
65003 else
65004 rcu_torture_print_module_parms("End of test: SUCCESS");
65005 @@ -1138,13 +1138,13 @@ rcu_torture_init(void)
65006
65007 rcu_torture_current = NULL;
65008 rcu_torture_current_version = 0;
65009 - atomic_set(&n_rcu_torture_alloc, 0);
65010 - atomic_set(&n_rcu_torture_alloc_fail, 0);
65011 - atomic_set(&n_rcu_torture_free, 0);
65012 - atomic_set(&n_rcu_torture_mberror, 0);
65013 - atomic_set(&n_rcu_torture_error, 0);
65014 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
65015 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
65016 + atomic_set_unchecked(&n_rcu_torture_free, 0);
65017 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
65018 + atomic_set_unchecked(&n_rcu_torture_error, 0);
65019 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
65020 - atomic_set(&rcu_torture_wcount[i], 0);
65021 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
65022 for_each_possible_cpu(cpu) {
65023 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65024 per_cpu(rcu_torture_count, cpu)[i] = 0;
65025 diff -urNp linux-2.6.32.45/kernel/rcutree.c linux-2.6.32.45/kernel/rcutree.c
65026 --- linux-2.6.32.45/kernel/rcutree.c 2011-03-27 14:31:47.000000000 -0400
65027 +++ linux-2.6.32.45/kernel/rcutree.c 2011-04-17 15:56:46.000000000 -0400
65028 @@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state
65029 /*
65030 * Do softirq processing for the current CPU.
65031 */
65032 -static void rcu_process_callbacks(struct softirq_action *unused)
65033 +static void rcu_process_callbacks(void)
65034 {
65035 /*
65036 * Memory references from any prior RCU read-side critical sections
65037 diff -urNp linux-2.6.32.45/kernel/rcutree_plugin.h linux-2.6.32.45/kernel/rcutree_plugin.h
65038 --- linux-2.6.32.45/kernel/rcutree_plugin.h 2011-03-27 14:31:47.000000000 -0400
65039 +++ linux-2.6.32.45/kernel/rcutree_plugin.h 2011-04-17 15:56:46.000000000 -0400
65040 @@ -145,7 +145,7 @@ static void rcu_preempt_note_context_swi
65041 */
65042 void __rcu_read_lock(void)
65043 {
65044 - ACCESS_ONCE(current->rcu_read_lock_nesting)++;
65045 + ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
65046 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
65047 }
65048 EXPORT_SYMBOL_GPL(__rcu_read_lock);
65049 @@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
65050 struct task_struct *t = current;
65051
65052 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
65053 - if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
65054 + if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
65055 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
65056 rcu_read_unlock_special(t);
65057 }
65058 diff -urNp linux-2.6.32.45/kernel/relay.c linux-2.6.32.45/kernel/relay.c
65059 --- linux-2.6.32.45/kernel/relay.c 2011-03-27 14:31:47.000000000 -0400
65060 +++ linux-2.6.32.45/kernel/relay.c 2011-05-16 21:46:57.000000000 -0400
65061 @@ -1222,7 +1222,7 @@ static int subbuf_splice_actor(struct fi
65062 unsigned int flags,
65063 int *nonpad_ret)
65064 {
65065 - unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
65066 + unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
65067 struct rchan_buf *rbuf = in->private_data;
65068 unsigned int subbuf_size = rbuf->chan->subbuf_size;
65069 uint64_t pos = (uint64_t) *ppos;
65070 @@ -1241,6 +1241,9 @@ static int subbuf_splice_actor(struct fi
65071 .ops = &relay_pipe_buf_ops,
65072 .spd_release = relay_page_release,
65073 };
65074 + ssize_t ret;
65075 +
65076 + pax_track_stack();
65077
65078 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
65079 return 0;
65080 diff -urNp linux-2.6.32.45/kernel/resource.c linux-2.6.32.45/kernel/resource.c
65081 --- linux-2.6.32.45/kernel/resource.c 2011-03-27 14:31:47.000000000 -0400
65082 +++ linux-2.6.32.45/kernel/resource.c 2011-04-17 15:56:46.000000000 -0400
65083 @@ -132,8 +132,18 @@ static const struct file_operations proc
65084
65085 static int __init ioresources_init(void)
65086 {
65087 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
65088 +#ifdef CONFIG_GRKERNSEC_PROC_USER
65089 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
65090 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
65091 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65092 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
65093 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
65094 +#endif
65095 +#else
65096 proc_create("ioports", 0, NULL, &proc_ioports_operations);
65097 proc_create("iomem", 0, NULL, &proc_iomem_operations);
65098 +#endif
65099 return 0;
65100 }
65101 __initcall(ioresources_init);
65102 diff -urNp linux-2.6.32.45/kernel/rtmutex.c linux-2.6.32.45/kernel/rtmutex.c
65103 --- linux-2.6.32.45/kernel/rtmutex.c 2011-03-27 14:31:47.000000000 -0400
65104 +++ linux-2.6.32.45/kernel/rtmutex.c 2011-04-17 15:56:46.000000000 -0400
65105 @@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt
65106 */
65107 spin_lock_irqsave(&pendowner->pi_lock, flags);
65108
65109 - WARN_ON(!pendowner->pi_blocked_on);
65110 + BUG_ON(!pendowner->pi_blocked_on);
65111 WARN_ON(pendowner->pi_blocked_on != waiter);
65112 WARN_ON(pendowner->pi_blocked_on->lock != lock);
65113
65114 diff -urNp linux-2.6.32.45/kernel/rtmutex-tester.c linux-2.6.32.45/kernel/rtmutex-tester.c
65115 --- linux-2.6.32.45/kernel/rtmutex-tester.c 2011-03-27 14:31:47.000000000 -0400
65116 +++ linux-2.6.32.45/kernel/rtmutex-tester.c 2011-05-04 17:56:28.000000000 -0400
65117 @@ -21,7 +21,7 @@
65118 #define MAX_RT_TEST_MUTEXES 8
65119
65120 static spinlock_t rttest_lock;
65121 -static atomic_t rttest_event;
65122 +static atomic_unchecked_t rttest_event;
65123
65124 struct test_thread_data {
65125 int opcode;
65126 @@ -64,7 +64,7 @@ static int handle_op(struct test_thread_
65127
65128 case RTTEST_LOCKCONT:
65129 td->mutexes[td->opdata] = 1;
65130 - td->event = atomic_add_return(1, &rttest_event);
65131 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65132 return 0;
65133
65134 case RTTEST_RESET:
65135 @@ -82,7 +82,7 @@ static int handle_op(struct test_thread_
65136 return 0;
65137
65138 case RTTEST_RESETEVENT:
65139 - atomic_set(&rttest_event, 0);
65140 + atomic_set_unchecked(&rttest_event, 0);
65141 return 0;
65142
65143 default:
65144 @@ -99,9 +99,9 @@ static int handle_op(struct test_thread_
65145 return ret;
65146
65147 td->mutexes[id] = 1;
65148 - td->event = atomic_add_return(1, &rttest_event);
65149 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65150 rt_mutex_lock(&mutexes[id]);
65151 - td->event = atomic_add_return(1, &rttest_event);
65152 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65153 td->mutexes[id] = 4;
65154 return 0;
65155
65156 @@ -112,9 +112,9 @@ static int handle_op(struct test_thread_
65157 return ret;
65158
65159 td->mutexes[id] = 1;
65160 - td->event = atomic_add_return(1, &rttest_event);
65161 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65162 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
65163 - td->event = atomic_add_return(1, &rttest_event);
65164 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65165 td->mutexes[id] = ret ? 0 : 4;
65166 return ret ? -EINTR : 0;
65167
65168 @@ -123,9 +123,9 @@ static int handle_op(struct test_thread_
65169 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
65170 return ret;
65171
65172 - td->event = atomic_add_return(1, &rttest_event);
65173 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65174 rt_mutex_unlock(&mutexes[id]);
65175 - td->event = atomic_add_return(1, &rttest_event);
65176 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65177 td->mutexes[id] = 0;
65178 return 0;
65179
65180 @@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mu
65181 break;
65182
65183 td->mutexes[dat] = 2;
65184 - td->event = atomic_add_return(1, &rttest_event);
65185 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65186 break;
65187
65188 case RTTEST_LOCKBKL:
65189 @@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mu
65190 return;
65191
65192 td->mutexes[dat] = 3;
65193 - td->event = atomic_add_return(1, &rttest_event);
65194 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65195 break;
65196
65197 case RTTEST_LOCKNOWAIT:
65198 @@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mu
65199 return;
65200
65201 td->mutexes[dat] = 1;
65202 - td->event = atomic_add_return(1, &rttest_event);
65203 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65204 return;
65205
65206 case RTTEST_LOCKBKL:
65207 diff -urNp linux-2.6.32.45/kernel/sched.c linux-2.6.32.45/kernel/sched.c
65208 --- linux-2.6.32.45/kernel/sched.c 2011-03-27 14:31:47.000000000 -0400
65209 +++ linux-2.6.32.45/kernel/sched.c 2011-08-21 19:29:25.000000000 -0400
65210 @@ -2764,9 +2764,10 @@ void wake_up_new_task(struct task_struct
65211 {
65212 unsigned long flags;
65213 struct rq *rq;
65214 - int cpu = get_cpu();
65215
65216 #ifdef CONFIG_SMP
65217 + int cpu = get_cpu();
65218 +
65219 rq = task_rq_lock(p, &flags);
65220 p->state = TASK_WAKING;
65221
65222 @@ -5043,7 +5044,7 @@ out:
65223 * In CONFIG_NO_HZ case, the idle load balance owner will do the
65224 * rebalancing for all the cpus for whom scheduler ticks are stopped.
65225 */
65226 -static void run_rebalance_domains(struct softirq_action *h)
65227 +static void run_rebalance_domains(void)
65228 {
65229 int this_cpu = smp_processor_id();
65230 struct rq *this_rq = cpu_rq(this_cpu);
65231 @@ -5700,6 +5701,8 @@ asmlinkage void __sched schedule(void)
65232 struct rq *rq;
65233 int cpu;
65234
65235 + pax_track_stack();
65236 +
65237 need_resched:
65238 preempt_disable();
65239 cpu = smp_processor_id();
65240 @@ -5770,7 +5773,7 @@ EXPORT_SYMBOL(schedule);
65241 * Look out! "owner" is an entirely speculative pointer
65242 * access and not reliable.
65243 */
65244 -int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
65245 +int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
65246 {
65247 unsigned int cpu;
65248 struct rq *rq;
65249 @@ -5784,10 +5787,10 @@ int mutex_spin_on_owner(struct mutex *lo
65250 * DEBUG_PAGEALLOC could have unmapped it if
65251 * the mutex owner just released it and exited.
65252 */
65253 - if (probe_kernel_address(&owner->cpu, cpu))
65254 + if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
65255 return 0;
65256 #else
65257 - cpu = owner->cpu;
65258 + cpu = task_thread_info(owner)->cpu;
65259 #endif
65260
65261 /*
65262 @@ -5816,7 +5819,7 @@ int mutex_spin_on_owner(struct mutex *lo
65263 /*
65264 * Is that owner really running on that cpu?
65265 */
65266 - if (task_thread_info(rq->curr) != owner || need_resched())
65267 + if (rq->curr != owner || need_resched())
65268 return 0;
65269
65270 cpu_relax();
65271 @@ -6359,6 +6362,8 @@ int can_nice(const struct task_struct *p
65272 /* convert nice value [19,-20] to rlimit style value [1,40] */
65273 int nice_rlim = 20 - nice;
65274
65275 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
65276 +
65277 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
65278 capable(CAP_SYS_NICE));
65279 }
65280 @@ -6392,7 +6397,8 @@ SYSCALL_DEFINE1(nice, int, increment)
65281 if (nice > 19)
65282 nice = 19;
65283
65284 - if (increment < 0 && !can_nice(current, nice))
65285 + if (increment < 0 && (!can_nice(current, nice) ||
65286 + gr_handle_chroot_nice()))
65287 return -EPERM;
65288
65289 retval = security_task_setnice(current, nice);
65290 @@ -8774,7 +8780,7 @@ static void init_sched_groups_power(int
65291 long power;
65292 int weight;
65293
65294 - WARN_ON(!sd || !sd->groups);
65295 + BUG_ON(!sd || !sd->groups);
65296
65297 if (cpu != group_first_cpu(sd->groups))
65298 return;
65299 diff -urNp linux-2.6.32.45/kernel/signal.c linux-2.6.32.45/kernel/signal.c
65300 --- linux-2.6.32.45/kernel/signal.c 2011-04-17 17:00:52.000000000 -0400
65301 +++ linux-2.6.32.45/kernel/signal.c 2011-08-16 21:15:58.000000000 -0400
65302 @@ -41,12 +41,12 @@
65303
65304 static struct kmem_cache *sigqueue_cachep;
65305
65306 -static void __user *sig_handler(struct task_struct *t, int sig)
65307 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
65308 {
65309 return t->sighand->action[sig - 1].sa.sa_handler;
65310 }
65311
65312 -static int sig_handler_ignored(void __user *handler, int sig)
65313 +static int sig_handler_ignored(__sighandler_t handler, int sig)
65314 {
65315 /* Is it explicitly or implicitly ignored? */
65316 return handler == SIG_IGN ||
65317 @@ -56,7 +56,7 @@ static int sig_handler_ignored(void __us
65318 static int sig_task_ignored(struct task_struct *t, int sig,
65319 int from_ancestor_ns)
65320 {
65321 - void __user *handler;
65322 + __sighandler_t handler;
65323
65324 handler = sig_handler(t, sig);
65325
65326 @@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc
65327 */
65328 user = get_uid(__task_cred(t)->user);
65329 atomic_inc(&user->sigpending);
65330 +
65331 + if (!override_rlimit)
65332 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
65333 if (override_rlimit ||
65334 atomic_read(&user->sigpending) <=
65335 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
65336 @@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct
65337
65338 int unhandled_signal(struct task_struct *tsk, int sig)
65339 {
65340 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
65341 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
65342 if (is_global_init(tsk))
65343 return 1;
65344 if (handler != SIG_IGN && handler != SIG_DFL)
65345 @@ -627,6 +630,13 @@ static int check_kill_permission(int sig
65346 }
65347 }
65348
65349 + /* allow glibc communication via tgkill to other threads in our
65350 + thread group */
65351 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
65352 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
65353 + && gr_handle_signal(t, sig))
65354 + return -EPERM;
65355 +
65356 return security_task_kill(t, info, sig, 0);
65357 }
65358
65359 @@ -968,7 +978,7 @@ __group_send_sig_info(int sig, struct si
65360 return send_signal(sig, info, p, 1);
65361 }
65362
65363 -static int
65364 +int
65365 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
65366 {
65367 return send_signal(sig, info, t, 0);
65368 @@ -1005,6 +1015,7 @@ force_sig_info(int sig, struct siginfo *
65369 unsigned long int flags;
65370 int ret, blocked, ignored;
65371 struct k_sigaction *action;
65372 + int is_unhandled = 0;
65373
65374 spin_lock_irqsave(&t->sighand->siglock, flags);
65375 action = &t->sighand->action[sig-1];
65376 @@ -1019,9 +1030,18 @@ force_sig_info(int sig, struct siginfo *
65377 }
65378 if (action->sa.sa_handler == SIG_DFL)
65379 t->signal->flags &= ~SIGNAL_UNKILLABLE;
65380 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
65381 + is_unhandled = 1;
65382 ret = specific_send_sig_info(sig, info, t);
65383 spin_unlock_irqrestore(&t->sighand->siglock, flags);
65384
65385 + /* only deal with unhandled signals; java etc. trigger SIGSEGV during
65386 + normal operation */
65387 + if (is_unhandled) {
65388 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
65389 + gr_handle_crash(t, sig);
65390 + }
65391 +
65392 return ret;
65393 }
65394
65395 @@ -1081,8 +1101,11 @@ int group_send_sig_info(int sig, struct
65396 {
65397 int ret = check_kill_permission(sig, info, p);
65398
65399 - if (!ret && sig)
65400 + if (!ret && sig) {
65401 ret = do_send_sig_info(sig, info, p, true);
65402 + if (!ret)
65403 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
65404 + }
65405
65406 return ret;
65407 }
65408 @@ -1644,6 +1667,8 @@ void ptrace_notify(int exit_code)
65409 {
65410 siginfo_t info;
65411
65412 + pax_track_stack();
65413 +
65414 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
65415
65416 memset(&info, 0, sizeof info);
65417 @@ -2275,7 +2300,15 @@ do_send_specific(pid_t tgid, pid_t pid,
65418 int error = -ESRCH;
65419
65420 rcu_read_lock();
65421 - p = find_task_by_vpid(pid);
65422 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
65423 + /* allow glibc communication via tgkill to other threads in our
65424 + thread group */
65425 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
65426 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
65427 + p = find_task_by_vpid_unrestricted(pid);
65428 + else
65429 +#endif
65430 + p = find_task_by_vpid(pid);
65431 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
65432 error = check_kill_permission(sig, info, p);
65433 /*
65434 diff -urNp linux-2.6.32.45/kernel/smp.c linux-2.6.32.45/kernel/smp.c
65435 --- linux-2.6.32.45/kernel/smp.c 2011-03-27 14:31:47.000000000 -0400
65436 +++ linux-2.6.32.45/kernel/smp.c 2011-04-17 15:56:46.000000000 -0400
65437 @@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void
65438 }
65439 EXPORT_SYMBOL(smp_call_function);
65440
65441 -void ipi_call_lock(void)
65442 +void ipi_call_lock(void) __acquires(call_function.lock)
65443 {
65444 spin_lock(&call_function.lock);
65445 }
65446
65447 -void ipi_call_unlock(void)
65448 +void ipi_call_unlock(void) __releases(call_function.lock)
65449 {
65450 spin_unlock(&call_function.lock);
65451 }
65452
65453 -void ipi_call_lock_irq(void)
65454 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
65455 {
65456 spin_lock_irq(&call_function.lock);
65457 }
65458
65459 -void ipi_call_unlock_irq(void)
65460 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
65461 {
65462 spin_unlock_irq(&call_function.lock);
65463 }
65464 diff -urNp linux-2.6.32.45/kernel/softirq.c linux-2.6.32.45/kernel/softirq.c
65465 --- linux-2.6.32.45/kernel/softirq.c 2011-03-27 14:31:47.000000000 -0400
65466 +++ linux-2.6.32.45/kernel/softirq.c 2011-08-05 20:33:55.000000000 -0400
65467 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
65468
65469 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
65470
65471 -char *softirq_to_name[NR_SOFTIRQS] = {
65472 +const char * const softirq_to_name[NR_SOFTIRQS] = {
65473 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
65474 "TASKLET", "SCHED", "HRTIMER", "RCU"
65475 };
65476 @@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
65477
65478 asmlinkage void __do_softirq(void)
65479 {
65480 - struct softirq_action *h;
65481 + const struct softirq_action *h;
65482 __u32 pending;
65483 int max_restart = MAX_SOFTIRQ_RESTART;
65484 int cpu;
65485 @@ -233,7 +233,7 @@ restart:
65486 kstat_incr_softirqs_this_cpu(h - softirq_vec);
65487
65488 trace_softirq_entry(h, softirq_vec);
65489 - h->action(h);
65490 + h->action();
65491 trace_softirq_exit(h, softirq_vec);
65492 if (unlikely(prev_count != preempt_count())) {
65493 printk(KERN_ERR "huh, entered softirq %td %s %p"
65494 @@ -363,9 +363,11 @@ void raise_softirq(unsigned int nr)
65495 local_irq_restore(flags);
65496 }
65497
65498 -void open_softirq(int nr, void (*action)(struct softirq_action *))
65499 +void open_softirq(int nr, void (*action)(void))
65500 {
65501 - softirq_vec[nr].action = action;
65502 + pax_open_kernel();
65503 + *(void **)&softirq_vec[nr].action = action;
65504 + pax_close_kernel();
65505 }
65506
65507 /*
65508 @@ -419,7 +421,7 @@ void __tasklet_hi_schedule_first(struct
65509
65510 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
65511
65512 -static void tasklet_action(struct softirq_action *a)
65513 +static void tasklet_action(void)
65514 {
65515 struct tasklet_struct *list;
65516
65517 @@ -454,7 +456,7 @@ static void tasklet_action(struct softir
65518 }
65519 }
65520
65521 -static void tasklet_hi_action(struct softirq_action *a)
65522 +static void tasklet_hi_action(void)
65523 {
65524 struct tasklet_struct *list;
65525
65526 diff -urNp linux-2.6.32.45/kernel/sys.c linux-2.6.32.45/kernel/sys.c
65527 --- linux-2.6.32.45/kernel/sys.c 2011-03-27 14:31:47.000000000 -0400
65528 +++ linux-2.6.32.45/kernel/sys.c 2011-08-11 19:51:54.000000000 -0400
65529 @@ -133,6 +133,12 @@ static int set_one_prio(struct task_stru
65530 error = -EACCES;
65531 goto out;
65532 }
65533 +
65534 + if (gr_handle_chroot_setpriority(p, niceval)) {
65535 + error = -EACCES;
65536 + goto out;
65537 + }
65538 +
65539 no_nice = security_task_setnice(p, niceval);
65540 if (no_nice) {
65541 error = no_nice;
65542 @@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which,
65543 !(user = find_user(who)))
65544 goto out_unlock; /* No processes for this user */
65545
65546 - do_each_thread(g, p)
65547 + do_each_thread(g, p) {
65548 if (__task_cred(p)->uid == who)
65549 error = set_one_prio(p, niceval, error);
65550 - while_each_thread(g, p);
65551 + } while_each_thread(g, p);
65552 if (who != cred->uid)
65553 free_uid(user); /* For find_user() */
65554 break;
65555 @@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which,
65556 !(user = find_user(who)))
65557 goto out_unlock; /* No processes for this user */
65558
65559 - do_each_thread(g, p)
65560 + do_each_thread(g, p) {
65561 if (__task_cred(p)->uid == who) {
65562 niceval = 20 - task_nice(p);
65563 if (niceval > retval)
65564 retval = niceval;
65565 }
65566 - while_each_thread(g, p);
65567 + } while_each_thread(g, p);
65568 if (who != cred->uid)
65569 free_uid(user); /* for find_user() */
65570 break;
65571 @@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
65572 goto error;
65573 }
65574
65575 + if (gr_check_group_change(new->gid, new->egid, -1))
65576 + goto error;
65577 +
65578 if (rgid != (gid_t) -1 ||
65579 (egid != (gid_t) -1 && egid != old->gid))
65580 new->sgid = new->egid;
65581 @@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
65582 goto error;
65583
65584 retval = -EPERM;
65585 +
65586 + if (gr_check_group_change(gid, gid, gid))
65587 + goto error;
65588 +
65589 if (capable(CAP_SETGID))
65590 new->gid = new->egid = new->sgid = new->fsgid = gid;
65591 else if (gid == old->gid || gid == old->sgid)
65592 @@ -567,12 +580,19 @@ static int set_user(struct cred *new)
65593 if (!new_user)
65594 return -EAGAIN;
65595
65596 + /*
65597 + * We don't fail in case of NPROC limit excess here because too many
65598 + * poorly written programs don't check set*uid() return code, assuming
65599 + * it never fails if called by root. We may still enforce NPROC limit
65600 + * for programs doing set*uid()+execve() by harmlessly deferring the
65601 + * failure to the execve() stage.
65602 + */
65603 if (atomic_read(&new_user->processes) >=
65604 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
65605 - new_user != INIT_USER) {
65606 - free_uid(new_user);
65607 - return -EAGAIN;
65608 - }
65609 + new_user != INIT_USER)
65610 + current->flags |= PF_NPROC_EXCEEDED;
65611 + else
65612 + current->flags &= ~PF_NPROC_EXCEEDED;
65613
65614 free_uid(new->user);
65615 new->user = new_user;
65616 @@ -627,6 +647,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
65617 goto error;
65618 }
65619
65620 + if (gr_check_user_change(new->uid, new->euid, -1))
65621 + goto error;
65622 +
65623 if (new->uid != old->uid) {
65624 retval = set_user(new);
65625 if (retval < 0)
65626 @@ -675,6 +698,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
65627 goto error;
65628
65629 retval = -EPERM;
65630 +
65631 + if (gr_check_crash_uid(uid))
65632 + goto error;
65633 + if (gr_check_user_change(uid, uid, uid))
65634 + goto error;
65635 +
65636 if (capable(CAP_SETUID)) {
65637 new->suid = new->uid = uid;
65638 if (uid != old->uid) {
65639 @@ -732,6 +761,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
65640 goto error;
65641 }
65642
65643 + if (gr_check_user_change(ruid, euid, -1))
65644 + goto error;
65645 +
65646 if (ruid != (uid_t) -1) {
65647 new->uid = ruid;
65648 if (ruid != old->uid) {
65649 @@ -800,6 +832,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
65650 goto error;
65651 }
65652
65653 + if (gr_check_group_change(rgid, egid, -1))
65654 + goto error;
65655 +
65656 if (rgid != (gid_t) -1)
65657 new->gid = rgid;
65658 if (egid != (gid_t) -1)
65659 @@ -849,6 +884,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
65660 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
65661 goto error;
65662
65663 + if (gr_check_user_change(-1, -1, uid))
65664 + goto error;
65665 +
65666 if (uid == old->uid || uid == old->euid ||
65667 uid == old->suid || uid == old->fsuid ||
65668 capable(CAP_SETUID)) {
65669 @@ -889,6 +927,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
65670 if (gid == old->gid || gid == old->egid ||
65671 gid == old->sgid || gid == old->fsgid ||
65672 capable(CAP_SETGID)) {
65673 + if (gr_check_group_change(-1, -1, gid))
65674 + goto error;
65675 +
65676 if (gid != old_fsgid) {
65677 new->fsgid = gid;
65678 goto change_okay;
65679 @@ -1454,7 +1495,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
65680 error = get_dumpable(me->mm);
65681 break;
65682 case PR_SET_DUMPABLE:
65683 - if (arg2 < 0 || arg2 > 1) {
65684 + if (arg2 > 1) {
65685 error = -EINVAL;
65686 break;
65687 }
65688 diff -urNp linux-2.6.32.45/kernel/sysctl.c linux-2.6.32.45/kernel/sysctl.c
65689 --- linux-2.6.32.45/kernel/sysctl.c 2011-03-27 14:31:47.000000000 -0400
65690 +++ linux-2.6.32.45/kernel/sysctl.c 2011-04-17 15:56:46.000000000 -0400
65691 @@ -63,6 +63,13 @@
65692 static int deprecated_sysctl_warning(struct __sysctl_args *args);
65693
65694 #if defined(CONFIG_SYSCTL)
65695 +#include <linux/grsecurity.h>
65696 +#include <linux/grinternal.h>
65697 +
65698 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
65699 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
65700 + const int op);
65701 +extern int gr_handle_chroot_sysctl(const int op);
65702
65703 /* External variables not in a header file. */
65704 extern int C_A_D;
65705 @@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_ta
65706 static int proc_taint(struct ctl_table *table, int write,
65707 void __user *buffer, size_t *lenp, loff_t *ppos);
65708 #endif
65709 +extern ctl_table grsecurity_table[];
65710
65711 static struct ctl_table root_table[];
65712 static struct ctl_table_root sysctl_table_root;
65713 @@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
65714 int sysctl_legacy_va_layout;
65715 #endif
65716
65717 +#ifdef CONFIG_PAX_SOFTMODE
65718 +static ctl_table pax_table[] = {
65719 + {
65720 + .ctl_name = CTL_UNNUMBERED,
65721 + .procname = "softmode",
65722 + .data = &pax_softmode,
65723 + .maxlen = sizeof(unsigned int),
65724 + .mode = 0600,
65725 + .proc_handler = &proc_dointvec,
65726 + },
65727 +
65728 + { .ctl_name = 0 }
65729 +};
65730 +#endif
65731 +
65732 extern int prove_locking;
65733 extern int lock_stat;
65734
65735 @@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = N
65736 #endif
65737
65738 static struct ctl_table kern_table[] = {
65739 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
65740 + {
65741 + .ctl_name = CTL_UNNUMBERED,
65742 + .procname = "grsecurity",
65743 + .mode = 0500,
65744 + .child = grsecurity_table,
65745 + },
65746 +#endif
65747 +
65748 +#ifdef CONFIG_PAX_SOFTMODE
65749 + {
65750 + .ctl_name = CTL_UNNUMBERED,
65751 + .procname = "pax",
65752 + .mode = 0500,
65753 + .child = pax_table,
65754 + },
65755 +#endif
65756 +
65757 {
65758 .ctl_name = CTL_UNNUMBERED,
65759 .procname = "sched_child_runs_first",
65760 @@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
65761 .data = &modprobe_path,
65762 .maxlen = KMOD_PATH_LEN,
65763 .mode = 0644,
65764 - .proc_handler = &proc_dostring,
65765 - .strategy = &sysctl_string,
65766 + .proc_handler = &proc_dostring_modpriv,
65767 + .strategy = &sysctl_string_modpriv,
65768 },
65769 {
65770 .ctl_name = CTL_UNNUMBERED,
65771 @@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
65772 .mode = 0644,
65773 .proc_handler = &proc_dointvec
65774 },
65775 + {
65776 + .procname = "heap_stack_gap",
65777 + .data = &sysctl_heap_stack_gap,
65778 + .maxlen = sizeof(sysctl_heap_stack_gap),
65779 + .mode = 0644,
65780 + .proc_handler = proc_doulongvec_minmax,
65781 + },
65782 #else
65783 {
65784 .ctl_name = CTL_UNNUMBERED,
65785 @@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl
65786 return 0;
65787 }
65788
65789 +static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
65790 +
65791 static int parse_table(int __user *name, int nlen,
65792 void __user *oldval, size_t __user *oldlenp,
65793 void __user *newval, size_t newlen,
65794 @@ -1821,7 +1871,7 @@ repeat:
65795 if (n == table->ctl_name) {
65796 int error;
65797 if (table->child) {
65798 - if (sysctl_perm(root, table, MAY_EXEC))
65799 + if (sysctl_perm_nochk(root, table, MAY_EXEC))
65800 return -EPERM;
65801 name++;
65802 nlen--;
65803 @@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *r
65804 int error;
65805 int mode;
65806
65807 + if (table->parent != NULL && table->parent->procname != NULL &&
65808 + table->procname != NULL &&
65809 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
65810 + return -EACCES;
65811 + if (gr_handle_chroot_sysctl(op))
65812 + return -EACCES;
65813 + error = gr_handle_sysctl(table, op);
65814 + if (error)
65815 + return error;
65816 +
65817 + error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
65818 + if (error)
65819 + return error;
65820 +
65821 + if (root->permissions)
65822 + mode = root->permissions(root, current->nsproxy, table);
65823 + else
65824 + mode = table->mode;
65825 +
65826 + return test_perm(mode, op);
65827 +}
65828 +
65829 +int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
65830 +{
65831 + int error;
65832 + int mode;
65833 +
65834 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
65835 if (error)
65836 return error;
65837 @@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *tabl
65838 buffer, lenp, ppos);
65839 }
65840
65841 +int proc_dostring_modpriv(struct ctl_table *table, int write,
65842 + void __user *buffer, size_t *lenp, loff_t *ppos)
65843 +{
65844 + if (write && !capable(CAP_SYS_MODULE))
65845 + return -EPERM;
65846 +
65847 + return _proc_do_string(table->data, table->maxlen, write,
65848 + buffer, lenp, ppos);
65849 +}
65850 +
65851
65852 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
65853 int *valp,
65854 @@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(v
65855 vleft = table->maxlen / sizeof(unsigned long);
65856 left = *lenp;
65857
65858 - for (; left && vleft--; i++, min++, max++, first=0) {
65859 + for (; left && vleft--; i++, first=0) {
65860 if (write) {
65861 while (left) {
65862 char c;
65863 @@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *tabl
65864 return -ENOSYS;
65865 }
65866
65867 +int proc_dostring_modpriv(struct ctl_table *table, int write,
65868 + void __user *buffer, size_t *lenp, loff_t *ppos)
65869 +{
65870 + return -ENOSYS;
65871 +}
65872 +
65873 int proc_dointvec(struct ctl_table *table, int write,
65874 void __user *buffer, size_t *lenp, loff_t *ppos)
65875 {
65876 @@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *tabl
65877 return 1;
65878 }
65879
65880 +int sysctl_string_modpriv(struct ctl_table *table,
65881 + void __user *oldval, size_t __user *oldlenp,
65882 + void __user *newval, size_t newlen)
65883 +{
65884 + if (newval && newlen && !capable(CAP_SYS_MODULE))
65885 + return -EPERM;
65886 +
65887 + return sysctl_string(table, oldval, oldlenp, newval, newlen);
65888 +}
65889 +
65890 /*
65891 * This function makes sure that all of the integers in the vector
65892 * are between the minimum and maximum values given in the arrays
65893 @@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *tabl
65894 return -ENOSYS;
65895 }
65896
65897 +int sysctl_string_modpriv(struct ctl_table *table,
65898 + void __user *oldval, size_t __user *oldlenp,
65899 + void __user *newval, size_t newlen)
65900 +{
65901 + return -ENOSYS;
65902 +}
65903 +
65904 int sysctl_intvec(struct ctl_table *table,
65905 void __user *oldval, size_t __user *oldlenp,
65906 void __user *newval, size_t newlen)
65907 @@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
65908 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
65909 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
65910 EXPORT_SYMBOL(proc_dostring);
65911 +EXPORT_SYMBOL(proc_dostring_modpriv);
65912 EXPORT_SYMBOL(proc_doulongvec_minmax);
65913 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
65914 EXPORT_SYMBOL(register_sysctl_table);
65915 @@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
65916 EXPORT_SYMBOL(sysctl_jiffies);
65917 EXPORT_SYMBOL(sysctl_ms_jiffies);
65918 EXPORT_SYMBOL(sysctl_string);
65919 +EXPORT_SYMBOL(sysctl_string_modpriv);
65920 EXPORT_SYMBOL(sysctl_data);
65921 EXPORT_SYMBOL(unregister_sysctl_table);
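Note on the kernel/sysctl.c hunks above: kernel.modprobe is rerouted through proc_dostring_modpriv/sysctl_string_modpriv, which reject writes from tasks lacking CAP_SYS_MODULE even when the file mode (0644) would otherwise allow them; reads are unchanged. A small user-space sketch of the observable effect, assuming a kernel with this patch applied and the usual procfs path for that sysctl:

/* Sketch: on a patched kernel, writing /proc/sys/kernel/modprobe without
 * CAP_SYS_MODULE is expected to fail with EPERM even though open() for
 * write succeeds; reading keeps working as before. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
    const char *path = "/proc/sys/kernel/modprobe";
    char buf[256];
    ssize_t n;
    int fd;

    fd = open(path, O_RDONLY);
    if (fd >= 0) {
        if ((n = read(fd, buf, sizeof(buf) - 1)) > 0) {
            buf[n] = '\0';
            printf("current modprobe helper: %s", buf);
        }
        close(fd);
    }

    fd = open(path, O_WRONLY);
    if (fd < 0) {
        perror("open for write");
        return 1;
    }
    if (write(fd, "/sbin/modprobe\n", 15) < 0)
        printf("write rejected: %s (expected EPERM without CAP_SYS_MODULE)\n",
               strerror(errno));
    close(fd);
    return 0;
}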
65922 diff -urNp linux-2.6.32.45/kernel/sysctl_check.c linux-2.6.32.45/kernel/sysctl_check.c
65923 --- linux-2.6.32.45/kernel/sysctl_check.c 2011-03-27 14:31:47.000000000 -0400
65924 +++ linux-2.6.32.45/kernel/sysctl_check.c 2011-04-17 15:56:46.000000000 -0400
65925 @@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *n
65926 } else {
65927 if ((table->strategy == sysctl_data) ||
65928 (table->strategy == sysctl_string) ||
65929 + (table->strategy == sysctl_string_modpriv) ||
65930 (table->strategy == sysctl_intvec) ||
65931 (table->strategy == sysctl_jiffies) ||
65932 (table->strategy == sysctl_ms_jiffies) ||
65933 (table->proc_handler == proc_dostring) ||
65934 + (table->proc_handler == proc_dostring_modpriv) ||
65935 (table->proc_handler == proc_dointvec) ||
65936 (table->proc_handler == proc_dointvec_minmax) ||
65937 (table->proc_handler == proc_dointvec_jiffies) ||
65938 diff -urNp linux-2.6.32.45/kernel/taskstats.c linux-2.6.32.45/kernel/taskstats.c
65939 --- linux-2.6.32.45/kernel/taskstats.c 2011-07-13 17:23:04.000000000 -0400
65940 +++ linux-2.6.32.45/kernel/taskstats.c 2011-07-13 17:23:19.000000000 -0400
65941 @@ -26,9 +26,12 @@
65942 #include <linux/cgroup.h>
65943 #include <linux/fs.h>
65944 #include <linux/file.h>
65945 +#include <linux/grsecurity.h>
65946 #include <net/genetlink.h>
65947 #include <asm/atomic.h>
65948
65949 +extern int gr_is_taskstats_denied(int pid);
65950 +
65951 /*
65952 * Maximum length of a cpumask that can be specified in
65953 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
65954 @@ -442,6 +445,9 @@ static int taskstats_user_cmd(struct sk_
65955 size_t size;
65956 cpumask_var_t mask;
65957
65958 + if (gr_is_taskstats_denied(current->pid))
65959 + return -EACCES;
65960 +
65961 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
65962 return -ENOMEM;
65963
65964 diff -urNp linux-2.6.32.45/kernel/time/tick-broadcast.c linux-2.6.32.45/kernel/time/tick-broadcast.c
65965 --- linux-2.6.32.45/kernel/time/tick-broadcast.c 2011-05-23 16:56:59.000000000 -0400
65966 +++ linux-2.6.32.45/kernel/time/tick-broadcast.c 2011-05-23 16:57:13.000000000 -0400
65967 @@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct cl
65968 * then clear the broadcast bit.
65969 */
65970 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
65971 - int cpu = smp_processor_id();
65972 + cpu = smp_processor_id();
65973
65974 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
65975 tick_broadcast_clear_oneshot(cpu);
65976 diff -urNp linux-2.6.32.45/kernel/time/timekeeping.c linux-2.6.32.45/kernel/time/timekeeping.c
65977 --- linux-2.6.32.45/kernel/time/timekeeping.c 2011-06-25 12:55:35.000000000 -0400
65978 +++ linux-2.6.32.45/kernel/time/timekeeping.c 2011-06-25 12:56:37.000000000 -0400
65979 @@ -14,6 +14,7 @@
65980 #include <linux/init.h>
65981 #include <linux/mm.h>
65982 #include <linux/sched.h>
65983 +#include <linux/grsecurity.h>
65984 #include <linux/sysdev.h>
65985 #include <linux/clocksource.h>
65986 #include <linux/jiffies.h>
65987 @@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
65988 */
65989 struct timespec ts = xtime;
65990 timespec_add_ns(&ts, nsec);
65991 - ACCESS_ONCE(xtime_cache) = ts;
65992 + ACCESS_ONCE_RW(xtime_cache) = ts;
65993 }
65994
65995 /* must hold xtime_lock */
65996 @@ -333,6 +334,8 @@ int do_settimeofday(struct timespec *tv)
65997 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
65998 return -EINVAL;
65999
66000 + gr_log_timechange();
66001 +
66002 write_seqlock_irqsave(&xtime_lock, flags);
66003
66004 timekeeping_forward_now();
66005 diff -urNp linux-2.6.32.45/kernel/time/timer_list.c linux-2.6.32.45/kernel/time/timer_list.c
66006 --- linux-2.6.32.45/kernel/time/timer_list.c 2011-03-27 14:31:47.000000000 -0400
66007 +++ linux-2.6.32.45/kernel/time/timer_list.c 2011-04-17 15:56:46.000000000 -0400
66008 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
66009
66010 static void print_name_offset(struct seq_file *m, void *sym)
66011 {
66012 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66013 + SEQ_printf(m, "<%p>", NULL);
66014 +#else
66015 char symname[KSYM_NAME_LEN];
66016
66017 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
66018 SEQ_printf(m, "<%p>", sym);
66019 else
66020 SEQ_printf(m, "%s", symname);
66021 +#endif
66022 }
66023
66024 static void
66025 @@ -112,7 +116,11 @@ next_one:
66026 static void
66027 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
66028 {
66029 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66030 + SEQ_printf(m, " .base: %p\n", NULL);
66031 +#else
66032 SEQ_printf(m, " .base: %p\n", base);
66033 +#endif
66034 SEQ_printf(m, " .index: %d\n",
66035 base->index);
66036 SEQ_printf(m, " .resolution: %Lu nsecs\n",
66037 @@ -289,7 +297,11 @@ static int __init init_timer_list_procfs
66038 {
66039 struct proc_dir_entry *pe;
66040
66041 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66042 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
66043 +#else
66044 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
66045 +#endif
66046 if (!pe)
66047 return -ENOMEM;
66048 return 0;
66049 diff -urNp linux-2.6.32.45/kernel/time/timer_stats.c linux-2.6.32.45/kernel/time/timer_stats.c
66050 --- linux-2.6.32.45/kernel/time/timer_stats.c 2011-03-27 14:31:47.000000000 -0400
66051 +++ linux-2.6.32.45/kernel/time/timer_stats.c 2011-05-04 17:56:28.000000000 -0400
66052 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
66053 static unsigned long nr_entries;
66054 static struct entry entries[MAX_ENTRIES];
66055
66056 -static atomic_t overflow_count;
66057 +static atomic_unchecked_t overflow_count;
66058
66059 /*
66060 * The entries are in a hash-table, for fast lookup:
66061 @@ -140,7 +140,7 @@ static void reset_entries(void)
66062 nr_entries = 0;
66063 memset(entries, 0, sizeof(entries));
66064 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
66065 - atomic_set(&overflow_count, 0);
66066 + atomic_set_unchecked(&overflow_count, 0);
66067 }
66068
66069 static struct entry *alloc_entry(void)
66070 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
66071 if (likely(entry))
66072 entry->count++;
66073 else
66074 - atomic_inc(&overflow_count);
66075 + atomic_inc_unchecked(&overflow_count);
66076
66077 out_unlock:
66078 spin_unlock_irqrestore(lock, flags);
66079 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
66080
66081 static void print_name_offset(struct seq_file *m, unsigned long addr)
66082 {
66083 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66084 + seq_printf(m, "<%p>", NULL);
66085 +#else
66086 char symname[KSYM_NAME_LEN];
66087
66088 if (lookup_symbol_name(addr, symname) < 0)
66089 seq_printf(m, "<%p>", (void *)addr);
66090 else
66091 seq_printf(m, "%s", symname);
66092 +#endif
66093 }
66094
66095 static int tstats_show(struct seq_file *m, void *v)
66096 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
66097
66098 seq_puts(m, "Timer Stats Version: v0.2\n");
66099 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
66100 - if (atomic_read(&overflow_count))
66101 + if (atomic_read_unchecked(&overflow_count))
66102 seq_printf(m, "Overflow: %d entries\n",
66103 - atomic_read(&overflow_count));
66104 + atomic_read_unchecked(&overflow_count));
66105
66106 for (i = 0; i < nr_entries; i++) {
66107 entry = entries + i;
66108 @@ -415,7 +419,11 @@ static int __init init_tstats_procfs(voi
66109 {
66110 struct proc_dir_entry *pe;
66111
66112 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66113 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
66114 +#else
66115 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
66116 +#endif
66117 if (!pe)
66118 return -ENOMEM;
66119 return 0;
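Note on the timer_list.c and timer_stats.c hunks above: with GRKERNSEC_PROC_ADD the proc entries are created as 0400 and 0600 instead of 0444 and 0644, and with GRKERNSEC_HIDESYM the printed symbol pointers are replaced by NULL. The permission part is easy to observe from user space; a sketch (the files only exist when the corresponding kernel options are enabled):

/* Sketch: report the permission bits of the timer procfs files, which the
 * patch tightens to 0400/0600 when GRKERNSEC_PROC_ADD is enabled. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>

static void show_mode(const char *path)
{
    struct stat st;

    if (stat(path, &st) != 0) {
        printf("%s: %s\n", path, strerror(errno));
        return;
    }
    printf("%s: mode %04o\n", path, st.st_mode & 07777);
}

int main(void)
{
    show_mode("/proc/timer_list");   /* 0444 vanilla, 0400 with PROC_ADD */
    show_mode("/proc/timer_stats");  /* 0644 vanilla, 0600 with PROC_ADD */
    return 0;
}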
66120 diff -urNp linux-2.6.32.45/kernel/time.c linux-2.6.32.45/kernel/time.c
66121 --- linux-2.6.32.45/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
66122 +++ linux-2.6.32.45/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
66123 @@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec
66124 return error;
66125
66126 if (tz) {
66127 + /* we log in do_settimeofday called below, so don't log twice
66128 + */
66129 + if (!tv)
66130 + gr_log_timechange();
66131 +
66132 /* SMP safe, global irq locking makes it work. */
66133 sys_tz = *tz;
66134 update_vsyscall_tz();
66135 @@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
66136 * Avoid unnecessary multiplications/divisions in the
66137 * two most common HZ cases:
66138 */
66139 -unsigned int inline jiffies_to_msecs(const unsigned long j)
66140 +inline unsigned int jiffies_to_msecs(const unsigned long j)
66141 {
66142 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
66143 return (MSEC_PER_SEC / HZ) * j;
66144 @@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(con
66145 }
66146 EXPORT_SYMBOL(jiffies_to_msecs);
66147
66148 -unsigned int inline jiffies_to_usecs(const unsigned long j)
66149 +inline unsigned int jiffies_to_usecs(const unsigned long j)
66150 {
66151 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
66152 return (USEC_PER_SEC / HZ) * j;
66153 diff -urNp linux-2.6.32.45/kernel/timer.c linux-2.6.32.45/kernel/timer.c
66154 --- linux-2.6.32.45/kernel/timer.c 2011-03-27 14:31:47.000000000 -0400
66155 +++ linux-2.6.32.45/kernel/timer.c 2011-04-17 15:56:46.000000000 -0400
66156 @@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
66157 /*
66158 * This function runs timers and the timer-tq in bottom half context.
66159 */
66160 -static void run_timer_softirq(struct softirq_action *h)
66161 +static void run_timer_softirq(void)
66162 {
66163 struct tvec_base *base = __get_cpu_var(tvec_bases);
66164
66165 diff -urNp linux-2.6.32.45/kernel/trace/blktrace.c linux-2.6.32.45/kernel/trace/blktrace.c
66166 --- linux-2.6.32.45/kernel/trace/blktrace.c 2011-03-27 14:31:47.000000000 -0400
66167 +++ linux-2.6.32.45/kernel/trace/blktrace.c 2011-05-04 17:56:28.000000000 -0400
66168 @@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct f
66169 struct blk_trace *bt = filp->private_data;
66170 char buf[16];
66171
66172 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
66173 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
66174
66175 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
66176 }
66177 @@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(str
66178 return 1;
66179
66180 bt = buf->chan->private_data;
66181 - atomic_inc(&bt->dropped);
66182 + atomic_inc_unchecked(&bt->dropped);
66183 return 0;
66184 }
66185
66186 @@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_qu
66187
66188 bt->dir = dir;
66189 bt->dev = dev;
66190 - atomic_set(&bt->dropped, 0);
66191 + atomic_set_unchecked(&bt->dropped, 0);
66192
66193 ret = -EIO;
66194 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
66195 diff -urNp linux-2.6.32.45/kernel/trace/ftrace.c linux-2.6.32.45/kernel/trace/ftrace.c
66196 --- linux-2.6.32.45/kernel/trace/ftrace.c 2011-06-25 12:55:35.000000000 -0400
66197 +++ linux-2.6.32.45/kernel/trace/ftrace.c 2011-06-25 12:56:37.000000000 -0400
66198 @@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod,
66199
66200 ip = rec->ip;
66201
66202 + ret = ftrace_arch_code_modify_prepare();
66203 + FTRACE_WARN_ON(ret);
66204 + if (ret)
66205 + return 0;
66206 +
66207 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
66208 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
66209 if (ret) {
66210 ftrace_bug(ret, ip);
66211 rec->flags |= FTRACE_FL_FAILED;
66212 - return 0;
66213 }
66214 - return 1;
66215 + return ret ? 0 : 1;
66216 }
66217
66218 /*
66219 diff -urNp linux-2.6.32.45/kernel/trace/ring_buffer.c linux-2.6.32.45/kernel/trace/ring_buffer.c
66220 --- linux-2.6.32.45/kernel/trace/ring_buffer.c 2011-03-27 14:31:47.000000000 -0400
66221 +++ linux-2.6.32.45/kernel/trace/ring_buffer.c 2011-04-17 15:56:46.000000000 -0400
66222 @@ -606,7 +606,7 @@ static struct list_head *rb_list_head(st
66223 * the reader page). But if the next page is a header page,
66224 * its flags will be non zero.
66225 */
66226 -static int inline
66227 +static inline int
66228 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
66229 struct buffer_page *page, struct list_head *list)
66230 {
66231 diff -urNp linux-2.6.32.45/kernel/trace/trace.c linux-2.6.32.45/kernel/trace/trace.c
66232 --- linux-2.6.32.45/kernel/trace/trace.c 2011-03-27 14:31:47.000000000 -0400
66233 +++ linux-2.6.32.45/kernel/trace/trace.c 2011-05-16 21:46:57.000000000 -0400
66234 @@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(
66235 size_t rem;
66236 unsigned int i;
66237
66238 + pax_track_stack();
66239 +
66240 /* copy the tracer to avoid using a global lock all around */
66241 mutex_lock(&trace_types_lock);
66242 if (unlikely(old_tracer != current_trace && current_trace)) {
66243 @@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file
66244 int entries, size, i;
66245 size_t ret;
66246
66247 + pax_track_stack();
66248 +
66249 if (*ppos & (PAGE_SIZE - 1)) {
66250 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
66251 return -EINVAL;
66252 @@ -3816,10 +3820,9 @@ static const struct file_operations trac
66253 };
66254 #endif
66255
66256 -static struct dentry *d_tracer;
66257 -
66258 struct dentry *tracing_init_dentry(void)
66259 {
66260 + static struct dentry *d_tracer;
66261 static int once;
66262
66263 if (d_tracer)
66264 @@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
66265 return d_tracer;
66266 }
66267
66268 -static struct dentry *d_percpu;
66269 -
66270 struct dentry *tracing_dentry_percpu(void)
66271 {
66272 + static struct dentry *d_percpu;
66273 static int once;
66274 struct dentry *d_tracer;
66275
66276 diff -urNp linux-2.6.32.45/kernel/trace/trace_events.c linux-2.6.32.45/kernel/trace/trace_events.c
66277 --- linux-2.6.32.45/kernel/trace/trace_events.c 2011-03-27 14:31:47.000000000 -0400
66278 +++ linux-2.6.32.45/kernel/trace/trace_events.c 2011-08-05 20:33:55.000000000 -0400
66279 @@ -951,13 +951,10 @@ static LIST_HEAD(ftrace_module_file_list
66280 * Modules must own their file_operations to keep up with
66281 * reference counting.
66282 */
66283 +
66284 struct ftrace_module_file_ops {
66285 struct list_head list;
66286 struct module *mod;
66287 - struct file_operations id;
66288 - struct file_operations enable;
66289 - struct file_operations format;
66290 - struct file_operations filter;
66291 };
66292
66293 static void remove_subsystem_dir(const char *name)
66294 @@ -1004,17 +1001,12 @@ trace_create_file_ops(struct module *mod
66295
66296 file_ops->mod = mod;
66297
66298 - file_ops->id = ftrace_event_id_fops;
66299 - file_ops->id.owner = mod;
66300 -
66301 - file_ops->enable = ftrace_enable_fops;
66302 - file_ops->enable.owner = mod;
66303 -
66304 - file_ops->filter = ftrace_event_filter_fops;
66305 - file_ops->filter.owner = mod;
66306 -
66307 - file_ops->format = ftrace_event_format_fops;
66308 - file_ops->format.owner = mod;
66309 + pax_open_kernel();
66310 + *(void **)&mod->trace_id.owner = mod;
66311 + *(void **)&mod->trace_enable.owner = mod;
66312 + *(void **)&mod->trace_filter.owner = mod;
66313 + *(void **)&mod->trace_format.owner = mod;
66314 + pax_close_kernel();
66315
66316 list_add(&file_ops->list, &ftrace_module_file_list);
66317
66318 @@ -1063,8 +1055,8 @@ static void trace_module_add_events(stru
66319 call->mod = mod;
66320 list_add(&call->list, &ftrace_events);
66321 event_create_dir(call, d_events,
66322 - &file_ops->id, &file_ops->enable,
66323 - &file_ops->filter, &file_ops->format);
66324 + &mod->trace_id, &mod->trace_enable,
66325 + &mod->trace_filter, &mod->trace_format);
66326 }
66327 }
66328
66329 diff -urNp linux-2.6.32.45/kernel/trace/trace_mmiotrace.c linux-2.6.32.45/kernel/trace/trace_mmiotrace.c
66330 --- linux-2.6.32.45/kernel/trace/trace_mmiotrace.c 2011-03-27 14:31:47.000000000 -0400
66331 +++ linux-2.6.32.45/kernel/trace/trace_mmiotrace.c 2011-05-04 17:56:28.000000000 -0400
66332 @@ -23,7 +23,7 @@ struct header_iter {
66333 static struct trace_array *mmio_trace_array;
66334 static bool overrun_detected;
66335 static unsigned long prev_overruns;
66336 -static atomic_t dropped_count;
66337 +static atomic_unchecked_t dropped_count;
66338
66339 static void mmio_reset_data(struct trace_array *tr)
66340 {
66341 @@ -126,7 +126,7 @@ static void mmio_close(struct trace_iter
66342
66343 static unsigned long count_overruns(struct trace_iterator *iter)
66344 {
66345 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
66346 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
66347 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
66348
66349 if (over > prev_overruns)
66350 @@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct
66351 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
66352 sizeof(*entry), 0, pc);
66353 if (!event) {
66354 - atomic_inc(&dropped_count);
66355 + atomic_inc_unchecked(&dropped_count);
66356 return;
66357 }
66358 entry = ring_buffer_event_data(event);
66359 @@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct
66360 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
66361 sizeof(*entry), 0, pc);
66362 if (!event) {
66363 - atomic_inc(&dropped_count);
66364 + atomic_inc_unchecked(&dropped_count);
66365 return;
66366 }
66367 entry = ring_buffer_event_data(event);
66368 diff -urNp linux-2.6.32.45/kernel/trace/trace_output.c linux-2.6.32.45/kernel/trace/trace_output.c
66369 --- linux-2.6.32.45/kernel/trace/trace_output.c 2011-03-27 14:31:47.000000000 -0400
66370 +++ linux-2.6.32.45/kernel/trace/trace_output.c 2011-04-17 15:56:46.000000000 -0400
66371 @@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s,
66372 return 0;
66373 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
66374 if (!IS_ERR(p)) {
66375 - p = mangle_path(s->buffer + s->len, p, "\n");
66376 + p = mangle_path(s->buffer + s->len, p, "\n\\");
66377 if (p) {
66378 s->len = p - s->buffer;
66379 return 1;
66380 diff -urNp linux-2.6.32.45/kernel/trace/trace_stack.c linux-2.6.32.45/kernel/trace/trace_stack.c
66381 --- linux-2.6.32.45/kernel/trace/trace_stack.c 2011-03-27 14:31:47.000000000 -0400
66382 +++ linux-2.6.32.45/kernel/trace/trace_stack.c 2011-04-17 15:56:46.000000000 -0400
66383 @@ -50,7 +50,7 @@ static inline void check_stack(void)
66384 return;
66385
66386 /* we do not handle interrupt stacks yet */
66387 - if (!object_is_on_stack(&this_size))
66388 + if (!object_starts_on_stack(&this_size))
66389 return;
66390
66391 local_irq_save(flags);
66392 diff -urNp linux-2.6.32.45/kernel/trace/trace_workqueue.c linux-2.6.32.45/kernel/trace/trace_workqueue.c
66393 --- linux-2.6.32.45/kernel/trace/trace_workqueue.c 2011-03-27 14:31:47.000000000 -0400
66394 +++ linux-2.6.32.45/kernel/trace/trace_workqueue.c 2011-04-17 15:56:46.000000000 -0400
66395 @@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
66396 int cpu;
66397 pid_t pid;
66398 /* Can be inserted from interrupt or user context, need to be atomic */
66399 - atomic_t inserted;
66400 + atomic_unchecked_t inserted;
66401 /*
66402 * Don't need to be atomic, works are serialized in a single workqueue thread
66403 * on a single CPU.
66404 @@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_st
66405 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
66406 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
66407 if (node->pid == wq_thread->pid) {
66408 - atomic_inc(&node->inserted);
66409 + atomic_inc_unchecked(&node->inserted);
66410 goto found;
66411 }
66412 }
66413 @@ -205,7 +205,7 @@ static int workqueue_stat_show(struct se
66414 tsk = get_pid_task(pid, PIDTYPE_PID);
66415 if (tsk) {
66416 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
66417 - atomic_read(&cws->inserted), cws->executed,
66418 + atomic_read_unchecked(&cws->inserted), cws->executed,
66419 tsk->comm);
66420 put_task_struct(tsk);
66421 }
66422 diff -urNp linux-2.6.32.45/kernel/user.c linux-2.6.32.45/kernel/user.c
66423 --- linux-2.6.32.45/kernel/user.c 2011-03-27 14:31:47.000000000 -0400
66424 +++ linux-2.6.32.45/kernel/user.c 2011-04-17 15:56:46.000000000 -0400
66425 @@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct use
66426 spin_lock_irq(&uidhash_lock);
66427 up = uid_hash_find(uid, hashent);
66428 if (up) {
66429 + put_user_ns(ns);
66430 key_put(new->uid_keyring);
66431 key_put(new->session_keyring);
66432 kmem_cache_free(uid_cachep, new);
66433 diff -urNp linux-2.6.32.45/lib/bug.c linux-2.6.32.45/lib/bug.c
66434 --- linux-2.6.32.45/lib/bug.c 2011-03-27 14:31:47.000000000 -0400
66435 +++ linux-2.6.32.45/lib/bug.c 2011-04-17 15:56:46.000000000 -0400
66436 @@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned l
66437 return BUG_TRAP_TYPE_NONE;
66438
66439 bug = find_bug(bugaddr);
66440 + if (!bug)
66441 + return BUG_TRAP_TYPE_NONE;
66442
66443 printk(KERN_EMERG "------------[ cut here ]------------\n");
66444
66445 diff -urNp linux-2.6.32.45/lib/debugobjects.c linux-2.6.32.45/lib/debugobjects.c
66446 --- linux-2.6.32.45/lib/debugobjects.c 2011-07-13 17:23:04.000000000 -0400
66447 +++ linux-2.6.32.45/lib/debugobjects.c 2011-07-13 17:23:19.000000000 -0400
66448 @@ -277,7 +277,7 @@ static void debug_object_is_on_stack(voi
66449 if (limit > 4)
66450 return;
66451
66452 - is_on_stack = object_is_on_stack(addr);
66453 + is_on_stack = object_starts_on_stack(addr);
66454 if (is_on_stack == onstack)
66455 return;
66456
66457 diff -urNp linux-2.6.32.45/lib/dma-debug.c linux-2.6.32.45/lib/dma-debug.c
66458 --- linux-2.6.32.45/lib/dma-debug.c 2011-03-27 14:31:47.000000000 -0400
66459 +++ linux-2.6.32.45/lib/dma-debug.c 2011-04-17 15:56:46.000000000 -0400
66460 @@ -861,7 +861,7 @@ out:
66461
66462 static void check_for_stack(struct device *dev, void *addr)
66463 {
66464 - if (object_is_on_stack(addr))
66465 + if (object_starts_on_stack(addr))
66466 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
66467 "stack [addr=%p]\n", addr);
66468 }
66469 diff -urNp linux-2.6.32.45/lib/idr.c linux-2.6.32.45/lib/idr.c
66470 --- linux-2.6.32.45/lib/idr.c 2011-03-27 14:31:47.000000000 -0400
66471 +++ linux-2.6.32.45/lib/idr.c 2011-04-17 15:56:46.000000000 -0400
66472 @@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, in
66473 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
66474
66475 /* if already at the top layer, we need to grow */
66476 - if (id >= 1 << (idp->layers * IDR_BITS)) {
66477 + if (id >= (1 << (idp->layers * IDR_BITS))) {
66478 *starting_id = id;
66479 return IDR_NEED_TO_GROW;
66480 }
66481 diff -urNp linux-2.6.32.45/lib/inflate.c linux-2.6.32.45/lib/inflate.c
66482 --- linux-2.6.32.45/lib/inflate.c 2011-03-27 14:31:47.000000000 -0400
66483 +++ linux-2.6.32.45/lib/inflate.c 2011-04-17 15:56:46.000000000 -0400
66484 @@ -266,7 +266,7 @@ static void free(void *where)
66485 malloc_ptr = free_mem_ptr;
66486 }
66487 #else
66488 -#define malloc(a) kmalloc(a, GFP_KERNEL)
66489 +#define malloc(a) kmalloc((a), GFP_KERNEL)
66490 #define free(a) kfree(a)
66491 #endif
66492
66493 diff -urNp linux-2.6.32.45/lib/Kconfig.debug linux-2.6.32.45/lib/Kconfig.debug
66494 --- linux-2.6.32.45/lib/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
66495 +++ linux-2.6.32.45/lib/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
66496 @@ -905,7 +905,7 @@ config LATENCYTOP
66497 select STACKTRACE
66498 select SCHEDSTATS
66499 select SCHED_DEBUG
66500 - depends on HAVE_LATENCYTOP_SUPPORT
66501 + depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
66502 help
66503 Enable this option if you want to use the LatencyTOP tool
66504 to find out which userspace is blocking on what kernel operations.
66505 diff -urNp linux-2.6.32.45/lib/kobject.c linux-2.6.32.45/lib/kobject.c
66506 --- linux-2.6.32.45/lib/kobject.c 2011-03-27 14:31:47.000000000 -0400
66507 +++ linux-2.6.32.45/lib/kobject.c 2011-04-17 15:56:46.000000000 -0400
66508 @@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct ko
66509 return ret;
66510 }
66511
66512 -struct sysfs_ops kobj_sysfs_ops = {
66513 +const struct sysfs_ops kobj_sysfs_ops = {
66514 .show = kobj_attr_show,
66515 .store = kobj_attr_store,
66516 };
66517 @@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
66518 * If the kset was not able to be created, NULL will be returned.
66519 */
66520 static struct kset *kset_create(const char *name,
66521 - struct kset_uevent_ops *uevent_ops,
66522 + const struct kset_uevent_ops *uevent_ops,
66523 struct kobject *parent_kobj)
66524 {
66525 struct kset *kset;
66526 @@ -832,7 +832,7 @@ static struct kset *kset_create(const ch
66527 * If the kset was not able to be created, NULL will be returned.
66528 */
66529 struct kset *kset_create_and_add(const char *name,
66530 - struct kset_uevent_ops *uevent_ops,
66531 + const struct kset_uevent_ops *uevent_ops,
66532 struct kobject *parent_kobj)
66533 {
66534 struct kset *kset;
66535 diff -urNp linux-2.6.32.45/lib/kobject_uevent.c linux-2.6.32.45/lib/kobject_uevent.c
66536 --- linux-2.6.32.45/lib/kobject_uevent.c 2011-03-27 14:31:47.000000000 -0400
66537 +++ linux-2.6.32.45/lib/kobject_uevent.c 2011-04-17 15:56:46.000000000 -0400
66538 @@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *k
66539 const char *subsystem;
66540 struct kobject *top_kobj;
66541 struct kset *kset;
66542 - struct kset_uevent_ops *uevent_ops;
66543 + const struct kset_uevent_ops *uevent_ops;
66544 u64 seq;
66545 int i = 0;
66546 int retval = 0;
66547 diff -urNp linux-2.6.32.45/lib/kref.c linux-2.6.32.45/lib/kref.c
66548 --- linux-2.6.32.45/lib/kref.c 2011-03-27 14:31:47.000000000 -0400
66549 +++ linux-2.6.32.45/lib/kref.c 2011-04-17 15:56:46.000000000 -0400
66550 @@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
66551 */
66552 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
66553 {
66554 - WARN_ON(release == NULL);
66555 + BUG_ON(release == NULL);
66556 WARN_ON(release == (void (*)(struct kref *))kfree);
66557
66558 if (atomic_dec_and_test(&kref->refcount)) {
66559 diff -urNp linux-2.6.32.45/lib/parser.c linux-2.6.32.45/lib/parser.c
66560 --- linux-2.6.32.45/lib/parser.c 2011-03-27 14:31:47.000000000 -0400
66561 +++ linux-2.6.32.45/lib/parser.c 2011-04-17 15:56:46.000000000 -0400
66562 @@ -126,7 +126,7 @@ static int match_number(substring_t *s,
66563 char *buf;
66564 int ret;
66565
66566 - buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
66567 + buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
66568 if (!buf)
66569 return -ENOMEM;
66570 memcpy(buf, s->from, s->to - s->from);
66571 diff -urNp linux-2.6.32.45/lib/radix-tree.c linux-2.6.32.45/lib/radix-tree.c
66572 --- linux-2.6.32.45/lib/radix-tree.c 2011-03-27 14:31:47.000000000 -0400
66573 +++ linux-2.6.32.45/lib/radix-tree.c 2011-04-17 15:56:46.000000000 -0400
66574 @@ -81,7 +81,7 @@ struct radix_tree_preload {
66575 int nr;
66576 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
66577 };
66578 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
66579 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
66580
66581 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
66582 {
66583 diff -urNp linux-2.6.32.45/lib/random32.c linux-2.6.32.45/lib/random32.c
66584 --- linux-2.6.32.45/lib/random32.c 2011-03-27 14:31:47.000000000 -0400
66585 +++ linux-2.6.32.45/lib/random32.c 2011-04-17 15:56:46.000000000 -0400
66586 @@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *
66587 */
66588 static inline u32 __seed(u32 x, u32 m)
66589 {
66590 - return (x < m) ? x + m : x;
66591 + return (x <= m) ? x + m + 1 : x;
66592 }
66593
66594 /**
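Note on the __seed() change above: the Tausworthe state words must end up strictly greater than their minimum m (1, 7 or 15), but the old expression could return exactly m (for x == 0 or x == m), producing an invalid seed word. The new expression maps every x <= m to x + m + 1, which is always greater than m. A self-contained worked example of the two mappings at the boundary values:

/* Worked example of the __seed() boundary fix: old vs. new mapping for the
 * minimums used by lib/random32.c (1, 7, 15). Any result <= m is invalid. */
#include <stdio.h>

static unsigned seed_old(unsigned x, unsigned m)
{
    return (x < m) ? x + m : x;
}

static unsigned seed_new(unsigned x, unsigned m)
{
    return (x <= m) ? x + m + 1 : x;
}

int main(void)
{
    const unsigned m[] = { 1, 7, 15 };
    const unsigned x[] = { 0, 1, 7, 15, 16 };

    for (unsigned i = 0; i < 3; i++)
        for (unsigned j = 0; j < 5; j++)
            printf("m=%2u x=%2u  old=%2u%s  new=%2u\n",
                   m[i], x[j],
                   seed_old(x[j], m[i]),
                   seed_old(x[j], m[i]) <= m[i] ? " (invalid)" : "",
                   seed_new(x[j], m[i]));
    return 0;
}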
66595 diff -urNp linux-2.6.32.45/lib/vsprintf.c linux-2.6.32.45/lib/vsprintf.c
66596 --- linux-2.6.32.45/lib/vsprintf.c 2011-03-27 14:31:47.000000000 -0400
66597 +++ linux-2.6.32.45/lib/vsprintf.c 2011-04-17 15:56:46.000000000 -0400
66598 @@ -16,6 +16,9 @@
66599 * - scnprintf and vscnprintf
66600 */
66601
66602 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66603 +#define __INCLUDED_BY_HIDESYM 1
66604 +#endif
66605 #include <stdarg.h>
66606 #include <linux/module.h>
66607 #include <linux/types.h>
66608 @@ -546,12 +549,12 @@ static char *number(char *buf, char *end
66609 return buf;
66610 }
66611
66612 -static char *string(char *buf, char *end, char *s, struct printf_spec spec)
66613 +static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
66614 {
66615 int len, i;
66616
66617 if ((unsigned long)s < PAGE_SIZE)
66618 - s = "<NULL>";
66619 + s = "(null)";
66620
66621 len = strnlen(s, spec.precision);
66622
66623 @@ -581,7 +584,7 @@ static char *symbol_string(char *buf, ch
66624 unsigned long value = (unsigned long) ptr;
66625 #ifdef CONFIG_KALLSYMS
66626 char sym[KSYM_SYMBOL_LEN];
66627 - if (ext != 'f' && ext != 's')
66628 + if (ext != 'f' && ext != 's' && ext != 'a')
66629 sprint_symbol(sym, value);
66630 else
66631 kallsyms_lookup(value, NULL, NULL, NULL, sym);
66632 @@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf,
66633 * - 'f' For simple symbolic function names without offset
66634 * - 'S' For symbolic direct pointers with offset
66635 * - 's' For symbolic direct pointers without offset
66636 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
66637 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
66638 * - 'R' For a struct resource pointer, it prints the range of
66639 * addresses (not the name nor the flags)
66640 * - 'M' For a 6-byte MAC address, it prints the address in the
66641 @@ -822,7 +827,7 @@ static char *pointer(const char *fmt, ch
66642 struct printf_spec spec)
66643 {
66644 if (!ptr)
66645 - return string(buf, end, "(null)", spec);
66646 + return string(buf, end, "(nil)", spec);
66647
66648 switch (*fmt) {
66649 case 'F':
66650 @@ -831,6 +836,14 @@ static char *pointer(const char *fmt, ch
66651 case 's':
66652 /* Fallthrough */
66653 case 'S':
66654 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66655 + break;
66656 +#else
66657 + return symbol_string(buf, end, ptr, spec, *fmt);
66658 +#endif
66659 + case 'a':
66660 + /* Fallthrough */
66661 + case 'A':
66662 return symbol_string(buf, end, ptr, spec, *fmt);
66663 case 'R':
66664 return resource_string(buf, end, ptr, spec);
66665 @@ -1445,7 +1458,7 @@ do { \
66666 size_t len;
66667 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
66668 || (unsigned long)save_str < PAGE_SIZE)
66669 - save_str = "<NULL>";
66670 + save_str = "(null)";
66671 len = strlen(save_str);
66672 if (str + len + 1 < end)
66673 memcpy(str, save_str, len + 1);
66674 @@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size,
66675 typeof(type) value; \
66676 if (sizeof(type) == 8) { \
66677 args = PTR_ALIGN(args, sizeof(u32)); \
66678 - *(u32 *)&value = *(u32 *)args; \
66679 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
66680 + *(u32 *)&value = *(const u32 *)args; \
66681 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
66682 } else { \
66683 args = PTR_ALIGN(args, sizeof(type)); \
66684 - value = *(typeof(type) *)args; \
66685 + value = *(const typeof(type) *)args; \
66686 } \
66687 args += sizeof(type); \
66688 value; \
66689 @@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size,
66690 const char *str_arg = args;
66691 size_t len = strlen(str_arg);
66692 args += len + 1;
66693 - str = string(str, end, (char *)str_arg, spec);
66694 + str = string(str, end, str_arg, spec);
66695 break;
66696 }
66697
66698 diff -urNp linux-2.6.32.45/localversion-grsec linux-2.6.32.45/localversion-grsec
66699 --- linux-2.6.32.45/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
66700 +++ linux-2.6.32.45/localversion-grsec 2011-04-17 15:56:46.000000000 -0400
66701 @@ -0,0 +1 @@
66702 +-grsec
66703 diff -urNp linux-2.6.32.45/Makefile linux-2.6.32.45/Makefile
66704 --- linux-2.6.32.45/Makefile 2011-08-16 20:37:25.000000000 -0400
66705 +++ linux-2.6.32.45/Makefile 2011-08-26 22:53:29.000000000 -0400
66706 @@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
66707
66708 HOSTCC = gcc
66709 HOSTCXX = g++
66710 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
66711 -HOSTCXXFLAGS = -O2
66712 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
66713 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
66714 +HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
66715
66716 # Decide whether to build built-in, modular, or both.
66717 # Normally, just do built-in.
66718 @@ -342,10 +343,12 @@ LINUXINCLUDE := -Iinclude \
66719 KBUILD_CPPFLAGS := -D__KERNEL__
66720
66721 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
66722 + -W -Wno-unused-parameter -Wno-missing-field-initializers \
66723 -fno-strict-aliasing -fno-common \
66724 -Werror-implicit-function-declaration \
66725 -Wno-format-security \
66726 -fno-delete-null-pointer-checks
66727 +KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
66728 KBUILD_AFLAGS := -D__ASSEMBLY__
66729
66730 # Read KERNELRELEASE from include/config/kernel.release (if it exists)
66731 @@ -376,9 +379,10 @@ export RCS_TAR_IGNORE := --exclude SCCS
66732 # Rules shared between *config targets and build targets
66733
66734 # Basic helpers built in scripts/
66735 -PHONY += scripts_basic
66736 -scripts_basic:
66737 +PHONY += scripts_basic0 scripts_basic gcc-plugins
66738 +scripts_basic0:
66739 $(Q)$(MAKE) $(build)=scripts/basic
66740 +scripts_basic: scripts_basic0 gcc-plugins
66741
66742 # To avoid any implicit rule to kick in, define an empty command.
66743 scripts/basic/%: scripts_basic ;
66744 @@ -403,7 +407,7 @@ endif
66745 # of make so .config is not included in this case either (for *config).
66746
66747 no-dot-config-targets := clean mrproper distclean \
66748 - cscope TAGS tags help %docs check% \
66749 + cscope gtags TAGS tags help %docs check% \
66750 include/linux/version.h headers_% \
66751 kernelrelease kernelversion
66752
66753 @@ -526,6 +530,24 @@ else
66754 KBUILD_CFLAGS += -O2
66755 endif
66756
66757 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh $(HOSTCC)), y)
66758 +CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
66759 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
66760 +STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
66761 +endif
66762 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN
66763 +gcc-plugins:
66764 + $(Q)$(MAKE) $(build)=tools/gcc
66765 +else
66766 +gcc-plugins:
66767 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
66768 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.))
66769 +else
66770 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
66771 +endif
66772 + $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
66773 +endif
66774 +
66775 include $(srctree)/arch/$(SRCARCH)/Makefile
66776
66777 ifneq ($(CONFIG_FRAME_WARN),0)
66778 @@ -644,7 +666,7 @@ export mod_strip_cmd
66779
66780
66781 ifeq ($(KBUILD_EXTMOD),)
66782 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
66783 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
66784
66785 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
66786 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
66787 @@ -840,6 +862,7 @@ define rule_vmlinux-modpost
66788 endef
66789
66790 # vmlinux image - including updated kernel symbols
66791 +vmlinux: KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
66792 vmlinux: $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) vmlinux.o $(kallsyms.o) FORCE
66793 ifdef CONFIG_HEADERS_CHECK
66794 $(Q)$(MAKE) -f $(srctree)/Makefile headers_check
66795 @@ -970,7 +993,7 @@ ifneq ($(KBUILD_SRC),)
66796 endif
66797
66798 # prepare2 creates a makefile if using a separate output directory
66799 -prepare2: prepare3 outputmakefile
66800 +prepare2: prepare3 outputmakefile gcc-plugins
66801
66802 prepare1: prepare2 include/linux/version.h include/linux/utsrelease.h \
66803 include/asm include/config/auto.conf
66804 @@ -1124,6 +1147,7 @@ all: modules
66805 # using awk while concatenating to the final file.
66806
66807 PHONY += modules
66808 +modules: KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
66809 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
66810 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
66811 @$(kecho) ' Building modules, stage 2.';
66812 @@ -1198,7 +1222,7 @@ MRPROPER_FILES += .config .config.old in
66813 include/linux/autoconf.h include/linux/version.h \
66814 include/linux/utsrelease.h \
66815 include/linux/bounds.h include/asm*/asm-offsets.h \
66816 - Module.symvers Module.markers tags TAGS cscope*
66817 + Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
66818
66819 # clean - Delete most, but leave enough to build external modules
66820 #
66821 @@ -1289,6 +1313,7 @@ help:
66822 @echo ' modules_prepare - Set up for building external modules'
66823 @echo ' tags/TAGS - Generate tags file for editors'
66824 @echo ' cscope - Generate cscope index'
66825 + @echo ' gtags - Generate GNU GLOBAL index'
66826 @echo ' kernelrelease - Output the release version string'
66827 @echo ' kernelversion - Output the version stored in Makefile'
66828 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
66829 @@ -1390,6 +1415,7 @@ PHONY += $(module-dirs) modules
66830 $(module-dirs): crmodverdir $(objtree)/Module.symvers
66831 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
66832
66833 +modules: KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
66834 modules: $(module-dirs)
66835 @$(kecho) ' Building modules, stage 2.';
66836 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
66837 @@ -1421,7 +1447,7 @@ clean: $(clean-dirs)
66838 $(call cmd,rmdirs)
66839 $(call cmd,rmfiles)
66840 @find $(KBUILD_EXTMOD) $(RCS_FIND_IGNORE) \
66841 - \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
66842 + \( -name '*.[oas]' -o -name '*.[ks]o' -o -name '.*.cmd' \
66843 -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
66844 -o -name '*.gcno' \) -type f -print | xargs rm -f
66845
66846 @@ -1445,7 +1471,7 @@ endif # KBUILD_EXTMOD
66847 quiet_cmd_tags = GEN $@
66848 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
66849
66850 -tags TAGS cscope: FORCE
66851 +tags TAGS cscope gtags: FORCE
66852 $(call cmd,tags)
66853
66854 # Scripts to check various things for consistency
66855 diff -urNp linux-2.6.32.45/mm/backing-dev.c linux-2.6.32.45/mm/backing-dev.c
66856 --- linux-2.6.32.45/mm/backing-dev.c 2011-03-27 14:31:47.000000000 -0400
66857 +++ linux-2.6.32.45/mm/backing-dev.c 2011-08-11 19:48:17.000000000 -0400
66858 @@ -272,7 +272,7 @@ static void bdi_task_init(struct backing
66859 list_add_tail_rcu(&wb->list, &bdi->wb_list);
66860 spin_unlock(&bdi->wb_lock);
66861
66862 - tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
66863 + tsk->flags |= PF_SWAPWRITE;
66864 set_freezable();
66865
66866 /*
66867 @@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rc
66868 * Add the default flusher task that gets created for any bdi
66869 * that has dirty data pending writeout
66870 */
66871 -void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
66872 +static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
66873 {
66874 if (!bdi_cap_writeback_dirty(bdi))
66875 return;
66876 diff -urNp linux-2.6.32.45/mm/filemap.c linux-2.6.32.45/mm/filemap.c
66877 --- linux-2.6.32.45/mm/filemap.c 2011-03-27 14:31:47.000000000 -0400
66878 +++ linux-2.6.32.45/mm/filemap.c 2011-04-17 15:56:46.000000000 -0400
66879 @@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file
66880 struct address_space *mapping = file->f_mapping;
66881
66882 if (!mapping->a_ops->readpage)
66883 - return -ENOEXEC;
66884 + return -ENODEV;
66885 file_accessed(file);
66886 vma->vm_ops = &generic_file_vm_ops;
66887 vma->vm_flags |= VM_CAN_NONLINEAR;
66888 @@ -2027,6 +2027,7 @@ inline int generic_write_checks(struct f
66889 *pos = i_size_read(inode);
66890
66891 if (limit != RLIM_INFINITY) {
66892 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
66893 if (*pos >= limit) {
66894 send_sig(SIGXFSZ, current, 0);
66895 return -EFBIG;
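Note on the mm/filemap.c hunk above: the patch only inserts the gr_learn_resource() call; the surrounding logic it hooks is the stock RLIMIT_FSIZE check in generic_write_checks(), which sends SIGXFSZ and fails the write with EFBIG once the file offset reaches the limit. A short sketch of that baseline behaviour for context (nothing grsecurity-specific; the temp file path is an assumption of the demo):

/* Sketch of the RLIMIT_FSIZE path that gr_learn_resource() hooks above:
 * once *pos reaches the soft limit, the write raises SIGXFSZ and fails
 * with EFBIG. SIGXFSZ is ignored so the failure is visible as an errno. */
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/resource.h>
#include <unistd.h>

int main(void)
{
    struct rlimit rl = { .rlim_cur = 4096, .rlim_max = 4096 };
    char page[4096];
    ssize_t n;
    int fd;

    signal(SIGXFSZ, SIG_IGN);   /* fail the write instead of killing us */
    memset(page, 'x', sizeof(page));

    if (setrlimit(RLIMIT_FSIZE, &rl) != 0)
        perror("setrlimit");

    fd = open("/tmp/fsize-demo", O_CREAT | O_WRONLY | O_TRUNC, 0600);
    if (fd < 0) {
        perror("open");
        return 1;
    }
    n = write(fd, page, sizeof(page));   /* fills the file up to the limit */
    printf("first write: %zd bytes\n", n);
    if (write(fd, page, 1) < 0)          /* now *pos >= limit */
        printf("second write failed: %s (expected EFBIG)\n", strerror(errno));
    close(fd);
    unlink("/tmp/fsize-demo");
    return 0;
}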
66896 diff -urNp linux-2.6.32.45/mm/fremap.c linux-2.6.32.45/mm/fremap.c
66897 --- linux-2.6.32.45/mm/fremap.c 2011-03-27 14:31:47.000000000 -0400
66898 +++ linux-2.6.32.45/mm/fremap.c 2011-04-17 15:56:46.000000000 -0400
66899 @@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
66900 retry:
66901 vma = find_vma(mm, start);
66902
66903 +#ifdef CONFIG_PAX_SEGMEXEC
66904 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
66905 + goto out;
66906 +#endif
66907 +
66908 /*
66909 * Make sure the vma is shared, that it supports prefaulting,
66910 * and that the remapped range is valid and fully within
66911 @@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
66912 /*
66913 * drop PG_Mlocked flag for over-mapped range
66914 */
66915 - unsigned int saved_flags = vma->vm_flags;
66916 + unsigned long saved_flags = vma->vm_flags;
66917 munlock_vma_pages_range(vma, start, start + size);
66918 vma->vm_flags = saved_flags;
66919 }
66920 diff -urNp linux-2.6.32.45/mm/highmem.c linux-2.6.32.45/mm/highmem.c
66921 --- linux-2.6.32.45/mm/highmem.c 2011-03-27 14:31:47.000000000 -0400
66922 +++ linux-2.6.32.45/mm/highmem.c 2011-04-17 15:56:46.000000000 -0400
66923 @@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
66924 * So no dangers, even with speculative execution.
66925 */
66926 page = pte_page(pkmap_page_table[i]);
66927 + pax_open_kernel();
66928 pte_clear(&init_mm, (unsigned long)page_address(page),
66929 &pkmap_page_table[i]);
66930 -
66931 + pax_close_kernel();
66932 set_page_address(page, NULL);
66933 need_flush = 1;
66934 }
66935 @@ -177,9 +178,11 @@ start:
66936 }
66937 }
66938 vaddr = PKMAP_ADDR(last_pkmap_nr);
66939 +
66940 + pax_open_kernel();
66941 set_pte_at(&init_mm, vaddr,
66942 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
66943 -
66944 + pax_close_kernel();
66945 pkmap_count[last_pkmap_nr] = 1;
66946 set_page_address(page, (void *)vaddr);
66947
66948 diff -urNp linux-2.6.32.45/mm/hugetlb.c linux-2.6.32.45/mm/hugetlb.c
66949 --- linux-2.6.32.45/mm/hugetlb.c 2011-07-13 17:23:04.000000000 -0400
66950 +++ linux-2.6.32.45/mm/hugetlb.c 2011-07-13 17:23:19.000000000 -0400
66951 @@ -1933,6 +1933,26 @@ static int unmap_ref_private(struct mm_s
66952 return 1;
66953 }
66954
66955 +#ifdef CONFIG_PAX_SEGMEXEC
66956 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
66957 +{
66958 + struct mm_struct *mm = vma->vm_mm;
66959 + struct vm_area_struct *vma_m;
66960 + unsigned long address_m;
66961 + pte_t *ptep_m;
66962 +
66963 + vma_m = pax_find_mirror_vma(vma);
66964 + if (!vma_m)
66965 + return;
66966 +
66967 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
66968 + address_m = address + SEGMEXEC_TASK_SIZE;
66969 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
66970 + get_page(page_m);
66971 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
66972 +}
66973 +#endif
66974 +
66975 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
66976 unsigned long address, pte_t *ptep, pte_t pte,
66977 struct page *pagecache_page)
66978 @@ -2004,6 +2024,11 @@ retry_avoidcopy:
66979 huge_ptep_clear_flush(vma, address, ptep);
66980 set_huge_pte_at(mm, address, ptep,
66981 make_huge_pte(vma, new_page, 1));
66982 +
66983 +#ifdef CONFIG_PAX_SEGMEXEC
66984 + pax_mirror_huge_pte(vma, address, new_page);
66985 +#endif
66986 +
66987 /* Make the old page be freed below */
66988 new_page = old_page;
66989 }
66990 @@ -2135,6 +2160,10 @@ retry:
66991 && (vma->vm_flags & VM_SHARED)));
66992 set_huge_pte_at(mm, address, ptep, new_pte);
66993
66994 +#ifdef CONFIG_PAX_SEGMEXEC
66995 + pax_mirror_huge_pte(vma, address, page);
66996 +#endif
66997 +
66998 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
66999 /* Optimization, do the COW without a second fault */
67000 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
67001 @@ -2163,6 +2192,28 @@ int hugetlb_fault(struct mm_struct *mm,
67002 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
67003 struct hstate *h = hstate_vma(vma);
67004
67005 +#ifdef CONFIG_PAX_SEGMEXEC
67006 + struct vm_area_struct *vma_m;
67007 +
67008 + vma_m = pax_find_mirror_vma(vma);
67009 + if (vma_m) {
67010 + unsigned long address_m;
67011 +
67012 + if (vma->vm_start > vma_m->vm_start) {
67013 + address_m = address;
67014 + address -= SEGMEXEC_TASK_SIZE;
67015 + vma = vma_m;
67016 + h = hstate_vma(vma);
67017 + } else
67018 + address_m = address + SEGMEXEC_TASK_SIZE;
67019 +
67020 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
67021 + return VM_FAULT_OOM;
67022 + address_m &= HPAGE_MASK;
67023 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
67024 + }
67025 +#endif
67026 +
67027 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
67028 if (!ptep)
67029 return VM_FAULT_OOM;
67030 diff -urNp linux-2.6.32.45/mm/internal.h linux-2.6.32.45/mm/internal.h
67031 --- linux-2.6.32.45/mm/internal.h 2011-03-27 14:31:47.000000000 -0400
67032 +++ linux-2.6.32.45/mm/internal.h 2011-07-09 09:13:08.000000000 -0400
67033 @@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
67034 * in mm/page_alloc.c
67035 */
67036 extern void __free_pages_bootmem(struct page *page, unsigned int order);
67037 +extern void free_compound_page(struct page *page);
67038 extern void prep_compound_page(struct page *page, unsigned long order);
67039
67040
67041 diff -urNp linux-2.6.32.45/mm/Kconfig linux-2.6.32.45/mm/Kconfig
67042 --- linux-2.6.32.45/mm/Kconfig 2011-03-27 14:31:47.000000000 -0400
67043 +++ linux-2.6.32.45/mm/Kconfig 2011-04-17 15:56:46.000000000 -0400
67044 @@ -228,7 +228,7 @@ config KSM
67045 config DEFAULT_MMAP_MIN_ADDR
67046 int "Low address space to protect from user allocation"
67047 depends on MMU
67048 - default 4096
67049 + default 65536
67050 help
67051 This is the portion of low virtual memory which should be protected
67052 from userspace allocation. Keeping a user from writing to low pages
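Note on the mm/Kconfig hunk above: the default for DEFAULT_MMAP_MIN_ADDR is raised from 4096 to 65536, widening the low address range that unprivileged mappings may not occupy (a mitigation against exploiting kernel NULL-pointer dereferences). The runtime value is the vm.mmap_min_addr sysctl; this hunk only changes its compiled-in default. A sketch of how to observe both the value and the resulting mmap() refusal from user space:

/* Sketch: show vm.mmap_min_addr and demonstrate that a fixed mapping below
 * it is refused for unprivileged processes (errno varies by kernel/LSM). */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    char buf[64];
    int fd = open("/proc/sys/vm/mmap_min_addr", O_RDONLY);

    if (fd >= 0) {
        ssize_t n = read(fd, buf, sizeof(buf) - 1);
        if (n > 0) {
            buf[n] = '\0';
            printf("vm.mmap_min_addr = %s", buf);
        }
        close(fd);
    }

    void *p = mmap((void *)0x1000, 4096, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    if (p == MAP_FAILED)
        printf("mmap at 0x1000 refused: %s\n", strerror(errno));
    else
        printf("mmap at 0x1000 succeeded (privileged, or a low limit)\n");
    return 0;
}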
67053 diff -urNp linux-2.6.32.45/mm/kmemleak.c linux-2.6.32.45/mm/kmemleak.c
67054 --- linux-2.6.32.45/mm/kmemleak.c 2011-06-25 12:55:35.000000000 -0400
67055 +++ linux-2.6.32.45/mm/kmemleak.c 2011-06-25 12:56:37.000000000 -0400
67056 @@ -358,7 +358,7 @@ static void print_unreferenced(struct se
67057
67058 for (i = 0; i < object->trace_len; i++) {
67059 void *ptr = (void *)object->trace[i];
67060 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
67061 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
67062 }
67063 }
67064
67065 diff -urNp linux-2.6.32.45/mm/maccess.c linux-2.6.32.45/mm/maccess.c
67066 --- linux-2.6.32.45/mm/maccess.c 2011-03-27 14:31:47.000000000 -0400
67067 +++ linux-2.6.32.45/mm/maccess.c 2011-04-17 15:56:46.000000000 -0400
67068 @@ -14,7 +14,7 @@
67069 * Safely read from address @src to the buffer at @dst. If a kernel fault
67070 * happens, handle that and return -EFAULT.
67071 */
67072 -long probe_kernel_read(void *dst, void *src, size_t size)
67073 +long probe_kernel_read(void *dst, const void *src, size_t size)
67074 {
67075 long ret;
67076 mm_segment_t old_fs = get_fs();
67077 @@ -39,7 +39,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
67078 * Safely write to address @dst from the buffer at @src. If a kernel fault
67079 * happens, handle that and return -EFAULT.
67080 */
67081 -long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
67082 +long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
67083 {
67084 long ret;
67085 mm_segment_t old_fs = get_fs();
67086 diff -urNp linux-2.6.32.45/mm/madvise.c linux-2.6.32.45/mm/madvise.c
67087 --- linux-2.6.32.45/mm/madvise.c 2011-03-27 14:31:47.000000000 -0400
67088 +++ linux-2.6.32.45/mm/madvise.c 2011-04-17 15:56:46.000000000 -0400
67089 @@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_a
67090 pgoff_t pgoff;
67091 unsigned long new_flags = vma->vm_flags;
67092
67093 +#ifdef CONFIG_PAX_SEGMEXEC
67094 + struct vm_area_struct *vma_m;
67095 +#endif
67096 +
67097 switch (behavior) {
67098 case MADV_NORMAL:
67099 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
67100 @@ -103,6 +107,13 @@ success:
67101 /*
67102 * vm_flags is protected by the mmap_sem held in write mode.
67103 */
67104 +
67105 +#ifdef CONFIG_PAX_SEGMEXEC
67106 + vma_m = pax_find_mirror_vma(vma);
67107 + if (vma_m)
67108 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
67109 +#endif
67110 +
67111 vma->vm_flags = new_flags;
67112
67113 out:
67114 @@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_a
67115 struct vm_area_struct ** prev,
67116 unsigned long start, unsigned long end)
67117 {
67118 +
67119 +#ifdef CONFIG_PAX_SEGMEXEC
67120 + struct vm_area_struct *vma_m;
67121 +#endif
67122 +
67123 *prev = vma;
67124 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
67125 return -EINVAL;
67126 @@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_a
67127 zap_page_range(vma, start, end - start, &details);
67128 } else
67129 zap_page_range(vma, start, end - start, NULL);
67130 +
67131 +#ifdef CONFIG_PAX_SEGMEXEC
67132 + vma_m = pax_find_mirror_vma(vma);
67133 + if (vma_m) {
67134 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
67135 + struct zap_details details = {
67136 + .nonlinear_vma = vma_m,
67137 + .last_index = ULONG_MAX,
67138 + };
67139 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
67140 + } else
67141 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
67142 + }
67143 +#endif
67144 +
67145 return 0;
67146 }
67147
67148 @@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
67149 if (end < start)
67150 goto out;
67151
67152 +#ifdef CONFIG_PAX_SEGMEXEC
67153 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
67154 + if (end > SEGMEXEC_TASK_SIZE)
67155 + goto out;
67156 + } else
67157 +#endif
67158 +
67159 + if (end > TASK_SIZE)
67160 + goto out;
67161 +
67162 error = 0;
67163 if (end == start)
67164 goto out;
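
For readers new to SEGMEXEC: PaX provides non-executable page semantics on CPUs without hardware NX by splitting the user address space in half via segmentation and keeping, for every executable mapping in the lower half, a mirror vma at the same offset in the upper half. That is why madvise_behavior() copies the new flags to pax_find_mirror_vma(vma) above, and why madvise_dontneed() zaps the same range a second time at start + SEGMEXEC_TASK_SIZE. A minimal sketch of that address arithmetic follows; the constant below is an assumption for illustration (half of the common 3 GiB i386 user space), the kernel itself uses SEGMEXEC_TASK_SIZE:

/* Illustration only.  EXAMPLE_SEGMEXEC_TASK_SIZE is an assumed value and is
 * not taken from this hunk. */
#include <stdio.h>

#define EXAMPLE_SEGMEXEC_TASK_SIZE 0x60000000UL

int main(void)
{
	unsigned long addr = 0x08048000UL;	/* a typical i386 text address */

	if (addr < EXAMPLE_SEGMEXEC_TASK_SIZE)
		printf("mapping at %#lx has its executable mirror at %#lx\n",
		       addr, addr + EXAMPLE_SEGMEXEC_TASK_SIZE);
	return 0;
}

Most range operations touched by this patch (madvise, mbind, munmap, the fault path in mm/memory.c below) follow the same pattern: do the work on the lower vma, then repeat it on the mirror reached through vm_mirror.
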
67165 diff -urNp linux-2.6.32.45/mm/memory.c linux-2.6.32.45/mm/memory.c
67166 --- linux-2.6.32.45/mm/memory.c 2011-07-13 17:23:04.000000000 -0400
67167 +++ linux-2.6.32.45/mm/memory.c 2011-07-13 17:23:23.000000000 -0400
67168 @@ -187,8 +187,12 @@ static inline void free_pmd_range(struct
67169 return;
67170
67171 pmd = pmd_offset(pud, start);
67172 +
67173 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
67174 pud_clear(pud);
67175 pmd_free_tlb(tlb, pmd, start);
67176 +#endif
67177 +
67178 }
67179
67180 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
67181 @@ -219,9 +223,12 @@ static inline void free_pud_range(struct
67182 if (end - 1 > ceiling - 1)
67183 return;
67184
67185 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
67186 pud = pud_offset(pgd, start);
67187 pgd_clear(pgd);
67188 pud_free_tlb(tlb, pud, start);
67189 +#endif
67190 +
67191 }
67192
67193 /*
67194 @@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct
67195 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
67196 i = 0;
67197
67198 - do {
67199 + while (nr_pages) {
67200 struct vm_area_struct *vma;
67201
67202 - vma = find_extend_vma(mm, start);
67203 + vma = find_vma(mm, start);
67204 if (!vma && in_gate_area(tsk, start)) {
67205 unsigned long pg = start & PAGE_MASK;
67206 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
67207 @@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct
67208 continue;
67209 }
67210
67211 - if (!vma ||
67212 + if (!vma || start < vma->vm_start ||
67213 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
67214 !(vm_flags & vma->vm_flags))
67215 return i ? : -EFAULT;
67216 @@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct
67217 start += PAGE_SIZE;
67218 nr_pages--;
67219 } while (nr_pages && start < vma->vm_end);
67220 - } while (nr_pages);
67221 + }
67222 return i;
67223 }
67224
67225 @@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_st
67226 page_add_file_rmap(page);
67227 set_pte_at(mm, addr, pte, mk_pte(page, prot));
67228
67229 +#ifdef CONFIG_PAX_SEGMEXEC
67230 + pax_mirror_file_pte(vma, addr, page, ptl);
67231 +#endif
67232 +
67233 retval = 0;
67234 pte_unmap_unlock(pte, ptl);
67235 return retval;
67236 @@ -1560,10 +1571,22 @@ out:
67237 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
67238 struct page *page)
67239 {
67240 +
67241 +#ifdef CONFIG_PAX_SEGMEXEC
67242 + struct vm_area_struct *vma_m;
67243 +#endif
67244 +
67245 if (addr < vma->vm_start || addr >= vma->vm_end)
67246 return -EFAULT;
67247 if (!page_count(page))
67248 return -EINVAL;
67249 +
67250 +#ifdef CONFIG_PAX_SEGMEXEC
67251 + vma_m = pax_find_mirror_vma(vma);
67252 + if (vma_m)
67253 + vma_m->vm_flags |= VM_INSERTPAGE;
67254 +#endif
67255 +
67256 vma->vm_flags |= VM_INSERTPAGE;
67257 return insert_page(vma, addr, page, vma->vm_page_prot);
67258 }
67259 @@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struc
67260 unsigned long pfn)
67261 {
67262 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
67263 + BUG_ON(vma->vm_mirror);
67264
67265 if (addr < vma->vm_start || addr >= vma->vm_end)
67266 return -EFAULT;
67267 @@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct
67268 copy_user_highpage(dst, src, va, vma);
67269 }
67270
67271 +#ifdef CONFIG_PAX_SEGMEXEC
67272 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
67273 +{
67274 + struct mm_struct *mm = vma->vm_mm;
67275 + spinlock_t *ptl;
67276 + pte_t *pte, entry;
67277 +
67278 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
67279 + entry = *pte;
67280 + if (!pte_present(entry)) {
67281 + if (!pte_none(entry)) {
67282 + BUG_ON(pte_file(entry));
67283 + free_swap_and_cache(pte_to_swp_entry(entry));
67284 + pte_clear_not_present_full(mm, address, pte, 0);
67285 + }
67286 + } else {
67287 + struct page *page;
67288 +
67289 + flush_cache_page(vma, address, pte_pfn(entry));
67290 + entry = ptep_clear_flush(vma, address, pte);
67291 + BUG_ON(pte_dirty(entry));
67292 + page = vm_normal_page(vma, address, entry);
67293 + if (page) {
67294 + update_hiwater_rss(mm);
67295 + if (PageAnon(page))
67296 + dec_mm_counter(mm, anon_rss);
67297 + else
67298 + dec_mm_counter(mm, file_rss);
67299 + page_remove_rmap(page);
67300 + page_cache_release(page);
67301 + }
67302 + }
67303 + pte_unmap_unlock(pte, ptl);
67304 +}
67305 +
67306 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
67307 + *
67308 + * the ptl of the lower mapped page is held on entry and is not released on exit
67309 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
67310 + */
67311 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67312 +{
67313 + struct mm_struct *mm = vma->vm_mm;
67314 + unsigned long address_m;
67315 + spinlock_t *ptl_m;
67316 + struct vm_area_struct *vma_m;
67317 + pmd_t *pmd_m;
67318 + pte_t *pte_m, entry_m;
67319 +
67320 + BUG_ON(!page_m || !PageAnon(page_m));
67321 +
67322 + vma_m = pax_find_mirror_vma(vma);
67323 + if (!vma_m)
67324 + return;
67325 +
67326 + BUG_ON(!PageLocked(page_m));
67327 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67328 + address_m = address + SEGMEXEC_TASK_SIZE;
67329 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67330 + pte_m = pte_offset_map_nested(pmd_m, address_m);
67331 + ptl_m = pte_lockptr(mm, pmd_m);
67332 + if (ptl != ptl_m) {
67333 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67334 + if (!pte_none(*pte_m))
67335 + goto out;
67336 + }
67337 +
67338 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67339 + page_cache_get(page_m);
67340 + page_add_anon_rmap(page_m, vma_m, address_m);
67341 + inc_mm_counter(mm, anon_rss);
67342 + set_pte_at(mm, address_m, pte_m, entry_m);
67343 + update_mmu_cache(vma_m, address_m, entry_m);
67344 +out:
67345 + if (ptl != ptl_m)
67346 + spin_unlock(ptl_m);
67347 + pte_unmap_nested(pte_m);
67348 + unlock_page(page_m);
67349 +}
67350 +
67351 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67352 +{
67353 + struct mm_struct *mm = vma->vm_mm;
67354 + unsigned long address_m;
67355 + spinlock_t *ptl_m;
67356 + struct vm_area_struct *vma_m;
67357 + pmd_t *pmd_m;
67358 + pte_t *pte_m, entry_m;
67359 +
67360 + BUG_ON(!page_m || PageAnon(page_m));
67361 +
67362 + vma_m = pax_find_mirror_vma(vma);
67363 + if (!vma_m)
67364 + return;
67365 +
67366 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67367 + address_m = address + SEGMEXEC_TASK_SIZE;
67368 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67369 + pte_m = pte_offset_map_nested(pmd_m, address_m);
67370 + ptl_m = pte_lockptr(mm, pmd_m);
67371 + if (ptl != ptl_m) {
67372 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67373 + if (!pte_none(*pte_m))
67374 + goto out;
67375 + }
67376 +
67377 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67378 + page_cache_get(page_m);
67379 + page_add_file_rmap(page_m);
67380 + inc_mm_counter(mm, file_rss);
67381 + set_pte_at(mm, address_m, pte_m, entry_m);
67382 + update_mmu_cache(vma_m, address_m, entry_m);
67383 +out:
67384 + if (ptl != ptl_m)
67385 + spin_unlock(ptl_m);
67386 + pte_unmap_nested(pte_m);
67387 +}
67388 +
67389 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
67390 +{
67391 + struct mm_struct *mm = vma->vm_mm;
67392 + unsigned long address_m;
67393 + spinlock_t *ptl_m;
67394 + struct vm_area_struct *vma_m;
67395 + pmd_t *pmd_m;
67396 + pte_t *pte_m, entry_m;
67397 +
67398 + vma_m = pax_find_mirror_vma(vma);
67399 + if (!vma_m)
67400 + return;
67401 +
67402 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67403 + address_m = address + SEGMEXEC_TASK_SIZE;
67404 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67405 + pte_m = pte_offset_map_nested(pmd_m, address_m);
67406 + ptl_m = pte_lockptr(mm, pmd_m);
67407 + if (ptl != ptl_m) {
67408 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67409 + if (!pte_none(*pte_m))
67410 + goto out;
67411 + }
67412 +
67413 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
67414 + set_pte_at(mm, address_m, pte_m, entry_m);
67415 +out:
67416 + if (ptl != ptl_m)
67417 + spin_unlock(ptl_m);
67418 + pte_unmap_nested(pte_m);
67419 +}
67420 +
67421 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
67422 +{
67423 + struct page *page_m;
67424 + pte_t entry;
67425 +
67426 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
67427 + goto out;
67428 +
67429 + entry = *pte;
67430 + page_m = vm_normal_page(vma, address, entry);
67431 + if (!page_m)
67432 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
67433 + else if (PageAnon(page_m)) {
67434 + if (pax_find_mirror_vma(vma)) {
67435 + pte_unmap_unlock(pte, ptl);
67436 + lock_page(page_m);
67437 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
67438 + if (pte_same(entry, *pte))
67439 + pax_mirror_anon_pte(vma, address, page_m, ptl);
67440 + else
67441 + unlock_page(page_m);
67442 + }
67443 + } else
67444 + pax_mirror_file_pte(vma, address, page_m, ptl);
67445 +
67446 +out:
67447 + pte_unmap_unlock(pte, ptl);
67448 +}
67449 +#endif
67450 +
67451 /*
67452 * This routine handles present pages, when users try to write
67453 * to a shared page. It is done by copying the page to a new address
67454 @@ -2156,6 +2360,12 @@ gotten:
67455 */
67456 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
67457 if (likely(pte_same(*page_table, orig_pte))) {
67458 +
67459 +#ifdef CONFIG_PAX_SEGMEXEC
67460 + if (pax_find_mirror_vma(vma))
67461 + BUG_ON(!trylock_page(new_page));
67462 +#endif
67463 +
67464 if (old_page) {
67465 if (!PageAnon(old_page)) {
67466 dec_mm_counter(mm, file_rss);
67467 @@ -2207,6 +2417,10 @@ gotten:
67468 page_remove_rmap(old_page);
67469 }
67470
67471 +#ifdef CONFIG_PAX_SEGMEXEC
67472 + pax_mirror_anon_pte(vma, address, new_page, ptl);
67473 +#endif
67474 +
67475 /* Free the old page.. */
67476 new_page = old_page;
67477 ret |= VM_FAULT_WRITE;
67478 @@ -2606,6 +2820,11 @@ static int do_swap_page(struct mm_struct
67479 swap_free(entry);
67480 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
67481 try_to_free_swap(page);
67482 +
67483 +#ifdef CONFIG_PAX_SEGMEXEC
67484 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
67485 +#endif
67486 +
67487 unlock_page(page);
67488
67489 if (flags & FAULT_FLAG_WRITE) {
67490 @@ -2617,6 +2836,11 @@ static int do_swap_page(struct mm_struct
67491
67492 /* No need to invalidate - it was non-present before */
67493 update_mmu_cache(vma, address, pte);
67494 +
67495 +#ifdef CONFIG_PAX_SEGMEXEC
67496 + pax_mirror_anon_pte(vma, address, page, ptl);
67497 +#endif
67498 +
67499 unlock:
67500 pte_unmap_unlock(page_table, ptl);
67501 out:
67502 @@ -2632,40 +2856,6 @@ out_release:
67503 }
67504
67505 /*
67506 - * This is like a special single-page "expand_{down|up}wards()",
67507 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
67508 - * doesn't hit another vma.
67509 - */
67510 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
67511 -{
67512 - address &= PAGE_MASK;
67513 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
67514 - struct vm_area_struct *prev = vma->vm_prev;
67515 -
67516 - /*
67517 - * Is there a mapping abutting this one below?
67518 - *
67519 - * That's only ok if it's the same stack mapping
67520 - * that has gotten split..
67521 - */
67522 - if (prev && prev->vm_end == address)
67523 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
67524 -
67525 - expand_stack(vma, address - PAGE_SIZE);
67526 - }
67527 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
67528 - struct vm_area_struct *next = vma->vm_next;
67529 -
67530 - /* As VM_GROWSDOWN but s/below/above/ */
67531 - if (next && next->vm_start == address + PAGE_SIZE)
67532 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
67533 -
67534 - expand_upwards(vma, address + PAGE_SIZE);
67535 - }
67536 - return 0;
67537 -}
67538 -
67539 -/*
67540 * We enter with non-exclusive mmap_sem (to exclude vma changes,
67541 * but allow concurrent faults), and pte mapped but not yet locked.
67542 * We return with mmap_sem still held, but pte unmapped and unlocked.
67543 @@ -2674,27 +2864,23 @@ static int do_anonymous_page(struct mm_s
67544 unsigned long address, pte_t *page_table, pmd_t *pmd,
67545 unsigned int flags)
67546 {
67547 - struct page *page;
67548 + struct page *page = NULL;
67549 spinlock_t *ptl;
67550 pte_t entry;
67551
67552 - pte_unmap(page_table);
67553 -
67554 - /* Check if we need to add a guard page to the stack */
67555 - if (check_stack_guard_page(vma, address) < 0)
67556 - return VM_FAULT_SIGBUS;
67557 -
67558 - /* Use the zero-page for reads */
67559 if (!(flags & FAULT_FLAG_WRITE)) {
67560 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
67561 vma->vm_page_prot));
67562 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
67563 + ptl = pte_lockptr(mm, pmd);
67564 + spin_lock(ptl);
67565 if (!pte_none(*page_table))
67566 goto unlock;
67567 goto setpte;
67568 }
67569
67570 /* Allocate our own private page. */
67571 + pte_unmap(page_table);
67572 +
67573 if (unlikely(anon_vma_prepare(vma)))
67574 goto oom;
67575 page = alloc_zeroed_user_highpage_movable(vma, address);
67576 @@ -2713,6 +2899,11 @@ static int do_anonymous_page(struct mm_s
67577 if (!pte_none(*page_table))
67578 goto release;
67579
67580 +#ifdef CONFIG_PAX_SEGMEXEC
67581 + if (pax_find_mirror_vma(vma))
67582 + BUG_ON(!trylock_page(page));
67583 +#endif
67584 +
67585 inc_mm_counter(mm, anon_rss);
67586 page_add_new_anon_rmap(page, vma, address);
67587 setpte:
67588 @@ -2720,6 +2911,12 @@ setpte:
67589
67590 /* No need to invalidate - it was non-present before */
67591 update_mmu_cache(vma, address, entry);
67592 +
67593 +#ifdef CONFIG_PAX_SEGMEXEC
67594 + if (page)
67595 + pax_mirror_anon_pte(vma, address, page, ptl);
67596 +#endif
67597 +
67598 unlock:
67599 pte_unmap_unlock(page_table, ptl);
67600 return 0;
67601 @@ -2862,6 +3059,12 @@ static int __do_fault(struct mm_struct *
67602 */
67603 /* Only go through if we didn't race with anybody else... */
67604 if (likely(pte_same(*page_table, orig_pte))) {
67605 +
67606 +#ifdef CONFIG_PAX_SEGMEXEC
67607 + if (anon && pax_find_mirror_vma(vma))
67608 + BUG_ON(!trylock_page(page));
67609 +#endif
67610 +
67611 flush_icache_page(vma, page);
67612 entry = mk_pte(page, vma->vm_page_prot);
67613 if (flags & FAULT_FLAG_WRITE)
67614 @@ -2881,6 +3084,14 @@ static int __do_fault(struct mm_struct *
67615
67616 /* no need to invalidate: a not-present page won't be cached */
67617 update_mmu_cache(vma, address, entry);
67618 +
67619 +#ifdef CONFIG_PAX_SEGMEXEC
67620 + if (anon)
67621 + pax_mirror_anon_pte(vma, address, page, ptl);
67622 + else
67623 + pax_mirror_file_pte(vma, address, page, ptl);
67624 +#endif
67625 +
67626 } else {
67627 if (charged)
67628 mem_cgroup_uncharge_page(page);
67629 @@ -3028,6 +3239,12 @@ static inline int handle_pte_fault(struc
67630 if (flags & FAULT_FLAG_WRITE)
67631 flush_tlb_page(vma, address);
67632 }
67633 +
67634 +#ifdef CONFIG_PAX_SEGMEXEC
67635 + pax_mirror_pte(vma, address, pte, pmd, ptl);
67636 + return 0;
67637 +#endif
67638 +
67639 unlock:
67640 pte_unmap_unlock(pte, ptl);
67641 return 0;
67642 @@ -3044,6 +3261,10 @@ int handle_mm_fault(struct mm_struct *mm
67643 pmd_t *pmd;
67644 pte_t *pte;
67645
67646 +#ifdef CONFIG_PAX_SEGMEXEC
67647 + struct vm_area_struct *vma_m;
67648 +#endif
67649 +
67650 __set_current_state(TASK_RUNNING);
67651
67652 count_vm_event(PGFAULT);
67653 @@ -3051,6 +3272,34 @@ int handle_mm_fault(struct mm_struct *mm
67654 if (unlikely(is_vm_hugetlb_page(vma)))
67655 return hugetlb_fault(mm, vma, address, flags);
67656
67657 +#ifdef CONFIG_PAX_SEGMEXEC
67658 + vma_m = pax_find_mirror_vma(vma);
67659 + if (vma_m) {
67660 + unsigned long address_m;
67661 + pgd_t *pgd_m;
67662 + pud_t *pud_m;
67663 + pmd_t *pmd_m;
67664 +
67665 + if (vma->vm_start > vma_m->vm_start) {
67666 + address_m = address;
67667 + address -= SEGMEXEC_TASK_SIZE;
67668 + vma = vma_m;
67669 + } else
67670 + address_m = address + SEGMEXEC_TASK_SIZE;
67671 +
67672 + pgd_m = pgd_offset(mm, address_m);
67673 + pud_m = pud_alloc(mm, pgd_m, address_m);
67674 + if (!pud_m)
67675 + return VM_FAULT_OOM;
67676 + pmd_m = pmd_alloc(mm, pud_m, address_m);
67677 + if (!pmd_m)
67678 + return VM_FAULT_OOM;
67679 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
67680 + return VM_FAULT_OOM;
67681 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
67682 + }
67683 +#endif
67684 +
67685 pgd = pgd_offset(mm, address);
67686 pud = pud_alloc(mm, pgd, address);
67687 if (!pud)
67688 @@ -3148,7 +3397,7 @@ static int __init gate_vma_init(void)
67689 gate_vma.vm_start = FIXADDR_USER_START;
67690 gate_vma.vm_end = FIXADDR_USER_END;
67691 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
67692 - gate_vma.vm_page_prot = __P101;
67693 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
67694 /*
67695 * Make sure the vDSO gets into every core dump.
67696 * Dumping its contents makes post-mortem fully interpretable later
67697 diff -urNp linux-2.6.32.45/mm/memory-failure.c linux-2.6.32.45/mm/memory-failure.c
67698 --- linux-2.6.32.45/mm/memory-failure.c 2011-03-27 14:31:47.000000000 -0400
67699 +++ linux-2.6.32.45/mm/memory-failure.c 2011-04-17 15:56:46.000000000 -0400
67700 @@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __r
67701
67702 int sysctl_memory_failure_recovery __read_mostly = 1;
67703
67704 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67705 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67706
67707 /*
67708 * Send all the processes who have the page mapped an ``action optional''
67709 @@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn,
67710 return 0;
67711 }
67712
67713 - atomic_long_add(1, &mce_bad_pages);
67714 + atomic_long_add_unchecked(1, &mce_bad_pages);
67715
67716 /*
67717 * We need/can do nothing about count=0 pages.
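
The mce_bad_pages conversion above is part of PaX's reference-counter overflow protection: with that feature enabled, ordinary atomic counters trap when an increment would wrap, and counters whose wraparound is harmless, pure statistics like this one, are switched to the *_unchecked variants so they stay exempt from the check. A simplified userspace illustration of that checked-versus-unchecked split; the struct names are invented for the sketch and __builtin_add_overflow (a GCC/Clang builtin) stands in for the real arch-specific atomic ops:

/* Toy model only, not the kernel implementation. */
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

struct checked_counter   { unsigned long v; };
struct unchecked_counter { unsigned long v; };

static void checked_add(struct checked_counter *c, unsigned long delta)
{
	if (__builtin_add_overflow(c->v, delta, &c->v)) {
		fprintf(stderr, "counter overflow detected\n");
		abort();	/* stand-in for the kernel-side reaction */
	}
}

static void unchecked_add(struct unchecked_counter *c, unsigned long delta)
{
	c->v += delta;		/* statistics: wraparound is tolerated */
}

int main(void)
{
	struct unchecked_counter stats = { ULONG_MAX };
	struct checked_counter ref = { ULONG_MAX };

	unchecked_add(&stats, 1);
	printf("statistics counter wrapped silently to %lu\n", stats.v);

	checked_add(&ref, 1);	/* detected: the toy aborts here */
	return 0;
}

The kernel-side reaction differs from abort(), but the policy the patch applies is the same: detect wraps on real reference counts, tolerate them on statistics such as mce_bad_pages.
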
67718 diff -urNp linux-2.6.32.45/mm/mempolicy.c linux-2.6.32.45/mm/mempolicy.c
67719 --- linux-2.6.32.45/mm/mempolicy.c 2011-03-27 14:31:47.000000000 -0400
67720 +++ linux-2.6.32.45/mm/mempolicy.c 2011-04-17 15:56:46.000000000 -0400
67721 @@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_st
67722 struct vm_area_struct *next;
67723 int err;
67724
67725 +#ifdef CONFIG_PAX_SEGMEXEC
67726 + struct vm_area_struct *vma_m;
67727 +#endif
67728 +
67729 err = 0;
67730 for (; vma && vma->vm_start < end; vma = next) {
67731 next = vma->vm_next;
67732 @@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_st
67733 err = policy_vma(vma, new);
67734 if (err)
67735 break;
67736 +
67737 +#ifdef CONFIG_PAX_SEGMEXEC
67738 + vma_m = pax_find_mirror_vma(vma);
67739 + if (vma_m) {
67740 + err = policy_vma(vma_m, new);
67741 + if (err)
67742 + break;
67743 + }
67744 +#endif
67745 +
67746 }
67747 return err;
67748 }
67749 @@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start
67750
67751 if (end < start)
67752 return -EINVAL;
67753 +
67754 +#ifdef CONFIG_PAX_SEGMEXEC
67755 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
67756 + if (end > SEGMEXEC_TASK_SIZE)
67757 + return -EINVAL;
67758 + } else
67759 +#endif
67760 +
67761 + if (end > TASK_SIZE)
67762 + return -EINVAL;
67763 +
67764 if (end == start)
67765 return 0;
67766
67767 @@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
67768 if (!mm)
67769 return -EINVAL;
67770
67771 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67772 + if (mm != current->mm &&
67773 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
67774 + err = -EPERM;
67775 + goto out;
67776 + }
67777 +#endif
67778 +
67779 /*
67780 * Check if this process has the right to modify the specified
67781 * process. The right exists if the process has administrative
67782 @@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
67783 rcu_read_lock();
67784 tcred = __task_cred(task);
67785 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
67786 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
67787 - !capable(CAP_SYS_NICE)) {
67788 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
67789 rcu_read_unlock();
67790 err = -EPERM;
67791 goto out;
67792 @@ -2396,7 +2428,7 @@ int show_numa_map(struct seq_file *m, vo
67793
67794 if (file) {
67795 seq_printf(m, " file=");
67796 - seq_path(m, &file->f_path, "\n\t= ");
67797 + seq_path(m, &file->f_path, "\n\t\\= ");
67798 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
67799 seq_printf(m, " heap");
67800 } else if (vma->vm_start <= mm->start_stack &&
67801 diff -urNp linux-2.6.32.45/mm/migrate.c linux-2.6.32.45/mm/migrate.c
67802 --- linux-2.6.32.45/mm/migrate.c 2011-07-13 17:23:04.000000000 -0400
67803 +++ linux-2.6.32.45/mm/migrate.c 2011-07-13 17:23:23.000000000 -0400
67804 @@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struc
67805 unsigned long chunk_start;
67806 int err;
67807
67808 + pax_track_stack();
67809 +
67810 task_nodes = cpuset_mems_allowed(task);
67811
67812 err = -ENOMEM;
67813 @@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
67814 if (!mm)
67815 return -EINVAL;
67816
67817 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67818 + if (mm != current->mm &&
67819 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
67820 + err = -EPERM;
67821 + goto out;
67822 + }
67823 +#endif
67824 +
67825 /*
67826 * Check if this process has the right to modify the specified
67827 * process. The right exists if the process has administrative
67828 @@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
67829 rcu_read_lock();
67830 tcred = __task_cred(task);
67831 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
67832 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
67833 - !capable(CAP_SYS_NICE)) {
67834 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
67835 rcu_read_unlock();
67836 err = -EPERM;
67837 goto out;
67838 diff -urNp linux-2.6.32.45/mm/mlock.c linux-2.6.32.45/mm/mlock.c
67839 --- linux-2.6.32.45/mm/mlock.c 2011-03-27 14:31:47.000000000 -0400
67840 +++ linux-2.6.32.45/mm/mlock.c 2011-04-17 15:56:46.000000000 -0400
67841 @@ -13,6 +13,7 @@
67842 #include <linux/pagemap.h>
67843 #include <linux/mempolicy.h>
67844 #include <linux/syscalls.h>
67845 +#include <linux/security.h>
67846 #include <linux/sched.h>
67847 #include <linux/module.h>
67848 #include <linux/rmap.h>
67849 @@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
67850 }
67851 }
67852
67853 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
67854 -{
67855 - return (vma->vm_flags & VM_GROWSDOWN) &&
67856 - (vma->vm_start == addr) &&
67857 - !vma_stack_continue(vma->vm_prev, addr);
67858 -}
67859 -
67860 /**
67861 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
67862 * @vma: target vma
67863 @@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(stru
67864 if (vma->vm_flags & VM_WRITE)
67865 gup_flags |= FOLL_WRITE;
67866
67867 - /* We don't try to access the guard page of a stack vma */
67868 - if (stack_guard_page(vma, start)) {
67869 - addr += PAGE_SIZE;
67870 - nr_pages--;
67871 - }
67872 -
67873 while (nr_pages > 0) {
67874 int i;
67875
67876 @@ -440,7 +428,7 @@ static int do_mlock(unsigned long start,
67877 {
67878 unsigned long nstart, end, tmp;
67879 struct vm_area_struct * vma, * prev;
67880 - int error;
67881 + int error = -EINVAL;
67882
67883 len = PAGE_ALIGN(len);
67884 end = start + len;
67885 @@ -448,6 +436,9 @@ static int do_mlock(unsigned long start,
67886 return -EINVAL;
67887 if (end == start)
67888 return 0;
67889 + if (end > TASK_SIZE)
67890 + return -EINVAL;
67891 +
67892 vma = find_vma_prev(current->mm, start, &prev);
67893 if (!vma || vma->vm_start > start)
67894 return -ENOMEM;
67895 @@ -458,6 +449,11 @@ static int do_mlock(unsigned long start,
67896 for (nstart = start ; ; ) {
67897 unsigned int newflags;
67898
67899 +#ifdef CONFIG_PAX_SEGMEXEC
67900 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
67901 + break;
67902 +#endif
67903 +
67904 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
67905
67906 newflags = vma->vm_flags | VM_LOCKED;
67907 @@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
67908 lock_limit >>= PAGE_SHIFT;
67909
67910 /* check against resource limits */
67911 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
67912 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
67913 error = do_mlock(start, len, 1);
67914 up_write(&current->mm->mmap_sem);
67915 @@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
67916 static int do_mlockall(int flags)
67917 {
67918 struct vm_area_struct * vma, * prev = NULL;
67919 - unsigned int def_flags = 0;
67920
67921 if (flags & MCL_FUTURE)
67922 - def_flags = VM_LOCKED;
67923 - current->mm->def_flags = def_flags;
67924 + current->mm->def_flags |= VM_LOCKED;
67925 + else
67926 + current->mm->def_flags &= ~VM_LOCKED;
67927 if (flags == MCL_FUTURE)
67928 goto out;
67929
67930 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
67931 - unsigned int newflags;
67932 + unsigned long newflags;
67933 +
67934 +#ifdef CONFIG_PAX_SEGMEXEC
67935 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
67936 + break;
67937 +#endif
67938
67939 + BUG_ON(vma->vm_end > TASK_SIZE);
67940 newflags = vma->vm_flags | VM_LOCKED;
67941 if (!(flags & MCL_CURRENT))
67942 newflags &= ~VM_LOCKED;
67943 @@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
67944 lock_limit >>= PAGE_SHIFT;
67945
67946 ret = -ENOMEM;
67947 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
67948 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
67949 capable(CAP_IPC_LOCK))
67950 ret = do_mlockall(flags);
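
The mm/mmap.c changes that follow introduce sysctl_heap_stack_gap (defaulting to 64 KiB) and route every candidate address from the arch_get_unmapped_area() variants through a new check_heap_stack_gap() helper, so a mapping is refused not only when it would overlap an existing vma but also when it would land within the configured gap of a stack growing towards it, effectively taking over from the single guard page logic removed from mm/memory.c and mm/mlock.c earlier in the patch. A minimal userspace sketch of the grows-down case; struct toy_vma, gap_ok() and the addresses are invented for illustration:

/* Toy version of the grows-down branch of check_heap_stack_gap(): a candidate
 * mapping [addr, addr+len) is acceptable only if it neither overlaps the next
 * vma nor comes within 'gap' bytes of a stack that grows down towards it. */
#include <stdio.h>
#include <stdbool.h>

struct toy_vma {
	unsigned long vm_start;
	bool grows_down;		/* stands in for VM_GROWSDOWN */
};

static bool gap_ok(const struct toy_vma *next, unsigned long addr,
		   unsigned long len, unsigned long gap)
{
	if (!next)
		return true;			/* nothing above the candidate */
	if (addr + len > next->vm_start)
		return false;			/* plain overlap */
	if (next->grows_down)
		return next->vm_start - (addr + len) >= gap;
	return true;
}

int main(void)
{
	struct toy_vma stack = { 0xbf000000UL, true };
	unsigned long gap = 64 * 1024;		/* the patch's default */

	printf("mapping ending 4 KiB below the stack: %s\n",
	       gap_ok(&stack, 0xbeffe000UL, 0x1000, gap) ? "allowed" : "refused");
	printf("mapping ending 128 KiB below the stack: %s\n",
	       gap_ok(&stack, 0xbefdf000UL, 0x1000, gap) ? "allowed" : "refused");
	return 0;
}

With the 64 KiB default, a mapping that would end 4 KiB below the stack is refused while one ending 128 KiB below it is allowed, which is the distinction the new helper draws for both the bottom-up and top-down allocators below.
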
67951 diff -urNp linux-2.6.32.45/mm/mmap.c linux-2.6.32.45/mm/mmap.c
67952 --- linux-2.6.32.45/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
67953 +++ linux-2.6.32.45/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
67954 @@ -45,6 +45,16 @@
67955 #define arch_rebalance_pgtables(addr, len) (addr)
67956 #endif
67957
67958 +static inline void verify_mm_writelocked(struct mm_struct *mm)
67959 +{
67960 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
67961 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
67962 + up_read(&mm->mmap_sem);
67963 + BUG();
67964 + }
67965 +#endif
67966 +}
67967 +
67968 static void unmap_region(struct mm_struct *mm,
67969 struct vm_area_struct *vma, struct vm_area_struct *prev,
67970 unsigned long start, unsigned long end);
67971 @@ -70,22 +80,32 @@ static void unmap_region(struct mm_struc
67972 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
67973 *
67974 */
67975 -pgprot_t protection_map[16] = {
67976 +pgprot_t protection_map[16] __read_only = {
67977 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
67978 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
67979 };
67980
67981 pgprot_t vm_get_page_prot(unsigned long vm_flags)
67982 {
67983 - return __pgprot(pgprot_val(protection_map[vm_flags &
67984 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
67985 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
67986 pgprot_val(arch_vm_get_page_prot(vm_flags)));
67987 +
67988 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
67989 + if (!nx_enabled &&
67990 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
67991 + (vm_flags & (VM_READ | VM_WRITE)))
67992 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
67993 +#endif
67994 +
67995 + return prot;
67996 }
67997 EXPORT_SYMBOL(vm_get_page_prot);
67998
67999 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
68000 int sysctl_overcommit_ratio = 50; /* default is 50% */
68001 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
68002 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
68003 struct percpu_counter vm_committed_as;
68004
68005 /*
68006 @@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma
68007 struct vm_area_struct *next = vma->vm_next;
68008
68009 might_sleep();
68010 + BUG_ON(vma->vm_mirror);
68011 if (vma->vm_ops && vma->vm_ops->close)
68012 vma->vm_ops->close(vma);
68013 if (vma->vm_file) {
68014 @@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
68015 * not page aligned -Ram Gupta
68016 */
68017 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
68018 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
68019 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
68020 (mm->end_data - mm->start_data) > rlim)
68021 goto out;
68022 @@ -704,6 +726,12 @@ static int
68023 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
68024 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68025 {
68026 +
68027 +#ifdef CONFIG_PAX_SEGMEXEC
68028 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
68029 + return 0;
68030 +#endif
68031 +
68032 if (is_mergeable_vma(vma, file, vm_flags) &&
68033 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
68034 if (vma->vm_pgoff == vm_pgoff)
68035 @@ -723,6 +751,12 @@ static int
68036 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
68037 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68038 {
68039 +
68040 +#ifdef CONFIG_PAX_SEGMEXEC
68041 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
68042 + return 0;
68043 +#endif
68044 +
68045 if (is_mergeable_vma(vma, file, vm_flags) &&
68046 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
68047 pgoff_t vm_pglen;
68048 @@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struc
68049 struct vm_area_struct *vma_merge(struct mm_struct *mm,
68050 struct vm_area_struct *prev, unsigned long addr,
68051 unsigned long end, unsigned long vm_flags,
68052 - struct anon_vma *anon_vma, struct file *file,
68053 + struct anon_vma *anon_vma, struct file *file,
68054 pgoff_t pgoff, struct mempolicy *policy)
68055 {
68056 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
68057 struct vm_area_struct *area, *next;
68058
68059 +#ifdef CONFIG_PAX_SEGMEXEC
68060 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
68061 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
68062 +
68063 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
68064 +#endif
68065 +
68066 /*
68067 * We later require that vma->vm_flags == vm_flags,
68068 * so this tests vma->vm_flags & VM_SPECIAL, too.
68069 @@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct
68070 if (next && next->vm_end == end) /* cases 6, 7, 8 */
68071 next = next->vm_next;
68072
68073 +#ifdef CONFIG_PAX_SEGMEXEC
68074 + if (prev)
68075 + prev_m = pax_find_mirror_vma(prev);
68076 + if (area)
68077 + area_m = pax_find_mirror_vma(area);
68078 + if (next)
68079 + next_m = pax_find_mirror_vma(next);
68080 +#endif
68081 +
68082 /*
68083 * Can it merge with the predecessor?
68084 */
68085 @@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct
68086 /* cases 1, 6 */
68087 vma_adjust(prev, prev->vm_start,
68088 next->vm_end, prev->vm_pgoff, NULL);
68089 - } else /* cases 2, 5, 7 */
68090 +
68091 +#ifdef CONFIG_PAX_SEGMEXEC
68092 + if (prev_m)
68093 + vma_adjust(prev_m, prev_m->vm_start,
68094 + next_m->vm_end, prev_m->vm_pgoff, NULL);
68095 +#endif
68096 +
68097 + } else { /* cases 2, 5, 7 */
68098 vma_adjust(prev, prev->vm_start,
68099 end, prev->vm_pgoff, NULL);
68100 +
68101 +#ifdef CONFIG_PAX_SEGMEXEC
68102 + if (prev_m)
68103 + vma_adjust(prev_m, prev_m->vm_start,
68104 + end_m, prev_m->vm_pgoff, NULL);
68105 +#endif
68106 +
68107 + }
68108 return prev;
68109 }
68110
68111 @@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct
68112 mpol_equal(policy, vma_policy(next)) &&
68113 can_vma_merge_before(next, vm_flags,
68114 anon_vma, file, pgoff+pglen)) {
68115 - if (prev && addr < prev->vm_end) /* case 4 */
68116 + if (prev && addr < prev->vm_end) { /* case 4 */
68117 vma_adjust(prev, prev->vm_start,
68118 addr, prev->vm_pgoff, NULL);
68119 - else /* cases 3, 8 */
68120 +
68121 +#ifdef CONFIG_PAX_SEGMEXEC
68122 + if (prev_m)
68123 + vma_adjust(prev_m, prev_m->vm_start,
68124 + addr_m, prev_m->vm_pgoff, NULL);
68125 +#endif
68126 +
68127 + } else { /* cases 3, 8 */
68128 vma_adjust(area, addr, next->vm_end,
68129 next->vm_pgoff - pglen, NULL);
68130 +
68131 +#ifdef CONFIG_PAX_SEGMEXEC
68132 + if (area_m)
68133 + vma_adjust(area_m, addr_m, next_m->vm_end,
68134 + next_m->vm_pgoff - pglen, NULL);
68135 +#endif
68136 +
68137 + }
68138 return area;
68139 }
68140
68141 @@ -898,14 +978,11 @@ none:
68142 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
68143 struct file *file, long pages)
68144 {
68145 - const unsigned long stack_flags
68146 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
68147 -
68148 if (file) {
68149 mm->shared_vm += pages;
68150 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
68151 mm->exec_vm += pages;
68152 - } else if (flags & stack_flags)
68153 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
68154 mm->stack_vm += pages;
68155 if (flags & (VM_RESERVED|VM_IO))
68156 mm->reserved_vm += pages;
68157 @@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file
68158 * (the exception is when the underlying filesystem is noexec
68159 * mounted, in which case we dont add PROT_EXEC.)
68160 */
68161 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
68162 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
68163 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
68164 prot |= PROT_EXEC;
68165
68166 @@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file
68167 /* Obtain the address to map to. we verify (or select) it and ensure
68168 * that it represents a valid section of the address space.
68169 */
68170 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
68171 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
68172 if (addr & ~PAGE_MASK)
68173 return addr;
68174
68175 @@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file
68176 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
68177 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
68178
68179 +#ifdef CONFIG_PAX_MPROTECT
68180 + if (mm->pax_flags & MF_PAX_MPROTECT) {
68181 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
68182 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
68183 + gr_log_rwxmmap(file);
68184 +
68185 +#ifdef CONFIG_PAX_EMUPLT
68186 + vm_flags &= ~VM_EXEC;
68187 +#else
68188 + return -EPERM;
68189 +#endif
68190 +
68191 + }
68192 +
68193 + if (!(vm_flags & VM_EXEC))
68194 + vm_flags &= ~VM_MAYEXEC;
68195 +#else
68196 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
68197 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
68198 +#endif
68199 + else
68200 + vm_flags &= ~VM_MAYWRITE;
68201 + }
68202 +#endif
68203 +
68204 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68205 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
68206 + vm_flags &= ~VM_PAGEEXEC;
68207 +#endif
68208 +
68209 if (flags & MAP_LOCKED)
68210 if (!can_do_mlock())
68211 return -EPERM;
68212 @@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file
68213 locked += mm->locked_vm;
68214 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
68215 lock_limit >>= PAGE_SHIFT;
68216 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68217 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
68218 return -EAGAIN;
68219 }
68220 @@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file
68221 if (error)
68222 return error;
68223
68224 + if (!gr_acl_handle_mmap(file, prot))
68225 + return -EACCES;
68226 +
68227 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
68228 }
68229 EXPORT_SYMBOL(do_mmap_pgoff);
68230 @@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
68231 */
68232 int vma_wants_writenotify(struct vm_area_struct *vma)
68233 {
68234 - unsigned int vm_flags = vma->vm_flags;
68235 + unsigned long vm_flags = vma->vm_flags;
68236
68237 /* If it was private or non-writable, the write bit is already clear */
68238 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
68239 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
68240 return 0;
68241
68242 /* The backer wishes to know when pages are first written to? */
68243 @@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *f
68244 unsigned long charged = 0;
68245 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
68246
68247 +#ifdef CONFIG_PAX_SEGMEXEC
68248 + struct vm_area_struct *vma_m = NULL;
68249 +#endif
68250 +
68251 + /*
68252 + * mm->mmap_sem is required to protect against another thread
68253 + * changing the mappings in case we sleep.
68254 + */
68255 + verify_mm_writelocked(mm);
68256 +
68257 /* Clear old maps */
68258 error = -ENOMEM;
68259 -munmap_back:
68260 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68261 if (vma && vma->vm_start < addr + len) {
68262 if (do_munmap(mm, addr, len))
68263 return -ENOMEM;
68264 - goto munmap_back;
68265 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68266 + BUG_ON(vma && vma->vm_start < addr + len);
68267 }
68268
68269 /* Check against address space limit. */
68270 @@ -1173,6 +1294,16 @@ munmap_back:
68271 goto unacct_error;
68272 }
68273
68274 +#ifdef CONFIG_PAX_SEGMEXEC
68275 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
68276 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
68277 + if (!vma_m) {
68278 + error = -ENOMEM;
68279 + goto free_vma;
68280 + }
68281 + }
68282 +#endif
68283 +
68284 vma->vm_mm = mm;
68285 vma->vm_start = addr;
68286 vma->vm_end = addr + len;
68287 @@ -1195,6 +1326,19 @@ munmap_back:
68288 error = file->f_op->mmap(file, vma);
68289 if (error)
68290 goto unmap_and_free_vma;
68291 +
68292 +#ifdef CONFIG_PAX_SEGMEXEC
68293 + if (vma_m && (vm_flags & VM_EXECUTABLE))
68294 + added_exe_file_vma(mm);
68295 +#endif
68296 +
68297 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68298 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
68299 + vma->vm_flags |= VM_PAGEEXEC;
68300 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
68301 + }
68302 +#endif
68303 +
68304 if (vm_flags & VM_EXECUTABLE)
68305 added_exe_file_vma(mm);
68306
68307 @@ -1218,6 +1362,11 @@ munmap_back:
68308 vma_link(mm, vma, prev, rb_link, rb_parent);
68309 file = vma->vm_file;
68310
68311 +#ifdef CONFIG_PAX_SEGMEXEC
68312 + if (vma_m)
68313 + pax_mirror_vma(vma_m, vma);
68314 +#endif
68315 +
68316 /* Once vma denies write, undo our temporary denial count */
68317 if (correct_wcount)
68318 atomic_inc(&inode->i_writecount);
68319 @@ -1226,6 +1375,7 @@ out:
68320
68321 mm->total_vm += len >> PAGE_SHIFT;
68322 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
68323 + track_exec_limit(mm, addr, addr + len, vm_flags);
68324 if (vm_flags & VM_LOCKED) {
68325 /*
68326 * makes pages present; downgrades, drops, reacquires mmap_sem
68327 @@ -1248,6 +1398,12 @@ unmap_and_free_vma:
68328 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
68329 charged = 0;
68330 free_vma:
68331 +
68332 +#ifdef CONFIG_PAX_SEGMEXEC
68333 + if (vma_m)
68334 + kmem_cache_free(vm_area_cachep, vma_m);
68335 +#endif
68336 +
68337 kmem_cache_free(vm_area_cachep, vma);
68338 unacct_error:
68339 if (charged)
68340 @@ -1255,6 +1411,44 @@ unacct_error:
68341 return error;
68342 }
68343
68344 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
68345 +{
68346 + if (!vma) {
68347 +#ifdef CONFIG_STACK_GROWSUP
68348 + if (addr > sysctl_heap_stack_gap)
68349 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
68350 + else
68351 + vma = find_vma(current->mm, 0);
68352 + if (vma && (vma->vm_flags & VM_GROWSUP))
68353 + return false;
68354 +#endif
68355 + return true;
68356 + }
68357 +
68358 + if (addr + len > vma->vm_start)
68359 + return false;
68360 +
68361 + if (vma->vm_flags & VM_GROWSDOWN)
68362 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
68363 +#ifdef CONFIG_STACK_GROWSUP
68364 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
68365 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
68366 +#endif
68367 +
68368 + return true;
68369 +}
68370 +
68371 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
68372 +{
68373 + if (vma->vm_start < len)
68374 + return -ENOMEM;
68375 + if (!(vma->vm_flags & VM_GROWSDOWN))
68376 + return vma->vm_start - len;
68377 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
68378 + return vma->vm_start - len - sysctl_heap_stack_gap;
68379 + return -ENOMEM;
68380 +}
68381 +
68382 /* Get an address range which is currently unmapped.
68383 * For shmat() with addr=0.
68384 *
68385 @@ -1281,18 +1475,23 @@ arch_get_unmapped_area(struct file *filp
68386 if (flags & MAP_FIXED)
68387 return addr;
68388
68389 +#ifdef CONFIG_PAX_RANDMMAP
68390 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68391 +#endif
68392 +
68393 if (addr) {
68394 addr = PAGE_ALIGN(addr);
68395 - vma = find_vma(mm, addr);
68396 - if (TASK_SIZE - len >= addr &&
68397 - (!vma || addr + len <= vma->vm_start))
68398 - return addr;
68399 + if (TASK_SIZE - len >= addr) {
68400 + vma = find_vma(mm, addr);
68401 + if (check_heap_stack_gap(vma, addr, len))
68402 + return addr;
68403 + }
68404 }
68405 if (len > mm->cached_hole_size) {
68406 - start_addr = addr = mm->free_area_cache;
68407 + start_addr = addr = mm->free_area_cache;
68408 } else {
68409 - start_addr = addr = TASK_UNMAPPED_BASE;
68410 - mm->cached_hole_size = 0;
68411 + start_addr = addr = mm->mmap_base;
68412 + mm->cached_hole_size = 0;
68413 }
68414
68415 full_search:
68416 @@ -1303,34 +1502,40 @@ full_search:
68417 * Start a new search - just in case we missed
68418 * some holes.
68419 */
68420 - if (start_addr != TASK_UNMAPPED_BASE) {
68421 - addr = TASK_UNMAPPED_BASE;
68422 - start_addr = addr;
68423 + if (start_addr != mm->mmap_base) {
68424 + start_addr = addr = mm->mmap_base;
68425 mm->cached_hole_size = 0;
68426 goto full_search;
68427 }
68428 return -ENOMEM;
68429 }
68430 - if (!vma || addr + len <= vma->vm_start) {
68431 - /*
68432 - * Remember the place where we stopped the search:
68433 - */
68434 - mm->free_area_cache = addr + len;
68435 - return addr;
68436 - }
68437 + if (check_heap_stack_gap(vma, addr, len))
68438 + break;
68439 if (addr + mm->cached_hole_size < vma->vm_start)
68440 mm->cached_hole_size = vma->vm_start - addr;
68441 addr = vma->vm_end;
68442 }
68443 +
68444 + /*
68445 + * Remember the place where we stopped the search:
68446 + */
68447 + mm->free_area_cache = addr + len;
68448 + return addr;
68449 }
68450 #endif
68451
68452 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
68453 {
68454 +
68455 +#ifdef CONFIG_PAX_SEGMEXEC
68456 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
68457 + return;
68458 +#endif
68459 +
68460 /*
68461 * Is this a new hole at the lowest possible address?
68462 */
68463 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
68464 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
68465 mm->free_area_cache = addr;
68466 mm->cached_hole_size = ~0UL;
68467 }
68468 @@ -1348,7 +1553,7 @@ arch_get_unmapped_area_topdown(struct fi
68469 {
68470 struct vm_area_struct *vma;
68471 struct mm_struct *mm = current->mm;
68472 - unsigned long addr = addr0;
68473 + unsigned long base = mm->mmap_base, addr = addr0;
68474
68475 /* requested length too big for entire address space */
68476 if (len > TASK_SIZE)
68477 @@ -1357,13 +1562,18 @@ arch_get_unmapped_area_topdown(struct fi
68478 if (flags & MAP_FIXED)
68479 return addr;
68480
68481 +#ifdef CONFIG_PAX_RANDMMAP
68482 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68483 +#endif
68484 +
68485 /* requesting a specific address */
68486 if (addr) {
68487 addr = PAGE_ALIGN(addr);
68488 - vma = find_vma(mm, addr);
68489 - if (TASK_SIZE - len >= addr &&
68490 - (!vma || addr + len <= vma->vm_start))
68491 - return addr;
68492 + if (TASK_SIZE - len >= addr) {
68493 + vma = find_vma(mm, addr);
68494 + if (check_heap_stack_gap(vma, addr, len))
68495 + return addr;
68496 + }
68497 }
68498
68499 /* check if free_area_cache is useful for us */
68500 @@ -1378,7 +1588,7 @@ arch_get_unmapped_area_topdown(struct fi
68501 /* make sure it can fit in the remaining address space */
68502 if (addr > len) {
68503 vma = find_vma(mm, addr-len);
68504 - if (!vma || addr <= vma->vm_start)
68505 + if (check_heap_stack_gap(vma, addr - len, len))
68506 /* remember the address as a hint for next time */
68507 return (mm->free_area_cache = addr-len);
68508 }
68509 @@ -1395,7 +1605,7 @@ arch_get_unmapped_area_topdown(struct fi
68510 * return with success:
68511 */
68512 vma = find_vma(mm, addr);
68513 - if (!vma || addr+len <= vma->vm_start)
68514 + if (check_heap_stack_gap(vma, addr, len))
68515 /* remember the address as a hint for next time */
68516 return (mm->free_area_cache = addr);
68517
68518 @@ -1404,8 +1614,8 @@ arch_get_unmapped_area_topdown(struct fi
68519 mm->cached_hole_size = vma->vm_start - addr;
68520
68521 /* try just below the current vma->vm_start */
68522 - addr = vma->vm_start-len;
68523 - } while (len < vma->vm_start);
68524 + addr = skip_heap_stack_gap(vma, len);
68525 + } while (!IS_ERR_VALUE(addr));
68526
68527 bottomup:
68528 /*
68529 @@ -1414,13 +1624,21 @@ bottomup:
68530 * can happen with large stack limits and large mmap()
68531 * allocations.
68532 */
68533 + mm->mmap_base = TASK_UNMAPPED_BASE;
68534 +
68535 +#ifdef CONFIG_PAX_RANDMMAP
68536 + if (mm->pax_flags & MF_PAX_RANDMMAP)
68537 + mm->mmap_base += mm->delta_mmap;
68538 +#endif
68539 +
68540 + mm->free_area_cache = mm->mmap_base;
68541 mm->cached_hole_size = ~0UL;
68542 - mm->free_area_cache = TASK_UNMAPPED_BASE;
68543 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
68544 /*
68545 * Restore the topdown base:
68546 */
68547 - mm->free_area_cache = mm->mmap_base;
68548 + mm->mmap_base = base;
68549 + mm->free_area_cache = base;
68550 mm->cached_hole_size = ~0UL;
68551
68552 return addr;
68553 @@ -1429,6 +1647,12 @@ bottomup:
68554
68555 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
68556 {
68557 +
68558 +#ifdef CONFIG_PAX_SEGMEXEC
68559 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
68560 + return;
68561 +#endif
68562 +
68563 /*
68564 * Is this a new hole at the highest possible address?
68565 */
68566 @@ -1436,8 +1660,10 @@ void arch_unmap_area_topdown(struct mm_s
68567 mm->free_area_cache = addr;
68568
68569 /* dont allow allocations above current base */
68570 - if (mm->free_area_cache > mm->mmap_base)
68571 + if (mm->free_area_cache > mm->mmap_base) {
68572 mm->free_area_cache = mm->mmap_base;
68573 + mm->cached_hole_size = ~0UL;
68574 + }
68575 }
68576
68577 unsigned long
68578 @@ -1545,6 +1771,27 @@ out:
68579 return prev ? prev->vm_next : vma;
68580 }
68581
68582 +#ifdef CONFIG_PAX_SEGMEXEC
68583 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
68584 +{
68585 + struct vm_area_struct *vma_m;
68586 +
68587 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
68588 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
68589 + BUG_ON(vma->vm_mirror);
68590 + return NULL;
68591 + }
68592 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
68593 + vma_m = vma->vm_mirror;
68594 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
68595 + BUG_ON(vma->vm_file != vma_m->vm_file);
68596 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
68597 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
68598 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
68599 + return vma_m;
68600 +}
68601 +#endif
68602 +
68603 /*
68604 * Verify that the stack growth is acceptable and
68605 * update accounting. This is shared with both the
68606 @@ -1561,6 +1808,7 @@ static int acct_stack_growth(struct vm_a
68607 return -ENOMEM;
68608
68609 /* Stack limit test */
68610 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
68611 if (size > rlim[RLIMIT_STACK].rlim_cur)
68612 return -ENOMEM;
68613
68614 @@ -1570,6 +1818,7 @@ static int acct_stack_growth(struct vm_a
68615 unsigned long limit;
68616 locked = mm->locked_vm + grow;
68617 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
68618 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68619 if (locked > limit && !capable(CAP_IPC_LOCK))
68620 return -ENOMEM;
68621 }
68622 @@ -1600,37 +1849,48 @@ static int acct_stack_growth(struct vm_a
68623 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
68624 * vma is the last one with address > vma->vm_end. Have to extend vma.
68625 */
68626 +#ifndef CONFIG_IA64
68627 +static
68628 +#endif
68629 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
68630 {
68631 int error;
68632 + bool locknext;
68633
68634 if (!(vma->vm_flags & VM_GROWSUP))
68635 return -EFAULT;
68636
68637 + /* Also guard against wrapping around to address 0. */
68638 + if (address < PAGE_ALIGN(address+1))
68639 + address = PAGE_ALIGN(address+1);
68640 + else
68641 + return -ENOMEM;
68642 +
68643 /*
68644 * We must make sure the anon_vma is allocated
68645 * so that the anon_vma locking is not a noop.
68646 */
68647 if (unlikely(anon_vma_prepare(vma)))
68648 return -ENOMEM;
68649 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
68650 + if (locknext && anon_vma_prepare(vma->vm_next))
68651 + return -ENOMEM;
68652 anon_vma_lock(vma);
68653 + if (locknext)
68654 + anon_vma_lock(vma->vm_next);
68655
68656 /*
68657 * vma->vm_start/vm_end cannot change under us because the caller
68658 * is required to hold the mmap_sem in read mode. We need the
68659 - * anon_vma lock to serialize against concurrent expand_stacks.
68660 - * Also guard against wrapping around to address 0.
68661 + * anon_vma locks to serialize against concurrent expand_stacks
68662 + * and expand_upwards.
68663 */
68664 - if (address < PAGE_ALIGN(address+4))
68665 - address = PAGE_ALIGN(address+4);
68666 - else {
68667 - anon_vma_unlock(vma);
68668 - return -ENOMEM;
68669 - }
68670 error = 0;
68671
68672 /* Somebody else might have raced and expanded it already */
68673 - if (address > vma->vm_end) {
68674 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
68675 + error = -ENOMEM;
68676 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
68677 unsigned long size, grow;
68678
68679 size = address - vma->vm_start;
68680 @@ -1640,6 +1900,8 @@ int expand_upwards(struct vm_area_struct
68681 if (!error)
68682 vma->vm_end = address;
68683 }
68684 + if (locknext)
68685 + anon_vma_unlock(vma->vm_next);
68686 anon_vma_unlock(vma);
68687 return error;
68688 }
68689 @@ -1652,6 +1914,8 @@ static int expand_downwards(struct vm_ar
68690 unsigned long address)
68691 {
68692 int error;
68693 + bool lockprev = false;
68694 + struct vm_area_struct *prev;
68695
68696 /*
68697 * We must make sure the anon_vma is allocated
68698 @@ -1665,6 +1929,15 @@ static int expand_downwards(struct vm_ar
68699 if (error)
68700 return error;
68701
68702 + prev = vma->vm_prev;
68703 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
68704 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
68705 +#endif
68706 + if (lockprev && anon_vma_prepare(prev))
68707 + return -ENOMEM;
68708 + if (lockprev)
68709 + anon_vma_lock(prev);
68710 +
68711 anon_vma_lock(vma);
68712
68713 /*
68714 @@ -1674,9 +1947,17 @@ static int expand_downwards(struct vm_ar
68715 */
68716
68717 /* Somebody else might have raced and expanded it already */
68718 - if (address < vma->vm_start) {
68719 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
68720 + error = -ENOMEM;
68721 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
68722 unsigned long size, grow;
68723
68724 +#ifdef CONFIG_PAX_SEGMEXEC
68725 + struct vm_area_struct *vma_m;
68726 +
68727 + vma_m = pax_find_mirror_vma(vma);
68728 +#endif
68729 +
68730 size = vma->vm_end - address;
68731 grow = (vma->vm_start - address) >> PAGE_SHIFT;
68732
68733 @@ -1684,9 +1965,20 @@ static int expand_downwards(struct vm_ar
68734 if (!error) {
68735 vma->vm_start = address;
68736 vma->vm_pgoff -= grow;
68737 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
68738 +
68739 +#ifdef CONFIG_PAX_SEGMEXEC
68740 + if (vma_m) {
68741 + vma_m->vm_start -= grow << PAGE_SHIFT;
68742 + vma_m->vm_pgoff -= grow;
68743 + }
68744 +#endif
68745 +
68746 }
68747 }
68748 anon_vma_unlock(vma);
68749 + if (lockprev)
68750 + anon_vma_unlock(prev);
68751 return error;
68752 }
68753
68754 @@ -1762,6 +2054,13 @@ static void remove_vma_list(struct mm_st
68755 do {
68756 long nrpages = vma_pages(vma);
68757
68758 +#ifdef CONFIG_PAX_SEGMEXEC
68759 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
68760 + vma = remove_vma(vma);
68761 + continue;
68762 + }
68763 +#endif
68764 +
68765 mm->total_vm -= nrpages;
68766 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
68767 vma = remove_vma(vma);
68768 @@ -1807,6 +2106,16 @@ detach_vmas_to_be_unmapped(struct mm_str
68769 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
68770 vma->vm_prev = NULL;
68771 do {
68772 +
68773 +#ifdef CONFIG_PAX_SEGMEXEC
68774 + if (vma->vm_mirror) {
68775 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
68776 + vma->vm_mirror->vm_mirror = NULL;
68777 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
68778 + vma->vm_mirror = NULL;
68779 + }
68780 +#endif
68781 +
68782 rb_erase(&vma->vm_rb, &mm->mm_rb);
68783 mm->map_count--;
68784 tail_vma = vma;
68785 @@ -1834,10 +2143,25 @@ int split_vma(struct mm_struct * mm, str
68786 struct mempolicy *pol;
68787 struct vm_area_struct *new;
68788
68789 +#ifdef CONFIG_PAX_SEGMEXEC
68790 + struct vm_area_struct *vma_m, *new_m = NULL;
68791 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
68792 +#endif
68793 +
68794 if (is_vm_hugetlb_page(vma) && (addr &
68795 ~(huge_page_mask(hstate_vma(vma)))))
68796 return -EINVAL;
68797
68798 +#ifdef CONFIG_PAX_SEGMEXEC
68799 + vma_m = pax_find_mirror_vma(vma);
68800 +
68801 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
68802 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
68803 + if (mm->map_count >= sysctl_max_map_count-1)
68804 + return -ENOMEM;
68805 + } else
68806 +#endif
68807 +
68808 if (mm->map_count >= sysctl_max_map_count)
68809 return -ENOMEM;
68810
68811 @@ -1845,6 +2169,16 @@ int split_vma(struct mm_struct * mm, str
68812 if (!new)
68813 return -ENOMEM;
68814
68815 +#ifdef CONFIG_PAX_SEGMEXEC
68816 + if (vma_m) {
68817 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
68818 + if (!new_m) {
68819 + kmem_cache_free(vm_area_cachep, new);
68820 + return -ENOMEM;
68821 + }
68822 + }
68823 +#endif
68824 +
68825 /* most fields are the same, copy all, and then fixup */
68826 *new = *vma;
68827
68828 @@ -1855,8 +2189,29 @@ int split_vma(struct mm_struct * mm, str
68829 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
68830 }
68831
68832 +#ifdef CONFIG_PAX_SEGMEXEC
68833 + if (vma_m) {
68834 + *new_m = *vma_m;
68835 + new_m->vm_mirror = new;
68836 + new->vm_mirror = new_m;
68837 +
68838 + if (new_below)
68839 + new_m->vm_end = addr_m;
68840 + else {
68841 + new_m->vm_start = addr_m;
68842 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
68843 + }
68844 + }
68845 +#endif
68846 +
68847 pol = mpol_dup(vma_policy(vma));
68848 if (IS_ERR(pol)) {
68849 +
68850 +#ifdef CONFIG_PAX_SEGMEXEC
68851 + if (new_m)
68852 + kmem_cache_free(vm_area_cachep, new_m);
68853 +#endif
68854 +
68855 kmem_cache_free(vm_area_cachep, new);
68856 return PTR_ERR(pol);
68857 }
68858 @@ -1877,6 +2232,28 @@ int split_vma(struct mm_struct * mm, str
68859 else
68860 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
68861
68862 +#ifdef CONFIG_PAX_SEGMEXEC
68863 + if (vma_m) {
68864 + mpol_get(pol);
68865 + vma_set_policy(new_m, pol);
68866 +
68867 + if (new_m->vm_file) {
68868 + get_file(new_m->vm_file);
68869 + if (vma_m->vm_flags & VM_EXECUTABLE)
68870 + added_exe_file_vma(mm);
68871 + }
68872 +
68873 + if (new_m->vm_ops && new_m->vm_ops->open)
68874 + new_m->vm_ops->open(new_m);
68875 +
68876 + if (new_below)
68877 + vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
68878 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
68879 + else
68880 + vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
68881 + }
68882 +#endif
68883 +
68884 return 0;
68885 }
68886
68887 @@ -1885,11 +2262,30 @@ int split_vma(struct mm_struct * mm, str
68888 * work. This now handles partial unmappings.
68889 * Jeremy Fitzhardinge <jeremy@goop.org>
68890 */
68891 +#ifdef CONFIG_PAX_SEGMEXEC
68892 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
68893 +{
68894 + int ret = __do_munmap(mm, start, len);
68895 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
68896 + return ret;
68897 +
68898 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
68899 +}
68900 +
68901 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
68902 +#else
68903 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
68904 +#endif
68905 {
68906 unsigned long end;
68907 struct vm_area_struct *vma, *prev, *last;
68908
68909 + /*
68910 + * mm->mmap_sem is required to protect against another thread
68911 + * changing the mappings in case we sleep.
68912 + */
68913 + verify_mm_writelocked(mm);
68914 +
68915 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
68916 return -EINVAL;
68917
68918 @@ -1953,6 +2349,8 @@ int do_munmap(struct mm_struct *mm, unsi
68919 /* Fix up all other VM information */
68920 remove_vma_list(mm, vma);
68921
68922 + track_exec_limit(mm, start, end, 0UL);
68923 +
68924 return 0;
68925 }
68926
68927 @@ -1965,22 +2363,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
68928
68929 profile_munmap(addr);
68930
68931 +#ifdef CONFIG_PAX_SEGMEXEC
68932 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
68933 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
68934 + return -EINVAL;
68935 +#endif
68936 +
68937 down_write(&mm->mmap_sem);
68938 ret = do_munmap(mm, addr, len);
68939 up_write(&mm->mmap_sem);
68940 return ret;
68941 }
68942
68943 -static inline void verify_mm_writelocked(struct mm_struct *mm)
68944 -{
68945 -#ifdef CONFIG_DEBUG_VM
68946 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
68947 - WARN_ON(1);
68948 - up_read(&mm->mmap_sem);
68949 - }
68950 -#endif
68951 -}
68952 -
68953 /*
68954 * this is really a simplified "do_mmap". it only handles
68955 * anonymous maps. eventually we may be able to do some
68956 @@ -1994,6 +2388,7 @@ unsigned long do_brk(unsigned long addr,
68957 struct rb_node ** rb_link, * rb_parent;
68958 pgoff_t pgoff = addr >> PAGE_SHIFT;
68959 int error;
68960 + unsigned long charged;
68961
68962 len = PAGE_ALIGN(len);
68963 if (!len)
68964 @@ -2005,16 +2400,30 @@ unsigned long do_brk(unsigned long addr,
68965
68966 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
68967
68968 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
68969 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
68970 + flags &= ~VM_EXEC;
68971 +
68972 +#ifdef CONFIG_PAX_MPROTECT
68973 + if (mm->pax_flags & MF_PAX_MPROTECT)
68974 + flags &= ~VM_MAYEXEC;
68975 +#endif
68976 +
68977 + }
68978 +#endif
68979 +
68980 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
68981 if (error & ~PAGE_MASK)
68982 return error;
68983
68984 + charged = len >> PAGE_SHIFT;
68985 +
68986 /*
68987 * mlock MCL_FUTURE?
68988 */
68989 if (mm->def_flags & VM_LOCKED) {
68990 unsigned long locked, lock_limit;
68991 - locked = len >> PAGE_SHIFT;
68992 + locked = charged;
68993 locked += mm->locked_vm;
68994 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
68995 lock_limit >>= PAGE_SHIFT;
68996 @@ -2031,22 +2440,22 @@ unsigned long do_brk(unsigned long addr,
68997 /*
68998 * Clear old maps. this also does some error checking for us
68999 */
69000 - munmap_back:
69001 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69002 if (vma && vma->vm_start < addr + len) {
69003 if (do_munmap(mm, addr, len))
69004 return -ENOMEM;
69005 - goto munmap_back;
69006 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69007 + BUG_ON(vma && vma->vm_start < addr + len);
69008 }
69009
69010 /* Check against address space limits *after* clearing old maps... */
69011 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
69012 + if (!may_expand_vm(mm, charged))
69013 return -ENOMEM;
69014
69015 if (mm->map_count > sysctl_max_map_count)
69016 return -ENOMEM;
69017
69018 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
69019 + if (security_vm_enough_memory(charged))
69020 return -ENOMEM;
69021
69022 /* Can we just expand an old private anonymous mapping? */
69023 @@ -2060,7 +2469,7 @@ unsigned long do_brk(unsigned long addr,
69024 */
69025 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69026 if (!vma) {
69027 - vm_unacct_memory(len >> PAGE_SHIFT);
69028 + vm_unacct_memory(charged);
69029 return -ENOMEM;
69030 }
69031
69032 @@ -2072,11 +2481,12 @@ unsigned long do_brk(unsigned long addr,
69033 vma->vm_page_prot = vm_get_page_prot(flags);
69034 vma_link(mm, vma, prev, rb_link, rb_parent);
69035 out:
69036 - mm->total_vm += len >> PAGE_SHIFT;
69037 + mm->total_vm += charged;
69038 if (flags & VM_LOCKED) {
69039 if (!mlock_vma_pages_range(vma, addr, addr + len))
69040 - mm->locked_vm += (len >> PAGE_SHIFT);
69041 + mm->locked_vm += charged;
69042 }
69043 + track_exec_limit(mm, addr, addr + len, flags);
69044 return addr;
69045 }
69046
69047 @@ -2123,8 +2533,10 @@ void exit_mmap(struct mm_struct *mm)
69048 * Walk the list again, actually closing and freeing it,
69049 * with preemption enabled, without holding any MM locks.
69050 */
69051 - while (vma)
69052 + while (vma) {
69053 + vma->vm_mirror = NULL;
69054 vma = remove_vma(vma);
69055 + }
69056
69057 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
69058 }
69059 @@ -2138,6 +2550,10 @@ int insert_vm_struct(struct mm_struct *
69060 struct vm_area_struct * __vma, * prev;
69061 struct rb_node ** rb_link, * rb_parent;
69062
69063 +#ifdef CONFIG_PAX_SEGMEXEC
69064 + struct vm_area_struct *vma_m = NULL;
69065 +#endif
69066 +
69067 /*
69068 * The vm_pgoff of a purely anonymous vma should be irrelevant
69069 * until its first write fault, when page's anon_vma and index
69070 @@ -2160,7 +2576,22 @@ int insert_vm_struct(struct mm_struct *
69071 if ((vma->vm_flags & VM_ACCOUNT) &&
69072 security_vm_enough_memory_mm(mm, vma_pages(vma)))
69073 return -ENOMEM;
69074 +
69075 +#ifdef CONFIG_PAX_SEGMEXEC
69076 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
69077 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69078 + if (!vma_m)
69079 + return -ENOMEM;
69080 + }
69081 +#endif
69082 +
69083 vma_link(mm, vma, prev, rb_link, rb_parent);
69084 +
69085 +#ifdef CONFIG_PAX_SEGMEXEC
69086 + if (vma_m)
69087 + pax_mirror_vma(vma_m, vma);
69088 +#endif
69089 +
69090 return 0;
69091 }
69092
69093 @@ -2178,6 +2609,8 @@ struct vm_area_struct *copy_vma(struct v
69094 struct rb_node **rb_link, *rb_parent;
69095 struct mempolicy *pol;
69096
69097 + BUG_ON(vma->vm_mirror);
69098 +
69099 /*
69100 * If anonymous vma has not yet been faulted, update new pgoff
69101 * to match new location, to increase its chance of merging.
69102 @@ -2221,6 +2654,35 @@ struct vm_area_struct *copy_vma(struct v
69103 return new_vma;
69104 }
69105
69106 +#ifdef CONFIG_PAX_SEGMEXEC
69107 +void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
69108 +{
69109 + struct vm_area_struct *prev_m;
69110 + struct rb_node **rb_link_m, *rb_parent_m;
69111 + struct mempolicy *pol_m;
69112 +
69113 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
69114 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
69115 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
69116 + *vma_m = *vma;
69117 + pol_m = vma_policy(vma_m);
69118 + mpol_get(pol_m);
69119 + vma_set_policy(vma_m, pol_m);
69120 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
69121 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
69122 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
69123 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
69124 + if (vma_m->vm_file)
69125 + get_file(vma_m->vm_file);
69126 + if (vma_m->vm_ops && vma_m->vm_ops->open)
69127 + vma_m->vm_ops->open(vma_m);
69128 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
69129 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
69130 + vma_m->vm_mirror = vma;
69131 + vma->vm_mirror = vma_m;
69132 +}
69133 +#endif
69134 +
69135 /*
69136 * Return true if the calling process may expand its vm space by the passed
69137 * number of pages
69138 @@ -2231,7 +2693,7 @@ int may_expand_vm(struct mm_struct *mm,
69139 unsigned long lim;
69140
69141 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
69142 -
69143 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
69144 if (cur + npages > lim)
69145 return 0;
69146 return 1;
69147 @@ -2301,6 +2763,22 @@ int install_special_mapping(struct mm_st
69148 vma->vm_start = addr;
69149 vma->vm_end = addr + len;
69150
69151 +#ifdef CONFIG_PAX_MPROTECT
69152 + if (mm->pax_flags & MF_PAX_MPROTECT) {
69153 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
69154 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
69155 + return -EPERM;
69156 + if (!(vm_flags & VM_EXEC))
69157 + vm_flags &= ~VM_MAYEXEC;
69158 +#else
69159 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
69160 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
69161 +#endif
69162 + else
69163 + vm_flags &= ~VM_MAYWRITE;
69164 + }
69165 +#endif
69166 +
69167 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
69168 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69169
69170 diff -urNp linux-2.6.32.45/mm/mprotect.c linux-2.6.32.45/mm/mprotect.c
69171 --- linux-2.6.32.45/mm/mprotect.c 2011-03-27 14:31:47.000000000 -0400
69172 +++ linux-2.6.32.45/mm/mprotect.c 2011-04-17 15:56:46.000000000 -0400
69173 @@ -24,10 +24,16 @@
69174 #include <linux/mmu_notifier.h>
69175 #include <linux/migrate.h>
69176 #include <linux/perf_event.h>
69177 +
69178 +#ifdef CONFIG_PAX_MPROTECT
69179 +#include <linux/elf.h>
69180 +#endif
69181 +
69182 #include <asm/uaccess.h>
69183 #include <asm/pgtable.h>
69184 #include <asm/cacheflush.h>
69185 #include <asm/tlbflush.h>
69186 +#include <asm/mmu_context.h>
69187
69188 #ifndef pgprot_modify
69189 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
69190 @@ -132,6 +138,48 @@ static void change_protection(struct vm_
69191 flush_tlb_range(vma, start, end);
69192 }
69193
69194 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69195 +/* called while holding the mmap semaphore for writing, except during stack expansion */
69196 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
69197 +{
69198 + unsigned long oldlimit, newlimit = 0UL;
69199 +
69200 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
69201 + return;
69202 +
69203 + spin_lock(&mm->page_table_lock);
69204 + oldlimit = mm->context.user_cs_limit;
69205 + if ((prot & VM_EXEC) && oldlimit < end)
69206 + /* USER_CS limit moved up */
69207 + newlimit = end;
69208 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
69209 + /* USER_CS limit moved down */
69210 + newlimit = start;
69211 +
69212 + if (newlimit) {
69213 + mm->context.user_cs_limit = newlimit;
69214 +
69215 +#ifdef CONFIG_SMP
69216 + wmb();
69217 + cpus_clear(mm->context.cpu_user_cs_mask);
69218 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
69219 +#endif
69220 +
69221 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
69222 + }
69223 + spin_unlock(&mm->page_table_lock);
69224 + if (newlimit == end) {
69225 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
69226 +
69227 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
69228 + if (is_vm_hugetlb_page(vma))
69229 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
69230 + else
69231 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
69232 + }
69233 +}
69234 +#endif
69235 +
69236 int
69237 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
69238 unsigned long start, unsigned long end, unsigned long newflags)
69239 @@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vm
69240 int error;
69241 int dirty_accountable = 0;
69242
69243 +#ifdef CONFIG_PAX_SEGMEXEC
69244 + struct vm_area_struct *vma_m = NULL;
69245 + unsigned long start_m, end_m;
69246 +
69247 + start_m = start + SEGMEXEC_TASK_SIZE;
69248 + end_m = end + SEGMEXEC_TASK_SIZE;
69249 +#endif
69250 +
69251 if (newflags == oldflags) {
69252 *pprev = vma;
69253 return 0;
69254 }
69255
69256 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
69257 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
69258 +
69259 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
69260 + return -ENOMEM;
69261 +
69262 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
69263 + return -ENOMEM;
69264 + }
69265 +
69266 /*
69267 * If we make a private mapping writable we increase our commit;
69268 * but (without finer accounting) cannot reduce our commit if we
69269 @@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vm
69270 }
69271 }
69272
69273 +#ifdef CONFIG_PAX_SEGMEXEC
69274 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
69275 + if (start != vma->vm_start) {
69276 + error = split_vma(mm, vma, start, 1);
69277 + if (error)
69278 + goto fail;
69279 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
69280 + *pprev = (*pprev)->vm_next;
69281 + }
69282 +
69283 + if (end != vma->vm_end) {
69284 + error = split_vma(mm, vma, end, 0);
69285 + if (error)
69286 + goto fail;
69287 + }
69288 +
69289 + if (pax_find_mirror_vma(vma)) {
69290 + error = __do_munmap(mm, start_m, end_m - start_m);
69291 + if (error)
69292 + goto fail;
69293 + } else {
69294 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69295 + if (!vma_m) {
69296 + error = -ENOMEM;
69297 + goto fail;
69298 + }
69299 + vma->vm_flags = newflags;
69300 + pax_mirror_vma(vma_m, vma);
69301 + }
69302 + }
69303 +#endif
69304 +
69305 /*
69306 * First try to merge with previous and/or next vma.
69307 */
69308 @@ -195,9 +293,21 @@ success:
69309 * vm_flags and vm_page_prot are protected by the mmap_sem
69310 * held in write mode.
69311 */
69312 +
69313 +#ifdef CONFIG_PAX_SEGMEXEC
69314 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
69315 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
69316 +#endif
69317 +
69318 vma->vm_flags = newflags;
69319 +
69320 +#ifdef CONFIG_PAX_MPROTECT
69321 + if (mm->binfmt && mm->binfmt->handle_mprotect)
69322 + mm->binfmt->handle_mprotect(vma, newflags);
69323 +#endif
69324 +
69325 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
69326 - vm_get_page_prot(newflags));
69327 + vm_get_page_prot(vma->vm_flags));
69328
69329 if (vma_wants_writenotify(vma)) {
69330 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
69331 @@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69332 end = start + len;
69333 if (end <= start)
69334 return -ENOMEM;
69335 +
69336 +#ifdef CONFIG_PAX_SEGMEXEC
69337 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
69338 + if (end > SEGMEXEC_TASK_SIZE)
69339 + return -EINVAL;
69340 + } else
69341 +#endif
69342 +
69343 + if (end > TASK_SIZE)
69344 + return -EINVAL;
69345 +
69346 if (!arch_validate_prot(prot))
69347 return -EINVAL;
69348
69349 @@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69350 /*
69351 * Does the application expect PROT_READ to imply PROT_EXEC:
69352 */
69353 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
69354 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
69355 prot |= PROT_EXEC;
69356
69357 vm_flags = calc_vm_prot_bits(prot);
69358 @@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69359 if (start > vma->vm_start)
69360 prev = vma;
69361
69362 +#ifdef CONFIG_PAX_MPROTECT
69363 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
69364 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
69365 +#endif
69366 +
69367 for (nstart = start ; ; ) {
69368 unsigned long newflags;
69369
69370 @@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69371
69372 /* newflags >> 4 shift VM_MAY% in place of VM_% */
69373 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
69374 + if (prot & (PROT_WRITE | PROT_EXEC))
69375 + gr_log_rwxmprotect(vma->vm_file);
69376 +
69377 + error = -EACCES;
69378 + goto out;
69379 + }
69380 +
69381 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
69382 error = -EACCES;
69383 goto out;
69384 }
69385 @@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69386 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
69387 if (error)
69388 goto out;
69389 +
69390 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
69391 +
69392 nstart = tmp;
69393
69394 if (nstart < prev->vm_end)
69395 diff -urNp linux-2.6.32.45/mm/mremap.c linux-2.6.32.45/mm/mremap.c
69396 --- linux-2.6.32.45/mm/mremap.c 2011-04-17 17:00:52.000000000 -0400
69397 +++ linux-2.6.32.45/mm/mremap.c 2011-04-17 17:03:58.000000000 -0400
69398 @@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_str
69399 continue;
69400 pte = ptep_clear_flush(vma, old_addr, old_pte);
69401 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
69402 +
69403 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69404 + if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
69405 + pte = pte_exprotect(pte);
69406 +#endif
69407 +
69408 set_pte_at(mm, new_addr, new_pte, pte);
69409 }
69410
69411 @@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_res
69412 if (is_vm_hugetlb_page(vma))
69413 goto Einval;
69414
69415 +#ifdef CONFIG_PAX_SEGMEXEC
69416 + if (pax_find_mirror_vma(vma))
69417 + goto Einval;
69418 +#endif
69419 +
69420 /* We can't remap across vm area boundaries */
69421 if (old_len > vma->vm_end - addr)
69422 goto Efault;
69423 @@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned
69424 unsigned long ret = -EINVAL;
69425 unsigned long charged = 0;
69426 unsigned long map_flags;
69427 + unsigned long pax_task_size = TASK_SIZE;
69428
69429 if (new_addr & ~PAGE_MASK)
69430 goto out;
69431
69432 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
69433 +#ifdef CONFIG_PAX_SEGMEXEC
69434 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
69435 + pax_task_size = SEGMEXEC_TASK_SIZE;
69436 +#endif
69437 +
69438 + pax_task_size -= PAGE_SIZE;
69439 +
69440 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
69441 goto out;
69442
69443 /* Check if the location we're moving into overlaps the
69444 * old location at all, and fail if it does.
69445 */
69446 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
69447 - goto out;
69448 -
69449 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
69450 + if (addr + old_len > new_addr && new_addr + new_len > addr)
69451 goto out;
69452
69453 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
69454 @@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long ad
69455 struct vm_area_struct *vma;
69456 unsigned long ret = -EINVAL;
69457 unsigned long charged = 0;
69458 + unsigned long pax_task_size = TASK_SIZE;
69459
69460 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
69461 goto out;
69462 @@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long ad
69463 if (!new_len)
69464 goto out;
69465
69466 +#ifdef CONFIG_PAX_SEGMEXEC
69467 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
69468 + pax_task_size = SEGMEXEC_TASK_SIZE;
69469 +#endif
69470 +
69471 + pax_task_size -= PAGE_SIZE;
69472 +
69473 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
69474 + old_len > pax_task_size || addr > pax_task_size-old_len)
69475 + goto out;
69476 +
69477 if (flags & MREMAP_FIXED) {
69478 if (flags & MREMAP_MAYMOVE)
69479 ret = mremap_to(addr, old_len, new_addr, new_len);
69480 @@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long ad
69481 addr + new_len);
69482 }
69483 ret = addr;
69484 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
69485 goto out;
69486 }
69487 }
69488 @@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long ad
69489 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
69490 if (ret)
69491 goto out;
69492 +
69493 + map_flags = vma->vm_flags;
69494 ret = move_vma(vma, addr, old_len, new_len, new_addr);
69495 + if (!(ret & ~PAGE_MASK)) {
69496 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
69497 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
69498 + }
69499 }
69500 out:
69501 if (ret & ~PAGE_MASK)
69502 diff -urNp linux-2.6.32.45/mm/nommu.c linux-2.6.32.45/mm/nommu.c
69503 --- linux-2.6.32.45/mm/nommu.c 2011-03-27 14:31:47.000000000 -0400
69504 +++ linux-2.6.32.45/mm/nommu.c 2011-04-17 15:56:46.000000000 -0400
69505 @@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMI
69506 int sysctl_overcommit_ratio = 50; /* default is 50% */
69507 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
69508 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
69509 -int heap_stack_gap = 0;
69510
69511 atomic_long_t mmap_pages_allocated;
69512
69513 @@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct m
69514 EXPORT_SYMBOL(find_vma);
69515
69516 /*
69517 - * find a VMA
69518 - * - we don't extend stack VMAs under NOMMU conditions
69519 - */
69520 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
69521 -{
69522 - return find_vma(mm, addr);
69523 -}
69524 -
69525 -/*
69526 * expand a stack to a given address
69527 * - not supported under NOMMU conditions
69528 */
69529 diff -urNp linux-2.6.32.45/mm/page_alloc.c linux-2.6.32.45/mm/page_alloc.c
69530 --- linux-2.6.32.45/mm/page_alloc.c 2011-06-25 12:55:35.000000000 -0400
69531 +++ linux-2.6.32.45/mm/page_alloc.c 2011-07-09 09:13:08.000000000 -0400
69532 @@ -289,7 +289,7 @@ out:
69533 * This usage means that zero-order pages may not be compound.
69534 */
69535
69536 -static void free_compound_page(struct page *page)
69537 +void free_compound_page(struct page *page)
69538 {
69539 __free_pages_ok(page, compound_order(page));
69540 }
69541 @@ -587,6 +587,10 @@ static void __free_pages_ok(struct page
69542 int bad = 0;
69543 int wasMlocked = __TestClearPageMlocked(page);
69544
69545 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
69546 + unsigned long index = 1UL << order;
69547 +#endif
69548 +
69549 kmemcheck_free_shadow(page, order);
69550
69551 for (i = 0 ; i < (1 << order) ; ++i)
69552 @@ -599,6 +603,12 @@ static void __free_pages_ok(struct page
69553 debug_check_no_obj_freed(page_address(page),
69554 PAGE_SIZE << order);
69555 }
69556 +
69557 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
69558 + for (; index; --index)
69559 + sanitize_highpage(page + index - 1);
69560 +#endif
69561 +
69562 arch_free_page(page, order);
69563 kernel_map_pages(page, 1 << order, 0);
69564
69565 @@ -702,8 +712,10 @@ static int prep_new_page(struct page *pa
69566 arch_alloc_page(page, order);
69567 kernel_map_pages(page, 1 << order, 1);
69568
69569 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
69570 if (gfp_flags & __GFP_ZERO)
69571 prep_zero_page(page, order, gfp_flags);
69572 +#endif
69573
69574 if (order && (gfp_flags & __GFP_COMP))
69575 prep_compound_page(page, order);
69576 @@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct pa
69577 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
69578 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
69579 }
69580 +
69581 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
69582 + sanitize_highpage(page);
69583 +#endif
69584 +
69585 arch_free_page(page, 0);
69586 kernel_map_pages(page, 1, 0);
69587
69588 @@ -2179,6 +2196,8 @@ void show_free_areas(void)
69589 int cpu;
69590 struct zone *zone;
69591
69592 + pax_track_stack();
69593 +
69594 for_each_populated_zone(zone) {
69595 show_node(zone);
69596 printk("%s per-cpu:\n", zone->name);
69597 @@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct p
69598 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
69599 }
69600 #else
69601 -static void inline setup_usemap(struct pglist_data *pgdat,
69602 +static inline void setup_usemap(struct pglist_data *pgdat,
69603 struct zone *zone, unsigned long zonesize) {}
69604 #endif /* CONFIG_SPARSEMEM */
69605
69606 diff -urNp linux-2.6.32.45/mm/percpu.c linux-2.6.32.45/mm/percpu.c
69607 --- linux-2.6.32.45/mm/percpu.c 2011-03-27 14:31:47.000000000 -0400
69608 +++ linux-2.6.32.45/mm/percpu.c 2011-04-17 15:56:46.000000000 -0400
69609 @@ -115,7 +115,7 @@ static unsigned int pcpu_first_unit_cpu
69610 static unsigned int pcpu_last_unit_cpu __read_mostly;
69611
69612 /* the address of the first chunk which starts with the kernel static area */
69613 -void *pcpu_base_addr __read_mostly;
69614 +void *pcpu_base_addr __read_only;
69615 EXPORT_SYMBOL_GPL(pcpu_base_addr);
69616
69617 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
69618 diff -urNp linux-2.6.32.45/mm/rmap.c linux-2.6.32.45/mm/rmap.c
69619 --- linux-2.6.32.45/mm/rmap.c 2011-03-27 14:31:47.000000000 -0400
69620 +++ linux-2.6.32.45/mm/rmap.c 2011-04-17 15:56:46.000000000 -0400
69621 @@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_stru
69622 /* page_table_lock to protect against threads */
69623 spin_lock(&mm->page_table_lock);
69624 if (likely(!vma->anon_vma)) {
69625 +
69626 +#ifdef CONFIG_PAX_SEGMEXEC
69627 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
69628 +
69629 + if (vma_m) {
69630 + BUG_ON(vma_m->anon_vma);
69631 + vma_m->anon_vma = anon_vma;
69632 + list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
69633 + }
69634 +#endif
69635 +
69636 vma->anon_vma = anon_vma;
69637 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
69638 allocated = NULL;
69639 diff -urNp linux-2.6.32.45/mm/shmem.c linux-2.6.32.45/mm/shmem.c
69640 --- linux-2.6.32.45/mm/shmem.c 2011-03-27 14:31:47.000000000 -0400
69641 +++ linux-2.6.32.45/mm/shmem.c 2011-05-18 20:09:37.000000000 -0400
69642 @@ -31,7 +31,7 @@
69643 #include <linux/swap.h>
69644 #include <linux/ima.h>
69645
69646 -static struct vfsmount *shm_mnt;
69647 +struct vfsmount *shm_mnt;
69648
69649 #ifdef CONFIG_SHMEM
69650 /*
69651 @@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *
69652 goto unlock;
69653 }
69654 entry = shmem_swp_entry(info, index, NULL);
69655 + if (!entry)
69656 + goto unlock;
69657 if (entry->val) {
69658 /*
69659 * The more uptodate page coming down from a stacked
69660 @@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_ent
69661 struct vm_area_struct pvma;
69662 struct page *page;
69663
69664 + pax_track_stack();
69665 +
69666 spol = mpol_cond_copy(&mpol,
69667 mpol_shared_policy_lookup(&info->policy, idx));
69668
69669 @@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *d
69670
69671 info = SHMEM_I(inode);
69672 inode->i_size = len-1;
69673 - if (len <= (char *)inode - (char *)info) {
69674 + if (len <= (char *)inode - (char *)info && len <= 64) {
69675 /* do it inline */
69676 memcpy(info, symname, len);
69677 inode->i_op = &shmem_symlink_inline_operations;
69678 @@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block
69679 int err = -ENOMEM;
69680
69681 /* Round up to L1_CACHE_BYTES to resist false sharing */
69682 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
69683 - L1_CACHE_BYTES), GFP_KERNEL);
69684 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
69685 if (!sbinfo)
69686 return -ENOMEM;
69687
69688 diff -urNp linux-2.6.32.45/mm/slab.c linux-2.6.32.45/mm/slab.c
69689 --- linux-2.6.32.45/mm/slab.c 2011-03-27 14:31:47.000000000 -0400
69690 +++ linux-2.6.32.45/mm/slab.c 2011-05-04 17:56:20.000000000 -0400
69691 @@ -174,7 +174,7 @@
69692
69693 /* Legal flag mask for kmem_cache_create(). */
69694 #if DEBUG
69695 -# define CREATE_MASK (SLAB_RED_ZONE | \
69696 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
69697 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
69698 SLAB_CACHE_DMA | \
69699 SLAB_STORE_USER | \
69700 @@ -182,7 +182,7 @@
69701 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
69702 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
69703 #else
69704 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
69705 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
69706 SLAB_CACHE_DMA | \
69707 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
69708 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
69709 @@ -308,7 +308,7 @@ struct kmem_list3 {
69710 * Need this for bootstrapping a per node allocator.
69711 */
69712 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
69713 -struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
69714 +struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
69715 #define CACHE_CACHE 0
69716 #define SIZE_AC MAX_NUMNODES
69717 #define SIZE_L3 (2 * MAX_NUMNODES)
69718 @@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_
69719 if ((x)->max_freeable < i) \
69720 (x)->max_freeable = i; \
69721 } while (0)
69722 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
69723 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
69724 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
69725 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
69726 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
69727 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
69728 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
69729 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
69730 #else
69731 #define STATS_INC_ACTIVE(x) do { } while (0)
69732 #define STATS_DEC_ACTIVE(x) do { } while (0)
69733 @@ -558,7 +558,7 @@ static inline void *index_to_obj(struct
69734 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
69735 */
69736 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
69737 - const struct slab *slab, void *obj)
69738 + const struct slab *slab, const void *obj)
69739 {
69740 u32 offset = (obj - slab->s_mem);
69741 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
69742 @@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
69743 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
69744 sizes[INDEX_AC].cs_size,
69745 ARCH_KMALLOC_MINALIGN,
69746 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
69747 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
69748 NULL);
69749
69750 if (INDEX_AC != INDEX_L3) {
69751 @@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
69752 kmem_cache_create(names[INDEX_L3].name,
69753 sizes[INDEX_L3].cs_size,
69754 ARCH_KMALLOC_MINALIGN,
69755 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
69756 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
69757 NULL);
69758 }
69759
69760 @@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
69761 sizes->cs_cachep = kmem_cache_create(names->name,
69762 sizes->cs_size,
69763 ARCH_KMALLOC_MINALIGN,
69764 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
69765 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
69766 NULL);
69767 }
69768 #ifdef CONFIG_ZONE_DMA
69769 @@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, vo
69770 }
69771 /* cpu stats */
69772 {
69773 - unsigned long allochit = atomic_read(&cachep->allochit);
69774 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
69775 - unsigned long freehit = atomic_read(&cachep->freehit);
69776 - unsigned long freemiss = atomic_read(&cachep->freemiss);
69777 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
69778 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
69779 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
69780 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
69781
69782 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
69783 allochit, allocmiss, freehit, freemiss);
69784 @@ -4471,15 +4471,66 @@ static const struct file_operations proc
69785
69786 static int __init slab_proc_init(void)
69787 {
69788 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
69789 + mode_t gr_mode = S_IRUGO;
69790 +
69791 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
69792 + gr_mode = S_IRUSR;
69793 +#endif
69794 +
69795 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
69796 #ifdef CONFIG_DEBUG_SLAB_LEAK
69797 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
69798 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
69799 #endif
69800 return 0;
69801 }
69802 module_init(slab_proc_init);
69803 #endif
69804
69805 +void check_object_size(const void *ptr, unsigned long n, bool to)
69806 +{
69807 +
69808 +#ifdef CONFIG_PAX_USERCOPY
69809 + struct page *page;
69810 + struct kmem_cache *cachep = NULL;
69811 + struct slab *slabp;
69812 + unsigned int objnr;
69813 + unsigned long offset;
69814 +
69815 + if (!n)
69816 + return;
69817 +
69818 + if (ZERO_OR_NULL_PTR(ptr))
69819 + goto report;
69820 +
69821 + if (!virt_addr_valid(ptr))
69822 + return;
69823 +
69824 + page = virt_to_head_page(ptr);
69825 +
69826 + if (!PageSlab(page)) {
69827 + if (object_is_on_stack(ptr, n) == -1)
69828 + goto report;
69829 + return;
69830 + }
69831 +
69832 + cachep = page_get_cache(page);
69833 + if (!(cachep->flags & SLAB_USERCOPY))
69834 + goto report;
69835 +
69836 + slabp = page_get_slab(page);
69837 + objnr = obj_to_index(cachep, slabp, ptr);
69838 + BUG_ON(objnr >= cachep->num);
69839 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
69840 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
69841 + return;
69842 +
69843 +report:
69844 + pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
69845 +#endif
69846 +
69847 +}
69848 +EXPORT_SYMBOL(check_object_size);
69849 +
69850 /**
69851 * ksize - get the actual amount of memory allocated for a given object
69852 * @objp: Pointer to the object
69853 diff -urNp linux-2.6.32.45/mm/slob.c linux-2.6.32.45/mm/slob.c
69854 --- linux-2.6.32.45/mm/slob.c 2011-03-27 14:31:47.000000000 -0400
69855 +++ linux-2.6.32.45/mm/slob.c 2011-07-06 19:53:33.000000000 -0400
69856 @@ -29,7 +29,7 @@
69857 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
69858 * alloc_pages() directly, allocating compound pages so the page order
69859 * does not have to be separately tracked, and also stores the exact
69860 - * allocation size in page->private so that it can be used to accurately
69861 + * allocation size in slob_page->size so that it can be used to accurately
69862 * provide ksize(). These objects are detected in kfree() because slob_page()
69863 * is false for them.
69864 *
69865 @@ -58,6 +58,7 @@
69866 */
69867
69868 #include <linux/kernel.h>
69869 +#include <linux/sched.h>
69870 #include <linux/slab.h>
69871 #include <linux/mm.h>
69872 #include <linux/swap.h> /* struct reclaim_state */
69873 @@ -100,7 +101,8 @@ struct slob_page {
69874 unsigned long flags; /* mandatory */
69875 atomic_t _count; /* mandatory */
69876 slobidx_t units; /* free units left in page */
69877 - unsigned long pad[2];
69878 + unsigned long pad[1];
69879 + unsigned long size; /* size when >=PAGE_SIZE */
69880 slob_t *free; /* first free slob_t in page */
69881 struct list_head list; /* linked list of free pages */
69882 };
69883 @@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
69884 */
69885 static inline int is_slob_page(struct slob_page *sp)
69886 {
69887 - return PageSlab((struct page *)sp);
69888 + return PageSlab((struct page *)sp) && !sp->size;
69889 }
69890
69891 static inline void set_slob_page(struct slob_page *sp)
69892 @@ -148,7 +150,7 @@ static inline void clear_slob_page(struc
69893
69894 static inline struct slob_page *slob_page(const void *addr)
69895 {
69896 - return (struct slob_page *)virt_to_page(addr);
69897 + return (struct slob_page *)virt_to_head_page(addr);
69898 }
69899
69900 /*
69901 @@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_
69902 /*
69903 * Return the size of a slob block.
69904 */
69905 -static slobidx_t slob_units(slob_t *s)
69906 +static slobidx_t slob_units(const slob_t *s)
69907 {
69908 if (s->units > 0)
69909 return s->units;
69910 @@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
69911 /*
69912 * Return the next free slob block pointer after this one.
69913 */
69914 -static slob_t *slob_next(slob_t *s)
69915 +static slob_t *slob_next(const slob_t *s)
69916 {
69917 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
69918 slobidx_t next;
69919 @@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
69920 /*
69921 * Returns true if s is the last free block in its page.
69922 */
69923 -static int slob_last(slob_t *s)
69924 +static int slob_last(const slob_t *s)
69925 {
69926 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
69927 }
69928 @@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, i
69929 if (!page)
69930 return NULL;
69931
69932 + set_slob_page(page);
69933 return page_address(page);
69934 }
69935
69936 @@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp
69937 if (!b)
69938 return NULL;
69939 sp = slob_page(b);
69940 - set_slob_page(sp);
69941
69942 spin_lock_irqsave(&slob_lock, flags);
69943 sp->units = SLOB_UNITS(PAGE_SIZE);
69944 sp->free = b;
69945 + sp->size = 0;
69946 INIT_LIST_HEAD(&sp->list);
69947 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
69948 set_slob_page_free(sp, slob_list);
69949 @@ -475,10 +478,9 @@ out:
69950 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
69951 #endif
69952
69953 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
69954 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
69955 {
69956 - unsigned int *m;
69957 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
69958 + slob_t *m;
69959 void *ret;
69960
69961 lockdep_trace_alloc(gfp);
69962 @@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t
69963
69964 if (!m)
69965 return NULL;
69966 - *m = size;
69967 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
69968 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
69969 + m[0].units = size;
69970 + m[1].units = align;
69971 ret = (void *)m + align;
69972
69973 trace_kmalloc_node(_RET_IP_, ret,
69974 @@ -501,16 +506,25 @@ void *__kmalloc_node(size_t size, gfp_t
69975
69976 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
69977 if (ret) {
69978 - struct page *page;
69979 - page = virt_to_page(ret);
69980 - page->private = size;
69981 + struct slob_page *sp;
69982 + sp = slob_page(ret);
69983 + sp->size = size;
69984 }
69985
69986 trace_kmalloc_node(_RET_IP_, ret,
69987 size, PAGE_SIZE << order, gfp, node);
69988 }
69989
69990 - kmemleak_alloc(ret, size, 1, gfp);
69991 + return ret;
69992 +}
69993 +
69994 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
69995 +{
69996 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
69997 + void *ret = __kmalloc_node_align(size, gfp, node, align);
69998 +
69999 + if (!ZERO_OR_NULL_PTR(ret))
70000 + kmemleak_alloc(ret, size, 1, gfp);
70001 return ret;
70002 }
70003 EXPORT_SYMBOL(__kmalloc_node);
70004 @@ -528,13 +542,88 @@ void kfree(const void *block)
70005 sp = slob_page(block);
70006 if (is_slob_page(sp)) {
70007 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70008 - unsigned int *m = (unsigned int *)(block - align);
70009 - slob_free(m, *m + align);
70010 - } else
70011 + slob_t *m = (slob_t *)(block - align);
70012 + slob_free(m, m[0].units + align);
70013 + } else {
70014 + clear_slob_page(sp);
70015 + free_slob_page(sp);
70016 + sp->size = 0;
70017 put_page(&sp->page);
70018 + }
70019 }
70020 EXPORT_SYMBOL(kfree);
70021
70022 +void check_object_size(const void *ptr, unsigned long n, bool to)
70023 +{
70024 +
70025 +#ifdef CONFIG_PAX_USERCOPY
70026 + struct slob_page *sp;
70027 + const slob_t *free;
70028 + const void *base;
70029 + unsigned long flags;
70030 +
70031 + if (!n)
70032 + return;
70033 +
70034 + if (ZERO_OR_NULL_PTR(ptr))
70035 + goto report;
70036 +
70037 + if (!virt_addr_valid(ptr))
70038 + return;
70039 +
70040 + sp = slob_page(ptr);
70041 + if (!PageSlab((struct page*)sp)) {
70042 + if (object_is_on_stack(ptr, n) == -1)
70043 + goto report;
70044 + return;
70045 + }
70046 +
70047 + if (sp->size) {
70048 + base = page_address(&sp->page);
70049 + if (base <= ptr && n <= sp->size - (ptr - base))
70050 + return;
70051 + goto report;
70052 + }
70053 +
70054 + /* some tricky double walking to find the chunk */
70055 + spin_lock_irqsave(&slob_lock, flags);
70056 + base = (void *)((unsigned long)ptr & PAGE_MASK);
70057 + free = sp->free;
70058 +
70059 + while (!slob_last(free) && (void *)free <= ptr) {
70060 + base = free + slob_units(free);
70061 + free = slob_next(free);
70062 + }
70063 +
70064 + while (base < (void *)free) {
70065 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
70066 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
70067 + int offset;
70068 +
70069 + if (ptr < base + align)
70070 + break;
70071 +
70072 + offset = ptr - base - align;
70073 + if (offset >= m) {
70074 + base += size;
70075 + continue;
70076 + }
70077 +
70078 + if (n > m - offset)
70079 + break;
70080 +
70081 + spin_unlock_irqrestore(&slob_lock, flags);
70082 + return;
70083 + }
70084 +
70085 + spin_unlock_irqrestore(&slob_lock, flags);
70086 +report:
70087 + pax_report_usercopy(ptr, n, to, NULL);
70088 +#endif
70089 +
70090 +}
70091 +EXPORT_SYMBOL(check_object_size);
70092 +
70093 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
70094 size_t ksize(const void *block)
70095 {
70096 @@ -547,10 +636,10 @@ size_t ksize(const void *block)
70097 sp = slob_page(block);
70098 if (is_slob_page(sp)) {
70099 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70100 - unsigned int *m = (unsigned int *)(block - align);
70101 - return SLOB_UNITS(*m) * SLOB_UNIT;
70102 + slob_t *m = (slob_t *)(block - align);
70103 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
70104 } else
70105 - return sp->page.private;
70106 + return sp->size;
70107 }
70108 EXPORT_SYMBOL(ksize);
70109
70110 @@ -566,8 +655,13 @@ struct kmem_cache *kmem_cache_create(con
70111 {
70112 struct kmem_cache *c;
70113
70114 +#ifdef CONFIG_PAX_USERCOPY
70115 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
70116 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
70117 +#else
70118 c = slob_alloc(sizeof(struct kmem_cache),
70119 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
70120 +#endif
70121
70122 if (c) {
70123 c->name = name;
70124 @@ -605,17 +699,25 @@ void *kmem_cache_alloc_node(struct kmem_
70125 {
70126 void *b;
70127
70128 +#ifdef CONFIG_PAX_USERCOPY
70129 + b = __kmalloc_node_align(c->size, flags, node, c->align);
70130 +#else
70131 if (c->size < PAGE_SIZE) {
70132 b = slob_alloc(c->size, flags, c->align, node);
70133 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70134 SLOB_UNITS(c->size) * SLOB_UNIT,
70135 flags, node);
70136 } else {
70137 + struct slob_page *sp;
70138 +
70139 b = slob_new_pages(flags, get_order(c->size), node);
70140 + sp = slob_page(b);
70141 + sp->size = c->size;
70142 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70143 PAGE_SIZE << get_order(c->size),
70144 flags, node);
70145 }
70146 +#endif
70147
70148 if (c->ctor)
70149 c->ctor(b);
70150 @@ -627,10 +729,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
70151
70152 static void __kmem_cache_free(void *b, int size)
70153 {
70154 - if (size < PAGE_SIZE)
70155 + struct slob_page *sp = slob_page(b);
70156 +
70157 + if (is_slob_page(sp))
70158 slob_free(b, size);
70159 - else
70160 + else {
70161 + clear_slob_page(sp);
70162 + free_slob_page(sp);
70163 + sp->size = 0;
70164 slob_free_pages(b, get_order(size));
70165 + }
70166 }
70167
70168 static void kmem_rcu_free(struct rcu_head *head)
70169 @@ -643,18 +751,32 @@ static void kmem_rcu_free(struct rcu_hea
70170
70171 void kmem_cache_free(struct kmem_cache *c, void *b)
70172 {
70173 + int size = c->size;
70174 +
70175 +#ifdef CONFIG_PAX_USERCOPY
70176 + if (size + c->align < PAGE_SIZE) {
70177 + size += c->align;
70178 + b -= c->align;
70179 + }
70180 +#endif
70181 +
70182 kmemleak_free_recursive(b, c->flags);
70183 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
70184 struct slob_rcu *slob_rcu;
70185 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
70186 + slob_rcu = b + (size - sizeof(struct slob_rcu));
70187 INIT_RCU_HEAD(&slob_rcu->head);
70188 - slob_rcu->size = c->size;
70189 + slob_rcu->size = size;
70190 call_rcu(&slob_rcu->head, kmem_rcu_free);
70191 } else {
70192 - __kmem_cache_free(b, c->size);
70193 + __kmem_cache_free(b, size);
70194 }
70195
70196 +#ifdef CONFIG_PAX_USERCOPY
70197 + trace_kfree(_RET_IP_, b);
70198 +#else
70199 trace_kmem_cache_free(_RET_IP_, b);
70200 +#endif
70201 +
70202 }
70203 EXPORT_SYMBOL(kmem_cache_free);
70204
70205 diff -urNp linux-2.6.32.45/mm/slub.c linux-2.6.32.45/mm/slub.c
70206 --- linux-2.6.32.45/mm/slub.c 2011-03-27 14:31:47.000000000 -0400
70207 +++ linux-2.6.32.45/mm/slub.c 2011-04-17 15:56:46.000000000 -0400
70208 @@ -410,7 +410,7 @@ static void print_track(const char *s, s
70209 if (!t->addr)
70210 return;
70211
70212 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
70213 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
70214 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
70215 }
70216
70217 @@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *
70218
70219 page = virt_to_head_page(x);
70220
70221 + BUG_ON(!PageSlab(page));
70222 +
70223 slab_free(s, page, x, _RET_IP_);
70224
70225 trace_kmem_cache_free(_RET_IP_, x);
70226 @@ -1937,7 +1939,7 @@ static int slub_min_objects;
70227 * Merge control. If this is set then no merging of slab caches will occur.
70228 * (Could be removed. This was introduced to pacify the merge skeptics.)
70229 */
70230 -static int slub_nomerge;
70231 +static int slub_nomerge = 1;
70232
70233 /*
70234 * Calculate the order of allocation given an slab object size.
70235 @@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_c
70236 * list to avoid pounding the page allocator excessively.
70237 */
70238 set_min_partial(s, ilog2(s->size));
70239 - s->refcount = 1;
70240 + atomic_set(&s->refcount, 1);
70241 #ifdef CONFIG_NUMA
70242 s->remote_node_defrag_ratio = 1000;
70243 #endif
70244 @@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struc
70245 void kmem_cache_destroy(struct kmem_cache *s)
70246 {
70247 down_write(&slub_lock);
70248 - s->refcount--;
70249 - if (!s->refcount) {
70250 + if (atomic_dec_and_test(&s->refcount)) {
70251 list_del(&s->list);
70252 up_write(&slub_lock);
70253 if (kmem_cache_close(s)) {
70254 @@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(cha
70255 __setup("slub_nomerge", setup_slub_nomerge);
70256
70257 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
70258 - const char *name, int size, gfp_t gfp_flags)
70259 + const char *name, int size, gfp_t gfp_flags, unsigned int flags)
70260 {
70261 - unsigned int flags = 0;
70262 -
70263 if (gfp_flags & SLUB_DMA)
70264 - flags = SLAB_CACHE_DMA;
70265 + flags |= SLAB_CACHE_DMA;
70266
70267 /*
70268 * This function is called with IRQs disabled during early-boot on
70269 @@ -2915,6 +2914,46 @@ void *__kmalloc_node(size_t size, gfp_t
70270 EXPORT_SYMBOL(__kmalloc_node);
70271 #endif
70272
70273 +void check_object_size(const void *ptr, unsigned long n, bool to)
70274 +{
70275 +
70276 +#ifdef CONFIG_PAX_USERCOPY
70277 + struct page *page;
70278 + struct kmem_cache *s = NULL;
70279 + unsigned long offset;
70280 +
70281 + if (!n)
70282 + return;
70283 +
70284 + if (ZERO_OR_NULL_PTR(ptr))
70285 + goto report;
70286 +
70287 + if (!virt_addr_valid(ptr))
70288 + return;
70289 +
70290 + page = get_object_page(ptr);
70291 +
70292 + if (!page) {
70293 + if (object_is_on_stack(ptr, n) == -1)
70294 + goto report;
70295 + return;
70296 + }
70297 +
70298 + s = page->slab;
70299 + if (!(s->flags & SLAB_USERCOPY))
70300 + goto report;
70301 +
70302 + offset = (ptr - page_address(page)) % s->size;
70303 + if (offset <= s->objsize && n <= s->objsize - offset)
70304 + return;
70305 +
70306 +report:
70307 + pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
70308 +#endif
70309 +
70310 +}
70311 +EXPORT_SYMBOL(check_object_size);
70312 +
70313 size_t ksize(const void *object)
70314 {
70315 struct page *page;
70316 @@ -3185,8 +3224,8 @@ void __init kmem_cache_init(void)
70317 * kmem_cache_open for slab_state == DOWN.
70318 */
70319 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
70320 - sizeof(struct kmem_cache_node), GFP_NOWAIT);
70321 - kmalloc_caches[0].refcount = -1;
70322 + sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
70323 + atomic_set(&kmalloc_caches[0].refcount, -1);
70324 caches++;
70325
70326 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
70327 @@ -3198,18 +3237,18 @@ void __init kmem_cache_init(void)
70328 /* Caches that are not of the two-to-the-power-of size */
70329 if (KMALLOC_MIN_SIZE <= 32) {
70330 create_kmalloc_cache(&kmalloc_caches[1],
70331 - "kmalloc-96", 96, GFP_NOWAIT);
70332 + "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
70333 caches++;
70334 }
70335 if (KMALLOC_MIN_SIZE <= 64) {
70336 create_kmalloc_cache(&kmalloc_caches[2],
70337 - "kmalloc-192", 192, GFP_NOWAIT);
70338 + "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
70339 caches++;
70340 }
70341
70342 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
70343 create_kmalloc_cache(&kmalloc_caches[i],
70344 - "kmalloc", 1 << i, GFP_NOWAIT);
70345 + "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
70346 caches++;
70347 }
70348
70349 @@ -3293,7 +3332,7 @@ static int slab_unmergeable(struct kmem_
70350 /*
70351 * We may have set a slab to be unmergeable during bootstrap.
70352 */
70353 - if (s->refcount < 0)
70354 + if (atomic_read(&s->refcount) < 0)
70355 return 1;
70356
70357 return 0;
70358 @@ -3353,7 +3392,7 @@ struct kmem_cache *kmem_cache_create(con
70359 if (s) {
70360 int cpu;
70361
70362 - s->refcount++;
70363 + atomic_inc(&s->refcount);
70364 /*
70365 * Adjust the object sizes so that we clear
70366 * the complete object on kzalloc.
70367 @@ -3372,7 +3411,7 @@ struct kmem_cache *kmem_cache_create(con
70368
70369 if (sysfs_slab_alias(s, name)) {
70370 down_write(&slub_lock);
70371 - s->refcount--;
70372 + atomic_dec(&s->refcount);
70373 up_write(&slub_lock);
70374 goto err;
70375 }
70376 @@ -4101,7 +4140,7 @@ SLAB_ATTR_RO(ctor);
70377
70378 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
70379 {
70380 - return sprintf(buf, "%d\n", s->refcount - 1);
70381 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
70382 }
70383 SLAB_ATTR_RO(aliases);
70384
70385 @@ -4503,7 +4542,7 @@ static void kmem_cache_release(struct ko
70386 kfree(s);
70387 }
70388
70389 -static struct sysfs_ops slab_sysfs_ops = {
70390 +static const struct sysfs_ops slab_sysfs_ops = {
70391 .show = slab_attr_show,
70392 .store = slab_attr_store,
70393 };
70394 @@ -4522,7 +4561,7 @@ static int uevent_filter(struct kset *ks
70395 return 0;
70396 }
70397
70398 -static struct kset_uevent_ops slab_uevent_ops = {
70399 +static const struct kset_uevent_ops slab_uevent_ops = {
70400 .filter = uevent_filter,
70401 };
70402
70403 @@ -4785,7 +4824,13 @@ static const struct file_operations proc
70404
70405 static int __init slab_proc_init(void)
70406 {
70407 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
70408 + mode_t gr_mode = S_IRUGO;
70409 +
70410 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
70411 + gr_mode = S_IRUSR;
70412 +#endif
70413 +
70414 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
70415 return 0;
70416 }
70417 module_init(slab_proc_init);
70418 diff -urNp linux-2.6.32.45/mm/swap.c linux-2.6.32.45/mm/swap.c
70419 --- linux-2.6.32.45/mm/swap.c 2011-03-27 14:31:47.000000000 -0400
70420 +++ linux-2.6.32.45/mm/swap.c 2011-07-09 09:15:19.000000000 -0400
70421 @@ -30,6 +30,7 @@
70422 #include <linux/notifier.h>
70423 #include <linux/backing-dev.h>
70424 #include <linux/memcontrol.h>
70425 +#include <linux/hugetlb.h>
70426
70427 #include "internal.h"
70428
70429 @@ -65,6 +66,8 @@ static void put_compound_page(struct pag
70430 compound_page_dtor *dtor;
70431
70432 dtor = get_compound_page_dtor(page);
70433 + if (!PageHuge(page))
70434 + BUG_ON(dtor != free_compound_page);
70435 (*dtor)(page);
70436 }
70437 }
70438 diff -urNp linux-2.6.32.45/mm/util.c linux-2.6.32.45/mm/util.c
70439 --- linux-2.6.32.45/mm/util.c 2011-03-27 14:31:47.000000000 -0400
70440 +++ linux-2.6.32.45/mm/util.c 2011-04-17 15:56:46.000000000 -0400
70441 @@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
70442 void arch_pick_mmap_layout(struct mm_struct *mm)
70443 {
70444 mm->mmap_base = TASK_UNMAPPED_BASE;
70445 +
70446 +#ifdef CONFIG_PAX_RANDMMAP
70447 + if (mm->pax_flags & MF_PAX_RANDMMAP)
70448 + mm->mmap_base += mm->delta_mmap;
70449 +#endif
70450 +
70451 mm->get_unmapped_area = arch_get_unmapped_area;
70452 mm->unmap_area = arch_unmap_area;
70453 }
70454 diff -urNp linux-2.6.32.45/mm/vmalloc.c linux-2.6.32.45/mm/vmalloc.c
70455 --- linux-2.6.32.45/mm/vmalloc.c 2011-03-27 14:31:47.000000000 -0400
70456 +++ linux-2.6.32.45/mm/vmalloc.c 2011-04-17 15:56:46.000000000 -0400
70457 @@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd,
70458
70459 pte = pte_offset_kernel(pmd, addr);
70460 do {
70461 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
70462 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
70463 +
70464 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70465 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
70466 + BUG_ON(!pte_exec(*pte));
70467 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
70468 + continue;
70469 + }
70470 +#endif
70471 +
70472 + {
70473 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
70474 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
70475 + }
70476 } while (pte++, addr += PAGE_SIZE, addr != end);
70477 }
70478
70479 @@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, un
70480 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
70481 {
70482 pte_t *pte;
70483 + int ret = -ENOMEM;
70484
70485 /*
70486 * nr is a running index into the array which helps higher level
70487 @@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, un
70488 pte = pte_alloc_kernel(pmd, addr);
70489 if (!pte)
70490 return -ENOMEM;
70491 +
70492 + pax_open_kernel();
70493 do {
70494 struct page *page = pages[*nr];
70495
70496 - if (WARN_ON(!pte_none(*pte)))
70497 - return -EBUSY;
70498 - if (WARN_ON(!page))
70499 - return -ENOMEM;
70500 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70501 + if (!(pgprot_val(prot) & _PAGE_NX))
70502 + BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
70503 + else
70504 +#endif
70505 +
70506 + if (WARN_ON(!pte_none(*pte))) {
70507 + ret = -EBUSY;
70508 + goto out;
70509 + }
70510 + if (WARN_ON(!page)) {
70511 + ret = -ENOMEM;
70512 + goto out;
70513 + }
70514 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
70515 (*nr)++;
70516 } while (pte++, addr += PAGE_SIZE, addr != end);
70517 - return 0;
70518 + ret = 0;
70519 +out:
70520 + pax_close_kernel();
70521 + return ret;
70522 }
70523
70524 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
70525 @@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void
70526 * and fall back on vmalloc() if that fails. Others
70527 * just put it in the vmalloc space.
70528 */
70529 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
70530 +#ifdef CONFIG_MODULES
70531 +#ifdef MODULES_VADDR
70532 unsigned long addr = (unsigned long)x;
70533 if (addr >= MODULES_VADDR && addr < MODULES_END)
70534 return 1;
70535 #endif
70536 +
70537 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70538 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
70539 + return 1;
70540 +#endif
70541 +
70542 +#endif
70543 +
70544 return is_vmalloc_addr(x);
70545 }
70546
70547 @@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void
70548
70549 if (!pgd_none(*pgd)) {
70550 pud_t *pud = pud_offset(pgd, addr);
70551 +#ifdef CONFIG_X86
70552 + if (!pud_large(*pud))
70553 +#endif
70554 if (!pud_none(*pud)) {
70555 pmd_t *pmd = pmd_offset(pud, addr);
70556 +#ifdef CONFIG_X86
70557 + if (!pmd_large(*pmd))
70558 +#endif
70559 if (!pmd_none(*pmd)) {
70560 pte_t *ptep, pte;
70561
70562 @@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vm
70563 struct rb_node *tmp;
70564
70565 while (*p) {
70566 - struct vmap_area *tmp;
70567 + struct vmap_area *varea;
70568
70569 parent = *p;
70570 - tmp = rb_entry(parent, struct vmap_area, rb_node);
70571 - if (va->va_start < tmp->va_end)
70572 + varea = rb_entry(parent, struct vmap_area, rb_node);
70573 + if (va->va_start < varea->va_end)
70574 p = &(*p)->rb_left;
70575 - else if (va->va_end > tmp->va_start)
70576 + else if (va->va_end > varea->va_start)
70577 p = &(*p)->rb_right;
70578 else
70579 BUG();
70580 @@ -1232,6 +1274,16 @@ static struct vm_struct *__get_vm_area_n
70581 struct vm_struct *area;
70582
70583 BUG_ON(in_interrupt());
70584 +
70585 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70586 + if (flags & VM_KERNEXEC) {
70587 + if (start != VMALLOC_START || end != VMALLOC_END)
70588 + return NULL;
70589 + start = (unsigned long)MODULES_EXEC_VADDR;
70590 + end = (unsigned long)MODULES_EXEC_END;
70591 + }
70592 +#endif
70593 +
70594 if (flags & VM_IOREMAP) {
70595 int bit = fls(size);
70596
70597 @@ -1457,6 +1509,11 @@ void *vmap(struct page **pages, unsigned
70598 if (count > totalram_pages)
70599 return NULL;
70600
70601 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70602 + if (!(pgprot_val(prot) & _PAGE_NX))
70603 + flags |= VM_KERNEXEC;
70604 +#endif
70605 +
70606 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
70607 __builtin_return_address(0));
70608 if (!area)
70609 @@ -1567,6 +1624,13 @@ static void *__vmalloc_node(unsigned lon
70610 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
70611 return NULL;
70612
70613 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70614 + if (!(pgprot_val(prot) & _PAGE_NX))
70615 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
70616 + node, gfp_mask, caller);
70617 + else
70618 +#endif
70619 +
70620 area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
70621 VMALLOC_END, node, gfp_mask, caller);
70622
70623 @@ -1585,6 +1649,7 @@ static void *__vmalloc_node(unsigned lon
70624 return addr;
70625 }
70626
70627 +#undef __vmalloc
70628 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
70629 {
70630 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
70631 @@ -1601,6 +1666,7 @@ EXPORT_SYMBOL(__vmalloc);
70632 * For tight control over page level allocator and protection flags
70633 * use __vmalloc() instead.
70634 */
70635 +#undef vmalloc
70636 void *vmalloc(unsigned long size)
70637 {
70638 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
70639 @@ -1615,6 +1681,7 @@ EXPORT_SYMBOL(vmalloc);
70640 * The resulting memory area is zeroed so it can be mapped to userspace
70641 * without leaking data.
70642 */
70643 +#undef vmalloc_user
70644 void *vmalloc_user(unsigned long size)
70645 {
70646 struct vm_struct *area;
70647 @@ -1642,6 +1709,7 @@ EXPORT_SYMBOL(vmalloc_user);
70648 * For tight control over page level allocator and protection flags
70649 * use __vmalloc() instead.
70650 */
70651 +#undef vmalloc_node
70652 void *vmalloc_node(unsigned long size, int node)
70653 {
70654 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
70655 @@ -1664,10 +1732,10 @@ EXPORT_SYMBOL(vmalloc_node);
70656 * For tight control over page level allocator and protection flags
70657 * use __vmalloc() instead.
70658 */
70659 -
70660 +#undef vmalloc_exec
70661 void *vmalloc_exec(unsigned long size)
70662 {
70663 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
70664 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
70665 -1, __builtin_return_address(0));
70666 }
70667
70668 @@ -1686,6 +1754,7 @@ void *vmalloc_exec(unsigned long size)
70669 * Allocate enough 32bit PA addressable pages to cover @size from the
70670 * page level allocator and map them into contiguous kernel virtual space.
70671 */
70672 +#undef vmalloc_32
70673 void *vmalloc_32(unsigned long size)
70674 {
70675 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
70676 @@ -1700,6 +1769,7 @@ EXPORT_SYMBOL(vmalloc_32);
70677 * The resulting memory area is 32bit addressable and zeroed so it can be
70678 * mapped to userspace without leaking data.
70679 */
70680 +#undef vmalloc_32_user
70681 void *vmalloc_32_user(unsigned long size)
70682 {
70683 struct vm_struct *area;
70684 @@ -1964,6 +2034,8 @@ int remap_vmalloc_range(struct vm_area_s
70685 unsigned long uaddr = vma->vm_start;
70686 unsigned long usize = vma->vm_end - vma->vm_start;
70687
70688 + BUG_ON(vma->vm_mirror);
70689 +
70690 if ((PAGE_SIZE-1) & (unsigned long)addr)
70691 return -EINVAL;
70692
70693 diff -urNp linux-2.6.32.45/mm/vmstat.c linux-2.6.32.45/mm/vmstat.c
70694 --- linux-2.6.32.45/mm/vmstat.c 2011-03-27 14:31:47.000000000 -0400
70695 +++ linux-2.6.32.45/mm/vmstat.c 2011-04-17 15:56:46.000000000 -0400
70696 @@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
70697 *
70698 * vm_stat contains the global counters
70699 */
70700 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70701 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70702 EXPORT_SYMBOL(vm_stat);
70703
70704 #ifdef CONFIG_SMP
70705 @@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
70706 v = p->vm_stat_diff[i];
70707 p->vm_stat_diff[i] = 0;
70708 local_irq_restore(flags);
70709 - atomic_long_add(v, &zone->vm_stat[i]);
70710 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
70711 global_diff[i] += v;
70712 #ifdef CONFIG_NUMA
70713 /* 3 seconds idle till flush */
70714 @@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
70715
70716 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
70717 if (global_diff[i])
70718 - atomic_long_add(global_diff[i], &vm_stat[i]);
70719 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
70720 }
70721
70722 #endif
70723 @@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
70724 start_cpu_timer(cpu);
70725 #endif
70726 #ifdef CONFIG_PROC_FS
70727 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
70728 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
70729 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
70730 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
70731 + {
70732 + mode_t gr_mode = S_IRUGO;
70733 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
70734 + gr_mode = S_IRUSR;
70735 +#endif
70736 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
70737 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
70738 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
70739 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
70740 +#else
70741 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
70742 +#endif
70743 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
70744 + }
70745 #endif
70746 return 0;
70747 }
70748 diff -urNp linux-2.6.32.45/net/8021q/vlan.c linux-2.6.32.45/net/8021q/vlan.c
70749 --- linux-2.6.32.45/net/8021q/vlan.c 2011-03-27 14:31:47.000000000 -0400
70750 +++ linux-2.6.32.45/net/8021q/vlan.c 2011-04-17 15:56:46.000000000 -0400
70751 @@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net
70752 err = -EPERM;
70753 if (!capable(CAP_NET_ADMIN))
70754 break;
70755 - if ((args.u.name_type >= 0) &&
70756 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
70757 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
70758 struct vlan_net *vn;
70759
70760 vn = net_generic(net, vlan_net_id);
70761 diff -urNp linux-2.6.32.45/net/atm/atm_misc.c linux-2.6.32.45/net/atm/atm_misc.c
70762 --- linux-2.6.32.45/net/atm/atm_misc.c 2011-03-27 14:31:47.000000000 -0400
70763 +++ linux-2.6.32.45/net/atm/atm_misc.c 2011-04-17 15:56:46.000000000 -0400
70764 @@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int t
70765 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
70766 return 1;
70767 atm_return(vcc,truesize);
70768 - atomic_inc(&vcc->stats->rx_drop);
70769 + atomic_inc_unchecked(&vcc->stats->rx_drop);
70770 return 0;
70771 }
70772
70773 @@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct
70774 }
70775 }
70776 atm_return(vcc,guess);
70777 - atomic_inc(&vcc->stats->rx_drop);
70778 + atomic_inc_unchecked(&vcc->stats->rx_drop);
70779 return NULL;
70780 }
70781
70782 @@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafpr
70783
70784 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
70785 {
70786 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
70787 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
70788 __SONET_ITEMS
70789 #undef __HANDLE_ITEM
70790 }
70791 @@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_sta
70792
70793 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
70794 {
70795 -#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
70796 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
70797 __SONET_ITEMS
70798 #undef __HANDLE_ITEM
70799 }
70800 diff -urNp linux-2.6.32.45/net/atm/lec.h linux-2.6.32.45/net/atm/lec.h
70801 --- linux-2.6.32.45/net/atm/lec.h 2011-03-27 14:31:47.000000000 -0400
70802 +++ linux-2.6.32.45/net/atm/lec.h 2011-08-05 20:33:55.000000000 -0400
70803 @@ -48,7 +48,7 @@ struct lane2_ops {
70804 const u8 *tlvs, u32 sizeoftlvs);
70805 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
70806 const u8 *tlvs, u32 sizeoftlvs);
70807 -};
70808 +} __no_const;
70809
70810 /*
70811 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
70812 diff -urNp linux-2.6.32.45/net/atm/mpc.h linux-2.6.32.45/net/atm/mpc.h
70813 --- linux-2.6.32.45/net/atm/mpc.h 2011-03-27 14:31:47.000000000 -0400
70814 +++ linux-2.6.32.45/net/atm/mpc.h 2011-08-23 21:22:38.000000000 -0400
70815 @@ -33,7 +33,7 @@ struct mpoa_client {
70816 struct mpc_parameters parameters; /* parameters for this client */
70817
70818 const struct net_device_ops *old_ops;
70819 - struct net_device_ops new_ops;
70820 + net_device_ops_no_const new_ops;
70821 };
70822
70823
70824 diff -urNp linux-2.6.32.45/net/atm/mpoa_caches.c linux-2.6.32.45/net/atm/mpoa_caches.c
70825 --- linux-2.6.32.45/net/atm/mpoa_caches.c 2011-03-27 14:31:47.000000000 -0400
70826 +++ linux-2.6.32.45/net/atm/mpoa_caches.c 2011-05-16 21:46:57.000000000 -0400
70827 @@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_cl
70828 struct timeval now;
70829 struct k_message msg;
70830
70831 + pax_track_stack();
70832 +
70833 do_gettimeofday(&now);
70834
70835 write_lock_irq(&client->egress_lock);
70836 diff -urNp linux-2.6.32.45/net/atm/proc.c linux-2.6.32.45/net/atm/proc.c
70837 --- linux-2.6.32.45/net/atm/proc.c 2011-03-27 14:31:47.000000000 -0400
70838 +++ linux-2.6.32.45/net/atm/proc.c 2011-04-17 15:56:46.000000000 -0400
70839 @@ -43,9 +43,9 @@ static void add_stats(struct seq_file *s
70840 const struct k_atm_aal_stats *stats)
70841 {
70842 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
70843 - atomic_read(&stats->tx),atomic_read(&stats->tx_err),
70844 - atomic_read(&stats->rx),atomic_read(&stats->rx_err),
70845 - atomic_read(&stats->rx_drop));
70846 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
70847 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
70848 + atomic_read_unchecked(&stats->rx_drop));
70849 }
70850
70851 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
70852 @@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *se
70853 {
70854 struct sock *sk = sk_atm(vcc);
70855
70856 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70857 + seq_printf(seq, "%p ", NULL);
70858 +#else
70859 seq_printf(seq, "%p ", vcc);
70860 +#endif
70861 +
70862 if (!vcc->dev)
70863 seq_printf(seq, "Unassigned ");
70864 else
70865 @@ -214,7 +219,11 @@ static void svc_info(struct seq_file *se
70866 {
70867 if (!vcc->dev)
70868 seq_printf(seq, sizeof(void *) == 4 ?
70869 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70870 + "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
70871 +#else
70872 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
70873 +#endif
70874 else
70875 seq_printf(seq, "%3d %3d %5d ",
70876 vcc->dev->number, vcc->vpi, vcc->vci);
70877 diff -urNp linux-2.6.32.45/net/atm/resources.c linux-2.6.32.45/net/atm/resources.c
70878 --- linux-2.6.32.45/net/atm/resources.c 2011-03-27 14:31:47.000000000 -0400
70879 +++ linux-2.6.32.45/net/atm/resources.c 2011-04-17 15:56:46.000000000 -0400
70880 @@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *
70881 static void copy_aal_stats(struct k_atm_aal_stats *from,
70882 struct atm_aal_stats *to)
70883 {
70884 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
70885 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
70886 __AAL_STAT_ITEMS
70887 #undef __HANDLE_ITEM
70888 }
70889 @@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_
70890 static void subtract_aal_stats(struct k_atm_aal_stats *from,
70891 struct atm_aal_stats *to)
70892 {
70893 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
70894 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
70895 __AAL_STAT_ITEMS
70896 #undef __HANDLE_ITEM
70897 }
70898 diff -urNp linux-2.6.32.45/net/bluetooth/l2cap.c linux-2.6.32.45/net/bluetooth/l2cap.c
70899 --- linux-2.6.32.45/net/bluetooth/l2cap.c 2011-03-27 14:31:47.000000000 -0400
70900 +++ linux-2.6.32.45/net/bluetooth/l2cap.c 2011-06-25 14:36:21.000000000 -0400
70901 @@ -1885,7 +1885,7 @@ static int l2cap_sock_getsockopt_old(str
70902 err = -ENOTCONN;
70903 break;
70904 }
70905 -
70906 + memset(&cinfo, 0, sizeof(cinfo));
70907 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
70908 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
70909
70910 @@ -2719,7 +2719,7 @@ static inline int l2cap_config_req(struc
70911
70912 /* Reject if config buffer is too small. */
70913 len = cmd_len - sizeof(*req);
70914 - if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
70915 + if (len < 0 || l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
70916 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
70917 l2cap_build_conf_rsp(sk, rsp,
70918 L2CAP_CONF_REJECT, flags), rsp);
70919 diff -urNp linux-2.6.32.45/net/bluetooth/rfcomm/sock.c linux-2.6.32.45/net/bluetooth/rfcomm/sock.c
70920 --- linux-2.6.32.45/net/bluetooth/rfcomm/sock.c 2011-03-27 14:31:47.000000000 -0400
70921 +++ linux-2.6.32.45/net/bluetooth/rfcomm/sock.c 2011-06-12 06:35:00.000000000 -0400
70922 @@ -878,6 +878,7 @@ static int rfcomm_sock_getsockopt_old(st
70923
70924 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
70925
70926 + memset(&cinfo, 0, sizeof(cinfo));
70927 cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle;
70928 memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3);
70929
70930 diff -urNp linux-2.6.32.45/net/bridge/br_private.h linux-2.6.32.45/net/bridge/br_private.h
70931 --- linux-2.6.32.45/net/bridge/br_private.h 2011-08-09 18:35:30.000000000 -0400
70932 +++ linux-2.6.32.45/net/bridge/br_private.h 2011-08-09 18:34:01.000000000 -0400
70933 @@ -255,7 +255,7 @@ extern void br_ifinfo_notify(int event,
70934
70935 #ifdef CONFIG_SYSFS
70936 /* br_sysfs_if.c */
70937 -extern struct sysfs_ops brport_sysfs_ops;
70938 +extern const struct sysfs_ops brport_sysfs_ops;
70939 extern int br_sysfs_addif(struct net_bridge_port *p);
70940
70941 /* br_sysfs_br.c */
70942 diff -urNp linux-2.6.32.45/net/bridge/br_stp_if.c linux-2.6.32.45/net/bridge/br_stp_if.c
70943 --- linux-2.6.32.45/net/bridge/br_stp_if.c 2011-03-27 14:31:47.000000000 -0400
70944 +++ linux-2.6.32.45/net/bridge/br_stp_if.c 2011-04-17 15:56:46.000000000 -0400
70945 @@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridg
70946 char *envp[] = { NULL };
70947
70948 if (br->stp_enabled == BR_USER_STP) {
70949 - r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
70950 + r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
70951 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
70952 br->dev->name, r);
70953
70954 diff -urNp linux-2.6.32.45/net/bridge/br_sysfs_if.c linux-2.6.32.45/net/bridge/br_sysfs_if.c
70955 --- linux-2.6.32.45/net/bridge/br_sysfs_if.c 2011-03-27 14:31:47.000000000 -0400
70956 +++ linux-2.6.32.45/net/bridge/br_sysfs_if.c 2011-04-17 15:56:46.000000000 -0400
70957 @@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobje
70958 return ret;
70959 }
70960
70961 -struct sysfs_ops brport_sysfs_ops = {
70962 +const struct sysfs_ops brport_sysfs_ops = {
70963 .show = brport_show,
70964 .store = brport_store,
70965 };
70966 diff -urNp linux-2.6.32.45/net/bridge/netfilter/ebtables.c linux-2.6.32.45/net/bridge/netfilter/ebtables.c
70967 --- linux-2.6.32.45/net/bridge/netfilter/ebtables.c 2011-04-17 17:00:52.000000000 -0400
70968 +++ linux-2.6.32.45/net/bridge/netfilter/ebtables.c 2011-05-16 21:46:57.000000000 -0400
70969 @@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struc
70970 unsigned int entries_size, nentries;
70971 char *entries;
70972
70973 + pax_track_stack();
70974 +
70975 if (cmd == EBT_SO_GET_ENTRIES) {
70976 entries_size = t->private->entries_size;
70977 nentries = t->private->nentries;
70978 diff -urNp linux-2.6.32.45/net/can/bcm.c linux-2.6.32.45/net/can/bcm.c
70979 --- linux-2.6.32.45/net/can/bcm.c 2011-05-10 22:12:01.000000000 -0400
70980 +++ linux-2.6.32.45/net/can/bcm.c 2011-05-10 22:12:34.000000000 -0400
70981 @@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file
70982 struct bcm_sock *bo = bcm_sk(sk);
70983 struct bcm_op *op;
70984
70985 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70986 + seq_printf(m, ">>> socket %p", NULL);
70987 + seq_printf(m, " / sk %p", NULL);
70988 + seq_printf(m, " / bo %p", NULL);
70989 +#else
70990 seq_printf(m, ">>> socket %p", sk->sk_socket);
70991 seq_printf(m, " / sk %p", sk);
70992 seq_printf(m, " / bo %p", bo);
70993 +#endif
70994 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
70995 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
70996 seq_printf(m, " <<<\n");
70997 diff -urNp linux-2.6.32.45/net/core/dev.c linux-2.6.32.45/net/core/dev.c
70998 --- linux-2.6.32.45/net/core/dev.c 2011-04-17 17:00:52.000000000 -0400
70999 +++ linux-2.6.32.45/net/core/dev.c 2011-08-05 20:33:55.000000000 -0400
71000 @@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const cha
71001 if (no_module && capable(CAP_NET_ADMIN))
71002 no_module = request_module("netdev-%s", name);
71003 if (no_module && capable(CAP_SYS_MODULE)) {
71004 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
71005 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
71006 +#else
71007 if (!request_module("%s", name))
71008 pr_err("Loading kernel module for a network device "
71009 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
71010 "instead\n", name);
71011 +#endif
71012 }
71013 }
71014 EXPORT_SYMBOL(dev_load);
71015 @@ -1654,7 +1658,7 @@ static inline int illegal_highdma(struct
71016
71017 struct dev_gso_cb {
71018 void (*destructor)(struct sk_buff *skb);
71019 -};
71020 +} __no_const;
71021
71022 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
71023
71024 @@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
71025 }
71026 EXPORT_SYMBOL(netif_rx_ni);
71027
71028 -static void net_tx_action(struct softirq_action *h)
71029 +static void net_tx_action(void)
71030 {
71031 struct softnet_data *sd = &__get_cpu_var(softnet_data);
71032
71033 @@ -2826,7 +2830,7 @@ void netif_napi_del(struct napi_struct *
71034 EXPORT_SYMBOL(netif_napi_del);
71035
71036
71037 -static void net_rx_action(struct softirq_action *h)
71038 +static void net_rx_action(void)
71039 {
71040 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
71041 unsigned long time_limit = jiffies + 2;
71042 diff -urNp linux-2.6.32.45/net/core/flow.c linux-2.6.32.45/net/core/flow.c
71043 --- linux-2.6.32.45/net/core/flow.c 2011-03-27 14:31:47.000000000 -0400
71044 +++ linux-2.6.32.45/net/core/flow.c 2011-05-04 17:56:20.000000000 -0400
71045 @@ -35,11 +35,11 @@ struct flow_cache_entry {
71046 atomic_t *object_ref;
71047 };
71048
71049 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
71050 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
71051
71052 static u32 flow_hash_shift;
71053 #define flow_hash_size (1 << flow_hash_shift)
71054 -static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
71055 +static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
71056
71057 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
71058
71059 @@ -52,7 +52,7 @@ struct flow_percpu_info {
71060 u32 hash_rnd;
71061 int count;
71062 };
71063 -static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
71064 +static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
71065
71066 #define flow_hash_rnd_recalc(cpu) \
71067 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
71068 @@ -69,7 +69,7 @@ struct flow_flush_info {
71069 atomic_t cpuleft;
71070 struct completion completion;
71071 };
71072 -static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
71073 +static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
71074
71075 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
71076
71077 @@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net,
71078 if (fle->family == family &&
71079 fle->dir == dir &&
71080 flow_key_compare(key, &fle->key) == 0) {
71081 - if (fle->genid == atomic_read(&flow_cache_genid)) {
71082 + if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
71083 void *ret = fle->object;
71084
71085 if (ret)
71086 @@ -228,7 +228,7 @@ nocache:
71087 err = resolver(net, key, family, dir, &obj, &obj_ref);
71088
71089 if (fle && !err) {
71090 - fle->genid = atomic_read(&flow_cache_genid);
71091 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
71092
71093 if (fle->object)
71094 atomic_dec(fle->object_ref);
71095 @@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(uns
71096
71097 fle = flow_table(cpu)[i];
71098 for (; fle; fle = fle->next) {
71099 - unsigned genid = atomic_read(&flow_cache_genid);
71100 + unsigned genid = atomic_read_unchecked(&flow_cache_genid);
71101
71102 if (!fle->object || fle->genid == genid)
71103 continue;
71104 diff -urNp linux-2.6.32.45/net/core/rtnetlink.c linux-2.6.32.45/net/core/rtnetlink.c
71105 --- linux-2.6.32.45/net/core/rtnetlink.c 2011-03-27 14:31:47.000000000 -0400
71106 +++ linux-2.6.32.45/net/core/rtnetlink.c 2011-08-05 20:33:55.000000000 -0400
71107 @@ -57,7 +57,7 @@ struct rtnl_link
71108 {
71109 rtnl_doit_func doit;
71110 rtnl_dumpit_func dumpit;
71111 -};
71112 +} __no_const;
71113
71114 static DEFINE_MUTEX(rtnl_mutex);
71115
71116 diff -urNp linux-2.6.32.45/net/core/secure_seq.c linux-2.6.32.45/net/core/secure_seq.c
71117 --- linux-2.6.32.45/net/core/secure_seq.c 2011-08-16 20:37:25.000000000 -0400
71118 +++ linux-2.6.32.45/net/core/secure_seq.c 2011-08-07 19:48:09.000000000 -0400
71119 @@ -57,7 +57,7 @@ __u32 secure_tcpv6_sequence_number(__be3
71120 EXPORT_SYMBOL(secure_tcpv6_sequence_number);
71121
71122 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
71123 - __be16 dport)
71124 + __be16 dport)
71125 {
71126 u32 secret[MD5_MESSAGE_BYTES / 4];
71127 u32 hash[MD5_DIGEST_WORDS];
71128 @@ -71,7 +71,6 @@ u32 secure_ipv6_port_ephemeral(const __b
71129 secret[i] = net_secret[i];
71130
71131 md5_transform(hash, secret);
71132 -
71133 return hash[0];
71134 }
71135 #endif
71136 diff -urNp linux-2.6.32.45/net/core/skbuff.c linux-2.6.32.45/net/core/skbuff.c
71137 --- linux-2.6.32.45/net/core/skbuff.c 2011-03-27 14:31:47.000000000 -0400
71138 +++ linux-2.6.32.45/net/core/skbuff.c 2011-05-16 21:46:57.000000000 -0400
71139 @@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb,
71140 struct sk_buff *frag_iter;
71141 struct sock *sk = skb->sk;
71142
71143 + pax_track_stack();
71144 +
71145 /*
71146 * __skb_splice_bits() only fails if the output has no room left,
71147 * so no point in going over the frag_list for the error case.
71148 diff -urNp linux-2.6.32.45/net/core/sock.c linux-2.6.32.45/net/core/sock.c
71149 --- linux-2.6.32.45/net/core/sock.c 2011-03-27 14:31:47.000000000 -0400
71150 +++ linux-2.6.32.45/net/core/sock.c 2011-05-04 17:56:20.000000000 -0400
71151 @@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock,
71152 break;
71153
71154 case SO_PEERCRED:
71155 + {
71156 + struct ucred peercred;
71157 if (len > sizeof(sk->sk_peercred))
71158 len = sizeof(sk->sk_peercred);
71159 - if (copy_to_user(optval, &sk->sk_peercred, len))
71160 + peercred = sk->sk_peercred;
71161 + if (copy_to_user(optval, &peercred, len))
71162 return -EFAULT;
71163 goto lenout;
71164 + }
71165
71166 case SO_PEERNAME:
71167 {
71168 @@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock,
71169 */
71170 smp_wmb();
71171 atomic_set(&sk->sk_refcnt, 1);
71172 - atomic_set(&sk->sk_drops, 0);
71173 + atomic_set_unchecked(&sk->sk_drops, 0);
71174 }
71175 EXPORT_SYMBOL(sock_init_data);
71176
71177 diff -urNp linux-2.6.32.45/net/decnet/sysctl_net_decnet.c linux-2.6.32.45/net/decnet/sysctl_net_decnet.c
71178 --- linux-2.6.32.45/net/decnet/sysctl_net_decnet.c 2011-03-27 14:31:47.000000000 -0400
71179 +++ linux-2.6.32.45/net/decnet/sysctl_net_decnet.c 2011-04-17 15:56:46.000000000 -0400
71180 @@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_t
71181
71182 if (len > *lenp) len = *lenp;
71183
71184 - if (copy_to_user(buffer, addr, len))
71185 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
71186 return -EFAULT;
71187
71188 *lenp = len;
71189 @@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table
71190
71191 if (len > *lenp) len = *lenp;
71192
71193 - if (copy_to_user(buffer, devname, len))
71194 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
71195 return -EFAULT;
71196
71197 *lenp = len;
71198 diff -urNp linux-2.6.32.45/net/econet/Kconfig linux-2.6.32.45/net/econet/Kconfig
71199 --- linux-2.6.32.45/net/econet/Kconfig 2011-03-27 14:31:47.000000000 -0400
71200 +++ linux-2.6.32.45/net/econet/Kconfig 2011-04-17 15:56:46.000000000 -0400
71201 @@ -4,7 +4,7 @@
71202
71203 config ECONET
71204 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
71205 - depends on EXPERIMENTAL && INET
71206 + depends on EXPERIMENTAL && INET && BROKEN
71207 ---help---
71208 Econet is a fairly old and slow networking protocol mainly used by
71209 Acorn computers to access file and print servers. It uses native
71210 diff -urNp linux-2.6.32.45/net/ieee802154/dgram.c linux-2.6.32.45/net/ieee802154/dgram.c
71211 --- linux-2.6.32.45/net/ieee802154/dgram.c 2011-03-27 14:31:47.000000000 -0400
71212 +++ linux-2.6.32.45/net/ieee802154/dgram.c 2011-05-04 17:56:28.000000000 -0400
71213 @@ -318,7 +318,7 @@ out:
71214 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
71215 {
71216 if (sock_queue_rcv_skb(sk, skb) < 0) {
71217 - atomic_inc(&sk->sk_drops);
71218 + atomic_inc_unchecked(&sk->sk_drops);
71219 kfree_skb(skb);
71220 return NET_RX_DROP;
71221 }
71222 diff -urNp linux-2.6.32.45/net/ieee802154/raw.c linux-2.6.32.45/net/ieee802154/raw.c
71223 --- linux-2.6.32.45/net/ieee802154/raw.c 2011-03-27 14:31:47.000000000 -0400
71224 +++ linux-2.6.32.45/net/ieee802154/raw.c 2011-05-04 17:56:28.000000000 -0400
71225 @@ -206,7 +206,7 @@ out:
71226 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
71227 {
71228 if (sock_queue_rcv_skb(sk, skb) < 0) {
71229 - atomic_inc(&sk->sk_drops);
71230 + atomic_inc_unchecked(&sk->sk_drops);
71231 kfree_skb(skb);
71232 return NET_RX_DROP;
71233 }
71234 diff -urNp linux-2.6.32.45/net/ipv4/inet_diag.c linux-2.6.32.45/net/ipv4/inet_diag.c
71235 --- linux-2.6.32.45/net/ipv4/inet_diag.c 2011-07-13 17:23:04.000000000 -0400
71236 +++ linux-2.6.32.45/net/ipv4/inet_diag.c 2011-06-20 19:31:13.000000000 -0400
71237 @@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct soc
71238 r->idiag_retrans = 0;
71239
71240 r->id.idiag_if = sk->sk_bound_dev_if;
71241 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71242 + r->id.idiag_cookie[0] = 0;
71243 + r->id.idiag_cookie[1] = 0;
71244 +#else
71245 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
71246 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
71247 +#endif
71248
71249 r->id.idiag_sport = inet->sport;
71250 r->id.idiag_dport = inet->dport;
71251 @@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct in
71252 r->idiag_family = tw->tw_family;
71253 r->idiag_retrans = 0;
71254 r->id.idiag_if = tw->tw_bound_dev_if;
71255 +
71256 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71257 + r->id.idiag_cookie[0] = 0;
71258 + r->id.idiag_cookie[1] = 0;
71259 +#else
71260 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
71261 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
71262 +#endif
71263 +
71264 r->id.idiag_sport = tw->tw_sport;
71265 r->id.idiag_dport = tw->tw_dport;
71266 r->id.idiag_src[0] = tw->tw_rcv_saddr;
71267 @@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk
71268 if (sk == NULL)
71269 goto unlock;
71270
71271 +#ifndef CONFIG_GRKERNSEC_HIDESYM
71272 err = -ESTALE;
71273 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
71274 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
71275 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
71276 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
71277 goto out;
71278 +#endif
71279
71280 err = -ENOMEM;
71281 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
71282 @@ -579,8 +593,14 @@ static int inet_diag_fill_req(struct sk_
71283 r->idiag_retrans = req->retrans;
71284
71285 r->id.idiag_if = sk->sk_bound_dev_if;
71286 +
71287 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71288 + r->id.idiag_cookie[0] = 0;
71289 + r->id.idiag_cookie[1] = 0;
71290 +#else
71291 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
71292 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
71293 +#endif
71294
71295 tmo = req->expires - jiffies;
71296 if (tmo < 0)
71297 diff -urNp linux-2.6.32.45/net/ipv4/inet_hashtables.c linux-2.6.32.45/net/ipv4/inet_hashtables.c
71298 --- linux-2.6.32.45/net/ipv4/inet_hashtables.c 2011-08-16 20:37:25.000000000 -0400
71299 +++ linux-2.6.32.45/net/ipv4/inet_hashtables.c 2011-08-16 20:42:30.000000000 -0400
71300 @@ -18,12 +18,15 @@
71301 #include <linux/sched.h>
71302 #include <linux/slab.h>
71303 #include <linux/wait.h>
71304 +#include <linux/security.h>
71305
71306 #include <net/inet_connection_sock.h>
71307 #include <net/inet_hashtables.h>
71308 #include <net/secure_seq.h>
71309 #include <net/ip.h>
71310
71311 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
71312 +
71313 /*
71314 * Allocate and initialize a new local port bind bucket.
71315 * The bindhash mutex for snum's hash chain must be held here.
71316 @@ -491,6 +494,8 @@ ok:
71317 }
71318 spin_unlock(&head->lock);
71319
71320 + gr_update_task_in_ip_table(current, inet_sk(sk));
71321 +
71322 if (tw) {
71323 inet_twsk_deschedule(tw, death_row);
71324 inet_twsk_put(tw);
71325 diff -urNp linux-2.6.32.45/net/ipv4/inetpeer.c linux-2.6.32.45/net/ipv4/inetpeer.c
71326 --- linux-2.6.32.45/net/ipv4/inetpeer.c 2011-08-16 20:37:25.000000000 -0400
71327 +++ linux-2.6.32.45/net/ipv4/inetpeer.c 2011-08-07 19:48:09.000000000 -0400
71328 @@ -367,6 +367,8 @@ struct inet_peer *inet_getpeer(__be32 da
71329 struct inet_peer *p, *n;
71330 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
71331
71332 + pax_track_stack();
71333 +
71334 /* Look up for the address quickly. */
71335 read_lock_bh(&peer_pool_lock);
71336 p = lookup(daddr, NULL);
71337 @@ -390,7 +392,7 @@ struct inet_peer *inet_getpeer(__be32 da
71338 return NULL;
71339 n->v4daddr = daddr;
71340 atomic_set(&n->refcnt, 1);
71341 - atomic_set(&n->rid, 0);
71342 + atomic_set_unchecked(&n->rid, 0);
71343 n->ip_id_count = secure_ip_id(daddr);
71344 n->tcp_ts_stamp = 0;
71345
71346 diff -urNp linux-2.6.32.45/net/ipv4/ip_fragment.c linux-2.6.32.45/net/ipv4/ip_fragment.c
71347 --- linux-2.6.32.45/net/ipv4/ip_fragment.c 2011-03-27 14:31:47.000000000 -0400
71348 +++ linux-2.6.32.45/net/ipv4/ip_fragment.c 2011-04-17 15:56:46.000000000 -0400
71349 @@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct
71350 return 0;
71351
71352 start = qp->rid;
71353 - end = atomic_inc_return(&peer->rid);
71354 + end = atomic_inc_return_unchecked(&peer->rid);
71355 qp->rid = end;
71356
71357 rc = qp->q.fragments && (end - start) > max;
71358 diff -urNp linux-2.6.32.45/net/ipv4/ip_sockglue.c linux-2.6.32.45/net/ipv4/ip_sockglue.c
71359 --- linux-2.6.32.45/net/ipv4/ip_sockglue.c 2011-03-27 14:31:47.000000000 -0400
71360 +++ linux-2.6.32.45/net/ipv4/ip_sockglue.c 2011-05-16 21:46:57.000000000 -0400
71361 @@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock
71362 int val;
71363 int len;
71364
71365 + pax_track_stack();
71366 +
71367 if (level != SOL_IP)
71368 return -EOPNOTSUPP;
71369
71370 diff -urNp linux-2.6.32.45/net/ipv4/netfilter/arp_tables.c linux-2.6.32.45/net/ipv4/netfilter/arp_tables.c
71371 --- linux-2.6.32.45/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:00:52.000000000 -0400
71372 +++ linux-2.6.32.45/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:04:18.000000000 -0400
71373 @@ -934,6 +934,7 @@ static int get_info(struct net *net, voi
71374 private = &tmp;
71375 }
71376 #endif
71377 + memset(&info, 0, sizeof(info));
71378 info.valid_hooks = t->valid_hooks;
71379 memcpy(info.hook_entry, private->hook_entry,
71380 sizeof(info.hook_entry));
71381 diff -urNp linux-2.6.32.45/net/ipv4/netfilter/ip_queue.c linux-2.6.32.45/net/ipv4/netfilter/ip_queue.c
71382 --- linux-2.6.32.45/net/ipv4/netfilter/ip_queue.c 2011-03-27 14:31:47.000000000 -0400
71383 +++ linux-2.6.32.45/net/ipv4/netfilter/ip_queue.c 2011-08-21 18:42:53.000000000 -0400
71384 @@ -286,6 +286,9 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, st
71385
71386 if (v->data_len < sizeof(*user_iph))
71387 return 0;
71388 + if (v->data_len > 65535)
71389 + return -EMSGSIZE;
71390 +
71391 diff = v->data_len - e->skb->len;
71392 if (diff < 0) {
71393 if (pskb_trim(e->skb, v->data_len))
71394 @@ -409,7 +412,8 @@ ipq_dev_drop(int ifindex)
71395 static inline void
71396 __ipq_rcv_skb(struct sk_buff *skb)
71397 {
71398 - int status, type, pid, flags, nlmsglen, skblen;
71399 + int status, type, pid, flags;
71400 + unsigned int nlmsglen, skblen;
71401 struct nlmsghdr *nlh;
71402
71403 skblen = skb->len;
71404 diff -urNp linux-2.6.32.45/net/ipv4/netfilter/ip_tables.c linux-2.6.32.45/net/ipv4/netfilter/ip_tables.c
71405 --- linux-2.6.32.45/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:00:52.000000000 -0400
71406 +++ linux-2.6.32.45/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:04:18.000000000 -0400
71407 @@ -1141,6 +1141,7 @@ static int get_info(struct net *net, voi
71408 private = &tmp;
71409 }
71410 #endif
71411 + memset(&info, 0, sizeof(info));
71412 info.valid_hooks = t->valid_hooks;
71413 memcpy(info.hook_entry, private->hook_entry,
71414 sizeof(info.hook_entry));
71415 diff -urNp linux-2.6.32.45/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-2.6.32.45/net/ipv4/netfilter/nf_nat_snmp_basic.c
71416 --- linux-2.6.32.45/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-03-27 14:31:47.000000000 -0400
71417 +++ linux-2.6.32.45/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-04-17 15:56:46.000000000 -0400
71418 @@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(
71419
71420 *len = 0;
71421
71422 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
71423 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
71424 if (*octets == NULL) {
71425 if (net_ratelimit())
71426 printk("OOM in bsalg (%d)\n", __LINE__);
71427 diff -urNp linux-2.6.32.45/net/ipv4/raw.c linux-2.6.32.45/net/ipv4/raw.c
71428 --- linux-2.6.32.45/net/ipv4/raw.c 2011-03-27 14:31:47.000000000 -0400
71429 +++ linux-2.6.32.45/net/ipv4/raw.c 2011-08-14 11:46:51.000000000 -0400
71430 @@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk,
71431 /* Charge it to the socket. */
71432
71433 if (sock_queue_rcv_skb(sk, skb) < 0) {
71434 - atomic_inc(&sk->sk_drops);
71435 + atomic_inc_unchecked(&sk->sk_drops);
71436 kfree_skb(skb);
71437 return NET_RX_DROP;
71438 }
71439 @@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk,
71440 int raw_rcv(struct sock *sk, struct sk_buff *skb)
71441 {
71442 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
71443 - atomic_inc(&sk->sk_drops);
71444 + atomic_inc_unchecked(&sk->sk_drops);
71445 kfree_skb(skb);
71446 return NET_RX_DROP;
71447 }
71448 @@ -724,16 +724,23 @@ static int raw_init(struct sock *sk)
71449
71450 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
71451 {
71452 + struct icmp_filter filter;
71453 +
71454 + if (optlen < 0)
71455 + return -EINVAL;
71456 if (optlen > sizeof(struct icmp_filter))
71457 optlen = sizeof(struct icmp_filter);
71458 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
71459 + if (copy_from_user(&filter, optval, optlen))
71460 return -EFAULT;
71461 + raw_sk(sk)->filter = filter;
71462 +
71463 return 0;
71464 }
71465
71466 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
71467 {
71468 int len, ret = -EFAULT;
71469 + struct icmp_filter filter;
71470
71471 if (get_user(len, optlen))
71472 goto out;
71473 @@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock
71474 if (len > sizeof(struct icmp_filter))
71475 len = sizeof(struct icmp_filter);
71476 ret = -EFAULT;
71477 - if (put_user(len, optlen) ||
71478 - copy_to_user(optval, &raw_sk(sk)->filter, len))
71479 + filter = raw_sk(sk)->filter;
71480 + if (put_user(len, optlen) || len > sizeof filter ||
71481 + copy_to_user(optval, &filter, len))
71482 goto out;
71483 ret = 0;
71484 out: return ret;
71485 @@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq
71486 sk_wmem_alloc_get(sp),
71487 sk_rmem_alloc_get(sp),
71488 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
71489 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
71490 + atomic_read(&sp->sk_refcnt),
71491 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71492 + NULL,
71493 +#else
71494 + sp,
71495 +#endif
71496 + atomic_read_unchecked(&sp->sk_drops));
71497 }
71498
71499 static int raw_seq_show(struct seq_file *seq, void *v)
71500 diff -urNp linux-2.6.32.45/net/ipv4/route.c linux-2.6.32.45/net/ipv4/route.c
71501 --- linux-2.6.32.45/net/ipv4/route.c 2011-08-16 20:37:25.000000000 -0400
71502 +++ linux-2.6.32.45/net/ipv4/route.c 2011-08-07 19:48:09.000000000 -0400
71503 @@ -269,7 +269,7 @@ static inline unsigned int rt_hash(__be3
71504
71505 static inline int rt_genid(struct net *net)
71506 {
71507 - return atomic_read(&net->ipv4.rt_genid);
71508 + return atomic_read_unchecked(&net->ipv4.rt_genid);
71509 }
71510
71511 #ifdef CONFIG_PROC_FS
71512 @@ -889,7 +889,7 @@ static void rt_cache_invalidate(struct n
71513 unsigned char shuffle;
71514
71515 get_random_bytes(&shuffle, sizeof(shuffle));
71516 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
71517 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
71518 }
71519
71520 /*
71521 @@ -3357,7 +3357,7 @@ static __net_initdata struct pernet_oper
71522
71523 static __net_init int rt_secret_timer_init(struct net *net)
71524 {
71525 - atomic_set(&net->ipv4.rt_genid,
71526 + atomic_set_unchecked(&net->ipv4.rt_genid,
71527 (int) ((num_physpages ^ (num_physpages>>8)) ^
71528 (jiffies ^ (jiffies >> 7))));
71529
71530 diff -urNp linux-2.6.32.45/net/ipv4/tcp.c linux-2.6.32.45/net/ipv4/tcp.c
71531 --- linux-2.6.32.45/net/ipv4/tcp.c 2011-03-27 14:31:47.000000000 -0400
71532 +++ linux-2.6.32.45/net/ipv4/tcp.c 2011-05-16 21:46:57.000000000 -0400
71533 @@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock
71534 int val;
71535 int err = 0;
71536
71537 + pax_track_stack();
71538 +
71539 /* This is a string value all the others are int's */
71540 if (optname == TCP_CONGESTION) {
71541 char name[TCP_CA_NAME_MAX];
71542 @@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock
71543 struct tcp_sock *tp = tcp_sk(sk);
71544 int val, len;
71545
71546 + pax_track_stack();
71547 +
71548 if (get_user(len, optlen))
71549 return -EFAULT;
71550
71551 diff -urNp linux-2.6.32.45/net/ipv4/tcp_ipv4.c linux-2.6.32.45/net/ipv4/tcp_ipv4.c
71552 --- linux-2.6.32.45/net/ipv4/tcp_ipv4.c 2011-08-16 20:37:25.000000000 -0400
71553 +++ linux-2.6.32.45/net/ipv4/tcp_ipv4.c 2011-08-23 21:22:32.000000000 -0400
71554 @@ -85,6 +85,9 @@
71555 int sysctl_tcp_tw_reuse __read_mostly;
71556 int sysctl_tcp_low_latency __read_mostly;
71557
71558 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71559 +extern int grsec_enable_blackhole;
71560 +#endif
71561
71562 #ifdef CONFIG_TCP_MD5SIG
71563 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
71564 @@ -1543,6 +1546,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
71565 return 0;
71566
71567 reset:
71568 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71569 + if (!grsec_enable_blackhole)
71570 +#endif
71571 tcp_v4_send_reset(rsk, skb);
71572 discard:
71573 kfree_skb(skb);
71574 @@ -1604,12 +1610,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
71575 TCP_SKB_CB(skb)->sacked = 0;
71576
71577 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
71578 - if (!sk)
71579 + if (!sk) {
71580 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71581 + ret = 1;
71582 +#endif
71583 goto no_tcp_socket;
71584 + }
71585
71586 process:
71587 - if (sk->sk_state == TCP_TIME_WAIT)
71588 + if (sk->sk_state == TCP_TIME_WAIT) {
71589 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71590 + ret = 2;
71591 +#endif
71592 goto do_time_wait;
71593 + }
71594
71595 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
71596 goto discard_and_relse;
71597 @@ -1651,6 +1665,10 @@ no_tcp_socket:
71598 bad_packet:
71599 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
71600 } else {
71601 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71602 + if (!grsec_enable_blackhole || (ret == 1 &&
71603 + (skb->dev->flags & IFF_LOOPBACK)))
71604 +#endif
71605 tcp_v4_send_reset(NULL, skb);
71606 }
71607
71608 @@ -2238,7 +2256,11 @@ static void get_openreq4(struct sock *sk
71609 0, /* non standard timer */
71610 0, /* open_requests have no inode */
71611 atomic_read(&sk->sk_refcnt),
71612 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71613 + NULL,
71614 +#else
71615 req,
71616 +#endif
71617 len);
71618 }
71619
71620 @@ -2280,7 +2302,12 @@ static void get_tcp4_sock(struct sock *s
71621 sock_i_uid(sk),
71622 icsk->icsk_probes_out,
71623 sock_i_ino(sk),
71624 - atomic_read(&sk->sk_refcnt), sk,
71625 + atomic_read(&sk->sk_refcnt),
71626 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71627 + NULL,
71628 +#else
71629 + sk,
71630 +#endif
71631 jiffies_to_clock_t(icsk->icsk_rto),
71632 jiffies_to_clock_t(icsk->icsk_ack.ato),
71633 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
71634 @@ -2308,7 +2335,13 @@ static void get_timewait4_sock(struct in
71635 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
71636 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
71637 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
71638 - atomic_read(&tw->tw_refcnt), tw, len);
71639 + atomic_read(&tw->tw_refcnt),
71640 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71641 + NULL,
71642 +#else
71643 + tw,
71644 +#endif
71645 + len);
71646 }
71647
71648 #define TMPSZ 150
71649 diff -urNp linux-2.6.32.45/net/ipv4/tcp_minisocks.c linux-2.6.32.45/net/ipv4/tcp_minisocks.c
71650 --- linux-2.6.32.45/net/ipv4/tcp_minisocks.c 2011-03-27 14:31:47.000000000 -0400
71651 +++ linux-2.6.32.45/net/ipv4/tcp_minisocks.c 2011-04-17 15:56:46.000000000 -0400
71652 @@ -26,6 +26,10 @@
71653 #include <net/inet_common.h>
71654 #include <net/xfrm.h>
71655
71656 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71657 +extern int grsec_enable_blackhole;
71658 +#endif
71659 +
71660 #ifdef CONFIG_SYSCTL
71661 #define SYNC_INIT 0 /* let the user enable it */
71662 #else
71663 @@ -672,6 +676,10 @@ listen_overflow:
71664
71665 embryonic_reset:
71666 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
71667 +
71668 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71669 + if (!grsec_enable_blackhole)
71670 +#endif
71671 if (!(flg & TCP_FLAG_RST))
71672 req->rsk_ops->send_reset(sk, skb);
71673
71674 diff -urNp linux-2.6.32.45/net/ipv4/tcp_output.c linux-2.6.32.45/net/ipv4/tcp_output.c
71675 --- linux-2.6.32.45/net/ipv4/tcp_output.c 2011-03-27 14:31:47.000000000 -0400
71676 +++ linux-2.6.32.45/net/ipv4/tcp_output.c 2011-05-16 21:46:57.000000000 -0400
71677 @@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct s
71678 __u8 *md5_hash_location;
71679 int mss;
71680
71681 + pax_track_stack();
71682 +
71683 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
71684 if (skb == NULL)
71685 return NULL;
71686 diff -urNp linux-2.6.32.45/net/ipv4/tcp_probe.c linux-2.6.32.45/net/ipv4/tcp_probe.c
71687 --- linux-2.6.32.45/net/ipv4/tcp_probe.c 2011-03-27 14:31:47.000000000 -0400
71688 +++ linux-2.6.32.45/net/ipv4/tcp_probe.c 2011-04-17 15:56:46.000000000 -0400
71689 @@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file
71690 if (cnt + width >= len)
71691 break;
71692
71693 - if (copy_to_user(buf + cnt, tbuf, width))
71694 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
71695 return -EFAULT;
71696 cnt += width;
71697 }
71698 diff -urNp linux-2.6.32.45/net/ipv4/tcp_timer.c linux-2.6.32.45/net/ipv4/tcp_timer.c
71699 --- linux-2.6.32.45/net/ipv4/tcp_timer.c 2011-03-27 14:31:47.000000000 -0400
71700 +++ linux-2.6.32.45/net/ipv4/tcp_timer.c 2011-04-17 15:56:46.000000000 -0400
71701 @@ -21,6 +21,10 @@
71702 #include <linux/module.h>
71703 #include <net/tcp.h>
71704
71705 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71706 +extern int grsec_lastack_retries;
71707 +#endif
71708 +
71709 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
71710 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
71711 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
71712 @@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock
71713 }
71714 }
71715
71716 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71717 + if ((sk->sk_state == TCP_LAST_ACK) &&
71718 + (grsec_lastack_retries > 0) &&
71719 + (grsec_lastack_retries < retry_until))
71720 + retry_until = grsec_lastack_retries;
71721 +#endif
71722 +
71723 if (retransmits_timed_out(sk, retry_until)) {
71724 /* Has it gone just too far? */
71725 tcp_write_err(sk);
71726 diff -urNp linux-2.6.32.45/net/ipv4/udp.c linux-2.6.32.45/net/ipv4/udp.c
71727 --- linux-2.6.32.45/net/ipv4/udp.c 2011-07-13 17:23:04.000000000 -0400
71728 +++ linux-2.6.32.45/net/ipv4/udp.c 2011-08-23 21:22:32.000000000 -0400
71729 @@ -86,6 +86,7 @@
71730 #include <linux/types.h>
71731 #include <linux/fcntl.h>
71732 #include <linux/module.h>
71733 +#include <linux/security.h>
71734 #include <linux/socket.h>
71735 #include <linux/sockios.h>
71736 #include <linux/igmp.h>
71737 @@ -106,6 +107,10 @@
71738 #include <net/xfrm.h>
71739 #include "udp_impl.h"
71740
71741 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71742 +extern int grsec_enable_blackhole;
71743 +#endif
71744 +
71745 struct udp_table udp_table;
71746 EXPORT_SYMBOL(udp_table);
71747
71748 @@ -371,6 +376,9 @@ found:
71749 return s;
71750 }
71751
71752 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
71753 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
71754 +
71755 /*
71756 * This routine is called by the ICMP module when it gets some
71757 * sort of error condition. If err < 0 then the socket should
71758 @@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
71759 dport = usin->sin_port;
71760 if (dport == 0)
71761 return -EINVAL;
71762 +
71763 + err = gr_search_udp_sendmsg(sk, usin);
71764 + if (err)
71765 + return err;
71766 } else {
71767 if (sk->sk_state != TCP_ESTABLISHED)
71768 return -EDESTADDRREQ;
71769 +
71770 + err = gr_search_udp_sendmsg(sk, NULL);
71771 + if (err)
71772 + return err;
71773 +
71774 daddr = inet->daddr;
71775 dport = inet->dport;
71776 /* Open fast path for connected socket.
71777 @@ -945,6 +962,10 @@ try_again:
71778 if (!skb)
71779 goto out;
71780
71781 + err = gr_search_udp_recvmsg(sk, skb);
71782 + if (err)
71783 + goto out_free;
71784 +
71785 ulen = skb->len - sizeof(struct udphdr);
71786 copied = len;
71787 if (copied > ulen)
71788 @@ -1068,7 +1089,7 @@ static int __udp_queue_rcv_skb(struct so
71789 if (rc == -ENOMEM) {
71790 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
71791 is_udplite);
71792 - atomic_inc(&sk->sk_drops);
71793 + atomic_inc_unchecked(&sk->sk_drops);
71794 }
71795 goto drop;
71796 }
71797 @@ -1338,6 +1359,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
71798 goto csum_error;
71799
71800 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
71801 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71802 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
71803 +#endif
71804 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
71805
71806 /*
71807 @@ -1758,8 +1782,13 @@ static void udp4_format_sock(struct sock
71808 sk_wmem_alloc_get(sp),
71809 sk_rmem_alloc_get(sp),
71810 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
71811 - atomic_read(&sp->sk_refcnt), sp,
71812 - atomic_read(&sp->sk_drops), len);
71813 + atomic_read(&sp->sk_refcnt),
71814 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71815 + NULL,
71816 +#else
71817 + sp,
71818 +#endif
71819 + atomic_read_unchecked(&sp->sk_drops), len);
71820 }
71821
71822 int udp4_seq_show(struct seq_file *seq, void *v)
71823 diff -urNp linux-2.6.32.45/net/ipv6/inet6_connection_sock.c linux-2.6.32.45/net/ipv6/inet6_connection_sock.c
71824 --- linux-2.6.32.45/net/ipv6/inet6_connection_sock.c 2011-03-27 14:31:47.000000000 -0400
71825 +++ linux-2.6.32.45/net/ipv6/inet6_connection_sock.c 2011-05-04 17:56:28.000000000 -0400
71826 @@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *
71827 #ifdef CONFIG_XFRM
71828 {
71829 struct rt6_info *rt = (struct rt6_info *)dst;
71830 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
71831 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
71832 }
71833 #endif
71834 }
71835 @@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(
71836 #ifdef CONFIG_XFRM
71837 if (dst) {
71838 struct rt6_info *rt = (struct rt6_info *)dst;
71839 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
71840 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
71841 sk->sk_dst_cache = NULL;
71842 dst_release(dst);
71843 dst = NULL;
71844 diff -urNp linux-2.6.32.45/net/ipv6/inet6_hashtables.c linux-2.6.32.45/net/ipv6/inet6_hashtables.c
71845 --- linux-2.6.32.45/net/ipv6/inet6_hashtables.c 2011-08-16 20:37:25.000000000 -0400
71846 +++ linux-2.6.32.45/net/ipv6/inet6_hashtables.c 2011-08-07 19:48:09.000000000 -0400
71847 @@ -119,7 +119,7 @@ out:
71848 }
71849 EXPORT_SYMBOL(__inet6_lookup_established);
71850
71851 -static int inline compute_score(struct sock *sk, struct net *net,
71852 +static inline int compute_score(struct sock *sk, struct net *net,
71853 const unsigned short hnum,
71854 const struct in6_addr *daddr,
71855 const int dif)
71856 diff -urNp linux-2.6.32.45/net/ipv6/ip6_tunnel.c linux-2.6.32.45/net/ipv6/ip6_tunnel.c
71857 --- linux-2.6.32.45/net/ipv6/ip6_tunnel.c 2011-08-09 18:35:30.000000000 -0400
71858 +++ linux-2.6.32.45/net/ipv6/ip6_tunnel.c 2011-08-24 18:52:25.000000000 -0400
71859 @@ -1466,7 +1466,7 @@ static int __init ip6_tunnel_init(void)
71860 {
71861 int err;
71862
71863 - err = register_pernet_device(&ip6_tnl_net_ops);
71864 + err = register_pernet_gen_device(&ip6_tnl_net_id, &ip6_tnl_net_ops);
71865 if (err < 0)
71866 goto out_pernet;
71867
71868 @@ -1487,7 +1487,7 @@ static int __init ip6_tunnel_init(void)
71869 out_ip6ip6:
71870 xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
71871 out_ip4ip6:
71872 - unregister_pernet_device(&ip6_tnl_net_ops);
71873 + unregister_pernet_gen_device(ip6_tnl_net_id, &ip6_tnl_net_ops);
71874 out_pernet:
71875 return err;
71876 }
71877 diff -urNp linux-2.6.32.45/net/ipv6/ipv6_sockglue.c linux-2.6.32.45/net/ipv6/ipv6_sockglue.c
71878 --- linux-2.6.32.45/net/ipv6/ipv6_sockglue.c 2011-03-27 14:31:47.000000000 -0400
71879 +++ linux-2.6.32.45/net/ipv6/ipv6_sockglue.c 2011-05-16 21:46:57.000000000 -0400
71880 @@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct soc
71881 int val, valbool;
71882 int retv = -ENOPROTOOPT;
71883
71884 + pax_track_stack();
71885 +
71886 if (optval == NULL)
71887 val=0;
71888 else {
71889 @@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct soc
71890 int len;
71891 int val;
71892
71893 + pax_track_stack();
71894 +
71895 if (ip6_mroute_opt(optname))
71896 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
71897
71898 diff -urNp linux-2.6.32.45/net/ipv6/netfilter/ip6_queue.c linux-2.6.32.45/net/ipv6/netfilter/ip6_queue.c
71899 --- linux-2.6.32.45/net/ipv6/netfilter/ip6_queue.c 2011-03-27 14:31:47.000000000 -0400
71900 +++ linux-2.6.32.45/net/ipv6/netfilter/ip6_queue.c 2011-08-21 18:43:32.000000000 -0400
71901 @@ -287,6 +287,9 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, st
71902
71903 if (v->data_len < sizeof(*user_iph))
71904 return 0;
71905 + if (v->data_len > 65535)
71906 + return -EMSGSIZE;
71907 +
71908 diff = v->data_len - e->skb->len;
71909 if (diff < 0) {
71910 if (pskb_trim(e->skb, v->data_len))
71911 @@ -411,7 +414,8 @@ ipq_dev_drop(int ifindex)
71912 static inline void
71913 __ipq_rcv_skb(struct sk_buff *skb)
71914 {
71915 - int status, type, pid, flags, nlmsglen, skblen;
71916 + int status, type, pid, flags;
71917 + unsigned int nlmsglen, skblen;
71918 struct nlmsghdr *nlh;
71919
71920 skblen = skb->len;
71921 diff -urNp linux-2.6.32.45/net/ipv6/netfilter/ip6_tables.c linux-2.6.32.45/net/ipv6/netfilter/ip6_tables.c
71922 --- linux-2.6.32.45/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:00:52.000000000 -0400
71923 +++ linux-2.6.32.45/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:04:18.000000000 -0400
71924 @@ -1173,6 +1173,7 @@ static int get_info(struct net *net, voi
71925 private = &tmp;
71926 }
71927 #endif
71928 + memset(&info, 0, sizeof(info));
71929 info.valid_hooks = t->valid_hooks;
71930 memcpy(info.hook_entry, private->hook_entry,
71931 sizeof(info.hook_entry));
71932 diff -urNp linux-2.6.32.45/net/ipv6/raw.c linux-2.6.32.45/net/ipv6/raw.c
71933 --- linux-2.6.32.45/net/ipv6/raw.c 2011-03-27 14:31:47.000000000 -0400
71934 +++ linux-2.6.32.45/net/ipv6/raw.c 2011-08-14 11:48:20.000000000 -0400
71935 @@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct s
71936 {
71937 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
71938 skb_checksum_complete(skb)) {
71939 - atomic_inc(&sk->sk_drops);
71940 + atomic_inc_unchecked(&sk->sk_drops);
71941 kfree_skb(skb);
71942 return NET_RX_DROP;
71943 }
71944
71945 /* Charge it to the socket. */
71946 if (sock_queue_rcv_skb(sk,skb)<0) {
71947 - atomic_inc(&sk->sk_drops);
71948 + atomic_inc_unchecked(&sk->sk_drops);
71949 kfree_skb(skb);
71950 return NET_RX_DROP;
71951 }
71952 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
71953 struct raw6_sock *rp = raw6_sk(sk);
71954
71955 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
71956 - atomic_inc(&sk->sk_drops);
71957 + atomic_inc_unchecked(&sk->sk_drops);
71958 kfree_skb(skb);
71959 return NET_RX_DROP;
71960 }
71961 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
71962
71963 if (inet->hdrincl) {
71964 if (skb_checksum_complete(skb)) {
71965 - atomic_inc(&sk->sk_drops);
71966 + atomic_inc_unchecked(&sk->sk_drops);
71967 kfree_skb(skb);
71968 return NET_RX_DROP;
71969 }
71970 @@ -518,7 +518,7 @@ csum_copy_err:
71971 as some normal condition.
71972 */
71973 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
71974 - atomic_inc(&sk->sk_drops);
71975 + atomic_inc_unchecked(&sk->sk_drops);
71976 goto out;
71977 }
71978
71979 @@ -600,7 +600,7 @@ out:
71980 return err;
71981 }
71982
71983 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
71984 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
71985 struct flowi *fl, struct rt6_info *rt,
71986 unsigned int flags)
71987 {
71988 @@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *i
71989 u16 proto;
71990 int err;
71991
71992 + pax_track_stack();
71993 +
71994 /* Rough check on arithmetic overflow,
71995 better check is made in ip6_append_data().
71996 */
71997 @@ -916,12 +918,17 @@ do_confirm:
71998 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
71999 char __user *optval, int optlen)
72000 {
72001 + struct icmp6_filter filter;
72002 +
72003 switch (optname) {
72004 case ICMPV6_FILTER:
72005 + if (optlen < 0)
72006 + return -EINVAL;
72007 if (optlen > sizeof(struct icmp6_filter))
72008 optlen = sizeof(struct icmp6_filter);
72009 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
72010 + if (copy_from_user(&filter, optval, optlen))
72011 return -EFAULT;
72012 + raw6_sk(sk)->filter = filter;
72013 return 0;
72014 default:
72015 return -ENOPROTOOPT;
72016 @@ -934,6 +941,7 @@ static int rawv6_geticmpfilter(struct so
72017 char __user *optval, int __user *optlen)
72018 {
72019 int len;
72020 + struct icmp6_filter filter;
72021
72022 switch (optname) {
72023 case ICMPV6_FILTER:
72024 @@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct so
72025 len = sizeof(struct icmp6_filter);
72026 if (put_user(len, optlen))
72027 return -EFAULT;
72028 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
72029 + filter = raw6_sk(sk)->filter;
72030 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
72031 return -EFAULT;
72032 return 0;
72033 default:
72034 @@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct se
72035 0, 0L, 0,
72036 sock_i_uid(sp), 0,
72037 sock_i_ino(sp),
72038 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
72039 + atomic_read(&sp->sk_refcnt),
72040 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72041 + NULL,
72042 +#else
72043 + sp,
72044 +#endif
72045 + atomic_read_unchecked(&sp->sk_drops));
72046 }
72047
72048 static int raw6_seq_show(struct seq_file *seq, void *v)
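
Two things change in rawv6_seticmpfilter()/rawv6_geticmpfilter() above: a negative optlen is rejected, and the user copy now goes through a stack bounce buffer instead of writing straight into raw6_sk(sk)->filter, so a faulting or truncated copy_from_user() can never leave the live socket filter half-updated. A sketch of the same setsockopt shape, with hypothetical names:

#include <linux/errno.h>
#include <linux/uaccess.h>

struct my_filter { unsigned int data[8]; };     /* hypothetical */

static int my_set_filter(struct my_filter *dst, char __user *optval, int optlen)
{
        struct my_filter tmp = { };

        if (optlen < 0)
                return -EINVAL;
        if (optlen > sizeof(tmp))
                optlen = sizeof(tmp);
        if (copy_from_user(&tmp, optval, optlen))
                return -EFAULT;
        *dst = tmp;             /* publish only after the copy fully succeeded */
        return 0;
}
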
72049 diff -urNp linux-2.6.32.45/net/ipv6/tcp_ipv6.c linux-2.6.32.45/net/ipv6/tcp_ipv6.c
72050 --- linux-2.6.32.45/net/ipv6/tcp_ipv6.c 2011-08-16 20:37:25.000000000 -0400
72051 +++ linux-2.6.32.45/net/ipv6/tcp_ipv6.c 2011-08-07 19:48:09.000000000 -0400
72052 @@ -89,6 +89,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
72053 }
72054 #endif
72055
72056 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72057 +extern int grsec_enable_blackhole;
72058 +#endif
72059 +
72060 static void tcp_v6_hash(struct sock *sk)
72061 {
72062 if (sk->sk_state != TCP_CLOSE) {
72063 @@ -1579,6 +1583,9 @@ static int tcp_v6_do_rcv(struct sock *sk
72064 return 0;
72065
72066 reset:
72067 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72068 + if (!grsec_enable_blackhole)
72069 +#endif
72070 tcp_v6_send_reset(sk, skb);
72071 discard:
72072 if (opt_skb)
72073 @@ -1656,12 +1663,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
72074 TCP_SKB_CB(skb)->sacked = 0;
72075
72076 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
72077 - if (!sk)
72078 + if (!sk) {
72079 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72080 + ret = 1;
72081 +#endif
72082 goto no_tcp_socket;
72083 + }
72084
72085 process:
72086 - if (sk->sk_state == TCP_TIME_WAIT)
72087 + if (sk->sk_state == TCP_TIME_WAIT) {
72088 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72089 + ret = 2;
72090 +#endif
72091 goto do_time_wait;
72092 + }
72093
72094 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
72095 goto discard_and_relse;
72096 @@ -1701,6 +1716,10 @@ no_tcp_socket:
72097 bad_packet:
72098 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
72099 } else {
72100 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72101 + if (!grsec_enable_blackhole || (ret == 1 &&
72102 + (skb->dev->flags & IFF_LOOPBACK)))
72103 +#endif
72104 tcp_v6_send_reset(NULL, skb);
72105 }
72106
72107 @@ -1916,7 +1935,13 @@ static void get_openreq6(struct seq_file
72108 uid,
72109 0, /* non standard timer */
72110 0, /* open_requests have no inode */
72111 - 0, req);
72112 + 0,
72113 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72114 + NULL
72115 +#else
72116 + req
72117 +#endif
72118 + );
72119 }
72120
72121 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
72122 @@ -1966,7 +1991,12 @@ static void get_tcp6_sock(struct seq_fil
72123 sock_i_uid(sp),
72124 icsk->icsk_probes_out,
72125 sock_i_ino(sp),
72126 - atomic_read(&sp->sk_refcnt), sp,
72127 + atomic_read(&sp->sk_refcnt),
72128 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72129 + NULL,
72130 +#else
72131 + sp,
72132 +#endif
72133 jiffies_to_clock_t(icsk->icsk_rto),
72134 jiffies_to_clock_t(icsk->icsk_ack.ato),
72135 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
72136 @@ -2001,7 +2031,13 @@ static void get_timewait6_sock(struct se
72137 dest->s6_addr32[2], dest->s6_addr32[3], destp,
72138 tw->tw_substate, 0, 0,
72139 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
72140 - atomic_read(&tw->tw_refcnt), tw);
72141 + atomic_read(&tw->tw_refcnt),
72142 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72143 + NULL
72144 +#else
72145 + tw
72146 +#endif
72147 + );
72148 }
72149
72150 static int tcp6_seq_show(struct seq_file *seq, void *v)
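
The tcp_ipv6 hunks above wire the grsecurity TCP blackhole into the IPv6 path: when grsec_enable_blackhole is set, probes to closed or TIME_WAIT ports are dropped without a RST, except that packets seen on a loopback device (the ret == 1 case) still get the normal reset so local tools keep working. A condensed sketch of that gate, assuming the grsec_enable_blackhole toggle defined elsewhere in this patch; send_reset() is a hypothetical stand-in for tcp_v6_send_reset():

#include <linux/skbuff.h>
#include <linux/netdevice.h>

#ifdef CONFIG_GRKERNSEC_BLACKHOLE
extern int grsec_enable_blackhole;
#endif

static void send_reset(struct sock *sk, struct sk_buff *skb);

static void maybe_send_reset(struct sock *sk, struct sk_buff *skb, int lookup_missed)
{
#ifdef CONFIG_GRKERNSEC_BLACKHOLE
        /* stay silent unless blackholing is off or the packet arrived on loopback */
        if (grsec_enable_blackhole &&
            !(lookup_missed && (skb->dev->flags & IFF_LOOPBACK)))
                return;
#endif
        send_reset(sk, skb);
}
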
72151 diff -urNp linux-2.6.32.45/net/ipv6/udp.c linux-2.6.32.45/net/ipv6/udp.c
72152 --- linux-2.6.32.45/net/ipv6/udp.c 2011-07-13 17:23:04.000000000 -0400
72153 +++ linux-2.6.32.45/net/ipv6/udp.c 2011-07-13 17:23:27.000000000 -0400
72154 @@ -49,6 +49,10 @@
72155 #include <linux/seq_file.h>
72156 #include "udp_impl.h"
72157
72158 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72159 +extern int grsec_enable_blackhole;
72160 +#endif
72161 +
72162 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
72163 {
72164 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
72165 @@ -391,7 +395,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
72166 if (rc == -ENOMEM) {
72167 UDP6_INC_STATS_BH(sock_net(sk),
72168 UDP_MIB_RCVBUFERRORS, is_udplite);
72169 - atomic_inc(&sk->sk_drops);
72170 + atomic_inc_unchecked(&sk->sk_drops);
72171 }
72172 goto drop;
72173 }
72174 @@ -590,6 +594,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
72175 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
72176 proto == IPPROTO_UDPLITE);
72177
72178 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72179 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
72180 +#endif
72181 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
72182
72183 kfree_skb(skb);
72184 @@ -1209,8 +1216,13 @@ static void udp6_sock_seq_show(struct se
72185 0, 0L, 0,
72186 sock_i_uid(sp), 0,
72187 sock_i_ino(sp),
72188 - atomic_read(&sp->sk_refcnt), sp,
72189 - atomic_read(&sp->sk_drops));
72190 + atomic_read(&sp->sk_refcnt),
72191 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72192 + NULL,
72193 +#else
72194 + sp,
72195 +#endif
72196 + atomic_read_unchecked(&sp->sk_drops));
72197 }
72198
72199 int udp6_seq_show(struct seq_file *seq, void *v)
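
As in the raw6 and tcp6 /proc handlers earlier, the udp6 seq_show hunk swaps the raw socket pointer for NULL when CONFIG_GRKERNSEC_HIDESYM is enabled, so /proc/net output no longer hands out kernel heap addresses, and the drop counter moves to the unchecked atomic reader. The recurring pattern, as a small illustrative sketch:

#include <linux/seq_file.h>
#include <net/sock.h>

static void show_sock_line(struct seq_file *seq, struct sock *sp)
{
        seq_printf(seq, "%p %d\n",
#ifdef CONFIG_GRKERNSEC_HIDESYM
                   NULL,                /* hide the kernel address from userland */
#else
                   sp,
#endif
                   atomic_read(&sp->sk_refcnt));
}
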
72200 diff -urNp linux-2.6.32.45/net/irda/ircomm/ircomm_tty.c linux-2.6.32.45/net/irda/ircomm/ircomm_tty.c
72201 --- linux-2.6.32.45/net/irda/ircomm/ircomm_tty.c 2011-03-27 14:31:47.000000000 -0400
72202 +++ linux-2.6.32.45/net/irda/ircomm/ircomm_tty.c 2011-04-17 15:56:46.000000000 -0400
72203 @@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(st
72204 add_wait_queue(&self->open_wait, &wait);
72205
72206 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
72207 - __FILE__,__LINE__, tty->driver->name, self->open_count );
72208 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
72209
72210 /* As far as I can see, we protect open_count - Jean II */
72211 spin_lock_irqsave(&self->spinlock, flags);
72212 if (!tty_hung_up_p(filp)) {
72213 extra_count = 1;
72214 - self->open_count--;
72215 + local_dec(&self->open_count);
72216 }
72217 spin_unlock_irqrestore(&self->spinlock, flags);
72218 - self->blocked_open++;
72219 + local_inc(&self->blocked_open);
72220
72221 while (1) {
72222 if (tty->termios->c_cflag & CBAUD) {
72223 @@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(st
72224 }
72225
72226 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
72227 - __FILE__,__LINE__, tty->driver->name, self->open_count );
72228 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
72229
72230 schedule();
72231 }
72232 @@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(st
72233 if (extra_count) {
72234 /* ++ is not atomic, so this should be protected - Jean II */
72235 spin_lock_irqsave(&self->spinlock, flags);
72236 - self->open_count++;
72237 + local_inc(&self->open_count);
72238 spin_unlock_irqrestore(&self->spinlock, flags);
72239 }
72240 - self->blocked_open--;
72241 + local_dec(&self->blocked_open);
72242
72243 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
72244 - __FILE__,__LINE__, tty->driver->name, self->open_count);
72245 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
72246
72247 if (!retval)
72248 self->flags |= ASYNC_NORMAL_ACTIVE;
72249 @@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_st
72250 }
72251 /* ++ is not atomic, so this should be protected - Jean II */
72252 spin_lock_irqsave(&self->spinlock, flags);
72253 - self->open_count++;
72254 + local_inc(&self->open_count);
72255
72256 tty->driver_data = self;
72257 self->tty = tty;
72258 spin_unlock_irqrestore(&self->spinlock, flags);
72259
72260 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
72261 - self->line, self->open_count);
72262 + self->line, local_read(&self->open_count));
72263
72264 /* Not really used by us, but lets do it anyway */
72265 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
72266 @@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_
72267 return;
72268 }
72269
72270 - if ((tty->count == 1) && (self->open_count != 1)) {
72271 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
72272 /*
72273 * Uh, oh. tty->count is 1, which means that the tty
72274 * structure will be freed. state->count should always
72275 @@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_
72276 */
72277 IRDA_DEBUG(0, "%s(), bad serial port count; "
72278 "tty->count is 1, state->count is %d\n", __func__ ,
72279 - self->open_count);
72280 - self->open_count = 1;
72281 + local_read(&self->open_count));
72282 + local_set(&self->open_count, 1);
72283 }
72284
72285 - if (--self->open_count < 0) {
72286 + if (local_dec_return(&self->open_count) < 0) {
72287 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
72288 - __func__, self->line, self->open_count);
72289 - self->open_count = 0;
72290 + __func__, self->line, local_read(&self->open_count));
72291 + local_set(&self->open_count, 0);
72292 }
72293 - if (self->open_count) {
72294 + if (local_read(&self->open_count)) {
72295 spin_unlock_irqrestore(&self->spinlock, flags);
72296
72297 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
72298 @@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_
72299 tty->closing = 0;
72300 self->tty = NULL;
72301
72302 - if (self->blocked_open) {
72303 + if (local_read(&self->blocked_open)) {
72304 if (self->close_delay)
72305 schedule_timeout_interruptible(self->close_delay);
72306 wake_up_interruptible(&self->open_wait);
72307 @@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty
72308 spin_lock_irqsave(&self->spinlock, flags);
72309 self->flags &= ~ASYNC_NORMAL_ACTIVE;
72310 self->tty = NULL;
72311 - self->open_count = 0;
72312 + local_set(&self->open_count, 0);
72313 spin_unlock_irqrestore(&self->spinlock, flags);
72314
72315 wake_up_interruptible(&self->open_wait);
72316 @@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct
72317 seq_putc(m, '\n');
72318
72319 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
72320 - seq_printf(m, "Open count: %d\n", self->open_count);
72321 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
72322 seq_printf(m, "Max data size: %d\n", self->max_data_size);
72323 seq_printf(m, "Max header size: %d\n", self->max_header_size);
72324
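
Throughout ircomm_tty.c above, the open_count and blocked_open fields stop being plain ints and become local_t, and every access goes through local_inc()/local_dec_return()/local_read()/local_set(); the existing spinlock-protected call sites are unchanged apart from the accessor names. A small sketch of the same API shape, assuming only <asm/local.h> and hypothetical names:

#include <asm/local.h>

struct foo_port {                       /* hypothetical */
        local_t open_count;
};

static void foo_open(struct foo_port *p)
{
        local_inc(&p->open_count);
}

static int foo_close(struct foo_port *p)
{
        if (local_dec_return(&p->open_count) < 0)
                local_set(&p->open_count, 0);   /* repair an unbalanced close */
        return local_read(&p->open_count);
}
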
72325 diff -urNp linux-2.6.32.45/net/iucv/af_iucv.c linux-2.6.32.45/net/iucv/af_iucv.c
72326 --- linux-2.6.32.45/net/iucv/af_iucv.c 2011-03-27 14:31:47.000000000 -0400
72327 +++ linux-2.6.32.45/net/iucv/af_iucv.c 2011-05-04 17:56:28.000000000 -0400
72328 @@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct soc
72329
72330 write_lock_bh(&iucv_sk_list.lock);
72331
72332 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
72333 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
72334 while (__iucv_get_sock_by_name(name)) {
72335 sprintf(name, "%08x",
72336 - atomic_inc_return(&iucv_sk_list.autobind_name));
72337 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
72338 }
72339
72340 write_unlock_bh(&iucv_sk_list.lock);
72341 diff -urNp linux-2.6.32.45/net/key/af_key.c linux-2.6.32.45/net/key/af_key.c
72342 --- linux-2.6.32.45/net/key/af_key.c 2011-03-27 14:31:47.000000000 -0400
72343 +++ linux-2.6.32.45/net/key/af_key.c 2011-05-16 21:46:57.000000000 -0400
72344 @@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk
72345 struct xfrm_migrate m[XFRM_MAX_DEPTH];
72346 struct xfrm_kmaddress k;
72347
72348 + pax_track_stack();
72349 +
72350 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
72351 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
72352 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
72353 @@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_fil
72354 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
72355 else
72356 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
72357 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72358 + NULL,
72359 +#else
72360 s,
72361 +#endif
72362 atomic_read(&s->sk_refcnt),
72363 sk_rmem_alloc_get(s),
72364 sk_wmem_alloc_get(s),
72365 diff -urNp linux-2.6.32.45/net/lapb/lapb_iface.c linux-2.6.32.45/net/lapb/lapb_iface.c
72366 --- linux-2.6.32.45/net/lapb/lapb_iface.c 2011-03-27 14:31:47.000000000 -0400
72367 +++ linux-2.6.32.45/net/lapb/lapb_iface.c 2011-08-05 20:33:55.000000000 -0400
72368 @@ -157,7 +157,7 @@ int lapb_register(struct net_device *dev
72369 goto out;
72370
72371 lapb->dev = dev;
72372 - lapb->callbacks = *callbacks;
72373 + lapb->callbacks = callbacks;
72374
72375 __lapb_insert_cb(lapb);
72376
72377 @@ -379,32 +379,32 @@ int lapb_data_received(struct net_device
72378
72379 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
72380 {
72381 - if (lapb->callbacks.connect_confirmation)
72382 - lapb->callbacks.connect_confirmation(lapb->dev, reason);
72383 + if (lapb->callbacks->connect_confirmation)
72384 + lapb->callbacks->connect_confirmation(lapb->dev, reason);
72385 }
72386
72387 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
72388 {
72389 - if (lapb->callbacks.connect_indication)
72390 - lapb->callbacks.connect_indication(lapb->dev, reason);
72391 + if (lapb->callbacks->connect_indication)
72392 + lapb->callbacks->connect_indication(lapb->dev, reason);
72393 }
72394
72395 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
72396 {
72397 - if (lapb->callbacks.disconnect_confirmation)
72398 - lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
72399 + if (lapb->callbacks->disconnect_confirmation)
72400 + lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
72401 }
72402
72403 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
72404 {
72405 - if (lapb->callbacks.disconnect_indication)
72406 - lapb->callbacks.disconnect_indication(lapb->dev, reason);
72407 + if (lapb->callbacks->disconnect_indication)
72408 + lapb->callbacks->disconnect_indication(lapb->dev, reason);
72409 }
72410
72411 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
72412 {
72413 - if (lapb->callbacks.data_indication)
72414 - return lapb->callbacks.data_indication(lapb->dev, skb);
72415 + if (lapb->callbacks->data_indication)
72416 + return lapb->callbacks->data_indication(lapb->dev, skb);
72417
72418 kfree_skb(skb);
72419 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
72420 @@ -414,8 +414,8 @@ int lapb_data_transmit(struct lapb_cb *l
72421 {
72422 int used = 0;
72423
72424 - if (lapb->callbacks.data_transmit) {
72425 - lapb->callbacks.data_transmit(lapb->dev, skb);
72426 + if (lapb->callbacks->data_transmit) {
72427 + lapb->callbacks->data_transmit(lapb->dev, skb);
72428 used = 1;
72429 }
72430
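
The lapb changes above stop copying the caller's callback table into the control block and keep a pointer to it instead (lapb->callbacks = callbacks), with every invocation going through that pointer. One motivation for this shape is that the caller's table can then be declared const and placed in read-only memory; a sketch with hypothetical names:

struct foo_callbacks {                  /* hypothetical ops table */
        void (*connected)(void *ctx, int reason);
};

struct foo_cb {
        const struct foo_callbacks *callbacks;  /* was a by-value copy */
        void *ctx;
};

static void foo_register(struct foo_cb *cb, const struct foo_callbacks *callbacks)
{
        cb->callbacks = callbacks;      /* point at the (const) table */
}

static void foo_connected(struct foo_cb *cb, int reason)
{
        if (cb->callbacks->connected)
                cb->callbacks->connected(cb->ctx, reason);
}
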
72431 diff -urNp linux-2.6.32.45/net/mac80211/cfg.c linux-2.6.32.45/net/mac80211/cfg.c
72432 --- linux-2.6.32.45/net/mac80211/cfg.c 2011-03-27 14:31:47.000000000 -0400
72433 +++ linux-2.6.32.45/net/mac80211/cfg.c 2011-04-17 15:56:46.000000000 -0400
72434 @@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(st
72435 return err;
72436 }
72437
72438 -struct cfg80211_ops mac80211_config_ops = {
72439 +const struct cfg80211_ops mac80211_config_ops = {
72440 .add_virtual_intf = ieee80211_add_iface,
72441 .del_virtual_intf = ieee80211_del_iface,
72442 .change_virtual_intf = ieee80211_change_iface,
72443 diff -urNp linux-2.6.32.45/net/mac80211/cfg.h linux-2.6.32.45/net/mac80211/cfg.h
72444 --- linux-2.6.32.45/net/mac80211/cfg.h 2011-03-27 14:31:47.000000000 -0400
72445 +++ linux-2.6.32.45/net/mac80211/cfg.h 2011-04-17 15:56:46.000000000 -0400
72446 @@ -4,6 +4,6 @@
72447 #ifndef __CFG_H
72448 #define __CFG_H
72449
72450 -extern struct cfg80211_ops mac80211_config_ops;
72451 +extern const struct cfg80211_ops mac80211_config_ops;
72452
72453 #endif /* __CFG_H */
72454 diff -urNp linux-2.6.32.45/net/mac80211/debugfs_key.c linux-2.6.32.45/net/mac80211/debugfs_key.c
72455 --- linux-2.6.32.45/net/mac80211/debugfs_key.c 2011-03-27 14:31:47.000000000 -0400
72456 +++ linux-2.6.32.45/net/mac80211/debugfs_key.c 2011-04-17 15:56:46.000000000 -0400
72457 @@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file
72458 size_t count, loff_t *ppos)
72459 {
72460 struct ieee80211_key *key = file->private_data;
72461 - int i, res, bufsize = 2 * key->conf.keylen + 2;
72462 + int i, bufsize = 2 * key->conf.keylen + 2;
72463 char *buf = kmalloc(bufsize, GFP_KERNEL);
72464 char *p = buf;
72465 + ssize_t res;
72466 +
72467 + if (buf == NULL)
72468 + return -ENOMEM;
72469
72470 for (i = 0; i < key->conf.keylen; i++)
72471 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
72472 diff -urNp linux-2.6.32.45/net/mac80211/debugfs_sta.c linux-2.6.32.45/net/mac80211/debugfs_sta.c
72473 --- linux-2.6.32.45/net/mac80211/debugfs_sta.c 2011-03-27 14:31:47.000000000 -0400
72474 +++ linux-2.6.32.45/net/mac80211/debugfs_sta.c 2011-05-16 21:46:57.000000000 -0400
72475 @@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struc
72476 int i;
72477 struct sta_info *sta = file->private_data;
72478
72479 + pax_track_stack();
72480 +
72481 spin_lock_bh(&sta->lock);
72482 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
72483 sta->ampdu_mlme.dialog_token_allocator + 1);
72484 diff -urNp linux-2.6.32.45/net/mac80211/ieee80211_i.h linux-2.6.32.45/net/mac80211/ieee80211_i.h
72485 --- linux-2.6.32.45/net/mac80211/ieee80211_i.h 2011-03-27 14:31:47.000000000 -0400
72486 +++ linux-2.6.32.45/net/mac80211/ieee80211_i.h 2011-04-17 15:56:46.000000000 -0400
72487 @@ -25,6 +25,7 @@
72488 #include <linux/etherdevice.h>
72489 #include <net/cfg80211.h>
72490 #include <net/mac80211.h>
72491 +#include <asm/local.h>
72492 #include "key.h"
72493 #include "sta_info.h"
72494
72495 @@ -635,7 +636,7 @@ struct ieee80211_local {
72496 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
72497 spinlock_t queue_stop_reason_lock;
72498
72499 - int open_count;
72500 + local_t open_count;
72501 int monitors, cooked_mntrs;
72502 /* number of interfaces with corresponding FIF_ flags */
72503 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
72504 diff -urNp linux-2.6.32.45/net/mac80211/iface.c linux-2.6.32.45/net/mac80211/iface.c
72505 --- linux-2.6.32.45/net/mac80211/iface.c 2011-03-27 14:31:47.000000000 -0400
72506 +++ linux-2.6.32.45/net/mac80211/iface.c 2011-04-17 15:56:46.000000000 -0400
72507 @@ -166,7 +166,7 @@ static int ieee80211_open(struct net_dev
72508 break;
72509 }
72510
72511 - if (local->open_count == 0) {
72512 + if (local_read(&local->open_count) == 0) {
72513 res = drv_start(local);
72514 if (res)
72515 goto err_del_bss;
72516 @@ -196,7 +196,7 @@ static int ieee80211_open(struct net_dev
72517 * Validate the MAC address for this device.
72518 */
72519 if (!is_valid_ether_addr(dev->dev_addr)) {
72520 - if (!local->open_count)
72521 + if (!local_read(&local->open_count))
72522 drv_stop(local);
72523 return -EADDRNOTAVAIL;
72524 }
72525 @@ -292,7 +292,7 @@ static int ieee80211_open(struct net_dev
72526
72527 hw_reconf_flags |= __ieee80211_recalc_idle(local);
72528
72529 - local->open_count++;
72530 + local_inc(&local->open_count);
72531 if (hw_reconf_flags) {
72532 ieee80211_hw_config(local, hw_reconf_flags);
72533 /*
72534 @@ -320,7 +320,7 @@ static int ieee80211_open(struct net_dev
72535 err_del_interface:
72536 drv_remove_interface(local, &conf);
72537 err_stop:
72538 - if (!local->open_count)
72539 + if (!local_read(&local->open_count))
72540 drv_stop(local);
72541 err_del_bss:
72542 sdata->bss = NULL;
72543 @@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_dev
72544 WARN_ON(!list_empty(&sdata->u.ap.vlans));
72545 }
72546
72547 - local->open_count--;
72548 + local_dec(&local->open_count);
72549
72550 switch (sdata->vif.type) {
72551 case NL80211_IFTYPE_AP_VLAN:
72552 @@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_dev
72553
72554 ieee80211_recalc_ps(local, -1);
72555
72556 - if (local->open_count == 0) {
72557 + if (local_read(&local->open_count) == 0) {
72558 ieee80211_clear_tx_pending(local);
72559 ieee80211_stop_device(local);
72560
72561 diff -urNp linux-2.6.32.45/net/mac80211/main.c linux-2.6.32.45/net/mac80211/main.c
72562 --- linux-2.6.32.45/net/mac80211/main.c 2011-05-10 22:12:02.000000000 -0400
72563 +++ linux-2.6.32.45/net/mac80211/main.c 2011-05-10 22:12:34.000000000 -0400
72564 @@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211
72565 local->hw.conf.power_level = power;
72566 }
72567
72568 - if (changed && local->open_count) {
72569 + if (changed && local_read(&local->open_count)) {
72570 ret = drv_config(local, changed);
72571 /*
72572 * Goal:
72573 diff -urNp linux-2.6.32.45/net/mac80211/mlme.c linux-2.6.32.45/net/mac80211/mlme.c
72574 --- linux-2.6.32.45/net/mac80211/mlme.c 2011-08-09 18:35:30.000000000 -0400
72575 +++ linux-2.6.32.45/net/mac80211/mlme.c 2011-08-09 18:34:01.000000000 -0400
72576 @@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee
72577 bool have_higher_than_11mbit = false, newsta = false;
72578 u16 ap_ht_cap_flags;
72579
72580 + pax_track_stack();
72581 +
72582 /*
72583 * AssocResp and ReassocResp have identical structure, so process both
72584 * of them in this function.
72585 diff -urNp linux-2.6.32.45/net/mac80211/pm.c linux-2.6.32.45/net/mac80211/pm.c
72586 --- linux-2.6.32.45/net/mac80211/pm.c 2011-03-27 14:31:47.000000000 -0400
72587 +++ linux-2.6.32.45/net/mac80211/pm.c 2011-04-17 15:56:46.000000000 -0400
72588 @@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211
72589 }
72590
72591 /* stop hardware - this must stop RX */
72592 - if (local->open_count)
72593 + if (local_read(&local->open_count))
72594 ieee80211_stop_device(local);
72595
72596 local->suspended = true;
72597 diff -urNp linux-2.6.32.45/net/mac80211/rate.c linux-2.6.32.45/net/mac80211/rate.c
72598 --- linux-2.6.32.45/net/mac80211/rate.c 2011-03-27 14:31:47.000000000 -0400
72599 +++ linux-2.6.32.45/net/mac80211/rate.c 2011-04-17 15:56:46.000000000 -0400
72600 @@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct
72601 struct rate_control_ref *ref, *old;
72602
72603 ASSERT_RTNL();
72604 - if (local->open_count)
72605 + if (local_read(&local->open_count))
72606 return -EBUSY;
72607
72608 ref = rate_control_alloc(name, local);
72609 diff -urNp linux-2.6.32.45/net/mac80211/tx.c linux-2.6.32.45/net/mac80211/tx.c
72610 --- linux-2.6.32.45/net/mac80211/tx.c 2011-03-27 14:31:47.000000000 -0400
72611 +++ linux-2.6.32.45/net/mac80211/tx.c 2011-04-17 15:56:46.000000000 -0400
72612 @@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct
72613 return cpu_to_le16(dur);
72614 }
72615
72616 -static int inline is_ieee80211_device(struct ieee80211_local *local,
72617 +static inline int is_ieee80211_device(struct ieee80211_local *local,
72618 struct net_device *dev)
72619 {
72620 return local == wdev_priv(dev->ieee80211_ptr);
72621 diff -urNp linux-2.6.32.45/net/mac80211/util.c linux-2.6.32.45/net/mac80211/util.c
72622 --- linux-2.6.32.45/net/mac80211/util.c 2011-03-27 14:31:47.000000000 -0400
72623 +++ linux-2.6.32.45/net/mac80211/util.c 2011-04-17 15:56:46.000000000 -0400
72624 @@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_
72625 local->resuming = true;
72626
72627 /* restart hardware */
72628 - if (local->open_count) {
72629 + if (local_read(&local->open_count)) {
72630 /*
72631 * Upon resume hardware can sometimes be goofy due to
72632 * various platform / driver / bus issues, so restarting
72633 diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_app.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_app.c
72634 --- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_app.c 2011-03-27 14:31:47.000000000 -0400
72635 +++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_app.c 2011-05-17 19:26:34.000000000 -0400
72636 @@ -564,7 +564,7 @@ static const struct file_operations ip_v
72637 .open = ip_vs_app_open,
72638 .read = seq_read,
72639 .llseek = seq_lseek,
72640 - .release = seq_release,
72641 + .release = seq_release_net,
72642 };
72643 #endif
72644
72645 diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_conn.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_conn.c
72646 --- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_conn.c 2011-03-27 14:31:47.000000000 -0400
72647 +++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_conn.c 2011-05-17 19:26:34.000000000 -0400
72648 @@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
72649 /* if the connection is not template and is created
72650 * by sync, preserve the activity flag.
72651 */
72652 - cp->flags |= atomic_read(&dest->conn_flags) &
72653 + cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
72654 (~IP_VS_CONN_F_INACTIVE);
72655 else
72656 - cp->flags |= atomic_read(&dest->conn_flags);
72657 + cp->flags |= atomic_read_unchecked(&dest->conn_flags);
72658 cp->dest = dest;
72659
72660 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
72661 @@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const
72662 atomic_set(&cp->refcnt, 1);
72663
72664 atomic_set(&cp->n_control, 0);
72665 - atomic_set(&cp->in_pkts, 0);
72666 + atomic_set_unchecked(&cp->in_pkts, 0);
72667
72668 atomic_inc(&ip_vs_conn_count);
72669 if (flags & IP_VS_CONN_F_NO_CPORT)
72670 @@ -871,7 +871,7 @@ static const struct file_operations ip_v
72671 .open = ip_vs_conn_open,
72672 .read = seq_read,
72673 .llseek = seq_lseek,
72674 - .release = seq_release,
72675 + .release = seq_release_net,
72676 };
72677
72678 static const char *ip_vs_origin_name(unsigned flags)
72679 @@ -934,7 +934,7 @@ static const struct file_operations ip_v
72680 .open = ip_vs_conn_sync_open,
72681 .read = seq_read,
72682 .llseek = seq_lseek,
72683 - .release = seq_release,
72684 + .release = seq_release_net,
72685 };
72686
72687 #endif
72688 @@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip
72689
72690 /* Don't drop the entry if its number of incoming packets is not
72691 located in [0, 8] */
72692 - i = atomic_read(&cp->in_pkts);
72693 + i = atomic_read_unchecked(&cp->in_pkts);
72694 if (i > 8 || i < 0) return 0;
72695
72696 if (!todrop_rate[i]) return 0;
72697 diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_core.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_core.c
72698 --- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_core.c 2011-03-27 14:31:47.000000000 -0400
72699 +++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_core.c 2011-05-04 17:56:28.000000000 -0400
72700 @@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *sv
72701 ret = cp->packet_xmit(skb, cp, pp);
72702 /* do not touch skb anymore */
72703
72704 - atomic_inc(&cp->in_pkts);
72705 + atomic_inc_unchecked(&cp->in_pkts);
72706 ip_vs_conn_put(cp);
72707 return ret;
72708 }
72709 @@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk
72710 * Sync connection if it is about to close to
72711 * encourage the standby servers to update the connections timeout
72712 */
72713 - pkts = atomic_add_return(1, &cp->in_pkts);
72714 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
72715 if (af == AF_INET &&
72716 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
72717 (((cp->protocol != IPPROTO_TCP ||
72718 diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_ctl.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_ctl.c
72719 --- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_ctl.c 2011-03-27 14:31:47.000000000 -0400
72720 +++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_ctl.c 2011-05-17 19:26:34.000000000 -0400
72721 @@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service
72722 ip_vs_rs_hash(dest);
72723 write_unlock_bh(&__ip_vs_rs_lock);
72724 }
72725 - atomic_set(&dest->conn_flags, conn_flags);
72726 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
72727
72728 /* bind the service */
72729 if (!dest->svc) {
72730 @@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct se
72731 " %-7s %-6d %-10d %-10d\n",
72732 &dest->addr.in6,
72733 ntohs(dest->port),
72734 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
72735 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
72736 atomic_read(&dest->weight),
72737 atomic_read(&dest->activeconns),
72738 atomic_read(&dest->inactconns));
72739 @@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct se
72740 "%-7s %-6d %-10d %-10d\n",
72741 ntohl(dest->addr.ip),
72742 ntohs(dest->port),
72743 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
72744 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
72745 atomic_read(&dest->weight),
72746 atomic_read(&dest->activeconns),
72747 atomic_read(&dest->inactconns));
72748 @@ -1927,7 +1927,7 @@ static const struct file_operations ip_v
72749 .open = ip_vs_info_open,
72750 .read = seq_read,
72751 .llseek = seq_lseek,
72752 - .release = seq_release_private,
72753 + .release = seq_release_net,
72754 };
72755
72756 #endif
72757 @@ -1976,7 +1976,7 @@ static const struct file_operations ip_v
72758 .open = ip_vs_stats_seq_open,
72759 .read = seq_read,
72760 .llseek = seq_lseek,
72761 - .release = single_release,
72762 + .release = single_release_net,
72763 };
72764
72765 #endif
72766 @@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip
72767
72768 entry.addr = dest->addr.ip;
72769 entry.port = dest->port;
72770 - entry.conn_flags = atomic_read(&dest->conn_flags);
72771 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
72772 entry.weight = atomic_read(&dest->weight);
72773 entry.u_threshold = dest->u_threshold;
72774 entry.l_threshold = dest->l_threshold;
72775 @@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cm
72776 unsigned char arg[128];
72777 int ret = 0;
72778
72779 + pax_track_stack();
72780 +
72781 if (!capable(CAP_NET_ADMIN))
72782 return -EPERM;
72783
72784 @@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct s
72785 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
72786
72787 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
72788 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
72789 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
72790 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
72791 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
72792 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
72793 diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_sync.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_sync.c
72794 --- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_sync.c 2011-03-27 14:31:47.000000000 -0400
72795 +++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_sync.c 2011-05-04 17:56:28.000000000 -0400
72796 @@ -438,7 +438,7 @@ static void ip_vs_process_message(const
72797
72798 if (opt)
72799 memcpy(&cp->in_seq, opt, sizeof(*opt));
72800 - atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
72801 + atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
72802 cp->state = state;
72803 cp->old_state = cp->state;
72804 /*
72805 diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_xmit.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_xmit.c
72806 --- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_xmit.c 2011-03-27 14:31:47.000000000 -0400
72807 +++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_xmit.c 2011-05-04 17:56:28.000000000 -0400
72808 @@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
72809 else
72810 rc = NF_ACCEPT;
72811 /* do not touch skb anymore */
72812 - atomic_inc(&cp->in_pkts);
72813 + atomic_inc_unchecked(&cp->in_pkts);
72814 goto out;
72815 }
72816
72817 @@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
72818 else
72819 rc = NF_ACCEPT;
72820 /* do not touch skb anymore */
72821 - atomic_inc(&cp->in_pkts);
72822 + atomic_inc_unchecked(&cp->in_pkts);
72823 goto out;
72824 }
72825
72826 diff -urNp linux-2.6.32.45/net/netfilter/Kconfig linux-2.6.32.45/net/netfilter/Kconfig
72827 --- linux-2.6.32.45/net/netfilter/Kconfig 2011-03-27 14:31:47.000000000 -0400
72828 +++ linux-2.6.32.45/net/netfilter/Kconfig 2011-04-17 15:56:46.000000000 -0400
72829 @@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
72830
72831 To compile it as a module, choose M here. If unsure, say N.
72832
72833 +config NETFILTER_XT_MATCH_GRADM
72834 + tristate '"gradm" match support'
72835 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
72836 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
72837 + ---help---
72838 + The gradm match allows matching on whether grsecurity RBAC is enabled.
72839 + It is useful when iptables rules are applied early on bootup to
72840 + prevent connections to the machine (except from a trusted host)
72841 + while the RBAC system is disabled.
72842 +
72843 config NETFILTER_XT_MATCH_HASHLIMIT
72844 tristate '"hashlimit" match support'
72845 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
72846 diff -urNp linux-2.6.32.45/net/netfilter/Makefile linux-2.6.32.45/net/netfilter/Makefile
72847 --- linux-2.6.32.45/net/netfilter/Makefile 2011-03-27 14:31:47.000000000 -0400
72848 +++ linux-2.6.32.45/net/netfilter/Makefile 2011-04-17 15:56:46.000000000 -0400
72849 @@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRAC
72850 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
72851 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
72852 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
72853 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
72854 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
72855 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
72856 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
72857 diff -urNp linux-2.6.32.45/net/netfilter/nf_conntrack_netlink.c linux-2.6.32.45/net/netfilter/nf_conntrack_netlink.c
72858 --- linux-2.6.32.45/net/netfilter/nf_conntrack_netlink.c 2011-03-27 14:31:47.000000000 -0400
72859 +++ linux-2.6.32.45/net/netfilter/nf_conntrack_netlink.c 2011-04-17 15:56:46.000000000 -0400
72860 @@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlatt
72861 static int
72862 ctnetlink_parse_tuple(const struct nlattr * const cda[],
72863 struct nf_conntrack_tuple *tuple,
72864 - enum ctattr_tuple type, u_int8_t l3num)
72865 + enum ctattr_type type, u_int8_t l3num)
72866 {
72867 struct nlattr *tb[CTA_TUPLE_MAX+1];
72868 int err;
72869 diff -urNp linux-2.6.32.45/net/netfilter/nfnetlink_log.c linux-2.6.32.45/net/netfilter/nfnetlink_log.c
72870 --- linux-2.6.32.45/net/netfilter/nfnetlink_log.c 2011-03-27 14:31:47.000000000 -0400
72871 +++ linux-2.6.32.45/net/netfilter/nfnetlink_log.c 2011-05-04 17:56:28.000000000 -0400
72872 @@ -68,7 +68,7 @@ struct nfulnl_instance {
72873 };
72874
72875 static DEFINE_RWLOCK(instances_lock);
72876 -static atomic_t global_seq;
72877 +static atomic_unchecked_t global_seq;
72878
72879 #define INSTANCE_BUCKETS 16
72880 static struct hlist_head instance_table[INSTANCE_BUCKETS];
72881 @@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_ins
72882 /* global sequence number */
72883 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
72884 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
72885 - htonl(atomic_inc_return(&global_seq)));
72886 + htonl(atomic_inc_return_unchecked(&global_seq)));
72887
72888 if (data_len) {
72889 struct nlattr *nla;
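
nfnetlink_log's global_seq above is a plain wrapping sequence counter, so the patch moves it to atomic_unchecked_t and atomic_inc_return_unchecked(); under PaX's refcount overflow protection the *_unchecked variants mark counters that are allowed to wrap so they do not trip the overflow handler. A sketch, assuming the unchecked atomic helpers this patch introduces elsewhere:

#include <linux/types.h>
#include <asm/atomic.h>

/* a statistics/sequence counter that may legitimately wrap */
static atomic_unchecked_t my_seq = ATOMIC_INIT(0);

static u32 next_seq(void)
{
        return (u32)atomic_inc_return_unchecked(&my_seq);
}
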
72890 diff -urNp linux-2.6.32.45/net/netfilter/xt_gradm.c linux-2.6.32.45/net/netfilter/xt_gradm.c
72891 --- linux-2.6.32.45/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
72892 +++ linux-2.6.32.45/net/netfilter/xt_gradm.c 2011-04-17 15:56:46.000000000 -0400
72893 @@ -0,0 +1,51 @@
72894 +/*
72895 + * gradm match for netfilter
72896 + * Copyright © Zbigniew Krzystolik, 2010
72897 + *
72898 + * This program is free software; you can redistribute it and/or modify
72899 + * it under the terms of the GNU General Public License; either version
72900 + * 2 or 3 as published by the Free Software Foundation.
72901 + */
72902 +#include <linux/module.h>
72903 +#include <linux/moduleparam.h>
72904 +#include <linux/skbuff.h>
72905 +#include <linux/netfilter/x_tables.h>
72906 +#include <linux/grsecurity.h>
72907 +#include <linux/netfilter/xt_gradm.h>
72908 +
72909 +static bool
72910 +gradm_mt(const struct sk_buff *skb, const struct xt_match_param *par)
72911 +{
72912 + const struct xt_gradm_mtinfo *info = par->matchinfo;
72913 + bool retval = false;
72914 + if (gr_acl_is_enabled())
72915 + retval = true;
72916 + return retval ^ info->invflags;
72917 +}
72918 +
72919 +static struct xt_match gradm_mt_reg __read_mostly = {
72920 + .name = "gradm",
72921 + .revision = 0,
72922 + .family = NFPROTO_UNSPEC,
72923 + .match = gradm_mt,
72924 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
72925 + .me = THIS_MODULE,
72926 +};
72927 +
72928 +static int __init gradm_mt_init(void)
72929 +{
72930 + return xt_register_match(&gradm_mt_reg);
72931 +}
72932 +
72933 +static void __exit gradm_mt_exit(void)
72934 +{
72935 + xt_unregister_match(&gradm_mt_reg);
72936 +}
72937 +
72938 +module_init(gradm_mt_init);
72939 +module_exit(gradm_mt_exit);
72940 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
72941 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
72942 +MODULE_LICENSE("GPL");
72943 +MODULE_ALIAS("ipt_gradm");
72944 +MODULE_ALIAS("ip6t_gradm");
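
xt_gradm is a brand-new match: it reports true while the grsecurity RBAC system (gradm) is enabled, optionally inverted via invflags, which is what the Kconfig help above means by letting early-boot iptables rules fence the machine off until RBAC is up. The xt_gradm.h header is added elsewhere in the patch and is not shown in this section; a minimal matchinfo layout consistent with the code above might look like the following (an assumption, not the verbatim header):

#include <linux/types.h>

struct xt_gradm_mtinfo {
        __u16 flags;
        __u16 invflags;         /* non-zero inverts the match result */
};

/* gradm_mt() then evaluates: gr_acl_is_enabled() ^ info->invflags */
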
72945 diff -urNp linux-2.6.32.45/net/netlink/af_netlink.c linux-2.6.32.45/net/netlink/af_netlink.c
72946 --- linux-2.6.32.45/net/netlink/af_netlink.c 2011-03-27 14:31:47.000000000 -0400
72947 +++ linux-2.6.32.45/net/netlink/af_netlink.c 2011-05-04 17:56:28.000000000 -0400
72948 @@ -733,7 +733,7 @@ static void netlink_overrun(struct sock
72949 sk->sk_error_report(sk);
72950 }
72951 }
72952 - atomic_inc(&sk->sk_drops);
72953 + atomic_inc_unchecked(&sk->sk_drops);
72954 }
72955
72956 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
72957 @@ -1964,15 +1964,23 @@ static int netlink_seq_show(struct seq_f
72958 struct netlink_sock *nlk = nlk_sk(s);
72959
72960 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d\n",
72961 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72962 + NULL,
72963 +#else
72964 s,
72965 +#endif
72966 s->sk_protocol,
72967 nlk->pid,
72968 nlk->groups ? (u32)nlk->groups[0] : 0,
72969 sk_rmem_alloc_get(s),
72970 sk_wmem_alloc_get(s),
72971 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72972 + NULL,
72973 +#else
72974 nlk->cb,
72975 +#endif
72976 atomic_read(&s->sk_refcnt),
72977 - atomic_read(&s->sk_drops)
72978 + atomic_read_unchecked(&s->sk_drops)
72979 );
72980
72981 }
72982 diff -urNp linux-2.6.32.45/net/netrom/af_netrom.c linux-2.6.32.45/net/netrom/af_netrom.c
72983 --- linux-2.6.32.45/net/netrom/af_netrom.c 2011-03-27 14:31:47.000000000 -0400
72984 +++ linux-2.6.32.45/net/netrom/af_netrom.c 2011-04-17 15:56:46.000000000 -0400
72985 @@ -838,6 +838,7 @@ static int nr_getname(struct socket *soc
72986 struct sock *sk = sock->sk;
72987 struct nr_sock *nr = nr_sk(sk);
72988
72989 + memset(sax, 0, sizeof(*sax));
72990 lock_sock(sk);
72991 if (peer != 0) {
72992 if (sk->sk_state != TCP_ESTABLISHED) {
72993 @@ -852,7 +853,6 @@ static int nr_getname(struct socket *soc
72994 *uaddr_len = sizeof(struct full_sockaddr_ax25);
72995 } else {
72996 sax->fsa_ax25.sax25_family = AF_NETROM;
72997 - sax->fsa_ax25.sax25_ndigis = 0;
72998 sax->fsa_ax25.sax25_call = nr->source_addr;
72999 *uaddr_len = sizeof(struct sockaddr_ax25);
73000 }
73001 diff -urNp linux-2.6.32.45/net/packet/af_packet.c linux-2.6.32.45/net/packet/af_packet.c
73002 --- linux-2.6.32.45/net/packet/af_packet.c 2011-07-13 17:23:04.000000000 -0400
73003 +++ linux-2.6.32.45/net/packet/af_packet.c 2011-07-13 17:23:27.000000000 -0400
73004 @@ -2429,7 +2429,11 @@ static int packet_seq_show(struct seq_fi
73005
73006 seq_printf(seq,
73007 "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
73008 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73009 + NULL,
73010 +#else
73011 s,
73012 +#endif
73013 atomic_read(&s->sk_refcnt),
73014 s->sk_type,
73015 ntohs(po->num),
73016 diff -urNp linux-2.6.32.45/net/phonet/af_phonet.c linux-2.6.32.45/net/phonet/af_phonet.c
73017 --- linux-2.6.32.45/net/phonet/af_phonet.c 2011-03-27 14:31:47.000000000 -0400
73018 +++ linux-2.6.32.45/net/phonet/af_phonet.c 2011-04-17 15:56:46.000000000 -0400
73019 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_pr
73020 {
73021 struct phonet_protocol *pp;
73022
73023 - if (protocol >= PHONET_NPROTO)
73024 + if (protocol < 0 || protocol >= PHONET_NPROTO)
73025 return NULL;
73026
73027 spin_lock(&proto_tab_lock);
73028 @@ -402,7 +402,7 @@ int __init_or_module phonet_proto_regist
73029 {
73030 int err = 0;
73031
73032 - if (protocol >= PHONET_NPROTO)
73033 + if (protocol < 0 || protocol >= PHONET_NPROTO)
73034 return -EINVAL;
73035
73036 err = proto_register(pp->prot, 1);
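
Both phonet hunks above tighten an array-index check: protocol is a signed int coming in from a socket call, and testing only protocol >= PHONET_NPROTO would let a negative value index in front of the protocol table, so the patch rejects negative values as well. The shape of the fix, with hypothetical names:

#define NPROTO_MAX 16                   /* hypothetical table size */

static void *proto_tab[NPROTO_MAX];

static void *proto_lookup(int protocol)
{
        if (protocol < 0 || protocol >= NPROTO_MAX)     /* check both bounds */
                return NULL;
        return proto_tab[protocol];
}
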
73037 diff -urNp linux-2.6.32.45/net/phonet/datagram.c linux-2.6.32.45/net/phonet/datagram.c
73038 --- linux-2.6.32.45/net/phonet/datagram.c 2011-03-27 14:31:47.000000000 -0400
73039 +++ linux-2.6.32.45/net/phonet/datagram.c 2011-05-04 17:56:28.000000000 -0400
73040 @@ -162,7 +162,7 @@ static int pn_backlog_rcv(struct sock *s
73041 if (err < 0) {
73042 kfree_skb(skb);
73043 if (err == -ENOMEM)
73044 - atomic_inc(&sk->sk_drops);
73045 + atomic_inc_unchecked(&sk->sk_drops);
73046 }
73047 return err ? NET_RX_DROP : NET_RX_SUCCESS;
73048 }
73049 diff -urNp linux-2.6.32.45/net/phonet/pep.c linux-2.6.32.45/net/phonet/pep.c
73050 --- linux-2.6.32.45/net/phonet/pep.c 2011-03-27 14:31:47.000000000 -0400
73051 +++ linux-2.6.32.45/net/phonet/pep.c 2011-05-04 17:56:28.000000000 -0400
73052 @@ -348,7 +348,7 @@ static int pipe_do_rcv(struct sock *sk,
73053
73054 case PNS_PEP_CTRL_REQ:
73055 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
73056 - atomic_inc(&sk->sk_drops);
73057 + atomic_inc_unchecked(&sk->sk_drops);
73058 break;
73059 }
73060 __skb_pull(skb, 4);
73061 @@ -362,12 +362,12 @@ static int pipe_do_rcv(struct sock *sk,
73062 if (!err)
73063 return 0;
73064 if (err == -ENOMEM)
73065 - atomic_inc(&sk->sk_drops);
73066 + atomic_inc_unchecked(&sk->sk_drops);
73067 break;
73068 }
73069
73070 if (pn->rx_credits == 0) {
73071 - atomic_inc(&sk->sk_drops);
73072 + atomic_inc_unchecked(&sk->sk_drops);
73073 err = -ENOBUFS;
73074 break;
73075 }
73076 diff -urNp linux-2.6.32.45/net/phonet/socket.c linux-2.6.32.45/net/phonet/socket.c
73077 --- linux-2.6.32.45/net/phonet/socket.c 2011-03-27 14:31:47.000000000 -0400
73078 +++ linux-2.6.32.45/net/phonet/socket.c 2011-05-04 17:57:07.000000000 -0400
73079 @@ -482,8 +482,13 @@ static int pn_sock_seq_show(struct seq_f
73080 sk->sk_state,
73081 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
73082 sock_i_uid(sk), sock_i_ino(sk),
73083 - atomic_read(&sk->sk_refcnt), sk,
73084 - atomic_read(&sk->sk_drops), &len);
73085 + atomic_read(&sk->sk_refcnt),
73086 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73087 + NULL,
73088 +#else
73089 + sk,
73090 +#endif
73091 + atomic_read_unchecked(&sk->sk_drops), &len);
73092 }
73093 seq_printf(seq, "%*s\n", 127 - len, "");
73094 return 0;
73095 diff -urNp linux-2.6.32.45/net/rds/cong.c linux-2.6.32.45/net/rds/cong.c
73096 --- linux-2.6.32.45/net/rds/cong.c 2011-03-27 14:31:47.000000000 -0400
73097 +++ linux-2.6.32.45/net/rds/cong.c 2011-05-04 17:56:28.000000000 -0400
73098 @@ -77,7 +77,7 @@
73099 * finds that the saved generation number is smaller than the global generation
73100 * number, it wakes up the process.
73101 */
73102 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
73103 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
73104
73105 /*
73106 * Congestion monitoring
73107 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_con
73108 rdsdebug("waking map %p for %pI4\n",
73109 map, &map->m_addr);
73110 rds_stats_inc(s_cong_update_received);
73111 - atomic_inc(&rds_cong_generation);
73112 + atomic_inc_unchecked(&rds_cong_generation);
73113 if (waitqueue_active(&map->m_waitq))
73114 wake_up(&map->m_waitq);
73115 if (waitqueue_active(&rds_poll_waitq))
73116 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
73117
73118 int rds_cong_updated_since(unsigned long *recent)
73119 {
73120 - unsigned long gen = atomic_read(&rds_cong_generation);
73121 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
73122
73123 if (likely(*recent == gen))
73124 return 0;
73125 diff -urNp linux-2.6.32.45/net/rds/iw_rdma.c linux-2.6.32.45/net/rds/iw_rdma.c
73126 --- linux-2.6.32.45/net/rds/iw_rdma.c 2011-03-27 14:31:47.000000000 -0400
73127 +++ linux-2.6.32.45/net/rds/iw_rdma.c 2011-05-16 21:46:57.000000000 -0400
73128 @@ -181,6 +181,8 @@ int rds_iw_update_cm_id(struct rds_iw_de
73129 struct rdma_cm_id *pcm_id;
73130 int rc;
73131
73132 + pax_track_stack();
73133 +
73134 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
73135 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
73136
73137 diff -urNp linux-2.6.32.45/net/rds/Kconfig linux-2.6.32.45/net/rds/Kconfig
73138 --- linux-2.6.32.45/net/rds/Kconfig 2011-03-27 14:31:47.000000000 -0400
73139 +++ linux-2.6.32.45/net/rds/Kconfig 2011-04-17 15:56:46.000000000 -0400
73140 @@ -1,7 +1,7 @@
73141
73142 config RDS
73143 tristate "The RDS Protocol (EXPERIMENTAL)"
73144 - depends on INET && EXPERIMENTAL
73145 + depends on INET && EXPERIMENTAL && BROKEN
73146 ---help---
73147 The RDS (Reliable Datagram Sockets) protocol provides reliable,
73148 sequenced delivery of datagrams over Infiniband, iWARP,
73149 diff -urNp linux-2.6.32.45/net/rxrpc/af_rxrpc.c linux-2.6.32.45/net/rxrpc/af_rxrpc.c
73150 --- linux-2.6.32.45/net/rxrpc/af_rxrpc.c 2011-03-27 14:31:47.000000000 -0400
73151 +++ linux-2.6.32.45/net/rxrpc/af_rxrpc.c 2011-05-04 17:56:28.000000000 -0400
73152 @@ -38,7 +38,7 @@ static const struct proto_ops rxrpc_rpc_
73153 __be32 rxrpc_epoch;
73154
73155 /* current debugging ID */
73156 -atomic_t rxrpc_debug_id;
73157 +atomic_unchecked_t rxrpc_debug_id;
73158
73159 /* count of skbs currently in use */
73160 atomic_t rxrpc_n_skbs;
73161 diff -urNp linux-2.6.32.45/net/rxrpc/ar-ack.c linux-2.6.32.45/net/rxrpc/ar-ack.c
73162 --- linux-2.6.32.45/net/rxrpc/ar-ack.c 2011-03-27 14:31:47.000000000 -0400
73163 +++ linux-2.6.32.45/net/rxrpc/ar-ack.c 2011-05-16 21:46:57.000000000 -0400
73164 @@ -174,7 +174,7 @@ static void rxrpc_resend(struct rxrpc_ca
73165
73166 _enter("{%d,%d,%d,%d},",
73167 call->acks_hard, call->acks_unacked,
73168 - atomic_read(&call->sequence),
73169 + atomic_read_unchecked(&call->sequence),
73170 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
73171
73172 stop = 0;
73173 @@ -198,7 +198,7 @@ static void rxrpc_resend(struct rxrpc_ca
73174
73175 /* each Tx packet has a new serial number */
73176 sp->hdr.serial =
73177 - htonl(atomic_inc_return(&call->conn->serial));
73178 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
73179
73180 hdr = (struct rxrpc_header *) txb->head;
73181 hdr->serial = sp->hdr.serial;
73182 @@ -401,7 +401,7 @@ static void rxrpc_rotate_tx_window(struc
73183 */
73184 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
73185 {
73186 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
73187 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
73188 }
73189
73190 /*
73191 @@ -627,7 +627,7 @@ process_further:
73192
73193 latest = ntohl(sp->hdr.serial);
73194 hard = ntohl(ack.firstPacket);
73195 - tx = atomic_read(&call->sequence);
73196 + tx = atomic_read_unchecked(&call->sequence);
73197
73198 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
73199 latest,
73200 @@ -840,6 +840,8 @@ void rxrpc_process_call(struct work_stru
73201 u32 abort_code = RX_PROTOCOL_ERROR;
73202 u8 *acks = NULL;
73203
73204 + pax_track_stack();
73205 +
73206 //printk("\n--------------------\n");
73207 _enter("{%d,%s,%lx} [%lu]",
73208 call->debug_id, rxrpc_call_states[call->state], call->events,
73209 @@ -1159,7 +1161,7 @@ void rxrpc_process_call(struct work_stru
73210 goto maybe_reschedule;
73211
73212 send_ACK_with_skew:
73213 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
73214 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
73215 ntohl(ack.serial));
73216 send_ACK:
73217 mtu = call->conn->trans->peer->if_mtu;
73218 @@ -1171,7 +1173,7 @@ send_ACK:
73219 ackinfo.rxMTU = htonl(5692);
73220 ackinfo.jumbo_max = htonl(4);
73221
73222 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
73223 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
73224 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
73225 ntohl(hdr.serial),
73226 ntohs(ack.maxSkew),
73227 @@ -1189,7 +1191,7 @@ send_ACK:
73228 send_message:
73229 _debug("send message");
73230
73231 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
73232 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
73233 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
73234 send_message_2:
73235
73236 diff -urNp linux-2.6.32.45/net/rxrpc/ar-call.c linux-2.6.32.45/net/rxrpc/ar-call.c
73237 --- linux-2.6.32.45/net/rxrpc/ar-call.c 2011-03-27 14:31:47.000000000 -0400
73238 +++ linux-2.6.32.45/net/rxrpc/ar-call.c 2011-05-04 17:56:28.000000000 -0400
73239 @@ -82,7 +82,7 @@ static struct rxrpc_call *rxrpc_alloc_ca
73240 spin_lock_init(&call->lock);
73241 rwlock_init(&call->state_lock);
73242 atomic_set(&call->usage, 1);
73243 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
73244 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
73245 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
73246
73247 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
73248 diff -urNp linux-2.6.32.45/net/rxrpc/ar-connection.c linux-2.6.32.45/net/rxrpc/ar-connection.c
73249 --- linux-2.6.32.45/net/rxrpc/ar-connection.c 2011-03-27 14:31:47.000000000 -0400
73250 +++ linux-2.6.32.45/net/rxrpc/ar-connection.c 2011-05-04 17:56:28.000000000 -0400
73251 @@ -205,7 +205,7 @@ static struct rxrpc_connection *rxrpc_al
73252 rwlock_init(&conn->lock);
73253 spin_lock_init(&conn->state_lock);
73254 atomic_set(&conn->usage, 1);
73255 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
73256 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
73257 conn->avail_calls = RXRPC_MAXCALLS;
73258 conn->size_align = 4;
73259 conn->header_size = sizeof(struct rxrpc_header);
73260 diff -urNp linux-2.6.32.45/net/rxrpc/ar-connevent.c linux-2.6.32.45/net/rxrpc/ar-connevent.c
73261 --- linux-2.6.32.45/net/rxrpc/ar-connevent.c 2011-03-27 14:31:47.000000000 -0400
73262 +++ linux-2.6.32.45/net/rxrpc/ar-connevent.c 2011-05-04 17:56:28.000000000 -0400
73263 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct
73264
73265 len = iov[0].iov_len + iov[1].iov_len;
73266
73267 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
73268 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
73269 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
73270
73271 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
73272 diff -urNp linux-2.6.32.45/net/rxrpc/ar-input.c linux-2.6.32.45/net/rxrpc/ar-input.c
73273 --- linux-2.6.32.45/net/rxrpc/ar-input.c 2011-03-27 14:31:47.000000000 -0400
73274 +++ linux-2.6.32.45/net/rxrpc/ar-input.c 2011-05-04 17:56:28.000000000 -0400
73275 @@ -339,9 +339,9 @@ void rxrpc_fast_process_packet(struct rx
73276 /* track the latest serial number on this connection for ACK packet
73277 * information */
73278 serial = ntohl(sp->hdr.serial);
73279 - hi_serial = atomic_read(&call->conn->hi_serial);
73280 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
73281 while (serial > hi_serial)
73282 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
73283 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
73284 serial);
73285
73286 /* request ACK generation for any ACK or DATA packet that requests
73287 diff -urNp linux-2.6.32.45/net/rxrpc/ar-internal.h linux-2.6.32.45/net/rxrpc/ar-internal.h
73288 --- linux-2.6.32.45/net/rxrpc/ar-internal.h 2011-03-27 14:31:47.000000000 -0400
73289 +++ linux-2.6.32.45/net/rxrpc/ar-internal.h 2011-05-04 17:56:28.000000000 -0400
73290 @@ -272,8 +272,8 @@ struct rxrpc_connection {
73291 int error; /* error code for local abort */
73292 int debug_id; /* debug ID for printks */
73293 unsigned call_counter; /* call ID counter */
73294 - atomic_t serial; /* packet serial number counter */
73295 - atomic_t hi_serial; /* highest serial number received */
73296 + atomic_unchecked_t serial; /* packet serial number counter */
73297 + atomic_unchecked_t hi_serial; /* highest serial number received */
73298 u8 avail_calls; /* number of calls available */
73299 u8 size_align; /* data size alignment (for security) */
73300 u8 header_size; /* rxrpc + security header size */
73301 @@ -346,7 +346,7 @@ struct rxrpc_call {
73302 spinlock_t lock;
73303 rwlock_t state_lock; /* lock for state transition */
73304 atomic_t usage;
73305 - atomic_t sequence; /* Tx data packet sequence counter */
73306 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
73307 u32 abort_code; /* local/remote abort code */
73308 enum { /* current state of call */
73309 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
73310 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(stru
73311 */
73312 extern atomic_t rxrpc_n_skbs;
73313 extern __be32 rxrpc_epoch;
73314 -extern atomic_t rxrpc_debug_id;
73315 +extern atomic_unchecked_t rxrpc_debug_id;
73316 extern struct workqueue_struct *rxrpc_workqueue;
73317
73318 /*
73319 diff -urNp linux-2.6.32.45/net/rxrpc/ar-key.c linux-2.6.32.45/net/rxrpc/ar-key.c
73320 --- linux-2.6.32.45/net/rxrpc/ar-key.c 2011-03-27 14:31:47.000000000 -0400
73321 +++ linux-2.6.32.45/net/rxrpc/ar-key.c 2011-04-17 15:56:46.000000000 -0400
73322 @@ -88,11 +88,11 @@ static int rxrpc_instantiate_xdr_rxkad(s
73323 return ret;
73324
73325 plen -= sizeof(*token);
73326 - token = kmalloc(sizeof(*token), GFP_KERNEL);
73327 + token = kzalloc(sizeof(*token), GFP_KERNEL);
73328 if (!token)
73329 return -ENOMEM;
73330
73331 - token->kad = kmalloc(plen, GFP_KERNEL);
73332 + token->kad = kzalloc(plen, GFP_KERNEL);
73333 if (!token->kad) {
73334 kfree(token);
73335 return -ENOMEM;
73336 @@ -730,10 +730,10 @@ static int rxrpc_instantiate(struct key
73337 goto error;
73338
73339 ret = -ENOMEM;
73340 - token = kmalloc(sizeof(*token), GFP_KERNEL);
73341 + token = kzalloc(sizeof(*token), GFP_KERNEL);
73342 if (!token)
73343 goto error;
73344 - token->kad = kmalloc(plen, GFP_KERNEL);
73345 + token->kad = kzalloc(plen, GFP_KERNEL);
73346 if (!token->kad)
73347 goto error_free;
73348
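
The rxrpc key instantiation changes above swap kmalloc() for kzalloc() when allocating the token and its kad payload: both objects are filled in piecemeal from externally supplied XDR data, and zero-initialising them means any field the parser bails out before reaching holds zeros rather than stale heap bytes. A minimal sketch of the same choice, with a hypothetical structure:

#include <linux/slab.h>
#include <linux/types.h>

struct my_token {                       /* hypothetical */
        u32 kvno;
        u8  session_key[8];
};

static struct my_token *alloc_token(void)
{
        /* kzalloc, not kmalloc: unwritten fields stay zero */
        return kzalloc(sizeof(struct my_token), GFP_KERNEL);
}
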
73349 diff -urNp linux-2.6.32.45/net/rxrpc/ar-local.c linux-2.6.32.45/net/rxrpc/ar-local.c
73350 --- linux-2.6.32.45/net/rxrpc/ar-local.c 2011-03-27 14:31:47.000000000 -0400
73351 +++ linux-2.6.32.45/net/rxrpc/ar-local.c 2011-05-04 17:56:28.000000000 -0400
73352 @@ -44,7 +44,7 @@ struct rxrpc_local *rxrpc_alloc_local(st
73353 spin_lock_init(&local->lock);
73354 rwlock_init(&local->services_lock);
73355 atomic_set(&local->usage, 1);
73356 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
73357 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
73358 memcpy(&local->srx, srx, sizeof(*srx));
73359 }
73360
73361 diff -urNp linux-2.6.32.45/net/rxrpc/ar-output.c linux-2.6.32.45/net/rxrpc/ar-output.c
73362 --- linux-2.6.32.45/net/rxrpc/ar-output.c 2011-03-27 14:31:47.000000000 -0400
73363 +++ linux-2.6.32.45/net/rxrpc/ar-output.c 2011-05-04 17:56:28.000000000 -0400
73364 @@ -680,9 +680,9 @@ static int rxrpc_send_data(struct kiocb
73365 sp->hdr.cid = call->cid;
73366 sp->hdr.callNumber = call->call_id;
73367 sp->hdr.seq =
73368 - htonl(atomic_inc_return(&call->sequence));
73369 + htonl(atomic_inc_return_unchecked(&call->sequence));
73370 sp->hdr.serial =
73371 - htonl(atomic_inc_return(&conn->serial));
73372 + htonl(atomic_inc_return_unchecked(&conn->serial));
73373 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
73374 sp->hdr.userStatus = 0;
73375 sp->hdr.securityIndex = conn->security_ix;
73376 diff -urNp linux-2.6.32.45/net/rxrpc/ar-peer.c linux-2.6.32.45/net/rxrpc/ar-peer.c
73377 --- linux-2.6.32.45/net/rxrpc/ar-peer.c 2011-03-27 14:31:47.000000000 -0400
73378 +++ linux-2.6.32.45/net/rxrpc/ar-peer.c 2011-05-04 17:56:28.000000000 -0400
73379 @@ -86,7 +86,7 @@ static struct rxrpc_peer *rxrpc_alloc_pe
73380 INIT_LIST_HEAD(&peer->error_targets);
73381 spin_lock_init(&peer->lock);
73382 atomic_set(&peer->usage, 1);
73383 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
73384 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
73385 memcpy(&peer->srx, srx, sizeof(*srx));
73386
73387 rxrpc_assess_MTU_size(peer);
73388 diff -urNp linux-2.6.32.45/net/rxrpc/ar-proc.c linux-2.6.32.45/net/rxrpc/ar-proc.c
73389 --- linux-2.6.32.45/net/rxrpc/ar-proc.c 2011-03-27 14:31:47.000000000 -0400
73390 +++ linux-2.6.32.45/net/rxrpc/ar-proc.c 2011-05-04 17:56:28.000000000 -0400
73391 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(str
73392 atomic_read(&conn->usage),
73393 rxrpc_conn_states[conn->state],
73394 key_serial(conn->key),
73395 - atomic_read(&conn->serial),
73396 - atomic_read(&conn->hi_serial));
73397 + atomic_read_unchecked(&conn->serial),
73398 + atomic_read_unchecked(&conn->hi_serial));
73399
73400 return 0;
73401 }
73402 diff -urNp linux-2.6.32.45/net/rxrpc/ar-transport.c linux-2.6.32.45/net/rxrpc/ar-transport.c
73403 --- linux-2.6.32.45/net/rxrpc/ar-transport.c 2011-03-27 14:31:47.000000000 -0400
73404 +++ linux-2.6.32.45/net/rxrpc/ar-transport.c 2011-05-04 17:56:28.000000000 -0400
73405 @@ -46,7 +46,7 @@ static struct rxrpc_transport *rxrpc_all
73406 spin_lock_init(&trans->client_lock);
73407 rwlock_init(&trans->conn_lock);
73408 atomic_set(&trans->usage, 1);
73409 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
73410 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
73411
73412 if (peer->srx.transport.family == AF_INET) {
73413 switch (peer->srx.transport_type) {
73414 diff -urNp linux-2.6.32.45/net/rxrpc/rxkad.c linux-2.6.32.45/net/rxrpc/rxkad.c
73415 --- linux-2.6.32.45/net/rxrpc/rxkad.c 2011-03-27 14:31:47.000000000 -0400
73416 +++ linux-2.6.32.45/net/rxrpc/rxkad.c 2011-05-16 21:46:57.000000000 -0400
73417 @@ -210,6 +210,8 @@ static int rxkad_secure_packet_encrypt(c
73418 u16 check;
73419 int nsg;
73420
73421 + pax_track_stack();
73422 +
73423 sp = rxrpc_skb(skb);
73424
73425 _enter("");
73426 @@ -337,6 +339,8 @@ static int rxkad_verify_packet_auth(cons
73427 u16 check;
73428 int nsg;
73429
73430 + pax_track_stack();
73431 +
73432 _enter("");
73433
73434 sp = rxrpc_skb(skb);
73435 @@ -609,7 +613,7 @@ static int rxkad_issue_challenge(struct
73436
73437 len = iov[0].iov_len + iov[1].iov_len;
73438
73439 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
73440 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
73441 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
73442
73443 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
73444 @@ -659,7 +663,7 @@ static int rxkad_send_response(struct rx
73445
73446 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
73447
73448 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
73449 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
73450 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
73451
73452 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
73453 diff -urNp linux-2.6.32.45/net/sctp/proc.c linux-2.6.32.45/net/sctp/proc.c
73454 --- linux-2.6.32.45/net/sctp/proc.c 2011-03-27 14:31:47.000000000 -0400
73455 +++ linux-2.6.32.45/net/sctp/proc.c 2011-04-17 15:56:46.000000000 -0400
73456 @@ -213,7 +213,12 @@ static int sctp_eps_seq_show(struct seq_
73457 sctp_for_each_hentry(epb, node, &head->chain) {
73458 ep = sctp_ep(epb);
73459 sk = epb->sk;
73460 - seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
73461 + seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ",
73462 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73463 + NULL, NULL,
73464 +#else
73465 + ep, sk,
73466 +#endif
73467 sctp_sk(sk)->type, sk->sk_state, hash,
73468 epb->bind_addr.port,
73469 sock_i_uid(sk), sock_i_ino(sk));
73470 @@ -320,7 +325,12 @@ static int sctp_assocs_seq_show(struct s
73471 seq_printf(seq,
73472 "%8p %8p %-3d %-3d %-2d %-4d "
73473 "%4d %8d %8d %7d %5lu %-5d %5d ",
73474 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
73475 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73476 + NULL, NULL,
73477 +#else
73478 + assoc, sk,
73479 +#endif
73480 + sctp_sk(sk)->type, sk->sk_state,
73481 assoc->state, hash,
73482 assoc->assoc_id,
73483 assoc->sndbuf_used,
73484 diff -urNp linux-2.6.32.45/net/sctp/socket.c linux-2.6.32.45/net/sctp/socket.c
73485 --- linux-2.6.32.45/net/sctp/socket.c 2011-03-27 14:31:47.000000000 -0400
73486 +++ linux-2.6.32.45/net/sctp/socket.c 2011-04-23 12:56:11.000000000 -0400
73487 @@ -5802,7 +5802,6 @@ pp_found:
73488 */
73489 int reuse = sk->sk_reuse;
73490 struct sock *sk2;
73491 - struct hlist_node *node;
73492
73493 SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n");
73494 if (pp->fastreuse && sk->sk_reuse &&
73495 diff -urNp linux-2.6.32.45/net/socket.c linux-2.6.32.45/net/socket.c
73496 --- linux-2.6.32.45/net/socket.c 2011-03-27 14:31:47.000000000 -0400
73497 +++ linux-2.6.32.45/net/socket.c 2011-05-16 21:46:57.000000000 -0400
73498 @@ -87,6 +87,7 @@
73499 #include <linux/wireless.h>
73500 #include <linux/nsproxy.h>
73501 #include <linux/magic.h>
73502 +#include <linux/in.h>
73503
73504 #include <asm/uaccess.h>
73505 #include <asm/unistd.h>
73506 @@ -97,6 +98,21 @@
73507 #include <net/sock.h>
73508 #include <linux/netfilter.h>
73509
73510 +extern void gr_attach_curr_ip(const struct sock *sk);
73511 +extern int gr_handle_sock_all(const int family, const int type,
73512 + const int protocol);
73513 +extern int gr_handle_sock_server(const struct sockaddr *sck);
73514 +extern int gr_handle_sock_server_other(const struct sock *sck);
73515 +extern int gr_handle_sock_client(const struct sockaddr *sck);
73516 +extern int gr_search_connect(struct socket * sock,
73517 + struct sockaddr_in * addr);
73518 +extern int gr_search_bind(struct socket * sock,
73519 + struct sockaddr_in * addr);
73520 +extern int gr_search_listen(struct socket * sock);
73521 +extern int gr_search_accept(struct socket * sock);
73522 +extern int gr_search_socket(const int domain, const int type,
73523 + const int protocol);
73524 +
73525 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
73526 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
73527 unsigned long nr_segs, loff_t pos);
73528 @@ -298,7 +314,7 @@ static int sockfs_get_sb(struct file_sys
73529 mnt);
73530 }
73531
73532 -static struct vfsmount *sock_mnt __read_mostly;
73533 +struct vfsmount *sock_mnt __read_mostly;
73534
73535 static struct file_system_type sock_fs_type = {
73536 .name = "sockfs",
73537 @@ -1154,6 +1170,8 @@ static int __sock_create(struct net *net
73538 return -EAFNOSUPPORT;
73539 if (type < 0 || type >= SOCK_MAX)
73540 return -EINVAL;
73541 + if (protocol < 0)
73542 + return -EINVAL;
73543
73544 /* Compatibility.
73545
73546 @@ -1283,6 +1301,16 @@ SYSCALL_DEFINE3(socket, int, family, int
73547 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
73548 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
73549
73550 + if(!gr_search_socket(family, type, protocol)) {
73551 + retval = -EACCES;
73552 + goto out;
73553 + }
73554 +
73555 + if (gr_handle_sock_all(family, type, protocol)) {
73556 + retval = -EACCES;
73557 + goto out;
73558 + }
73559 +
73560 retval = sock_create(family, type, protocol, &sock);
73561 if (retval < 0)
73562 goto out;
73563 @@ -1415,6 +1443,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
73564 if (sock) {
73565 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
73566 if (err >= 0) {
73567 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
73568 + err = -EACCES;
73569 + goto error;
73570 + }
73571 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
73572 + if (err)
73573 + goto error;
73574 +
73575 err = security_socket_bind(sock,
73576 (struct sockaddr *)&address,
73577 addrlen);
73578 @@ -1423,6 +1459,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
73579 (struct sockaddr *)
73580 &address, addrlen);
73581 }
73582 +error:
73583 fput_light(sock->file, fput_needed);
73584 }
73585 return err;
73586 @@ -1446,10 +1483,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, ba
73587 if ((unsigned)backlog > somaxconn)
73588 backlog = somaxconn;
73589
73590 + if (gr_handle_sock_server_other(sock->sk)) {
73591 + err = -EPERM;
73592 + goto error;
73593 + }
73594 +
73595 + err = gr_search_listen(sock);
73596 + if (err)
73597 + goto error;
73598 +
73599 err = security_socket_listen(sock, backlog);
73600 if (!err)
73601 err = sock->ops->listen(sock, backlog);
73602
73603 +error:
73604 fput_light(sock->file, fput_needed);
73605 }
73606 return err;
73607 @@ -1492,6 +1539,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
73608 newsock->type = sock->type;
73609 newsock->ops = sock->ops;
73610
73611 + if (gr_handle_sock_server_other(sock->sk)) {
73612 + err = -EPERM;
73613 + sock_release(newsock);
73614 + goto out_put;
73615 + }
73616 +
73617 + err = gr_search_accept(sock);
73618 + if (err) {
73619 + sock_release(newsock);
73620 + goto out_put;
73621 + }
73622 +
73623 /*
73624 * We don't need try_module_get here, as the listening socket (sock)
73625 * has the protocol module (sock->ops->owner) held.
73626 @@ -1534,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
73627 fd_install(newfd, newfile);
73628 err = newfd;
73629
73630 + gr_attach_curr_ip(newsock->sk);
73631 +
73632 out_put:
73633 fput_light(sock->file, fput_needed);
73634 out:
73635 @@ -1571,6 +1632,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct
73636 int, addrlen)
73637 {
73638 struct socket *sock;
73639 + struct sockaddr *sck;
73640 struct sockaddr_storage address;
73641 int err, fput_needed;
73642
73643 @@ -1581,6 +1643,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct
73644 if (err < 0)
73645 goto out_put;
73646
73647 + sck = (struct sockaddr *)&address;
73648 +
73649 + if (gr_handle_sock_client(sck)) {
73650 + err = -EACCES;
73651 + goto out_put;
73652 + }
73653 +
73654 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
73655 + if (err)
73656 + goto out_put;
73657 +
73658 err =
73659 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
73660 if (err)
73661 @@ -1882,6 +1955,8 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct
73662 int err, ctl_len, iov_size, total_len;
73663 int fput_needed;
73664
73665 + pax_track_stack();
73666 +
73667 err = -EFAULT;
73668 if (MSG_CMSG_COMPAT & flags) {
73669 if (get_compat_msghdr(&msg_sys, msg_compat))
73670 diff -urNp linux-2.6.32.45/net/sunrpc/sched.c linux-2.6.32.45/net/sunrpc/sched.c
73671 --- linux-2.6.32.45/net/sunrpc/sched.c 2011-08-09 18:35:30.000000000 -0400
73672 +++ linux-2.6.32.45/net/sunrpc/sched.c 2011-08-09 18:34:01.000000000 -0400
73673 @@ -234,10 +234,10 @@ static int rpc_wait_bit_killable(void *w
73674 #ifdef RPC_DEBUG
73675 static void rpc_task_set_debuginfo(struct rpc_task *task)
73676 {
73677 - static atomic_t rpc_pid;
73678 + static atomic_unchecked_t rpc_pid;
73679
73680 task->tk_magic = RPC_TASK_MAGIC_ID;
73681 - task->tk_pid = atomic_inc_return(&rpc_pid);
73682 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
73683 }
73684 #else
73685 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
73686 diff -urNp linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma.c linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma.c
73687 --- linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma.c 2011-03-27 14:31:47.000000000 -0400
73688 +++ linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma.c 2011-05-04 17:56:20.000000000 -0400
73689 @@ -59,15 +59,15 @@ unsigned int svcrdma_max_req_size = RPCR
73690 static unsigned int min_max_inline = 4096;
73691 static unsigned int max_max_inline = 65536;
73692
73693 -atomic_t rdma_stat_recv;
73694 -atomic_t rdma_stat_read;
73695 -atomic_t rdma_stat_write;
73696 -atomic_t rdma_stat_sq_starve;
73697 -atomic_t rdma_stat_rq_starve;
73698 -atomic_t rdma_stat_rq_poll;
73699 -atomic_t rdma_stat_rq_prod;
73700 -atomic_t rdma_stat_sq_poll;
73701 -atomic_t rdma_stat_sq_prod;
73702 +atomic_unchecked_t rdma_stat_recv;
73703 +atomic_unchecked_t rdma_stat_read;
73704 +atomic_unchecked_t rdma_stat_write;
73705 +atomic_unchecked_t rdma_stat_sq_starve;
73706 +atomic_unchecked_t rdma_stat_rq_starve;
73707 +atomic_unchecked_t rdma_stat_rq_poll;
73708 +atomic_unchecked_t rdma_stat_rq_prod;
73709 +atomic_unchecked_t rdma_stat_sq_poll;
73710 +atomic_unchecked_t rdma_stat_sq_prod;
73711
73712 /* Temporary NFS request map and context caches */
73713 struct kmem_cache *svc_rdma_map_cachep;
73714 @@ -105,7 +105,7 @@ static int read_reset_stat(ctl_table *ta
73715 len -= *ppos;
73716 if (len > *lenp)
73717 len = *lenp;
73718 - if (len && copy_to_user(buffer, str_buf, len))
73719 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
73720 return -EFAULT;
73721 *lenp = len;
73722 *ppos += len;
73723 @@ -149,63 +149,63 @@ static ctl_table svcrdma_parm_table[] =
73724 {
73725 .procname = "rdma_stat_read",
73726 .data = &rdma_stat_read,
73727 - .maxlen = sizeof(atomic_t),
73728 + .maxlen = sizeof(atomic_unchecked_t),
73729 .mode = 0644,
73730 .proc_handler = &read_reset_stat,
73731 },
73732 {
73733 .procname = "rdma_stat_recv",
73734 .data = &rdma_stat_recv,
73735 - .maxlen = sizeof(atomic_t),
73736 + .maxlen = sizeof(atomic_unchecked_t),
73737 .mode = 0644,
73738 .proc_handler = &read_reset_stat,
73739 },
73740 {
73741 .procname = "rdma_stat_write",
73742 .data = &rdma_stat_write,
73743 - .maxlen = sizeof(atomic_t),
73744 + .maxlen = sizeof(atomic_unchecked_t),
73745 .mode = 0644,
73746 .proc_handler = &read_reset_stat,
73747 },
73748 {
73749 .procname = "rdma_stat_sq_starve",
73750 .data = &rdma_stat_sq_starve,
73751 - .maxlen = sizeof(atomic_t),
73752 + .maxlen = sizeof(atomic_unchecked_t),
73753 .mode = 0644,
73754 .proc_handler = &read_reset_stat,
73755 },
73756 {
73757 .procname = "rdma_stat_rq_starve",
73758 .data = &rdma_stat_rq_starve,
73759 - .maxlen = sizeof(atomic_t),
73760 + .maxlen = sizeof(atomic_unchecked_t),
73761 .mode = 0644,
73762 .proc_handler = &read_reset_stat,
73763 },
73764 {
73765 .procname = "rdma_stat_rq_poll",
73766 .data = &rdma_stat_rq_poll,
73767 - .maxlen = sizeof(atomic_t),
73768 + .maxlen = sizeof(atomic_unchecked_t),
73769 .mode = 0644,
73770 .proc_handler = &read_reset_stat,
73771 },
73772 {
73773 .procname = "rdma_stat_rq_prod",
73774 .data = &rdma_stat_rq_prod,
73775 - .maxlen = sizeof(atomic_t),
73776 + .maxlen = sizeof(atomic_unchecked_t),
73777 .mode = 0644,
73778 .proc_handler = &read_reset_stat,
73779 },
73780 {
73781 .procname = "rdma_stat_sq_poll",
73782 .data = &rdma_stat_sq_poll,
73783 - .maxlen = sizeof(atomic_t),
73784 + .maxlen = sizeof(atomic_unchecked_t),
73785 .mode = 0644,
73786 .proc_handler = &read_reset_stat,
73787 },
73788 {
73789 .procname = "rdma_stat_sq_prod",
73790 .data = &rdma_stat_sq_prod,
73791 - .maxlen = sizeof(atomic_t),
73792 + .maxlen = sizeof(atomic_unchecked_t),
73793 .mode = 0644,
73794 .proc_handler = &read_reset_stat,
73795 },
73796 diff -urNp linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
73797 --- linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-03-27 14:31:47.000000000 -0400
73798 +++ linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-05-04 17:56:28.000000000 -0400
73799 @@ -495,7 +495,7 @@ next_sge:
73800 svc_rdma_put_context(ctxt, 0);
73801 goto out;
73802 }
73803 - atomic_inc(&rdma_stat_read);
73804 + atomic_inc_unchecked(&rdma_stat_read);
73805
73806 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
73807 chl_map->ch[ch_no].count -= read_wr.num_sge;
73808 @@ -606,7 +606,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
73809 dto_q);
73810 list_del_init(&ctxt->dto_q);
73811 } else {
73812 - atomic_inc(&rdma_stat_rq_starve);
73813 + atomic_inc_unchecked(&rdma_stat_rq_starve);
73814 clear_bit(XPT_DATA, &xprt->xpt_flags);
73815 ctxt = NULL;
73816 }
73817 @@ -626,7 +626,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
73818 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
73819 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
73820 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
73821 - atomic_inc(&rdma_stat_recv);
73822 + atomic_inc_unchecked(&rdma_stat_recv);
73823
73824 /* Build up the XDR from the receive buffers. */
73825 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
73826 diff -urNp linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_sendto.c linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_sendto.c
73827 --- linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-03-27 14:31:47.000000000 -0400
73828 +++ linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-05-04 17:56:28.000000000 -0400
73829 @@ -328,7 +328,7 @@ static int send_write(struct svcxprt_rdm
73830 write_wr.wr.rdma.remote_addr = to;
73831
73832 /* Post It */
73833 - atomic_inc(&rdma_stat_write);
73834 + atomic_inc_unchecked(&rdma_stat_write);
73835 if (svc_rdma_send(xprt, &write_wr))
73836 goto err;
73837 return 0;
73838 diff -urNp linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_transport.c linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_transport.c
73839 --- linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-03-27 14:31:47.000000000 -0400
73840 +++ linux-2.6.32.45/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-05-04 17:56:28.000000000 -0400
73841 @@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rd
73842 return;
73843
73844 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
73845 - atomic_inc(&rdma_stat_rq_poll);
73846 + atomic_inc_unchecked(&rdma_stat_rq_poll);
73847
73848 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
73849 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
73850 @@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rd
73851 }
73852
73853 if (ctxt)
73854 - atomic_inc(&rdma_stat_rq_prod);
73855 + atomic_inc_unchecked(&rdma_stat_rq_prod);
73856
73857 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
73858 /*
73859 @@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rd
73860 return;
73861
73862 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
73863 - atomic_inc(&rdma_stat_sq_poll);
73864 + atomic_inc_unchecked(&rdma_stat_sq_poll);
73865 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
73866 if (wc.status != IB_WC_SUCCESS)
73867 /* Close the transport */
73868 @@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rd
73869 }
73870
73871 if (ctxt)
73872 - atomic_inc(&rdma_stat_sq_prod);
73873 + atomic_inc_unchecked(&rdma_stat_sq_prod);
73874 }
73875
73876 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
73877 @@ -1260,7 +1260,7 @@ int svc_rdma_send(struct svcxprt_rdma *x
73878 spin_lock_bh(&xprt->sc_lock);
73879 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
73880 spin_unlock_bh(&xprt->sc_lock);
73881 - atomic_inc(&rdma_stat_sq_starve);
73882 + atomic_inc_unchecked(&rdma_stat_sq_starve);
73883
73884 /* See if we can opportunistically reap SQ WR to make room */
73885 sq_cq_reap(xprt);
73886 diff -urNp linux-2.6.32.45/net/sysctl_net.c linux-2.6.32.45/net/sysctl_net.c
73887 --- linux-2.6.32.45/net/sysctl_net.c 2011-03-27 14:31:47.000000000 -0400
73888 +++ linux-2.6.32.45/net/sysctl_net.c 2011-04-17 15:56:46.000000000 -0400
73889 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ct
73890 struct ctl_table *table)
73891 {
73892 /* Allow network administrator to have same access as root. */
73893 - if (capable(CAP_NET_ADMIN)) {
73894 + if (capable_nolog(CAP_NET_ADMIN)) {
73895 int mode = (table->mode >> 6) & 7;
73896 return (mode << 6) | (mode << 3) | mode;
73897 }
73898 diff -urNp linux-2.6.32.45/net/unix/af_unix.c linux-2.6.32.45/net/unix/af_unix.c
73899 --- linux-2.6.32.45/net/unix/af_unix.c 2011-05-10 22:12:02.000000000 -0400
73900 +++ linux-2.6.32.45/net/unix/af_unix.c 2011-07-18 18:17:33.000000000 -0400
73901 @@ -745,6 +745,12 @@ static struct sock *unix_find_other(stru
73902 err = -ECONNREFUSED;
73903 if (!S_ISSOCK(inode->i_mode))
73904 goto put_fail;
73905 +
73906 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
73907 + err = -EACCES;
73908 + goto put_fail;
73909 + }
73910 +
73911 u = unix_find_socket_byinode(net, inode);
73912 if (!u)
73913 goto put_fail;
73914 @@ -765,6 +771,13 @@ static struct sock *unix_find_other(stru
73915 if (u) {
73916 struct dentry *dentry;
73917 dentry = unix_sk(u)->dentry;
73918 +
73919 + if (!gr_handle_chroot_unix(u->sk_peercred.pid)) {
73920 + err = -EPERM;
73921 + sock_put(u);
73922 + goto fail;
73923 + }
73924 +
73925 if (dentry)
73926 touch_atime(unix_sk(u)->mnt, dentry);
73927 } else
73928 @@ -850,11 +863,18 @@ static int unix_bind(struct socket *sock
73929 err = security_path_mknod(&nd.path, dentry, mode, 0);
73930 if (err)
73931 goto out_mknod_drop_write;
73932 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
73933 + err = -EACCES;
73934 + goto out_mknod_drop_write;
73935 + }
73936 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
73937 out_mknod_drop_write:
73938 mnt_drop_write(nd.path.mnt);
73939 if (err)
73940 goto out_mknod_dput;
73941 +
73942 + gr_handle_create(dentry, nd.path.mnt);
73943 +
73944 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
73945 dput(nd.path.dentry);
73946 nd.path.dentry = dentry;
73947 @@ -2211,7 +2231,11 @@ static int unix_seq_show(struct seq_file
73948 unix_state_lock(s);
73949
73950 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
73951 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73952 + NULL,
73953 +#else
73954 s,
73955 +#endif
73956 atomic_read(&s->sk_refcnt),
73957 0,
73958 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
73959 diff -urNp linux-2.6.32.45/net/wireless/core.h linux-2.6.32.45/net/wireless/core.h
73960 --- linux-2.6.32.45/net/wireless/core.h 2011-03-27 14:31:47.000000000 -0400
73961 +++ linux-2.6.32.45/net/wireless/core.h 2011-08-23 21:22:38.000000000 -0400
73962 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
73963 struct mutex mtx;
73964
73965 /* rfkill support */
73966 - struct rfkill_ops rfkill_ops;
73967 + rfkill_ops_no_const rfkill_ops;
73968 struct rfkill *rfkill;
73969 struct work_struct rfkill_sync;
73970
73971 diff -urNp linux-2.6.32.45/net/wireless/wext.c linux-2.6.32.45/net/wireless/wext.c
73972 --- linux-2.6.32.45/net/wireless/wext.c 2011-03-27 14:31:47.000000000 -0400
73973 +++ linux-2.6.32.45/net/wireless/wext.c 2011-04-17 15:56:46.000000000 -0400
73974 @@ -816,8 +816,7 @@ static int ioctl_standard_iw_point(struc
73975 */
73976
73977 /* Support for very large requests */
73978 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
73979 - (user_length > descr->max_tokens)) {
73980 + if (user_length > descr->max_tokens) {
73981 /* Allow userspace to GET more than max so
73982 * we can support any size GET requests.
73983 * There is still a limit : -ENOMEM.
73984 @@ -854,22 +853,6 @@ static int ioctl_standard_iw_point(struc
73985 }
73986 }
73987
73988 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
73989 - /*
73990 - * If this is a GET, but not NOMAX, it means that the extra
73991 - * data is not bounded by userspace, but by max_tokens. Thus
73992 - * set the length to max_tokens. This matches the extra data
73993 - * allocation.
73994 - * The driver should fill it with the number of tokens it
73995 - * provided, and it may check iwp->length rather than having
73996 - * knowledge of max_tokens. If the driver doesn't change the
73997 - * iwp->length, this ioctl just copies back max_token tokens
73998 - * filled with zeroes. Hopefully the driver isn't claiming
73999 - * them to be valid data.
74000 - */
74001 - iwp->length = descr->max_tokens;
74002 - }
74003 -
74004 err = handler(dev, info, (union iwreq_data *) iwp, extra);
74005
74006 iwp->length += essid_compat;
74007 diff -urNp linux-2.6.32.45/net/xfrm/xfrm_policy.c linux-2.6.32.45/net/xfrm/xfrm_policy.c
74008 --- linux-2.6.32.45/net/xfrm/xfrm_policy.c 2011-03-27 14:31:47.000000000 -0400
74009 +++ linux-2.6.32.45/net/xfrm/xfrm_policy.c 2011-05-04 17:56:20.000000000 -0400
74010 @@ -586,7 +586,7 @@ int xfrm_policy_insert(int dir, struct x
74011 hlist_add_head(&policy->bydst, chain);
74012 xfrm_pol_hold(policy);
74013 net->xfrm.policy_count[dir]++;
74014 - atomic_inc(&flow_cache_genid);
74015 + atomic_inc_unchecked(&flow_cache_genid);
74016 if (delpol)
74017 __xfrm_policy_unlink(delpol, dir);
74018 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
74019 @@ -669,7 +669,7 @@ struct xfrm_policy *xfrm_policy_bysel_ct
74020 write_unlock_bh(&xfrm_policy_lock);
74021
74022 if (ret && delete) {
74023 - atomic_inc(&flow_cache_genid);
74024 + atomic_inc_unchecked(&flow_cache_genid);
74025 xfrm_policy_kill(ret);
74026 }
74027 return ret;
74028 @@ -710,7 +710,7 @@ struct xfrm_policy *xfrm_policy_byid(str
74029 write_unlock_bh(&xfrm_policy_lock);
74030
74031 if (ret && delete) {
74032 - atomic_inc(&flow_cache_genid);
74033 + atomic_inc_unchecked(&flow_cache_genid);
74034 xfrm_policy_kill(ret);
74035 }
74036 return ret;
74037 @@ -824,7 +824,7 @@ int xfrm_policy_flush(struct net *net, u
74038 }
74039
74040 }
74041 - atomic_inc(&flow_cache_genid);
74042 + atomic_inc_unchecked(&flow_cache_genid);
74043 out:
74044 write_unlock_bh(&xfrm_policy_lock);
74045 return err;
74046 @@ -1088,7 +1088,7 @@ int xfrm_policy_delete(struct xfrm_polic
74047 write_unlock_bh(&xfrm_policy_lock);
74048 if (pol) {
74049 if (dir < XFRM_POLICY_MAX)
74050 - atomic_inc(&flow_cache_genid);
74051 + atomic_inc_unchecked(&flow_cache_genid);
74052 xfrm_policy_kill(pol);
74053 return 0;
74054 }
74055 @@ -1477,7 +1477,7 @@ free_dst:
74056 goto out;
74057 }
74058
74059 -static int inline
74060 +static inline int
74061 xfrm_dst_alloc_copy(void **target, void *src, int size)
74062 {
74063 if (!*target) {
74064 @@ -1489,7 +1489,7 @@ xfrm_dst_alloc_copy(void **target, void
74065 return 0;
74066 }
74067
74068 -static int inline
74069 +static inline int
74070 xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
74071 {
74072 #ifdef CONFIG_XFRM_SUB_POLICY
74073 @@ -1501,7 +1501,7 @@ xfrm_dst_update_parent(struct dst_entry
74074 #endif
74075 }
74076
74077 -static int inline
74078 +static inline int
74079 xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
74080 {
74081 #ifdef CONFIG_XFRM_SUB_POLICY
74082 @@ -1537,7 +1537,7 @@ int __xfrm_lookup(struct net *net, struc
74083 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
74084
74085 restart:
74086 - genid = atomic_read(&flow_cache_genid);
74087 + genid = atomic_read_unchecked(&flow_cache_genid);
74088 policy = NULL;
74089 for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
74090 pols[pi] = NULL;
74091 @@ -1680,7 +1680,7 @@ restart:
74092 goto error;
74093 }
74094 if (nx == -EAGAIN ||
74095 - genid != atomic_read(&flow_cache_genid)) {
74096 + genid != atomic_read_unchecked(&flow_cache_genid)) {
74097 xfrm_pols_put(pols, npols);
74098 goto restart;
74099 }
74100 diff -urNp linux-2.6.32.45/net/xfrm/xfrm_user.c linux-2.6.32.45/net/xfrm/xfrm_user.c
74101 --- linux-2.6.32.45/net/xfrm/xfrm_user.c 2011-03-27 14:31:47.000000000 -0400
74102 +++ linux-2.6.32.45/net/xfrm/xfrm_user.c 2011-05-16 21:46:57.000000000 -0400
74103 @@ -1169,6 +1169,8 @@ static int copy_to_user_tmpl(struct xfrm
74104 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
74105 int i;
74106
74107 + pax_track_stack();
74108 +
74109 if (xp->xfrm_nr == 0)
74110 return 0;
74111
74112 @@ -1784,6 +1786,8 @@ static int xfrm_do_migrate(struct sk_buf
74113 int err;
74114 int n = 0;
74115
74116 + pax_track_stack();
74117 +
74118 if (attrs[XFRMA_MIGRATE] == NULL)
74119 return -EINVAL;
74120
74121 diff -urNp linux-2.6.32.45/samples/kobject/kset-example.c linux-2.6.32.45/samples/kobject/kset-example.c
74122 --- linux-2.6.32.45/samples/kobject/kset-example.c 2011-03-27 14:31:47.000000000 -0400
74123 +++ linux-2.6.32.45/samples/kobject/kset-example.c 2011-04-17 15:56:46.000000000 -0400
74124 @@ -87,7 +87,7 @@ static ssize_t foo_attr_store(struct kob
74125 }
74126
74127 /* Our custom sysfs_ops that we will associate with our ktype later on */
74128 -static struct sysfs_ops foo_sysfs_ops = {
74129 +static const struct sysfs_ops foo_sysfs_ops = {
74130 .show = foo_attr_show,
74131 .store = foo_attr_store,
74132 };
74133 diff -urNp linux-2.6.32.45/scripts/basic/fixdep.c linux-2.6.32.45/scripts/basic/fixdep.c
74134 --- linux-2.6.32.45/scripts/basic/fixdep.c 2011-03-27 14:31:47.000000000 -0400
74135 +++ linux-2.6.32.45/scripts/basic/fixdep.c 2011-04-17 15:56:46.000000000 -0400
74136 @@ -222,9 +222,9 @@ static void use_config(char *m, int slen
74137
74138 static void parse_config_file(char *map, size_t len)
74139 {
74140 - int *end = (int *) (map + len);
74141 + unsigned int *end = (unsigned int *) (map + len);
74142 /* start at +1, so that p can never be < map */
74143 - int *m = (int *) map + 1;
74144 + unsigned int *m = (unsigned int *) map + 1;
74145 char *p, *q;
74146
74147 for (; m < end; m++) {
74148 @@ -371,7 +371,7 @@ static void print_deps(void)
74149 static void traps(void)
74150 {
74151 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
74152 - int *p = (int *)test;
74153 + unsigned int *p = (unsigned int *)test;
74154
74155 if (*p != INT_CONF) {
74156 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
74157 diff -urNp linux-2.6.32.45/scripts/gcc-plugin.sh linux-2.6.32.45/scripts/gcc-plugin.sh
74158 --- linux-2.6.32.45/scripts/gcc-plugin.sh 1969-12-31 19:00:00.000000000 -0500
74159 +++ linux-2.6.32.45/scripts/gcc-plugin.sh 2011-08-23 20:24:19.000000000 -0400
74160 @@ -0,0 +1,2 @@
74161 +#!/bin/sh
74162 +echo "#include \"gcc-plugin.h\"" | $* -x c -shared - -o /dev/null -I`$* -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
74163 diff -urNp linux-2.6.32.45/scripts/Makefile.build linux-2.6.32.45/scripts/Makefile.build
74164 --- linux-2.6.32.45/scripts/Makefile.build 2011-03-27 14:31:47.000000000 -0400
74165 +++ linux-2.6.32.45/scripts/Makefile.build 2011-08-23 20:45:11.000000000 -0400
74166 @@ -59,7 +59,7 @@ endif
74167 endif
74168
74169 # Do not include host rules unless needed
74170 -ifneq ($(hostprogs-y)$(hostprogs-m),)
74171 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
74172 include scripts/Makefile.host
74173 endif
74174
74175 diff -urNp linux-2.6.32.45/scripts/Makefile.clean linux-2.6.32.45/scripts/Makefile.clean
74176 --- linux-2.6.32.45/scripts/Makefile.clean 2011-03-27 14:31:47.000000000 -0400
74177 +++ linux-2.6.32.45/scripts/Makefile.clean 2011-06-04 20:47:19.000000000 -0400
74178 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subd
74179 __clean-files := $(extra-y) $(always) \
74180 $(targets) $(clean-files) \
74181 $(host-progs) \
74182 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
74183 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
74184 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
74185
74186 # as clean-files is given relative to the current directory, this adds
74187 # a $(obj) prefix, except for absolute paths
74188 diff -urNp linux-2.6.32.45/scripts/Makefile.host linux-2.6.32.45/scripts/Makefile.host
74189 --- linux-2.6.32.45/scripts/Makefile.host 2011-03-27 14:31:47.000000000 -0400
74190 +++ linux-2.6.32.45/scripts/Makefile.host 2011-06-04 20:48:22.000000000 -0400
74191 @@ -31,6 +31,7 @@
74192 # Note: Shared libraries consisting of C++ files are not supported
74193
74194 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
74195 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
74196
74197 # C code
74198 # Executables compiled from a single .c file
74199 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(hos
74200 # Shared libaries (only .c supported)
74201 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
74202 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
74203 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
74204 # Remove .so files from "xxx-objs"
74205 host-cobjs := $(filter-out %.so,$(host-cobjs))
74206
74207 diff -urNp linux-2.6.32.45/scripts/mod/file2alias.c linux-2.6.32.45/scripts/mod/file2alias.c
74208 --- linux-2.6.32.45/scripts/mod/file2alias.c 2011-03-27 14:31:47.000000000 -0400
74209 +++ linux-2.6.32.45/scripts/mod/file2alias.c 2011-04-17 15:56:46.000000000 -0400
74210 @@ -72,7 +72,7 @@ static void device_id_check(const char *
74211 unsigned long size, unsigned long id_size,
74212 void *symval)
74213 {
74214 - int i;
74215 + unsigned int i;
74216
74217 if (size % id_size || size < id_size) {
74218 if (cross_build != 0)
74219 @@ -102,7 +102,7 @@ static void device_id_check(const char *
74220 /* USB is special because the bcdDevice can be matched against a numeric range */
74221 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
74222 static void do_usb_entry(struct usb_device_id *id,
74223 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
74224 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
74225 unsigned char range_lo, unsigned char range_hi,
74226 struct module *mod)
74227 {
74228 @@ -368,7 +368,7 @@ static void do_pnp_device_entry(void *sy
74229 for (i = 0; i < count; i++) {
74230 const char *id = (char *)devs[i].id;
74231 char acpi_id[sizeof(devs[0].id)];
74232 - int j;
74233 + unsigned int j;
74234
74235 buf_printf(&mod->dev_table_buf,
74236 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
74237 @@ -398,7 +398,7 @@ static void do_pnp_card_entries(void *sy
74238
74239 for (j = 0; j < PNP_MAX_DEVICES; j++) {
74240 const char *id = (char *)card->devs[j].id;
74241 - int i2, j2;
74242 + unsigned int i2, j2;
74243 int dup = 0;
74244
74245 if (!id[0])
74246 @@ -424,7 +424,7 @@ static void do_pnp_card_entries(void *sy
74247 /* add an individual alias for every device entry */
74248 if (!dup) {
74249 char acpi_id[sizeof(card->devs[0].id)];
74250 - int k;
74251 + unsigned int k;
74252
74253 buf_printf(&mod->dev_table_buf,
74254 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
74255 @@ -699,7 +699,7 @@ static void dmi_ascii_filter(char *d, co
74256 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
74257 char *alias)
74258 {
74259 - int i, j;
74260 + unsigned int i, j;
74261
74262 sprintf(alias, "dmi*");
74263
74264 diff -urNp linux-2.6.32.45/scripts/mod/modpost.c linux-2.6.32.45/scripts/mod/modpost.c
74265 --- linux-2.6.32.45/scripts/mod/modpost.c 2011-03-27 14:31:47.000000000 -0400
74266 +++ linux-2.6.32.45/scripts/mod/modpost.c 2011-07-06 19:53:33.000000000 -0400
74267 @@ -835,6 +835,7 @@ enum mismatch {
74268 INIT_TO_EXIT,
74269 EXIT_TO_INIT,
74270 EXPORT_TO_INIT_EXIT,
74271 + DATA_TO_TEXT
74272 };
74273
74274 struct sectioncheck {
74275 @@ -920,6 +921,12 @@ const struct sectioncheck sectioncheck[]
74276 .fromsec = { "__ksymtab*", NULL },
74277 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
74278 .mismatch = EXPORT_TO_INIT_EXIT
74279 +},
74280 +/* Do not reference code from writable data */
74281 +{
74282 + .fromsec = { DATA_SECTIONS, NULL },
74283 + .tosec = { TEXT_SECTIONS, NULL },
74284 + .mismatch = DATA_TO_TEXT
74285 }
74286 };
74287
74288 @@ -1024,10 +1031,10 @@ static Elf_Sym *find_elf_symbol(struct e
74289 continue;
74290 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
74291 continue;
74292 - if (sym->st_value == addr)
74293 - return sym;
74294 /* Find a symbol nearby - addr are maybe negative */
74295 d = sym->st_value - addr;
74296 + if (d == 0)
74297 + return sym;
74298 if (d < 0)
74299 d = addr - sym->st_value;
74300 if (d < distance) {
74301 @@ -1268,6 +1275,14 @@ static void report_sec_mismatch(const ch
74302 "Fix this by removing the %sannotation of %s "
74303 "or drop the export.\n",
74304 tosym, sec2annotation(tosec), sec2annotation(tosec), tosym);
74305 + case DATA_TO_TEXT:
74306 +/*
74307 + fprintf(stderr,
74308 + "The variable %s references\n"
74309 + "the %s %s%s%s\n",
74310 + fromsym, to, sec2annotation(tosec), tosym, to_p);
74311 +*/
74312 + break;
74313 case NO_MISMATCH:
74314 /* To get warnings on missing members */
74315 break;
74316 @@ -1495,7 +1510,7 @@ static void section_rel(const char *modn
74317 static void check_sec_ref(struct module *mod, const char *modname,
74318 struct elf_info *elf)
74319 {
74320 - int i;
74321 + unsigned int i;
74322 Elf_Shdr *sechdrs = elf->sechdrs;
74323
74324 /* Walk through all sections */
74325 @@ -1651,7 +1666,7 @@ void __attribute__((format(printf, 2, 3)
74326 va_end(ap);
74327 }
74328
74329 -void buf_write(struct buffer *buf, const char *s, int len)
74330 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
74331 {
74332 if (buf->size - buf->pos < len) {
74333 buf->size += len + SZ;
74334 @@ -1863,7 +1878,7 @@ static void write_if_changed(struct buff
74335 if (fstat(fileno(file), &st) < 0)
74336 goto close_write;
74337
74338 - if (st.st_size != b->pos)
74339 + if (st.st_size != (off_t)b->pos)
74340 goto close_write;
74341
74342 tmp = NOFAIL(malloc(b->pos));
74343 diff -urNp linux-2.6.32.45/scripts/mod/modpost.h linux-2.6.32.45/scripts/mod/modpost.h
74344 --- linux-2.6.32.45/scripts/mod/modpost.h 2011-03-27 14:31:47.000000000 -0400
74345 +++ linux-2.6.32.45/scripts/mod/modpost.h 2011-04-17 15:56:46.000000000 -0400
74346 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *e
74347
74348 struct buffer {
74349 char *p;
74350 - int pos;
74351 - int size;
74352 + unsigned int pos;
74353 + unsigned int size;
74354 };
74355
74356 void __attribute__((format(printf, 2, 3)))
74357 buf_printf(struct buffer *buf, const char *fmt, ...);
74358
74359 void
74360 -buf_write(struct buffer *buf, const char *s, int len);
74361 +buf_write(struct buffer *buf, const char *s, unsigned int len);
74362
74363 struct module {
74364 struct module *next;
74365 diff -urNp linux-2.6.32.45/scripts/mod/sumversion.c linux-2.6.32.45/scripts/mod/sumversion.c
74366 --- linux-2.6.32.45/scripts/mod/sumversion.c 2011-03-27 14:31:47.000000000 -0400
74367 +++ linux-2.6.32.45/scripts/mod/sumversion.c 2011-04-17 15:56:46.000000000 -0400
74368 @@ -455,7 +455,7 @@ static void write_version(const char *fi
74369 goto out;
74370 }
74371
74372 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
74373 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
74374 warn("writing sum in %s failed: %s\n",
74375 filename, strerror(errno));
74376 goto out;
74377 diff -urNp linux-2.6.32.45/scripts/package/mkspec linux-2.6.32.45/scripts/package/mkspec
74378 --- linux-2.6.32.45/scripts/package/mkspec 2011-03-27 14:31:47.000000000 -0400
74379 +++ linux-2.6.32.45/scripts/package/mkspec 2011-07-19 18:19:12.000000000 -0400
74380 @@ -70,7 +70,7 @@ echo 'mkdir -p $RPM_BUILD_ROOT/boot $RPM
74381 echo 'mkdir -p $RPM_BUILD_ROOT/lib/firmware'
74382 echo "%endif"
74383
74384 -echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{_smp_mflags} KBUILD_SRC= modules_install'
74385 +echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{?_smp_mflags} KBUILD_SRC= modules_install'
74386 echo "%ifarch ia64"
74387 echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/efi/vmlinuz-$KERNELRELEASE"
74388 echo 'ln -s '"efi/vmlinuz-$KERNELRELEASE" '$RPM_BUILD_ROOT'"/boot/"
74389 diff -urNp linux-2.6.32.45/scripts/pnmtologo.c linux-2.6.32.45/scripts/pnmtologo.c
74390 --- linux-2.6.32.45/scripts/pnmtologo.c 2011-03-27 14:31:47.000000000 -0400
74391 +++ linux-2.6.32.45/scripts/pnmtologo.c 2011-04-17 15:56:46.000000000 -0400
74392 @@ -237,14 +237,14 @@ static void write_header(void)
74393 fprintf(out, " * Linux logo %s\n", logoname);
74394 fputs(" */\n\n", out);
74395 fputs("#include <linux/linux_logo.h>\n\n", out);
74396 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
74397 + fprintf(out, "static unsigned char %s_data[] = {\n",
74398 logoname);
74399 }
74400
74401 static void write_footer(void)
74402 {
74403 fputs("\n};\n\n", out);
74404 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
74405 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
74406 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
74407 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
74408 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
74409 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
74410 fputs("\n};\n\n", out);
74411
74412 /* write logo clut */
74413 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
74414 + fprintf(out, "static unsigned char %s_clut[] = {\n",
74415 logoname);
74416 write_hex_cnt = 0;
74417 for (i = 0; i < logo_clutsize; i++) {
74418 diff -urNp linux-2.6.32.45/scripts/tags.sh linux-2.6.32.45/scripts/tags.sh
74419 --- linux-2.6.32.45/scripts/tags.sh 2011-03-27 14:31:47.000000000 -0400
74420 +++ linux-2.6.32.45/scripts/tags.sh 2011-06-07 18:06:04.000000000 -0400
74421 @@ -93,6 +93,11 @@ docscope()
74422 cscope -b -f cscope.out
74423 }
74424
74425 +dogtags()
74426 +{
74427 + all_sources | gtags -f -
74428 +}
74429 +
74430 exuberant()
74431 {
74432 all_sources | xargs $1 -a \
74433 @@ -164,6 +169,10 @@ case "$1" in
74434 docscope
74435 ;;
74436
74437 + "gtags")
74438 + dogtags
74439 + ;;
74440 +
74441 "tags")
74442 rm -f tags
74443 xtags ctags
74444 diff -urNp linux-2.6.32.45/security/capability.c linux-2.6.32.45/security/capability.c
74445 --- linux-2.6.32.45/security/capability.c 2011-03-27 14:31:47.000000000 -0400
74446 +++ linux-2.6.32.45/security/capability.c 2011-04-17 15:56:46.000000000 -0400
74447 @@ -890,7 +890,7 @@ static void cap_audit_rule_free(void *ls
74448 }
74449 #endif /* CONFIG_AUDIT */
74450
74451 -struct security_operations default_security_ops = {
74452 +struct security_operations default_security_ops __read_only = {
74453 .name = "default",
74454 };
74455
74456 diff -urNp linux-2.6.32.45/security/commoncap.c linux-2.6.32.45/security/commoncap.c
74457 --- linux-2.6.32.45/security/commoncap.c 2011-03-27 14:31:47.000000000 -0400
74458 +++ linux-2.6.32.45/security/commoncap.c 2011-08-17 19:22:13.000000000 -0400
74459 @@ -27,7 +27,7 @@
74460 #include <linux/sched.h>
74461 #include <linux/prctl.h>
74462 #include <linux/securebits.h>
74463 -
74464 +#include <net/sock.h>
74465 /*
74466 * If a non-root user executes a setuid-root binary in
74467 * !secure(SECURE_NOROOT) mode, then we raise capabilities.
74468 @@ -50,9 +50,18 @@ static void warn_setuid_and_fcaps_mixed(
74469 }
74470 }
74471
74472 +#ifdef CONFIG_NET
74473 +extern kernel_cap_t gr_cap_rtnetlink(struct sock *sk);
74474 +#endif
74475 +
74476 int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
74477 {
74478 +#ifdef CONFIG_NET
74479 + NETLINK_CB(skb).eff_cap = gr_cap_rtnetlink(sk);
74480 +#else
74481 NETLINK_CB(skb).eff_cap = current_cap();
74482 +#endif
74483 +
74484 return 0;
74485 }
74486
74487 @@ -582,6 +591,9 @@ int cap_bprm_secureexec(struct linux_bin
74488 {
74489 const struct cred *cred = current_cred();
74490
74491 + if (gr_acl_enable_at_secure())
74492 + return 1;
74493 +
74494 if (cred->uid != 0) {
74495 if (bprm->cap_effective)
74496 return 1;
74497 diff -urNp linux-2.6.32.45/security/integrity/ima/ima_api.c linux-2.6.32.45/security/integrity/ima/ima_api.c
74498 --- linux-2.6.32.45/security/integrity/ima/ima_api.c 2011-03-27 14:31:47.000000000 -0400
74499 +++ linux-2.6.32.45/security/integrity/ima/ima_api.c 2011-04-17 15:56:46.000000000 -0400
74500 @@ -74,7 +74,7 @@ void ima_add_violation(struct inode *ino
74501 int result;
74502
74503 /* can overflow, only indicator */
74504 - atomic_long_inc(&ima_htable.violations);
74505 + atomic_long_inc_unchecked(&ima_htable.violations);
74506
74507 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
74508 if (!entry) {
74509 diff -urNp linux-2.6.32.45/security/integrity/ima/ima_fs.c linux-2.6.32.45/security/integrity/ima/ima_fs.c
74510 --- linux-2.6.32.45/security/integrity/ima/ima_fs.c 2011-03-27 14:31:47.000000000 -0400
74511 +++ linux-2.6.32.45/security/integrity/ima/ima_fs.c 2011-04-17 15:56:46.000000000 -0400
74512 @@ -27,12 +27,12 @@
74513 static int valid_policy = 1;
74514 #define TMPBUFLEN 12
74515 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
74516 - loff_t *ppos, atomic_long_t *val)
74517 + loff_t *ppos, atomic_long_unchecked_t *val)
74518 {
74519 char tmpbuf[TMPBUFLEN];
74520 ssize_t len;
74521
74522 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
74523 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
74524 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
74525 }
74526
74527 diff -urNp linux-2.6.32.45/security/integrity/ima/ima.h linux-2.6.32.45/security/integrity/ima/ima.h
74528 --- linux-2.6.32.45/security/integrity/ima/ima.h 2011-03-27 14:31:47.000000000 -0400
74529 +++ linux-2.6.32.45/security/integrity/ima/ima.h 2011-04-17 15:56:46.000000000 -0400
74530 @@ -84,8 +84,8 @@ void ima_add_violation(struct inode *ino
74531 extern spinlock_t ima_queue_lock;
74532
74533 struct ima_h_table {
74534 - atomic_long_t len; /* number of stored measurements in the list */
74535 - atomic_long_t violations;
74536 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
74537 + atomic_long_unchecked_t violations;
74538 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
74539 };
74540 extern struct ima_h_table ima_htable;
74541 diff -urNp linux-2.6.32.45/security/integrity/ima/ima_queue.c linux-2.6.32.45/security/integrity/ima/ima_queue.c
74542 --- linux-2.6.32.45/security/integrity/ima/ima_queue.c 2011-03-27 14:31:47.000000000 -0400
74543 +++ linux-2.6.32.45/security/integrity/ima/ima_queue.c 2011-04-17 15:56:46.000000000 -0400
74544 @@ -78,7 +78,7 @@ static int ima_add_digest_entry(struct i
74545 INIT_LIST_HEAD(&qe->later);
74546 list_add_tail_rcu(&qe->later, &ima_measurements);
74547
74548 - atomic_long_inc(&ima_htable.len);
74549 + atomic_long_inc_unchecked(&ima_htable.len);
74550 key = ima_hash_key(entry->digest);
74551 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
74552 return 0;
74553 diff -urNp linux-2.6.32.45/security/Kconfig linux-2.6.32.45/security/Kconfig
74554 --- linux-2.6.32.45/security/Kconfig 2011-03-27 14:31:47.000000000 -0400
74555 +++ linux-2.6.32.45/security/Kconfig 2011-07-06 19:58:11.000000000 -0400
74556 @@ -4,6 +4,555 @@
74557
74558 menu "Security options"
74559
74560 +source grsecurity/Kconfig
74561 +
74562 +menu "PaX"
74563 +
74564 + config ARCH_TRACK_EXEC_LIMIT
74565 + bool
74566 +
74567 + config PAX_PER_CPU_PGD
74568 + bool
74569 +
74570 + config TASK_SIZE_MAX_SHIFT
74571 + int
74572 + depends on X86_64
74573 + default 47 if !PAX_PER_CPU_PGD
74574 + default 42 if PAX_PER_CPU_PGD
74575 +
74576 + config PAX_ENABLE_PAE
74577 + bool
74578 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
74579 +
74580 +config PAX
74581 + bool "Enable various PaX features"
74582 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
74583 + help
74584 + This allows you to enable various PaX features. PaX adds
74585 + intrusion prevention mechanisms to the kernel that reduce
74586 + the risks posed by exploitable memory corruption bugs.
74587 +
74588 +menu "PaX Control"
74589 + depends on PAX
74590 +
74591 +config PAX_SOFTMODE
74592 + bool 'Support soft mode'
74593 + select PAX_PT_PAX_FLAGS
74594 + help
74595 + Enabling this option will allow you to run PaX in soft mode, that
74596 + is, PaX features will not be enforced by default, only on executables
74597 + marked explicitly. You must also enable PT_PAX_FLAGS support as it
74598 + is the only way to mark executables for soft mode use.
74599 +
74600 + Soft mode can be activated by using the "pax_softmode=1" kernel command
74601 + line option on boot. Furthermore you can control various PaX features
74602 + at runtime via the entries in /proc/sys/kernel/pax.
74603 +
74604 +config PAX_EI_PAX
74605 + bool 'Use legacy ELF header marking'
74606 + help
74607 + Enabling this option will allow you to control PaX features on
74608 + a per executable basis via the 'chpax' utility available at
74609 + http://pax.grsecurity.net/. The control flags will be read from
74610 + an otherwise reserved part of the ELF header. This marking has
74611 + numerous drawbacks (no support for soft-mode, toolchain does not
74612 + know about the non-standard use of the ELF header) therefore it
74613 + has been deprecated in favour of PT_PAX_FLAGS support.
74614 +
74615 + Note that if you enable PT_PAX_FLAGS marking support as well,
74616 + the PT_PAX_FLAG marks will override the legacy EI_PAX marks.
74617 +
74618 +config PAX_PT_PAX_FLAGS
74619 + bool 'Use ELF program header marking'
74620 + help
74621 + Enabling this option will allow you to control PaX features on
74622 + a per executable basis via the 'paxctl' utility available at
74623 + http://pax.grsecurity.net/. The control flags will be read from
74624 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
74625 + has the benefits of supporting both soft mode and being fully
74626 + integrated into the toolchain (the binutils patch is available
74627 + from http://pax.grsecurity.net).
74628 +
74629 + If your toolchain does not support PT_PAX_FLAGS markings,
74630 + you can create one in most cases with 'paxctl -C'.
74631 +
74632 + Note that if you enable the legacy EI_PAX marking support as well,
74633 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
74634 +
74635 +choice
74636 + prompt 'MAC system integration'
74637 + default PAX_HAVE_ACL_FLAGS
74638 + help
74639 + Mandatory Access Control systems have the option of controlling
74640 + PaX flags on a per executable basis, choose the method supported
74641 + by your particular system.
74642 +
74643 + - "none": if your MAC system does not interact with PaX,
74644 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
74645 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
74646 +
74647 + NOTE: this option is for developers/integrators only.
74648 +
74649 + config PAX_NO_ACL_FLAGS
74650 + bool 'none'
74651 +
74652 + config PAX_HAVE_ACL_FLAGS
74653 + bool 'direct'
74654 +
74655 + config PAX_HOOK_ACL_FLAGS
74656 + bool 'hook'
74657 +endchoice
74658 +
74659 +endmenu
74660 +
74661 +menu "Non-executable pages"
74662 + depends on PAX
74663 +
74664 +config PAX_NOEXEC
74665 + bool "Enforce non-executable pages"
74666 + depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
74667 + help
74668 + By design some architectures do not allow for protecting memory
74669 + pages against execution or even if they do, Linux does not make
74670 + use of this feature. In practice this means that if a page is
74671 + readable (such as the stack or heap) it is also executable.
74672 +
74673 + There is a well known exploit technique that makes use of this
74674 + fact and a common programming mistake where an attacker can
74675 + introduce code of his choice somewhere in the attacked program's
74676 + memory (typically the stack or the heap) and then execute it.
74677 +
74678 + If the attacked program was running with different (typically
74679 + higher) privileges than that of the attacker, then he can elevate
74680 + his own privilege level (e.g. get a root shell, write to files for
74681 + which he does not have write access to, etc).
74682 +
74683 + Enabling this option will let you choose from various features
74684 + that prevent the injection and execution of 'foreign' code in
74685 + a program.
74686 +
74687 + This will also break programs that rely on the old behaviour and
74688 + expect that dynamically allocated memory via the malloc() family
74689 + of functions is executable (which it is not). Notable examples
74690 + are the XFree86 4.x server, the java runtime and wine.
74691 +
74692 +config PAX_PAGEEXEC
74693 + bool "Paging based non-executable pages"
74694 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
74695 + select S390_SWITCH_AMODE if S390
74696 + select S390_EXEC_PROTECT if S390
74697 + select ARCH_TRACK_EXEC_LIMIT if X86_32
74698 + help
74699 + This implementation is based on the paging feature of the CPU.
74700 + On i386 without hardware non-executable bit support there is a
74701 + variable but usually low performance impact, however on Intel's
74702 + P4 core based CPUs it is very high so you should not enable this
74703 + for kernels meant to be used on such CPUs.
74704 +
74705 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
74706 + with hardware non-executable bit support there is no performance
74707 + impact, on ppc the impact is negligible.
74708 +
74709 + Note that several architectures require various emulations due to
74710 + badly designed userland ABIs, this will cause a performance impact
74711 + but will disappear as soon as userland is fixed. For example, ppc
74712 + userland MUST have been built with secure-plt by a recent toolchain.
74713 +
74714 +config PAX_SEGMEXEC
74715 + bool "Segmentation based non-executable pages"
74716 + depends on PAX_NOEXEC && X86_32
74717 + help
74718 + This implementation is based on the segmentation feature of the
74719 + CPU and has a very small performance impact, however applications
74720 + will be limited to a 1.5 GB address space instead of the normal
74721 + 3 GB.
74722 +
74723 +config PAX_EMUTRAMP
74724 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
74725 + default y if PARISC
74726 + help
74727 + There are some programs and libraries that for one reason or
74728 + another attempt to execute special small code snippets from
74729 + non-executable memory pages. Most notable examples are the
74730 + signal handler return code generated by the kernel itself and
74731 + the GCC trampolines.
74732 +
74733 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
74734 + such programs will no longer work under your kernel.
74735 +
74736 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
74737 + utilities to enable trampoline emulation for the affected programs
74738 + yet still have the protection provided by the non-executable pages.
74739 +
74740 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
74741 + your system will not even boot.
74742 +
74743 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
74744 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
74745 + for the affected files.
74746 +
74747 + NOTE: enabling this feature *may* open up a loophole in the
74748 + protection provided by non-executable pages that an attacker
74749 + could abuse. Therefore the best solution is to not have any
74750 + files on your system that would require this option. This can
74751 + be achieved by not using libc5 (which relies on the kernel
74752 + signal handler return code) and not using or rewriting programs
74753 + that make use of the nested function implementation of GCC.
74754 + Skilled users can just fix GCC itself so that it implements
74755 + nested function calls in a way that does not interfere with PaX.
74756 +
74757 +config PAX_EMUSIGRT
74758 + bool "Automatically emulate sigreturn trampolines"
74759 + depends on PAX_EMUTRAMP && PARISC
74760 + default y
74761 + help
74762 + Enabling this option will have the kernel automatically detect
74763 + and emulate signal return trampolines executing on the stack
74764 + that would otherwise lead to task termination.
74765 +
74766 + This solution is intended as a temporary one for users with
74767 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
74768 + Modula-3 runtime, etc) or executables linked to such, basically
74769 + everything that does not specify its own SA_RESTORER function in
74770 + normal executable memory like glibc 2.1+ does.
74771 +
74772 + On parisc you MUST enable this option, otherwise your system will
74773 + not even boot.
74774 +
74775 + NOTE: this feature cannot be disabled on a per executable basis
74776 + and since it *does* open up a loophole in the protection provided
74777 + by non-executable pages, the best solution is to not have any
74778 + files on your system that would require this option.
74779 +
74780 +config PAX_MPROTECT
74781 + bool "Restrict mprotect()"
74782 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
74783 + help
74784 + Enabling this option will prevent programs from
74785 + - changing the executable status of memory pages that were
74786 + not originally created as executable,
74787 + - making read-only executable pages writable again,
74788 + - creating executable pages from anonymous memory,
74789 + - making read-only-after-relocations (RELRO) data pages writable again.
74790 +
74791 + You should say Y here to complete the protection provided by
74792 + the enforcement of non-executable pages.
74793 +
74794 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
74795 + this feature on a per file basis.
74796 +
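For reference, a minimal userspace sketch (not part of the patch) of the
write-then-execute transition this option forbids; on a stock kernel the
mprotect() below succeeds, while under PAX_MPROTECT it is expected to fail
with EPERM:

/* illustrative only */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        const size_t len = 4096;
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }
        memset(p, 0xc3, len);                   /* stand-in for generated code */
        if (mprotect(p, len, PROT_READ | PROT_EXEC)) {
                perror("mprotect");             /* EPERM expected under PAX_MPROTECT */
                munmap(p, len);
                return 1;
        }
        puts("page became executable");
        munmap(p, len);
        return 0;
}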
74797 +config PAX_MPROTECT_COMPAT
74798 + bool "Use legacy/compat protection demoting (read help)"
74799 + depends on PAX_MPROTECT
74800 + default n
74801 + help
74802 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
74803 + by sending the proper error code to the application. For some broken
74804 + userland, this can cause problems with Python or other applications. The
74805 + current implementation however allows for applications like clamav to
74806 + detect if JIT compilation/execution is allowed and to fall back gracefully
74807 + to an interpreter-based mode if it does not. While we encourage everyone
74808 + to use the current implementation as-is and push upstream to fix broken
74809 + userland (note that the RWX logging option can assist with this), in some
74810 + environments this may not be possible. Having to disable MPROTECT
74811 + completely on certain binaries reduces the security benefit of PaX,
74812 + so this option is provided for those environments to revert to the old
74813 + behavior.
74814 +
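The clamav-style fallback described above can be sketched in userspace roughly
like this (illustrative only): probe whether an RWX mapping is allowed and
pick the JIT or the interpreter path accordingly.

#include <stdio.h>
#include <sys/mman.h>

static int rwx_allowed(void)
{
        void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
                return 0;       /* e.g. EPERM when MPROTECT denies RWX */
        munmap(p, 4096);
        return 1;
}

int main(void)
{
        if (rwx_allowed())
                puts("using the JIT backend");
        else
                puts("falling back to the interpreter");
        return 0;
}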
74815 +config PAX_ELFRELOCS
74816 + bool "Allow ELF text relocations (read help)"
74817 + depends on PAX_MPROTECT
74818 + default n
74819 + help
74820 + Non-executable pages and mprotect() restrictions are effective
74821 + in preventing the introduction of new executable code into an
74822 + attacked task's address space. There remain only two avenues
74823 + for this kind of attack: if the attacker can execute already
74824 + existing code in the attacked task then he can either have it
74825 + create and mmap() a file containing his code or have it mmap()
74826 + an already existing ELF library that does not have position
74827 + independent code in it and use mprotect() on it to make it
74828 + writable and copy his code there. While protecting against
74829 + the former approach is beyond PaX, the latter can be prevented
74830 + by having only PIC ELF libraries on one's system (which do not
74831 + need to relocate their code). If you are sure this is your case,
74832 + as is the case with all modern Linux distributions, then leave
74833 + this option disabled. You should say 'n' here.
74834 +
74835 +config PAX_ETEXECRELOCS
74836 + bool "Allow ELF ET_EXEC text relocations"
74837 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
74838 + select PAX_ELFRELOCS
74839 + default y
74840 + help
74841 + On some architectures there are incorrectly created applications
74842 + that require text relocations and would not work without enabling
74843 + this option. If you are an alpha, ia64 or parisc user, you should
74844 + enable this option and disable it once you have made sure that
74845 + none of your applications need it.
74846 +
74847 +config PAX_EMUPLT
74848 + bool "Automatically emulate ELF PLT"
74849 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
74850 + default y
74851 + help
74852 + Enabling this option will have the kernel automatically detect
74853 + and emulate the Procedure Linkage Table entries in ELF files.
74854 + On some architectures such entries live in writable memory, which becomes
74855 + non-executable under PaX, so executing them terminates the task. Therefore
74856 + it is mandatory that you enable this option on alpha, parisc,
74857 + sparc and sparc64, otherwise your system would not even boot.
74858 +
74859 + NOTE: this feature *does* open up a loophole in the protection
74860 + provided by the non-executable pages, therefore the proper
74861 + solution is to modify the toolchain to produce a PLT that does
74862 + not need to be writable.
74863 +
74864 +config PAX_DLRESOLVE
74865 + bool 'Emulate old glibc resolver stub'
74866 + depends on PAX_EMUPLT && SPARC
74867 + default n
74868 + help
74869 + This option is needed if userland has an old glibc (before 2.4)
74870 + that puts a 'save' instruction into the runtime generated resolver
74871 + stub that needs special emulation.
74872 +
74873 +config PAX_KERNEXEC
74874 + bool "Enforce non-executable kernel pages"
74875 + depends on PAX_NOEXEC && (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
74876 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
74877 + help
74878 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
74879 + that is, enabling this option will make it harder to inject
74880 + and execute 'foreign' code in kernel memory itself.
74881 +
74882 + Note that on x86_64 kernels there is a known regression when
74883 + this feature and KVM/VMX are both enabled in the host kernel.
74884 +
74885 +config PAX_KERNEXEC_MODULE_TEXT
74886 + int "Minimum amount of memory reserved for module code"
74887 + default "4"
74888 + depends on PAX_KERNEXEC && X86_32 && MODULES
74889 + help
74890 + Due to implementation details the kernel must reserve a fixed
74891 + amount of memory for module code at compile time that cannot be
74892 + changed at runtime. Here you can specify the minimum amount
74893 + in MB that will be reserved. Due to the same implementation
74894 + details this size will always be rounded up to the next 2/4 MB
74895 + boundary (depending on PAE), so the memory actually available for
74896 + module code will usually be more than this minimum.
74897 +
74898 + The default 4 MB should be enough for most users but if you have
74899 + an excessive number of modules (e.g., most distribution configs
74900 + compile many drivers as modules) or use huge modules such as
74901 + nvidia's kernel driver, you will need to adjust this amount.
74902 + A good rule of thumb is to look at your currently loaded kernel
74903 + modules and add up their sizes.
74904 +
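The rule of thumb above can be automated. A small sketch (illustrative only;
it assumes the usual /proc/modules format of "name size ..." per line) that
sums the sizes of the currently loaded modules:

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/modules", "r");
        char name[64];
        unsigned long size, total = 0;

        if (!f) {
                perror("/proc/modules");
                return 1;
        }
        while (fscanf(f, "%63s %lu %*[^\n]", name, &size) == 2)
                total += size;          /* second field is the module size */
        fclose(f);
        printf("loaded modules: %lu bytes (~%lu MB)\n", total, total >> 20);
        return 0;
}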
74905 +endmenu
74906 +
74907 +menu "Address Space Layout Randomization"
74908 + depends on PAX
74909 +
74910 +config PAX_ASLR
74911 + bool "Address Space Layout Randomization"
74912 + depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
74913 + help
74914 + Many if not most exploit techniques rely on the knowledge of
74915 + certain addresses in the attacked program. The following options
74916 + will allow the kernel to apply a certain amount of randomization
74917 + to specific parts of the program thereby forcing an attacker to
74918 + guess them in most cases. Any failed guess will most likely crash
74919 + the attacked program which allows the kernel to detect such attempts
74920 + and react to them. PaX itself provides no reaction mechanisms;
74921 + instead, it is strongly encouraged that you make use of Nergal's
74922 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
74923 + (http://www.grsecurity.net/) built-in crash detection features or
74924 + develop one yourself.
74925 +
74926 + By saying Y here you can choose to randomize the following areas:
74927 + - top of the task's kernel stack
74928 + - top of the task's userland stack
74929 + - base address for mmap() requests that do not specify one
74930 + (this includes all libraries)
74931 + - base address of the main executable
74932 +
74933 + It is strongly recommended to say Y here as address space layout
74934 + randomization has negligible impact on performance yet it provides
74935 + a very effective protection.
74936 +
74937 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
74938 + this feature on a per file basis.
74939 +
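A quick way to observe the effect (illustrative only): run the sketch below a
few times and compare the printed addresses. With ASLR active the stack, mmap
and (for ET_DYN executables) text addresses change between runs.

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
        int on_stack = 0;
        void *on_heap = malloc(16);
        void *mapped = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        printf("text : %p\n", (void *)main);
        printf("stack: %p\n", (void *)&on_stack);
        printf("heap : %p\n", on_heap);
        printf("mmap : %p\n", mapped);

        free(on_heap);
        if (mapped != MAP_FAILED)
                munmap(mapped, 4096);
        return 0;
}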
74940 +config PAX_RANDKSTACK
74941 + bool "Randomize kernel stack base"
74942 + depends on PAX_ASLR && X86_TSC && X86
74943 + help
74944 + By saying Y here the kernel will randomize every task's kernel
74945 + stack on every system call. This will not only force an attacker
74946 + to guess it but also prevent him from making use of possible
74947 + leaked information about it.
74948 +
74949 + Since the kernel stack is a rather scarce resource, randomization
74950 + may cause unexpected stack overflows, therefore you should very
74951 + carefully test your system. Note that once enabled in the kernel
74952 + configuration, this feature cannot be disabled on a per file basis.
74953 +
74954 +config PAX_RANDUSTACK
74955 + bool "Randomize user stack base"
74956 + depends on PAX_ASLR
74957 + help
74958 + By saying Y here the kernel will randomize every task's userland
74959 + stack. The randomization is done in two steps where the second
74960 + one may apply a large shift to the top of the stack and
74961 + cause problems for programs that want to use lots of memory (more
74962 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
74963 + For this reason the second step can be controlled by 'chpax' or
74964 + 'paxctl' on a per file basis.
74965 +
74966 +config PAX_RANDMMAP
74967 + bool "Randomize mmap() base"
74968 + depends on PAX_ASLR
74969 + help
74970 + By saying Y here the kernel will use a randomized base address for
74971 + mmap() requests that do not specify one themselves. As a result
74972 + all dynamically loaded libraries will appear at random addresses
74973 + and therefore be harder to exploit by a technique where an attacker
74974 + attempts to execute library code for his purposes (e.g. spawn a
74975 + shell from an exploited program that is running at an elevated
74976 + privilege level).
74977 +
74978 + Furthermore, if a program is relinked as a dynamic ELF file, its
74979 + base address will be randomized as well, completing the full
74980 + randomization of the address space layout. Attacking such programs
74981 + becomes a guessing game. You can find an example of doing this at
74982 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
74983 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
74984 +
74985 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
74986 + feature on a per file basis.
74987 +
74988 +endmenu
74989 +
74990 +menu "Miscellaneous hardening features"
74991 +
74992 +config PAX_MEMORY_SANITIZE
74993 + bool "Sanitize all freed memory"
74994 + help
74995 + By saying Y here the kernel will erase memory pages as soon as they
74996 + are freed. This in turn reduces the lifetime of data stored in the
74997 + pages, making it less likely that sensitive information such as
74998 + passwords, cryptographic secrets, etc stay in memory for too long.
74999 +
75000 + This is especially useful for programs whose runtime is short;
75001 + long-lived processes and the kernel itself benefit from this as long
75002 + as they operate on whole memory pages and ensure timely freeing of
75003 + pages that may hold sensitive information.
75004 +
75005 + The tradeoff is a performance impact: on a single-CPU system kernel
75006 + compilation sees a 3% slowdown; other systems and workloads may vary,
75007 + so you are advised to test this feature on your expected workload
75008 + before deploying it.
75009 +
75010 + Note that this feature does not protect data stored in live pages,
75011 + e.g., process memory swapped to disk may stay there for a long time.
75012 +
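The same idea in userspace terms (a rough analogue, not the kernel page
allocator change): wipe an object before handing it back to the allocator so
its contents cannot linger in reused memory.

#include <stdlib.h>
#include <string.h>

/* analogue of sanitize-on-free: erase the object before freeing it.
 * A hardened version would use explicit_bzero() so the compiler cannot
 * drop the wipe as a dead store. */
static void sanitizing_free(void *p, size_t len)
{
        if (!p)
                return;
        memset(p, 0, len);
        free(p);
}

int main(void)
{
        char *secret = malloc(64);

        if (!secret)
                return 1;
        strcpy(secret, "correct horse battery staple");
        sanitizing_free(secret, 64);
        return 0;
}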
75013 +config PAX_MEMORY_STACKLEAK
75014 + bool "Sanitize kernel stack"
75015 + depends on X86
75016 + help
75017 + By saying Y here the kernel will erase the kernel stack before it
75018 + returns from a system call. This in turn reduces the information
75019 + that a kernel stack leak bug can reveal.
75020 +
75021 + Note that such a bug can still leak information that was put on
75022 + the stack by the current system call (the one eventually triggering
75023 + the bug) but traces of earlier system calls on the kernel stack
75024 + cannot leak anymore.
75025 +
75026 + The tradeoff is a performance impact: on a single-CPU system kernel
75027 + compilation sees a 1% slowdown; other systems and workloads may vary,
75028 + so you are advised to test this feature on your expected workload
75029 + before deploying it.
75030 +
75031 + Note: full support for this feature requires gcc with plugin support
75032 + so make sure your compiler is at least gcc 4.5.0 (cross compilation
75033 + is not supported). Using older gcc versions means that functions
75034 + with large enough stack frames may leave uninitialized memory behind
75035 + that may be exposed to a later syscall leaking the stack.
75036 +
75037 +config PAX_MEMORY_UDEREF
75038 + bool "Prevent invalid userland pointer dereference"
75039 + depends on X86 && !UML_X86 && !XEN
75040 + select PAX_PER_CPU_PGD if X86_64
75041 + help
75042 + By saying Y here the kernel will be prevented from dereferencing
75043 + userland pointers in contexts where the kernel expects only kernel
75044 + pointers. This is both a useful runtime debugging feature and a
75045 + security measure that prevents exploiting a class of kernel bugs.
75046 +
75047 + The tradeoff is that some virtualization solutions may experience
75048 + a huge slowdown and therefore you should not enable this feature
75049 + for kernels meant to run in such environments. Whether a given VM
75050 + solution is affected or not is best determined by simply trying it
75051 + out; the performance impact will be obvious right from boot, as this
75052 + mechanism engages very early on.
75053 + VMs running on CPUs without hardware virtualization support (i.e.,
75054 + the majority of IA-32 CPUs) will likely experience the slowdown.
75055 +
75056 +config PAX_REFCOUNT
75057 + bool "Prevent various kernel object reference counter overflows"
75058 + depends on GRKERNSEC && (X86 || SPARC64)
75059 + help
75060 + By saying Y here the kernel will detect and prevent overflowing
75061 + various (but not all) kinds of object reference counters. Such
75062 + overflows normally occur only due to bugs and are often, if
75063 + not always, exploitable.
75064 +
75065 + The tradeoff is that data structures protected by an overflowed
75066 + refcount will never be freed and therefore will leak memory. Note
75067 + that this leak also happens even without this protection but in
75068 + that case the overflow can eventually trigger the freeing of the
75069 + data structure while it is still being used elsewhere, resulting
75070 + in the exploitable situation that this feature prevents.
75071 +
75072 + Since this has a negligible performance impact, you should enable
75073 + this feature.
75074 +
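The overflow-versus-leak tradeoff can be sketched in plain C (illustrative
only, not the kernel's atomic implementation): a counter that saturates
instead of wrapping never triggers a premature free, at the cost of leaking
the object.

#include <limits.h>
#include <stdio.h>

struct obj {
        unsigned int refcount;
};

static int obj_get(struct obj *o)
{
        if (o->refcount == UINT_MAX) {
                fprintf(stderr, "refcount saturated, refusing new reference\n");
                return 0;               /* never wrap back to a small value */
        }
        o->refcount++;
        return 1;
}

static void obj_put(struct obj *o)
{
        if (o->refcount == UINT_MAX)
                return;                 /* saturated objects are leaked, not freed */
        if (--o->refcount == 0)
                puts("last reference dropped, object may be freed");
}

int main(void)
{
        struct obj o = { .refcount = 1 };

        obj_get(&o);
        obj_put(&o);
        obj_put(&o);                    /* count reaches zero here */
        return 0;
}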
75075 +config PAX_USERCOPY
75076 + bool "Harden heap object copies between kernel and userland"
75077 + depends on X86 || PPC || SPARC || ARM
75078 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
75079 + help
75080 + By saying Y here the kernel will enforce the size of heap objects
75081 + when they are copied in either direction between the kernel and
75082 + userland, even if only a part of the heap object is copied.
75083 +
75084 + Specifically, this checking prevents information leaking from the
75085 + kernel heap during kernel to userland copies (if the kernel heap
75086 + object is otherwise fully initialized) and prevents kernel heap
75087 + overflows during userland to kernel copies.
75088 +
75089 + Note that the current implementation provides the strictest bounds
75090 + checks for the SLUB allocator.
75091 +
75092 + Enabling this option also enables per-slab cache protection against
75093 + data in a given cache being copied into or out of userland via the
75094 + usercopy accessors. Though the whitelist of regions will be reduced over
75095 + time, it notably protects important data structures like task structs.
75096 +
75097 +
75098 + If frame pointers are enabled on x86, this option will also
75099 + restrict copies into and out of the kernel stack to local variables
75100 + within a single frame.
75101 +
75102 + Since this has a negligible performance impact, you should enable
75103 + this feature.
75104 +
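The core check can be pictured in userspace terms (a rough analogue, not the
kernel code): before copying, compare the requested length against the size
of the backing heap object and refuse anything larger.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct tracked_buf {
        size_t size;
        char data[];
};

static int checked_copy(void *dst, const struct tracked_buf *src, size_t len)
{
        if (len > src->size) {
                fprintf(stderr, "refusing %zu-byte copy from %zu-byte object\n",
                        len, src->size);
                return -1;              /* would read past the heap object */
        }
        memcpy(dst, src->data, len);
        return 0;
}

int main(void)
{
        struct tracked_buf *b = malloc(sizeof(*b) + 16);
        char out[64];

        if (!b)
                return 1;
        b->size = 16;
        memset(b->data, 'A', b->size);

        checked_copy(out, b, 8);        /* within bounds, allowed */
        checked_copy(out, b, 64);       /* over-sized, rejected */
        free(b);
        return 0;
}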
75105 +endmenu
75106 +
75107 +endmenu
75108 +
75109 config KEYS
75110 bool "Enable access key retention support"
75111 help
75112 @@ -146,7 +695,7 @@ config INTEL_TXT
75113 config LSM_MMAP_MIN_ADDR
75114 int "Low address space for LSM to protect from user allocation"
75115 depends on SECURITY && SECURITY_SELINUX
75116 - default 65536
75117 + default 32768
75118 help
75119 This is the portion of low virtual memory which should be protected
75120 from userspace allocation. Keeping a user from writing to low pages
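A quick probe of the limit this option configures (illustrative only): an
unprivileged process asking for a fixed mapping below mmap_min_addr should be
refused.

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        /* 0x1000 is below both the old 65536 and the new 32768 default */
        void *p = mmap((void *)0x1000, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

        if (p == MAP_FAILED)
                perror("mmap below mmap_min_addr");     /* EPERM expected */
        else
                printf("low mapping granted at %p\n", p);
        return 0;
}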
75121 diff -urNp linux-2.6.32.45/security/keys/keyring.c linux-2.6.32.45/security/keys/keyring.c
75122 --- linux-2.6.32.45/security/keys/keyring.c 2011-03-27 14:31:47.000000000 -0400
75123 +++ linux-2.6.32.45/security/keys/keyring.c 2011-04-18 22:03:00.000000000 -0400
75124 @@ -214,15 +214,15 @@ static long keyring_read(const struct ke
75125 ret = -EFAULT;
75126
75127 for (loop = 0; loop < klist->nkeys; loop++) {
75128 + key_serial_t serial;
75129 key = klist->keys[loop];
75130 + serial = key->serial;
75131
75132 tmp = sizeof(key_serial_t);
75133 if (tmp > buflen)
75134 tmp = buflen;
75135
75136 - if (copy_to_user(buffer,
75137 - &key->serial,
75138 - tmp) != 0)
75139 + if (copy_to_user(buffer, &serial, tmp))
75140 goto error;
75141
75142 buflen -= tmp;
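The hunk above bounces key->serial through a stack local before the user copy,
presumably so copy_to_user() sources a small stack variable rather than the
interior of a heap object, which sits more comfortably with the PAX_USERCOPY
checks elsewhere in this patch. A rough userspace analogue of the bounce
pattern (illustrative only; copy_out() merely stands in for copy_to_user()):

#include <stdio.h>
#include <string.h>

struct key {
        int serial;
        char description[32];   /* unrelated data that must not be exposed */
};

/* stand-in for copy_to_user(): copies at most 'len' bytes to the caller */
static int copy_out(void *dst, const void *src, size_t len)
{
        memcpy(dst, src, len);
        return 0;
}

int main(void)
{
        struct key k = { .serial = 42, .description = "user keyring" };
        int serial = k.serial;                  /* bounce through a stack local ... */
        int out;

        copy_out(&out, &serial, sizeof(out));   /* ... and copy only that */
        printf("exported serial: %d\n", out);
        return 0;
}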
75143 diff -urNp linux-2.6.32.45/security/min_addr.c linux-2.6.32.45/security/min_addr.c
75144 --- linux-2.6.32.45/security/min_addr.c 2011-03-27 14:31:47.000000000 -0400
75145 +++ linux-2.6.32.45/security/min_addr.c 2011-04-17 15:56:46.000000000 -0400
75146 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG
75147 */
75148 static void update_mmap_min_addr(void)
75149 {
75150 +#ifndef SPARC
75151 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
75152 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
75153 mmap_min_addr = dac_mmap_min_addr;
75154 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
75155 #else
75156 mmap_min_addr = dac_mmap_min_addr;
75157 #endif
75158 +#endif
75159 }
75160
75161 /*
75162 diff -urNp linux-2.6.32.45/security/root_plug.c linux-2.6.32.45/security/root_plug.c
75163 --- linux-2.6.32.45/security/root_plug.c 2011-03-27 14:31:47.000000000 -0400
75164 +++ linux-2.6.32.45/security/root_plug.c 2011-04-17 15:56:46.000000000 -0400
75165 @@ -70,7 +70,7 @@ static int rootplug_bprm_check_security
75166 return 0;
75167 }
75168
75169 -static struct security_operations rootplug_security_ops = {
75170 +static struct security_operations rootplug_security_ops __read_only = {
75171 .bprm_check_security = rootplug_bprm_check_security,
75172 };
75173
75174 diff -urNp linux-2.6.32.45/security/security.c linux-2.6.32.45/security/security.c
75175 --- linux-2.6.32.45/security/security.c 2011-03-27 14:31:47.000000000 -0400
75176 +++ linux-2.6.32.45/security/security.c 2011-04-17 15:56:46.000000000 -0400
75177 @@ -24,7 +24,7 @@ static __initdata char chosen_lsm[SECURI
75178 extern struct security_operations default_security_ops;
75179 extern void security_fixup_ops(struct security_operations *ops);
75180
75181 -struct security_operations *security_ops; /* Initialized to NULL */
75182 +struct security_operations *security_ops __read_only; /* Initialized to NULL */
75183
75184 static inline int verify(struct security_operations *ops)
75185 {
75186 @@ -106,7 +106,7 @@ int __init security_module_enable(struct
75187 * If there is already a security module registered with the kernel,
75188 * an error will be returned. Otherwise %0 is returned on success.
75189 */
75190 -int register_security(struct security_operations *ops)
75191 +int __init register_security(struct security_operations *ops)
75192 {
75193 if (verify(ops)) {
75194 printk(KERN_DEBUG "%s could not verify "
75195 diff -urNp linux-2.6.32.45/security/selinux/hooks.c linux-2.6.32.45/security/selinux/hooks.c
75196 --- linux-2.6.32.45/security/selinux/hooks.c 2011-03-27 14:31:47.000000000 -0400
75197 +++ linux-2.6.32.45/security/selinux/hooks.c 2011-04-17 15:56:46.000000000 -0400
75198 @@ -131,7 +131,7 @@ int selinux_enabled = 1;
75199 * Minimal support for a secondary security module,
75200 * just to allow the use of the capability module.
75201 */
75202 -static struct security_operations *secondary_ops;
75203 +static struct security_operations *secondary_ops __read_only;
75204
75205 /* Lists of inode and superblock security structures initialized
75206 before the policy was loaded. */
75207 @@ -5457,7 +5457,7 @@ static int selinux_key_getsecurity(struc
75208
75209 #endif
75210
75211 -static struct security_operations selinux_ops = {
75212 +static struct security_operations selinux_ops __read_only = {
75213 .name = "selinux",
75214
75215 .ptrace_access_check = selinux_ptrace_access_check,
75216 @@ -5841,7 +5841,9 @@ int selinux_disable(void)
75217 avc_disable();
75218
75219 /* Reset security_ops to the secondary module, dummy or capability. */
75220 + pax_open_kernel();
75221 security_ops = secondary_ops;
75222 + pax_close_kernel();
75223
75224 /* Unregister netfilter hooks. */
75225 selinux_nf_ip_exit();
75226 diff -urNp linux-2.6.32.45/security/selinux/include/xfrm.h linux-2.6.32.45/security/selinux/include/xfrm.h
75227 --- linux-2.6.32.45/security/selinux/include/xfrm.h 2011-03-27 14:31:47.000000000 -0400
75228 +++ linux-2.6.32.45/security/selinux/include/xfrm.h 2011-05-18 20:09:37.000000000 -0400
75229 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct s
75230
75231 static inline void selinux_xfrm_notify_policyload(void)
75232 {
75233 - atomic_inc(&flow_cache_genid);
75234 + atomic_inc_unchecked(&flow_cache_genid);
75235 }
75236 #else
75237 static inline int selinux_xfrm_enabled(void)
75238 diff -urNp linux-2.6.32.45/security/selinux/ss/services.c linux-2.6.32.45/security/selinux/ss/services.c
75239 --- linux-2.6.32.45/security/selinux/ss/services.c 2011-03-27 14:31:47.000000000 -0400
75240 +++ linux-2.6.32.45/security/selinux/ss/services.c 2011-05-16 21:46:57.000000000 -0400
75241 @@ -1715,6 +1715,8 @@ int security_load_policy(void *data, siz
75242 int rc = 0;
75243 struct policy_file file = { data, len }, *fp = &file;
75244
75245 + pax_track_stack();
75246 +
75247 if (!ss_initialized) {
75248 avtab_cache_init();
75249 if (policydb_read(&policydb, fp)) {
75250 diff -urNp linux-2.6.32.45/security/smack/smack_lsm.c linux-2.6.32.45/security/smack/smack_lsm.c
75251 --- linux-2.6.32.45/security/smack/smack_lsm.c 2011-03-27 14:31:47.000000000 -0400
75252 +++ linux-2.6.32.45/security/smack/smack_lsm.c 2011-04-17 15:56:46.000000000 -0400
75253 @@ -3073,7 +3073,7 @@ static int smack_inode_getsecctx(struct
75254 return 0;
75255 }
75256
75257 -struct security_operations smack_ops = {
75258 +struct security_operations smack_ops __read_only = {
75259 .name = "smack",
75260
75261 .ptrace_access_check = smack_ptrace_access_check,
75262 diff -urNp linux-2.6.32.45/security/tomoyo/tomoyo.c linux-2.6.32.45/security/tomoyo/tomoyo.c
75263 --- linux-2.6.32.45/security/tomoyo/tomoyo.c 2011-03-27 14:31:47.000000000 -0400
75264 +++ linux-2.6.32.45/security/tomoyo/tomoyo.c 2011-04-17 15:56:46.000000000 -0400
75265 @@ -275,7 +275,7 @@ static int tomoyo_dentry_open(struct fil
75266 * tomoyo_security_ops is a "struct security_operations" which is used for
75267 * registering TOMOYO.
75268 */
75269 -static struct security_operations tomoyo_security_ops = {
75270 +static struct security_operations tomoyo_security_ops __read_only = {
75271 .name = "tomoyo",
75272 .cred_alloc_blank = tomoyo_cred_alloc_blank,
75273 .cred_prepare = tomoyo_cred_prepare,
75274 diff -urNp linux-2.6.32.45/sound/aoa/codecs/onyx.c linux-2.6.32.45/sound/aoa/codecs/onyx.c
75275 --- linux-2.6.32.45/sound/aoa/codecs/onyx.c 2011-03-27 14:31:47.000000000 -0400
75276 +++ linux-2.6.32.45/sound/aoa/codecs/onyx.c 2011-04-17 15:56:46.000000000 -0400
75277 @@ -53,7 +53,7 @@ struct onyx {
75278 spdif_locked:1,
75279 analog_locked:1,
75280 original_mute:2;
75281 - int open_count;
75282 + local_t open_count;
75283 struct codec_info *codec_info;
75284
75285 /* mutex serializes concurrent access to the device
75286 @@ -752,7 +752,7 @@ static int onyx_open(struct codec_info_i
75287 struct onyx *onyx = cii->codec_data;
75288
75289 mutex_lock(&onyx->mutex);
75290 - onyx->open_count++;
75291 + local_inc(&onyx->open_count);
75292 mutex_unlock(&onyx->mutex);
75293
75294 return 0;
75295 @@ -764,8 +764,7 @@ static int onyx_close(struct codec_info_
75296 struct onyx *onyx = cii->codec_data;
75297
75298 mutex_lock(&onyx->mutex);
75299 - onyx->open_count--;
75300 - if (!onyx->open_count)
75301 + if (local_dec_and_test(&onyx->open_count))
75302 onyx->spdif_locked = onyx->analog_locked = 0;
75303 mutex_unlock(&onyx->mutex);
75304
75305 diff -urNp linux-2.6.32.45/sound/aoa/codecs/onyx.h linux-2.6.32.45/sound/aoa/codecs/onyx.h
75306 --- linux-2.6.32.45/sound/aoa/codecs/onyx.h 2011-03-27 14:31:47.000000000 -0400
75307 +++ linux-2.6.32.45/sound/aoa/codecs/onyx.h 2011-04-17 15:56:46.000000000 -0400
75308 @@ -11,6 +11,7 @@
75309 #include <linux/i2c.h>
75310 #include <asm/pmac_low_i2c.h>
75311 #include <asm/prom.h>
75312 +#include <asm/local.h>
75313
75314 /* PCM3052 register definitions */
75315
75316 diff -urNp linux-2.6.32.45/sound/core/seq/seq_device.c linux-2.6.32.45/sound/core/seq/seq_device.c
75317 --- linux-2.6.32.45/sound/core/seq/seq_device.c 2011-03-27 14:31:47.000000000 -0400
75318 +++ linux-2.6.32.45/sound/core/seq/seq_device.c 2011-08-05 20:33:55.000000000 -0400
75319 @@ -63,7 +63,7 @@ struct ops_list {
75320 int argsize; /* argument size */
75321
75322 /* operators */
75323 - struct snd_seq_dev_ops ops;
75324 + struct snd_seq_dev_ops *ops;
75325
75326 /* registred devices */
75327 struct list_head dev_list; /* list of devices */
75328 @@ -332,7 +332,7 @@ int snd_seq_device_register_driver(char
75329
75330 mutex_lock(&ops->reg_mutex);
75331 /* copy driver operators */
75332 - ops->ops = *entry;
75333 + ops->ops = entry;
75334 ops->driver |= DRIVER_LOADED;
75335 ops->argsize = argsize;
75336
75337 @@ -462,7 +462,7 @@ static int init_device(struct snd_seq_de
75338 dev->name, ops->id, ops->argsize, dev->argsize);
75339 return -EINVAL;
75340 }
75341 - if (ops->ops.init_device(dev) >= 0) {
75342 + if (ops->ops->init_device(dev) >= 0) {
75343 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
75344 ops->num_init_devices++;
75345 } else {
75346 @@ -489,7 +489,7 @@ static int free_device(struct snd_seq_de
75347 dev->name, ops->id, ops->argsize, dev->argsize);
75348 return -EINVAL;
75349 }
75350 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
75351 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
75352 dev->status = SNDRV_SEQ_DEVICE_FREE;
75353 dev->driver_data = NULL;
75354 ops->num_init_devices--;
75355 diff -urNp linux-2.6.32.45/sound/drivers/mts64.c linux-2.6.32.45/sound/drivers/mts64.c
75356 --- linux-2.6.32.45/sound/drivers/mts64.c 2011-03-27 14:31:47.000000000 -0400
75357 +++ linux-2.6.32.45/sound/drivers/mts64.c 2011-04-17 15:56:46.000000000 -0400
75358 @@ -27,6 +27,7 @@
75359 #include <sound/initval.h>
75360 #include <sound/rawmidi.h>
75361 #include <sound/control.h>
75362 +#include <asm/local.h>
75363
75364 #define CARD_NAME "Miditerminal 4140"
75365 #define DRIVER_NAME "MTS64"
75366 @@ -65,7 +66,7 @@ struct mts64 {
75367 struct pardevice *pardev;
75368 int pardev_claimed;
75369
75370 - int open_count;
75371 + local_t open_count;
75372 int current_midi_output_port;
75373 int current_midi_input_port;
75374 u8 mode[MTS64_NUM_INPUT_PORTS];
75375 @@ -695,7 +696,7 @@ static int snd_mts64_rawmidi_open(struct
75376 {
75377 struct mts64 *mts = substream->rmidi->private_data;
75378
75379 - if (mts->open_count == 0) {
75380 + if (local_read(&mts->open_count) == 0) {
75381 /* We don't need a spinlock here, because this is just called
75382 if the device has not been opened before.
75383 So there aren't any IRQs from the device */
75384 @@ -703,7 +704,7 @@ static int snd_mts64_rawmidi_open(struct
75385
75386 msleep(50);
75387 }
75388 - ++(mts->open_count);
75389 + local_inc(&mts->open_count);
75390
75391 return 0;
75392 }
75393 @@ -713,8 +714,7 @@ static int snd_mts64_rawmidi_close(struc
75394 struct mts64 *mts = substream->rmidi->private_data;
75395 unsigned long flags;
75396
75397 - --(mts->open_count);
75398 - if (mts->open_count == 0) {
75399 + if (local_dec_return(&mts->open_count) == 0) {
75400 /* We need the spinlock_irqsave here because we can still
75401 have IRQs at this point */
75402 spin_lock_irqsave(&mts->lock, flags);
75403 @@ -723,8 +723,8 @@ static int snd_mts64_rawmidi_close(struc
75404
75405 msleep(500);
75406
75407 - } else if (mts->open_count < 0)
75408 - mts->open_count = 0;
75409 + } else if (local_read(&mts->open_count) < 0)
75410 + local_set(&mts->open_count, 0);
75411
75412 return 0;
75413 }
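The open_count conversions in this driver (and in the onyx code above) replace
a plain int with the kernel's local_t so the counter is only touched through
dedicated primitives. A rough userspace analogue with C11 atomics
(illustrative only, not the kernel local_t API):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int open_count;

static void device_open(void)
{
        atomic_fetch_add(&open_count, 1);
}

static void device_close(void)
{
        /* fetch_sub returns the previous value: 1 means we were the last opener */
        if (atomic_fetch_sub(&open_count, 1) == 1)
                puts("last opener gone, quiescing the device");
}

int main(void)
{
        device_open();
        device_open();
        device_close();
        device_close();
        return 0;
}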
75414 diff -urNp linux-2.6.32.45/sound/drivers/opl4/opl4_lib.c linux-2.6.32.45/sound/drivers/opl4/opl4_lib.c
75415 --- linux-2.6.32.45/sound/drivers/opl4/opl4_lib.c 2011-03-27 14:31:47.000000000 -0400
75416 +++ linux-2.6.32.45/sound/drivers/opl4/opl4_lib.c 2011-08-05 20:33:55.000000000 -0400
75417 @@ -27,7 +27,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@
75418 MODULE_DESCRIPTION("OPL4 driver");
75419 MODULE_LICENSE("GPL");
75420
75421 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
75422 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
75423 {
75424 int timeout = 10;
75425 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
75426 diff -urNp linux-2.6.32.45/sound/drivers/portman2x4.c linux-2.6.32.45/sound/drivers/portman2x4.c
75427 --- linux-2.6.32.45/sound/drivers/portman2x4.c 2011-03-27 14:31:47.000000000 -0400
75428 +++ linux-2.6.32.45/sound/drivers/portman2x4.c 2011-04-17 15:56:46.000000000 -0400
75429 @@ -46,6 +46,7 @@
75430 #include <sound/initval.h>
75431 #include <sound/rawmidi.h>
75432 #include <sound/control.h>
75433 +#include <asm/local.h>
75434
75435 #define CARD_NAME "Portman 2x4"
75436 #define DRIVER_NAME "portman"
75437 @@ -83,7 +84,7 @@ struct portman {
75438 struct pardevice *pardev;
75439 int pardev_claimed;
75440
75441 - int open_count;
75442 + local_t open_count;
75443 int mode[PORTMAN_NUM_INPUT_PORTS];
75444 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
75445 };
75446 diff -urNp linux-2.6.32.45/sound/isa/cmi8330.c linux-2.6.32.45/sound/isa/cmi8330.c
75447 --- linux-2.6.32.45/sound/isa/cmi8330.c 2011-03-27 14:31:47.000000000 -0400
75448 +++ linux-2.6.32.45/sound/isa/cmi8330.c 2011-08-23 21:22:32.000000000 -0400
75449 @@ -173,7 +173,7 @@ struct snd_cmi8330 {
75450
75451 struct snd_pcm *pcm;
75452 struct snd_cmi8330_stream {
75453 - struct snd_pcm_ops ops;
75454 + snd_pcm_ops_no_const ops;
75455 snd_pcm_open_callback_t open;
75456 void *private_data; /* sb or wss */
75457 } streams[2];
75458 diff -urNp linux-2.6.32.45/sound/oss/sb_audio.c linux-2.6.32.45/sound/oss/sb_audio.c
75459 --- linux-2.6.32.45/sound/oss/sb_audio.c 2011-03-27 14:31:47.000000000 -0400
75460 +++ linux-2.6.32.45/sound/oss/sb_audio.c 2011-04-17 15:56:46.000000000 -0400
75461 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
75462 buf16 = (signed short *)(localbuf + localoffs);
75463 while (c)
75464 {
75465 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
75466 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
75467 if (copy_from_user(lbuf8,
75468 userbuf+useroffs + p,
75469 locallen))
75470 diff -urNp linux-2.6.32.45/sound/oss/swarm_cs4297a.c linux-2.6.32.45/sound/oss/swarm_cs4297a.c
75471 --- linux-2.6.32.45/sound/oss/swarm_cs4297a.c 2011-03-27 14:31:47.000000000 -0400
75472 +++ linux-2.6.32.45/sound/oss/swarm_cs4297a.c 2011-04-17 15:56:46.000000000 -0400
75473 @@ -2577,7 +2577,6 @@ static int __init cs4297a_init(void)
75474 {
75475 struct cs4297a_state *s;
75476 u32 pwr, id;
75477 - mm_segment_t fs;
75478 int rval;
75479 #ifndef CONFIG_BCM_CS4297A_CSWARM
75480 u64 cfg;
75481 @@ -2667,22 +2666,23 @@ static int __init cs4297a_init(void)
75482 if (!rval) {
75483 char *sb1250_duart_present;
75484
75485 +#if 0
75486 + mm_segment_t fs;
75487 fs = get_fs();
75488 set_fs(KERNEL_DS);
75489 -#if 0
75490 val = SOUND_MASK_LINE;
75491 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
75492 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
75493 val = initvol[i].vol;
75494 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
75495 }
75496 + set_fs(fs);
75497 // cs4297a_write_ac97(s, 0x18, 0x0808);
75498 #else
75499 // cs4297a_write_ac97(s, 0x5e, 0x180);
75500 cs4297a_write_ac97(s, 0x02, 0x0808);
75501 cs4297a_write_ac97(s, 0x18, 0x0808);
75502 #endif
75503 - set_fs(fs);
75504
75505 list_add(&s->list, &cs4297a_devs);
75506
75507 diff -urNp linux-2.6.32.45/sound/pci/ac97/ac97_codec.c linux-2.6.32.45/sound/pci/ac97/ac97_codec.c
75508 --- linux-2.6.32.45/sound/pci/ac97/ac97_codec.c 2011-03-27 14:31:47.000000000 -0400
75509 +++ linux-2.6.32.45/sound/pci/ac97/ac97_codec.c 2011-04-17 15:56:46.000000000 -0400
75510 @@ -1952,7 +1952,7 @@ static int snd_ac97_dev_disconnect(struc
75511 }
75512
75513 /* build_ops to do nothing */
75514 -static struct snd_ac97_build_ops null_build_ops;
75515 +static const struct snd_ac97_build_ops null_build_ops;
75516
75517 #ifdef CONFIG_SND_AC97_POWER_SAVE
75518 static void do_update_power(struct work_struct *work)
75519 diff -urNp linux-2.6.32.45/sound/pci/ac97/ac97_patch.c linux-2.6.32.45/sound/pci/ac97/ac97_patch.c
75520 --- linux-2.6.32.45/sound/pci/ac97/ac97_patch.c 2011-03-27 14:31:47.000000000 -0400
75521 +++ linux-2.6.32.45/sound/pci/ac97/ac97_patch.c 2011-04-23 12:56:12.000000000 -0400
75522 @@ -371,7 +371,7 @@ static int patch_yamaha_ymf743_build_spd
75523 return 0;
75524 }
75525
75526 -static struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
75527 +static const struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
75528 .build_spdif = patch_yamaha_ymf743_build_spdif,
75529 .build_3d = patch_yamaha_ymf7x3_3d,
75530 };
75531 @@ -455,7 +455,7 @@ static int patch_yamaha_ymf753_post_spdi
75532 return 0;
75533 }
75534
75535 -static struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
75536 +static const struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
75537 .build_3d = patch_yamaha_ymf7x3_3d,
75538 .build_post_spdif = patch_yamaha_ymf753_post_spdif
75539 };
75540 @@ -502,7 +502,7 @@ static int patch_wolfson_wm9703_specific
75541 return 0;
75542 }
75543
75544 -static struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
75545 +static const struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
75546 .build_specific = patch_wolfson_wm9703_specific,
75547 };
75548
75549 @@ -533,7 +533,7 @@ static int patch_wolfson_wm9704_specific
75550 return 0;
75551 }
75552
75553 -static struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
75554 +static const struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
75555 .build_specific = patch_wolfson_wm9704_specific,
75556 };
75557
75558 @@ -555,7 +555,7 @@ static int patch_wolfson_wm9705_specific
75559 return 0;
75560 }
75561
75562 -static struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
75563 +static const struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
75564 .build_specific = patch_wolfson_wm9705_specific,
75565 };
75566
75567 @@ -692,7 +692,7 @@ static int patch_wolfson_wm9711_specific
75568 return 0;
75569 }
75570
75571 -static struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
75572 +static const struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
75573 .build_specific = patch_wolfson_wm9711_specific,
75574 };
75575
75576 @@ -886,7 +886,7 @@ static void patch_wolfson_wm9713_resume
75577 }
75578 #endif
75579
75580 -static struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
75581 +static const struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
75582 .build_specific = patch_wolfson_wm9713_specific,
75583 .build_3d = patch_wolfson_wm9713_3d,
75584 #ifdef CONFIG_PM
75585 @@ -991,7 +991,7 @@ static int patch_sigmatel_stac97xx_speci
75586 return 0;
75587 }
75588
75589 -static struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
75590 +static const struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
75591 .build_3d = patch_sigmatel_stac9700_3d,
75592 .build_specific = patch_sigmatel_stac97xx_specific
75593 };
75594 @@ -1038,7 +1038,7 @@ static int patch_sigmatel_stac9708_speci
75595 return patch_sigmatel_stac97xx_specific(ac97);
75596 }
75597
75598 -static struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
75599 +static const struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
75600 .build_3d = patch_sigmatel_stac9708_3d,
75601 .build_specific = patch_sigmatel_stac9708_specific
75602 };
75603 @@ -1267,7 +1267,7 @@ static int patch_sigmatel_stac9758_speci
75604 return 0;
75605 }
75606
75607 -static struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
75608 +static const struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
75609 .build_3d = patch_sigmatel_stac9700_3d,
75610 .build_specific = patch_sigmatel_stac9758_specific
75611 };
75612 @@ -1342,7 +1342,7 @@ static int patch_cirrus_build_spdif(stru
75613 return 0;
75614 }
75615
75616 -static struct snd_ac97_build_ops patch_cirrus_ops = {
75617 +static const struct snd_ac97_build_ops patch_cirrus_ops = {
75618 .build_spdif = patch_cirrus_build_spdif
75619 };
75620
75621 @@ -1399,7 +1399,7 @@ static int patch_conexant_build_spdif(st
75622 return 0;
75623 }
75624
75625 -static struct snd_ac97_build_ops patch_conexant_ops = {
75626 +static const struct snd_ac97_build_ops patch_conexant_ops = {
75627 .build_spdif = patch_conexant_build_spdif
75628 };
75629
75630 @@ -1575,7 +1575,7 @@ static void patch_ad1881_chained(struct
75631 }
75632 }
75633
75634 -static struct snd_ac97_build_ops patch_ad1881_build_ops = {
75635 +static const struct snd_ac97_build_ops patch_ad1881_build_ops = {
75636 #ifdef CONFIG_PM
75637 .resume = ad18xx_resume
75638 #endif
75639 @@ -1662,7 +1662,7 @@ static int patch_ad1885_specific(struct
75640 return 0;
75641 }
75642
75643 -static struct snd_ac97_build_ops patch_ad1885_build_ops = {
75644 +static const struct snd_ac97_build_ops patch_ad1885_build_ops = {
75645 .build_specific = &patch_ad1885_specific,
75646 #ifdef CONFIG_PM
75647 .resume = ad18xx_resume
75648 @@ -1689,7 +1689,7 @@ static int patch_ad1886_specific(struct
75649 return 0;
75650 }
75651
75652 -static struct snd_ac97_build_ops patch_ad1886_build_ops = {
75653 +static const struct snd_ac97_build_ops patch_ad1886_build_ops = {
75654 .build_specific = &patch_ad1886_specific,
75655 #ifdef CONFIG_PM
75656 .resume = ad18xx_resume
75657 @@ -1896,7 +1896,7 @@ static int patch_ad1981a_specific(struct
75658 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
75659 }
75660
75661 -static struct snd_ac97_build_ops patch_ad1981a_build_ops = {
75662 +static const struct snd_ac97_build_ops patch_ad1981a_build_ops = {
75663 .build_post_spdif = patch_ad198x_post_spdif,
75664 .build_specific = patch_ad1981a_specific,
75665 #ifdef CONFIG_PM
75666 @@ -1951,7 +1951,7 @@ static int patch_ad1981b_specific(struct
75667 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
75668 }
75669
75670 -static struct snd_ac97_build_ops patch_ad1981b_build_ops = {
75671 +static const struct snd_ac97_build_ops patch_ad1981b_build_ops = {
75672 .build_post_spdif = patch_ad198x_post_spdif,
75673 .build_specific = patch_ad1981b_specific,
75674 #ifdef CONFIG_PM
75675 @@ -2090,7 +2090,7 @@ static int patch_ad1888_specific(struct
75676 return patch_build_controls(ac97, snd_ac97_ad1888_controls, ARRAY_SIZE(snd_ac97_ad1888_controls));
75677 }
75678
75679 -static struct snd_ac97_build_ops patch_ad1888_build_ops = {
75680 +static const struct snd_ac97_build_ops patch_ad1888_build_ops = {
75681 .build_post_spdif = patch_ad198x_post_spdif,
75682 .build_specific = patch_ad1888_specific,
75683 #ifdef CONFIG_PM
75684 @@ -2139,7 +2139,7 @@ static int patch_ad1980_specific(struct
75685 return patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1);
75686 }
75687
75688 -static struct snd_ac97_build_ops patch_ad1980_build_ops = {
75689 +static const struct snd_ac97_build_ops patch_ad1980_build_ops = {
75690 .build_post_spdif = patch_ad198x_post_spdif,
75691 .build_specific = patch_ad1980_specific,
75692 #ifdef CONFIG_PM
75693 @@ -2254,7 +2254,7 @@ static int patch_ad1985_specific(struct
75694 ARRAY_SIZE(snd_ac97_ad1985_controls));
75695 }
75696
75697 -static struct snd_ac97_build_ops patch_ad1985_build_ops = {
75698 +static const struct snd_ac97_build_ops patch_ad1985_build_ops = {
75699 .build_post_spdif = patch_ad198x_post_spdif,
75700 .build_specific = patch_ad1985_specific,
75701 #ifdef CONFIG_PM
75702 @@ -2546,7 +2546,7 @@ static int patch_ad1986_specific(struct
75703 ARRAY_SIZE(snd_ac97_ad1985_controls));
75704 }
75705
75706 -static struct snd_ac97_build_ops patch_ad1986_build_ops = {
75707 +static const struct snd_ac97_build_ops patch_ad1986_build_ops = {
75708 .build_post_spdif = patch_ad198x_post_spdif,
75709 .build_specific = patch_ad1986_specific,
75710 #ifdef CONFIG_PM
75711 @@ -2651,7 +2651,7 @@ static int patch_alc650_specific(struct
75712 return 0;
75713 }
75714
75715 -static struct snd_ac97_build_ops patch_alc650_ops = {
75716 +static const struct snd_ac97_build_ops patch_alc650_ops = {
75717 .build_specific = patch_alc650_specific,
75718 .update_jacks = alc650_update_jacks
75719 };
75720 @@ -2803,7 +2803,7 @@ static int patch_alc655_specific(struct
75721 return 0;
75722 }
75723
75724 -static struct snd_ac97_build_ops patch_alc655_ops = {
75725 +static const struct snd_ac97_build_ops patch_alc655_ops = {
75726 .build_specific = patch_alc655_specific,
75727 .update_jacks = alc655_update_jacks
75728 };
75729 @@ -2915,7 +2915,7 @@ static int patch_alc850_specific(struct
75730 return 0;
75731 }
75732
75733 -static struct snd_ac97_build_ops patch_alc850_ops = {
75734 +static const struct snd_ac97_build_ops patch_alc850_ops = {
75735 .build_specific = patch_alc850_specific,
75736 .update_jacks = alc850_update_jacks
75737 };
75738 @@ -2977,7 +2977,7 @@ static int patch_cm9738_specific(struct
75739 return patch_build_controls(ac97, snd_ac97_cm9738_controls, ARRAY_SIZE(snd_ac97_cm9738_controls));
75740 }
75741
75742 -static struct snd_ac97_build_ops patch_cm9738_ops = {
75743 +static const struct snd_ac97_build_ops patch_cm9738_ops = {
75744 .build_specific = patch_cm9738_specific,
75745 .update_jacks = cm9738_update_jacks
75746 };
75747 @@ -3068,7 +3068,7 @@ static int patch_cm9739_post_spdif(struc
75748 return patch_build_controls(ac97, snd_ac97_cm9739_controls_spdif, ARRAY_SIZE(snd_ac97_cm9739_controls_spdif));
75749 }
75750
75751 -static struct snd_ac97_build_ops patch_cm9739_ops = {
75752 +static const struct snd_ac97_build_ops patch_cm9739_ops = {
75753 .build_specific = patch_cm9739_specific,
75754 .build_post_spdif = patch_cm9739_post_spdif,
75755 .update_jacks = cm9739_update_jacks
75756 @@ -3242,7 +3242,7 @@ static int patch_cm9761_specific(struct
75757 return patch_build_controls(ac97, snd_ac97_cm9761_controls, ARRAY_SIZE(snd_ac97_cm9761_controls));
75758 }
75759
75760 -static struct snd_ac97_build_ops patch_cm9761_ops = {
75761 +static const struct snd_ac97_build_ops patch_cm9761_ops = {
75762 .build_specific = patch_cm9761_specific,
75763 .build_post_spdif = patch_cm9761_post_spdif,
75764 .update_jacks = cm9761_update_jacks
75765 @@ -3338,7 +3338,7 @@ static int patch_cm9780_specific(struct
75766 return patch_build_controls(ac97, cm9780_controls, ARRAY_SIZE(cm9780_controls));
75767 }
75768
75769 -static struct snd_ac97_build_ops patch_cm9780_ops = {
75770 +static const struct snd_ac97_build_ops patch_cm9780_ops = {
75771 .build_specific = patch_cm9780_specific,
75772 .build_post_spdif = patch_cm9761_post_spdif /* identical with CM9761 */
75773 };
75774 @@ -3458,7 +3458,7 @@ static int patch_vt1616_specific(struct
75775 return 0;
75776 }
75777
75778 -static struct snd_ac97_build_ops patch_vt1616_ops = {
75779 +static const struct snd_ac97_build_ops patch_vt1616_ops = {
75780 .build_specific = patch_vt1616_specific
75781 };
75782
75783 @@ -3812,7 +3812,7 @@ static int patch_it2646_specific(struct
75784 return 0;
75785 }
75786
75787 -static struct snd_ac97_build_ops patch_it2646_ops = {
75788 +static const struct snd_ac97_build_ops patch_it2646_ops = {
75789 .build_specific = patch_it2646_specific,
75790 .update_jacks = it2646_update_jacks
75791 };
75792 @@ -3846,7 +3846,7 @@ static int patch_si3036_specific(struct
75793 return 0;
75794 }
75795
75796 -static struct snd_ac97_build_ops patch_si3036_ops = {
75797 +static const struct snd_ac97_build_ops patch_si3036_ops = {
75798 .build_specific = patch_si3036_specific,
75799 };
75800
75801 @@ -3913,7 +3913,7 @@ static int patch_ucb1400_specific(struct
75802 return 0;
75803 }
75804
75805 -static struct snd_ac97_build_ops patch_ucb1400_ops = {
75806 +static const struct snd_ac97_build_ops patch_ucb1400_ops = {
75807 .build_specific = patch_ucb1400_specific,
75808 };
75809
75810 diff -urNp linux-2.6.32.45/sound/pci/hda/hda_codec.h linux-2.6.32.45/sound/pci/hda/hda_codec.h
75811 --- linux-2.6.32.45/sound/pci/hda/hda_codec.h 2011-03-27 14:31:47.000000000 -0400
75812 +++ linux-2.6.32.45/sound/pci/hda/hda_codec.h 2011-08-23 21:22:32.000000000 -0400
75813 @@ -580,7 +580,7 @@ struct hda_bus_ops {
75814 /* notify power-up/down from codec to controller */
75815 void (*pm_notify)(struct hda_bus *bus);
75816 #endif
75817 -};
75818 +} __no_const;
75819
75820 /* template to pass to the bus constructor */
75821 struct hda_bus_template {
75822 @@ -675,6 +675,7 @@ struct hda_codec_ops {
75823 int (*check_power_status)(struct hda_codec *codec, hda_nid_t nid);
75824 #endif
75825 };
75826 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
75827
75828 /* record for amp information cache */
75829 struct hda_cache_head {
75830 @@ -705,7 +706,7 @@ struct hda_pcm_ops {
75831 struct snd_pcm_substream *substream);
75832 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
75833 struct snd_pcm_substream *substream);
75834 -};
75835 +} __no_const;
75836
75837 /* PCM information for each substream */
75838 struct hda_pcm_stream {
75839 @@ -760,7 +761,7 @@ struct hda_codec {
75840 const char *modelname; /* model name for preset */
75841
75842 /* set by patch */
75843 - struct hda_codec_ops patch_ops;
75844 + hda_codec_ops_no_const patch_ops;
75845
75846 /* PCM to create, set by patch_ops.build_pcms callback */
75847 unsigned int num_pcms;
75848 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_atihdmi.c linux-2.6.32.45/sound/pci/hda/patch_atihdmi.c
75849 --- linux-2.6.32.45/sound/pci/hda/patch_atihdmi.c 2011-03-27 14:31:47.000000000 -0400
75850 +++ linux-2.6.32.45/sound/pci/hda/patch_atihdmi.c 2011-08-05 20:33:55.000000000 -0400
75851 @@ -177,7 +177,7 @@ static int patch_atihdmi(struct hda_code
75852 */
75853 spec->multiout.dig_out_nid = CVT_NID;
75854
75855 - codec->patch_ops = atihdmi_patch_ops;
75856 + memcpy((void *)&codec->patch_ops, &atihdmi_patch_ops, sizeof(atihdmi_patch_ops));
75857
75858 return 0;
75859 }
75860 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_intelhdmi.c linux-2.6.32.45/sound/pci/hda/patch_intelhdmi.c
75861 --- linux-2.6.32.45/sound/pci/hda/patch_intelhdmi.c 2011-03-27 14:31:47.000000000 -0400
75862 +++ linux-2.6.32.45/sound/pci/hda/patch_intelhdmi.c 2011-08-05 20:33:55.000000000 -0400
75863 @@ -511,10 +511,10 @@ static void hdmi_non_intrinsic_event(str
75864 cp_ready);
75865
75866 /* TODO */
75867 - if (cp_state)
75868 - ;
75869 - if (cp_ready)
75870 - ;
75871 + if (cp_state) {
75872 + }
75873 + if (cp_ready) {
75874 + }
75875 }
75876
75877
75878 @@ -656,7 +656,7 @@ static int do_patch_intel_hdmi(struct hd
75879 spec->multiout.dig_out_nid = cvt_nid;
75880
75881 codec->spec = spec;
75882 - codec->patch_ops = intel_hdmi_patch_ops;
75883 + memcpy((void *)&codec->patch_ops, &intel_hdmi_patch_ops, sizeof(intel_hdmi_patch_ops));
75884
75885 snd_hda_eld_proc_new(codec, &spec->sink_eld);
75886
75887 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_nvhdmi.c linux-2.6.32.45/sound/pci/hda/patch_nvhdmi.c
75888 --- linux-2.6.32.45/sound/pci/hda/patch_nvhdmi.c 2011-03-27 14:31:47.000000000 -0400
75889 +++ linux-2.6.32.45/sound/pci/hda/patch_nvhdmi.c 2011-08-05 20:33:55.000000000 -0400
75890 @@ -367,7 +367,7 @@ static int patch_nvhdmi_8ch(struct hda_c
75891 spec->multiout.max_channels = 8;
75892 spec->multiout.dig_out_nid = Nv_Master_Convert_nid;
75893
75894 - codec->patch_ops = nvhdmi_patch_ops_8ch;
75895 + memcpy((void *)&codec->patch_ops, &nvhdmi_patch_ops_8ch, sizeof(nvhdmi_patch_ops_8ch));
75896
75897 return 0;
75898 }
75899 @@ -386,7 +386,7 @@ static int patch_nvhdmi_2ch(struct hda_c
75900 spec->multiout.max_channels = 2;
75901 spec->multiout.dig_out_nid = Nv_Master_Convert_nid;
75902
75903 - codec->patch_ops = nvhdmi_patch_ops_2ch;
75904 + memcpy((void *)&codec->patch_ops, &nvhdmi_patch_ops_2ch, sizeof(nvhdmi_patch_ops_2ch));
75905
75906 return 0;
75907 }
75908 diff -urNp linux-2.6.32.45/sound/pci/hda/patch_sigmatel.c linux-2.6.32.45/sound/pci/hda/patch_sigmatel.c
75909 --- linux-2.6.32.45/sound/pci/hda/patch_sigmatel.c 2011-06-25 12:55:35.000000000 -0400
75910 +++ linux-2.6.32.45/sound/pci/hda/patch_sigmatel.c 2011-08-23 21:22:32.000000000 -0400
75911 @@ -5220,7 +5220,7 @@ again:
75912 snd_hda_codec_write_cache(codec, nid, 0,
75913 AC_VERB_SET_CONNECT_SEL, num_dacs);
75914
75915 - codec->patch_ops = stac92xx_patch_ops;
75916 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
75917
75918 codec->proc_widget_hook = stac92hd_proc_hook;
75919
75920 @@ -5294,7 +5294,7 @@ static int patch_stac92hd71bxx(struct hd
75921 return -ENOMEM;
75922
75923 codec->spec = spec;
75924 - codec->patch_ops = stac92xx_patch_ops;
75925 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
75926 spec->num_pins = STAC92HD71BXX_NUM_PINS;
75927 switch (codec->vendor_id) {
75928 case 0x111d76b6:
75929 diff -urNp linux-2.6.32.45/sound/pci/ice1712/ice1712.h linux-2.6.32.45/sound/pci/ice1712/ice1712.h
75930 --- linux-2.6.32.45/sound/pci/ice1712/ice1712.h 2011-03-27 14:31:47.000000000 -0400
75931 +++ linux-2.6.32.45/sound/pci/ice1712/ice1712.h 2011-08-05 20:33:55.000000000 -0400
75932 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
75933 unsigned int mask_flags; /* total mask bits */
75934 struct snd_akm4xxx_ops {
75935 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
75936 - } ops;
75937 + } __no_const ops;
75938 };
75939
75940 struct snd_ice1712_spdif {
75941 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
75942 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
75943 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
75944 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
75945 - } ops;
75946 + } __no_const ops;
75947 };
75948
75949
75950 diff -urNp linux-2.6.32.45/sound/pci/intel8x0m.c linux-2.6.32.45/sound/pci/intel8x0m.c
75951 --- linux-2.6.32.45/sound/pci/intel8x0m.c 2011-03-27 14:31:47.000000000 -0400
75952 +++ linux-2.6.32.45/sound/pci/intel8x0m.c 2011-04-23 12:56:12.000000000 -0400
75953 @@ -1264,7 +1264,7 @@ static struct shortname_table {
75954 { 0x5455, "ALi M5455" },
75955 { 0x746d, "AMD AMD8111" },
75956 #endif
75957 - { 0 },
75958 + { 0, },
75959 };
75960
75961 static int __devinit snd_intel8x0m_probe(struct pci_dev *pci,
75962 diff -urNp linux-2.6.32.45/sound/pci/ymfpci/ymfpci_main.c linux-2.6.32.45/sound/pci/ymfpci/ymfpci_main.c
75963 --- linux-2.6.32.45/sound/pci/ymfpci/ymfpci_main.c 2011-03-27 14:31:47.000000000 -0400
75964 +++ linux-2.6.32.45/sound/pci/ymfpci/ymfpci_main.c 2011-05-04 17:56:28.000000000 -0400
75965 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct sn
75966 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
75967 break;
75968 }
75969 - if (atomic_read(&chip->interrupt_sleep_count)) {
75970 - atomic_set(&chip->interrupt_sleep_count, 0);
75971 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
75972 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
75973 wake_up(&chip->interrupt_sleep);
75974 }
75975 __end:
75976 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct s
75977 continue;
75978 init_waitqueue_entry(&wait, current);
75979 add_wait_queue(&chip->interrupt_sleep, &wait);
75980 - atomic_inc(&chip->interrupt_sleep_count);
75981 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
75982 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
75983 remove_wait_queue(&chip->interrupt_sleep, &wait);
75984 }
75985 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(
75986 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
75987 spin_unlock(&chip->reg_lock);
75988
75989 - if (atomic_read(&chip->interrupt_sleep_count)) {
75990 - atomic_set(&chip->interrupt_sleep_count, 0);
75991 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
75992 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
75993 wake_up(&chip->interrupt_sleep);
75994 }
75995 }
75996 @@ -2369,7 +2369,7 @@ int __devinit snd_ymfpci_create(struct s
75997 spin_lock_init(&chip->reg_lock);
75998 spin_lock_init(&chip->voice_lock);
75999 init_waitqueue_head(&chip->interrupt_sleep);
76000 - atomic_set(&chip->interrupt_sleep_count, 0);
76001 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
76002 chip->card = card;
76003 chip->pci = pci;
76004 chip->irq = -1;
76005 diff -urNp linux-2.6.32.45/sound/soc/soc-core.c linux-2.6.32.45/sound/soc/soc-core.c
76006 --- linux-2.6.32.45/sound/soc/soc-core.c 2011-03-27 14:31:47.000000000 -0400
76007 +++ linux-2.6.32.45/sound/soc/soc-core.c 2011-08-23 21:22:32.000000000 -0400
76008 @@ -609,7 +609,7 @@ static int soc_pcm_trigger(struct snd_pc
76009 }
76010
76011 /* ASoC PCM operations */
76012 -static struct snd_pcm_ops soc_pcm_ops = {
76013 +static snd_pcm_ops_no_const soc_pcm_ops = {
76014 .open = soc_pcm_open,
76015 .close = soc_codec_close,
76016 .hw_params = soc_pcm_hw_params,
76017 diff -urNp linux-2.6.32.45/sound/usb/usbaudio.c linux-2.6.32.45/sound/usb/usbaudio.c
76018 --- linux-2.6.32.45/sound/usb/usbaudio.c 2011-03-27 14:31:47.000000000 -0400
76019 +++ linux-2.6.32.45/sound/usb/usbaudio.c 2011-08-05 20:33:55.000000000 -0400
76020 @@ -963,12 +963,12 @@ static int snd_usb_pcm_playback_trigger(
76021 switch (cmd) {
76022 case SNDRV_PCM_TRIGGER_START:
76023 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
76024 - subs->ops.prepare = prepare_playback_urb;
76025 + *(void **)&subs->ops.prepare = prepare_playback_urb;
76026 return 0;
76027 case SNDRV_PCM_TRIGGER_STOP:
76028 return deactivate_urbs(subs, 0, 0);
76029 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
76030 - subs->ops.prepare = prepare_nodata_playback_urb;
76031 + *(void **)&subs->ops.prepare = prepare_nodata_playback_urb;
76032 return 0;
76033 default:
76034 return -EINVAL;
76035 @@ -985,15 +985,15 @@ static int snd_usb_pcm_capture_trigger(s
76036
76037 switch (cmd) {
76038 case SNDRV_PCM_TRIGGER_START:
76039 - subs->ops.retire = retire_capture_urb;
76040 + *(void **)&subs->ops.retire = retire_capture_urb;
76041 return start_urbs(subs, substream->runtime);
76042 case SNDRV_PCM_TRIGGER_STOP:
76043 return deactivate_urbs(subs, 0, 0);
76044 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
76045 - subs->ops.retire = retire_paused_capture_urb;
76046 + *(void **)&subs->ops.retire = retire_paused_capture_urb;
76047 return 0;
76048 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
76049 - subs->ops.retire = retire_capture_urb;
76050 + *(void **)&subs->ops.retire = retire_capture_urb;
76051 return 0;
76052 default:
76053 return -EINVAL;
76054 @@ -1542,7 +1542,7 @@ static int snd_usb_pcm_prepare(struct sn
76055 /* for playback, submit the URBs now; otherwise, the first hwptr_done
76056 * updates for all URBs would happen at the same time when starting */
76057 if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) {
76058 - subs->ops.prepare = prepare_nodata_playback_urb;
76059 + *(void **)&subs->ops.prepare = prepare_nodata_playback_urb;
76060 return start_urbs(subs, runtime);
76061 } else
76062 return 0;
76063 @@ -2228,14 +2228,14 @@ static void init_substream(struct snd_us
76064 subs->direction = stream;
76065 subs->dev = as->chip->dev;
76066 if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL) {
76067 - subs->ops = audio_urb_ops[stream];
76068 + memcpy((void *)&subs->ops, &audio_urb_ops[stream], sizeof(subs->ops));
76069 } else {
76070 - subs->ops = audio_urb_ops_high_speed[stream];
76071 + memcpy((void *)&subs->ops, &audio_urb_ops_high_speed[stream], sizeof(subs->ops));
76072 switch (as->chip->usb_id) {
76073 case USB_ID(0x041e, 0x3f02): /* E-Mu 0202 USB */
76074 case USB_ID(0x041e, 0x3f04): /* E-Mu 0404 USB */
76075 case USB_ID(0x041e, 0x3f0a): /* E-Mu Tracker Pre */
76076 - subs->ops.retire_sync = retire_playback_sync_urb_hs_emu;
76077 + *(void **)&subs->ops.retire_sync = retire_playback_sync_urb_hs_emu;
76078 break;
76079 }
76080 }
76081 diff -urNp linux-2.6.32.45/tools/gcc/constify_plugin.c linux-2.6.32.45/tools/gcc/constify_plugin.c
76082 --- linux-2.6.32.45/tools/gcc/constify_plugin.c 1969-12-31 19:00:00.000000000 -0500
76083 +++ linux-2.6.32.45/tools/gcc/constify_plugin.c 2011-08-26 20:19:09.000000000 -0400
76084 @@ -0,0 +1,288 @@
76085 +/*
76086 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
76087 + * Licensed under the GPL v2, or (at your option) v3
76088 + *
76089 + * This gcc plugin constifies all structures which contain only function pointers and const fields.
76090 + *
76091 + * Usage:
76092 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
76093 + * $ gcc -fplugin=constify_plugin.so test.c -O2
76094 + */
76095 +
76096 +#include "gcc-plugin.h"
76097 +#include "config.h"
76098 +#include "system.h"
76099 +#include "coretypes.h"
76100 +#include "tree.h"
76101 +#include "tree-pass.h"
76102 +#include "intl.h"
76103 +#include "plugin-version.h"
76104 +#include "tm.h"
76105 +#include "toplev.h"
76106 +#include "function.h"
76107 +#include "tree-flow.h"
76108 +#include "plugin.h"
76109 +//#include "c-tree.h"
76110 +
76111 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
76112 +
76113 +int plugin_is_GPL_compatible;
76114 +
76115 +static struct plugin_info const_plugin_info = {
76116 + .version = "20110826",
76117 + .help = "no-constify\tturn off constification\n",
76118 +};
76119 +
76120 +static void constify_type(tree type);
76121 +static bool walk_struct(tree node);
76122 +
76123 +static tree deconstify_type(tree old_type)
76124 +{
76125 + tree new_type, field;
76126 +
76127 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
76128 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
76129 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
76130 + DECL_FIELD_CONTEXT(field) = new_type;
76131 + TYPE_READONLY(new_type) = 0;
76132 + C_TYPE_FIELDS_READONLY(new_type) = 0;
76133 + return new_type;
76134 +}
76135 +
76136 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
76137 +{
76138 + tree type;
76139 +
76140 + *no_add_attrs = true;
76141 + if (TREE_CODE(*node) == FUNCTION_DECL) {
76142 + error("%qE attribute does not apply to functions", name);
76143 + return NULL_TREE;
76144 + }
76145 +
76146 + if (TREE_CODE(*node) == VAR_DECL) {
76147 + error("%qE attribute does not apply to variables", name);
76148 + return NULL_TREE;
76149 + }
76150 +
76151 + if (TYPE_P(*node)) {
76152 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
76153 + *no_add_attrs = false;
76154 + else
76155 + error("%qE attribute applies to struct and union types only", name);
76156 + return NULL_TREE;
76157 + }
76158 +
76159 + type = TREE_TYPE(*node);
76160 +
76161 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
76162 + error("%qE attribute applies to struct and union types only", name);
76163 + return NULL_TREE;
76164 + }
76165 +
76166 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
76167 + error("%qE attribute is already applied to the type", name);
76168 + return NULL_TREE;
76169 + }
76170 +
76171 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
76172 + error("%qE attribute used on type that is not constified", name);
76173 + return NULL_TREE;
76174 + }
76175 +
76176 + if (TREE_CODE(*node) == TYPE_DECL) {
76177 + TREE_TYPE(*node) = deconstify_type(type);
76178 + TREE_READONLY(*node) = 0;
76179 + return NULL_TREE;
76180 + }
76181 +
76182 + return NULL_TREE;
76183 +}
76184 +
76185 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
76186 +{
76187 + *no_add_attrs = true;
76188 + if (!TYPE_P(*node)) {
76189 + error("%qE attribute applies to types only", name);
76190 + return NULL_TREE;
76191 + }
76192 +
76193 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
76194 + error("%qE attribute applies to struct and union types only", name);
76195 + return NULL_TREE;
76196 + }
76197 +
76198 + *no_add_attrs = false;
76199 + constify_type(*node);
76200 + return NULL_TREE;
76201 +}
76202 +
76203 +static struct attribute_spec no_const_attr = {
76204 + .name = "no_const",
76205 + .min_length = 0,
76206 + .max_length = 0,
76207 + .decl_required = false,
76208 + .type_required = false,
76209 + .function_type_required = false,
76210 + .handler = handle_no_const_attribute
76211 +};
76212 +
76213 +static struct attribute_spec do_const_attr = {
76214 + .name = "do_const",
76215 + .min_length = 0,
76216 + .max_length = 0,
76217 + .decl_required = false,
76218 + .type_required = false,
76219 + .function_type_required = false,
76220 + .handler = handle_do_const_attribute
76221 +};
76222 +
76223 +static void register_attributes(void *event_data, void *data)
76224 +{
76225 + register_attribute(&no_const_attr);
76226 + register_attribute(&do_const_attr);
76227 +}
76228 +
76229 +static void constify_type(tree type)
76230 +{
76231 + TYPE_READONLY(type) = 1;
76232 + C_TYPE_FIELDS_READONLY(type) = 1;
76233 +}
76234 +
76235 +static bool is_fptr(tree field)
76236 +{
76237 + tree ptr = TREE_TYPE(field);
76238 +
76239 + if (TREE_CODE(ptr) != POINTER_TYPE)
76240 + return false;
76241 +
76242 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
76243 +}
76244 +
76245 +static bool walk_struct(tree node)
76246 +{
76247 + tree field;
76248 +
76249 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
76250 + return false;
76251 +
76252 + if (TYPE_FIELDS(node) == NULL_TREE)
76253 + return false;
76254 +
76255 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
76256 + tree type = TREE_TYPE(field);
76257 + enum tree_code code = TREE_CODE(type);
76258 + if (code == RECORD_TYPE || code == UNION_TYPE) {
76259 + if (!(walk_struct(type)))
76260 + return false;
76261 + } else if (!is_fptr(field) && !TREE_READONLY(field))
76262 + return false;
76263 + }
76264 + return true;
76265 +}
76266 +
76267 +static void finish_type(void *event_data, void *data)
76268 +{
76269 + tree type = (tree)event_data;
76270 +
76271 + if (type == NULL_TREE)
76272 + return;
76273 +
76274 + if (TYPE_READONLY(type))
76275 + return;
76276 +
76277 + if (walk_struct(type))
76278 + constify_type(type);
76279 +}
76280 +
76281 +static unsigned int check_local_variables(void);
76282 +
76283 +struct gimple_opt_pass pass_local_variable = {
76284 + {
76285 + .type = GIMPLE_PASS,
76286 + .name = "check_local_variables",
76287 + .gate = NULL,
76288 + .execute = check_local_variables,
76289 + .sub = NULL,
76290 + .next = NULL,
76291 + .static_pass_number = 0,
76292 + .tv_id = TV_NONE,
76293 + .properties_required = 0,
76294 + .properties_provided = 0,
76295 + .properties_destroyed = 0,
76296 + .todo_flags_start = 0,
76297 + .todo_flags_finish = 0
76298 + }
76299 +};
76300 +
76301 +static unsigned int check_local_variables(void)
76302 +{
76303 + tree var;
76304 + referenced_var_iterator rvi;
76305 +
76306 +#if __GNUC__ == 4 && __GNUC_MINOR__ == 5
76307 + FOR_EACH_REFERENCED_VAR(var, rvi) {
76308 +#else
76309 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
76310 +#endif
76311 + tree type = TREE_TYPE(var);
76312 +
76313 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
76314 + continue;
76315 +
76316 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
76317 + continue;
76318 +
76319 + if (!TYPE_READONLY(type))
76320 + continue;
76321 +
76322 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
76323 +// continue;
76324 +
76325 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
76326 +// continue;
76327 +
76328 + if (walk_struct(type)) {
76329 + error("constified variable %qE cannot be local", var);
76330 + return 1;
76331 + }
76332 + }
76333 + return 0;
76334 +}
76335 +
76336 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
76337 +{
76338 + const char * const plugin_name = plugin_info->base_name;
76339 + const int argc = plugin_info->argc;
76340 + const struct plugin_argument * const argv = plugin_info->argv;
76341 + int i;
76342 + bool constify = true;
76343 +
76344 + struct register_pass_info local_variable_pass_info = {
76345 + .pass = &pass_local_variable.pass,
76346 + .reference_pass_name = "*referenced_vars",
76347 + .ref_pass_instance_number = 0,
76348 + .pos_op = PASS_POS_INSERT_AFTER
76349 + };
76350 +
76351 + if (!plugin_default_version_check(version, &gcc_version)) {
76352 + error(G_("incompatible gcc/plugin versions"));
76353 + return 1;
76354 + }
76355 +
76356 + for (i = 0; i < argc; ++i) {
76357 + if (!(strcmp(argv[i].key, "no-constify"))) {
76358 + constify = false;
76359 + continue;
76360 + }
76361 +		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
76362 + }
76363 +
76364 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
76365 + if (constify) {
76366 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
76367 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
76368 + }
76369 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
76370 +
76371 + return 0;
76372 +}
76373 Binary files linux-2.6.32.45/tools/gcc/constify_plugin.so and linux-2.6.32.45/tools/gcc/constify_plugin.so differ
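Once loaded, the plugin walks every completed struct or union type and marks it read-only when all of its members are function pointers or already-const fields; the no_const and do_const attributes it registers override that decision in either direction, and the extra GIMPLE pass rejects local variables of a constified type. A short sketch of how the attributes would be spelled on ordinary C types (the type names below are illustrative, not taken from the kernel):

/* Sketch only: applying the plugin's attributes.  Type names are made up. */
struct auto_const_ops {
	int (*open)(void);
	int (*release)(void);
};	/* all members are function pointers, so the plugin constifies this type */

struct opt_out_ops {
	int (*handler)(void);
} __attribute__((no_const));	/* would be constified, but no_const keeps it writable */

struct forced_ops {
	int (*handler)(void);
	unsigned long flags;
} __attribute__((do_const));	/* constified even though 'flags' is a plain field */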
76374 diff -urNp linux-2.6.32.45/tools/gcc/Makefile linux-2.6.32.45/tools/gcc/Makefile
76375 --- linux-2.6.32.45/tools/gcc/Makefile 1969-12-31 19:00:00.000000000 -0500
76376 +++ linux-2.6.32.45/tools/gcc/Makefile 2011-08-05 20:33:55.000000000 -0400
76377 @@ -0,0 +1,12 @@
76378 +#CC := gcc
76379 +#PLUGIN_SOURCE_FILES := pax_plugin.c
76380 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
76381 +GCCPLUGINS_DIR := $(shell $(HOSTCC) -print-file-name=plugin)
76382 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
76383 +
76384 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
76385 +
76386 +hostlibs-y := stackleak_plugin.so constify_plugin.so
76387 +always := $(hostlibs-y)
76388 +stackleak_plugin-objs := stackleak_plugin.o
76389 +constify_plugin-objs := constify_plugin.o
76390 diff -urNp linux-2.6.32.45/tools/gcc/stackleak_plugin.c linux-2.6.32.45/tools/gcc/stackleak_plugin.c
76391 --- linux-2.6.32.45/tools/gcc/stackleak_plugin.c 1969-12-31 19:00:00.000000000 -0500
76392 +++ linux-2.6.32.45/tools/gcc/stackleak_plugin.c 2011-08-23 20:24:26.000000000 -0400
76393 @@ -0,0 +1,243 @@
76394 +/*
76395 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
76396 + * Licensed under the GPL v2
76397 + *
76398 + * Note: the choice of the license means that the compilation process is
76399 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
76400 + * but for the kernel it doesn't matter since it doesn't link against
76401 + * any of the gcc libraries
76402 + *
76403 + * gcc plugin to help implement various PaX features
76404 + *
76405 + * - track lowest stack pointer
76406 + *
76407 + * TODO:
76408 + * - initialize all local variables
76409 + *
76410 + * BUGS:
76411 + * - cloned functions are instrumented twice
76412 + */
76413 +#include "gcc-plugin.h"
76414 +#include "config.h"
76415 +#include "system.h"
76416 +#include "coretypes.h"
76417 +#include "tree.h"
76418 +#include "tree-pass.h"
76419 +#include "intl.h"
76420 +#include "plugin-version.h"
76421 +#include "tm.h"
76422 +#include "toplev.h"
76423 +#include "basic-block.h"
76424 +#include "gimple.h"
76425 +//#include "expr.h" where are you...
76426 +#include "diagnostic.h"
76427 +#include "rtl.h"
76428 +#include "emit-rtl.h"
76429 +#include "function.h"
76430 +
76431 +int plugin_is_GPL_compatible;
76432 +
76433 +static int track_frame_size = -1;
76434 +static const char track_function[] = "pax_track_stack";
76435 +static bool init_locals;
76436 +
76437 +static struct plugin_info stackleak_plugin_info = {
76438 + .version = "201106030000",
76439 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
76440 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
76441 +};
76442 +
76443 +static bool gate_stackleak_track_stack(void);
76444 +static unsigned int execute_stackleak_tree_instrument(void);
76445 +static unsigned int execute_stackleak_final(void);
76446 +
76447 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
76448 + .pass = {
76449 + .type = GIMPLE_PASS,
76450 + .name = "stackleak_tree_instrument",
76451 + .gate = gate_stackleak_track_stack,
76452 + .execute = execute_stackleak_tree_instrument,
76453 + .sub = NULL,
76454 + .next = NULL,
76455 + .static_pass_number = 0,
76456 + .tv_id = TV_NONE,
76457 + .properties_required = PROP_gimple_leh | PROP_cfg,
76458 + .properties_provided = 0,
76459 + .properties_destroyed = 0,
76460 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
76461 + .todo_flags_finish = TODO_verify_stmts // | TODO_dump_func
76462 + }
76463 +};
76464 +
76465 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
76466 + .pass = {
76467 + .type = RTL_PASS,
76468 + .name = "stackleak_final",
76469 + .gate = gate_stackleak_track_stack,
76470 + .execute = execute_stackleak_final,
76471 + .sub = NULL,
76472 + .next = NULL,
76473 + .static_pass_number = 0,
76474 + .tv_id = TV_NONE,
76475 + .properties_required = 0,
76476 + .properties_provided = 0,
76477 + .properties_destroyed = 0,
76478 + .todo_flags_start = 0,
76479 + .todo_flags_finish = 0
76480 + }
76481 +};
76482 +
76483 +static bool gate_stackleak_track_stack(void)
76484 +{
76485 + return track_frame_size >= 0;
76486 +}
76487 +
76488 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi, bool before)
76489 +{
76490 + gimple call;
76491 + tree decl, type;
76492 +
76493 + // insert call to void pax_track_stack(void)
76494 + type = build_function_type_list(void_type_node, NULL_TREE);
76495 + decl = build_fn_decl(track_function, type);
76496 + DECL_ASSEMBLER_NAME(decl); // for LTO
76497 + call = gimple_build_call(decl, 0);
76498 + if (before)
76499 + gsi_insert_before(gsi, call, GSI_CONTINUE_LINKING);
76500 + else
76501 + gsi_insert_after(gsi, call, GSI_CONTINUE_LINKING);
76502 +}
76503 +
76504 +static unsigned int execute_stackleak_tree_instrument(void)
76505 +{
76506 + basic_block bb;
76507 + gimple_stmt_iterator gsi;
76508 +
76509 + // 1. loop through BBs and GIMPLE statements
76510 + FOR_EACH_BB(bb) {
76511 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
76512 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
76513 + tree decl;
76514 + gimple stmt = gsi_stmt(gsi);
76515 +
76516 + if (!is_gimple_call(stmt))
76517 + continue;
76518 + decl = gimple_call_fndecl(stmt);
76519 + if (!decl)
76520 + continue;
76521 + if (TREE_CODE(decl) != FUNCTION_DECL)
76522 + continue;
76523 + if (!DECL_BUILT_IN(decl))
76524 + continue;
76525 + if (DECL_BUILT_IN_CLASS(decl) != BUILT_IN_NORMAL)
76526 + continue;
76527 + if (DECL_FUNCTION_CODE(decl) != BUILT_IN_ALLOCA)
76528 + continue;
76529 +
76530 + // 2. insert track call after each __builtin_alloca call
76531 + stackleak_add_instrumentation(&gsi, false);
76532 +// print_node(stderr, "pax", decl, 4);
76533 + }
76534 + }
76535 +
76536 + // 3. insert track call at the beginning
76537 + bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
76538 + gsi = gsi_start_bb(bb);
76539 + stackleak_add_instrumentation(&gsi, true);
76540 +
76541 + return 0;
76542 +}
76543 +
76544 +static unsigned int execute_stackleak_final(void)
76545 +{
76546 + rtx insn;
76547 +
76548 + if (cfun->calls_alloca)
76549 + return 0;
76550 +
76551 + // 1. find pax_track_stack calls
76552 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
76553 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
76554 + rtx body;
76555 +
76556 + if (!CALL_P(insn))
76557 + continue;
76558 + body = PATTERN(insn);
76559 + if (GET_CODE(body) != CALL)
76560 + continue;
76561 + body = XEXP(body, 0);
76562 + if (GET_CODE(body) != MEM)
76563 + continue;
76564 + body = XEXP(body, 0);
76565 + if (GET_CODE(body) != SYMBOL_REF)
76566 + continue;
76567 + if (strcmp(XSTR(body, 0), track_function))
76568 + continue;
76569 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
76570 + // 2. delete call if function frame is not big enough
76571 + if (get_frame_size() >= track_frame_size)
76572 + continue;
76573 + delete_insn_and_edges(insn);
76574 + }
76575 +
76576 +// print_simple_rtl(stderr, get_insns());
76577 +// print_rtl(stderr, get_insns());
76578 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
76579 +
76580 + return 0;
76581 +}
76582 +
76583 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
76584 +{
76585 + const char * const plugin_name = plugin_info->base_name;
76586 + const int argc = plugin_info->argc;
76587 + const struct plugin_argument * const argv = plugin_info->argv;
76588 + int i;
76589 + struct register_pass_info stackleak_tree_instrument_pass_info = {
76590 + .pass = &stackleak_tree_instrument_pass.pass,
76591 +// .reference_pass_name = "tree_profile",
76592 + .reference_pass_name = "optimized",
76593 + .ref_pass_instance_number = 0,
76594 + .pos_op = PASS_POS_INSERT_AFTER
76595 + };
76596 + struct register_pass_info stackleak_final_pass_info = {
76597 + .pass = &stackleak_final_rtl_opt_pass.pass,
76598 + .reference_pass_name = "final",
76599 + .ref_pass_instance_number = 0,
76600 + .pos_op = PASS_POS_INSERT_BEFORE
76601 + };
76602 +
76603 + if (!plugin_default_version_check(version, &gcc_version)) {
76604 + error(G_("incompatible gcc/plugin versions"));
76605 + return 1;
76606 + }
76607 +
76608 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
76609 +
76610 + for (i = 0; i < argc; ++i) {
76611 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
76612 + if (!argv[i].value) {
76613 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
76614 + continue;
76615 + }
76616 + track_frame_size = atoi(argv[i].value);
76617 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
76618 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
76619 + continue;
76620 + }
76621 + if (!strcmp(argv[i].key, "initialize-locals")) {
76622 + if (argv[i].value) {
76623 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
76624 + continue;
76625 + }
76626 + init_locals = true;
76627 + continue;
76628 + }
76629 +		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
76630 + }
76631 +
76632 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
76633 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
76634 +
76635 + return 0;
76636 +}
76637 Binary files linux-2.6.32.45/tools/gcc/stackleak_plugin.so and linux-2.6.32.45/tools/gcc/stackleak_plugin.so differ
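The stackleak plugin's GIMPLE pass inserts a call to pax_track_stack() at the start of every instrumented function and after every __builtin_alloca() call; its RTL pass then deletes the calls again when the function neither uses alloca nor ends up with a frame of at least track-lowest-sp bytes. The effect is roughly equivalent to writing the calls by hand, as in this sketch (illustration only; pax_track_stack() is the kernel helper named by the plugin, everything else here is invented):

/* Sketch only: roughly what an instrumented function is equivalent to once
 * the plugin has run and the calls survive the final RTL pass. */
void pax_track_stack(void);

int large_frame_function(unsigned long n)
{
	char buf[1024];			/* large fixed frame; the alloca below also keeps the calls */
	char *dyn;

	pax_track_stack();		/* inserted at function entry */

	dyn = __builtin_alloca(n);
	pax_track_stack();		/* inserted after each __builtin_alloca() */

	buf[0] = 0;
	dyn[0] = 0;
	return buf[0] + dyn[0];
}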
76638 diff -urNp linux-2.6.32.45/usr/gen_init_cpio.c linux-2.6.32.45/usr/gen_init_cpio.c
76639 --- linux-2.6.32.45/usr/gen_init_cpio.c 2011-03-27 14:31:47.000000000 -0400
76640 +++ linux-2.6.32.45/usr/gen_init_cpio.c 2011-04-17 15:56:46.000000000 -0400
76641 @@ -299,7 +299,7 @@ static int cpio_mkfile(const char *name,
76642 int retval;
76643 int rc = -1;
76644 int namesize;
76645 - int i;
76646 + unsigned int i;
76647
76648 mode |= S_IFREG;
76649
76650 @@ -383,9 +383,10 @@ static char *cpio_replace_env(char *new_
76651 *env_var = *expanded = '\0';
76652 strncat(env_var, start + 2, end - start - 2);
76653 strncat(expanded, new_location, start - new_location);
76654 - strncat(expanded, getenv(env_var), PATH_MAX);
76655 - strncat(expanded, end + 1, PATH_MAX);
76656 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
76657 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
76658 strncpy(new_location, expanded, PATH_MAX);
76659 + new_location[PATH_MAX] = 0;
76660 } else
76661 break;
76662 }
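The gen_init_cpio.c hunk above addresses two classic C string pitfalls: strncat()'s size argument bounds the number of characters appended, not the total size of the destination, so the remaining space must be computed from the current length, and strncpy() leaves the destination unterminated when the source is at least as long as the limit. A stand-alone sketch of the corrected pattern (buffer contents and names are illustrative):

/* Sketch only: the bounded-append pattern used by the fix above.
 * PATH_MAX-sized buffers and the strings appended are illustrative. */
#include <limits.h>
#include <stdio.h>
#include <string.h>

#ifndef PATH_MAX
#define PATH_MAX 4096
#endif

int main(void)
{
	char expanded[PATH_MAX + 1] = "";
	char new_location[PATH_MAX + 1];

	/* strncat's third argument limits the appended characters, so pass the
	 * space actually left rather than the full buffer size */
	strncat(expanded, "/usr/lib", PATH_MAX - strlen(expanded));
	strncat(expanded, "/firmware.bin", PATH_MAX - strlen(expanded));

	/* strncpy may leave the destination unterminated; terminate explicitly */
	strncpy(new_location, expanded, PATH_MAX);
	new_location[PATH_MAX] = 0;

	printf("%s\n", new_location);
	return 0;
}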
76663 diff -urNp linux-2.6.32.45/virt/kvm/kvm_main.c linux-2.6.32.45/virt/kvm/kvm_main.c
76664 --- linux-2.6.32.45/virt/kvm/kvm_main.c 2011-03-27 14:31:47.000000000 -0400
76665 +++ linux-2.6.32.45/virt/kvm/kvm_main.c 2011-08-05 20:33:55.000000000 -0400
76666 @@ -2494,7 +2494,7 @@ asmlinkage void kvm_handle_fault_on_rebo
76667 if (kvm_rebooting)
76668 /* spin while reset goes on */
76669 while (true)
76670 - ;
76671 + cpu_relax();
76672 /* Fault while not rebooting. We want the trace. */
76673 BUG();
76674 }
76675 @@ -2714,7 +2714,7 @@ static void kvm_sched_out(struct preempt
76676 kvm_arch_vcpu_put(vcpu);
76677 }
76678
76679 -int kvm_init(void *opaque, unsigned int vcpu_size,
76680 +int kvm_init(const void *opaque, unsigned int vcpu_size,
76681 struct module *module)
76682 {
76683 int r;
76684 @@ -2767,15 +2767,17 @@ int kvm_init(void *opaque, unsigned int
76685 /* A kmem cache lets us meet the alignment requirements of fx_save. */
76686 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
76687 __alignof__(struct kvm_vcpu),
76688 - 0, NULL);
76689 + SLAB_USERCOPY, NULL);
76690 if (!kvm_vcpu_cache) {
76691 r = -ENOMEM;
76692 goto out_free_5;
76693 }
76694
76695 - kvm_chardev_ops.owner = module;
76696 - kvm_vm_fops.owner = module;
76697 - kvm_vcpu_fops.owner = module;
76698 + pax_open_kernel();
76699 + *(void **)&kvm_chardev_ops.owner = module;
76700 + *(void **)&kvm_vm_fops.owner = module;
76701 + *(void **)&kvm_vcpu_fops.owner = module;
76702 + pax_close_kernel();
76703
76704 r = misc_register(&kvm_dev);
76705 if (r) {