grsecurity-2.2.2-2.6.32.44-201108091835.patch
1 diff -urNp linux-2.6.32.44/arch/alpha/include/asm/elf.h linux-2.6.32.44/arch/alpha/include/asm/elf.h
2 --- linux-2.6.32.44/arch/alpha/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3 +++ linux-2.6.32.44/arch/alpha/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
4 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8 +#ifdef CONFIG_PAX_ASLR
9 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10 +
11 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13 +#endif
14 +
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18 diff -urNp linux-2.6.32.44/arch/alpha/include/asm/pgtable.h linux-2.6.32.44/arch/alpha/include/asm/pgtable.h
19 --- linux-2.6.32.44/arch/alpha/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
20 +++ linux-2.6.32.44/arch/alpha/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
21 @@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25 +
26 +#ifdef CONFIG_PAX_PAGEEXEC
27 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30 +#else
31 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
32 +# define PAGE_COPY_NOEXEC PAGE_COPY
33 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
34 +#endif
35 +
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39 diff -urNp linux-2.6.32.44/arch/alpha/kernel/module.c linux-2.6.32.44/arch/alpha/kernel/module.c
40 --- linux-2.6.32.44/arch/alpha/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
41 +++ linux-2.6.32.44/arch/alpha/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
42 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46 - gp = (u64)me->module_core + me->core_size - 0x8000;
47 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51 diff -urNp linux-2.6.32.44/arch/alpha/kernel/osf_sys.c linux-2.6.32.44/arch/alpha/kernel/osf_sys.c
52 --- linux-2.6.32.44/arch/alpha/kernel/osf_sys.c 2011-08-09 18:35:28.000000000 -0400
53 +++ linux-2.6.32.44/arch/alpha/kernel/osf_sys.c 2011-06-13 17:19:47.000000000 -0400
54 @@ -1172,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58 - if (!vma || addr + len <= vma->vm_start)
59 + if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63 @@ -1208,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67 +#ifdef CONFIG_PAX_RANDMMAP
68 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69 +#endif
70 +
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74 @@ -1215,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79 - len, limit);
80 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81 +
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85 diff -urNp linux-2.6.32.44/arch/alpha/mm/fault.c linux-2.6.32.44/arch/alpha/mm/fault.c
86 --- linux-2.6.32.44/arch/alpha/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
87 +++ linux-2.6.32.44/arch/alpha/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
88 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92 +#ifdef CONFIG_PAX_PAGEEXEC
93 +/*
94 + * PaX: decide what to do with offenders (regs->pc = fault address)
95 + *
96 + * returns 1 when task should be killed
97 + * 2 when patched PLT trampoline was detected
98 + * 3 when unpatched PLT trampoline was detected
99 + */
100 +static int pax_handle_fetch_fault(struct pt_regs *regs)
101 +{
102 +
103 +#ifdef CONFIG_PAX_EMUPLT
104 + int err;
105 +
106 + do { /* PaX: patched PLT emulation #1 */
107 + unsigned int ldah, ldq, jmp;
108 +
109 + err = get_user(ldah, (unsigned int *)regs->pc);
110 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112 +
113 + if (err)
114 + break;
115 +
116 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118 + jmp == 0x6BFB0000U)
119 + {
120 + unsigned long r27, addr;
121 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123 +
124 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125 + err = get_user(r27, (unsigned long *)addr);
126 + if (err)
127 + break;
128 +
129 + regs->r27 = r27;
130 + regs->pc = r27;
131 + return 2;
132 + }
133 + } while (0);
134 +
135 + do { /* PaX: patched PLT emulation #2 */
136 + unsigned int ldah, lda, br;
137 +
138 + err = get_user(ldah, (unsigned int *)regs->pc);
139 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
140 + err |= get_user(br, (unsigned int *)(regs->pc+8));
141 +
142 + if (err)
143 + break;
144 +
145 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
147 + (br & 0xFFE00000U) == 0xC3E00000U)
148 + {
149 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152 +
153 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155 + return 2;
156 + }
157 + } while (0);
158 +
159 + do { /* PaX: unpatched PLT emulation */
160 + unsigned int br;
161 +
162 + err = get_user(br, (unsigned int *)regs->pc);
163 +
164 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165 + unsigned int br2, ldq, nop, jmp;
166 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167 +
168 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169 + err = get_user(br2, (unsigned int *)addr);
170 + err |= get_user(ldq, (unsigned int *)(addr+4));
171 + err |= get_user(nop, (unsigned int *)(addr+8));
172 + err |= get_user(jmp, (unsigned int *)(addr+12));
173 + err |= get_user(resolver, (unsigned long *)(addr+16));
174 +
175 + if (err)
176 + break;
177 +
178 + if (br2 == 0xC3600000U &&
179 + ldq == 0xA77B000CU &&
180 + nop == 0x47FF041FU &&
181 + jmp == 0x6B7B0000U)
182 + {
183 + regs->r28 = regs->pc+4;
184 + regs->r27 = addr+16;
185 + regs->pc = resolver;
186 + return 3;
187 + }
188 + }
189 + } while (0);
190 +#endif
191 +
192 + return 1;
193 +}
194 +
195 +void pax_report_insns(void *pc, void *sp)
196 +{
197 + unsigned long i;
198 +
199 + printk(KERN_ERR "PAX: bytes at PC: ");
200 + for (i = 0; i < 5; i++) {
201 + unsigned int c;
202 + if (get_user(c, (unsigned int *)pc+i))
203 + printk(KERN_CONT "???????? ");
204 + else
205 + printk(KERN_CONT "%08x ", c);
206 + }
207 + printk("\n");
208 +}
209 +#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217 - if (!(vma->vm_flags & VM_EXEC))
218 + if (!(vma->vm_flags & VM_EXEC)) {
219 +
220 +#ifdef CONFIG_PAX_PAGEEXEC
221 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222 + goto bad_area;
223 +
224 + up_read(&mm->mmap_sem);
225 + switch (pax_handle_fetch_fault(regs)) {
226 +
227 +#ifdef CONFIG_PAX_EMUPLT
228 + case 2:
229 + case 3:
230 + return;
231 +#endif
232 +
233 + }
234 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235 + do_group_exit(SIGKILL);
236 +#else
237 goto bad_area;
238 +#endif
239 +
240 + }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244 diff -urNp linux-2.6.32.44/arch/arm/include/asm/elf.h linux-2.6.32.44/arch/arm/include/asm/elf.h
245 --- linux-2.6.32.44/arch/arm/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
246 +++ linux-2.6.32.44/arch/arm/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
247 @@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253 +
254 +#ifdef CONFIG_PAX_ASLR
255 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256 +
257 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259 +#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263 diff -urNp linux-2.6.32.44/arch/arm/include/asm/kmap_types.h linux-2.6.32.44/arch/arm/include/asm/kmap_types.h
264 --- linux-2.6.32.44/arch/arm/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
265 +++ linux-2.6.32.44/arch/arm/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
266 @@ -19,6 +19,7 @@ enum km_type {
267 KM_SOFTIRQ0,
268 KM_SOFTIRQ1,
269 KM_L2_CACHE,
270 + KM_CLEARPAGE,
271 KM_TYPE_NR
272 };
273
274 diff -urNp linux-2.6.32.44/arch/arm/include/asm/uaccess.h linux-2.6.32.44/arch/arm/include/asm/uaccess.h
275 --- linux-2.6.32.44/arch/arm/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
276 +++ linux-2.6.32.44/arch/arm/include/asm/uaccess.h 2011-06-29 21:02:24.000000000 -0400
277 @@ -22,6 +22,8 @@
278 #define VERIFY_READ 0
279 #define VERIFY_WRITE 1
280
281 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
282 +
283 /*
284 * The exception table consists of pairs of addresses: the first is the
285 * address of an instruction that is allowed to fault, and the second is
286 @@ -387,8 +389,23 @@ do { \
287
288
289 #ifdef CONFIG_MMU
290 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
291 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
292 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
293 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
294 +
295 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
296 +{
297 + if (!__builtin_constant_p(n))
298 + check_object_size(to, n, false);
299 + return ___copy_from_user(to, from, n);
300 +}
301 +
302 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
303 +{
304 + if (!__builtin_constant_p(n))
305 + check_object_size(from, n, true);
306 + return ___copy_to_user(to, from, n);
307 +}
308 +
309 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
310 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
311 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
312 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
313
314 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
315 {
316 + if ((long)n < 0)
317 + return n;
318 +
319 if (access_ok(VERIFY_READ, from, n))
320 n = __copy_from_user(to, from, n);
321 else /* security hole - plug it */
322 @@ -412,6 +432,9 @@ static inline unsigned long __must_check
323
324 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
325 {
326 + if ((long)n < 0)
327 + return n;
328 +
329 if (access_ok(VERIFY_WRITE, to, n))
330 n = __copy_to_user(to, from, n);
331 return n;
332 diff -urNp linux-2.6.32.44/arch/arm/kernel/armksyms.c linux-2.6.32.44/arch/arm/kernel/armksyms.c
333 --- linux-2.6.32.44/arch/arm/kernel/armksyms.c 2011-03-27 14:31:47.000000000 -0400
334 +++ linux-2.6.32.44/arch/arm/kernel/armksyms.c 2011-07-06 19:51:50.000000000 -0400
335 @@ -118,8 +118,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
336 #ifdef CONFIG_MMU
337 EXPORT_SYMBOL(copy_page);
338
339 -EXPORT_SYMBOL(__copy_from_user);
340 -EXPORT_SYMBOL(__copy_to_user);
341 +EXPORT_SYMBOL(___copy_from_user);
342 +EXPORT_SYMBOL(___copy_to_user);
343 EXPORT_SYMBOL(__clear_user);
344
345 EXPORT_SYMBOL(__get_user_1);
346 diff -urNp linux-2.6.32.44/arch/arm/kernel/kgdb.c linux-2.6.32.44/arch/arm/kernel/kgdb.c
347 --- linux-2.6.32.44/arch/arm/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
348 +++ linux-2.6.32.44/arch/arm/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
349 @@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
350 * and we handle the normal undef case within the do_undefinstr
351 * handler.
352 */
353 -struct kgdb_arch arch_kgdb_ops = {
354 +const struct kgdb_arch arch_kgdb_ops = {
355 #ifndef __ARMEB__
356 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
357 #else /* ! __ARMEB__ */
358 diff -urNp linux-2.6.32.44/arch/arm/kernel/traps.c linux-2.6.32.44/arch/arm/kernel/traps.c
359 --- linux-2.6.32.44/arch/arm/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
360 +++ linux-2.6.32.44/arch/arm/kernel/traps.c 2011-06-13 21:31:18.000000000 -0400
361 @@ -247,6 +247,8 @@ static void __die(const char *str, int e
362
363 DEFINE_SPINLOCK(die_lock);
364
365 +extern void gr_handle_kernel_exploit(void);
366 +
367 /*
368 * This function is protected against re-entrancy.
369 */
370 @@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, str
371 if (panic_on_oops)
372 panic("Fatal exception");
373
374 + gr_handle_kernel_exploit();
375 +
376 do_exit(SIGSEGV);
377 }
378
379 diff -urNp linux-2.6.32.44/arch/arm/lib/copy_from_user.S linux-2.6.32.44/arch/arm/lib/copy_from_user.S
380 --- linux-2.6.32.44/arch/arm/lib/copy_from_user.S 2011-03-27 14:31:47.000000000 -0400
381 +++ linux-2.6.32.44/arch/arm/lib/copy_from_user.S 2011-06-29 20:48:38.000000000 -0400
382 @@ -16,7 +16,7 @@
383 /*
384 * Prototype:
385 *
386 - * size_t __copy_from_user(void *to, const void *from, size_t n)
387 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
388 *
389 * Purpose:
390 *
391 @@ -84,11 +84,11 @@
392
393 .text
394
395 -ENTRY(__copy_from_user)
396 +ENTRY(___copy_from_user)
397
398 #include "copy_template.S"
399
400 -ENDPROC(__copy_from_user)
401 +ENDPROC(___copy_from_user)
402
403 .section .fixup,"ax"
404 .align 0
405 diff -urNp linux-2.6.32.44/arch/arm/lib/copy_to_user.S linux-2.6.32.44/arch/arm/lib/copy_to_user.S
406 --- linux-2.6.32.44/arch/arm/lib/copy_to_user.S 2011-03-27 14:31:47.000000000 -0400
407 +++ linux-2.6.32.44/arch/arm/lib/copy_to_user.S 2011-06-29 20:46:49.000000000 -0400
408 @@ -16,7 +16,7 @@
409 /*
410 * Prototype:
411 *
412 - * size_t __copy_to_user(void *to, const void *from, size_t n)
413 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
414 *
415 * Purpose:
416 *
417 @@ -88,11 +88,11 @@
418 .text
419
420 ENTRY(__copy_to_user_std)
421 -WEAK(__copy_to_user)
422 +WEAK(___copy_to_user)
423
424 #include "copy_template.S"
425
426 -ENDPROC(__copy_to_user)
427 +ENDPROC(___copy_to_user)
428
429 .section .fixup,"ax"
430 .align 0
431 diff -urNp linux-2.6.32.44/arch/arm/lib/uaccess.S linux-2.6.32.44/arch/arm/lib/uaccess.S
432 --- linux-2.6.32.44/arch/arm/lib/uaccess.S 2011-03-27 14:31:47.000000000 -0400
433 +++ linux-2.6.32.44/arch/arm/lib/uaccess.S 2011-06-29 20:48:53.000000000 -0400
434 @@ -19,7 +19,7 @@
435
436 #define PAGE_SHIFT 12
437
438 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
439 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
440 * Purpose : copy a block to user memory from kernel memory
441 * Params : to - user memory
442 * : from - kernel memory
443 @@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fau
444 sub r2, r2, ip
445 b .Lc2u_dest_aligned
446
447 -ENTRY(__copy_to_user)
448 +ENTRY(___copy_to_user)
449 stmfd sp!, {r2, r4 - r7, lr}
450 cmp r2, #4
451 blt .Lc2u_not_enough
452 @@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fau
453 ldrgtb r3, [r1], #0
454 USER( strgtbt r3, [r0], #1) @ May fault
455 b .Lc2u_finished
456 -ENDPROC(__copy_to_user)
457 +ENDPROC(___copy_to_user)
458
459 .section .fixup,"ax"
460 .align 0
461 9001: ldmfd sp!, {r0, r4 - r7, pc}
462 .previous
463
464 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
465 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
466 * Purpose : copy a block from user memory to kernel memory
467 * Params : to - kernel memory
468 * : from - user memory
469 @@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fau
470 sub r2, r2, ip
471 b .Lcfu_dest_aligned
472
473 -ENTRY(__copy_from_user)
474 +ENTRY(___copy_from_user)
475 stmfd sp!, {r0, r2, r4 - r7, lr}
476 cmp r2, #4
477 blt .Lcfu_not_enough
478 @@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fau
479 USER( ldrgtbt r3, [r1], #1) @ May fault
480 strgtb r3, [r0], #1
481 b .Lcfu_finished
482 -ENDPROC(__copy_from_user)
483 +ENDPROC(___copy_from_user)
484
485 .section .fixup,"ax"
486 .align 0
487 diff -urNp linux-2.6.32.44/arch/arm/lib/uaccess_with_memcpy.c linux-2.6.32.44/arch/arm/lib/uaccess_with_memcpy.c
488 --- linux-2.6.32.44/arch/arm/lib/uaccess_with_memcpy.c 2011-03-27 14:31:47.000000000 -0400
489 +++ linux-2.6.32.44/arch/arm/lib/uaccess_with_memcpy.c 2011-06-29 20:44:35.000000000 -0400
490 @@ -97,7 +97,7 @@ out:
491 }
492
493 unsigned long
494 -__copy_to_user(void __user *to, const void *from, unsigned long n)
495 +___copy_to_user(void __user *to, const void *from, unsigned long n)
496 {
497 /*
498 * This test is stubbed out of the main function above to keep
499 diff -urNp linux-2.6.32.44/arch/arm/mach-at91/pm.c linux-2.6.32.44/arch/arm/mach-at91/pm.c
500 --- linux-2.6.32.44/arch/arm/mach-at91/pm.c 2011-03-27 14:31:47.000000000 -0400
501 +++ linux-2.6.32.44/arch/arm/mach-at91/pm.c 2011-04-17 15:56:45.000000000 -0400
502 @@ -348,7 +348,7 @@ static void at91_pm_end(void)
503 }
504
505
506 -static struct platform_suspend_ops at91_pm_ops ={
507 +static const struct platform_suspend_ops at91_pm_ops ={
508 .valid = at91_pm_valid_state,
509 .begin = at91_pm_begin,
510 .enter = at91_pm_enter,
511 diff -urNp linux-2.6.32.44/arch/arm/mach-omap1/pm.c linux-2.6.32.44/arch/arm/mach-omap1/pm.c
512 --- linux-2.6.32.44/arch/arm/mach-omap1/pm.c 2011-03-27 14:31:47.000000000 -0400
513 +++ linux-2.6.32.44/arch/arm/mach-omap1/pm.c 2011-04-17 15:56:45.000000000 -0400
514 @@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq
515
516
517
518 -static struct platform_suspend_ops omap_pm_ops ={
519 +static const struct platform_suspend_ops omap_pm_ops ={
520 .prepare = omap_pm_prepare,
521 .enter = omap_pm_enter,
522 .finish = omap_pm_finish,
523 diff -urNp linux-2.6.32.44/arch/arm/mach-omap2/pm24xx.c linux-2.6.32.44/arch/arm/mach-omap2/pm24xx.c
524 --- linux-2.6.32.44/arch/arm/mach-omap2/pm24xx.c 2011-03-27 14:31:47.000000000 -0400
525 +++ linux-2.6.32.44/arch/arm/mach-omap2/pm24xx.c 2011-04-17 15:56:45.000000000 -0400
526 @@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
527 enable_hlt();
528 }
529
530 -static struct platform_suspend_ops omap_pm_ops = {
531 +static const struct platform_suspend_ops omap_pm_ops = {
532 .prepare = omap2_pm_prepare,
533 .enter = omap2_pm_enter,
534 .finish = omap2_pm_finish,
535 diff -urNp linux-2.6.32.44/arch/arm/mach-omap2/pm34xx.c linux-2.6.32.44/arch/arm/mach-omap2/pm34xx.c
536 --- linux-2.6.32.44/arch/arm/mach-omap2/pm34xx.c 2011-03-27 14:31:47.000000000 -0400
537 +++ linux-2.6.32.44/arch/arm/mach-omap2/pm34xx.c 2011-04-17 15:56:45.000000000 -0400
538 @@ -401,7 +401,7 @@ static void omap3_pm_end(void)
539 return;
540 }
541
542 -static struct platform_suspend_ops omap_pm_ops = {
543 +static const struct platform_suspend_ops omap_pm_ops = {
544 .begin = omap3_pm_begin,
545 .end = omap3_pm_end,
546 .prepare = omap3_pm_prepare,
547 diff -urNp linux-2.6.32.44/arch/arm/mach-pnx4008/pm.c linux-2.6.32.44/arch/arm/mach-pnx4008/pm.c
548 --- linux-2.6.32.44/arch/arm/mach-pnx4008/pm.c 2011-03-27 14:31:47.000000000 -0400
549 +++ linux-2.6.32.44/arch/arm/mach-pnx4008/pm.c 2011-04-17 15:56:45.000000000 -0400
550 @@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_stat
551 (state == PM_SUSPEND_MEM);
552 }
553
554 -static struct platform_suspend_ops pnx4008_pm_ops = {
555 +static const struct platform_suspend_ops pnx4008_pm_ops = {
556 .enter = pnx4008_pm_enter,
557 .valid = pnx4008_pm_valid,
558 };
559 diff -urNp linux-2.6.32.44/arch/arm/mach-pxa/pm.c linux-2.6.32.44/arch/arm/mach-pxa/pm.c
560 --- linux-2.6.32.44/arch/arm/mach-pxa/pm.c 2011-03-27 14:31:47.000000000 -0400
561 +++ linux-2.6.32.44/arch/arm/mach-pxa/pm.c 2011-04-17 15:56:45.000000000 -0400
562 @@ -95,7 +95,7 @@ void pxa_pm_finish(void)
563 pxa_cpu_pm_fns->finish();
564 }
565
566 -static struct platform_suspend_ops pxa_pm_ops = {
567 +static const struct platform_suspend_ops pxa_pm_ops = {
568 .valid = pxa_pm_valid,
569 .enter = pxa_pm_enter,
570 .prepare = pxa_pm_prepare,
571 diff -urNp linux-2.6.32.44/arch/arm/mach-pxa/sharpsl_pm.c linux-2.6.32.44/arch/arm/mach-pxa/sharpsl_pm.c
572 --- linux-2.6.32.44/arch/arm/mach-pxa/sharpsl_pm.c 2011-03-27 14:31:47.000000000 -0400
573 +++ linux-2.6.32.44/arch/arm/mach-pxa/sharpsl_pm.c 2011-04-17 15:56:45.000000000 -0400
574 @@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status
575 }
576
577 #ifdef CONFIG_PM
578 -static struct platform_suspend_ops sharpsl_pm_ops = {
579 +static const struct platform_suspend_ops sharpsl_pm_ops = {
580 .prepare = pxa_pm_prepare,
581 .finish = pxa_pm_finish,
582 .enter = corgi_pxa_pm_enter,
583 diff -urNp linux-2.6.32.44/arch/arm/mach-sa1100/pm.c linux-2.6.32.44/arch/arm/mach-sa1100/pm.c
584 --- linux-2.6.32.44/arch/arm/mach-sa1100/pm.c 2011-03-27 14:31:47.000000000 -0400
585 +++ linux-2.6.32.44/arch/arm/mach-sa1100/pm.c 2011-04-17 15:56:45.000000000 -0400
586 @@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
587 return virt_to_phys(sp);
588 }
589
590 -static struct platform_suspend_ops sa11x0_pm_ops = {
591 +static const struct platform_suspend_ops sa11x0_pm_ops = {
592 .enter = sa11x0_pm_enter,
593 .valid = suspend_valid_only_mem,
594 };
595 diff -urNp linux-2.6.32.44/arch/arm/mm/fault.c linux-2.6.32.44/arch/arm/mm/fault.c
596 --- linux-2.6.32.44/arch/arm/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
597 +++ linux-2.6.32.44/arch/arm/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
598 @@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk,
599 }
600 #endif
601
602 +#ifdef CONFIG_PAX_PAGEEXEC
603 + if (fsr & FSR_LNX_PF) {
604 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
605 + do_group_exit(SIGKILL);
606 + }
607 +#endif
608 +
609 tsk->thread.address = addr;
610 tsk->thread.error_code = fsr;
611 tsk->thread.trap_no = 14;
612 @@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsign
613 }
614 #endif /* CONFIG_MMU */
615
616 +#ifdef CONFIG_PAX_PAGEEXEC
617 +void pax_report_insns(void *pc, void *sp)
618 +{
619 + long i;
620 +
621 + printk(KERN_ERR "PAX: bytes at PC: ");
622 + for (i = 0; i < 20; i++) {
623 + unsigned char c;
624 + if (get_user(c, (__force unsigned char __user *)pc+i))
625 + printk(KERN_CONT "?? ");
626 + else
627 + printk(KERN_CONT "%02x ", c);
628 + }
629 + printk("\n");
630 +
631 + printk(KERN_ERR "PAX: bytes at SP-4: ");
632 + for (i = -1; i < 20; i++) {
633 + unsigned long c;
634 + if (get_user(c, (__force unsigned long __user *)sp+i))
635 + printk(KERN_CONT "???????? ");
636 + else
637 + printk(KERN_CONT "%08lx ", c);
638 + }
639 + printk("\n");
640 +}
641 +#endif
642 +
643 /*
644 * First Level Translation Fault Handler
645 *
646 diff -urNp linux-2.6.32.44/arch/arm/mm/mmap.c linux-2.6.32.44/arch/arm/mm/mmap.c
647 --- linux-2.6.32.44/arch/arm/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
648 +++ linux-2.6.32.44/arch/arm/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
649 @@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp
650 if (len > TASK_SIZE)
651 return -ENOMEM;
652
653 +#ifdef CONFIG_PAX_RANDMMAP
654 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
655 +#endif
656 +
657 if (addr) {
658 if (do_align)
659 addr = COLOUR_ALIGN(addr, pgoff);
660 @@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp
661 addr = PAGE_ALIGN(addr);
662
663 vma = find_vma(mm, addr);
664 - if (TASK_SIZE - len >= addr &&
665 - (!vma || addr + len <= vma->vm_start))
666 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
667 return addr;
668 }
669 if (len > mm->cached_hole_size) {
670 - start_addr = addr = mm->free_area_cache;
671 + start_addr = addr = mm->free_area_cache;
672 } else {
673 - start_addr = addr = TASK_UNMAPPED_BASE;
674 - mm->cached_hole_size = 0;
675 + start_addr = addr = mm->mmap_base;
676 + mm->cached_hole_size = 0;
677 }
678
679 full_search:
680 @@ -94,14 +97,14 @@ full_search:
681 * Start a new search - just in case we missed
682 * some holes.
683 */
684 - if (start_addr != TASK_UNMAPPED_BASE) {
685 - start_addr = addr = TASK_UNMAPPED_BASE;
686 + if (start_addr != mm->mmap_base) {
687 + start_addr = addr = mm->mmap_base;
688 mm->cached_hole_size = 0;
689 goto full_search;
690 }
691 return -ENOMEM;
692 }
693 - if (!vma || addr + len <= vma->vm_start) {
694 + if (check_heap_stack_gap(vma, addr, len)) {
695 /*
696 * Remember the place where we stopped the search:
697 */
698 diff -urNp linux-2.6.32.44/arch/arm/plat-s3c/pm.c linux-2.6.32.44/arch/arm/plat-s3c/pm.c
699 --- linux-2.6.32.44/arch/arm/plat-s3c/pm.c 2011-03-27 14:31:47.000000000 -0400
700 +++ linux-2.6.32.44/arch/arm/plat-s3c/pm.c 2011-04-17 15:56:45.000000000 -0400
701 @@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
702 s3c_pm_check_cleanup();
703 }
704
705 -static struct platform_suspend_ops s3c_pm_ops = {
706 +static const struct platform_suspend_ops s3c_pm_ops = {
707 .enter = s3c_pm_enter,
708 .prepare = s3c_pm_prepare,
709 .finish = s3c_pm_finish,
710 diff -urNp linux-2.6.32.44/arch/avr32/include/asm/elf.h linux-2.6.32.44/arch/avr32/include/asm/elf.h
711 --- linux-2.6.32.44/arch/avr32/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
712 +++ linux-2.6.32.44/arch/avr32/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
713 @@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpreg
714 the loader. We need to make sure that it is out of the way of the program
715 that it will "exec", and that there is sufficient room for the brk. */
716
717 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
718 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
719
720 +#ifdef CONFIG_PAX_ASLR
721 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
722 +
723 +#define PAX_DELTA_MMAP_LEN 15
724 +#define PAX_DELTA_STACK_LEN 15
725 +#endif
726
727 /* This yields a mask that user programs can use to figure out what
728 instruction set this CPU supports. This could be done in user space,
729 diff -urNp linux-2.6.32.44/arch/avr32/include/asm/kmap_types.h linux-2.6.32.44/arch/avr32/include/asm/kmap_types.h
730 --- linux-2.6.32.44/arch/avr32/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
731 +++ linux-2.6.32.44/arch/avr32/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
732 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
733 D(11) KM_IRQ1,
734 D(12) KM_SOFTIRQ0,
735 D(13) KM_SOFTIRQ1,
736 -D(14) KM_TYPE_NR
737 +D(14) KM_CLEARPAGE,
738 +D(15) KM_TYPE_NR
739 };
740
741 #undef D
742 diff -urNp linux-2.6.32.44/arch/avr32/mach-at32ap/pm.c linux-2.6.32.44/arch/avr32/mach-at32ap/pm.c
743 --- linux-2.6.32.44/arch/avr32/mach-at32ap/pm.c 2011-03-27 14:31:47.000000000 -0400
744 +++ linux-2.6.32.44/arch/avr32/mach-at32ap/pm.c 2011-04-17 15:56:45.000000000 -0400
745 @@ -176,7 +176,7 @@ out:
746 return 0;
747 }
748
749 -static struct platform_suspend_ops avr32_pm_ops = {
750 +static const struct platform_suspend_ops avr32_pm_ops = {
751 .valid = avr32_pm_valid_state,
752 .enter = avr32_pm_enter,
753 };
754 diff -urNp linux-2.6.32.44/arch/avr32/mm/fault.c linux-2.6.32.44/arch/avr32/mm/fault.c
755 --- linux-2.6.32.44/arch/avr32/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
756 +++ linux-2.6.32.44/arch/avr32/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
757 @@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
758
759 int exception_trace = 1;
760
761 +#ifdef CONFIG_PAX_PAGEEXEC
762 +void pax_report_insns(void *pc, void *sp)
763 +{
764 + unsigned long i;
765 +
766 + printk(KERN_ERR "PAX: bytes at PC: ");
767 + for (i = 0; i < 20; i++) {
768 + unsigned char c;
769 + if (get_user(c, (unsigned char *)pc+i))
770 + printk(KERN_CONT "???????? ");
771 + else
772 + printk(KERN_CONT "%02x ", c);
773 + }
774 + printk("\n");
775 +}
776 +#endif
777 +
778 /*
779 * This routine handles page faults. It determines the address and the
780 * problem, and then passes it off to one of the appropriate routines.
781 @@ -157,6 +174,16 @@ bad_area:
782 up_read(&mm->mmap_sem);
783
784 if (user_mode(regs)) {
785 +
786 +#ifdef CONFIG_PAX_PAGEEXEC
787 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
788 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
789 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
790 + do_group_exit(SIGKILL);
791 + }
792 + }
793 +#endif
794 +
795 if (exception_trace && printk_ratelimit())
796 printk("%s%s[%d]: segfault at %08lx pc %08lx "
797 "sp %08lx ecr %lu\n",
798 diff -urNp linux-2.6.32.44/arch/blackfin/kernel/kgdb.c linux-2.6.32.44/arch/blackfin/kernel/kgdb.c
799 --- linux-2.6.32.44/arch/blackfin/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
800 +++ linux-2.6.32.44/arch/blackfin/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
801 @@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vecto
802 return -1; /* this means that we do not want to exit from the handler */
803 }
804
805 -struct kgdb_arch arch_kgdb_ops = {
806 +const struct kgdb_arch arch_kgdb_ops = {
807 .gdb_bpt_instr = {0xa1},
808 #ifdef CONFIG_SMP
809 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
810 diff -urNp linux-2.6.32.44/arch/blackfin/mach-common/pm.c linux-2.6.32.44/arch/blackfin/mach-common/pm.c
811 --- linux-2.6.32.44/arch/blackfin/mach-common/pm.c 2011-03-27 14:31:47.000000000 -0400
812 +++ linux-2.6.32.44/arch/blackfin/mach-common/pm.c 2011-04-17 15:56:45.000000000 -0400
813 @@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t
814 return 0;
815 }
816
817 -struct platform_suspend_ops bfin_pm_ops = {
818 +const struct platform_suspend_ops bfin_pm_ops = {
819 .enter = bfin_pm_enter,
820 .valid = bfin_pm_valid,
821 };
822 diff -urNp linux-2.6.32.44/arch/frv/include/asm/kmap_types.h linux-2.6.32.44/arch/frv/include/asm/kmap_types.h
823 --- linux-2.6.32.44/arch/frv/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
824 +++ linux-2.6.32.44/arch/frv/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
825 @@ -23,6 +23,7 @@ enum km_type {
826 KM_IRQ1,
827 KM_SOFTIRQ0,
828 KM_SOFTIRQ1,
829 + KM_CLEARPAGE,
830 KM_TYPE_NR
831 };
832
833 diff -urNp linux-2.6.32.44/arch/frv/mm/elf-fdpic.c linux-2.6.32.44/arch/frv/mm/elf-fdpic.c
834 --- linux-2.6.32.44/arch/frv/mm/elf-fdpic.c 2011-03-27 14:31:47.000000000 -0400
835 +++ linux-2.6.32.44/arch/frv/mm/elf-fdpic.c 2011-04-17 15:56:45.000000000 -0400
836 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
837 if (addr) {
838 addr = PAGE_ALIGN(addr);
839 vma = find_vma(current->mm, addr);
840 - if (TASK_SIZE - len >= addr &&
841 - (!vma || addr + len <= vma->vm_start))
842 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
843 goto success;
844 }
845
846 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
847 for (; vma; vma = vma->vm_next) {
848 if (addr > limit)
849 break;
850 - if (addr + len <= vma->vm_start)
851 + if (check_heap_stack_gap(vma, addr, len))
852 goto success;
853 addr = vma->vm_end;
854 }
855 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
856 for (; vma; vma = vma->vm_next) {
857 if (addr > limit)
858 break;
859 - if (addr + len <= vma->vm_start)
860 + if (check_heap_stack_gap(vma, addr, len))
861 goto success;
862 addr = vma->vm_end;
863 }
864 diff -urNp linux-2.6.32.44/arch/ia64/hp/common/hwsw_iommu.c linux-2.6.32.44/arch/ia64/hp/common/hwsw_iommu.c
865 --- linux-2.6.32.44/arch/ia64/hp/common/hwsw_iommu.c 2011-03-27 14:31:47.000000000 -0400
866 +++ linux-2.6.32.44/arch/ia64/hp/common/hwsw_iommu.c 2011-04-17 15:56:45.000000000 -0400
867 @@ -17,7 +17,7 @@
868 #include <linux/swiotlb.h>
869 #include <asm/machvec.h>
870
871 -extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
872 +extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
873
874 /* swiotlb declarations & definitions: */
875 extern int swiotlb_late_init_with_default_size (size_t size);
876 @@ -33,7 +33,7 @@ static inline int use_swiotlb(struct dev
877 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
878 }
879
880 -struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
881 +const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
882 {
883 if (use_swiotlb(dev))
884 return &swiotlb_dma_ops;
885 diff -urNp linux-2.6.32.44/arch/ia64/hp/common/sba_iommu.c linux-2.6.32.44/arch/ia64/hp/common/sba_iommu.c
886 --- linux-2.6.32.44/arch/ia64/hp/common/sba_iommu.c 2011-03-27 14:31:47.000000000 -0400
887 +++ linux-2.6.32.44/arch/ia64/hp/common/sba_iommu.c 2011-04-17 15:56:45.000000000 -0400
888 @@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_d
889 },
890 };
891
892 -extern struct dma_map_ops swiotlb_dma_ops;
893 +extern const struct dma_map_ops swiotlb_dma_ops;
894
895 static int __init
896 sba_init(void)
897 @@ -2211,7 +2211,7 @@ sba_page_override(char *str)
898
899 __setup("sbapagesize=",sba_page_override);
900
901 -struct dma_map_ops sba_dma_ops = {
902 +const struct dma_map_ops sba_dma_ops = {
903 .alloc_coherent = sba_alloc_coherent,
904 .free_coherent = sba_free_coherent,
905 .map_page = sba_map_page,
906 diff -urNp linux-2.6.32.44/arch/ia64/ia32/binfmt_elf32.c linux-2.6.32.44/arch/ia64/ia32/binfmt_elf32.c
907 --- linux-2.6.32.44/arch/ia64/ia32/binfmt_elf32.c 2011-03-27 14:31:47.000000000 -0400
908 +++ linux-2.6.32.44/arch/ia64/ia32/binfmt_elf32.c 2011-04-17 15:56:45.000000000 -0400
909 @@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_
910
911 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
912
913 +#ifdef CONFIG_PAX_ASLR
914 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
915 +
916 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
917 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
918 +#endif
919 +
920 /* Ugly but avoids duplication */
921 #include "../../../fs/binfmt_elf.c"
922
923 diff -urNp linux-2.6.32.44/arch/ia64/ia32/ia32priv.h linux-2.6.32.44/arch/ia64/ia32/ia32priv.h
924 --- linux-2.6.32.44/arch/ia64/ia32/ia32priv.h 2011-03-27 14:31:47.000000000 -0400
925 +++ linux-2.6.32.44/arch/ia64/ia32/ia32priv.h 2011-04-17 15:56:45.000000000 -0400
926 @@ -296,7 +296,14 @@ typedef struct compat_siginfo {
927 #define ELF_DATA ELFDATA2LSB
928 #define ELF_ARCH EM_386
929
930 -#define IA32_STACK_TOP IA32_PAGE_OFFSET
931 +#ifdef CONFIG_PAX_RANDUSTACK
932 +#define __IA32_DELTA_STACK (current->mm->delta_stack)
933 +#else
934 +#define __IA32_DELTA_STACK 0UL
935 +#endif
936 +
937 +#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
938 +
939 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
940 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
941
942 diff -urNp linux-2.6.32.44/arch/ia64/include/asm/dma-mapping.h linux-2.6.32.44/arch/ia64/include/asm/dma-mapping.h
943 --- linux-2.6.32.44/arch/ia64/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
944 +++ linux-2.6.32.44/arch/ia64/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
945 @@ -12,7 +12,7 @@
946
947 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
948
949 -extern struct dma_map_ops *dma_ops;
950 +extern const struct dma_map_ops *dma_ops;
951 extern struct ia64_machine_vector ia64_mv;
952 extern void set_iommu_machvec(void);
953
954 @@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct d
955 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
956 dma_addr_t *daddr, gfp_t gfp)
957 {
958 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
959 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
960 void *caddr;
961
962 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
963 @@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(s
964 static inline void dma_free_coherent(struct device *dev, size_t size,
965 void *caddr, dma_addr_t daddr)
966 {
967 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
968 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
969 debug_dma_free_coherent(dev, size, caddr, daddr);
970 ops->free_coherent(dev, size, caddr, daddr);
971 }
972 @@ -49,13 +49,13 @@ static inline void dma_free_coherent(str
973
974 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
975 {
976 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
977 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
978 return ops->mapping_error(dev, daddr);
979 }
980
981 static inline int dma_supported(struct device *dev, u64 mask)
982 {
983 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
984 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
985 return ops->dma_supported(dev, mask);
986 }
987
988 diff -urNp linux-2.6.32.44/arch/ia64/include/asm/elf.h linux-2.6.32.44/arch/ia64/include/asm/elf.h
989 --- linux-2.6.32.44/arch/ia64/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
990 +++ linux-2.6.32.44/arch/ia64/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
991 @@ -43,6 +43,13 @@
992 */
993 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
994
995 +#ifdef CONFIG_PAX_ASLR
996 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
997 +
998 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
999 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1000 +#endif
1001 +
1002 #define PT_IA_64_UNWIND 0x70000001
1003
1004 /* IA-64 relocations: */
1005 diff -urNp linux-2.6.32.44/arch/ia64/include/asm/machvec.h linux-2.6.32.44/arch/ia64/include/asm/machvec.h
1006 --- linux-2.6.32.44/arch/ia64/include/asm/machvec.h 2011-03-27 14:31:47.000000000 -0400
1007 +++ linux-2.6.32.44/arch/ia64/include/asm/machvec.h 2011-04-17 15:56:45.000000000 -0400
1008 @@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event
1009 /* DMA-mapping interface: */
1010 typedef void ia64_mv_dma_init (void);
1011 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
1012 -typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1013 +typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1014
1015 /*
1016 * WARNING: The legacy I/O space is _architected_. Platforms are
1017 @@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(co
1018 # endif /* CONFIG_IA64_GENERIC */
1019
1020 extern void swiotlb_dma_init(void);
1021 -extern struct dma_map_ops *dma_get_ops(struct device *);
1022 +extern const struct dma_map_ops *dma_get_ops(struct device *);
1023
1024 /*
1025 * Define default versions so we can extend machvec for new platforms without having
1026 diff -urNp linux-2.6.32.44/arch/ia64/include/asm/pgtable.h linux-2.6.32.44/arch/ia64/include/asm/pgtable.h
1027 --- linux-2.6.32.44/arch/ia64/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1028 +++ linux-2.6.32.44/arch/ia64/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1029 @@ -12,7 +12,7 @@
1030 * David Mosberger-Tang <davidm@hpl.hp.com>
1031 */
1032
1033 -
1034 +#include <linux/const.h>
1035 #include <asm/mman.h>
1036 #include <asm/page.h>
1037 #include <asm/processor.h>
1038 @@ -143,6 +143,17 @@
1039 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1040 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1041 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1042 +
1043 +#ifdef CONFIG_PAX_PAGEEXEC
1044 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1045 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1046 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1047 +#else
1048 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1049 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1050 +# define PAGE_COPY_NOEXEC PAGE_COPY
1051 +#endif
1052 +
1053 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1054 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1055 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1056 diff -urNp linux-2.6.32.44/arch/ia64/include/asm/spinlock.h linux-2.6.32.44/arch/ia64/include/asm/spinlock.h
1057 --- linux-2.6.32.44/arch/ia64/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
1058 +++ linux-2.6.32.44/arch/ia64/include/asm/spinlock.h 2011-04-17 15:56:45.000000000 -0400
1059 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
1060 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1061
1062 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1063 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1064 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1065 }
1066
1067 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
1068 diff -urNp linux-2.6.32.44/arch/ia64/include/asm/uaccess.h linux-2.6.32.44/arch/ia64/include/asm/uaccess.h
1069 --- linux-2.6.32.44/arch/ia64/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
1070 +++ linux-2.6.32.44/arch/ia64/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
1071 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
1072 const void *__cu_from = (from); \
1073 long __cu_len = (n); \
1074 \
1075 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
1076 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1077 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1078 __cu_len; \
1079 })
1080 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
1081 long __cu_len = (n); \
1082 \
1083 __chk_user_ptr(__cu_from); \
1084 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
1085 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1086 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1087 __cu_len; \
1088 })
1089 diff -urNp linux-2.6.32.44/arch/ia64/kernel/dma-mapping.c linux-2.6.32.44/arch/ia64/kernel/dma-mapping.c
1090 --- linux-2.6.32.44/arch/ia64/kernel/dma-mapping.c 2011-03-27 14:31:47.000000000 -0400
1091 +++ linux-2.6.32.44/arch/ia64/kernel/dma-mapping.c 2011-04-17 15:56:45.000000000 -0400
1092 @@ -3,7 +3,7 @@
1093 /* Set this to 1 if there is a HW IOMMU in the system */
1094 int iommu_detected __read_mostly;
1095
1096 -struct dma_map_ops *dma_ops;
1097 +const struct dma_map_ops *dma_ops;
1098 EXPORT_SYMBOL(dma_ops);
1099
1100 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
1101 @@ -16,7 +16,7 @@ static int __init dma_init(void)
1102 }
1103 fs_initcall(dma_init);
1104
1105 -struct dma_map_ops *dma_get_ops(struct device *dev)
1106 +const struct dma_map_ops *dma_get_ops(struct device *dev)
1107 {
1108 return dma_ops;
1109 }
1110 diff -urNp linux-2.6.32.44/arch/ia64/kernel/module.c linux-2.6.32.44/arch/ia64/kernel/module.c
1111 --- linux-2.6.32.44/arch/ia64/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1112 +++ linux-2.6.32.44/arch/ia64/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1113 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
1114 void
1115 module_free (struct module *mod, void *module_region)
1116 {
1117 - if (mod && mod->arch.init_unw_table &&
1118 - module_region == mod->module_init) {
1119 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1120 unw_remove_unwind_table(mod->arch.init_unw_table);
1121 mod->arch.init_unw_table = NULL;
1122 }
1123 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
1124 }
1125
1126 static inline int
1127 +in_init_rx (const struct module *mod, uint64_t addr)
1128 +{
1129 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1130 +}
1131 +
1132 +static inline int
1133 +in_init_rw (const struct module *mod, uint64_t addr)
1134 +{
1135 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1136 +}
1137 +
1138 +static inline int
1139 in_init (const struct module *mod, uint64_t addr)
1140 {
1141 - return addr - (uint64_t) mod->module_init < mod->init_size;
1142 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1143 +}
1144 +
1145 +static inline int
1146 +in_core_rx (const struct module *mod, uint64_t addr)
1147 +{
1148 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1149 +}
1150 +
1151 +static inline int
1152 +in_core_rw (const struct module *mod, uint64_t addr)
1153 +{
1154 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1155 }
1156
1157 static inline int
1158 in_core (const struct module *mod, uint64_t addr)
1159 {
1160 - return addr - (uint64_t) mod->module_core < mod->core_size;
1161 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1162 }
1163
1164 static inline int
1165 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
1166 break;
1167
1168 case RV_BDREL:
1169 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1170 + if (in_init_rx(mod, val))
1171 + val -= (uint64_t) mod->module_init_rx;
1172 + else if (in_init_rw(mod, val))
1173 + val -= (uint64_t) mod->module_init_rw;
1174 + else if (in_core_rx(mod, val))
1175 + val -= (uint64_t) mod->module_core_rx;
1176 + else if (in_core_rw(mod, val))
1177 + val -= (uint64_t) mod->module_core_rw;
1178 break;
1179
1180 case RV_LTV:
1181 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
1182 * addresses have been selected...
1183 */
1184 uint64_t gp;
1185 - if (mod->core_size > MAX_LTOFF)
1186 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1187 /*
1188 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1189 * at the end of the module.
1190 */
1191 - gp = mod->core_size - MAX_LTOFF / 2;
1192 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1193 else
1194 - gp = mod->core_size / 2;
1195 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1196 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1197 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1198 mod->arch.gp = gp;
1199 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1200 }
1201 diff -urNp linux-2.6.32.44/arch/ia64/kernel/pci-dma.c linux-2.6.32.44/arch/ia64/kernel/pci-dma.c
1202 --- linux-2.6.32.44/arch/ia64/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
1203 +++ linux-2.6.32.44/arch/ia64/kernel/pci-dma.c 2011-04-17 15:56:45.000000000 -0400
1204 @@ -43,7 +43,7 @@ struct device fallback_dev = {
1205 .dma_mask = &fallback_dev.coherent_dma_mask,
1206 };
1207
1208 -extern struct dma_map_ops intel_dma_ops;
1209 +extern const struct dma_map_ops intel_dma_ops;
1210
1211 static int __init pci_iommu_init(void)
1212 {
1213 @@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *d
1214 }
1215 EXPORT_SYMBOL(iommu_dma_supported);
1216
1217 +extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1218 +extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1219 +extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1220 +extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1221 +extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1222 +extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1223 +extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1224 +
1225 +static const struct dma_map_ops intel_iommu_dma_ops = {
1226 + /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1227 + .alloc_coherent = intel_alloc_coherent,
1228 + .free_coherent = intel_free_coherent,
1229 + .map_sg = intel_map_sg,
1230 + .unmap_sg = intel_unmap_sg,
1231 + .map_page = intel_map_page,
1232 + .unmap_page = intel_unmap_page,
1233 + .mapping_error = intel_mapping_error,
1234 +
1235 + .sync_single_for_cpu = machvec_dma_sync_single,
1236 + .sync_sg_for_cpu = machvec_dma_sync_sg,
1237 + .sync_single_for_device = machvec_dma_sync_single,
1238 + .sync_sg_for_device = machvec_dma_sync_sg,
1239 + .dma_supported = iommu_dma_supported,
1240 +};
1241 +
1242 void __init pci_iommu_alloc(void)
1243 {
1244 - dma_ops = &intel_dma_ops;
1245 -
1246 - dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1247 - dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1248 - dma_ops->sync_single_for_device = machvec_dma_sync_single;
1249 - dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1250 - dma_ops->dma_supported = iommu_dma_supported;
1251 + dma_ops = &intel_iommu_dma_ops;
1252
1253 /*
1254 * The order of these functions is important for
1255 diff -urNp linux-2.6.32.44/arch/ia64/kernel/pci-swiotlb.c linux-2.6.32.44/arch/ia64/kernel/pci-swiotlb.c
1256 --- linux-2.6.32.44/arch/ia64/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
1257 +++ linux-2.6.32.44/arch/ia64/kernel/pci-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
1258 @@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent
1259 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1260 }
1261
1262 -struct dma_map_ops swiotlb_dma_ops = {
1263 +const struct dma_map_ops swiotlb_dma_ops = {
1264 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1265 .free_coherent = swiotlb_free_coherent,
1266 .map_page = swiotlb_map_page,
1267 diff -urNp linux-2.6.32.44/arch/ia64/kernel/sys_ia64.c linux-2.6.32.44/arch/ia64/kernel/sys_ia64.c
1268 --- linux-2.6.32.44/arch/ia64/kernel/sys_ia64.c 2011-03-27 14:31:47.000000000 -0400
1269 +++ linux-2.6.32.44/arch/ia64/kernel/sys_ia64.c 2011-04-17 15:56:45.000000000 -0400
1270 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
1271 if (REGION_NUMBER(addr) == RGN_HPAGE)
1272 addr = 0;
1273 #endif
1274 +
1275 +#ifdef CONFIG_PAX_RANDMMAP
1276 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1277 + addr = mm->free_area_cache;
1278 + else
1279 +#endif
1280 +
1281 if (!addr)
1282 addr = mm->free_area_cache;
1283
1284 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
1285 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1286 /* At this point: (!vma || addr < vma->vm_end). */
1287 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1288 - if (start_addr != TASK_UNMAPPED_BASE) {
1289 + if (start_addr != mm->mmap_base) {
1290 /* Start a new search --- just in case we missed some holes. */
1291 - addr = TASK_UNMAPPED_BASE;
1292 + addr = mm->mmap_base;
1293 goto full_search;
1294 }
1295 return -ENOMEM;
1296 }
1297 - if (!vma || addr + len <= vma->vm_start) {
1298 + if (check_heap_stack_gap(vma, addr, len)) {
1299 /* Remember the address where we stopped this search: */
1300 mm->free_area_cache = addr + len;
1301 return addr;
1302 diff -urNp linux-2.6.32.44/arch/ia64/kernel/topology.c linux-2.6.32.44/arch/ia64/kernel/topology.c
1303 --- linux-2.6.32.44/arch/ia64/kernel/topology.c 2011-03-27 14:31:47.000000000 -0400
1304 +++ linux-2.6.32.44/arch/ia64/kernel/topology.c 2011-04-17 15:56:45.000000000 -0400
1305 @@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject
1306 return ret;
1307 }
1308
1309 -static struct sysfs_ops cache_sysfs_ops = {
1310 +static const struct sysfs_ops cache_sysfs_ops = {
1311 .show = cache_show
1312 };
1313
1314 diff -urNp linux-2.6.32.44/arch/ia64/kernel/vmlinux.lds.S linux-2.6.32.44/arch/ia64/kernel/vmlinux.lds.S
1315 --- linux-2.6.32.44/arch/ia64/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
1316 +++ linux-2.6.32.44/arch/ia64/kernel/vmlinux.lds.S 2011-04-17 15:56:45.000000000 -0400
1317 @@ -190,7 +190,7 @@ SECTIONS
1318 /* Per-cpu data: */
1319 . = ALIGN(PERCPU_PAGE_SIZE);
1320 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1321 - __phys_per_cpu_start = __per_cpu_load;
1322 + __phys_per_cpu_start = per_cpu_load;
1323 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1324 * into percpu page size
1325 */
1326 diff -urNp linux-2.6.32.44/arch/ia64/mm/fault.c linux-2.6.32.44/arch/ia64/mm/fault.c
1327 --- linux-2.6.32.44/arch/ia64/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1328 +++ linux-2.6.32.44/arch/ia64/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1329 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
1330 return pte_present(pte);
1331 }
1332
1333 +#ifdef CONFIG_PAX_PAGEEXEC
1334 +void pax_report_insns(void *pc, void *sp)
1335 +{
1336 + unsigned long i;
1337 +
1338 + printk(KERN_ERR "PAX: bytes at PC: ");
1339 + for (i = 0; i < 8; i++) {
1340 + unsigned int c;
1341 + if (get_user(c, (unsigned int *)pc+i))
1342 + printk(KERN_CONT "???????? ");
1343 + else
1344 + printk(KERN_CONT "%08x ", c);
1345 + }
1346 + printk("\n");
1347 +}
1348 +#endif
1349 +
1350 void __kprobes
1351 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1352 {
1353 @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
1354 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1355 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1356
1357 - if ((vma->vm_flags & mask) != mask)
1358 + if ((vma->vm_flags & mask) != mask) {
1359 +
1360 +#ifdef CONFIG_PAX_PAGEEXEC
1361 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1362 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1363 + goto bad_area;
1364 +
1365 + up_read(&mm->mmap_sem);
1366 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1367 + do_group_exit(SIGKILL);
1368 + }
1369 +#endif
1370 +
1371 goto bad_area;
1372
1373 + }
1374 +
1375 survive:
1376 /*
1377 * If for any reason at all we couldn't handle the fault, make
1378 diff -urNp linux-2.6.32.44/arch/ia64/mm/hugetlbpage.c linux-2.6.32.44/arch/ia64/mm/hugetlbpage.c
1379 --- linux-2.6.32.44/arch/ia64/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
1380 +++ linux-2.6.32.44/arch/ia64/mm/hugetlbpage.c 2011-04-17 15:56:45.000000000 -0400
1381 @@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(
1382 /* At this point: (!vmm || addr < vmm->vm_end). */
1383 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1384 return -ENOMEM;
1385 - if (!vmm || (addr + len) <= vmm->vm_start)
1386 + if (check_heap_stack_gap(vmm, addr, len))
1387 return addr;
1388 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1389 }
1390 diff -urNp linux-2.6.32.44/arch/ia64/mm/init.c linux-2.6.32.44/arch/ia64/mm/init.c
1391 --- linux-2.6.32.44/arch/ia64/mm/init.c 2011-03-27 14:31:47.000000000 -0400
1392 +++ linux-2.6.32.44/arch/ia64/mm/init.c 2011-04-17 15:56:45.000000000 -0400
1393 @@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1394 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1395 vma->vm_end = vma->vm_start + PAGE_SIZE;
1396 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1397 +
1398 +#ifdef CONFIG_PAX_PAGEEXEC
1399 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1400 + vma->vm_flags &= ~VM_EXEC;
1401 +
1402 +#ifdef CONFIG_PAX_MPROTECT
1403 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1404 + vma->vm_flags &= ~VM_MAYEXEC;
1405 +#endif
1406 +
1407 + }
1408 +#endif
1409 +
1410 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1411 down_write(&current->mm->mmap_sem);
1412 if (insert_vm_struct(current->mm, vma)) {
1413 diff -urNp linux-2.6.32.44/arch/ia64/sn/pci/pci_dma.c linux-2.6.32.44/arch/ia64/sn/pci/pci_dma.c
1414 --- linux-2.6.32.44/arch/ia64/sn/pci/pci_dma.c 2011-03-27 14:31:47.000000000 -0400
1415 +++ linux-2.6.32.44/arch/ia64/sn/pci/pci_dma.c 2011-04-17 15:56:45.000000000 -0400
1416 @@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *
1417 return ret;
1418 }
1419
1420 -static struct dma_map_ops sn_dma_ops = {
1421 +static const struct dma_map_ops sn_dma_ops = {
1422 .alloc_coherent = sn_dma_alloc_coherent,
1423 .free_coherent = sn_dma_free_coherent,
1424 .map_page = sn_dma_map_page,
1425 diff -urNp linux-2.6.32.44/arch/m32r/lib/usercopy.c linux-2.6.32.44/arch/m32r/lib/usercopy.c
1426 --- linux-2.6.32.44/arch/m32r/lib/usercopy.c 2011-03-27 14:31:47.000000000 -0400
1427 +++ linux-2.6.32.44/arch/m32r/lib/usercopy.c 2011-04-17 15:56:45.000000000 -0400
1428 @@ -14,6 +14,9 @@
1429 unsigned long
1430 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1431 {
1432 + if ((long)n < 0)
1433 + return n;
1434 +
1435 prefetch(from);
1436 if (access_ok(VERIFY_WRITE, to, n))
1437 __copy_user(to,from,n);
1438 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1439 unsigned long
1440 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1441 {
1442 + if ((long)n < 0)
1443 + return n;
1444 +
1445 prefetchw(to);
1446 if (access_ok(VERIFY_READ, from, n))
1447 __copy_user_zeroing(to,from,n);
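[editor's note] The m32r hunk above rejects any length whose signed interpretation is negative before access_ok() and the copy run, which catches size computations that underflowed to a huge unsigned value. A plain user-space sketch of that guard; guarded_copy() is illustrative, not a kernel API, and returns the number of bytes left uncopied like the kernel helpers do:

#include <stdio.h>
#include <string.h>

static unsigned long guarded_copy(void *to, const void *from, unsigned long n)
{
        if ((long)n < 0)        /* underflowed / absurd length: copy nothing */
                return n;
        memcpy(to, from, n);
        return 0;               /* 0 bytes left uncopied */
}

int main(void)
{
        char src[16] = "hello", dst[16] = "";
        unsigned long bad = (unsigned long)-4;   /* e.g. a "len - 8" gone wrong */

        printf("good copy, left over: %lu\n", guarded_copy(dst, src, 6));
        printf("bad copy,  left over: %lu\n", guarded_copy(dst, src, bad));
        return 0;
}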
1448 diff -urNp linux-2.6.32.44/arch/mips/alchemy/devboards/pm.c linux-2.6.32.44/arch/mips/alchemy/devboards/pm.c
1449 --- linux-2.6.32.44/arch/mips/alchemy/devboards/pm.c 2011-03-27 14:31:47.000000000 -0400
1450 +++ linux-2.6.32.44/arch/mips/alchemy/devboards/pm.c 2011-04-17 15:56:45.000000000 -0400
1451 @@ -78,7 +78,7 @@ static void db1x_pm_end(void)
1452
1453 }
1454
1455 -static struct platform_suspend_ops db1x_pm_ops = {
1456 +static const struct platform_suspend_ops db1x_pm_ops = {
1457 .valid = suspend_valid_only_mem,
1458 .begin = db1x_pm_begin,
1459 .enter = db1x_pm_enter,
1460 diff -urNp linux-2.6.32.44/arch/mips/include/asm/elf.h linux-2.6.32.44/arch/mips/include/asm/elf.h
1461 --- linux-2.6.32.44/arch/mips/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1462 +++ linux-2.6.32.44/arch/mips/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1463 @@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_str
1464 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1465 #endif
1466
1467 +#ifdef CONFIG_PAX_ASLR
1468 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1469 +
1470 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1471 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1472 +#endif
1473 +
1474 #endif /* _ASM_ELF_H */
1475 diff -urNp linux-2.6.32.44/arch/mips/include/asm/page.h linux-2.6.32.44/arch/mips/include/asm/page.h
1476 --- linux-2.6.32.44/arch/mips/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
1477 +++ linux-2.6.32.44/arch/mips/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
1478 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1479 #ifdef CONFIG_CPU_MIPS32
1480 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1481 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1482 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1483 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1484 #else
1485 typedef struct { unsigned long long pte; } pte_t;
1486 #define pte_val(x) ((x).pte)
1487 diff -urNp linux-2.6.32.44/arch/mips/include/asm/system.h linux-2.6.32.44/arch/mips/include/asm/system.h
1488 --- linux-2.6.32.44/arch/mips/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
1489 +++ linux-2.6.32.44/arch/mips/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
1490 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1491 */
1492 #define __ARCH_WANT_UNLOCKED_CTXSW
1493
1494 -extern unsigned long arch_align_stack(unsigned long sp);
1495 +#define arch_align_stack(x) ((x) & ~0xfUL)
1496
1497 #endif /* _ASM_SYSTEM_H */
1498 diff -urNp linux-2.6.32.44/arch/mips/kernel/binfmt_elfn32.c linux-2.6.32.44/arch/mips/kernel/binfmt_elfn32.c
1499 --- linux-2.6.32.44/arch/mips/kernel/binfmt_elfn32.c 2011-03-27 14:31:47.000000000 -0400
1500 +++ linux-2.6.32.44/arch/mips/kernel/binfmt_elfn32.c 2011-04-17 15:56:45.000000000 -0400
1501 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1502 #undef ELF_ET_DYN_BASE
1503 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1504
1505 +#ifdef CONFIG_PAX_ASLR
1506 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1507 +
1508 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1509 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1510 +#endif
1511 +
1512 #include <asm/processor.h>
1513 #include <linux/module.h>
1514 #include <linux/elfcore.h>
1515 diff -urNp linux-2.6.32.44/arch/mips/kernel/binfmt_elfo32.c linux-2.6.32.44/arch/mips/kernel/binfmt_elfo32.c
1516 --- linux-2.6.32.44/arch/mips/kernel/binfmt_elfo32.c 2011-03-27 14:31:47.000000000 -0400
1517 +++ linux-2.6.32.44/arch/mips/kernel/binfmt_elfo32.c 2011-04-17 15:56:45.000000000 -0400
1518 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1519 #undef ELF_ET_DYN_BASE
1520 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1521
1522 +#ifdef CONFIG_PAX_ASLR
1523 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1524 +
1525 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1526 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1527 +#endif
1528 +
1529 #include <asm/processor.h>
1530
1531 /*
1532 diff -urNp linux-2.6.32.44/arch/mips/kernel/kgdb.c linux-2.6.32.44/arch/mips/kernel/kgdb.c
1533 --- linux-2.6.32.44/arch/mips/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
1534 +++ linux-2.6.32.44/arch/mips/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
1535 @@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vecto
1536 return -1;
1537 }
1538
1539 +/* cannot be const */
1540 struct kgdb_arch arch_kgdb_ops;
1541
1542 /*
1543 diff -urNp linux-2.6.32.44/arch/mips/kernel/process.c linux-2.6.32.44/arch/mips/kernel/process.c
1544 --- linux-2.6.32.44/arch/mips/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
1545 +++ linux-2.6.32.44/arch/mips/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
1546 @@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_stru
1547 out:
1548 return pc;
1549 }
1550 -
1551 -/*
1552 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1553 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1554 - */
1555 -unsigned long arch_align_stack(unsigned long sp)
1556 -{
1557 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1558 - sp -= get_random_int() & ~PAGE_MASK;
1559 -
1560 - return sp & ALMASK;
1561 -}
1562 diff -urNp linux-2.6.32.44/arch/mips/kernel/syscall.c linux-2.6.32.44/arch/mips/kernel/syscall.c
1563 --- linux-2.6.32.44/arch/mips/kernel/syscall.c 2011-03-27 14:31:47.000000000 -0400
1564 +++ linux-2.6.32.44/arch/mips/kernel/syscall.c 2011-04-17 15:56:45.000000000 -0400
1565 @@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(str
1566 do_color_align = 0;
1567 if (filp || (flags & MAP_SHARED))
1568 do_color_align = 1;
1569 +
1570 +#ifdef CONFIG_PAX_RANDMMAP
1571 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1572 +#endif
1573 +
1574 if (addr) {
1575 if (do_color_align)
1576 addr = COLOUR_ALIGN(addr, pgoff);
1577 else
1578 addr = PAGE_ALIGN(addr);
1579 vmm = find_vma(current->mm, addr);
1580 - if (task_size - len >= addr &&
1581 - (!vmm || addr + len <= vmm->vm_start))
1582 + if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
1583 return addr;
1584 }
1585 - addr = TASK_UNMAPPED_BASE;
1586 + addr = current->mm->mmap_base;
1587 if (do_color_align)
1588 addr = COLOUR_ALIGN(addr, pgoff);
1589 else
1590 @@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(str
1591 /* At this point: (!vmm || addr < vmm->vm_end). */
1592 if (task_size - len < addr)
1593 return -ENOMEM;
1594 - if (!vmm || addr + len <= vmm->vm_start)
1595 + if (check_heap_stack_gap(vmm, addr, len))
1596 return addr;
1597 addr = vmm->vm_end;
1598 if (do_color_align)
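[editor's note] The arch_get_unmapped_area() hunks above replace the open-coded "!vma || addr + len <= vma->vm_start" test with check_heap_stack_gap(). Its body is not part of this section of the patch; the sketch below only illustrates the assumed idea, namely that a candidate range is accepted when it neither overlaps the next mapping nor lands closer than a guard gap to a stack that grows down. The types and the gap size here are made up for the example:

#include <stdio.h>

#define VM_GROWSDOWN   0x1u
#define HEAP_STACK_GAP (64UL * 1024)     /* illustrative, not the real sysctl */

struct vma {
        unsigned long vm_start, vm_end;
        unsigned int  vm_flags;
};

static int gap_ok(const struct vma *next, unsigned long addr, unsigned long len)
{
        if (!next)
                return 1;                               /* nothing above us */
        if (next->vm_flags & VM_GROWSDOWN)              /* keep room to grow */
                return addr + len + HEAP_STACK_GAP <= next->vm_start;
        return addr + len <= next->vm_start;
}

int main(void)
{
        struct vma stack = { 0x7f000000UL, 0x7f100000UL, VM_GROWSDOWN };

        printf("%d\n", gap_ok(&stack, 0x7e000000UL, 0x1000));   /* 1: well below */
        printf("%d\n", gap_ok(&stack, 0x7effff00UL, 0x100));    /* 0: too close */
        return 0;
}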
1599 diff -urNp linux-2.6.32.44/arch/mips/mm/fault.c linux-2.6.32.44/arch/mips/mm/fault.c
1600 --- linux-2.6.32.44/arch/mips/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1601 +++ linux-2.6.32.44/arch/mips/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1602 @@ -26,6 +26,23 @@
1603 #include <asm/ptrace.h>
1604 #include <asm/highmem.h> /* For VMALLOC_END */
1605
1606 +#ifdef CONFIG_PAX_PAGEEXEC
1607 +void pax_report_insns(void *pc, void *sp)
1608 +{
1609 + unsigned long i;
1610 +
1611 + printk(KERN_ERR "PAX: bytes at PC: ");
1612 + for (i = 0; i < 5; i++) {
1613 + unsigned int c;
1614 + if (get_user(c, (unsigned int *)pc+i))
1615 + printk(KERN_CONT "???????? ");
1616 + else
1617 + printk(KERN_CONT "%08x ", c);
1618 + }
1619 + printk("\n");
1620 +}
1621 +#endif
1622 +
1623 /*
1624 * This routine handles page faults. It determines the address,
1625 * and the problem, and then passes it off to one of the appropriate
1626 diff -urNp linux-2.6.32.44/arch/parisc/include/asm/elf.h linux-2.6.32.44/arch/parisc/include/asm/elf.h
1627 --- linux-2.6.32.44/arch/parisc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1628 +++ linux-2.6.32.44/arch/parisc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1629 @@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration..
1630
1631 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1632
1633 +#ifdef CONFIG_PAX_ASLR
1634 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1635 +
1636 +#define PAX_DELTA_MMAP_LEN 16
1637 +#define PAX_DELTA_STACK_LEN 16
1638 +#endif
1639 +
1640 /* This yields a mask that user programs can use to figure out what
1641 instruction set this CPU supports. This could be done in user space,
1642 but it's not easy, and we've already done it here. */
1643 diff -urNp linux-2.6.32.44/arch/parisc/include/asm/pgtable.h linux-2.6.32.44/arch/parisc/include/asm/pgtable.h
1644 --- linux-2.6.32.44/arch/parisc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1645 +++ linux-2.6.32.44/arch/parisc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1646 @@ -207,6 +207,17 @@
1647 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1648 #define PAGE_COPY PAGE_EXECREAD
1649 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1650 +
1651 +#ifdef CONFIG_PAX_PAGEEXEC
1652 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1653 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1654 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1655 +#else
1656 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1657 +# define PAGE_COPY_NOEXEC PAGE_COPY
1658 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1659 +#endif
1660 +
1661 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1662 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
1663 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
1664 diff -urNp linux-2.6.32.44/arch/parisc/kernel/module.c linux-2.6.32.44/arch/parisc/kernel/module.c
1665 --- linux-2.6.32.44/arch/parisc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1666 +++ linux-2.6.32.44/arch/parisc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1667 @@ -95,16 +95,38 @@
1668
1669 /* three functions to determine where in the module core
1670 * or init pieces the location is */
1671 +static inline int in_init_rx(struct module *me, void *loc)
1672 +{
1673 + return (loc >= me->module_init_rx &&
1674 + loc < (me->module_init_rx + me->init_size_rx));
1675 +}
1676 +
1677 +static inline int in_init_rw(struct module *me, void *loc)
1678 +{
1679 + return (loc >= me->module_init_rw &&
1680 + loc < (me->module_init_rw + me->init_size_rw));
1681 +}
1682 +
1683 static inline int in_init(struct module *me, void *loc)
1684 {
1685 - return (loc >= me->module_init &&
1686 - loc <= (me->module_init + me->init_size));
1687 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1688 +}
1689 +
1690 +static inline int in_core_rx(struct module *me, void *loc)
1691 +{
1692 + return (loc >= me->module_core_rx &&
1693 + loc < (me->module_core_rx + me->core_size_rx));
1694 +}
1695 +
1696 +static inline int in_core_rw(struct module *me, void *loc)
1697 +{
1698 + return (loc >= me->module_core_rw &&
1699 + loc < (me->module_core_rw + me->core_size_rw));
1700 }
1701
1702 static inline int in_core(struct module *me, void *loc)
1703 {
1704 - return (loc >= me->module_core &&
1705 - loc <= (me->module_core + me->core_size));
1706 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1707 }
1708
1709 static inline int in_local(struct module *me, void *loc)
1710 @@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_
1711 }
1712
1713 /* align things a bit */
1714 - me->core_size = ALIGN(me->core_size, 16);
1715 - me->arch.got_offset = me->core_size;
1716 - me->core_size += gots * sizeof(struct got_entry);
1717 -
1718 - me->core_size = ALIGN(me->core_size, 16);
1719 - me->arch.fdesc_offset = me->core_size;
1720 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1721 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1722 + me->arch.got_offset = me->core_size_rw;
1723 + me->core_size_rw += gots * sizeof(struct got_entry);
1724 +
1725 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1726 + me->arch.fdesc_offset = me->core_size_rw;
1727 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1728
1729 me->arch.got_max = gots;
1730 me->arch.fdesc_max = fdescs;
1731 @@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module
1732
1733 BUG_ON(value == 0);
1734
1735 - got = me->module_core + me->arch.got_offset;
1736 + got = me->module_core_rw + me->arch.got_offset;
1737 for (i = 0; got[i].addr; i++)
1738 if (got[i].addr == value)
1739 goto out;
1740 @@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module
1741 #ifdef CONFIG_64BIT
1742 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1743 {
1744 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1745 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1746
1747 if (!value) {
1748 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1749 @@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module
1750
1751 /* Create new one */
1752 fdesc->addr = value;
1753 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1754 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1755 return (Elf_Addr)fdesc;
1756 }
1757 #endif /* CONFIG_64BIT */
1758 @@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
1759
1760 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1761 end = table + sechdrs[me->arch.unwind_section].sh_size;
1762 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1763 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1764
1765 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1766 me->arch.unwind_section, table, end, gp);
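[editor's note] The parisc module.c hunk above splits the module image into an executable (rx) and a writable (rw) window, so "inside the module" now means inside either region; note it also tightens the old "<=" end test to "<". A user-space sketch of that range test with stand-in fields:

#include <stdio.h>

struct mod_layout {
        char *core_rx; unsigned long size_rx;
        char *core_rw; unsigned long size_rw;
};

static int in_range(const char *base, unsigned long size, const void *loc)
{
        const char *p = loc;
        return p >= base && p < base + size;
}

static int in_core(const struct mod_layout *m, const void *loc)
{
        return in_range(m->core_rx, m->size_rx, loc) ||
               in_range(m->core_rw, m->size_rw, loc);
}

int main(void)
{
        static char rx[64], rw[32];
        struct mod_layout m = { rx, sizeof(rx), rw, sizeof(rw) };

        printf("%d %d %d\n", in_core(&m, rx + 10), in_core(&m, rw + 31),
               in_core(&m, rw + 32));   /* 1 1 0 */
        return 0;
}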
1767 diff -urNp linux-2.6.32.44/arch/parisc/kernel/sys_parisc.c linux-2.6.32.44/arch/parisc/kernel/sys_parisc.c
1768 --- linux-2.6.32.44/arch/parisc/kernel/sys_parisc.c 2011-03-27 14:31:47.000000000 -0400
1769 +++ linux-2.6.32.44/arch/parisc/kernel/sys_parisc.c 2011-04-17 15:56:45.000000000 -0400
1770 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1771 /* At this point: (!vma || addr < vma->vm_end). */
1772 if (TASK_SIZE - len < addr)
1773 return -ENOMEM;
1774 - if (!vma || addr + len <= vma->vm_start)
1775 + if (check_heap_stack_gap(vma, addr, len))
1776 return addr;
1777 addr = vma->vm_end;
1778 }
1779 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1780 /* At this point: (!vma || addr < vma->vm_end). */
1781 if (TASK_SIZE - len < addr)
1782 return -ENOMEM;
1783 - if (!vma || addr + len <= vma->vm_start)
1784 + if (check_heap_stack_gap(vma, addr, len))
1785 return addr;
1786 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1787 if (addr < vma->vm_end) /* handle wraparound */
1788 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1789 if (flags & MAP_FIXED)
1790 return addr;
1791 if (!addr)
1792 - addr = TASK_UNMAPPED_BASE;
1793 + addr = current->mm->mmap_base;
1794
1795 if (filp) {
1796 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1797 diff -urNp linux-2.6.32.44/arch/parisc/kernel/traps.c linux-2.6.32.44/arch/parisc/kernel/traps.c
1798 --- linux-2.6.32.44/arch/parisc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
1799 +++ linux-2.6.32.44/arch/parisc/kernel/traps.c 2011-04-17 15:56:45.000000000 -0400
1800 @@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1801
1802 down_read(&current->mm->mmap_sem);
1803 vma = find_vma(current->mm,regs->iaoq[0]);
1804 - if (vma && (regs->iaoq[0] >= vma->vm_start)
1805 - && (vma->vm_flags & VM_EXEC)) {
1806 -
1807 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1808 fault_address = regs->iaoq[0];
1809 fault_space = regs->iasq[0];
1810
1811 diff -urNp linux-2.6.32.44/arch/parisc/mm/fault.c linux-2.6.32.44/arch/parisc/mm/fault.c
1812 --- linux-2.6.32.44/arch/parisc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1813 +++ linux-2.6.32.44/arch/parisc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1814 @@ -15,6 +15,7 @@
1815 #include <linux/sched.h>
1816 #include <linux/interrupt.h>
1817 #include <linux/module.h>
1818 +#include <linux/unistd.h>
1819
1820 #include <asm/uaccess.h>
1821 #include <asm/traps.h>
1822 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1823 static unsigned long
1824 parisc_acctyp(unsigned long code, unsigned int inst)
1825 {
1826 - if (code == 6 || code == 16)
1827 + if (code == 6 || code == 7 || code == 16)
1828 return VM_EXEC;
1829
1830 switch (inst & 0xf0000000) {
1831 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1832 }
1833 #endif
1834
1835 +#ifdef CONFIG_PAX_PAGEEXEC
1836 +/*
1837 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1838 + *
1839 + * returns 1 when task should be killed
1840 + * 2 when rt_sigreturn trampoline was detected
1841 + * 3 when unpatched PLT trampoline was detected
1842 + */
1843 +static int pax_handle_fetch_fault(struct pt_regs *regs)
1844 +{
1845 +
1846 +#ifdef CONFIG_PAX_EMUPLT
1847 + int err;
1848 +
1849 + do { /* PaX: unpatched PLT emulation */
1850 + unsigned int bl, depwi;
1851 +
1852 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1853 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1854 +
1855 + if (err)
1856 + break;
1857 +
1858 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1859 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1860 +
1861 + err = get_user(ldw, (unsigned int *)addr);
1862 + err |= get_user(bv, (unsigned int *)(addr+4));
1863 + err |= get_user(ldw2, (unsigned int *)(addr+8));
1864 +
1865 + if (err)
1866 + break;
1867 +
1868 + if (ldw == 0x0E801096U &&
1869 + bv == 0xEAC0C000U &&
1870 + ldw2 == 0x0E881095U)
1871 + {
1872 + unsigned int resolver, map;
1873 +
1874 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1875 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1876 + if (err)
1877 + break;
1878 +
1879 + regs->gr[20] = instruction_pointer(regs)+8;
1880 + regs->gr[21] = map;
1881 + regs->gr[22] = resolver;
1882 + regs->iaoq[0] = resolver | 3UL;
1883 + regs->iaoq[1] = regs->iaoq[0] + 4;
1884 + return 3;
1885 + }
1886 + }
1887 + } while (0);
1888 +#endif
1889 +
1890 +#ifdef CONFIG_PAX_EMUTRAMP
1891 +
1892 +#ifndef CONFIG_PAX_EMUSIGRT
1893 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1894 + return 1;
1895 +#endif
1896 +
1897 + do { /* PaX: rt_sigreturn emulation */
1898 + unsigned int ldi1, ldi2, bel, nop;
1899 +
1900 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1901 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1902 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1903 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1904 +
1905 + if (err)
1906 + break;
1907 +
1908 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1909 + ldi2 == 0x3414015AU &&
1910 + bel == 0xE4008200U &&
1911 + nop == 0x08000240U)
1912 + {
1913 + regs->gr[25] = (ldi1 & 2) >> 1;
1914 + regs->gr[20] = __NR_rt_sigreturn;
1915 + regs->gr[31] = regs->iaoq[1] + 16;
1916 + regs->sr[0] = regs->iasq[1];
1917 + regs->iaoq[0] = 0x100UL;
1918 + regs->iaoq[1] = regs->iaoq[0] + 4;
1919 + regs->iasq[0] = regs->sr[2];
1920 + regs->iasq[1] = regs->sr[2];
1921 + return 2;
1922 + }
1923 + } while (0);
1924 +#endif
1925 +
1926 + return 1;
1927 +}
1928 +
1929 +void pax_report_insns(void *pc, void *sp)
1930 +{
1931 + unsigned long i;
1932 +
1933 + printk(KERN_ERR "PAX: bytes at PC: ");
1934 + for (i = 0; i < 5; i++) {
1935 + unsigned int c;
1936 + if (get_user(c, (unsigned int *)pc+i))
1937 + printk(KERN_CONT "???????? ");
1938 + else
1939 + printk(KERN_CONT "%08x ", c);
1940 + }
1941 + printk("\n");
1942 +}
1943 +#endif
1944 +
1945 int fixup_exception(struct pt_regs *regs)
1946 {
1947 const struct exception_table_entry *fix;
1948 @@ -192,8 +303,33 @@ good_area:
1949
1950 acc_type = parisc_acctyp(code,regs->iir);
1951
1952 - if ((vma->vm_flags & acc_type) != acc_type)
1953 + if ((vma->vm_flags & acc_type) != acc_type) {
1954 +
1955 +#ifdef CONFIG_PAX_PAGEEXEC
1956 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1957 + (address & ~3UL) == instruction_pointer(regs))
1958 + {
1959 + up_read(&mm->mmap_sem);
1960 + switch (pax_handle_fetch_fault(regs)) {
1961 +
1962 +#ifdef CONFIG_PAX_EMUPLT
1963 + case 3:
1964 + return;
1965 +#endif
1966 +
1967 +#ifdef CONFIG_PAX_EMUTRAMP
1968 + case 2:
1969 + return;
1970 +#endif
1971 +
1972 + }
1973 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1974 + do_group_exit(SIGKILL);
1975 + }
1976 +#endif
1977 +
1978 goto bad_area;
1979 + }
1980
1981 /*
1982 * If for any reason at all we couldn't handle the fault, make
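[editor's note] The pax_handle_fetch_fault() added above works by fetching a fixed-length window of words at the faulting PC and comparing it against known trampoline opcode sequences before emulating them. A user-space sketch of the rt_sigreturn match, using the parisc opcode constants from the hunk; fetch_word() is a hypothetical stand-in for get_user():

#include <stdio.h>
#include <string.h>

static int fetch_word(const unsigned int *addr, unsigned int *val)
{
        memcpy(val, addr, sizeof(*val));   /* always "mapped" in this demo */
        return 0;
}

static int looks_like_rt_sigreturn(const unsigned int *pc)
{
        unsigned int w[4];
        int i;

        for (i = 0; i < 4; i++)
                if (fetch_word(pc + i, &w[i]))
                        return 0;

        return (w[0] == 0x34190000u || w[0] == 0x34190002u) &&
                w[1] == 0x3414015au &&
                w[2] == 0xe4008200u &&
                w[3] == 0x08000240u;
}

int main(void)
{
        unsigned int tramp[4] = { 0x34190002u, 0x3414015au, 0xe4008200u, 0x08000240u };
        unsigned int other[4] = { 0 };

        printf("%d %d\n", looks_like_rt_sigreturn(tramp),
               looks_like_rt_sigreturn(other));   /* 1 0 */
        return 0;
}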
1983 diff -urNp linux-2.6.32.44/arch/powerpc/include/asm/device.h linux-2.6.32.44/arch/powerpc/include/asm/device.h
1984 --- linux-2.6.32.44/arch/powerpc/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
1985 +++ linux-2.6.32.44/arch/powerpc/include/asm/device.h 2011-04-17 15:56:45.000000000 -0400
1986 @@ -14,7 +14,7 @@ struct dev_archdata {
1987 struct device_node *of_node;
1988
1989 /* DMA operations on that device */
1990 - struct dma_map_ops *dma_ops;
1991 + const struct dma_map_ops *dma_ops;
1992
1993 /*
1994 * When an iommu is in use, dma_data is used as a ptr to the base of the
1995 diff -urNp linux-2.6.32.44/arch/powerpc/include/asm/dma-mapping.h linux-2.6.32.44/arch/powerpc/include/asm/dma-mapping.h
1996 --- linux-2.6.32.44/arch/powerpc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
1997 +++ linux-2.6.32.44/arch/powerpc/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
1998 @@ -69,9 +69,9 @@ static inline unsigned long device_to_ma
1999 #ifdef CONFIG_PPC64
2000 extern struct dma_map_ops dma_iommu_ops;
2001 #endif
2002 -extern struct dma_map_ops dma_direct_ops;
2003 +extern const struct dma_map_ops dma_direct_ops;
2004
2005 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2006 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
2007 {
2008 /* We don't handle the NULL dev case for ISA for now. We could
2009 * do it via an out of line call but it is not needed for now. The
2010 @@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dm
2011 return dev->archdata.dma_ops;
2012 }
2013
2014 -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
2015 +static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
2016 {
2017 dev->archdata.dma_ops = ops;
2018 }
2019 @@ -118,7 +118,7 @@ static inline void set_dma_offset(struct
2020
2021 static inline int dma_supported(struct device *dev, u64 mask)
2022 {
2023 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2024 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2025
2026 if (unlikely(dma_ops == NULL))
2027 return 0;
2028 @@ -132,7 +132,7 @@ static inline int dma_supported(struct d
2029
2030 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2031 {
2032 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2033 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2034
2035 if (unlikely(dma_ops == NULL))
2036 return -EIO;
2037 @@ -147,7 +147,7 @@ static inline int dma_set_mask(struct de
2038 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2039 dma_addr_t *dma_handle, gfp_t flag)
2040 {
2041 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2042 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2043 void *cpu_addr;
2044
2045 BUG_ON(!dma_ops);
2046 @@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(s
2047 static inline void dma_free_coherent(struct device *dev, size_t size,
2048 void *cpu_addr, dma_addr_t dma_handle)
2049 {
2050 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2051 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2052
2053 BUG_ON(!dma_ops);
2054
2055 @@ -173,7 +173,7 @@ static inline void dma_free_coherent(str
2056
2057 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
2058 {
2059 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2060 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2061
2062 if (dma_ops->mapping_error)
2063 return dma_ops->mapping_error(dev, dma_addr);
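[editor's note] The dma-mapping hunks above mark every dma_map_ops table and pointer const, the same constification pattern already applied to sn_dma_ops and db1x_pm_ops earlier in the patch: an ops table that is only ever read can live in read-only data and cannot be repointed at runtime. A tiny sketch of the pattern; the struct here is a made-up miniature, not the kernel's dma_map_ops:

#include <stdio.h>

struct tiny_ops {
        int (*supported)(unsigned long mask);
};

static int always_supported(unsigned long mask) { (void)mask; return 1; }

static const struct tiny_ops demo_ops = {
        .supported = always_supported,
};

int main(void)
{
        /* demo_ops.supported = NULL;  <- would now fail to compile */
        printf("%d\n", demo_ops.supported(~0UL));
        return 0;
}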
2064 diff -urNp linux-2.6.32.44/arch/powerpc/include/asm/elf.h linux-2.6.32.44/arch/powerpc/include/asm/elf.h
2065 --- linux-2.6.32.44/arch/powerpc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
2066 +++ linux-2.6.32.44/arch/powerpc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
2067 @@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
2068 the loader. We need to make sure that it is out of the way of the program
2069 that it will "exec", and that there is sufficient room for the brk. */
2070
2071 -extern unsigned long randomize_et_dyn(unsigned long base);
2072 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2073 +#define ELF_ET_DYN_BASE (0x20000000)
2074 +
2075 +#ifdef CONFIG_PAX_ASLR
2076 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2077 +
2078 +#ifdef __powerpc64__
2079 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2080 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2081 +#else
2082 +#define PAX_DELTA_MMAP_LEN 15
2083 +#define PAX_DELTA_STACK_LEN 15
2084 +#endif
2085 +#endif
2086
2087 /*
2088 * Our registers are always unsigned longs, whether we're a 32 bit
2089 @@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(s
2090 (0x7ff >> (PAGE_SHIFT - 12)) : \
2091 (0x3ffff >> (PAGE_SHIFT - 12)))
2092
2093 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2094 -#define arch_randomize_brk arch_randomize_brk
2095 -
2096 #endif /* __KERNEL__ */
2097
2098 /*
2099 diff -urNp linux-2.6.32.44/arch/powerpc/include/asm/iommu.h linux-2.6.32.44/arch/powerpc/include/asm/iommu.h
2100 --- linux-2.6.32.44/arch/powerpc/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
2101 +++ linux-2.6.32.44/arch/powerpc/include/asm/iommu.h 2011-04-17 15:56:45.000000000 -0400
2102 @@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(voi
2103 extern void iommu_init_early_dart(void);
2104 extern void iommu_init_early_pasemi(void);
2105
2106 +/* dma-iommu.c */
2107 +extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
2108 +
2109 #ifdef CONFIG_PCI
2110 extern void pci_iommu_init(void);
2111 extern void pci_direct_iommu_init(void);
2112 diff -urNp linux-2.6.32.44/arch/powerpc/include/asm/kmap_types.h linux-2.6.32.44/arch/powerpc/include/asm/kmap_types.h
2113 --- linux-2.6.32.44/arch/powerpc/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
2114 +++ linux-2.6.32.44/arch/powerpc/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
2115 @@ -26,6 +26,7 @@ enum km_type {
2116 KM_SOFTIRQ1,
2117 KM_PPC_SYNC_PAGE,
2118 KM_PPC_SYNC_ICACHE,
2119 + KM_CLEARPAGE,
2120 KM_TYPE_NR
2121 };
2122
2123 diff -urNp linux-2.6.32.44/arch/powerpc/include/asm/page_64.h linux-2.6.32.44/arch/powerpc/include/asm/page_64.h
2124 --- linux-2.6.32.44/arch/powerpc/include/asm/page_64.h 2011-03-27 14:31:47.000000000 -0400
2125 +++ linux-2.6.32.44/arch/powerpc/include/asm/page_64.h 2011-04-17 15:56:45.000000000 -0400
2126 @@ -180,15 +180,18 @@ do { \
2127 * stack by default, so in the absense of a PT_GNU_STACK program header
2128 * we turn execute permission off.
2129 */
2130 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2131 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2132 +#define VM_STACK_DEFAULT_FLAGS32 \
2133 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2134 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2135
2136 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2137 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2138
2139 +#ifndef CONFIG_PAX_PAGEEXEC
2140 #define VM_STACK_DEFAULT_FLAGS \
2141 (test_thread_flag(TIF_32BIT) ? \
2142 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2143 +#endif
2144
2145 #include <asm-generic/getorder.h>
2146
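[editor's note] The page_64.h hunk above makes VM_EXEC in the 32-bit default stack flags conditional on the READ_IMPLIES_EXEC personality bit instead of unconditional (the following page.h hunk does the same for the data flags). A minimal sketch of that selection; the personality bit value is the mainline one and is used here only for the demo:

#include <stdio.h>

#define VM_READ   0x1u
#define VM_WRITE  0x2u
#define VM_EXEC   0x4u
#define READ_IMPLIES_EXEC 0x0400000u   /* mainline personality bit, demo only */

static unsigned int default_stack_flags(unsigned int personality)
{
        return ((personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) |
               VM_READ | VM_WRITE;
}

int main(void)
{
        printf("%#x %#x\n", default_stack_flags(0),
               default_stack_flags(READ_IMPLIES_EXEC));   /* 0x3 0x7 */
        return 0;
}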
2147 diff -urNp linux-2.6.32.44/arch/powerpc/include/asm/page.h linux-2.6.32.44/arch/powerpc/include/asm/page.h
2148 --- linux-2.6.32.44/arch/powerpc/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
2149 +++ linux-2.6.32.44/arch/powerpc/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
2150 @@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
2151 * and needs to be executable. This means the whole heap ends
2152 * up being executable.
2153 */
2154 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2155 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2156 +#define VM_DATA_DEFAULT_FLAGS32 \
2157 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2158 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2159
2160 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2161 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2162 @@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
2163 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2164 #endif
2165
2166 +#define ktla_ktva(addr) (addr)
2167 +#define ktva_ktla(addr) (addr)
2168 +
2169 #ifndef __ASSEMBLY__
2170
2171 #undef STRICT_MM_TYPECHECKS
2172 diff -urNp linux-2.6.32.44/arch/powerpc/include/asm/pci.h linux-2.6.32.44/arch/powerpc/include/asm/pci.h
2173 --- linux-2.6.32.44/arch/powerpc/include/asm/pci.h 2011-03-27 14:31:47.000000000 -0400
2174 +++ linux-2.6.32.44/arch/powerpc/include/asm/pci.h 2011-04-17 15:56:45.000000000 -0400
2175 @@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq
2176 }
2177
2178 #ifdef CONFIG_PCI
2179 -extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
2180 -extern struct dma_map_ops *get_pci_dma_ops(void);
2181 +extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
2182 +extern const struct dma_map_ops *get_pci_dma_ops(void);
2183 #else /* CONFIG_PCI */
2184 #define set_pci_dma_ops(d)
2185 #define get_pci_dma_ops() NULL
2186 diff -urNp linux-2.6.32.44/arch/powerpc/include/asm/pgtable.h linux-2.6.32.44/arch/powerpc/include/asm/pgtable.h
2187 --- linux-2.6.32.44/arch/powerpc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
2188 +++ linux-2.6.32.44/arch/powerpc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
2189 @@ -2,6 +2,7 @@
2190 #define _ASM_POWERPC_PGTABLE_H
2191 #ifdef __KERNEL__
2192
2193 +#include <linux/const.h>
2194 #ifndef __ASSEMBLY__
2195 #include <asm/processor.h> /* For TASK_SIZE */
2196 #include <asm/mmu.h>
2197 diff -urNp linux-2.6.32.44/arch/powerpc/include/asm/pte-hash32.h linux-2.6.32.44/arch/powerpc/include/asm/pte-hash32.h
2198 --- linux-2.6.32.44/arch/powerpc/include/asm/pte-hash32.h 2011-03-27 14:31:47.000000000 -0400
2199 +++ linux-2.6.32.44/arch/powerpc/include/asm/pte-hash32.h 2011-04-17 15:56:45.000000000 -0400
2200 @@ -21,6 +21,7 @@
2201 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2202 #define _PAGE_USER 0x004 /* usermode access allowed */
2203 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2204 +#define _PAGE_EXEC _PAGE_GUARDED
2205 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2206 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2207 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2208 diff -urNp linux-2.6.32.44/arch/powerpc/include/asm/reg.h linux-2.6.32.44/arch/powerpc/include/asm/reg.h
2209 --- linux-2.6.32.44/arch/powerpc/include/asm/reg.h 2011-03-27 14:31:47.000000000 -0400
2210 +++ linux-2.6.32.44/arch/powerpc/include/asm/reg.h 2011-04-17 15:56:45.000000000 -0400
2211 @@ -191,6 +191,7 @@
2212 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2213 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2214 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2215 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2216 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2217 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2218 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2219 diff -urNp linux-2.6.32.44/arch/powerpc/include/asm/swiotlb.h linux-2.6.32.44/arch/powerpc/include/asm/swiotlb.h
2220 --- linux-2.6.32.44/arch/powerpc/include/asm/swiotlb.h 2011-03-27 14:31:47.000000000 -0400
2221 +++ linux-2.6.32.44/arch/powerpc/include/asm/swiotlb.h 2011-04-17 15:56:45.000000000 -0400
2222 @@ -13,7 +13,7 @@
2223
2224 #include <linux/swiotlb.h>
2225
2226 -extern struct dma_map_ops swiotlb_dma_ops;
2227 +extern const struct dma_map_ops swiotlb_dma_ops;
2228
2229 static inline void dma_mark_clean(void *addr, size_t size) {}
2230
2231 diff -urNp linux-2.6.32.44/arch/powerpc/include/asm/system.h linux-2.6.32.44/arch/powerpc/include/asm/system.h
2232 --- linux-2.6.32.44/arch/powerpc/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
2233 +++ linux-2.6.32.44/arch/powerpc/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
2234 @@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
2235 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2236 #endif
2237
2238 -extern unsigned long arch_align_stack(unsigned long sp);
2239 +#define arch_align_stack(x) ((x) & ~0xfUL)
2240
2241 /* Used in very early kernel initialization. */
2242 extern unsigned long reloc_offset(void);
2243 diff -urNp linux-2.6.32.44/arch/powerpc/include/asm/uaccess.h linux-2.6.32.44/arch/powerpc/include/asm/uaccess.h
2244 --- linux-2.6.32.44/arch/powerpc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
2245 +++ linux-2.6.32.44/arch/powerpc/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
2246 @@ -13,6 +13,8 @@
2247 #define VERIFY_READ 0
2248 #define VERIFY_WRITE 1
2249
2250 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
2251 +
2252 /*
2253 * The fs value determines whether argument validity checking should be
2254 * performed or not. If get_fs() == USER_DS, checking is performed, with
2255 @@ -327,52 +329,6 @@ do { \
2256 extern unsigned long __copy_tofrom_user(void __user *to,
2257 const void __user *from, unsigned long size);
2258
2259 -#ifndef __powerpc64__
2260 -
2261 -static inline unsigned long copy_from_user(void *to,
2262 - const void __user *from, unsigned long n)
2263 -{
2264 - unsigned long over;
2265 -
2266 - if (access_ok(VERIFY_READ, from, n))
2267 - return __copy_tofrom_user((__force void __user *)to, from, n);
2268 - if ((unsigned long)from < TASK_SIZE) {
2269 - over = (unsigned long)from + n - TASK_SIZE;
2270 - return __copy_tofrom_user((__force void __user *)to, from,
2271 - n - over) + over;
2272 - }
2273 - return n;
2274 -}
2275 -
2276 -static inline unsigned long copy_to_user(void __user *to,
2277 - const void *from, unsigned long n)
2278 -{
2279 - unsigned long over;
2280 -
2281 - if (access_ok(VERIFY_WRITE, to, n))
2282 - return __copy_tofrom_user(to, (__force void __user *)from, n);
2283 - if ((unsigned long)to < TASK_SIZE) {
2284 - over = (unsigned long)to + n - TASK_SIZE;
2285 - return __copy_tofrom_user(to, (__force void __user *)from,
2286 - n - over) + over;
2287 - }
2288 - return n;
2289 -}
2290 -
2291 -#else /* __powerpc64__ */
2292 -
2293 -#define __copy_in_user(to, from, size) \
2294 - __copy_tofrom_user((to), (from), (size))
2295 -
2296 -extern unsigned long copy_from_user(void *to, const void __user *from,
2297 - unsigned long n);
2298 -extern unsigned long copy_to_user(void __user *to, const void *from,
2299 - unsigned long n);
2300 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
2301 - unsigned long n);
2302 -
2303 -#endif /* __powerpc64__ */
2304 -
2305 static inline unsigned long __copy_from_user_inatomic(void *to,
2306 const void __user *from, unsigned long n)
2307 {
2308 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
2309 if (ret == 0)
2310 return 0;
2311 }
2312 +
2313 + if (!__builtin_constant_p(n))
2314 + check_object_size(to, n, false);
2315 +
2316 return __copy_tofrom_user((__force void __user *)to, from, n);
2317 }
2318
2319 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
2320 if (ret == 0)
2321 return 0;
2322 }
2323 +
2324 + if (!__builtin_constant_p(n))
2325 + check_object_size(from, n, true);
2326 +
2327 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2328 }
2329
2330 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
2331 return __copy_to_user_inatomic(to, from, size);
2332 }
2333
2334 +#ifndef __powerpc64__
2335 +
2336 +static inline unsigned long __must_check copy_from_user(void *to,
2337 + const void __user *from, unsigned long n)
2338 +{
2339 + unsigned long over;
2340 +
2341 + if ((long)n < 0)
2342 + return n;
2343 +
2344 + if (access_ok(VERIFY_READ, from, n)) {
2345 + if (!__builtin_constant_p(n))
2346 + check_object_size(to, n, false);
2347 + return __copy_tofrom_user((__force void __user *)to, from, n);
2348 + }
2349 + if ((unsigned long)from < TASK_SIZE) {
2350 + over = (unsigned long)from + n - TASK_SIZE;
2351 + if (!__builtin_constant_p(n - over))
2352 + check_object_size(to, n - over, false);
2353 + return __copy_tofrom_user((__force void __user *)to, from,
2354 + n - over) + over;
2355 + }
2356 + return n;
2357 +}
2358 +
2359 +static inline unsigned long __must_check copy_to_user(void __user *to,
2360 + const void *from, unsigned long n)
2361 +{
2362 + unsigned long over;
2363 +
2364 + if ((long)n < 0)
2365 + return n;
2366 +
2367 + if (access_ok(VERIFY_WRITE, to, n)) {
2368 + if (!__builtin_constant_p(n))
2369 + check_object_size(from, n, true);
2370 + return __copy_tofrom_user(to, (__force void __user *)from, n);
2371 + }
2372 + if ((unsigned long)to < TASK_SIZE) {
2373 + over = (unsigned long)to + n - TASK_SIZE;
2374 + if (!__builtin_constant_p(n))
2375 + check_object_size(from, n - over, true);
2376 + return __copy_tofrom_user(to, (__force void __user *)from,
2377 + n - over) + over;
2378 + }
2379 + return n;
2380 +}
2381 +
2382 +#else /* __powerpc64__ */
2383 +
2384 +#define __copy_in_user(to, from, size) \
2385 + __copy_tofrom_user((to), (from), (size))
2386 +
2387 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2388 +{
2389 + if ((long)n < 0 || n > INT_MAX)
2390 + return n;
2391 +
2392 + if (!__builtin_constant_p(n))
2393 + check_object_size(to, n, false);
2394 +
2395 + if (likely(access_ok(VERIFY_READ, from, n)))
2396 + n = __copy_from_user(to, from, n);
2397 + else
2398 + memset(to, 0, n);
2399 + return n;
2400 +}
2401 +
2402 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2403 +{
2404 + if ((long)n < 0 || n > INT_MAX)
2405 + return n;
2406 +
2407 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
2408 + if (!__builtin_constant_p(n))
2409 + check_object_size(from, n, true);
2410 + n = __copy_to_user(to, from, n);
2411 + }
2412 + return n;
2413 +}
2414 +
2415 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
2416 + unsigned long n);
2417 +
2418 +#endif /* __powerpc64__ */
2419 +
2420 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2421
2422 static inline unsigned long clear_user(void __user *addr, unsigned long size)
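[editor's note] The reworked powerpc copy_to_user()/copy_from_user() above add two guards before touching user memory: the signed-length sanity check and, for non-constant sizes, a call to check_object_size(). Only the extern declaration of check_object_size() appears in this hunk, so object_fits() below is an assumed, simplified model of what it enforces (the kernel-side buffer must be large enough for the requested copy), not the real helper:

#include <stdio.h>
#include <string.h>

static int object_fits(const void *ptr, unsigned long n, unsigned long real_size)
{
        (void)ptr;
        return n <= real_size;          /* model only: reject overlong copies */
}

static unsigned long copy_from_user_model(void *to, unsigned long to_size,
                                          const void *from, unsigned long n)
{
        if ((long)n < 0)
                return n;               /* underflowed length */
        if (!object_fits(to, n, to_size))
                return n;               /* would overflow the destination */
        memcpy(to, from, n);
        return 0;
}

int main(void)
{
        char dst[8], src[64] = "payload that is much larger than dst";

        printf("%lu\n", copy_from_user_model(dst, sizeof(dst), src, 4));   /* 0 */
        printf("%lu\n", copy_from_user_model(dst, sizeof(dst), src, 40));  /* 40 */
        return 0;
}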
2423 diff -urNp linux-2.6.32.44/arch/powerpc/kernel/cacheinfo.c linux-2.6.32.44/arch/powerpc/kernel/cacheinfo.c
2424 --- linux-2.6.32.44/arch/powerpc/kernel/cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
2425 +++ linux-2.6.32.44/arch/powerpc/kernel/cacheinfo.c 2011-04-17 15:56:45.000000000 -0400
2426 @@ -642,7 +642,7 @@ static struct kobj_attribute *cache_inde
2427 &cache_assoc_attr,
2428 };
2429
2430 -static struct sysfs_ops cache_index_ops = {
2431 +static const struct sysfs_ops cache_index_ops = {
2432 .show = cache_index_show,
2433 };
2434
2435 diff -urNp linux-2.6.32.44/arch/powerpc/kernel/dma.c linux-2.6.32.44/arch/powerpc/kernel/dma.c
2436 --- linux-2.6.32.44/arch/powerpc/kernel/dma.c 2011-03-27 14:31:47.000000000 -0400
2437 +++ linux-2.6.32.44/arch/powerpc/kernel/dma.c 2011-04-17 15:56:45.000000000 -0400
2438 @@ -134,7 +134,7 @@ static inline void dma_direct_sync_singl
2439 }
2440 #endif
2441
2442 -struct dma_map_ops dma_direct_ops = {
2443 +const struct dma_map_ops dma_direct_ops = {
2444 .alloc_coherent = dma_direct_alloc_coherent,
2445 .free_coherent = dma_direct_free_coherent,
2446 .map_sg = dma_direct_map_sg,
2447 diff -urNp linux-2.6.32.44/arch/powerpc/kernel/dma-iommu.c linux-2.6.32.44/arch/powerpc/kernel/dma-iommu.c
2448 --- linux-2.6.32.44/arch/powerpc/kernel/dma-iommu.c 2011-03-27 14:31:47.000000000 -0400
2449 +++ linux-2.6.32.44/arch/powerpc/kernel/dma-iommu.c 2011-04-17 15:56:45.000000000 -0400
2450 @@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct de
2451 }
2452
2453 /* We support DMA to/from any memory page via the iommu */
2454 -static int dma_iommu_dma_supported(struct device *dev, u64 mask)
2455 +int dma_iommu_dma_supported(struct device *dev, u64 mask)
2456 {
2457 struct iommu_table *tbl = get_iommu_table_base(dev);
2458
2459 diff -urNp linux-2.6.32.44/arch/powerpc/kernel/dma-swiotlb.c linux-2.6.32.44/arch/powerpc/kernel/dma-swiotlb.c
2460 --- linux-2.6.32.44/arch/powerpc/kernel/dma-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
2461 +++ linux-2.6.32.44/arch/powerpc/kernel/dma-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
2462 @@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
2463 * map_page, and unmap_page on highmem, use normal dma_ops
2464 * for everything else.
2465 */
2466 -struct dma_map_ops swiotlb_dma_ops = {
2467 +const struct dma_map_ops swiotlb_dma_ops = {
2468 .alloc_coherent = dma_direct_alloc_coherent,
2469 .free_coherent = dma_direct_free_coherent,
2470 .map_sg = swiotlb_map_sg_attrs,
2471 diff -urNp linux-2.6.32.44/arch/powerpc/kernel/exceptions-64e.S linux-2.6.32.44/arch/powerpc/kernel/exceptions-64e.S
2472 --- linux-2.6.32.44/arch/powerpc/kernel/exceptions-64e.S 2011-03-27 14:31:47.000000000 -0400
2473 +++ linux-2.6.32.44/arch/powerpc/kernel/exceptions-64e.S 2011-04-17 15:56:45.000000000 -0400
2474 @@ -455,6 +455,7 @@ storage_fault_common:
2475 std r14,_DAR(r1)
2476 std r15,_DSISR(r1)
2477 addi r3,r1,STACK_FRAME_OVERHEAD
2478 + bl .save_nvgprs
2479 mr r4,r14
2480 mr r5,r15
2481 ld r14,PACA_EXGEN+EX_R14(r13)
2482 @@ -464,8 +465,7 @@ storage_fault_common:
2483 cmpdi r3,0
2484 bne- 1f
2485 b .ret_from_except_lite
2486 -1: bl .save_nvgprs
2487 - mr r5,r3
2488 +1: mr r5,r3
2489 addi r3,r1,STACK_FRAME_OVERHEAD
2490 ld r4,_DAR(r1)
2491 bl .bad_page_fault
2492 diff -urNp linux-2.6.32.44/arch/powerpc/kernel/exceptions-64s.S linux-2.6.32.44/arch/powerpc/kernel/exceptions-64s.S
2493 --- linux-2.6.32.44/arch/powerpc/kernel/exceptions-64s.S 2011-03-27 14:31:47.000000000 -0400
2494 +++ linux-2.6.32.44/arch/powerpc/kernel/exceptions-64s.S 2011-04-17 15:56:45.000000000 -0400
2495 @@ -818,10 +818,10 @@ handle_page_fault:
2496 11: ld r4,_DAR(r1)
2497 ld r5,_DSISR(r1)
2498 addi r3,r1,STACK_FRAME_OVERHEAD
2499 + bl .save_nvgprs
2500 bl .do_page_fault
2501 cmpdi r3,0
2502 beq+ 13f
2503 - bl .save_nvgprs
2504 mr r5,r3
2505 addi r3,r1,STACK_FRAME_OVERHEAD
2506 lwz r4,_DAR(r1)
2507 diff -urNp linux-2.6.32.44/arch/powerpc/kernel/ibmebus.c linux-2.6.32.44/arch/powerpc/kernel/ibmebus.c
2508 --- linux-2.6.32.44/arch/powerpc/kernel/ibmebus.c 2011-03-27 14:31:47.000000000 -0400
2509 +++ linux-2.6.32.44/arch/powerpc/kernel/ibmebus.c 2011-04-17 15:56:45.000000000 -0400
2510 @@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct
2511 return 1;
2512 }
2513
2514 -static struct dma_map_ops ibmebus_dma_ops = {
2515 +static const struct dma_map_ops ibmebus_dma_ops = {
2516 .alloc_coherent = ibmebus_alloc_coherent,
2517 .free_coherent = ibmebus_free_coherent,
2518 .map_sg = ibmebus_map_sg,
2519 diff -urNp linux-2.6.32.44/arch/powerpc/kernel/kgdb.c linux-2.6.32.44/arch/powerpc/kernel/kgdb.c
2520 --- linux-2.6.32.44/arch/powerpc/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
2521 +++ linux-2.6.32.44/arch/powerpc/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
2522 @@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct
2523 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
2524 return 0;
2525
2526 - if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2527 + if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2528 regs->nip += 4;
2529
2530 return 1;
2531 @@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vecto
2532 /*
2533 * Global data
2534 */
2535 -struct kgdb_arch arch_kgdb_ops = {
2536 +const struct kgdb_arch arch_kgdb_ops = {
2537 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
2538 };
2539
2540 diff -urNp linux-2.6.32.44/arch/powerpc/kernel/module_32.c linux-2.6.32.44/arch/powerpc/kernel/module_32.c
2541 --- linux-2.6.32.44/arch/powerpc/kernel/module_32.c 2011-03-27 14:31:47.000000000 -0400
2542 +++ linux-2.6.32.44/arch/powerpc/kernel/module_32.c 2011-04-17 15:56:45.000000000 -0400
2543 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2544 me->arch.core_plt_section = i;
2545 }
2546 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2547 - printk("Module doesn't contain .plt or .init.plt sections.\n");
2548 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2549 return -ENOEXEC;
2550 }
2551
2552 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2553
2554 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2555 /* Init, or core PLT? */
2556 - if (location >= mod->module_core
2557 - && location < mod->module_core + mod->core_size)
2558 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2559 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2560 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2561 - else
2562 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2563 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2564 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2565 + else {
2566 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2567 + return ~0UL;
2568 + }
2569
2570 /* Find this entry, or if that fails, the next avail. entry */
2571 while (entry->jump[0]) {
2572 diff -urNp linux-2.6.32.44/arch/powerpc/kernel/module.c linux-2.6.32.44/arch/powerpc/kernel/module.c
2573 --- linux-2.6.32.44/arch/powerpc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
2574 +++ linux-2.6.32.44/arch/powerpc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
2575 @@ -31,11 +31,24 @@
2576
2577 LIST_HEAD(module_bug_list);
2578
2579 +#ifdef CONFIG_PAX_KERNEXEC
2580 void *module_alloc(unsigned long size)
2581 {
2582 if (size == 0)
2583 return NULL;
2584
2585 + return vmalloc(size);
2586 +}
2587 +
2588 +void *module_alloc_exec(unsigned long size)
2589 +#else
2590 +void *module_alloc(unsigned long size)
2591 +#endif
2592 +
2593 +{
2594 + if (size == 0)
2595 + return NULL;
2596 +
2597 return vmalloc_exec(size);
2598 }
2599
2600 @@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2601 vfree(module_region);
2602 }
2603
2604 +#ifdef CONFIG_PAX_KERNEXEC
2605 +void module_free_exec(struct module *mod, void *module_region)
2606 +{
2607 + module_free(mod, module_region);
2608 +}
2609 +#endif
2610 +
2611 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2612 const Elf_Shdr *sechdrs,
2613 const char *name)
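[editor's note] Under CONFIG_PAX_KERNEXEC the module.c hunk above splits allocation into module_alloc() for data (plain vmalloc, never executable) and module_alloc_exec() for code (vmalloc_exec). A rough user-space analogue using mmap() protections; the kernel allocators themselves are not reproduced here:

#include <stdio.h>
#include <sys/mman.h>

static void *alloc_rw(size_t size)
{
        return mmap(NULL, size, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}

static void *alloc_rx(size_t size)
{
        /* in the patch the code region is the only one made executable */
        return mmap(NULL, size, PROT_READ | PROT_EXEC,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}

int main(void)
{
        void *data = alloc_rw(4096);
        void *text = alloc_rx(4096);

        printf("data=%p text=%p\n", data, text);
        munmap(data, 4096);
        munmap(text, 4096);
        return 0;
}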
2614 diff -urNp linux-2.6.32.44/arch/powerpc/kernel/pci-common.c linux-2.6.32.44/arch/powerpc/kernel/pci-common.c
2615 --- linux-2.6.32.44/arch/powerpc/kernel/pci-common.c 2011-03-27 14:31:47.000000000 -0400
2616 +++ linux-2.6.32.44/arch/powerpc/kernel/pci-common.c 2011-04-17 15:56:45.000000000 -0400
2617 @@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
2618 unsigned int ppc_pci_flags = 0;
2619
2620
2621 -static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2622 +static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2623
2624 -void set_pci_dma_ops(struct dma_map_ops *dma_ops)
2625 +void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
2626 {
2627 pci_dma_ops = dma_ops;
2628 }
2629
2630 -struct dma_map_ops *get_pci_dma_ops(void)
2631 +const struct dma_map_ops *get_pci_dma_ops(void)
2632 {
2633 return pci_dma_ops;
2634 }
2635 diff -urNp linux-2.6.32.44/arch/powerpc/kernel/process.c linux-2.6.32.44/arch/powerpc/kernel/process.c
2636 --- linux-2.6.32.44/arch/powerpc/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
2637 +++ linux-2.6.32.44/arch/powerpc/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
2638 @@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
2639 * Lookup NIP late so we have the best change of getting the
2640 * above info out without failing
2641 */
2642 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2643 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2644 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2645 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2646 #endif
2647 show_stack(current, (unsigned long *) regs->gpr[1]);
2648 if (!user_mode(regs))
2649 @@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk,
2650 newsp = stack[0];
2651 ip = stack[STACK_FRAME_LR_SAVE];
2652 if (!firstframe || ip != lr) {
2653 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2654 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2655 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2656 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2657 - printk(" (%pS)",
2658 + printk(" (%pA)",
2659 (void *)current->ret_stack[curr_frame].ret);
2660 curr_frame--;
2661 }
2662 @@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk,
2663 struct pt_regs *regs = (struct pt_regs *)
2664 (sp + STACK_FRAME_OVERHEAD);
2665 lr = regs->link;
2666 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
2667 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
2668 regs->trap, (void *)regs->nip, (void *)lr);
2669 firstframe = 1;
2670 }
2671 @@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
2672 }
2673
2674 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2675 -
2676 -unsigned long arch_align_stack(unsigned long sp)
2677 -{
2678 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2679 - sp -= get_random_int() & ~PAGE_MASK;
2680 - return sp & ~0xf;
2681 -}
2682 -
2683 -static inline unsigned long brk_rnd(void)
2684 -{
2685 - unsigned long rnd = 0;
2686 -
2687 - /* 8MB for 32bit, 1GB for 64bit */
2688 - if (is_32bit_task())
2689 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2690 - else
2691 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2692 -
2693 - return rnd << PAGE_SHIFT;
2694 -}
2695 -
2696 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2697 -{
2698 - unsigned long base = mm->brk;
2699 - unsigned long ret;
2700 -
2701 -#ifdef CONFIG_PPC_STD_MMU_64
2702 - /*
2703 - * If we are using 1TB segments and we are allowed to randomise
2704 - * the heap, we can put it above 1TB so it is backed by a 1TB
2705 - * segment. Otherwise the heap will be in the bottom 1TB
2706 - * which always uses 256MB segments and this may result in a
2707 - * performance penalty.
2708 - */
2709 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2710 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2711 -#endif
2712 -
2713 - ret = PAGE_ALIGN(base + brk_rnd());
2714 -
2715 - if (ret < mm->brk)
2716 - return mm->brk;
2717 -
2718 - return ret;
2719 -}
2720 -
2721 -unsigned long randomize_et_dyn(unsigned long base)
2722 -{
2723 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2724 -
2725 - if (ret < base)
2726 - return base;
2727 -
2728 - return ret;
2729 -}
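[editor's note] The process.c hunk above removes the randomizing arch_align_stack(), arch_randomize_brk() and randomize_et_dyn(); together with the asm/system.h change earlier in this patch, stack alignment becomes a plain deterministic 16-byte mask and the per-exec randomization these functions added is dropped here. Tiny illustration of the mask:

#include <stdio.h>

#define align_stack(sp) ((sp) & ~0xfUL)

int main(void)
{
        unsigned long sp = 0x7ffffff3UL;

        printf("%#lx -> %#lx\n", sp, align_stack(sp));   /* ...ff3 -> ...ff0 */
        return 0;
}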
2730 diff -urNp linux-2.6.32.44/arch/powerpc/kernel/signal_32.c linux-2.6.32.44/arch/powerpc/kernel/signal_32.c
2731 --- linux-2.6.32.44/arch/powerpc/kernel/signal_32.c 2011-03-27 14:31:47.000000000 -0400
2732 +++ linux-2.6.32.44/arch/powerpc/kernel/signal_32.c 2011-04-17 15:56:45.000000000 -0400
2733 @@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig
2734 /* Save user registers on the stack */
2735 frame = &rt_sf->uc.uc_mcontext;
2736 addr = frame;
2737 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2738 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2739 if (save_user_regs(regs, frame, 0, 1))
2740 goto badframe;
2741 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2742 diff -urNp linux-2.6.32.44/arch/powerpc/kernel/signal_64.c linux-2.6.32.44/arch/powerpc/kernel/signal_64.c
2743 --- linux-2.6.32.44/arch/powerpc/kernel/signal_64.c 2011-03-27 14:31:47.000000000 -0400
2744 +++ linux-2.6.32.44/arch/powerpc/kernel/signal_64.c 2011-04-17 15:56:45.000000000 -0400
2745 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct
2746 current->thread.fpscr.val = 0;
2747
2748 /* Set up to return from userspace. */
2749 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2750 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2751 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2752 } else {
2753 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2754 diff -urNp linux-2.6.32.44/arch/powerpc/kernel/sys_ppc32.c linux-2.6.32.44/arch/powerpc/kernel/sys_ppc32.c
2755 --- linux-2.6.32.44/arch/powerpc/kernel/sys_ppc32.c 2011-03-27 14:31:47.000000000 -0400
2756 +++ linux-2.6.32.44/arch/powerpc/kernel/sys_ppc32.c 2011-04-17 15:56:45.000000000 -0400
2757 @@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct
2758 if (oldlenp) {
2759 if (!error) {
2760 if (get_user(oldlen, oldlenp) ||
2761 - put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
2762 + put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
2763 + copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
2764 error = -EFAULT;
2765 }
2766 - copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
2767 }
2768 return error;
2769 }
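[editor's note] The compat sysctl hunk above folds the previously fire-and-forget copy_to_user() of the __unused pad into the same error chain as put_user(), so a failed copy now surfaces as -EFAULT instead of being silently ignored. The generic shape of that fix, with a stand-in copy routine (copy_out() returns bytes not copied, like copy_to_user()):

#include <stdio.h>
#include <string.h>

#define EFAULT 14

static unsigned long copy_out(void *dst, const void *src, unsigned long n, int fail)
{
        if (fail)
                return n;
        memcpy(dst, src, n);
        return 0;
}

static int write_results(void *dst, const void *src, unsigned long n, int fail)
{
        int error = 0;

        if (copy_out(dst, src, n, fail))   /* checked, not fire-and-forget */
                error = -EFAULT;
        return error;
}

int main(void)
{
        char out[8], in[8] = "ok";

        printf("%d %d\n", write_results(out, in, 3, 0),
               write_results(out, in, 3, 1));   /* 0 -14 */
        return 0;
}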
2770 diff -urNp linux-2.6.32.44/arch/powerpc/kernel/traps.c linux-2.6.32.44/arch/powerpc/kernel/traps.c
2771 --- linux-2.6.32.44/arch/powerpc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
2772 +++ linux-2.6.32.44/arch/powerpc/kernel/traps.c 2011-06-13 21:33:37.000000000 -0400
2773 @@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
2774 static inline void pmac_backlight_unblank(void) { }
2775 #endif
2776
2777 +extern void gr_handle_kernel_exploit(void);
2778 +
2779 int die(const char *str, struct pt_regs *regs, long err)
2780 {
2781 static struct {
2782 @@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs
2783 if (panic_on_oops)
2784 panic("Fatal exception");
2785
2786 + gr_handle_kernel_exploit();
2787 +
2788 oops_exit();
2789 do_exit(err);
2790
2791 diff -urNp linux-2.6.32.44/arch/powerpc/kernel/vdso.c linux-2.6.32.44/arch/powerpc/kernel/vdso.c
2792 --- linux-2.6.32.44/arch/powerpc/kernel/vdso.c 2011-03-27 14:31:47.000000000 -0400
2793 +++ linux-2.6.32.44/arch/powerpc/kernel/vdso.c 2011-04-17 15:56:45.000000000 -0400
2794 @@ -36,6 +36,7 @@
2795 #include <asm/firmware.h>
2796 #include <asm/vdso.h>
2797 #include <asm/vdso_datapage.h>
2798 +#include <asm/mman.h>
2799
2800 #include "setup.h"
2801
2802 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2803 vdso_base = VDSO32_MBASE;
2804 #endif
2805
2806 - current->mm->context.vdso_base = 0;
2807 + current->mm->context.vdso_base = ~0UL;
2808
2809 /* vDSO has a problem and was disabled, just don't "enable" it for the
2810 * process
2811 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2812 vdso_base = get_unmapped_area(NULL, vdso_base,
2813 (vdso_pages << PAGE_SHIFT) +
2814 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2815 - 0, 0);
2816 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2817 if (IS_ERR_VALUE(vdso_base)) {
2818 rc = vdso_base;
2819 goto fail_mmapsem;
2820 diff -urNp linux-2.6.32.44/arch/powerpc/kernel/vio.c linux-2.6.32.44/arch/powerpc/kernel/vio.c
2821 --- linux-2.6.32.44/arch/powerpc/kernel/vio.c 2011-03-27 14:31:47.000000000 -0400
2822 +++ linux-2.6.32.44/arch/powerpc/kernel/vio.c 2011-04-17 15:56:45.000000000 -0400
2823 @@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struc
2824 vio_cmo_dealloc(viodev, alloc_size);
2825 }
2826
2827 -struct dma_map_ops vio_dma_mapping_ops = {
2828 +static const struct dma_map_ops vio_dma_mapping_ops = {
2829 .alloc_coherent = vio_dma_iommu_alloc_coherent,
2830 .free_coherent = vio_dma_iommu_free_coherent,
2831 .map_sg = vio_dma_iommu_map_sg,
2832 .unmap_sg = vio_dma_iommu_unmap_sg,
2833 + .dma_supported = dma_iommu_dma_supported,
2834 .map_page = vio_dma_iommu_map_page,
2835 .unmap_page = vio_dma_iommu_unmap_page,
2836
2837 @@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vi
2838
2839 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
2840 {
2841 - vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
2842 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
2843 }
2844
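
The vio.c change is part of a pattern repeated throughout the patch: method tables such as dma_map_ops are made const so they can live in read-only memory. That is only possible when every member is known at build time, which is why .dma_supported moves into the static initializer and the runtime assignment in vio_cmo_set_dma_ops() is deleted. A stand-alone sketch of the idea (struct ops and its members are invented for illustration):

#include <stdio.h>

struct ops {
        int (*probe)(void);
        int (*supported)(void);
};

static int my_probe(void)     { return 1; }
static int my_supported(void) { return 1; }

/* Every member is set statically, so the table itself can be read-only. */
static const struct ops my_ops = {
        .probe     = my_probe,
        .supported = my_supported,      /* previously patched in at runtime */
};

int main(void)
{
        /* my_ops.supported = my_supported;   would no longer compile */
        printf("probe=%d supported=%d\n", my_ops.probe(), my_ops.supported());
        return 0;
}
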
2845 diff -urNp linux-2.6.32.44/arch/powerpc/lib/usercopy_64.c linux-2.6.32.44/arch/powerpc/lib/usercopy_64.c
2846 --- linux-2.6.32.44/arch/powerpc/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
2847 +++ linux-2.6.32.44/arch/powerpc/lib/usercopy_64.c 2011-04-17 15:56:45.000000000 -0400
2848 @@ -9,22 +9,6 @@
2849 #include <linux/module.h>
2850 #include <asm/uaccess.h>
2851
2852 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2853 -{
2854 - if (likely(access_ok(VERIFY_READ, from, n)))
2855 - n = __copy_from_user(to, from, n);
2856 - else
2857 - memset(to, 0, n);
2858 - return n;
2859 -}
2860 -
2861 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2862 -{
2863 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2864 - n = __copy_to_user(to, from, n);
2865 - return n;
2866 -}
2867 -
2868 unsigned long copy_in_user(void __user *to, const void __user *from,
2869 unsigned long n)
2870 {
2871 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2872 return n;
2873 }
2874
2875 -EXPORT_SYMBOL(copy_from_user);
2876 -EXPORT_SYMBOL(copy_to_user);
2877 EXPORT_SYMBOL(copy_in_user);
2878
2879 diff -urNp linux-2.6.32.44/arch/powerpc/mm/fault.c linux-2.6.32.44/arch/powerpc/mm/fault.c
2880 --- linux-2.6.32.44/arch/powerpc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
2881 +++ linux-2.6.32.44/arch/powerpc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
2882 @@ -30,6 +30,10 @@
2883 #include <linux/kprobes.h>
2884 #include <linux/kdebug.h>
2885 #include <linux/perf_event.h>
2886 +#include <linux/slab.h>
2887 +#include <linux/pagemap.h>
2888 +#include <linux/compiler.h>
2889 +#include <linux/unistd.h>
2890
2891 #include <asm/firmware.h>
2892 #include <asm/page.h>
2893 @@ -40,6 +44,7 @@
2894 #include <asm/uaccess.h>
2895 #include <asm/tlbflush.h>
2896 #include <asm/siginfo.h>
2897 +#include <asm/ptrace.h>
2898
2899
2900 #ifdef CONFIG_KPROBES
2901 @@ -64,6 +69,33 @@ static inline int notify_page_fault(stru
2902 }
2903 #endif
2904
2905 +#ifdef CONFIG_PAX_PAGEEXEC
2906 +/*
2907 + * PaX: decide what to do with offenders (regs->nip = fault address)
2908 + *
2909 + * returns 1 when task should be killed
2910 + */
2911 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2912 +{
2913 + return 1;
2914 +}
2915 +
2916 +void pax_report_insns(void *pc, void *sp)
2917 +{
2918 + unsigned long i;
2919 +
2920 + printk(KERN_ERR "PAX: bytes at PC: ");
2921 + for (i = 0; i < 5; i++) {
2922 + unsigned int c;
2923 + if (get_user(c, (unsigned int __user *)pc+i))
2924 + printk(KERN_CONT "???????? ");
2925 + else
2926 + printk(KERN_CONT "%08x ", c);
2927 + }
2928 + printk("\n");
2929 +}
2930 +#endif
2931 +
2932 /*
2933 * Check whether the instruction at regs->nip is a store using
2934 * an update addressing form which will update r1.
2935 @@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_re
2936 * indicate errors in DSISR but can validly be set in SRR1.
2937 */
2938 if (trap == 0x400)
2939 - error_code &= 0x48200000;
2940 + error_code &= 0x58200000;
2941 else
2942 is_write = error_code & DSISR_ISSTORE;
2943 #else
2944 @@ -250,7 +282,7 @@ good_area:
2945 * "undefined". Of those that can be set, this is the only
2946 * one which seems bad.
2947 */
2948 - if (error_code & 0x10000000)
2949 + if (error_code & DSISR_GUARDED)
2950 /* Guarded storage error. */
2951 goto bad_area;
2952 #endif /* CONFIG_8xx */
2953 @@ -265,7 +297,7 @@ good_area:
2954 * processors use the same I/D cache coherency mechanism
2955 * as embedded.
2956 */
2957 - if (error_code & DSISR_PROTFAULT)
2958 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2959 goto bad_area;
2960 #endif /* CONFIG_PPC_STD_MMU */
2961
2962 @@ -335,6 +367,23 @@ bad_area:
2963 bad_area_nosemaphore:
2964 /* User mode accesses cause a SIGSEGV */
2965 if (user_mode(regs)) {
2966 +
2967 +#ifdef CONFIG_PAX_PAGEEXEC
2968 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2969 +#ifdef CONFIG_PPC_STD_MMU
2970 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2971 +#else
2972 + if (is_exec && regs->nip == address) {
2973 +#endif
2974 + switch (pax_handle_fetch_fault(regs)) {
2975 + }
2976 +
2977 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2978 + do_group_exit(SIGKILL);
2979 + }
2980 + }
2981 +#endif
2982 +
2983 _exception(SIGSEGV, regs, code, address);
2984 return 0;
2985 }
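
In the fault handler above, widening the SRR1 mask from 0x48200000 to 0x58200000 keeps the 0x10000000 (guarded-storage) bit on instruction-fetch faults, and the new PAX_PAGEEXEC block treats an exec attempt on a protected page as fatal: pax_report_insns() logs five 32-bit words at the faulting PC before the task is killed. A rough user-space analogue of that dump, reading its own code instead of a faulting PC (no kernel API involved):

#include <stdio.h>

/* Print five 32-bit words starting at a code address, in the spirit of
 * pax_report_insns(); here we simply read one of our own functions. */
static void dump_insns(const void *pc)
{
        const unsigned int *p = pc;
        unsigned int i;

        printf("bytes at PC: ");
        for (i = 0; i < 5; i++)
                printf("%08x ", p[i]);
        printf("\n");
}

int main(void)
{
        dump_insns((const void *)dump_insns);
        return 0;
}
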
2986 diff -urNp linux-2.6.32.44/arch/powerpc/mm/mmap_64.c linux-2.6.32.44/arch/powerpc/mm/mmap_64.c
2987 --- linux-2.6.32.44/arch/powerpc/mm/mmap_64.c 2011-03-27 14:31:47.000000000 -0400
2988 +++ linux-2.6.32.44/arch/powerpc/mm/mmap_64.c 2011-04-17 15:56:45.000000000 -0400
2989 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2990 */
2991 if (mmap_is_legacy()) {
2992 mm->mmap_base = TASK_UNMAPPED_BASE;
2993 +
2994 +#ifdef CONFIG_PAX_RANDMMAP
2995 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2996 + mm->mmap_base += mm->delta_mmap;
2997 +#endif
2998 +
2999 mm->get_unmapped_area = arch_get_unmapped_area;
3000 mm->unmap_area = arch_unmap_area;
3001 } else {
3002 mm->mmap_base = mmap_base();
3003 +
3004 +#ifdef CONFIG_PAX_RANDMMAP
3005 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3006 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3007 +#endif
3008 +
3009 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3010 mm->unmap_area = arch_unmap_area_topdown;
3011 }
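
The mmap_64.c hunk shows how PAX_RANDMMAP perturbs both layouts: the legacy bottom-up base is pushed up by delta_mmap, while the top-down base is pulled down by delta_mmap + delta_stack so the randomized gap under the stack is preserved. A small model of that arithmetic, assuming delta_mmap/delta_stack are page-aligned random offsets with a few bits of entropy (the bit counts and base addresses below are illustrative only):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT      12UL
#define UNMAPPED_BASE   0x40000000UL    /* illustrative bottom-up base */
#define TOPDOWN_BASE    0xbf000000UL    /* illustrative top-down base  */

/* Hypothetical: 'bits' bits of entropy, scaled to a page-aligned offset. */
static unsigned long rand_delta(unsigned int bits)
{
        return ((unsigned long)rand() & ((1UL << bits) - 1)) << PAGE_SHIFT;
}

int main(void)
{
        unsigned long delta_mmap  = rand_delta(16);
        unsigned long delta_stack = rand_delta(8);

        /* legacy (bottom-up) layout: shift the search base upwards */
        unsigned long legacy_base  = UNMAPPED_BASE + delta_mmap;
        /* top-down layout: pull the base down, keeping room under the stack */
        unsigned long topdown_base = TOPDOWN_BASE - (delta_mmap + delta_stack);

        printf("legacy  mmap_base: %#lx\n", legacy_base);
        printf("topdown mmap_base: %#lx\n", topdown_base);
        return 0;
}
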
3012 diff -urNp linux-2.6.32.44/arch/powerpc/mm/slice.c linux-2.6.32.44/arch/powerpc/mm/slice.c
3013 --- linux-2.6.32.44/arch/powerpc/mm/slice.c 2011-03-27 14:31:47.000000000 -0400
3014 +++ linux-2.6.32.44/arch/powerpc/mm/slice.c 2011-04-17 15:56:45.000000000 -0400
3015 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
3016 if ((mm->task_size - len) < addr)
3017 return 0;
3018 vma = find_vma(mm, addr);
3019 - return (!vma || (addr + len) <= vma->vm_start);
3020 + return check_heap_stack_gap(vma, addr, len);
3021 }
3022
3023 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
3024 @@ -256,7 +256,7 @@ full_search:
3025 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
3026 continue;
3027 }
3028 - if (!vma || addr + len <= vma->vm_start) {
3029 + if (check_heap_stack_gap(vma, addr, len)) {
3030 /*
3031 * Remember the place where we stopped the search:
3032 */
3033 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
3034 }
3035 }
3036
3037 - addr = mm->mmap_base;
3038 - while (addr > len) {
3039 + if (mm->mmap_base < len)
3040 + addr = -ENOMEM;
3041 + else
3042 + addr = mm->mmap_base - len;
3043 +
3044 + while (!IS_ERR_VALUE(addr)) {
3045 /* Go down by chunk size */
3046 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
3047 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
3048
3049 /* Check for hit with different page size */
3050 mask = slice_range_to_mask(addr, len);
3051 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
3052 * return with success:
3053 */
3054 vma = find_vma(mm, addr);
3055 - if (!vma || (addr + len) <= vma->vm_start) {
3056 + if (check_heap_stack_gap(vma, addr, len)) {
3057 /* remember the address as a hint for next time */
3058 if (use_cache)
3059 mm->free_area_cache = addr;
3060 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
3061 mm->cached_hole_size = vma->vm_start - addr;
3062
3063 /* try just below the current vma->vm_start */
3064 - addr = vma->vm_start;
3065 + addr = skip_heap_stack_gap(vma, len);
3066 }
3067
3068 /*
3069 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
3070 if (fixed && addr > (mm->task_size - len))
3071 return -EINVAL;
3072
3073 +#ifdef CONFIG_PAX_RANDMMAP
3074 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
3075 + addr = 0;
3076 +#endif
3077 +
3078 /* If hint, make sure it matches our alignment restrictions */
3079 if (!fixed && addr) {
3080 addr = _ALIGN_UP(addr, 1ul << pshift);
3081 diff -urNp linux-2.6.32.44/arch/powerpc/platforms/52xx/lite5200_pm.c linux-2.6.32.44/arch/powerpc/platforms/52xx/lite5200_pm.c
3082 --- linux-2.6.32.44/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-03-27 14:31:47.000000000 -0400
3083 +++ linux-2.6.32.44/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-04-17 15:56:45.000000000 -0400
3084 @@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
3085 lite5200_pm_target_state = PM_SUSPEND_ON;
3086 }
3087
3088 -static struct platform_suspend_ops lite5200_pm_ops = {
3089 +static const struct platform_suspend_ops lite5200_pm_ops = {
3090 .valid = lite5200_pm_valid,
3091 .begin = lite5200_pm_begin,
3092 .prepare = lite5200_pm_prepare,
3093 diff -urNp linux-2.6.32.44/arch/powerpc/platforms/52xx/mpc52xx_pm.c linux-2.6.32.44/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3094 --- linux-2.6.32.44/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-03-27 14:31:47.000000000 -0400
3095 +++ linux-2.6.32.44/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-04-17 15:56:45.000000000 -0400
3096 @@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
3097 iounmap(mbar);
3098 }
3099
3100 -static struct platform_suspend_ops mpc52xx_pm_ops = {
3101 +static const struct platform_suspend_ops mpc52xx_pm_ops = {
3102 .valid = mpc52xx_pm_valid,
3103 .prepare = mpc52xx_pm_prepare,
3104 .enter = mpc52xx_pm_enter,
3105 diff -urNp linux-2.6.32.44/arch/powerpc/platforms/83xx/suspend.c linux-2.6.32.44/arch/powerpc/platforms/83xx/suspend.c
3106 --- linux-2.6.32.44/arch/powerpc/platforms/83xx/suspend.c 2011-03-27 14:31:47.000000000 -0400
3107 +++ linux-2.6.32.44/arch/powerpc/platforms/83xx/suspend.c 2011-04-17 15:56:45.000000000 -0400
3108 @@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
3109 return ret;
3110 }
3111
3112 -static struct platform_suspend_ops mpc83xx_suspend_ops = {
3113 +static const struct platform_suspend_ops mpc83xx_suspend_ops = {
3114 .valid = mpc83xx_suspend_valid,
3115 .begin = mpc83xx_suspend_begin,
3116 .enter = mpc83xx_suspend_enter,
3117 diff -urNp linux-2.6.32.44/arch/powerpc/platforms/cell/iommu.c linux-2.6.32.44/arch/powerpc/platforms/cell/iommu.c
3118 --- linux-2.6.32.44/arch/powerpc/platforms/cell/iommu.c 2011-03-27 14:31:47.000000000 -0400
3119 +++ linux-2.6.32.44/arch/powerpc/platforms/cell/iommu.c 2011-04-17 15:56:45.000000000 -0400
3120 @@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struc
3121
3122 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
3123
3124 -struct dma_map_ops dma_iommu_fixed_ops = {
3125 +const struct dma_map_ops dma_iommu_fixed_ops = {
3126 .alloc_coherent = dma_fixed_alloc_coherent,
3127 .free_coherent = dma_fixed_free_coherent,
3128 .map_sg = dma_fixed_map_sg,
3129 diff -urNp linux-2.6.32.44/arch/powerpc/platforms/ps3/system-bus.c linux-2.6.32.44/arch/powerpc/platforms/ps3/system-bus.c
3130 --- linux-2.6.32.44/arch/powerpc/platforms/ps3/system-bus.c 2011-03-27 14:31:47.000000000 -0400
3131 +++ linux-2.6.32.44/arch/powerpc/platforms/ps3/system-bus.c 2011-04-17 15:56:45.000000000 -0400
3132 @@ -694,7 +694,7 @@ static int ps3_dma_supported(struct devi
3133 return mask >= DMA_BIT_MASK(32);
3134 }
3135
3136 -static struct dma_map_ops ps3_sb_dma_ops = {
3137 +static const struct dma_map_ops ps3_sb_dma_ops = {
3138 .alloc_coherent = ps3_alloc_coherent,
3139 .free_coherent = ps3_free_coherent,
3140 .map_sg = ps3_sb_map_sg,
3141 @@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops
3142 .unmap_page = ps3_unmap_page,
3143 };
3144
3145 -static struct dma_map_ops ps3_ioc0_dma_ops = {
3146 +static const struct dma_map_ops ps3_ioc0_dma_ops = {
3147 .alloc_coherent = ps3_alloc_coherent,
3148 .free_coherent = ps3_free_coherent,
3149 .map_sg = ps3_ioc0_map_sg,
3150 diff -urNp linux-2.6.32.44/arch/powerpc/platforms/pseries/Kconfig linux-2.6.32.44/arch/powerpc/platforms/pseries/Kconfig
3151 --- linux-2.6.32.44/arch/powerpc/platforms/pseries/Kconfig 2011-03-27 14:31:47.000000000 -0400
3152 +++ linux-2.6.32.44/arch/powerpc/platforms/pseries/Kconfig 2011-04-17 15:56:45.000000000 -0400
3153 @@ -2,6 +2,8 @@ config PPC_PSERIES
3154 depends on PPC64 && PPC_BOOK3S
3155 bool "IBM pSeries & new (POWER5-based) iSeries"
3156 select MPIC
3157 + select PCI_MSI
3158 + select XICS
3159 select PPC_I8259
3160 select PPC_RTAS
3161 select RTAS_ERROR_LOGGING
3162 diff -urNp linux-2.6.32.44/arch/s390/include/asm/elf.h linux-2.6.32.44/arch/s390/include/asm/elf.h
3163 --- linux-2.6.32.44/arch/s390/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3164 +++ linux-2.6.32.44/arch/s390/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
3165 @@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
3166 that it will "exec", and that there is sufficient room for the brk. */
3167 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
3168
3169 +#ifdef CONFIG_PAX_ASLR
3170 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
3171 +
3172 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3173 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3174 +#endif
3175 +
3176 /* This yields a mask that user programs can use to figure out what
3177 instruction set this CPU supports. */
3178
3179 diff -urNp linux-2.6.32.44/arch/s390/include/asm/setup.h linux-2.6.32.44/arch/s390/include/asm/setup.h
3180 --- linux-2.6.32.44/arch/s390/include/asm/setup.h 2011-03-27 14:31:47.000000000 -0400
3181 +++ linux-2.6.32.44/arch/s390/include/asm/setup.h 2011-04-17 15:56:45.000000000 -0400
3182 @@ -50,13 +50,13 @@ extern unsigned long memory_end;
3183 void detect_memory_layout(struct mem_chunk chunk[]);
3184
3185 #ifdef CONFIG_S390_SWITCH_AMODE
3186 -extern unsigned int switch_amode;
3187 +#define switch_amode (1)
3188 #else
3189 #define switch_amode (0)
3190 #endif
3191
3192 #ifdef CONFIG_S390_EXEC_PROTECT
3193 -extern unsigned int s390_noexec;
3194 +#define s390_noexec (1)
3195 #else
3196 #define s390_noexec (0)
3197 #endif
3198 diff -urNp linux-2.6.32.44/arch/s390/include/asm/uaccess.h linux-2.6.32.44/arch/s390/include/asm/uaccess.h
3199 --- linux-2.6.32.44/arch/s390/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
3200 +++ linux-2.6.32.44/arch/s390/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
3201 @@ -232,6 +232,10 @@ static inline unsigned long __must_check
3202 copy_to_user(void __user *to, const void *from, unsigned long n)
3203 {
3204 might_fault();
3205 +
3206 + if ((long)n < 0)
3207 + return n;
3208 +
3209 if (access_ok(VERIFY_WRITE, to, n))
3210 n = __copy_to_user(to, from, n);
3211 return n;
3212 @@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void
3213 static inline unsigned long __must_check
3214 __copy_from_user(void *to, const void __user *from, unsigned long n)
3215 {
3216 + if ((long)n < 0)
3217 + return n;
3218 +
3219 if (__builtin_constant_p(n) && (n <= 256))
3220 return uaccess.copy_from_user_small(n, from, to);
3221 else
3222 @@ -283,6 +290,10 @@ static inline unsigned long __must_check
3223 copy_from_user(void *to, const void __user *from, unsigned long n)
3224 {
3225 might_fault();
3226 +
3227 + if ((long)n < 0)
3228 + return n;
3229 +
3230 if (access_ok(VERIFY_READ, from, n))
3231 n = __copy_from_user(to, from, n);
3232 else
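
The (long)n < 0 guards added to the s390 copy helpers reject any length whose sign bit is set before a single byte is touched; such lengths usually come from signed arithmetic that went negative and was then converted to a huge unsigned count. A self-contained demonstration of the failure mode and the guard (checked_copy() is a stand-in, not the kernel routine):

#include <stdio.h>

/* Mimics the patched helpers: refuse a length whose sign bit is set,
 * reporting the whole request as "not copied". */
static unsigned long checked_copy(unsigned long n)
{
        if ((long)n < 0)
                return n;
        /* ... access_ok() and the real copy would go here ... */
        return 0;
}

int main(void)
{
        long want = 16, already = 32;           /* buggy caller arithmetic */
        unsigned long n = (unsigned long)(want - already);  /* wraps huge  */

        printf("requested %lu bytes, left uncopied: %lu\n", n, checked_copy(n));
        return 0;
}
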
3233 diff -urNp linux-2.6.32.44/arch/s390/Kconfig linux-2.6.32.44/arch/s390/Kconfig
3234 --- linux-2.6.32.44/arch/s390/Kconfig 2011-03-27 14:31:47.000000000 -0400
3235 +++ linux-2.6.32.44/arch/s390/Kconfig 2011-04-17 15:56:45.000000000 -0400
3236 @@ -194,28 +194,26 @@ config AUDIT_ARCH
3237
3238 config S390_SWITCH_AMODE
3239 bool "Switch kernel/user addressing modes"
3240 + default y
3241 help
3242 This option allows to switch the addressing modes of kernel and user
3243 - space. The kernel parameter switch_amode=on will enable this feature,
3244 - default is disabled. Enabling this (via kernel parameter) on machines
3245 - earlier than IBM System z9-109 EC/BC will reduce system performance.
3246 + space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3247 + will reduce system performance.
3248
3249 Note that this option will also be selected by selecting the execute
3250 - protection option below. Enabling the execute protection via the
3251 - noexec kernel parameter will also switch the addressing modes,
3252 - independent of the switch_amode kernel parameter.
3253 + protection option below. Enabling the execute protection will also
3254 + switch the addressing modes, independent of this option.
3255
3256
3257 config S390_EXEC_PROTECT
3258 bool "Data execute protection"
3259 + default y
3260 select S390_SWITCH_AMODE
3261 help
3262 This option allows to enable a buffer overflow protection for user
3263 space programs and it also selects the addressing mode option above.
3264 - The kernel parameter noexec=on will enable this feature and also
3265 - switch the addressing modes, default is disabled. Enabling this (via
3266 - kernel parameter) on machines earlier than IBM System z9-109 EC/BC
3267 - will reduce system performance.
3268 + Enabling this on machines earlier than IBM System z9-109 EC/BC will
3269 + reduce system performance.
3270
3271 comment "Code generation options"
3272
3273 diff -urNp linux-2.6.32.44/arch/s390/kernel/module.c linux-2.6.32.44/arch/s390/kernel/module.c
3274 --- linux-2.6.32.44/arch/s390/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
3275 +++ linux-2.6.32.44/arch/s390/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
3276 @@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
3277
3278 /* Increase core size by size of got & plt and set start
3279 offsets for got and plt. */
3280 - me->core_size = ALIGN(me->core_size, 4);
3281 - me->arch.got_offset = me->core_size;
3282 - me->core_size += me->arch.got_size;
3283 - me->arch.plt_offset = me->core_size;
3284 - me->core_size += me->arch.plt_size;
3285 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
3286 + me->arch.got_offset = me->core_size_rw;
3287 + me->core_size_rw += me->arch.got_size;
3288 + me->arch.plt_offset = me->core_size_rx;
3289 + me->core_size_rx += me->arch.plt_size;
3290 return 0;
3291 }
3292
3293 @@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3294 if (info->got_initialized == 0) {
3295 Elf_Addr *gotent;
3296
3297 - gotent = me->module_core + me->arch.got_offset +
3298 + gotent = me->module_core_rw + me->arch.got_offset +
3299 info->got_offset;
3300 *gotent = val;
3301 info->got_initialized = 1;
3302 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3303 else if (r_type == R_390_GOTENT ||
3304 r_type == R_390_GOTPLTENT)
3305 *(unsigned int *) loc =
3306 - (val + (Elf_Addr) me->module_core - loc) >> 1;
3307 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3308 else if (r_type == R_390_GOT64 ||
3309 r_type == R_390_GOTPLT64)
3310 *(unsigned long *) loc = val;
3311 @@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3312 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3313 if (info->plt_initialized == 0) {
3314 unsigned int *ip;
3315 - ip = me->module_core + me->arch.plt_offset +
3316 + ip = me->module_core_rx + me->arch.plt_offset +
3317 info->plt_offset;
3318 #ifndef CONFIG_64BIT
3319 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3320 @@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3321 val - loc + 0xffffUL < 0x1ffffeUL) ||
3322 (r_type == R_390_PLT32DBL &&
3323 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3324 - val = (Elf_Addr) me->module_core +
3325 + val = (Elf_Addr) me->module_core_rx +
3326 me->arch.plt_offset +
3327 info->plt_offset;
3328 val += rela->r_addend - loc;
3329 @@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3330 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3331 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3332 val = val + rela->r_addend -
3333 - ((Elf_Addr) me->module_core + me->arch.got_offset);
3334 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3335 if (r_type == R_390_GOTOFF16)
3336 *(unsigned short *) loc = val;
3337 else if (r_type == R_390_GOTOFF32)
3338 @@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3339 break;
3340 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3341 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3342 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
3343 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3344 rela->r_addend - loc;
3345 if (r_type == R_390_GOTPC)
3346 *(unsigned int *) loc = val;
3347 diff -urNp linux-2.6.32.44/arch/s390/kernel/setup.c linux-2.6.32.44/arch/s390/kernel/setup.c
3348 --- linux-2.6.32.44/arch/s390/kernel/setup.c 2011-03-27 14:31:47.000000000 -0400
3349 +++ linux-2.6.32.44/arch/s390/kernel/setup.c 2011-04-17 15:56:45.000000000 -0400
3350 @@ -306,9 +306,6 @@ static int __init early_parse_mem(char *
3351 early_param("mem", early_parse_mem);
3352
3353 #ifdef CONFIG_S390_SWITCH_AMODE
3354 -unsigned int switch_amode = 0;
3355 -EXPORT_SYMBOL_GPL(switch_amode);
3356 -
3357 static int set_amode_and_uaccess(unsigned long user_amode,
3358 unsigned long user32_amode)
3359 {
3360 @@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigne
3361 return 0;
3362 }
3363 }
3364 -
3365 -/*
3366 - * Switch kernel/user addressing modes?
3367 - */
3368 -static int __init early_parse_switch_amode(char *p)
3369 -{
3370 - switch_amode = 1;
3371 - return 0;
3372 -}
3373 -early_param("switch_amode", early_parse_switch_amode);
3374 -
3375 #else /* CONFIG_S390_SWITCH_AMODE */
3376 static inline int set_amode_and_uaccess(unsigned long user_amode,
3377 unsigned long user32_amode)
3378 @@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(
3379 }
3380 #endif /* CONFIG_S390_SWITCH_AMODE */
3381
3382 -#ifdef CONFIG_S390_EXEC_PROTECT
3383 -unsigned int s390_noexec = 0;
3384 -EXPORT_SYMBOL_GPL(s390_noexec);
3385 -
3386 -/*
3387 - * Enable execute protection?
3388 - */
3389 -static int __init early_parse_noexec(char *p)
3390 -{
3391 - if (!strncmp(p, "off", 3))
3392 - return 0;
3393 - switch_amode = 1;
3394 - s390_noexec = 1;
3395 - return 0;
3396 -}
3397 -early_param("noexec", early_parse_noexec);
3398 -#endif /* CONFIG_S390_EXEC_PROTECT */
3399 -
3400 static void setup_addressing_mode(void)
3401 {
3402 if (s390_noexec) {
3403 diff -urNp linux-2.6.32.44/arch/s390/mm/mmap.c linux-2.6.32.44/arch/s390/mm/mmap.c
3404 --- linux-2.6.32.44/arch/s390/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3405 +++ linux-2.6.32.44/arch/s390/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
3406 @@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_str
3407 */
3408 if (mmap_is_legacy()) {
3409 mm->mmap_base = TASK_UNMAPPED_BASE;
3410 +
3411 +#ifdef CONFIG_PAX_RANDMMAP
3412 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3413 + mm->mmap_base += mm->delta_mmap;
3414 +#endif
3415 +
3416 mm->get_unmapped_area = arch_get_unmapped_area;
3417 mm->unmap_area = arch_unmap_area;
3418 } else {
3419 mm->mmap_base = mmap_base();
3420 +
3421 +#ifdef CONFIG_PAX_RANDMMAP
3422 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3423 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3424 +#endif
3425 +
3426 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3427 mm->unmap_area = arch_unmap_area_topdown;
3428 }
3429 @@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_str
3430 */
3431 if (mmap_is_legacy()) {
3432 mm->mmap_base = TASK_UNMAPPED_BASE;
3433 +
3434 +#ifdef CONFIG_PAX_RANDMMAP
3435 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3436 + mm->mmap_base += mm->delta_mmap;
3437 +#endif
3438 +
3439 mm->get_unmapped_area = s390_get_unmapped_area;
3440 mm->unmap_area = arch_unmap_area;
3441 } else {
3442 mm->mmap_base = mmap_base();
3443 +
3444 +#ifdef CONFIG_PAX_RANDMMAP
3445 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3446 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3447 +#endif
3448 +
3449 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3450 mm->unmap_area = arch_unmap_area_topdown;
3451 }
3452 diff -urNp linux-2.6.32.44/arch/score/include/asm/system.h linux-2.6.32.44/arch/score/include/asm/system.h
3453 --- linux-2.6.32.44/arch/score/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
3454 +++ linux-2.6.32.44/arch/score/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
3455 @@ -17,7 +17,7 @@ do { \
3456 #define finish_arch_switch(prev) do {} while (0)
3457
3458 typedef void (*vi_handler_t)(void);
3459 -extern unsigned long arch_align_stack(unsigned long sp);
3460 +#define arch_align_stack(x) (x)
3461
3462 #define mb() barrier()
3463 #define rmb() barrier()
3464 diff -urNp linux-2.6.32.44/arch/score/kernel/process.c linux-2.6.32.44/arch/score/kernel/process.c
3465 --- linux-2.6.32.44/arch/score/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
3466 +++ linux-2.6.32.44/arch/score/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
3467 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
3468
3469 return task_pt_regs(task)->cp0_epc;
3470 }
3471 -
3472 -unsigned long arch_align_stack(unsigned long sp)
3473 -{
3474 - return sp;
3475 -}
3476 diff -urNp linux-2.6.32.44/arch/sh/boards/mach-hp6xx/pm.c linux-2.6.32.44/arch/sh/boards/mach-hp6xx/pm.c
3477 --- linux-2.6.32.44/arch/sh/boards/mach-hp6xx/pm.c 2011-03-27 14:31:47.000000000 -0400
3478 +++ linux-2.6.32.44/arch/sh/boards/mach-hp6xx/pm.c 2011-04-17 15:56:45.000000000 -0400
3479 @@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_
3480 return 0;
3481 }
3482
3483 -static struct platform_suspend_ops hp6x0_pm_ops = {
3484 +static const struct platform_suspend_ops hp6x0_pm_ops = {
3485 .enter = hp6x0_pm_enter,
3486 .valid = suspend_valid_only_mem,
3487 };
3488 diff -urNp linux-2.6.32.44/arch/sh/kernel/cpu/sh4/sq.c linux-2.6.32.44/arch/sh/kernel/cpu/sh4/sq.c
3489 --- linux-2.6.32.44/arch/sh/kernel/cpu/sh4/sq.c 2011-03-27 14:31:47.000000000 -0400
3490 +++ linux-2.6.32.44/arch/sh/kernel/cpu/sh4/sq.c 2011-04-17 15:56:46.000000000 -0400
3491 @@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[
3492 NULL,
3493 };
3494
3495 -static struct sysfs_ops sq_sysfs_ops = {
3496 +static const struct sysfs_ops sq_sysfs_ops = {
3497 .show = sq_sysfs_show,
3498 .store = sq_sysfs_store,
3499 };
3500 diff -urNp linux-2.6.32.44/arch/sh/kernel/cpu/shmobile/pm.c linux-2.6.32.44/arch/sh/kernel/cpu/shmobile/pm.c
3501 --- linux-2.6.32.44/arch/sh/kernel/cpu/shmobile/pm.c 2011-03-27 14:31:47.000000000 -0400
3502 +++ linux-2.6.32.44/arch/sh/kernel/cpu/shmobile/pm.c 2011-04-17 15:56:46.000000000 -0400
3503 @@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t s
3504 return 0;
3505 }
3506
3507 -static struct platform_suspend_ops sh_pm_ops = {
3508 +static const struct platform_suspend_ops sh_pm_ops = {
3509 .enter = sh_pm_enter,
3510 .valid = suspend_valid_only_mem,
3511 };
3512 diff -urNp linux-2.6.32.44/arch/sh/kernel/kgdb.c linux-2.6.32.44/arch/sh/kernel/kgdb.c
3513 --- linux-2.6.32.44/arch/sh/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
3514 +++ linux-2.6.32.44/arch/sh/kernel/kgdb.c 2011-04-17 15:56:46.000000000 -0400
3515 @@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
3516 {
3517 }
3518
3519 -struct kgdb_arch arch_kgdb_ops = {
3520 +const struct kgdb_arch arch_kgdb_ops = {
3521 /* Breakpoint instruction: trapa #0x3c */
3522 #ifdef CONFIG_CPU_LITTLE_ENDIAN
3523 .gdb_bpt_instr = { 0x3c, 0xc3 },
3524 diff -urNp linux-2.6.32.44/arch/sh/mm/mmap.c linux-2.6.32.44/arch/sh/mm/mmap.c
3525 --- linux-2.6.32.44/arch/sh/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3526 +++ linux-2.6.32.44/arch/sh/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
3527 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
3528 addr = PAGE_ALIGN(addr);
3529
3530 vma = find_vma(mm, addr);
3531 - if (TASK_SIZE - len >= addr &&
3532 - (!vma || addr + len <= vma->vm_start))
3533 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3534 return addr;
3535 }
3536
3537 @@ -106,7 +105,7 @@ full_search:
3538 }
3539 return -ENOMEM;
3540 }
3541 - if (likely(!vma || addr + len <= vma->vm_start)) {
3542 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3543 /*
3544 * Remember the place where we stopped the search:
3545 */
3546 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
3547 addr = PAGE_ALIGN(addr);
3548
3549 vma = find_vma(mm, addr);
3550 - if (TASK_SIZE - len >= addr &&
3551 - (!vma || addr + len <= vma->vm_start))
3552 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3553 return addr;
3554 }
3555
3556 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
3557 /* make sure it can fit in the remaining address space */
3558 if (likely(addr > len)) {
3559 vma = find_vma(mm, addr-len);
3560 - if (!vma || addr <= vma->vm_start) {
3561 + if (check_heap_stack_gap(vma, addr - len, len)) {
3562 /* remember the address as a hint for next time */
3563 return (mm->free_area_cache = addr-len);
3564 }
3565 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
3566 if (unlikely(mm->mmap_base < len))
3567 goto bottomup;
3568
3569 - addr = mm->mmap_base-len;
3570 - if (do_colour_align)
3571 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3572 + addr = mm->mmap_base - len;
3573
3574 do {
3575 + if (do_colour_align)
3576 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3577 /*
3578 * Lookup failure means no vma is above this address,
3579 * else if new region fits below vma->vm_start,
3580 * return with success:
3581 */
3582 vma = find_vma(mm, addr);
3583 - if (likely(!vma || addr+len <= vma->vm_start)) {
3584 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3585 /* remember the address as a hint for next time */
3586 return (mm->free_area_cache = addr);
3587 }
3588 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
3589 mm->cached_hole_size = vma->vm_start - addr;
3590
3591 /* try just below the current vma->vm_start */
3592 - addr = vma->vm_start-len;
3593 - if (do_colour_align)
3594 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3595 - } while (likely(len < vma->vm_start));
3596 + addr = skip_heap_stack_gap(vma, len);
3597 + } while (!IS_ERR_VALUE(addr));
3598
3599 bottomup:
3600 /*
3601 diff -urNp linux-2.6.32.44/arch/sparc/include/asm/atomic_64.h linux-2.6.32.44/arch/sparc/include/asm/atomic_64.h
3602 --- linux-2.6.32.44/arch/sparc/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
3603 +++ linux-2.6.32.44/arch/sparc/include/asm/atomic_64.h 2011-07-13 22:22:56.000000000 -0400
3604 @@ -14,18 +14,40 @@
3605 #define ATOMIC64_INIT(i) { (i) }
3606
3607 #define atomic_read(v) ((v)->counter)
3608 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3609 +{
3610 + return v->counter;
3611 +}
3612 #define atomic64_read(v) ((v)->counter)
3613 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3614 +{
3615 + return v->counter;
3616 +}
3617
3618 #define atomic_set(v, i) (((v)->counter) = i)
3619 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3620 +{
3621 + v->counter = i;
3622 +}
3623 #define atomic64_set(v, i) (((v)->counter) = i)
3624 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3625 +{
3626 + v->counter = i;
3627 +}
3628
3629 extern void atomic_add(int, atomic_t *);
3630 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3631 extern void atomic64_add(long, atomic64_t *);
3632 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3633 extern void atomic_sub(int, atomic_t *);
3634 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3635 extern void atomic64_sub(long, atomic64_t *);
3636 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3637
3638 extern int atomic_add_ret(int, atomic_t *);
3639 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3640 extern long atomic64_add_ret(long, atomic64_t *);
3641 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3642 extern int atomic_sub_ret(int, atomic_t *);
3643 extern long atomic64_sub_ret(long, atomic64_t *);
3644
3645 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
3646 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3647
3648 #define atomic_inc_return(v) atomic_add_ret(1, v)
3649 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3650 +{
3651 + return atomic_add_ret_unchecked(1, v);
3652 +}
3653 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3654 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3655 +{
3656 + return atomic64_add_ret_unchecked(1, v);
3657 +}
3658
3659 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3660 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3661
3662 #define atomic_add_return(i, v) atomic_add_ret(i, v)
3663 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
3664 +{
3665 + return atomic_add_ret_unchecked(i, v);
3666 +}
3667 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
3668 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
3669 +{
3670 + return atomic64_add_ret_unchecked(i, v);
3671 +}
3672
3673 /*
3674 * atomic_inc_and_test - increment and test
3675 @@ -50,6 +88,7 @@ extern long atomic64_sub_ret(long, atomi
3676 * other cases.
3677 */
3678 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3679 +#define atomic_inc_and_test_unchecked(v) (atomic_inc_return_unchecked(v) == 0)
3680 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3681
3682 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3683 @@ -59,30 +98,59 @@ extern long atomic64_sub_ret(long, atomi
3684 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3685
3686 #define atomic_inc(v) atomic_add(1, v)
3687 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3688 +{
3689 + atomic_add_unchecked(1, v);
3690 +}
3691 #define atomic64_inc(v) atomic64_add(1, v)
3692 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3693 +{
3694 + atomic64_add_unchecked(1, v);
3695 +}
3696
3697 #define atomic_dec(v) atomic_sub(1, v)
3698 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3699 +{
3700 + atomic_sub_unchecked(1, v);
3701 +}
3702 #define atomic64_dec(v) atomic64_sub(1, v)
3703 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3704 +{
3705 + atomic64_sub_unchecked(1, v);
3706 +}
3707
3708 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3709 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3710
3711 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3712 +#define atomic_cmpxchg_unchecked(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3713 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3714 +#define atomic_xchg_unchecked(v, new) (xchg(&((v)->counter), new))
3715
3716 static inline int atomic_add_unless(atomic_t *v, int a, int u)
3717 {
3718 - int c, old;
3719 + int c, old, new;
3720 c = atomic_read(v);
3721 for (;;) {
3722 - if (unlikely(c == (u)))
3723 + if (unlikely(c == u))
3724 break;
3725 - old = atomic_cmpxchg((v), c, c + (a));
3726 +
3727 + asm volatile("addcc %2, %0, %0\n"
3728 +
3729 +#ifdef CONFIG_PAX_REFCOUNT
3730 + "tvs %%icc, 6\n"
3731 +#endif
3732 +
3733 + : "=r" (new)
3734 + : "0" (c), "ir" (a)
3735 + : "cc");
3736 +
3737 + old = atomic_cmpxchg(v, c, new);
3738 if (likely(old == c))
3739 break;
3740 c = old;
3741 }
3742 - return c != (u);
3743 + return c != u;
3744 }
3745
3746 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
3747 @@ -93,17 +161,28 @@ static inline int atomic_add_unless(atom
3748
3749 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3750 {
3751 - long c, old;
3752 + long c, old, new;
3753 c = atomic64_read(v);
3754 for (;;) {
3755 - if (unlikely(c == (u)))
3756 + if (unlikely(c == u))
3757 break;
3758 - old = atomic64_cmpxchg((v), c, c + (a));
3759 +
3760 + asm volatile("addcc %2, %0, %0\n"
3761 +
3762 +#ifdef CONFIG_PAX_REFCOUNT
3763 + "tvs %%xcc, 6\n"
3764 +#endif
3765 +
3766 + : "=r" (new)
3767 + : "0" (c), "ir" (a)
3768 + : "cc");
3769 +
3770 + old = atomic64_cmpxchg(v, c, new);
3771 if (likely(old == c))
3772 break;
3773 c = old;
3774 }
3775 - return c != (u);
3776 + return c != u;
3777 }
3778
3779 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
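
Under CONFIG_PAX_REFCOUNT the sparc64 atomics above switch from add/sub to addcc/subcc followed by "tvs %icc, 6", so a signed overflow of a reference count traps instead of silently wrapping, while the new *_unchecked variants keep the old wrapping behaviour for counters (statistics and the like) that may legitimately overflow. A portable C sketch of that checked/unchecked split, using __builtin_add_overflow in place of the condition-code trap (an approximation, not the patch's implementation, and not actually atomic):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int counter; } atomic_t;               /* checked         */
typedef struct { int counter; } atomic_unchecked_t;     /* allowed to wrap */

static void atomic_add(int i, atomic_t *v)
{
        int sum;

        if (__builtin_add_overflow(v->counter, i, &sum)) {
                fprintf(stderr, "refcount overflow, aborting\n");
                abort();                /* the patch traps into the kernel */
        }
        v->counter = sum;
}

static void atomic_add_unchecked(int i, atomic_unchecked_t *v)
{
        /* wrap deliberately, via unsigned arithmetic, as before the patch */
        v->counter = (int)((unsigned int)v->counter + (unsigned int)i);
}

int main(void)
{
        atomic_unchecked_t stat = { INT_MAX };
        atomic_t ref = { INT_MAX };

        atomic_add_unchecked(1, &stat);         /* fine: a statistic may wrap */
        printf("unchecked counter wrapped to %d\n", stat.counter);

        atomic_add(1, &ref);                    /* aborts: refcounts must not */
        return 0;
}
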
3780 diff -urNp linux-2.6.32.44/arch/sparc/include/asm/cache.h linux-2.6.32.44/arch/sparc/include/asm/cache.h
3781 --- linux-2.6.32.44/arch/sparc/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
3782 +++ linux-2.6.32.44/arch/sparc/include/asm/cache.h 2011-07-06 19:53:33.000000000 -0400
3783 @@ -8,7 +8,7 @@
3784 #define _SPARC_CACHE_H
3785
3786 #define L1_CACHE_SHIFT 5
3787 -#define L1_CACHE_BYTES 32
3788 +#define L1_CACHE_BYTES 32UL
3789 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
3790
3791 #ifdef CONFIG_SPARC32
3792 diff -urNp linux-2.6.32.44/arch/sparc/include/asm/dma-mapping.h linux-2.6.32.44/arch/sparc/include/asm/dma-mapping.h
3793 --- linux-2.6.32.44/arch/sparc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
3794 +++ linux-2.6.32.44/arch/sparc/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
3795 @@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *d
3796 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
3797 #define dma_is_consistent(d, h) (1)
3798
3799 -extern struct dma_map_ops *dma_ops, pci32_dma_ops;
3800 +extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
3801 extern struct bus_type pci_bus_type;
3802
3803 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3804 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
3805 {
3806 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
3807 if (dev->bus == &pci_bus_type)
3808 @@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dm
3809 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3810 dma_addr_t *dma_handle, gfp_t flag)
3811 {
3812 - struct dma_map_ops *ops = get_dma_ops(dev);
3813 + const struct dma_map_ops *ops = get_dma_ops(dev);
3814 void *cpu_addr;
3815
3816 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
3817 @@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(s
3818 static inline void dma_free_coherent(struct device *dev, size_t size,
3819 void *cpu_addr, dma_addr_t dma_handle)
3820 {
3821 - struct dma_map_ops *ops = get_dma_ops(dev);
3822 + const struct dma_map_ops *ops = get_dma_ops(dev);
3823
3824 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
3825 ops->free_coherent(dev, size, cpu_addr, dma_handle);
3826 diff -urNp linux-2.6.32.44/arch/sparc/include/asm/elf_32.h linux-2.6.32.44/arch/sparc/include/asm/elf_32.h
3827 --- linux-2.6.32.44/arch/sparc/include/asm/elf_32.h 2011-03-27 14:31:47.000000000 -0400
3828 +++ linux-2.6.32.44/arch/sparc/include/asm/elf_32.h 2011-04-17 15:56:46.000000000 -0400
3829 @@ -116,6 +116,13 @@ typedef struct {
3830
3831 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3832
3833 +#ifdef CONFIG_PAX_ASLR
3834 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3835 +
3836 +#define PAX_DELTA_MMAP_LEN 16
3837 +#define PAX_DELTA_STACK_LEN 16
3838 +#endif
3839 +
3840 /* This yields a mask that user programs can use to figure out what
3841 instruction set this cpu supports. This can NOT be done in userspace
3842 on Sparc. */
3843 diff -urNp linux-2.6.32.44/arch/sparc/include/asm/elf_64.h linux-2.6.32.44/arch/sparc/include/asm/elf_64.h
3844 --- linux-2.6.32.44/arch/sparc/include/asm/elf_64.h 2011-03-27 14:31:47.000000000 -0400
3845 +++ linux-2.6.32.44/arch/sparc/include/asm/elf_64.h 2011-04-17 15:56:46.000000000 -0400
3846 @@ -163,6 +163,12 @@ typedef struct {
3847 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3848 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3849
3850 +#ifdef CONFIG_PAX_ASLR
3851 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3852 +
3853 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3854 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3855 +#endif
3856
3857 /* This yields a mask that user programs can use to figure out what
3858 instruction set this cpu supports. */
3859 diff -urNp linux-2.6.32.44/arch/sparc/include/asm/pgtable_32.h linux-2.6.32.44/arch/sparc/include/asm/pgtable_32.h
3860 --- linux-2.6.32.44/arch/sparc/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
3861 +++ linux-2.6.32.44/arch/sparc/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
3862 @@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3863 BTFIXUPDEF_INT(page_none)
3864 BTFIXUPDEF_INT(page_copy)
3865 BTFIXUPDEF_INT(page_readonly)
3866 +
3867 +#ifdef CONFIG_PAX_PAGEEXEC
3868 +BTFIXUPDEF_INT(page_shared_noexec)
3869 +BTFIXUPDEF_INT(page_copy_noexec)
3870 +BTFIXUPDEF_INT(page_readonly_noexec)
3871 +#endif
3872 +
3873 BTFIXUPDEF_INT(page_kernel)
3874
3875 #define PMD_SHIFT SUN4C_PMD_SHIFT
3876 @@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
3877 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3878 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3879
3880 +#ifdef CONFIG_PAX_PAGEEXEC
3881 +extern pgprot_t PAGE_SHARED_NOEXEC;
3882 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3883 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3884 +#else
3885 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3886 +# define PAGE_COPY_NOEXEC PAGE_COPY
3887 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3888 +#endif
3889 +
3890 extern unsigned long page_kernel;
3891
3892 #ifdef MODULE
3893 diff -urNp linux-2.6.32.44/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.32.44/arch/sparc/include/asm/pgtsrmmu.h
3894 --- linux-2.6.32.44/arch/sparc/include/asm/pgtsrmmu.h 2011-03-27 14:31:47.000000000 -0400
3895 +++ linux-2.6.32.44/arch/sparc/include/asm/pgtsrmmu.h 2011-04-17 15:56:46.000000000 -0400
3896 @@ -115,6 +115,13 @@
3897 SRMMU_EXEC | SRMMU_REF)
3898 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3899 SRMMU_EXEC | SRMMU_REF)
3900 +
3901 +#ifdef CONFIG_PAX_PAGEEXEC
3902 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3903 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3904 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3905 +#endif
3906 +
3907 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3908 SRMMU_DIRTY | SRMMU_REF)
3909
3910 diff -urNp linux-2.6.32.44/arch/sparc/include/asm/spinlock_64.h linux-2.6.32.44/arch/sparc/include/asm/spinlock_64.h
3911 --- linux-2.6.32.44/arch/sparc/include/asm/spinlock_64.h 2011-03-27 14:31:47.000000000 -0400
3912 +++ linux-2.6.32.44/arch/sparc/include/asm/spinlock_64.h 2011-05-04 17:56:20.000000000 -0400
3913 @@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags
3914
3915 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3916
3917 -static void inline arch_read_lock(raw_rwlock_t *lock)
3918 +static inline void arch_read_lock(raw_rwlock_t *lock)
3919 {
3920 unsigned long tmp1, tmp2;
3921
3922 __asm__ __volatile__ (
3923 "1: ldsw [%2], %0\n"
3924 " brlz,pn %0, 2f\n"
3925 -"4: add %0, 1, %1\n"
3926 +"4: addcc %0, 1, %1\n"
3927 +
3928 +#ifdef CONFIG_PAX_REFCOUNT
3929 +" tvs %%icc, 6\n"
3930 +#endif
3931 +
3932 " cas [%2], %0, %1\n"
3933 " cmp %0, %1\n"
3934 " bne,pn %%icc, 1b\n"
3935 @@ -112,7 +117,7 @@ static void inline arch_read_lock(raw_rw
3936 " .previous"
3937 : "=&r" (tmp1), "=&r" (tmp2)
3938 : "r" (lock)
3939 - : "memory");
3940 + : "memory", "cc");
3941 }
3942
3943 static int inline arch_read_trylock(raw_rwlock_t *lock)
3944 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_
3945 "1: ldsw [%2], %0\n"
3946 " brlz,a,pn %0, 2f\n"
3947 " mov 0, %0\n"
3948 -" add %0, 1, %1\n"
3949 +" addcc %0, 1, %1\n"
3950 +
3951 +#ifdef CONFIG_PAX_REFCOUNT
3952 +" tvs %%icc, 6\n"
3953 +#endif
3954 +
3955 " cas [%2], %0, %1\n"
3956 " cmp %0, %1\n"
3957 " bne,pn %%icc, 1b\n"
3958 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_
3959 return tmp1;
3960 }
3961
3962 -static void inline arch_read_unlock(raw_rwlock_t *lock)
3963 +static inline void arch_read_unlock(raw_rwlock_t *lock)
3964 {
3965 unsigned long tmp1, tmp2;
3966
3967 __asm__ __volatile__(
3968 "1: lduw [%2], %0\n"
3969 -" sub %0, 1, %1\n"
3970 +" subcc %0, 1, %1\n"
3971 +
3972 +#ifdef CONFIG_PAX_REFCOUNT
3973 +" tvs %%icc, 6\n"
3974 +#endif
3975 +
3976 " cas [%2], %0, %1\n"
3977 " cmp %0, %1\n"
3978 " bne,pn %%xcc, 1b\n"
3979 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_
3980 : "memory");
3981 }
3982
3983 -static void inline arch_write_lock(raw_rwlock_t *lock)
3984 +static inline void arch_write_lock(raw_rwlock_t *lock)
3985 {
3986 unsigned long mask, tmp1, tmp2;
3987
3988 @@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_r
3989 : "memory");
3990 }
3991
3992 -static void inline arch_write_unlock(raw_rwlock_t *lock)
3993 +static inline void arch_write_unlock(raw_rwlock_t *lock)
3994 {
3995 __asm__ __volatile__(
3996 " stw %%g0, [%0]"
3997 diff -urNp linux-2.6.32.44/arch/sparc/include/asm/thread_info_32.h linux-2.6.32.44/arch/sparc/include/asm/thread_info_32.h
3998 --- linux-2.6.32.44/arch/sparc/include/asm/thread_info_32.h 2011-03-27 14:31:47.000000000 -0400
3999 +++ linux-2.6.32.44/arch/sparc/include/asm/thread_info_32.h 2011-06-04 20:46:01.000000000 -0400
4000 @@ -50,6 +50,8 @@ struct thread_info {
4001 unsigned long w_saved;
4002
4003 struct restart_block restart_block;
4004 +
4005 + unsigned long lowest_stack;
4006 };
4007
4008 /*
4009 diff -urNp linux-2.6.32.44/arch/sparc/include/asm/thread_info_64.h linux-2.6.32.44/arch/sparc/include/asm/thread_info_64.h
4010 --- linux-2.6.32.44/arch/sparc/include/asm/thread_info_64.h 2011-03-27 14:31:47.000000000 -0400
4011 +++ linux-2.6.32.44/arch/sparc/include/asm/thread_info_64.h 2011-06-04 20:46:21.000000000 -0400
4012 @@ -68,6 +68,8 @@ struct thread_info {
4013 struct pt_regs *kern_una_regs;
4014 unsigned int kern_una_insn;
4015
4016 + unsigned long lowest_stack;
4017 +
4018 unsigned long fpregs[0] __attribute__ ((aligned(64)));
4019 };
4020
4021 diff -urNp linux-2.6.32.44/arch/sparc/include/asm/uaccess_32.h linux-2.6.32.44/arch/sparc/include/asm/uaccess_32.h
4022 --- linux-2.6.32.44/arch/sparc/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
4023 +++ linux-2.6.32.44/arch/sparc/include/asm/uaccess_32.h 2011-04-17 15:56:46.000000000 -0400
4024 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
4025
4026 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4027 {
4028 - if (n && __access_ok((unsigned long) to, n))
4029 + if ((long)n < 0)
4030 + return n;
4031 +
4032 + if (n && __access_ok((unsigned long) to, n)) {
4033 + if (!__builtin_constant_p(n))
4034 + check_object_size(from, n, true);
4035 return __copy_user(to, (__force void __user *) from, n);
4036 - else
4037 + } else
4038 return n;
4039 }
4040
4041 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
4042 {
4043 + if ((long)n < 0)
4044 + return n;
4045 +
4046 + if (!__builtin_constant_p(n))
4047 + check_object_size(from, n, true);
4048 +
4049 return __copy_user(to, (__force void __user *) from, n);
4050 }
4051
4052 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4053 {
4054 - if (n && __access_ok((unsigned long) from, n))
4055 + if ((long)n < 0)
4056 + return n;
4057 +
4058 + if (n && __access_ok((unsigned long) from, n)) {
4059 + if (!__builtin_constant_p(n))
4060 + check_object_size(to, n, false);
4061 return __copy_user((__force void __user *) to, from, n);
4062 - else
4063 + } else
4064 return n;
4065 }
4066
4067 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
4068 {
4069 + if ((long)n < 0)
4070 + return n;
4071 +
4072 return __copy_user((__force void __user *) to, from, n);
4073 }
4074
4075 diff -urNp linux-2.6.32.44/arch/sparc/include/asm/uaccess_64.h linux-2.6.32.44/arch/sparc/include/asm/uaccess_64.h
4076 --- linux-2.6.32.44/arch/sparc/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
4077 +++ linux-2.6.32.44/arch/sparc/include/asm/uaccess_64.h 2011-04-17 15:56:46.000000000 -0400
4078 @@ -9,6 +9,7 @@
4079 #include <linux/compiler.h>
4080 #include <linux/string.h>
4081 #include <linux/thread_info.h>
4082 +#include <linux/kernel.h>
4083 #include <asm/asi.h>
4084 #include <asm/system.h>
4085 #include <asm/spitfire.h>
4086 @@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixu
4087 static inline unsigned long __must_check
4088 copy_from_user(void *to, const void __user *from, unsigned long size)
4089 {
4090 - unsigned long ret = ___copy_from_user(to, from, size);
4091 + unsigned long ret;
4092
4093 + if ((long)size < 0 || size > INT_MAX)
4094 + return size;
4095 +
4096 + if (!__builtin_constant_p(size))
4097 + check_object_size(to, size, false);
4098 +
4099 + ret = ___copy_from_user(to, from, size);
4100 if (unlikely(ret))
4101 ret = copy_from_user_fixup(to, from, size);
4102 return ret;
4103 @@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(
4104 static inline unsigned long __must_check
4105 copy_to_user(void __user *to, const void *from, unsigned long size)
4106 {
4107 - unsigned long ret = ___copy_to_user(to, from, size);
4108 + unsigned long ret;
4109 +
4110 + if ((long)size < 0 || size > INT_MAX)
4111 + return size;
4112 +
4113 + if (!__builtin_constant_p(size))
4114 + check_object_size(from, size, true);
4115
4116 + ret = ___copy_to_user(to, from, size);
4117 if (unlikely(ret))
4118 ret = copy_to_user_fixup(to, from, size);
4119 return ret;
4120 diff -urNp linux-2.6.32.44/arch/sparc/include/asm/uaccess.h linux-2.6.32.44/arch/sparc/include/asm/uaccess.h
4121 --- linux-2.6.32.44/arch/sparc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
4122 +++ linux-2.6.32.44/arch/sparc/include/asm/uaccess.h 2011-04-17 15:56:46.000000000 -0400
4123 @@ -1,5 +1,13 @@
4124 #ifndef ___ASM_SPARC_UACCESS_H
4125 #define ___ASM_SPARC_UACCESS_H
4126 +
4127 +#ifdef __KERNEL__
4128 +#ifndef __ASSEMBLY__
4129 +#include <linux/types.h>
4130 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
4131 +#endif
4132 +#endif
4133 +
4134 #if defined(__sparc__) && defined(__arch64__)
4135 #include <asm/uaccess_64.h>
4136 #else
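
asm/uaccess.h now declares check_object_size(), and the sparc copy helpers call it whenever the length is not a compile-time constant, so a runtime-sized user copy can be validated against the bounds of the kernel object it touches. A simplified user-space model of such a bounds check follows; the real check knows about slab objects and kernel stacks, whereas check_copy_bounds() and its object table here are invented:

#include <stdint.h>
#include <stdio.h>

struct tracked {
        const char *name;
        const void *base;
        size_t      size;
};

static char secret[32], buf[64];

/* Invented stand-in for the kernel's object metadata. */
static const struct tracked objects[] = {
        { "secret", secret, sizeof(secret) },
        { "buf",    buf,    sizeof(buf)    },
};

/* Reject a copy that starts inside a tracked object but runs past its end. */
static int check_copy_bounds(const void *ptr, size_t n)
{
        uintptr_t p = (uintptr_t)ptr;
        size_t i;

        for (i = 0; i < sizeof(objects) / sizeof(objects[0]); i++) {
                uintptr_t b = (uintptr_t)objects[i].base;

                if (p >= b && p < b + objects[i].size)
                        return p - b + n <= objects[i].size ? 0 : -1;
        }
        return 0;       /* unknown object: no opinion, like constant sizes */
}

int main(void)
{
        printf("copy 64 from buf:    %s\n", check_copy_bounds(buf, 64) ? "blocked" : "ok");
        printf("copy 64 from secret: %s\n", check_copy_bounds(secret, 64) ? "blocked" : "ok");
        return 0;
}
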
4137 diff -urNp linux-2.6.32.44/arch/sparc/kernel/iommu.c linux-2.6.32.44/arch/sparc/kernel/iommu.c
4138 --- linux-2.6.32.44/arch/sparc/kernel/iommu.c 2011-03-27 14:31:47.000000000 -0400
4139 +++ linux-2.6.32.44/arch/sparc/kernel/iommu.c 2011-04-17 15:56:46.000000000 -0400
4140 @@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struc
4141 spin_unlock_irqrestore(&iommu->lock, flags);
4142 }
4143
4144 -static struct dma_map_ops sun4u_dma_ops = {
4145 +static const struct dma_map_ops sun4u_dma_ops = {
4146 .alloc_coherent = dma_4u_alloc_coherent,
4147 .free_coherent = dma_4u_free_coherent,
4148 .map_page = dma_4u_map_page,
4149 @@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops
4150 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
4151 };
4152
4153 -struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4154 +const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4155 EXPORT_SYMBOL(dma_ops);
4156
4157 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
4158 diff -urNp linux-2.6.32.44/arch/sparc/kernel/ioport.c linux-2.6.32.44/arch/sparc/kernel/ioport.c
4159 --- linux-2.6.32.44/arch/sparc/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
4160 +++ linux-2.6.32.44/arch/sparc/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
4161 @@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(stru
4162 BUG();
4163 }
4164
4165 -struct dma_map_ops sbus_dma_ops = {
4166 +const struct dma_map_ops sbus_dma_ops = {
4167 .alloc_coherent = sbus_alloc_coherent,
4168 .free_coherent = sbus_free_coherent,
4169 .map_page = sbus_map_page,
4170 @@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
4171 .sync_sg_for_device = sbus_sync_sg_for_device,
4172 };
4173
4174 -struct dma_map_ops *dma_ops = &sbus_dma_ops;
4175 +const struct dma_map_ops *dma_ops = &sbus_dma_ops;
4176 EXPORT_SYMBOL(dma_ops);
4177
4178 static int __init sparc_register_ioport(void)
4179 @@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(str
4180 }
4181 }
4182
4183 -struct dma_map_ops pci32_dma_ops = {
4184 +const struct dma_map_ops pci32_dma_ops = {
4185 .alloc_coherent = pci32_alloc_coherent,
4186 .free_coherent = pci32_free_coherent,
4187 .map_page = pci32_map_page,
4188 diff -urNp linux-2.6.32.44/arch/sparc/kernel/kgdb_32.c linux-2.6.32.44/arch/sparc/kernel/kgdb_32.c
4189 --- linux-2.6.32.44/arch/sparc/kernel/kgdb_32.c 2011-03-27 14:31:47.000000000 -0400
4190 +++ linux-2.6.32.44/arch/sparc/kernel/kgdb_32.c 2011-04-17 15:56:46.000000000 -0400
4191 @@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
4192 {
4193 }
4194
4195 -struct kgdb_arch arch_kgdb_ops = {
4196 +const struct kgdb_arch arch_kgdb_ops = {
4197 /* Breakpoint instruction: ta 0x7d */
4198 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
4199 };
4200 diff -urNp linux-2.6.32.44/arch/sparc/kernel/kgdb_64.c linux-2.6.32.44/arch/sparc/kernel/kgdb_64.c
4201 --- linux-2.6.32.44/arch/sparc/kernel/kgdb_64.c 2011-03-27 14:31:47.000000000 -0400
4202 +++ linux-2.6.32.44/arch/sparc/kernel/kgdb_64.c 2011-04-17 15:56:46.000000000 -0400
4203 @@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
4204 {
4205 }
4206
4207 -struct kgdb_arch arch_kgdb_ops = {
4208 +const struct kgdb_arch arch_kgdb_ops = {
4209 /* Breakpoint instruction: ta 0x72 */
4210 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
4211 };
4212 diff -urNp linux-2.6.32.44/arch/sparc/kernel/Makefile linux-2.6.32.44/arch/sparc/kernel/Makefile
4213 --- linux-2.6.32.44/arch/sparc/kernel/Makefile 2011-03-27 14:31:47.000000000 -0400
4214 +++ linux-2.6.32.44/arch/sparc/kernel/Makefile 2011-04-17 15:56:46.000000000 -0400
4215 @@ -3,7 +3,7 @@
4216 #
4217
4218 asflags-y := -ansi
4219 -ccflags-y := -Werror
4220 +#ccflags-y := -Werror
4221
4222 extra-y := head_$(BITS).o
4223 extra-y += init_task.o
4224 diff -urNp linux-2.6.32.44/arch/sparc/kernel/pci_sun4v.c linux-2.6.32.44/arch/sparc/kernel/pci_sun4v.c
4225 --- linux-2.6.32.44/arch/sparc/kernel/pci_sun4v.c 2011-03-27 14:31:47.000000000 -0400
4226 +++ linux-2.6.32.44/arch/sparc/kernel/pci_sun4v.c 2011-04-17 15:56:46.000000000 -0400
4227 @@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct devic
4228 spin_unlock_irqrestore(&iommu->lock, flags);
4229 }
4230
4231 -static struct dma_map_ops sun4v_dma_ops = {
4232 +static const struct dma_map_ops sun4v_dma_ops = {
4233 .alloc_coherent = dma_4v_alloc_coherent,
4234 .free_coherent = dma_4v_free_coherent,
4235 .map_page = dma_4v_map_page,
4236 diff -urNp linux-2.6.32.44/arch/sparc/kernel/process_32.c linux-2.6.32.44/arch/sparc/kernel/process_32.c
4237 --- linux-2.6.32.44/arch/sparc/kernel/process_32.c 2011-03-27 14:31:47.000000000 -0400
4238 +++ linux-2.6.32.44/arch/sparc/kernel/process_32.c 2011-04-17 15:56:46.000000000 -0400
4239 @@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
4240 rw->ins[4], rw->ins[5],
4241 rw->ins[6],
4242 rw->ins[7]);
4243 - printk("%pS\n", (void *) rw->ins[7]);
4244 + printk("%pA\n", (void *) rw->ins[7]);
4245 rw = (struct reg_window32 *) rw->ins[6];
4246 }
4247 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
4248 @@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
4249
4250 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
4251 r->psr, r->pc, r->npc, r->y, print_tainted());
4252 - printk("PC: <%pS>\n", (void *) r->pc);
4253 + printk("PC: <%pA>\n", (void *) r->pc);
4254 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4255 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
4256 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
4257 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4258 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
4259 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
4260 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
4261 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
4262
4263 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4264 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
4265 @@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk,
4266 rw = (struct reg_window32 *) fp;
4267 pc = rw->ins[7];
4268 printk("[%08lx : ", pc);
4269 - printk("%pS ] ", (void *) pc);
4270 + printk("%pA ] ", (void *) pc);
4271 fp = rw->ins[6];
4272 } while (++count < 16);
4273 printk("\n");
4274 diff -urNp linux-2.6.32.44/arch/sparc/kernel/process_64.c linux-2.6.32.44/arch/sparc/kernel/process_64.c
4275 --- linux-2.6.32.44/arch/sparc/kernel/process_64.c 2011-03-27 14:31:47.000000000 -0400
4276 +++ linux-2.6.32.44/arch/sparc/kernel/process_64.c 2011-04-17 15:56:46.000000000 -0400
4277 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
4278 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
4279 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
4280 if (regs->tstate & TSTATE_PRIV)
4281 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
4282 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
4283 }
4284
4285 void show_regs(struct pt_regs *regs)
4286 {
4287 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
4288 regs->tpc, regs->tnpc, regs->y, print_tainted());
4289 - printk("TPC: <%pS>\n", (void *) regs->tpc);
4290 + printk("TPC: <%pA>\n", (void *) regs->tpc);
4291 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
4292 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
4293 regs->u_regs[3]);
4294 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
4295 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
4296 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
4297 regs->u_regs[15]);
4298 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
4299 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
4300 show_regwindow(regs);
4301 }
4302
4303 @@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void
4304 ((tp && tp->task) ? tp->task->pid : -1));
4305
4306 if (gp->tstate & TSTATE_PRIV) {
4307 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
4308 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
4309 (void *) gp->tpc,
4310 (void *) gp->o7,
4311 (void *) gp->i7,
4312 diff -urNp linux-2.6.32.44/arch/sparc/kernel/sys_sparc_32.c linux-2.6.32.44/arch/sparc/kernel/sys_sparc_32.c
4313 --- linux-2.6.32.44/arch/sparc/kernel/sys_sparc_32.c 2011-03-27 14:31:47.000000000 -0400
4314 +++ linux-2.6.32.44/arch/sparc/kernel/sys_sparc_32.c 2011-04-17 15:56:46.000000000 -0400
4315 @@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(str
4316 if (ARCH_SUN4C && len > 0x20000000)
4317 return -ENOMEM;
4318 if (!addr)
4319 - addr = TASK_UNMAPPED_BASE;
4320 + addr = current->mm->mmap_base;
4321
4322 if (flags & MAP_SHARED)
4323 addr = COLOUR_ALIGN(addr);
4324 @@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
4325 }
4326 if (TASK_SIZE - PAGE_SIZE - len < addr)
4327 return -ENOMEM;
4328 - if (!vmm || addr + len <= vmm->vm_start)
4329 + if (check_heap_stack_gap(vmm, addr, len))
4330 return addr;
4331 addr = vmm->vm_end;
4332 if (flags & MAP_SHARED)
4333 diff -urNp linux-2.6.32.44/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.44/arch/sparc/kernel/sys_sparc_64.c
4334 --- linux-2.6.32.44/arch/sparc/kernel/sys_sparc_64.c 2011-03-27 14:31:47.000000000 -0400
4335 +++ linux-2.6.32.44/arch/sparc/kernel/sys_sparc_64.c 2011-04-17 15:56:46.000000000 -0400
4336 @@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(str
4337 /* We do not accept a shared mapping if it would violate
4338 * cache aliasing constraints.
4339 */
4340 - if ((flags & MAP_SHARED) &&
4341 + if ((filp || (flags & MAP_SHARED)) &&
4342 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4343 return -EINVAL;
4344 return addr;
4345 @@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(str
4346 if (filp || (flags & MAP_SHARED))
4347 do_color_align = 1;
4348
4349 +#ifdef CONFIG_PAX_RANDMMAP
4350 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4351 +#endif
4352 +
4353 if (addr) {
4354 if (do_color_align)
4355 addr = COLOUR_ALIGN(addr, pgoff);
4356 @@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(str
4357 addr = PAGE_ALIGN(addr);
4358
4359 vma = find_vma(mm, addr);
4360 - if (task_size - len >= addr &&
4361 - (!vma || addr + len <= vma->vm_start))
4362 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4363 return addr;
4364 }
4365
4366 if (len > mm->cached_hole_size) {
4367 - start_addr = addr = mm->free_area_cache;
4368 + start_addr = addr = mm->free_area_cache;
4369 } else {
4370 - start_addr = addr = TASK_UNMAPPED_BASE;
4371 + start_addr = addr = mm->mmap_base;
4372 mm->cached_hole_size = 0;
4373 }
4374
4375 @@ -175,14 +178,14 @@ full_search:
4376 vma = find_vma(mm, VA_EXCLUDE_END);
4377 }
4378 if (unlikely(task_size < addr)) {
4379 - if (start_addr != TASK_UNMAPPED_BASE) {
4380 - start_addr = addr = TASK_UNMAPPED_BASE;
4381 + if (start_addr != mm->mmap_base) {
4382 + start_addr = addr = mm->mmap_base;
4383 mm->cached_hole_size = 0;
4384 goto full_search;
4385 }
4386 return -ENOMEM;
4387 }
4388 - if (likely(!vma || addr + len <= vma->vm_start)) {
4389 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4390 /*
4391 * Remember the place where we stopped the search:
4392 */
4393 @@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct fi
4394 /* We do not accept a shared mapping if it would violate
4395 * cache aliasing constraints.
4396 */
4397 - if ((flags & MAP_SHARED) &&
4398 + if ((filp || (flags & MAP_SHARED)) &&
4399 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4400 return -EINVAL;
4401 return addr;
4402 @@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct fi
4403 addr = PAGE_ALIGN(addr);
4404
4405 vma = find_vma(mm, addr);
4406 - if (task_size - len >= addr &&
4407 - (!vma || addr + len <= vma->vm_start))
4408 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4409 return addr;
4410 }
4411
4412 @@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct fi
4413 /* make sure it can fit in the remaining address space */
4414 if (likely(addr > len)) {
4415 vma = find_vma(mm, addr-len);
4416 - if (!vma || addr <= vma->vm_start) {
4417 + if (check_heap_stack_gap(vma, addr - len, len)) {
4418 /* remember the address as a hint for next time */
4419 return (mm->free_area_cache = addr-len);
4420 }
4421 @@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct fi
4422 if (unlikely(mm->mmap_base < len))
4423 goto bottomup;
4424
4425 - addr = mm->mmap_base-len;
4426 - if (do_color_align)
4427 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4428 + addr = mm->mmap_base - len;
4429
4430 do {
4431 + if (do_color_align)
4432 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4433 /*
4434 * Lookup failure means no vma is above this address,
4435 * else if new region fits below vma->vm_start,
4436 * return with success:
4437 */
4438 vma = find_vma(mm, addr);
4439 - if (likely(!vma || addr+len <= vma->vm_start)) {
4440 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4441 /* remember the address as a hint for next time */
4442 return (mm->free_area_cache = addr);
4443 }
4444 @@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct fi
4445 mm->cached_hole_size = vma->vm_start - addr;
4446
4447 /* try just below the current vma->vm_start */
4448 - addr = vma->vm_start-len;
4449 - if (do_color_align)
4450 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4451 - } while (likely(len < vma->vm_start));
4452 + addr = skip_heap_stack_gap(vma, len);
4453 + } while (!IS_ERR_VALUE(addr));
4454
4455 bottomup:
4456 /*
4457 @@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_str
4458 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
4459 sysctl_legacy_va_layout) {
4460 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4461 +
4462 +#ifdef CONFIG_PAX_RANDMMAP
4463 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4464 + mm->mmap_base += mm->delta_mmap;
4465 +#endif
4466 +
4467 mm->get_unmapped_area = arch_get_unmapped_area;
4468 mm->unmap_area = arch_unmap_area;
4469 } else {
4470 @@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_str
4471 gap = (task_size / 6 * 5);
4472
4473 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4474 +
4475 +#ifdef CONFIG_PAX_RANDMMAP
4476 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4477 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4478 +#endif
4479 +
4480 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4481 mm->unmap_area = arch_unmap_area_topdown;
4482 }
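
Note on the sys_sparc_64.c hunks above: the open-coded placement test `!vma || addr + len <= vma->vm_start` is replaced by check_heap_stack_gap(), and the top-down search loop steps with skip_heap_stack_gap() instead of recomputing `vma->vm_start - len` by hand, so a candidate mapping is rejected (or skipped past) unless it also leaves a guard gap below an adjacent stack segment. The helpers themselves are defined elsewhere in the patch; the stand-alone C sketch below only illustrates the shape of the check, with hypothetical names and a fixed gap size rather than the patch's configurable one.

	#include <assert.h>
	#include <stdbool.h>

	/* Simplified stand-in for the kernel's struct vm_area_struct. */
	struct vma {
		unsigned long vm_start;
		unsigned long vm_end;
		unsigned long vm_flags;	/* VM_GROWSDOWN marks a downward-growing stack */
	};

	#define VM_GROWSDOWN	0x00000100UL
	#define HEAP_STACK_GAP	(4UL << 12)	/* assumed 4-page gap, for illustration only */

	/* Accept [addr, addr + len) only if it fits below the next VMA,
	 * leaving an extra gap when that VMA is a stack. */
	static bool gap_check_sketch(const struct vma *next, unsigned long addr, unsigned long len)
	{
		if (!next)
			return true;		/* nothing mapped above this address */
		if (addr + len > next->vm_start)
			return false;		/* would overlap the next VMA */
		if (next->vm_flags & VM_GROWSDOWN)
			return addr + len + HEAP_STACK_GAP <= next->vm_start;
		return true;
	}

	int main(void)
	{
		struct vma stack = { .vm_start = 0x7f0000000000UL,
				     .vm_end   = 0x7f0000021000UL,
				     .vm_flags = VM_GROWSDOWN };

		assert(gap_check_sketch(NULL, 0x1000, 0x1000));				/* empty space above */
		assert(!gap_check_sketch(&stack, stack.vm_start - 0x2000, 0x1000));	/* inside the guard gap */
		assert(gap_check_sketch(&stack, 0x600000000000UL, 0x1000));		/* comfortably below it */
		return 0;
	}
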
4483 diff -urNp linux-2.6.32.44/arch/sparc/kernel/traps_32.c linux-2.6.32.44/arch/sparc/kernel/traps_32.c
4484 --- linux-2.6.32.44/arch/sparc/kernel/traps_32.c 2011-03-27 14:31:47.000000000 -0400
4485 +++ linux-2.6.32.44/arch/sparc/kernel/traps_32.c 2011-06-13 21:25:39.000000000 -0400
4486 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
4487 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
4488 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
4489
4490 +extern void gr_handle_kernel_exploit(void);
4491 +
4492 void die_if_kernel(char *str, struct pt_regs *regs)
4493 {
4494 static int die_counter;
4495 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
4496 count++ < 30 &&
4497 (((unsigned long) rw) >= PAGE_OFFSET) &&
4498 !(((unsigned long) rw) & 0x7)) {
4499 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
4500 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
4501 (void *) rw->ins[7]);
4502 rw = (struct reg_window32 *)rw->ins[6];
4503 }
4504 }
4505 printk("Instruction DUMP:");
4506 instruction_dump ((unsigned long *) regs->pc);
4507 - if(regs->psr & PSR_PS)
4508 + if(regs->psr & PSR_PS) {
4509 + gr_handle_kernel_exploit();
4510 do_exit(SIGKILL);
4511 + }
4512 do_exit(SIGSEGV);
4513 }
4514
4515 diff -urNp linux-2.6.32.44/arch/sparc/kernel/traps_64.c linux-2.6.32.44/arch/sparc/kernel/traps_64.c
4516 --- linux-2.6.32.44/arch/sparc/kernel/traps_64.c 2011-03-27 14:31:47.000000000 -0400
4517 +++ linux-2.6.32.44/arch/sparc/kernel/traps_64.c 2011-06-13 21:24:11.000000000 -0400
4518 @@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_
4519 i + 1,
4520 p->trapstack[i].tstate, p->trapstack[i].tpc,
4521 p->trapstack[i].tnpc, p->trapstack[i].tt);
4522 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4523 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4524 }
4525 }
4526
4527 @@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long
4528
4529 lvl -= 0x100;
4530 if (regs->tstate & TSTATE_PRIV) {
4531 +
4532 +#ifdef CONFIG_PAX_REFCOUNT
4533 + if (lvl == 6)
4534 + pax_report_refcount_overflow(regs);
4535 +#endif
4536 +
4537 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4538 die_if_kernel(buffer, regs);
4539 }
4540 @@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long
4541 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4542 {
4543 char buffer[32];
4544 -
4545 +
4546 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4547 0, lvl, SIGTRAP) == NOTIFY_STOP)
4548 return;
4549
4550 +#ifdef CONFIG_PAX_REFCOUNT
4551 + if (lvl == 6)
4552 + pax_report_refcount_overflow(regs);
4553 +#endif
4554 +
4555 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4556
4557 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4558 @@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt
4559 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4560 printk("%s" "ERROR(%d): ",
4561 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4562 - printk("TPC<%pS>\n", (void *) regs->tpc);
4563 + printk("TPC<%pA>\n", (void *) regs->tpc);
4564 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4565 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4566 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4567 @@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type,
4568 smp_processor_id(),
4569 (type & 0x1) ? 'I' : 'D',
4570 regs->tpc);
4571 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4572 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4573 panic("Irrecoverable Cheetah+ parity error.");
4574 }
4575
4576 @@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type,
4577 smp_processor_id(),
4578 (type & 0x1) ? 'I' : 'D',
4579 regs->tpc);
4580 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4581 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4582 }
4583
4584 struct sun4v_error_entry {
4585 @@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_r
4586
4587 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4588 regs->tpc, tl);
4589 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4590 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4591 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4592 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4593 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4594 (void *) regs->u_regs[UREG_I7]);
4595 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4596 "pte[%lx] error[%lx]\n",
4597 @@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_r
4598
4599 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4600 regs->tpc, tl);
4601 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4602 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4603 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4604 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4605 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4606 (void *) regs->u_regs[UREG_I7]);
4607 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4608 "pte[%lx] error[%lx]\n",
4609 @@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk,
4610 fp = (unsigned long)sf->fp + STACK_BIAS;
4611 }
4612
4613 - printk(" [%016lx] %pS\n", pc, (void *) pc);
4614 + printk(" [%016lx] %pA\n", pc, (void *) pc);
4615 } while (++count < 16);
4616 }
4617
4618 @@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_
4619 return (struct reg_window *) (fp + STACK_BIAS);
4620 }
4621
4622 +extern void gr_handle_kernel_exploit(void);
4623 +
4624 void die_if_kernel(char *str, struct pt_regs *regs)
4625 {
4626 static int die_counter;
4627 @@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_
4628 while (rw &&
4629 count++ < 30&&
4630 is_kernel_stack(current, rw)) {
4631 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
4632 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
4633 (void *) rw->ins[7]);
4634
4635 rw = kernel_stack_up(rw);
4636 @@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_
4637 }
4638 user_instruction_dump ((unsigned int __user *) regs->tpc);
4639 }
4640 - if (regs->tstate & TSTATE_PRIV)
4641 + if (regs->tstate & TSTATE_PRIV) {
4642 + gr_handle_kernel_exploit();
4643 do_exit(SIGKILL);
4644 + }
4645 +
4646 do_exit(SIGSEGV);
4647 }
4648 EXPORT_SYMBOL(die_if_kernel);
4649 diff -urNp linux-2.6.32.44/arch/sparc/kernel/una_asm_64.S linux-2.6.32.44/arch/sparc/kernel/una_asm_64.S
4650 --- linux-2.6.32.44/arch/sparc/kernel/una_asm_64.S 2011-03-27 14:31:47.000000000 -0400
4651 +++ linux-2.6.32.44/arch/sparc/kernel/una_asm_64.S 2011-07-13 22:20:05.000000000 -0400
4652 @@ -127,7 +127,7 @@ do_int_load:
4653 wr %o5, 0x0, %asi
4654 retl
4655 mov 0, %o0
4656 - .size __do_int_load, .-__do_int_load
4657 + .size do_int_load, .-do_int_load
4658
4659 .section __ex_table,"a"
4660 .word 4b, __retl_efault
4661 diff -urNp linux-2.6.32.44/arch/sparc/kernel/unaligned_64.c linux-2.6.32.44/arch/sparc/kernel/unaligned_64.c
4662 --- linux-2.6.32.44/arch/sparc/kernel/unaligned_64.c 2011-03-27 14:31:47.000000000 -0400
4663 +++ linux-2.6.32.44/arch/sparc/kernel/unaligned_64.c 2011-04-17 15:56:46.000000000 -0400
4664 @@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs
4665 if (count < 5) {
4666 last_time = jiffies;
4667 count++;
4668 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
4669 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
4670 regs->tpc, (void *) regs->tpc);
4671 }
4672 }
4673 diff -urNp linux-2.6.32.44/arch/sparc/lib/atomic_64.S linux-2.6.32.44/arch/sparc/lib/atomic_64.S
4674 --- linux-2.6.32.44/arch/sparc/lib/atomic_64.S 2011-03-27 14:31:47.000000000 -0400
4675 +++ linux-2.6.32.44/arch/sparc/lib/atomic_64.S 2011-04-17 15:56:46.000000000 -0400
4676 @@ -18,7 +18,12 @@
4677 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4678 BACKOFF_SETUP(%o2)
4679 1: lduw [%o1], %g1
4680 - add %g1, %o0, %g7
4681 + addcc %g1, %o0, %g7
4682 +
4683 +#ifdef CONFIG_PAX_REFCOUNT
4684 + tvs %icc, 6
4685 +#endif
4686 +
4687 cas [%o1], %g1, %g7
4688 cmp %g1, %g7
4689 bne,pn %icc, 2f
4690 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
4691 2: BACKOFF_SPIN(%o2, %o3, 1b)
4692 .size atomic_add, .-atomic_add
4693
4694 + .globl atomic_add_unchecked
4695 + .type atomic_add_unchecked,#function
4696 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4697 + BACKOFF_SETUP(%o2)
4698 +1: lduw [%o1], %g1
4699 + add %g1, %o0, %g7
4700 + cas [%o1], %g1, %g7
4701 + cmp %g1, %g7
4702 + bne,pn %icc, 2f
4703 + nop
4704 + retl
4705 + nop
4706 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4707 + .size atomic_add_unchecked, .-atomic_add_unchecked
4708 +
4709 .globl atomic_sub
4710 .type atomic_sub,#function
4711 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4712 BACKOFF_SETUP(%o2)
4713 1: lduw [%o1], %g1
4714 - sub %g1, %o0, %g7
4715 + subcc %g1, %o0, %g7
4716 +
4717 +#ifdef CONFIG_PAX_REFCOUNT
4718 + tvs %icc, 6
4719 +#endif
4720 +
4721 cas [%o1], %g1, %g7
4722 cmp %g1, %g7
4723 bne,pn %icc, 2f
4724 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
4725 2: BACKOFF_SPIN(%o2, %o3, 1b)
4726 .size atomic_sub, .-atomic_sub
4727
4728 + .globl atomic_sub_unchecked
4729 + .type atomic_sub_unchecked,#function
4730 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4731 + BACKOFF_SETUP(%o2)
4732 +1: lduw [%o1], %g1
4733 + sub %g1, %o0, %g7
4734 + cas [%o1], %g1, %g7
4735 + cmp %g1, %g7
4736 + bne,pn %icc, 2f
4737 + nop
4738 + retl
4739 + nop
4740 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4741 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
4742 +
4743 .globl atomic_add_ret
4744 .type atomic_add_ret,#function
4745 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4746 BACKOFF_SETUP(%o2)
4747 1: lduw [%o1], %g1
4748 - add %g1, %o0, %g7
4749 + addcc %g1, %o0, %g7
4750 +
4751 +#ifdef CONFIG_PAX_REFCOUNT
4752 + tvs %icc, 6
4753 +#endif
4754 +
4755 cas [%o1], %g1, %g7
4756 cmp %g1, %g7
4757 bne,pn %icc, 2f
4758 @@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1
4759 2: BACKOFF_SPIN(%o2, %o3, 1b)
4760 .size atomic_add_ret, .-atomic_add_ret
4761
4762 + .globl atomic_add_ret_unchecked
4763 + .type atomic_add_ret_unchecked,#function
4764 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4765 + BACKOFF_SETUP(%o2)
4766 +1: lduw [%o1], %g1
4767 + addcc %g1, %o0, %g7
4768 + cas [%o1], %g1, %g7
4769 + cmp %g1, %g7
4770 + bne,pn %icc, 2f
4771 + add %g7, %o0, %g7
4772 + sra %g7, 0, %o0
4773 + retl
4774 + nop
4775 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4776 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4777 +
4778 .globl atomic_sub_ret
4779 .type atomic_sub_ret,#function
4780 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4781 BACKOFF_SETUP(%o2)
4782 1: lduw [%o1], %g1
4783 - sub %g1, %o0, %g7
4784 + subcc %g1, %o0, %g7
4785 +
4786 +#ifdef CONFIG_PAX_REFCOUNT
4787 + tvs %icc, 6
4788 +#endif
4789 +
4790 cas [%o1], %g1, %g7
4791 cmp %g1, %g7
4792 bne,pn %icc, 2f
4793 @@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
4794 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4795 BACKOFF_SETUP(%o2)
4796 1: ldx [%o1], %g1
4797 - add %g1, %o0, %g7
4798 + addcc %g1, %o0, %g7
4799 +
4800 +#ifdef CONFIG_PAX_REFCOUNT
4801 + tvs %xcc, 6
4802 +#endif
4803 +
4804 casx [%o1], %g1, %g7
4805 cmp %g1, %g7
4806 bne,pn %xcc, 2f
4807 @@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 =
4808 2: BACKOFF_SPIN(%o2, %o3, 1b)
4809 .size atomic64_add, .-atomic64_add
4810
4811 + .globl atomic64_add_unchecked
4812 + .type atomic64_add_unchecked,#function
4813 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4814 + BACKOFF_SETUP(%o2)
4815 +1: ldx [%o1], %g1
4816 + addcc %g1, %o0, %g7
4817 + casx [%o1], %g1, %g7
4818 + cmp %g1, %g7
4819 + bne,pn %xcc, 2f
4820 + nop
4821 + retl
4822 + nop
4823 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4824 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
4825 +
4826 .globl atomic64_sub
4827 .type atomic64_sub,#function
4828 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4829 BACKOFF_SETUP(%o2)
4830 1: ldx [%o1], %g1
4831 - sub %g1, %o0, %g7
4832 + subcc %g1, %o0, %g7
4833 +
4834 +#ifdef CONFIG_PAX_REFCOUNT
4835 + tvs %xcc, 6
4836 +#endif
4837 +
4838 casx [%o1], %g1, %g7
4839 cmp %g1, %g7
4840 bne,pn %xcc, 2f
4841 @@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
4842 2: BACKOFF_SPIN(%o2, %o3, 1b)
4843 .size atomic64_sub, .-atomic64_sub
4844
4845 + .globl atomic64_sub_unchecked
4846 + .type atomic64_sub_unchecked,#function
4847 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4848 + BACKOFF_SETUP(%o2)
4849 +1: ldx [%o1], %g1
4850 + subcc %g1, %o0, %g7
4851 + casx [%o1], %g1, %g7
4852 + cmp %g1, %g7
4853 + bne,pn %xcc, 2f
4854 + nop
4855 + retl
4856 + nop
4857 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4858 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4859 +
4860 .globl atomic64_add_ret
4861 .type atomic64_add_ret,#function
4862 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4863 BACKOFF_SETUP(%o2)
4864 1: ldx [%o1], %g1
4865 - add %g1, %o0, %g7
4866 + addcc %g1, %o0, %g7
4867 +
4868 +#ifdef CONFIG_PAX_REFCOUNT
4869 + tvs %xcc, 6
4870 +#endif
4871 +
4872 casx [%o1], %g1, %g7
4873 cmp %g1, %g7
4874 bne,pn %xcc, 2f
4875 @@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4876 2: BACKOFF_SPIN(%o2, %o3, 1b)
4877 .size atomic64_add_ret, .-atomic64_add_ret
4878
4879 + .globl atomic64_add_ret_unchecked
4880 + .type atomic64_add_ret_unchecked,#function
4881 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4882 + BACKOFF_SETUP(%o2)
4883 +1: ldx [%o1], %g1
4884 + addcc %g1, %o0, %g7
4885 + casx [%o1], %g1, %g7
4886 + cmp %g1, %g7
4887 + bne,pn %xcc, 2f
4888 + add %g7, %o0, %g7
4889 + mov %g7, %o0
4890 + retl
4891 + nop
4892 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4893 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4894 +
4895 .globl atomic64_sub_ret
4896 .type atomic64_sub_ret,#function
4897 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4898 BACKOFF_SETUP(%o2)
4899 1: ldx [%o1], %g1
4900 - sub %g1, %o0, %g7
4901 + subcc %g1, %o0, %g7
4902 +
4903 +#ifdef CONFIG_PAX_REFCOUNT
4904 + tvs %xcc, 6
4905 +#endif
4906 +
4907 casx [%o1], %g1, %g7
4908 cmp %g1, %g7
4909 bne,pn %xcc, 2f
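
Note on the atomic_64.S hunks above: each plain add/sub becomes addcc/subcc so the condition codes reflect the result, and under CONFIG_PAX_REFCOUNT the following `tvs %icc, 6` (or `tvs %xcc, 6` in the 64-bit routines) raises software trap 6 on signed overflow, which the traps_64.c hunks in this patch forward to pax_report_refcount_overflow(); the *_unchecked variants keep the original wrapping behaviour for counters that are allowed to wrap. A hedged, generic C sketch of the same detect-and-refuse idea, using the GCC/Clang __builtin_add_overflow builtin rather than the patch's SPARC trap:

	#include <limits.h>
	#include <stdio.h>

	/* Refuse to let a reference count wrap past INT_MAX, roughly the
	 * policy the overflow trap enforces for the checked atomics. */
	static int refcount_inc_checked(int *counter)
	{
		int next;

		if (__builtin_add_overflow(*counter, 1, &next)) {
			fprintf(stderr, "refcount overflow detected, increment dropped\n");
			return -1;
		}
		*counter = next;
		return 0;
	}

	int main(void)
	{
		int refs = INT_MAX;

		if (refcount_inc_checked(&refs) != 0)
			fprintf(stderr, "counter stays pinned at %d\n", refs);
		return 0;
	}
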
4910 diff -urNp linux-2.6.32.44/arch/sparc/lib/ksyms.c linux-2.6.32.44/arch/sparc/lib/ksyms.c
4911 --- linux-2.6.32.44/arch/sparc/lib/ksyms.c 2011-03-27 14:31:47.000000000 -0400
4912 +++ linux-2.6.32.44/arch/sparc/lib/ksyms.c 2011-04-17 15:56:46.000000000 -0400
4913 @@ -144,12 +144,17 @@ EXPORT_SYMBOL(__downgrade_write);
4914
4915 /* Atomic counter implementation. */
4916 EXPORT_SYMBOL(atomic_add);
4917 +EXPORT_SYMBOL(atomic_add_unchecked);
4918 EXPORT_SYMBOL(atomic_add_ret);
4919 EXPORT_SYMBOL(atomic_sub);
4920 +EXPORT_SYMBOL(atomic_sub_unchecked);
4921 EXPORT_SYMBOL(atomic_sub_ret);
4922 EXPORT_SYMBOL(atomic64_add);
4923 +EXPORT_SYMBOL(atomic64_add_unchecked);
4924 EXPORT_SYMBOL(atomic64_add_ret);
4925 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4926 EXPORT_SYMBOL(atomic64_sub);
4927 +EXPORT_SYMBOL(atomic64_sub_unchecked);
4928 EXPORT_SYMBOL(atomic64_sub_ret);
4929
4930 /* Atomic bit operations. */
4931 diff -urNp linux-2.6.32.44/arch/sparc/lib/Makefile linux-2.6.32.44/arch/sparc/lib/Makefile
4932 --- linux-2.6.32.44/arch/sparc/lib/Makefile 2011-03-27 14:31:47.000000000 -0400
4933 +++ linux-2.6.32.44/arch/sparc/lib/Makefile 2011-05-17 19:26:34.000000000 -0400
4934 @@ -2,7 +2,7 @@
4935 #
4936
4937 asflags-y := -ansi -DST_DIV0=0x02
4938 -ccflags-y := -Werror
4939 +#ccflags-y := -Werror
4940
4941 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4942 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4943 diff -urNp linux-2.6.32.44/arch/sparc/lib/rwsem_64.S linux-2.6.32.44/arch/sparc/lib/rwsem_64.S
4944 --- linux-2.6.32.44/arch/sparc/lib/rwsem_64.S 2011-03-27 14:31:47.000000000 -0400
4945 +++ linux-2.6.32.44/arch/sparc/lib/rwsem_64.S 2011-04-17 15:56:46.000000000 -0400
4946 @@ -11,7 +11,12 @@
4947 .globl __down_read
4948 __down_read:
4949 1: lduw [%o0], %g1
4950 - add %g1, 1, %g7
4951 + addcc %g1, 1, %g7
4952 +
4953 +#ifdef CONFIG_PAX_REFCOUNT
4954 + tvs %icc, 6
4955 +#endif
4956 +
4957 cas [%o0], %g1, %g7
4958 cmp %g1, %g7
4959 bne,pn %icc, 1b
4960 @@ -33,7 +38,12 @@ __down_read:
4961 .globl __down_read_trylock
4962 __down_read_trylock:
4963 1: lduw [%o0], %g1
4964 - add %g1, 1, %g7
4965 + addcc %g1, 1, %g7
4966 +
4967 +#ifdef CONFIG_PAX_REFCOUNT
4968 + tvs %icc, 6
4969 +#endif
4970 +
4971 cmp %g7, 0
4972 bl,pn %icc, 2f
4973 mov 0, %o1
4974 @@ -51,7 +61,12 @@ __down_write:
4975 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
4976 1:
4977 lduw [%o0], %g3
4978 - add %g3, %g1, %g7
4979 + addcc %g3, %g1, %g7
4980 +
4981 +#ifdef CONFIG_PAX_REFCOUNT
4982 + tvs %icc, 6
4983 +#endif
4984 +
4985 cas [%o0], %g3, %g7
4986 cmp %g3, %g7
4987 bne,pn %icc, 1b
4988 @@ -77,7 +92,12 @@ __down_write_trylock:
4989 cmp %g3, 0
4990 bne,pn %icc, 2f
4991 mov 0, %o1
4992 - add %g3, %g1, %g7
4993 + addcc %g3, %g1, %g7
4994 +
4995 +#ifdef CONFIG_PAX_REFCOUNT
4996 + tvs %icc, 6
4997 +#endif
4998 +
4999 cas [%o0], %g3, %g7
5000 cmp %g3, %g7
5001 bne,pn %icc, 1b
5002 @@ -90,7 +110,12 @@ __down_write_trylock:
5003 __up_read:
5004 1:
5005 lduw [%o0], %g1
5006 - sub %g1, 1, %g7
5007 + subcc %g1, 1, %g7
5008 +
5009 +#ifdef CONFIG_PAX_REFCOUNT
5010 + tvs %icc, 6
5011 +#endif
5012 +
5013 cas [%o0], %g1, %g7
5014 cmp %g1, %g7
5015 bne,pn %icc, 1b
5016 @@ -118,7 +143,12 @@ __up_write:
5017 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5018 1:
5019 lduw [%o0], %g3
5020 - sub %g3, %g1, %g7
5021 + subcc %g3, %g1, %g7
5022 +
5023 +#ifdef CONFIG_PAX_REFCOUNT
5024 + tvs %icc, 6
5025 +#endif
5026 +
5027 cas [%o0], %g3, %g7
5028 cmp %g3, %g7
5029 bne,pn %icc, 1b
5030 @@ -143,7 +173,12 @@ __downgrade_write:
5031 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
5032 1:
5033 lduw [%o0], %g3
5034 - sub %g3, %g1, %g7
5035 + subcc %g3, %g1, %g7
5036 +
5037 +#ifdef CONFIG_PAX_REFCOUNT
5038 + tvs %icc, 6
5039 +#endif
5040 +
5041 cas [%o0], %g3, %g7
5042 cmp %g3, %g7
5043 bne,pn %icc, 1b
5044 diff -urNp linux-2.6.32.44/arch/sparc/Makefile linux-2.6.32.44/arch/sparc/Makefile
5045 --- linux-2.6.32.44/arch/sparc/Makefile 2011-03-27 14:31:47.000000000 -0400
5046 +++ linux-2.6.32.44/arch/sparc/Makefile 2011-04-17 15:56:46.000000000 -0400
5047 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
5048 # Export what is needed by arch/sparc/boot/Makefile
5049 export VMLINUX_INIT VMLINUX_MAIN
5050 VMLINUX_INIT := $(head-y) $(init-y)
5051 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
5052 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
5053 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
5054 VMLINUX_MAIN += $(drivers-y) $(net-y)
5055
5056 diff -urNp linux-2.6.32.44/arch/sparc/mm/fault_32.c linux-2.6.32.44/arch/sparc/mm/fault_32.c
5057 --- linux-2.6.32.44/arch/sparc/mm/fault_32.c 2011-03-27 14:31:47.000000000 -0400
5058 +++ linux-2.6.32.44/arch/sparc/mm/fault_32.c 2011-04-17 15:56:46.000000000 -0400
5059 @@ -21,6 +21,9 @@
5060 #include <linux/interrupt.h>
5061 #include <linux/module.h>
5062 #include <linux/kdebug.h>
5063 +#include <linux/slab.h>
5064 +#include <linux/pagemap.h>
5065 +#include <linux/compiler.h>
5066
5067 #include <asm/system.h>
5068 #include <asm/page.h>
5069 @@ -167,6 +170,267 @@ static unsigned long compute_si_addr(str
5070 return safe_compute_effective_address(regs, insn);
5071 }
5072
5073 +#ifdef CONFIG_PAX_PAGEEXEC
5074 +#ifdef CONFIG_PAX_DLRESOLVE
5075 +static void pax_emuplt_close(struct vm_area_struct *vma)
5076 +{
5077 + vma->vm_mm->call_dl_resolve = 0UL;
5078 +}
5079 +
5080 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5081 +{
5082 + unsigned int *kaddr;
5083 +
5084 + vmf->page = alloc_page(GFP_HIGHUSER);
5085 + if (!vmf->page)
5086 + return VM_FAULT_OOM;
5087 +
5088 + kaddr = kmap(vmf->page);
5089 + memset(kaddr, 0, PAGE_SIZE);
5090 + kaddr[0] = 0x9DE3BFA8U; /* save */
5091 + flush_dcache_page(vmf->page);
5092 + kunmap(vmf->page);
5093 + return VM_FAULT_MAJOR;
5094 +}
5095 +
5096 +static const struct vm_operations_struct pax_vm_ops = {
5097 + .close = pax_emuplt_close,
5098 + .fault = pax_emuplt_fault
5099 +};
5100 +
5101 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5102 +{
5103 + int ret;
5104 +
5105 + vma->vm_mm = current->mm;
5106 + vma->vm_start = addr;
5107 + vma->vm_end = addr + PAGE_SIZE;
5108 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5109 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5110 + vma->vm_ops = &pax_vm_ops;
5111 +
5112 + ret = insert_vm_struct(current->mm, vma);
5113 + if (ret)
5114 + return ret;
5115 +
5116 + ++current->mm->total_vm;
5117 + return 0;
5118 +}
5119 +#endif
5120 +
5121 +/*
5122 + * PaX: decide what to do with offenders (regs->pc = fault address)
5123 + *
5124 + * returns 1 when task should be killed
5125 + * 2 when patched PLT trampoline was detected
5126 + * 3 when unpatched PLT trampoline was detected
5127 + */
5128 +static int pax_handle_fetch_fault(struct pt_regs *regs)
5129 +{
5130 +
5131 +#ifdef CONFIG_PAX_EMUPLT
5132 + int err;
5133 +
5134 + do { /* PaX: patched PLT emulation #1 */
5135 + unsigned int sethi1, sethi2, jmpl;
5136 +
5137 + err = get_user(sethi1, (unsigned int *)regs->pc);
5138 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
5139 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
5140 +
5141 + if (err)
5142 + break;
5143 +
5144 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5145 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
5146 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
5147 + {
5148 + unsigned int addr;
5149 +
5150 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5151 + addr = regs->u_regs[UREG_G1];
5152 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5153 + regs->pc = addr;
5154 + regs->npc = addr+4;
5155 + return 2;
5156 + }
5157 + } while (0);
5158 +
5159 + { /* PaX: patched PLT emulation #2 */
5160 + unsigned int ba;
5161 +
5162 + err = get_user(ba, (unsigned int *)regs->pc);
5163 +
5164 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5165 + unsigned int addr;
5166 +
5167 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5168 + regs->pc = addr;
5169 + regs->npc = addr+4;
5170 + return 2;
5171 + }
5172 + }
5173 +
5174 + do { /* PaX: patched PLT emulation #3 */
5175 + unsigned int sethi, jmpl, nop;
5176 +
5177 + err = get_user(sethi, (unsigned int *)regs->pc);
5178 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
5179 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
5180 +
5181 + if (err)
5182 + break;
5183 +
5184 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5185 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5186 + nop == 0x01000000U)
5187 + {
5188 + unsigned int addr;
5189 +
5190 + addr = (sethi & 0x003FFFFFU) << 10;
5191 + regs->u_regs[UREG_G1] = addr;
5192 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5193 + regs->pc = addr;
5194 + regs->npc = addr+4;
5195 + return 2;
5196 + }
5197 + } while (0);
5198 +
5199 + do { /* PaX: unpatched PLT emulation step 1 */
5200 + unsigned int sethi, ba, nop;
5201 +
5202 + err = get_user(sethi, (unsigned int *)regs->pc);
5203 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
5204 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
5205 +
5206 + if (err)
5207 + break;
5208 +
5209 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5210 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5211 + nop == 0x01000000U)
5212 + {
5213 + unsigned int addr, save, call;
5214 +
5215 + if ((ba & 0xFFC00000U) == 0x30800000U)
5216 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5217 + else
5218 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
5219 +
5220 + err = get_user(save, (unsigned int *)addr);
5221 + err |= get_user(call, (unsigned int *)(addr+4));
5222 + err |= get_user(nop, (unsigned int *)(addr+8));
5223 + if (err)
5224 + break;
5225 +
5226 +#ifdef CONFIG_PAX_DLRESOLVE
5227 + if (save == 0x9DE3BFA8U &&
5228 + (call & 0xC0000000U) == 0x40000000U &&
5229 + nop == 0x01000000U)
5230 + {
5231 + struct vm_area_struct *vma;
5232 + unsigned long call_dl_resolve;
5233 +
5234 + down_read(&current->mm->mmap_sem);
5235 + call_dl_resolve = current->mm->call_dl_resolve;
5236 + up_read(&current->mm->mmap_sem);
5237 + if (likely(call_dl_resolve))
5238 + goto emulate;
5239 +
5240 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5241 +
5242 + down_write(&current->mm->mmap_sem);
5243 + if (current->mm->call_dl_resolve) {
5244 + call_dl_resolve = current->mm->call_dl_resolve;
5245 + up_write(&current->mm->mmap_sem);
5246 + if (vma)
5247 + kmem_cache_free(vm_area_cachep, vma);
5248 + goto emulate;
5249 + }
5250 +
5251 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5252 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5253 + up_write(&current->mm->mmap_sem);
5254 + if (vma)
5255 + kmem_cache_free(vm_area_cachep, vma);
5256 + return 1;
5257 + }
5258 +
5259 + if (pax_insert_vma(vma, call_dl_resolve)) {
5260 + up_write(&current->mm->mmap_sem);
5261 + kmem_cache_free(vm_area_cachep, vma);
5262 + return 1;
5263 + }
5264 +
5265 + current->mm->call_dl_resolve = call_dl_resolve;
5266 + up_write(&current->mm->mmap_sem);
5267 +
5268 +emulate:
5269 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5270 + regs->pc = call_dl_resolve;
5271 + regs->npc = addr+4;
5272 + return 3;
5273 + }
5274 +#endif
5275 +
5276 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5277 + if ((save & 0xFFC00000U) == 0x05000000U &&
5278 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5279 + nop == 0x01000000U)
5280 + {
5281 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5282 + regs->u_regs[UREG_G2] = addr + 4;
5283 + addr = (save & 0x003FFFFFU) << 10;
5284 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5285 + regs->pc = addr;
5286 + regs->npc = addr+4;
5287 + return 3;
5288 + }
5289 + }
5290 + } while (0);
5291 +
5292 + do { /* PaX: unpatched PLT emulation step 2 */
5293 + unsigned int save, call, nop;
5294 +
5295 + err = get_user(save, (unsigned int *)(regs->pc-4));
5296 + err |= get_user(call, (unsigned int *)regs->pc);
5297 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
5298 + if (err)
5299 + break;
5300 +
5301 + if (save == 0x9DE3BFA8U &&
5302 + (call & 0xC0000000U) == 0x40000000U &&
5303 + nop == 0x01000000U)
5304 + {
5305 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
5306 +
5307 + regs->u_regs[UREG_RETPC] = regs->pc;
5308 + regs->pc = dl_resolve;
5309 + regs->npc = dl_resolve+4;
5310 + return 3;
5311 + }
5312 + } while (0);
5313 +#endif
5314 +
5315 + return 1;
5316 +}
5317 +
5318 +void pax_report_insns(void *pc, void *sp)
5319 +{
5320 + unsigned long i;
5321 +
5322 + printk(KERN_ERR "PAX: bytes at PC: ");
5323 + for (i = 0; i < 8; i++) {
5324 + unsigned int c;
5325 + if (get_user(c, (unsigned int *)pc+i))
5326 + printk(KERN_CONT "???????? ");
5327 + else
5328 + printk(KERN_CONT "%08x ", c);
5329 + }
5330 + printk("\n");
5331 +}
5332 +#endif
5333 +
5334 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
5335 unsigned long address)
5336 {
5337 @@ -231,6 +495,24 @@ good_area:
5338 if(!(vma->vm_flags & VM_WRITE))
5339 goto bad_area;
5340 } else {
5341 +
5342 +#ifdef CONFIG_PAX_PAGEEXEC
5343 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
5344 + up_read(&mm->mmap_sem);
5345 + switch (pax_handle_fetch_fault(regs)) {
5346 +
5347 +#ifdef CONFIG_PAX_EMUPLT
5348 + case 2:
5349 + case 3:
5350 + return;
5351 +#endif
5352 +
5353 + }
5354 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
5355 + do_group_exit(SIGKILL);
5356 + }
5357 +#endif
5358 +
5359 /* Allow reads even for write-only mappings */
5360 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
5361 goto bad_area;
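
Note on the fault_32.c hunks above: pax_handle_fetch_fault() recognises patched and unpatched PLT trampolines by matching their instruction words and then rebuilds the jump target from the sethi and jmpl immediates; the recurring expression `(((insn | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U)` is a branch-free sign extension of the 13-bit simm13 field, and `(sethi & 0x003FFFFFU) << 10` recovers the %hi() part. A small self-contained C check of those two decode steps (hypothetical helper names, 32-bit arithmetic as in this fault_32.c code):

	#include <assert.h>
	#include <stdint.h>

	/* Sign-extend the low 13 bits of an instruction word, as done for
	 * the jmpl displacement in pax_handle_fetch_fault(). */
	static uint32_t sign_extend_simm13(uint32_t insn)
	{
		return ((insn | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U;
	}

	/* sethi %hi(X), %rd places bits 31..10 of X in the destination. */
	static uint32_t sethi_value(uint32_t insn)
	{
		return (insn & 0x003FFFFFU) << 10;
	}

	int main(void)
	{
		assert(sign_extend_simm13(0x81C06008U) == 0x00000008U);	/* simm13 = +8 */
		assert(sign_extend_simm13(0x81C07FF8U) == 0xFFFFFFF8U);	/* simm13 = -8 */
		assert(sethi_value(0x03FFFFFFU) == 0xFFFFFC00U);	/* imm22 = 0x3FFFFF */
		return 0;
	}
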
5362 diff -urNp linux-2.6.32.44/arch/sparc/mm/fault_64.c linux-2.6.32.44/arch/sparc/mm/fault_64.c
5363 --- linux-2.6.32.44/arch/sparc/mm/fault_64.c 2011-03-27 14:31:47.000000000 -0400
5364 +++ linux-2.6.32.44/arch/sparc/mm/fault_64.c 2011-04-17 15:56:46.000000000 -0400
5365 @@ -20,6 +20,9 @@
5366 #include <linux/kprobes.h>
5367 #include <linux/kdebug.h>
5368 #include <linux/percpu.h>
5369 +#include <linux/slab.h>
5370 +#include <linux/pagemap.h>
5371 +#include <linux/compiler.h>
5372
5373 #include <asm/page.h>
5374 #include <asm/pgtable.h>
5375 @@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs
5376 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
5377 regs->tpc);
5378 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
5379 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
5380 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
5381 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
5382 dump_stack();
5383 unhandled_fault(regs->tpc, current, regs);
5384 @@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_a
5385 show_regs(regs);
5386 }
5387
5388 +#ifdef CONFIG_PAX_PAGEEXEC
5389 +#ifdef CONFIG_PAX_DLRESOLVE
5390 +static void pax_emuplt_close(struct vm_area_struct *vma)
5391 +{
5392 + vma->vm_mm->call_dl_resolve = 0UL;
5393 +}
5394 +
5395 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5396 +{
5397 + unsigned int *kaddr;
5398 +
5399 + vmf->page = alloc_page(GFP_HIGHUSER);
5400 + if (!vmf->page)
5401 + return VM_FAULT_OOM;
5402 +
5403 + kaddr = kmap(vmf->page);
5404 + memset(kaddr, 0, PAGE_SIZE);
5405 + kaddr[0] = 0x9DE3BFA8U; /* save */
5406 + flush_dcache_page(vmf->page);
5407 + kunmap(vmf->page);
5408 + return VM_FAULT_MAJOR;
5409 +}
5410 +
5411 +static const struct vm_operations_struct pax_vm_ops = {
5412 + .close = pax_emuplt_close,
5413 + .fault = pax_emuplt_fault
5414 +};
5415 +
5416 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5417 +{
5418 + int ret;
5419 +
5420 + vma->vm_mm = current->mm;
5421 + vma->vm_start = addr;
5422 + vma->vm_end = addr + PAGE_SIZE;
5423 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5424 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5425 + vma->vm_ops = &pax_vm_ops;
5426 +
5427 + ret = insert_vm_struct(current->mm, vma);
5428 + if (ret)
5429 + return ret;
5430 +
5431 + ++current->mm->total_vm;
5432 + return 0;
5433 +}
5434 +#endif
5435 +
5436 +/*
5437 + * PaX: decide what to do with offenders (regs->tpc = fault address)
5438 + *
5439 + * returns 1 when task should be killed
5440 + * 2 when patched PLT trampoline was detected
5441 + * 3 when unpatched PLT trampoline was detected
5442 + */
5443 +static int pax_handle_fetch_fault(struct pt_regs *regs)
5444 +{
5445 +
5446 +#ifdef CONFIG_PAX_EMUPLT
5447 + int err;
5448 +
5449 + do { /* PaX: patched PLT emulation #1 */
5450 + unsigned int sethi1, sethi2, jmpl;
5451 +
5452 + err = get_user(sethi1, (unsigned int *)regs->tpc);
5453 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
5454 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
5455 +
5456 + if (err)
5457 + break;
5458 +
5459 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5460 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
5461 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
5462 + {
5463 + unsigned long addr;
5464 +
5465 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5466 + addr = regs->u_regs[UREG_G1];
5467 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5468 +
5469 + if (test_thread_flag(TIF_32BIT))
5470 + addr &= 0xFFFFFFFFUL;
5471 +
5472 + regs->tpc = addr;
5473 + regs->tnpc = addr+4;
5474 + return 2;
5475 + }
5476 + } while (0);
5477 +
5478 + { /* PaX: patched PLT emulation #2 */
5479 + unsigned int ba;
5480 +
5481 + err = get_user(ba, (unsigned int *)regs->tpc);
5482 +
5483 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5484 + unsigned long addr;
5485 +
5486 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5487 +
5488 + if (test_thread_flag(TIF_32BIT))
5489 + addr &= 0xFFFFFFFFUL;
5490 +
5491 + regs->tpc = addr;
5492 + regs->tnpc = addr+4;
5493 + return 2;
5494 + }
5495 + }
5496 +
5497 + do { /* PaX: patched PLT emulation #3 */
5498 + unsigned int sethi, jmpl, nop;
5499 +
5500 + err = get_user(sethi, (unsigned int *)regs->tpc);
5501 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5502 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5503 +
5504 + if (err)
5505 + break;
5506 +
5507 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5508 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5509 + nop == 0x01000000U)
5510 + {
5511 + unsigned long addr;
5512 +
5513 + addr = (sethi & 0x003FFFFFU) << 10;
5514 + regs->u_regs[UREG_G1] = addr;
5515 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5516 +
5517 + if (test_thread_flag(TIF_32BIT))
5518 + addr &= 0xFFFFFFFFUL;
5519 +
5520 + regs->tpc = addr;
5521 + regs->tnpc = addr+4;
5522 + return 2;
5523 + }
5524 + } while (0);
5525 +
5526 + do { /* PaX: patched PLT emulation #4 */
5527 + unsigned int sethi, mov1, call, mov2;
5528 +
5529 + err = get_user(sethi, (unsigned int *)regs->tpc);
5530 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5531 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
5532 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5533 +
5534 + if (err)
5535 + break;
5536 +
5537 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5538 + mov1 == 0x8210000FU &&
5539 + (call & 0xC0000000U) == 0x40000000U &&
5540 + mov2 == 0x9E100001U)
5541 + {
5542 + unsigned long addr;
5543 +
5544 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5545 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5546 +
5547 + if (test_thread_flag(TIF_32BIT))
5548 + addr &= 0xFFFFFFFFUL;
5549 +
5550 + regs->tpc = addr;
5551 + regs->tnpc = addr+4;
5552 + return 2;
5553 + }
5554 + } while (0);
5555 +
5556 + do { /* PaX: patched PLT emulation #5 */
5557 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5558 +
5559 + err = get_user(sethi, (unsigned int *)regs->tpc);
5560 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5561 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5562 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5563 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5564 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5565 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5566 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5567 +
5568 + if (err)
5569 + break;
5570 +
5571 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5572 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5573 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5574 + (or1 & 0xFFFFE000U) == 0x82106000U &&
5575 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5576 + sllx == 0x83287020U &&
5577 + jmpl == 0x81C04005U &&
5578 + nop == 0x01000000U)
5579 + {
5580 + unsigned long addr;
5581 +
5582 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5583 + regs->u_regs[UREG_G1] <<= 32;
5584 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5585 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5586 + regs->tpc = addr;
5587 + regs->tnpc = addr+4;
5588 + return 2;
5589 + }
5590 + } while (0);
5591 +
5592 + do { /* PaX: patched PLT emulation #6 */
5593 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5594 +
5595 + err = get_user(sethi, (unsigned int *)regs->tpc);
5596 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5597 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5598 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5599 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
5600 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5601 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5602 +
5603 + if (err)
5604 + break;
5605 +
5606 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5607 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5608 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5609 + sllx == 0x83287020U &&
5610 + (or & 0xFFFFE000U) == 0x8A116000U &&
5611 + jmpl == 0x81C04005U &&
5612 + nop == 0x01000000U)
5613 + {
5614 + unsigned long addr;
5615 +
5616 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5617 + regs->u_regs[UREG_G1] <<= 32;
5618 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5619 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5620 + regs->tpc = addr;
5621 + regs->tnpc = addr+4;
5622 + return 2;
5623 + }
5624 + } while (0);
5625 +
5626 + do { /* PaX: unpatched PLT emulation step 1 */
5627 + unsigned int sethi, ba, nop;
5628 +
5629 + err = get_user(sethi, (unsigned int *)regs->tpc);
5630 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5631 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5632 +
5633 + if (err)
5634 + break;
5635 +
5636 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5637 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5638 + nop == 0x01000000U)
5639 + {
5640 + unsigned long addr;
5641 + unsigned int save, call;
5642 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5643 +
5644 + if ((ba & 0xFFC00000U) == 0x30800000U)
5645 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5646 + else
5647 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5648 +
5649 + if (test_thread_flag(TIF_32BIT))
5650 + addr &= 0xFFFFFFFFUL;
5651 +
5652 + err = get_user(save, (unsigned int *)addr);
5653 + err |= get_user(call, (unsigned int *)(addr+4));
5654 + err |= get_user(nop, (unsigned int *)(addr+8));
5655 + if (err)
5656 + break;
5657 +
5658 +#ifdef CONFIG_PAX_DLRESOLVE
5659 + if (save == 0x9DE3BFA8U &&
5660 + (call & 0xC0000000U) == 0x40000000U &&
5661 + nop == 0x01000000U)
5662 + {
5663 + struct vm_area_struct *vma;
5664 + unsigned long call_dl_resolve;
5665 +
5666 + down_read(&current->mm->mmap_sem);
5667 + call_dl_resolve = current->mm->call_dl_resolve;
5668 + up_read(&current->mm->mmap_sem);
5669 + if (likely(call_dl_resolve))
5670 + goto emulate;
5671 +
5672 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5673 +
5674 + down_write(&current->mm->mmap_sem);
5675 + if (current->mm->call_dl_resolve) {
5676 + call_dl_resolve = current->mm->call_dl_resolve;
5677 + up_write(&current->mm->mmap_sem);
5678 + if (vma)
5679 + kmem_cache_free(vm_area_cachep, vma);
5680 + goto emulate;
5681 + }
5682 +
5683 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5684 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5685 + up_write(&current->mm->mmap_sem);
5686 + if (vma)
5687 + kmem_cache_free(vm_area_cachep, vma);
5688 + return 1;
5689 + }
5690 +
5691 + if (pax_insert_vma(vma, call_dl_resolve)) {
5692 + up_write(&current->mm->mmap_sem);
5693 + kmem_cache_free(vm_area_cachep, vma);
5694 + return 1;
5695 + }
5696 +
5697 + current->mm->call_dl_resolve = call_dl_resolve;
5698 + up_write(&current->mm->mmap_sem);
5699 +
5700 +emulate:
5701 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5702 + regs->tpc = call_dl_resolve;
5703 + regs->tnpc = addr+4;
5704 + return 3;
5705 + }
5706 +#endif
5707 +
5708 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5709 + if ((save & 0xFFC00000U) == 0x05000000U &&
5710 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5711 + nop == 0x01000000U)
5712 + {
5713 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5714 + regs->u_regs[UREG_G2] = addr + 4;
5715 + addr = (save & 0x003FFFFFU) << 10;
5716 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5717 +
5718 + if (test_thread_flag(TIF_32BIT))
5719 + addr &= 0xFFFFFFFFUL;
5720 +
5721 + regs->tpc = addr;
5722 + regs->tnpc = addr+4;
5723 + return 3;
5724 + }
5725 +
5726 + /* PaX: 64-bit PLT stub */
5727 + err = get_user(sethi1, (unsigned int *)addr);
5728 + err |= get_user(sethi2, (unsigned int *)(addr+4));
5729 + err |= get_user(or1, (unsigned int *)(addr+8));
5730 + err |= get_user(or2, (unsigned int *)(addr+12));
5731 + err |= get_user(sllx, (unsigned int *)(addr+16));
5732 + err |= get_user(add, (unsigned int *)(addr+20));
5733 + err |= get_user(jmpl, (unsigned int *)(addr+24));
5734 + err |= get_user(nop, (unsigned int *)(addr+28));
5735 + if (err)
5736 + break;
5737 +
5738 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5739 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5740 + (or1 & 0xFFFFE000U) == 0x88112000U &&
5741 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5742 + sllx == 0x89293020U &&
5743 + add == 0x8A010005U &&
5744 + jmpl == 0x89C14000U &&
5745 + nop == 0x01000000U)
5746 + {
5747 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5748 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5749 + regs->u_regs[UREG_G4] <<= 32;
5750 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5751 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5752 + regs->u_regs[UREG_G4] = addr + 24;
5753 + addr = regs->u_regs[UREG_G5];
5754 + regs->tpc = addr;
5755 + regs->tnpc = addr+4;
5756 + return 3;
5757 + }
5758 + }
5759 + } while (0);
5760 +
5761 +#ifdef CONFIG_PAX_DLRESOLVE
5762 + do { /* PaX: unpatched PLT emulation step 2 */
5763 + unsigned int save, call, nop;
5764 +
5765 + err = get_user(save, (unsigned int *)(regs->tpc-4));
5766 + err |= get_user(call, (unsigned int *)regs->tpc);
5767 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5768 + if (err)
5769 + break;
5770 +
5771 + if (save == 0x9DE3BFA8U &&
5772 + (call & 0xC0000000U) == 0x40000000U &&
5773 + nop == 0x01000000U)
5774 + {
5775 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5776 +
5777 + if (test_thread_flag(TIF_32BIT))
5778 + dl_resolve &= 0xFFFFFFFFUL;
5779 +
5780 + regs->u_regs[UREG_RETPC] = regs->tpc;
5781 + regs->tpc = dl_resolve;
5782 + regs->tnpc = dl_resolve+4;
5783 + return 3;
5784 + }
5785 + } while (0);
5786 +#endif
5787 +
5788 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5789 + unsigned int sethi, ba, nop;
5790 +
5791 + err = get_user(sethi, (unsigned int *)regs->tpc);
5792 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5793 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5794 +
5795 + if (err)
5796 + break;
5797 +
5798 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5799 + (ba & 0xFFF00000U) == 0x30600000U &&
5800 + nop == 0x01000000U)
5801 + {
5802 + unsigned long addr;
5803 +
5804 + addr = (sethi & 0x003FFFFFU) << 10;
5805 + regs->u_regs[UREG_G1] = addr;
5806 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5807 +
5808 + if (test_thread_flag(TIF_32BIT))
5809 + addr &= 0xFFFFFFFFUL;
5810 +
5811 + regs->tpc = addr;
5812 + regs->tnpc = addr+4;
5813 + return 2;
5814 + }
5815 + } while (0);
5816 +
5817 +#endif
5818 +
5819 + return 1;
5820 +}
5821 +
5822 +void pax_report_insns(void *pc, void *sp)
5823 +{
5824 + unsigned long i;
5825 +
5826 + printk(KERN_ERR "PAX: bytes at PC: ");
5827 + for (i = 0; i < 8; i++) {
5828 + unsigned int c;
5829 + if (get_user(c, (unsigned int *)pc+i))
5830 + printk(KERN_CONT "???????? ");
5831 + else
5832 + printk(KERN_CONT "%08x ", c);
5833 + }
5834 + printk("\n");
5835 +}
5836 +#endif
5837 +
5838 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5839 {
5840 struct mm_struct *mm = current->mm;
5841 @@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fau
5842 if (!vma)
5843 goto bad_area;
5844
5845 +#ifdef CONFIG_PAX_PAGEEXEC
5846 + /* PaX: detect ITLB misses on non-exec pages */
5847 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5848 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5849 + {
5850 + if (address != regs->tpc)
5851 + goto good_area;
5852 +
5853 + up_read(&mm->mmap_sem);
5854 + switch (pax_handle_fetch_fault(regs)) {
5855 +
5856 +#ifdef CONFIG_PAX_EMUPLT
5857 + case 2:
5858 + case 3:
5859 + return;
5860 +#endif
5861 +
5862 + }
5863 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
5864 + do_group_exit(SIGKILL);
5865 + }
5866 +#endif
5867 +
5868 /* Pure DTLB misses do not tell us whether the fault causing
5869 * load/store/atomic was a write or not, it only says that there
5870 * was no match. So in such a case we (carefully) read the
5871 diff -urNp linux-2.6.32.44/arch/sparc/mm/hugetlbpage.c linux-2.6.32.44/arch/sparc/mm/hugetlbpage.c
5872 --- linux-2.6.32.44/arch/sparc/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
5873 +++ linux-2.6.32.44/arch/sparc/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
5874 @@ -69,7 +69,7 @@ full_search:
5875 }
5876 return -ENOMEM;
5877 }
5878 - if (likely(!vma || addr + len <= vma->vm_start)) {
5879 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5880 /*
5881 * Remember the place where we stopped the search:
5882 */
5883 @@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct
5884 /* make sure it can fit in the remaining address space */
5885 if (likely(addr > len)) {
5886 vma = find_vma(mm, addr-len);
5887 - if (!vma || addr <= vma->vm_start) {
5888 + if (check_heap_stack_gap(vma, addr - len, len)) {
5889 /* remember the address as a hint for next time */
5890 return (mm->free_area_cache = addr-len);
5891 }
5892 @@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct
5893 if (unlikely(mm->mmap_base < len))
5894 goto bottomup;
5895
5896 - addr = (mm->mmap_base-len) & HPAGE_MASK;
5897 + addr = mm->mmap_base - len;
5898
5899 do {
5900 + addr &= HPAGE_MASK;
5901 /*
5902 * Lookup failure means no vma is above this address,
5903 * else if new region fits below vma->vm_start,
5904 * return with success:
5905 */
5906 vma = find_vma(mm, addr);
5907 - if (likely(!vma || addr+len <= vma->vm_start)) {
5908 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5909 /* remember the address as a hint for next time */
5910 return (mm->free_area_cache = addr);
5911 }
5912 @@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct
5913 mm->cached_hole_size = vma->vm_start - addr;
5914
5915 /* try just below the current vma->vm_start */
5916 - addr = (vma->vm_start-len) & HPAGE_MASK;
5917 - } while (likely(len < vma->vm_start));
5918 + addr = skip_heap_stack_gap(vma, len);
5919 + } while (!IS_ERR_VALUE(addr));
5920
5921 bottomup:
5922 /*
5923 @@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *f
5924 if (addr) {
5925 addr = ALIGN(addr, HPAGE_SIZE);
5926 vma = find_vma(mm, addr);
5927 - if (task_size - len >= addr &&
5928 - (!vma || addr + len <= vma->vm_start))
5929 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5930 return addr;
5931 }
5932 if (mm->get_unmapped_area == arch_get_unmapped_area)
5933 diff -urNp linux-2.6.32.44/arch/sparc/mm/init_32.c linux-2.6.32.44/arch/sparc/mm/init_32.c
5934 --- linux-2.6.32.44/arch/sparc/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
5935 +++ linux-2.6.32.44/arch/sparc/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
5936 @@ -317,6 +317,9 @@ extern void device_scan(void);
5937 pgprot_t PAGE_SHARED __read_mostly;
5938 EXPORT_SYMBOL(PAGE_SHARED);
5939
5940 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5941 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5942 +
5943 void __init paging_init(void)
5944 {
5945 switch(sparc_cpu_model) {
5946 @@ -345,17 +348,17 @@ void __init paging_init(void)
5947
5948 /* Initialize the protection map with non-constant, MMU dependent values. */
5949 protection_map[0] = PAGE_NONE;
5950 - protection_map[1] = PAGE_READONLY;
5951 - protection_map[2] = PAGE_COPY;
5952 - protection_map[3] = PAGE_COPY;
5953 + protection_map[1] = PAGE_READONLY_NOEXEC;
5954 + protection_map[2] = PAGE_COPY_NOEXEC;
5955 + protection_map[3] = PAGE_COPY_NOEXEC;
5956 protection_map[4] = PAGE_READONLY;
5957 protection_map[5] = PAGE_READONLY;
5958 protection_map[6] = PAGE_COPY;
5959 protection_map[7] = PAGE_COPY;
5960 protection_map[8] = PAGE_NONE;
5961 - protection_map[9] = PAGE_READONLY;
5962 - protection_map[10] = PAGE_SHARED;
5963 - protection_map[11] = PAGE_SHARED;
5964 + protection_map[9] = PAGE_READONLY_NOEXEC;
5965 + protection_map[10] = PAGE_SHARED_NOEXEC;
5966 + protection_map[11] = PAGE_SHARED_NOEXEC;
5967 protection_map[12] = PAGE_READONLY;
5968 protection_map[13] = PAGE_READONLY;
5969 protection_map[14] = PAGE_SHARED;
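For context on the protection_map changes above: the array is indexed by the VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits of a mapping (private combinations in slots 0-7, shared ones in 8-15), so the slots switched to the *_NOEXEC variants (1-3 and 9-11) are exactly the readable/writable combinations that lack VM_EXEC. A rough sketch of the lookup as this kernel generation performs it:

	#include <linux/mm.h>

	/* Sketch: how a mapping's flags select a protection_map slot.  With the
	 * patch applied and CONFIG_PAX_PAGEEXEC set, the non-VM_EXEC slots hand
	 * back the non-executable page protections. */
	static pgprot_t prot_for_flags(unsigned long vm_flags)
	{
		return protection_map[vm_flags &
				      (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
	}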
5970 diff -urNp linux-2.6.32.44/arch/sparc/mm/Makefile linux-2.6.32.44/arch/sparc/mm/Makefile
5971 --- linux-2.6.32.44/arch/sparc/mm/Makefile 2011-03-27 14:31:47.000000000 -0400
5972 +++ linux-2.6.32.44/arch/sparc/mm/Makefile 2011-04-17 15:56:46.000000000 -0400
5973 @@ -2,7 +2,7 @@
5974 #
5975
5976 asflags-y := -ansi
5977 -ccflags-y := -Werror
5978 +#ccflags-y := -Werror
5979
5980 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5981 obj-y += fault_$(BITS).o
5982 diff -urNp linux-2.6.32.44/arch/sparc/mm/srmmu.c linux-2.6.32.44/arch/sparc/mm/srmmu.c
5983 --- linux-2.6.32.44/arch/sparc/mm/srmmu.c 2011-03-27 14:31:47.000000000 -0400
5984 +++ linux-2.6.32.44/arch/sparc/mm/srmmu.c 2011-04-17 15:56:46.000000000 -0400
5985 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5986 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5987 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5988 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5989 +
5990 +#ifdef CONFIG_PAX_PAGEEXEC
5991 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5992 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5993 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5994 +#endif
5995 +
5996 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5997 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5998
5999 diff -urNp linux-2.6.32.44/arch/um/include/asm/kmap_types.h linux-2.6.32.44/arch/um/include/asm/kmap_types.h
6000 --- linux-2.6.32.44/arch/um/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
6001 +++ linux-2.6.32.44/arch/um/include/asm/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
6002 @@ -23,6 +23,7 @@ enum km_type {
6003 KM_IRQ1,
6004 KM_SOFTIRQ0,
6005 KM_SOFTIRQ1,
6006 + KM_CLEARPAGE,
6007 KM_TYPE_NR
6008 };
6009
6010 diff -urNp linux-2.6.32.44/arch/um/include/asm/page.h linux-2.6.32.44/arch/um/include/asm/page.h
6011 --- linux-2.6.32.44/arch/um/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
6012 +++ linux-2.6.32.44/arch/um/include/asm/page.h 2011-04-17 15:56:46.000000000 -0400
6013 @@ -14,6 +14,9 @@
6014 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6015 #define PAGE_MASK (~(PAGE_SIZE-1))
6016
6017 +#define ktla_ktva(addr) (addr)
6018 +#define ktva_ktla(addr) (addr)
6019 +
6020 #ifndef __ASSEMBLY__
6021
6022 struct page;
6023 diff -urNp linux-2.6.32.44/arch/um/kernel/process.c linux-2.6.32.44/arch/um/kernel/process.c
6024 --- linux-2.6.32.44/arch/um/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
6025 +++ linux-2.6.32.44/arch/um/kernel/process.c 2011-04-17 15:56:46.000000000 -0400
6026 @@ -393,22 +393,6 @@ int singlestepping(void * t)
6027 return 2;
6028 }
6029
6030 -/*
6031 - * Only x86 and x86_64 have an arch_align_stack().
6032 - * All other arches have "#define arch_align_stack(x) (x)"
6033 - * in their asm/system.h
6034 - * As this is included in UML from asm-um/system-generic.h,
6035 - * we can use it to behave as the subarch does.
6036 - */
6037 -#ifndef arch_align_stack
6038 -unsigned long arch_align_stack(unsigned long sp)
6039 -{
6040 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6041 - sp -= get_random_int() % 8192;
6042 - return sp & ~0xf;
6043 -}
6044 -#endif
6045 -
6046 unsigned long get_wchan(struct task_struct *p)
6047 {
6048 unsigned long stack_page, sp, ip;
6049 diff -urNp linux-2.6.32.44/arch/um/sys-i386/syscalls.c linux-2.6.32.44/arch/um/sys-i386/syscalls.c
6050 --- linux-2.6.32.44/arch/um/sys-i386/syscalls.c 2011-03-27 14:31:47.000000000 -0400
6051 +++ linux-2.6.32.44/arch/um/sys-i386/syscalls.c 2011-04-17 15:56:46.000000000 -0400
6052 @@ -11,6 +11,21 @@
6053 #include "asm/uaccess.h"
6054 #include "asm/unistd.h"
6055
6056 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
6057 +{
6058 + unsigned long pax_task_size = TASK_SIZE;
6059 +
6060 +#ifdef CONFIG_PAX_SEGMEXEC
6061 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
6062 + pax_task_size = SEGMEXEC_TASK_SIZE;
6063 +#endif
6064 +
6065 + if (len > pax_task_size || addr > pax_task_size - len)
6066 + return -EINVAL;
6067 +
6068 + return 0;
6069 +}
6070 +
6071 /*
6072 * Perform the select(nd, in, out, ex, tv) and mmap() system
6073 * calls. Linux/i386 didn't use to be able to handle more than
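The i386_mmap_check() added above rejects out-of-range requests with a form that cannot wrap: it bounds len first and then compares addr against the limit minus len, instead of computing addr + len. A small illustration of why that ordering matters (all values made up):

	/* Sketch: why "len > limit || addr > limit - len" beats
	 * "addr + len > limit".  With addr = 0xFFFFF000 and len = 0x2000 on
	 * 32-bit, addr + len wraps to 0x1000 and the naive test passes, while
	 * the patched form correctly rejects the request. */
	static int range_ok_sketch(unsigned long addr, unsigned long len,
				   unsigned long limit)
	{
		if (len > limit || addr > limit - len)
			return 0;	/* too long, or would run past the limit */
		return 1;
	}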
6074 diff -urNp linux-2.6.32.44/arch/x86/boot/bitops.h linux-2.6.32.44/arch/x86/boot/bitops.h
6075 --- linux-2.6.32.44/arch/x86/boot/bitops.h 2011-03-27 14:31:47.000000000 -0400
6076 +++ linux-2.6.32.44/arch/x86/boot/bitops.h 2011-04-17 15:56:46.000000000 -0400
6077 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int
6078 u8 v;
6079 const u32 *p = (const u32 *)addr;
6080
6081 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
6082 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
6083 return v;
6084 }
6085
6086 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int
6087
6088 static inline void set_bit(int nr, void *addr)
6089 {
6090 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
6091 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
6092 }
6093
6094 #endif /* BOOT_BITOPS_H */
6095 diff -urNp linux-2.6.32.44/arch/x86/boot/boot.h linux-2.6.32.44/arch/x86/boot/boot.h
6096 --- linux-2.6.32.44/arch/x86/boot/boot.h 2011-03-27 14:31:47.000000000 -0400
6097 +++ linux-2.6.32.44/arch/x86/boot/boot.h 2011-04-17 15:56:46.000000000 -0400
6098 @@ -82,7 +82,7 @@ static inline void io_delay(void)
6099 static inline u16 ds(void)
6100 {
6101 u16 seg;
6102 - asm("movw %%ds,%0" : "=rm" (seg));
6103 + asm volatile("movw %%ds,%0" : "=rm" (seg));
6104 return seg;
6105 }
6106
6107 @@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t
6108 static inline int memcmp(const void *s1, const void *s2, size_t len)
6109 {
6110 u8 diff;
6111 - asm("repe; cmpsb; setnz %0"
6112 + asm volatile("repe; cmpsb; setnz %0"
6113 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
6114 return diff;
6115 }
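The asm-to-asm-volatile conversions in the boot code above (and in cpucheck.c further down) matter because GCC treats a non-volatile asm with outputs as a pure function of its inputs: with no inputs at all it may be hoisted, merged with an identical statement, or dropped when the result looks unused, which is unsafe for instructions that read mutable machine state such as %ds, CPUID leaves or MSRs. The post-change pattern, modelled on the ds() helper from boot.h:

	/* Sketch modelled on ds() above: volatile pins the asm in place so every
	 * call really re-reads the segment register instead of reusing a value
	 * the compiler cached earlier. */
	static inline unsigned short read_ds(void)
	{
		unsigned short seg;
		asm volatile("movw %%ds,%0" : "=rm" (seg));
		return seg;
	}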
6116 diff -urNp linux-2.6.32.44/arch/x86/boot/compressed/head_32.S linux-2.6.32.44/arch/x86/boot/compressed/head_32.S
6117 --- linux-2.6.32.44/arch/x86/boot/compressed/head_32.S 2011-03-27 14:31:47.000000000 -0400
6118 +++ linux-2.6.32.44/arch/x86/boot/compressed/head_32.S 2011-04-17 15:56:46.000000000 -0400
6119 @@ -76,7 +76,7 @@ ENTRY(startup_32)
6120 notl %eax
6121 andl %eax, %ebx
6122 #else
6123 - movl $LOAD_PHYSICAL_ADDR, %ebx
6124 + movl $____LOAD_PHYSICAL_ADDR, %ebx
6125 #endif
6126
6127 /* Target address to relocate to for decompression */
6128 @@ -149,7 +149,7 @@ relocated:
6129 * and where it was actually loaded.
6130 */
6131 movl %ebp, %ebx
6132 - subl $LOAD_PHYSICAL_ADDR, %ebx
6133 + subl $____LOAD_PHYSICAL_ADDR, %ebx
6134 jz 2f /* Nothing to be done if loaded at compiled addr. */
6135 /*
6136 * Process relocations.
6137 @@ -157,8 +157,7 @@ relocated:
6138
6139 1: subl $4, %edi
6140 movl (%edi), %ecx
6141 - testl %ecx, %ecx
6142 - jz 2f
6143 + jecxz 2f
6144 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
6145 jmp 1b
6146 2:
6147 diff -urNp linux-2.6.32.44/arch/x86/boot/compressed/head_64.S linux-2.6.32.44/arch/x86/boot/compressed/head_64.S
6148 --- linux-2.6.32.44/arch/x86/boot/compressed/head_64.S 2011-03-27 14:31:47.000000000 -0400
6149 +++ linux-2.6.32.44/arch/x86/boot/compressed/head_64.S 2011-07-01 18:53:00.000000000 -0400
6150 @@ -91,7 +91,7 @@ ENTRY(startup_32)
6151 notl %eax
6152 andl %eax, %ebx
6153 #else
6154 - movl $LOAD_PHYSICAL_ADDR, %ebx
6155 + movl $____LOAD_PHYSICAL_ADDR, %ebx
6156 #endif
6157
6158 /* Target address to relocate to for decompression */
6159 @@ -183,7 +183,7 @@ no_longmode:
6160 hlt
6161 jmp 1b
6162
6163 -#include "../../kernel/verify_cpu_64.S"
6164 +#include "../../kernel/verify_cpu.S"
6165
6166 /*
6167 * Be careful here startup_64 needs to be at a predictable
6168 @@ -234,7 +234,7 @@ ENTRY(startup_64)
6169 notq %rax
6170 andq %rax, %rbp
6171 #else
6172 - movq $LOAD_PHYSICAL_ADDR, %rbp
6173 + movq $____LOAD_PHYSICAL_ADDR, %rbp
6174 #endif
6175
6176 /* Target address to relocate to for decompression */
6177 diff -urNp linux-2.6.32.44/arch/x86/boot/compressed/Makefile linux-2.6.32.44/arch/x86/boot/compressed/Makefile
6178 --- linux-2.6.32.44/arch/x86/boot/compressed/Makefile 2011-03-27 14:31:47.000000000 -0400
6179 +++ linux-2.6.32.44/arch/x86/boot/compressed/Makefile 2011-08-07 14:38:34.000000000 -0400
6180 @@ -13,6 +13,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
6181 KBUILD_CFLAGS += $(cflags-y)
6182 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
6183 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
6184 +ifdef CONSTIFY_PLUGIN
6185 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6186 +endif
6187
6188 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
6189 GCOV_PROFILE := n
6190 diff -urNp linux-2.6.32.44/arch/x86/boot/compressed/misc.c linux-2.6.32.44/arch/x86/boot/compressed/misc.c
6191 --- linux-2.6.32.44/arch/x86/boot/compressed/misc.c 2011-03-27 14:31:47.000000000 -0400
6192 +++ linux-2.6.32.44/arch/x86/boot/compressed/misc.c 2011-04-17 15:56:46.000000000 -0400
6193 @@ -288,7 +288,7 @@ static void parse_elf(void *output)
6194 case PT_LOAD:
6195 #ifdef CONFIG_RELOCATABLE
6196 dest = output;
6197 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
6198 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
6199 #else
6200 dest = (void *)(phdr->p_paddr);
6201 #endif
6202 @@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *
6203 error("Destination address too large");
6204 #endif
6205 #ifndef CONFIG_RELOCATABLE
6206 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
6207 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
6208 error("Wrong destination address");
6209 #endif
6210
6211 diff -urNp linux-2.6.32.44/arch/x86/boot/compressed/mkpiggy.c linux-2.6.32.44/arch/x86/boot/compressed/mkpiggy.c
6212 --- linux-2.6.32.44/arch/x86/boot/compressed/mkpiggy.c 2011-03-27 14:31:47.000000000 -0400
6213 +++ linux-2.6.32.44/arch/x86/boot/compressed/mkpiggy.c 2011-04-17 15:56:46.000000000 -0400
6214 @@ -74,7 +74,7 @@ int main(int argc, char *argv[])
6215
6216 offs = (olen > ilen) ? olen - ilen : 0;
6217 offs += olen >> 12; /* Add 8 bytes for each 32K block */
6218 - offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
6219 + offs += 64*1024; /* Add 64K bytes slack */
6220 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
6221
6222 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
6223 diff -urNp linux-2.6.32.44/arch/x86/boot/compressed/relocs.c linux-2.6.32.44/arch/x86/boot/compressed/relocs.c
6224 --- linux-2.6.32.44/arch/x86/boot/compressed/relocs.c 2011-03-27 14:31:47.000000000 -0400
6225 +++ linux-2.6.32.44/arch/x86/boot/compressed/relocs.c 2011-04-17 15:56:46.000000000 -0400
6226 @@ -10,8 +10,11 @@
6227 #define USE_BSD
6228 #include <endian.h>
6229
6230 +#include "../../../../include/linux/autoconf.h"
6231 +
6232 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
6233 static Elf32_Ehdr ehdr;
6234 +static Elf32_Phdr *phdr;
6235 static unsigned long reloc_count, reloc_idx;
6236 static unsigned long *relocs;
6237
6238 @@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
6239
6240 static int is_safe_abs_reloc(const char* sym_name)
6241 {
6242 - int i;
6243 + unsigned int i;
6244
6245 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
6246 if (!strcmp(sym_name, safe_abs_relocs[i]))
6247 @@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
6248 }
6249 }
6250
6251 +static void read_phdrs(FILE *fp)
6252 +{
6253 + unsigned int i;
6254 +
6255 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
6256 + if (!phdr) {
6257 + die("Unable to allocate %d program headers\n",
6258 + ehdr.e_phnum);
6259 + }
6260 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
6261 + die("Seek to %d failed: %s\n",
6262 + ehdr.e_phoff, strerror(errno));
6263 + }
6264 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
6265 + die("Cannot read ELF program headers: %s\n",
6266 + strerror(errno));
6267 + }
6268 + for(i = 0; i < ehdr.e_phnum; i++) {
6269 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
6270 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
6271 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
6272 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
6273 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
6274 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
6275 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
6276 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
6277 + }
6278 +
6279 +}
6280 +
6281 static void read_shdrs(FILE *fp)
6282 {
6283 - int i;
6284 + unsigned int i;
6285 Elf32_Shdr shdr;
6286
6287 secs = calloc(ehdr.e_shnum, sizeof(struct section));
6288 @@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
6289
6290 static void read_strtabs(FILE *fp)
6291 {
6292 - int i;
6293 + unsigned int i;
6294 for (i = 0; i < ehdr.e_shnum; i++) {
6295 struct section *sec = &secs[i];
6296 if (sec->shdr.sh_type != SHT_STRTAB) {
6297 @@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
6298
6299 static void read_symtabs(FILE *fp)
6300 {
6301 - int i,j;
6302 + unsigned int i,j;
6303 for (i = 0; i < ehdr.e_shnum; i++) {
6304 struct section *sec = &secs[i];
6305 if (sec->shdr.sh_type != SHT_SYMTAB) {
6306 @@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
6307
6308 static void read_relocs(FILE *fp)
6309 {
6310 - int i,j;
6311 + unsigned int i,j;
6312 + uint32_t base;
6313 +
6314 for (i = 0; i < ehdr.e_shnum; i++) {
6315 struct section *sec = &secs[i];
6316 if (sec->shdr.sh_type != SHT_REL) {
6317 @@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
6318 die("Cannot read symbol table: %s\n",
6319 strerror(errno));
6320 }
6321 + base = 0;
6322 + for (j = 0; j < ehdr.e_phnum; j++) {
6323 + if (phdr[j].p_type != PT_LOAD )
6324 + continue;
6325 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
6326 + continue;
6327 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
6328 + break;
6329 + }
6330 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
6331 Elf32_Rel *rel = &sec->reltab[j];
6332 - rel->r_offset = elf32_to_cpu(rel->r_offset);
6333 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
6334 rel->r_info = elf32_to_cpu(rel->r_info);
6335 }
6336 }
6337 @@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
6338
6339 static void print_absolute_symbols(void)
6340 {
6341 - int i;
6342 + unsigned int i;
6343 printf("Absolute symbols\n");
6344 printf(" Num: Value Size Type Bind Visibility Name\n");
6345 for (i = 0; i < ehdr.e_shnum; i++) {
6346 struct section *sec = &secs[i];
6347 char *sym_strtab;
6348 Elf32_Sym *sh_symtab;
6349 - int j;
6350 + unsigned int j;
6351
6352 if (sec->shdr.sh_type != SHT_SYMTAB) {
6353 continue;
6354 @@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
6355
6356 static void print_absolute_relocs(void)
6357 {
6358 - int i, printed = 0;
6359 + unsigned int i, printed = 0;
6360
6361 for (i = 0; i < ehdr.e_shnum; i++) {
6362 struct section *sec = &secs[i];
6363 struct section *sec_applies, *sec_symtab;
6364 char *sym_strtab;
6365 Elf32_Sym *sh_symtab;
6366 - int j;
6367 + unsigned int j;
6368 if (sec->shdr.sh_type != SHT_REL) {
6369 continue;
6370 }
6371 @@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
6372
6373 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6374 {
6375 - int i;
6376 + unsigned int i;
6377 /* Walk through the relocations */
6378 for (i = 0; i < ehdr.e_shnum; i++) {
6379 char *sym_strtab;
6380 Elf32_Sym *sh_symtab;
6381 struct section *sec_applies, *sec_symtab;
6382 - int j;
6383 + unsigned int j;
6384 struct section *sec = &secs[i];
6385
6386 if (sec->shdr.sh_type != SHT_REL) {
6387 @@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(El
6388 if (sym->st_shndx == SHN_ABS) {
6389 continue;
6390 }
6391 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6392 + if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6393 + continue;
6394 +
6395 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6396 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6397 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6398 + continue;
6399 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6400 + continue;
6401 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6402 + continue;
6403 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6404 + continue;
6405 +#endif
6406 if (r_type == R_386_NONE || r_type == R_386_PC32) {
6407 /*
6408 * NONE can be ignored and and PC relative
6409 @@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, co
6410
6411 static void emit_relocs(int as_text)
6412 {
6413 - int i;
6414 + unsigned int i;
6415 /* Count how many relocations I have and allocate space for them. */
6416 reloc_count = 0;
6417 walk_relocs(count_reloc);
6418 @@ -634,6 +693,7 @@ int main(int argc, char **argv)
6419 fname, strerror(errno));
6420 }
6421 read_ehdr(fp);
6422 + read_phdrs(fp);
6423 read_shdrs(fp);
6424 read_strtabs(fp);
6425 read_symtabs(fp);
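Two things happen in the relocs.c changes above: the loop counters become unsigned int to match the unsigned ELF header fields, and read_phdrs()/read_relocs() rebase each relocation offset by the PT_LOAD segment that contains its section, using the expression visible in the hunk. A worked example of that arithmetic, with made-up segment values:

	#include <stdint.h>

	/* Sketch of the base computation added to read_relocs(); the constants
	 * below are illustrative, and CONFIG_PAGE_OFFSET comes from the kernel
	 * config pulled in via autoconf.h. */
	static uint32_t reloc_base(uint32_t page_offset, uint32_t p_paddr,
				   uint32_t p_vaddr)
	{
		return page_offset + p_paddr - p_vaddr;
	}
	/* e.g. page_offset = 0xC0000000, p_paddr = 0x01000000,
	 * p_vaddr = 0xC1000000 gives base = 0; a segment whose p_vaddr equals
	 * its p_paddr gives base = page_offset itself. */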
6426 diff -urNp linux-2.6.32.44/arch/x86/boot/cpucheck.c linux-2.6.32.44/arch/x86/boot/cpucheck.c
6427 --- linux-2.6.32.44/arch/x86/boot/cpucheck.c 2011-03-27 14:31:47.000000000 -0400
6428 +++ linux-2.6.32.44/arch/x86/boot/cpucheck.c 2011-04-17 15:56:46.000000000 -0400
6429 @@ -74,7 +74,7 @@ static int has_fpu(void)
6430 u16 fcw = -1, fsw = -1;
6431 u32 cr0;
6432
6433 - asm("movl %%cr0,%0" : "=r" (cr0));
6434 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
6435 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6436 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6437 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6438 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6439 {
6440 u32 f0, f1;
6441
6442 - asm("pushfl ; "
6443 + asm volatile("pushfl ; "
6444 "pushfl ; "
6445 "popl %0 ; "
6446 "movl %0,%1 ; "
6447 @@ -115,7 +115,7 @@ static void get_flags(void)
6448 set_bit(X86_FEATURE_FPU, cpu.flags);
6449
6450 if (has_eflag(X86_EFLAGS_ID)) {
6451 - asm("cpuid"
6452 + asm volatile("cpuid"
6453 : "=a" (max_intel_level),
6454 "=b" (cpu_vendor[0]),
6455 "=d" (cpu_vendor[1]),
6456 @@ -124,7 +124,7 @@ static void get_flags(void)
6457
6458 if (max_intel_level >= 0x00000001 &&
6459 max_intel_level <= 0x0000ffff) {
6460 - asm("cpuid"
6461 + asm volatile("cpuid"
6462 : "=a" (tfms),
6463 "=c" (cpu.flags[4]),
6464 "=d" (cpu.flags[0])
6465 @@ -136,7 +136,7 @@ static void get_flags(void)
6466 cpu.model += ((tfms >> 16) & 0xf) << 4;
6467 }
6468
6469 - asm("cpuid"
6470 + asm volatile("cpuid"
6471 : "=a" (max_amd_level)
6472 : "a" (0x80000000)
6473 : "ebx", "ecx", "edx");
6474 @@ -144,7 +144,7 @@ static void get_flags(void)
6475 if (max_amd_level >= 0x80000001 &&
6476 max_amd_level <= 0x8000ffff) {
6477 u32 eax = 0x80000001;
6478 - asm("cpuid"
6479 + asm volatile("cpuid"
6480 : "+a" (eax),
6481 "=c" (cpu.flags[6]),
6482 "=d" (cpu.flags[1])
6483 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6484 u32 ecx = MSR_K7_HWCR;
6485 u32 eax, edx;
6486
6487 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6488 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6489 eax &= ~(1 << 15);
6490 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6491 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6492
6493 get_flags(); /* Make sure it really did something */
6494 err = check_flags();
6495 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6496 u32 ecx = MSR_VIA_FCR;
6497 u32 eax, edx;
6498
6499 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6500 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6501 eax |= (1<<1)|(1<<7);
6502 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6503 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6504
6505 set_bit(X86_FEATURE_CX8, cpu.flags);
6506 err = check_flags();
6507 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
6508 u32 eax, edx;
6509 u32 level = 1;
6510
6511 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6512 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6513 - asm("cpuid"
6514 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6515 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6516 + asm volatile("cpuid"
6517 : "+a" (level), "=d" (cpu.flags[0])
6518 : : "ecx", "ebx");
6519 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6520 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6521
6522 err = check_flags();
6523 }
6524 diff -urNp linux-2.6.32.44/arch/x86/boot/header.S linux-2.6.32.44/arch/x86/boot/header.S
6525 --- linux-2.6.32.44/arch/x86/boot/header.S 2011-03-27 14:31:47.000000000 -0400
6526 +++ linux-2.6.32.44/arch/x86/boot/header.S 2011-04-17 15:56:46.000000000 -0400
6527 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
6528 # single linked list of
6529 # struct setup_data
6530
6531 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6532 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6533
6534 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6535 #define VO_INIT_SIZE (VO__end - VO__text)
6536 diff -urNp linux-2.6.32.44/arch/x86/boot/Makefile linux-2.6.32.44/arch/x86/boot/Makefile
6537 --- linux-2.6.32.44/arch/x86/boot/Makefile 2011-03-27 14:31:47.000000000 -0400
6538 +++ linux-2.6.32.44/arch/x86/boot/Makefile 2011-08-07 14:38:13.000000000 -0400
6539 @@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
6540 $(call cc-option, -fno-stack-protector) \
6541 $(call cc-option, -mpreferred-stack-boundary=2)
6542 KBUILD_CFLAGS += $(call cc-option, -m32)
6543 +ifdef CONSTIFY_PLUGIN
6544 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6545 +endif
6546 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
6547 GCOV_PROFILE := n
6548
6549 diff -urNp linux-2.6.32.44/arch/x86/boot/memory.c linux-2.6.32.44/arch/x86/boot/memory.c
6550 --- linux-2.6.32.44/arch/x86/boot/memory.c 2011-03-27 14:31:47.000000000 -0400
6551 +++ linux-2.6.32.44/arch/x86/boot/memory.c 2011-04-17 15:56:46.000000000 -0400
6552 @@ -19,7 +19,7 @@
6553
6554 static int detect_memory_e820(void)
6555 {
6556 - int count = 0;
6557 + unsigned int count = 0;
6558 struct biosregs ireg, oreg;
6559 struct e820entry *desc = boot_params.e820_map;
6560 static struct e820entry buf; /* static so it is zeroed */
6561 diff -urNp linux-2.6.32.44/arch/x86/boot/video.c linux-2.6.32.44/arch/x86/boot/video.c
6562 --- linux-2.6.32.44/arch/x86/boot/video.c 2011-03-27 14:31:47.000000000 -0400
6563 +++ linux-2.6.32.44/arch/x86/boot/video.c 2011-04-17 15:56:46.000000000 -0400
6564 @@ -90,7 +90,7 @@ static void store_mode_params(void)
6565 static unsigned int get_entry(void)
6566 {
6567 char entry_buf[4];
6568 - int i, len = 0;
6569 + unsigned int i, len = 0;
6570 int key;
6571 unsigned int v;
6572
6573 diff -urNp linux-2.6.32.44/arch/x86/boot/video-vesa.c linux-2.6.32.44/arch/x86/boot/video-vesa.c
6574 --- linux-2.6.32.44/arch/x86/boot/video-vesa.c 2011-03-27 14:31:47.000000000 -0400
6575 +++ linux-2.6.32.44/arch/x86/boot/video-vesa.c 2011-04-17 15:56:46.000000000 -0400
6576 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6577
6578 boot_params.screen_info.vesapm_seg = oreg.es;
6579 boot_params.screen_info.vesapm_off = oreg.di;
6580 + boot_params.screen_info.vesapm_size = oreg.cx;
6581 }
6582
6583 /*
6584 diff -urNp linux-2.6.32.44/arch/x86/ia32/ia32_aout.c linux-2.6.32.44/arch/x86/ia32/ia32_aout.c
6585 --- linux-2.6.32.44/arch/x86/ia32/ia32_aout.c 2011-03-27 14:31:47.000000000 -0400
6586 +++ linux-2.6.32.44/arch/x86/ia32/ia32_aout.c 2011-04-17 15:56:46.000000000 -0400
6587 @@ -169,6 +169,8 @@ static int aout_core_dump(long signr, st
6588 unsigned long dump_start, dump_size;
6589 struct user32 dump;
6590
6591 + memset(&dump, 0, sizeof(dump));
6592 +
6593 fs = get_fs();
6594 set_fs(KERNEL_DS);
6595 has_dumped = 1;
6596 @@ -218,12 +220,6 @@ static int aout_core_dump(long signr, st
6597 dump_size = dump.u_ssize << PAGE_SHIFT;
6598 DUMP_WRITE(dump_start, dump_size);
6599 }
6600 - /*
6601 - * Finally dump the task struct. Not be used by gdb, but
6602 - * could be useful
6603 - */
6604 - set_fs(KERNEL_DS);
6605 - DUMP_WRITE(current, sizeof(*current));
6606 end_coredump:
6607 set_fs(fs);
6608 return has_dumped;
6609 diff -urNp linux-2.6.32.44/arch/x86/ia32/ia32entry.S linux-2.6.32.44/arch/x86/ia32/ia32entry.S
6610 --- linux-2.6.32.44/arch/x86/ia32/ia32entry.S 2011-03-27 14:31:47.000000000 -0400
6611 +++ linux-2.6.32.44/arch/x86/ia32/ia32entry.S 2011-06-04 20:29:52.000000000 -0400
6612 @@ -13,6 +13,7 @@
6613 #include <asm/thread_info.h>
6614 #include <asm/segment.h>
6615 #include <asm/irqflags.h>
6616 +#include <asm/pgtable.h>
6617 #include <linux/linkage.h>
6618
6619 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6620 @@ -93,6 +94,30 @@ ENTRY(native_irq_enable_sysexit)
6621 ENDPROC(native_irq_enable_sysexit)
6622 #endif
6623
6624 + .macro pax_enter_kernel_user
6625 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6626 + call pax_enter_kernel_user
6627 +#endif
6628 + .endm
6629 +
6630 + .macro pax_exit_kernel_user
6631 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6632 + call pax_exit_kernel_user
6633 +#endif
6634 +#ifdef CONFIG_PAX_RANDKSTACK
6635 + pushq %rax
6636 + call pax_randomize_kstack
6637 + popq %rax
6638 +#endif
6639 + pax_erase_kstack
6640 + .endm
6641 +
6642 +.macro pax_erase_kstack
6643 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6644 + call pax_erase_kstack
6645 +#endif
6646 +.endm
6647 +
6648 /*
6649 * 32bit SYSENTER instruction entry.
6650 *
6651 @@ -119,7 +144,7 @@ ENTRY(ia32_sysenter_target)
6652 CFI_REGISTER rsp,rbp
6653 SWAPGS_UNSAFE_STACK
6654 movq PER_CPU_VAR(kernel_stack), %rsp
6655 - addq $(KERNEL_STACK_OFFSET),%rsp
6656 + pax_enter_kernel_user
6657 /*
6658 * No need to follow this irqs on/off section: the syscall
6659 * disabled irqs, here we enable it straight after entry:
6660 @@ -135,7 +160,8 @@ ENTRY(ia32_sysenter_target)
6661 pushfq
6662 CFI_ADJUST_CFA_OFFSET 8
6663 /*CFI_REL_OFFSET rflags,0*/
6664 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6665 + GET_THREAD_INFO(%r10)
6666 + movl TI_sysenter_return(%r10), %r10d
6667 CFI_REGISTER rip,r10
6668 pushq $__USER32_CS
6669 CFI_ADJUST_CFA_OFFSET 8
6670 @@ -150,6 +176,12 @@ ENTRY(ia32_sysenter_target)
6671 SAVE_ARGS 0,0,1
6672 /* no need to do an access_ok check here because rbp has been
6673 32bit zero extended */
6674 +
6675 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6676 + mov $PAX_USER_SHADOW_BASE,%r10
6677 + add %r10,%rbp
6678 +#endif
6679 +
6680 1: movl (%rbp),%ebp
6681 .section __ex_table,"a"
6682 .quad 1b,ia32_badarg
6683 @@ -172,6 +204,7 @@ sysenter_dispatch:
6684 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6685 jnz sysexit_audit
6686 sysexit_from_sys_call:
6687 + pax_exit_kernel_user
6688 andl $~TS_COMPAT,TI_status(%r10)
6689 /* clear IF, that popfq doesn't enable interrupts early */
6690 andl $~0x200,EFLAGS-R11(%rsp)
6691 @@ -200,6 +233,9 @@ sysexit_from_sys_call:
6692 movl %eax,%esi /* 2nd arg: syscall number */
6693 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6694 call audit_syscall_entry
6695 +
6696 + pax_erase_kstack
6697 +
6698 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6699 cmpq $(IA32_NR_syscalls-1),%rax
6700 ja ia32_badsys
6701 @@ -252,6 +288,9 @@ sysenter_tracesys:
6702 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6703 movq %rsp,%rdi /* &pt_regs -> arg1 */
6704 call syscall_trace_enter
6705 +
6706 + pax_erase_kstack
6707 +
6708 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6709 RESTORE_REST
6710 cmpq $(IA32_NR_syscalls-1),%rax
6711 @@ -283,19 +322,24 @@ ENDPROC(ia32_sysenter_target)
6712 ENTRY(ia32_cstar_target)
6713 CFI_STARTPROC32 simple
6714 CFI_SIGNAL_FRAME
6715 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6716 + CFI_DEF_CFA rsp,0
6717 CFI_REGISTER rip,rcx
6718 /*CFI_REGISTER rflags,r11*/
6719 SWAPGS_UNSAFE_STACK
6720 movl %esp,%r8d
6721 CFI_REGISTER rsp,r8
6722 movq PER_CPU_VAR(kernel_stack),%rsp
6723 +
6724 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6725 + pax_enter_kernel_user
6726 +#endif
6727 +
6728 /*
6729 * No need to follow this irqs on/off section: the syscall
6730 * disabled irqs and here we enable it straight after entry:
6731 */
6732 ENABLE_INTERRUPTS(CLBR_NONE)
6733 - SAVE_ARGS 8,1,1
6734 + SAVE_ARGS 8*6,1,1
6735 movl %eax,%eax /* zero extension */
6736 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6737 movq %rcx,RIP-ARGOFFSET(%rsp)
6738 @@ -311,6 +355,12 @@ ENTRY(ia32_cstar_target)
6739 /* no need to do an access_ok check here because r8 has been
6740 32bit zero extended */
6741 /* hardware stack frame is complete now */
6742 +
6743 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6744 + mov $PAX_USER_SHADOW_BASE,%r10
6745 + add %r10,%r8
6746 +#endif
6747 +
6748 1: movl (%r8),%r9d
6749 .section __ex_table,"a"
6750 .quad 1b,ia32_badarg
6751 @@ -333,6 +383,7 @@ cstar_dispatch:
6752 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6753 jnz sysretl_audit
6754 sysretl_from_sys_call:
6755 + pax_exit_kernel_user
6756 andl $~TS_COMPAT,TI_status(%r10)
6757 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
6758 movl RIP-ARGOFFSET(%rsp),%ecx
6759 @@ -370,6 +421,9 @@ cstar_tracesys:
6760 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6761 movq %rsp,%rdi /* &pt_regs -> arg1 */
6762 call syscall_trace_enter
6763 +
6764 + pax_erase_kstack
6765 +
6766 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6767 RESTORE_REST
6768 xchgl %ebp,%r9d
6769 @@ -415,6 +469,7 @@ ENTRY(ia32_syscall)
6770 CFI_REL_OFFSET rip,RIP-RIP
6771 PARAVIRT_ADJUST_EXCEPTION_FRAME
6772 SWAPGS
6773 + pax_enter_kernel_user
6774 /*
6775 * No need to follow this irqs on/off section: the syscall
6776 * disabled irqs and here we enable it straight after entry:
6777 @@ -448,6 +503,9 @@ ia32_tracesys:
6778 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6779 movq %rsp,%rdi /* &pt_regs -> arg1 */
6780 call syscall_trace_enter
6781 +
6782 + pax_erase_kstack
6783 +
6784 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6785 RESTORE_REST
6786 cmpq $(IA32_NR_syscalls-1),%rax
6787 diff -urNp linux-2.6.32.44/arch/x86/ia32/ia32_signal.c linux-2.6.32.44/arch/x86/ia32/ia32_signal.c
6788 --- linux-2.6.32.44/arch/x86/ia32/ia32_signal.c 2011-03-27 14:31:47.000000000 -0400
6789 +++ linux-2.6.32.44/arch/x86/ia32/ia32_signal.c 2011-04-17 15:56:46.000000000 -0400
6790 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
6791 sp -= frame_size;
6792 /* Align the stack pointer according to the i386 ABI,
6793 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6794 - sp = ((sp + 4) & -16ul) - 4;
6795 + sp = ((sp - 12) & -16ul) - 4;
6796 return (void __user *) sp;
6797 }
6798
6799 @@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
6800 * These are actually not used anymore, but left because some
6801 * gdb versions depend on them as a marker.
6802 */
6803 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6804 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6805 } put_user_catch(err);
6806
6807 if (err)
6808 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
6809 0xb8,
6810 __NR_ia32_rt_sigreturn,
6811 0x80cd,
6812 - 0,
6813 + 0
6814 };
6815
6816 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6817 @@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
6818
6819 if (ka->sa.sa_flags & SA_RESTORER)
6820 restorer = ka->sa.sa_restorer;
6821 + else if (current->mm->context.vdso)
6822 + /* Return stub is in 32bit vsyscall page */
6823 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6824 else
6825 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6826 - rt_sigreturn);
6827 + restorer = &frame->retcode;
6828 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6829
6830 /*
6831 * Not actually used anymore, but left because some gdb
6832 * versions need it.
6833 */
6834 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6835 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6836 } put_user_catch(err);
6837
6838 if (err)
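The get_sigframe() change above keeps the i386 ABI entry condition quoted in its comment (((sp + 4) & 15) == 0 at handler entry) but rounds further down: the old expression landed 0-15 bytes below the incoming sp, the patched one lands 16-31 bytes below it. A quick check of the arithmetic:

	/* Sketch: both the old and the patched form satisfy
	 * ((result + 4) & 15) == 0; the patched form is additionally guaranteed
	 * to sit at least 16 bytes below the incoming sp.
	 * Example: sp = 0xffffd00c -> old form 0xffffd00c, patched 0xffffcffc. */
	static unsigned long sigframe_sp(unsigned long sp)
	{
		return ((sp - 12) & -16UL) - 4;
	}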
6839 diff -urNp linux-2.6.32.44/arch/x86/include/asm/alternative.h linux-2.6.32.44/arch/x86/include/asm/alternative.h
6840 --- linux-2.6.32.44/arch/x86/include/asm/alternative.h 2011-03-27 14:31:47.000000000 -0400
6841 +++ linux-2.6.32.44/arch/x86/include/asm/alternative.h 2011-04-17 15:56:46.000000000 -0400
6842 @@ -85,7 +85,7 @@ static inline void alternatives_smp_swit
6843 " .byte 662b-661b\n" /* sourcelen */ \
6844 " .byte 664f-663f\n" /* replacementlen */ \
6845 ".previous\n" \
6846 - ".section .altinstr_replacement, \"ax\"\n" \
6847 + ".section .altinstr_replacement, \"a\"\n" \
6848 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
6849 ".previous"
6850
6851 diff -urNp linux-2.6.32.44/arch/x86/include/asm/apm.h linux-2.6.32.44/arch/x86/include/asm/apm.h
6852 --- linux-2.6.32.44/arch/x86/include/asm/apm.h 2011-03-27 14:31:47.000000000 -0400
6853 +++ linux-2.6.32.44/arch/x86/include/asm/apm.h 2011-04-17 15:56:46.000000000 -0400
6854 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
6855 __asm__ __volatile__(APM_DO_ZERO_SEGS
6856 "pushl %%edi\n\t"
6857 "pushl %%ebp\n\t"
6858 - "lcall *%%cs:apm_bios_entry\n\t"
6859 + "lcall *%%ss:apm_bios_entry\n\t"
6860 "setc %%al\n\t"
6861 "popl %%ebp\n\t"
6862 "popl %%edi\n\t"
6863 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
6864 __asm__ __volatile__(APM_DO_ZERO_SEGS
6865 "pushl %%edi\n\t"
6866 "pushl %%ebp\n\t"
6867 - "lcall *%%cs:apm_bios_entry\n\t"
6868 + "lcall *%%ss:apm_bios_entry\n\t"
6869 "setc %%bl\n\t"
6870 "popl %%ebp\n\t"
6871 "popl %%edi\n\t"
6872 diff -urNp linux-2.6.32.44/arch/x86/include/asm/atomic_32.h linux-2.6.32.44/arch/x86/include/asm/atomic_32.h
6873 --- linux-2.6.32.44/arch/x86/include/asm/atomic_32.h 2011-03-27 14:31:47.000000000 -0400
6874 +++ linux-2.6.32.44/arch/x86/include/asm/atomic_32.h 2011-05-04 17:56:20.000000000 -0400
6875 @@ -25,6 +25,17 @@ static inline int atomic_read(const atom
6876 }
6877
6878 /**
6879 + * atomic_read_unchecked - read atomic variable
6880 + * @v: pointer of type atomic_unchecked_t
6881 + *
6882 + * Atomically reads the value of @v.
6883 + */
6884 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6885 +{
6886 + return v->counter;
6887 +}
6888 +
6889 +/**
6890 * atomic_set - set atomic variable
6891 * @v: pointer of type atomic_t
6892 * @i: required value
6893 @@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *
6894 }
6895
6896 /**
6897 + * atomic_set_unchecked - set atomic variable
6898 + * @v: pointer of type atomic_unchecked_t
6899 + * @i: required value
6900 + *
6901 + * Atomically sets the value of @v to @i.
6902 + */
6903 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6904 +{
6905 + v->counter = i;
6906 +}
6907 +
6908 +/**
6909 * atomic_add - add integer to atomic variable
6910 * @i: integer value to add
6911 * @v: pointer of type atomic_t
6912 @@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *
6913 */
6914 static inline void atomic_add(int i, atomic_t *v)
6915 {
6916 - asm volatile(LOCK_PREFIX "addl %1,%0"
6917 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6918 +
6919 +#ifdef CONFIG_PAX_REFCOUNT
6920 + "jno 0f\n"
6921 + LOCK_PREFIX "subl %1,%0\n"
6922 + "int $4\n0:\n"
6923 + _ASM_EXTABLE(0b, 0b)
6924 +#endif
6925 +
6926 + : "+m" (v->counter)
6927 + : "ir" (i));
6928 +}
6929 +
6930 +/**
6931 + * atomic_add_unchecked - add integer to atomic variable
6932 + * @i: integer value to add
6933 + * @v: pointer of type atomic_unchecked_t
6934 + *
6935 + * Atomically adds @i to @v.
6936 + */
6937 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6938 +{
6939 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6940 : "+m" (v->counter)
6941 : "ir" (i));
6942 }
6943 @@ -59,7 +104,29 @@ static inline void atomic_add(int i, ato
6944 */
6945 static inline void atomic_sub(int i, atomic_t *v)
6946 {
6947 - asm volatile(LOCK_PREFIX "subl %1,%0"
6948 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6949 +
6950 +#ifdef CONFIG_PAX_REFCOUNT
6951 + "jno 0f\n"
6952 + LOCK_PREFIX "addl %1,%0\n"
6953 + "int $4\n0:\n"
6954 + _ASM_EXTABLE(0b, 0b)
6955 +#endif
6956 +
6957 + : "+m" (v->counter)
6958 + : "ir" (i));
6959 +}
6960 +
6961 +/**
6962 + * atomic_sub_unchecked - subtract integer from atomic variable
6963 + * @i: integer value to subtract
6964 + * @v: pointer of type atomic_unchecked_t
6965 + *
6966 + * Atomically subtracts @i from @v.
6967 + */
6968 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6969 +{
6970 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6971 : "+m" (v->counter)
6972 : "ir" (i));
6973 }
6974 @@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(in
6975 {
6976 unsigned char c;
6977
6978 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6979 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
6980 +
6981 +#ifdef CONFIG_PAX_REFCOUNT
6982 + "jno 0f\n"
6983 + LOCK_PREFIX "addl %2,%0\n"
6984 + "int $4\n0:\n"
6985 + _ASM_EXTABLE(0b, 0b)
6986 +#endif
6987 +
6988 + "sete %1\n"
6989 : "+m" (v->counter), "=qm" (c)
6990 : "ir" (i) : "memory");
6991 return c;
6992 @@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(in
6993 */
6994 static inline void atomic_inc(atomic_t *v)
6995 {
6996 - asm volatile(LOCK_PREFIX "incl %0"
6997 + asm volatile(LOCK_PREFIX "incl %0\n"
6998 +
6999 +#ifdef CONFIG_PAX_REFCOUNT
7000 + "jno 0f\n"
7001 + LOCK_PREFIX "decl %0\n"
7002 + "int $4\n0:\n"
7003 + _ASM_EXTABLE(0b, 0b)
7004 +#endif
7005 +
7006 + : "+m" (v->counter));
7007 +}
7008 +
7009 +/**
7010 + * atomic_inc_unchecked - increment atomic variable
7011 + * @v: pointer of type atomic_unchecked_t
7012 + *
7013 + * Atomically increments @v by 1.
7014 + */
7015 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7016 +{
7017 + asm volatile(LOCK_PREFIX "incl %0\n"
7018 : "+m" (v->counter));
7019 }
7020
7021 @@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *
7022 */
7023 static inline void atomic_dec(atomic_t *v)
7024 {
7025 - asm volatile(LOCK_PREFIX "decl %0"
7026 + asm volatile(LOCK_PREFIX "decl %0\n"
7027 +
7028 +#ifdef CONFIG_PAX_REFCOUNT
7029 + "jno 0f\n"
7030 + LOCK_PREFIX "incl %0\n"
7031 + "int $4\n0:\n"
7032 + _ASM_EXTABLE(0b, 0b)
7033 +#endif
7034 +
7035 + : "+m" (v->counter));
7036 +}
7037 +
7038 +/**
7039 + * atomic_dec_unchecked - decrement atomic variable
7040 + * @v: pointer of type atomic_unchecked_t
7041 + *
7042 + * Atomically decrements @v by 1.
7043 + */
7044 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7045 +{
7046 + asm volatile(LOCK_PREFIX "decl %0\n"
7047 : "+m" (v->counter));
7048 }
7049
7050 @@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(at
7051 {
7052 unsigned char c;
7053
7054 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
7055 + asm volatile(LOCK_PREFIX "decl %0\n"
7056 +
7057 +#ifdef CONFIG_PAX_REFCOUNT
7058 + "jno 0f\n"
7059 + LOCK_PREFIX "incl %0\n"
7060 + "int $4\n0:\n"
7061 + _ASM_EXTABLE(0b, 0b)
7062 +#endif
7063 +
7064 + "sete %1\n"
7065 : "+m" (v->counter), "=qm" (c)
7066 : : "memory");
7067 return c != 0;
7068 @@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(at
7069 {
7070 unsigned char c;
7071
7072 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
7073 + asm volatile(LOCK_PREFIX "incl %0\n"
7074 +
7075 +#ifdef CONFIG_PAX_REFCOUNT
7076 + "jno 0f\n"
7077 + LOCK_PREFIX "decl %0\n"
7078 + "into\n0:\n"
7079 + _ASM_EXTABLE(0b, 0b)
7080 +#endif
7081 +
7082 + "sete %1\n"
7083 + : "+m" (v->counter), "=qm" (c)
7084 + : : "memory");
7085 + return c != 0;
7086 +}
7087 +
7088 +/**
7089 + * atomic_inc_and_test_unchecked - increment and test
7090 + * @v: pointer of type atomic_unchecked_t
7091 + *
7092 + * Atomically increments @v by 1
7093 + * and returns true if the result is zero, or false for all
7094 + * other cases.
7095 + */
7096 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7097 +{
7098 + unsigned char c;
7099 +
7100 + asm volatile(LOCK_PREFIX "incl %0\n"
7101 + "sete %1\n"
7102 : "+m" (v->counter), "=qm" (c)
7103 : : "memory");
7104 return c != 0;
7105 @@ -156,7 +309,16 @@ static inline int atomic_add_negative(in
7106 {
7107 unsigned char c;
7108
7109 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7110 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
7111 +
7112 +#ifdef CONFIG_PAX_REFCOUNT
7113 + "jno 0f\n"
7114 + LOCK_PREFIX "subl %2,%0\n"
7115 + "int $4\n0:\n"
7116 + _ASM_EXTABLE(0b, 0b)
7117 +#endif
7118 +
7119 + "sets %1\n"
7120 : "+m" (v->counter), "=qm" (c)
7121 : "ir" (i) : "memory");
7122 return c;
7123 @@ -179,6 +341,46 @@ static inline int atomic_add_return(int
7124 #endif
7125 /* Modern 486+ processor */
7126 __i = i;
7127 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7128 +
7129 +#ifdef CONFIG_PAX_REFCOUNT
7130 + "jno 0f\n"
7131 + "movl %0, %1\n"
7132 + "int $4\n0:\n"
7133 + _ASM_EXTABLE(0b, 0b)
7134 +#endif
7135 +
7136 + : "+r" (i), "+m" (v->counter)
7137 + : : "memory");
7138 + return i + __i;
7139 +
7140 +#ifdef CONFIG_M386
7141 +no_xadd: /* Legacy 386 processor */
7142 + local_irq_save(flags);
7143 + __i = atomic_read(v);
7144 + atomic_set(v, i + __i);
7145 + local_irq_restore(flags);
7146 + return i + __i;
7147 +#endif
7148 +}
7149 +
7150 +/**
7151 + * atomic_add_return_unchecked - add integer and return
7152 + * @v: pointer of type atomic_unchecked_t
7153 + * @i: integer value to add
7154 + *
7155 + * Atomically adds @i to @v and returns @i + @v
7156 + */
7157 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7158 +{
7159 + int __i;
7160 +#ifdef CONFIG_M386
7161 + unsigned long flags;
7162 + if (unlikely(boot_cpu_data.x86 <= 3))
7163 + goto no_xadd;
7164 +#endif
7165 + /* Modern 486+ processor */
7166 + __i = i;
7167 asm volatile(LOCK_PREFIX "xaddl %0, %1"
7168 : "+r" (i), "+m" (v->counter)
7169 : : "memory");
7170 @@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_
7171 return cmpxchg(&v->counter, old, new);
7172 }
7173
7174 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7175 +{
7176 + return cmpxchg(&v->counter, old, new);
7177 +}
7178 +
7179 static inline int atomic_xchg(atomic_t *v, int new)
7180 {
7181 return xchg(&v->counter, new);
7182 }
7183
7184 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7185 +{
7186 + return xchg(&v->counter, new);
7187 +}
7188 +
7189 /**
7190 * atomic_add_unless - add unless the number is already a given value
7191 * @v: pointer of type atomic_t
7192 @@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *
7193 */
7194 static inline int atomic_add_unless(atomic_t *v, int a, int u)
7195 {
7196 - int c, old;
7197 + int c, old, new;
7198 c = atomic_read(v);
7199 for (;;) {
7200 - if (unlikely(c == (u)))
7201 + if (unlikely(c == u))
7202 break;
7203 - old = atomic_cmpxchg((v), c, c + (a));
7204 +
7205 + asm volatile("addl %2,%0\n"
7206 +
7207 +#ifdef CONFIG_PAX_REFCOUNT
7208 + "jno 0f\n"
7209 + "subl %2,%0\n"
7210 + "int $4\n0:\n"
7211 + _ASM_EXTABLE(0b, 0b)
7212 +#endif
7213 +
7214 + : "=r" (new)
7215 + : "0" (c), "ir" (a));
7216 +
7217 + old = atomic_cmpxchg(v, c, new);
7218 if (likely(old == c))
7219 break;
7220 c = old;
7221 }
7222 - return c != (u);
7223 + return c != u;
7224 }
7225
7226 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
7227
7228 #define atomic_inc_return(v) (atomic_add_return(1, v))
7229 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7230 +{
7231 + return atomic_add_return_unchecked(1, v);
7232 +}
7233 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7234
7235 /* These are x86-specific, used by some header files */
7236 @@ -266,9 +495,18 @@ typedef struct {
7237 u64 __aligned(8) counter;
7238 } atomic64_t;
7239
7240 +#ifdef CONFIG_PAX_REFCOUNT
7241 +typedef struct {
7242 + u64 __aligned(8) counter;
7243 +} atomic64_unchecked_t;
7244 +#else
7245 +typedef atomic64_t atomic64_unchecked_t;
7246 +#endif
7247 +
7248 #define ATOMIC64_INIT(val) { (val) }
7249
7250 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
7251 +extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
7252
7253 /**
7254 * atomic64_xchg - xchg atomic64 variable
7255 @@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *
7256 * the old value.
7257 */
7258 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
7259 +extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7260
7261 /**
7262 * atomic64_set - set atomic64 variable
7263 @@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr
7264 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
7265
7266 /**
7267 + * atomic64_unchecked_set - set atomic64 variable
7268 + * @ptr: pointer to type atomic64_unchecked_t
7269 + * @new_val: value to assign
7270 + *
7271 + * Atomically sets the value of @ptr to @new_val.
7272 + */
7273 +extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7274 +
7275 +/**
7276 * atomic64_read - read atomic64 variable
7277 * @ptr: pointer to type atomic64_t
7278 *
7279 @@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64
7280 return res;
7281 }
7282
7283 -extern u64 atomic64_read(atomic64_t *ptr);
7284 +/**
7285 + * atomic64_read_unchecked - read atomic64 variable
7286 + * @ptr: pointer to type atomic64_unchecked_t
7287 + *
7288 + * Atomically reads the value of @ptr and returns it.
7289 + */
7290 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
7291 +{
7292 + u64 res;
7293 +
7294 + /*
7295 + * Note, we inline this atomic64_unchecked_t primitive because
7296 + * it only clobbers EAX/EDX and leaves the others
7297 + * untouched. We also (somewhat subtly) rely on the
7298 + * fact that cmpxchg8b returns the current 64-bit value
7299 + * of the memory location we are touching:
7300 + */
7301 + asm volatile(
7302 + "mov %%ebx, %%eax\n\t"
7303 + "mov %%ecx, %%edx\n\t"
7304 + LOCK_PREFIX "cmpxchg8b %1\n"
7305 + : "=&A" (res)
7306 + : "m" (*ptr)
7307 + );
7308 +
7309 + return res;
7310 +}
7311
7312 /**
7313 * atomic64_add_return - add and return
7314 @@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta
7315 * Other variants with different arithmetic operators:
7316 */
7317 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
7318 +extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7319 extern u64 atomic64_inc_return(atomic64_t *ptr);
7320 +extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
7321 extern u64 atomic64_dec_return(atomic64_t *ptr);
7322 +extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
7323
7324 /**
7325 * atomic64_add - add integer to atomic64 variable
7326 @@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_
7327 extern void atomic64_add(u64 delta, atomic64_t *ptr);
7328
7329 /**
7330 + * atomic64_add_unchecked - add integer to atomic64 variable
7331 + * @delta: integer value to add
7332 + * @ptr: pointer to type atomic64_unchecked_t
7333 + *
7334 + * Atomically adds @delta to @ptr.
7335 + */
7336 +extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7337 +
7338 +/**
7339 * atomic64_sub - subtract the atomic64 variable
7340 * @delta: integer value to subtract
7341 * @ptr: pointer to type atomic64_t
7342 @@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atom
7343 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
7344
7345 /**
7346 + * atomic64_sub_unchecked - subtract the atomic64 variable
7347 + * @delta: integer value to subtract
7348 + * @ptr: pointer to type atomic64_unchecked_t
7349 + *
7350 + * Atomically subtracts @delta from @ptr.
7351 + */
7352 +extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7353 +
7354 +/**
7355 * atomic64_sub_and_test - subtract value from variable and test result
7356 * @delta: integer value to subtract
7357 * @ptr: pointer to type atomic64_t
7358 @@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 del
7359 extern void atomic64_inc(atomic64_t *ptr);
7360
7361 /**
7362 + * atomic64_inc_unchecked - increment atomic64 variable
7363 + * @ptr: pointer to type atomic64_unchecked_t
7364 + *
7365 + * Atomically increments @ptr by 1.
7366 + */
7367 +extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
7368 +
7369 +/**
7370 * atomic64_dec - decrement atomic64 variable
7371 * @ptr: pointer to type atomic64_t
7372 *
7373 @@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr
7374 extern void atomic64_dec(atomic64_t *ptr);
7375
7376 /**
7377 + * atomic64_dec_unchecked - decrement atomic64 variable
7378 + * @ptr: pointer to type atomic64_unchecked_t
7379 + *
7380 + * Atomically decrements @ptr by 1.
7381 + */
7382 +extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
7383 +
7384 +/**
7385 * atomic64_dec_and_test - decrement and test
7386 * @ptr: pointer to type atomic64_t
7387 *
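The CONFIG_PAX_REFCOUNT instrumentation in the atomic_32.h hunks above follows one pattern throughout: perform the locked operation, then jno over a recovery path; if the signed result overflowed, the operation is undone and int $4 raises the overflow exception, with the accompanying _ASM_EXTABLE entry letting the kernel's handler resume past the trap. The *_unchecked variants exist for counters where wrapping is intentional. Below is a user-space analogue of the detect-and-undo part only; it necessarily stops short of the int $4/exception-table step, so treat it as a sketch of the pattern rather than the kernel code:

	/* Minimal sketch (x86, GCC inline asm): atomically increment, and if the
	 * signed result overflowed, roll the increment back and report the event
	 * instead of raising #OF as the kernel pattern does. */
	static int inc_checked(int *counter)
	{
		int overflowed = 0;

		asm volatile("lock incl %0\n\t"
			     "jno 1f\n\t"
			     "lock decl %0\n\t"		/* undo, mirroring the patch */
			     "movl $1, %1\n"
			     "1:"
			     : "+m" (*counter), "+r" (overflowed)
			     : : "memory", "cc");
		return overflowed;
	}

If inc_checked() returns 1, the counter was left saturated at INT_MAX rather than being allowed to wrap to INT_MIN, which is the state the kernel pattern reaches just before it traps.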
7388 diff -urNp linux-2.6.32.44/arch/x86/include/asm/atomic_64.h linux-2.6.32.44/arch/x86/include/asm/atomic_64.h
7389 --- linux-2.6.32.44/arch/x86/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
7390 +++ linux-2.6.32.44/arch/x86/include/asm/atomic_64.h 2011-05-04 18:35:31.000000000 -0400
7391 @@ -24,6 +24,17 @@ static inline int atomic_read(const atom
7392 }
7393
7394 /**
7395 + * atomic_read_unchecked - read atomic variable
7396 + * @v: pointer of type atomic_unchecked_t
7397 + *
7398 + * Atomically reads the value of @v.
7399 + */
7400 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7401 +{
7402 + return v->counter;
7403 +}
7404 +
7405 +/**
7406 * atomic_set - set atomic variable
7407 * @v: pointer of type atomic_t
7408 * @i: required value
7409 @@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *
7410 }
7411
7412 /**
7413 + * atomic_set_unchecked - set atomic variable
7414 + * @v: pointer of type atomic_unchecked_t
7415 + * @i: required value
7416 + *
7417 + * Atomically sets the value of @v to @i.
7418 + */
7419 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7420 +{
7421 + v->counter = i;
7422 +}
7423 +
7424 +/**
7425 * atomic_add - add integer to atomic variable
7426 * @i: integer value to add
7427 * @v: pointer of type atomic_t
7428 @@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *
7429 */
7430 static inline void atomic_add(int i, atomic_t *v)
7431 {
7432 - asm volatile(LOCK_PREFIX "addl %1,%0"
7433 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7434 +
7435 +#ifdef CONFIG_PAX_REFCOUNT
7436 + "jno 0f\n"
7437 + LOCK_PREFIX "subl %1,%0\n"
7438 + "int $4\n0:\n"
7439 + _ASM_EXTABLE(0b, 0b)
7440 +#endif
7441 +
7442 + : "=m" (v->counter)
7443 + : "ir" (i), "m" (v->counter));
7444 +}
7445 +
7446 +/**
7447 + * atomic_add_unchecked - add integer to atomic variable
7448 + * @i: integer value to add
7449 + * @v: pointer of type atomic_unchecked_t
7450 + *
7451 + * Atomically adds @i to @v.
7452 + */
7453 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7454 +{
7455 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7456 : "=m" (v->counter)
7457 : "ir" (i), "m" (v->counter));
7458 }
7459 @@ -58,7 +103,29 @@ static inline void atomic_add(int i, ato
7460 */
7461 static inline void atomic_sub(int i, atomic_t *v)
7462 {
7463 - asm volatile(LOCK_PREFIX "subl %1,%0"
7464 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7465 +
7466 +#ifdef CONFIG_PAX_REFCOUNT
7467 + "jno 0f\n"
7468 + LOCK_PREFIX "addl %1,%0\n"
7469 + "int $4\n0:\n"
7470 + _ASM_EXTABLE(0b, 0b)
7471 +#endif
7472 +
7473 + : "=m" (v->counter)
7474 + : "ir" (i), "m" (v->counter));
7475 +}
7476 +
7477 +/**
7478 + * atomic_sub_unchecked - subtract the atomic variable
7479 + * @i: integer value to subtract
7480 + * @v: pointer of type atomic_unchecked_t
7481 + *
7482 + * Atomically subtracts @i from @v.
7483 + */
7484 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7485 +{
7486 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7487 : "=m" (v->counter)
7488 : "ir" (i), "m" (v->counter));
7489 }
7490 @@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(in
7491 {
7492 unsigned char c;
7493
7494 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7495 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
7496 +
7497 +#ifdef CONFIG_PAX_REFCOUNT
7498 + "jno 0f\n"
7499 + LOCK_PREFIX "addl %2,%0\n"
7500 + "int $4\n0:\n"
7501 + _ASM_EXTABLE(0b, 0b)
7502 +#endif
7503 +
7504 + "sete %1\n"
7505 : "=m" (v->counter), "=qm" (c)
7506 : "ir" (i), "m" (v->counter) : "memory");
7507 return c;
7508 @@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(in
7509 */
7510 static inline void atomic_inc(atomic_t *v)
7511 {
7512 - asm volatile(LOCK_PREFIX "incl %0"
7513 + asm volatile(LOCK_PREFIX "incl %0\n"
7514 +
7515 +#ifdef CONFIG_PAX_REFCOUNT
7516 + "jno 0f\n"
7517 + LOCK_PREFIX "decl %0\n"
7518 + "int $4\n0:\n"
7519 + _ASM_EXTABLE(0b, 0b)
7520 +#endif
7521 +
7522 + : "=m" (v->counter)
7523 + : "m" (v->counter));
7524 +}
7525 +
7526 +/**
7527 + * atomic_inc_unchecked - increment atomic variable
7528 + * @v: pointer of type atomic_unchecked_t
7529 + *
7530 + * Atomically increments @v by 1.
7531 + */
7532 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7533 +{
7534 + asm volatile(LOCK_PREFIX "incl %0\n"
7535 : "=m" (v->counter)
7536 : "m" (v->counter));
7537 }
7538 @@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *
7539 */
7540 static inline void atomic_dec(atomic_t *v)
7541 {
7542 - asm volatile(LOCK_PREFIX "decl %0"
7543 + asm volatile(LOCK_PREFIX "decl %0\n"
7544 +
7545 +#ifdef CONFIG_PAX_REFCOUNT
7546 + "jno 0f\n"
7547 + LOCK_PREFIX "incl %0\n"
7548 + "int $4\n0:\n"
7549 + _ASM_EXTABLE(0b, 0b)
7550 +#endif
7551 +
7552 + : "=m" (v->counter)
7553 + : "m" (v->counter));
7554 +}
7555 +
7556 +/**
7557 + * atomic_dec_unchecked - decrement atomic variable
7558 + * @v: pointer of type atomic_unchecked_t
7559 + *
7560 + * Atomically decrements @v by 1.
7561 + */
7562 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7563 +{
7564 + asm volatile(LOCK_PREFIX "decl %0\n"
7565 : "=m" (v->counter)
7566 : "m" (v->counter));
7567 }
7568 @@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(at
7569 {
7570 unsigned char c;
7571
7572 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
7573 + asm volatile(LOCK_PREFIX "decl %0\n"
7574 +
7575 +#ifdef CONFIG_PAX_REFCOUNT
7576 + "jno 0f\n"
7577 + LOCK_PREFIX "incl %0\n"
7578 + "int $4\n0:\n"
7579 + _ASM_EXTABLE(0b, 0b)
7580 +#endif
7581 +
7582 + "sete %1\n"
7583 : "=m" (v->counter), "=qm" (c)
7584 : "m" (v->counter) : "memory");
7585 return c != 0;
7586 @@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(at
7587 {
7588 unsigned char c;
7589
7590 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
7591 + asm volatile(LOCK_PREFIX "incl %0\n"
7592 +
7593 +#ifdef CONFIG_PAX_REFCOUNT
7594 + "jno 0f\n"
7595 + LOCK_PREFIX "decl %0\n"
7596 + "int $4\n0:\n"
7597 + _ASM_EXTABLE(0b, 0b)
7598 +#endif
7599 +
7600 + "sete %1\n"
7601 + : "=m" (v->counter), "=qm" (c)
7602 + : "m" (v->counter) : "memory");
7603 + return c != 0;
7604 +}
7605 +
7606 +/**
7607 + * atomic_inc_and_test_unchecked - increment and test
7608 + * @v: pointer of type atomic_unchecked_t
7609 + *
7610 + * Atomically increments @v by 1
7611 + * and returns true if the result is zero, or false for all
7612 + * other cases.
7613 + */
7614 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7615 +{
7616 + unsigned char c;
7617 +
7618 + asm volatile(LOCK_PREFIX "incl %0\n"
7619 + "sete %1\n"
7620 : "=m" (v->counter), "=qm" (c)
7621 : "m" (v->counter) : "memory");
7622 return c != 0;
7623 @@ -157,7 +312,16 @@ static inline int atomic_add_negative(in
7624 {
7625 unsigned char c;
7626
7627 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7628 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
7629 +
7630 +#ifdef CONFIG_PAX_REFCOUNT
7631 + "jno 0f\n"
7632 + LOCK_PREFIX "subl %2,%0\n"
7633 + "int $4\n0:\n"
7634 + _ASM_EXTABLE(0b, 0b)
7635 +#endif
7636 +
7637 + "sets %1\n"
7638 : "=m" (v->counter), "=qm" (c)
7639 : "ir" (i), "m" (v->counter) : "memory");
7640 return c;
7641 @@ -173,7 +337,31 @@ static inline int atomic_add_negative(in
7642 static inline int atomic_add_return(int i, atomic_t *v)
7643 {
7644 int __i = i;
7645 - asm volatile(LOCK_PREFIX "xaddl %0, %1"
7646 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7647 +
7648 +#ifdef CONFIG_PAX_REFCOUNT
7649 + "jno 0f\n"
7650 + "movl %0, %1\n"
7651 + "int $4\n0:\n"
7652 + _ASM_EXTABLE(0b, 0b)
7653 +#endif
7654 +
7655 + : "+r" (i), "+m" (v->counter)
7656 + : : "memory");
7657 + return i + __i;
7658 +}
7659 +
7660 +/**
7661 + * atomic_add_return_unchecked - add and return
7662 + * @i: integer value to add
7663 + * @v: pointer of type atomic_unchecked_t
7664 + *
7665 + * Atomically adds @i to @v and returns @i + @v
7666 + */
7667 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7668 +{
7669 + int __i = i;
7670 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7671 : "+r" (i), "+m" (v->counter)
7672 : : "memory");
7673 return i + __i;
7674 @@ -185,6 +373,10 @@ static inline int atomic_sub_return(int
7675 }
7676
7677 #define atomic_inc_return(v) (atomic_add_return(1, v))
7678 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7679 +{
7680 + return atomic_add_return_unchecked(1, v);
7681 +}
7682 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7683
7684 /* The 64-bit atomic type */
7685 @@ -204,6 +396,18 @@ static inline long atomic64_read(const a
7686 }
7687
7688 /**
7689 + * atomic64_read_unchecked - read atomic64 variable
7690 + * @v: pointer of type atomic64_unchecked_t
7691 + *
7692 + * Atomically reads the value of @v.
7693 + * Doesn't imply a read memory barrier.
7694 + */
7695 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7696 +{
7697 + return v->counter;
7698 +}
7699 +
7700 +/**
7701 * atomic64_set - set atomic64 variable
7702 * @v: pointer to type atomic64_t
7703 * @i: required value
7704 @@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64
7705 }
7706
7707 /**
7708 + * atomic64_set_unchecked - set atomic64 variable
7709 + * @v: pointer to type atomic64_unchecked_t
7710 + * @i: required value
7711 + *
7712 + * Atomically sets the value of @v to @i.
7713 + */
7714 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7715 +{
7716 + v->counter = i;
7717 +}
7718 +
7719 +/**
7720 * atomic64_add - add integer to atomic64 variable
7721 * @i: integer value to add
7722 * @v: pointer to type atomic64_t
7723 @@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64
7724 */
7725 static inline void atomic64_add(long i, atomic64_t *v)
7726 {
7727 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
7728 +
7729 +#ifdef CONFIG_PAX_REFCOUNT
7730 + "jno 0f\n"
7731 + LOCK_PREFIX "subq %1,%0\n"
7732 + "int $4\n0:\n"
7733 + _ASM_EXTABLE(0b, 0b)
7734 +#endif
7735 +
7736 + : "=m" (v->counter)
7737 + : "er" (i), "m" (v->counter));
7738 +}
7739 +
7740 +/**
7741 + * atomic64_add_unchecked - add integer to atomic64 variable
7742 + * @i: integer value to add
7743 + * @v: pointer to type atomic64_unchecked_t
7744 + *
7745 + * Atomically adds @i to @v.
7746 + */
7747 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
7748 +{
7749 asm volatile(LOCK_PREFIX "addq %1,%0"
7750 : "=m" (v->counter)
7751 : "er" (i), "m" (v->counter));
7752 @@ -238,7 +476,15 @@ static inline void atomic64_add(long i,
7753 */
7754 static inline void atomic64_sub(long i, atomic64_t *v)
7755 {
7756 - asm volatile(LOCK_PREFIX "subq %1,%0"
7757 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
7758 +
7759 +#ifdef CONFIG_PAX_REFCOUNT
7760 + "jno 0f\n"
7761 + LOCK_PREFIX "addq %1,%0\n"
7762 + "int $4\n0:\n"
7763 + _ASM_EXTABLE(0b, 0b)
7764 +#endif
7765 +
7766 : "=m" (v->counter)
7767 : "er" (i), "m" (v->counter));
7768 }
7769 @@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(
7770 {
7771 unsigned char c;
7772
7773 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
7774 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
7775 +
7776 +#ifdef CONFIG_PAX_REFCOUNT
7777 + "jno 0f\n"
7778 + LOCK_PREFIX "addq %2,%0\n"
7779 + "int $4\n0:\n"
7780 + _ASM_EXTABLE(0b, 0b)
7781 +#endif
7782 +
7783 + "sete %1\n"
7784 : "=m" (v->counter), "=qm" (c)
7785 : "er" (i), "m" (v->counter) : "memory");
7786 return c;
7787 @@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(
7788 */
7789 static inline void atomic64_inc(atomic64_t *v)
7790 {
7791 + asm volatile(LOCK_PREFIX "incq %0\n"
7792 +
7793 +#ifdef CONFIG_PAX_REFCOUNT
7794 + "jno 0f\n"
7795 + LOCK_PREFIX "decq %0\n"
7796 + "int $4\n0:\n"
7797 + _ASM_EXTABLE(0b, 0b)
7798 +#endif
7799 +
7800 + : "=m" (v->counter)
7801 + : "m" (v->counter));
7802 +}
7803 +
7804 +/**
7805 + * atomic64_inc_unchecked - increment atomic64 variable
7806 + * @v: pointer to type atomic64_unchecked_t
7807 + *
7808 + * Atomically increments @v by 1.
7809 + */
7810 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7811 +{
7812 asm volatile(LOCK_PREFIX "incq %0"
7813 : "=m" (v->counter)
7814 : "m" (v->counter));
7815 @@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64
7816 */
7817 static inline void atomic64_dec(atomic64_t *v)
7818 {
7819 - asm volatile(LOCK_PREFIX "decq %0"
7820 + asm volatile(LOCK_PREFIX "decq %0\n"
7821 +
7822 +#ifdef CONFIG_PAX_REFCOUNT
7823 + "jno 0f\n"
7824 + LOCK_PREFIX "incq %0\n"
7825 + "int $4\n0:\n"
7826 + _ASM_EXTABLE(0b, 0b)
7827 +#endif
7828 +
7829 + : "=m" (v->counter)
7830 + : "m" (v->counter));
7831 +}
7832 +
7833 +/**
7834 + * atomic64_dec_unchecked - decrement atomic64 variable
7835 + * @v: pointer to type atomic64_t
7836 + *
7837 + * Atomically decrements @v by 1.
7838 + */
7839 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7840 +{
7841 + asm volatile(LOCK_PREFIX "decq %0\n"
7842 : "=m" (v->counter)
7843 : "m" (v->counter));
7844 }
7845 @@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(
7846 {
7847 unsigned char c;
7848
7849 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
7850 + asm volatile(LOCK_PREFIX "decq %0\n"
7851 +
7852 +#ifdef CONFIG_PAX_REFCOUNT
7853 + "jno 0f\n"
7854 + LOCK_PREFIX "incq %0\n"
7855 + "int $4\n0:\n"
7856 + _ASM_EXTABLE(0b, 0b)
7857 +#endif
7858 +
7859 + "sete %1\n"
7860 : "=m" (v->counter), "=qm" (c)
7861 : "m" (v->counter) : "memory");
7862 return c != 0;
7863 @@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(
7864 {
7865 unsigned char c;
7866
7867 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
7868 + asm volatile(LOCK_PREFIX "incq %0\n"
7869 +
7870 +#ifdef CONFIG_PAX_REFCOUNT
7871 + "jno 0f\n"
7872 + LOCK_PREFIX "decq %0\n"
7873 + "int $4\n0:\n"
7874 + _ASM_EXTABLE(0b, 0b)
7875 +#endif
7876 +
7877 + "sete %1\n"
7878 : "=m" (v->counter), "=qm" (c)
7879 : "m" (v->counter) : "memory");
7880 return c != 0;
7881 @@ -337,7 +652,16 @@ static inline int atomic64_add_negative(
7882 {
7883 unsigned char c;
7884
7885 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
7886 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
7887 +
7888 +#ifdef CONFIG_PAX_REFCOUNT
7889 + "jno 0f\n"
7890 + LOCK_PREFIX "subq %2,%0\n"
7891 + "int $4\n0:\n"
7892 + _ASM_EXTABLE(0b, 0b)
7893 +#endif
7894 +
7895 + "sets %1\n"
7896 : "=m" (v->counter), "=qm" (c)
7897 : "er" (i), "m" (v->counter) : "memory");
7898 return c;
7899 @@ -353,7 +677,31 @@ static inline int atomic64_add_negative(
7900 static inline long atomic64_add_return(long i, atomic64_t *v)
7901 {
7902 long __i = i;
7903 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
7904 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
7905 +
7906 +#ifdef CONFIG_PAX_REFCOUNT
7907 + "jno 0f\n"
7908 + "movq %0, %1\n"
7909 + "int $4\n0:\n"
7910 + _ASM_EXTABLE(0b, 0b)
7911 +#endif
7912 +
7913 + : "+r" (i), "+m" (v->counter)
7914 + : : "memory");
7915 + return i + __i;
7916 +}
7917 +
7918 +/**
7919 + * atomic64_add_return_unchecked - add and return
7920 + * @i: integer value to add
7921 + * @v: pointer to type atomic64_unchecked_t
7922 + *
7923 + * Atomically adds @i to @v and returns @i + @v
7924 + */
7925 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7926 +{
7927 + long __i = i;
7928 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
7929 : "+r" (i), "+m" (v->counter)
7930 : : "memory");
7931 return i + __i;
7932 @@ -365,6 +713,10 @@ static inline long atomic64_sub_return(l
7933 }
7934
7935 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
7936 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7937 +{
7938 + return atomic64_add_return_unchecked(1, v);
7939 +}
7940 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
7941
7942 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
7943 @@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atom
7944 return cmpxchg(&v->counter, old, new);
7945 }
7946
7947 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
7948 +{
7949 + return cmpxchg(&v->counter, old, new);
7950 +}
7951 +
7952 static inline long atomic64_xchg(atomic64_t *v, long new)
7953 {
7954 return xchg(&v->counter, new);
7955 }
7956
7957 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
7958 +{
7959 + return xchg(&v->counter, new);
7960 +}
7961 +
7962 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
7963 {
7964 return cmpxchg(&v->counter, old, new);
7965 }
7966
7967 +static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7968 +{
7969 + return cmpxchg(&v->counter, old, new);
7970 +}
7971 +
7972 static inline long atomic_xchg(atomic_t *v, int new)
7973 {
7974 return xchg(&v->counter, new);
7975 }
7976
7977 +static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7978 +{
7979 + return xchg(&v->counter, new);
7980 +}
7981 +
7982 /**
7983 * atomic_add_unless - add unless the number is a given value
7984 * @v: pointer of type atomic_t
7985 @@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t
7986 */
7987 static inline int atomic_add_unless(atomic_t *v, int a, int u)
7988 {
7989 - int c, old;
7990 + int c, old, new;
7991 c = atomic_read(v);
7992 for (;;) {
7993 - if (unlikely(c == (u)))
7994 + if (unlikely(c == u))
7995 break;
7996 - old = atomic_cmpxchg((v), c, c + (a));
7997 +
7998 + asm volatile("addl %2,%0\n"
7999 +
8000 +#ifdef CONFIG_PAX_REFCOUNT
8001 + "jno 0f\n"
8002 + "subl %2,%0\n"
8003 + "int $4\n0:\n"
8004 + _ASM_EXTABLE(0b, 0b)
8005 +#endif
8006 +
8007 + : "=r" (new)
8008 + : "0" (c), "ir" (a));
8009 +
8010 + old = atomic_cmpxchg(v, c, new);
8011 if (likely(old == c))
8012 break;
8013 c = old;
8014 }
8015 - return c != (u);
8016 + return c != u;
8017 }
8018
8019 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
8020 @@ -424,17 +809,30 @@ static inline int atomic_add_unless(atom
8021 */
8022 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
8023 {
8024 - long c, old;
8025 + long c, old, new;
8026 c = atomic64_read(v);
8027 for (;;) {
8028 - if (unlikely(c == (u)))
8029 + if (unlikely(c == u))
8030 break;
8031 - old = atomic64_cmpxchg((v), c, c + (a));
8032 +
8033 + asm volatile("addq %2,%0\n"
8034 +
8035 +#ifdef CONFIG_PAX_REFCOUNT
8036 + "jno 0f\n"
8037 + "subq %2,%0\n"
8038 + "int $4\n0:\n"
8039 + _ASM_EXTABLE(0b, 0b)
8040 +#endif
8041 +
8042 + : "=r" (new)
8043 + : "0" (c), "er" (a));
8044 +
8045 + old = atomic64_cmpxchg(v, c, new);
8046 if (likely(old == c))
8047 break;
8048 c = old;
8049 }
8050 - return c != (u);
8051 + return c != u;
8052 }
8053
8054 /**
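
Note on the atomic_t hunks above: they all follow one PAX_REFCOUNT pattern. The locked operation runs first, then "jno 0f" skips the error path when the CPU's overflow flag is clear; if the signed counter did overflow, the operation is undone and "int $4" raises the overflow trap so PaX can report the event instead of letting a reference count wrap. Counters where wrapping is harmless keep plain unchecked instructions via the new atomic_unchecked_t variants (the irq_err_count/irq_mis_count statistics in the hw_irq.h hunk further down are converted to exactly that type). A rough userspace analogue of the checked increment, using C11 atomics instead of the kernel's inline assembly; the function name and the reporting path are invented for the demo:

    #include <stdio.h>
    #include <stdatomic.h>
    #include <limits.h>

    static _Atomic int counter = INT_MAX;            /* primed so the next ++ would wrap */

    static void refcount_inc_checked(_Atomic int *v)
    {
        int old = atomic_fetch_add(v, 1);            /* plays the role of "lock incl %0" */
        if (old == INT_MAX) {                        /* overflow: the "jno 0f" branch not taken */
            atomic_fetch_sub(v, 1);                  /* undo, like the "lock decl %0" in the patch */
            fprintf(stderr, "refcount overflow caught, update undone\n"); /* stands in for "int $4" */
        }
    }

    int main(void)
    {
        refcount_inc_checked(&counter);
        printf("counter = %d\n", atomic_load(&counter));   /* still INT_MAX, not wrapped to INT_MIN */
        return 0;
    }

Run with the counter primed at INT_MAX, it prints the report and leaves the value saturated rather than wrapped.
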
8055 diff -urNp linux-2.6.32.44/arch/x86/include/asm/bitops.h linux-2.6.32.44/arch/x86/include/asm/bitops.h
8056 --- linux-2.6.32.44/arch/x86/include/asm/bitops.h 2011-03-27 14:31:47.000000000 -0400
8057 +++ linux-2.6.32.44/arch/x86/include/asm/bitops.h 2011-04-17 15:56:46.000000000 -0400
8058 @@ -38,7 +38,7 @@
8059 * a mask operation on a byte.
8060 */
8061 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
8062 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
8063 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
8064 #define CONST_MASK(nr) (1 << ((nr) & 7))
8065
8066 /**
8067 diff -urNp linux-2.6.32.44/arch/x86/include/asm/boot.h linux-2.6.32.44/arch/x86/include/asm/boot.h
8068 --- linux-2.6.32.44/arch/x86/include/asm/boot.h 2011-03-27 14:31:47.000000000 -0400
8069 +++ linux-2.6.32.44/arch/x86/include/asm/boot.h 2011-04-17 15:56:46.000000000 -0400
8070 @@ -11,10 +11,15 @@
8071 #include <asm/pgtable_types.h>
8072
8073 /* Physical address where kernel should be loaded. */
8074 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8075 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8076 + (CONFIG_PHYSICAL_ALIGN - 1)) \
8077 & ~(CONFIG_PHYSICAL_ALIGN - 1))
8078
8079 +#ifndef __ASSEMBLY__
8080 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
8081 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
8082 +#endif
8083 +
8084 /* Minimum kernel alignment, as a power of two */
8085 #ifdef CONFIG_X86_64
8086 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
8087 diff -urNp linux-2.6.32.44/arch/x86/include/asm/cacheflush.h linux-2.6.32.44/arch/x86/include/asm/cacheflush.h
8088 --- linux-2.6.32.44/arch/x86/include/asm/cacheflush.h 2011-03-27 14:31:47.000000000 -0400
8089 +++ linux-2.6.32.44/arch/x86/include/asm/cacheflush.h 2011-04-17 15:56:46.000000000 -0400
8090 @@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
8091 static inline unsigned long get_page_memtype(struct page *pg)
8092 {
8093 if (!PageUncached(pg) && !PageWC(pg))
8094 - return -1;
8095 + return ~0UL;
8096 else if (!PageUncached(pg) && PageWC(pg))
8097 return _PAGE_CACHE_WC;
8098 else if (PageUncached(pg) && !PageWC(pg))
8099 @@ -85,7 +85,7 @@ static inline void set_page_memtype(stru
8100 SetPageWC(pg);
8101 break;
8102 default:
8103 - case -1:
8104 + case ~0UL:
8105 ClearPageUncached(pg);
8106 ClearPageWC(pg);
8107 break;
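
The cacheflush.h change only respells the "no memtype" sentinel in the unsigned domain: get_page_memtype() returns unsigned long, so "return -1" already produced ULONG_MAX through implicit conversion, and "~0UL" (together with the matching "~0UL" case label) states that intent without leaning on sign conversion. A two-line check of the equivalence:

    #include <stdio.h>

    int main(void)
    {
        unsigned long from_minus_one = -1;     /* -1 converted to unsigned long wraps to ULONG_MAX */
        unsigned long spelled_out    = ~0UL;
        printf("%d\n", from_minus_one == spelled_out);   /* prints 1 on any conforming compiler */
        return 0;
    }
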
8108 diff -urNp linux-2.6.32.44/arch/x86/include/asm/cache.h linux-2.6.32.44/arch/x86/include/asm/cache.h
8109 --- linux-2.6.32.44/arch/x86/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
8110 +++ linux-2.6.32.44/arch/x86/include/asm/cache.h 2011-07-06 19:53:33.000000000 -0400
8111 @@ -5,9 +5,10 @@
8112
8113 /* L1 cache line size */
8114 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8115 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8116 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8117
8118 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
8119 +#define __read_only __attribute__((__section__(".data.read_only")))
8120
8121 #ifdef CONFIG_X86_VSMP
8122 /* vSMP Internode cacheline shift */
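
Two small things happen in cache.h: L1_CACHE_BYTES is built with _AC(1,UL) so it is an unsigned long constant in C code while staying usable from assembly, and a __read_only attribute is introduced that drops objects into a dedicated .data.read_only section, which other parts of the patch can then map read-only. A minimal illustration of the section-placement half, with a made-up macro name; in userspace the attribute only selects the section, it does not by itself make anything read-only:

    #include <stdio.h>

    #define __demo_read_only __attribute__((__section__(".data.read_only")))

    static int tunable __demo_read_only = 42;    /* lands in .data.read_only instead of .data */

    int main(void)
    {
        printf("tunable = %d\n", tunable);       /* inspect placement with: objdump -t a.out | grep tunable */
        return 0;
    }
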
8123 diff -urNp linux-2.6.32.44/arch/x86/include/asm/checksum_32.h linux-2.6.32.44/arch/x86/include/asm/checksum_32.h
8124 --- linux-2.6.32.44/arch/x86/include/asm/checksum_32.h 2011-03-27 14:31:47.000000000 -0400
8125 +++ linux-2.6.32.44/arch/x86/include/asm/checksum_32.h 2011-04-17 15:56:46.000000000 -0400
8126 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
8127 int len, __wsum sum,
8128 int *src_err_ptr, int *dst_err_ptr);
8129
8130 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
8131 + int len, __wsum sum,
8132 + int *src_err_ptr, int *dst_err_ptr);
8133 +
8134 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
8135 + int len, __wsum sum,
8136 + int *src_err_ptr, int *dst_err_ptr);
8137 +
8138 /*
8139 * Note: when you get a NULL pointer exception here this means someone
8140 * passed in an incorrect kernel address to one of these functions.
8141 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
8142 int *err_ptr)
8143 {
8144 might_sleep();
8145 - return csum_partial_copy_generic((__force void *)src, dst,
8146 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
8147 len, sum, err_ptr, NULL);
8148 }
8149
8150 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
8151 {
8152 might_sleep();
8153 if (access_ok(VERIFY_WRITE, dst, len))
8154 - return csum_partial_copy_generic(src, (__force void *)dst,
8155 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
8156 len, sum, NULL, err_ptr);
8157
8158 if (len)
8159 diff -urNp linux-2.6.32.44/arch/x86/include/asm/desc_defs.h linux-2.6.32.44/arch/x86/include/asm/desc_defs.h
8160 --- linux-2.6.32.44/arch/x86/include/asm/desc_defs.h 2011-03-27 14:31:47.000000000 -0400
8161 +++ linux-2.6.32.44/arch/x86/include/asm/desc_defs.h 2011-04-17 15:56:46.000000000 -0400
8162 @@ -31,6 +31,12 @@ struct desc_struct {
8163 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
8164 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
8165 };
8166 + struct {
8167 + u16 offset_low;
8168 + u16 seg;
8169 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
8170 + unsigned offset_high: 16;
8171 + } gate;
8172 };
8173 } __attribute__((packed));
8174
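
The new gate member gives the descriptor union an explicit view of a 32-bit gate: the handler offset split around the selector and the type/s/dpl/p attribute bits, which the reworked pack_gate() in the desc.h hunk below then fills field by field instead of shifting raw a/b words. A standalone round-trip sketch with the same field widths; the selector and type values are illustrative, not taken from the patch:

    #include <stdio.h>
    #include <stdint.h>

    struct gate32 {
        uint16_t offset_low;
        uint16_t seg;
        unsigned reserved : 8, type : 4, s : 1, dpl : 2, p : 1;
        unsigned offset_high : 16;
    };

    int main(void)
    {
        uint32_t handler = 0xc01234ab;                     /* example handler address */
        struct gate32 g = {
            .offset_low  = handler & 0xffff,
            .seg         = 0x60,                           /* e.g. a kernel code selector */
            .type        = 0xe,                            /* 32-bit interrupt gate */
            .dpl         = 0,
            .p           = 1,
            .offset_high = handler >> 16,
        };
        uint32_t back = ((uint32_t)g.offset_high << 16) | g.offset_low;
        printf("reassembled offset %#x, match=%d\n", back, back == handler);
        return 0;
    }
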
8175 diff -urNp linux-2.6.32.44/arch/x86/include/asm/desc.h linux-2.6.32.44/arch/x86/include/asm/desc.h
8176 --- linux-2.6.32.44/arch/x86/include/asm/desc.h 2011-03-27 14:31:47.000000000 -0400
8177 +++ linux-2.6.32.44/arch/x86/include/asm/desc.h 2011-04-23 12:56:10.000000000 -0400
8178 @@ -4,6 +4,7 @@
8179 #include <asm/desc_defs.h>
8180 #include <asm/ldt.h>
8181 #include <asm/mmu.h>
8182 +#include <asm/pgtable.h>
8183 #include <linux/smp.h>
8184
8185 static inline void fill_ldt(struct desc_struct *desc,
8186 @@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_
8187 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
8188 desc->type = (info->read_exec_only ^ 1) << 1;
8189 desc->type |= info->contents << 2;
8190 + desc->type |= info->seg_not_present ^ 1;
8191 desc->s = 1;
8192 desc->dpl = 0x3;
8193 desc->p = info->seg_not_present ^ 1;
8194 @@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_
8195 }
8196
8197 extern struct desc_ptr idt_descr;
8198 -extern gate_desc idt_table[];
8199 -
8200 -struct gdt_page {
8201 - struct desc_struct gdt[GDT_ENTRIES];
8202 -} __attribute__((aligned(PAGE_SIZE)));
8203 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
8204 +extern gate_desc idt_table[256];
8205
8206 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
8207 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
8208 {
8209 - return per_cpu(gdt_page, cpu).gdt;
8210 + return cpu_gdt_table[cpu];
8211 }
8212
8213 #ifdef CONFIG_X86_64
8214 @@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *
8215 unsigned long base, unsigned dpl, unsigned flags,
8216 unsigned short seg)
8217 {
8218 - gate->a = (seg << 16) | (base & 0xffff);
8219 - gate->b = (base & 0xffff0000) |
8220 - (((0x80 | type | (dpl << 5)) & 0xff) << 8);
8221 + gate->gate.offset_low = base;
8222 + gate->gate.seg = seg;
8223 + gate->gate.reserved = 0;
8224 + gate->gate.type = type;
8225 + gate->gate.s = 0;
8226 + gate->gate.dpl = dpl;
8227 + gate->gate.p = 1;
8228 + gate->gate.offset_high = base >> 16;
8229 }
8230
8231 #endif
8232 @@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(str
8233 static inline void native_write_idt_entry(gate_desc *idt, int entry,
8234 const gate_desc *gate)
8235 {
8236 + pax_open_kernel();
8237 memcpy(&idt[entry], gate, sizeof(*gate));
8238 + pax_close_kernel();
8239 }
8240
8241 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
8242 const void *desc)
8243 {
8244 + pax_open_kernel();
8245 memcpy(&ldt[entry], desc, 8);
8246 + pax_close_kernel();
8247 }
8248
8249 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
8250 @@ -139,7 +146,10 @@ static inline void native_write_gdt_entr
8251 size = sizeof(struct desc_struct);
8252 break;
8253 }
8254 +
8255 + pax_open_kernel();
8256 memcpy(&gdt[entry], desc, size);
8257 + pax_close_kernel();
8258 }
8259
8260 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
8261 @@ -211,7 +221,9 @@ static inline void native_set_ldt(const
8262
8263 static inline void native_load_tr_desc(void)
8264 {
8265 + pax_open_kernel();
8266 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
8267 + pax_close_kernel();
8268 }
8269
8270 static inline void native_load_gdt(const struct desc_ptr *dtr)
8271 @@ -246,8 +258,10 @@ static inline void native_load_tls(struc
8272 unsigned int i;
8273 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
8274
8275 + pax_open_kernel();
8276 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8277 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
8278 + pax_close_kernel();
8279 }
8280
8281 #define _LDT_empty(info) \
8282 @@ -309,7 +323,7 @@ static inline void set_desc_limit(struct
8283 desc->limit = (limit >> 16) & 0xf;
8284 }
8285
8286 -static inline void _set_gate(int gate, unsigned type, void *addr,
8287 +static inline void _set_gate(int gate, unsigned type, const void *addr,
8288 unsigned dpl, unsigned ist, unsigned seg)
8289 {
8290 gate_desc s;
8291 @@ -327,7 +341,7 @@ static inline void _set_gate(int gate, u
8292 * Pentium F0 0F bugfix can have resulted in the mapped
8293 * IDT being write-protected.
8294 */
8295 -static inline void set_intr_gate(unsigned int n, void *addr)
8296 +static inline void set_intr_gate(unsigned int n, const void *addr)
8297 {
8298 BUG_ON((unsigned)n > 0xFF);
8299 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
8300 @@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsig
8301 /*
8302 * This routine sets up an interrupt gate at directory privilege level 3.
8303 */
8304 -static inline void set_system_intr_gate(unsigned int n, void *addr)
8305 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
8306 {
8307 BUG_ON((unsigned)n > 0xFF);
8308 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8309 }
8310
8311 -static inline void set_system_trap_gate(unsigned int n, void *addr)
8312 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
8313 {
8314 BUG_ON((unsigned)n > 0xFF);
8315 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8316 }
8317
8318 -static inline void set_trap_gate(unsigned int n, void *addr)
8319 +static inline void set_trap_gate(unsigned int n, const void *addr)
8320 {
8321 BUG_ON((unsigned)n > 0xFF);
8322 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8323 @@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigne
8324 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8325 {
8326 BUG_ON((unsigned)n > 0xFF);
8327 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8328 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8329 }
8330
8331 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8332 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8333 {
8334 BUG_ON((unsigned)n > 0xFF);
8335 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8336 }
8337
8338 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8339 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8340 {
8341 BUG_ON((unsigned)n > 0xFF);
8342 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8343 }
8344
8345 +#ifdef CONFIG_X86_32
8346 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8347 +{
8348 + struct desc_struct d;
8349 +
8350 + if (likely(limit))
8351 + limit = (limit - 1UL) >> PAGE_SHIFT;
8352 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
8353 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8354 +}
8355 +#endif
8356 +
8357 #endif /* _ASM_X86_DESC_H */
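
Every direct write to the IDT, LDT, GDT or TR in the hunks above is now bracketed by pax_open_kernel()/pax_close_kernel(), whose definitions appear later in this patch (native versions plus paravirt hooks added to pv_mmu_ops): under KERNEXEC the descriptor tables live in read-only kernel memory, and the bracket temporarily lifts the kernel's own write protection for just the intended update. A loose userspace analogue of that bracketing, using mprotect() on a read-only page; the comments map onto the pattern, nothing here is kernel API:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long pagesz = sysconf(_SC_PAGESIZE);
        unsigned char *table = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (table == MAP_FAILED)
            return 1;
        memset(table, 0, pagesz);
        mprotect(table, pagesz, PROT_READ);                /* "descriptor table" is normally read-only */

        mprotect(table, pagesz, PROT_READ | PROT_WRITE);   /* pax_open_kernel() */
        table[0] = 0x42;                                   /* the one legitimate update */
        mprotect(table, pagesz, PROT_READ);                /* pax_close_kernel() */

        printf("table[0] = 0x%02x\n", table[0]);
        munmap(table, pagesz);
        return 0;
    }
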
8358 diff -urNp linux-2.6.32.44/arch/x86/include/asm/device.h linux-2.6.32.44/arch/x86/include/asm/device.h
8359 --- linux-2.6.32.44/arch/x86/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
8360 +++ linux-2.6.32.44/arch/x86/include/asm/device.h 2011-04-17 15:56:46.000000000 -0400
8361 @@ -6,7 +6,7 @@ struct dev_archdata {
8362 void *acpi_handle;
8363 #endif
8364 #ifdef CONFIG_X86_64
8365 -struct dma_map_ops *dma_ops;
8366 + const struct dma_map_ops *dma_ops;
8367 #endif
8368 #ifdef CONFIG_DMAR
8369 void *iommu; /* hook for IOMMU specific extension */
8370 diff -urNp linux-2.6.32.44/arch/x86/include/asm/dma-mapping.h linux-2.6.32.44/arch/x86/include/asm/dma-mapping.h
8371 --- linux-2.6.32.44/arch/x86/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
8372 +++ linux-2.6.32.44/arch/x86/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
8373 @@ -25,9 +25,9 @@ extern int iommu_merge;
8374 extern struct device x86_dma_fallback_dev;
8375 extern int panic_on_overflow;
8376
8377 -extern struct dma_map_ops *dma_ops;
8378 +extern const struct dma_map_ops *dma_ops;
8379
8380 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
8381 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
8382 {
8383 #ifdef CONFIG_X86_32
8384 return dma_ops;
8385 @@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dm
8386 /* Make sure we keep the same behaviour */
8387 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
8388 {
8389 - struct dma_map_ops *ops = get_dma_ops(dev);
8390 + const struct dma_map_ops *ops = get_dma_ops(dev);
8391 if (ops->mapping_error)
8392 return ops->mapping_error(dev, dma_addr);
8393
8394 @@ -122,7 +122,7 @@ static inline void *
8395 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
8396 gfp_t gfp)
8397 {
8398 - struct dma_map_ops *ops = get_dma_ops(dev);
8399 + const struct dma_map_ops *ops = get_dma_ops(dev);
8400 void *memory;
8401
8402 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
8403 @@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, s
8404 static inline void dma_free_coherent(struct device *dev, size_t size,
8405 void *vaddr, dma_addr_t bus)
8406 {
8407 - struct dma_map_ops *ops = get_dma_ops(dev);
8408 + const struct dma_map_ops *ops = get_dma_ops(dev);
8409
8410 WARN_ON(irqs_disabled()); /* for portability */
8411
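
The device.h and dma-mapping.h hunks above (like the iommu.h, kvm_host.h, microcode.h and pci_x86.h ones below) apply the same constification idea: tables of function pointers that never change after initialization are declared const, so they end up in read-only memory and cannot be retargeted by a stray or attacker-controlled write. A small sketch of the pattern with invented names:

    #include <stdio.h>

    struct demo_ops {
        int  (*read)(int reg);
        void (*write)(int reg, int val);
    };

    static int  demo_read(int reg)           { return reg * 2; }
    static void demo_write(int reg, int val) { printf("write %d <- %d\n", reg, val); }

    /* const: the whole table of pointers goes into .rodata */
    static const struct demo_ops ops = {
        .read  = demo_read,
        .write = demo_write,
    };

    int main(void)
    {
        ops.write(1, ops.read(21));
        /* ops.read = demo_read; */   /* rejected at compile time: assignment of read-only member */
        return 0;
    }
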
8412 diff -urNp linux-2.6.32.44/arch/x86/include/asm/e820.h linux-2.6.32.44/arch/x86/include/asm/e820.h
8413 --- linux-2.6.32.44/arch/x86/include/asm/e820.h 2011-03-27 14:31:47.000000000 -0400
8414 +++ linux-2.6.32.44/arch/x86/include/asm/e820.h 2011-04-17 15:56:46.000000000 -0400
8415 @@ -133,7 +133,7 @@ extern char *default_machine_specific_me
8416 #define ISA_END_ADDRESS 0x100000
8417 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
8418
8419 -#define BIOS_BEGIN 0x000a0000
8420 +#define BIOS_BEGIN 0x000c0000
8421 #define BIOS_END 0x00100000
8422
8423 #ifdef __KERNEL__
8424 diff -urNp linux-2.6.32.44/arch/x86/include/asm/elf.h linux-2.6.32.44/arch/x86/include/asm/elf.h
8425 --- linux-2.6.32.44/arch/x86/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
8426 +++ linux-2.6.32.44/arch/x86/include/asm/elf.h 2011-04-17 15:56:46.000000000 -0400
8427 @@ -257,7 +257,25 @@ extern int force_personality32;
8428 the loader. We need to make sure that it is out of the way of the program
8429 that it will "exec", and that there is sufficient room for the brk. */
8430
8431 +#ifdef CONFIG_PAX_SEGMEXEC
8432 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8433 +#else
8434 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8435 +#endif
8436 +
8437 +#ifdef CONFIG_PAX_ASLR
8438 +#ifdef CONFIG_X86_32
8439 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8440 +
8441 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8442 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8443 +#else
8444 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
8445 +
8446 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8447 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8448 +#endif
8449 +#endif
8450
8451 /* This yields a mask that user programs can use to figure out what
8452 instruction set this CPU supports. This could be done in user space,
8453 @@ -311,8 +329,7 @@ do { \
8454 #define ARCH_DLINFO \
8455 do { \
8456 if (vdso_enabled) \
8457 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8458 - (unsigned long)current->mm->context.vdso); \
8459 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);\
8460 } while (0)
8461
8462 #define AT_SYSINFO 32
8463 @@ -323,7 +340,7 @@ do { \
8464
8465 #endif /* !CONFIG_X86_32 */
8466
8467 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8468 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8469
8470 #define VDSO_ENTRY \
8471 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8472 @@ -337,7 +354,4 @@ extern int arch_setup_additional_pages(s
8473 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8474 #define compat_arch_setup_additional_pages syscall32_setup_pages
8475
8476 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8477 -#define arch_randomize_brk arch_randomize_brk
8478 -
8479 #endif /* _ASM_X86_ELF_H */
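
The PAX_DELTA_MMAP_LEN / PAX_DELTA_STACK_LEN values added above are the number of random bits PaX applies at page granularity when choosing mmap and stack bases, so the randomized span is 2^bits pages. A quick tally under the usual assumptions for this kernel (PAGE_SHIFT = 12 and TASK_SIZE_MAX_SHIFT = 47, so the native x86_64 case works out to 32 bits):

    #include <stdio.h>

    int main(void)
    {
        struct { const char *task; int bits; } cases[] = {
            { "i386, no SEGMEXEC",   16 },
            { "i386, SEGMEXEC",      15 },
            { "x86_64 native",       47 - 12 - 3 },   /* TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3 = 32 */
            { "x86_64, 32-bit task", 16 },
        };
        for (unsigned i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
            unsigned long long span = (1ULL << cases[i].bits) * 4096ULL;
            printf("%-20s %2d bits -> %llu MiB of randomization\n",
                   cases[i].task, cases[i].bits, span >> 20);
        }
        return 0;
    }
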
8480 diff -urNp linux-2.6.32.44/arch/x86/include/asm/emergency-restart.h linux-2.6.32.44/arch/x86/include/asm/emergency-restart.h
8481 --- linux-2.6.32.44/arch/x86/include/asm/emergency-restart.h 2011-03-27 14:31:47.000000000 -0400
8482 +++ linux-2.6.32.44/arch/x86/include/asm/emergency-restart.h 2011-05-22 23:02:06.000000000 -0400
8483 @@ -15,6 +15,6 @@ enum reboot_type {
8484
8485 extern enum reboot_type reboot_type;
8486
8487 -extern void machine_emergency_restart(void);
8488 +extern void machine_emergency_restart(void) __noreturn;
8489
8490 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8491 diff -urNp linux-2.6.32.44/arch/x86/include/asm/futex.h linux-2.6.32.44/arch/x86/include/asm/futex.h
8492 --- linux-2.6.32.44/arch/x86/include/asm/futex.h 2011-03-27 14:31:47.000000000 -0400
8493 +++ linux-2.6.32.44/arch/x86/include/asm/futex.h 2011-04-17 15:56:46.000000000 -0400
8494 @@ -12,16 +12,18 @@
8495 #include <asm/system.h>
8496
8497 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8498 + typecheck(u32 *, uaddr); \
8499 asm volatile("1:\t" insn "\n" \
8500 "2:\t.section .fixup,\"ax\"\n" \
8501 "3:\tmov\t%3, %1\n" \
8502 "\tjmp\t2b\n" \
8503 "\t.previous\n" \
8504 _ASM_EXTABLE(1b, 3b) \
8505 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8506 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
8507 : "i" (-EFAULT), "0" (oparg), "1" (0))
8508
8509 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8510 + typecheck(u32 *, uaddr); \
8511 asm volatile("1:\tmovl %2, %0\n" \
8512 "\tmovl\t%0, %3\n" \
8513 "\t" insn "\n" \
8514 @@ -34,10 +36,10 @@
8515 _ASM_EXTABLE(1b, 4b) \
8516 _ASM_EXTABLE(2b, 4b) \
8517 : "=&a" (oldval), "=&r" (ret), \
8518 - "+m" (*uaddr), "=&r" (tem) \
8519 + "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
8520 : "r" (oparg), "i" (-EFAULT), "1" (0))
8521
8522 -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
8523 +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8524 {
8525 int op = (encoded_op >> 28) & 7;
8526 int cmp = (encoded_op >> 24) & 15;
8527 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
8528
8529 switch (op) {
8530 case FUTEX_OP_SET:
8531 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8532 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8533 break;
8534 case FUTEX_OP_ADD:
8535 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8536 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8537 uaddr, oparg);
8538 break;
8539 case FUTEX_OP_OR:
8540 @@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser
8541 return ret;
8542 }
8543
8544 -static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
8545 +static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
8546 int newval)
8547 {
8548
8549 @@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_i
8550 return -ENOSYS;
8551 #endif
8552
8553 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
8554 + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8555 return -EFAULT;
8556
8557 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
8558 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
8559 "2:\t.section .fixup, \"ax\"\n"
8560 "3:\tmov %2, %0\n"
8561 "\tjmp 2b\n"
8562 "\t.previous\n"
8563 _ASM_EXTABLE(1b, 3b)
8564 - : "=a" (oldval), "+m" (*uaddr)
8565 + : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
8566 : "i" (-EFAULT), "r" (newval), "0" (oldval)
8567 : "memory"
8568 );
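
Besides redirecting the user accesses through __copyuser_seg and ____m() for UDEREF, the futex hunks retype uaddr as u32 __user * and drop a typecheck(u32 *, uaddr) statement into both __futex_atomic_op macros, so passing the old int * now produces a compile-time complaint instead of silently mixing types. A simplified userspace re-implementation of the typecheck idea (the kernel's real macro lives in linux/typecheck.h and differs in detail):

    #include <stdint.h>
    #include <stdio.h>

    /* warn at compile time if x is not of the given pointer type */
    #define typecheck(type, x) \
        do { type __dummy; __typeof__(x) __dummy2; (void)(&__dummy == &__dummy2); } while (0)

    int main(void)
    {
        uint32_t val = 7;
        uint32_t *uaddr = &val;

        typecheck(uint32_t *, uaddr);       /* fine, types match */
        /* int *wrong = (int *)&val;
           typecheck(uint32_t *, wrong); */ /* would warn: comparison of distinct pointer types */

        printf("%u\n", *uaddr);
        return 0;
    }
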
8569 diff -urNp linux-2.6.32.44/arch/x86/include/asm/hw_irq.h linux-2.6.32.44/arch/x86/include/asm/hw_irq.h
8570 --- linux-2.6.32.44/arch/x86/include/asm/hw_irq.h 2011-03-27 14:31:47.000000000 -0400
8571 +++ linux-2.6.32.44/arch/x86/include/asm/hw_irq.h 2011-05-04 17:56:28.000000000 -0400
8572 @@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
8573 extern void enable_IO_APIC(void);
8574
8575 /* Statistics */
8576 -extern atomic_t irq_err_count;
8577 -extern atomic_t irq_mis_count;
8578 +extern atomic_unchecked_t irq_err_count;
8579 +extern atomic_unchecked_t irq_mis_count;
8580
8581 /* EISA */
8582 extern void eisa_set_level_irq(unsigned int irq);
8583 diff -urNp linux-2.6.32.44/arch/x86/include/asm/i387.h linux-2.6.32.44/arch/x86/include/asm/i387.h
8584 --- linux-2.6.32.44/arch/x86/include/asm/i387.h 2011-03-27 14:31:47.000000000 -0400
8585 +++ linux-2.6.32.44/arch/x86/include/asm/i387.h 2011-04-17 15:56:46.000000000 -0400
8586 @@ -60,6 +60,11 @@ static inline int fxrstor_checking(struc
8587 {
8588 int err;
8589
8590 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8591 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8592 + fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
8593 +#endif
8594 +
8595 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
8596 "2:\n"
8597 ".section .fixup,\"ax\"\n"
8598 @@ -105,6 +110,11 @@ static inline int fxsave_user(struct i38
8599 {
8600 int err;
8601
8602 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8603 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8604 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8605 +#endif
8606 +
8607 asm volatile("1: rex64/fxsave (%[fx])\n\t"
8608 "2:\n"
8609 ".section .fixup,\"ax\"\n"
8610 @@ -195,13 +205,8 @@ static inline int fxrstor_checking(struc
8611 }
8612
8613 /* We need a safe address that is cheap to find and that is already
8614 - in L1 during context switch. The best choices are unfortunately
8615 - different for UP and SMP */
8616 -#ifdef CONFIG_SMP
8617 -#define safe_address (__per_cpu_offset[0])
8618 -#else
8619 -#define safe_address (kstat_cpu(0).cpustat.user)
8620 -#endif
8621 + in L1 during context switch. */
8622 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
8623
8624 /*
8625 * These must be called with preempt disabled
8626 @@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void
8627 struct thread_info *me = current_thread_info();
8628 preempt_disable();
8629 if (me->status & TS_USEDFPU)
8630 - __save_init_fpu(me->task);
8631 + __save_init_fpu(current);
8632 else
8633 clts();
8634 }
8635 diff -urNp linux-2.6.32.44/arch/x86/include/asm/io_32.h linux-2.6.32.44/arch/x86/include/asm/io_32.h
8636 --- linux-2.6.32.44/arch/x86/include/asm/io_32.h 2011-03-27 14:31:47.000000000 -0400
8637 +++ linux-2.6.32.44/arch/x86/include/asm/io_32.h 2011-04-17 15:56:46.000000000 -0400
8638 @@ -3,6 +3,7 @@
8639
8640 #include <linux/string.h>
8641 #include <linux/compiler.h>
8642 +#include <asm/processor.h>
8643
8644 /*
8645 * This file contains the definitions for the x86 IO instructions
8646 @@ -42,6 +43,17 @@
8647
8648 #ifdef __KERNEL__
8649
8650 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8651 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8652 +{
8653 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8654 +}
8655 +
8656 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8657 +{
8658 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8659 +}
8660 +
8661 #include <asm-generic/iomap.h>
8662
8663 #include <linux/vmalloc.h>
8664 diff -urNp linux-2.6.32.44/arch/x86/include/asm/io_64.h linux-2.6.32.44/arch/x86/include/asm/io_64.h
8665 --- linux-2.6.32.44/arch/x86/include/asm/io_64.h 2011-03-27 14:31:47.000000000 -0400
8666 +++ linux-2.6.32.44/arch/x86/include/asm/io_64.h 2011-04-17 15:56:46.000000000 -0400
8667 @@ -140,6 +140,17 @@ __OUTS(l)
8668
8669 #include <linux/vmalloc.h>
8670
8671 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8672 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8673 +{
8674 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8675 +}
8676 +
8677 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8678 +{
8679 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8680 +}
8681 +
8682 #include <asm-generic/iomap.h>
8683
8684 void __memcpy_fromio(void *, unsigned long, unsigned);
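
Both io_32.h and io_64.h gain ARCH_HAS_VALID_PHYS_ADDR_RANGE helpers that cap /dev/mem style accesses at the CPU's reported physical address width: the access is rejected unless its last page frame rounds to below 2^(x86_phys_bits - PAGE_SHIFT). A standalone restatement of the predicate, with phys_bits = 36 (64 GiB of physical space) as an assumed example value in place of boot_cpu_data.x86_phys_bits:

    #include <stdio.h>
    #include <stddef.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1ULL << PAGE_SHIFT)

    static int valid_phys_addr_range(unsigned long long addr, size_t count, int phys_bits)
    {
        return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) <
               (1ULL << (phys_bits - PAGE_SHIFT)) ? 1 : 0;
    }

    int main(void)
    {
        printf("%d\n", valid_phys_addr_range(0xfee00000ULL, 4096, 36));   /* 1: well below 64 GiB */
        printf("%d\n", valid_phys_addr_range(0x1000000000ULL, 4096, 36)); /* 0: starts at the 64 GiB limit */
        return 0;
    }
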
8685 diff -urNp linux-2.6.32.44/arch/x86/include/asm/iommu.h linux-2.6.32.44/arch/x86/include/asm/iommu.h
8686 --- linux-2.6.32.44/arch/x86/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
8687 +++ linux-2.6.32.44/arch/x86/include/asm/iommu.h 2011-04-17 15:56:46.000000000 -0400
8688 @@ -3,7 +3,7 @@
8689
8690 extern void pci_iommu_shutdown(void);
8691 extern void no_iommu_init(void);
8692 -extern struct dma_map_ops nommu_dma_ops;
8693 +extern const struct dma_map_ops nommu_dma_ops;
8694 extern int force_iommu, no_iommu;
8695 extern int iommu_detected;
8696 extern int iommu_pass_through;
8697 diff -urNp linux-2.6.32.44/arch/x86/include/asm/irqflags.h linux-2.6.32.44/arch/x86/include/asm/irqflags.h
8698 --- linux-2.6.32.44/arch/x86/include/asm/irqflags.h 2011-03-27 14:31:47.000000000 -0400
8699 +++ linux-2.6.32.44/arch/x86/include/asm/irqflags.h 2011-04-17 15:56:46.000000000 -0400
8700 @@ -142,6 +142,11 @@ static inline unsigned long __raw_local_
8701 sti; \
8702 sysexit
8703
8704 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
8705 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8706 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
8707 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8708 +
8709 #else
8710 #define INTERRUPT_RETURN iret
8711 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8712 diff -urNp linux-2.6.32.44/arch/x86/include/asm/kprobes.h linux-2.6.32.44/arch/x86/include/asm/kprobes.h
8713 --- linux-2.6.32.44/arch/x86/include/asm/kprobes.h 2011-03-27 14:31:47.000000000 -0400
8714 +++ linux-2.6.32.44/arch/x86/include/asm/kprobes.h 2011-04-23 12:56:12.000000000 -0400
8715 @@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
8716 #define BREAKPOINT_INSTRUCTION 0xcc
8717 #define RELATIVEJUMP_INSTRUCTION 0xe9
8718 #define MAX_INSN_SIZE 16
8719 -#define MAX_STACK_SIZE 64
8720 -#define MIN_STACK_SIZE(ADDR) \
8721 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8722 - THREAD_SIZE - (unsigned long)(ADDR))) \
8723 - ? (MAX_STACK_SIZE) \
8724 - : (((unsigned long)current_thread_info()) + \
8725 - THREAD_SIZE - (unsigned long)(ADDR)))
8726 +#define MAX_STACK_SIZE 64UL
8727 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8728
8729 #define flush_insn_slot(p) do { } while (0)
8730
8731 diff -urNp linux-2.6.32.44/arch/x86/include/asm/kvm_host.h linux-2.6.32.44/arch/x86/include/asm/kvm_host.h
8732 --- linux-2.6.32.44/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:01.000000000 -0400
8733 +++ linux-2.6.32.44/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:26.000000000 -0400
8734 @@ -536,7 +536,7 @@ struct kvm_x86_ops {
8735 const struct trace_print_flags *exit_reasons_str;
8736 };
8737
8738 -extern struct kvm_x86_ops *kvm_x86_ops;
8739 +extern const struct kvm_x86_ops *kvm_x86_ops;
8740
8741 int kvm_mmu_module_init(void);
8742 void kvm_mmu_module_exit(void);
8743 diff -urNp linux-2.6.32.44/arch/x86/include/asm/local.h linux-2.6.32.44/arch/x86/include/asm/local.h
8744 --- linux-2.6.32.44/arch/x86/include/asm/local.h 2011-03-27 14:31:47.000000000 -0400
8745 +++ linux-2.6.32.44/arch/x86/include/asm/local.h 2011-04-17 15:56:46.000000000 -0400
8746 @@ -18,26 +18,58 @@ typedef struct {
8747
8748 static inline void local_inc(local_t *l)
8749 {
8750 - asm volatile(_ASM_INC "%0"
8751 + asm volatile(_ASM_INC "%0\n"
8752 +
8753 +#ifdef CONFIG_PAX_REFCOUNT
8754 + "jno 0f\n"
8755 + _ASM_DEC "%0\n"
8756 + "int $4\n0:\n"
8757 + _ASM_EXTABLE(0b, 0b)
8758 +#endif
8759 +
8760 : "+m" (l->a.counter));
8761 }
8762
8763 static inline void local_dec(local_t *l)
8764 {
8765 - asm volatile(_ASM_DEC "%0"
8766 + asm volatile(_ASM_DEC "%0\n"
8767 +
8768 +#ifdef CONFIG_PAX_REFCOUNT
8769 + "jno 0f\n"
8770 + _ASM_INC "%0\n"
8771 + "int $4\n0:\n"
8772 + _ASM_EXTABLE(0b, 0b)
8773 +#endif
8774 +
8775 : "+m" (l->a.counter));
8776 }
8777
8778 static inline void local_add(long i, local_t *l)
8779 {
8780 - asm volatile(_ASM_ADD "%1,%0"
8781 + asm volatile(_ASM_ADD "%1,%0\n"
8782 +
8783 +#ifdef CONFIG_PAX_REFCOUNT
8784 + "jno 0f\n"
8785 + _ASM_SUB "%1,%0\n"
8786 + "int $4\n0:\n"
8787 + _ASM_EXTABLE(0b, 0b)
8788 +#endif
8789 +
8790 : "+m" (l->a.counter)
8791 : "ir" (i));
8792 }
8793
8794 static inline void local_sub(long i, local_t *l)
8795 {
8796 - asm volatile(_ASM_SUB "%1,%0"
8797 + asm volatile(_ASM_SUB "%1,%0\n"
8798 +
8799 +#ifdef CONFIG_PAX_REFCOUNT
8800 + "jno 0f\n"
8801 + _ASM_ADD "%1,%0\n"
8802 + "int $4\n0:\n"
8803 + _ASM_EXTABLE(0b, 0b)
8804 +#endif
8805 +
8806 : "+m" (l->a.counter)
8807 : "ir" (i));
8808 }
8809 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
8810 {
8811 unsigned char c;
8812
8813 - asm volatile(_ASM_SUB "%2,%0; sete %1"
8814 + asm volatile(_ASM_SUB "%2,%0\n"
8815 +
8816 +#ifdef CONFIG_PAX_REFCOUNT
8817 + "jno 0f\n"
8818 + _ASM_ADD "%2,%0\n"
8819 + "int $4\n0:\n"
8820 + _ASM_EXTABLE(0b, 0b)
8821 +#endif
8822 +
8823 + "sete %1\n"
8824 : "+m" (l->a.counter), "=qm" (c)
8825 : "ir" (i) : "memory");
8826 return c;
8827 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
8828 {
8829 unsigned char c;
8830
8831 - asm volatile(_ASM_DEC "%0; sete %1"
8832 + asm volatile(_ASM_DEC "%0\n"
8833 +
8834 +#ifdef CONFIG_PAX_REFCOUNT
8835 + "jno 0f\n"
8836 + _ASM_INC "%0\n"
8837 + "int $4\n0:\n"
8838 + _ASM_EXTABLE(0b, 0b)
8839 +#endif
8840 +
8841 + "sete %1\n"
8842 : "+m" (l->a.counter), "=qm" (c)
8843 : : "memory");
8844 return c != 0;
8845 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
8846 {
8847 unsigned char c;
8848
8849 - asm volatile(_ASM_INC "%0; sete %1"
8850 + asm volatile(_ASM_INC "%0\n"
8851 +
8852 +#ifdef CONFIG_PAX_REFCOUNT
8853 + "jno 0f\n"
8854 + _ASM_DEC "%0\n"
8855 + "int $4\n0:\n"
8856 + _ASM_EXTABLE(0b, 0b)
8857 +#endif
8858 +
8859 + "sete %1\n"
8860 : "+m" (l->a.counter), "=qm" (c)
8861 : : "memory");
8862 return c != 0;
8863 @@ -110,7 +169,16 @@ static inline int local_add_negative(lon
8864 {
8865 unsigned char c;
8866
8867 - asm volatile(_ASM_ADD "%2,%0; sets %1"
8868 + asm volatile(_ASM_ADD "%2,%0\n"
8869 +
8870 +#ifdef CONFIG_PAX_REFCOUNT
8871 + "jno 0f\n"
8872 + _ASM_SUB "%2,%0\n"
8873 + "int $4\n0:\n"
8874 + _ASM_EXTABLE(0b, 0b)
8875 +#endif
8876 +
8877 + "sets %1\n"
8878 : "+m" (l->a.counter), "=qm" (c)
8879 : "ir" (i) : "memory");
8880 return c;
8881 @@ -133,7 +201,15 @@ static inline long local_add_return(long
8882 #endif
8883 /* Modern 486+ processor */
8884 __i = i;
8885 - asm volatile(_ASM_XADD "%0, %1;"
8886 + asm volatile(_ASM_XADD "%0, %1\n"
8887 +
8888 +#ifdef CONFIG_PAX_REFCOUNT
8889 + "jno 0f\n"
8890 + _ASM_MOV "%0,%1\n"
8891 + "int $4\n0:\n"
8892 + _ASM_EXTABLE(0b, 0b)
8893 +#endif
8894 +
8895 : "+r" (i), "+m" (l->a.counter)
8896 : : "memory");
8897 return i + __i;
8898 diff -urNp linux-2.6.32.44/arch/x86/include/asm/microcode.h linux-2.6.32.44/arch/x86/include/asm/microcode.h
8899 --- linux-2.6.32.44/arch/x86/include/asm/microcode.h 2011-03-27 14:31:47.000000000 -0400
8900 +++ linux-2.6.32.44/arch/x86/include/asm/microcode.h 2011-04-17 15:56:46.000000000 -0400
8901 @@ -12,13 +12,13 @@ struct device;
8902 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
8903
8904 struct microcode_ops {
8905 - enum ucode_state (*request_microcode_user) (int cpu,
8906 + enum ucode_state (* const request_microcode_user) (int cpu,
8907 const void __user *buf, size_t size);
8908
8909 - enum ucode_state (*request_microcode_fw) (int cpu,
8910 + enum ucode_state (* const request_microcode_fw) (int cpu,
8911 struct device *device);
8912
8913 - void (*microcode_fini_cpu) (int cpu);
8914 + void (* const microcode_fini_cpu) (int cpu);
8915
8916 /*
8917 * The generic 'microcode_core' part guarantees that
8918 @@ -38,18 +38,18 @@ struct ucode_cpu_info {
8919 extern struct ucode_cpu_info ucode_cpu_info[];
8920
8921 #ifdef CONFIG_MICROCODE_INTEL
8922 -extern struct microcode_ops * __init init_intel_microcode(void);
8923 +extern const struct microcode_ops * __init init_intel_microcode(void);
8924 #else
8925 -static inline struct microcode_ops * __init init_intel_microcode(void)
8926 +static inline const struct microcode_ops * __init init_intel_microcode(void)
8927 {
8928 return NULL;
8929 }
8930 #endif /* CONFIG_MICROCODE_INTEL */
8931
8932 #ifdef CONFIG_MICROCODE_AMD
8933 -extern struct microcode_ops * __init init_amd_microcode(void);
8934 +extern const struct microcode_ops * __init init_amd_microcode(void);
8935 #else
8936 -static inline struct microcode_ops * __init init_amd_microcode(void)
8937 +static inline const struct microcode_ops * __init init_amd_microcode(void)
8938 {
8939 return NULL;
8940 }
8941 diff -urNp linux-2.6.32.44/arch/x86/include/asm/mman.h linux-2.6.32.44/arch/x86/include/asm/mman.h
8942 --- linux-2.6.32.44/arch/x86/include/asm/mman.h 2011-03-27 14:31:47.000000000 -0400
8943 +++ linux-2.6.32.44/arch/x86/include/asm/mman.h 2011-04-17 15:56:46.000000000 -0400
8944 @@ -5,4 +5,14 @@
8945
8946 #include <asm-generic/mman.h>
8947
8948 +#ifdef __KERNEL__
8949 +#ifndef __ASSEMBLY__
8950 +#ifdef CONFIG_X86_32
8951 +#define arch_mmap_check i386_mmap_check
8952 +int i386_mmap_check(unsigned long addr, unsigned long len,
8953 + unsigned long flags);
8954 +#endif
8955 +#endif
8956 +#endif
8957 +
8958 #endif /* _ASM_X86_MMAN_H */
8959 diff -urNp linux-2.6.32.44/arch/x86/include/asm/mmu_context.h linux-2.6.32.44/arch/x86/include/asm/mmu_context.h
8960 --- linux-2.6.32.44/arch/x86/include/asm/mmu_context.h 2011-03-27 14:31:47.000000000 -0400
8961 +++ linux-2.6.32.44/arch/x86/include/asm/mmu_context.h 2011-04-17 15:56:46.000000000 -0400
8962 @@ -24,6 +24,21 @@ void destroy_context(struct mm_struct *m
8963
8964 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
8965 {
8966 +
8967 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8968 + unsigned int i;
8969 + pgd_t *pgd;
8970 +
8971 + pax_open_kernel();
8972 + pgd = get_cpu_pgd(smp_processor_id());
8973 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
8974 + if (paravirt_enabled())
8975 + set_pgd(pgd+i, native_make_pgd(0));
8976 + else
8977 + pgd[i] = native_make_pgd(0);
8978 + pax_close_kernel();
8979 +#endif
8980 +
8981 #ifdef CONFIG_SMP
8982 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
8983 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
8984 @@ -34,16 +49,30 @@ static inline void switch_mm(struct mm_s
8985 struct task_struct *tsk)
8986 {
8987 unsigned cpu = smp_processor_id();
8988 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
8989 + int tlbstate = TLBSTATE_OK;
8990 +#endif
8991
8992 if (likely(prev != next)) {
8993 #ifdef CONFIG_SMP
8994 +#ifdef CONFIG_X86_32
8995 + tlbstate = percpu_read(cpu_tlbstate.state);
8996 +#endif
8997 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8998 percpu_write(cpu_tlbstate.active_mm, next);
8999 #endif
9000 cpumask_set_cpu(cpu, mm_cpumask(next));
9001
9002 /* Re-load page tables */
9003 +#ifdef CONFIG_PAX_PER_CPU_PGD
9004 + pax_open_kernel();
9005 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9006 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9007 + pax_close_kernel();
9008 + load_cr3(get_cpu_pgd(cpu));
9009 +#else
9010 load_cr3(next->pgd);
9011 +#endif
9012
9013 /* stop flush ipis for the previous mm */
9014 cpumask_clear_cpu(cpu, mm_cpumask(prev));
9015 @@ -53,9 +82,38 @@ static inline void switch_mm(struct mm_s
9016 */
9017 if (unlikely(prev->context.ldt != next->context.ldt))
9018 load_LDT_nolock(&next->context);
9019 - }
9020 +
9021 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9022 + if (!nx_enabled) {
9023 + smp_mb__before_clear_bit();
9024 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
9025 + smp_mb__after_clear_bit();
9026 + cpu_set(cpu, next->context.cpu_user_cs_mask);
9027 + }
9028 +#endif
9029 +
9030 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9031 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
9032 + prev->context.user_cs_limit != next->context.user_cs_limit))
9033 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9034 #ifdef CONFIG_SMP
9035 + else if (unlikely(tlbstate != TLBSTATE_OK))
9036 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9037 +#endif
9038 +#endif
9039 +
9040 + }
9041 else {
9042 +
9043 +#ifdef CONFIG_PAX_PER_CPU_PGD
9044 + pax_open_kernel();
9045 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9046 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9047 + pax_close_kernel();
9048 + load_cr3(get_cpu_pgd(cpu));
9049 +#endif
9050 +
9051 +#ifdef CONFIG_SMP
9052 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9053 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
9054
9055 @@ -64,11 +122,28 @@ static inline void switch_mm(struct mm_s
9056 * tlb flush IPI delivery. We must reload CR3
9057 * to make sure to use no freed page tables.
9058 */
9059 +
9060 +#ifndef CONFIG_PAX_PER_CPU_PGD
9061 load_cr3(next->pgd);
9062 +#endif
9063 +
9064 load_LDT_nolock(&next->context);
9065 +
9066 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
9067 + if (!nx_enabled)
9068 + cpu_set(cpu, next->context.cpu_user_cs_mask);
9069 +#endif
9070 +
9071 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9072 +#ifdef CONFIG_PAX_PAGEEXEC
9073 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
9074 +#endif
9075 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9076 +#endif
9077 +
9078 }
9079 - }
9080 #endif
9081 + }
9082 }
9083
9084 #define activate_mm(prev, next) \
9085 diff -urNp linux-2.6.32.44/arch/x86/include/asm/mmu.h linux-2.6.32.44/arch/x86/include/asm/mmu.h
9086 --- linux-2.6.32.44/arch/x86/include/asm/mmu.h 2011-03-27 14:31:47.000000000 -0400
9087 +++ linux-2.6.32.44/arch/x86/include/asm/mmu.h 2011-04-17 15:56:46.000000000 -0400
9088 @@ -9,10 +9,23 @@
9089 * we put the segment information here.
9090 */
9091 typedef struct {
9092 - void *ldt;
9093 + struct desc_struct *ldt;
9094 int size;
9095 struct mutex lock;
9096 - void *vdso;
9097 + unsigned long vdso;
9098 +
9099 +#ifdef CONFIG_X86_32
9100 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
9101 + unsigned long user_cs_base;
9102 + unsigned long user_cs_limit;
9103 +
9104 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9105 + cpumask_t cpu_user_cs_mask;
9106 +#endif
9107 +
9108 +#endif
9109 +#endif
9110 +
9111 } mm_context_t;
9112
9113 #ifdef CONFIG_SMP
9114 diff -urNp linux-2.6.32.44/arch/x86/include/asm/module.h linux-2.6.32.44/arch/x86/include/asm/module.h
9115 --- linux-2.6.32.44/arch/x86/include/asm/module.h 2011-03-27 14:31:47.000000000 -0400
9116 +++ linux-2.6.32.44/arch/x86/include/asm/module.h 2011-04-23 13:18:57.000000000 -0400
9117 @@ -5,6 +5,7 @@
9118
9119 #ifdef CONFIG_X86_64
9120 /* X86_64 does not define MODULE_PROC_FAMILY */
9121 +#define MODULE_PROC_FAMILY ""
9122 #elif defined CONFIG_M386
9123 #define MODULE_PROC_FAMILY "386 "
9124 #elif defined CONFIG_M486
9125 @@ -59,13 +60,36 @@
9126 #error unknown processor family
9127 #endif
9128
9129 -#ifdef CONFIG_X86_32
9130 -# ifdef CONFIG_4KSTACKS
9131 -# define MODULE_STACKSIZE "4KSTACKS "
9132 -# else
9133 -# define MODULE_STACKSIZE ""
9134 -# endif
9135 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
9136 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9137 +#define MODULE_PAX_UDEREF "UDEREF "
9138 +#else
9139 +#define MODULE_PAX_UDEREF ""
9140 +#endif
9141 +
9142 +#ifdef CONFIG_PAX_KERNEXEC
9143 +#define MODULE_PAX_KERNEXEC "KERNEXEC "
9144 +#else
9145 +#define MODULE_PAX_KERNEXEC ""
9146 +#endif
9147 +
9148 +#ifdef CONFIG_PAX_REFCOUNT
9149 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
9150 +#else
9151 +#define MODULE_PAX_REFCOUNT ""
9152 #endif
9153
9154 +#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
9155 +#define MODULE_STACKSIZE "4KSTACKS "
9156 +#else
9157 +#define MODULE_STACKSIZE ""
9158 +#endif
9159 +
9160 +#ifdef CONFIG_GRKERNSEC
9161 +#define MODULE_GRSEC "GRSECURITY "
9162 +#else
9163 +#define MODULE_GRSEC ""
9164 +#endif
9165 +
9166 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
9167 +
9168 #endif /* _ASM_X86_MODULE_H */
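
MODULE_ARCH_VERMAGIC now folds the enabled hardening options into the version-magic string, so a module built without the matching GRSECURITY/KERNEXEC/UDEREF/REFCOUNT settings is refused by the loader's vermagic comparison. What the string looks like for one assumed x86_64 configuration with all four enabled (MODULE_PROC_FAMILY and MODULE_STACKSIZE are empty there):

    #include <stdio.h>

    #define MODULE_PROC_FAMILY  ""
    #define MODULE_STACKSIZE    ""
    #define MODULE_GRSEC        "GRSECURITY "
    #define MODULE_PAX_KERNEXEC "KERNEXEC "
    #define MODULE_PAX_UDEREF   "UDEREF "
    #define MODULE_PAX_REFCOUNT "REFCOUNT "

    #define MODULE_ARCH_VERMAGIC \
        MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC \
        MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT

    int main(void)
    {
        /* prints: arch vermagic suffix: "GRSECURITY KERNEXEC UDEREF REFCOUNT " */
        printf("arch vermagic suffix: \"%s\"\n", MODULE_ARCH_VERMAGIC);
        return 0;
    }
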
9169 diff -urNp linux-2.6.32.44/arch/x86/include/asm/page_64_types.h linux-2.6.32.44/arch/x86/include/asm/page_64_types.h
9170 --- linux-2.6.32.44/arch/x86/include/asm/page_64_types.h 2011-03-27 14:31:47.000000000 -0400
9171 +++ linux-2.6.32.44/arch/x86/include/asm/page_64_types.h 2011-04-17 15:56:46.000000000 -0400
9172 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
9173
9174 /* duplicated to the one in bootmem.h */
9175 extern unsigned long max_pfn;
9176 -extern unsigned long phys_base;
9177 +extern const unsigned long phys_base;
9178
9179 extern unsigned long __phys_addr(unsigned long);
9180 #define __phys_reloc_hide(x) (x)
9181 diff -urNp linux-2.6.32.44/arch/x86/include/asm/paravirt.h linux-2.6.32.44/arch/x86/include/asm/paravirt.h
9182 --- linux-2.6.32.44/arch/x86/include/asm/paravirt.h 2011-03-27 14:31:47.000000000 -0400
9183 +++ linux-2.6.32.44/arch/x86/include/asm/paravirt.h 2011-04-17 15:56:46.000000000 -0400
9184 @@ -729,6 +729,21 @@ static inline void __set_fixmap(unsigned
9185 pv_mmu_ops.set_fixmap(idx, phys, flags);
9186 }
9187
9188 +#ifdef CONFIG_PAX_KERNEXEC
9189 +static inline unsigned long pax_open_kernel(void)
9190 +{
9191 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
9192 +}
9193 +
9194 +static inline unsigned long pax_close_kernel(void)
9195 +{
9196 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
9197 +}
9198 +#else
9199 +static inline unsigned long pax_open_kernel(void) { return 0; }
9200 +static inline unsigned long pax_close_kernel(void) { return 0; }
9201 +#endif
9202 +
9203 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
9204
9205 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
9206 @@ -945,7 +960,7 @@ extern void default_banner(void);
9207
9208 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
9209 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
9210 -#define PARA_INDIRECT(addr) *%cs:addr
9211 +#define PARA_INDIRECT(addr) *%ss:addr
9212 #endif
9213
9214 #define INTERRUPT_RETURN \
9215 @@ -1022,6 +1037,21 @@ extern void default_banner(void);
9216 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
9217 CLBR_NONE, \
9218 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
9219 +
9220 +#define GET_CR0_INTO_RDI \
9221 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
9222 + mov %rax,%rdi
9223 +
9224 +#define SET_RDI_INTO_CR0 \
9225 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
9226 +
9227 +#define GET_CR3_INTO_RDI \
9228 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
9229 + mov %rax,%rdi
9230 +
9231 +#define SET_RDI_INTO_CR3 \
9232 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
9233 +
9234 #endif /* CONFIG_X86_32 */
9235
9236 #endif /* __ASSEMBLY__ */
9237 diff -urNp linux-2.6.32.44/arch/x86/include/asm/paravirt_types.h linux-2.6.32.44/arch/x86/include/asm/paravirt_types.h
9238 --- linux-2.6.32.44/arch/x86/include/asm/paravirt_types.h 2011-03-27 14:31:47.000000000 -0400
9239 +++ linux-2.6.32.44/arch/x86/include/asm/paravirt_types.h 2011-08-05 20:33:55.000000000 -0400
9240 @@ -78,19 +78,19 @@ struct pv_init_ops {
9241 */
9242 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
9243 unsigned long addr, unsigned len);
9244 -};
9245 +} __no_const;
9246
9247
9248 struct pv_lazy_ops {
9249 /* Set deferred update mode, used for batching operations. */
9250 void (*enter)(void);
9251 void (*leave)(void);
9252 -};
9253 +} __no_const;
9254
9255 struct pv_time_ops {
9256 unsigned long long (*sched_clock)(void);
9257 unsigned long (*get_tsc_khz)(void);
9258 -};
9259 +} __no_const;
9260
9261 struct pv_cpu_ops {
9262 /* hooks for various privileged instructions */
9263 @@ -186,7 +186,7 @@ struct pv_cpu_ops {
9264
9265 void (*start_context_switch)(struct task_struct *prev);
9266 void (*end_context_switch)(struct task_struct *next);
9267 -};
9268 +} __no_const;
9269
9270 struct pv_irq_ops {
9271 /*
9272 @@ -217,7 +217,7 @@ struct pv_apic_ops {
9273 unsigned long start_eip,
9274 unsigned long start_esp);
9275 #endif
9276 -};
9277 +} __no_const;
9278
9279 struct pv_mmu_ops {
9280 unsigned long (*read_cr2)(void);
9281 @@ -316,6 +316,12 @@ struct pv_mmu_ops {
9282 an mfn. We can tell which is which from the index. */
9283 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
9284 phys_addr_t phys, pgprot_t flags);
9285 +
9286 +#ifdef CONFIG_PAX_KERNEXEC
9287 + unsigned long (*pax_open_kernel)(void);
9288 + unsigned long (*pax_close_kernel)(void);
9289 +#endif
9290 +
9291 };
9292
9293 struct raw_spinlock;
9294 @@ -326,7 +332,7 @@ struct pv_lock_ops {
9295 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
9296 int (*spin_trylock)(struct raw_spinlock *lock);
9297 void (*spin_unlock)(struct raw_spinlock *lock);
9298 -};
9299 +} __no_const;
9300
9301 /* This contains all the paravirt structures: we get a convenient
9302 * number for each function using the offset which we use to indicate
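
The __no_const annotation applied to these ops structures is introduced elsewhere in this patch, in the compiler headers; it marks a structure that consists of function pointers yet is legitimately written at runtime (here, by paravirt patching), so PaX's structure constification, when enabled, leaves it writable. A minimal sketch of the intended usage, with a hypothetical structure name:

struct example_ops {
	void (*hook)(void);
} __no_const;	/* only function pointers, but must stay writable at runtime */
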
9303 diff -urNp linux-2.6.32.44/arch/x86/include/asm/pci_x86.h linux-2.6.32.44/arch/x86/include/asm/pci_x86.h
9304 --- linux-2.6.32.44/arch/x86/include/asm/pci_x86.h 2011-03-27 14:31:47.000000000 -0400
9305 +++ linux-2.6.32.44/arch/x86/include/asm/pci_x86.h 2011-04-17 15:56:46.000000000 -0400
9306 @@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct
9307 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
9308
9309 struct pci_raw_ops {
9310 - int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9311 + int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9312 int reg, int len, u32 *val);
9313 - int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9314 + int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9315 int reg, int len, u32 val);
9316 };
9317
9318 -extern struct pci_raw_ops *raw_pci_ops;
9319 -extern struct pci_raw_ops *raw_pci_ext_ops;
9320 +extern const struct pci_raw_ops *raw_pci_ops;
9321 +extern const struct pci_raw_ops *raw_pci_ext_ops;
9322
9323 -extern struct pci_raw_ops pci_direct_conf1;
9324 +extern const struct pci_raw_ops pci_direct_conf1;
9325 extern bool port_cf9_safe;
9326
9327 /* arch_initcall level */
9328 diff -urNp linux-2.6.32.44/arch/x86/include/asm/pgalloc.h linux-2.6.32.44/arch/x86/include/asm/pgalloc.h
9329 --- linux-2.6.32.44/arch/x86/include/asm/pgalloc.h 2011-03-27 14:31:47.000000000 -0400
9330 +++ linux-2.6.32.44/arch/x86/include/asm/pgalloc.h 2011-04-17 15:56:46.000000000 -0400
9331 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
9332 pmd_t *pmd, pte_t *pte)
9333 {
9334 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9335 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9336 +}
9337 +
9338 +static inline void pmd_populate_user(struct mm_struct *mm,
9339 + pmd_t *pmd, pte_t *pte)
9340 +{
9341 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9342 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9343 }
9344
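
The split above gives kernel page tables _KERNPG_TABLE (no _PAGE_USER) while user page tables keep _PAGE_TABLE. On x86 the two flag sets differ exactly by the user bit, which a build-time check can document; this is illustrative only and not part of the patch:

static inline void pgtable_flag_contrast(void)
{
	/* _PAGE_TABLE = PRESENT|RW|USER|ACCESSED|DIRTY, _KERNPG_TABLE drops USER */
	BUILD_BUG_ON(_PAGE_TABLE != (_KERNPG_TABLE | _PAGE_USER));
}
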
9345 diff -urNp linux-2.6.32.44/arch/x86/include/asm/pgtable-2level.h linux-2.6.32.44/arch/x86/include/asm/pgtable-2level.h
9346 --- linux-2.6.32.44/arch/x86/include/asm/pgtable-2level.h 2011-03-27 14:31:47.000000000 -0400
9347 +++ linux-2.6.32.44/arch/x86/include/asm/pgtable-2level.h 2011-04-17 15:56:46.000000000 -0400
9348 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
9349
9350 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9351 {
9352 + pax_open_kernel();
9353 *pmdp = pmd;
9354 + pax_close_kernel();
9355 }
9356
9357 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9358 diff -urNp linux-2.6.32.44/arch/x86/include/asm/pgtable_32.h linux-2.6.32.44/arch/x86/include/asm/pgtable_32.h
9359 --- linux-2.6.32.44/arch/x86/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
9360 +++ linux-2.6.32.44/arch/x86/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
9361 @@ -26,9 +26,6 @@
9362 struct mm_struct;
9363 struct vm_area_struct;
9364
9365 -extern pgd_t swapper_pg_dir[1024];
9366 -extern pgd_t trampoline_pg_dir[1024];
9367 -
9368 static inline void pgtable_cache_init(void) { }
9369 static inline void check_pgt_cache(void) { }
9370 void paging_init(void);
9371 @@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, u
9372 # include <asm/pgtable-2level.h>
9373 #endif
9374
9375 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9376 +extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
9377 +#ifdef CONFIG_X86_PAE
9378 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9379 +#endif
9380 +
9381 #if defined(CONFIG_HIGHPTE)
9382 #define __KM_PTE \
9383 (in_nmi() ? KM_NMI_PTE : \
9384 @@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, u
9385 /* Clear a kernel PTE and flush it from the TLB */
9386 #define kpte_clear_flush(ptep, vaddr) \
9387 do { \
9388 + pax_open_kernel(); \
9389 pte_clear(&init_mm, (vaddr), (ptep)); \
9390 + pax_close_kernel(); \
9391 __flush_tlb_one((vaddr)); \
9392 } while (0)
9393
9394 @@ -85,6 +90,9 @@ do { \
9395
9396 #endif /* !__ASSEMBLY__ */
9397
9398 +#define HAVE_ARCH_UNMAPPED_AREA
9399 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9400 +
9401 /*
9402 * kern_addr_valid() is (1) for FLATMEM and (0) for
9403 * SPARSEMEM and DISCONTIGMEM
9404 diff -urNp linux-2.6.32.44/arch/x86/include/asm/pgtable_32_types.h linux-2.6.32.44/arch/x86/include/asm/pgtable_32_types.h
9405 --- linux-2.6.32.44/arch/x86/include/asm/pgtable_32_types.h 2011-03-27 14:31:47.000000000 -0400
9406 +++ linux-2.6.32.44/arch/x86/include/asm/pgtable_32_types.h 2011-04-17 15:56:46.000000000 -0400
9407 @@ -8,7 +8,7 @@
9408 */
9409 #ifdef CONFIG_X86_PAE
9410 # include <asm/pgtable-3level_types.h>
9411 -# define PMD_SIZE (1UL << PMD_SHIFT)
9412 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9413 # define PMD_MASK (~(PMD_SIZE - 1))
9414 #else
9415 # include <asm/pgtable-2level_types.h>
9416 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
9417 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9418 #endif
9419
9420 +#ifdef CONFIG_PAX_KERNEXEC
9421 +#ifndef __ASSEMBLY__
9422 +extern unsigned char MODULES_EXEC_VADDR[];
9423 +extern unsigned char MODULES_EXEC_END[];
9424 +#endif
9425 +#include <asm/boot.h>
9426 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9427 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9428 +#else
9429 +#define ktla_ktva(addr) (addr)
9430 +#define ktva_ktla(addr) (addr)
9431 +#endif
9432 +
9433 #define MODULES_VADDR VMALLOC_START
9434 #define MODULES_END VMALLOC_END
9435 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
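
ktla_ktva()/ktva_ktla() translate between the kernel text's linear address and its virtual alias when KERNEXEC relocates the text mapping; without KERNEXEC both are the identity. Either way the two macros compose to the identity, which is the property callers rely on. Illustrative check with a hypothetical helper name:

static inline int ktla_roundtrip_ok(unsigned long addr)
{
	return ktva_ktla(ktla_ktva(addr)) == addr;	/* always true */
}
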
9436 diff -urNp linux-2.6.32.44/arch/x86/include/asm/pgtable-3level.h linux-2.6.32.44/arch/x86/include/asm/pgtable-3level.h
9437 --- linux-2.6.32.44/arch/x86/include/asm/pgtable-3level.h 2011-03-27 14:31:47.000000000 -0400
9438 +++ linux-2.6.32.44/arch/x86/include/asm/pgtable-3level.h 2011-04-17 15:56:46.000000000 -0400
9439 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
9440
9441 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9442 {
9443 + pax_open_kernel();
9444 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9445 + pax_close_kernel();
9446 }
9447
9448 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9449 {
9450 + pax_open_kernel();
9451 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9452 + pax_close_kernel();
9453 }
9454
9455 /*
9456 diff -urNp linux-2.6.32.44/arch/x86/include/asm/pgtable_64.h linux-2.6.32.44/arch/x86/include/asm/pgtable_64.h
9457 --- linux-2.6.32.44/arch/x86/include/asm/pgtable_64.h 2011-03-27 14:31:47.000000000 -0400
9458 +++ linux-2.6.32.44/arch/x86/include/asm/pgtable_64.h 2011-04-17 15:56:46.000000000 -0400
9459 @@ -16,10 +16,13 @@
9460
9461 extern pud_t level3_kernel_pgt[512];
9462 extern pud_t level3_ident_pgt[512];
9463 +extern pud_t level3_vmalloc_pgt[512];
9464 +extern pud_t level3_vmemmap_pgt[512];
9465 +extern pud_t level2_vmemmap_pgt[512];
9466 extern pmd_t level2_kernel_pgt[512];
9467 extern pmd_t level2_fixmap_pgt[512];
9468 -extern pmd_t level2_ident_pgt[512];
9469 -extern pgd_t init_level4_pgt[];
9470 +extern pmd_t level2_ident_pgt[512*2];
9471 +extern pgd_t init_level4_pgt[512];
9472
9473 #define swapper_pg_dir init_level4_pgt
9474
9475 @@ -74,7 +77,9 @@ static inline pte_t native_ptep_get_and_
9476
9477 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9478 {
9479 + pax_open_kernel();
9480 *pmdp = pmd;
9481 + pax_close_kernel();
9482 }
9483
9484 static inline void native_pmd_clear(pmd_t *pmd)
9485 @@ -94,7 +99,9 @@ static inline void native_pud_clear(pud_
9486
9487 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9488 {
9489 + pax_open_kernel();
9490 *pgdp = pgd;
9491 + pax_close_kernel();
9492 }
9493
9494 static inline void native_pgd_clear(pgd_t *pgd)
9495 diff -urNp linux-2.6.32.44/arch/x86/include/asm/pgtable_64_types.h linux-2.6.32.44/arch/x86/include/asm/pgtable_64_types.h
9496 --- linux-2.6.32.44/arch/x86/include/asm/pgtable_64_types.h 2011-03-27 14:31:47.000000000 -0400
9497 +++ linux-2.6.32.44/arch/x86/include/asm/pgtable_64_types.h 2011-04-17 15:56:46.000000000 -0400
9498 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9499 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9500 #define MODULES_END _AC(0xffffffffff000000, UL)
9501 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9502 +#define MODULES_EXEC_VADDR MODULES_VADDR
9503 +#define MODULES_EXEC_END MODULES_END
9504 +
9505 +#define ktla_ktva(addr) (addr)
9506 +#define ktva_ktla(addr) (addr)
9507
9508 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9509 diff -urNp linux-2.6.32.44/arch/x86/include/asm/pgtable.h linux-2.6.32.44/arch/x86/include/asm/pgtable.h
9510 --- linux-2.6.32.44/arch/x86/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
9511 +++ linux-2.6.32.44/arch/x86/include/asm/pgtable.h 2011-04-17 15:56:46.000000000 -0400
9512 @@ -74,12 +74,51 @@ extern struct list_head pgd_list;
9513
9514 #define arch_end_context_switch(prev) do {} while(0)
9515
9516 +#define pax_open_kernel() native_pax_open_kernel()
9517 +#define pax_close_kernel() native_pax_close_kernel()
9518 #endif /* CONFIG_PARAVIRT */
9519
9520 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
9521 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9522 +
9523 +#ifdef CONFIG_PAX_KERNEXEC
9524 +static inline unsigned long native_pax_open_kernel(void)
9525 +{
9526 + unsigned long cr0;
9527 +
9528 + preempt_disable();
9529 + barrier();
9530 + cr0 = read_cr0() ^ X86_CR0_WP;
9531 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
9532 + write_cr0(cr0);
9533 + return cr0 ^ X86_CR0_WP;
9534 +}
9535 +
9536 +static inline unsigned long native_pax_close_kernel(void)
9537 +{
9538 + unsigned long cr0;
9539 +
9540 + cr0 = read_cr0() ^ X86_CR0_WP;
9541 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9542 + write_cr0(cr0);
9543 + barrier();
9544 + preempt_enable_no_resched();
9545 + return cr0 ^ X86_CR0_WP;
9546 +}
9547 +#else
9548 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
9549 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
9550 +#endif
9551 +
9552 /*
9553 * The following only work if pte_present() is true.
9554 * Undefined behaviour if not..
9555 */
9556 +static inline int pte_user(pte_t pte)
9557 +{
9558 + return pte_val(pte) & _PAGE_USER;
9559 +}
9560 +
9561 static inline int pte_dirty(pte_t pte)
9562 {
9563 return pte_flags(pte) & _PAGE_DIRTY;
9564 @@ -167,9 +206,29 @@ static inline pte_t pte_wrprotect(pte_t
9565 return pte_clear_flags(pte, _PAGE_RW);
9566 }
9567
9568 +static inline pte_t pte_mkread(pte_t pte)
9569 +{
9570 + return __pte(pte_val(pte) | _PAGE_USER);
9571 +}
9572 +
9573 static inline pte_t pte_mkexec(pte_t pte)
9574 {
9575 - return pte_clear_flags(pte, _PAGE_NX);
9576 +#ifdef CONFIG_X86_PAE
9577 + if (__supported_pte_mask & _PAGE_NX)
9578 + return pte_clear_flags(pte, _PAGE_NX);
9579 + else
9580 +#endif
9581 + return pte_set_flags(pte, _PAGE_USER);
9582 +}
9583 +
9584 +static inline pte_t pte_exprotect(pte_t pte)
9585 +{
9586 +#ifdef CONFIG_X86_PAE
9587 + if (__supported_pte_mask & _PAGE_NX)
9588 + return pte_set_flags(pte, _PAGE_NX);
9589 + else
9590 +#endif
9591 + return pte_clear_flags(pte, _PAGE_USER);
9592 }
9593
9594 static inline pte_t pte_mkdirty(pte_t pte)
9595 @@ -302,6 +361,15 @@ pte_t *populate_extra_pte(unsigned long
9596 #endif
9597
9598 #ifndef __ASSEMBLY__
9599 +
9600 +#ifdef CONFIG_PAX_PER_CPU_PGD
9601 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9602 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9603 +{
9604 + return cpu_pgd[cpu];
9605 +}
9606 +#endif
9607 +
9608 #include <linux/mm_types.h>
9609
9610 static inline int pte_none(pte_t pte)
9611 @@ -472,7 +540,7 @@ static inline pud_t *pud_offset(pgd_t *p
9612
9613 static inline int pgd_bad(pgd_t pgd)
9614 {
9615 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9616 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9617 }
9618
9619 static inline int pgd_none(pgd_t pgd)
9620 @@ -495,7 +563,12 @@ static inline int pgd_none(pgd_t pgd)
9621 * pgd_offset() returns a (pgd_t *)
9622 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9623 */
9624 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9625 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9626 +
9627 +#ifdef CONFIG_PAX_PER_CPU_PGD
9628 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9629 +#endif
9630 +
9631 /*
9632 * a shortcut which implies the use of the kernel's pgd, instead
9633 * of a process's
9634 @@ -506,6 +579,20 @@ static inline int pgd_none(pgd_t pgd)
9635 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9636 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9637
9638 +#ifdef CONFIG_X86_32
9639 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9640 +#else
9641 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9642 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9643 +
9644 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9645 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9646 +#else
9647 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9648 +#endif
9649 +
9650 +#endif
9651 +
9652 #ifndef __ASSEMBLY__
9653
9654 extern int direct_gbpages;
9655 @@ -611,11 +698,23 @@ static inline void ptep_set_wrprotect(st
9656 * dst and src can be on the same page, but the range must not overlap,
9657 * and must not cross a page boundary.
9658 */
9659 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9660 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9661 {
9662 - memcpy(dst, src, count * sizeof(pgd_t));
9663 + pax_open_kernel();
9664 + while (count--)
9665 + *dst++ = *src++;
9666 + pax_close_kernel();
9667 }
9668
9669 +#ifdef CONFIG_PAX_PER_CPU_PGD
9670 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9671 +#endif
9672 +
9673 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9674 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9675 +#else
9676 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9677 +#endif
9678
9679 #include <asm-generic/pgtable.h>
9680 #endif /* __ASSEMBLY__ */
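
native_pax_open_kernel()/native_pax_close_kernel() above implement the KERNEXEC write window by toggling CR0.WP with preemption disabled, so the window cannot migrate to another CPU and stays as short as possible. A simplified restatement of the same logic, with the sanity checks and return values omitted; the sketch_* names are not part of the patch:

static void sketch_open_kernel(void)
{
	preempt_disable();
	barrier();
	write_cr0(read_cr0() & ~X86_CR0_WP);	/* allow supervisor writes to read-only pages */
}

static void sketch_close_kernel(void)
{
	write_cr0(read_cr0() | X86_CR0_WP);	/* re-enable write protection */
	barrier();
	preempt_enable_no_resched();
}

clone_pgd_range() is likewise changed to perform its copy inside such a window, presumably because the destination PGD can itself be read-only under KERNEXEC.
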
9681 diff -urNp linux-2.6.32.44/arch/x86/include/asm/pgtable_types.h linux-2.6.32.44/arch/x86/include/asm/pgtable_types.h
9682 --- linux-2.6.32.44/arch/x86/include/asm/pgtable_types.h 2011-03-27 14:31:47.000000000 -0400
9683 +++ linux-2.6.32.44/arch/x86/include/asm/pgtable_types.h 2011-04-17 15:56:46.000000000 -0400
9684 @@ -16,12 +16,11 @@
9685 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9686 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9687 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9688 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9689 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9690 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9691 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9692 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9693 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9694 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9695 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9696 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9697
9698 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9699 @@ -39,7 +38,6 @@
9700 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9701 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9702 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9703 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9704 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9705 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9706 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9707 @@ -55,8 +53,10 @@
9708
9709 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9710 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9711 -#else
9712 +#elif defined(CONFIG_KMEMCHECK)
9713 #define _PAGE_NX (_AT(pteval_t, 0))
9714 +#else
9715 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9716 #endif
9717
9718 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9719 @@ -93,6 +93,9 @@
9720 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9721 _PAGE_ACCESSED)
9722
9723 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
9724 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
9725 +
9726 #define __PAGE_KERNEL_EXEC \
9727 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9728 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9729 @@ -103,8 +106,8 @@
9730 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9731 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9732 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9733 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9734 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
9735 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9736 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
9737 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9738 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
9739 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
9740 @@ -163,8 +166,8 @@
9741 * bits are combined, this will alow user to access the high address mapped
9742 * VDSO in the presence of CONFIG_COMPAT_VDSO
9743 */
9744 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9745 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9746 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9747 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9748 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9749 #endif
9750
9751 @@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t p
9752 {
9753 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9754 }
9755 +#endif
9756
9757 +#if PAGETABLE_LEVELS == 3
9758 +#include <asm-generic/pgtable-nopud.h>
9759 +#endif
9760 +
9761 +#if PAGETABLE_LEVELS == 2
9762 +#include <asm-generic/pgtable-nopmd.h>
9763 +#endif
9764 +
9765 +#ifndef __ASSEMBLY__
9766 #if PAGETABLE_LEVELS > 3
9767 typedef struct { pudval_t pud; } pud_t;
9768
9769 @@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pu
9770 return pud.pud;
9771 }
9772 #else
9773 -#include <asm-generic/pgtable-nopud.h>
9774 -
9775 static inline pudval_t native_pud_val(pud_t pud)
9776 {
9777 return native_pgd_val(pud.pgd);
9778 @@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pm
9779 return pmd.pmd;
9780 }
9781 #else
9782 -#include <asm-generic/pgtable-nopmd.h>
9783 -
9784 static inline pmdval_t native_pmd_val(pmd_t pmd)
9785 {
9786 return native_pgd_val(pmd.pud.pgd);
9787 @@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
9788
9789 extern pteval_t __supported_pte_mask;
9790 extern void set_nx(void);
9791 +
9792 +#ifdef CONFIG_X86_32
9793 +#ifdef CONFIG_X86_PAE
9794 extern int nx_enabled;
9795 +#else
9796 +#define nx_enabled (0)
9797 +#endif
9798 +#else
9799 +#define nx_enabled (1)
9800 +#endif
9801
9802 #define pgprot_writecombine pgprot_writecombine
9803 extern pgprot_t pgprot_writecombine(pgprot_t prot);
9804 diff -urNp linux-2.6.32.44/arch/x86/include/asm/processor.h linux-2.6.32.44/arch/x86/include/asm/processor.h
9805 --- linux-2.6.32.44/arch/x86/include/asm/processor.h 2011-04-22 19:16:29.000000000 -0400
9806 +++ linux-2.6.32.44/arch/x86/include/asm/processor.h 2011-05-11 18:25:15.000000000 -0400
9807 @@ -272,7 +272,7 @@ struct tss_struct {
9808
9809 } ____cacheline_aligned;
9810
9811 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
9812 +extern struct tss_struct init_tss[NR_CPUS];
9813
9814 /*
9815 * Save the original ist values for checking stack pointers during debugging
9816 @@ -888,11 +888,18 @@ static inline void spin_lock_prefetch(co
9817 */
9818 #define TASK_SIZE PAGE_OFFSET
9819 #define TASK_SIZE_MAX TASK_SIZE
9820 +
9821 +#ifdef CONFIG_PAX_SEGMEXEC
9822 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
9823 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
9824 +#else
9825 #define STACK_TOP TASK_SIZE
9826 -#define STACK_TOP_MAX STACK_TOP
9827 +#endif
9828 +
9829 +#define STACK_TOP_MAX TASK_SIZE
9830
9831 #define INIT_THREAD { \
9832 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
9833 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9834 .vm86_info = NULL, \
9835 .sysenter_cs = __KERNEL_CS, \
9836 .io_bitmap_ptr = NULL, \
9837 @@ -906,7 +913,7 @@ static inline void spin_lock_prefetch(co
9838 */
9839 #define INIT_TSS { \
9840 .x86_tss = { \
9841 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
9842 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9843 .ss0 = __KERNEL_DS, \
9844 .ss1 = __KERNEL_CS, \
9845 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
9846 @@ -917,11 +924,7 @@ static inline void spin_lock_prefetch(co
9847 extern unsigned long thread_saved_pc(struct task_struct *tsk);
9848
9849 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
9850 -#define KSTK_TOP(info) \
9851 -({ \
9852 - unsigned long *__ptr = (unsigned long *)(info); \
9853 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
9854 -})
9855 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
9856
9857 /*
9858 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
9859 @@ -936,7 +939,7 @@ extern unsigned long thread_saved_pc(str
9860 #define task_pt_regs(task) \
9861 ({ \
9862 struct pt_regs *__regs__; \
9863 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
9864 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
9865 __regs__ - 1; \
9866 })
9867
9868 @@ -946,13 +949,13 @@ extern unsigned long thread_saved_pc(str
9869 /*
9870 * User space process size. 47bits minus one guard page.
9871 */
9872 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
9873 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
9874
9875 /* This decides where the kernel will search for a free chunk of vm
9876 * space during mmap's.
9877 */
9878 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
9879 - 0xc0000000 : 0xFFFFe000)
9880 + 0xc0000000 : 0xFFFFf000)
9881
9882 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
9883 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
9884 @@ -963,11 +966,11 @@ extern unsigned long thread_saved_pc(str
9885 #define STACK_TOP_MAX TASK_SIZE_MAX
9886
9887 #define INIT_THREAD { \
9888 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9889 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9890 }
9891
9892 #define INIT_TSS { \
9893 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9894 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9895 }
9896
9897 /*
9898 @@ -989,6 +992,10 @@ extern void start_thread(struct pt_regs
9899 */
9900 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
9901
9902 +#ifdef CONFIG_PAX_SEGMEXEC
9903 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
9904 +#endif
9905 +
9906 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
9907
9908 /* Get/set a process' ability to use the timestamp counter instruction */
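
Under SEGMEXEC the 32-bit user address space is split in half, so STACK_TOP and the mmap base become per-task values. An illustrative resolution with the default PAGE_OFFSET of 0xc0000000; the helper is hypothetical, the patch defines STACK_TOP as a macro on current:

static unsigned long sketch_stack_top(struct task_struct *tsk)
{
	if (tsk->mm->pax_flags & MF_PAX_SEGMEXEC)
		return SEGMEXEC_TASK_SIZE;	/* TASK_SIZE / 2 = 0x60000000 */
	return TASK_SIZE;			/* PAGE_OFFSET   = 0xc0000000 */
}
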
9909 diff -urNp linux-2.6.32.44/arch/x86/include/asm/ptrace.h linux-2.6.32.44/arch/x86/include/asm/ptrace.h
9910 --- linux-2.6.32.44/arch/x86/include/asm/ptrace.h 2011-03-27 14:31:47.000000000 -0400
9911 +++ linux-2.6.32.44/arch/x86/include/asm/ptrace.h 2011-04-17 15:56:46.000000000 -0400
9912 @@ -151,28 +151,29 @@ static inline unsigned long regs_return_
9913 }
9914
9915 /*
9916 - * user_mode_vm(regs) determines whether a register set came from user mode.
9917 + * user_mode(regs) determines whether a register set came from user mode.
9918 * This is true if V8086 mode was enabled OR if the register set was from
9919 * protected mode with RPL-3 CS value. This tricky test checks that with
9920 * one comparison. Many places in the kernel can bypass this full check
9921 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
9922 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
9923 + * be used.
9924 */
9925 -static inline int user_mode(struct pt_regs *regs)
9926 +static inline int user_mode_novm(struct pt_regs *regs)
9927 {
9928 #ifdef CONFIG_X86_32
9929 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
9930 #else
9931 - return !!(regs->cs & 3);
9932 + return !!(regs->cs & SEGMENT_RPL_MASK);
9933 #endif
9934 }
9935
9936 -static inline int user_mode_vm(struct pt_regs *regs)
9937 +static inline int user_mode(struct pt_regs *regs)
9938 {
9939 #ifdef CONFIG_X86_32
9940 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
9941 USER_RPL;
9942 #else
9943 - return user_mode(regs);
9944 + return user_mode_novm(regs);
9945 #endif
9946 }
9947
9948 diff -urNp linux-2.6.32.44/arch/x86/include/asm/reboot.h linux-2.6.32.44/arch/x86/include/asm/reboot.h
9949 --- linux-2.6.32.44/arch/x86/include/asm/reboot.h 2011-03-27 14:31:47.000000000 -0400
9950 +++ linux-2.6.32.44/arch/x86/include/asm/reboot.h 2011-08-05 20:33:55.000000000 -0400
9951 @@ -6,19 +6,19 @@
9952 struct pt_regs;
9953
9954 struct machine_ops {
9955 - void (*restart)(char *cmd);
9956 - void (*halt)(void);
9957 - void (*power_off)(void);
9958 + void (* __noreturn restart)(char *cmd);
9959 + void (* __noreturn halt)(void);
9960 + void (* __noreturn power_off)(void);
9961 void (*shutdown)(void);
9962 void (*crash_shutdown)(struct pt_regs *);
9963 - void (*emergency_restart)(void);
9964 -};
9965 + void (* __noreturn emergency_restart)(void);
9966 +} __no_const;
9967
9968 extern struct machine_ops machine_ops;
9969
9970 void native_machine_crash_shutdown(struct pt_regs *regs);
9971 void native_machine_shutdown(void);
9972 -void machine_real_restart(const unsigned char *code, int length);
9973 +void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
9974
9975 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
9976 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
9977 diff -urNp linux-2.6.32.44/arch/x86/include/asm/rwsem.h linux-2.6.32.44/arch/x86/include/asm/rwsem.h
9978 --- linux-2.6.32.44/arch/x86/include/asm/rwsem.h 2011-03-27 14:31:47.000000000 -0400
9979 +++ linux-2.6.32.44/arch/x86/include/asm/rwsem.h 2011-04-17 15:56:46.000000000 -0400
9980 @@ -118,6 +118,14 @@ static inline void __down_read(struct rw
9981 {
9982 asm volatile("# beginning down_read\n\t"
9983 LOCK_PREFIX _ASM_INC "(%1)\n\t"
9984 +
9985 +#ifdef CONFIG_PAX_REFCOUNT
9986 + "jno 0f\n"
9987 + LOCK_PREFIX _ASM_DEC "(%1)\n\t"
9988 + "int $4\n0:\n"
9989 + _ASM_EXTABLE(0b, 0b)
9990 +#endif
9991 +
9992 /* adds 0x00000001, returns the old value */
9993 " jns 1f\n"
9994 " call call_rwsem_down_read_failed\n"
9995 @@ -139,6 +147,14 @@ static inline int __down_read_trylock(st
9996 "1:\n\t"
9997 " mov %1,%2\n\t"
9998 " add %3,%2\n\t"
9999 +
10000 +#ifdef CONFIG_PAX_REFCOUNT
10001 + "jno 0f\n"
10002 + "sub %3,%2\n"
10003 + "int $4\n0:\n"
10004 + _ASM_EXTABLE(0b, 0b)
10005 +#endif
10006 +
10007 " jle 2f\n\t"
10008 LOCK_PREFIX " cmpxchg %2,%0\n\t"
10009 " jnz 1b\n\t"
10010 @@ -160,6 +176,14 @@ static inline void __down_write_nested(s
10011 tmp = RWSEM_ACTIVE_WRITE_BIAS;
10012 asm volatile("# beginning down_write\n\t"
10013 LOCK_PREFIX " xadd %1,(%2)\n\t"
10014 +
10015 +#ifdef CONFIG_PAX_REFCOUNT
10016 + "jno 0f\n"
10017 + "mov %1,(%2)\n"
10018 + "int $4\n0:\n"
10019 + _ASM_EXTABLE(0b, 0b)
10020 +#endif
10021 +
10022 /* subtract 0x0000ffff, returns the old value */
10023 " test %1,%1\n\t"
10024 /* was the count 0 before? */
10025 @@ -198,6 +222,14 @@ static inline void __up_read(struct rw_s
10026 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
10027 asm volatile("# beginning __up_read\n\t"
10028 LOCK_PREFIX " xadd %1,(%2)\n\t"
10029 +
10030 +#ifdef CONFIG_PAX_REFCOUNT
10031 + "jno 0f\n"
10032 + "mov %1,(%2)\n"
10033 + "int $4\n0:\n"
10034 + _ASM_EXTABLE(0b, 0b)
10035 +#endif
10036 +
10037 /* subtracts 1, returns the old value */
10038 " jns 1f\n\t"
10039 " call call_rwsem_wake\n"
10040 @@ -216,6 +248,14 @@ static inline void __up_write(struct rw_
10041 rwsem_count_t tmp;
10042 asm volatile("# beginning __up_write\n\t"
10043 LOCK_PREFIX " xadd %1,(%2)\n\t"
10044 +
10045 +#ifdef CONFIG_PAX_REFCOUNT
10046 + "jno 0f\n"
10047 + "mov %1,(%2)\n"
10048 + "int $4\n0:\n"
10049 + _ASM_EXTABLE(0b, 0b)
10050 +#endif
10051 +
10052 /* tries to transition
10053 0xffff0001 -> 0x00000000 */
10054 " jz 1f\n"
10055 @@ -234,6 +274,14 @@ static inline void __downgrade_write(str
10056 {
10057 asm volatile("# beginning __downgrade_write\n\t"
10058 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
10059 +
10060 +#ifdef CONFIG_PAX_REFCOUNT
10061 + "jno 0f\n"
10062 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
10063 + "int $4\n0:\n"
10064 + _ASM_EXTABLE(0b, 0b)
10065 +#endif
10066 +
10067 /*
10068 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
10069 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
10070 @@ -253,7 +301,15 @@ static inline void __downgrade_write(str
10071 static inline void rwsem_atomic_add(rwsem_count_t delta,
10072 struct rw_semaphore *sem)
10073 {
10074 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
10075 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
10076 +
10077 +#ifdef CONFIG_PAX_REFCOUNT
10078 + "jno 0f\n"
10079 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
10080 + "int $4\n0:\n"
10081 + _ASM_EXTABLE(0b, 0b)
10082 +#endif
10083 +
10084 : "+m" (sem->count)
10085 : "er" (delta));
10086 }
10087 @@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic
10088 {
10089 rwsem_count_t tmp = delta;
10090
10091 - asm volatile(LOCK_PREFIX "xadd %0,%1"
10092 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
10093 +
10094 +#ifdef CONFIG_PAX_REFCOUNT
10095 + "jno 0f\n"
10096 + "mov %0,%1\n"
10097 + "int $4\n0:\n"
10098 + _ASM_EXTABLE(0b, 0b)
10099 +#endif
10100 +
10101 : "+r" (tmp), "+m" (sem->count)
10102 : : "memory");
10103
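
Every PAX_REFCOUNT hunk in this file follows the same pattern: perform the atomic update, and if the CPU's overflow flag is set afterwards, undo the update and execute int $4 so the overflow handler can terminate the offending task. A stand-alone illustration of the pattern on a plain counter; the function is hypothetical and the exception-table entry used by the patch is omitted:

static inline void refcount_inc_checked(int *counter)
{
	asm volatile(LOCK_PREFIX "incl %0\n\t"
		     "jno 1f\n\t"		/* no signed overflow: skip the fixup */
		     LOCK_PREFIX "decl %0\n\t"	/* roll the increment back */
		     "int $4\n"			/* raise #OF for the PaX handler */
		     "1:\n"
		     : "+m" (*counter));
}
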
10104 diff -urNp linux-2.6.32.44/arch/x86/include/asm/segment.h linux-2.6.32.44/arch/x86/include/asm/segment.h
10105 --- linux-2.6.32.44/arch/x86/include/asm/segment.h 2011-03-27 14:31:47.000000000 -0400
10106 +++ linux-2.6.32.44/arch/x86/include/asm/segment.h 2011-04-17 15:56:46.000000000 -0400
10107 @@ -62,8 +62,8 @@
10108 * 26 - ESPFIX small SS
10109 * 27 - per-cpu [ offset to per-cpu data area ]
10110 * 28 - stack_canary-20 [ for stack protector ]
10111 - * 29 - unused
10112 - * 30 - unused
10113 + * 29 - PCI BIOS CS
10114 + * 30 - PCI BIOS DS
10115 * 31 - TSS for double fault handler
10116 */
10117 #define GDT_ENTRY_TLS_MIN 6
10118 @@ -77,6 +77,8 @@
10119
10120 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
10121
10122 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
10123 +
10124 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
10125
10126 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
10127 @@ -88,7 +90,7 @@
10128 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
10129 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
10130
10131 -#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
10132 +#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
10133 #ifdef CONFIG_SMP
10134 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
10135 #else
10136 @@ -102,6 +104,12 @@
10137 #define __KERNEL_STACK_CANARY 0
10138 #endif
10139
10140 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
10141 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
10142 +
10143 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
10144 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
10145 +
10146 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
10147
10148 /*
10149 @@ -139,7 +147,7 @@
10150 */
10151
10152 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
10153 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
10154 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
10155
10156
10157 #else
10158 @@ -163,6 +171,8 @@
10159 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
10160 #define __USER32_DS __USER_DS
10161
10162 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
10163 +
10164 #define GDT_ENTRY_TSS 8 /* needs two entries */
10165 #define GDT_ENTRY_LDT 10 /* needs two entries */
10166 #define GDT_ENTRY_TLS_MIN 12
10167 @@ -183,6 +193,7 @@
10168 #endif
10169
10170 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
10171 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
10172 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
10173 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
10174 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
10175 diff -urNp linux-2.6.32.44/arch/x86/include/asm/smp.h linux-2.6.32.44/arch/x86/include/asm/smp.h
10176 --- linux-2.6.32.44/arch/x86/include/asm/smp.h 2011-03-27 14:31:47.000000000 -0400
10177 +++ linux-2.6.32.44/arch/x86/include/asm/smp.h 2011-08-05 20:33:55.000000000 -0400
10178 @@ -24,7 +24,7 @@ extern unsigned int num_processors;
10179 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
10180 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
10181 DECLARE_PER_CPU(u16, cpu_llc_id);
10182 -DECLARE_PER_CPU(int, cpu_number);
10183 +DECLARE_PER_CPU(unsigned int, cpu_number);
10184
10185 static inline struct cpumask *cpu_sibling_mask(int cpu)
10186 {
10187 @@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_ap
10188 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
10189
10190 /* Static state in head.S used to set up a CPU */
10191 -extern struct {
10192 - void *sp;
10193 - unsigned short ss;
10194 -} stack_start;
10195 +extern unsigned long stack_start; /* Initial stack pointer address */
10196
10197 struct smp_ops {
10198 void (*smp_prepare_boot_cpu)(void);
10199 @@ -60,7 +57,7 @@ struct smp_ops {
10200
10201 void (*send_call_func_ipi)(const struct cpumask *mask);
10202 void (*send_call_func_single_ipi)(int cpu);
10203 -};
10204 +} __no_const;
10205
10206 /* Globals due to paravirt */
10207 extern void set_cpu_sibling_map(int cpu);
10208 @@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitd
10209 extern int safe_smp_processor_id(void);
10210
10211 #elif defined(CONFIG_X86_64_SMP)
10212 -#define raw_smp_processor_id() (percpu_read(cpu_number))
10213 -
10214 -#define stack_smp_processor_id() \
10215 -({ \
10216 - struct thread_info *ti; \
10217 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
10218 - ti->cpu; \
10219 -})
10220 +#define raw_smp_processor_id() (percpu_read(cpu_number))
10221 +#define stack_smp_processor_id() raw_smp_processor_id()
10222 #define safe_smp_processor_id() smp_processor_id()
10223
10224 #endif
10225 diff -urNp linux-2.6.32.44/arch/x86/include/asm/spinlock.h linux-2.6.32.44/arch/x86/include/asm/spinlock.h
10226 --- linux-2.6.32.44/arch/x86/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
10227 +++ linux-2.6.32.44/arch/x86/include/asm/spinlock.h 2011-04-17 15:56:46.000000000 -0400
10228 @@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(r
10229 static inline void __raw_read_lock(raw_rwlock_t *rw)
10230 {
10231 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
10232 +
10233 +#ifdef CONFIG_PAX_REFCOUNT
10234 + "jno 0f\n"
10235 + LOCK_PREFIX " addl $1,(%0)\n"
10236 + "int $4\n0:\n"
10237 + _ASM_EXTABLE(0b, 0b)
10238 +#endif
10239 +
10240 "jns 1f\n"
10241 "call __read_lock_failed\n\t"
10242 "1:\n"
10243 @@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_r
10244 static inline void __raw_write_lock(raw_rwlock_t *rw)
10245 {
10246 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
10247 +
10248 +#ifdef CONFIG_PAX_REFCOUNT
10249 + "jno 0f\n"
10250 + LOCK_PREFIX " addl %1,(%0)\n"
10251 + "int $4\n0:\n"
10252 + _ASM_EXTABLE(0b, 0b)
10253 +#endif
10254 +
10255 "jz 1f\n"
10256 "call __write_lock_failed\n\t"
10257 "1:\n"
10258 @@ -286,12 +302,29 @@ static inline int __raw_write_trylock(ra
10259
10260 static inline void __raw_read_unlock(raw_rwlock_t *rw)
10261 {
10262 - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
10263 + asm volatile(LOCK_PREFIX "incl %0\n"
10264 +
10265 +#ifdef CONFIG_PAX_REFCOUNT
10266 + "jno 0f\n"
10267 + LOCK_PREFIX "decl %0\n"
10268 + "int $4\n0:\n"
10269 + _ASM_EXTABLE(0b, 0b)
10270 +#endif
10271 +
10272 + :"+m" (rw->lock) : : "memory");
10273 }
10274
10275 static inline void __raw_write_unlock(raw_rwlock_t *rw)
10276 {
10277 - asm volatile(LOCK_PREFIX "addl %1, %0"
10278 + asm volatile(LOCK_PREFIX "addl %1, %0\n"
10279 +
10280 +#ifdef CONFIG_PAX_REFCOUNT
10281 + "jno 0f\n"
10282 + LOCK_PREFIX "subl %1, %0\n"
10283 + "int $4\n0:\n"
10284 + _ASM_EXTABLE(0b, 0b)
10285 +#endif
10286 +
10287 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
10288 }
10289
10290 diff -urNp linux-2.6.32.44/arch/x86/include/asm/stackprotector.h linux-2.6.32.44/arch/x86/include/asm/stackprotector.h
10291 --- linux-2.6.32.44/arch/x86/include/asm/stackprotector.h 2011-03-27 14:31:47.000000000 -0400
10292 +++ linux-2.6.32.44/arch/x86/include/asm/stackprotector.h 2011-07-06 19:53:33.000000000 -0400
10293 @@ -48,7 +48,7 @@
10294 * head_32 for boot CPU and setup_per_cpu_areas() for others.
10295 */
10296 #define GDT_STACK_CANARY_INIT \
10297 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
10298 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
10299
10300 /*
10301 * Initialize the stackprotector canary value.
10302 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
10303
10304 static inline void load_stack_canary_segment(void)
10305 {
10306 -#ifdef CONFIG_X86_32
10307 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
10308 asm volatile ("mov %0, %%gs" : : "r" (0));
10309 #endif
10310 }
10311 diff -urNp linux-2.6.32.44/arch/x86/include/asm/system.h linux-2.6.32.44/arch/x86/include/asm/system.h
10312 --- linux-2.6.32.44/arch/x86/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
10313 +++ linux-2.6.32.44/arch/x86/include/asm/system.h 2011-05-22 23:02:03.000000000 -0400
10314 @@ -132,7 +132,7 @@ do { \
10315 "thread_return:\n\t" \
10316 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
10317 __switch_canary \
10318 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
10319 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
10320 "movq %%rax,%%rdi\n\t" \
10321 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
10322 "jnz ret_from_fork\n\t" \
10323 @@ -143,7 +143,7 @@ do { \
10324 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
10325 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
10326 [_tif_fork] "i" (_TIF_FORK), \
10327 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
10328 + [thread_info] "m" (per_cpu_var(current_tinfo)), \
10329 [current_task] "m" (per_cpu_var(current_task)) \
10330 __switch_canary_iparam \
10331 : "memory", "cc" __EXTRA_CLOBBER)
10332 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
10333 {
10334 unsigned long __limit;
10335 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
10336 - return __limit + 1;
10337 + return __limit;
10338 }
10339
10340 static inline void native_clts(void)
10341 @@ -340,12 +340,12 @@ void enable_hlt(void);
10342
10343 void cpu_idle_wait(void);
10344
10345 -extern unsigned long arch_align_stack(unsigned long sp);
10346 +#define arch_align_stack(x) ((x) & ~0xfUL)
10347 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10348
10349 void default_idle(void);
10350
10351 -void stop_this_cpu(void *dummy);
10352 +void stop_this_cpu(void *dummy) __noreturn;
10353
10354 /*
10355 * Force strict CPU ordering.
10356 diff -urNp linux-2.6.32.44/arch/x86/include/asm/thread_info.h linux-2.6.32.44/arch/x86/include/asm/thread_info.h
10357 --- linux-2.6.32.44/arch/x86/include/asm/thread_info.h 2011-03-27 14:31:47.000000000 -0400
10358 +++ linux-2.6.32.44/arch/x86/include/asm/thread_info.h 2011-05-17 19:26:34.000000000 -0400
10359 @@ -10,6 +10,7 @@
10360 #include <linux/compiler.h>
10361 #include <asm/page.h>
10362 #include <asm/types.h>
10363 +#include <asm/percpu.h>
10364
10365 /*
10366 * low level task data that entry.S needs immediate access to
10367 @@ -24,7 +25,6 @@ struct exec_domain;
10368 #include <asm/atomic.h>
10369
10370 struct thread_info {
10371 - struct task_struct *task; /* main task structure */
10372 struct exec_domain *exec_domain; /* execution domain */
10373 __u32 flags; /* low level flags */
10374 __u32 status; /* thread synchronous flags */
10375 @@ -34,18 +34,12 @@ struct thread_info {
10376 mm_segment_t addr_limit;
10377 struct restart_block restart_block;
10378 void __user *sysenter_return;
10379 -#ifdef CONFIG_X86_32
10380 - unsigned long previous_esp; /* ESP of the previous stack in
10381 - case of nested (IRQ) stacks
10382 - */
10383 - __u8 supervisor_stack[0];
10384 -#endif
10385 + unsigned long lowest_stack;
10386 int uaccess_err;
10387 };
10388
10389 -#define INIT_THREAD_INFO(tsk) \
10390 +#define INIT_THREAD_INFO \
10391 { \
10392 - .task = &tsk, \
10393 .exec_domain = &default_exec_domain, \
10394 .flags = 0, \
10395 .cpu = 0, \
10396 @@ -56,7 +50,7 @@ struct thread_info {
10397 }, \
10398 }
10399
10400 -#define init_thread_info (init_thread_union.thread_info)
10401 +#define init_thread_info (init_thread_union.stack)
10402 #define init_stack (init_thread_union.stack)
10403
10404 #else /* !__ASSEMBLY__ */
10405 @@ -163,6 +157,23 @@ struct thread_info {
10406 #define alloc_thread_info(tsk) \
10407 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
10408
10409 +#ifdef __ASSEMBLY__
10410 +/* how to get the thread information struct from ASM */
10411 +#define GET_THREAD_INFO(reg) \
10412 + mov PER_CPU_VAR(current_tinfo), reg
10413 +
10414 +/* use this one if reg already contains %esp */
10415 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10416 +#else
10417 +/* how to get the thread information struct from C */
10418 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10419 +
10420 +static __always_inline struct thread_info *current_thread_info(void)
10421 +{
10422 + return percpu_read_stable(current_tinfo);
10423 +}
10424 +#endif
10425 +
10426 #ifdef CONFIG_X86_32
10427
10428 #define STACK_WARN (THREAD_SIZE/8)
10429 @@ -173,35 +184,13 @@ struct thread_info {
10430 */
10431 #ifndef __ASSEMBLY__
10432
10433 -
10434 /* how to get the current stack pointer from C */
10435 register unsigned long current_stack_pointer asm("esp") __used;
10436
10437 -/* how to get the thread information struct from C */
10438 -static inline struct thread_info *current_thread_info(void)
10439 -{
10440 - return (struct thread_info *)
10441 - (current_stack_pointer & ~(THREAD_SIZE - 1));
10442 -}
10443 -
10444 -#else /* !__ASSEMBLY__ */
10445 -
10446 -/* how to get the thread information struct from ASM */
10447 -#define GET_THREAD_INFO(reg) \
10448 - movl $-THREAD_SIZE, reg; \
10449 - andl %esp, reg
10450 -
10451 -/* use this one if reg already contains %esp */
10452 -#define GET_THREAD_INFO_WITH_ESP(reg) \
10453 - andl $-THREAD_SIZE, reg
10454 -
10455 #endif
10456
10457 #else /* X86_32 */
10458
10459 -#include <asm/percpu.h>
10460 -#define KERNEL_STACK_OFFSET (5*8)
10461 -
10462 /*
10463 * macros/functions for gaining access to the thread information structure
10464 * preempt_count needs to be 1 initially, until the scheduler is functional.
10465 @@ -209,21 +198,8 @@ static inline struct thread_info *curren
10466 #ifndef __ASSEMBLY__
10467 DECLARE_PER_CPU(unsigned long, kernel_stack);
10468
10469 -static inline struct thread_info *current_thread_info(void)
10470 -{
10471 - struct thread_info *ti;
10472 - ti = (void *)(percpu_read_stable(kernel_stack) +
10473 - KERNEL_STACK_OFFSET - THREAD_SIZE);
10474 - return ti;
10475 -}
10476 -
10477 -#else /* !__ASSEMBLY__ */
10478 -
10479 -/* how to get the thread information struct from ASM */
10480 -#define GET_THREAD_INFO(reg) \
10481 - movq PER_CPU_VAR(kernel_stack),reg ; \
10482 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10483 -
10484 +/* how to get the current stack pointer from C */
10485 +register unsigned long current_stack_pointer asm("rsp") __used;
10486 #endif
10487
10488 #endif /* !X86_32 */
10489 @@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
10490 extern void free_thread_info(struct thread_info *ti);
10491 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10492 #define arch_task_cache_init arch_task_cache_init
10493 +
10494 +#define __HAVE_THREAD_FUNCTIONS
10495 +#define task_thread_info(task) (&(task)->tinfo)
10496 +#define task_stack_page(task) ((task)->stack)
10497 +#define setup_thread_stack(p, org) do {} while (0)
10498 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10499 +
10500 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10501 +extern struct task_struct *alloc_task_struct(void);
10502 +extern void free_task_struct(struct task_struct *);
10503 +
10504 #endif
10505 #endif /* _ASM_X86_THREAD_INFO_H */
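
The net effect of this file's changes is that thread_info is no longer found by masking the stack pointer; it is embedded in task_struct (task->tinfo) and reached through a per-cpu pointer maintained by the context-switch code elsewhere in the patch. Side-by-side contrast, with both bodies taken from the hunks above and hypothetical wrapper names:

static inline struct thread_info *ti_old(void)	/* pre-patch form, 32-bit */
{
	return (struct thread_info *)(current_stack_pointer & ~(THREAD_SIZE - 1));
}

static inline struct thread_info *ti_new(void)	/* post-patch form */
{
	return percpu_read_stable(current_tinfo);
}
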
10506 diff -urNp linux-2.6.32.44/arch/x86/include/asm/uaccess_32.h linux-2.6.32.44/arch/x86/include/asm/uaccess_32.h
10507 --- linux-2.6.32.44/arch/x86/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
10508 +++ linux-2.6.32.44/arch/x86/include/asm/uaccess_32.h 2011-05-16 21:46:57.000000000 -0400
10509 @@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_u
10510 static __always_inline unsigned long __must_check
10511 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10512 {
10513 + pax_track_stack();
10514 +
10515 + if ((long)n < 0)
10516 + return n;
10517 +
10518 if (__builtin_constant_p(n)) {
10519 unsigned long ret;
10520
10521 @@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to,
10522 return ret;
10523 }
10524 }
10525 + if (!__builtin_constant_p(n))
10526 + check_object_size(from, n, true);
10527 return __copy_to_user_ll(to, from, n);
10528 }
10529
10530 @@ -83,12 +90,16 @@ static __always_inline unsigned long __m
10531 __copy_to_user(void __user *to, const void *from, unsigned long n)
10532 {
10533 might_fault();
10534 +
10535 return __copy_to_user_inatomic(to, from, n);
10536 }
10537
10538 static __always_inline unsigned long
10539 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
10540 {
10541 + if ((long)n < 0)
10542 + return n;
10543 +
10544 /* Avoid zeroing the tail if the copy fails..
10545 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
10546 * but as the zeroing behaviour is only significant when n is not
10547 @@ -138,6 +149,12 @@ static __always_inline unsigned long
10548 __copy_from_user(void *to, const void __user *from, unsigned long n)
10549 {
10550 might_fault();
10551 +
10552 + pax_track_stack();
10553 +
10554 + if ((long)n < 0)
10555 + return n;
10556 +
10557 if (__builtin_constant_p(n)) {
10558 unsigned long ret;
10559
10560 @@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __
10561 return ret;
10562 }
10563 }
10564 + if (!__builtin_constant_p(n))
10565 + check_object_size(to, n, false);
10566 return __copy_from_user_ll(to, from, n);
10567 }
10568
10569 @@ -160,6 +179,10 @@ static __always_inline unsigned long __c
10570 const void __user *from, unsigned long n)
10571 {
10572 might_fault();
10573 +
10574 + if ((long)n < 0)
10575 + return n;
10576 +
10577 if (__builtin_constant_p(n)) {
10578 unsigned long ret;
10579
10580 @@ -182,14 +205,62 @@ static __always_inline unsigned long
10581 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
10582 unsigned long n)
10583 {
10584 - return __copy_from_user_ll_nocache_nozero(to, from, n);
10585 + if ((long)n < 0)
10586 + return n;
10587 +
10588 + return __copy_from_user_ll_nocache_nozero(to, from, n);
10589 +}
10590 +
10591 +/**
10592 + * copy_to_user: - Copy a block of data into user space.
10593 + * @to: Destination address, in user space.
10594 + * @from: Source address, in kernel space.
10595 + * @n: Number of bytes to copy.
10596 + *
10597 + * Context: User context only. This function may sleep.
10598 + *
10599 + * Copy data from kernel space to user space.
10600 + *
10601 + * Returns number of bytes that could not be copied.
10602 + * On success, this will be zero.
10603 + */
10604 +static __always_inline unsigned long __must_check
10605 +copy_to_user(void __user *to, const void *from, unsigned long n)
10606 +{
10607 + if (access_ok(VERIFY_WRITE, to, n))
10608 + n = __copy_to_user(to, from, n);
10609 + return n;
10610 +}
10611 +
10612 +/**
10613 + * copy_from_user: - Copy a block of data from user space.
10614 + * @to: Destination address, in kernel space.
10615 + * @from: Source address, in user space.
10616 + * @n: Number of bytes to copy.
10617 + *
10618 + * Context: User context only. This function may sleep.
10619 + *
10620 + * Copy data from user space to kernel space.
10621 + *
10622 + * Returns number of bytes that could not be copied.
10623 + * On success, this will be zero.
10624 + *
10625 + * If some data could not be copied, this function will pad the copied
10626 + * data to the requested size using zero bytes.
10627 + */
10628 +static __always_inline unsigned long __must_check
10629 +copy_from_user(void *to, const void __user *from, unsigned long n)
10630 +{
10631 + if (access_ok(VERIFY_READ, from, n))
10632 + n = __copy_from_user(to, from, n);
10633 + else if ((long)n > 0) {
10634 + if (!__builtin_constant_p(n))
10635 + check_object_size(to, n, false);
10636 + memset(to, 0, n);
10637 + }
10638 + return n;
10639 }
10640
10641 -unsigned long __must_check copy_to_user(void __user *to,
10642 - const void *from, unsigned long n);
10643 -unsigned long __must_check copy_from_user(void *to,
10644 - const void __user *from,
10645 - unsigned long n);
10646 long __must_check strncpy_from_user(char *dst, const char __user *src,
10647 long count);
10648 long __must_check __strncpy_from_user(char *dst,
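
copy_to_user()/copy_from_user() are now inlined here; compared with the old out-of-line versions they add a sign test that rejects sizes with the top bit set (catching unchecked length underflows) and check_object_size() when the size is not a compile-time constant. A typical caller is unchanged by the patch; the ioctl handler and structure below are hypothetical:

struct example_req {
	u32 flags;
	u32 len;
};

static long example_ioctl(void __user *arg)
{
	struct example_req req;

	if (copy_from_user(&req, arg, sizeof(req)))	/* returns bytes NOT copied */
		return -EFAULT;
	if (req.len > PAGE_SIZE)			/* still validate untrusted fields */
		return -EINVAL;
	return 0;
}
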
10649 diff -urNp linux-2.6.32.44/arch/x86/include/asm/uaccess_64.h linux-2.6.32.44/arch/x86/include/asm/uaccess_64.h
10650 --- linux-2.6.32.44/arch/x86/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
10651 +++ linux-2.6.32.44/arch/x86/include/asm/uaccess_64.h 2011-05-16 21:46:57.000000000 -0400
10652 @@ -9,6 +9,9 @@
10653 #include <linux/prefetch.h>
10654 #include <linux/lockdep.h>
10655 #include <asm/page.h>
10656 +#include <asm/pgtable.h>
10657 +
10658 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
10659
10660 /*
10661 * Copy To/From Userspace
10662 @@ -19,113 +22,203 @@ __must_check unsigned long
10663 copy_user_generic(void *to, const void *from, unsigned len);
10664
10665 __must_check unsigned long
10666 -copy_to_user(void __user *to, const void *from, unsigned len);
10667 -__must_check unsigned long
10668 -copy_from_user(void *to, const void __user *from, unsigned len);
10669 -__must_check unsigned long
10670 copy_in_user(void __user *to, const void __user *from, unsigned len);
10671
10672 static __always_inline __must_check
10673 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
10674 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
10675 {
10676 - int ret = 0;
10677 + unsigned ret = 0;
10678
10679 might_fault();
10680 - if (!__builtin_constant_p(size))
10681 - return copy_user_generic(dst, (__force void *)src, size);
10682 +
10683 + if ((int)size < 0)
10684 + return size;
10685 +
10686 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10687 + if (!__access_ok(VERIFY_READ, src, size))
10688 + return size;
10689 +#endif
10690 +
10691 + if (!__builtin_constant_p(size)) {
10692 + check_object_size(dst, size, false);
10693 +
10694 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10695 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10696 + src += PAX_USER_SHADOW_BASE;
10697 +#endif
10698 +
10699 + return copy_user_generic(dst, (__force const void *)src, size);
10700 + }
10701 switch (size) {
10702 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
10703 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
10704 ret, "b", "b", "=q", 1);
10705 return ret;
10706 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
10707 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
10708 ret, "w", "w", "=r", 2);
10709 return ret;
10710 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
10711 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
10712 ret, "l", "k", "=r", 4);
10713 return ret;
10714 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
10715 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10716 ret, "q", "", "=r", 8);
10717 return ret;
10718 case 10:
10719 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10720 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10721 ret, "q", "", "=r", 10);
10722 if (unlikely(ret))
10723 return ret;
10724 __get_user_asm(*(u16 *)(8 + (char *)dst),
10725 - (u16 __user *)(8 + (char __user *)src),
10726 + (const u16 __user *)(8 + (const char __user *)src),
10727 ret, "w", "w", "=r", 2);
10728 return ret;
10729 case 16:
10730 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10731 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10732 ret, "q", "", "=r", 16);
10733 if (unlikely(ret))
10734 return ret;
10735 __get_user_asm(*(u64 *)(8 + (char *)dst),
10736 - (u64 __user *)(8 + (char __user *)src),
10737 + (const u64 __user *)(8 + (const char __user *)src),
10738 ret, "q", "", "=r", 8);
10739 return ret;
10740 default:
10741 - return copy_user_generic(dst, (__force void *)src, size);
10742 +
10743 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10744 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10745 + src += PAX_USER_SHADOW_BASE;
10746 +#endif
10747 +
10748 + return copy_user_generic(dst, (__force const void *)src, size);
10749 }
10750 }
10751
10752 static __always_inline __must_check
10753 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
10754 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
10755 {
10756 - int ret = 0;
10757 + unsigned ret = 0;
10758
10759 might_fault();
10760 - if (!__builtin_constant_p(size))
10761 +
10762 + pax_track_stack();
10763 +
10764 + if ((int)size < 0)
10765 + return size;
10766 +
10767 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10768 + if (!__access_ok(VERIFY_WRITE, dst, size))
10769 + return size;
10770 +#endif
10771 +
10772 + if (!__builtin_constant_p(size)) {
10773 + check_object_size(src, size, true);
10774 +
10775 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10776 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10777 + dst += PAX_USER_SHADOW_BASE;
10778 +#endif
10779 +
10780 return copy_user_generic((__force void *)dst, src, size);
10781 + }
10782 switch (size) {
10783 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
10784 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
10785 ret, "b", "b", "iq", 1);
10786 return ret;
10787 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
10788 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
10789 ret, "w", "w", "ir", 2);
10790 return ret;
10791 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
10792 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
10793 ret, "l", "k", "ir", 4);
10794 return ret;
10795 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
10796 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10797 ret, "q", "", "er", 8);
10798 return ret;
10799 case 10:
10800 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10801 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10802 ret, "q", "", "er", 10);
10803 if (unlikely(ret))
10804 return ret;
10805 asm("":::"memory");
10806 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
10807 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
10808 ret, "w", "w", "ir", 2);
10809 return ret;
10810 case 16:
10811 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10812 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10813 ret, "q", "", "er", 16);
10814 if (unlikely(ret))
10815 return ret;
10816 asm("":::"memory");
10817 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
10818 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
10819 ret, "q", "", "er", 8);
10820 return ret;
10821 default:
10822 +
10823 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10824 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10825 + dst += PAX_USER_SHADOW_BASE;
10826 +#endif
10827 +
10828 return copy_user_generic((__force void *)dst, src, size);
10829 }
10830 }
10831
10832 static __always_inline __must_check
10833 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10834 +unsigned long copy_to_user(void __user *to, const void *from, unsigned len)
10835 +{
10836 + if (access_ok(VERIFY_WRITE, to, len))
10837 + len = __copy_to_user(to, from, len);
10838 + return len;
10839 +}
10840 +
10841 +static __always_inline __must_check
10842 +unsigned long copy_from_user(void *to, const void __user *from, unsigned len)
10843 +{
10844 + if ((int)len < 0)
10845 + return len;
10846 +
10847 + if (access_ok(VERIFY_READ, from, len))
10848 + len = __copy_from_user(to, from, len);
10849 + else if ((int)len > 0) {
10850 + if (!__builtin_constant_p(len))
10851 + check_object_size(to, len, false);
10852 + memset(to, 0, len);
10853 + }
10854 + return len;
10855 +}
10856 +
10857 +static __always_inline __must_check
10858 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10859 {
10860 - int ret = 0;
10861 + unsigned ret = 0;
10862
10863 might_fault();
10864 - if (!__builtin_constant_p(size))
10865 +
10866 + pax_track_stack();
10867 +
10868 + if ((int)size < 0)
10869 + return size;
10870 +
10871 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10872 + if (!__access_ok(VERIFY_READ, src, size))
10873 + return size;
10874 + if (!__access_ok(VERIFY_WRITE, dst, size))
10875 + return size;
10876 +#endif
10877 +
10878 + if (!__builtin_constant_p(size)) {
10879 +
10880 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10881 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10882 + src += PAX_USER_SHADOW_BASE;
10883 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10884 + dst += PAX_USER_SHADOW_BASE;
10885 +#endif
10886 +
10887 return copy_user_generic((__force void *)dst,
10888 - (__force void *)src, size);
10889 + (__force const void *)src, size);
10890 + }
10891 switch (size) {
10892 case 1: {
10893 u8 tmp;
10894 - __get_user_asm(tmp, (u8 __user *)src,
10895 + __get_user_asm(tmp, (const u8 __user *)src,
10896 ret, "b", "b", "=q", 1);
10897 if (likely(!ret))
10898 __put_user_asm(tmp, (u8 __user *)dst,
10899 @@ -134,7 +227,7 @@ int __copy_in_user(void __user *dst, con
10900 }
10901 case 2: {
10902 u16 tmp;
10903 - __get_user_asm(tmp, (u16 __user *)src,
10904 + __get_user_asm(tmp, (const u16 __user *)src,
10905 ret, "w", "w", "=r", 2);
10906 if (likely(!ret))
10907 __put_user_asm(tmp, (u16 __user *)dst,
10908 @@ -144,7 +237,7 @@ int __copy_in_user(void __user *dst, con
10909
10910 case 4: {
10911 u32 tmp;
10912 - __get_user_asm(tmp, (u32 __user *)src,
10913 + __get_user_asm(tmp, (const u32 __user *)src,
10914 ret, "l", "k", "=r", 4);
10915 if (likely(!ret))
10916 __put_user_asm(tmp, (u32 __user *)dst,
10917 @@ -153,7 +246,7 @@ int __copy_in_user(void __user *dst, con
10918 }
10919 case 8: {
10920 u64 tmp;
10921 - __get_user_asm(tmp, (u64 __user *)src,
10922 + __get_user_asm(tmp, (const u64 __user *)src,
10923 ret, "q", "", "=r", 8);
10924 if (likely(!ret))
10925 __put_user_asm(tmp, (u64 __user *)dst,
10926 @@ -161,8 +254,16 @@ int __copy_in_user(void __user *dst, con
10927 return ret;
10928 }
10929 default:
10930 +
10931 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10932 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10933 + src += PAX_USER_SHADOW_BASE;
10934 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10935 + dst += PAX_USER_SHADOW_BASE;
10936 +#endif
10937 +
10938 return copy_user_generic((__force void *)dst,
10939 - (__force void *)src, size);
10940 + (__force const void *)src, size);
10941 }
10942 }
10943
10944 @@ -176,33 +277,75 @@ __must_check long strlen_user(const char
10945 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
10946 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
10947
10948 -__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
10949 - unsigned size);
10950 +static __must_check __always_inline unsigned long
10951 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
10952 +{
10953 + pax_track_stack();
10954 +
10955 + if ((int)size < 0)
10956 + return size;
10957
10958 -static __must_check __always_inline int
10959 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10960 + if (!__access_ok(VERIFY_READ, src, size))
10961 + return size;
10962 +
10963 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10964 + src += PAX_USER_SHADOW_BASE;
10965 +#endif
10966 +
10967 + return copy_user_generic(dst, (__force const void *)src, size);
10968 +}
10969 +
10970 +static __must_check __always_inline unsigned long
10971 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
10972 {
10973 + if ((int)size < 0)
10974 + return size;
10975 +
10976 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10977 + if (!__access_ok(VERIFY_WRITE, dst, size))
10978 + return size;
10979 +
10980 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10981 + dst += PAX_USER_SHADOW_BASE;
10982 +#endif
10983 +
10984 return copy_user_generic((__force void *)dst, src, size);
10985 }
10986
10987 -extern long __copy_user_nocache(void *dst, const void __user *src,
10988 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
10989 unsigned size, int zerorest);
10990
10991 -static inline int
10992 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
10993 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
10994 {
10995 might_sleep();
10996 +
10997 + if ((int)size < 0)
10998 + return size;
10999 +
11000 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11001 + if (!__access_ok(VERIFY_READ, src, size))
11002 + return size;
11003 +#endif
11004 +
11005 return __copy_user_nocache(dst, src, size, 1);
11006 }
11007
11008 -static inline int
11009 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11010 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11011 unsigned size)
11012 {
11013 + if ((int)size < 0)
11014 + return size;
11015 +
11016 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11017 + if (!__access_ok(VERIFY_READ, src, size))
11018 + return size;
11019 +#endif
11020 +
11021 return __copy_user_nocache(dst, src, size, 0);
11022 }
11023
11024 -unsigned long
11025 +extern unsigned long
11026 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
11027
11028 #endif /* _ASM_X86_UACCESS_64_H */
11029 diff -urNp linux-2.6.32.44/arch/x86/include/asm/uaccess.h linux-2.6.32.44/arch/x86/include/asm/uaccess.h
11030 --- linux-2.6.32.44/arch/x86/include/asm/uaccess.h 2011-06-25 12:55:34.000000000 -0400
11031 +++ linux-2.6.32.44/arch/x86/include/asm/uaccess.h 2011-06-25 12:56:37.000000000 -0400
11032 @@ -8,12 +8,15 @@
11033 #include <linux/thread_info.h>
11034 #include <linux/prefetch.h>
11035 #include <linux/string.h>
11036 +#include <linux/sched.h>
11037 #include <asm/asm.h>
11038 #include <asm/page.h>
11039
11040 #define VERIFY_READ 0
11041 #define VERIFY_WRITE 1
11042
11043 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
11044 +
11045 /*
11046 * The fs value determines whether argument validity checking should be
11047 * performed or not. If get_fs() == USER_DS, checking is performed, with
11048 @@ -29,7 +32,12 @@
11049
11050 #define get_ds() (KERNEL_DS)
11051 #define get_fs() (current_thread_info()->addr_limit)
11052 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
11053 +void __set_fs(mm_segment_t x);
11054 +void set_fs(mm_segment_t x);
11055 +#else
11056 #define set_fs(x) (current_thread_info()->addr_limit = (x))
11057 +#endif
11058
11059 #define segment_eq(a, b) ((a).seg == (b).seg)
11060
11061 @@ -77,7 +85,33 @@
11062 * checks that the pointer is in the user space range - after calling
11063 * this function, memory access functions may still return -EFAULT.
11064 */
11065 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
11066 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
11067 +#define access_ok(type, addr, size) \
11068 +({ \
11069 + long __size = size; \
11070 + unsigned long __addr = (unsigned long)addr; \
11071 + unsigned long __addr_ao = __addr & PAGE_MASK; \
11072 + unsigned long __end_ao = __addr + __size - 1; \
11073 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
11074 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
11075 + while(__addr_ao <= __end_ao) { \
11076 + char __c_ao; \
11077 + __addr_ao += PAGE_SIZE; \
11078 + if (__size > PAGE_SIZE) \
11079 + cond_resched(); \
11080 + if (__get_user(__c_ao, (char __user *)__addr)) \
11081 + break; \
11082 + if (type != VERIFY_WRITE) { \
11083 + __addr = __addr_ao; \
11084 + continue; \
11085 + } \
11086 + if (__put_user(__c_ao, (char __user *)__addr)) \
11087 + break; \
11088 + __addr = __addr_ao; \
11089 + } \
11090 + } \
11091 + __ret_ao; \
11092 +})
11093
11094 /*
11095 * The exception table consists of pairs of addresses: the first is the
11096 @@ -183,12 +217,20 @@ extern int __get_user_bad(void);
11097 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
11098 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
11099
11100 -
11101 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
11102 +#define __copyuser_seg "gs;"
11103 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
11104 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
11105 +#else
11106 +#define __copyuser_seg
11107 +#define __COPYUSER_SET_ES
11108 +#define __COPYUSER_RESTORE_ES
11109 +#endif
11110
11111 #ifdef CONFIG_X86_32
11112 #define __put_user_asm_u64(x, addr, err, errret) \
11113 - asm volatile("1: movl %%eax,0(%2)\n" \
11114 - "2: movl %%edx,4(%2)\n" \
11115 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
11116 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
11117 "3:\n" \
11118 ".section .fixup,\"ax\"\n" \
11119 "4: movl %3,%0\n" \
11120 @@ -200,8 +242,8 @@ extern int __get_user_bad(void);
11121 : "A" (x), "r" (addr), "i" (errret), "0" (err))
11122
11123 #define __put_user_asm_ex_u64(x, addr) \
11124 - asm volatile("1: movl %%eax,0(%1)\n" \
11125 - "2: movl %%edx,4(%1)\n" \
11126 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
11127 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
11128 "3:\n" \
11129 _ASM_EXTABLE(1b, 2b - 1b) \
11130 _ASM_EXTABLE(2b, 3b - 2b) \
11131 @@ -374,7 +416,7 @@ do { \
11132 } while (0)
11133
11134 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
11135 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
11136 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
11137 "2:\n" \
11138 ".section .fixup,\"ax\"\n" \
11139 "3: mov %3,%0\n" \
11140 @@ -382,7 +424,7 @@ do { \
11141 " jmp 2b\n" \
11142 ".previous\n" \
11143 _ASM_EXTABLE(1b, 3b) \
11144 - : "=r" (err), ltype(x) \
11145 + : "=r" (err), ltype (x) \
11146 : "m" (__m(addr)), "i" (errret), "0" (err))
11147
11148 #define __get_user_size_ex(x, ptr, size) \
11149 @@ -407,7 +449,7 @@ do { \
11150 } while (0)
11151
11152 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
11153 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
11154 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
11155 "2:\n" \
11156 _ASM_EXTABLE(1b, 2b - 1b) \
11157 : ltype(x) : "m" (__m(addr)))
11158 @@ -424,13 +466,24 @@ do { \
11159 int __gu_err; \
11160 unsigned long __gu_val; \
11161 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
11162 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
11163 + (x) = (__typeof__(*(ptr)))__gu_val; \
11164 __gu_err; \
11165 })
11166
11167 /* FIXME: this hack is definitely wrong -AK */
11168 struct __large_struct { unsigned long buf[100]; };
11169 -#define __m(x) (*(struct __large_struct __user *)(x))
11170 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11171 +#define ____m(x) \
11172 +({ \
11173 + unsigned long ____x = (unsigned long)(x); \
11174 + if (____x < PAX_USER_SHADOW_BASE) \
11175 + ____x += PAX_USER_SHADOW_BASE; \
11176 + (void __user *)____x; \
11177 +})
11178 +#else
11179 +#define ____m(x) (x)
11180 +#endif
11181 +#define __m(x) (*(struct __large_struct __user *)____m(x))
11182
11183 /*
11184 * Tell gcc we read from memory instead of writing: this is because
11185 @@ -438,7 +491,7 @@ struct __large_struct { unsigned long bu
11186 * aliasing issues.
11187 */
11188 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
11189 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
11190 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
11191 "2:\n" \
11192 ".section .fixup,\"ax\"\n" \
11193 "3: mov %3,%0\n" \
11194 @@ -446,10 +499,10 @@ struct __large_struct { unsigned long bu
11195 ".previous\n" \
11196 _ASM_EXTABLE(1b, 3b) \
11197 : "=r"(err) \
11198 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
11199 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
11200
11201 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
11202 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
11203 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
11204 "2:\n" \
11205 _ASM_EXTABLE(1b, 2b - 1b) \
11206 : : ltype(x), "m" (__m(addr)))
11207 @@ -488,8 +541,12 @@ struct __large_struct { unsigned long bu
11208 * On error, the variable @x is set to zero.
11209 */
11210
11211 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11212 +#define __get_user(x, ptr) get_user((x), (ptr))
11213 +#else
11214 #define __get_user(x, ptr) \
11215 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
11216 +#endif
11217
11218 /**
11219 * __put_user: - Write a simple value into user space, with less checking.
11220 @@ -511,8 +568,12 @@ struct __large_struct { unsigned long bu
11221 * Returns zero on success, or -EFAULT on error.
11222 */
11223
11224 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11225 +#define __put_user(x, ptr) put_user((x), (ptr))
11226 +#else
11227 #define __put_user(x, ptr) \
11228 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
11229 +#endif
11230
11231 #define __get_user_unaligned __get_user
11232 #define __put_user_unaligned __put_user
11233 @@ -530,7 +591,7 @@ struct __large_struct { unsigned long bu
11234 #define get_user_ex(x, ptr) do { \
11235 unsigned long __gue_val; \
11236 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
11237 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
11238 + (x) = (__typeof__(*(ptr)))__gue_val; \
11239 } while (0)
11240
11241 #ifdef CONFIG_X86_WP_WORKS_OK
11242 @@ -567,6 +628,7 @@ extern struct movsl_mask {
11243
11244 #define ARCH_HAS_NOCACHE_UACCESS 1
11245
11246 +#define ARCH_HAS_SORT_EXTABLE
11247 #ifdef CONFIG_X86_32
11248 # include "uaccess_32.h"
11249 #else
11250 diff -urNp linux-2.6.32.44/arch/x86/include/asm/vgtod.h linux-2.6.32.44/arch/x86/include/asm/vgtod.h
11251 --- linux-2.6.32.44/arch/x86/include/asm/vgtod.h 2011-03-27 14:31:47.000000000 -0400
11252 +++ linux-2.6.32.44/arch/x86/include/asm/vgtod.h 2011-04-17 15:56:46.000000000 -0400
11253 @@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
11254 int sysctl_enabled;
11255 struct timezone sys_tz;
11256 struct { /* extract of a clocksource struct */
11257 + char name[8];
11258 cycle_t (*vread)(void);
11259 cycle_t cycle_last;
11260 cycle_t mask;
11261 diff -urNp linux-2.6.32.44/arch/x86/include/asm/vmi.h linux-2.6.32.44/arch/x86/include/asm/vmi.h
11262 --- linux-2.6.32.44/arch/x86/include/asm/vmi.h 2011-03-27 14:31:47.000000000 -0400
11263 +++ linux-2.6.32.44/arch/x86/include/asm/vmi.h 2011-04-17 15:56:46.000000000 -0400
11264 @@ -191,6 +191,7 @@ struct vrom_header {
11265 u8 reserved[96]; /* Reserved for headers */
11266 char vmi_init[8]; /* VMI_Init jump point */
11267 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
11268 + char rom_data[8048]; /* rest of the option ROM */
11269 } __attribute__((packed));
11270
11271 struct pnp_header {
11272 diff -urNp linux-2.6.32.44/arch/x86/include/asm/vmi_time.h linux-2.6.32.44/arch/x86/include/asm/vmi_time.h
11273 --- linux-2.6.32.44/arch/x86/include/asm/vmi_time.h 2011-03-27 14:31:47.000000000 -0400
11274 +++ linux-2.6.32.44/arch/x86/include/asm/vmi_time.h 2011-08-05 20:33:55.000000000 -0400
11275 @@ -43,7 +43,7 @@ extern struct vmi_timer_ops {
11276 int (*wallclock_updated)(void);
11277 void (*set_alarm)(u32 flags, u64 expiry, u64 period);
11278 void (*cancel_alarm)(u32 flags);
11279 -} vmi_timer_ops;
11280 +} __no_const vmi_timer_ops;
11281
11282 /* Prototypes */
11283 extern void __init vmi_time_init(void);
11284 diff -urNp linux-2.6.32.44/arch/x86/include/asm/vsyscall.h linux-2.6.32.44/arch/x86/include/asm/vsyscall.h
11285 --- linux-2.6.32.44/arch/x86/include/asm/vsyscall.h 2011-03-27 14:31:47.000000000 -0400
11286 +++ linux-2.6.32.44/arch/x86/include/asm/vsyscall.h 2011-04-17 15:56:46.000000000 -0400
11287 @@ -15,9 +15,10 @@ enum vsyscall_num {
11288
11289 #ifdef __KERNEL__
11290 #include <linux/seqlock.h>
11291 +#include <linux/getcpu.h>
11292 +#include <linux/time.h>
11293
11294 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
11295 -#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
11296
11297 /* Definitions for CONFIG_GENERIC_TIME definitions */
11298 #define __section_vsyscall_gtod_data __attribute__ \
11299 @@ -31,7 +32,6 @@ enum vsyscall_num {
11300 #define VGETCPU_LSL 2
11301
11302 extern int __vgetcpu_mode;
11303 -extern volatile unsigned long __jiffies;
11304
11305 /* kernel space (writeable) */
11306 extern int vgetcpu_mode;
11307 @@ -39,6 +39,9 @@ extern struct timezone sys_tz;
11308
11309 extern void map_vsyscall(void);
11310
11311 +extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
11312 +extern time_t vtime(time_t *t);
11313 +extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
11314 #endif /* __KERNEL__ */
11315
11316 #endif /* _ASM_X86_VSYSCALL_H */
11317 diff -urNp linux-2.6.32.44/arch/x86/include/asm/x86_init.h linux-2.6.32.44/arch/x86/include/asm/x86_init.h
11318 --- linux-2.6.32.44/arch/x86/include/asm/x86_init.h 2011-03-27 14:31:47.000000000 -0400
11319 +++ linux-2.6.32.44/arch/x86/include/asm/x86_init.h 2011-08-05 20:33:55.000000000 -0400
11320 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
11321 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
11322 void (*find_smp_config)(unsigned int reserve);
11323 void (*get_smp_config)(unsigned int early);
11324 -};
11325 +} __no_const;
11326
11327 /**
11328 * struct x86_init_resources - platform specific resource related ops
11329 @@ -42,7 +42,7 @@ struct x86_init_resources {
11330 void (*probe_roms)(void);
11331 void (*reserve_resources)(void);
11332 char *(*memory_setup)(void);
11333 -};
11334 +} __no_const;
11335
11336 /**
11337 * struct x86_init_irqs - platform specific interrupt setup
11338 @@ -55,7 +55,7 @@ struct x86_init_irqs {
11339 void (*pre_vector_init)(void);
11340 void (*intr_init)(void);
11341 void (*trap_init)(void);
11342 -};
11343 +} __no_const;
11344
11345 /**
11346 * struct x86_init_oem - oem platform specific customizing functions
11347 @@ -65,7 +65,7 @@ struct x86_init_irqs {
11348 struct x86_init_oem {
11349 void (*arch_setup)(void);
11350 void (*banner)(void);
11351 -};
11352 +} __no_const;
11353
11354 /**
11355 * struct x86_init_paging - platform specific paging functions
11356 @@ -75,7 +75,7 @@ struct x86_init_oem {
11357 struct x86_init_paging {
11358 void (*pagetable_setup_start)(pgd_t *base);
11359 void (*pagetable_setup_done)(pgd_t *base);
11360 -};
11361 +} __no_const;
11362
11363 /**
11364 * struct x86_init_timers - platform specific timer setup
11365 @@ -88,7 +88,7 @@ struct x86_init_timers {
11366 void (*setup_percpu_clockev)(void);
11367 void (*tsc_pre_init)(void);
11368 void (*timer_init)(void);
11369 -};
11370 +} __no_const;
11371
11372 /**
11373 * struct x86_init_ops - functions for platform specific setup
11374 @@ -101,7 +101,7 @@ struct x86_init_ops {
11375 struct x86_init_oem oem;
11376 struct x86_init_paging paging;
11377 struct x86_init_timers timers;
11378 -};
11379 +} __no_const;
11380
11381 /**
11382 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
11383 @@ -109,7 +109,7 @@ struct x86_init_ops {
11384 */
11385 struct x86_cpuinit_ops {
11386 void (*setup_percpu_clockev)(void);
11387 -};
11388 +} __no_const;
11389
11390 /**
11391 * struct x86_platform_ops - platform specific runtime functions
11392 @@ -121,7 +121,7 @@ struct x86_platform_ops {
11393 unsigned long (*calibrate_tsc)(void);
11394 unsigned long (*get_wallclock)(void);
11395 int (*set_wallclock)(unsigned long nowtime);
11396 -};
11397 +} __no_const;
11398
11399 extern struct x86_init_ops x86_init;
11400 extern struct x86_cpuinit_ops x86_cpuinit;
11401 diff -urNp linux-2.6.32.44/arch/x86/include/asm/xsave.h linux-2.6.32.44/arch/x86/include/asm/xsave.h
11402 --- linux-2.6.32.44/arch/x86/include/asm/xsave.h 2011-03-27 14:31:47.000000000 -0400
11403 +++ linux-2.6.32.44/arch/x86/include/asm/xsave.h 2011-04-17 15:56:46.000000000 -0400
11404 @@ -56,6 +56,12 @@ static inline int xrstor_checking(struct
11405 static inline int xsave_user(struct xsave_struct __user *buf)
11406 {
11407 int err;
11408 +
11409 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11410 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
11411 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
11412 +#endif
11413 +
11414 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
11415 "2:\n"
11416 ".section .fixup,\"ax\"\n"
11417 @@ -82,6 +88,11 @@ static inline int xrestore_user(struct x
11418 u32 lmask = mask;
11419 u32 hmask = mask >> 32;
11420
11421 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11422 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
11423 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
11424 +#endif
11425 +
11426 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
11427 "2:\n"
11428 ".section .fixup,\"ax\"\n"
11429 diff -urNp linux-2.6.32.44/arch/x86/Kconfig linux-2.6.32.44/arch/x86/Kconfig
11430 --- linux-2.6.32.44/arch/x86/Kconfig 2011-03-27 14:31:47.000000000 -0400
11431 +++ linux-2.6.32.44/arch/x86/Kconfig 2011-04-17 15:56:46.000000000 -0400
11432 @@ -223,7 +223,7 @@ config X86_TRAMPOLINE
11433
11434 config X86_32_LAZY_GS
11435 def_bool y
11436 - depends on X86_32 && !CC_STACKPROTECTOR
11437 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
11438
11439 config KTIME_SCALAR
11440 def_bool X86_32
11441 @@ -1008,7 +1008,7 @@ choice
11442
11443 config NOHIGHMEM
11444 bool "off"
11445 - depends on !X86_NUMAQ
11446 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11447 ---help---
11448 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
11449 However, the address space of 32-bit x86 processors is only 4
11450 @@ -1045,7 +1045,7 @@ config NOHIGHMEM
11451
11452 config HIGHMEM4G
11453 bool "4GB"
11454 - depends on !X86_NUMAQ
11455 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11456 ---help---
11457 Select this if you have a 32-bit processor and between 1 and 4
11458 gigabytes of physical RAM.
11459 @@ -1099,7 +1099,7 @@ config PAGE_OFFSET
11460 hex
11461 default 0xB0000000 if VMSPLIT_3G_OPT
11462 default 0x80000000 if VMSPLIT_2G
11463 - default 0x78000000 if VMSPLIT_2G_OPT
11464 + default 0x70000000 if VMSPLIT_2G_OPT
11465 default 0x40000000 if VMSPLIT_1G
11466 default 0xC0000000
11467 depends on X86_32
11468 @@ -1430,7 +1430,7 @@ config ARCH_USES_PG_UNCACHED
11469
11470 config EFI
11471 bool "EFI runtime service support"
11472 - depends on ACPI
11473 + depends on ACPI && !PAX_KERNEXEC
11474 ---help---
11475 This enables the kernel to use EFI runtime services that are
11476 available (such as the EFI variable services).
11477 @@ -1460,6 +1460,7 @@ config SECCOMP
11478
11479 config CC_STACKPROTECTOR
11480 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
11481 + depends on X86_64 || !PAX_MEMORY_UDEREF
11482 ---help---
11483 This option turns on the -fstack-protector GCC feature. This
11484 feature puts, at the beginning of functions, a canary value on
11485 @@ -1517,6 +1518,7 @@ config KEXEC_JUMP
11486 config PHYSICAL_START
11487 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
11488 default "0x1000000"
11489 + range 0x400000 0x40000000
11490 ---help---
11491 This gives the physical address where the kernel is loaded.
11492
11493 @@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
11494 hex
11495 prompt "Alignment value to which kernel should be aligned" if X86_32
11496 default "0x1000000"
11497 + range 0x400000 0x1000000 if PAX_KERNEXEC
11498 range 0x2000 0x1000000
11499 ---help---
11500 This value puts the alignment restrictions on physical address
11501 @@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
11502 Say N if you want to disable CPU hotplug.
11503
11504 config COMPAT_VDSO
11505 - def_bool y
11506 + def_bool n
11507 prompt "Compat VDSO support"
11508 depends on X86_32 || IA32_EMULATION
11509 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
11510 ---help---
11511 Map the 32-bit VDSO to the predictable old-style address too.
11512 ---help---
11513 diff -urNp linux-2.6.32.44/arch/x86/Kconfig.cpu linux-2.6.32.44/arch/x86/Kconfig.cpu
11514 --- linux-2.6.32.44/arch/x86/Kconfig.cpu 2011-03-27 14:31:47.000000000 -0400
11515 +++ linux-2.6.32.44/arch/x86/Kconfig.cpu 2011-04-17 15:56:46.000000000 -0400
11516 @@ -340,7 +340,7 @@ config X86_PPRO_FENCE
11517
11518 config X86_F00F_BUG
11519 def_bool y
11520 - depends on M586MMX || M586TSC || M586 || M486 || M386
11521 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
11522
11523 config X86_WP_WORKS_OK
11524 def_bool y
11525 @@ -360,7 +360,7 @@ config X86_POPAD_OK
11526
11527 config X86_ALIGNMENT_16
11528 def_bool y
11529 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11530 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11531
11532 config X86_INTEL_USERCOPY
11533 def_bool y
11534 @@ -406,7 +406,7 @@ config X86_CMPXCHG64
11535 # generates cmov.
11536 config X86_CMOV
11537 def_bool y
11538 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11539 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11540
11541 config X86_MINIMUM_CPU_FAMILY
11542 int
11543 diff -urNp linux-2.6.32.44/arch/x86/Kconfig.debug linux-2.6.32.44/arch/x86/Kconfig.debug
11544 --- linux-2.6.32.44/arch/x86/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
11545 +++ linux-2.6.32.44/arch/x86/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
11546 @@ -99,7 +99,7 @@ config X86_PTDUMP
11547 config DEBUG_RODATA
11548 bool "Write protect kernel read-only data structures"
11549 default y
11550 - depends on DEBUG_KERNEL
11551 + depends on DEBUG_KERNEL && BROKEN
11552 ---help---
11553 Mark the kernel read-only data as write-protected in the pagetables,
11554 in order to catch accidental (and incorrect) writes to such const
11555 diff -urNp linux-2.6.32.44/arch/x86/kernel/acpi/realmode/Makefile linux-2.6.32.44/arch/x86/kernel/acpi/realmode/Makefile
11556 --- linux-2.6.32.44/arch/x86/kernel/acpi/realmode/Makefile 2011-03-27 14:31:47.000000000 -0400
11557 +++ linux-2.6.32.44/arch/x86/kernel/acpi/realmode/Makefile 2011-08-07 14:38:58.000000000 -0400
11558 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
11559 $(call cc-option, -fno-stack-protector) \
11560 $(call cc-option, -mpreferred-stack-boundary=2)
11561 KBUILD_CFLAGS += $(call cc-option, -m32)
11562 +ifdef CONSTIFY_PLUGIN
11563 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
11564 +endif
11565 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11566 GCOV_PROFILE := n
11567
11568 diff -urNp linux-2.6.32.44/arch/x86/kernel/acpi/realmode/wakeup.S linux-2.6.32.44/arch/x86/kernel/acpi/realmode/wakeup.S
11569 --- linux-2.6.32.44/arch/x86/kernel/acpi/realmode/wakeup.S 2011-03-27 14:31:47.000000000 -0400
11570 +++ linux-2.6.32.44/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-01 18:53:40.000000000 -0400
11571 @@ -91,6 +91,9 @@ _start:
11572 /* Do any other stuff... */
11573
11574 #ifndef CONFIG_64BIT
11575 + /* Recheck NX bit overrides (64bit path does this in trampoline) */
11576 + call verify_cpu
11577 +
11578 /* This could also be done in C code... */
11579 movl pmode_cr3, %eax
11580 movl %eax, %cr3
11581 @@ -104,7 +107,7 @@ _start:
11582 movl %eax, %ecx
11583 orl %edx, %ecx
11584 jz 1f
11585 - movl $0xc0000080, %ecx
11586 + mov $MSR_EFER, %ecx
11587 wrmsr
11588 1:
11589
11590 @@ -114,6 +117,7 @@ _start:
11591 movl pmode_cr0, %eax
11592 movl %eax, %cr0
11593 jmp pmode_return
11594 +# include "../../verify_cpu.S"
11595 #else
11596 pushw $0
11597 pushw trampoline_segment
11598 diff -urNp linux-2.6.32.44/arch/x86/kernel/acpi/sleep.c linux-2.6.32.44/arch/x86/kernel/acpi/sleep.c
11599 --- linux-2.6.32.44/arch/x86/kernel/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
11600 +++ linux-2.6.32.44/arch/x86/kernel/acpi/sleep.c 2011-07-01 19:01:34.000000000 -0400
11601 @@ -11,11 +11,12 @@
11602 #include <linux/cpumask.h>
11603 #include <asm/segment.h>
11604 #include <asm/desc.h>
11605 +#include <asm/e820.h>
11606
11607 #include "realmode/wakeup.h"
11608 #include "sleep.h"
11609
11610 -unsigned long acpi_wakeup_address;
11611 +unsigned long acpi_wakeup_address = 0x2000;
11612 unsigned long acpi_realmode_flags;
11613
11614 /* address in low memory of the wakeup routine. */
11615 @@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
11616 #else /* CONFIG_64BIT */
11617 header->trampoline_segment = setup_trampoline() >> 4;
11618 #ifdef CONFIG_SMP
11619 - stack_start.sp = temp_stack + sizeof(temp_stack);
11620 + stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
11621 +
11622 + pax_open_kernel();
11623 early_gdt_descr.address =
11624 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11625 + pax_close_kernel();
11626 +
11627 initial_gs = per_cpu_offset(smp_processor_id());
11628 #endif
11629 initial_code = (unsigned long)wakeup_long64;
11630 @@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
11631 return;
11632 }
11633
11634 - acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
11635 -
11636 - if (!acpi_realmode) {
11637 - printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
11638 - return;
11639 - }
11640 -
11641 - acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
11642 + reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
11643 + acpi_realmode = (unsigned long)__va(acpi_wakeup_address);;
11644 }
11645
11646
11647 diff -urNp linux-2.6.32.44/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.32.44/arch/x86/kernel/acpi/wakeup_32.S
11648 --- linux-2.6.32.44/arch/x86/kernel/acpi/wakeup_32.S 2011-03-27 14:31:47.000000000 -0400
11649 +++ linux-2.6.32.44/arch/x86/kernel/acpi/wakeup_32.S 2011-04-17 15:56:46.000000000 -0400
11650 @@ -30,13 +30,11 @@ wakeup_pmode_return:
11651 # and restore the stack ... but you need gdt for this to work
11652 movl saved_context_esp, %esp
11653
11654 - movl %cs:saved_magic, %eax
11655 - cmpl $0x12345678, %eax
11656 + cmpl $0x12345678, saved_magic
11657 jne bogus_magic
11658
11659 # jump to place where we left off
11660 - movl saved_eip, %eax
11661 - jmp *%eax
11662 + jmp *(saved_eip)
11663
11664 bogus_magic:
11665 jmp bogus_magic
11666 diff -urNp linux-2.6.32.44/arch/x86/kernel/alternative.c linux-2.6.32.44/arch/x86/kernel/alternative.c
11667 --- linux-2.6.32.44/arch/x86/kernel/alternative.c 2011-03-27 14:31:47.000000000 -0400
11668 +++ linux-2.6.32.44/arch/x86/kernel/alternative.c 2011-04-17 15:56:46.000000000 -0400
11669 @@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(str
11670
11671 BUG_ON(p->len > MAX_PATCH_LEN);
11672 /* prep the buffer with the original instructions */
11673 - memcpy(insnbuf, p->instr, p->len);
11674 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11675 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11676 (unsigned long)p->instr, p->len);
11677
11678 @@ -475,7 +475,7 @@ void __init alternative_instructions(voi
11679 if (smp_alt_once)
11680 free_init_pages("SMP alternatives",
11681 (unsigned long)__smp_locks,
11682 - (unsigned long)__smp_locks_end);
11683 + PAGE_ALIGN((unsigned long)__smp_locks_end));
11684
11685 restart_nmi();
11686 }
11687 @@ -492,13 +492,17 @@ void __init alternative_instructions(voi
11688 * instructions. And on the local CPU you need to be protected again NMI or MCE
11689 * handlers seeing an inconsistent instruction while you patch.
11690 */
11691 -static void *__init_or_module text_poke_early(void *addr, const void *opcode,
11692 +static void *__kprobes text_poke_early(void *addr, const void *opcode,
11693 size_t len)
11694 {
11695 unsigned long flags;
11696 local_irq_save(flags);
11697 - memcpy(addr, opcode, len);
11698 +
11699 + pax_open_kernel();
11700 + memcpy(ktla_ktva(addr), opcode, len);
11701 sync_core();
11702 + pax_close_kernel();
11703 +
11704 local_irq_restore(flags);
11705 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11706 that causes hangs on some VIA CPUs. */
11707 @@ -520,35 +524,21 @@ static void *__init_or_module text_poke_
11708 */
11709 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11710 {
11711 - unsigned long flags;
11712 - char *vaddr;
11713 + unsigned char *vaddr = ktla_ktva(addr);
11714 struct page *pages[2];
11715 - int i;
11716 + size_t i;
11717
11718 if (!core_kernel_text((unsigned long)addr)) {
11719 - pages[0] = vmalloc_to_page(addr);
11720 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11721 + pages[0] = vmalloc_to_page(vaddr);
11722 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11723 } else {
11724 - pages[0] = virt_to_page(addr);
11725 + pages[0] = virt_to_page(vaddr);
11726 WARN_ON(!PageReserved(pages[0]));
11727 - pages[1] = virt_to_page(addr + PAGE_SIZE);
11728 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11729 }
11730 BUG_ON(!pages[0]);
11731 - local_irq_save(flags);
11732 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11733 - if (pages[1])
11734 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11735 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11736 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11737 - clear_fixmap(FIX_TEXT_POKE0);
11738 - if (pages[1])
11739 - clear_fixmap(FIX_TEXT_POKE1);
11740 - local_flush_tlb();
11741 - sync_core();
11742 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
11743 - that causes hangs on some VIA CPUs. */
11744 + text_poke_early(addr, opcode, len);
11745 for (i = 0; i < len; i++)
11746 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11747 - local_irq_restore(flags);
11748 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11749 return addr;
11750 }
11751 diff -urNp linux-2.6.32.44/arch/x86/kernel/amd_iommu.c linux-2.6.32.44/arch/x86/kernel/amd_iommu.c
11752 --- linux-2.6.32.44/arch/x86/kernel/amd_iommu.c 2011-03-27 14:31:47.000000000 -0400
11753 +++ linux-2.6.32.44/arch/x86/kernel/amd_iommu.c 2011-04-17 15:56:46.000000000 -0400
11754 @@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(
11755 }
11756 }
11757
11758 -static struct dma_map_ops amd_iommu_dma_ops = {
11759 +static const struct dma_map_ops amd_iommu_dma_ops = {
11760 .alloc_coherent = alloc_coherent,
11761 .free_coherent = free_coherent,
11762 .map_page = map_page,
11763 diff -urNp linux-2.6.32.44/arch/x86/kernel/apic/apic.c linux-2.6.32.44/arch/x86/kernel/apic/apic.c
11764 --- linux-2.6.32.44/arch/x86/kernel/apic/apic.c 2011-03-27 14:31:47.000000000 -0400
11765 +++ linux-2.6.32.44/arch/x86/kernel/apic/apic.c 2011-05-16 21:46:57.000000000 -0400
11766 @@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs
11767 apic_write(APIC_ESR, 0);
11768 v1 = apic_read(APIC_ESR);
11769 ack_APIC_irq();
11770 - atomic_inc(&irq_err_count);
11771 + atomic_inc_unchecked(&irq_err_count);
11772
11773 /*
11774 * Here is what the APIC error bits mean:
11775 @@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(vo
11776 u16 *bios_cpu_apicid;
11777 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
11778
11779 + pax_track_stack();
11780 +
11781 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
11782 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
11783
11784 diff -urNp linux-2.6.32.44/arch/x86/kernel/apic/io_apic.c linux-2.6.32.44/arch/x86/kernel/apic/io_apic.c
11785 --- linux-2.6.32.44/arch/x86/kernel/apic/io_apic.c 2011-03-27 14:31:47.000000000 -0400
11786 +++ linux-2.6.32.44/arch/x86/kernel/apic/io_apic.c 2011-05-04 17:56:20.000000000 -0400
11787 @@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapi
11788 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
11789 GFP_ATOMIC);
11790 if (!ioapic_entries)
11791 - return 0;
11792 + return NULL;
11793
11794 for (apic = 0; apic < nr_ioapics; apic++) {
11795 ioapic_entries[apic] =
11796 @@ -733,7 +733,7 @@ nomem:
11797 kfree(ioapic_entries[apic]);
11798 kfree(ioapic_entries);
11799
11800 - return 0;
11801 + return NULL;
11802 }
11803
11804 /*
11805 @@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
11806 }
11807 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
11808
11809 -void lock_vector_lock(void)
11810 +void lock_vector_lock(void) __acquires(vector_lock)
11811 {
11812 /* Used to the online set of cpus does not change
11813 * during assign_irq_vector.
11814 @@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
11815 spin_lock(&vector_lock);
11816 }
11817
11818 -void unlock_vector_lock(void)
11819 +void unlock_vector_lock(void) __releases(vector_lock)
11820 {
11821 spin_unlock(&vector_lock);
11822 }
11823 @@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int i
11824 ack_APIC_irq();
11825 }
11826
11827 -atomic_t irq_mis_count;
11828 +atomic_unchecked_t irq_mis_count;
11829
11830 static void ack_apic_level(unsigned int irq)
11831 {
11832 @@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int
11833
11834 /* Tail end of version 0x11 I/O APIC bug workaround */
11835 if (!(v & (1 << (i & 0x1f)))) {
11836 - atomic_inc(&irq_mis_count);
11837 + atomic_inc_unchecked(&irq_mis_count);
11838 spin_lock(&ioapic_lock);
11839 __mask_and_edge_IO_APIC_irq(cfg);
11840 __unmask_and_level_IO_APIC_irq(cfg);
11841 diff -urNp linux-2.6.32.44/arch/x86/kernel/apm_32.c linux-2.6.32.44/arch/x86/kernel/apm_32.c
11842 --- linux-2.6.32.44/arch/x86/kernel/apm_32.c 2011-03-27 14:31:47.000000000 -0400
11843 +++ linux-2.6.32.44/arch/x86/kernel/apm_32.c 2011-04-23 12:56:10.000000000 -0400
11844 @@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
11845 * This is for buggy BIOS's that refer to (real mode) segment 0x40
11846 * even though they are called in protected mode.
11847 */
11848 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
11849 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
11850 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
11851
11852 static const char driver_version[] = "1.16ac"; /* no spaces */
11853 @@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
11854 BUG_ON(cpu != 0);
11855 gdt = get_cpu_gdt_table(cpu);
11856 save_desc_40 = gdt[0x40 / 8];
11857 +
11858 + pax_open_kernel();
11859 gdt[0x40 / 8] = bad_bios_desc;
11860 + pax_close_kernel();
11861
11862 apm_irq_save(flags);
11863 APM_DO_SAVE_SEGS;
11864 @@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
11865 &call->esi);
11866 APM_DO_RESTORE_SEGS;
11867 apm_irq_restore(flags);
11868 +
11869 + pax_open_kernel();
11870 gdt[0x40 / 8] = save_desc_40;
11871 + pax_close_kernel();
11872 +
11873 put_cpu();
11874
11875 return call->eax & 0xff;
11876 @@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void
11877 BUG_ON(cpu != 0);
11878 gdt = get_cpu_gdt_table(cpu);
11879 save_desc_40 = gdt[0x40 / 8];
11880 +
11881 + pax_open_kernel();
11882 gdt[0x40 / 8] = bad_bios_desc;
11883 + pax_close_kernel();
11884
11885 apm_irq_save(flags);
11886 APM_DO_SAVE_SEGS;
11887 @@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void
11888 &call->eax);
11889 APM_DO_RESTORE_SEGS;
11890 apm_irq_restore(flags);
11891 +
11892 + pax_open_kernel();
11893 gdt[0x40 / 8] = save_desc_40;
11894 + pax_close_kernel();
11895 +
11896 put_cpu();
11897 return error;
11898 }
11899 @@ -975,7 +989,7 @@ recalc:
11900
11901 static void apm_power_off(void)
11902 {
11903 - unsigned char po_bios_call[] = {
11904 + const unsigned char po_bios_call[] = {
11905 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
11906 0x8e, 0xd0, /* movw ax,ss */
11907 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
11908 @@ -2357,12 +2371,15 @@ static int __init apm_init(void)
11909 * code to that CPU.
11910 */
11911 gdt = get_cpu_gdt_table(0);
11912 +
11913 + pax_open_kernel();
11914 set_desc_base(&gdt[APM_CS >> 3],
11915 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
11916 set_desc_base(&gdt[APM_CS_16 >> 3],
11917 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
11918 set_desc_base(&gdt[APM_DS >> 3],
11919 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
11920 + pax_close_kernel();
11921
11922 proc_create("apm", 0, NULL, &apm_file_ops);
11923
11924 diff -urNp linux-2.6.32.44/arch/x86/kernel/asm-offsets_32.c linux-2.6.32.44/arch/x86/kernel/asm-offsets_32.c
11925 --- linux-2.6.32.44/arch/x86/kernel/asm-offsets_32.c 2011-03-27 14:31:47.000000000 -0400
11926 +++ linux-2.6.32.44/arch/x86/kernel/asm-offsets_32.c 2011-05-16 21:46:57.000000000 -0400
11927 @@ -51,7 +51,6 @@ void foo(void)
11928 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
11929 BLANK();
11930
11931 - OFFSET(TI_task, thread_info, task);
11932 OFFSET(TI_exec_domain, thread_info, exec_domain);
11933 OFFSET(TI_flags, thread_info, flags);
11934 OFFSET(TI_status, thread_info, status);
11935 @@ -60,6 +59,8 @@ void foo(void)
11936 OFFSET(TI_restart_block, thread_info, restart_block);
11937 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
11938 OFFSET(TI_cpu, thread_info, cpu);
11939 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
11940 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11941 BLANK();
11942
11943 OFFSET(GDS_size, desc_ptr, size);
11944 @@ -99,6 +100,7 @@ void foo(void)
11945
11946 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11947 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
11948 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11949 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
11950 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
11951 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
11952 @@ -115,6 +117,11 @@ void foo(void)
11953 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
11954 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11955 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11956 +
11957 +#ifdef CONFIG_PAX_KERNEXEC
11958 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11959 +#endif
11960 +
11961 #endif
11962
11963 #ifdef CONFIG_XEN
11964 diff -urNp linux-2.6.32.44/arch/x86/kernel/asm-offsets_64.c linux-2.6.32.44/arch/x86/kernel/asm-offsets_64.c
11965 --- linux-2.6.32.44/arch/x86/kernel/asm-offsets_64.c 2011-03-27 14:31:47.000000000 -0400
11966 +++ linux-2.6.32.44/arch/x86/kernel/asm-offsets_64.c 2011-05-16 21:46:57.000000000 -0400
11967 @@ -44,6 +44,8 @@ int main(void)
11968 ENTRY(addr_limit);
11969 ENTRY(preempt_count);
11970 ENTRY(status);
11971 + ENTRY(lowest_stack);
11972 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11973 #ifdef CONFIG_IA32_EMULATION
11974 ENTRY(sysenter_return);
11975 #endif
11976 @@ -63,6 +65,18 @@ int main(void)
11977 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11978 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
11979 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
11980 +
11981 +#ifdef CONFIG_PAX_KERNEXEC
11982 + OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11983 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11984 +#endif
11985 +
11986 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11987 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
11988 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
11989 + OFFSET(PV_MMU_set_pgd, pv_mmu_ops, set_pgd);
11990 +#endif
11991 +
11992 #endif
11993
11994
11995 @@ -115,6 +129,7 @@ int main(void)
11996 ENTRY(cr8);
11997 BLANK();
11998 #undef ENTRY
11999 + DEFINE(TSS_size, sizeof(struct tss_struct));
12000 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
12001 BLANK();
12002 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
12003 @@ -130,6 +145,7 @@ int main(void)
12004
12005 BLANK();
12006 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
12007 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
12008 #ifdef CONFIG_XEN
12009 BLANK();
12010 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
12011 diff -urNp linux-2.6.32.44/arch/x86/kernel/cpu/amd.c linux-2.6.32.44/arch/x86/kernel/cpu/amd.c
12012 --- linux-2.6.32.44/arch/x86/kernel/cpu/amd.c 2011-06-25 12:55:34.000000000 -0400
12013 +++ linux-2.6.32.44/arch/x86/kernel/cpu/amd.c 2011-06-25 12:56:37.000000000 -0400
12014 @@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_c
12015 unsigned int size)
12016 {
12017 /* AMD errata T13 (order #21922) */
12018 - if ((c->x86 == 6)) {
12019 + if (c->x86 == 6) {
12020 /* Duron Rev A0 */
12021 if (c->x86_model == 3 && c->x86_mask == 0)
12022 size = 64;
12023 diff -urNp linux-2.6.32.44/arch/x86/kernel/cpu/common.c linux-2.6.32.44/arch/x86/kernel/cpu/common.c
12024 --- linux-2.6.32.44/arch/x86/kernel/cpu/common.c 2011-03-27 14:31:47.000000000 -0400
12025 +++ linux-2.6.32.44/arch/x86/kernel/cpu/common.c 2011-05-11 18:25:15.000000000 -0400
12026 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
12027
12028 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
12029
12030 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
12031 -#ifdef CONFIG_X86_64
12032 - /*
12033 - * We need valid kernel segments for data and code in long mode too
12034 - * IRET will check the segment types kkeil 2000/10/28
12035 - * Also sysret mandates a special GDT layout
12036 - *
12037 - * TLS descriptors are currently at a different place compared to i386.
12038 - * Hopefully nobody expects them at a fixed place (Wine?)
12039 - */
12040 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
12041 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
12042 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
12043 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
12044 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
12045 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
12046 -#else
12047 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
12048 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12049 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
12050 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
12051 - /*
12052 - * Segments used for calling PnP BIOS have byte granularity.
12053 - * They code segments and data segments have fixed 64k limits,
12054 - * the transfer segment sizes are set at run time.
12055 - */
12056 - /* 32-bit code */
12057 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
12058 - /* 16-bit code */
12059 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
12060 - /* 16-bit data */
12061 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
12062 - /* 16-bit data */
12063 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
12064 - /* 16-bit data */
12065 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
12066 - /*
12067 - * The APM segments have byte granularity and their bases
12068 - * are set at run time. All have 64k limits.
12069 - */
12070 - /* 32-bit code */
12071 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
12072 - /* 16-bit code */
12073 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
12074 - /* data */
12075 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
12076 -
12077 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12078 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12079 - GDT_STACK_CANARY_INIT
12080 -#endif
12081 -} };
12082 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
12083 -
12084 static int __init x86_xsave_setup(char *s)
12085 {
12086 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
12087 @@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
12088 {
12089 struct desc_ptr gdt_descr;
12090
12091 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
12092 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
12093 gdt_descr.size = GDT_SIZE - 1;
12094 load_gdt(&gdt_descr);
12095 /* Reload the per-cpu base */
12096 @@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struc
12097 /* Filter out anything that depends on CPUID levels we don't have */
12098 filter_cpuid_features(c, true);
12099
12100 +#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
12101 + setup_clear_cpu_cap(X86_FEATURE_SEP);
12102 +#endif
12103 +
12104 /* If the model name is still unset, do table lookup. */
12105 if (!c->x86_model_id[0]) {
12106 const char *p;
12107 @@ -980,6 +930,9 @@ static __init int setup_disablecpuid(cha
12108 }
12109 __setup("clearcpuid=", setup_disablecpuid);
12110
12111 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
12112 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
12113 +
12114 #ifdef CONFIG_X86_64
12115 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
12116
12117 @@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
12118 EXPORT_PER_CPU_SYMBOL(current_task);
12119
12120 DEFINE_PER_CPU(unsigned long, kernel_stack) =
12121 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
12122 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
12123 EXPORT_PER_CPU_SYMBOL(kernel_stack);
12124
12125 DEFINE_PER_CPU(char *, irq_stack_ptr) =
12126 @@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(str
12127 {
12128 memset(regs, 0, sizeof(struct pt_regs));
12129 regs->fs = __KERNEL_PERCPU;
12130 - regs->gs = __KERNEL_STACK_CANARY;
12131 + savesegment(gs, regs->gs);
12132
12133 return regs;
12134 }
12135 @@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
12136 int i;
12137
12138 cpu = stack_smp_processor_id();
12139 - t = &per_cpu(init_tss, cpu);
12140 + t = init_tss + cpu;
12141 orig_ist = &per_cpu(orig_ist, cpu);
12142
12143 #ifdef CONFIG_NUMA
12144 @@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
12145 switch_to_new_gdt(cpu);
12146 loadsegment(fs, 0);
12147
12148 - load_idt((const struct desc_ptr *)&idt_descr);
12149 + load_idt(&idt_descr);
12150
12151 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
12152 syscall_init();
12153 @@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
12154 wrmsrl(MSR_KERNEL_GS_BASE, 0);
12155 barrier();
12156
12157 - check_efer();
12158 if (cpu != 0)
12159 enable_x2apic();
12160
12161 @@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
12162 {
12163 int cpu = smp_processor_id();
12164 struct task_struct *curr = current;
12165 - struct tss_struct *t = &per_cpu(init_tss, cpu);
12166 + struct tss_struct *t = init_tss + cpu;
12167 struct thread_struct *thread = &curr->thread;
12168
12169 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
12170 diff -urNp linux-2.6.32.44/arch/x86/kernel/cpu/intel.c linux-2.6.32.44/arch/x86/kernel/cpu/intel.c
12171 --- linux-2.6.32.44/arch/x86/kernel/cpu/intel.c 2011-03-27 14:31:47.000000000 -0400
12172 +++ linux-2.6.32.44/arch/x86/kernel/cpu/intel.c 2011-04-17 15:56:46.000000000 -0400
12173 @@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug
12174 * Update the IDT descriptor and reload the IDT so that
12175 * it uses the read-only mapped virtual address.
12176 */
12177 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
12178 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
12179 load_idt(&idt_descr);
12180 }
12181 #endif
12182 diff -urNp linux-2.6.32.44/arch/x86/kernel/cpu/intel_cacheinfo.c linux-2.6.32.44/arch/x86/kernel/cpu/intel_cacheinfo.c
12183 --- linux-2.6.32.44/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
12184 +++ linux-2.6.32.44/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-04-17 15:56:46.000000000 -0400
12185 @@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kob
12186 return ret;
12187 }
12188
12189 -static struct sysfs_ops sysfs_ops = {
12190 +static const struct sysfs_ops sysfs_ops = {
12191 .show = show,
12192 .store = store,
12193 };
12194 diff -urNp linux-2.6.32.44/arch/x86/kernel/cpu/Makefile linux-2.6.32.44/arch/x86/kernel/cpu/Makefile
12195 --- linux-2.6.32.44/arch/x86/kernel/cpu/Makefile 2011-03-27 14:31:47.000000000 -0400
12196 +++ linux-2.6.32.44/arch/x86/kernel/cpu/Makefile 2011-04-17 15:56:46.000000000 -0400
12197 @@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
12198 CFLAGS_REMOVE_common.o = -pg
12199 endif
12200
12201 -# Make sure load_percpu_segment has no stackprotector
12202 -nostackp := $(call cc-option, -fno-stack-protector)
12203 -CFLAGS_common.o := $(nostackp)
12204 -
12205 obj-y := intel_cacheinfo.o addon_cpuid_features.o
12206 obj-y += proc.o capflags.o powerflags.o common.o
12207 obj-y += vmware.o hypervisor.o sched.o
12208 diff -urNp linux-2.6.32.44/arch/x86/kernel/cpu/mcheck/mce_amd.c linux-2.6.32.44/arch/x86/kernel/cpu/mcheck/mce_amd.c
12209 --- linux-2.6.32.44/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:56:59.000000000 -0400
12210 +++ linux-2.6.32.44/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:57:13.000000000 -0400
12211 @@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kob
12212 return ret;
12213 }
12214
12215 -static struct sysfs_ops threshold_ops = {
12216 +static const struct sysfs_ops threshold_ops = {
12217 .show = show,
12218 .store = store,
12219 };
12220 diff -urNp linux-2.6.32.44/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.32.44/arch/x86/kernel/cpu/mcheck/mce.c
12221 --- linux-2.6.32.44/arch/x86/kernel/cpu/mcheck/mce.c 2011-03-27 14:31:47.000000000 -0400
12222 +++ linux-2.6.32.44/arch/x86/kernel/cpu/mcheck/mce.c 2011-05-04 17:56:20.000000000 -0400
12223 @@ -43,6 +43,7 @@
12224 #include <asm/ipi.h>
12225 #include <asm/mce.h>
12226 #include <asm/msr.h>
12227 +#include <asm/local.h>
12228
12229 #include "mce-internal.h"
12230
12231 @@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
12232 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
12233 m->cs, m->ip);
12234
12235 - if (m->cs == __KERNEL_CS)
12236 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
12237 print_symbol("{%s}", m->ip);
12238 pr_cont("\n");
12239 }
12240 @@ -221,10 +222,10 @@ static void print_mce_tail(void)
12241
12242 #define PANIC_TIMEOUT 5 /* 5 seconds */
12243
12244 -static atomic_t mce_paniced;
12245 +static atomic_unchecked_t mce_paniced;
12246
12247 static int fake_panic;
12248 -static atomic_t mce_fake_paniced;
12249 +static atomic_unchecked_t mce_fake_paniced;
12250
12251 /* Panic in progress. Enable interrupts and wait for final IPI */
12252 static void wait_for_panic(void)
12253 @@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct
12254 /*
12255 * Make sure only one CPU runs in machine check panic
12256 */
12257 - if (atomic_inc_return(&mce_paniced) > 1)
12258 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
12259 wait_for_panic();
12260 barrier();
12261
12262 @@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct
12263 console_verbose();
12264 } else {
12265 /* Don't log too much for fake panic */
12266 - if (atomic_inc_return(&mce_fake_paniced) > 1)
12267 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
12268 return;
12269 }
12270 print_mce_head();
12271 @@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
12272 * might have been modified by someone else.
12273 */
12274 rmb();
12275 - if (atomic_read(&mce_paniced))
12276 + if (atomic_read_unchecked(&mce_paniced))
12277 wait_for_panic();
12278 if (!monarch_timeout)
12279 goto out;
12280 @@ -1429,14 +1430,14 @@ void __cpuinit mcheck_init(struct cpuinf
12281 */
12282
12283 static DEFINE_SPINLOCK(mce_state_lock);
12284 -static int open_count; /* #times opened */
12285 +static local_t open_count; /* #times opened */
12286 static int open_exclu; /* already open exclusive? */
12287
12288 static int mce_open(struct inode *inode, struct file *file)
12289 {
12290 spin_lock(&mce_state_lock);
12291
12292 - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
12293 + if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
12294 spin_unlock(&mce_state_lock);
12295
12296 return -EBUSY;
12297 @@ -1444,7 +1445,7 @@ static int mce_open(struct inode *inode,
12298
12299 if (file->f_flags & O_EXCL)
12300 open_exclu = 1;
12301 - open_count++;
12302 + local_inc(&open_count);
12303
12304 spin_unlock(&mce_state_lock);
12305
12306 @@ -1455,7 +1456,7 @@ static int mce_release(struct inode *ino
12307 {
12308 spin_lock(&mce_state_lock);
12309
12310 - open_count--;
12311 + local_dec(&open_count);
12312 open_exclu = 0;
12313
12314 spin_unlock(&mce_state_lock);
12315 @@ -2082,7 +2083,7 @@ struct dentry *mce_get_debugfs_dir(void)
12316 static void mce_reset(void)
12317 {
12318 cpu_missing = 0;
12319 - atomic_set(&mce_fake_paniced, 0);
12320 + atomic_set_unchecked(&mce_fake_paniced, 0);
12321 atomic_set(&mce_executing, 0);
12322 atomic_set(&mce_callin, 0);
12323 atomic_set(&global_nwo, 0);
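The mce_paniced/mce_fake_paniced conversions above follow PaX's REFCOUNT hardening: atomic_t increments are overflow-checked under that option, so counters that are not reference counts and may legitimately grow without bound are switched to the *_unchecked variants (the open_count bookkeeping is likewise moved to local_t operations). A hypothetical user-space illustration of the checked-versus-unchecked distinction -- not the PaX implementation, which performs the check inside the atomic asm itself:

	#include <limits.h>
	#include <stdio.h>

	/* "checked" increment: detect the wrap instead of silently wrapping */
	static int checked_inc(int *v)
	{
		int next;

		if (__builtin_add_overflow(*v, 1, &next))
			return -1;	/* a hardened kernel would trap here */
		*v = next;
		return 0;
	}

	/* "unchecked" increment: wraps silently, as atomic_inc_unchecked() may */
	static void unchecked_inc(int *v)
	{
		*v = (int)((unsigned int)*v + 1U);
	}

	int main(void)
	{
		int checked = INT_MAX, unchecked = INT_MAX;

		if (checked_inc(&checked))
			printf("overflow caught, value kept at %d\n", checked);
		unchecked_inc(&unchecked);
		printf("unchecked counter wrapped to %d\n", unchecked);
		return 0;
	}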
12324 diff -urNp linux-2.6.32.44/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-2.6.32.44/arch/x86/kernel/cpu/mcheck/mce-inject.c
12325 --- linux-2.6.32.44/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-03-27 14:31:47.000000000 -0400
12326 +++ linux-2.6.32.44/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-08-05 20:33:55.000000000 -0400
12327 @@ -211,7 +211,9 @@ static ssize_t mce_write(struct file *fi
12328 static int inject_init(void)
12329 {
12330 printk(KERN_INFO "Machine check injector initialized\n");
12331 - mce_chrdev_ops.write = mce_write;
12332 + pax_open_kernel();
12333 + *(void **)&mce_chrdev_ops.write = mce_write;
12334 + pax_close_kernel();
12335 register_die_notifier(&mce_raise_nb);
12336 return 0;
12337 }
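In the mce-inject hunk above, mce_chrdev_ops is expected to be read-only once constification is in effect, so the one legitimate late assignment becomes a write through a cast pointer bracketed by pax_open_kernel()/pax_close_kernel(), which briefly permit writes to otherwise read-only kernel data. A rough user-space analogy of that bracket pattern using mprotect() -- an illustration only, not how the kernel helpers work internally:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		size_t pagesz = (size_t)sysconf(_SC_PAGESIZE);
		char *page = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
				  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (page == MAP_FAILED)
			return EXIT_FAILURE;

		strcpy(page, "initial ops table");
		mprotect(page, pagesz, PROT_READ);		/* normal state: read-only    */

		mprotect(page, pagesz, PROT_READ | PROT_WRITE);	/* "pax_open_kernel()"        */
		strcpy(page, "late write handler hookup");	/* the one deliberate update  */
		mprotect(page, pagesz, PROT_READ);		/* "pax_close_kernel()"       */

		printf("%s\n", page);
		return EXIT_SUCCESS;
	}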
12338 diff -urNp linux-2.6.32.44/arch/x86/kernel/cpu/mtrr/amd.c linux-2.6.32.44/arch/x86/kernel/cpu/mtrr/amd.c
12339 --- linux-2.6.32.44/arch/x86/kernel/cpu/mtrr/amd.c 2011-03-27 14:31:47.000000000 -0400
12340 +++ linux-2.6.32.44/arch/x86/kernel/cpu/mtrr/amd.c 2011-04-17 15:56:46.000000000 -0400
12341 @@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base
12342 return 0;
12343 }
12344
12345 -static struct mtrr_ops amd_mtrr_ops = {
12346 +static const struct mtrr_ops amd_mtrr_ops = {
12347 .vendor = X86_VENDOR_AMD,
12348 .set = amd_set_mtrr,
12349 .get = amd_get_mtrr,
12350 diff -urNp linux-2.6.32.44/arch/x86/kernel/cpu/mtrr/centaur.c linux-2.6.32.44/arch/x86/kernel/cpu/mtrr/centaur.c
12351 --- linux-2.6.32.44/arch/x86/kernel/cpu/mtrr/centaur.c 2011-03-27 14:31:47.000000000 -0400
12352 +++ linux-2.6.32.44/arch/x86/kernel/cpu/mtrr/centaur.c 2011-04-17 15:56:46.000000000 -0400
12353 @@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long
12354 return 0;
12355 }
12356
12357 -static struct mtrr_ops centaur_mtrr_ops = {
12358 +static const struct mtrr_ops centaur_mtrr_ops = {
12359 .vendor = X86_VENDOR_CENTAUR,
12360 .set = centaur_set_mcr,
12361 .get = centaur_get_mcr,
12362 diff -urNp linux-2.6.32.44/arch/x86/kernel/cpu/mtrr/cyrix.c linux-2.6.32.44/arch/x86/kernel/cpu/mtrr/cyrix.c
12363 --- linux-2.6.32.44/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-03-27 14:31:47.000000000 -0400
12364 +++ linux-2.6.32.44/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-04-17 15:56:46.000000000 -0400
12365 @@ -265,7 +265,7 @@ static void cyrix_set_all(void)
12366 post_set();
12367 }
12368
12369 -static struct mtrr_ops cyrix_mtrr_ops = {
12370 +static const struct mtrr_ops cyrix_mtrr_ops = {
12371 .vendor = X86_VENDOR_CYRIX,
12372 .set_all = cyrix_set_all,
12373 .set = cyrix_set_arr,
12374 diff -urNp linux-2.6.32.44/arch/x86/kernel/cpu/mtrr/generic.c linux-2.6.32.44/arch/x86/kernel/cpu/mtrr/generic.c
12375 --- linux-2.6.32.44/arch/x86/kernel/cpu/mtrr/generic.c 2011-03-27 14:31:47.000000000 -0400
12376 +++ linux-2.6.32.44/arch/x86/kernel/cpu/mtrr/generic.c 2011-04-23 12:56:10.000000000 -0400
12377 @@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
12378 /*
12379 * Generic structure...
12380 */
12381 -struct mtrr_ops generic_mtrr_ops = {
12382 +const struct mtrr_ops generic_mtrr_ops = {
12383 .use_intel_if = 1,
12384 .set_all = generic_set_all,
12385 .get = generic_get_mtrr,
12386 diff -urNp linux-2.6.32.44/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.32.44/arch/x86/kernel/cpu/mtrr/main.c
12387 --- linux-2.6.32.44/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:00:52.000000000 -0400
12388 +++ linux-2.6.32.44/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:03:05.000000000 -0400
12389 @@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
12390 u64 size_or_mask, size_and_mask;
12391 static bool mtrr_aps_delayed_init;
12392
12393 -static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
12394 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
12395
12396 -struct mtrr_ops *mtrr_if;
12397 +const struct mtrr_ops *mtrr_if;
12398
12399 static void set_mtrr(unsigned int reg, unsigned long base,
12400 unsigned long size, mtrr_type type);
12401
12402 -void set_mtrr_ops(struct mtrr_ops *ops)
12403 +void set_mtrr_ops(const struct mtrr_ops *ops)
12404 {
12405 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
12406 mtrr_ops[ops->vendor] = ops;
12407 diff -urNp linux-2.6.32.44/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.32.44/arch/x86/kernel/cpu/mtrr/mtrr.h
12408 --- linux-2.6.32.44/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-03-27 14:31:47.000000000 -0400
12409 +++ linux-2.6.32.44/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-04-17 15:56:46.000000000 -0400
12410 @@ -12,19 +12,19 @@
12411 extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
12412
12413 struct mtrr_ops {
12414 - u32 vendor;
12415 - u32 use_intel_if;
12416 - void (*set)(unsigned int reg, unsigned long base,
12417 + const u32 vendor;
12418 + const u32 use_intel_if;
12419 + void (* const set)(unsigned int reg, unsigned long base,
12420 unsigned long size, mtrr_type type);
12421 - void (*set_all)(void);
12422 + void (* const set_all)(void);
12423
12424 - void (*get)(unsigned int reg, unsigned long *base,
12425 + void (* const get)(unsigned int reg, unsigned long *base,
12426 unsigned long *size, mtrr_type *type);
12427 - int (*get_free_region)(unsigned long base, unsigned long size,
12428 + int (* const get_free_region)(unsigned long base, unsigned long size,
12429 int replace_reg);
12430 - int (*validate_add_page)(unsigned long base, unsigned long size,
12431 + int (* const validate_add_page)(unsigned long base, unsigned long size,
12432 unsigned int type);
12433 - int (*have_wrcomb)(void);
12434 + int (* const have_wrcomb)(void);
12435 };
12436
12437 extern int generic_get_free_region(unsigned long base, unsigned long size,
12438 @@ -32,7 +32,7 @@ extern int generic_get_free_region(unsig
12439 extern int generic_validate_add_page(unsigned long base, unsigned long size,
12440 unsigned int type);
12441
12442 -extern struct mtrr_ops generic_mtrr_ops;
12443 +extern const struct mtrr_ops generic_mtrr_ops;
12444
12445 extern int positive_have_wrcomb(void);
12446
12447 @@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int in
12448 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
12449 void get_mtrr_state(void);
12450
12451 -extern void set_mtrr_ops(struct mtrr_ops *ops);
12452 +extern void set_mtrr_ops(const struct mtrr_ops *ops);
12453
12454 extern u64 size_or_mask, size_and_mask;
12455 -extern struct mtrr_ops *mtrr_if;
12456 +extern const struct mtrr_ops *mtrr_if;
12457
12458 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
12459 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
12460 diff -urNp linux-2.6.32.44/arch/x86/kernel/cpu/perfctr-watchdog.c linux-2.6.32.44/arch/x86/kernel/cpu/perfctr-watchdog.c
12461 --- linux-2.6.32.44/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-03-27 14:31:47.000000000 -0400
12462 +++ linux-2.6.32.44/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-04-17 15:56:46.000000000 -0400
12463 @@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
12464
12465 /* Interface defining a CPU specific perfctr watchdog */
12466 struct wd_ops {
12467 - int (*reserve)(void);
12468 - void (*unreserve)(void);
12469 - int (*setup)(unsigned nmi_hz);
12470 - void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12471 - void (*stop)(void);
12472 + int (* const reserve)(void);
12473 + void (* const unreserve)(void);
12474 + int (* const setup)(unsigned nmi_hz);
12475 + void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12476 + void (* const stop)(void);
12477 unsigned perfctr;
12478 unsigned evntsel;
12479 u64 checkbit;
12480 @@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
12481 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
12482 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
12483
12484 +/* cannot be const */
12485 static struct wd_ops intel_arch_wd_ops;
12486
12487 static int setup_intel_arch_watchdog(unsigned nmi_hz)
12488 @@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(uns
12489 return 1;
12490 }
12491
12492 +/* cannot be const */
12493 static struct wd_ops intel_arch_wd_ops __read_mostly = {
12494 .reserve = single_msr_reserve,
12495 .unreserve = single_msr_unreserve,
12496 diff -urNp linux-2.6.32.44/arch/x86/kernel/cpu/perf_event.c linux-2.6.32.44/arch/x86/kernel/cpu/perf_event.c
12497 --- linux-2.6.32.44/arch/x86/kernel/cpu/perf_event.c 2011-03-27 14:31:47.000000000 -0400
12498 +++ linux-2.6.32.44/arch/x86/kernel/cpu/perf_event.c 2011-05-04 17:56:20.000000000 -0400
12499 @@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event
12500 * count to the generic event atomically:
12501 */
12502 again:
12503 - prev_raw_count = atomic64_read(&hwc->prev_count);
12504 + prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
12505 rdmsrl(hwc->event_base + idx, new_raw_count);
12506
12507 - if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
12508 + if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
12509 new_raw_count) != prev_raw_count)
12510 goto again;
12511
12512 @@ -741,7 +741,7 @@ again:
12513 delta = (new_raw_count << shift) - (prev_raw_count << shift);
12514 delta >>= shift;
12515
12516 - atomic64_add(delta, &event->count);
12517 + atomic64_add_unchecked(delta, &event->count);
12518 atomic64_sub(delta, &hwc->period_left);
12519
12520 return new_raw_count;
12521 @@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_ev
12522 * The hw event starts counting from this event offset,
12523 * mark it to be able to extra future deltas:
12524 */
12525 - atomic64_set(&hwc->prev_count, (u64)-left);
12526 + atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
12527
12528 err = checking_wrmsrl(hwc->event_base + idx,
12529 (u64)(-left) & x86_pmu.event_mask);
12530 @@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs
12531 break;
12532
12533 callchain_store(entry, frame.return_address);
12534 - fp = frame.next_frame;
12535 + fp = (__force const void __user *)frame.next_frame;
12536 }
12537 }
12538
12539 diff -urNp linux-2.6.32.44/arch/x86/kernel/crash.c linux-2.6.32.44/arch/x86/kernel/crash.c
12540 --- linux-2.6.32.44/arch/x86/kernel/crash.c 2011-03-27 14:31:47.000000000 -0400
12541 +++ linux-2.6.32.44/arch/x86/kernel/crash.c 2011-04-17 15:56:46.000000000 -0400
12542 @@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu,
12543 regs = args->regs;
12544
12545 #ifdef CONFIG_X86_32
12546 - if (!user_mode_vm(regs)) {
12547 + if (!user_mode(regs)) {
12548 crash_fixup_ss_esp(&fixed_regs, regs);
12549 regs = &fixed_regs;
12550 }
12551 diff -urNp linux-2.6.32.44/arch/x86/kernel/doublefault_32.c linux-2.6.32.44/arch/x86/kernel/doublefault_32.c
12552 --- linux-2.6.32.44/arch/x86/kernel/doublefault_32.c 2011-03-27 14:31:47.000000000 -0400
12553 +++ linux-2.6.32.44/arch/x86/kernel/doublefault_32.c 2011-04-17 15:56:46.000000000 -0400
12554 @@ -11,7 +11,7 @@
12555
12556 #define DOUBLEFAULT_STACKSIZE (1024)
12557 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12558 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12559 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12560
12561 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12562
12563 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
12564 unsigned long gdt, tss;
12565
12566 store_gdt(&gdt_desc);
12567 - gdt = gdt_desc.address;
12568 + gdt = (unsigned long)gdt_desc.address;
12569
12570 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12571
12572 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
12573 /* 0x2 bit is always set */
12574 .flags = X86_EFLAGS_SF | 0x2,
12575 .sp = STACK_START,
12576 - .es = __USER_DS,
12577 + .es = __KERNEL_DS,
12578 .cs = __KERNEL_CS,
12579 .ss = __KERNEL_DS,
12580 - .ds = __USER_DS,
12581 + .ds = __KERNEL_DS,
12582 .fs = __KERNEL_PERCPU,
12583
12584 .__cr3 = __pa_nodebug(swapper_pg_dir),
12585 diff -urNp linux-2.6.32.44/arch/x86/kernel/dumpstack_32.c linux-2.6.32.44/arch/x86/kernel/dumpstack_32.c
12586 --- linux-2.6.32.44/arch/x86/kernel/dumpstack_32.c 2011-03-27 14:31:47.000000000 -0400
12587 +++ linux-2.6.32.44/arch/x86/kernel/dumpstack_32.c 2011-04-17 15:56:46.000000000 -0400
12588 @@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task
12589 #endif
12590
12591 for (;;) {
12592 - struct thread_info *context;
12593 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12594 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12595
12596 - context = (struct thread_info *)
12597 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12598 - bp = print_context_stack(context, stack, bp, ops,
12599 - data, NULL, &graph);
12600 -
12601 - stack = (unsigned long *)context->previous_esp;
12602 - if (!stack)
12603 + if (stack_start == task_stack_page(task))
12604 break;
12605 + stack = *(unsigned long **)stack_start;
12606 if (ops->stack(data, "IRQ") < 0)
12607 break;
12608 touch_nmi_watchdog();
12609 @@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs
12610 * When in-kernel, we also print out the stack and code at the
12611 * time of the fault..
12612 */
12613 - if (!user_mode_vm(regs)) {
12614 + if (!user_mode(regs)) {
12615 unsigned int code_prologue = code_bytes * 43 / 64;
12616 unsigned int code_len = code_bytes;
12617 unsigned char c;
12618 u8 *ip;
12619 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12620
12621 printk(KERN_EMERG "Stack:\n");
12622 show_stack_log_lvl(NULL, regs, &regs->sp,
12623 @@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs
12624
12625 printk(KERN_EMERG "Code: ");
12626
12627 - ip = (u8 *)regs->ip - code_prologue;
12628 + ip = (u8 *)regs->ip - code_prologue + cs_base;
12629 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12630 /* try starting at IP */
12631 - ip = (u8 *)regs->ip;
12632 + ip = (u8 *)regs->ip + cs_base;
12633 code_len = code_len - code_prologue + 1;
12634 }
12635 for (i = 0; i < code_len; i++, ip++) {
12636 @@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs
12637 printk(" Bad EIP value.");
12638 break;
12639 }
12640 - if (ip == (u8 *)regs->ip)
12641 + if (ip == (u8 *)regs->ip + cs_base)
12642 printk("<%02x> ", c);
12643 else
12644 printk("%02x ", c);
12645 @@ -149,6 +146,7 @@ int is_valid_bugaddr(unsigned long ip)
12646 {
12647 unsigned short ud2;
12648
12649 + ip = ktla_ktva(ip);
12650 if (ip < PAGE_OFFSET)
12651 return 0;
12652 if (probe_kernel_address((unsigned short *)ip, ud2))
12653 diff -urNp linux-2.6.32.44/arch/x86/kernel/dumpstack_64.c linux-2.6.32.44/arch/x86/kernel/dumpstack_64.c
12654 --- linux-2.6.32.44/arch/x86/kernel/dumpstack_64.c 2011-03-27 14:31:47.000000000 -0400
12655 +++ linux-2.6.32.44/arch/x86/kernel/dumpstack_64.c 2011-04-17 15:56:46.000000000 -0400
12656 @@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task
12657 unsigned long *irq_stack_end =
12658 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12659 unsigned used = 0;
12660 - struct thread_info *tinfo;
12661 int graph = 0;
12662 + void *stack_start;
12663
12664 if (!task)
12665 task = current;
12666 @@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task
12667 * current stack address. If the stacks consist of nested
12668 * exceptions
12669 */
12670 - tinfo = task_thread_info(task);
12671 for (;;) {
12672 char *id;
12673 unsigned long *estack_end;
12674 +
12675 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12676 &used, &id);
12677
12678 @@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task
12679 if (ops->stack(data, id) < 0)
12680 break;
12681
12682 - bp = print_context_stack(tinfo, stack, bp, ops,
12683 + bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12684 data, estack_end, &graph);
12685 ops->stack(data, "<EOE>");
12686 /*
12687 @@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task
12688 if (stack >= irq_stack && stack < irq_stack_end) {
12689 if (ops->stack(data, "IRQ") < 0)
12690 break;
12691 - bp = print_context_stack(tinfo, stack, bp,
12692 + bp = print_context_stack(task, irq_stack, stack, bp,
12693 ops, data, irq_stack_end, &graph);
12694 /*
12695 * We link to the next stack (which would be
12696 @@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task
12697 /*
12698 * This handles the process stack:
12699 */
12700 - bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12701 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12702 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12703 put_cpu();
12704 }
12705 EXPORT_SYMBOL(dump_trace);
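Both dump_trace() reworks above stop handing a struct thread_info to the stack walker and instead derive the walking bound directly from the stack pointer: kernel stacks are THREAD_SIZE-aligned, so masking any address inside the stack yields the base of that stack, which is what the reworked print_context_stack() receives as stack_start. A small stand-alone illustration of that masking step (THREAD_SIZE value chosen for the example; the kernel's is per-arch):

	#include <stdint.h>
	#include <stdio.h>

	#define THREAD_SIZE 8192UL	/* illustrative value */

	/* mask an in-stack address down to the THREAD_SIZE boundary */
	static void *stack_base(const void *sp)
	{
		return (void *)((uintptr_t)sp & ~(THREAD_SIZE - 1));
	}

	int main(void)
	{
		int marker;

		printf("sp   = %p\n", (void *)&marker);
		printf("base = %p\n", stack_base(&marker));
		return 0;
	}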
12706 diff -urNp linux-2.6.32.44/arch/x86/kernel/dumpstack.c linux-2.6.32.44/arch/x86/kernel/dumpstack.c
12707 --- linux-2.6.32.44/arch/x86/kernel/dumpstack.c 2011-03-27 14:31:47.000000000 -0400
12708 +++ linux-2.6.32.44/arch/x86/kernel/dumpstack.c 2011-04-17 15:56:46.000000000 -0400
12709 @@ -2,6 +2,9 @@
12710 * Copyright (C) 1991, 1992 Linus Torvalds
12711 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12712 */
12713 +#ifdef CONFIG_GRKERNSEC_HIDESYM
12714 +#define __INCLUDED_BY_HIDESYM 1
12715 +#endif
12716 #include <linux/kallsyms.h>
12717 #include <linux/kprobes.h>
12718 #include <linux/uaccess.h>
12719 @@ -28,7 +31,7 @@ static int die_counter;
12720
12721 void printk_address(unsigned long address, int reliable)
12722 {
12723 - printk(" [<%p>] %s%pS\n", (void *) address,
12724 + printk(" [<%p>] %s%pA\n", (void *) address,
12725 reliable ? "" : "? ", (void *) address);
12726 }
12727
12728 @@ -36,9 +39,8 @@ void printk_address(unsigned long addres
12729 static void
12730 print_ftrace_graph_addr(unsigned long addr, void *data,
12731 const struct stacktrace_ops *ops,
12732 - struct thread_info *tinfo, int *graph)
12733 + struct task_struct *task, int *graph)
12734 {
12735 - struct task_struct *task = tinfo->task;
12736 unsigned long ret_addr;
12737 int index = task->curr_ret_stack;
12738
12739 @@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long ad
12740 static inline void
12741 print_ftrace_graph_addr(unsigned long addr, void *data,
12742 const struct stacktrace_ops *ops,
12743 - struct thread_info *tinfo, int *graph)
12744 + struct task_struct *task, int *graph)
12745 { }
12746 #endif
12747
12748 @@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long ad
12749 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12750 */
12751
12752 -static inline int valid_stack_ptr(struct thread_info *tinfo,
12753 - void *p, unsigned int size, void *end)
12754 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12755 {
12756 - void *t = tinfo;
12757 if (end) {
12758 if (p < end && p >= (end-THREAD_SIZE))
12759 return 1;
12760 @@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct
12761 }
12762
12763 unsigned long
12764 -print_context_stack(struct thread_info *tinfo,
12765 +print_context_stack(struct task_struct *task, void *stack_start,
12766 unsigned long *stack, unsigned long bp,
12767 const struct stacktrace_ops *ops, void *data,
12768 unsigned long *end, int *graph)
12769 {
12770 struct stack_frame *frame = (struct stack_frame *)bp;
12771
12772 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12773 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12774 unsigned long addr;
12775
12776 addr = *stack;
12777 @@ -103,7 +103,7 @@ print_context_stack(struct thread_info *
12778 } else {
12779 ops->address(data, addr, 0);
12780 }
12781 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12782 + print_ftrace_graph_addr(addr, data, ops, task, graph);
12783 }
12784 stack++;
12785 }
12786 @@ -180,7 +180,7 @@ void dump_stack(void)
12787 #endif
12788
12789 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
12790 - current->pid, current->comm, print_tainted(),
12791 + task_pid_nr(current), current->comm, print_tainted(),
12792 init_utsname()->release,
12793 (int)strcspn(init_utsname()->version, " "),
12794 init_utsname()->version);
12795 @@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
12796 return flags;
12797 }
12798
12799 +extern void gr_handle_kernel_exploit(void);
12800 +
12801 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12802 {
12803 if (regs && kexec_should_crash(current))
12804 @@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long fl
12805 panic("Fatal exception in interrupt");
12806 if (panic_on_oops)
12807 panic("Fatal exception");
12808 - do_exit(signr);
12809 +
12810 + gr_handle_kernel_exploit();
12811 +
12812 + do_group_exit(signr);
12813 }
12814
12815 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12816 @@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs
12817 unsigned long flags = oops_begin();
12818 int sig = SIGSEGV;
12819
12820 - if (!user_mode_vm(regs))
12821 + if (!user_mode(regs))
12822 report_bug(regs->ip, regs);
12823
12824 if (__die(str, regs, err))
12825 diff -urNp linux-2.6.32.44/arch/x86/kernel/dumpstack.h linux-2.6.32.44/arch/x86/kernel/dumpstack.h
12826 --- linux-2.6.32.44/arch/x86/kernel/dumpstack.h 2011-03-27 14:31:47.000000000 -0400
12827 +++ linux-2.6.32.44/arch/x86/kernel/dumpstack.h 2011-04-23 13:25:26.000000000 -0400
12828 @@ -15,7 +15,7 @@
12829 #endif
12830
12831 extern unsigned long
12832 -print_context_stack(struct thread_info *tinfo,
12833 +print_context_stack(struct task_struct *task, void *stack_start,
12834 unsigned long *stack, unsigned long bp,
12835 const struct stacktrace_ops *ops, void *data,
12836 unsigned long *end, int *graph);
12837 diff -urNp linux-2.6.32.44/arch/x86/kernel/e820.c linux-2.6.32.44/arch/x86/kernel/e820.c
12838 --- linux-2.6.32.44/arch/x86/kernel/e820.c 2011-03-27 14:31:47.000000000 -0400
12839 +++ linux-2.6.32.44/arch/x86/kernel/e820.c 2011-04-17 15:56:46.000000000 -0400
12840 @@ -733,7 +733,7 @@ struct early_res {
12841 };
12842 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
12843 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
12844 - {}
12845 + { 0, 0, {0}, 0 }
12846 };
12847
12848 static int __init find_overlapped_early(u64 start, u64 end)
12849 diff -urNp linux-2.6.32.44/arch/x86/kernel/early_printk.c linux-2.6.32.44/arch/x86/kernel/early_printk.c
12850 --- linux-2.6.32.44/arch/x86/kernel/early_printk.c 2011-03-27 14:31:47.000000000 -0400
12851 +++ linux-2.6.32.44/arch/x86/kernel/early_printk.c 2011-05-16 21:46:57.000000000 -0400
12852 @@ -7,6 +7,7 @@
12853 #include <linux/pci_regs.h>
12854 #include <linux/pci_ids.h>
12855 #include <linux/errno.h>
12856 +#include <linux/sched.h>
12857 #include <asm/io.h>
12858 #include <asm/processor.h>
12859 #include <asm/fcntl.h>
12860 @@ -170,6 +171,8 @@ asmlinkage void early_printk(const char
12861 int n;
12862 va_list ap;
12863
12864 + pax_track_stack();
12865 +
12866 va_start(ap, fmt);
12867 n = vscnprintf(buf, sizeof(buf), fmt, ap);
12868 early_console->write(early_console, buf, n);
12869 diff -urNp linux-2.6.32.44/arch/x86/kernel/efi_32.c linux-2.6.32.44/arch/x86/kernel/efi_32.c
12870 --- linux-2.6.32.44/arch/x86/kernel/efi_32.c 2011-03-27 14:31:47.000000000 -0400
12871 +++ linux-2.6.32.44/arch/x86/kernel/efi_32.c 2011-04-17 15:56:46.000000000 -0400
12872 @@ -38,70 +38,38 @@
12873 */
12874
12875 static unsigned long efi_rt_eflags;
12876 -static pgd_t efi_bak_pg_dir_pointer[2];
12877 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
12878
12879 -void efi_call_phys_prelog(void)
12880 +void __init efi_call_phys_prelog(void)
12881 {
12882 - unsigned long cr4;
12883 - unsigned long temp;
12884 struct desc_ptr gdt_descr;
12885
12886 local_irq_save(efi_rt_eflags);
12887
12888 - /*
12889 - * If I don't have PAE, I should just duplicate two entries in page
12890 - * directory. If I have PAE, I just need to duplicate one entry in
12891 - * page directory.
12892 - */
12893 - cr4 = read_cr4_safe();
12894
12895 - if (cr4 & X86_CR4_PAE) {
12896 - efi_bak_pg_dir_pointer[0].pgd =
12897 - swapper_pg_dir[pgd_index(0)].pgd;
12898 - swapper_pg_dir[0].pgd =
12899 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
12900 - } else {
12901 - efi_bak_pg_dir_pointer[0].pgd =
12902 - swapper_pg_dir[pgd_index(0)].pgd;
12903 - efi_bak_pg_dir_pointer[1].pgd =
12904 - swapper_pg_dir[pgd_index(0x400000)].pgd;
12905 - swapper_pg_dir[pgd_index(0)].pgd =
12906 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
12907 - temp = PAGE_OFFSET + 0x400000;
12908 - swapper_pg_dir[pgd_index(0x400000)].pgd =
12909 - swapper_pg_dir[pgd_index(temp)].pgd;
12910 - }
12911 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
12912 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
12913 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
12914
12915 /*
12916 * After the lock is released, the original page table is restored.
12917 */
12918 __flush_tlb_all();
12919
12920 - gdt_descr.address = __pa(get_cpu_gdt_table(0));
12921 + gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
12922 gdt_descr.size = GDT_SIZE - 1;
12923 load_gdt(&gdt_descr);
12924 }
12925
12926 -void efi_call_phys_epilog(void)
12927 +void __init efi_call_phys_epilog(void)
12928 {
12929 - unsigned long cr4;
12930 struct desc_ptr gdt_descr;
12931
12932 - gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
12933 + gdt_descr.address = get_cpu_gdt_table(0);
12934 gdt_descr.size = GDT_SIZE - 1;
12935 load_gdt(&gdt_descr);
12936
12937 - cr4 = read_cr4_safe();
12938 -
12939 - if (cr4 & X86_CR4_PAE) {
12940 - swapper_pg_dir[pgd_index(0)].pgd =
12941 - efi_bak_pg_dir_pointer[0].pgd;
12942 - } else {
12943 - swapper_pg_dir[pgd_index(0)].pgd =
12944 - efi_bak_pg_dir_pointer[0].pgd;
12945 - swapper_pg_dir[pgd_index(0x400000)].pgd =
12946 - efi_bak_pg_dir_pointer[1].pgd;
12947 - }
12948 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
12949
12950 /*
12951 * After the lock is released, the original page table is restored.
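The rewritten EFI prolog/epilog above replaces the hand-copied one-or-two PGD entries (and the PAE special case) with clone_pgd_range() over the whole kernel range. For reference, clone_pgd_range() is essentially a typed memcpy over page-global-directory entries; a stand-alone sketch of its shape, with pgd_t stubbed out (the authoritative definition lives in arch/x86/include/asm/pgtable.h):

	#include <string.h>

	typedef struct { unsigned long pgd; } pgd_t;	/* stub for illustration */

	static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
	{
		memcpy(dst, src, (size_t)count * sizeof(pgd_t));
	}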
12952 diff -urNp linux-2.6.32.44/arch/x86/kernel/efi_stub_32.S linux-2.6.32.44/arch/x86/kernel/efi_stub_32.S
12953 --- linux-2.6.32.44/arch/x86/kernel/efi_stub_32.S 2011-03-27 14:31:47.000000000 -0400
12954 +++ linux-2.6.32.44/arch/x86/kernel/efi_stub_32.S 2011-04-17 15:56:46.000000000 -0400
12955 @@ -6,6 +6,7 @@
12956 */
12957
12958 #include <linux/linkage.h>
12959 +#include <linux/init.h>
12960 #include <asm/page_types.h>
12961
12962 /*
12963 @@ -20,7 +21,7 @@
12964 * service functions will comply with gcc calling convention, too.
12965 */
12966
12967 -.text
12968 +__INIT
12969 ENTRY(efi_call_phys)
12970 /*
12971 * 0. The function can only be called in Linux kernel. So CS has been
12972 @@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
12973 * The mapping of lower virtual memory has been created in prelog and
12974 * epilog.
12975 */
12976 - movl $1f, %edx
12977 - subl $__PAGE_OFFSET, %edx
12978 - jmp *%edx
12979 + jmp 1f-__PAGE_OFFSET
12980 1:
12981
12982 /*
12983 @@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
12984 * parameter 2, ..., param n. To make things easy, we save the return
12985 * address of efi_call_phys in a global variable.
12986 */
12987 - popl %edx
12988 - movl %edx, saved_return_addr
12989 - /* get the function pointer into ECX*/
12990 - popl %ecx
12991 - movl %ecx, efi_rt_function_ptr
12992 - movl $2f, %edx
12993 - subl $__PAGE_OFFSET, %edx
12994 - pushl %edx
12995 + popl (saved_return_addr)
12996 + popl (efi_rt_function_ptr)
12997
12998 /*
12999 * 3. Clear PG bit in %CR0.
13000 @@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
13001 /*
13002 * 5. Call the physical function.
13003 */
13004 - jmp *%ecx
13005 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
13006
13007 -2:
13008 /*
13009 * 6. After EFI runtime service returns, control will return to
13010 * following instruction. We'd better readjust stack pointer first.
13011 @@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
13012 movl %cr0, %edx
13013 orl $0x80000000, %edx
13014 movl %edx, %cr0
13015 - jmp 1f
13016 -1:
13017 +
13018 /*
13019 * 8. Now restore the virtual mode from flat mode by
13020 * adding EIP with PAGE_OFFSET.
13021 */
13022 - movl $1f, %edx
13023 - jmp *%edx
13024 + jmp 1f+__PAGE_OFFSET
13025 1:
13026
13027 /*
13028 * 9. Balance the stack. And because EAX contain the return value,
13029 * we'd better not clobber it.
13030 */
13031 - leal efi_rt_function_ptr, %edx
13032 - movl (%edx), %ecx
13033 - pushl %ecx
13034 + pushl (efi_rt_function_ptr)
13035
13036 /*
13037 - * 10. Push the saved return address onto the stack and return.
13038 + * 10. Return to the saved return address.
13039 */
13040 - leal saved_return_addr, %edx
13041 - movl (%edx), %ecx
13042 - pushl %ecx
13043 - ret
13044 + jmpl *(saved_return_addr)
13045 ENDPROC(efi_call_phys)
13046 .previous
13047
13048 -.data
13049 +__INITDATA
13050 saved_return_addr:
13051 .long 0
13052 efi_rt_function_ptr:
13053 diff -urNp linux-2.6.32.44/arch/x86/kernel/entry_32.S linux-2.6.32.44/arch/x86/kernel/entry_32.S
13054 --- linux-2.6.32.44/arch/x86/kernel/entry_32.S 2011-03-27 14:31:47.000000000 -0400
13055 +++ linux-2.6.32.44/arch/x86/kernel/entry_32.S 2011-05-22 23:02:03.000000000 -0400
13056 @@ -185,13 +185,146 @@
13057 /*CFI_REL_OFFSET gs, PT_GS*/
13058 .endm
13059 .macro SET_KERNEL_GS reg
13060 +
13061 +#ifdef CONFIG_CC_STACKPROTECTOR
13062 movl $(__KERNEL_STACK_CANARY), \reg
13063 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
13064 + movl $(__USER_DS), \reg
13065 +#else
13066 + xorl \reg, \reg
13067 +#endif
13068 +
13069 movl \reg, %gs
13070 .endm
13071
13072 #endif /* CONFIG_X86_32_LAZY_GS */
13073
13074 -.macro SAVE_ALL
13075 +.macro pax_enter_kernel
13076 +#ifdef CONFIG_PAX_KERNEXEC
13077 + call pax_enter_kernel
13078 +#endif
13079 +.endm
13080 +
13081 +.macro pax_exit_kernel
13082 +#ifdef CONFIG_PAX_KERNEXEC
13083 + call pax_exit_kernel
13084 +#endif
13085 +.endm
13086 +
13087 +#ifdef CONFIG_PAX_KERNEXEC
13088 +ENTRY(pax_enter_kernel)
13089 +#ifdef CONFIG_PARAVIRT
13090 + pushl %eax
13091 + pushl %ecx
13092 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
13093 + mov %eax, %esi
13094 +#else
13095 + mov %cr0, %esi
13096 +#endif
13097 + bts $16, %esi
13098 + jnc 1f
13099 + mov %cs, %esi
13100 + cmp $__KERNEL_CS, %esi
13101 + jz 3f
13102 + ljmp $__KERNEL_CS, $3f
13103 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
13104 +2:
13105 +#ifdef CONFIG_PARAVIRT
13106 + mov %esi, %eax
13107 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
13108 +#else
13109 + mov %esi, %cr0
13110 +#endif
13111 +3:
13112 +#ifdef CONFIG_PARAVIRT
13113 + popl %ecx
13114 + popl %eax
13115 +#endif
13116 + ret
13117 +ENDPROC(pax_enter_kernel)
13118 +
13119 +ENTRY(pax_exit_kernel)
13120 +#ifdef CONFIG_PARAVIRT
13121 + pushl %eax
13122 + pushl %ecx
13123 +#endif
13124 + mov %cs, %esi
13125 + cmp $__KERNEXEC_KERNEL_CS, %esi
13126 + jnz 2f
13127 +#ifdef CONFIG_PARAVIRT
13128 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
13129 + mov %eax, %esi
13130 +#else
13131 + mov %cr0, %esi
13132 +#endif
13133 + btr $16, %esi
13134 + ljmp $__KERNEL_CS, $1f
13135 +1:
13136 +#ifdef CONFIG_PARAVIRT
13137 + mov %esi, %eax
13138 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
13139 +#else
13140 + mov %esi, %cr0
13141 +#endif
13142 +2:
13143 +#ifdef CONFIG_PARAVIRT
13144 + popl %ecx
13145 + popl %eax
13146 +#endif
13147 + ret
13148 +ENDPROC(pax_exit_kernel)
13149 +#endif
13150 +
13151 +.macro pax_erase_kstack
13152 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13153 + call pax_erase_kstack
13154 +#endif
13155 +.endm
13156 +
13157 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13158 +/*
13159 + * ebp: thread_info
13160 + * ecx, edx: can be clobbered
13161 + */
13162 +ENTRY(pax_erase_kstack)
13163 + pushl %edi
13164 + pushl %eax
13165 +
13166 + mov TI_lowest_stack(%ebp), %edi
13167 + mov $-0xBEEF, %eax
13168 + std
13169 +
13170 +1: mov %edi, %ecx
13171 + and $THREAD_SIZE_asm - 1, %ecx
13172 + shr $2, %ecx
13173 + repne scasl
13174 + jecxz 2f
13175 +
13176 + cmp $2*16, %ecx
13177 + jc 2f
13178 +
13179 + mov $2*16, %ecx
13180 + repe scasl
13181 + jecxz 2f
13182 + jne 1b
13183 +
13184 +2: cld
13185 + mov %esp, %ecx
13186 + sub %edi, %ecx
13187 + shr $2, %ecx
13188 + rep stosl
13189 +
13190 + mov TI_task_thread_sp0(%ebp), %edi
13191 + sub $128, %edi
13192 + mov %edi, TI_lowest_stack(%ebp)
13193 +
13194 + popl %eax
13195 + popl %edi
13196 + ret
13197 +ENDPROC(pax_erase_kstack)
13198 +#endif
13199 +
13200 +.macro __SAVE_ALL _DS
13201 cld
13202 PUSH_GS
13203 pushl %fs
13204 @@ -224,7 +357,7 @@
13205 pushl %ebx
13206 CFI_ADJUST_CFA_OFFSET 4
13207 CFI_REL_OFFSET ebx, 0
13208 - movl $(__USER_DS), %edx
13209 + movl $\_DS, %edx
13210 movl %edx, %ds
13211 movl %edx, %es
13212 movl $(__KERNEL_PERCPU), %edx
13213 @@ -232,6 +365,15 @@
13214 SET_KERNEL_GS %edx
13215 .endm
13216
13217 +.macro SAVE_ALL
13218 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
13219 + __SAVE_ALL __KERNEL_DS
13220 + pax_enter_kernel
13221 +#else
13222 + __SAVE_ALL __USER_DS
13223 +#endif
13224 +.endm
13225 +
13226 .macro RESTORE_INT_REGS
13227 popl %ebx
13228 CFI_ADJUST_CFA_OFFSET -4
13229 @@ -352,7 +494,15 @@ check_userspace:
13230 movb PT_CS(%esp), %al
13231 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
13232 cmpl $USER_RPL, %eax
13233 +
13234 +#ifdef CONFIG_PAX_KERNEXEC
13235 + jae resume_userspace
13236 +
13237 + PAX_EXIT_KERNEL
13238 + jmp resume_kernel
13239 +#else
13240 jb resume_kernel # not returning to v8086 or userspace
13241 +#endif
13242
13243 ENTRY(resume_userspace)
13244 LOCKDEP_SYS_EXIT
13245 @@ -364,7 +514,7 @@ ENTRY(resume_userspace)
13246 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
13247 # int/exception return?
13248 jne work_pending
13249 - jmp restore_all
13250 + jmp restore_all_pax
13251 END(ret_from_exception)
13252
13253 #ifdef CONFIG_PREEMPT
13254 @@ -414,25 +564,36 @@ sysenter_past_esp:
13255 /*CFI_REL_OFFSET cs, 0*/
13256 /*
13257 * Push current_thread_info()->sysenter_return to the stack.
13258 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
13259 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
13260 */
13261 - pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
13262 + pushl $0
13263 CFI_ADJUST_CFA_OFFSET 4
13264 CFI_REL_OFFSET eip, 0
13265
13266 pushl %eax
13267 CFI_ADJUST_CFA_OFFSET 4
13268 SAVE_ALL
13269 + GET_THREAD_INFO(%ebp)
13270 + movl TI_sysenter_return(%ebp),%ebp
13271 + movl %ebp,PT_EIP(%esp)
13272 ENABLE_INTERRUPTS(CLBR_NONE)
13273
13274 /*
13275 * Load the potential sixth argument from user stack.
13276 * Careful about security.
13277 */
13278 + movl PT_OLDESP(%esp),%ebp
13279 +
13280 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13281 + mov PT_OLDSS(%esp),%ds
13282 +1: movl %ds:(%ebp),%ebp
13283 + push %ss
13284 + pop %ds
13285 +#else
13286 cmpl $__PAGE_OFFSET-3,%ebp
13287 jae syscall_fault
13288 1: movl (%ebp),%ebp
13289 +#endif
13290 +
13291 movl %ebp,PT_EBP(%esp)
13292 .section __ex_table,"a"
13293 .align 4
13294 @@ -455,12 +616,23 @@ sysenter_do_call:
13295 testl $_TIF_ALLWORK_MASK, %ecx
13296 jne sysexit_audit
13297 sysenter_exit:
13298 +
13299 +#ifdef CONFIG_PAX_RANDKSTACK
13300 + pushl_cfi %eax
13301 + call pax_randomize_kstack
13302 + popl_cfi %eax
13303 +#endif
13304 +
13305 + pax_erase_kstack
13306 +
13307 /* if something modifies registers it must also disable sysexit */
13308 movl PT_EIP(%esp), %edx
13309 movl PT_OLDESP(%esp), %ecx
13310 xorl %ebp,%ebp
13311 TRACE_IRQS_ON
13312 1: mov PT_FS(%esp), %fs
13313 +2: mov PT_DS(%esp), %ds
13314 +3: mov PT_ES(%esp), %es
13315 PTGS_TO_GS
13316 ENABLE_INTERRUPTS_SYSEXIT
13317
13318 @@ -477,6 +649,9 @@ sysenter_audit:
13319 movl %eax,%edx /* 2nd arg: syscall number */
13320 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
13321 call audit_syscall_entry
13322 +
13323 + pax_erase_kstack
13324 +
13325 pushl %ebx
13326 CFI_ADJUST_CFA_OFFSET 4
13327 movl PT_EAX(%esp),%eax /* reload syscall number */
13328 @@ -504,11 +679,17 @@ sysexit_audit:
13329
13330 CFI_ENDPROC
13331 .pushsection .fixup,"ax"
13332 -2: movl $0,PT_FS(%esp)
13333 +4: movl $0,PT_FS(%esp)
13334 + jmp 1b
13335 +5: movl $0,PT_DS(%esp)
13336 + jmp 1b
13337 +6: movl $0,PT_ES(%esp)
13338 jmp 1b
13339 .section __ex_table,"a"
13340 .align 4
13341 - .long 1b,2b
13342 + .long 1b,4b
13343 + .long 2b,5b
13344 + .long 3b,6b
13345 .popsection
13346 PTGS_TO_GS_EX
13347 ENDPROC(ia32_sysenter_target)
13348 @@ -538,6 +719,14 @@ syscall_exit:
13349 testl $_TIF_ALLWORK_MASK, %ecx # current->work
13350 jne syscall_exit_work
13351
13352 +restore_all_pax:
13353 +
13354 +#ifdef CONFIG_PAX_RANDKSTACK
13355 + call pax_randomize_kstack
13356 +#endif
13357 +
13358 + pax_erase_kstack
13359 +
13360 restore_all:
13361 TRACE_IRQS_IRET
13362 restore_all_notrace:
13363 @@ -602,7 +791,13 @@ ldt_ss:
13364 mov PT_OLDESP(%esp), %eax /* load userspace esp */
13365 mov %dx, %ax /* eax: new kernel esp */
13366 sub %eax, %edx /* offset (low word is 0) */
13367 - PER_CPU(gdt_page, %ebx)
13368 +#ifdef CONFIG_SMP
13369 + movl PER_CPU_VAR(cpu_number), %ebx
13370 + shll $PAGE_SHIFT_asm, %ebx
13371 + addl $cpu_gdt_table, %ebx
13372 +#else
13373 + movl $cpu_gdt_table, %ebx
13374 +#endif
13375 shr $16, %edx
13376 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
13377 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
13378 @@ -636,31 +831,25 @@ work_resched:
13379 movl TI_flags(%ebp), %ecx
13380 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
13381 # than syscall tracing?
13382 - jz restore_all
13383 + jz restore_all_pax
13384 testb $_TIF_NEED_RESCHED, %cl
13385 jnz work_resched
13386
13387 work_notifysig: # deal with pending signals and
13388 # notify-resume requests
13389 + movl %esp, %eax
13390 #ifdef CONFIG_VM86
13391 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
13392 - movl %esp, %eax
13393 - jne work_notifysig_v86 # returning to kernel-space or
13394 + jz 1f # returning to kernel-space or
13395 # vm86-space
13396 - xorl %edx, %edx
13397 - call do_notify_resume
13398 - jmp resume_userspace_sig
13399
13400 - ALIGN
13401 -work_notifysig_v86:
13402 pushl %ecx # save ti_flags for do_notify_resume
13403 CFI_ADJUST_CFA_OFFSET 4
13404 call save_v86_state # %eax contains pt_regs pointer
13405 popl %ecx
13406 CFI_ADJUST_CFA_OFFSET -4
13407 movl %eax, %esp
13408 -#else
13409 - movl %esp, %eax
13410 +1:
13411 #endif
13412 xorl %edx, %edx
13413 call do_notify_resume
13414 @@ -673,6 +862,9 @@ syscall_trace_entry:
13415 movl $-ENOSYS,PT_EAX(%esp)
13416 movl %esp, %eax
13417 call syscall_trace_enter
13418 +
13419 + pax_erase_kstack
13420 +
13421 /* What it returned is what we'll actually use. */
13422 cmpl $(nr_syscalls), %eax
13423 jnae syscall_call
13424 @@ -695,6 +887,10 @@ END(syscall_exit_work)
13425
13426 RING0_INT_FRAME # can't unwind into user space anyway
13427 syscall_fault:
13428 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13429 + push %ss
13430 + pop %ds
13431 +#endif
13432 GET_THREAD_INFO(%ebp)
13433 movl $-EFAULT,PT_EAX(%esp)
13434 jmp resume_userspace
13435 @@ -726,6 +922,33 @@ PTREGSCALL(rt_sigreturn)
13436 PTREGSCALL(vm86)
13437 PTREGSCALL(vm86old)
13438
13439 + ALIGN;
13440 +ENTRY(kernel_execve)
13441 + push %ebp
13442 + sub $PT_OLDSS+4,%esp
13443 + push %edi
13444 + push %ecx
13445 + push %eax
13446 + lea 3*4(%esp),%edi
13447 + mov $PT_OLDSS/4+1,%ecx
13448 + xorl %eax,%eax
13449 + rep stosl
13450 + pop %eax
13451 + pop %ecx
13452 + pop %edi
13453 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
13454 + mov %eax,PT_EBX(%esp)
13455 + mov %edx,PT_ECX(%esp)
13456 + mov %ecx,PT_EDX(%esp)
13457 + mov %esp,%eax
13458 + call sys_execve
13459 + GET_THREAD_INFO(%ebp)
13460 + test %eax,%eax
13461 + jz syscall_exit
13462 + add $PT_OLDSS+4,%esp
13463 + pop %ebp
13464 + ret
13465 +
13466 .macro FIXUP_ESPFIX_STACK
13467 /*
13468 * Switch back for ESPFIX stack to the normal zerobased stack
13469 @@ -735,7 +958,13 @@ PTREGSCALL(vm86old)
13470 * normal stack and adjusts ESP with the matching offset.
13471 */
13472 /* fixup the stack */
13473 - PER_CPU(gdt_page, %ebx)
13474 +#ifdef CONFIG_SMP
13475 + movl PER_CPU_VAR(cpu_number), %ebx
13476 + shll $PAGE_SHIFT_asm, %ebx
13477 + addl $cpu_gdt_table, %ebx
13478 +#else
13479 + movl $cpu_gdt_table, %ebx
13480 +#endif
13481 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
13482 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
13483 shl $16, %eax
13484 @@ -1198,7 +1427,6 @@ return_to_handler:
13485 ret
13486 #endif
13487
13488 -.section .rodata,"a"
13489 #include "syscall_table_32.S"
13490
13491 syscall_table_size=(.-sys_call_table)
13492 @@ -1255,9 +1483,12 @@ error_code:
13493 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
13494 REG_TO_PTGS %ecx
13495 SET_KERNEL_GS %ecx
13496 - movl $(__USER_DS), %ecx
13497 + movl $(__KERNEL_DS), %ecx
13498 movl %ecx, %ds
13499 movl %ecx, %es
13500 +
13501 + pax_enter_kernel
13502 +
13503 TRACE_IRQS_OFF
13504 movl %esp,%eax # pt_regs pointer
13505 call *%edi
13506 @@ -1351,6 +1582,9 @@ nmi_stack_correct:
13507 xorl %edx,%edx # zero error code
13508 movl %esp,%eax # pt_regs pointer
13509 call do_nmi
13510 +
13511 + pax_exit_kernel
13512 +
13513 jmp restore_all_notrace
13514 CFI_ENDPROC
13515
13516 @@ -1391,6 +1625,9 @@ nmi_espfix_stack:
13517 FIXUP_ESPFIX_STACK # %eax == %esp
13518 xorl %edx,%edx # zero error code
13519 call do_nmi
13520 +
13521 + pax_exit_kernel
13522 +
13523 RESTORE_REGS
13524 lss 12+4(%esp), %esp # back to espfix stack
13525 CFI_ADJUST_CFA_OFFSET -24
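The pax_erase_kstack routine added to entry_32.S above implements the STACKLEAK idea: on the way back to user space, the part of the kernel stack that the syscall dirtied is overwritten with a poison pattern so stale kernel data cannot seep into the next request. A C-level sketch of the effect -- the real code is the hand-written scan/fill asm above, where the fill value -0xBEEF is the 32-bit pattern 0xffff4111:

	#include <stdint.h>
	#include <stdio.h>

	#define KSTACK_POISON 0xffff4111u	/* -0xBEEF as a 32-bit fill value */

	/* overwrite every word between the deepest address the stack reached
	 * and the current stack pointer; the asm version additionally scans
	 * for already-poisoned words to bound the fill */
	static void erase_kstack(uint32_t *lowest_used, uint32_t *current_sp)
	{
		for (uint32_t *p = lowest_used; p < current_sp; p++)
			*p = KSTACK_POISON;
	}

	int main(void)
	{
		uint32_t fake_stack[32] = { 0 };

		/* pretend words 4..23 were dirtied by a syscall */
		erase_kstack(&fake_stack[4], &fake_stack[24]);
		printf("fake_stack[10] = 0x%08x\n", fake_stack[10]);
		return 0;
	}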
13526 diff -urNp linux-2.6.32.44/arch/x86/kernel/entry_64.S linux-2.6.32.44/arch/x86/kernel/entry_64.S
13527 --- linux-2.6.32.44/arch/x86/kernel/entry_64.S 2011-03-27 14:31:47.000000000 -0400
13528 +++ linux-2.6.32.44/arch/x86/kernel/entry_64.S 2011-06-04 20:30:53.000000000 -0400
13529 @@ -53,6 +53,7 @@
13530 #include <asm/paravirt.h>
13531 #include <asm/ftrace.h>
13532 #include <asm/percpu.h>
13533 +#include <asm/pgtable.h>
13534
13535 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13536 #include <linux/elf-em.h>
13537 @@ -174,6 +175,257 @@ ENTRY(native_usergs_sysret64)
13538 ENDPROC(native_usergs_sysret64)
13539 #endif /* CONFIG_PARAVIRT */
13540
13541 + .macro ljmpq sel, off
13542 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13543 + .byte 0x48; ljmp *1234f(%rip)
13544 + .pushsection .rodata
13545 + .align 16
13546 + 1234: .quad \off; .word \sel
13547 + .popsection
13548 +#else
13549 + pushq $\sel
13550 + pushq $\off
13551 + lretq
13552 +#endif
13553 + .endm
13554 +
13555 + .macro pax_enter_kernel
13556 +#ifdef CONFIG_PAX_KERNEXEC
13557 + call pax_enter_kernel
13558 +#endif
13559 + .endm
13560 +
13561 + .macro pax_exit_kernel
13562 +#ifdef CONFIG_PAX_KERNEXEC
13563 + call pax_exit_kernel
13564 +#endif
13565 + .endm
13566 +
13567 +#ifdef CONFIG_PAX_KERNEXEC
13568 +ENTRY(pax_enter_kernel)
13569 + pushq %rdi
13570 +
13571 +#ifdef CONFIG_PARAVIRT
13572 + PV_SAVE_REGS(CLBR_RDI)
13573 +#endif
13574 +
13575 + GET_CR0_INTO_RDI
13576 + bts $16,%rdi
13577 + jnc 1f
13578 + mov %cs,%edi
13579 + cmp $__KERNEL_CS,%edi
13580 + jz 3f
13581 + ljmpq __KERNEL_CS,3f
13582 +1: ljmpq __KERNEXEC_KERNEL_CS,2f
13583 +2: SET_RDI_INTO_CR0
13584 +3:
13585 +
13586 +#ifdef CONFIG_PARAVIRT
13587 + PV_RESTORE_REGS(CLBR_RDI)
13588 +#endif
13589 +
13590 + popq %rdi
13591 + retq
13592 +ENDPROC(pax_enter_kernel)
13593 +
13594 +ENTRY(pax_exit_kernel)
13595 + pushq %rdi
13596 +
13597 +#ifdef CONFIG_PARAVIRT
13598 + PV_SAVE_REGS(CLBR_RDI)
13599 +#endif
13600 +
13601 + mov %cs,%rdi
13602 + cmp $__KERNEXEC_KERNEL_CS,%edi
13603 + jnz 2f
13604 + GET_CR0_INTO_RDI
13605 + btr $16,%rdi
13606 + ljmpq __KERNEL_CS,1f
13607 +1: SET_RDI_INTO_CR0
13608 +2:
13609 +
13610 +#ifdef CONFIG_PARAVIRT
13611 + PV_RESTORE_REGS(CLBR_RDI);
13612 +#endif
13613 +
13614 + popq %rdi
13615 + retq
13616 +ENDPROC(pax_exit_kernel)
13617 +#endif
13618 +
13619 + .macro pax_enter_kernel_user
13620 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13621 + call pax_enter_kernel_user
13622 +#endif
13623 + .endm
13624 +
13625 + .macro pax_exit_kernel_user
13626 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13627 + call pax_exit_kernel_user
13628 +#endif
13629 +#ifdef CONFIG_PAX_RANDKSTACK
13630 + push %rax
13631 + call pax_randomize_kstack
13632 + pop %rax
13633 +#endif
13634 + pax_erase_kstack
13635 + .endm
13636 +
13637 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13638 +ENTRY(pax_enter_kernel_user)
13639 + pushq %rdi
13640 + pushq %rbx
13641 +
13642 +#ifdef CONFIG_PARAVIRT
13643 + PV_SAVE_REGS(CLBR_RDI)
13644 +#endif
13645 +
13646 + GET_CR3_INTO_RDI
13647 + mov %rdi,%rbx
13648 + add $__START_KERNEL_map,%rbx
13649 + sub phys_base(%rip),%rbx
13650 +
13651 +#ifdef CONFIG_PARAVIRT
13652 + pushq %rdi
13653 + cmpl $0, pv_info+PARAVIRT_enabled
13654 + jz 1f
13655 + i = 0
13656 + .rept USER_PGD_PTRS
13657 + mov i*8(%rbx),%rsi
13658 + mov $0,%sil
13659 + lea i*8(%rbx),%rdi
13660 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
13661 + i = i + 1
13662 + .endr
13663 + jmp 2f
13664 +1:
13665 +#endif
13666 +
13667 + i = 0
13668 + .rept USER_PGD_PTRS
13669 + movb $0,i*8(%rbx)
13670 + i = i + 1
13671 + .endr
13672 +
13673 +#ifdef CONFIG_PARAVIRT
13674 +2: popq %rdi
13675 +#endif
13676 + SET_RDI_INTO_CR3
13677 +
13678 +#ifdef CONFIG_PAX_KERNEXEC
13679 + GET_CR0_INTO_RDI
13680 + bts $16,%rdi
13681 + SET_RDI_INTO_CR0
13682 +#endif
13683 +
13684 +#ifdef CONFIG_PARAVIRT
13685 + PV_RESTORE_REGS(CLBR_RDI)
13686 +#endif
13687 +
13688 + popq %rbx
13689 + popq %rdi
13690 + retq
13691 +ENDPROC(pax_enter_kernel_user)
13692 +
13693 +ENTRY(pax_exit_kernel_user)
13694 + push %rdi
13695 +
13696 +#ifdef CONFIG_PARAVIRT
13697 + pushq %rbx
13698 + PV_SAVE_REGS(CLBR_RDI)
13699 +#endif
13700 +
13701 +#ifdef CONFIG_PAX_KERNEXEC
13702 + GET_CR0_INTO_RDI
13703 + btr $16,%rdi
13704 + SET_RDI_INTO_CR0
13705 +#endif
13706 +
13707 + GET_CR3_INTO_RDI
13708 + add $__START_KERNEL_map,%rdi
13709 + sub phys_base(%rip),%rdi
13710 +
13711 +#ifdef CONFIG_PARAVIRT
13712 + cmpl $0, pv_info+PARAVIRT_enabled
13713 + jz 1f
13714 + mov %rdi,%rbx
13715 + i = 0
13716 + .rept USER_PGD_PTRS
13717 + mov i*8(%rbx),%rsi
13718 + mov $0x67,%sil
13719 + lea i*8(%rbx),%rdi
13720 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
13721 + i = i + 1
13722 + .endr
13723 + jmp 2f
13724 +1:
13725 +#endif
13726 +
13727 + i = 0
13728 + .rept USER_PGD_PTRS
13729 + movb $0x67,i*8(%rdi)
13730 + i = i + 1
13731 + .endr
13732 +
13733 +#ifdef CONFIG_PARAVIRT
13734 +2: PV_RESTORE_REGS(CLBR_RDI)
13735 + popq %rbx
13736 +#endif
13737 +
13738 + popq %rdi
13739 + retq
13740 +ENDPROC(pax_exit_kernel_user)
13741 +#endif
13742 +
13743 +.macro pax_erase_kstack
13744 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13745 + call pax_erase_kstack
13746 +#endif
13747 +.endm
13748 +
13749 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13750 +/*
13751 + * r10: thread_info
13752 + * rcx, rdx: can be clobbered
13753 + */
13754 +ENTRY(pax_erase_kstack)
13755 + pushq %rdi
13756 + pushq %rax
13757 +
13758 + GET_THREAD_INFO(%r10)
13759 + mov TI_lowest_stack(%r10), %rdi
13760 + mov $-0xBEEF, %rax
13761 + std
13762 +
13763 +1: mov %edi, %ecx
13764 + and $THREAD_SIZE_asm - 1, %ecx
13765 + shr $3, %ecx
13766 + repne scasq
13767 + jecxz 2f
13768 +
13769 + cmp $2*8, %ecx
13770 + jc 2f
13771 +
13772 + mov $2*8, %ecx
13773 + repe scasq
13774 + jecxz 2f
13775 + jne 1b
13776 +
13777 +2: cld
13778 + mov %esp, %ecx
13779 + sub %edi, %ecx
13780 + shr $3, %ecx
13781 + rep stosq
13782 +
13783 + mov TI_task_thread_sp0(%r10), %rdi
13784 + sub $256, %rdi
13785 + mov %rdi, TI_lowest_stack(%r10)
13786 +
13787 + popq %rax
13788 + popq %rdi
13789 + ret
13790 +ENDPROC(pax_erase_kstack)
13791 +#endif
13792
13793 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
13794 #ifdef CONFIG_TRACE_IRQFLAGS
13795 @@ -317,7 +569,7 @@ ENTRY(save_args)
13796 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
13797 movq_cfi rbp, 8 /* push %rbp */
13798 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
13799 - testl $3, CS(%rdi)
13800 + testb $3, CS(%rdi)
13801 je 1f
13802 SWAPGS
13803 /*
13804 @@ -409,7 +661,7 @@ ENTRY(ret_from_fork)
13805
13806 RESTORE_REST
13807
13808 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13809 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13810 je int_ret_from_sys_call
13811
13812 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
13813 @@ -455,7 +707,7 @@ END(ret_from_fork)
13814 ENTRY(system_call)
13815 CFI_STARTPROC simple
13816 CFI_SIGNAL_FRAME
13817 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
13818 + CFI_DEF_CFA rsp,0
13819 CFI_REGISTER rip,rcx
13820 /*CFI_REGISTER rflags,r11*/
13821 SWAPGS_UNSAFE_STACK
13822 @@ -468,12 +720,13 @@ ENTRY(system_call_after_swapgs)
13823
13824 movq %rsp,PER_CPU_VAR(old_rsp)
13825 movq PER_CPU_VAR(kernel_stack),%rsp
13826 + pax_enter_kernel_user
13827 /*
13828 * No need to follow this irqs off/on section - it's straight
13829 * and short:
13830 */
13831 ENABLE_INTERRUPTS(CLBR_NONE)
13832 - SAVE_ARGS 8,1
13833 + SAVE_ARGS 8*6,1
13834 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
13835 movq %rcx,RIP-ARGOFFSET(%rsp)
13836 CFI_REL_OFFSET rip,RIP-ARGOFFSET
13837 @@ -502,6 +755,7 @@ sysret_check:
13838 andl %edi,%edx
13839 jnz sysret_careful
13840 CFI_REMEMBER_STATE
13841 + pax_exit_kernel_user
13842 /*
13843 * sysretq will re-enable interrupts:
13844 */
13845 @@ -562,6 +816,9 @@ auditsys:
13846 movq %rax,%rsi /* 2nd arg: syscall number */
13847 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
13848 call audit_syscall_entry
13849 +
13850 + pax_erase_kstack
13851 +
13852 LOAD_ARGS 0 /* reload call-clobbered registers */
13853 jmp system_call_fastpath
13854
13855 @@ -592,6 +849,9 @@ tracesys:
13856 FIXUP_TOP_OF_STACK %rdi
13857 movq %rsp,%rdi
13858 call syscall_trace_enter
13859 +
13860 + pax_erase_kstack
13861 +
13862 /*
13863 * Reload arg registers from stack in case ptrace changed them.
13864 * We don't reload %rax because syscall_trace_enter() returned
13865 @@ -613,7 +873,7 @@ tracesys:
13866 GLOBAL(int_ret_from_sys_call)
13867 DISABLE_INTERRUPTS(CLBR_NONE)
13868 TRACE_IRQS_OFF
13869 - testl $3,CS-ARGOFFSET(%rsp)
13870 + testb $3,CS-ARGOFFSET(%rsp)
13871 je retint_restore_args
13872 movl $_TIF_ALLWORK_MASK,%edi
13873 /* edi: mask to check */
13874 @@ -800,6 +1060,16 @@ END(interrupt)
13875 CFI_ADJUST_CFA_OFFSET 10*8
13876 call save_args
13877 PARTIAL_FRAME 0
13878 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13879 + testb $3, CS(%rdi)
13880 + jnz 1f
13881 + pax_enter_kernel
13882 + jmp 2f
13883 +1: pax_enter_kernel_user
13884 +2:
13885 +#else
13886 + pax_enter_kernel
13887 +#endif
13888 call \func
13889 .endm
13890
13891 @@ -822,7 +1092,7 @@ ret_from_intr:
13892 CFI_ADJUST_CFA_OFFSET -8
13893 exit_intr:
13894 GET_THREAD_INFO(%rcx)
13895 - testl $3,CS-ARGOFFSET(%rsp)
13896 + testb $3,CS-ARGOFFSET(%rsp)
13897 je retint_kernel
13898
13899 /* Interrupt came from user space */
13900 @@ -844,12 +1114,14 @@ retint_swapgs: /* return to user-space
13901 * The iretq could re-enable interrupts:
13902 */
13903 DISABLE_INTERRUPTS(CLBR_ANY)
13904 + pax_exit_kernel_user
13905 TRACE_IRQS_IRETQ
13906 SWAPGS
13907 jmp restore_args
13908
13909 retint_restore_args: /* return to kernel space */
13910 DISABLE_INTERRUPTS(CLBR_ANY)
13911 + pax_exit_kernel
13912 /*
13913 * The iretq could re-enable interrupts:
13914 */
13915 @@ -1032,6 +1304,16 @@ ENTRY(\sym)
13916 CFI_ADJUST_CFA_OFFSET 15*8
13917 call error_entry
13918 DEFAULT_FRAME 0
13919 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13920 + testb $3, CS(%rsp)
13921 + jnz 1f
13922 + pax_enter_kernel
13923 + jmp 2f
13924 +1: pax_enter_kernel_user
13925 +2:
13926 +#else
13927 + pax_enter_kernel
13928 +#endif
13929 movq %rsp,%rdi /* pt_regs pointer */
13930 xorl %esi,%esi /* no error code */
13931 call \do_sym
13932 @@ -1049,6 +1331,16 @@ ENTRY(\sym)
13933 subq $15*8, %rsp
13934 call save_paranoid
13935 TRACE_IRQS_OFF
13936 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13937 + testb $3, CS(%rsp)
13938 + jnz 1f
13939 + pax_enter_kernel
13940 + jmp 2f
13941 +1: pax_enter_kernel_user
13942 +2:
13943 +#else
13944 + pax_enter_kernel
13945 +#endif
13946 movq %rsp,%rdi /* pt_regs pointer */
13947 xorl %esi,%esi /* no error code */
13948 call \do_sym
13949 @@ -1066,9 +1358,24 @@ ENTRY(\sym)
13950 subq $15*8, %rsp
13951 call save_paranoid
13952 TRACE_IRQS_OFF
13953 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13954 + testb $3, CS(%rsp)
13955 + jnz 1f
13956 + pax_enter_kernel
13957 + jmp 2f
13958 +1: pax_enter_kernel_user
13959 +2:
13960 +#else
13961 + pax_enter_kernel
13962 +#endif
13963 movq %rsp,%rdi /* pt_regs pointer */
13964 xorl %esi,%esi /* no error code */
13965 - PER_CPU(init_tss, %rbp)
13966 +#ifdef CONFIG_SMP
13967 + imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
13968 + lea init_tss(%rbp), %rbp
13969 +#else
13970 + lea init_tss(%rip), %rbp
13971 +#endif
13972 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
13973 call \do_sym
13974 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
13975 @@ -1085,6 +1392,16 @@ ENTRY(\sym)
13976 CFI_ADJUST_CFA_OFFSET 15*8
13977 call error_entry
13978 DEFAULT_FRAME 0
13979 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13980 + testb $3, CS(%rsp)
13981 + jnz 1f
13982 + pax_enter_kernel
13983 + jmp 2f
13984 +1: pax_enter_kernel_user
13985 +2:
13986 +#else
13987 + pax_enter_kernel
13988 +#endif
13989 movq %rsp,%rdi /* pt_regs pointer */
13990 movq ORIG_RAX(%rsp),%rsi /* get error code */
13991 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
13992 @@ -1104,6 +1421,16 @@ ENTRY(\sym)
13993 call save_paranoid
13994 DEFAULT_FRAME 0
13995 TRACE_IRQS_OFF
13996 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13997 + testb $3, CS(%rsp)
13998 + jnz 1f
13999 + pax_enter_kernel
14000 + jmp 2f
14001 +1: pax_enter_kernel_user
14002 +2:
14003 +#else
14004 + pax_enter_kernel
14005 +#endif
14006 movq %rsp,%rdi /* pt_regs pointer */
14007 movq ORIG_RAX(%rsp),%rsi /* get error code */
14008 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14009 @@ -1405,14 +1732,27 @@ ENTRY(paranoid_exit)
14010 TRACE_IRQS_OFF
14011 testl %ebx,%ebx /* swapgs needed? */
14012 jnz paranoid_restore
14013 - testl $3,CS(%rsp)
14014 + testb $3,CS(%rsp)
14015 jnz paranoid_userspace
14016 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14017 + pax_exit_kernel
14018 + TRACE_IRQS_IRETQ 0
14019 + SWAPGS_UNSAFE_STACK
14020 + RESTORE_ALL 8
14021 + jmp irq_return
14022 +#endif
14023 paranoid_swapgs:
14024 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14025 + pax_exit_kernel_user
14026 +#else
14027 + pax_exit_kernel
14028 +#endif
14029 TRACE_IRQS_IRETQ 0
14030 SWAPGS_UNSAFE_STACK
14031 RESTORE_ALL 8
14032 jmp irq_return
14033 paranoid_restore:
14034 + pax_exit_kernel
14035 TRACE_IRQS_IRETQ 0
14036 RESTORE_ALL 8
14037 jmp irq_return
14038 @@ -1470,7 +1810,7 @@ ENTRY(error_entry)
14039 movq_cfi r14, R14+8
14040 movq_cfi r15, R15+8
14041 xorl %ebx,%ebx
14042 - testl $3,CS+8(%rsp)
14043 + testb $3,CS+8(%rsp)
14044 je error_kernelspace
14045 error_swapgs:
14046 SWAPGS
14047 @@ -1529,6 +1869,16 @@ ENTRY(nmi)
14048 CFI_ADJUST_CFA_OFFSET 15*8
14049 call save_paranoid
14050 DEFAULT_FRAME 0
14051 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14052 + testb $3, CS(%rsp)
14053 + jnz 1f
14054 + pax_enter_kernel
14055 + jmp 2f
14056 +1: pax_enter_kernel_user
14057 +2:
14058 +#else
14059 + pax_enter_kernel
14060 +#endif
14061 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
14062 movq %rsp,%rdi
14063 movq $-1,%rsi
14064 @@ -1539,11 +1889,25 @@ ENTRY(nmi)
14065 DISABLE_INTERRUPTS(CLBR_NONE)
14066 testl %ebx,%ebx /* swapgs needed? */
14067 jnz nmi_restore
14068 - testl $3,CS(%rsp)
14069 + testb $3,CS(%rsp)
14070 jnz nmi_userspace
14071 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14072 + pax_exit_kernel
14073 + SWAPGS_UNSAFE_STACK
14074 + RESTORE_ALL 8
14075 + jmp irq_return
14076 +#endif
14077 nmi_swapgs:
14078 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14079 + pax_exit_kernel_user
14080 +#else
14081 + pax_exit_kernel
14082 +#endif
14083 SWAPGS_UNSAFE_STACK
14084 + RESTORE_ALL 8
14085 + jmp irq_return
14086 nmi_restore:
14087 + pax_exit_kernel
14088 RESTORE_ALL 8
14089 jmp irq_return
14090 nmi_userspace:
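
A note on the entry_64.S hunks above: each added block tests the saved CS on the stack with `testb $3, CS(%rsp)` before choosing pax_enter_kernel or pax_enter_kernel_user. The low two bits of a code segment selector are its requested privilege level, so 0 means the exception interrupted kernel code and 3 means it arrived from userland. The standalone C sketch below only illustrates that selector test; the selector values in main() are the usual x86-64 kernel and user CS values, shown purely as examples and not taken from this patch.

#include <stdint.h>
#include <stdio.h>

/* Illustration only: the low two bits of a saved CS selector hold the
 * requested privilege level (RPL); 0 means the trap came from kernel
 * mode, 3 from user mode.  This mirrors the `testb $3, CS(%rsp)` checks
 * the patch adds before picking pax_enter_kernel vs pax_enter_kernel_user. */
static int came_from_user(uint16_t saved_cs)
{
    return (saved_cs & 3) != 0;
}

int main(void)
{
    printf("CS=0x10 -> user? %d\n", came_from_user(0x10)); /* typical kernel CS */
    printf("CS=0x33 -> user? %d\n", came_from_user(0x33)); /* typical user CS   */
    return 0;
}
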
14091 diff -urNp linux-2.6.32.44/arch/x86/kernel/ftrace.c linux-2.6.32.44/arch/x86/kernel/ftrace.c
14092 --- linux-2.6.32.44/arch/x86/kernel/ftrace.c 2011-03-27 14:31:47.000000000 -0400
14093 +++ linux-2.6.32.44/arch/x86/kernel/ftrace.c 2011-05-04 17:56:20.000000000 -0400
14094 @@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the
14095 static void *mod_code_newcode; /* holds the text to write to the IP */
14096
14097 static unsigned nmi_wait_count;
14098 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
14099 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
14100
14101 int ftrace_arch_read_dyn_info(char *buf, int size)
14102 {
14103 @@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf,
14104
14105 r = snprintf(buf, size, "%u %u",
14106 nmi_wait_count,
14107 - atomic_read(&nmi_update_count));
14108 + atomic_read_unchecked(&nmi_update_count));
14109 return r;
14110 }
14111
14112 @@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
14113 {
14114 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
14115 smp_rmb();
14116 + pax_open_kernel();
14117 ftrace_mod_code();
14118 - atomic_inc(&nmi_update_count);
14119 + pax_close_kernel();
14120 + atomic_inc_unchecked(&nmi_update_count);
14121 }
14122 /* Must have previous changes seen before executions */
14123 smp_mb();
14124 @@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, voi
14125
14126
14127
14128 -static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
14129 +static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
14130
14131 static unsigned char *ftrace_nop_replace(void)
14132 {
14133 @@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, uns
14134 {
14135 unsigned char replaced[MCOUNT_INSN_SIZE];
14136
14137 + ip = ktla_ktva(ip);
14138 +
14139 /*
14140 * Note: Due to modules and __init, code can
14141 * disappear and change, we need to protect against faulting
14142 @@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_fun
14143 unsigned char old[MCOUNT_INSN_SIZE], *new;
14144 int ret;
14145
14146 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
14147 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
14148 new = ftrace_call_replace(ip, (unsigned long)func);
14149 ret = ftrace_modify_code(ip, old, new);
14150
14151 @@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *da
14152 switch (faulted) {
14153 case 0:
14154 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
14155 - memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
14156 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
14157 break;
14158 case 1:
14159 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
14160 - memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
14161 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
14162 break;
14163 case 2:
14164 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
14165 - memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
14166 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
14167 break;
14168 }
14169
14170 @@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long
14171 {
14172 unsigned char code[MCOUNT_INSN_SIZE];
14173
14174 + ip = ktla_ktva(ip);
14175 +
14176 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
14177 return -EFAULT;
14178
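
A note on the ftrace.c hunks above: every modification of kernel text (ftrace_mod_code(), the nop template copies, ftrace_modify_code()) is now bracketed by pax_open_kernel()/pax_close_kernel(), and the addresses being read or patched are first passed through ktla_ktva(). The sketch below shows only that open, patch, close call pattern; the two pax_* helpers are stand-in stubs, since the real ones exist only in the patched kernel, and the 0f 1f 44 00 00 bytes are the five-byte nop named in the pr_info() text above.

#include <stdio.h>
#include <string.h>

/* Stand-ins for the helpers this patch provides in-kernel; here they
 * only log so the sketch builds and runs in userspace. */
static void pax_open_kernel(void)  { puts("text write allowed"); }
static void pax_close_kernel(void) { puts("text write disallowed again"); }

/* Pattern used by the ftrace hunks above: open, patch, close. */
static void patch_text(unsigned char *site, const unsigned char *insn, size_t len)
{
    pax_open_kernel();
    memcpy(site, insn, len);   /* only permitted between open and close in-kernel */
    pax_close_kernel();
}

int main(void)
{
    unsigned char text[5] = { 0xe8, 0, 0, 0, 0 };               /* fake call site */
    const unsigned char nop5[5] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };
    patch_text(text, nop5, sizeof(nop5));
    printf("first byte now 0x%02x\n", text[0]);
    return 0;
}
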
14179 diff -urNp linux-2.6.32.44/arch/x86/kernel/head32.c linux-2.6.32.44/arch/x86/kernel/head32.c
14180 --- linux-2.6.32.44/arch/x86/kernel/head32.c 2011-03-27 14:31:47.000000000 -0400
14181 +++ linux-2.6.32.44/arch/x86/kernel/head32.c 2011-04-17 15:56:46.000000000 -0400
14182 @@ -16,6 +16,7 @@
14183 #include <asm/apic.h>
14184 #include <asm/io_apic.h>
14185 #include <asm/bios_ebda.h>
14186 +#include <asm/boot.h>
14187
14188 static void __init i386_default_early_setup(void)
14189 {
14190 @@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
14191 {
14192 reserve_trampoline_memory();
14193
14194 - reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14195 + reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14196
14197 #ifdef CONFIG_BLK_DEV_INITRD
14198 /* Reserve INITRD */
14199 diff -urNp linux-2.6.32.44/arch/x86/kernel/head_32.S linux-2.6.32.44/arch/x86/kernel/head_32.S
14200 --- linux-2.6.32.44/arch/x86/kernel/head_32.S 2011-03-27 14:31:47.000000000 -0400
14201 +++ linux-2.6.32.44/arch/x86/kernel/head_32.S 2011-07-06 19:53:33.000000000 -0400
14202 @@ -19,10 +19,17 @@
14203 #include <asm/setup.h>
14204 #include <asm/processor-flags.h>
14205 #include <asm/percpu.h>
14206 +#include <asm/msr-index.h>
14207
14208 /* Physical address */
14209 #define pa(X) ((X) - __PAGE_OFFSET)
14210
14211 +#ifdef CONFIG_PAX_KERNEXEC
14212 +#define ta(X) (X)
14213 +#else
14214 +#define ta(X) ((X) - __PAGE_OFFSET)
14215 +#endif
14216 +
14217 /*
14218 * References to members of the new_cpu_data structure.
14219 */
14220 @@ -52,11 +59,7 @@
14221 * and small than max_low_pfn, otherwise will waste some page table entries
14222 */
14223
14224 -#if PTRS_PER_PMD > 1
14225 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
14226 -#else
14227 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
14228 -#endif
14229 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
14230
14231 /* Enough space to fit pagetables for the low memory linear map */
14232 MAPPING_BEYOND_END = \
14233 @@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
14234 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14235
14236 /*
14237 + * Real beginning of normal "text" segment
14238 + */
14239 +ENTRY(stext)
14240 +ENTRY(_stext)
14241 +
14242 +/*
14243 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
14244 * %esi points to the real-mode code as a 32-bit pointer.
14245 * CS and DS must be 4 GB flat segments, but we don't depend on
14246 @@ -80,7 +89,16 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14247 * can.
14248 */
14249 __HEAD
14250 +
14251 +#ifdef CONFIG_PAX_KERNEXEC
14252 + jmp startup_32
14253 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
14254 +.fill PAGE_SIZE-5,1,0xcc
14255 +#endif
14256 +
14257 ENTRY(startup_32)
14258 + movl pa(stack_start),%ecx
14259 +
14260 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
14261 us to not reload segments */
14262 testb $(1<<6), BP_loadflags(%esi)
14263 @@ -95,7 +113,60 @@ ENTRY(startup_32)
14264 movl %eax,%es
14265 movl %eax,%fs
14266 movl %eax,%gs
14267 + movl %eax,%ss
14268 2:
14269 + leal -__PAGE_OFFSET(%ecx),%esp
14270 +
14271 +#ifdef CONFIG_SMP
14272 + movl $pa(cpu_gdt_table),%edi
14273 + movl $__per_cpu_load,%eax
14274 + movw %ax,__KERNEL_PERCPU + 2(%edi)
14275 + rorl $16,%eax
14276 + movb %al,__KERNEL_PERCPU + 4(%edi)
14277 + movb %ah,__KERNEL_PERCPU + 7(%edi)
14278 + movl $__per_cpu_end - 1,%eax
14279 + subl $__per_cpu_start,%eax
14280 + movw %ax,__KERNEL_PERCPU + 0(%edi)
14281 +#endif
14282 +
14283 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14284 + movl $NR_CPUS,%ecx
14285 + movl $pa(cpu_gdt_table),%edi
14286 +1:
14287 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
14288 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
14289 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
14290 + addl $PAGE_SIZE_asm,%edi
14291 + loop 1b
14292 +#endif
14293 +
14294 +#ifdef CONFIG_PAX_KERNEXEC
14295 + movl $pa(boot_gdt),%edi
14296 + movl $__LOAD_PHYSICAL_ADDR,%eax
14297 + movw %ax,__BOOT_CS + 2(%edi)
14298 + rorl $16,%eax
14299 + movb %al,__BOOT_CS + 4(%edi)
14300 + movb %ah,__BOOT_CS + 7(%edi)
14301 + rorl $16,%eax
14302 +
14303 + ljmp $(__BOOT_CS),$1f
14304 +1:
14305 +
14306 + movl $NR_CPUS,%ecx
14307 + movl $pa(cpu_gdt_table),%edi
14308 + addl $__PAGE_OFFSET,%eax
14309 +1:
14310 + movw %ax,__KERNEL_CS + 2(%edi)
14311 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
14312 + rorl $16,%eax
14313 + movb %al,__KERNEL_CS + 4(%edi)
14314 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
14315 + movb %ah,__KERNEL_CS + 7(%edi)
14316 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
14317 + rorl $16,%eax
14318 + addl $PAGE_SIZE_asm,%edi
14319 + loop 1b
14320 +#endif
14321
14322 /*
14323 * Clear BSS first so that there are no surprises...
14324 @@ -140,9 +211,7 @@ ENTRY(startup_32)
14325 cmpl $num_subarch_entries, %eax
14326 jae bad_subarch
14327
14328 - movl pa(subarch_entries)(,%eax,4), %eax
14329 - subl $__PAGE_OFFSET, %eax
14330 - jmp *%eax
14331 + jmp *pa(subarch_entries)(,%eax,4)
14332
14333 bad_subarch:
14334 WEAK(lguest_entry)
14335 @@ -154,10 +223,10 @@ WEAK(xen_entry)
14336 __INITDATA
14337
14338 subarch_entries:
14339 - .long default_entry /* normal x86/PC */
14340 - .long lguest_entry /* lguest hypervisor */
14341 - .long xen_entry /* Xen hypervisor */
14342 - .long default_entry /* Moorestown MID */
14343 + .long ta(default_entry) /* normal x86/PC */
14344 + .long ta(lguest_entry) /* lguest hypervisor */
14345 + .long ta(xen_entry) /* Xen hypervisor */
14346 + .long ta(default_entry) /* Moorestown MID */
14347 num_subarch_entries = (. - subarch_entries) / 4
14348 .previous
14349 #endif /* CONFIG_PARAVIRT */
14350 @@ -218,8 +287,11 @@ default_entry:
14351 movl %eax, pa(max_pfn_mapped)
14352
14353 /* Do early initialization of the fixmap area */
14354 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
14355 - movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14356 +#ifdef CONFIG_COMPAT_VDSO
14357 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14358 +#else
14359 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14360 +#endif
14361 #else /* Not PAE */
14362
14363 page_pde_offset = (__PAGE_OFFSET >> 20);
14364 @@ -249,8 +321,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14365 movl %eax, pa(max_pfn_mapped)
14366
14367 /* Do early initialization of the fixmap area */
14368 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
14369 - movl %eax,pa(swapper_pg_dir+0xffc)
14370 +#ifdef CONFIG_COMPAT_VDSO
14371 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
14372 +#else
14373 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
14374 +#endif
14375 #endif
14376 jmp 3f
14377 /*
14378 @@ -272,6 +347,9 @@ ENTRY(startup_32_smp)
14379 movl %eax,%es
14380 movl %eax,%fs
14381 movl %eax,%gs
14382 + movl pa(stack_start),%ecx
14383 + movl %eax,%ss
14384 + leal -__PAGE_OFFSET(%ecx),%esp
14385 #endif /* CONFIG_SMP */
14386 3:
14387
14388 @@ -297,6 +375,7 @@ ENTRY(startup_32_smp)
14389 orl %edx,%eax
14390 movl %eax,%cr4
14391
14392 +#ifdef CONFIG_X86_PAE
14393 btl $5, %eax # check if PAE is enabled
14394 jnc 6f
14395
14396 @@ -305,6 +384,10 @@ ENTRY(startup_32_smp)
14397 cpuid
14398 cmpl $0x80000000, %eax
14399 jbe 6f
14400 +
14401 + /* Clear bogus XD_DISABLE bits */
14402 + call verify_cpu
14403 +
14404 mov $0x80000001, %eax
14405 cpuid
14406 /* Execute Disable bit supported? */
14407 @@ -312,13 +395,17 @@ ENTRY(startup_32_smp)
14408 jnc 6f
14409
14410 /* Setup EFER (Extended Feature Enable Register) */
14411 - movl $0xc0000080, %ecx
14412 + movl $MSR_EFER, %ecx
14413 rdmsr
14414
14415 btsl $11, %eax
14416 /* Make changes effective */
14417 wrmsr
14418
14419 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
14420 + movl $1,pa(nx_enabled)
14421 +#endif
14422 +
14423 6:
14424
14425 /*
14426 @@ -331,8 +418,8 @@ ENTRY(startup_32_smp)
14427 movl %eax,%cr0 /* ..and set paging (PG) bit */
14428 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
14429 1:
14430 - /* Set up the stack pointer */
14431 - lss stack_start,%esp
14432 + /* Shift the stack pointer to a virtual address */
14433 + addl $__PAGE_OFFSET, %esp
14434
14435 /*
14436 * Initialize eflags. Some BIOS's leave bits like NT set. This would
14437 @@ -344,9 +431,7 @@ ENTRY(startup_32_smp)
14438
14439 #ifdef CONFIG_SMP
14440 cmpb $0, ready
14441 - jz 1f /* Initial CPU cleans BSS */
14442 - jmp checkCPUtype
14443 -1:
14444 + jnz checkCPUtype
14445 #endif /* CONFIG_SMP */
14446
14447 /*
14448 @@ -424,7 +509,7 @@ is386: movl $2,%ecx # set MP
14449 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
14450 movl %eax,%ss # after changing gdt.
14451
14452 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
14453 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
14454 movl %eax,%ds
14455 movl %eax,%es
14456
14457 @@ -438,15 +523,22 @@ is386: movl $2,%ecx # set MP
14458 */
14459 cmpb $0,ready
14460 jne 1f
14461 - movl $per_cpu__gdt_page,%eax
14462 + movl $cpu_gdt_table,%eax
14463 movl $per_cpu__stack_canary,%ecx
14464 +#ifdef CONFIG_SMP
14465 + addl $__per_cpu_load,%ecx
14466 +#endif
14467 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
14468 shrl $16, %ecx
14469 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
14470 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
14471 1:
14472 -#endif
14473 movl $(__KERNEL_STACK_CANARY),%eax
14474 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14475 + movl $(__USER_DS),%eax
14476 +#else
14477 + xorl %eax,%eax
14478 +#endif
14479 movl %eax,%gs
14480
14481 xorl %eax,%eax # Clear LDT
14482 @@ -454,14 +546,7 @@ is386: movl $2,%ecx # set MP
14483
14484 cld # gcc2 wants the direction flag cleared at all times
14485 pushl $0 # fake return address for unwinder
14486 -#ifdef CONFIG_SMP
14487 - movb ready, %cl
14488 movb $1, ready
14489 - cmpb $0,%cl # the first CPU calls start_kernel
14490 - je 1f
14491 - movl (stack_start), %esp
14492 -1:
14493 -#endif /* CONFIG_SMP */
14494 jmp *(initial_code)
14495
14496 /*
14497 @@ -546,22 +631,22 @@ early_page_fault:
14498 jmp early_fault
14499
14500 early_fault:
14501 - cld
14502 #ifdef CONFIG_PRINTK
14503 + cmpl $1,%ss:early_recursion_flag
14504 + je hlt_loop
14505 + incl %ss:early_recursion_flag
14506 + cld
14507 pusha
14508 movl $(__KERNEL_DS),%eax
14509 movl %eax,%ds
14510 movl %eax,%es
14511 - cmpl $2,early_recursion_flag
14512 - je hlt_loop
14513 - incl early_recursion_flag
14514 movl %cr2,%eax
14515 pushl %eax
14516 pushl %edx /* trapno */
14517 pushl $fault_msg
14518 call printk
14519 +; call dump_stack
14520 #endif
14521 - call dump_stack
14522 hlt_loop:
14523 hlt
14524 jmp hlt_loop
14525 @@ -569,8 +654,11 @@ hlt_loop:
14526 /* This is the default interrupt "handler" :-) */
14527 ALIGN
14528 ignore_int:
14529 - cld
14530 #ifdef CONFIG_PRINTK
14531 + cmpl $2,%ss:early_recursion_flag
14532 + je hlt_loop
14533 + incl %ss:early_recursion_flag
14534 + cld
14535 pushl %eax
14536 pushl %ecx
14537 pushl %edx
14538 @@ -579,9 +667,6 @@ ignore_int:
14539 movl $(__KERNEL_DS),%eax
14540 movl %eax,%ds
14541 movl %eax,%es
14542 - cmpl $2,early_recursion_flag
14543 - je hlt_loop
14544 - incl early_recursion_flag
14545 pushl 16(%esp)
14546 pushl 24(%esp)
14547 pushl 32(%esp)
14548 @@ -600,6 +685,8 @@ ignore_int:
14549 #endif
14550 iret
14551
14552 +#include "verify_cpu.S"
14553 +
14554 __REFDATA
14555 .align 4
14556 ENTRY(initial_code)
14557 @@ -610,31 +697,47 @@ ENTRY(initial_page_table)
14558 /*
14559 * BSS section
14560 */
14561 -__PAGE_ALIGNED_BSS
14562 - .align PAGE_SIZE_asm
14563 #ifdef CONFIG_X86_PAE
14564 +.section .swapper_pg_pmd,"a",@progbits
14565 swapper_pg_pmd:
14566 .fill 1024*KPMDS,4,0
14567 #else
14568 +.section .swapper_pg_dir,"a",@progbits
14569 ENTRY(swapper_pg_dir)
14570 .fill 1024,4,0
14571 #endif
14572 +.section .swapper_pg_fixmap,"a",@progbits
14573 swapper_pg_fixmap:
14574 .fill 1024,4,0
14575 #ifdef CONFIG_X86_TRAMPOLINE
14576 +.section .trampoline_pg_dir,"a",@progbits
14577 ENTRY(trampoline_pg_dir)
14578 +#ifdef CONFIG_X86_PAE
14579 + .fill 4,8,0
14580 +#else
14581 .fill 1024,4,0
14582 #endif
14583 +#endif
14584 +
14585 +.section .empty_zero_page,"a",@progbits
14586 ENTRY(empty_zero_page)
14587 .fill 4096,1,0
14588
14589 /*
14590 + * The IDT has to be page-aligned to simplify the Pentium
14591 + * F0 0F bug workaround.. We have a special link segment
14592 + * for this.
14593 + */
14594 +.section .idt,"a",@progbits
14595 +ENTRY(idt_table)
14596 + .fill 256,8,0
14597 +
14598 +/*
14599 * This starts the data section.
14600 */
14601 #ifdef CONFIG_X86_PAE
14602 -__PAGE_ALIGNED_DATA
14603 - /* Page-aligned for the benefit of paravirt? */
14604 - .align PAGE_SIZE_asm
14605 +.section .swapper_pg_dir,"a",@progbits
14606 +
14607 ENTRY(swapper_pg_dir)
14608 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
14609 # if KPMDS == 3
14610 @@ -653,15 +756,24 @@ ENTRY(swapper_pg_dir)
14611 # error "Kernel PMDs should be 1, 2 or 3"
14612 # endif
14613 .align PAGE_SIZE_asm /* needs to be page-sized too */
14614 +
14615 +#ifdef CONFIG_PAX_PER_CPU_PGD
14616 +ENTRY(cpu_pgd)
14617 + .rept NR_CPUS
14618 + .fill 4,8,0
14619 + .endr
14620 +#endif
14621 +
14622 #endif
14623
14624 .data
14625 +.balign 4
14626 ENTRY(stack_start)
14627 - .long init_thread_union+THREAD_SIZE
14628 - .long __BOOT_DS
14629 + .long init_thread_union+THREAD_SIZE-8
14630
14631 ready: .byte 0
14632
14633 +.section .rodata,"a",@progbits
14634 early_recursion_flag:
14635 .long 0
14636
14637 @@ -697,7 +809,7 @@ fault_msg:
14638 .word 0 # 32 bit align gdt_desc.address
14639 boot_gdt_descr:
14640 .word __BOOT_DS+7
14641 - .long boot_gdt - __PAGE_OFFSET
14642 + .long pa(boot_gdt)
14643
14644 .word 0 # 32-bit align idt_desc.address
14645 idt_descr:
14646 @@ -708,7 +820,7 @@ idt_descr:
14647 .word 0 # 32 bit align gdt_desc.address
14648 ENTRY(early_gdt_descr)
14649 .word GDT_ENTRIES*8-1
14650 - .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
14651 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
14652
14653 /*
14654 * The boot_gdt must mirror the equivalent in setup.S and is
14655 @@ -717,5 +829,65 @@ ENTRY(early_gdt_descr)
14656 .align L1_CACHE_BYTES
14657 ENTRY(boot_gdt)
14658 .fill GDT_ENTRY_BOOT_CS,8,0
14659 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
14660 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
14661 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
14662 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
14663 +
14664 + .align PAGE_SIZE_asm
14665 +ENTRY(cpu_gdt_table)
14666 + .rept NR_CPUS
14667 + .quad 0x0000000000000000 /* NULL descriptor */
14668 + .quad 0x0000000000000000 /* 0x0b reserved */
14669 + .quad 0x0000000000000000 /* 0x13 reserved */
14670 + .quad 0x0000000000000000 /* 0x1b reserved */
14671 +
14672 +#ifdef CONFIG_PAX_KERNEXEC
14673 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
14674 +#else
14675 + .quad 0x0000000000000000 /* 0x20 unused */
14676 +#endif
14677 +
14678 + .quad 0x0000000000000000 /* 0x28 unused */
14679 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
14680 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
14681 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
14682 + .quad 0x0000000000000000 /* 0x4b reserved */
14683 + .quad 0x0000000000000000 /* 0x53 reserved */
14684 + .quad 0x0000000000000000 /* 0x5b reserved */
14685 +
14686 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
14687 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
14688 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
14689 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
14690 +
14691 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
14692 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
14693 +
14694 + /*
14695 + * Segments used for calling PnP BIOS have byte granularity.
14696 + * The code segments and data segments have fixed 64k limits,
14697 + * the transfer segment sizes are set at run time.
14698 + */
14699 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
14700 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
14701 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
14702 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
14703 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
14704 +
14705 + /*
14706 + * The APM segments have byte granularity and their bases
14707 + * are set at run time. All have 64k limits.
14708 + */
14709 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
14710 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
14711 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
14712 +
14713 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
14714 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
14715 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
14716 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
14717 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
14718 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
14719 +
14720 + /* Be sure this is zeroed to avoid false validations in Xen */
14721 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
14722 + .endr
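
A note on the cpu_gdt_table added to head_32.S above: the descriptors are written out as raw .quad values with comments such as "kernel 4GB code at 0x00000000". The standalone decoder below unpacks the standard IA-32 descriptor fields to show why 0x00cf9b000000ffff is a present, ring 0, base 0, 4 GiB, page-granular code segment and 0x00cf93000000ffff the matching data segment. The field layout is the generic architectural one, not something defined by this patch.

#include <stdint.h>
#include <stdio.h>

/* Decode an IA-32 segment descriptor stored as one little-endian quadword,
 * the same representation the .quad entries in cpu_gdt_table use. */
static void decode(uint64_t d)
{
    uint32_t base   = (uint32_t)((d >> 16) & 0xffffff) | (uint32_t)((d >> 56) << 24);
    uint32_t limit  = (uint32_t)(d & 0xffff) | (uint32_t)(((d >> 48) & 0xf) << 16);
    uint8_t  access = (uint8_t)(d >> 40);
    int g = (int)((d >> 55) & 1);           /* granularity: 1 = 4 KiB units */

    printf("%#018llx: base=%#010x limit=%#07x%s access=%#04x (%s, DPL %d)\n",
           (unsigned long long)d, base, limit, g ? " *4KiB" : "",
           access, (access & 0x08) ? "code" : "data", (access >> 5) & 3);
}

int main(void)
{
    decode(0x00cf9b000000ffffULL);   /* kernel 4GB code at 0x00000000 */
    decode(0x00cf93000000ffffULL);   /* kernel 4GB data at 0x00000000 */
    decode(0x00cffb000000ffffULL);   /* user 4GB code, DPL 3          */
    return 0;
}

Compiled with any C compiler it reports base 0, a page-granular 0xfffff limit and access bytes 0x9b/0x93/0xfb for those three entries, matching the comments in the table.
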
14723 diff -urNp linux-2.6.32.44/arch/x86/kernel/head_64.S linux-2.6.32.44/arch/x86/kernel/head_64.S
14724 --- linux-2.6.32.44/arch/x86/kernel/head_64.S 2011-03-27 14:31:47.000000000 -0400
14725 +++ linux-2.6.32.44/arch/x86/kernel/head_64.S 2011-04-17 15:56:46.000000000 -0400
14726 @@ -19,6 +19,7 @@
14727 #include <asm/cache.h>
14728 #include <asm/processor-flags.h>
14729 #include <asm/percpu.h>
14730 +#include <asm/cpufeature.h>
14731
14732 #ifdef CONFIG_PARAVIRT
14733 #include <asm/asm-offsets.h>
14734 @@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
14735 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
14736 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
14737 L3_START_KERNEL = pud_index(__START_KERNEL_map)
14738 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
14739 +L3_VMALLOC_START = pud_index(VMALLOC_START)
14740 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
14741 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
14742
14743 .text
14744 __HEAD
14745 @@ -85,35 +90,22 @@ startup_64:
14746 */
14747 addq %rbp, init_level4_pgt + 0(%rip)
14748 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
14749 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
14750 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
14751 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
14752
14753 addq %rbp, level3_ident_pgt + 0(%rip)
14754 +#ifndef CONFIG_XEN
14755 + addq %rbp, level3_ident_pgt + 8(%rip)
14756 +#endif
14757
14758 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
14759 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
14760 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
14761
14762 - addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14763 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
14764 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
14765
14766 - /* Add an Identity mapping if I am above 1G */
14767 - leaq _text(%rip), %rdi
14768 - andq $PMD_PAGE_MASK, %rdi
14769 -
14770 - movq %rdi, %rax
14771 - shrq $PUD_SHIFT, %rax
14772 - andq $(PTRS_PER_PUD - 1), %rax
14773 - jz ident_complete
14774 -
14775 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
14776 - leaq level3_ident_pgt(%rip), %rbx
14777 - movq %rdx, 0(%rbx, %rax, 8)
14778 -
14779 - movq %rdi, %rax
14780 - shrq $PMD_SHIFT, %rax
14781 - andq $(PTRS_PER_PMD - 1), %rax
14782 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
14783 - leaq level2_spare_pgt(%rip), %rbx
14784 - movq %rdx, 0(%rbx, %rax, 8)
14785 -ident_complete:
14786 + addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14787 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
14788
14789 /*
14790 * Fixup the kernel text+data virtual addresses. Note that
14791 @@ -161,8 +153,8 @@ ENTRY(secondary_startup_64)
14792 * after the boot processor executes this code.
14793 */
14794
14795 - /* Enable PAE mode and PGE */
14796 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
14797 + /* Enable PAE mode and PSE/PGE */
14798 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
14799 movq %rax, %cr4
14800
14801 /* Setup early boot stage 4 level pagetables. */
14802 @@ -184,9 +176,13 @@ ENTRY(secondary_startup_64)
14803 movl $MSR_EFER, %ecx
14804 rdmsr
14805 btsl $_EFER_SCE, %eax /* Enable System Call */
14806 - btl $20,%edi /* No Execute supported? */
14807 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
14808 jnc 1f
14809 btsl $_EFER_NX, %eax
14810 + leaq init_level4_pgt(%rip), %rdi
14811 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
14812 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
14813 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
14814 1: wrmsr /* Make changes effective */
14815
14816 /* Setup cr0 */
14817 @@ -262,16 +258,16 @@ ENTRY(secondary_startup_64)
14818 .quad x86_64_start_kernel
14819 ENTRY(initial_gs)
14820 .quad INIT_PER_CPU_VAR(irq_stack_union)
14821 - __FINITDATA
14822
14823 ENTRY(stack_start)
14824 .quad init_thread_union+THREAD_SIZE-8
14825 .word 0
14826 + __FINITDATA
14827
14828 bad_address:
14829 jmp bad_address
14830
14831 - .section ".init.text","ax"
14832 + __INIT
14833 #ifdef CONFIG_EARLY_PRINTK
14834 .globl early_idt_handlers
14835 early_idt_handlers:
14836 @@ -316,18 +312,23 @@ ENTRY(early_idt_handler)
14837 #endif /* EARLY_PRINTK */
14838 1: hlt
14839 jmp 1b
14840 + .previous
14841
14842 #ifdef CONFIG_EARLY_PRINTK
14843 + __INITDATA
14844 early_recursion_flag:
14845 .long 0
14846 + .previous
14847
14848 + .section .rodata,"a",@progbits
14849 early_idt_msg:
14850 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
14851 early_idt_ripmsg:
14852 .asciz "RIP %s\n"
14853 -#endif /* CONFIG_EARLY_PRINTK */
14854 .previous
14855 +#endif /* CONFIG_EARLY_PRINTK */
14856
14857 + .section .rodata,"a",@progbits
14858 #define NEXT_PAGE(name) \
14859 .balign PAGE_SIZE; \
14860 ENTRY(name)
14861 @@ -350,13 +351,36 @@ NEXT_PAGE(init_level4_pgt)
14862 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14863 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
14864 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14865 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
14866 + .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
14867 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
14868 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
14869 .org init_level4_pgt + L4_START_KERNEL*8, 0
14870 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
14871 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
14872
14873 +#ifdef CONFIG_PAX_PER_CPU_PGD
14874 +NEXT_PAGE(cpu_pgd)
14875 + .rept NR_CPUS
14876 + .fill 512,8,0
14877 + .endr
14878 +#endif
14879 +
14880 NEXT_PAGE(level3_ident_pgt)
14881 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14882 +#ifdef CONFIG_XEN
14883 .fill 511,8,0
14884 +#else
14885 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
14886 + .fill 510,8,0
14887 +#endif
14888 +
14889 +NEXT_PAGE(level3_vmalloc_pgt)
14890 + .fill 512,8,0
14891 +
14892 +NEXT_PAGE(level3_vmemmap_pgt)
14893 + .fill L3_VMEMMAP_START,8,0
14894 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
14895
14896 NEXT_PAGE(level3_kernel_pgt)
14897 .fill L3_START_KERNEL,8,0
14898 @@ -364,20 +388,23 @@ NEXT_PAGE(level3_kernel_pgt)
14899 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
14900 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
14901
14902 +NEXT_PAGE(level2_vmemmap_pgt)
14903 + .fill 512,8,0
14904 +
14905 NEXT_PAGE(level2_fixmap_pgt)
14906 - .fill 506,8,0
14907 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
14908 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
14909 - .fill 5,8,0
14910 + .fill 507,8,0
14911 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
14912 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
14913 + .fill 4,8,0
14914
14915 -NEXT_PAGE(level1_fixmap_pgt)
14916 +NEXT_PAGE(level1_vsyscall_pgt)
14917 .fill 512,8,0
14918
14919 -NEXT_PAGE(level2_ident_pgt)
14920 - /* Since I easily can, map the first 1G.
14921 + /* Since I easily can, map the first 2G.
14922 * Don't set NX because code runs from these pages.
14923 */
14924 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
14925 +NEXT_PAGE(level2_ident_pgt)
14926 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
14927
14928 NEXT_PAGE(level2_kernel_pgt)
14929 /*
14930 @@ -390,33 +417,55 @@ NEXT_PAGE(level2_kernel_pgt)
14931 * If you want to increase this then increase MODULES_VADDR
14932 * too.)
14933 */
14934 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
14935 - KERNEL_IMAGE_SIZE/PMD_SIZE)
14936 -
14937 -NEXT_PAGE(level2_spare_pgt)
14938 - .fill 512, 8, 0
14939 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
14940
14941 #undef PMDS
14942 #undef NEXT_PAGE
14943
14944 - .data
14945 + .align PAGE_SIZE
14946 +ENTRY(cpu_gdt_table)
14947 + .rept NR_CPUS
14948 + .quad 0x0000000000000000 /* NULL descriptor */
14949 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
14950 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
14951 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
14952 + .quad 0x00cffb000000ffff /* __USER32_CS */
14953 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
14954 + .quad 0x00affb000000ffff /* __USER_CS */
14955 +
14956 +#ifdef CONFIG_PAX_KERNEXEC
14957 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
14958 +#else
14959 + .quad 0x0 /* unused */
14960 +#endif
14961 +
14962 + .quad 0,0 /* TSS */
14963 + .quad 0,0 /* LDT */
14964 + .quad 0,0,0 /* three TLS descriptors */
14965 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
14966 + /* asm/segment.h:GDT_ENTRIES must match this */
14967 +
14968 + /* zero the remaining page */
14969 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
14970 + .endr
14971 +
14972 .align 16
14973 .globl early_gdt_descr
14974 early_gdt_descr:
14975 .word GDT_ENTRIES*8-1
14976 early_gdt_descr_base:
14977 - .quad INIT_PER_CPU_VAR(gdt_page)
14978 + .quad cpu_gdt_table
14979
14980 ENTRY(phys_base)
14981 /* This must match the first entry in level2_kernel_pgt */
14982 .quad 0x0000000000000000
14983
14984 #include "../../x86/xen/xen-head.S"
14985 -
14986 - .section .bss, "aw", @nobits
14987 +
14988 + .section .rodata,"a",@progbits
14989 .align L1_CACHE_BYTES
14990 ENTRY(idt_table)
14991 - .skip IDT_ENTRIES * 16
14992 + .fill 512,8,0
14993
14994 __PAGE_ALIGNED_BSS
14995 .align PAGE_SIZE
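
A note on the head_64.S NX handling above: after setting EFER.NX with `btsl $_EFER_NX, %eax`, the patch also sets _PAGE_BIT_NX directly on the init_level4_pgt slots for the page offset, vmalloc and vmemmap regions, so everything reachable through those top-level entries is mapped non-executable. The sketch below only shows that bit operation on a 64-bit table entry; bit 63 is the architectural x86-64 NX position, and the entry value in main() is made up.

#include <stdint.h>
#include <stdio.h>

#define PAGE_BIT_NX 63   /* x86-64 no-execute bit in a page-table entry */

/* Mirrors the `btsq $_PAGE_BIT_NX, ...(%rdi)` instructions above: once
 * EFER.NX is enabled, marking a top-level entry NX makes everything
 * mapped beneath it non-executable. */
static uint64_t mark_nx(uint64_t pgd_entry)
{
    return pgd_entry | (1ULL << PAGE_BIT_NX);
}

int main(void)
{
    uint64_t entry = 0x0000000012345067ULL;   /* made-up present entry */
    printf("before: %#018llx\n", (unsigned long long)entry);
    printf("after:  %#018llx\n", (unsigned long long)mark_nx(entry));
    return 0;
}
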
14996 diff -urNp linux-2.6.32.44/arch/x86/kernel/i386_ksyms_32.c linux-2.6.32.44/arch/x86/kernel/i386_ksyms_32.c
14997 --- linux-2.6.32.44/arch/x86/kernel/i386_ksyms_32.c 2011-03-27 14:31:47.000000000 -0400
14998 +++ linux-2.6.32.44/arch/x86/kernel/i386_ksyms_32.c 2011-04-17 15:56:46.000000000 -0400
14999 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
15000 EXPORT_SYMBOL(cmpxchg8b_emu);
15001 #endif
15002
15003 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
15004 +
15005 /* Networking helper routines. */
15006 EXPORT_SYMBOL(csum_partial_copy_generic);
15007 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
15008 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
15009
15010 EXPORT_SYMBOL(__get_user_1);
15011 EXPORT_SYMBOL(__get_user_2);
15012 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
15013
15014 EXPORT_SYMBOL(csum_partial);
15015 EXPORT_SYMBOL(empty_zero_page);
15016 +
15017 +#ifdef CONFIG_PAX_KERNEXEC
15018 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
15019 +#endif
15020 diff -urNp linux-2.6.32.44/arch/x86/kernel/i8259.c linux-2.6.32.44/arch/x86/kernel/i8259.c
15021 --- linux-2.6.32.44/arch/x86/kernel/i8259.c 2011-03-27 14:31:47.000000000 -0400
15022 +++ linux-2.6.32.44/arch/x86/kernel/i8259.c 2011-05-04 17:56:28.000000000 -0400
15023 @@ -208,7 +208,7 @@ spurious_8259A_irq:
15024 "spurious 8259A interrupt: IRQ%d.\n", irq);
15025 spurious_irq_mask |= irqmask;
15026 }
15027 - atomic_inc(&irq_err_count);
15028 + atomic_inc_unchecked(&irq_err_count);
15029 /*
15030 * Theoretically we do not have to handle this IRQ,
15031 * but in Linux this does not cause problems and is
15032 diff -urNp linux-2.6.32.44/arch/x86/kernel/init_task.c linux-2.6.32.44/arch/x86/kernel/init_task.c
15033 --- linux-2.6.32.44/arch/x86/kernel/init_task.c 2011-03-27 14:31:47.000000000 -0400
15034 +++ linux-2.6.32.44/arch/x86/kernel/init_task.c 2011-04-17 15:56:46.000000000 -0400
15035 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
15036 * way process stacks are handled. This is done by having a special
15037 * "init_task" linker map entry..
15038 */
15039 -union thread_union init_thread_union __init_task_data =
15040 - { INIT_THREAD_INFO(init_task) };
15041 +union thread_union init_thread_union __init_task_data;
15042
15043 /*
15044 * Initial task structure.
15045 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
15046 * section. Since TSS's are completely CPU-local, we want them
15047 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
15048 */
15049 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
15050 -
15051 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
15052 +EXPORT_SYMBOL(init_tss);
15053 diff -urNp linux-2.6.32.44/arch/x86/kernel/ioport.c linux-2.6.32.44/arch/x86/kernel/ioport.c
15054 --- linux-2.6.32.44/arch/x86/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
15055 +++ linux-2.6.32.44/arch/x86/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
15056 @@ -6,6 +6,7 @@
15057 #include <linux/sched.h>
15058 #include <linux/kernel.h>
15059 #include <linux/capability.h>
15060 +#include <linux/security.h>
15061 #include <linux/errno.h>
15062 #include <linux/types.h>
15063 #include <linux/ioport.h>
15064 @@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long
15065
15066 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
15067 return -EINVAL;
15068 +#ifdef CONFIG_GRKERNSEC_IO
15069 + if (turn_on && grsec_disable_privio) {
15070 + gr_handle_ioperm();
15071 + return -EPERM;
15072 + }
15073 +#endif
15074 if (turn_on && !capable(CAP_SYS_RAWIO))
15075 return -EPERM;
15076
15077 @@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long
15078 * because the ->io_bitmap_max value must match the bitmap
15079 * contents:
15080 */
15081 - tss = &per_cpu(init_tss, get_cpu());
15082 + tss = init_tss + get_cpu();
15083
15084 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
15085
15086 @@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, s
15087 return -EINVAL;
15088 /* Trying to gain more privileges? */
15089 if (level > old) {
15090 +#ifdef CONFIG_GRKERNSEC_IO
15091 + if (grsec_disable_privio) {
15092 + gr_handle_iopl();
15093 + return -EPERM;
15094 + }
15095 +#endif
15096 if (!capable(CAP_SYS_RAWIO))
15097 return -EPERM;
15098 }
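
A note tying the init_task.c and ioport.c hunks together: init_tss is turned from a per-CPU variable into a plain NR_CPUS-sized array, so sys_ioperm() above now reaches the current CPU's TSS with ordinary pointer arithmetic, `tss = init_tss + get_cpu()`. The sketch below mirrors only that indexing; the struct contents and the NR_CPUS value are trimmed stand-ins.

#include <stdio.h>

#define NR_CPUS 4                                 /* stand-in for the config value */
struct tss_struct { int io_bitmap_max; };         /* trimmed stand-in type */

static struct tss_struct init_tss[NR_CPUS];       /* array form used by the patch */

int main(void)
{
    int cpu = 2;                                  /* pretend get_cpu() returned 2 */
    struct tss_struct *tss = init_tss + cpu;      /* same indexing as ioport.c above */
    tss->io_bitmap_max = 0;
    printf("tss for cpu %d at %p\n", cpu, (void *)tss);
    return 0;
}
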
15099 diff -urNp linux-2.6.32.44/arch/x86/kernel/irq_32.c linux-2.6.32.44/arch/x86/kernel/irq_32.c
15100 --- linux-2.6.32.44/arch/x86/kernel/irq_32.c 2011-03-27 14:31:47.000000000 -0400
15101 +++ linux-2.6.32.44/arch/x86/kernel/irq_32.c 2011-07-06 19:53:33.000000000 -0400
15102 @@ -35,7 +35,7 @@ static int check_stack_overflow(void)
15103 __asm__ __volatile__("andl %%esp,%0" :
15104 "=r" (sp) : "0" (THREAD_SIZE - 1));
15105
15106 - return sp < (sizeof(struct thread_info) + STACK_WARN);
15107 + return sp < STACK_WARN;
15108 }
15109
15110 static void print_stack_overflow(void)
15111 @@ -54,9 +54,9 @@ static inline void print_stack_overflow(
15112 * per-CPU IRQ handling contexts (thread information and stack)
15113 */
15114 union irq_ctx {
15115 - struct thread_info tinfo;
15116 - u32 stack[THREAD_SIZE/sizeof(u32)];
15117 -} __attribute__((aligned(PAGE_SIZE)));
15118 + unsigned long previous_esp;
15119 + u32 stack[THREAD_SIZE/sizeof(u32)];
15120 +} __attribute__((aligned(THREAD_SIZE)));
15121
15122 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
15123 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
15124 @@ -78,10 +78,9 @@ static void call_on_stack(void *func, vo
15125 static inline int
15126 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15127 {
15128 - union irq_ctx *curctx, *irqctx;
15129 + union irq_ctx *irqctx;
15130 u32 *isp, arg1, arg2;
15131
15132 - curctx = (union irq_ctx *) current_thread_info();
15133 irqctx = __get_cpu_var(hardirq_ctx);
15134
15135 /*
15136 @@ -90,21 +89,16 @@ execute_on_irq_stack(int overflow, struc
15137 * handler) we can't do that and just have to keep using the
15138 * current stack (which is the irq stack already after all)
15139 */
15140 - if (unlikely(curctx == irqctx))
15141 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
15142 return 0;
15143
15144 /* build the stack frame on the IRQ stack */
15145 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15146 - irqctx->tinfo.task = curctx->tinfo.task;
15147 - irqctx->tinfo.previous_esp = current_stack_pointer;
15148 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15149 + irqctx->previous_esp = current_stack_pointer;
15150
15151 - /*
15152 - * Copy the softirq bits in preempt_count so that the
15153 - * softirq checks work in the hardirq context.
15154 - */
15155 - irqctx->tinfo.preempt_count =
15156 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
15157 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
15158 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15159 + __set_fs(MAKE_MM_SEG(0));
15160 +#endif
15161
15162 if (unlikely(overflow))
15163 call_on_stack(print_stack_overflow, isp);
15164 @@ -116,6 +110,11 @@ execute_on_irq_stack(int overflow, struc
15165 : "0" (irq), "1" (desc), "2" (isp),
15166 "D" (desc->handle_irq)
15167 : "memory", "cc", "ecx");
15168 +
15169 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15170 + __set_fs(current_thread_info()->addr_limit);
15171 +#endif
15172 +
15173 return 1;
15174 }
15175
15176 @@ -124,28 +123,11 @@ execute_on_irq_stack(int overflow, struc
15177 */
15178 void __cpuinit irq_ctx_init(int cpu)
15179 {
15180 - union irq_ctx *irqctx;
15181 -
15182 if (per_cpu(hardirq_ctx, cpu))
15183 return;
15184
15185 - irqctx = &per_cpu(hardirq_stack, cpu);
15186 - irqctx->tinfo.task = NULL;
15187 - irqctx->tinfo.exec_domain = NULL;
15188 - irqctx->tinfo.cpu = cpu;
15189 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
15190 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15191 -
15192 - per_cpu(hardirq_ctx, cpu) = irqctx;
15193 -
15194 - irqctx = &per_cpu(softirq_stack, cpu);
15195 - irqctx->tinfo.task = NULL;
15196 - irqctx->tinfo.exec_domain = NULL;
15197 - irqctx->tinfo.cpu = cpu;
15198 - irqctx->tinfo.preempt_count = 0;
15199 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15200 -
15201 - per_cpu(softirq_ctx, cpu) = irqctx;
15202 + per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
15203 + per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
15204
15205 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
15206 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
15207 @@ -159,7 +141,6 @@ void irq_ctx_exit(int cpu)
15208 asmlinkage void do_softirq(void)
15209 {
15210 unsigned long flags;
15211 - struct thread_info *curctx;
15212 union irq_ctx *irqctx;
15213 u32 *isp;
15214
15215 @@ -169,15 +150,22 @@ asmlinkage void do_softirq(void)
15216 local_irq_save(flags);
15217
15218 if (local_softirq_pending()) {
15219 - curctx = current_thread_info();
15220 irqctx = __get_cpu_var(softirq_ctx);
15221 - irqctx->tinfo.task = curctx->task;
15222 - irqctx->tinfo.previous_esp = current_stack_pointer;
15223 + irqctx->previous_esp = current_stack_pointer;
15224
15225 /* build the stack frame on the softirq stack */
15226 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15227 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15228 +
15229 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15230 + __set_fs(MAKE_MM_SEG(0));
15231 +#endif
15232
15233 call_on_stack(__do_softirq, isp);
15234 +
15235 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15236 + __set_fs(current_thread_info()->addr_limit);
15237 +#endif
15238 +
15239 /*
15240 * Shouldnt happen, we returned above if in_interrupt():
15241 */
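
A note on the irq_32.c hunks above: with CONFIG_PAX_MEMORY_UDEREF the handlers run on the IRQ and softirq stacks between `__set_fs(MAKE_MM_SEG(0))` and `__set_fs(current_thread_info()->addr_limit)`, presumably so that stray userland dereferences fault while the handler is on the interrupt stack, with the thread's own limit restored afterwards. The sketch below shows only that save, zero, restore bracket; set_fs() here is a stand-in that just records a value.

#include <stdio.h>

/* Stand-ins: in the patched kernel these manipulate the thread's
 * addr_limit; here they only record the value so the sketch runs. */
static unsigned long current_limit = 0xc0000000UL;   /* pretend user limit */
static void set_fs(unsigned long seg) { current_limit = seg; }

static void handler(void) { printf("handler runs with limit %#lx\n", current_limit); }

int main(void)
{
    unsigned long saved = current_limit;

    set_fs(0);          /* mirrors __set_fs(MAKE_MM_SEG(0)) before call_on_stack  */
    handler();          /* userland accesses would fault here in the real kernel  */
    set_fs(saved);      /* mirrors restoring current_thread_info()->addr_limit    */

    printf("limit restored to %#lx\n", current_limit);
    return 0;
}
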
15242 diff -urNp linux-2.6.32.44/arch/x86/kernel/irq.c linux-2.6.32.44/arch/x86/kernel/irq.c
15243 --- linux-2.6.32.44/arch/x86/kernel/irq.c 2011-03-27 14:31:47.000000000 -0400
15244 +++ linux-2.6.32.44/arch/x86/kernel/irq.c 2011-05-04 17:56:28.000000000 -0400
15245 @@ -15,7 +15,7 @@
15246 #include <asm/mce.h>
15247 #include <asm/hw_irq.h>
15248
15249 -atomic_t irq_err_count;
15250 +atomic_unchecked_t irq_err_count;
15251
15252 /* Function pointer for generic interrupt vector handling */
15253 void (*generic_interrupt_extension)(void) = NULL;
15254 @@ -114,9 +114,9 @@ static int show_other_interrupts(struct
15255 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
15256 seq_printf(p, " Machine check polls\n");
15257 #endif
15258 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
15259 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
15260 #if defined(CONFIG_X86_IO_APIC)
15261 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
15262 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
15263 #endif
15264 return 0;
15265 }
15266 @@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
15267
15268 u64 arch_irq_stat(void)
15269 {
15270 - u64 sum = atomic_read(&irq_err_count);
15271 + u64 sum = atomic_read_unchecked(&irq_err_count);
15272
15273 #ifdef CONFIG_X86_IO_APIC
15274 - sum += atomic_read(&irq_mis_count);
15275 + sum += atomic_read_unchecked(&irq_mis_count);
15276 #endif
15277 return sum;
15278 }
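
A note on the counter changes above (irq_err_count, irq_mis_count, and the earlier nmi_update_count): they switch from atomic_t to atomic_unchecked_t with matching *_unchecked accessors, which as used throughout this patch appears to exempt plain statistics counters from the overflow checking applied to reference counts. The sketch below only demonstrates the call pattern; the type and helpers are userspace stand-ins, not the patch's own definitions.

#include <stdio.h>

/* Stand-in: in the patch, atomic_unchecked_t mirrors atomic_t but is
 * skipped by the overflow checks applied to reference counts.  A plain
 * int is enough to show the call pattern outside the kernel. */
typedef struct { int counter; } atomic_unchecked_t;
#define ATOMIC_INIT(v) { (v) }

static void atomic_inc_unchecked(atomic_unchecked_t *v)        { v->counter++; }
static int  atomic_read_unchecked(const atomic_unchecked_t *v) { return v->counter; }

static atomic_unchecked_t irq_err_count = ATOMIC_INIT(0);

int main(void)
{
    atomic_inc_unchecked(&irq_err_count);                 /* e.g. a spurious IRQ */
    printf("ERR: %d\n", atomic_read_unchecked(&irq_err_count));
    return 0;
}
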
15279 diff -urNp linux-2.6.32.44/arch/x86/kernel/kgdb.c linux-2.6.32.44/arch/x86/kernel/kgdb.c
15280 --- linux-2.6.32.44/arch/x86/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
15281 +++ linux-2.6.32.44/arch/x86/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
15282 @@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vec
15283
15284 /* clear the trace bit */
15285 linux_regs->flags &= ~X86_EFLAGS_TF;
15286 - atomic_set(&kgdb_cpu_doing_single_step, -1);
15287 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
15288
15289 /* set the trace bit if we're stepping */
15290 if (remcomInBuffer[0] == 's') {
15291 linux_regs->flags |= X86_EFLAGS_TF;
15292 kgdb_single_step = 1;
15293 - atomic_set(&kgdb_cpu_doing_single_step,
15294 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
15295 raw_smp_processor_id());
15296 }
15297
15298 @@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args
15299 break;
15300
15301 case DIE_DEBUG:
15302 - if (atomic_read(&kgdb_cpu_doing_single_step) ==
15303 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
15304 raw_smp_processor_id()) {
15305 if (user_mode(regs))
15306 return single_step_cont(regs, args);
15307 @@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception
15308 return instruction_pointer(regs);
15309 }
15310
15311 -struct kgdb_arch arch_kgdb_ops = {
15312 +const struct kgdb_arch arch_kgdb_ops = {
15313 /* Breakpoint instruction: */
15314 .gdb_bpt_instr = { 0xcc },
15315 .flags = KGDB_HW_BREAKPOINT,
15316 diff -urNp linux-2.6.32.44/arch/x86/kernel/kprobes.c linux-2.6.32.44/arch/x86/kernel/kprobes.c
15317 --- linux-2.6.32.44/arch/x86/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
15318 +++ linux-2.6.32.44/arch/x86/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
15319 @@ -166,9 +166,13 @@ static void __kprobes set_jmp_op(void *f
15320 char op;
15321 s32 raddr;
15322 } __attribute__((packed)) * jop;
15323 - jop = (struct __arch_jmp_op *)from;
15324 +
15325 + jop = (struct __arch_jmp_op *)(ktla_ktva(from));
15326 +
15327 + pax_open_kernel();
15328 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
15329 jop->op = RELATIVEJUMP_INSTRUCTION;
15330 + pax_close_kernel();
15331 }
15332
15333 /*
15334 @@ -193,7 +197,7 @@ static int __kprobes can_boost(kprobe_op
15335 kprobe_opcode_t opcode;
15336 kprobe_opcode_t *orig_opcodes = opcodes;
15337
15338 - if (search_exception_tables((unsigned long)opcodes))
15339 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
15340 return 0; /* Page fault may occur on this address. */
15341
15342 retry:
15343 @@ -337,7 +341,9 @@ static void __kprobes fix_riprel(struct
15344 disp = (u8 *) p->addr + *((s32 *) insn) -
15345 (u8 *) p->ainsn.insn;
15346 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
15347 + pax_open_kernel();
15348 *(s32 *)insn = (s32) disp;
15349 + pax_close_kernel();
15350 }
15351 }
15352 #endif
15353 @@ -345,16 +351,18 @@ static void __kprobes fix_riprel(struct
15354
15355 static void __kprobes arch_copy_kprobe(struct kprobe *p)
15356 {
15357 - memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
15358 + pax_open_kernel();
15359 + memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
15360 + pax_close_kernel();
15361
15362 fix_riprel(p);
15363
15364 - if (can_boost(p->addr))
15365 + if (can_boost(ktla_ktva(p->addr)))
15366 p->ainsn.boostable = 0;
15367 else
15368 p->ainsn.boostable = -1;
15369
15370 - p->opcode = *p->addr;
15371 + p->opcode = *(ktla_ktva(p->addr));
15372 }
15373
15374 int __kprobes arch_prepare_kprobe(struct kprobe *p)
15375 @@ -432,7 +440,7 @@ static void __kprobes prepare_singlestep
15376 if (p->opcode == BREAKPOINT_INSTRUCTION)
15377 regs->ip = (unsigned long)p->addr;
15378 else
15379 - regs->ip = (unsigned long)p->ainsn.insn;
15380 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15381 }
15382
15383 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
15384 @@ -453,7 +461,7 @@ static void __kprobes setup_singlestep(s
15385 if (p->ainsn.boostable == 1 && !p->post_handler) {
15386 /* Boost up -- we can execute copied instructions directly */
15387 reset_current_kprobe();
15388 - regs->ip = (unsigned long)p->ainsn.insn;
15389 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15390 preempt_enable_no_resched();
15391 return;
15392 }
15393 @@ -523,7 +531,7 @@ static int __kprobes kprobe_handler(stru
15394 struct kprobe_ctlblk *kcb;
15395
15396 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
15397 - if (*addr != BREAKPOINT_INSTRUCTION) {
15398 + if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
15399 /*
15400 * The breakpoint instruction was removed right
15401 * after we hit it. Another cpu has removed
15402 @@ -775,7 +783,7 @@ static void __kprobes resume_execution(s
15403 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
15404 {
15405 unsigned long *tos = stack_addr(regs);
15406 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
15407 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
15408 unsigned long orig_ip = (unsigned long)p->addr;
15409 kprobe_opcode_t *insn = p->ainsn.insn;
15410
15411 @@ -958,7 +966,7 @@ int __kprobes kprobe_exceptions_notify(s
15412 struct die_args *args = data;
15413 int ret = NOTIFY_DONE;
15414
15415 - if (args->regs && user_mode_vm(args->regs))
15416 + if (args->regs && user_mode(args->regs))
15417 return ret;
15418
15419 switch (val) {
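
A note on the kprobes.c hunks above: probe addresses are passed through ktla_ktva() before being read, copied or boosted, and the copied slot address is converted back with ktva_ktla() before being loaded into regs->ip, so the two helpers are used as opposite-direction translations between the kernel text mappings this patch maintains. The sketch below only shows that round trip; the fixed TEXT_ALIAS_DELTA is invented for illustration and says nothing about how the real mappings relate.

#include <stdio.h>

/* Stand-ins for the two opposite-direction translations the hunks above
 * apply to probe addresses; a fixed offset stands in for however the
 * patched kernel actually relates its two text mappings. */
#define TEXT_ALIAS_DELTA 0x10000000UL
static unsigned long ktla_ktva(unsigned long addr) { return addr + TEXT_ALIAS_DELTA; }
static unsigned long ktva_ktla(unsigned long addr) { return addr - TEXT_ALIAS_DELTA; }

int main(void)
{
    unsigned long probe = 0xc0123456UL;                /* made-up text address */
    unsigned long alias = ktla_ktva(probe);            /* where the bytes are read */
    printf("probe %#lx, alias %#lx, round trip %#lx\n",
           probe, alias, ktva_ktla(alias));            /* back to the original */
    return 0;
}
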
15420 diff -urNp linux-2.6.32.44/arch/x86/kernel/ldt.c linux-2.6.32.44/arch/x86/kernel/ldt.c
15421 --- linux-2.6.32.44/arch/x86/kernel/ldt.c 2011-03-27 14:31:47.000000000 -0400
15422 +++ linux-2.6.32.44/arch/x86/kernel/ldt.c 2011-04-17 15:56:46.000000000 -0400
15423 @@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, i
15424 if (reload) {
15425 #ifdef CONFIG_SMP
15426 preempt_disable();
15427 - load_LDT(pc);
15428 + load_LDT_nolock(pc);
15429 if (!cpumask_equal(mm_cpumask(current->mm),
15430 cpumask_of(smp_processor_id())))
15431 smp_call_function(flush_ldt, current->mm, 1);
15432 preempt_enable();
15433 #else
15434 - load_LDT(pc);
15435 + load_LDT_nolock(pc);
15436 #endif
15437 }
15438 if (oldsize) {
15439 @@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t
15440 return err;
15441
15442 for (i = 0; i < old->size; i++)
15443 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
15444 + write_ldt_entry(new->ldt, i, old->ldt + i);
15445 return 0;
15446 }
15447
15448 @@ -115,6 +115,24 @@ int init_new_context(struct task_struct
15449 retval = copy_ldt(&mm->context, &old_mm->context);
15450 mutex_unlock(&old_mm->context.lock);
15451 }
15452 +
15453 + if (tsk == current) {
15454 + mm->context.vdso = 0;
15455 +
15456 +#ifdef CONFIG_X86_32
15457 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
15458 + mm->context.user_cs_base = 0UL;
15459 + mm->context.user_cs_limit = ~0UL;
15460 +
15461 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15462 + cpus_clear(mm->context.cpu_user_cs_mask);
15463 +#endif
15464 +
15465 +#endif
15466 +#endif
15467 +
15468 + }
15469 +
15470 return retval;
15471 }
15472
15473 @@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, u
15474 }
15475 }
15476
15477 +#ifdef CONFIG_PAX_SEGMEXEC
15478 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
15479 + error = -EINVAL;
15480 + goto out_unlock;
15481 + }
15482 +#endif
15483 +
15484 fill_ldt(&ldt, &ldt_info);
15485 if (oldmode)
15486 ldt.avl = 0;
15487 diff -urNp linux-2.6.32.44/arch/x86/kernel/machine_kexec_32.c linux-2.6.32.44/arch/x86/kernel/machine_kexec_32.c
15488 --- linux-2.6.32.44/arch/x86/kernel/machine_kexec_32.c 2011-03-27 14:31:47.000000000 -0400
15489 +++ linux-2.6.32.44/arch/x86/kernel/machine_kexec_32.c 2011-04-17 15:56:46.000000000 -0400
15490 @@ -26,7 +26,7 @@
15491 #include <asm/system.h>
15492 #include <asm/cacheflush.h>
15493
15494 -static void set_idt(void *newidt, __u16 limit)
15495 +static void set_idt(struct desc_struct *newidt, __u16 limit)
15496 {
15497 struct desc_ptr curidt;
15498
15499 @@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16
15500 }
15501
15502
15503 -static void set_gdt(void *newgdt, __u16 limit)
15504 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
15505 {
15506 struct desc_ptr curgdt;
15507
15508 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
15509 }
15510
15511 control_page = page_address(image->control_code_page);
15512 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
15513 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
15514
15515 relocate_kernel_ptr = control_page;
15516 page_list[PA_CONTROL_PAGE] = __pa(control_page);
15517 diff -urNp linux-2.6.32.44/arch/x86/kernel/microcode_amd.c linux-2.6.32.44/arch/x86/kernel/microcode_amd.c
15518 --- linux-2.6.32.44/arch/x86/kernel/microcode_amd.c 2011-04-17 17:00:52.000000000 -0400
15519 +++ linux-2.6.32.44/arch/x86/kernel/microcode_amd.c 2011-04-17 17:03:05.000000000 -0400
15520 @@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int c
15521 uci->mc = NULL;
15522 }
15523
15524 -static struct microcode_ops microcode_amd_ops = {
15525 +static const struct microcode_ops microcode_amd_ops = {
15526 .request_microcode_user = request_microcode_user,
15527 .request_microcode_fw = request_microcode_fw,
15528 .collect_cpu_info = collect_cpu_info_amd,
15529 @@ -372,7 +372,7 @@ static struct microcode_ops microcode_am
15530 .microcode_fini_cpu = microcode_fini_cpu_amd,
15531 };
15532
15533 -struct microcode_ops * __init init_amd_microcode(void)
15534 +const struct microcode_ops * __init init_amd_microcode(void)
15535 {
15536 return &microcode_amd_ops;
15537 }
15538 diff -urNp linux-2.6.32.44/arch/x86/kernel/microcode_core.c linux-2.6.32.44/arch/x86/kernel/microcode_core.c
15539 --- linux-2.6.32.44/arch/x86/kernel/microcode_core.c 2011-03-27 14:31:47.000000000 -0400
15540 +++ linux-2.6.32.44/arch/x86/kernel/microcode_core.c 2011-04-17 15:56:46.000000000 -0400
15541 @@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
15542
15543 #define MICROCODE_VERSION "2.00"
15544
15545 -static struct microcode_ops *microcode_ops;
15546 +static const struct microcode_ops *microcode_ops;
15547
15548 /*
15549 * Synchronization.
15550 diff -urNp linux-2.6.32.44/arch/x86/kernel/microcode_intel.c linux-2.6.32.44/arch/x86/kernel/microcode_intel.c
15551 --- linux-2.6.32.44/arch/x86/kernel/microcode_intel.c 2011-03-27 14:31:47.000000000 -0400
15552 +++ linux-2.6.32.44/arch/x86/kernel/microcode_intel.c 2011-04-17 15:56:46.000000000 -0400
15553 @@ -443,13 +443,13 @@ static enum ucode_state request_microcod
15554
15555 static int get_ucode_user(void *to, const void *from, size_t n)
15556 {
15557 - return copy_from_user(to, from, n);
15558 + return copy_from_user(to, (__force const void __user *)from, n);
15559 }
15560
15561 static enum ucode_state
15562 request_microcode_user(int cpu, const void __user *buf, size_t size)
15563 {
15564 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
15565 + return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
15566 }
15567
15568 static void microcode_fini_cpu(int cpu)
15569 @@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
15570 uci->mc = NULL;
15571 }
15572
15573 -static struct microcode_ops microcode_intel_ops = {
15574 +static const struct microcode_ops microcode_intel_ops = {
15575 .request_microcode_user = request_microcode_user,
15576 .request_microcode_fw = request_microcode_fw,
15577 .collect_cpu_info = collect_cpu_info,
15578 @@ -468,7 +468,7 @@ static struct microcode_ops microcode_in
15579 .microcode_fini_cpu = microcode_fini_cpu,
15580 };
15581
15582 -struct microcode_ops * __init init_intel_microcode(void)
15583 +const struct microcode_ops * __init init_intel_microcode(void)
15584 {
15585 return &microcode_intel_ops;
15586 }
15587 diff -urNp linux-2.6.32.44/arch/x86/kernel/module.c linux-2.6.32.44/arch/x86/kernel/module.c
15588 --- linux-2.6.32.44/arch/x86/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
15589 +++ linux-2.6.32.44/arch/x86/kernel/module.c 2011-04-17 15:56:46.000000000 -0400
15590 @@ -34,7 +34,7 @@
15591 #define DEBUGP(fmt...)
15592 #endif
15593
15594 -void *module_alloc(unsigned long size)
15595 +static void *__module_alloc(unsigned long size, pgprot_t prot)
15596 {
15597 struct vm_struct *area;
15598
15599 @@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
15600 if (!area)
15601 return NULL;
15602
15603 - return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
15604 - PAGE_KERNEL_EXEC);
15605 + return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
15606 +}
15607 +
15608 +void *module_alloc(unsigned long size)
15609 +{
15610 +
15611 +#ifdef CONFIG_PAX_KERNEXEC
15612 + return __module_alloc(size, PAGE_KERNEL);
15613 +#else
15614 + return __module_alloc(size, PAGE_KERNEL_EXEC);
15615 +#endif
15616 +
15617 }
15618
15619 /* Free memory returned from module_alloc */
15620 @@ -58,6 +68,40 @@ void module_free(struct module *mod, voi
15621 vfree(module_region);
15622 }
15623
15624 +#ifdef CONFIG_PAX_KERNEXEC
15625 +#ifdef CONFIG_X86_32
15626 +void *module_alloc_exec(unsigned long size)
15627 +{
15628 + struct vm_struct *area;
15629 +
15630 + if (size == 0)
15631 + return NULL;
15632 +
15633 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
15634 + return area ? area->addr : NULL;
15635 +}
15636 +EXPORT_SYMBOL(module_alloc_exec);
15637 +
15638 +void module_free_exec(struct module *mod, void *module_region)
15639 +{
15640 + vunmap(module_region);
15641 +}
15642 +EXPORT_SYMBOL(module_free_exec);
15643 +#else
15644 +void module_free_exec(struct module *mod, void *module_region)
15645 +{
15646 + module_free(mod, module_region);
15647 +}
15648 +EXPORT_SYMBOL(module_free_exec);
15649 +
15650 +void *module_alloc_exec(unsigned long size)
15651 +{
15652 + return __module_alloc(size, PAGE_KERNEL_RX);
15653 +}
15654 +EXPORT_SYMBOL(module_alloc_exec);
15655 +#endif
15656 +#endif
15657 +
15658 /* We don't need anything special. */
15659 int module_frob_arch_sections(Elf_Ehdr *hdr,
15660 Elf_Shdr *sechdrs,
15661 @@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15662 unsigned int i;
15663 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
15664 Elf32_Sym *sym;
15665 - uint32_t *location;
15666 + uint32_t *plocation, location;
15667
15668 DEBUGP("Applying relocate section %u to %u\n", relsec,
15669 sechdrs[relsec].sh_info);
15670 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
15671 /* This is where to make the change */
15672 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
15673 - + rel[i].r_offset;
15674 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
15675 + location = (uint32_t)plocation;
15676 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
15677 + plocation = ktla_ktva((void *)plocation);
15678 /* This is the symbol it is referring to. Note that all
15679 undefined symbols have been resolved. */
15680 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
15681 @@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15682 switch (ELF32_R_TYPE(rel[i].r_info)) {
15683 case R_386_32:
15684 /* We add the value into the location given */
15685 - *location += sym->st_value;
15686 + pax_open_kernel();
15687 + *plocation += sym->st_value;
15688 + pax_close_kernel();
15689 break;
15690 case R_386_PC32:
15691 /* Add the value, subtract its postition */
15692 - *location += sym->st_value - (uint32_t)location;
15693 + pax_open_kernel();
15694 + *plocation += sym->st_value - location;
15695 + pax_close_kernel();
15696 break;
15697 default:
15698 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
15699 @@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
15700 case R_X86_64_NONE:
15701 break;
15702 case R_X86_64_64:
15703 + pax_open_kernel();
15704 *(u64 *)loc = val;
15705 + pax_close_kernel();
15706 break;
15707 case R_X86_64_32:
15708 + pax_open_kernel();
15709 *(u32 *)loc = val;
15710 + pax_close_kernel();
15711 if (val != *(u32 *)loc)
15712 goto overflow;
15713 break;
15714 case R_X86_64_32S:
15715 + pax_open_kernel();
15716 *(s32 *)loc = val;
15717 + pax_close_kernel();
15718 if ((s64)val != *(s32 *)loc)
15719 goto overflow;
15720 break;
15721 case R_X86_64_PC32:
15722 val -= (u64)loc;
15723 + pax_open_kernel();
15724 *(u32 *)loc = val;
15725 + pax_close_kernel();
15726 +
15727 #if 0
15728 if ((s64)val != *(s32 *)loc)
15729 goto overflow;
15730 diff -urNp linux-2.6.32.44/arch/x86/kernel/paravirt.c linux-2.6.32.44/arch/x86/kernel/paravirt.c
15731 --- linux-2.6.32.44/arch/x86/kernel/paravirt.c 2011-03-27 14:31:47.000000000 -0400
15732 +++ linux-2.6.32.44/arch/x86/kernel/paravirt.c 2011-08-05 20:33:55.000000000 -0400
15733 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
15734 {
15735 return x;
15736 }
15737 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
15738 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
15739 +#endif
15740
15741 void __init default_banner(void)
15742 {
15743 @@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
15744 * corresponding structure. */
15745 static void *get_call_destination(u8 type)
15746 {
15747 - struct paravirt_patch_template tmpl = {
15748 + const struct paravirt_patch_template tmpl = {
15749 .pv_init_ops = pv_init_ops,
15750 .pv_time_ops = pv_time_ops,
15751 .pv_cpu_ops = pv_cpu_ops,
15752 @@ -133,6 +136,8 @@ static void *get_call_destination(u8 typ
15753 .pv_lock_ops = pv_lock_ops,
15754 #endif
15755 };
15756 +
15757 + pax_track_stack();
15758 return *((void **)&tmpl + type);
15759 }
15760
15761 @@ -145,15 +150,19 @@ unsigned paravirt_patch_default(u8 type,
15762 if (opfunc == NULL)
15763 /* If there's no function, patch it with a ud2a (BUG) */
15764 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
15765 - else if (opfunc == _paravirt_nop)
15766 + else if (opfunc == (void *)_paravirt_nop)
15767 /* If the operation is a nop, then nop the callsite */
15768 ret = paravirt_patch_nop();
15769
15770 /* identity functions just return their single argument */
15771 - else if (opfunc == _paravirt_ident_32)
15772 + else if (opfunc == (void *)_paravirt_ident_32)
15773 ret = paravirt_patch_ident_32(insnbuf, len);
15774 - else if (opfunc == _paravirt_ident_64)
15775 + else if (opfunc == (void *)_paravirt_ident_64)
15776 + ret = paravirt_patch_ident_64(insnbuf, len);
15777 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
15778 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
15779 ret = paravirt_patch_ident_64(insnbuf, len);
15780 +#endif
15781
15782 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
15783 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
15784 @@ -178,7 +187,7 @@ unsigned paravirt_patch_insns(void *insn
15785 if (insn_len > len || start == NULL)
15786 insn_len = len;
15787 else
15788 - memcpy(insnbuf, start, insn_len);
15789 + memcpy(insnbuf, ktla_ktva(start), insn_len);
15790
15791 return insn_len;
15792 }
15793 @@ -294,22 +303,22 @@ void arch_flush_lazy_mmu_mode(void)
15794 preempt_enable();
15795 }
15796
15797 -struct pv_info pv_info = {
15798 +struct pv_info pv_info __read_only = {
15799 .name = "bare hardware",
15800 .paravirt_enabled = 0,
15801 .kernel_rpl = 0,
15802 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
15803 };
15804
15805 -struct pv_init_ops pv_init_ops = {
15806 +struct pv_init_ops pv_init_ops __read_only = {
15807 .patch = native_patch,
15808 };
15809
15810 -struct pv_time_ops pv_time_ops = {
15811 +struct pv_time_ops pv_time_ops __read_only = {
15812 .sched_clock = native_sched_clock,
15813 };
15814
15815 -struct pv_irq_ops pv_irq_ops = {
15816 +struct pv_irq_ops pv_irq_ops __read_only = {
15817 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
15818 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
15819 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
15820 @@ -321,7 +330,7 @@ struct pv_irq_ops pv_irq_ops = {
15821 #endif
15822 };
15823
15824 -struct pv_cpu_ops pv_cpu_ops = {
15825 +struct pv_cpu_ops pv_cpu_ops __read_only = {
15826 .cpuid = native_cpuid,
15827 .get_debugreg = native_get_debugreg,
15828 .set_debugreg = native_set_debugreg,
15829 @@ -382,21 +391,26 @@ struct pv_cpu_ops pv_cpu_ops = {
15830 .end_context_switch = paravirt_nop,
15831 };
15832
15833 -struct pv_apic_ops pv_apic_ops = {
15834 +struct pv_apic_ops pv_apic_ops __read_only = {
15835 #ifdef CONFIG_X86_LOCAL_APIC
15836 .startup_ipi_hook = paravirt_nop,
15837 #endif
15838 };
15839
15840 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
15841 +#ifdef CONFIG_X86_32
15842 +#ifdef CONFIG_X86_PAE
15843 +/* 64-bit pagetable entries */
15844 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
15845 +#else
15846 /* 32-bit pagetable entries */
15847 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
15848 +#endif
15849 #else
15850 /* 64-bit pagetable entries */
15851 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
15852 #endif
15853
15854 -struct pv_mmu_ops pv_mmu_ops = {
15855 +struct pv_mmu_ops pv_mmu_ops __read_only = {
15856
15857 .read_cr2 = native_read_cr2,
15858 .write_cr2 = native_write_cr2,
15859 @@ -467,6 +481,12 @@ struct pv_mmu_ops pv_mmu_ops = {
15860 },
15861
15862 .set_fixmap = native_set_fixmap,
15863 +
15864 +#ifdef CONFIG_PAX_KERNEXEC
15865 + .pax_open_kernel = native_pax_open_kernel,
15866 + .pax_close_kernel = native_pax_close_kernel,
15867 +#endif
15868 +
15869 };
15870
15871 EXPORT_SYMBOL_GPL(pv_time_ops);
15872 diff -urNp linux-2.6.32.44/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.32.44/arch/x86/kernel/paravirt-spinlocks.c
15873 --- linux-2.6.32.44/arch/x86/kernel/paravirt-spinlocks.c 2011-03-27 14:31:47.000000000 -0400
15874 +++ linux-2.6.32.44/arch/x86/kernel/paravirt-spinlocks.c 2011-04-17 15:56:46.000000000 -0400
15875 @@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *
15876 __raw_spin_lock(lock);
15877 }
15878
15879 -struct pv_lock_ops pv_lock_ops = {
15880 +struct pv_lock_ops pv_lock_ops __read_only = {
15881 #ifdef CONFIG_SMP
15882 .spin_is_locked = __ticket_spin_is_locked,
15883 .spin_is_contended = __ticket_spin_is_contended,
15884 diff -urNp linux-2.6.32.44/arch/x86/kernel/pci-calgary_64.c linux-2.6.32.44/arch/x86/kernel/pci-calgary_64.c
15885 --- linux-2.6.32.44/arch/x86/kernel/pci-calgary_64.c 2011-03-27 14:31:47.000000000 -0400
15886 +++ linux-2.6.32.44/arch/x86/kernel/pci-calgary_64.c 2011-04-17 15:56:46.000000000 -0400
15887 @@ -477,7 +477,7 @@ static void calgary_free_coherent(struct
15888 free_pages((unsigned long)vaddr, get_order(size));
15889 }
15890
15891 -static struct dma_map_ops calgary_dma_ops = {
15892 +static const struct dma_map_ops calgary_dma_ops = {
15893 .alloc_coherent = calgary_alloc_coherent,
15894 .free_coherent = calgary_free_coherent,
15895 .map_sg = calgary_map_sg,
15896 diff -urNp linux-2.6.32.44/arch/x86/kernel/pci-dma.c linux-2.6.32.44/arch/x86/kernel/pci-dma.c
15897 --- linux-2.6.32.44/arch/x86/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
15898 +++ linux-2.6.32.44/arch/x86/kernel/pci-dma.c 2011-04-17 15:56:46.000000000 -0400
15899 @@ -14,7 +14,7 @@
15900
15901 static int forbid_dac __read_mostly;
15902
15903 -struct dma_map_ops *dma_ops;
15904 +const struct dma_map_ops *dma_ops;
15905 EXPORT_SYMBOL(dma_ops);
15906
15907 static int iommu_sac_force __read_mostly;
15908 @@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
15909
15910 int dma_supported(struct device *dev, u64 mask)
15911 {
15912 - struct dma_map_ops *ops = get_dma_ops(dev);
15913 + const struct dma_map_ops *ops = get_dma_ops(dev);
15914
15915 #ifdef CONFIG_PCI
15916 if (mask > 0xffffffff && forbid_dac > 0) {
15917 diff -urNp linux-2.6.32.44/arch/x86/kernel/pci-gart_64.c linux-2.6.32.44/arch/x86/kernel/pci-gart_64.c
15918 --- linux-2.6.32.44/arch/x86/kernel/pci-gart_64.c 2011-03-27 14:31:47.000000000 -0400
15919 +++ linux-2.6.32.44/arch/x86/kernel/pci-gart_64.c 2011-04-17 15:56:46.000000000 -0400
15920 @@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct ag
15921 return -1;
15922 }
15923
15924 -static struct dma_map_ops gart_dma_ops = {
15925 +static const struct dma_map_ops gart_dma_ops = {
15926 .map_sg = gart_map_sg,
15927 .unmap_sg = gart_unmap_sg,
15928 .map_page = gart_map_page,
15929 diff -urNp linux-2.6.32.44/arch/x86/kernel/pci-nommu.c linux-2.6.32.44/arch/x86/kernel/pci-nommu.c
15930 --- linux-2.6.32.44/arch/x86/kernel/pci-nommu.c 2011-03-27 14:31:47.000000000 -0400
15931 +++ linux-2.6.32.44/arch/x86/kernel/pci-nommu.c 2011-04-17 15:56:46.000000000 -0400
15932 @@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(str
15933 flush_write_buffers();
15934 }
15935
15936 -struct dma_map_ops nommu_dma_ops = {
15937 +const struct dma_map_ops nommu_dma_ops = {
15938 .alloc_coherent = dma_generic_alloc_coherent,
15939 .free_coherent = nommu_free_coherent,
15940 .map_sg = nommu_map_sg,
15941 diff -urNp linux-2.6.32.44/arch/x86/kernel/pci-swiotlb.c linux-2.6.32.44/arch/x86/kernel/pci-swiotlb.c
15942 --- linux-2.6.32.44/arch/x86/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
15943 +++ linux-2.6.32.44/arch/x86/kernel/pci-swiotlb.c 2011-04-17 15:56:46.000000000 -0400
15944 @@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(
15945 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
15946 }
15947
15948 -static struct dma_map_ops swiotlb_dma_ops = {
15949 +static const struct dma_map_ops swiotlb_dma_ops = {
15950 .mapping_error = swiotlb_dma_mapping_error,
15951 .alloc_coherent = x86_swiotlb_alloc_coherent,
15952 .free_coherent = swiotlb_free_coherent,
15953 diff -urNp linux-2.6.32.44/arch/x86/kernel/process_32.c linux-2.6.32.44/arch/x86/kernel/process_32.c
15954 --- linux-2.6.32.44/arch/x86/kernel/process_32.c 2011-06-25 12:55:34.000000000 -0400
15955 +++ linux-2.6.32.44/arch/x86/kernel/process_32.c 2011-06-25 12:56:37.000000000 -0400
15956 @@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __as
15957 unsigned long thread_saved_pc(struct task_struct *tsk)
15958 {
15959 return ((unsigned long *)tsk->thread.sp)[3];
15960 +//XXX return tsk->thread.eip;
15961 }
15962
15963 #ifndef CONFIG_SMP
15964 @@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, i
15965 unsigned short ss, gs;
15966 const char *board;
15967
15968 - if (user_mode_vm(regs)) {
15969 + if (user_mode(regs)) {
15970 sp = regs->sp;
15971 ss = regs->ss & 0xffff;
15972 - gs = get_user_gs(regs);
15973 } else {
15974 sp = (unsigned long) (&regs->sp);
15975 savesegment(ss, ss);
15976 - savesegment(gs, gs);
15977 }
15978 + gs = get_user_gs(regs);
15979
15980 printk("\n");
15981
15982 @@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), voi
15983 regs.bx = (unsigned long) fn;
15984 regs.dx = (unsigned long) arg;
15985
15986 - regs.ds = __USER_DS;
15987 - regs.es = __USER_DS;
15988 + regs.ds = __KERNEL_DS;
15989 + regs.es = __KERNEL_DS;
15990 regs.fs = __KERNEL_PERCPU;
15991 - regs.gs = __KERNEL_STACK_CANARY;
15992 + savesegment(gs, regs.gs);
15993 regs.orig_ax = -1;
15994 regs.ip = (unsigned long) kernel_thread_helper;
15995 regs.cs = __KERNEL_CS | get_kernel_rpl();
15996 @@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flag
15997 struct task_struct *tsk;
15998 int err;
15999
16000 - childregs = task_pt_regs(p);
16001 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
16002 *childregs = *regs;
16003 childregs->ax = 0;
16004 childregs->sp = sp;
16005
16006 p->thread.sp = (unsigned long) childregs;
16007 p->thread.sp0 = (unsigned long) (childregs+1);
16008 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16009
16010 p->thread.ip = (unsigned long) ret_from_fork;
16011
16012 @@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p,
16013 struct thread_struct *prev = &prev_p->thread,
16014 *next = &next_p->thread;
16015 int cpu = smp_processor_id();
16016 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
16017 + struct tss_struct *tss = init_tss + cpu;
16018 bool preload_fpu;
16019
16020 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
16021 @@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p,
16022 */
16023 lazy_save_gs(prev->gs);
16024
16025 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16026 + __set_fs(task_thread_info(next_p)->addr_limit);
16027 +#endif
16028 +
16029 /*
16030 * Load the per-thread Thread-Local Storage descriptor.
16031 */
16032 @@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p,
16033 */
16034 arch_end_context_switch(next_p);
16035
16036 + percpu_write(current_task, next_p);
16037 + percpu_write(current_tinfo, &next_p->tinfo);
16038 +
16039 if (preload_fpu)
16040 __math_state_restore();
16041
16042 @@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p,
16043 if (prev->gs | next->gs)
16044 lazy_load_gs(next->gs);
16045
16046 - percpu_write(current_task, next_p);
16047 -
16048 return prev_p;
16049 }
16050
16051 @@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_stru
16052 } while (count++ < 16);
16053 return 0;
16054 }
16055 -
16056 diff -urNp linux-2.6.32.44/arch/x86/kernel/process_64.c linux-2.6.32.44/arch/x86/kernel/process_64.c
16057 --- linux-2.6.32.44/arch/x86/kernel/process_64.c 2011-06-25 12:55:34.000000000 -0400
16058 +++ linux-2.6.32.44/arch/x86/kernel/process_64.c 2011-06-25 12:56:37.000000000 -0400
16059 @@ -91,7 +91,7 @@ static void __exit_idle(void)
16060 void exit_idle(void)
16061 {
16062 /* idle loop has pid 0 */
16063 - if (current->pid)
16064 + if (task_pid_nr(current))
16065 return;
16066 __exit_idle();
16067 }
16068 @@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, i
16069 if (!board)
16070 board = "";
16071 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
16072 - current->pid, current->comm, print_tainted(),
16073 + task_pid_nr(current), current->comm, print_tainted(),
16074 init_utsname()->release,
16075 (int)strcspn(init_utsname()->version, " "),
16076 init_utsname()->version, board);
16077 @@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flag
16078 struct pt_regs *childregs;
16079 struct task_struct *me = current;
16080
16081 - childregs = ((struct pt_regs *)
16082 - (THREAD_SIZE + task_stack_page(p))) - 1;
16083 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
16084 *childregs = *regs;
16085
16086 childregs->ax = 0;
16087 @@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flag
16088 p->thread.sp = (unsigned long) childregs;
16089 p->thread.sp0 = (unsigned long) (childregs+1);
16090 p->thread.usersp = me->thread.usersp;
16091 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16092
16093 set_tsk_thread_flag(p, TIF_FORK);
16094
16095 @@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p,
16096 struct thread_struct *prev = &prev_p->thread;
16097 struct thread_struct *next = &next_p->thread;
16098 int cpu = smp_processor_id();
16099 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
16100 + struct tss_struct *tss = init_tss + cpu;
16101 unsigned fsindex, gsindex;
16102 bool preload_fpu;
16103
16104 @@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p,
16105 prev->usersp = percpu_read(old_rsp);
16106 percpu_write(old_rsp, next->usersp);
16107 percpu_write(current_task, next_p);
16108 + percpu_write(current_tinfo, &next_p->tinfo);
16109
16110 - percpu_write(kernel_stack,
16111 - (unsigned long)task_stack_page(next_p) +
16112 - THREAD_SIZE - KERNEL_STACK_OFFSET);
16113 + percpu_write(kernel_stack, next->sp0);
16114
16115 /*
16116 * Now maybe reload the debug registers and handle I/O bitmaps
16117 @@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_stru
16118 if (!p || p == current || p->state == TASK_RUNNING)
16119 return 0;
16120 stack = (unsigned long)task_stack_page(p);
16121 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
16122 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
16123 return 0;
16124 fp = *(u64 *)(p->thread.sp);
16125 do {
16126 - if (fp < (unsigned long)stack ||
16127 - fp >= (unsigned long)stack+THREAD_SIZE)
16128 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
16129 return 0;
16130 ip = *(u64 *)(fp+8);
16131 if (!in_sched_functions(ip))
16132 diff -urNp linux-2.6.32.44/arch/x86/kernel/process.c linux-2.6.32.44/arch/x86/kernel/process.c
16133 --- linux-2.6.32.44/arch/x86/kernel/process.c 2011-04-22 19:16:29.000000000 -0400
16134 +++ linux-2.6.32.44/arch/x86/kernel/process.c 2011-05-22 23:02:03.000000000 -0400
16135 @@ -51,16 +51,33 @@ void free_thread_xstate(struct task_stru
16136
16137 void free_thread_info(struct thread_info *ti)
16138 {
16139 - free_thread_xstate(ti->task);
16140 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
16141 }
16142
16143 +static struct kmem_cache *task_struct_cachep;
16144 +
16145 void arch_task_cache_init(void)
16146 {
16147 - task_xstate_cachep =
16148 - kmem_cache_create("task_xstate", xstate_size,
16149 + /* create a slab on which task_structs can be allocated */
16150 + task_struct_cachep =
16151 + kmem_cache_create("task_struct", sizeof(struct task_struct),
16152 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
16153 +
16154 + task_xstate_cachep =
16155 + kmem_cache_create("task_xstate", xstate_size,
16156 __alignof__(union thread_xstate),
16157 - SLAB_PANIC | SLAB_NOTRACK, NULL);
16158 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
16159 +}
16160 +
16161 +struct task_struct *alloc_task_struct(void)
16162 +{
16163 + return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
16164 +}
16165 +
16166 +void free_task_struct(struct task_struct *task)
16167 +{
16168 + free_thread_xstate(task);
16169 + kmem_cache_free(task_struct_cachep, task);
16170 }
16171
16172 /*
16173 @@ -73,7 +90,7 @@ void exit_thread(void)
16174 unsigned long *bp = t->io_bitmap_ptr;
16175
16176 if (bp) {
16177 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
16178 + struct tss_struct *tss = init_tss + get_cpu();
16179
16180 t->io_bitmap_ptr = NULL;
16181 clear_thread_flag(TIF_IO_BITMAP);
16182 @@ -93,6 +110,9 @@ void flush_thread(void)
16183
16184 clear_tsk_thread_flag(tsk, TIF_DEBUG);
16185
16186 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
16187 + loadsegment(gs, 0);
16188 +#endif
16189 tsk->thread.debugreg0 = 0;
16190 tsk->thread.debugreg1 = 0;
16191 tsk->thread.debugreg2 = 0;
16192 @@ -307,7 +327,7 @@ void default_idle(void)
16193 EXPORT_SYMBOL(default_idle);
16194 #endif
16195
16196 -void stop_this_cpu(void *dummy)
16197 +__noreturn void stop_this_cpu(void *dummy)
16198 {
16199 local_irq_disable();
16200 /*
16201 @@ -568,16 +588,35 @@ static int __init idle_setup(char *str)
16202 }
16203 early_param("idle", idle_setup);
16204
16205 -unsigned long arch_align_stack(unsigned long sp)
16206 +#ifdef CONFIG_PAX_RANDKSTACK
16207 +asmlinkage void pax_randomize_kstack(void)
16208 {
16209 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
16210 - sp -= get_random_int() % 8192;
16211 - return sp & ~0xf;
16212 -}
16213 + struct thread_struct *thread = &current->thread;
16214 + unsigned long time;
16215
16216 -unsigned long arch_randomize_brk(struct mm_struct *mm)
16217 -{
16218 - unsigned long range_end = mm->brk + 0x02000000;
16219 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
16220 + if (!randomize_va_space)
16221 + return;
16222 +
16223 + rdtscl(time);
16224 +
16225 + /* P4 seems to return a 0 LSB, ignore it */
16226 +#ifdef CONFIG_MPENTIUM4
16227 + time &= 0x3EUL;
16228 + time <<= 2;
16229 +#elif defined(CONFIG_X86_64)
16230 + time &= 0xFUL;
16231 + time <<= 4;
16232 +#else
16233 + time &= 0x1FUL;
16234 + time <<= 3;
16235 +#endif
16236 +
16237 + thread->sp0 ^= time;
16238 + load_sp0(init_tss + smp_processor_id(), thread);
16239 +
16240 +#ifdef CONFIG_X86_64
16241 + percpu_write(kernel_stack, thread->sp0);
16242 +#endif
16243 }
16244 +#endif
16245
16246 diff -urNp linux-2.6.32.44/arch/x86/kernel/ptrace.c linux-2.6.32.44/arch/x86/kernel/ptrace.c
16247 --- linux-2.6.32.44/arch/x86/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
16248 +++ linux-2.6.32.44/arch/x86/kernel/ptrace.c 2011-04-17 15:56:46.000000000 -0400
16249 @@ -925,7 +925,7 @@ static const struct user_regset_view use
16250 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
16251 {
16252 int ret;
16253 - unsigned long __user *datap = (unsigned long __user *)data;
16254 + unsigned long __user *datap = (__force unsigned long __user *)data;
16255
16256 switch (request) {
16257 /* read the word at location addr in the USER area. */
16258 @@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *chi
16259 if (addr < 0)
16260 return -EIO;
16261 ret = do_get_thread_area(child, addr,
16262 - (struct user_desc __user *) data);
16263 + (__force struct user_desc __user *) data);
16264 break;
16265
16266 case PTRACE_SET_THREAD_AREA:
16267 if (addr < 0)
16268 return -EIO;
16269 ret = do_set_thread_area(child, addr,
16270 - (struct user_desc __user *) data, 0);
16271 + (__force struct user_desc __user *) data, 0);
16272 break;
16273 #endif
16274
16275 @@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *chi
16276 #ifdef CONFIG_X86_PTRACE_BTS
16277 case PTRACE_BTS_CONFIG:
16278 ret = ptrace_bts_config
16279 - (child, data, (struct ptrace_bts_config __user *)addr);
16280 + (child, data, (__force struct ptrace_bts_config __user *)addr);
16281 break;
16282
16283 case PTRACE_BTS_STATUS:
16284 ret = ptrace_bts_status
16285 - (child, data, (struct ptrace_bts_config __user *)addr);
16286 + (child, data, (__force struct ptrace_bts_config __user *)addr);
16287 break;
16288
16289 case PTRACE_BTS_SIZE:
16290 @@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *chi
16291
16292 case PTRACE_BTS_GET:
16293 ret = ptrace_bts_read_record
16294 - (child, data, (struct bts_struct __user *) addr);
16295 + (child, data, (__force struct bts_struct __user *) addr);
16296 break;
16297
16298 case PTRACE_BTS_CLEAR:
16299 @@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *chi
16300
16301 case PTRACE_BTS_DRAIN:
16302 ret = ptrace_bts_drain
16303 - (child, data, (struct bts_struct __user *) addr);
16304 + (child, data, (__force struct bts_struct __user *) addr);
16305 break;
16306 #endif /* CONFIG_X86_PTRACE_BTS */
16307
16308 @@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *ts
16309 info.si_code = si_code;
16310
16311 /* User-mode ip? */
16312 - info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
16313 + info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
16314
16315 /* Send us the fake SIGTRAP */
16316 force_sig_info(SIGTRAP, &info, tsk);
16317 @@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *ts
16318 * We must return the syscall number to actually look up in the table.
16319 * This can be -1L to skip running any syscall at all.
16320 */
16321 -asmregparm long syscall_trace_enter(struct pt_regs *regs)
16322 +long syscall_trace_enter(struct pt_regs *regs)
16323 {
16324 long ret = 0;
16325
16326 @@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(stru
16327 return ret ?: regs->orig_ax;
16328 }
16329
16330 -asmregparm void syscall_trace_leave(struct pt_regs *regs)
16331 +void syscall_trace_leave(struct pt_regs *regs)
16332 {
16333 if (unlikely(current->audit_context))
16334 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
16335 diff -urNp linux-2.6.32.44/arch/x86/kernel/reboot.c linux-2.6.32.44/arch/x86/kernel/reboot.c
16336 --- linux-2.6.32.44/arch/x86/kernel/reboot.c 2011-08-09 18:35:28.000000000 -0400
16337 +++ linux-2.6.32.44/arch/x86/kernel/reboot.c 2011-08-09 18:33:59.000000000 -0400
16338 @@ -33,7 +33,7 @@ void (*pm_power_off)(void);
16339 EXPORT_SYMBOL(pm_power_off);
16340
16341 static const struct desc_ptr no_idt = {};
16342 -static int reboot_mode;
16343 +static unsigned short reboot_mode;
16344 enum reboot_type reboot_type = BOOT_KBD;
16345 int reboot_force;
16346
16347 @@ -292,12 +292,12 @@ core_initcall(reboot_init);
16348 controller to pulse the CPU reset line, which is more thorough, but
16349 doesn't work with at least one type of 486 motherboard. It is easy
16350 to stop this code working; hence the copious comments. */
16351 -static const unsigned long long
16352 -real_mode_gdt_entries [3] =
16353 +static struct desc_struct
16354 +real_mode_gdt_entries [3] __read_only =
16355 {
16356 - 0x0000000000000000ULL, /* Null descriptor */
16357 - 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
16358 - 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
16359 + GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
16360 + GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
16361 + GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
16362 };
16363
16364 static const struct desc_ptr
16365 @@ -346,7 +346,7 @@ static const unsigned char jump_to_bios
16366 * specified by the code and length parameters.
16367 * We assume that length will aways be less that 100!
16368 */
16369 -void machine_real_restart(const unsigned char *code, int length)
16370 +__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
16371 {
16372 local_irq_disable();
16373
16374 @@ -366,8 +366,8 @@ void machine_real_restart(const unsigned
16375 /* Remap the kernel at virtual address zero, as well as offset zero
16376 from the kernel segment. This assumes the kernel segment starts at
16377 virtual address PAGE_OFFSET. */
16378 - memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16379 - sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
16380 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16381 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
16382
16383 /*
16384 * Use `swapper_pg_dir' as our page directory.
16385 @@ -379,16 +379,15 @@ void machine_real_restart(const unsigned
16386 boot)". This seems like a fairly standard thing that gets set by
16387 REBOOT.COM programs, and the previous reset routine did this
16388 too. */
16389 - *((unsigned short *)0x472) = reboot_mode;
16390 + *(unsigned short *)(__va(0x472)) = reboot_mode;
16391
16392 /* For the switch to real mode, copy some code to low memory. It has
16393 to be in the first 64k because it is running in 16-bit mode, and it
16394 has to have the same physical and virtual address, because it turns
16395 off paging. Copy it near the end of the first page, out of the way
16396 of BIOS variables. */
16397 - memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
16398 - real_mode_switch, sizeof (real_mode_switch));
16399 - memcpy((void *)(0x1000 - 100), code, length);
16400 + memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
16401 + memcpy(__va(0x1000 - 100), code, length);
16402
16403 /* Set up the IDT for real mode. */
16404 load_idt(&real_mode_idt);
16405 @@ -416,6 +415,7 @@ void machine_real_restart(const unsigned
16406 __asm__ __volatile__ ("ljmp $0x0008,%0"
16407 :
16408 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
16409 + do { } while (1);
16410 }
16411 #ifdef CONFIG_APM_MODULE
16412 EXPORT_SYMBOL(machine_real_restart);
16413 @@ -544,7 +544,7 @@ void __attribute__((weak)) mach_reboot_f
16414 {
16415 }
16416
16417 -static void native_machine_emergency_restart(void)
16418 +__noreturn static void native_machine_emergency_restart(void)
16419 {
16420 int i;
16421
16422 @@ -659,13 +659,13 @@ void native_machine_shutdown(void)
16423 #endif
16424 }
16425
16426 -static void __machine_emergency_restart(int emergency)
16427 +static __noreturn void __machine_emergency_restart(int emergency)
16428 {
16429 reboot_emergency = emergency;
16430 machine_ops.emergency_restart();
16431 }
16432
16433 -static void native_machine_restart(char *__unused)
16434 +static __noreturn void native_machine_restart(char *__unused)
16435 {
16436 printk("machine restart\n");
16437
16438 @@ -674,7 +674,7 @@ static void native_machine_restart(char
16439 __machine_emergency_restart(0);
16440 }
16441
16442 -static void native_machine_halt(void)
16443 +static __noreturn void native_machine_halt(void)
16444 {
16445 /* stop other cpus and apics */
16446 machine_shutdown();
16447 @@ -685,7 +685,7 @@ static void native_machine_halt(void)
16448 stop_this_cpu(NULL);
16449 }
16450
16451 -static void native_machine_power_off(void)
16452 +__noreturn static void native_machine_power_off(void)
16453 {
16454 if (pm_power_off) {
16455 if (!reboot_force)
16456 @@ -694,6 +694,7 @@ static void native_machine_power_off(voi
16457 }
16458 /* a fallback in case there is no PM info available */
16459 tboot_shutdown(TB_SHUTDOWN_HALT);
16460 + do { } while (1);
16461 }
16462
16463 struct machine_ops machine_ops = {
16464 diff -urNp linux-2.6.32.44/arch/x86/kernel/setup.c linux-2.6.32.44/arch/x86/kernel/setup.c
16465 --- linux-2.6.32.44/arch/x86/kernel/setup.c 2011-04-17 17:00:52.000000000 -0400
16466 +++ linux-2.6.32.44/arch/x86/kernel/setup.c 2011-04-17 17:03:05.000000000 -0400
16467 @@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
16468
16469 if (!boot_params.hdr.root_flags)
16470 root_mountflags &= ~MS_RDONLY;
16471 - init_mm.start_code = (unsigned long) _text;
16472 - init_mm.end_code = (unsigned long) _etext;
16473 + init_mm.start_code = ktla_ktva((unsigned long) _text);
16474 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
16475 init_mm.end_data = (unsigned long) _edata;
16476 init_mm.brk = _brk_end;
16477
16478 - code_resource.start = virt_to_phys(_text);
16479 - code_resource.end = virt_to_phys(_etext)-1;
16480 - data_resource.start = virt_to_phys(_etext);
16481 + code_resource.start = virt_to_phys(ktla_ktva(_text));
16482 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
16483 + data_resource.start = virt_to_phys(_sdata);
16484 data_resource.end = virt_to_phys(_edata)-1;
16485 bss_resource.start = virt_to_phys(&__bss_start);
16486 bss_resource.end = virt_to_phys(&__bss_stop)-1;
16487 diff -urNp linux-2.6.32.44/arch/x86/kernel/setup_percpu.c linux-2.6.32.44/arch/x86/kernel/setup_percpu.c
16488 --- linux-2.6.32.44/arch/x86/kernel/setup_percpu.c 2011-03-27 14:31:47.000000000 -0400
16489 +++ linux-2.6.32.44/arch/x86/kernel/setup_percpu.c 2011-06-04 20:36:29.000000000 -0400
16490 @@ -25,19 +25,17 @@
16491 # define DBG(x...)
16492 #endif
16493
16494 -DEFINE_PER_CPU(int, cpu_number);
16495 +#ifdef CONFIG_SMP
16496 +DEFINE_PER_CPU(unsigned int, cpu_number);
16497 EXPORT_PER_CPU_SYMBOL(cpu_number);
16498 +#endif
16499
16500 -#ifdef CONFIG_X86_64
16501 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
16502 -#else
16503 -#define BOOT_PERCPU_OFFSET 0
16504 -#endif
16505
16506 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
16507 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
16508
16509 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
16510 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
16511 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
16512 };
16513 EXPORT_SYMBOL(__per_cpu_offset);
16514 @@ -159,10 +157,10 @@ static inline void setup_percpu_segment(
16515 {
16516 #ifdef CONFIG_X86_32
16517 struct desc_struct gdt;
16518 + unsigned long base = per_cpu_offset(cpu);
16519
16520 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
16521 - 0x2 | DESCTYPE_S, 0x8);
16522 - gdt.s = 1;
16523 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
16524 + 0x83 | DESCTYPE_S, 0xC);
16525 write_gdt_entry(get_cpu_gdt_table(cpu),
16526 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
16527 #endif
16528 @@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
16529 /* alrighty, percpu areas up and running */
16530 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
16531 for_each_possible_cpu(cpu) {
16532 +#ifdef CONFIG_CC_STACKPROTECTOR
16533 +#ifdef CONFIG_X86_32
16534 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
16535 +#endif
16536 +#endif
16537 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
16538 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
16539 per_cpu(cpu_number, cpu) = cpu;
16540 @@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
16541 early_per_cpu_map(x86_cpu_to_node_map, cpu);
16542 #endif
16543 #endif
16544 +#ifdef CONFIG_CC_STACKPROTECTOR
16545 +#ifdef CONFIG_X86_32
16546 + if (!cpu)
16547 + per_cpu(stack_canary.canary, cpu) = canary;
16548 +#endif
16549 +#endif
16550 /*
16551 * Up to this point, the boot CPU has been using .data.init
16552 * area. Reload any changed state for the boot CPU.
16553 diff -urNp linux-2.6.32.44/arch/x86/kernel/signal.c linux-2.6.32.44/arch/x86/kernel/signal.c
16554 --- linux-2.6.32.44/arch/x86/kernel/signal.c 2011-03-27 14:31:47.000000000 -0400
16555 +++ linux-2.6.32.44/arch/x86/kernel/signal.c 2011-05-22 23:02:03.000000000 -0400
16556 @@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsi
16557 * Align the stack pointer according to the i386 ABI,
16558 * i.e. so that on function entry ((sp + 4) & 15) == 0.
16559 */
16560 - sp = ((sp + 4) & -16ul) - 4;
16561 + sp = ((sp - 12) & -16ul) - 4;
16562 #else /* !CONFIG_X86_32 */
16563 sp = round_down(sp, 16) - 8;
16564 #endif
16565 @@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, str
16566 * Return an always-bogus address instead so we will die with SIGSEGV.
16567 */
16568 if (onsigstack && !likely(on_sig_stack(sp)))
16569 - return (void __user *)-1L;
16570 + return (__force void __user *)-1L;
16571
16572 /* save i387 state */
16573 if (used_math() && save_i387_xstate(*fpstate) < 0)
16574 - return (void __user *)-1L;
16575 + return (__force void __user *)-1L;
16576
16577 return (void __user *)sp;
16578 }
16579 @@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigactio
16580 }
16581
16582 if (current->mm->context.vdso)
16583 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16584 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16585 else
16586 - restorer = &frame->retcode;
16587 + restorer = (void __user *)&frame->retcode;
16588 if (ka->sa.sa_flags & SA_RESTORER)
16589 restorer = ka->sa.sa_restorer;
16590
16591 @@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigactio
16592 * reasons and because gdb uses it as a signature to notice
16593 * signal handler stack frames.
16594 */
16595 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
16596 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
16597
16598 if (err)
16599 return -EFAULT;
16600 @@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, str
16601 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
16602
16603 /* Set up to return from userspace. */
16604 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16605 + if (current->mm->context.vdso)
16606 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16607 + else
16608 + restorer = (void __user *)&frame->retcode;
16609 if (ka->sa.sa_flags & SA_RESTORER)
16610 restorer = ka->sa.sa_restorer;
16611 put_user_ex(restorer, &frame->pretcode);
16612 @@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, str
16613 * reasons and because gdb uses it as a signature to notice
16614 * signal handler stack frames.
16615 */
16616 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
16617 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
16618 } put_user_catch(err);
16619
16620 if (err)
16621 @@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *re
16622 int signr;
16623 sigset_t *oldset;
16624
16625 + pax_track_stack();
16626 +
16627 /*
16628 * We want the common case to go fast, which is why we may in certain
16629 * cases get here from kernel mode. Just return without doing anything
16630 @@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *re
16631 * X86_32: vm86 regs switched out by assembly code before reaching
16632 * here, so testing against kernel CS suffices.
16633 */
16634 - if (!user_mode(regs))
16635 + if (!user_mode_novm(regs))
16636 return;
16637
16638 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
16639 diff -urNp linux-2.6.32.44/arch/x86/kernel/smpboot.c linux-2.6.32.44/arch/x86/kernel/smpboot.c
16640 --- linux-2.6.32.44/arch/x86/kernel/smpboot.c 2011-03-27 14:31:47.000000000 -0400
16641 +++ linux-2.6.32.44/arch/x86/kernel/smpboot.c 2011-07-01 19:10:03.000000000 -0400
16642 @@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct
16643 */
16644 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
16645
16646 -void cpu_hotplug_driver_lock()
16647 +void cpu_hotplug_driver_lock(void)
16648 {
16649 - mutex_lock(&x86_cpu_hotplug_driver_mutex);
16650 + mutex_lock(&x86_cpu_hotplug_driver_mutex);
16651 }
16652
16653 -void cpu_hotplug_driver_unlock()
16654 +void cpu_hotplug_driver_unlock(void)
16655 {
16656 - mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16657 + mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16658 }
16659
16660 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
16661 @@ -625,7 +625,7 @@ wakeup_secondary_cpu_via_init(int phys_a
16662 * target processor state.
16663 */
16664 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
16665 - (unsigned long)stack_start.sp);
16666 + stack_start);
16667
16668 /*
16669 * Run STARTUP IPI loop.
16670 @@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int api
16671 set_idle_for_cpu(cpu, c_idle.idle);
16672 do_rest:
16673 per_cpu(current_task, cpu) = c_idle.idle;
16674 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
16675 #ifdef CONFIG_X86_32
16676 /* Stack for startup_32 can be just as for start_secondary onwards */
16677 irq_ctx_init(cpu);
16678 @@ -750,13 +751,15 @@ do_rest:
16679 #else
16680 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
16681 initial_gs = per_cpu_offset(cpu);
16682 - per_cpu(kernel_stack, cpu) =
16683 - (unsigned long)task_stack_page(c_idle.idle) -
16684 - KERNEL_STACK_OFFSET + THREAD_SIZE;
16685 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
16686 #endif
16687 +
16688 + pax_open_kernel();
16689 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16690 + pax_close_kernel();
16691 +
16692 initial_code = (unsigned long)start_secondary;
16693 - stack_start.sp = (void *) c_idle.idle->thread.sp;
16694 + stack_start = c_idle.idle->thread.sp;
16695
16696 /* start_ip had better be page-aligned! */
16697 start_ip = setup_trampoline();
16698 @@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int
16699
16700 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
16701
16702 +#ifdef CONFIG_PAX_PER_CPU_PGD
16703 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
16704 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16705 + KERNEL_PGD_PTRS);
16706 +#endif
16707 +
16708 err = do_boot_cpu(apicid, cpu);
16709
16710 if (err) {
16711 diff -urNp linux-2.6.32.44/arch/x86/kernel/step.c linux-2.6.32.44/arch/x86/kernel/step.c
16712 --- linux-2.6.32.44/arch/x86/kernel/step.c 2011-03-27 14:31:47.000000000 -0400
16713 +++ linux-2.6.32.44/arch/x86/kernel/step.c 2011-04-17 15:56:46.000000000 -0400
16714 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
16715 struct desc_struct *desc;
16716 unsigned long base;
16717
16718 - seg &= ~7UL;
16719 + seg >>= 3;
16720
16721 mutex_lock(&child->mm->context.lock);
16722 - if (unlikely((seg >> 3) >= child->mm->context.size))
16723 + if (unlikely(seg >= child->mm->context.size))
16724 addr = -1L; /* bogus selector, access would fault */
16725 else {
16726 desc = child->mm->context.ldt + seg;
16727 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
16728 addr += base;
16729 }
16730 mutex_unlock(&child->mm->context.lock);
16731 - }
16732 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
16733 + addr = ktla_ktva(addr);
16734
16735 return addr;
16736 }
16737 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
16738 unsigned char opcode[15];
16739 unsigned long addr = convert_ip_to_linear(child, regs);
16740
16741 + if (addr == -EINVAL)
16742 + return 0;
16743 +
16744 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
16745 for (i = 0; i < copied; i++) {
16746 switch (opcode[i]) {
16747 @@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
16748
16749 #ifdef CONFIG_X86_64
16750 case 0x40 ... 0x4f:
16751 - if (regs->cs != __USER_CS)
16752 + if ((regs->cs & 0xffff) != __USER_CS)
16753 /* 32-bit mode: register increment */
16754 return 0;
16755 /* 64-bit mode: REX prefix */
16756 diff -urNp linux-2.6.32.44/arch/x86/kernel/syscall_table_32.S linux-2.6.32.44/arch/x86/kernel/syscall_table_32.S
16757 --- linux-2.6.32.44/arch/x86/kernel/syscall_table_32.S 2011-03-27 14:31:47.000000000 -0400
16758 +++ linux-2.6.32.44/arch/x86/kernel/syscall_table_32.S 2011-04-17 15:56:46.000000000 -0400
16759 @@ -1,3 +1,4 @@
16760 +.section .rodata,"a",@progbits
16761 ENTRY(sys_call_table)
16762 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
16763 .long sys_exit
16764 diff -urNp linux-2.6.32.44/arch/x86/kernel/sys_i386_32.c linux-2.6.32.44/arch/x86/kernel/sys_i386_32.c
16765 --- linux-2.6.32.44/arch/x86/kernel/sys_i386_32.c 2011-03-27 14:31:47.000000000 -0400
16766 +++ linux-2.6.32.44/arch/x86/kernel/sys_i386_32.c 2011-04-17 15:56:46.000000000 -0400
16767 @@ -24,6 +24,21 @@
16768
16769 #include <asm/syscalls.h>
16770
16771 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
16772 +{
16773 + unsigned long pax_task_size = TASK_SIZE;
16774 +
16775 +#ifdef CONFIG_PAX_SEGMEXEC
16776 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
16777 + pax_task_size = SEGMEXEC_TASK_SIZE;
16778 +#endif
16779 +
16780 + if (len > pax_task_size || addr > pax_task_size - len)
16781 + return -EINVAL;
16782 +
16783 + return 0;
16784 +}
16785 +
16786 /*
16787 * Perform the select(nd, in, out, ex, tv) and mmap() system
16788 * calls. Linux/i386 didn't use to be able to handle more than
16789 @@ -58,6 +73,212 @@ out:
16790 return err;
16791 }
16792
16793 +unsigned long
16794 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
16795 + unsigned long len, unsigned long pgoff, unsigned long flags)
16796 +{
16797 + struct mm_struct *mm = current->mm;
16798 + struct vm_area_struct *vma;
16799 + unsigned long start_addr, pax_task_size = TASK_SIZE;
16800 +
16801 +#ifdef CONFIG_PAX_SEGMEXEC
16802 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
16803 + pax_task_size = SEGMEXEC_TASK_SIZE;
16804 +#endif
16805 +
16806 + pax_task_size -= PAGE_SIZE;
16807 +
16808 + if (len > pax_task_size)
16809 + return -ENOMEM;
16810 +
16811 + if (flags & MAP_FIXED)
16812 + return addr;
16813 +
16814 +#ifdef CONFIG_PAX_RANDMMAP
16815 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16816 +#endif
16817 +
16818 + if (addr) {
16819 + addr = PAGE_ALIGN(addr);
16820 + if (pax_task_size - len >= addr) {
16821 + vma = find_vma(mm, addr);
16822 + if (check_heap_stack_gap(vma, addr, len))
16823 + return addr;
16824 + }
16825 + }
16826 + if (len > mm->cached_hole_size) {
16827 + start_addr = addr = mm->free_area_cache;
16828 + } else {
16829 + start_addr = addr = mm->mmap_base;
16830 + mm->cached_hole_size = 0;
16831 + }
16832 +
16833 +#ifdef CONFIG_PAX_PAGEEXEC
16834 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
16835 + start_addr = 0x00110000UL;
16836 +
16837 +#ifdef CONFIG_PAX_RANDMMAP
16838 + if (mm->pax_flags & MF_PAX_RANDMMAP)
16839 + start_addr += mm->delta_mmap & 0x03FFF000UL;
16840 +#endif
16841 +
16842 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
16843 + start_addr = addr = mm->mmap_base;
16844 + else
16845 + addr = start_addr;
16846 + }
16847 +#endif
16848 +
16849 +full_search:
16850 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
16851 + /* At this point: (!vma || addr < vma->vm_end). */
16852 + if (pax_task_size - len < addr) {
16853 + /*
16854 + * Start a new search - just in case we missed
16855 + * some holes.
16856 + */
16857 + if (start_addr != mm->mmap_base) {
16858 + start_addr = addr = mm->mmap_base;
16859 + mm->cached_hole_size = 0;
16860 + goto full_search;
16861 + }
16862 + return -ENOMEM;
16863 + }
16864 + if (check_heap_stack_gap(vma, addr, len))
16865 + break;
16866 + if (addr + mm->cached_hole_size < vma->vm_start)
16867 + mm->cached_hole_size = vma->vm_start - addr;
16868 + addr = vma->vm_end;
16869 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
16870 + start_addr = addr = mm->mmap_base;
16871 + mm->cached_hole_size = 0;
16872 + goto full_search;
16873 + }
16874 + }
16875 +
16876 + /*
16877 + * Remember the place where we stopped the search:
16878 + */
16879 + mm->free_area_cache = addr + len;
16880 + return addr;
16881 +}
16882 +
16883 +unsigned long
16884 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
16885 + const unsigned long len, const unsigned long pgoff,
16886 + const unsigned long flags)
16887 +{
16888 + struct vm_area_struct *vma;
16889 + struct mm_struct *mm = current->mm;
16890 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
16891 +
16892 +#ifdef CONFIG_PAX_SEGMEXEC
16893 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
16894 + pax_task_size = SEGMEXEC_TASK_SIZE;
16895 +#endif
16896 +
16897 + pax_task_size -= PAGE_SIZE;
16898 +
16899 + /* requested length too big for entire address space */
16900 + if (len > pax_task_size)
16901 + return -ENOMEM;
16902 +
16903 + if (flags & MAP_FIXED)
16904 + return addr;
16905 +
16906 +#ifdef CONFIG_PAX_PAGEEXEC
16907 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
16908 + goto bottomup;
16909 +#endif
16910 +
16911 +#ifdef CONFIG_PAX_RANDMMAP
16912 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16913 +#endif
16914 +
16915 + /* requesting a specific address */
16916 + if (addr) {
16917 + addr = PAGE_ALIGN(addr);
16918 + if (pax_task_size - len >= addr) {
16919 + vma = find_vma(mm, addr);
16920 + if (check_heap_stack_gap(vma, addr, len))
16921 + return addr;
16922 + }
16923 + }
16924 +
16925 + /* check if free_area_cache is useful for us */
16926 + if (len <= mm->cached_hole_size) {
16927 + mm->cached_hole_size = 0;
16928 + mm->free_area_cache = mm->mmap_base;
16929 + }
16930 +
16931 + /* either no address requested or can't fit in requested address hole */
16932 + addr = mm->free_area_cache;
16933 +
16934 + /* make sure it can fit in the remaining address space */
16935 + if (addr > len) {
16936 + vma = find_vma(mm, addr-len);
16937 + if (check_heap_stack_gap(vma, addr - len, len))
16938 + /* remember the address as a hint for next time */
16939 + return (mm->free_area_cache = addr-len);
16940 + }
16941 +
16942 + if (mm->mmap_base < len)
16943 + goto bottomup;
16944 +
16945 + addr = mm->mmap_base-len;
16946 +
16947 + do {
16948 + /*
16949 + * Lookup failure means no vma is above this address,
16950 + * else if new region fits below vma->vm_start,
16951 + * return with success:
16952 + */
16953 + vma = find_vma(mm, addr);
16954 + if (check_heap_stack_gap(vma, addr, len))
16955 + /* remember the address as a hint for next time */
16956 + return (mm->free_area_cache = addr);
16957 +
16958 + /* remember the largest hole we saw so far */
16959 + if (addr + mm->cached_hole_size < vma->vm_start)
16960 + mm->cached_hole_size = vma->vm_start - addr;
16961 +
16962 + /* try just below the current vma->vm_start */
16963 + addr = skip_heap_stack_gap(vma, len);
16964 + } while (!IS_ERR_VALUE(addr));
16965 +
16966 +bottomup:
16967 + /*
16968 + * A failed mmap() very likely causes application failure,
16969 + * so fall back to the bottom-up function here. This scenario
16970 + * can happen with large stack limits and large mmap()
16971 + * allocations.
16972 + */
16973 +
16974 +#ifdef CONFIG_PAX_SEGMEXEC
16975 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
16976 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
16977 + else
16978 +#endif
16979 +
16980 + mm->mmap_base = TASK_UNMAPPED_BASE;
16981 +
16982 +#ifdef CONFIG_PAX_RANDMMAP
16983 + if (mm->pax_flags & MF_PAX_RANDMMAP)
16984 + mm->mmap_base += mm->delta_mmap;
16985 +#endif
16986 +
16987 + mm->free_area_cache = mm->mmap_base;
16988 + mm->cached_hole_size = ~0UL;
16989 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
16990 + /*
16991 + * Restore the topdown base:
16992 + */
16993 + mm->mmap_base = base;
16994 + mm->free_area_cache = base;
16995 + mm->cached_hole_size = ~0UL;
16996 +
16997 + return addr;
16998 +}
16999
17000 struct sel_arg_struct {
17001 unsigned long n;
17002 @@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int fi
17003 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
17004 case SEMTIMEDOP:
17005 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
17006 - (const struct timespec __user *)fifth);
17007 + (__force const struct timespec __user *)fifth);
17008
17009 case SEMGET:
17010 return sys_semget(first, second, third);
17011 @@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int fi
17012 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
17013 if (ret)
17014 return ret;
17015 - return put_user(raddr, (ulong __user *) third);
17016 + return put_user(raddr, (__force ulong __user *) third);
17017 }
17018 case 1: /* iBCS2 emulator entry point */
17019 if (!segment_eq(get_fs(), get_ds()))
17020 @@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldol
17021
17022 return error;
17023 }
17024 -
17025 -
17026 -/*
17027 - * Do a system call from kernel instead of calling sys_execve so we
17028 - * end up with proper pt_regs.
17029 - */
17030 -int kernel_execve(const char *filename, char *const argv[], char *const envp[])
17031 -{
17032 - long __res;
17033 - asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
17034 - : "=a" (__res)
17035 - : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
17036 - return __res;
17037 -}
17038 diff -urNp linux-2.6.32.44/arch/x86/kernel/sys_x86_64.c linux-2.6.32.44/arch/x86/kernel/sys_x86_64.c
17039 --- linux-2.6.32.44/arch/x86/kernel/sys_x86_64.c 2011-03-27 14:31:47.000000000 -0400
17040 +++ linux-2.6.32.44/arch/x86/kernel/sys_x86_64.c 2011-04-17 15:56:46.000000000 -0400
17041 @@ -32,8 +32,8 @@ out:
17042 return error;
17043 }
17044
17045 -static void find_start_end(unsigned long flags, unsigned long *begin,
17046 - unsigned long *end)
17047 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
17048 + unsigned long *begin, unsigned long *end)
17049 {
17050 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
17051 unsigned long new_begin;
17052 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long
17053 *begin = new_begin;
17054 }
17055 } else {
17056 - *begin = TASK_UNMAPPED_BASE;
17057 + *begin = mm->mmap_base;
17058 *end = TASK_SIZE;
17059 }
17060 }
17061 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
17062 if (flags & MAP_FIXED)
17063 return addr;
17064
17065 - find_start_end(flags, &begin, &end);
17066 + find_start_end(mm, flags, &begin, &end);
17067
17068 if (len > end)
17069 return -ENOMEM;
17070
17071 +#ifdef CONFIG_PAX_RANDMMAP
17072 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17073 +#endif
17074 +
17075 if (addr) {
17076 addr = PAGE_ALIGN(addr);
17077 vma = find_vma(mm, addr);
17078 - if (end - len >= addr &&
17079 - (!vma || addr + len <= vma->vm_start))
17080 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
17081 return addr;
17082 }
17083 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
17084 @@ -106,7 +109,7 @@ full_search:
17085 }
17086 return -ENOMEM;
17087 }
17088 - if (!vma || addr + len <= vma->vm_start) {
17089 + if (check_heap_stack_gap(vma, addr, len)) {
17090 /*
17091 * Remember the place where we stopped the search:
17092 */
17093 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
17094 {
17095 struct vm_area_struct *vma;
17096 struct mm_struct *mm = current->mm;
17097 - unsigned long addr = addr0;
17098 + unsigned long base = mm->mmap_base, addr = addr0;
17099
17100 /* requested length too big for entire address space */
17101 if (len > TASK_SIZE)
17102 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
17103 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
17104 goto bottomup;
17105
17106 +#ifdef CONFIG_PAX_RANDMMAP
17107 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17108 +#endif
17109 +
17110 /* requesting a specific address */
17111 if (addr) {
17112 addr = PAGE_ALIGN(addr);
17113 - vma = find_vma(mm, addr);
17114 - if (TASK_SIZE - len >= addr &&
17115 - (!vma || addr + len <= vma->vm_start))
17116 - return addr;
17117 + if (TASK_SIZE - len >= addr) {
17118 + vma = find_vma(mm, addr);
17119 + if (check_heap_stack_gap(vma, addr, len))
17120 + return addr;
17121 + }
17122 }
17123
17124 /* check if free_area_cache is useful for us */
17125 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
17126 /* make sure it can fit in the remaining address space */
17127 if (addr > len) {
17128 vma = find_vma(mm, addr-len);
17129 - if (!vma || addr <= vma->vm_start)
17130 + if (check_heap_stack_gap(vma, addr - len, len))
17131 /* remember the address as a hint for next time */
17132 return mm->free_area_cache = addr-len;
17133 }
17134 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
17135 * return with success:
17136 */
17137 vma = find_vma(mm, addr);
17138 - if (!vma || addr+len <= vma->vm_start)
17139 + if (check_heap_stack_gap(vma, addr, len))
17140 /* remember the address as a hint for next time */
17141 return mm->free_area_cache = addr;
17142
17143 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
17144 mm->cached_hole_size = vma->vm_start - addr;
17145
17146 /* try just below the current vma->vm_start */
17147 - addr = vma->vm_start-len;
17148 - } while (len < vma->vm_start);
17149 + addr = skip_heap_stack_gap(vma, len);
17150 + } while (!IS_ERR_VALUE(addr));
17151
17152 bottomup:
17153 /*
17154 @@ -198,13 +206,21 @@ bottomup:
17155 * can happen with large stack limits and large mmap()
17156 * allocations.
17157 */
17158 + mm->mmap_base = TASK_UNMAPPED_BASE;
17159 +
17160 +#ifdef CONFIG_PAX_RANDMMAP
17161 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17162 + mm->mmap_base += mm->delta_mmap;
17163 +#endif
17164 +
17165 + mm->free_area_cache = mm->mmap_base;
17166 mm->cached_hole_size = ~0UL;
17167 - mm->free_area_cache = TASK_UNMAPPED_BASE;
17168 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17169 /*
17170 * Restore the topdown base:
17171 */
17172 - mm->free_area_cache = mm->mmap_base;
17173 + mm->mmap_base = base;
17174 + mm->free_area_cache = base;
17175 mm->cached_hole_size = ~0UL;
17176
17177 return addr;
17178 diff -urNp linux-2.6.32.44/arch/x86/kernel/tboot.c linux-2.6.32.44/arch/x86/kernel/tboot.c
17179 --- linux-2.6.32.44/arch/x86/kernel/tboot.c 2011-03-27 14:31:47.000000000 -0400
17180 +++ linux-2.6.32.44/arch/x86/kernel/tboot.c 2011-05-22 23:02:03.000000000 -0400
17181 @@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
17182
17183 void tboot_shutdown(u32 shutdown_type)
17184 {
17185 - void (*shutdown)(void);
17186 + void (* __noreturn shutdown)(void);
17187
17188 if (!tboot_enabled())
17189 return;
17190 @@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
17191
17192 switch_to_tboot_pt();
17193
17194 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
17195 + shutdown = (void *)tboot->shutdown_entry;
17196 shutdown();
17197
17198 /* should not reach here */
17199 @@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
17200 tboot_shutdown(acpi_shutdown_map[sleep_state]);
17201 }
17202
17203 -static atomic_t ap_wfs_count;
17204 +static atomic_unchecked_t ap_wfs_count;
17205
17206 static int tboot_wait_for_aps(int num_aps)
17207 {
17208 @@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(
17209 {
17210 switch (action) {
17211 case CPU_DYING:
17212 - atomic_inc(&ap_wfs_count);
17213 + atomic_inc_unchecked(&ap_wfs_count);
17214 if (num_online_cpus() == 1)
17215 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
17216 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
17217 return NOTIFY_BAD;
17218 break;
17219 }
17220 @@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
17221
17222 tboot_create_trampoline();
17223
17224 - atomic_set(&ap_wfs_count, 0);
17225 + atomic_set_unchecked(&ap_wfs_count, 0);
17226 register_hotcpu_notifier(&tboot_cpu_notifier);
17227 return 0;
17228 }
17229 diff -urNp linux-2.6.32.44/arch/x86/kernel/time.c linux-2.6.32.44/arch/x86/kernel/time.c
17230 --- linux-2.6.32.44/arch/x86/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
17231 +++ linux-2.6.32.44/arch/x86/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
17232 @@ -26,17 +26,13 @@
17233 int timer_ack;
17234 #endif
17235
17236 -#ifdef CONFIG_X86_64
17237 -volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
17238 -#endif
17239 -
17240 unsigned long profile_pc(struct pt_regs *regs)
17241 {
17242 unsigned long pc = instruction_pointer(regs);
17243
17244 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
17245 + if (!user_mode(regs) && in_lock_functions(pc)) {
17246 #ifdef CONFIG_FRAME_POINTER
17247 - return *(unsigned long *)(regs->bp + sizeof(long));
17248 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
17249 #else
17250 unsigned long *sp =
17251 (unsigned long *)kernel_stack_pointer(regs);
17252 @@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
17253 * or above a saved flags. Eflags has bits 22-31 zero,
17254 * kernel addresses don't.
17255 */
17256 +
17257 +#ifdef CONFIG_PAX_KERNEXEC
17258 + return ktla_ktva(sp[0]);
17259 +#else
17260 if (sp[0] >> 22)
17261 return sp[0];
17262 if (sp[1] >> 22)
17263 return sp[1];
17264 #endif
17265 +
17266 +#endif
17267 }
17268 return pc;
17269 }
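
profile_pc() now runs the saved return address through ktla_ktva() because, with CONFIG_PAX_KERNEXEC on 32-bit, the kernel text is executed from an alias that is shifted by a constant from the ordinary linear mapping (see the __KERNEL_TEXT_OFFSET changes to vmlinux.lds.S later in this patch). ktla_ktva()/ktva_ktla() translate between the two aliases. Purely conceptual sketch: the real offset and the exact direction of each helper come from the PaX headers, and the constant below is a placeholder:

/* placeholder constant; the real value derives from __KERNEL_TEXT_OFFSET */
#define KERNEL_TEXT_OFFSET_SKETCH 0x1000000UL

#define ktla_ktva_sketch(addr) ((addr) + KERNEL_TEXT_OFFSET_SKETCH)  /* linear alias -> executable alias */
#define ktva_ktla_sketch(addr) ((addr) - KERNEL_TEXT_OFFSET_SKETCH)  /* executable alias -> linear alias */
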
17270 diff -urNp linux-2.6.32.44/arch/x86/kernel/tls.c linux-2.6.32.44/arch/x86/kernel/tls.c
17271 --- linux-2.6.32.44/arch/x86/kernel/tls.c 2011-03-27 14:31:47.000000000 -0400
17272 +++ linux-2.6.32.44/arch/x86/kernel/tls.c 2011-04-17 15:56:46.000000000 -0400
17273 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
17274 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
17275 return -EINVAL;
17276
17277 +#ifdef CONFIG_PAX_SEGMEXEC
17278 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
17279 + return -EINVAL;
17280 +#endif
17281 +
17282 set_tls_desc(p, idx, &info, 1);
17283
17284 return 0;
17285 diff -urNp linux-2.6.32.44/arch/x86/kernel/trampoline_32.S linux-2.6.32.44/arch/x86/kernel/trampoline_32.S
17286 --- linux-2.6.32.44/arch/x86/kernel/trampoline_32.S 2011-03-27 14:31:47.000000000 -0400
17287 +++ linux-2.6.32.44/arch/x86/kernel/trampoline_32.S 2011-04-17 15:56:46.000000000 -0400
17288 @@ -32,6 +32,12 @@
17289 #include <asm/segment.h>
17290 #include <asm/page_types.h>
17291
17292 +#ifdef CONFIG_PAX_KERNEXEC
17293 +#define ta(X) (X)
17294 +#else
17295 +#define ta(X) ((X) - __PAGE_OFFSET)
17296 +#endif
17297 +
17298 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
17299 __CPUINITRODATA
17300 .code16
17301 @@ -60,7 +66,7 @@ r_base = .
17302 inc %ax # protected mode (PE) bit
17303 lmsw %ax # into protected mode
17304 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
17305 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
17306 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
17307
17308 # These need to be in the same 64K segment as the above;
17309 # hence we don't use the boot_gdt_descr defined in head.S
17310 diff -urNp linux-2.6.32.44/arch/x86/kernel/trampoline_64.S linux-2.6.32.44/arch/x86/kernel/trampoline_64.S
17311 --- linux-2.6.32.44/arch/x86/kernel/trampoline_64.S 2011-03-27 14:31:47.000000000 -0400
17312 +++ linux-2.6.32.44/arch/x86/kernel/trampoline_64.S 2011-07-01 18:53:26.000000000 -0400
17313 @@ -91,7 +91,7 @@ startup_32:
17314 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
17315 movl %eax, %ds
17316
17317 - movl $X86_CR4_PAE, %eax
17318 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17319 movl %eax, %cr4 # Enable PAE mode
17320
17321 # Setup trampoline 4 level pagetables
17322 @@ -127,7 +127,7 @@ startup_64:
17323 no_longmode:
17324 hlt
17325 jmp no_longmode
17326 -#include "verify_cpu_64.S"
17327 +#include "verify_cpu.S"
17328
17329 # Careful these need to be in the same 64K segment as the above;
17330 tidt:
17331 @@ -138,7 +138,7 @@ tidt:
17332 # so the kernel can live anywhere
17333 .balign 4
17334 tgdt:
17335 - .short tgdt_end - tgdt # gdt limit
17336 + .short tgdt_end - tgdt - 1 # gdt limit
17337 .long tgdt - r_base
17338 .short 0
17339 .quad 0x00cf9b000000ffff # __KERNEL32_CS
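
The tgdt limit fix above is the classic descriptor-table off-by-one: the limit field loaded into GDTR is the offset of the last valid byte, i.e. table size minus one, not the size itself. A small stand-alone illustration:

#include <stdint.h>

struct gdt_ptr_sketch {
        uint16_t limit;                 /* offset of the last valid byte = size - 1 */
        uint64_t base;
} __attribute__((packed));

static void fill_gdtr_sketch(struct gdt_ptr_sketch *p, const void *gdt, unsigned int entries)
{
        p->base  = (uint64_t)(uintptr_t)gdt;
        p->limit = entries * 8 - 1;     /* 8-byte descriptors; ".short tgdt_end - tgdt - 1" encodes the same rule */
}
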
17340 diff -urNp linux-2.6.32.44/arch/x86/kernel/traps.c linux-2.6.32.44/arch/x86/kernel/traps.c
17341 --- linux-2.6.32.44/arch/x86/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
17342 +++ linux-2.6.32.44/arch/x86/kernel/traps.c 2011-07-06 19:53:33.000000000 -0400
17343 @@ -69,12 +69,6 @@ asmlinkage int system_call(void);
17344
17345 /* Do we ignore FPU interrupts ? */
17346 char ignore_fpu_irq;
17347 -
17348 -/*
17349 - * The IDT has to be page-aligned to simplify the Pentium
17350 - * F0 0F bug workaround.
17351 - */
17352 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
17353 #endif
17354
17355 DECLARE_BITMAP(used_vectors, NR_VECTORS);
17356 @@ -112,19 +106,19 @@ static inline void preempt_conditional_c
17357 static inline void
17358 die_if_kernel(const char *str, struct pt_regs *regs, long err)
17359 {
17360 - if (!user_mode_vm(regs))
17361 + if (!user_mode(regs))
17362 die(str, regs, err);
17363 }
17364 #endif
17365
17366 static void __kprobes
17367 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17368 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
17369 long error_code, siginfo_t *info)
17370 {
17371 struct task_struct *tsk = current;
17372
17373 #ifdef CONFIG_X86_32
17374 - if (regs->flags & X86_VM_MASK) {
17375 + if (v8086_mode(regs)) {
17376 /*
17377 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
17378 * On nmi (interrupt 2), do_trap should not be called.
17379 @@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str
17380 }
17381 #endif
17382
17383 - if (!user_mode(regs))
17384 + if (!user_mode_novm(regs))
17385 goto kernel_trap;
17386
17387 #ifdef CONFIG_X86_32
17388 @@ -158,7 +152,7 @@ trap_signal:
17389 printk_ratelimit()) {
17390 printk(KERN_INFO
17391 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
17392 - tsk->comm, tsk->pid, str,
17393 + tsk->comm, task_pid_nr(tsk), str,
17394 regs->ip, regs->sp, error_code);
17395 print_vma_addr(" in ", regs->ip);
17396 printk("\n");
17397 @@ -175,8 +169,20 @@ kernel_trap:
17398 if (!fixup_exception(regs)) {
17399 tsk->thread.error_code = error_code;
17400 tsk->thread.trap_no = trapnr;
17401 +
17402 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17403 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
17404 + str = "PAX: suspicious stack segment fault";
17405 +#endif
17406 +
17407 die(str, regs, error_code);
17408 }
17409 +
17410 +#ifdef CONFIG_PAX_REFCOUNT
17411 + if (trapnr == 4)
17412 + pax_report_refcount_overflow(regs);
17413 +#endif
17414 +
17415 return;
17416
17417 #ifdef CONFIG_X86_32
17418 @@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *re
17419 conditional_sti(regs);
17420
17421 #ifdef CONFIG_X86_32
17422 - if (regs->flags & X86_VM_MASK)
17423 + if (v8086_mode(regs))
17424 goto gp_in_vm86;
17425 #endif
17426
17427 tsk = current;
17428 - if (!user_mode(regs))
17429 + if (!user_mode_novm(regs))
17430 goto gp_in_kernel;
17431
17432 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17433 + if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
17434 + struct mm_struct *mm = tsk->mm;
17435 + unsigned long limit;
17436 +
17437 + down_write(&mm->mmap_sem);
17438 + limit = mm->context.user_cs_limit;
17439 + if (limit < TASK_SIZE) {
17440 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
17441 + up_write(&mm->mmap_sem);
17442 + return;
17443 + }
17444 + up_write(&mm->mmap_sem);
17445 + }
17446 +#endif
17447 +
17448 tsk->thread.error_code = error_code;
17449 tsk->thread.trap_no = 13;
17450
17451 @@ -305,6 +327,13 @@ gp_in_kernel:
17452 if (notify_die(DIE_GPF, "general protection fault", regs,
17453 error_code, 13, SIGSEGV) == NOTIFY_STOP)
17454 return;
17455 +
17456 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17457 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
17458 + die("PAX: suspicious general protection fault", regs, error_code);
17459 + else
17460 +#endif
17461 +
17462 die("general protection fault", regs, error_code);
17463 }
17464
17465 @@ -435,6 +464,17 @@ static notrace __kprobes void default_do
17466 dotraplinkage notrace __kprobes void
17467 do_nmi(struct pt_regs *regs, long error_code)
17468 {
17469 +
17470 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17471 + if (!user_mode(regs)) {
17472 + unsigned long cs = regs->cs & 0xFFFF;
17473 + unsigned long ip = ktva_ktla(regs->ip);
17474 +
17475 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
17476 + regs->ip = ip;
17477 + }
17478 +#endif
17479 +
17480 nmi_enter();
17481
17482 inc_irq_stat(__nmi_count);
17483 @@ -558,7 +598,7 @@ dotraplinkage void __kprobes do_debug(st
17484 }
17485
17486 #ifdef CONFIG_X86_32
17487 - if (regs->flags & X86_VM_MASK)
17488 + if (v8086_mode(regs))
17489 goto debug_vm86;
17490 #endif
17491
17492 @@ -570,7 +610,7 @@ dotraplinkage void __kprobes do_debug(st
17493 * kernel space (but re-enable TF when returning to user mode).
17494 */
17495 if (condition & DR_STEP) {
17496 - if (!user_mode(regs))
17497 + if (!user_mode_novm(regs))
17498 goto clear_TF_reenable;
17499 }
17500
17501 @@ -757,7 +797,7 @@ do_simd_coprocessor_error(struct pt_regs
17502 * Handle strange cache flush from user space exception
17503 * in all other cases. This is undocumented behaviour.
17504 */
17505 - if (regs->flags & X86_VM_MASK) {
17506 + if (v8086_mode(regs)) {
17507 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
17508 return;
17509 }
17510 @@ -798,7 +838,7 @@ asmlinkage void __attribute__((weak)) sm
17511 void __math_state_restore(void)
17512 {
17513 struct thread_info *thread = current_thread_info();
17514 - struct task_struct *tsk = thread->task;
17515 + struct task_struct *tsk = current;
17516
17517 /*
17518 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
17519 @@ -825,8 +865,7 @@ void __math_state_restore(void)
17520 */
17521 asmlinkage void math_state_restore(void)
17522 {
17523 - struct thread_info *thread = current_thread_info();
17524 - struct task_struct *tsk = thread->task;
17525 + struct task_struct *tsk = current;
17526
17527 if (!tsk_used_math(tsk)) {
17528 local_irq_enable();
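
The CONFIG_PAX_REFCOUNT branch added above keys off trapnr == 4 because vector 4 is the x86 overflow exception (#OF): the instrumented atomic operations elsewhere in this patch deliberately raise it when a reference count would overflow, and the handler then reports it via pax_report_refcount_overflow(). A rough sketch of such an instrumented increment (the real implementation also registers an exception-table fixup; details differ):

static inline void atomic_inc_checked_sketch(int *counter)
{
        asm volatile("lock incl %0\n\t"
                     "jno 0f\n\t"               /* no signed overflow: done */
                     "lock decl %0\n\t"         /* undo the increment */
                     "int $4\n"                 /* raise #OF, i.e. the trap 4 handled above */
                     "0:\n"
                     : "+m" (*counter)
                     :
                     : "cc");
}
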
17529 diff -urNp linux-2.6.32.44/arch/x86/kernel/verify_cpu_64.S linux-2.6.32.44/arch/x86/kernel/verify_cpu_64.S
17530 --- linux-2.6.32.44/arch/x86/kernel/verify_cpu_64.S 2011-03-27 14:31:47.000000000 -0400
17531 +++ linux-2.6.32.44/arch/x86/kernel/verify_cpu_64.S 1969-12-31 19:00:00.000000000 -0500
17532 @@ -1,105 +0,0 @@
17533 -/*
17534 - *
17535 - * verify_cpu.S - Code for cpu long mode and SSE verification. This
17536 - * code has been borrowed from boot/setup.S and was introduced by
17537 - * Andi Kleen.
17538 - *
17539 - * Copyright (c) 2007 Andi Kleen (ak@suse.de)
17540 - * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
17541 - * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
17542 - *
17543 - * This source code is licensed under the GNU General Public License,
17544 - * Version 2. See the file COPYING for more details.
17545 - *
17546 - * This is a common code for verification whether CPU supports
17547 - * long mode and SSE or not. It is not called directly instead this
17548 - * file is included at various places and compiled in that context.
17549 - * Following are the current usage.
17550 - *
17551 - * This file is included by both 16bit and 32bit code.
17552 - *
17553 - * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
17554 - * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
17555 - * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
17556 - * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
17557 - *
17558 - * verify_cpu, returns the status of cpu check in register %eax.
17559 - * 0: Success 1: Failure
17560 - *
17561 - * The caller needs to check for the error code and take the action
17562 - * appropriately. Either display a message or halt.
17563 - */
17564 -
17565 -#include <asm/cpufeature.h>
17566 -
17567 -verify_cpu:
17568 - pushfl # Save caller passed flags
17569 - pushl $0 # Kill any dangerous flags
17570 - popfl
17571 -
17572 - pushfl # standard way to check for cpuid
17573 - popl %eax
17574 - movl %eax,%ebx
17575 - xorl $0x200000,%eax
17576 - pushl %eax
17577 - popfl
17578 - pushfl
17579 - popl %eax
17580 - cmpl %eax,%ebx
17581 - jz verify_cpu_no_longmode # cpu has no cpuid
17582 -
17583 - movl $0x0,%eax # See if cpuid 1 is implemented
17584 - cpuid
17585 - cmpl $0x1,%eax
17586 - jb verify_cpu_no_longmode # no cpuid 1
17587 -
17588 - xor %di,%di
17589 - cmpl $0x68747541,%ebx # AuthenticAMD
17590 - jnz verify_cpu_noamd
17591 - cmpl $0x69746e65,%edx
17592 - jnz verify_cpu_noamd
17593 - cmpl $0x444d4163,%ecx
17594 - jnz verify_cpu_noamd
17595 - mov $1,%di # cpu is from AMD
17596 -
17597 -verify_cpu_noamd:
17598 - movl $0x1,%eax # Does the cpu have what it takes
17599 - cpuid
17600 - andl $REQUIRED_MASK0,%edx
17601 - xorl $REQUIRED_MASK0,%edx
17602 - jnz verify_cpu_no_longmode
17603 -
17604 - movl $0x80000000,%eax # See if extended cpuid is implemented
17605 - cpuid
17606 - cmpl $0x80000001,%eax
17607 - jb verify_cpu_no_longmode # no extended cpuid
17608 -
17609 - movl $0x80000001,%eax # Does the cpu have what it takes
17610 - cpuid
17611 - andl $REQUIRED_MASK1,%edx
17612 - xorl $REQUIRED_MASK1,%edx
17613 - jnz verify_cpu_no_longmode
17614 -
17615 -verify_cpu_sse_test:
17616 - movl $1,%eax
17617 - cpuid
17618 - andl $SSE_MASK,%edx
17619 - cmpl $SSE_MASK,%edx
17620 - je verify_cpu_sse_ok
17621 - test %di,%di
17622 - jz verify_cpu_no_longmode # only try to force SSE on AMD
17623 - movl $0xc0010015,%ecx # HWCR
17624 - rdmsr
17625 - btr $15,%eax # enable SSE
17626 - wrmsr
17627 - xor %di,%di # don't loop
17628 - jmp verify_cpu_sse_test # try again
17629 -
17630 -verify_cpu_no_longmode:
17631 - popfl # Restore caller passed flags
17632 - movl $1,%eax
17633 - ret
17634 -verify_cpu_sse_ok:
17635 - popfl # Restore caller passed flags
17636 - xorl %eax, %eax
17637 - ret
17638 diff -urNp linux-2.6.32.44/arch/x86/kernel/verify_cpu.S linux-2.6.32.44/arch/x86/kernel/verify_cpu.S
17639 --- linux-2.6.32.44/arch/x86/kernel/verify_cpu.S 1969-12-31 19:00:00.000000000 -0500
17640 +++ linux-2.6.32.44/arch/x86/kernel/verify_cpu.S 2011-07-01 18:28:42.000000000 -0400
17641 @@ -0,0 +1,140 @@
17642 +/*
17643 + *
17644 + * verify_cpu.S - Code for cpu long mode and SSE verification. This
17645 + * code has been borrowed from boot/setup.S and was introduced by
17646 + * Andi Kleen.
17647 + *
17648 + * Copyright (c) 2007 Andi Kleen (ak@suse.de)
17649 + * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
17650 + * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
17651 + * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
17652 + *
17653 + * This source code is licensed under the GNU General Public License,
17654 + * Version 2. See the file COPYING for more details.
17655 + *
17656 + * This is a common code for verification whether CPU supports
17657 + * long mode and SSE or not. It is not called directly instead this
17658 + * file is included at various places and compiled in that context.
17659 + * This file is expected to run in 32bit code. Currently:
17660 + *
17661 + * arch/x86/boot/compressed/head_64.S: Boot cpu verification
17662 + * arch/x86/kernel/trampoline_64.S: secondary processor verification
17663 + * arch/x86/kernel/head_32.S: processor startup
17664 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
17665 + *
17666 + * verify_cpu, returns the status of longmode and SSE in register %eax.
17667 + * 0: Success 1: Failure
17668 + *
17669 + * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
17670 + *
17671 + * The caller needs to check for the error code and take the action
17672 + * appropriately. Either display a message or halt.
17673 + */
17674 +
17675 +#include <asm/cpufeature.h>
17676 +#include <asm/msr-index.h>
17677 +
17678 +verify_cpu:
17679 + pushfl # Save caller passed flags
17680 + pushl $0 # Kill any dangerous flags
17681 + popfl
17682 +
17683 + pushfl # standard way to check for cpuid
17684 + popl %eax
17685 + movl %eax,%ebx
17686 + xorl $0x200000,%eax
17687 + pushl %eax
17688 + popfl
17689 + pushfl
17690 + popl %eax
17691 + cmpl %eax,%ebx
17692 + jz verify_cpu_no_longmode # cpu has no cpuid
17693 +
17694 + movl $0x0,%eax # See if cpuid 1 is implemented
17695 + cpuid
17696 + cmpl $0x1,%eax
17697 + jb verify_cpu_no_longmode # no cpuid 1
17698 +
17699 + xor %di,%di
17700 + cmpl $0x68747541,%ebx # AuthenticAMD
17701 + jnz verify_cpu_noamd
17702 + cmpl $0x69746e65,%edx
17703 + jnz verify_cpu_noamd
17704 + cmpl $0x444d4163,%ecx
17705 + jnz verify_cpu_noamd
17706 + mov $1,%di # cpu is from AMD
17707 + jmp verify_cpu_check
17708 +
17709 +verify_cpu_noamd:
17710 + cmpl $0x756e6547,%ebx # GenuineIntel?
17711 + jnz verify_cpu_check
17712 + cmpl $0x49656e69,%edx
17713 + jnz verify_cpu_check
17714 + cmpl $0x6c65746e,%ecx
17715 + jnz verify_cpu_check
17716 +
17717 + # only call IA32_MISC_ENABLE when:
17718 + # family > 6 || (family == 6 && model >= 0xd)
17719 + movl $0x1, %eax # check CPU family and model
17720 + cpuid
17721 + movl %eax, %ecx
17722 +
17723 + andl $0x0ff00f00, %eax # mask family and extended family
17724 + shrl $8, %eax
17725 + cmpl $6, %eax
17726 + ja verify_cpu_clear_xd # family > 6, ok
17727 + jb verify_cpu_check # family < 6, skip
17728 +
17729 + andl $0x000f00f0, %ecx # mask model and extended model
17730 + shrl $4, %ecx
17731 + cmpl $0xd, %ecx
17732 + jb verify_cpu_check # family == 6, model < 0xd, skip
17733 +
17734 +verify_cpu_clear_xd:
17735 + movl $MSR_IA32_MISC_ENABLE, %ecx
17736 + rdmsr
17737 + btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
17738 + jnc verify_cpu_check # only write MSR if bit was changed
17739 + wrmsr
17740 +
17741 +verify_cpu_check:
17742 + movl $0x1,%eax # Does the cpu have what it takes
17743 + cpuid
17744 + andl $REQUIRED_MASK0,%edx
17745 + xorl $REQUIRED_MASK0,%edx
17746 + jnz verify_cpu_no_longmode
17747 +
17748 + movl $0x80000000,%eax # See if extended cpuid is implemented
17749 + cpuid
17750 + cmpl $0x80000001,%eax
17751 + jb verify_cpu_no_longmode # no extended cpuid
17752 +
17753 + movl $0x80000001,%eax # Does the cpu have what it takes
17754 + cpuid
17755 + andl $REQUIRED_MASK1,%edx
17756 + xorl $REQUIRED_MASK1,%edx
17757 + jnz verify_cpu_no_longmode
17758 +
17759 +verify_cpu_sse_test:
17760 + movl $1,%eax
17761 + cpuid
17762 + andl $SSE_MASK,%edx
17763 + cmpl $SSE_MASK,%edx
17764 + je verify_cpu_sse_ok
17765 + test %di,%di
17766 + jz verify_cpu_no_longmode # only try to force SSE on AMD
17767 + movl $MSR_K7_HWCR,%ecx
17768 + rdmsr
17769 + btr $15,%eax # enable SSE
17770 + wrmsr
17771 + xor %di,%di # don't loop
17772 + jmp verify_cpu_sse_test # try again
17773 +
17774 +verify_cpu_no_longmode:
17775 + popfl # Restore caller passed flags
17776 + movl $1,%eax
17777 + ret
17778 +verify_cpu_sse_ok:
17779 + popfl # Restore caller passed flags
17780 + xorl %eax, %eax
17781 + ret
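
Besides the long-mode/SSE checks carried over from the deleted verify_cpu_64.S, the new file clears IA32_MISC_ENABLE.XD_DISABLE (bit 34, i.e. bit 2 of %edx after rdmsr) on Intel CPUs with family > 6, or family 6 and model >= 0xd, so NX stays usable even when firmware turned it off. Equivalent logic in C, for readability only: the assembly has to run before any C environment exists, and the bit-34 define may not exist under a symbolic name in 2.6.32, so a literal is used here:

#include <linux/types.h>
#include <asm/msr.h>

#define XD_DISABLE_SKETCH (1ULL << 34)          /* IA32_MISC_ENABLE.XD_DISABLE */

static void clear_xd_disable_sketch(void)
{
        u64 misc;

        rdmsrl(MSR_IA32_MISC_ENABLE, misc);
        if (misc & XD_DISABLE_SKETCH) {
                misc &= ~XD_DISABLE_SKETCH;
                wrmsrl(MSR_IA32_MISC_ENABLE, misc);     /* only write the MSR when the bit was actually set */
        }
}
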
17782 diff -urNp linux-2.6.32.44/arch/x86/kernel/vm86_32.c linux-2.6.32.44/arch/x86/kernel/vm86_32.c
17783 --- linux-2.6.32.44/arch/x86/kernel/vm86_32.c 2011-03-27 14:31:47.000000000 -0400
17784 +++ linux-2.6.32.44/arch/x86/kernel/vm86_32.c 2011-04-17 15:56:46.000000000 -0400
17785 @@ -41,6 +41,7 @@
17786 #include <linux/ptrace.h>
17787 #include <linux/audit.h>
17788 #include <linux/stddef.h>
17789 +#include <linux/grsecurity.h>
17790
17791 #include <asm/uaccess.h>
17792 #include <asm/io.h>
17793 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
17794 do_exit(SIGSEGV);
17795 }
17796
17797 - tss = &per_cpu(init_tss, get_cpu());
17798 + tss = init_tss + get_cpu();
17799 current->thread.sp0 = current->thread.saved_sp0;
17800 current->thread.sysenter_cs = __KERNEL_CS;
17801 load_sp0(tss, &current->thread);
17802 @@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
17803 struct task_struct *tsk;
17804 int tmp, ret = -EPERM;
17805
17806 +#ifdef CONFIG_GRKERNSEC_VM86
17807 + if (!capable(CAP_SYS_RAWIO)) {
17808 + gr_handle_vm86();
17809 + goto out;
17810 + }
17811 +#endif
17812 +
17813 tsk = current;
17814 if (tsk->thread.saved_sp0)
17815 goto out;
17816 @@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
17817 int tmp, ret;
17818 struct vm86plus_struct __user *v86;
17819
17820 +#ifdef CONFIG_GRKERNSEC_VM86
17821 + if (!capable(CAP_SYS_RAWIO)) {
17822 + gr_handle_vm86();
17823 + ret = -EPERM;
17824 + goto out;
17825 + }
17826 +#endif
17827 +
17828 tsk = current;
17829 switch (regs->bx) {
17830 case VM86_REQUEST_IRQ:
17831 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
17832 tsk->thread.saved_fs = info->regs32->fs;
17833 tsk->thread.saved_gs = get_user_gs(info->regs32);
17834
17835 - tss = &per_cpu(init_tss, get_cpu());
17836 + tss = init_tss + get_cpu();
17837 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
17838 if (cpu_has_sep)
17839 tsk->thread.sysenter_cs = 0;
17840 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
17841 goto cannot_handle;
17842 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
17843 goto cannot_handle;
17844 - intr_ptr = (unsigned long __user *) (i << 2);
17845 + intr_ptr = (__force unsigned long __user *) (i << 2);
17846 if (get_user(segoffs, intr_ptr))
17847 goto cannot_handle;
17848 if ((segoffs >> 16) == BIOSSEG)
17849 diff -urNp linux-2.6.32.44/arch/x86/kernel/vmi_32.c linux-2.6.32.44/arch/x86/kernel/vmi_32.c
17850 --- linux-2.6.32.44/arch/x86/kernel/vmi_32.c 2011-03-27 14:31:47.000000000 -0400
17851 +++ linux-2.6.32.44/arch/x86/kernel/vmi_32.c 2011-08-05 20:33:55.000000000 -0400
17852 @@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1)))
17853 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
17854
17855 #define call_vrom_func(rom,func) \
17856 - (((VROMFUNC *)(rom->func))())
17857 + (((VROMFUNC *)(ktva_ktla(rom.func)))())
17858
17859 #define call_vrom_long_func(rom,func,arg) \
17860 - (((VROMLONGFUNC *)(rom->func)) (arg))
17861 +({\
17862 + u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
17863 + struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
17864 + __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
17865 + __reloc;\
17866 +})
17867
17868 -static struct vrom_header *vmi_rom;
17869 +static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
17870 static int disable_pge;
17871 static int disable_pse;
17872 static int disable_sep;
17873 @@ -76,10 +81,10 @@ static struct {
17874 void (*set_initial_ap_state)(int, int);
17875 void (*halt)(void);
17876 void (*set_lazy_mode)(int mode);
17877 -} vmi_ops;
17878 +} __no_const vmi_ops __read_only;
17879
17880 /* Cached VMI operations */
17881 -struct vmi_timer_ops vmi_timer_ops;
17882 +struct vmi_timer_ops vmi_timer_ops __read_only;
17883
17884 /*
17885 * VMI patching routines.
17886 @@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
17887 static inline void patch_offset(void *insnbuf,
17888 unsigned long ip, unsigned long dest)
17889 {
17890 - *(unsigned long *)(insnbuf+1) = dest-ip-5;
17891 + *(unsigned long *)(insnbuf+1) = dest-ip-5;
17892 }
17893
17894 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
17895 @@ -102,6 +107,7 @@ static unsigned patch_internal(int call,
17896 {
17897 u64 reloc;
17898 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
17899 +
17900 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
17901 switch(rel->type) {
17902 case VMI_RELOCATION_CALL_REL:
17903 @@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud
17904
17905 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
17906 {
17907 - const pte_t pte = { .pte = 0 };
17908 + const pte_t pte = __pte(0ULL);
17909 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
17910 }
17911
17912 static void vmi_pmd_clear(pmd_t *pmd)
17913 {
17914 - const pte_t pte = { .pte = 0 };
17915 + const pte_t pte = __pte(0ULL);
17916 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
17917 }
17918 #endif
17919 @@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, un
17920 ap.ss = __KERNEL_DS;
17921 ap.esp = (unsigned long) start_esp;
17922
17923 - ap.ds = __USER_DS;
17924 - ap.es = __USER_DS;
17925 + ap.ds = __KERNEL_DS;
17926 + ap.es = __KERNEL_DS;
17927 ap.fs = __KERNEL_PERCPU;
17928 - ap.gs = __KERNEL_STACK_CANARY;
17929 + savesegment(gs, ap.gs);
17930
17931 ap.eflags = 0;
17932
17933 @@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
17934 paravirt_leave_lazy_mmu();
17935 }
17936
17937 +#ifdef CONFIG_PAX_KERNEXEC
17938 +static unsigned long vmi_pax_open_kernel(void)
17939 +{
17940 + return 0;
17941 +}
17942 +
17943 +static unsigned long vmi_pax_close_kernel(void)
17944 +{
17945 + return 0;
17946 +}
17947 +#endif
17948 +
17949 static inline int __init check_vmi_rom(struct vrom_header *rom)
17950 {
17951 struct pci_header *pci;
17952 @@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(s
17953 return 0;
17954 if (rom->vrom_signature != VMI_SIGNATURE)
17955 return 0;
17956 + if (rom->rom_length * 512 > sizeof(*rom)) {
17957 + printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
17958 + return 0;
17959 + }
17960 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
17961 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
17962 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
17963 @@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(v
17964 struct vrom_header *romstart;
17965 romstart = (struct vrom_header *)isa_bus_to_virt(base);
17966 if (check_vmi_rom(romstart)) {
17967 - vmi_rom = romstart;
17968 + vmi_rom = *romstart;
17969 return 1;
17970 }
17971 }
17972 @@ -836,6 +858,11 @@ static inline int __init activate_vmi(vo
17973
17974 para_fill(pv_irq_ops.safe_halt, Halt);
17975
17976 +#ifdef CONFIG_PAX_KERNEXEC
17977 + pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
17978 + pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
17979 +#endif
17980 +
17981 /*
17982 * Alternative instruction rewriting doesn't happen soon enough
17983 * to convert VMI_IRET to a call instead of a jump; so we have
17984 @@ -853,16 +880,16 @@ static inline int __init activate_vmi(vo
17985
17986 void __init vmi_init(void)
17987 {
17988 - if (!vmi_rom)
17989 + if (!vmi_rom.rom_signature)
17990 probe_vmi_rom();
17991 else
17992 - check_vmi_rom(vmi_rom);
17993 + check_vmi_rom(&vmi_rom);
17994
17995 /* In case probing for or validating the ROM failed, basil */
17996 - if (!vmi_rom)
17997 + if (!vmi_rom.rom_signature)
17998 return;
17999
18000 - reserve_top_address(-vmi_rom->virtual_top);
18001 + reserve_top_address(-vmi_rom.virtual_top);
18002
18003 #ifdef CONFIG_X86_IO_APIC
18004 /* This is virtual hardware; timer routing is wired correctly */
18005 @@ -874,7 +901,7 @@ void __init vmi_activate(void)
18006 {
18007 unsigned long flags;
18008
18009 - if (!vmi_rom)
18010 + if (!vmi_rom.rom_signature)
18011 return;
18012
18013 local_irq_save(flags);
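
In the vmi_32.c changes above, the ROM header is embedded by value and the vmi_ops/vmi_timer_ops function-pointer tables are marked __read_only, i.e. moved out of writable data so they cannot be retargeted at runtime; later writes (such as the paravirt fixups during init) have to go through pax_open_kernel()/pax_close_kernel(). The attribute below is a stand-in for the real __read_only definition from this patch, and the stubs are placeholders:

#define __read_only_sketch __attribute__((__section__(".data..read_only")))  /* stand-in attribute */

struct demo_ops {
        void (*halt)(void);
        void (*flush)(void);
};

static void demo_halt(void)  { }
static void demo_flush(void) { }

static struct demo_ops demo_ops __read_only_sketch = {
        .halt  = demo_halt,
        .flush = demo_flush,
};
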
18014 diff -urNp linux-2.6.32.44/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.44/arch/x86/kernel/vmlinux.lds.S
18015 --- linux-2.6.32.44/arch/x86/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
18016 +++ linux-2.6.32.44/arch/x86/kernel/vmlinux.lds.S 2011-04-17 15:56:46.000000000 -0400
18017 @@ -26,6 +26,13 @@
18018 #include <asm/page_types.h>
18019 #include <asm/cache.h>
18020 #include <asm/boot.h>
18021 +#include <asm/segment.h>
18022 +
18023 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18024 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
18025 +#else
18026 +#define __KERNEL_TEXT_OFFSET 0
18027 +#endif
18028
18029 #undef i386 /* in case the preprocessor is a 32bit one */
18030
18031 @@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
18032 #ifdef CONFIG_X86_32
18033 OUTPUT_ARCH(i386)
18034 ENTRY(phys_startup_32)
18035 -jiffies = jiffies_64;
18036 #else
18037 OUTPUT_ARCH(i386:x86-64)
18038 ENTRY(phys_startup_64)
18039 -jiffies_64 = jiffies;
18040 #endif
18041
18042 PHDRS {
18043 text PT_LOAD FLAGS(5); /* R_E */
18044 - data PT_LOAD FLAGS(7); /* RWE */
18045 +#ifdef CONFIG_X86_32
18046 + module PT_LOAD FLAGS(5); /* R_E */
18047 +#endif
18048 +#ifdef CONFIG_XEN
18049 + rodata PT_LOAD FLAGS(5); /* R_E */
18050 +#else
18051 + rodata PT_LOAD FLAGS(4); /* R__ */
18052 +#endif
18053 + data PT_LOAD FLAGS(6); /* RW_ */
18054 #ifdef CONFIG_X86_64
18055 user PT_LOAD FLAGS(5); /* R_E */
18056 +#endif
18057 + init.begin PT_LOAD FLAGS(6); /* RW_ */
18058 #ifdef CONFIG_SMP
18059 percpu PT_LOAD FLAGS(6); /* RW_ */
18060 #endif
18061 + text.init PT_LOAD FLAGS(5); /* R_E */
18062 + text.exit PT_LOAD FLAGS(5); /* R_E */
18063 init PT_LOAD FLAGS(7); /* RWE */
18064 -#endif
18065 note PT_NOTE FLAGS(0); /* ___ */
18066 }
18067
18068 SECTIONS
18069 {
18070 #ifdef CONFIG_X86_32
18071 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
18072 - phys_startup_32 = startup_32 - LOAD_OFFSET;
18073 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
18074 #else
18075 - . = __START_KERNEL;
18076 - phys_startup_64 = startup_64 - LOAD_OFFSET;
18077 + . = __START_KERNEL;
18078 #endif
18079
18080 /* Text and read-only data */
18081 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
18082 - _text = .;
18083 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18084 /* bootstrapping code */
18085 +#ifdef CONFIG_X86_32
18086 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18087 +#else
18088 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18089 +#endif
18090 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18091 + _text = .;
18092 HEAD_TEXT
18093 #ifdef CONFIG_X86_32
18094 . = ALIGN(PAGE_SIZE);
18095 @@ -82,28 +102,71 @@ SECTIONS
18096 IRQENTRY_TEXT
18097 *(.fixup)
18098 *(.gnu.warning)
18099 - /* End of text section */
18100 - _etext = .;
18101 } :text = 0x9090
18102
18103 - NOTES :text :note
18104 + . += __KERNEL_TEXT_OFFSET;
18105 +
18106 +#ifdef CONFIG_X86_32
18107 + . = ALIGN(PAGE_SIZE);
18108 + .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
18109 + *(.vmi.rom)
18110 + } :module
18111 +
18112 + . = ALIGN(PAGE_SIZE);
18113 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
18114 +
18115 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
18116 + MODULES_EXEC_VADDR = .;
18117 + BYTE(0)
18118 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
18119 + . = ALIGN(HPAGE_SIZE);
18120 + MODULES_EXEC_END = . - 1;
18121 +#endif
18122 +
18123 + } :module
18124 +#endif
18125
18126 - EXCEPTION_TABLE(16) :text = 0x9090
18127 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
18128 + /* End of text section */
18129 + _etext = . - __KERNEL_TEXT_OFFSET;
18130 + }
18131 +
18132 +#ifdef CONFIG_X86_32
18133 + . = ALIGN(PAGE_SIZE);
18134 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
18135 + *(.idt)
18136 + . = ALIGN(PAGE_SIZE);
18137 + *(.empty_zero_page)
18138 + *(.swapper_pg_fixmap)
18139 + *(.swapper_pg_pmd)
18140 + *(.swapper_pg_dir)
18141 + *(.trampoline_pg_dir)
18142 + } :rodata
18143 +#endif
18144 +
18145 + . = ALIGN(PAGE_SIZE);
18146 + NOTES :rodata :note
18147 +
18148 + EXCEPTION_TABLE(16) :rodata
18149
18150 RO_DATA(PAGE_SIZE)
18151
18152 /* Data */
18153 .data : AT(ADDR(.data) - LOAD_OFFSET) {
18154 +
18155 +#ifdef CONFIG_PAX_KERNEXEC
18156 + . = ALIGN(HPAGE_SIZE);
18157 +#else
18158 + . = ALIGN(PAGE_SIZE);
18159 +#endif
18160 +
18161 /* Start of data section */
18162 _sdata = .;
18163
18164 /* init_task */
18165 INIT_TASK_DATA(THREAD_SIZE)
18166
18167 -#ifdef CONFIG_X86_32
18168 - /* 32 bit has nosave before _edata */
18169 NOSAVE_DATA
18170 -#endif
18171
18172 PAGE_ALIGNED_DATA(PAGE_SIZE)
18173
18174 @@ -112,6 +175,8 @@ SECTIONS
18175 DATA_DATA
18176 CONSTRUCTORS
18177
18178 + jiffies = jiffies_64;
18179 +
18180 /* rarely changed data like cpu maps */
18181 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
18182
18183 @@ -166,12 +231,6 @@ SECTIONS
18184 }
18185 vgetcpu_mode = VVIRT(.vgetcpu_mode);
18186
18187 - . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
18188 - .jiffies : AT(VLOAD(.jiffies)) {
18189 - *(.jiffies)
18190 - }
18191 - jiffies = VVIRT(.jiffies);
18192 -
18193 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
18194 *(.vsyscall_3)
18195 }
18196 @@ -187,12 +246,19 @@ SECTIONS
18197 #endif /* CONFIG_X86_64 */
18198
18199 /* Init code and data - will be freed after init */
18200 - . = ALIGN(PAGE_SIZE);
18201 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
18202 + BYTE(0)
18203 +
18204 +#ifdef CONFIG_PAX_KERNEXEC
18205 + . = ALIGN(HPAGE_SIZE);
18206 +#else
18207 + . = ALIGN(PAGE_SIZE);
18208 +#endif
18209 +
18210 __init_begin = .; /* paired with __init_end */
18211 - }
18212 + } :init.begin
18213
18214 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
18215 +#ifdef CONFIG_SMP
18216 /*
18217 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
18218 * output PHDR, so the next output section - .init.text - should
18219 @@ -201,12 +267,27 @@ SECTIONS
18220 PERCPU_VADDR(0, :percpu)
18221 #endif
18222
18223 - INIT_TEXT_SECTION(PAGE_SIZE)
18224 -#ifdef CONFIG_X86_64
18225 - :init
18226 -#endif
18227 + . = ALIGN(PAGE_SIZE);
18228 + init_begin = .;
18229 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
18230 + VMLINUX_SYMBOL(_sinittext) = .;
18231 + INIT_TEXT
18232 + VMLINUX_SYMBOL(_einittext) = .;
18233 + . = ALIGN(PAGE_SIZE);
18234 + } :text.init
18235
18236 - INIT_DATA_SECTION(16)
18237 + /*
18238 + * .exit.text is discard at runtime, not link time, to deal with
18239 + * references from .altinstructions and .eh_frame
18240 + */
18241 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18242 + EXIT_TEXT
18243 + . = ALIGN(16);
18244 + } :text.exit
18245 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
18246 +
18247 + . = ALIGN(PAGE_SIZE);
18248 + INIT_DATA_SECTION(16) :init
18249
18250 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
18251 __x86_cpu_dev_start = .;
18252 @@ -232,19 +313,11 @@ SECTIONS
18253 *(.altinstr_replacement)
18254 }
18255
18256 - /*
18257 - * .exit.text is discard at runtime, not link time, to deal with
18258 - * references from .altinstructions and .eh_frame
18259 - */
18260 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
18261 - EXIT_TEXT
18262 - }
18263 -
18264 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
18265 EXIT_DATA
18266 }
18267
18268 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
18269 +#ifndef CONFIG_SMP
18270 PERCPU(PAGE_SIZE)
18271 #endif
18272
18273 @@ -267,12 +340,6 @@ SECTIONS
18274 . = ALIGN(PAGE_SIZE);
18275 }
18276
18277 -#ifdef CONFIG_X86_64
18278 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
18279 - NOSAVE_DATA
18280 - }
18281 -#endif
18282 -
18283 /* BSS */
18284 . = ALIGN(PAGE_SIZE);
18285 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
18286 @@ -288,6 +355,7 @@ SECTIONS
18287 __brk_base = .;
18288 . += 64 * 1024; /* 64k alignment slop space */
18289 *(.brk_reservation) /* areas brk users have reserved */
18290 + . = ALIGN(HPAGE_SIZE);
18291 __brk_limit = .;
18292 }
18293
18294 @@ -316,13 +384,12 @@ SECTIONS
18295 * for the boot processor.
18296 */
18297 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
18298 -INIT_PER_CPU(gdt_page);
18299 INIT_PER_CPU(irq_stack_union);
18300
18301 /*
18302 * Build-time check on the image size:
18303 */
18304 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
18305 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
18306 "kernel image bigger than KERNEL_IMAGE_SIZE");
18307
18308 #ifdef CONFIG_SMP
18309 diff -urNp linux-2.6.32.44/arch/x86/kernel/vsyscall_64.c linux-2.6.32.44/arch/x86/kernel/vsyscall_64.c
18310 --- linux-2.6.32.44/arch/x86/kernel/vsyscall_64.c 2011-03-27 14:31:47.000000000 -0400
18311 +++ linux-2.6.32.44/arch/x86/kernel/vsyscall_64.c 2011-04-23 12:56:10.000000000 -0400
18312 @@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wa
18313
18314 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
18315 /* copy vsyscall data */
18316 + strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
18317 vsyscall_gtod_data.clock.vread = clock->vread;
18318 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
18319 vsyscall_gtod_data.clock.mask = clock->mask;
18320 @@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s
18321 We do this here because otherwise user space would do it on
18322 its own in a likely inferior way (no access to jiffies).
18323 If you don't like it pass NULL. */
18324 - if (tcache && tcache->blob[0] == (j = __jiffies)) {
18325 + if (tcache && tcache->blob[0] == (j = jiffies)) {
18326 p = tcache->blob[1];
18327 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
18328 /* Load per CPU data from RDTSCP */
18329 diff -urNp linux-2.6.32.44/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.32.44/arch/x86/kernel/x8664_ksyms_64.c
18330 --- linux-2.6.32.44/arch/x86/kernel/x8664_ksyms_64.c 2011-03-27 14:31:47.000000000 -0400
18331 +++ linux-2.6.32.44/arch/x86/kernel/x8664_ksyms_64.c 2011-04-17 15:56:46.000000000 -0400
18332 @@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
18333
18334 EXPORT_SYMBOL(copy_user_generic);
18335 EXPORT_SYMBOL(__copy_user_nocache);
18336 -EXPORT_SYMBOL(copy_from_user);
18337 -EXPORT_SYMBOL(copy_to_user);
18338 EXPORT_SYMBOL(__copy_from_user_inatomic);
18339
18340 EXPORT_SYMBOL(copy_page);
18341 diff -urNp linux-2.6.32.44/arch/x86/kernel/xsave.c linux-2.6.32.44/arch/x86/kernel/xsave.c
18342 --- linux-2.6.32.44/arch/x86/kernel/xsave.c 2011-03-27 14:31:47.000000000 -0400
18343 +++ linux-2.6.32.44/arch/x86/kernel/xsave.c 2011-04-17 15:56:46.000000000 -0400
18344 @@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_
18345 fx_sw_user->xstate_size > fx_sw_user->extended_size)
18346 return -1;
18347
18348 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
18349 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
18350 fx_sw_user->extended_size -
18351 FP_XSTATE_MAGIC2_SIZE));
18352 /*
18353 @@ -196,7 +196,7 @@ fx_only:
18354 * the other extended state.
18355 */
18356 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
18357 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
18358 + return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
18359 }
18360
18361 /*
18362 @@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf
18363 if (task_thread_info(tsk)->status & TS_XSAVE)
18364 err = restore_user_xstate(buf);
18365 else
18366 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
18367 + err = fxrstor_checking((struct i387_fxsave_struct __user *)
18368 buf);
18369 if (unlikely(err)) {
18370 /*
18371 diff -urNp linux-2.6.32.44/arch/x86/kvm/emulate.c linux-2.6.32.44/arch/x86/kvm/emulate.c
18372 --- linux-2.6.32.44/arch/x86/kvm/emulate.c 2011-03-27 14:31:47.000000000 -0400
18373 +++ linux-2.6.32.44/arch/x86/kvm/emulate.c 2011-04-17 15:56:46.000000000 -0400
18374 @@ -81,8 +81,8 @@
18375 #define Src2CL (1<<29)
18376 #define Src2ImmByte (2<<29)
18377 #define Src2One (3<<29)
18378 -#define Src2Imm16 (4<<29)
18379 -#define Src2Mask (7<<29)
18380 +#define Src2Imm16 (4U<<29)
18381 +#define Src2Mask (7U<<29)
18382
18383 enum {
18384 Group1_80, Group1_81, Group1_82, Group1_83,
18385 @@ -411,6 +411,7 @@ static u32 group2_table[] = {
18386
18387 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
18388 do { \
18389 + unsigned long _tmp; \
18390 __asm__ __volatile__ ( \
18391 _PRE_EFLAGS("0", "4", "2") \
18392 _op _suffix " %"_x"3,%1; " \
18393 @@ -424,8 +425,6 @@ static u32 group2_table[] = {
18394 /* Raw emulation: instruction has two explicit operands. */
18395 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
18396 do { \
18397 - unsigned long _tmp; \
18398 - \
18399 switch ((_dst).bytes) { \
18400 case 2: \
18401 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
18402 @@ -441,7 +440,6 @@ static u32 group2_table[] = {
18403
18404 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
18405 do { \
18406 - unsigned long _tmp; \
18407 switch ((_dst).bytes) { \
18408 case 1: \
18409 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
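
The U suffixes added to Src2Imm16 and Src2Mask above are not cosmetic: with a 32-bit int, 4 << 29 and 7 << 29 shift a set bit into or past the sign bit, which is undefined behaviour for signed operands; doing the shift in unsigned arithmetic yields the intended masks. Stand-alone illustration:

#include <stdio.h>

#define SRC2_MASK (7U << 29)            /* well defined: 0xe0000000 */

int main(void)
{
        /* (7 << 29) with signed int would be undefined behaviour here instead */
        printf("mask = %#x\n", SRC2_MASK);
        return 0;
}
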
18410 diff -urNp linux-2.6.32.44/arch/x86/kvm/lapic.c linux-2.6.32.44/arch/x86/kvm/lapic.c
18411 --- linux-2.6.32.44/arch/x86/kvm/lapic.c 2011-03-27 14:31:47.000000000 -0400
18412 +++ linux-2.6.32.44/arch/x86/kvm/lapic.c 2011-04-17 15:56:46.000000000 -0400
18413 @@ -52,7 +52,7 @@
18414 #define APIC_BUS_CYCLE_NS 1
18415
18416 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
18417 -#define apic_debug(fmt, arg...)
18418 +#define apic_debug(fmt, arg...) do {} while (0)
18419
18420 #define APIC_LVT_NUM 6
18421 /* 14 is the version for Xeon and Pentium 8.4.8*/
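
Turning the disabled apic_debug() into do {} while (0) keeps the no-op expansion a single statement that still demands its trailing semicolon, so it combines with if/else exactly like the printk() it replaces; a completely empty expansion lets a forgotten semicolon silently pull the next statement under the if. Small illustration:

#define dbg_empty(fmt, ...)
#define dbg_safe(fmt, ...)  do {} while (0)

void report(void);

void demo(int error)
{
        if (error)
                dbg_empty("error %d\n", error)  /* missing ';' still compiles ... */
        report();                               /* ... and report() is now conditional */

        if (error)
                dbg_safe("error %d\n", error);  /* do/while(0): the ';' is required */
        report();
}
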
18422 diff -urNp linux-2.6.32.44/arch/x86/kvm/paging_tmpl.h linux-2.6.32.44/arch/x86/kvm/paging_tmpl.h
18423 --- linux-2.6.32.44/arch/x86/kvm/paging_tmpl.h 2011-03-27 14:31:47.000000000 -0400
18424 +++ linux-2.6.32.44/arch/x86/kvm/paging_tmpl.h 2011-05-16 21:46:57.000000000 -0400
18425 @@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_
18426 int level = PT_PAGE_TABLE_LEVEL;
18427 unsigned long mmu_seq;
18428
18429 + pax_track_stack();
18430 +
18431 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
18432 kvm_mmu_audit(vcpu, "pre page fault");
18433
18434 diff -urNp linux-2.6.32.44/arch/x86/kvm/svm.c linux-2.6.32.44/arch/x86/kvm/svm.c
18435 --- linux-2.6.32.44/arch/x86/kvm/svm.c 2011-03-27 14:31:47.000000000 -0400
18436 +++ linux-2.6.32.44/arch/x86/kvm/svm.c 2011-08-05 20:33:55.000000000 -0400
18437 @@ -2485,7 +2485,11 @@ static void reload_tss(struct kvm_vcpu *
18438 int cpu = raw_smp_processor_id();
18439
18440 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
18441 +
18442 + pax_open_kernel();
18443 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
18444 + pax_close_kernel();
18445 +
18446 load_TR_desc();
18447 }
18448
18449 @@ -2946,7 +2950,7 @@ static bool svm_gb_page_enable(void)
18450 return true;
18451 }
18452
18453 -static struct kvm_x86_ops svm_x86_ops = {
18454 +static const struct kvm_x86_ops svm_x86_ops = {
18455 .cpu_has_kvm_support = has_svm,
18456 .disabled_by_bios = is_disabled,
18457 .hardware_setup = svm_hardware_setup,
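
reload_tss() above writes the TSS descriptor type into the GDT, which KERNEXEC keeps read-only, hence the pax_open_kernel()/pax_close_kernel() bracket. Conceptually the native helpers lift and restore write protection on the current CPU, roughly as below; the real implementation in this patch is paravirt-aware (compare the VMI stubs earlier, which simply return 0) and also deals with preemption:

#include <linux/compiler.h>
#include <asm/processor.h>              /* read_cr0()/write_cr0(), X86_CR0_WP */

static inline unsigned long pax_open_kernel_sketch(void)
{
        unsigned long cr0 = read_cr0();

        write_cr0(cr0 & ~X86_CR0_WP);   /* allow writes to read-only kernel pages */
        barrier();
        return cr0;
}

static inline unsigned long pax_close_kernel_sketch(void)
{
        unsigned long cr0 = read_cr0() | X86_CR0_WP;

        barrier();
        write_cr0(cr0);                 /* restore write protection */
        return cr0;
}
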
18458 diff -urNp linux-2.6.32.44/arch/x86/kvm/vmx.c linux-2.6.32.44/arch/x86/kvm/vmx.c
18459 --- linux-2.6.32.44/arch/x86/kvm/vmx.c 2011-03-27 14:31:47.000000000 -0400
18460 +++ linux-2.6.32.44/arch/x86/kvm/vmx.c 2011-05-04 17:56:20.000000000 -0400
18461 @@ -570,7 +570,11 @@ static void reload_tss(void)
18462
18463 kvm_get_gdt(&gdt);
18464 descs = (void *)gdt.base;
18465 +
18466 + pax_open_kernel();
18467 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
18468 + pax_close_kernel();
18469 +
18470 load_TR_desc();
18471 }
18472
18473 @@ -1409,8 +1413,11 @@ static __init int hardware_setup(void)
18474 if (!cpu_has_vmx_flexpriority())
18475 flexpriority_enabled = 0;
18476
18477 - if (!cpu_has_vmx_tpr_shadow())
18478 - kvm_x86_ops->update_cr8_intercept = NULL;
18479 + if (!cpu_has_vmx_tpr_shadow()) {
18480 + pax_open_kernel();
18481 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
18482 + pax_close_kernel();
18483 + }
18484
18485 if (enable_ept && !cpu_has_vmx_ept_2m_page())
18486 kvm_disable_largepages();
18487 @@ -2361,7 +2368,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
18488 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
18489
18490 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
18491 - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
18492 + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
18493 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
18494 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
18495 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
18496 @@ -3717,6 +3724,12 @@ static void vmx_vcpu_run(struct kvm_vcpu
18497 "jmp .Lkvm_vmx_return \n\t"
18498 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
18499 ".Lkvm_vmx_return: "
18500 +
18501 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18502 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
18503 + ".Lkvm_vmx_return2: "
18504 +#endif
18505 +
18506 /* Save guest registers, load host registers, keep flags */
18507 "xchg %0, (%%"R"sp) \n\t"
18508 "mov %%"R"ax, %c[rax](%0) \n\t"
18509 @@ -3763,8 +3776,13 @@ static void vmx_vcpu_run(struct kvm_vcpu
18510 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
18511 #endif
18512 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
18513 +
18514 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18515 + ,[cs]"i"(__KERNEL_CS)
18516 +#endif
18517 +
18518 : "cc", "memory"
18519 - , R"bx", R"di", R"si"
18520 + , R"ax", R"bx", R"di", R"si"
18521 #ifdef CONFIG_X86_64
18522 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
18523 #endif
18524 @@ -3781,7 +3799,16 @@ static void vmx_vcpu_run(struct kvm_vcpu
18525 if (vmx->rmode.irq.pending)
18526 fixup_rmode_irq(vmx);
18527
18528 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
18529 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
18530 +
18531 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18532 + loadsegment(fs, __KERNEL_PERCPU);
18533 +#endif
18534 +
18535 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18536 + __set_fs(current_thread_info()->addr_limit);
18537 +#endif
18538 +
18539 vmx->launched = 1;
18540
18541 vmx_complete_interrupts(vmx);
18542 @@ -3956,7 +3983,7 @@ static bool vmx_gb_page_enable(void)
18543 return false;
18544 }
18545
18546 -static struct kvm_x86_ops vmx_x86_ops = {
18547 +static const struct kvm_x86_ops vmx_x86_ops = {
18548 .cpu_has_kvm_support = cpu_has_kvm_support,
18549 .disabled_by_bios = vmx_disabled_by_bios,
18550 .hardware_setup = hardware_setup,
18551 diff -urNp linux-2.6.32.44/arch/x86/kvm/x86.c linux-2.6.32.44/arch/x86/kvm/x86.c
18552 --- linux-2.6.32.44/arch/x86/kvm/x86.c 2011-05-10 22:12:01.000000000 -0400
18553 +++ linux-2.6.32.44/arch/x86/kvm/x86.c 2011-05-10 22:12:26.000000000 -0400
18554 @@ -82,7 +82,7 @@ static void update_cr8_intercept(struct
18555 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
18556 struct kvm_cpuid_entry2 __user *entries);
18557
18558 -struct kvm_x86_ops *kvm_x86_ops;
18559 +const struct kvm_x86_ops *kvm_x86_ops;
18560 EXPORT_SYMBOL_GPL(kvm_x86_ops);
18561
18562 int ignore_msrs = 0;
18563 @@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
18564 struct kvm_cpuid2 *cpuid,
18565 struct kvm_cpuid_entry2 __user *entries)
18566 {
18567 - int r;
18568 + int r, i;
18569
18570 r = -E2BIG;
18571 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
18572 goto out;
18573 r = -EFAULT;
18574 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
18575 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18576 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18577 goto out;
18578 + for (i = 0; i < cpuid->nent; ++i) {
18579 + struct kvm_cpuid_entry2 cpuid_entry;
18580 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
18581 + goto out;
18582 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
18583 + }
18584 vcpu->arch.cpuid_nent = cpuid->nent;
18585 kvm_apic_set_version(vcpu);
18586 return 0;
18587 @@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
18588 struct kvm_cpuid2 *cpuid,
18589 struct kvm_cpuid_entry2 __user *entries)
18590 {
18591 - int r;
18592 + int r, i;
18593
18594 vcpu_load(vcpu);
18595 r = -E2BIG;
18596 if (cpuid->nent < vcpu->arch.cpuid_nent)
18597 goto out;
18598 r = -EFAULT;
18599 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
18600 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18601 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18602 goto out;
18603 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
18604 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
18605 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
18606 + goto out;
18607 + }
18608 return 0;
18609
18610 out:
18611 @@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
18612 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
18613 struct kvm_interrupt *irq)
18614 {
18615 - if (irq->irq < 0 || irq->irq >= 256)
18616 + if (irq->irq >= 256)
18617 return -EINVAL;
18618 if (irqchip_in_kernel(vcpu->kvm))
18619 return -ENXIO;
18620 @@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cp
18621 .notifier_call = kvmclock_cpufreq_notifier
18622 };
18623
18624 -int kvm_arch_init(void *opaque)
18625 +int kvm_arch_init(const void *opaque)
18626 {
18627 int r, cpu;
18628 - struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
18629 + const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
18630
18631 if (kvm_x86_ops) {
18632 printk(KERN_ERR "kvm: already loaded the other module\n");
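
The cpuid ioctl changes above replace one large copy_{from,to}_user() into or out of vcpu->arch.cpuid_entries with an access_ok() check plus a per-entry copy through a stack temporary, so every individual user copy has a fixed, compile-time-known size, which is what the hardened usercopy checks elsewhere in this patch want to see. The same pattern in generic form (names here are illustrative, not from the patch; the caller is assumed to have bounded nent already, as KVM does with KVM_MAX_CPUID_ENTRIES):

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

struct elem {
        u32 a, b;
};

static int copy_elems_from_user(struct elem *dst, const struct elem __user *src,
                                unsigned int nent)
{
        unsigned int i;

        if (!access_ok(VERIFY_READ, src, nent * sizeof(*src)))
                return -EFAULT;

        for (i = 0; i < nent; i++) {
                struct elem tmp;

                if (__copy_from_user(&tmp, src + i, sizeof(tmp)))
                        return -EFAULT;
                dst[i] = tmp;           /* bounded, fixed-size copy per element */
        }
        return 0;
}
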
18633 diff -urNp linux-2.6.32.44/arch/x86/lguest/boot.c linux-2.6.32.44/arch/x86/lguest/boot.c
18634 --- linux-2.6.32.44/arch/x86/lguest/boot.c 2011-03-27 14:31:47.000000000 -0400
18635 +++ linux-2.6.32.44/arch/x86/lguest/boot.c 2011-08-05 20:33:55.000000000 -0400
18636 @@ -1172,9 +1172,10 @@ static __init int early_put_chars(u32 vt
18637 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
18638 * Launcher to reboot us.
18639 */
18640 -static void lguest_restart(char *reason)
18641 +static __noreturn void lguest_restart(char *reason)
18642 {
18643 kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
18644 + BUG();
18645 }
18646
18647 /*G:050
18648 diff -urNp linux-2.6.32.44/arch/x86/lib/atomic64_32.c linux-2.6.32.44/arch/x86/lib/atomic64_32.c
18649 --- linux-2.6.32.44/arch/x86/lib/atomic64_32.c 2011-03-27 14:31:47.000000000 -0400
18650 +++ linux-2.6.32.44/arch/x86/lib/atomic64_32.c 2011-05-04 17:56:28.000000000 -0400
18651 @@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u6
18652 }
18653 EXPORT_SYMBOL(atomic64_cmpxchg);
18654
18655 +u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
18656 +{
18657 + return cmpxchg8b(&ptr->counter, old_val, new_val);
18658 +}
18659 +EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
18660 +
18661 /**
18662 * atomic64_xchg - xchg atomic64 variable
18663 * @ptr: pointer to type atomic64_t
18664 @@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 n
18665 EXPORT_SYMBOL(atomic64_xchg);
18666
18667 /**
18668 + * atomic64_xchg_unchecked - xchg atomic64 variable
18669 + * @ptr: pointer to type atomic64_unchecked_t
18670 + * @new_val: value to assign
18671 + *
18672 + * Atomically xchgs the value of @ptr to @new_val and returns
18673 + * the old value.
18674 + */
18675 +u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
18676 +{
18677 + /*
18678 + * Try first with a (possibly incorrect) assumption about
18679 + * what we have there. We'll do two loops most likely,
18680 + * but we'll get an ownership MESI transaction straight away
18681 + * instead of a read transaction followed by a
18682 + * flush-for-ownership transaction:
18683 + */
18684 + u64 old_val, real_val = 0;
18685 +
18686 + do {
18687 + old_val = real_val;
18688 +
18689 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
18690 +
18691 + } while (real_val != old_val);
18692 +
18693 + return old_val;
18694 +}
18695 +EXPORT_SYMBOL(atomic64_xchg_unchecked);
18696 +
18697 +/**
18698 * atomic64_set - set atomic64 variable
18699 * @ptr: pointer to type atomic64_t
18700 * @new_val: value to assign
18701 @@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 n
18702 EXPORT_SYMBOL(atomic64_set);
18703
18704 /**
18705 -EXPORT_SYMBOL(atomic64_read);
18706 + * atomic64_unchecked_set - set atomic64 variable
18707 + * @ptr: pointer to type atomic64_unchecked_t
18708 + * @new_val: value to assign
18709 + *
18710 + * Atomically sets the value of @ptr to @new_val.
18711 + */
18712 +void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
18713 +{
18714 + atomic64_xchg_unchecked(ptr, new_val);
18715 +}
18716 +EXPORT_SYMBOL(atomic64_set_unchecked);
18717 +
18718 +/**
18719 * atomic64_add_return - add and return
18720 * @delta: integer value to add
18721 * @ptr: pointer to type atomic64_t
18722 @@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 del
18723 }
18724 EXPORT_SYMBOL(atomic64_add_return);
18725
18726 +/**
18727 + * atomic64_add_return_unchecked - add and return
18728 + * @delta: integer value to add
18729 + * @ptr: pointer to type atomic64_unchecked_t
18730 + *
18731 + * Atomically adds @delta to @ptr and returns @delta + *@ptr
18732 + */
18733 +noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18734 +{
18735 + /*
18736 + * Try first with a (possibly incorrect) assumption about
18737 + * what we have there. We'll do two loops most likely,
18738 + * but we'll get an ownership MESI transaction straight away
18739 + * instead of a read transaction followed by a
18740 + * flush-for-ownership transaction:
18741 + */
18742 + u64 old_val, new_val, real_val = 0;
18743 +
18744 + do {
18745 + old_val = real_val;
18746 + new_val = old_val + delta;
18747 +
18748 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
18749 +
18750 + } while (real_val != old_val);
18751 +
18752 + return new_val;
18753 +}
18754 +EXPORT_SYMBOL(atomic64_add_return_unchecked);
18755 +
18756 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
18757 {
18758 return atomic64_add_return(-delta, ptr);
18759 }
18760 EXPORT_SYMBOL(atomic64_sub_return);
18761
18762 +u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18763 +{
18764 + return atomic64_add_return_unchecked(-delta, ptr);
18765 +}
18766 +EXPORT_SYMBOL(atomic64_sub_return_unchecked);
18767 +
18768 u64 atomic64_inc_return(atomic64_t *ptr)
18769 {
18770 return atomic64_add_return(1, ptr);
18771 }
18772 EXPORT_SYMBOL(atomic64_inc_return);
18773
18774 +u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
18775 +{
18776 + return atomic64_add_return_unchecked(1, ptr);
18777 +}
18778 +EXPORT_SYMBOL(atomic64_inc_return_unchecked);
18779 +
18780 u64 atomic64_dec_return(atomic64_t *ptr)
18781 {
18782 return atomic64_sub_return(1, ptr);
18783 }
18784 EXPORT_SYMBOL(atomic64_dec_return);
18785
18786 +u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
18787 +{
18788 + return atomic64_sub_return_unchecked(1, ptr);
18789 +}
18790 +EXPORT_SYMBOL(atomic64_dec_return_unchecked);
18791 +
18792 /**
18793 * atomic64_add - add integer to atomic64 variable
18794 * @delta: integer value to add
18795 @@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t
18796 EXPORT_SYMBOL(atomic64_add);
18797
18798 /**
18799 + * atomic64_add_unchecked - add integer to atomic64 variable
18800 + * @delta: integer value to add
18801 + * @ptr: pointer to type atomic64_unchecked_t
18802 + *
18803 + * Atomically adds @delta to @ptr.
18804 + */
18805 +void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18806 +{
18807 + atomic64_add_return_unchecked(delta, ptr);
18808 +}
18809 +EXPORT_SYMBOL(atomic64_add_unchecked);
18810 +
18811 +/**
18812 * atomic64_sub - subtract the atomic64 variable
18813 * @delta: integer value to subtract
18814 * @ptr: pointer to type atomic64_t
18815 @@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t
18816 EXPORT_SYMBOL(atomic64_sub);
18817
18818 /**
18819 + * atomic64_sub_unchecked - subtract the atomic64 variable
18820 + * @delta: integer value to subtract
18821 + * @ptr: pointer to type atomic64_unchecked_t
18822 + *
18823 + * Atomically subtracts @delta from @ptr.
18824 + */
18825 +void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18826 +{
18827 + atomic64_add_unchecked(-delta, ptr);
18828 +}
18829 +EXPORT_SYMBOL(atomic64_sub_unchecked);
18830 +
18831 +/**
18832 * atomic64_sub_and_test - subtract value from variable and test result
18833 * @delta: integer value to subtract
18834 * @ptr: pointer to type atomic64_t
18835 @@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
18836 EXPORT_SYMBOL(atomic64_inc);
18837
18838 /**
18839 + * atomic64_inc_unchecked - increment atomic64 variable
18840 + * @ptr: pointer to type atomic64_unchecked_t
18841 + *
18842 + * Atomically increments @ptr by 1.
18843 + */
18844 +void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
18845 +{
18846 + atomic64_add_unchecked(1, ptr);
18847 +}
18848 +EXPORT_SYMBOL(atomic64_inc_unchecked);
18849 +
18850 +/**
18851 * atomic64_dec - decrement atomic64 variable
18852 * @ptr: pointer to type atomic64_t
18853 *
18854 @@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
18855 EXPORT_SYMBOL(atomic64_dec);
18856
18857 /**
18858 + * atomic64_dec_unchecked - decrement atomic64 variable
18859 + * @ptr: pointer to type atomic64_unchecked_t
18860 + *
18861 + * Atomically decrements @ptr by 1.
18862 + */
18863 +void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
18864 +{
18865 + atomic64_sub_unchecked(1, ptr);
18866 +}
18867 +EXPORT_SYMBOL(atomic64_dec_unchecked);
18868 +
18869 +/**
18870 * atomic64_dec_and_test - decrement and test
18871 * @ptr: pointer to type atomic64_t
18872 *
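
The atomic64_*_unchecked() helpers added above all reduce to one compare-and-swap retry loop: read the current value, compute old + delta, and retry whenever atomic64_cmpxchg_unchecked() reports that another CPU changed the value in the meantime; sub, inc and dec are thin wrappers (add of -delta, add/sub of 1). A minimal userspace sketch of that same pattern, written with the GCC/Clang __atomic builtins rather than the kernel primitives (so it is illustrative only, not the code above):

#include <stdio.h>

static unsigned long long add_return_sketch(unsigned long long *ptr,
                                            unsigned long long delta)
{
        unsigned long long old_val, new_val;

        old_val = __atomic_load_n(ptr, __ATOMIC_RELAXED);
        do {
                new_val = old_val + delta;
                /* on failure, old_val is refreshed with the value that won */
        } while (!__atomic_compare_exchange_n(ptr, &old_val, new_val, 0,
                                              __ATOMIC_SEQ_CST,
                                              __ATOMIC_SEQ_CST));
        return new_val;
}

int main(void)
{
        unsigned long long v = 40;

        printf("%llu\n", add_return_sketch(&v, 2));      /* 42 */
        printf("%llu\n", add_return_sketch(&v, -1ULL));  /* 41: sub == add of -delta */
        return 0;
}

The loop terminates because the compare-exchange either succeeds or reloads old_val with the current contents, mirroring the real_val/old_val bookkeeping in the hunk above.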
18873 diff -urNp linux-2.6.32.44/arch/x86/lib/checksum_32.S linux-2.6.32.44/arch/x86/lib/checksum_32.S
18874 --- linux-2.6.32.44/arch/x86/lib/checksum_32.S 2011-03-27 14:31:47.000000000 -0400
18875 +++ linux-2.6.32.44/arch/x86/lib/checksum_32.S 2011-04-17 15:56:46.000000000 -0400
18876 @@ -28,7 +28,8 @@
18877 #include <linux/linkage.h>
18878 #include <asm/dwarf2.h>
18879 #include <asm/errno.h>
18880 -
18881 +#include <asm/segment.h>
18882 +
18883 /*
18884 * computes a partial checksum, e.g. for TCP/UDP fragments
18885 */
18886 @@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (
18887
18888 #define ARGBASE 16
18889 #define FP 12
18890 -
18891 -ENTRY(csum_partial_copy_generic)
18892 +
18893 +ENTRY(csum_partial_copy_generic_to_user)
18894 CFI_STARTPROC
18895 +
18896 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18897 + pushl %gs
18898 + CFI_ADJUST_CFA_OFFSET 4
18899 + popl %es
18900 + CFI_ADJUST_CFA_OFFSET -4
18901 + jmp csum_partial_copy_generic
18902 +#endif
18903 +
18904 +ENTRY(csum_partial_copy_generic_from_user)
18905 +
18906 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18907 + pushl %gs
18908 + CFI_ADJUST_CFA_OFFSET 4
18909 + popl %ds
18910 + CFI_ADJUST_CFA_OFFSET -4
18911 +#endif
18912 +
18913 +ENTRY(csum_partial_copy_generic)
18914 subl $4,%esp
18915 CFI_ADJUST_CFA_OFFSET 4
18916 pushl %edi
18917 @@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
18918 jmp 4f
18919 SRC(1: movw (%esi), %bx )
18920 addl $2, %esi
18921 -DST( movw %bx, (%edi) )
18922 +DST( movw %bx, %es:(%edi) )
18923 addl $2, %edi
18924 addw %bx, %ax
18925 adcl $0, %eax
18926 @@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
18927 SRC(1: movl (%esi), %ebx )
18928 SRC( movl 4(%esi), %edx )
18929 adcl %ebx, %eax
18930 -DST( movl %ebx, (%edi) )
18931 +DST( movl %ebx, %es:(%edi) )
18932 adcl %edx, %eax
18933 -DST( movl %edx, 4(%edi) )
18934 +DST( movl %edx, %es:4(%edi) )
18935
18936 SRC( movl 8(%esi), %ebx )
18937 SRC( movl 12(%esi), %edx )
18938 adcl %ebx, %eax
18939 -DST( movl %ebx, 8(%edi) )
18940 +DST( movl %ebx, %es:8(%edi) )
18941 adcl %edx, %eax
18942 -DST( movl %edx, 12(%edi) )
18943 +DST( movl %edx, %es:12(%edi) )
18944
18945 SRC( movl 16(%esi), %ebx )
18946 SRC( movl 20(%esi), %edx )
18947 adcl %ebx, %eax
18948 -DST( movl %ebx, 16(%edi) )
18949 +DST( movl %ebx, %es:16(%edi) )
18950 adcl %edx, %eax
18951 -DST( movl %edx, 20(%edi) )
18952 +DST( movl %edx, %es:20(%edi) )
18953
18954 SRC( movl 24(%esi), %ebx )
18955 SRC( movl 28(%esi), %edx )
18956 adcl %ebx, %eax
18957 -DST( movl %ebx, 24(%edi) )
18958 +DST( movl %ebx, %es:24(%edi) )
18959 adcl %edx, %eax
18960 -DST( movl %edx, 28(%edi) )
18961 +DST( movl %edx, %es:28(%edi) )
18962
18963 lea 32(%esi), %esi
18964 lea 32(%edi), %edi
18965 @@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
18966 shrl $2, %edx # This clears CF
18967 SRC(3: movl (%esi), %ebx )
18968 adcl %ebx, %eax
18969 -DST( movl %ebx, (%edi) )
18970 +DST( movl %ebx, %es:(%edi) )
18971 lea 4(%esi), %esi
18972 lea 4(%edi), %edi
18973 dec %edx
18974 @@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
18975 jb 5f
18976 SRC( movw (%esi), %cx )
18977 leal 2(%esi), %esi
18978 -DST( movw %cx, (%edi) )
18979 +DST( movw %cx, %es:(%edi) )
18980 leal 2(%edi), %edi
18981 je 6f
18982 shll $16,%ecx
18983 SRC(5: movb (%esi), %cl )
18984 -DST( movb %cl, (%edi) )
18985 +DST( movb %cl, %es:(%edi) )
18986 6: addl %ecx, %eax
18987 adcl $0, %eax
18988 7:
18989 @@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
18990
18991 6001:
18992 movl ARGBASE+20(%esp), %ebx # src_err_ptr
18993 - movl $-EFAULT, (%ebx)
18994 + movl $-EFAULT, %ss:(%ebx)
18995
18996 # zero the complete destination - computing the rest
18997 # is too much work
18998 @@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
18999
19000 6002:
19001 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19002 - movl $-EFAULT,(%ebx)
19003 + movl $-EFAULT,%ss:(%ebx)
19004 jmp 5000b
19005
19006 .previous
19007
19008 + pushl %ss
19009 + CFI_ADJUST_CFA_OFFSET 4
19010 + popl %ds
19011 + CFI_ADJUST_CFA_OFFSET -4
19012 + pushl %ss
19013 + CFI_ADJUST_CFA_OFFSET 4
19014 + popl %es
19015 + CFI_ADJUST_CFA_OFFSET -4
19016 popl %ebx
19017 CFI_ADJUST_CFA_OFFSET -4
19018 CFI_RESTORE ebx
19019 @@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
19020 CFI_ADJUST_CFA_OFFSET -4
19021 ret
19022 CFI_ENDPROC
19023 -ENDPROC(csum_partial_copy_generic)
19024 +ENDPROC(csum_partial_copy_generic_to_user)
19025
19026 #else
19027
19028 /* Version for PentiumII/PPro */
19029
19030 #define ROUND1(x) \
19031 + nop; nop; nop; \
19032 SRC(movl x(%esi), %ebx ) ; \
19033 addl %ebx, %eax ; \
19034 - DST(movl %ebx, x(%edi) ) ;
19035 + DST(movl %ebx, %es:x(%edi)) ;
19036
19037 #define ROUND(x) \
19038 + nop; nop; nop; \
19039 SRC(movl x(%esi), %ebx ) ; \
19040 adcl %ebx, %eax ; \
19041 - DST(movl %ebx, x(%edi) ) ;
19042 + DST(movl %ebx, %es:x(%edi)) ;
19043
19044 #define ARGBASE 12
19045 -
19046 -ENTRY(csum_partial_copy_generic)
19047 +
19048 +ENTRY(csum_partial_copy_generic_to_user)
19049 CFI_STARTPROC
19050 +
19051 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19052 + pushl %gs
19053 + CFI_ADJUST_CFA_OFFSET 4
19054 + popl %es
19055 + CFI_ADJUST_CFA_OFFSET -4
19056 + jmp csum_partial_copy_generic
19057 +#endif
19058 +
19059 +ENTRY(csum_partial_copy_generic_from_user)
19060 +
19061 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19062 + pushl %gs
19063 + CFI_ADJUST_CFA_OFFSET 4
19064 + popl %ds
19065 + CFI_ADJUST_CFA_OFFSET -4
19066 +#endif
19067 +
19068 +ENTRY(csum_partial_copy_generic)
19069 pushl %ebx
19070 CFI_ADJUST_CFA_OFFSET 4
19071 CFI_REL_OFFSET ebx, 0
19072 @@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
19073 subl %ebx, %edi
19074 lea -1(%esi),%edx
19075 andl $-32,%edx
19076 - lea 3f(%ebx,%ebx), %ebx
19077 + lea 3f(%ebx,%ebx,2), %ebx
19078 testl %esi, %esi
19079 jmp *%ebx
19080 1: addl $64,%esi
19081 @@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
19082 jb 5f
19083 SRC( movw (%esi), %dx )
19084 leal 2(%esi), %esi
19085 -DST( movw %dx, (%edi) )
19086 +DST( movw %dx, %es:(%edi) )
19087 leal 2(%edi), %edi
19088 je 6f
19089 shll $16,%edx
19090 5:
19091 SRC( movb (%esi), %dl )
19092 -DST( movb %dl, (%edi) )
19093 +DST( movb %dl, %es:(%edi) )
19094 6: addl %edx, %eax
19095 adcl $0, %eax
19096 7:
19097 .section .fixup, "ax"
19098 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
19099 - movl $-EFAULT, (%ebx)
19100 + movl $-EFAULT, %ss:(%ebx)
19101 # zero the complete destination (computing the rest is too much work)
19102 movl ARGBASE+8(%esp),%edi # dst
19103 movl ARGBASE+12(%esp),%ecx # len
19104 @@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
19105 rep; stosb
19106 jmp 7b
19107 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19108 - movl $-EFAULT, (%ebx)
19109 + movl $-EFAULT, %ss:(%ebx)
19110 jmp 7b
19111 .previous
19112
19113 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19114 + pushl %ss
19115 + CFI_ADJUST_CFA_OFFSET 4
19116 + popl %ds
19117 + CFI_ADJUST_CFA_OFFSET -4
19118 + pushl %ss
19119 + CFI_ADJUST_CFA_OFFSET 4
19120 + popl %es
19121 + CFI_ADJUST_CFA_OFFSET -4
19122 +#endif
19123 +
19124 popl %esi
19125 CFI_ADJUST_CFA_OFFSET -4
19126 CFI_RESTORE esi
19127 @@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
19128 CFI_RESTORE ebx
19129 ret
19130 CFI_ENDPROC
19131 -ENDPROC(csum_partial_copy_generic)
19132 +ENDPROC(csum_partial_copy_generic_to_user)
19133
19134 #undef ROUND
19135 #undef ROUND1
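
The checksum_32.S changes above are about where the data lives, not what is computed: under CONFIG_PAX_MEMORY_UDEREF the new _to_user/_from_user entry points load the user segment into %es or %ds so the DST()/SRC() accesses go through it, while the arithmetic itself is untouched. For readers unfamiliar with that arithmetic, this is roughly what csum_partial plus the final fold/complement produce, as a plain, unoptimized C sketch (byte order and folding handled the textbook way, not as in the hand-written assembly):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* 16-bit ones'-complement sum as used by TCP/UDP; the kernel splits this
 * into csum_partial() and a separate fold/complement step */
static uint16_t csum_sketch(const uint8_t *buf, size_t len)
{
        uint32_t sum = 0;
        size_t i;

        for (i = 0; i + 1 < len; i += 2)
                sum += (uint32_t)buf[i] << 8 | buf[i + 1];
        if (len & 1)                       /* odd trailing byte */
                sum += (uint32_t)buf[len - 1] << 8;
        while (sum >> 16)                  /* fold carries back into 16 bits */
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

int main(void)
{
        const uint8_t data[] = { 0x45, 0x00, 0x00, 0x3c };

        printf("0x%04x\n", (unsigned)csum_sketch(data, sizeof(data)));  /* 0xbac3 */
        return 0;
}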
19136 diff -urNp linux-2.6.32.44/arch/x86/lib/clear_page_64.S linux-2.6.32.44/arch/x86/lib/clear_page_64.S
19137 --- linux-2.6.32.44/arch/x86/lib/clear_page_64.S 2011-03-27 14:31:47.000000000 -0400
19138 +++ linux-2.6.32.44/arch/x86/lib/clear_page_64.S 2011-04-17 15:56:46.000000000 -0400
19139 @@ -43,7 +43,7 @@ ENDPROC(clear_page)
19140
19141 #include <asm/cpufeature.h>
19142
19143 - .section .altinstr_replacement,"ax"
19144 + .section .altinstr_replacement,"a"
19145 1: .byte 0xeb /* jmp <disp8> */
19146 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
19147 2:
19148 diff -urNp linux-2.6.32.44/arch/x86/lib/copy_page_64.S linux-2.6.32.44/arch/x86/lib/copy_page_64.S
19149 --- linux-2.6.32.44/arch/x86/lib/copy_page_64.S 2011-03-27 14:31:47.000000000 -0400
19150 +++ linux-2.6.32.44/arch/x86/lib/copy_page_64.S 2011-04-17 15:56:46.000000000 -0400
19151 @@ -104,7 +104,7 @@ ENDPROC(copy_page)
19152
19153 #include <asm/cpufeature.h>
19154
19155 - .section .altinstr_replacement,"ax"
19156 + .section .altinstr_replacement,"a"
19157 1: .byte 0xeb /* jmp <disp8> */
19158 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
19159 2:
19160 diff -urNp linux-2.6.32.44/arch/x86/lib/copy_user_64.S linux-2.6.32.44/arch/x86/lib/copy_user_64.S
19161 --- linux-2.6.32.44/arch/x86/lib/copy_user_64.S 2011-06-25 12:55:34.000000000 -0400
19162 +++ linux-2.6.32.44/arch/x86/lib/copy_user_64.S 2011-06-25 12:56:37.000000000 -0400
19163 @@ -15,13 +15,14 @@
19164 #include <asm/asm-offsets.h>
19165 #include <asm/thread_info.h>
19166 #include <asm/cpufeature.h>
19167 +#include <asm/pgtable.h>
19168
19169 .macro ALTERNATIVE_JUMP feature,orig,alt
19170 0:
19171 .byte 0xe9 /* 32bit jump */
19172 .long \orig-1f /* by default jump to orig */
19173 1:
19174 - .section .altinstr_replacement,"ax"
19175 + .section .altinstr_replacement,"a"
19176 2: .byte 0xe9 /* near jump with 32bit immediate */
19177 .long \alt-1b /* offset */ /* or alternatively to alt */
19178 .previous
19179 @@ -64,49 +65,19 @@
19180 #endif
19181 .endm
19182
19183 -/* Standard copy_to_user with segment limit checking */
19184 -ENTRY(copy_to_user)
19185 - CFI_STARTPROC
19186 - GET_THREAD_INFO(%rax)
19187 - movq %rdi,%rcx
19188 - addq %rdx,%rcx
19189 - jc bad_to_user
19190 - cmpq TI_addr_limit(%rax),%rcx
19191 - ja bad_to_user
19192 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19193 - CFI_ENDPROC
19194 -ENDPROC(copy_to_user)
19195 -
19196 -/* Standard copy_from_user with segment limit checking */
19197 -ENTRY(copy_from_user)
19198 - CFI_STARTPROC
19199 - GET_THREAD_INFO(%rax)
19200 - movq %rsi,%rcx
19201 - addq %rdx,%rcx
19202 - jc bad_from_user
19203 - cmpq TI_addr_limit(%rax),%rcx
19204 - ja bad_from_user
19205 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19206 - CFI_ENDPROC
19207 -ENDPROC(copy_from_user)
19208 -
19209 ENTRY(copy_user_generic)
19210 CFI_STARTPROC
19211 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19212 CFI_ENDPROC
19213 ENDPROC(copy_user_generic)
19214
19215 -ENTRY(__copy_from_user_inatomic)
19216 - CFI_STARTPROC
19217 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19218 - CFI_ENDPROC
19219 -ENDPROC(__copy_from_user_inatomic)
19220 -
19221 .section .fixup,"ax"
19222 /* must zero dest */
19223 ENTRY(bad_from_user)
19224 bad_from_user:
19225 CFI_STARTPROC
19226 + testl %edx,%edx
19227 + js bad_to_user
19228 movl %edx,%ecx
19229 xorl %eax,%eax
19230 rep
19231 diff -urNp linux-2.6.32.44/arch/x86/lib/copy_user_nocache_64.S linux-2.6.32.44/arch/x86/lib/copy_user_nocache_64.S
19232 --- linux-2.6.32.44/arch/x86/lib/copy_user_nocache_64.S 2011-03-27 14:31:47.000000000 -0400
19233 +++ linux-2.6.32.44/arch/x86/lib/copy_user_nocache_64.S 2011-04-17 15:56:46.000000000 -0400
19234 @@ -14,6 +14,7 @@
19235 #include <asm/current.h>
19236 #include <asm/asm-offsets.h>
19237 #include <asm/thread_info.h>
19238 +#include <asm/pgtable.h>
19239
19240 .macro ALIGN_DESTINATION
19241 #ifdef FIX_ALIGNMENT
19242 @@ -50,6 +51,15 @@
19243 */
19244 ENTRY(__copy_user_nocache)
19245 CFI_STARTPROC
19246 +
19247 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19248 + mov $PAX_USER_SHADOW_BASE,%rcx
19249 + cmp %rcx,%rsi
19250 + jae 1f
19251 + add %rcx,%rsi
19252 +1:
19253 +#endif
19254 +
19255 cmpl $8,%edx
19256 jb 20f /* less then 8 bytes, go to byte copy loop */
19257 ALIGN_DESTINATION
19258 diff -urNp linux-2.6.32.44/arch/x86/lib/csum-wrappers_64.c linux-2.6.32.44/arch/x86/lib/csum-wrappers_64.c
19259 --- linux-2.6.32.44/arch/x86/lib/csum-wrappers_64.c 2011-03-27 14:31:47.000000000 -0400
19260 +++ linux-2.6.32.44/arch/x86/lib/csum-wrappers_64.c 2011-05-04 17:56:20.000000000 -0400
19261 @@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
19262 len -= 2;
19263 }
19264 }
19265 +
19266 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19267 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19268 + src += PAX_USER_SHADOW_BASE;
19269 +#endif
19270 +
19271 isum = csum_partial_copy_generic((__force const void *)src,
19272 dst, len, isum, errp, NULL);
19273 if (unlikely(*errp))
19274 @@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
19275 }
19276
19277 *errp = 0;
19278 +
19279 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19280 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
19281 + dst += PAX_USER_SHADOW_BASE;
19282 +#endif
19283 +
19284 return csum_partial_copy_generic(src, (void __force *)dst,
19285 len, isum, NULL, errp);
19286 }
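
Both csum-wrappers hunks apply the same UDEREF fix-up in C before handing the pointer to csum_partial_copy_generic(): a userland address that lies below PAX_USER_SHADOW_BASE is shifted into the shadow mapping by adding the base. A standalone sketch of that test-and-offset, with a made-up base constant (the real PAX_USER_SHADOW_BASE comes from the kernel's pgtable headers and is architecture specific):

#include <stdio.h>

#define SHADOW_BASE_EXAMPLE 0x40000000UL   /* hypothetical, for illustration only */

static unsigned long shadow_fixup(unsigned long addr)
{
        if (addr < SHADOW_BASE_EXAMPLE)
                addr += SHADOW_BASE_EXAMPLE;   /* redirect into the shadow mapping */
        return addr;
}

int main(void)
{
        printf("%#lx -> %#lx\n", 0x400000UL, shadow_fixup(0x400000UL));
        printf("%#lx -> %#lx\n", 0x50000000UL, shadow_fixup(0x50000000UL));
        return 0;
}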
19287 diff -urNp linux-2.6.32.44/arch/x86/lib/getuser.S linux-2.6.32.44/arch/x86/lib/getuser.S
19288 --- linux-2.6.32.44/arch/x86/lib/getuser.S 2011-03-27 14:31:47.000000000 -0400
19289 +++ linux-2.6.32.44/arch/x86/lib/getuser.S 2011-04-17 15:56:46.000000000 -0400
19290 @@ -33,14 +33,35 @@
19291 #include <asm/asm-offsets.h>
19292 #include <asm/thread_info.h>
19293 #include <asm/asm.h>
19294 +#include <asm/segment.h>
19295 +#include <asm/pgtable.h>
19296 +
19297 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19298 +#define __copyuser_seg gs;
19299 +#else
19300 +#define __copyuser_seg
19301 +#endif
19302
19303 .text
19304 ENTRY(__get_user_1)
19305 CFI_STARTPROC
19306 +
19307 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19308 GET_THREAD_INFO(%_ASM_DX)
19309 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19310 jae bad_get_user
19311 -1: movzb (%_ASM_AX),%edx
19312 +
19313 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19314 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19315 + cmp %_ASM_DX,%_ASM_AX
19316 + jae 1234f
19317 + add %_ASM_DX,%_ASM_AX
19318 +1234:
19319 +#endif
19320 +
19321 +#endif
19322 +
19323 +1: __copyuser_seg movzb (%_ASM_AX),%edx
19324 xor %eax,%eax
19325 ret
19326 CFI_ENDPROC
19327 @@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
19328 ENTRY(__get_user_2)
19329 CFI_STARTPROC
19330 add $1,%_ASM_AX
19331 +
19332 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19333 jc bad_get_user
19334 GET_THREAD_INFO(%_ASM_DX)
19335 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19336 jae bad_get_user
19337 -2: movzwl -1(%_ASM_AX),%edx
19338 +
19339 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19340 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19341 + cmp %_ASM_DX,%_ASM_AX
19342 + jae 1234f
19343 + add %_ASM_DX,%_ASM_AX
19344 +1234:
19345 +#endif
19346 +
19347 +#endif
19348 +
19349 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
19350 xor %eax,%eax
19351 ret
19352 CFI_ENDPROC
19353 @@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
19354 ENTRY(__get_user_4)
19355 CFI_STARTPROC
19356 add $3,%_ASM_AX
19357 +
19358 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19359 jc bad_get_user
19360 GET_THREAD_INFO(%_ASM_DX)
19361 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19362 jae bad_get_user
19363 -3: mov -3(%_ASM_AX),%edx
19364 +
19365 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19366 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19367 + cmp %_ASM_DX,%_ASM_AX
19368 + jae 1234f
19369 + add %_ASM_DX,%_ASM_AX
19370 +1234:
19371 +#endif
19372 +
19373 +#endif
19374 +
19375 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
19376 xor %eax,%eax
19377 ret
19378 CFI_ENDPROC
19379 @@ -80,6 +127,15 @@ ENTRY(__get_user_8)
19380 GET_THREAD_INFO(%_ASM_DX)
19381 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19382 jae bad_get_user
19383 +
19384 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19385 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19386 + cmp %_ASM_DX,%_ASM_AX
19387 + jae 1234f
19388 + add %_ASM_DX,%_ASM_AX
19389 +1234:
19390 +#endif
19391 +
19392 4: movq -7(%_ASM_AX),%_ASM_DX
19393 xor %eax,%eax
19394 ret
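
When the UDEREF path is not compiled in, __get_user_2/4/8 above still perform the classic range check before dereferencing: add size-1 to the user address, treat a carry (wrap-around) as a fault, and compare the address of the last byte against the task's addr_limit. The same logic expressed in C, with a hypothetical limit value:

#include <stdbool.h>
#include <stdio.h>

static bool access_in_range(unsigned long addr, unsigned long size,
                            unsigned long addr_limit)
{
        unsigned long last = addr + size - 1;

        if (last < addr)                /* wrapped: the "jc bad_get_user" case */
                return false;
        return last < addr_limit;       /* "cmp TI_addr_limit ... jae bad_get_user" */
}

int main(void)
{
        const unsigned long limit = 0xc0000000UL;   /* hypothetical address limit */

        printf("%d\n", access_in_range(0x1000UL, 8, limit));          /* 1 */
        printf("%d\n", access_in_range((unsigned long)-4, 8, limit)); /* 0: wraps */
        return 0;
}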
19395 diff -urNp linux-2.6.32.44/arch/x86/lib/memcpy_64.S linux-2.6.32.44/arch/x86/lib/memcpy_64.S
19396 --- linux-2.6.32.44/arch/x86/lib/memcpy_64.S 2011-03-27 14:31:47.000000000 -0400
19397 +++ linux-2.6.32.44/arch/x86/lib/memcpy_64.S 2011-04-17 15:56:46.000000000 -0400
19398 @@ -128,7 +128,7 @@ ENDPROC(__memcpy)
19399 * It is also a lot simpler. Use this when possible:
19400 */
19401
19402 - .section .altinstr_replacement, "ax"
19403 + .section .altinstr_replacement, "a"
19404 1: .byte 0xeb /* jmp <disp8> */
19405 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
19406 2:
19407 diff -urNp linux-2.6.32.44/arch/x86/lib/memset_64.S linux-2.6.32.44/arch/x86/lib/memset_64.S
19408 --- linux-2.6.32.44/arch/x86/lib/memset_64.S 2011-03-27 14:31:47.000000000 -0400
19409 +++ linux-2.6.32.44/arch/x86/lib/memset_64.S 2011-04-17 15:56:46.000000000 -0400
19410 @@ -118,7 +118,7 @@ ENDPROC(__memset)
19411
19412 #include <asm/cpufeature.h>
19413
19414 - .section .altinstr_replacement,"ax"
19415 + .section .altinstr_replacement,"a"
19416 1: .byte 0xeb /* jmp <disp8> */
19417 .byte (memset_c - memset) - (2f - 1b) /* offset */
19418 2:
19419 diff -urNp linux-2.6.32.44/arch/x86/lib/mmx_32.c linux-2.6.32.44/arch/x86/lib/mmx_32.c
19420 --- linux-2.6.32.44/arch/x86/lib/mmx_32.c 2011-03-27 14:31:47.000000000 -0400
19421 +++ linux-2.6.32.44/arch/x86/lib/mmx_32.c 2011-04-17 15:56:46.000000000 -0400
19422 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
19423 {
19424 void *p;
19425 int i;
19426 + unsigned long cr0;
19427
19428 if (unlikely(in_interrupt()))
19429 return __memcpy(to, from, len);
19430 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
19431 kernel_fpu_begin();
19432
19433 __asm__ __volatile__ (
19434 - "1: prefetch (%0)\n" /* This set is 28 bytes */
19435 - " prefetch 64(%0)\n"
19436 - " prefetch 128(%0)\n"
19437 - " prefetch 192(%0)\n"
19438 - " prefetch 256(%0)\n"
19439 + "1: prefetch (%1)\n" /* This set is 28 bytes */
19440 + " prefetch 64(%1)\n"
19441 + " prefetch 128(%1)\n"
19442 + " prefetch 192(%1)\n"
19443 + " prefetch 256(%1)\n"
19444 "2: \n"
19445 ".section .fixup, \"ax\"\n"
19446 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19447 + "3: \n"
19448 +
19449 +#ifdef CONFIG_PAX_KERNEXEC
19450 + " movl %%cr0, %0\n"
19451 + " movl %0, %%eax\n"
19452 + " andl $0xFFFEFFFF, %%eax\n"
19453 + " movl %%eax, %%cr0\n"
19454 +#endif
19455 +
19456 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19457 +
19458 +#ifdef CONFIG_PAX_KERNEXEC
19459 + " movl %0, %%cr0\n"
19460 +#endif
19461 +
19462 " jmp 2b\n"
19463 ".previous\n"
19464 _ASM_EXTABLE(1b, 3b)
19465 - : : "r" (from));
19466 + : "=&r" (cr0) : "r" (from) : "ax");
19467
19468 for ( ; i > 5; i--) {
19469 __asm__ __volatile__ (
19470 - "1: prefetch 320(%0)\n"
19471 - "2: movq (%0), %%mm0\n"
19472 - " movq 8(%0), %%mm1\n"
19473 - " movq 16(%0), %%mm2\n"
19474 - " movq 24(%0), %%mm3\n"
19475 - " movq %%mm0, (%1)\n"
19476 - " movq %%mm1, 8(%1)\n"
19477 - " movq %%mm2, 16(%1)\n"
19478 - " movq %%mm3, 24(%1)\n"
19479 - " movq 32(%0), %%mm0\n"
19480 - " movq 40(%0), %%mm1\n"
19481 - " movq 48(%0), %%mm2\n"
19482 - " movq 56(%0), %%mm3\n"
19483 - " movq %%mm0, 32(%1)\n"
19484 - " movq %%mm1, 40(%1)\n"
19485 - " movq %%mm2, 48(%1)\n"
19486 - " movq %%mm3, 56(%1)\n"
19487 + "1: prefetch 320(%1)\n"
19488 + "2: movq (%1), %%mm0\n"
19489 + " movq 8(%1), %%mm1\n"
19490 + " movq 16(%1), %%mm2\n"
19491 + " movq 24(%1), %%mm3\n"
19492 + " movq %%mm0, (%2)\n"
19493 + " movq %%mm1, 8(%2)\n"
19494 + " movq %%mm2, 16(%2)\n"
19495 + " movq %%mm3, 24(%2)\n"
19496 + " movq 32(%1), %%mm0\n"
19497 + " movq 40(%1), %%mm1\n"
19498 + " movq 48(%1), %%mm2\n"
19499 + " movq 56(%1), %%mm3\n"
19500 + " movq %%mm0, 32(%2)\n"
19501 + " movq %%mm1, 40(%2)\n"
19502 + " movq %%mm2, 48(%2)\n"
19503 + " movq %%mm3, 56(%2)\n"
19504 ".section .fixup, \"ax\"\n"
19505 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19506 + "3:\n"
19507 +
19508 +#ifdef CONFIG_PAX_KERNEXEC
19509 + " movl %%cr0, %0\n"
19510 + " movl %0, %%eax\n"
19511 + " andl $0xFFFEFFFF, %%eax\n"
19512 + " movl %%eax, %%cr0\n"
19513 +#endif
19514 +
19515 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19516 +
19517 +#ifdef CONFIG_PAX_KERNEXEC
19518 + " movl %0, %%cr0\n"
19519 +#endif
19520 +
19521 " jmp 2b\n"
19522 ".previous\n"
19523 _ASM_EXTABLE(1b, 3b)
19524 - : : "r" (from), "r" (to) : "memory");
19525 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19526
19527 from += 64;
19528 to += 64;
19529 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
19530 static void fast_copy_page(void *to, void *from)
19531 {
19532 int i;
19533 + unsigned long cr0;
19534
19535 kernel_fpu_begin();
19536
19537 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
19538 * but that is for later. -AV
19539 */
19540 __asm__ __volatile__(
19541 - "1: prefetch (%0)\n"
19542 - " prefetch 64(%0)\n"
19543 - " prefetch 128(%0)\n"
19544 - " prefetch 192(%0)\n"
19545 - " prefetch 256(%0)\n"
19546 + "1: prefetch (%1)\n"
19547 + " prefetch 64(%1)\n"
19548 + " prefetch 128(%1)\n"
19549 + " prefetch 192(%1)\n"
19550 + " prefetch 256(%1)\n"
19551 "2: \n"
19552 ".section .fixup, \"ax\"\n"
19553 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19554 + "3: \n"
19555 +
19556 +#ifdef CONFIG_PAX_KERNEXEC
19557 + " movl %%cr0, %0\n"
19558 + " movl %0, %%eax\n"
19559 + " andl $0xFFFEFFFF, %%eax\n"
19560 + " movl %%eax, %%cr0\n"
19561 +#endif
19562 +
19563 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19564 +
19565 +#ifdef CONFIG_PAX_KERNEXEC
19566 + " movl %0, %%cr0\n"
19567 +#endif
19568 +
19569 " jmp 2b\n"
19570 ".previous\n"
19571 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
19572 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
19573
19574 for (i = 0; i < (4096-320)/64; i++) {
19575 __asm__ __volatile__ (
19576 - "1: prefetch 320(%0)\n"
19577 - "2: movq (%0), %%mm0\n"
19578 - " movntq %%mm0, (%1)\n"
19579 - " movq 8(%0), %%mm1\n"
19580 - " movntq %%mm1, 8(%1)\n"
19581 - " movq 16(%0), %%mm2\n"
19582 - " movntq %%mm2, 16(%1)\n"
19583 - " movq 24(%0), %%mm3\n"
19584 - " movntq %%mm3, 24(%1)\n"
19585 - " movq 32(%0), %%mm4\n"
19586 - " movntq %%mm4, 32(%1)\n"
19587 - " movq 40(%0), %%mm5\n"
19588 - " movntq %%mm5, 40(%1)\n"
19589 - " movq 48(%0), %%mm6\n"
19590 - " movntq %%mm6, 48(%1)\n"
19591 - " movq 56(%0), %%mm7\n"
19592 - " movntq %%mm7, 56(%1)\n"
19593 + "1: prefetch 320(%1)\n"
19594 + "2: movq (%1), %%mm0\n"
19595 + " movntq %%mm0, (%2)\n"
19596 + " movq 8(%1), %%mm1\n"
19597 + " movntq %%mm1, 8(%2)\n"
19598 + " movq 16(%1), %%mm2\n"
19599 + " movntq %%mm2, 16(%2)\n"
19600 + " movq 24(%1), %%mm3\n"
19601 + " movntq %%mm3, 24(%2)\n"
19602 + " movq 32(%1), %%mm4\n"
19603 + " movntq %%mm4, 32(%2)\n"
19604 + " movq 40(%1), %%mm5\n"
19605 + " movntq %%mm5, 40(%2)\n"
19606 + " movq 48(%1), %%mm6\n"
19607 + " movntq %%mm6, 48(%2)\n"
19608 + " movq 56(%1), %%mm7\n"
19609 + " movntq %%mm7, 56(%2)\n"
19610 ".section .fixup, \"ax\"\n"
19611 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19612 + "3:\n"
19613 +
19614 +#ifdef CONFIG_PAX_KERNEXEC
19615 + " movl %%cr0, %0\n"
19616 + " movl %0, %%eax\n"
19617 + " andl $0xFFFEFFFF, %%eax\n"
19618 + " movl %%eax, %%cr0\n"
19619 +#endif
19620 +
19621 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19622 +
19623 +#ifdef CONFIG_PAX_KERNEXEC
19624 + " movl %0, %%cr0\n"
19625 +#endif
19626 +
19627 " jmp 2b\n"
19628 ".previous\n"
19629 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
19630 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19631
19632 from += 64;
19633 to += 64;
19634 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
19635 static void fast_copy_page(void *to, void *from)
19636 {
19637 int i;
19638 + unsigned long cr0;
19639
19640 kernel_fpu_begin();
19641
19642 __asm__ __volatile__ (
19643 - "1: prefetch (%0)\n"
19644 - " prefetch 64(%0)\n"
19645 - " prefetch 128(%0)\n"
19646 - " prefetch 192(%0)\n"
19647 - " prefetch 256(%0)\n"
19648 + "1: prefetch (%1)\n"
19649 + " prefetch 64(%1)\n"
19650 + " prefetch 128(%1)\n"
19651 + " prefetch 192(%1)\n"
19652 + " prefetch 256(%1)\n"
19653 "2: \n"
19654 ".section .fixup, \"ax\"\n"
19655 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19656 + "3: \n"
19657 +
19658 +#ifdef CONFIG_PAX_KERNEXEC
19659 + " movl %%cr0, %0\n"
19660 + " movl %0, %%eax\n"
19661 + " andl $0xFFFEFFFF, %%eax\n"
19662 + " movl %%eax, %%cr0\n"
19663 +#endif
19664 +
19665 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19666 +
19667 +#ifdef CONFIG_PAX_KERNEXEC
19668 + " movl %0, %%cr0\n"
19669 +#endif
19670 +
19671 " jmp 2b\n"
19672 ".previous\n"
19673 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
19674 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
19675
19676 for (i = 0; i < 4096/64; i++) {
19677 __asm__ __volatile__ (
19678 - "1: prefetch 320(%0)\n"
19679 - "2: movq (%0), %%mm0\n"
19680 - " movq 8(%0), %%mm1\n"
19681 - " movq 16(%0), %%mm2\n"
19682 - " movq 24(%0), %%mm3\n"
19683 - " movq %%mm0, (%1)\n"
19684 - " movq %%mm1, 8(%1)\n"
19685 - " movq %%mm2, 16(%1)\n"
19686 - " movq %%mm3, 24(%1)\n"
19687 - " movq 32(%0), %%mm0\n"
19688 - " movq 40(%0), %%mm1\n"
19689 - " movq 48(%0), %%mm2\n"
19690 - " movq 56(%0), %%mm3\n"
19691 - " movq %%mm0, 32(%1)\n"
19692 - " movq %%mm1, 40(%1)\n"
19693 - " movq %%mm2, 48(%1)\n"
19694 - " movq %%mm3, 56(%1)\n"
19695 + "1: prefetch 320(%1)\n"
19696 + "2: movq (%1), %%mm0\n"
19697 + " movq 8(%1), %%mm1\n"
19698 + " movq 16(%1), %%mm2\n"
19699 + " movq 24(%1), %%mm3\n"
19700 + " movq %%mm0, (%2)\n"
19701 + " movq %%mm1, 8(%2)\n"
19702 + " movq %%mm2, 16(%2)\n"
19703 + " movq %%mm3, 24(%2)\n"
19704 + " movq 32(%1), %%mm0\n"
19705 + " movq 40(%1), %%mm1\n"
19706 + " movq 48(%1), %%mm2\n"
19707 + " movq 56(%1), %%mm3\n"
19708 + " movq %%mm0, 32(%2)\n"
19709 + " movq %%mm1, 40(%2)\n"
19710 + " movq %%mm2, 48(%2)\n"
19711 + " movq %%mm3, 56(%2)\n"
19712 ".section .fixup, \"ax\"\n"
19713 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19714 + "3:\n"
19715 +
19716 +#ifdef CONFIG_PAX_KERNEXEC
19717 + " movl %%cr0, %0\n"
19718 + " movl %0, %%eax\n"
19719 + " andl $0xFFFEFFFF, %%eax\n"
19720 + " movl %%eax, %%cr0\n"
19721 +#endif
19722 +
19723 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19724 +
19725 +#ifdef CONFIG_PAX_KERNEXEC
19726 + " movl %0, %%cr0\n"
19727 +#endif
19728 +
19729 " jmp 2b\n"
19730 ".previous\n"
19731 _ASM_EXTABLE(1b, 3b)
19732 - : : "r" (from), "r" (to) : "memory");
19733 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19734
19735 from += 64;
19736 to += 64;
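
Every asm block touched in mmx_32.c gains an output operand, "=&r" (cr0), and a wider clobber list: the KERNEXEC fixup code needs a scratch register to hold the saved %cr0 while it briefly clears the write-protect bit, and the constraints tell GCC which extra registers the template now writes (%eax there). A small userspace illustration of that constraint pattern only, an early-clobber scratch output plus an explicit clobber; it merely doubles a number and is not the kernel fixup code:

#include <stdio.h>

static unsigned long double_with_scratch(unsigned long in)
{
        unsigned long out, scratch;

        __asm__ (
                "mov %2, %0\n\t"        /* scratch = in                 */
                "mov %0, %1\n\t"        /* out = scratch                */
                "add %0, %1"            /* out += scratch (sets flags)  */
                : "=&r" (scratch), "=&r" (out)
                : "r" (in)
                : "cc");                /* declare the flag clobber      */
        return out;
}

int main(void)
{
        printf("%lu\n", double_with_scratch(21));   /* 42 */
        return 0;
}

The "&" early-clobber marker guarantees the scratch register is not shared with an input operand, which matters whenever the template writes it before the inputs are last read.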
19737 diff -urNp linux-2.6.32.44/arch/x86/lib/putuser.S linux-2.6.32.44/arch/x86/lib/putuser.S
19738 --- linux-2.6.32.44/arch/x86/lib/putuser.S 2011-03-27 14:31:47.000000000 -0400
19739 +++ linux-2.6.32.44/arch/x86/lib/putuser.S 2011-04-17 15:56:46.000000000 -0400
19740 @@ -15,7 +15,8 @@
19741 #include <asm/thread_info.h>
19742 #include <asm/errno.h>
19743 #include <asm/asm.h>
19744 -
19745 +#include <asm/segment.h>
19746 +#include <asm/pgtable.h>
19747
19748 /*
19749 * __put_user_X
19750 @@ -29,52 +30,119 @@
19751 * as they get called from within inline assembly.
19752 */
19753
19754 -#define ENTER CFI_STARTPROC ; \
19755 - GET_THREAD_INFO(%_ASM_BX)
19756 +#define ENTER CFI_STARTPROC
19757 #define EXIT ret ; \
19758 CFI_ENDPROC
19759
19760 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19761 +#define _DEST %_ASM_CX,%_ASM_BX
19762 +#else
19763 +#define _DEST %_ASM_CX
19764 +#endif
19765 +
19766 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19767 +#define __copyuser_seg gs;
19768 +#else
19769 +#define __copyuser_seg
19770 +#endif
19771 +
19772 .text
19773 ENTRY(__put_user_1)
19774 ENTER
19775 +
19776 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19777 + GET_THREAD_INFO(%_ASM_BX)
19778 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
19779 jae bad_put_user
19780 -1: movb %al,(%_ASM_CX)
19781 +
19782 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19783 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19784 + cmp %_ASM_BX,%_ASM_CX
19785 + jb 1234f
19786 + xor %ebx,%ebx
19787 +1234:
19788 +#endif
19789 +
19790 +#endif
19791 +
19792 +1: __copyuser_seg movb %al,(_DEST)
19793 xor %eax,%eax
19794 EXIT
19795 ENDPROC(__put_user_1)
19796
19797 ENTRY(__put_user_2)
19798 ENTER
19799 +
19800 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19801 + GET_THREAD_INFO(%_ASM_BX)
19802 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19803 sub $1,%_ASM_BX
19804 cmp %_ASM_BX,%_ASM_CX
19805 jae bad_put_user
19806 -2: movw %ax,(%_ASM_CX)
19807 +
19808 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19809 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19810 + cmp %_ASM_BX,%_ASM_CX
19811 + jb 1234f
19812 + xor %ebx,%ebx
19813 +1234:
19814 +#endif
19815 +
19816 +#endif
19817 +
19818 +2: __copyuser_seg movw %ax,(_DEST)
19819 xor %eax,%eax
19820 EXIT
19821 ENDPROC(__put_user_2)
19822
19823 ENTRY(__put_user_4)
19824 ENTER
19825 +
19826 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19827 + GET_THREAD_INFO(%_ASM_BX)
19828 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19829 sub $3,%_ASM_BX
19830 cmp %_ASM_BX,%_ASM_CX
19831 jae bad_put_user
19832 -3: movl %eax,(%_ASM_CX)
19833 +
19834 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19835 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19836 + cmp %_ASM_BX,%_ASM_CX
19837 + jb 1234f
19838 + xor %ebx,%ebx
19839 +1234:
19840 +#endif
19841 +
19842 +#endif
19843 +
19844 +3: __copyuser_seg movl %eax,(_DEST)
19845 xor %eax,%eax
19846 EXIT
19847 ENDPROC(__put_user_4)
19848
19849 ENTRY(__put_user_8)
19850 ENTER
19851 +
19852 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19853 + GET_THREAD_INFO(%_ASM_BX)
19854 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19855 sub $7,%_ASM_BX
19856 cmp %_ASM_BX,%_ASM_CX
19857 jae bad_put_user
19858 -4: mov %_ASM_AX,(%_ASM_CX)
19859 +
19860 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19861 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19862 + cmp %_ASM_BX,%_ASM_CX
19863 + jb 1234f
19864 + xor %ebx,%ebx
19865 +1234:
19866 +#endif
19867 +
19868 +#endif
19869 +
19870 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
19871 #ifdef CONFIG_X86_32
19872 -5: movl %edx,4(%_ASM_CX)
19873 +5: __copyuser_seg movl %edx,4(_DEST)
19874 #endif
19875 xor %eax,%eax
19876 EXIT
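
__put_user_* above handles the same below-the-shadow-base case as the earlier C fix-up, but without a separate add instruction: %_ASM_BX is preloaded with PAX_USER_SHADOW_BASE and zeroed only when the destination is already high enough, and the store always uses the two-register form (_DEST expands to %_ASM_CX,%_ASM_BX), so one store instruction serves both cases. The equivalent decision in C, again with a made-up base constant:

#include <stdio.h>

#define SHADOW_BASE_EXAMPLE 0x40000000UL   /* hypothetical, as in the earlier sketch */

static unsigned long put_user_dest(unsigned long uaddr)
{
        unsigned long base = SHADOW_BASE_EXAMPLE;

        if (uaddr >= base)      /* "jb 1234f" not taken ...                 */
                base = 0;       /* ... so "xor %ebx,%ebx" drops the offset  */
        return uaddr + base;    /* the store address is always uaddr + base */
}

int main(void)
{
        printf("%#lx\n", put_user_dest(0x400000UL));    /* below: shifted up   */
        printf("%#lx\n", put_user_dest(0x50000000UL));  /* above: offset is 0  */
        return 0;
}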
19877 diff -urNp linux-2.6.32.44/arch/x86/lib/usercopy_32.c linux-2.6.32.44/arch/x86/lib/usercopy_32.c
19878 --- linux-2.6.32.44/arch/x86/lib/usercopy_32.c 2011-03-27 14:31:47.000000000 -0400
19879 +++ linux-2.6.32.44/arch/x86/lib/usercopy_32.c 2011-04-23 21:12:28.000000000 -0400
19880 @@ -43,7 +43,7 @@ do { \
19881 __asm__ __volatile__( \
19882 " testl %1,%1\n" \
19883 " jz 2f\n" \
19884 - "0: lodsb\n" \
19885 + "0: "__copyuser_seg"lodsb\n" \
19886 " stosb\n" \
19887 " testb %%al,%%al\n" \
19888 " jz 1f\n" \
19889 @@ -128,10 +128,12 @@ do { \
19890 int __d0; \
19891 might_fault(); \
19892 __asm__ __volatile__( \
19893 + __COPYUSER_SET_ES \
19894 "0: rep; stosl\n" \
19895 " movl %2,%0\n" \
19896 "1: rep; stosb\n" \
19897 "2:\n" \
19898 + __COPYUSER_RESTORE_ES \
19899 ".section .fixup,\"ax\"\n" \
19900 "3: lea 0(%2,%0,4),%0\n" \
19901 " jmp 2b\n" \
19902 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
19903 might_fault();
19904
19905 __asm__ __volatile__(
19906 + __COPYUSER_SET_ES
19907 " testl %0, %0\n"
19908 " jz 3f\n"
19909 " andl %0,%%ecx\n"
19910 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
19911 " subl %%ecx,%0\n"
19912 " addl %0,%%eax\n"
19913 "1:\n"
19914 + __COPYUSER_RESTORE_ES
19915 ".section .fixup,\"ax\"\n"
19916 "2: xorl %%eax,%%eax\n"
19917 " jmp 1b\n"
19918 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
19919
19920 #ifdef CONFIG_X86_INTEL_USERCOPY
19921 static unsigned long
19922 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
19923 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
19924 {
19925 int d0, d1;
19926 __asm__ __volatile__(
19927 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
19928 " .align 2,0x90\n"
19929 "3: movl 0(%4), %%eax\n"
19930 "4: movl 4(%4), %%edx\n"
19931 - "5: movl %%eax, 0(%3)\n"
19932 - "6: movl %%edx, 4(%3)\n"
19933 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
19934 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
19935 "7: movl 8(%4), %%eax\n"
19936 "8: movl 12(%4),%%edx\n"
19937 - "9: movl %%eax, 8(%3)\n"
19938 - "10: movl %%edx, 12(%3)\n"
19939 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
19940 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
19941 "11: movl 16(%4), %%eax\n"
19942 "12: movl 20(%4), %%edx\n"
19943 - "13: movl %%eax, 16(%3)\n"
19944 - "14: movl %%edx, 20(%3)\n"
19945 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
19946 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
19947 "15: movl 24(%4), %%eax\n"
19948 "16: movl 28(%4), %%edx\n"
19949 - "17: movl %%eax, 24(%3)\n"
19950 - "18: movl %%edx, 28(%3)\n"
19951 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
19952 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
19953 "19: movl 32(%4), %%eax\n"
19954 "20: movl 36(%4), %%edx\n"
19955 - "21: movl %%eax, 32(%3)\n"
19956 - "22: movl %%edx, 36(%3)\n"
19957 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
19958 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
19959 "23: movl 40(%4), %%eax\n"
19960 "24: movl 44(%4), %%edx\n"
19961 - "25: movl %%eax, 40(%3)\n"
19962 - "26: movl %%edx, 44(%3)\n"
19963 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
19964 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
19965 "27: movl 48(%4), %%eax\n"
19966 "28: movl 52(%4), %%edx\n"
19967 - "29: movl %%eax, 48(%3)\n"
19968 - "30: movl %%edx, 52(%3)\n"
19969 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
19970 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
19971 "31: movl 56(%4), %%eax\n"
19972 "32: movl 60(%4), %%edx\n"
19973 - "33: movl %%eax, 56(%3)\n"
19974 - "34: movl %%edx, 60(%3)\n"
19975 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
19976 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
19977 " addl $-64, %0\n"
19978 " addl $64, %4\n"
19979 " addl $64, %3\n"
19980 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
19981 " shrl $2, %0\n"
19982 " andl $3, %%eax\n"
19983 " cld\n"
19984 + __COPYUSER_SET_ES
19985 "99: rep; movsl\n"
19986 "36: movl %%eax, %0\n"
19987 "37: rep; movsb\n"
19988 "100:\n"
19989 + __COPYUSER_RESTORE_ES
19990 + ".section .fixup,\"ax\"\n"
19991 + "101: lea 0(%%eax,%0,4),%0\n"
19992 + " jmp 100b\n"
19993 + ".previous\n"
19994 + ".section __ex_table,\"a\"\n"
19995 + " .align 4\n"
19996 + " .long 1b,100b\n"
19997 + " .long 2b,100b\n"
19998 + " .long 3b,100b\n"
19999 + " .long 4b,100b\n"
20000 + " .long 5b,100b\n"
20001 + " .long 6b,100b\n"
20002 + " .long 7b,100b\n"
20003 + " .long 8b,100b\n"
20004 + " .long 9b,100b\n"
20005 + " .long 10b,100b\n"
20006 + " .long 11b,100b\n"
20007 + " .long 12b,100b\n"
20008 + " .long 13b,100b\n"
20009 + " .long 14b,100b\n"
20010 + " .long 15b,100b\n"
20011 + " .long 16b,100b\n"
20012 + " .long 17b,100b\n"
20013 + " .long 18b,100b\n"
20014 + " .long 19b,100b\n"
20015 + " .long 20b,100b\n"
20016 + " .long 21b,100b\n"
20017 + " .long 22b,100b\n"
20018 + " .long 23b,100b\n"
20019 + " .long 24b,100b\n"
20020 + " .long 25b,100b\n"
20021 + " .long 26b,100b\n"
20022 + " .long 27b,100b\n"
20023 + " .long 28b,100b\n"
20024 + " .long 29b,100b\n"
20025 + " .long 30b,100b\n"
20026 + " .long 31b,100b\n"
20027 + " .long 32b,100b\n"
20028 + " .long 33b,100b\n"
20029 + " .long 34b,100b\n"
20030 + " .long 35b,100b\n"
20031 + " .long 36b,100b\n"
20032 + " .long 37b,100b\n"
20033 + " .long 99b,101b\n"
20034 + ".previous"
20035 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
20036 + : "1"(to), "2"(from), "0"(size)
20037 + : "eax", "edx", "memory");
20038 + return size;
20039 +}
20040 +
20041 +static unsigned long
20042 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
20043 +{
20044 + int d0, d1;
20045 + __asm__ __volatile__(
20046 + " .align 2,0x90\n"
20047 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
20048 + " cmpl $67, %0\n"
20049 + " jbe 3f\n"
20050 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
20051 + " .align 2,0x90\n"
20052 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
20053 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
20054 + "5: movl %%eax, 0(%3)\n"
20055 + "6: movl %%edx, 4(%3)\n"
20056 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
20057 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
20058 + "9: movl %%eax, 8(%3)\n"
20059 + "10: movl %%edx, 12(%3)\n"
20060 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
20061 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
20062 + "13: movl %%eax, 16(%3)\n"
20063 + "14: movl %%edx, 20(%3)\n"
20064 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
20065 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
20066 + "17: movl %%eax, 24(%3)\n"
20067 + "18: movl %%edx, 28(%3)\n"
20068 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
20069 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
20070 + "21: movl %%eax, 32(%3)\n"
20071 + "22: movl %%edx, 36(%3)\n"
20072 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
20073 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
20074 + "25: movl %%eax, 40(%3)\n"
20075 + "26: movl %%edx, 44(%3)\n"
20076 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
20077 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
20078 + "29: movl %%eax, 48(%3)\n"
20079 + "30: movl %%edx, 52(%3)\n"
20080 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
20081 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
20082 + "33: movl %%eax, 56(%3)\n"
20083 + "34: movl %%edx, 60(%3)\n"
20084 + " addl $-64, %0\n"
20085 + " addl $64, %4\n"
20086 + " addl $64, %3\n"
20087 + " cmpl $63, %0\n"
20088 + " ja 1b\n"
20089 + "35: movl %0, %%eax\n"
20090 + " shrl $2, %0\n"
20091 + " andl $3, %%eax\n"
20092 + " cld\n"
20093 + "99: rep; "__copyuser_seg" movsl\n"
20094 + "36: movl %%eax, %0\n"
20095 + "37: rep; "__copyuser_seg" movsb\n"
20096 + "100:\n"
20097 ".section .fixup,\"ax\"\n"
20098 "101: lea 0(%%eax,%0,4),%0\n"
20099 " jmp 100b\n"
20100 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
20101 int d0, d1;
20102 __asm__ __volatile__(
20103 " .align 2,0x90\n"
20104 - "0: movl 32(%4), %%eax\n"
20105 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20106 " cmpl $67, %0\n"
20107 " jbe 2f\n"
20108 - "1: movl 64(%4), %%eax\n"
20109 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20110 " .align 2,0x90\n"
20111 - "2: movl 0(%4), %%eax\n"
20112 - "21: movl 4(%4), %%edx\n"
20113 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20114 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20115 " movl %%eax, 0(%3)\n"
20116 " movl %%edx, 4(%3)\n"
20117 - "3: movl 8(%4), %%eax\n"
20118 - "31: movl 12(%4),%%edx\n"
20119 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20120 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20121 " movl %%eax, 8(%3)\n"
20122 " movl %%edx, 12(%3)\n"
20123 - "4: movl 16(%4), %%eax\n"
20124 - "41: movl 20(%4), %%edx\n"
20125 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20126 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20127 " movl %%eax, 16(%3)\n"
20128 " movl %%edx, 20(%3)\n"
20129 - "10: movl 24(%4), %%eax\n"
20130 - "51: movl 28(%4), %%edx\n"
20131 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20132 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20133 " movl %%eax, 24(%3)\n"
20134 " movl %%edx, 28(%3)\n"
20135 - "11: movl 32(%4), %%eax\n"
20136 - "61: movl 36(%4), %%edx\n"
20137 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20138 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20139 " movl %%eax, 32(%3)\n"
20140 " movl %%edx, 36(%3)\n"
20141 - "12: movl 40(%4), %%eax\n"
20142 - "71: movl 44(%4), %%edx\n"
20143 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20144 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20145 " movl %%eax, 40(%3)\n"
20146 " movl %%edx, 44(%3)\n"
20147 - "13: movl 48(%4), %%eax\n"
20148 - "81: movl 52(%4), %%edx\n"
20149 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20150 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20151 " movl %%eax, 48(%3)\n"
20152 " movl %%edx, 52(%3)\n"
20153 - "14: movl 56(%4), %%eax\n"
20154 - "91: movl 60(%4), %%edx\n"
20155 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20156 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20157 " movl %%eax, 56(%3)\n"
20158 " movl %%edx, 60(%3)\n"
20159 " addl $-64, %0\n"
20160 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
20161 " shrl $2, %0\n"
20162 " andl $3, %%eax\n"
20163 " cld\n"
20164 - "6: rep; movsl\n"
20165 + "6: rep; "__copyuser_seg" movsl\n"
20166 " movl %%eax,%0\n"
20167 - "7: rep; movsb\n"
20168 + "7: rep; "__copyuser_seg" movsb\n"
20169 "8:\n"
20170 ".section .fixup,\"ax\"\n"
20171 "9: lea 0(%%eax,%0,4),%0\n"
20172 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
20173
20174 __asm__ __volatile__(
20175 " .align 2,0x90\n"
20176 - "0: movl 32(%4), %%eax\n"
20177 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20178 " cmpl $67, %0\n"
20179 " jbe 2f\n"
20180 - "1: movl 64(%4), %%eax\n"
20181 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20182 " .align 2,0x90\n"
20183 - "2: movl 0(%4), %%eax\n"
20184 - "21: movl 4(%4), %%edx\n"
20185 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20186 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20187 " movnti %%eax, 0(%3)\n"
20188 " movnti %%edx, 4(%3)\n"
20189 - "3: movl 8(%4), %%eax\n"
20190 - "31: movl 12(%4),%%edx\n"
20191 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20192 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20193 " movnti %%eax, 8(%3)\n"
20194 " movnti %%edx, 12(%3)\n"
20195 - "4: movl 16(%4), %%eax\n"
20196 - "41: movl 20(%4), %%edx\n"
20197 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20198 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20199 " movnti %%eax, 16(%3)\n"
20200 " movnti %%edx, 20(%3)\n"
20201 - "10: movl 24(%4), %%eax\n"
20202 - "51: movl 28(%4), %%edx\n"
20203 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20204 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20205 " movnti %%eax, 24(%3)\n"
20206 " movnti %%edx, 28(%3)\n"
20207 - "11: movl 32(%4), %%eax\n"
20208 - "61: movl 36(%4), %%edx\n"
20209 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20210 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20211 " movnti %%eax, 32(%3)\n"
20212 " movnti %%edx, 36(%3)\n"
20213 - "12: movl 40(%4), %%eax\n"
20214 - "71: movl 44(%4), %%edx\n"
20215 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20216 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20217 " movnti %%eax, 40(%3)\n"
20218 " movnti %%edx, 44(%3)\n"
20219 - "13: movl 48(%4), %%eax\n"
20220 - "81: movl 52(%4), %%edx\n"
20221 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20222 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20223 " movnti %%eax, 48(%3)\n"
20224 " movnti %%edx, 52(%3)\n"
20225 - "14: movl 56(%4), %%eax\n"
20226 - "91: movl 60(%4), %%edx\n"
20227 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20228 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20229 " movnti %%eax, 56(%3)\n"
20230 " movnti %%edx, 60(%3)\n"
20231 " addl $-64, %0\n"
20232 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
20233 " shrl $2, %0\n"
20234 " andl $3, %%eax\n"
20235 " cld\n"
20236 - "6: rep; movsl\n"
20237 + "6: rep; "__copyuser_seg" movsl\n"
20238 " movl %%eax,%0\n"
20239 - "7: rep; movsb\n"
20240 + "7: rep; "__copyuser_seg" movsb\n"
20241 "8:\n"
20242 ".section .fixup,\"ax\"\n"
20243 "9: lea 0(%%eax,%0,4),%0\n"
20244 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
20245
20246 __asm__ __volatile__(
20247 " .align 2,0x90\n"
20248 - "0: movl 32(%4), %%eax\n"
20249 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20250 " cmpl $67, %0\n"
20251 " jbe 2f\n"
20252 - "1: movl 64(%4), %%eax\n"
20253 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20254 " .align 2,0x90\n"
20255 - "2: movl 0(%4), %%eax\n"
20256 - "21: movl 4(%4), %%edx\n"
20257 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20258 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20259 " movnti %%eax, 0(%3)\n"
20260 " movnti %%edx, 4(%3)\n"
20261 - "3: movl 8(%4), %%eax\n"
20262 - "31: movl 12(%4),%%edx\n"
20263 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20264 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20265 " movnti %%eax, 8(%3)\n"
20266 " movnti %%edx, 12(%3)\n"
20267 - "4: movl 16(%4), %%eax\n"
20268 - "41: movl 20(%4), %%edx\n"
20269 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20270 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20271 " movnti %%eax, 16(%3)\n"
20272 " movnti %%edx, 20(%3)\n"
20273 - "10: movl 24(%4), %%eax\n"
20274 - "51: movl 28(%4), %%edx\n"
20275 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20276 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20277 " movnti %%eax, 24(%3)\n"
20278 " movnti %%edx, 28(%3)\n"
20279 - "11: movl 32(%4), %%eax\n"
20280 - "61: movl 36(%4), %%edx\n"
20281 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20282 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20283 " movnti %%eax, 32(%3)\n"
20284 " movnti %%edx, 36(%3)\n"
20285 - "12: movl 40(%4), %%eax\n"
20286 - "71: movl 44(%4), %%edx\n"
20287 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20288 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20289 " movnti %%eax, 40(%3)\n"
20290 " movnti %%edx, 44(%3)\n"
20291 - "13: movl 48(%4), %%eax\n"
20292 - "81: movl 52(%4), %%edx\n"
20293 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20294 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20295 " movnti %%eax, 48(%3)\n"
20296 " movnti %%edx, 52(%3)\n"
20297 - "14: movl 56(%4), %%eax\n"
20298 - "91: movl 60(%4), %%edx\n"
20299 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20300 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20301 " movnti %%eax, 56(%3)\n"
20302 " movnti %%edx, 60(%3)\n"
20303 " addl $-64, %0\n"
20304 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
20305 " shrl $2, %0\n"
20306 " andl $3, %%eax\n"
20307 " cld\n"
20308 - "6: rep; movsl\n"
20309 + "6: rep; "__copyuser_seg" movsl\n"
20310 " movl %%eax,%0\n"
20311 - "7: rep; movsb\n"
20312 + "7: rep; "__copyuser_seg" movsb\n"
20313 "8:\n"
20314 ".section .fixup,\"ax\"\n"
20315 "9: lea 0(%%eax,%0,4),%0\n"
20316 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
20317 */
20318 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
20319 unsigned long size);
20320 -unsigned long __copy_user_intel(void __user *to, const void *from,
20321 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
20322 + unsigned long size);
20323 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
20324 unsigned long size);
20325 unsigned long __copy_user_zeroing_intel_nocache(void *to,
20326 const void __user *from, unsigned long size);
20327 #endif /* CONFIG_X86_INTEL_USERCOPY */
20328
20329 /* Generic arbitrary sized copy. */
20330 -#define __copy_user(to, from, size) \
20331 +#define __copy_user(to, from, size, prefix, set, restore) \
20332 do { \
20333 int __d0, __d1, __d2; \
20334 __asm__ __volatile__( \
20335 + set \
20336 " cmp $7,%0\n" \
20337 " jbe 1f\n" \
20338 " movl %1,%0\n" \
20339 " negl %0\n" \
20340 " andl $7,%0\n" \
20341 " subl %0,%3\n" \
20342 - "4: rep; movsb\n" \
20343 + "4: rep; "prefix"movsb\n" \
20344 " movl %3,%0\n" \
20345 " shrl $2,%0\n" \
20346 " andl $3,%3\n" \
20347 " .align 2,0x90\n" \
20348 - "0: rep; movsl\n" \
20349 + "0: rep; "prefix"movsl\n" \
20350 " movl %3,%0\n" \
20351 - "1: rep; movsb\n" \
20352 + "1: rep; "prefix"movsb\n" \
20353 "2:\n" \
20354 + restore \
20355 ".section .fixup,\"ax\"\n" \
20356 "5: addl %3,%0\n" \
20357 " jmp 2b\n" \
20358 @@ -682,14 +799,14 @@ do { \
20359 " negl %0\n" \
20360 " andl $7,%0\n" \
20361 " subl %0,%3\n" \
20362 - "4: rep; movsb\n" \
20363 + "4: rep; "__copyuser_seg"movsb\n" \
20364 " movl %3,%0\n" \
20365 " shrl $2,%0\n" \
20366 " andl $3,%3\n" \
20367 " .align 2,0x90\n" \
20368 - "0: rep; movsl\n" \
20369 + "0: rep; "__copyuser_seg"movsl\n" \
20370 " movl %3,%0\n" \
20371 - "1: rep; movsb\n" \
20372 + "1: rep; "__copyuser_seg"movsb\n" \
20373 "2:\n" \
20374 ".section .fixup,\"ax\"\n" \
20375 "5: addl %3,%0\n" \
20376 @@ -775,9 +892,9 @@ survive:
20377 }
20378 #endif
20379 if (movsl_is_ok(to, from, n))
20380 - __copy_user(to, from, n);
20381 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
20382 else
20383 - n = __copy_user_intel(to, from, n);
20384 + n = __generic_copy_to_user_intel(to, from, n);
20385 return n;
20386 }
20387 EXPORT_SYMBOL(__copy_to_user_ll);
20388 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
20389 unsigned long n)
20390 {
20391 if (movsl_is_ok(to, from, n))
20392 - __copy_user(to, from, n);
20393 + __copy_user(to, from, n, __copyuser_seg, "", "");
20394 else
20395 - n = __copy_user_intel((void __user *)to,
20396 - (const void *)from, n);
20397 + n = __generic_copy_from_user_intel(to, from, n);
20398 return n;
20399 }
20400 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
20401 @@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocach
20402 if (n > 64 && cpu_has_xmm2)
20403 n = __copy_user_intel_nocache(to, from, n);
20404 else
20405 - __copy_user(to, from, n);
20406 + __copy_user(to, from, n, __copyuser_seg, "", "");
20407 #else
20408 - __copy_user(to, from, n);
20409 + __copy_user(to, from, n, __copyuser_seg, "", "");
20410 #endif
20411 return n;
20412 }
20413 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
20414
20415 -/**
20416 - * copy_to_user: - Copy a block of data into user space.
20417 - * @to: Destination address, in user space.
20418 - * @from: Source address, in kernel space.
20419 - * @n: Number of bytes to copy.
20420 - *
20421 - * Context: User context only. This function may sleep.
20422 - *
20423 - * Copy data from kernel space to user space.
20424 - *
20425 - * Returns number of bytes that could not be copied.
20426 - * On success, this will be zero.
20427 - */
20428 -unsigned long
20429 -copy_to_user(void __user *to, const void *from, unsigned long n)
20430 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20431 +void __set_fs(mm_segment_t x)
20432 {
20433 - if (access_ok(VERIFY_WRITE, to, n))
20434 - n = __copy_to_user(to, from, n);
20435 - return n;
20436 + switch (x.seg) {
20437 + case 0:
20438 + loadsegment(gs, 0);
20439 + break;
20440 + case TASK_SIZE_MAX:
20441 + loadsegment(gs, __USER_DS);
20442 + break;
20443 + case -1UL:
20444 + loadsegment(gs, __KERNEL_DS);
20445 + break;
20446 + default:
20447 + BUG();
20448 + }
20449 + return;
20450 }
20451 -EXPORT_SYMBOL(copy_to_user);
20452 +EXPORT_SYMBOL(__set_fs);
20453
20454 -/**
20455 - * copy_from_user: - Copy a block of data from user space.
20456 - * @to: Destination address, in kernel space.
20457 - * @from: Source address, in user space.
20458 - * @n: Number of bytes to copy.
20459 - *
20460 - * Context: User context only. This function may sleep.
20461 - *
20462 - * Copy data from user space to kernel space.
20463 - *
20464 - * Returns number of bytes that could not be copied.
20465 - * On success, this will be zero.
20466 - *
20467 - * If some data could not be copied, this function will pad the copied
20468 - * data to the requested size using zero bytes.
20469 - */
20470 -unsigned long
20471 -copy_from_user(void *to, const void __user *from, unsigned long n)
20472 +void set_fs(mm_segment_t x)
20473 {
20474 - if (access_ok(VERIFY_READ, from, n))
20475 - n = __copy_from_user(to, from, n);
20476 - else
20477 - memset(to, 0, n);
20478 - return n;
20479 + current_thread_info()->addr_limit = x;
20480 + __set_fs(x);
20481 }
20482 -EXPORT_SYMBOL(copy_from_user);
20483 +EXPORT_SYMBOL(set_fs);
20484 +#endif
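
The usercopy_32.c rework turns the copy loops into templates: __copy_user() now takes a string-literal prefix (plus optional set/restore fragments) that the preprocessor concatenates straight into the asm text, so the same macro body emits either plain movsb/movsl or the segment-prefixed form selected by __copyuser_seg. A compilable x86 illustration of that string-pasting mechanism, using an empty prefix and a bare rep movsb; it is not the kernel macro, which also handles alignment and fault fixups:

#include <stddef.h>
#include <stdio.h>

/* prefix must be a string literal; adjacent-string concatenation splices it
 * into the instruction, exactly how the kernel macro inserts a segment
 * override in front of movsb/movsl */
#define COPY_BYTES(dst, src, n, prefix)                           \
do {                                                              \
        size_t __cnt;                                             \
        void *__d;                                                \
        const void *__s;                                          \
        __asm__ __volatile__(                                     \
                "rep; " prefix "movsb"                            \
                : "=&c" (__cnt), "=&D" (__d), "=&S" (__s)         \
                : "0" (n), "1" (dst), "2" (src)                   \
                : "memory");                                      \
} while (0)

int main(void)
{
        char src[] = "hello";
        char dst[sizeof(src)];

        COPY_BYTES(dst, src, sizeof(src), "");   /* empty prefix: plain rep movsb */
        printf("%s\n", dst);
        return 0;
}

Passing "" leaves the instruction untouched; in the patch the UDEREF configuration passes the segment-override string instead, so one macro covers both builds.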
20485 diff -urNp linux-2.6.32.44/arch/x86/lib/usercopy_64.c linux-2.6.32.44/arch/x86/lib/usercopy_64.c
20486 --- linux-2.6.32.44/arch/x86/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
20487 +++ linux-2.6.32.44/arch/x86/lib/usercopy_64.c 2011-05-04 17:56:20.000000000 -0400
20488 @@ -42,6 +42,12 @@ long
20489 __strncpy_from_user(char *dst, const char __user *src, long count)
20490 {
20491 long res;
20492 +
20493 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20494 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
20495 + src += PAX_USER_SHADOW_BASE;
20496 +#endif
20497 +
20498 __do_strncpy_from_user(dst, src, count, res);
20499 return res;
20500 }
20501 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
20502 {
20503 long __d0;
20504 might_fault();
20505 +
20506 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20507 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
20508 + addr += PAX_USER_SHADOW_BASE;
20509 +#endif
20510 +
20511 /* no memory constraint because it doesn't change any memory gcc knows
20512 about */
20513 asm volatile(
20514 @@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
20515
20516 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
20517 {
20518 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
20519 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
20520 +
20521 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20522 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
20523 + to += PAX_USER_SHADOW_BASE;
20524 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
20525 + from += PAX_USER_SHADOW_BASE;
20526 +#endif
20527 +
20528 return copy_user_generic((__force void *)to, (__force void *)from, len);
20529 - }
20530 - return len;
20531 + }
20532 + return len;
20533 }
20534 EXPORT_SYMBOL(copy_in_user);
20535
20536 diff -urNp linux-2.6.32.44/arch/x86/Makefile linux-2.6.32.44/arch/x86/Makefile
20537 --- linux-2.6.32.44/arch/x86/Makefile 2011-03-27 14:31:47.000000000 -0400
20538 +++ linux-2.6.32.44/arch/x86/Makefile 2011-07-19 18:16:02.000000000 -0400
20539 @@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
20540 else
20541 BITS := 64
20542 UTS_MACHINE := x86_64
20543 + biarch := $(call cc-option,-m64)
20544 CHECKFLAGS += -D__x86_64__ -m64
20545
20546 KBUILD_AFLAGS += -m64
20547 @@ -189,3 +190,12 @@ define archhelp
20548 echo ' FDARGS="..." arguments for the booted kernel'
20549 echo ' FDINITRD=file initrd for the booted kernel'
20550 endef
20551 +
20552 +define OLD_LD
20553 +
20554 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
20555 +*** Please upgrade your binutils to 2.18 or newer
20556 +endef
20557 +
20558 +archprepare:
20559 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
20560 diff -urNp linux-2.6.32.44/arch/x86/mm/extable.c linux-2.6.32.44/arch/x86/mm/extable.c
20561 --- linux-2.6.32.44/arch/x86/mm/extable.c 2011-03-27 14:31:47.000000000 -0400
20562 +++ linux-2.6.32.44/arch/x86/mm/extable.c 2011-04-17 15:56:46.000000000 -0400
20563 @@ -1,14 +1,71 @@
20564 #include <linux/module.h>
20565 #include <linux/spinlock.h>
20566 +#include <linux/sort.h>
20567 #include <asm/uaccess.h>
20568 +#include <asm/pgtable.h>
20569
20570 +/*
20571 + * The exception table needs to be sorted so that the binary
20572 + * search that we use to find entries in it works properly.
20573 + * This is used both for the kernel exception table and for
20574 + * the exception tables of modules that get loaded.
20575 + */
20576 +static int cmp_ex(const void *a, const void *b)
20577 +{
20578 + const struct exception_table_entry *x = a, *y = b;
20579 +
20580 + /* avoid overflow */
20581 + if (x->insn > y->insn)
20582 + return 1;
20583 + if (x->insn < y->insn)
20584 + return -1;
20585 + return 0;
20586 +}
20587 +
20588 +static void swap_ex(void *a, void *b, int size)
20589 +{
20590 + struct exception_table_entry t, *x = a, *y = b;
20591 +
20592 + t = *x;
20593 +
20594 + pax_open_kernel();
20595 + *x = *y;
20596 + *y = t;
20597 + pax_close_kernel();
20598 +}
20599 +
20600 +void sort_extable(struct exception_table_entry *start,
20601 + struct exception_table_entry *finish)
20602 +{
20603 + sort(start, finish - start, sizeof(struct exception_table_entry),
20604 + cmp_ex, swap_ex);
20605 +}
20606 +
20607 +#ifdef CONFIG_MODULES
20608 +/*
20609 + * If the exception table is sorted, any referring to the module init
20610 + * will be at the beginning or the end.
20611 + */
20612 +void trim_init_extable(struct module *m)
20613 +{
20614 + /*trim the beginning*/
20615 + while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
20616 + m->extable++;
20617 + m->num_exentries--;
20618 + }
20619 + /*trim the end*/
20620 + while (m->num_exentries &&
20621 + within_module_init(m->extable[m->num_exentries-1].insn, m))
20622 + m->num_exentries--;
20623 +}
20624 +#endif /* CONFIG_MODULES */
20625
20626 int fixup_exception(struct pt_regs *regs)
20627 {
20628 const struct exception_table_entry *fixup;
20629
20630 #ifdef CONFIG_PNPBIOS
20631 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
20632 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
20633 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
20634 extern u32 pnp_bios_is_utter_crap;
20635 pnp_bios_is_utter_crap = 1;
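
The extable.c additions reinstate an arch-specific sort_extable(): cmp_ex() orders entries by faulting address and deliberately compares rather than subtracts, since the difference of two large unsigned addresses truncated to the int return value could report the wrong sign, and swap_ex() brackets the swap with pax_open_kernel()/pax_close_kernel() because the table may be write-protected under KERNEXEC. The comparator idea in ordinary userspace C, sorting a stand-in table with qsort():

#include <stdio.h>
#include <stdlib.h>

struct entry_sketch {                   /* stand-in for exception_table_entry */
        unsigned long insn;             /* address of the faulting instruction */
        unsigned long fixup;            /* where execution resumes after the fault */
};

/* compare instead of subtracting so large addresses cannot overflow int */
static int cmp_entry(const void *a, const void *b)
{
        const struct entry_sketch *x = a, *y = b;

        if (x->insn > y->insn)
                return 1;
        if (x->insn < y->insn)
                return -1;
        return 0;
}

int main(void)
{
        struct entry_sketch tab[] = {
                { 0x3000UL, 0x9000UL },
                { 0x1000UL, 0x9100UL },
                { 0x2000UL, 0x9200UL },
        };
        size_t i, n = sizeof(tab) / sizeof(tab[0]);

        qsort(tab, n, sizeof(tab[0]), cmp_entry);
        for (i = 0; i < n; i++)
                printf("%#lx -> %#lx\n", tab[i].insn, tab[i].fixup);
        return 0;
}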
20636 diff -urNp linux-2.6.32.44/arch/x86/mm/fault.c linux-2.6.32.44/arch/x86/mm/fault.c
20637 --- linux-2.6.32.44/arch/x86/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
20638 +++ linux-2.6.32.44/arch/x86/mm/fault.c 2011-06-06 17:35:16.000000000 -0400
20639 @@ -11,10 +11,19 @@
20640 #include <linux/kprobes.h> /* __kprobes, ... */
20641 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
20642 #include <linux/perf_event.h> /* perf_sw_event */
20643 +#include <linux/unistd.h>
20644 +#include <linux/compiler.h>
20645
20646 #include <asm/traps.h> /* dotraplinkage, ... */
20647 #include <asm/pgalloc.h> /* pgd_*(), ... */
20648 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
20649 +#include <asm/vsyscall.h>
20650 +#include <asm/tlbflush.h>
20651 +
20652 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20653 +#include <asm/stacktrace.h>
20654 +#include "../kernel/dumpstack.h"
20655 +#endif
20656
20657 /*
20658 * Page fault error code bits:
20659 @@ -51,7 +60,7 @@ static inline int notify_page_fault(stru
20660 int ret = 0;
20661
20662 /* kprobe_running() needs smp_processor_id() */
20663 - if (kprobes_built_in() && !user_mode_vm(regs)) {
20664 + if (kprobes_built_in() && !user_mode(regs)) {
20665 preempt_disable();
20666 if (kprobe_running() && kprobe_fault_handler(regs, 14))
20667 ret = 1;
20668 @@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *re
20669 return !instr_lo || (instr_lo>>1) == 1;
20670 case 0x00:
20671 /* Prefetch instruction is 0x0F0D or 0x0F18 */
20672 - if (probe_kernel_address(instr, opcode))
20673 + if (user_mode(regs)) {
20674 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
20675 + return 0;
20676 + } else if (probe_kernel_address(instr, opcode))
20677 return 0;
20678
20679 *prefetch = (instr_lo == 0xF) &&
20680 @@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsign
20681 while (instr < max_instr) {
20682 unsigned char opcode;
20683
20684 - if (probe_kernel_address(instr, opcode))
20685 + if (user_mode(regs)) {
20686 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
20687 + break;
20688 + } else if (probe_kernel_address(instr, opcode))
20689 break;
20690
20691 instr++;
20692 @@ -172,6 +187,30 @@ force_sig_info_fault(int si_signo, int s
20693 force_sig_info(si_signo, &info, tsk);
20694 }
20695
20696 +#ifdef CONFIG_PAX_EMUTRAMP
20697 +static int pax_handle_fetch_fault(struct pt_regs *regs);
20698 +#endif
20699 +
20700 +#ifdef CONFIG_PAX_PAGEEXEC
20701 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
20702 +{
20703 + pgd_t *pgd;
20704 + pud_t *pud;
20705 + pmd_t *pmd;
20706 +
20707 + pgd = pgd_offset(mm, address);
20708 + if (!pgd_present(*pgd))
20709 + return NULL;
20710 + pud = pud_offset(pgd, address);
20711 + if (!pud_present(*pud))
20712 + return NULL;
20713 + pmd = pmd_offset(pud, address);
20714 + if (!pmd_present(*pmd))
20715 + return NULL;
20716 + return pmd;
20717 +}
20718 +#endif
20719 +
20720 DEFINE_SPINLOCK(pgd_lock);
20721 LIST_HEAD(pgd_list);
20722
20723 @@ -224,11 +263,24 @@ void vmalloc_sync_all(void)
20724 address += PMD_SIZE) {
20725
20726 unsigned long flags;
20727 +
20728 +#ifdef CONFIG_PAX_PER_CPU_PGD
20729 + unsigned long cpu;
20730 +#else
20731 struct page *page;
20732 +#endif
20733
20734 spin_lock_irqsave(&pgd_lock, flags);
20735 +
20736 +#ifdef CONFIG_PAX_PER_CPU_PGD
20737 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20738 + pgd_t *pgd = get_cpu_pgd(cpu);
20739 +#else
20740 list_for_each_entry(page, &pgd_list, lru) {
20741 - if (!vmalloc_sync_one(page_address(page), address))
20742 + pgd_t *pgd = page_address(page);
20743 +#endif
20744 +
20745 + if (!vmalloc_sync_one(pgd, address))
20746 break;
20747 }
20748 spin_unlock_irqrestore(&pgd_lock, flags);
20749 @@ -258,6 +310,11 @@ static noinline int vmalloc_fault(unsign
20750 * an interrupt in the middle of a task switch..
20751 */
20752 pgd_paddr = read_cr3();
20753 +
20754 +#ifdef CONFIG_PAX_PER_CPU_PGD
20755 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
20756 +#endif
20757 +
20758 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
20759 if (!pmd_k)
20760 return -1;
20761 @@ -332,15 +389,27 @@ void vmalloc_sync_all(void)
20762
20763 const pgd_t *pgd_ref = pgd_offset_k(address);
20764 unsigned long flags;
20765 +
20766 +#ifdef CONFIG_PAX_PER_CPU_PGD
20767 + unsigned long cpu;
20768 +#else
20769 struct page *page;
20770 +#endif
20771
20772 if (pgd_none(*pgd_ref))
20773 continue;
20774
20775 spin_lock_irqsave(&pgd_lock, flags);
20776 +
20777 +#ifdef CONFIG_PAX_PER_CPU_PGD
20778 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20779 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
20780 +#else
20781 list_for_each_entry(page, &pgd_list, lru) {
20782 pgd_t *pgd;
20783 pgd = (pgd_t *)page_address(page) + pgd_index(address);
20784 +#endif
20785 +
20786 if (pgd_none(*pgd))
20787 set_pgd(pgd, *pgd_ref);
20788 else
20789 @@ -373,7 +442,14 @@ static noinline int vmalloc_fault(unsign
20790 * happen within a race in page table update. In the later
20791 * case just flush:
20792 */
20793 +
20794 +#ifdef CONFIG_PAX_PER_CPU_PGD
20795 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
20796 + pgd = pgd_offset_cpu(smp_processor_id(), address);
20797 +#else
20798 pgd = pgd_offset(current->active_mm, address);
20799 +#endif
20800 +
20801 pgd_ref = pgd_offset_k(address);
20802 if (pgd_none(*pgd_ref))
20803 return -1;
20804 @@ -535,7 +611,7 @@ static int is_errata93(struct pt_regs *r
20805 static int is_errata100(struct pt_regs *regs, unsigned long address)
20806 {
20807 #ifdef CONFIG_X86_64
20808 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
20809 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
20810 return 1;
20811 #endif
20812 return 0;
20813 @@ -562,7 +638,7 @@ static int is_f00f_bug(struct pt_regs *r
20814 }
20815
20816 static const char nx_warning[] = KERN_CRIT
20817 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
20818 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
20819
20820 static void
20821 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
20822 @@ -571,15 +647,26 @@ show_fault_oops(struct pt_regs *regs, un
20823 if (!oops_may_print())
20824 return;
20825
20826 - if (error_code & PF_INSTR) {
20827 + if (nx_enabled && (error_code & PF_INSTR)) {
20828 unsigned int level;
20829
20830 pte_t *pte = lookup_address(address, &level);
20831
20832 if (pte && pte_present(*pte) && !pte_exec(*pte))
20833 - printk(nx_warning, current_uid());
20834 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
20835 }
20836
20837 +#ifdef CONFIG_PAX_KERNEXEC
20838 + if (init_mm.start_code <= address && address < init_mm.end_code) {
20839 + if (current->signal->curr_ip)
20840 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
20841 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
20842 + else
20843 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
20844 + current->comm, task_pid_nr(current), current_uid(), current_euid());
20845 + }
20846 +#endif
20847 +
20848 printk(KERN_ALERT "BUG: unable to handle kernel ");
20849 if (address < PAGE_SIZE)
20850 printk(KERN_CONT "NULL pointer dereference");
20851 @@ -704,6 +791,68 @@ __bad_area_nosemaphore(struct pt_regs *r
20852 unsigned long address, int si_code)
20853 {
20854 struct task_struct *tsk = current;
20855 + struct mm_struct *mm = tsk->mm;
20856 +
20857 +#ifdef CONFIG_X86_64
20858 + if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
20859 + if (regs->ip == (unsigned long)vgettimeofday) {
20860 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
20861 + return;
20862 + } else if (regs->ip == (unsigned long)vtime) {
20863 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
20864 + return;
20865 + } else if (regs->ip == (unsigned long)vgetcpu) {
20866 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
20867 + return;
20868 + }
20869 + }
20870 +#endif
20871 +
20872 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20873 + if (mm && (error_code & PF_USER)) {
20874 + unsigned long ip = regs->ip;
20875 +
20876 + if (v8086_mode(regs))
20877 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
20878 +
20879 + /*
20880 + * It's possible to have interrupts off here:
20881 + */
20882 + local_irq_enable();
20883 +
20884 +#ifdef CONFIG_PAX_PAGEEXEC
20885 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
20886 + ((nx_enabled && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
20887 +
20888 +#ifdef CONFIG_PAX_EMUTRAMP
20889 + switch (pax_handle_fetch_fault(regs)) {
20890 + case 2:
20891 + return;
20892 + }
20893 +#endif
20894 +
20895 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
20896 + do_group_exit(SIGKILL);
20897 + }
20898 +#endif
20899 +
20900 +#ifdef CONFIG_PAX_SEGMEXEC
20901 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
20902 +
20903 +#ifdef CONFIG_PAX_EMUTRAMP
20904 + switch (pax_handle_fetch_fault(regs)) {
20905 + case 2:
20906 + return;
20907 + }
20908 +#endif
20909 +
20910 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
20911 + do_group_exit(SIGKILL);
20912 + }
20913 +#endif
20914 +
20915 + }
20916 +#endif
20917
20918 /* User mode accesses just cause a SIGSEGV */
20919 if (error_code & PF_USER) {
20920 @@ -857,6 +1006,99 @@ static int spurious_fault_check(unsigned
20921 return 1;
20922 }
20923
20924 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20925 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
20926 +{
20927 + pte_t *pte;
20928 + pmd_t *pmd;
20929 + spinlock_t *ptl;
20930 + unsigned char pte_mask;
20931 +
20932 + if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
20933 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
20934 + return 0;
20935 +
20936 + /* PaX: it's our fault, let's handle it if we can */
20937 +
20938 + /* PaX: take a look at read faults before acquiring any locks */
20939 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
20940 + /* instruction fetch attempt from a protected page in user mode */
20941 + up_read(&mm->mmap_sem);
20942 +
20943 +#ifdef CONFIG_PAX_EMUTRAMP
20944 + switch (pax_handle_fetch_fault(regs)) {
20945 + case 2:
20946 + return 1;
20947 + }
20948 +#endif
20949 +
20950 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
20951 + do_group_exit(SIGKILL);
20952 + }
20953 +
20954 + pmd = pax_get_pmd(mm, address);
20955 + if (unlikely(!pmd))
20956 + return 0;
20957 +
20958 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
20959 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
20960 + pte_unmap_unlock(pte, ptl);
20961 + return 0;
20962 + }
20963 +
20964 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
20965 + /* write attempt to a protected page in user mode */
20966 + pte_unmap_unlock(pte, ptl);
20967 + return 0;
20968 + }
20969 +
20970 +#ifdef CONFIG_SMP
20971 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
20972 +#else
20973 + if (likely(address > get_limit(regs->cs)))
20974 +#endif
20975 + {
20976 + set_pte(pte, pte_mkread(*pte));
20977 + __flush_tlb_one(address);
20978 + pte_unmap_unlock(pte, ptl);
20979 + up_read(&mm->mmap_sem);
20980 + return 1;
20981 + }
20982 +
20983 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
20984 +
20985 + /*
20986 + * PaX: fill DTLB with user rights and retry
20987 + */
20988 + __asm__ __volatile__ (
20989 + "orb %2,(%1)\n"
20990 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
20991 +/*
20992 + * PaX: let this uncommented 'invlpg' remind us of the behaviour of Intel's
20993 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
20994 + * page fault when examined during a TLB load attempt. this is true not only
20995 + * for PTEs holding a non-present entry but also present entries that will
20996 + * raise a page fault (such as those set up by PaX, or the copy-on-write
20997 + * mechanism). in effect it means that we do *not* need to flush the TLBs
20998 + * for our target pages since their PTEs are simply not in the TLBs at all.
20999 +
21000 + * the best thing in omitting it is that we gain around 15-20% speed in the
21001 + * fast path of the page fault handler and can get rid of tracing since we
21002 + * can no longer flush unintended entries.
21003 + */
21004 + "invlpg (%0)\n"
21005 +#endif
21006 + __copyuser_seg"testb $0,(%0)\n"
21007 + "xorb %3,(%1)\n"
21008 + :
21009 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
21010 + : "memory", "cc");
21011 + pte_unmap_unlock(pte, ptl);
21012 + up_read(&mm->mmap_sem);
21013 + return 1;
21014 +}
21015 +#endif
21016 +
21017 /*
21018 * Handle a spurious fault caused by a stale TLB entry.
21019 *
21020 @@ -923,6 +1165,9 @@ int show_unhandled_signals = 1;
21021 static inline int
21022 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
21023 {
21024 + if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
21025 + return 1;
21026 +
21027 if (write) {
21028 /* write, present and write, not present: */
21029 if (unlikely(!(vma->vm_flags & VM_WRITE)))
21030 @@ -956,17 +1201,31 @@ do_page_fault(struct pt_regs *regs, unsi
21031 {
21032 struct vm_area_struct *vma;
21033 struct task_struct *tsk;
21034 - unsigned long address;
21035 struct mm_struct *mm;
21036 int write;
21037 int fault;
21038
21039 + /* Get the faulting address: */
21040 + unsigned long address = read_cr2();
21041 +
21042 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21043 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
21044 + if (!search_exception_tables(regs->ip)) {
21045 + bad_area_nosemaphore(regs, error_code, address);
21046 + return;
21047 + }
21048 + if (address < PAX_USER_SHADOW_BASE) {
21049 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
21050 + printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
21051 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
21052 + } else
21053 + address -= PAX_USER_SHADOW_BASE;
21054 + }
21055 +#endif
21056 +
21057 tsk = current;
21058 mm = tsk->mm;
21059
21060 - /* Get the faulting address: */
21061 - address = read_cr2();
21062 -
21063 /*
21064 * Detect and handle instructions that would cause a page fault for
21065 * both a tracked kernel page and a userspace page.
21066 @@ -1026,7 +1285,7 @@ do_page_fault(struct pt_regs *regs, unsi
21067 * User-mode registers count as a user access even for any
21068 * potential system fault or CPU buglet:
21069 */
21070 - if (user_mode_vm(regs)) {
21071 + if (user_mode(regs)) {
21072 local_irq_enable();
21073 error_code |= PF_USER;
21074 } else {
21075 @@ -1080,6 +1339,11 @@ do_page_fault(struct pt_regs *regs, unsi
21076 might_sleep();
21077 }
21078
21079 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21080 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
21081 + return;
21082 +#endif
21083 +
21084 vma = find_vma(mm, address);
21085 if (unlikely(!vma)) {
21086 bad_area(regs, error_code, address);
21087 @@ -1091,18 +1355,24 @@ do_page_fault(struct pt_regs *regs, unsi
21088 bad_area(regs, error_code, address);
21089 return;
21090 }
21091 - if (error_code & PF_USER) {
21092 - /*
21093 - * Accessing the stack below %sp is always a bug.
21094 - * The large cushion allows instructions like enter
21095 - * and pusha to work. ("enter $65535, $31" pushes
21096 - * 32 pointers and then decrements %sp by 65535.)
21097 - */
21098 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
21099 - bad_area(regs, error_code, address);
21100 - return;
21101 - }
21102 + /*
21103 + * Accessing the stack below %sp is always a bug.
21104 + * The large cushion allows instructions like enter
21105 + * and pusha to work. ("enter $65535, $31" pushes
21106 + * 32 pointers and then decrements %sp by 65535.)
21107 + */
21108 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
21109 + bad_area(regs, error_code, address);
21110 + return;
21111 + }
21112 +
21113 +#ifdef CONFIG_PAX_SEGMEXEC
21114 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
21115 + bad_area(regs, error_code, address);
21116 + return;
21117 }
21118 +#endif
21119 +
21120 if (unlikely(expand_stack(vma, address))) {
21121 bad_area(regs, error_code, address);
21122 return;
21123 @@ -1146,3 +1416,199 @@ good_area:
21124
21125 up_read(&mm->mmap_sem);
21126 }
21127 +
21128 +#ifdef CONFIG_PAX_EMUTRAMP
21129 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
21130 +{
21131 + int err;
21132 +
21133 + do { /* PaX: gcc trampoline emulation #1 */
21134 + unsigned char mov1, mov2;
21135 + unsigned short jmp;
21136 + unsigned int addr1, addr2;
21137 +
21138 +#ifdef CONFIG_X86_64
21139 + if ((regs->ip + 11) >> 32)
21140 + break;
21141 +#endif
21142 +
21143 + err = get_user(mov1, (unsigned char __user *)regs->ip);
21144 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21145 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
21146 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21147 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
21148 +
21149 + if (err)
21150 + break;
21151 +
21152 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
21153 + regs->cx = addr1;
21154 + regs->ax = addr2;
21155 + regs->ip = addr2;
21156 + return 2;
21157 + }
21158 + } while (0);
21159 +
21160 + do { /* PaX: gcc trampoline emulation #2 */
21161 + unsigned char mov, jmp;
21162 + unsigned int addr1, addr2;
21163 +
21164 +#ifdef CONFIG_X86_64
21165 + if ((regs->ip + 9) >> 32)
21166 + break;
21167 +#endif
21168 +
21169 + err = get_user(mov, (unsigned char __user *)regs->ip);
21170 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21171 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
21172 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21173 +
21174 + if (err)
21175 + break;
21176 +
21177 + if (mov == 0xB9 && jmp == 0xE9) {
21178 + regs->cx = addr1;
21179 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
21180 + return 2;
21181 + }
21182 + } while (0);
21183 +
21184 + return 1; /* PaX in action */
21185 +}
21186 +
21187 +#ifdef CONFIG_X86_64
21188 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
21189 +{
21190 + int err;
21191 +
21192 + do { /* PaX: gcc trampoline emulation #1 */
21193 + unsigned short mov1, mov2, jmp1;
21194 + unsigned char jmp2;
21195 + unsigned int addr1;
21196 + unsigned long addr2;
21197 +
21198 + err = get_user(mov1, (unsigned short __user *)regs->ip);
21199 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
21200 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
21201 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
21202 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
21203 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
21204 +
21205 + if (err)
21206 + break;
21207 +
21208 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21209 + regs->r11 = addr1;
21210 + regs->r10 = addr2;
21211 + regs->ip = addr1;
21212 + return 2;
21213 + }
21214 + } while (0);
21215 +
21216 + do { /* PaX: gcc trampoline emulation #2 */
21217 + unsigned short mov1, mov2, jmp1;
21218 + unsigned char jmp2;
21219 + unsigned long addr1, addr2;
21220 +
21221 + err = get_user(mov1, (unsigned short __user *)regs->ip);
21222 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
21223 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
21224 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
21225 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
21226 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
21227 +
21228 + if (err)
21229 + break;
21230 +
21231 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21232 + regs->r11 = addr1;
21233 + regs->r10 = addr2;
21234 + regs->ip = addr1;
21235 + return 2;
21236 + }
21237 + } while (0);
21238 +
21239 + return 1; /* PaX in action */
21240 +}
21241 +#endif
21242 +
21243 +/*
21244 + * PaX: decide what to do with offenders (regs->ip = fault address)
21245 + *
21246 + * returns 1 when task should be killed
21247 + * 2 when gcc trampoline was detected
21248 + */
21249 +static int pax_handle_fetch_fault(struct pt_regs *regs)
21250 +{
21251 + if (v8086_mode(regs))
21252 + return 1;
21253 +
21254 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
21255 + return 1;
21256 +
21257 +#ifdef CONFIG_X86_32
21258 + return pax_handle_fetch_fault_32(regs);
21259 +#else
21260 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
21261 + return pax_handle_fetch_fault_32(regs);
21262 + else
21263 + return pax_handle_fetch_fault_64(regs);
21264 +#endif
21265 +}
21266 +#endif
21267 +
21268 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21269 +void pax_report_insns(void *pc, void *sp)
21270 +{
21271 + long i;
21272 +
21273 + printk(KERN_ERR "PAX: bytes at PC: ");
21274 + for (i = 0; i < 20; i++) {
21275 + unsigned char c;
21276 + if (get_user(c, (__force unsigned char __user *)pc+i))
21277 + printk(KERN_CONT "?? ");
21278 + else
21279 + printk(KERN_CONT "%02x ", c);
21280 + }
21281 + printk("\n");
21282 +
21283 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
21284 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
21285 + unsigned long c;
21286 + if (get_user(c, (__force unsigned long __user *)sp+i))
21287 +#ifdef CONFIG_X86_32
21288 + printk(KERN_CONT "???????? ");
21289 +#else
21290 + printk(KERN_CONT "???????????????? ");
21291 +#endif
21292 + else
21293 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
21294 + }
21295 + printk("\n");
21296 +}
21297 +#endif
21298 +
21299 +/**
21300 + * probe_kernel_write(): safely attempt to write to a location
21301 + * @dst: address to write to
21302 + * @src: pointer to the data that shall be written
21303 + * @size: size of the data chunk
21304 + *
21305 + * Safely write to address @dst from the buffer at @src. If a kernel fault
21306 + * happens, handle that and return -EFAULT.
21307 + */
21308 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
21309 +{
21310 + long ret;
21311 + mm_segment_t old_fs = get_fs();
21312 +
21313 + set_fs(KERNEL_DS);
21314 + pagefault_disable();
21315 + pax_open_kernel();
21316 + ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
21317 + pax_close_kernel();
21318 + pagefault_enable();
21319 + set_fs(old_fs);
21320 +
21321 + return ret ? -EFAULT : 0;
21322 +}
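
pax_handle_fetch_fault_32() above recognizes the instruction sequences gcc emits for nested-function trampolines on the stack and emulates them instead of letting the process execute stack memory. The sketch below decodes the second 32-bit pattern (mov $imm32,%ecx followed by jmp rel32, opcodes 0xB9 and 0xE9) from a plain byte buffer; the buffer contents, addresses and helper name are invented, a little-endian host is assumed, and this is only an illustration of the pattern match, not the emulation path itself.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Returns 1 and fills *ecx/*target if buf[0..9] matches the pattern, else 0. */
static int match_trampoline2(const unsigned char *buf, uint32_t ip,
			     uint32_t *ecx, uint32_t *target)
{
	uint32_t imm, rel;

	if (buf[0] != 0xB9 || buf[5] != 0xE9)
		return 0;
	memcpy(&imm, buf + 1, 4);	/* immediate loaded into %ecx */
	memcpy(&rel, buf + 6, 4);	/* jmp displacement, relative to end of the jmp */
	*ecx = imm;
	*target = ip + 10 + rel;	/* 5-byte mov + 5-byte jmp, like ip + addr2 + 10 */
	return 1;
}

int main(void)
{
	/* mov $0x11223344,%ecx ; jmp with rel32 = 0x100 */
	unsigned char tramp[10] = {
		0xB9, 0x44, 0x33, 0x22, 0x11,
		0xE9, 0x00, 0x01, 0x00, 0x00,
	};
	uint32_t ecx, target;

	if (match_trampoline2(tramp, 0x8000, &ecx, &target))
		printf("ecx=%#x, continue at %#x\n", (unsigned)ecx, (unsigned)target);
	return 0;
}
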
21323 diff -urNp linux-2.6.32.44/arch/x86/mm/gup.c linux-2.6.32.44/arch/x86/mm/gup.c
21324 --- linux-2.6.32.44/arch/x86/mm/gup.c 2011-03-27 14:31:47.000000000 -0400
21325 +++ linux-2.6.32.44/arch/x86/mm/gup.c 2011-04-17 15:56:46.000000000 -0400
21326 @@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long
21327 addr = start;
21328 len = (unsigned long) nr_pages << PAGE_SHIFT;
21329 end = start + len;
21330 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
21331 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
21332 (void __user *)start, len)))
21333 return 0;
21334
21335 diff -urNp linux-2.6.32.44/arch/x86/mm/highmem_32.c linux-2.6.32.44/arch/x86/mm/highmem_32.c
21336 --- linux-2.6.32.44/arch/x86/mm/highmem_32.c 2011-03-27 14:31:47.000000000 -0400
21337 +++ linux-2.6.32.44/arch/x86/mm/highmem_32.c 2011-04-17 15:56:46.000000000 -0400
21338 @@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page
21339 idx = type + KM_TYPE_NR*smp_processor_id();
21340 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
21341 BUG_ON(!pte_none(*(kmap_pte-idx)));
21342 +
21343 + pax_open_kernel();
21344 set_pte(kmap_pte-idx, mk_pte(page, prot));
21345 + pax_close_kernel();
21346
21347 return (void *)vaddr;
21348 }
21349 diff -urNp linux-2.6.32.44/arch/x86/mm/hugetlbpage.c linux-2.6.32.44/arch/x86/mm/hugetlbpage.c
21350 --- linux-2.6.32.44/arch/x86/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
21351 +++ linux-2.6.32.44/arch/x86/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
21352 @@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmappe
21353 struct hstate *h = hstate_file(file);
21354 struct mm_struct *mm = current->mm;
21355 struct vm_area_struct *vma;
21356 - unsigned long start_addr;
21357 + unsigned long start_addr, pax_task_size = TASK_SIZE;
21358 +
21359 +#ifdef CONFIG_PAX_SEGMEXEC
21360 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21361 + pax_task_size = SEGMEXEC_TASK_SIZE;
21362 +#endif
21363 +
21364 + pax_task_size -= PAGE_SIZE;
21365
21366 if (len > mm->cached_hole_size) {
21367 - start_addr = mm->free_area_cache;
21368 + start_addr = mm->free_area_cache;
21369 } else {
21370 - start_addr = TASK_UNMAPPED_BASE;
21371 - mm->cached_hole_size = 0;
21372 + start_addr = mm->mmap_base;
21373 + mm->cached_hole_size = 0;
21374 }
21375
21376 full_search:
21377 @@ -281,26 +288,27 @@ full_search:
21378
21379 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
21380 /* At this point: (!vma || addr < vma->vm_end). */
21381 - if (TASK_SIZE - len < addr) {
21382 + if (pax_task_size - len < addr) {
21383 /*
21384 * Start a new search - just in case we missed
21385 * some holes.
21386 */
21387 - if (start_addr != TASK_UNMAPPED_BASE) {
21388 - start_addr = TASK_UNMAPPED_BASE;
21389 + if (start_addr != mm->mmap_base) {
21390 + start_addr = mm->mmap_base;
21391 mm->cached_hole_size = 0;
21392 goto full_search;
21393 }
21394 return -ENOMEM;
21395 }
21396 - if (!vma || addr + len <= vma->vm_start) {
21397 - mm->free_area_cache = addr + len;
21398 - return addr;
21399 - }
21400 + if (check_heap_stack_gap(vma, addr, len))
21401 + break;
21402 if (addr + mm->cached_hole_size < vma->vm_start)
21403 mm->cached_hole_size = vma->vm_start - addr;
21404 addr = ALIGN(vma->vm_end, huge_page_size(h));
21405 }
21406 +
21407 + mm->free_area_cache = addr + len;
21408 + return addr;
21409 }
21410
21411 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
21412 @@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmappe
21413 {
21414 struct hstate *h = hstate_file(file);
21415 struct mm_struct *mm = current->mm;
21416 - struct vm_area_struct *vma, *prev_vma;
21417 - unsigned long base = mm->mmap_base, addr = addr0;
21418 + struct vm_area_struct *vma;
21419 + unsigned long base = mm->mmap_base, addr;
21420 unsigned long largest_hole = mm->cached_hole_size;
21421 - int first_time = 1;
21422
21423 /* don't allow allocations above current base */
21424 if (mm->free_area_cache > base)
21425 @@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmappe
21426 largest_hole = 0;
21427 mm->free_area_cache = base;
21428 }
21429 -try_again:
21430 +
21431 /* make sure it can fit in the remaining address space */
21432 if (mm->free_area_cache < len)
21433 goto fail;
21434
21435 /* either no address requested or cant fit in requested address hole */
21436 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
21437 + addr = (mm->free_area_cache - len);
21438 do {
21439 + addr &= huge_page_mask(h);
21440 + vma = find_vma(mm, addr);
21441 /*
21442 * Lookup failure means no vma is above this address,
21443 * i.e. return with success:
21444 - */
21445 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
21446 - return addr;
21447 -
21448 - /*
21449 * new region fits between prev_vma->vm_end and
21450 * vma->vm_start, use it:
21451 */
21452 - if (addr + len <= vma->vm_start &&
21453 - (!prev_vma || (addr >= prev_vma->vm_end))) {
21454 + if (check_heap_stack_gap(vma, addr, len)) {
21455 /* remember the address as a hint for next time */
21456 - mm->cached_hole_size = largest_hole;
21457 - return (mm->free_area_cache = addr);
21458 - } else {
21459 - /* pull free_area_cache down to the first hole */
21460 - if (mm->free_area_cache == vma->vm_end) {
21461 - mm->free_area_cache = vma->vm_start;
21462 - mm->cached_hole_size = largest_hole;
21463 - }
21464 + mm->cached_hole_size = largest_hole;
21465 + return (mm->free_area_cache = addr);
21466 + }
21467 + /* pull free_area_cache down to the first hole */
21468 + if (mm->free_area_cache == vma->vm_end) {
21469 + mm->free_area_cache = vma->vm_start;
21470 + mm->cached_hole_size = largest_hole;
21471 }
21472
21473 /* remember the largest hole we saw so far */
21474 if (addr + largest_hole < vma->vm_start)
21475 - largest_hole = vma->vm_start - addr;
21476 + largest_hole = vma->vm_start - addr;
21477
21478 /* try just below the current vma->vm_start */
21479 - addr = (vma->vm_start - len) & huge_page_mask(h);
21480 - } while (len <= vma->vm_start);
21481 + addr = skip_heap_stack_gap(vma, len);
21482 + } while (!IS_ERR_VALUE(addr));
21483
21484 fail:
21485 /*
21486 - * if hint left us with no space for the requested
21487 - * mapping then try again:
21488 - */
21489 - if (first_time) {
21490 - mm->free_area_cache = base;
21491 - largest_hole = 0;
21492 - first_time = 0;
21493 - goto try_again;
21494 - }
21495 - /*
21496 * A failed mmap() very likely causes application failure,
21497 * so fall back to the bottom-up function here. This scenario
21498 * can happen with large stack limits and large mmap()
21499 * allocations.
21500 */
21501 - mm->free_area_cache = TASK_UNMAPPED_BASE;
21502 +
21503 +#ifdef CONFIG_PAX_SEGMEXEC
21504 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21505 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
21506 + else
21507 +#endif
21508 +
21509 + mm->mmap_base = TASK_UNMAPPED_BASE;
21510 +
21511 +#ifdef CONFIG_PAX_RANDMMAP
21512 + if (mm->pax_flags & MF_PAX_RANDMMAP)
21513 + mm->mmap_base += mm->delta_mmap;
21514 +#endif
21515 +
21516 + mm->free_area_cache = mm->mmap_base;
21517 mm->cached_hole_size = ~0UL;
21518 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
21519 len, pgoff, flags);
21520 @@ -387,6 +393,7 @@ fail:
21521 /*
21522 * Restore the topdown base:
21523 */
21524 + mm->mmap_base = base;
21525 mm->free_area_cache = base;
21526 mm->cached_hole_size = ~0UL;
21527
21528 @@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *f
21529 struct hstate *h = hstate_file(file);
21530 struct mm_struct *mm = current->mm;
21531 struct vm_area_struct *vma;
21532 + unsigned long pax_task_size = TASK_SIZE;
21533
21534 if (len & ~huge_page_mask(h))
21535 return -EINVAL;
21536 - if (len > TASK_SIZE)
21537 +
21538 +#ifdef CONFIG_PAX_SEGMEXEC
21539 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21540 + pax_task_size = SEGMEXEC_TASK_SIZE;
21541 +#endif
21542 +
21543 + pax_task_size -= PAGE_SIZE;
21544 +
21545 + if (len > pax_task_size)
21546 return -ENOMEM;
21547
21548 if (flags & MAP_FIXED) {
21549 @@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *f
21550 if (addr) {
21551 addr = ALIGN(addr, huge_page_size(h));
21552 vma = find_vma(mm, addr);
21553 - if (TASK_SIZE - len >= addr &&
21554 - (!vma || addr + len <= vma->vm_start))
21555 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
21556 return addr;
21557 }
21558 if (mm->get_unmapped_area == arch_get_unmapped_area)
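
Several hunks in this file, like the earlier ones in osf_sys.c, replace the open-coded test "!vma || addr + len <= vma->vm_start" with check_heap_stack_gap(vma, addr, len). The real helper is defined elsewhere in the patch and also honours a configurable heap/stack gap, so the guard-gap handling below is an assumption; this is only a simplified userspace model of that kind of placement test.

#include <stdbool.h>
#include <stdio.h>

#define GUARD_GAP (64UL * 1024)	/* placeholder, not the kernel's tunable value */

struct vma {
	unsigned long vm_start;
	unsigned long vm_end;
	bool grows_down;	/* stack-like mapping */
};

/* true if [addr, addr+len) fits below vma, leaving a guard gap for stacks */
static bool fits_before(const struct vma *vma, unsigned long addr,
			unsigned long len)
{
	unsigned long limit;

	if (!vma)			/* nothing above this address: always fits */
		return true;
	limit = vma->vm_start;
	if (vma->grows_down)		/* keep some distance from a growing stack */
		limit -= GUARD_GAP;
	return addr + len <= limit;
}

int main(void)
{
	struct vma stack = { 0x7f0000000000UL, 0x7f0000100000UL, true };

	printf("%d\n", fits_before(&stack, 0x7efffffe0000UL, 0x4000));	/* 1 */
	printf("%d\n", fits_before(&stack, 0x7effffffe000UL, 0x4000));	/* 0 */
	return 0;
}
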
21559 diff -urNp linux-2.6.32.44/arch/x86/mm/init_32.c linux-2.6.32.44/arch/x86/mm/init_32.c
21560 --- linux-2.6.32.44/arch/x86/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
21561 +++ linux-2.6.32.44/arch/x86/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
21562 @@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
21563 }
21564
21565 /*
21566 - * Creates a middle page table and puts a pointer to it in the
21567 - * given global directory entry. This only returns the gd entry
21568 - * in non-PAE compilation mode, since the middle layer is folded.
21569 - */
21570 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
21571 -{
21572 - pud_t *pud;
21573 - pmd_t *pmd_table;
21574 -
21575 -#ifdef CONFIG_X86_PAE
21576 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
21577 - if (after_bootmem)
21578 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
21579 - else
21580 - pmd_table = (pmd_t *)alloc_low_page();
21581 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
21582 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
21583 - pud = pud_offset(pgd, 0);
21584 - BUG_ON(pmd_table != pmd_offset(pud, 0));
21585 -
21586 - return pmd_table;
21587 - }
21588 -#endif
21589 - pud = pud_offset(pgd, 0);
21590 - pmd_table = pmd_offset(pud, 0);
21591 -
21592 - return pmd_table;
21593 -}
21594 -
21595 -/*
21596 * Create a page table and place a pointer to it in a middle page
21597 * directory entry:
21598 */
21599 @@ -121,13 +91,28 @@ static pte_t * __init one_page_table_ini
21600 page_table = (pte_t *)alloc_low_page();
21601
21602 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
21603 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21604 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
21605 +#else
21606 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
21607 +#endif
21608 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
21609 }
21610
21611 return pte_offset_kernel(pmd, 0);
21612 }
21613
21614 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
21615 +{
21616 + pud_t *pud;
21617 + pmd_t *pmd_table;
21618 +
21619 + pud = pud_offset(pgd, 0);
21620 + pmd_table = pmd_offset(pud, 0);
21621 +
21622 + return pmd_table;
21623 +}
21624 +
21625 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
21626 {
21627 int pgd_idx = pgd_index(vaddr);
21628 @@ -201,6 +186,7 @@ page_table_range_init(unsigned long star
21629 int pgd_idx, pmd_idx;
21630 unsigned long vaddr;
21631 pgd_t *pgd;
21632 + pud_t *pud;
21633 pmd_t *pmd;
21634 pte_t *pte = NULL;
21635
21636 @@ -210,8 +196,13 @@ page_table_range_init(unsigned long star
21637 pgd = pgd_base + pgd_idx;
21638
21639 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
21640 - pmd = one_md_table_init(pgd);
21641 - pmd = pmd + pmd_index(vaddr);
21642 + pud = pud_offset(pgd, vaddr);
21643 + pmd = pmd_offset(pud, vaddr);
21644 +
21645 +#ifdef CONFIG_X86_PAE
21646 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
21647 +#endif
21648 +
21649 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
21650 pmd++, pmd_idx++) {
21651 pte = page_table_kmap_check(one_page_table_init(pmd),
21652 @@ -223,11 +214,20 @@ page_table_range_init(unsigned long star
21653 }
21654 }
21655
21656 -static inline int is_kernel_text(unsigned long addr)
21657 +static inline int is_kernel_text(unsigned long start, unsigned long end)
21658 {
21659 - if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
21660 - return 1;
21661 - return 0;
21662 + if ((start > ktla_ktva((unsigned long)_etext) ||
21663 + end <= ktla_ktva((unsigned long)_stext)) &&
21664 + (start > ktla_ktva((unsigned long)_einittext) ||
21665 + end <= ktla_ktva((unsigned long)_sinittext)) &&
21666 +
21667 +#ifdef CONFIG_ACPI_SLEEP
21668 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
21669 +#endif
21670 +
21671 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
21672 + return 0;
21673 + return 1;
21674 }
21675
21676 /*
21677 @@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned lo
21678 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
21679 unsigned long start_pfn, end_pfn;
21680 pgd_t *pgd_base = swapper_pg_dir;
21681 - int pgd_idx, pmd_idx, pte_ofs;
21682 + unsigned int pgd_idx, pmd_idx, pte_ofs;
21683 unsigned long pfn;
21684 pgd_t *pgd;
21685 + pud_t *pud;
21686 pmd_t *pmd;
21687 pte_t *pte;
21688 unsigned pages_2m, pages_4k;
21689 @@ -278,8 +279,13 @@ repeat:
21690 pfn = start_pfn;
21691 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
21692 pgd = pgd_base + pgd_idx;
21693 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
21694 - pmd = one_md_table_init(pgd);
21695 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
21696 + pud = pud_offset(pgd, 0);
21697 + pmd = pmd_offset(pud, 0);
21698 +
21699 +#ifdef CONFIG_X86_PAE
21700 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
21701 +#endif
21702
21703 if (pfn >= end_pfn)
21704 continue;
21705 @@ -291,14 +297,13 @@ repeat:
21706 #endif
21707 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
21708 pmd++, pmd_idx++) {
21709 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
21710 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
21711
21712 /*
21713 * Map with big pages if possible, otherwise
21714 * create normal page tables:
21715 */
21716 if (use_pse) {
21717 - unsigned int addr2;
21718 pgprot_t prot = PAGE_KERNEL_LARGE;
21719 /*
21720 * first pass will use the same initial
21721 @@ -308,11 +313,7 @@ repeat:
21722 __pgprot(PTE_IDENT_ATTR |
21723 _PAGE_PSE);
21724
21725 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
21726 - PAGE_OFFSET + PAGE_SIZE-1;
21727 -
21728 - if (is_kernel_text(addr) ||
21729 - is_kernel_text(addr2))
21730 + if (is_kernel_text(address, address + PMD_SIZE))
21731 prot = PAGE_KERNEL_LARGE_EXEC;
21732
21733 pages_2m++;
21734 @@ -329,7 +330,7 @@ repeat:
21735 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
21736 pte += pte_ofs;
21737 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
21738 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
21739 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
21740 pgprot_t prot = PAGE_KERNEL;
21741 /*
21742 * first pass will use the same initial
21743 @@ -337,7 +338,7 @@ repeat:
21744 */
21745 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
21746
21747 - if (is_kernel_text(addr))
21748 + if (is_kernel_text(address, address + PAGE_SIZE))
21749 prot = PAGE_KERNEL_EXEC;
21750
21751 pages_4k++;
21752 @@ -489,7 +490,7 @@ void __init native_pagetable_setup_start
21753
21754 pud = pud_offset(pgd, va);
21755 pmd = pmd_offset(pud, va);
21756 - if (!pmd_present(*pmd))
21757 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
21758 break;
21759
21760 pte = pte_offset_kernel(pmd, va);
21761 @@ -541,9 +542,7 @@ void __init early_ioremap_page_table_ran
21762
21763 static void __init pagetable_init(void)
21764 {
21765 - pgd_t *pgd_base = swapper_pg_dir;
21766 -
21767 - permanent_kmaps_init(pgd_base);
21768 + permanent_kmaps_init(swapper_pg_dir);
21769 }
21770
21771 #ifdef CONFIG_ACPI_SLEEP
21772 @@ -551,12 +550,12 @@ static void __init pagetable_init(void)
21773 * ACPI suspend needs this for resume, because things like the intel-agp
21774 * driver might have split up a kernel 4MB mapping.
21775 */
21776 -char swsusp_pg_dir[PAGE_SIZE]
21777 +pgd_t swsusp_pg_dir[PTRS_PER_PGD]
21778 __attribute__ ((aligned(PAGE_SIZE)));
21779
21780 static inline void save_pg_dir(void)
21781 {
21782 - memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
21783 + clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
21784 }
21785 #else /* !CONFIG_ACPI_SLEEP */
21786 static inline void save_pg_dir(void)
21787 @@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
21788 flush_tlb_all();
21789 }
21790
21791 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
21792 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
21793 EXPORT_SYMBOL_GPL(__supported_pte_mask);
21794
21795 /* user-defined highmem size */
21796 @@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void
21797 * Initialize the boot-time allocator (with low memory only):
21798 */
21799 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
21800 - bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
21801 + bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
21802 PAGE_SIZE);
21803 if (bootmap == -1L)
21804 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
21805 @@ -864,6 +863,12 @@ void __init mem_init(void)
21806
21807 pci_iommu_alloc();
21808
21809 +#ifdef CONFIG_PAX_PER_CPU_PGD
21810 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
21811 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21812 + KERNEL_PGD_PTRS);
21813 +#endif
21814 +
21815 #ifdef CONFIG_FLATMEM
21816 BUG_ON(!mem_map);
21817 #endif
21818 @@ -881,7 +886,7 @@ void __init mem_init(void)
21819 set_highmem_pages_init();
21820
21821 codesize = (unsigned long) &_etext - (unsigned long) &_text;
21822 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
21823 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
21824 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
21825
21826 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
21827 @@ -923,10 +928,10 @@ void __init mem_init(void)
21828 ((unsigned long)&__init_end -
21829 (unsigned long)&__init_begin) >> 10,
21830
21831 - (unsigned long)&_etext, (unsigned long)&_edata,
21832 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
21833 + (unsigned long)&_sdata, (unsigned long)&_edata,
21834 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
21835
21836 - (unsigned long)&_text, (unsigned long)&_etext,
21837 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
21838 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
21839
21840 /*
21841 @@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
21842 if (!kernel_set_to_readonly)
21843 return;
21844
21845 + start = ktla_ktva(start);
21846 pr_debug("Set kernel text: %lx - %lx for read write\n",
21847 start, start+size);
21848
21849 @@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
21850 if (!kernel_set_to_readonly)
21851 return;
21852
21853 + start = ktla_ktva(start);
21854 pr_debug("Set kernel text: %lx - %lx for read only\n",
21855 start, start+size);
21856
21857 @@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
21858 unsigned long start = PFN_ALIGN(_text);
21859 unsigned long size = PFN_ALIGN(_etext) - start;
21860
21861 + start = ktla_ktva(start);
21862 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
21863 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
21864 size >> 10);
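
The rewritten is_kernel_text() above turns a single-address check into a range-overlap check against the kernel text, the init text, the ACPI wakeup area and the low ROM/video window, so a 2M or 4K mapping only stays executable when it really covers code. The core of it is ordinary interval-overlap logic, sketched here with made-up addresses and half-open intervals (the hunk's exact boundary handling is slightly more conservative); illustrative only, not part of the patch.

#include <stdbool.h>
#include <stdio.h>

struct range {
	unsigned long start;	/* inclusive */
	unsigned long end;	/* exclusive */
};

/* does [start, end) overlap [r->start, r->end) ? */
static bool overlaps(const struct range *r, unsigned long start,
		     unsigned long end)
{
	return !(start >= r->end || end <= r->start);
}

static bool hits_any(const struct range *ranges, int n,
		     unsigned long start, unsigned long end)
{
	int i;

	for (i = 0; i < n; i++)
		if (overlaps(&ranges[i], start, end))
			return true;
	return false;
}

int main(void)
{
	/* stand-ins for _stext.._etext and _sinittext.._einittext */
	struct range prot[] = {
		{ 0xc1000000UL, 0xc1400000UL },
		{ 0xc1800000UL, 0xc1900000UL },
	};

	printf("%d\n", hits_any(prot, 2, 0xc13ff000UL, 0xc1401000UL));	/* 1 */
	printf("%d\n", hits_any(prot, 2, 0xc1500000UL, 0xc1600000UL));	/* 0 */
	return 0;
}
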
21865 diff -urNp linux-2.6.32.44/arch/x86/mm/init_64.c linux-2.6.32.44/arch/x86/mm/init_64.c
21866 --- linux-2.6.32.44/arch/x86/mm/init_64.c 2011-04-17 17:00:52.000000000 -0400
21867 +++ linux-2.6.32.44/arch/x86/mm/init_64.c 2011-04-17 17:03:05.000000000 -0400
21868 @@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
21869 pmd = fill_pmd(pud, vaddr);
21870 pte = fill_pte(pmd, vaddr);
21871
21872 + pax_open_kernel();
21873 set_pte(pte, new_pte);
21874 + pax_close_kernel();
21875
21876 /*
21877 * It's enough to flush this one mapping.
21878 @@ -223,14 +225,12 @@ static void __init __init_extra_mapping(
21879 pgd = pgd_offset_k((unsigned long)__va(phys));
21880 if (pgd_none(*pgd)) {
21881 pud = (pud_t *) spp_getpage();
21882 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
21883 - _PAGE_USER));
21884 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
21885 }
21886 pud = pud_offset(pgd, (unsigned long)__va(phys));
21887 if (pud_none(*pud)) {
21888 pmd = (pmd_t *) spp_getpage();
21889 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
21890 - _PAGE_USER));
21891 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
21892 }
21893 pmd = pmd_offset(pud, phys);
21894 BUG_ON(!pmd_none(*pmd));
21895 @@ -675,6 +675,12 @@ void __init mem_init(void)
21896
21897 pci_iommu_alloc();
21898
21899 +#ifdef CONFIG_PAX_PER_CPU_PGD
21900 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
21901 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21902 + KERNEL_PGD_PTRS);
21903 +#endif
21904 +
21905 /* clear_bss() already clear the empty_zero_page */
21906
21907 reservedpages = 0;
21908 @@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
21909 static struct vm_area_struct gate_vma = {
21910 .vm_start = VSYSCALL_START,
21911 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
21912 - .vm_page_prot = PAGE_READONLY_EXEC,
21913 - .vm_flags = VM_READ | VM_EXEC
21914 + .vm_page_prot = PAGE_READONLY,
21915 + .vm_flags = VM_READ
21916 };
21917
21918 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
21919 @@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long a
21920
21921 const char *arch_vma_name(struct vm_area_struct *vma)
21922 {
21923 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21924 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21925 return "[vdso]";
21926 if (vma == &gate_vma)
21927 return "[vsyscall]";
21928 diff -urNp linux-2.6.32.44/arch/x86/mm/init.c linux-2.6.32.44/arch/x86/mm/init.c
21929 --- linux-2.6.32.44/arch/x86/mm/init.c 2011-04-17 17:00:52.000000000 -0400
21930 +++ linux-2.6.32.44/arch/x86/mm/init.c 2011-06-07 19:06:09.000000000 -0400
21931 @@ -69,11 +69,7 @@ static void __init find_early_table_spac
21932 * cause a hotspot and fill up ZONE_DMA. The page tables
21933 * need roughly 0.5KB per GB.
21934 */
21935 -#ifdef CONFIG_X86_32
21936 - start = 0x7000;
21937 -#else
21938 - start = 0x8000;
21939 -#endif
21940 + start = 0x100000;
21941 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
21942 tables, PAGE_SIZE);
21943 if (e820_table_start == -1UL)
21944 @@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_m
21945 #endif
21946
21947 set_nx();
21948 - if (nx_enabled)
21949 + if (nx_enabled && cpu_has_nx)
21950 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
21951
21952 /* Enable PSE if available */
21953 @@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_m
21954 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
21955 * mmio resources as well as potential bios/acpi data regions.
21956 */
21957 +
21958 int devmem_is_allowed(unsigned long pagenr)
21959 {
21960 +#ifdef CONFIG_GRKERNSEC_KMEM
21961 + /* allow BDA */
21962 + if (!pagenr)
21963 + return 1;
21964 + /* allow EBDA */
21965 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
21966 + return 1;
21967 + /* allow ISA/video mem */
21968 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
21969 + return 1;
21970 + /* throw out everything else below 1MB */
21971 + if (pagenr <= 256)
21972 + return 0;
21973 +#else
21974 if (pagenr <= 256)
21975 return 1;
21976 +#endif
21977 +
21978 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
21979 return 0;
21980 if (!page_is_ram(pagenr))
21981 @@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigne
21982
21983 void free_initmem(void)
21984 {
21985 +
21986 +#ifdef CONFIG_PAX_KERNEXEC
21987 +#ifdef CONFIG_X86_32
21988 + /* PaX: limit KERNEL_CS to actual size */
21989 + unsigned long addr, limit;
21990 + struct desc_struct d;
21991 + int cpu;
21992 +
21993 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
21994 + limit = (limit - 1UL) >> PAGE_SHIFT;
21995 +
21996 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
21997 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
21998 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
21999 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
22000 + }
22001 +
22002 + /* PaX: make KERNEL_CS read-only */
22003 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
22004 + if (!paravirt_enabled())
22005 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
22006 +/*
22007 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
22008 + pgd = pgd_offset_k(addr);
22009 + pud = pud_offset(pgd, addr);
22010 + pmd = pmd_offset(pud, addr);
22011 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22012 + }
22013 +*/
22014 +#ifdef CONFIG_X86_PAE
22015 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
22016 +/*
22017 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
22018 + pgd = pgd_offset_k(addr);
22019 + pud = pud_offset(pgd, addr);
22020 + pmd = pmd_offset(pud, addr);
22021 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22022 + }
22023 +*/
22024 +#endif
22025 +
22026 +#ifdef CONFIG_MODULES
22027 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
22028 +#endif
22029 +
22030 +#else
22031 + pgd_t *pgd;
22032 + pud_t *pud;
22033 + pmd_t *pmd;
22034 + unsigned long addr, end;
22035 +
22036 + /* PaX: make kernel code/rodata read-only, rest non-executable */
22037 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
22038 + pgd = pgd_offset_k(addr);
22039 + pud = pud_offset(pgd, addr);
22040 + pmd = pmd_offset(pud, addr);
22041 + if (!pmd_present(*pmd))
22042 + continue;
22043 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
22044 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22045 + else
22046 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22047 + }
22048 +
22049 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
22050 + end = addr + KERNEL_IMAGE_SIZE;
22051 + for (; addr < end; addr += PMD_SIZE) {
22052 + pgd = pgd_offset_k(addr);
22053 + pud = pud_offset(pgd, addr);
22054 + pmd = pmd_offset(pud, addr);
22055 + if (!pmd_present(*pmd))
22056 + continue;
22057 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
22058 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22059 + }
22060 +#endif
22061 +
22062 + flush_tlb_all();
22063 +#endif
22064 +
22065 free_init_pages("unused kernel memory",
22066 (unsigned long)(&__init_begin),
22067 (unsigned long)(&__init_end));
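
Under CONFIG_GRKERNSEC_KMEM, the devmem_is_allowed() change above whitelists only a few low pages (the BIOS data area, the EBDA page, and the ISA/video window) and rejects everything else under 1 MB, instead of allowing the whole first megabyte through /dev/mem. Below is a userspace model of just that low-memory policy; the ISA window constants mirror the kernel's values, but the macros and helper are local stand-ins, and the final fall-through only marks where the real code continues with its RAM and exclusivity checks.

#include <stdio.h>

#define PAGE_SHIFT		12
#define ISA_START_ADDRESS	0xa0000UL
#define ISA_END_ADDRESS		0x100000UL

static int low_page_allowed(unsigned long pagenr)
{
	if (!pagenr)					/* BIOS data area */
		return 1;
	if (pagenr == (0x9f000UL >> PAGE_SHIFT))	/* EBDA */
		return 1;
	if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
	    pagenr <  (ISA_END_ADDRESS >> PAGE_SHIFT))	/* ISA/video window */
		return 1;
	if (pagenr <= 256)				/* everything else below ~1 MB */
		return 0;
	return 1;	/* higher pages fall through to the RAM/iomem checks */
}

int main(void)
{
	printf("page 0: %d, page 0x9f: %d, page 0xb8: %d, page 0x42: %d\n",
	       low_page_allowed(0), low_page_allowed(0x9f),
	       low_page_allowed(0xb8), low_page_allowed(0x42));
	return 0;
}
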
22068 diff -urNp linux-2.6.32.44/arch/x86/mm/iomap_32.c linux-2.6.32.44/arch/x86/mm/iomap_32.c
22069 --- linux-2.6.32.44/arch/x86/mm/iomap_32.c 2011-03-27 14:31:47.000000000 -0400
22070 +++ linux-2.6.32.44/arch/x86/mm/iomap_32.c 2011-04-17 15:56:46.000000000 -0400
22071 @@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long
22072 debug_kmap_atomic(type);
22073 idx = type + KM_TYPE_NR * smp_processor_id();
22074 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22075 +
22076 + pax_open_kernel();
22077 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
22078 + pax_close_kernel();
22079 +
22080 arch_flush_lazy_mmu_mode();
22081
22082 return (void *)vaddr;
22083 diff -urNp linux-2.6.32.44/arch/x86/mm/ioremap.c linux-2.6.32.44/arch/x86/mm/ioremap.c
22084 --- linux-2.6.32.44/arch/x86/mm/ioremap.c 2011-03-27 14:31:47.000000000 -0400
22085 +++ linux-2.6.32.44/arch/x86/mm/ioremap.c 2011-04-17 15:56:46.000000000 -0400
22086 @@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
22087 * Second special case: Some BIOSen report the PC BIOS
22088 * area (640->1Mb) as ram even though it is not.
22089 */
22090 - if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
22091 - pagenr < (BIOS_END >> PAGE_SHIFT))
22092 + if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
22093 + pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22094 return 0;
22095
22096 for (i = 0; i < e820.nr_map; i++) {
22097 @@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(re
22098 /*
22099 * Don't allow anybody to remap normal RAM that we're using..
22100 */
22101 - for (pfn = phys_addr >> PAGE_SHIFT;
22102 - (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
22103 - pfn++) {
22104 -
22105 + for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
22106 int is_ram = page_is_ram(pfn);
22107
22108 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
22109 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
22110 return NULL;
22111 WARN_ON_ONCE(is_ram);
22112 }
22113 @@ -407,7 +404,7 @@ static int __init early_ioremap_debug_se
22114 early_param("early_ioremap_debug", early_ioremap_debug_setup);
22115
22116 static __initdata int after_paging_init;
22117 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
22118 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
22119
22120 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
22121 {
22122 @@ -439,8 +436,7 @@ void __init early_ioremap_init(void)
22123 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
22124
22125 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
22126 - memset(bm_pte, 0, sizeof(bm_pte));
22127 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
22128 + pmd_populate_user(&init_mm, pmd, bm_pte);
22129
22130 /*
22131 * The boot-ioremap range spans multiple pmds, for which
22132 diff -urNp linux-2.6.32.44/arch/x86/mm/kmemcheck/kmemcheck.c linux-2.6.32.44/arch/x86/mm/kmemcheck/kmemcheck.c
22133 --- linux-2.6.32.44/arch/x86/mm/kmemcheck/kmemcheck.c 2011-03-27 14:31:47.000000000 -0400
22134 +++ linux-2.6.32.44/arch/x86/mm/kmemcheck/kmemcheck.c 2011-04-17 15:56:46.000000000 -0400
22135 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
22136 * memory (e.g. tracked pages)? For now, we need this to avoid
22137 * invoking kmemcheck for PnP BIOS calls.
22138 */
22139 - if (regs->flags & X86_VM_MASK)
22140 + if (v8086_mode(regs))
22141 return false;
22142 - if (regs->cs != __KERNEL_CS)
22143 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
22144 return false;
22145
22146 pte = kmemcheck_pte_lookup(address);
22147 diff -urNp linux-2.6.32.44/arch/x86/mm/mmap.c linux-2.6.32.44/arch/x86/mm/mmap.c
22148 --- linux-2.6.32.44/arch/x86/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
22149 +++ linux-2.6.32.44/arch/x86/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
22150 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
22151 * Leave an at least ~128 MB hole with possible stack randomization.
22152 */
22153 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
22154 -#define MAX_GAP (TASK_SIZE/6*5)
22155 +#define MAX_GAP (pax_task_size/6*5)
22156
22157 /*
22158 * True on X86_32 or when emulating IA32 on X86_64
22159 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
22160 return rnd << PAGE_SHIFT;
22161 }
22162
22163 -static unsigned long mmap_base(void)
22164 +static unsigned long mmap_base(struct mm_struct *mm)
22165 {
22166 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
22167 + unsigned long pax_task_size = TASK_SIZE;
22168 +
22169 +#ifdef CONFIG_PAX_SEGMEXEC
22170 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22171 + pax_task_size = SEGMEXEC_TASK_SIZE;
22172 +#endif
22173
22174 if (gap < MIN_GAP)
22175 gap = MIN_GAP;
22176 else if (gap > MAX_GAP)
22177 gap = MAX_GAP;
22178
22179 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
22180 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
22181 }
22182
22183 /*
22184 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
22185 * does, but not when emulating X86_32
22186 */
22187 -static unsigned long mmap_legacy_base(void)
22188 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
22189 {
22190 - if (mmap_is_ia32())
22191 + if (mmap_is_ia32()) {
22192 +
22193 +#ifdef CONFIG_PAX_SEGMEXEC
22194 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22195 + return SEGMEXEC_TASK_UNMAPPED_BASE;
22196 + else
22197 +#endif
22198 +
22199 return TASK_UNMAPPED_BASE;
22200 - else
22201 + } else
22202 return TASK_UNMAPPED_BASE + mmap_rnd();
22203 }
22204
22205 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
22206 void arch_pick_mmap_layout(struct mm_struct *mm)
22207 {
22208 if (mmap_is_legacy()) {
22209 - mm->mmap_base = mmap_legacy_base();
22210 + mm->mmap_base = mmap_legacy_base(mm);
22211 +
22212 +#ifdef CONFIG_PAX_RANDMMAP
22213 + if (mm->pax_flags & MF_PAX_RANDMMAP)
22214 + mm->mmap_base += mm->delta_mmap;
22215 +#endif
22216 +
22217 mm->get_unmapped_area = arch_get_unmapped_area;
22218 mm->unmap_area = arch_unmap_area;
22219 } else {
22220 - mm->mmap_base = mmap_base();
22221 + mm->mmap_base = mmap_base(mm);
22222 +
22223 +#ifdef CONFIG_PAX_RANDMMAP
22224 + if (mm->pax_flags & MF_PAX_RANDMMAP)
22225 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
22226 +#endif
22227 +
22228 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
22229 mm->unmap_area = arch_unmap_area_topdown;
22230 }
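
arch_pick_mmap_layout() above chooses between the legacy bottom-up base and a top-down base computed by mmap_base(), then applies the PaX delta_mmap/delta_stack offsets when MF_PAX_RANDMMAP is set. The top-down base itself is just clamped arithmetic, sketched below with invented inputs; the real MIN_GAP also adds stack_maxrandom_size() and mmap_rnd() draws from the kernel's RNG, so treat the constants and the rand() stand-in here as assumptions for illustration only.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* clamp the stack gap, then place the base that far below the end of user space */
static unsigned long mmap_base(unsigned long task_size,
			       unsigned long stack_rlimit,
			       unsigned long rnd)
{
	unsigned long min_gap = 128UL * 1024 * 1024;	/* ~128 MB hole */
	unsigned long max_gap = task_size / 6 * 5;
	unsigned long gap = stack_rlimit;

	if (gap < min_gap)
		gap = min_gap;
	else if (gap > max_gap)
		gap = max_gap;

	return PAGE_ALIGN(task_size - gap - rnd);
}

int main(void)
{
	unsigned long task_size = 0xc0000000UL;		/* 3 GB, i386-style split */
	unsigned long rnd = ((unsigned long)rand() & 0xff) << 12;	/* up to 255 pages */

	printf("mmap_base = %#lx\n", mmap_base(task_size, 8UL << 20, rnd));
	return 0;
}
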
22231 diff -urNp linux-2.6.32.44/arch/x86/mm/mmio-mod.c linux-2.6.32.44/arch/x86/mm/mmio-mod.c
22232 --- linux-2.6.32.44/arch/x86/mm/mmio-mod.c 2011-03-27 14:31:47.000000000 -0400
22233 +++ linux-2.6.32.44/arch/x86/mm/mmio-mod.c 2011-07-06 19:53:33.000000000 -0400
22234 @@ -193,7 +193,7 @@ static void pre(struct kmmio_probe *p, s
22235 break;
22236 default:
22237 {
22238 - unsigned char *ip = (unsigned char *)instptr;
22239 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
22240 my_trace->opcode = MMIO_UNKNOWN_OP;
22241 my_trace->width = 0;
22242 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
22243 @@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p,
22244 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
22245 void __iomem *addr)
22246 {
22247 - static atomic_t next_id;
22248 + static atomic_unchecked_t next_id;
22249 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
22250 /* These are page-unaligned. */
22251 struct mmiotrace_map map = {
22252 @@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_
22253 .private = trace
22254 },
22255 .phys = offset,
22256 - .id = atomic_inc_return(&next_id)
22257 + .id = atomic_inc_return_unchecked(&next_id)
22258 };
22259 map.map_id = trace->id;
22260
22261 diff -urNp linux-2.6.32.44/arch/x86/mm/numa_32.c linux-2.6.32.44/arch/x86/mm/numa_32.c
22262 --- linux-2.6.32.44/arch/x86/mm/numa_32.c 2011-03-27 14:31:47.000000000 -0400
22263 +++ linux-2.6.32.44/arch/x86/mm/numa_32.c 2011-04-17 15:56:46.000000000 -0400
22264 @@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int
22265 }
22266 #endif
22267
22268 -extern unsigned long find_max_low_pfn(void);
22269 extern unsigned long highend_pfn, highstart_pfn;
22270
22271 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
22272 diff -urNp linux-2.6.32.44/arch/x86/mm/pageattr.c linux-2.6.32.44/arch/x86/mm/pageattr.c
22273 --- linux-2.6.32.44/arch/x86/mm/pageattr.c 2011-03-27 14:31:47.000000000 -0400
22274 +++ linux-2.6.32.44/arch/x86/mm/pageattr.c 2011-04-17 15:56:46.000000000 -0400
22275 @@ -261,16 +261,17 @@ static inline pgprot_t static_protection
22276 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
22277 */
22278 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
22279 - pgprot_val(forbidden) |= _PAGE_NX;
22280 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22281
22282 /*
22283 * The kernel text needs to be executable for obvious reasons
22284 * Does not cover __inittext since that is gone later on. On
22285 * 64bit we do not enforce !NX on the low mapping
22286 */
22287 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
22288 - pgprot_val(forbidden) |= _PAGE_NX;
22289 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
22290 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22291
22292 +#ifdef CONFIG_DEBUG_RODATA
22293 /*
22294 * The .rodata section needs to be read-only. Using the pfn
22295 * catches all aliases.
22296 @@ -278,6 +279,14 @@ static inline pgprot_t static_protection
22297 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
22298 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
22299 pgprot_val(forbidden) |= _PAGE_RW;
22300 +#endif
22301 +
22302 +#ifdef CONFIG_PAX_KERNEXEC
22303 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
22304 + pgprot_val(forbidden) |= _PAGE_RW;
22305 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22306 + }
22307 +#endif
22308
22309 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
22310
22311 @@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
22312 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
22313 {
22314 /* change init_mm */
22315 + pax_open_kernel();
22316 set_pte_atomic(kpte, pte);
22317 +
22318 #ifdef CONFIG_X86_32
22319 if (!SHARED_KERNEL_PMD) {
22320 +
22321 +#ifdef CONFIG_PAX_PER_CPU_PGD
22322 + unsigned long cpu;
22323 +#else
22324 struct page *page;
22325 +#endif
22326
22327 +#ifdef CONFIG_PAX_PER_CPU_PGD
22328 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
22329 + pgd_t *pgd = get_cpu_pgd(cpu);
22330 +#else
22331 list_for_each_entry(page, &pgd_list, lru) {
22332 - pgd_t *pgd;
22333 + pgd_t *pgd = (pgd_t *)page_address(page);
22334 +#endif
22335 +
22336 pud_t *pud;
22337 pmd_t *pmd;
22338
22339 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
22340 + pgd += pgd_index(address);
22341 pud = pud_offset(pgd, address);
22342 pmd = pmd_offset(pud, address);
22343 set_pte_atomic((pte_t *)pmd, pte);
22344 }
22345 }
22346 #endif
22347 + pax_close_kernel();
22348 }
22349
22350 static int
22351 diff -urNp linux-2.6.32.44/arch/x86/mm/pageattr-test.c linux-2.6.32.44/arch/x86/mm/pageattr-test.c
22352 --- linux-2.6.32.44/arch/x86/mm/pageattr-test.c 2011-03-27 14:31:47.000000000 -0400
22353 +++ linux-2.6.32.44/arch/x86/mm/pageattr-test.c 2011-04-17 15:56:46.000000000 -0400
22354 @@ -36,7 +36,7 @@ enum {
22355
22356 static int pte_testbit(pte_t pte)
22357 {
22358 - return pte_flags(pte) & _PAGE_UNUSED1;
22359 + return pte_flags(pte) & _PAGE_CPA_TEST;
22360 }
22361
22362 struct split_state {
22363 diff -urNp linux-2.6.32.44/arch/x86/mm/pat.c linux-2.6.32.44/arch/x86/mm/pat.c
22364 --- linux-2.6.32.44/arch/x86/mm/pat.c 2011-03-27 14:31:47.000000000 -0400
22365 +++ linux-2.6.32.44/arch/x86/mm/pat.c 2011-04-17 15:56:46.000000000 -0400
22366 @@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct
22367
22368 conflict:
22369 printk(KERN_INFO "%s:%d conflicting memory types "
22370 - "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
22371 + "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
22372 new->end, cattr_name(new->type), cattr_name(entry->type));
22373 return -EBUSY;
22374 }
22375 @@ -559,7 +559,7 @@ unlock_ret:
22376
22377 if (err) {
22378 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
22379 - current->comm, current->pid, start, end);
22380 + current->comm, task_pid_nr(current), start, end);
22381 }
22382
22383 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
22384 @@ -689,8 +689,8 @@ static inline int range_is_allowed(unsig
22385 while (cursor < to) {
22386 if (!devmem_is_allowed(pfn)) {
22387 printk(KERN_INFO
22388 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
22389 - current->comm, from, to);
22390 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
22391 + current->comm, from, to, cursor);
22392 return 0;
22393 }
22394 cursor += PAGE_SIZE;
22395 @@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, un
22396 printk(KERN_INFO
22397 "%s:%d ioremap_change_attr failed %s "
22398 "for %Lx-%Lx\n",
22399 - current->comm, current->pid,
22400 + current->comm, task_pid_nr(current),
22401 cattr_name(flags),
22402 base, (unsigned long long)(base + size));
22403 return -EINVAL;
22404 @@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr,
22405 free_memtype(paddr, paddr + size);
22406 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
22407 " for %Lx-%Lx, got %s\n",
22408 - current->comm, current->pid,
22409 + current->comm, task_pid_nr(current),
22410 cattr_name(want_flags),
22411 (unsigned long long)paddr,
22412 (unsigned long long)(paddr + size),
22413 diff -urNp linux-2.6.32.44/arch/x86/mm/pf_in.c linux-2.6.32.44/arch/x86/mm/pf_in.c
22414 --- linux-2.6.32.44/arch/x86/mm/pf_in.c 2011-03-27 14:31:47.000000000 -0400
22415 +++ linux-2.6.32.44/arch/x86/mm/pf_in.c 2011-07-06 19:53:33.000000000 -0400
22416 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
22417 int i;
22418 enum reason_type rv = OTHERS;
22419
22420 - p = (unsigned char *)ins_addr;
22421 + p = (unsigned char *)ktla_ktva(ins_addr);
22422 p += skip_prefix(p, &prf);
22423 p += get_opcode(p, &opcode);
22424
22425 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
22426 struct prefix_bits prf;
22427 int i;
22428
22429 - p = (unsigned char *)ins_addr;
22430 + p = (unsigned char *)ktla_ktva(ins_addr);
22431 p += skip_prefix(p, &prf);
22432 p += get_opcode(p, &opcode);
22433
22434 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
22435 struct prefix_bits prf;
22436 int i;
22437
22438 - p = (unsigned char *)ins_addr;
22439 + p = (unsigned char *)ktla_ktva(ins_addr);
22440 p += skip_prefix(p, &prf);
22441 p += get_opcode(p, &opcode);
22442
22443 @@ -417,7 +417,7 @@ unsigned long get_ins_reg_val(unsigned l
22444 int i;
22445 unsigned long rv;
22446
22447 - p = (unsigned char *)ins_addr;
22448 + p = (unsigned char *)ktla_ktva(ins_addr);
22449 p += skip_prefix(p, &prf);
22450 p += get_opcode(p, &opcode);
22451 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
22452 @@ -472,7 +472,7 @@ unsigned long get_ins_imm_val(unsigned l
22453 int i;
22454 unsigned long rv;
22455
22456 - p = (unsigned char *)ins_addr;
22457 + p = (unsigned char *)ktla_ktva(ins_addr);
22458 p += skip_prefix(p, &prf);
22459 p += get_opcode(p, &opcode);
22460 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
22461 diff -urNp linux-2.6.32.44/arch/x86/mm/pgtable_32.c linux-2.6.32.44/arch/x86/mm/pgtable_32.c
22462 --- linux-2.6.32.44/arch/x86/mm/pgtable_32.c 2011-03-27 14:31:47.000000000 -0400
22463 +++ linux-2.6.32.44/arch/x86/mm/pgtable_32.c 2011-04-17 15:56:46.000000000 -0400
22464 @@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr,
22465 return;
22466 }
22467 pte = pte_offset_kernel(pmd, vaddr);
22468 +
22469 + pax_open_kernel();
22470 if (pte_val(pteval))
22471 set_pte_at(&init_mm, vaddr, pte, pteval);
22472 else
22473 pte_clear(&init_mm, vaddr, pte);
22474 + pax_close_kernel();
22475
22476 /*
22477 * It's enough to flush this one mapping.
22478 diff -urNp linux-2.6.32.44/arch/x86/mm/pgtable.c linux-2.6.32.44/arch/x86/mm/pgtable.c
22479 --- linux-2.6.32.44/arch/x86/mm/pgtable.c 2011-03-27 14:31:47.000000000 -0400
22480 +++ linux-2.6.32.44/arch/x86/mm/pgtable.c 2011-05-11 18:25:15.000000000 -0400
22481 @@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *p
22482 list_del(&page->lru);
22483 }
22484
22485 -#define UNSHARED_PTRS_PER_PGD \
22486 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
22487 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22488 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
22489
22490 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
22491 +{
22492 + while (count--)
22493 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
22494 +}
22495 +#endif
22496 +
22497 +#ifdef CONFIG_PAX_PER_CPU_PGD
22498 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
22499 +{
22500 + while (count--)
22501 +
22502 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22503 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
22504 +#else
22505 + *dst++ = *src++;
22506 +#endif
22507 +
22508 +}
22509 +#endif
22510 +
22511 +#ifdef CONFIG_X86_64
22512 +#define pxd_t pud_t
22513 +#define pyd_t pgd_t
22514 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
22515 +#define pxd_free(mm, pud) pud_free((mm), (pud))
22516 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
22517 +#define pyd_offset(mm ,address) pgd_offset((mm), (address))
22518 +#define PYD_SIZE PGDIR_SIZE
22519 +#else
22520 +#define pxd_t pmd_t
22521 +#define pyd_t pud_t
22522 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
22523 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
22524 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
22525 +#define pyd_offset(mm ,address) pud_offset((mm), (address))
22526 +#define PYD_SIZE PUD_SIZE
22527 +#endif
22528 +
22529 +#ifdef CONFIG_PAX_PER_CPU_PGD
22530 +static inline void pgd_ctor(pgd_t *pgd) {}
22531 +static inline void pgd_dtor(pgd_t *pgd) {}
22532 +#else
22533 static void pgd_ctor(pgd_t *pgd)
22534 {
22535 /* If the pgd points to a shared pagetable level (either the
22536 @@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
22537 pgd_list_del(pgd);
22538 spin_unlock_irqrestore(&pgd_lock, flags);
22539 }
22540 +#endif
22541
22542 /*
22543 * List of all pgd's needed for non-PAE so it can invalidate entries
22544 @@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
22545 * -- wli
22546 */
22547
22548 -#ifdef CONFIG_X86_PAE
22549 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22550 /*
22551 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
22552 * updating the top-level pagetable entries to guarantee the
22553 @@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
22554 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
22555 * and initialize the kernel pmds here.
22556 */
22557 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
22558 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
22559
22560 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
22561 {
22562 @@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm,
22563 */
22564 flush_tlb_mm(mm);
22565 }
22566 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
22567 +#define PREALLOCATED_PXDS USER_PGD_PTRS
22568 #else /* !CONFIG_X86_PAE */
22569
22570 /* No need to prepopulate any pagetable entries in non-PAE modes. */
22571 -#define PREALLOCATED_PMDS 0
22572 +#define PREALLOCATED_PXDS 0
22573
22574 #endif /* CONFIG_X86_PAE */
22575
22576 -static void free_pmds(pmd_t *pmds[])
22577 +static void free_pxds(pxd_t *pxds[])
22578 {
22579 int i;
22580
22581 - for(i = 0; i < PREALLOCATED_PMDS; i++)
22582 - if (pmds[i])
22583 - free_page((unsigned long)pmds[i]);
22584 + for(i = 0; i < PREALLOCATED_PXDS; i++)
22585 + if (pxds[i])
22586 + free_page((unsigned long)pxds[i]);
22587 }
22588
22589 -static int preallocate_pmds(pmd_t *pmds[])
22590 +static int preallocate_pxds(pxd_t *pxds[])
22591 {
22592 int i;
22593 bool failed = false;
22594
22595 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
22596 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
22597 - if (pmd == NULL)
22598 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
22599 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
22600 + if (pxd == NULL)
22601 failed = true;
22602 - pmds[i] = pmd;
22603 + pxds[i] = pxd;
22604 }
22605
22606 if (failed) {
22607 - free_pmds(pmds);
22608 + free_pxds(pxds);
22609 return -ENOMEM;
22610 }
22611
22612 @@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[
22613 * preallocate which never got a corresponding vma will need to be
22614 * freed manually.
22615 */
22616 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
22617 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
22618 {
22619 int i;
22620
22621 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
22622 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
22623 pgd_t pgd = pgdp[i];
22624
22625 if (pgd_val(pgd) != 0) {
22626 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
22627 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
22628
22629 - pgdp[i] = native_make_pgd(0);
22630 + set_pgd(pgdp + i, native_make_pgd(0));
22631
22632 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
22633 - pmd_free(mm, pmd);
22634 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
22635 + pxd_free(mm, pxd);
22636 }
22637 }
22638 }
22639
22640 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
22641 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
22642 {
22643 - pud_t *pud;
22644 + pyd_t *pyd;
22645 unsigned long addr;
22646 int i;
22647
22648 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
22649 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
22650 return;
22651
22652 - pud = pud_offset(pgd, 0);
22653 +#ifdef CONFIG_X86_64
22654 + pyd = pyd_offset(mm, 0L);
22655 +#else
22656 + pyd = pyd_offset(pgd, 0L);
22657 +#endif
22658
22659 - for (addr = i = 0; i < PREALLOCATED_PMDS;
22660 - i++, pud++, addr += PUD_SIZE) {
22661 - pmd_t *pmd = pmds[i];
22662 + for (addr = i = 0; i < PREALLOCATED_PXDS;
22663 + i++, pyd++, addr += PYD_SIZE) {
22664 + pxd_t *pxd = pxds[i];
22665
22666 if (i >= KERNEL_PGD_BOUNDARY)
22667 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
22668 - sizeof(pmd_t) * PTRS_PER_PMD);
22669 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
22670 + sizeof(pxd_t) * PTRS_PER_PMD);
22671
22672 - pud_populate(mm, pud, pmd);
22673 + pyd_populate(mm, pyd, pxd);
22674 }
22675 }
22676
22677 pgd_t *pgd_alloc(struct mm_struct *mm)
22678 {
22679 pgd_t *pgd;
22680 - pmd_t *pmds[PREALLOCATED_PMDS];
22681 + pxd_t *pxds[PREALLOCATED_PXDS];
22682 +
22683 unsigned long flags;
22684
22685 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
22686 @@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
22687
22688 mm->pgd = pgd;
22689
22690 - if (preallocate_pmds(pmds) != 0)
22691 + if (preallocate_pxds(pxds) != 0)
22692 goto out_free_pgd;
22693
22694 if (paravirt_pgd_alloc(mm) != 0)
22695 - goto out_free_pmds;
22696 + goto out_free_pxds;
22697
22698 /*
22699 * Make sure that pre-populating the pmds is atomic with
22700 @@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
22701 spin_lock_irqsave(&pgd_lock, flags);
22702
22703 pgd_ctor(pgd);
22704 - pgd_prepopulate_pmd(mm, pgd, pmds);
22705 + pgd_prepopulate_pxd(mm, pgd, pxds);
22706
22707 spin_unlock_irqrestore(&pgd_lock, flags);
22708
22709 return pgd;
22710
22711 -out_free_pmds:
22712 - free_pmds(pmds);
22713 +out_free_pxds:
22714 + free_pxds(pxds);
22715 out_free_pgd:
22716 free_page((unsigned long)pgd);
22717 out:
22718 @@ -287,7 +338,7 @@ out:
22719
22720 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
22721 {
22722 - pgd_mop_up_pmds(mm, pgd);
22723 + pgd_mop_up_pxds(mm, pgd);
22724 pgd_dtor(pgd);
22725 paravirt_pgd_free(mm, pgd);
22726 free_page((unsigned long)pgd);
22727 diff -urNp linux-2.6.32.44/arch/x86/mm/setup_nx.c linux-2.6.32.44/arch/x86/mm/setup_nx.c
22728 --- linux-2.6.32.44/arch/x86/mm/setup_nx.c 2011-03-27 14:31:47.000000000 -0400
22729 +++ linux-2.6.32.44/arch/x86/mm/setup_nx.c 2011-04-17 15:56:46.000000000 -0400
22730 @@ -4,11 +4,10 @@
22731
22732 #include <asm/pgtable.h>
22733
22734 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22735 int nx_enabled;
22736
22737 -#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
22738 -static int disable_nx __cpuinitdata;
22739 -
22740 +#ifndef CONFIG_PAX_PAGEEXEC
22741 /*
22742 * noexec = on|off
22743 *
22744 @@ -22,32 +21,26 @@ static int __init noexec_setup(char *str
22745 if (!str)
22746 return -EINVAL;
22747 if (!strncmp(str, "on", 2)) {
22748 - __supported_pte_mask |= _PAGE_NX;
22749 - disable_nx = 0;
22750 + nx_enabled = 1;
22751 } else if (!strncmp(str, "off", 3)) {
22752 - disable_nx = 1;
22753 - __supported_pte_mask &= ~_PAGE_NX;
22754 + nx_enabled = 0;
22755 }
22756 return 0;
22757 }
22758 early_param("noexec", noexec_setup);
22759 #endif
22760 +#endif
22761
22762 #ifdef CONFIG_X86_PAE
22763 void __init set_nx(void)
22764 {
22765 - unsigned int v[4], l, h;
22766 + if (!nx_enabled && cpu_has_nx) {
22767 + unsigned l, h;
22768
22769 - if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
22770 - cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
22771 -
22772 - if ((v[3] & (1 << 20)) && !disable_nx) {
22773 - rdmsr(MSR_EFER, l, h);
22774 - l |= EFER_NX;
22775 - wrmsr(MSR_EFER, l, h);
22776 - nx_enabled = 1;
22777 - __supported_pte_mask |= _PAGE_NX;
22778 - }
22779 + __supported_pte_mask &= ~_PAGE_NX;
22780 + rdmsr(MSR_EFER, l, h);
22781 + l &= ~EFER_NX;
22782 + wrmsr(MSR_EFER, l, h);
22783 }
22784 }
22785 #else
22786 @@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
22787 unsigned long efer;
22788
22789 rdmsrl(MSR_EFER, efer);
22790 - if (!(efer & EFER_NX) || disable_nx)
22791 + if (!(efer & EFER_NX) || !nx_enabled)
22792 __supported_pte_mask &= ~_PAGE_NX;
22793 }
22794 #endif
22795 diff -urNp linux-2.6.32.44/arch/x86/mm/tlb.c linux-2.6.32.44/arch/x86/mm/tlb.c
22796 --- linux-2.6.32.44/arch/x86/mm/tlb.c 2011-03-27 14:31:47.000000000 -0400
22797 +++ linux-2.6.32.44/arch/x86/mm/tlb.c 2011-04-23 12:56:10.000000000 -0400
22798 @@ -61,7 +61,11 @@ void leave_mm(int cpu)
22799 BUG();
22800 cpumask_clear_cpu(cpu,
22801 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
22802 +
22803 +#ifndef CONFIG_PAX_PER_CPU_PGD
22804 load_cr3(swapper_pg_dir);
22805 +#endif
22806 +
22807 }
22808 EXPORT_SYMBOL_GPL(leave_mm);
22809
22810 diff -urNp linux-2.6.32.44/arch/x86/oprofile/backtrace.c linux-2.6.32.44/arch/x86/oprofile/backtrace.c
22811 --- linux-2.6.32.44/arch/x86/oprofile/backtrace.c 2011-03-27 14:31:47.000000000 -0400
22812 +++ linux-2.6.32.44/arch/x86/oprofile/backtrace.c 2011-04-17 15:56:46.000000000 -0400
22813 @@ -57,7 +57,7 @@ static struct frame_head *dump_user_back
22814 struct frame_head bufhead[2];
22815
22816 /* Also check accessibility of one struct frame_head beyond */
22817 - if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
22818 + if (!__access_ok(VERIFY_READ, head, sizeof(bufhead)))
22819 return NULL;
22820 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
22821 return NULL;
22822 @@ -77,7 +77,7 @@ x86_backtrace(struct pt_regs * const reg
22823 {
22824 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
22825
22826 - if (!user_mode_vm(regs)) {
22827 + if (!user_mode(regs)) {
22828 unsigned long stack = kernel_stack_pointer(regs);
22829 if (depth)
22830 dump_trace(NULL, regs, (unsigned long *)stack, 0,
22831 diff -urNp linux-2.6.32.44/arch/x86/oprofile/op_model_p4.c linux-2.6.32.44/arch/x86/oprofile/op_model_p4.c
22832 --- linux-2.6.32.44/arch/x86/oprofile/op_model_p4.c 2011-03-27 14:31:47.000000000 -0400
22833 +++ linux-2.6.32.44/arch/x86/oprofile/op_model_p4.c 2011-04-17 15:56:46.000000000 -0400
22834 @@ -50,7 +50,7 @@ static inline void setup_num_counters(vo
22835 #endif
22836 }
22837
22838 -static int inline addr_increment(void)
22839 +static inline int addr_increment(void)
22840 {
22841 #ifdef CONFIG_SMP
22842 return smp_num_siblings == 2 ? 2 : 1;
22843 diff -urNp linux-2.6.32.44/arch/x86/pci/common.c linux-2.6.32.44/arch/x86/pci/common.c
22844 --- linux-2.6.32.44/arch/x86/pci/common.c 2011-03-27 14:31:47.000000000 -0400
22845 +++ linux-2.6.32.44/arch/x86/pci/common.c 2011-04-23 12:56:10.000000000 -0400
22846 @@ -31,8 +31,8 @@ int noioapicreroute = 1;
22847 int pcibios_last_bus = -1;
22848 unsigned long pirq_table_addr;
22849 struct pci_bus *pci_root_bus;
22850 -struct pci_raw_ops *raw_pci_ops;
22851 -struct pci_raw_ops *raw_pci_ext_ops;
22852 +const struct pci_raw_ops *raw_pci_ops;
22853 +const struct pci_raw_ops *raw_pci_ext_ops;
22854
22855 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
22856 int reg, int len, u32 *val)
22857 diff -urNp linux-2.6.32.44/arch/x86/pci/direct.c linux-2.6.32.44/arch/x86/pci/direct.c
22858 --- linux-2.6.32.44/arch/x86/pci/direct.c 2011-03-27 14:31:47.000000000 -0400
22859 +++ linux-2.6.32.44/arch/x86/pci/direct.c 2011-04-17 15:56:46.000000000 -0400
22860 @@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int
22861
22862 #undef PCI_CONF1_ADDRESS
22863
22864 -struct pci_raw_ops pci_direct_conf1 = {
22865 +const struct pci_raw_ops pci_direct_conf1 = {
22866 .read = pci_conf1_read,
22867 .write = pci_conf1_write,
22868 };
22869 @@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int
22870
22871 #undef PCI_CONF2_ADDRESS
22872
22873 -struct pci_raw_ops pci_direct_conf2 = {
22874 +const struct pci_raw_ops pci_direct_conf2 = {
22875 .read = pci_conf2_read,
22876 .write = pci_conf2_write,
22877 };
22878 @@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
22879 * This should be close to trivial, but it isn't, because there are buggy
22880 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
22881 */
22882 -static int __init pci_sanity_check(struct pci_raw_ops *o)
22883 +static int __init pci_sanity_check(const struct pci_raw_ops *o)
22884 {
22885 u32 x = 0;
22886 int year, devfn;
22887 diff -urNp linux-2.6.32.44/arch/x86/pci/mmconfig_32.c linux-2.6.32.44/arch/x86/pci/mmconfig_32.c
22888 --- linux-2.6.32.44/arch/x86/pci/mmconfig_32.c 2011-03-27 14:31:47.000000000 -0400
22889 +++ linux-2.6.32.44/arch/x86/pci/mmconfig_32.c 2011-04-17 15:56:46.000000000 -0400
22890 @@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int
22891 return 0;
22892 }
22893
22894 -static struct pci_raw_ops pci_mmcfg = {
22895 +static const struct pci_raw_ops pci_mmcfg = {
22896 .read = pci_mmcfg_read,
22897 .write = pci_mmcfg_write,
22898 };
22899 diff -urNp linux-2.6.32.44/arch/x86/pci/mmconfig_64.c linux-2.6.32.44/arch/x86/pci/mmconfig_64.c
22900 --- linux-2.6.32.44/arch/x86/pci/mmconfig_64.c 2011-03-27 14:31:47.000000000 -0400
22901 +++ linux-2.6.32.44/arch/x86/pci/mmconfig_64.c 2011-04-17 15:56:46.000000000 -0400
22902 @@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int
22903 return 0;
22904 }
22905
22906 -static struct pci_raw_ops pci_mmcfg = {
22907 +static const struct pci_raw_ops pci_mmcfg = {
22908 .read = pci_mmcfg_read,
22909 .write = pci_mmcfg_write,
22910 };
22911 diff -urNp linux-2.6.32.44/arch/x86/pci/numaq_32.c linux-2.6.32.44/arch/x86/pci/numaq_32.c
22912 --- linux-2.6.32.44/arch/x86/pci/numaq_32.c 2011-03-27 14:31:47.000000000 -0400
22913 +++ linux-2.6.32.44/arch/x86/pci/numaq_32.c 2011-04-17 15:56:46.000000000 -0400
22914 @@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned i
22915
22916 #undef PCI_CONF1_MQ_ADDRESS
22917
22918 -static struct pci_raw_ops pci_direct_conf1_mq = {
22919 +static const struct pci_raw_ops pci_direct_conf1_mq = {
22920 .read = pci_conf1_mq_read,
22921 .write = pci_conf1_mq_write
22922 };
22923 diff -urNp linux-2.6.32.44/arch/x86/pci/olpc.c linux-2.6.32.44/arch/x86/pci/olpc.c
22924 --- linux-2.6.32.44/arch/x86/pci/olpc.c 2011-03-27 14:31:47.000000000 -0400
22925 +++ linux-2.6.32.44/arch/x86/pci/olpc.c 2011-04-17 15:56:46.000000000 -0400
22926 @@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int s
22927 return 0;
22928 }
22929
22930 -static struct pci_raw_ops pci_olpc_conf = {
22931 +static const struct pci_raw_ops pci_olpc_conf = {
22932 .read = pci_olpc_read,
22933 .write = pci_olpc_write,
22934 };
22935 diff -urNp linux-2.6.32.44/arch/x86/pci/pcbios.c linux-2.6.32.44/arch/x86/pci/pcbios.c
22936 --- linux-2.6.32.44/arch/x86/pci/pcbios.c 2011-03-27 14:31:47.000000000 -0400
22937 +++ linux-2.6.32.44/arch/x86/pci/pcbios.c 2011-04-17 15:56:46.000000000 -0400
22938 @@ -56,50 +56,93 @@ union bios32 {
22939 static struct {
22940 unsigned long address;
22941 unsigned short segment;
22942 -} bios32_indirect = { 0, __KERNEL_CS };
22943 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
22944
22945 /*
22946 * Returns the entry point for the given service, NULL on error
22947 */
22948
22949 -static unsigned long bios32_service(unsigned long service)
22950 +static unsigned long __devinit bios32_service(unsigned long service)
22951 {
22952 unsigned char return_code; /* %al */
22953 unsigned long address; /* %ebx */
22954 unsigned long length; /* %ecx */
22955 unsigned long entry; /* %edx */
22956 unsigned long flags;
22957 + struct desc_struct d, *gdt;
22958
22959 local_irq_save(flags);
22960 - __asm__("lcall *(%%edi); cld"
22961 +
22962 + gdt = get_cpu_gdt_table(smp_processor_id());
22963 +
22964 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
22965 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
22966 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
22967 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
22968 +
22969 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
22970 : "=a" (return_code),
22971 "=b" (address),
22972 "=c" (length),
22973 "=d" (entry)
22974 : "0" (service),
22975 "1" (0),
22976 - "D" (&bios32_indirect));
22977 + "D" (&bios32_indirect),
22978 + "r"(__PCIBIOS_DS)
22979 + : "memory");
22980 +
22981 + pax_open_kernel();
22982 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
22983 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
22984 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
22985 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
22986 + pax_close_kernel();
22987 +
22988 local_irq_restore(flags);
22989
22990 switch (return_code) {
22991 - case 0:
22992 - return address + entry;
22993 - case 0x80: /* Not present */
22994 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
22995 - return 0;
22996 - default: /* Shouldn't happen */
22997 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
22998 - service, return_code);
22999 + case 0: {
23000 + int cpu;
23001 + unsigned char flags;
23002 +
23003 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
23004 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
23005 + printk(KERN_WARNING "bios32_service: not valid\n");
23006 return 0;
23007 + }
23008 + address = address + PAGE_OFFSET;
23009 + length += 16UL; /* some BIOSs underreport this... */
23010 + flags = 4;
23011 + if (length >= 64*1024*1024) {
23012 + length >>= PAGE_SHIFT;
23013 + flags |= 8;
23014 + }
23015 +
23016 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
23017 + gdt = get_cpu_gdt_table(cpu);
23018 + pack_descriptor(&d, address, length, 0x9b, flags);
23019 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23020 + pack_descriptor(&d, address, length, 0x93, flags);
23021 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23022 + }
23023 + return entry;
23024 + }
23025 + case 0x80: /* Not present */
23026 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23027 + return 0;
23028 + default: /* Shouldn't happen */
23029 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23030 + service, return_code);
23031 + return 0;
23032 }
23033 }
23034
23035 static struct {
23036 unsigned long address;
23037 unsigned short segment;
23038 -} pci_indirect = { 0, __KERNEL_CS };
23039 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
23040
23041 -static int pci_bios_present;
23042 +static int pci_bios_present __read_only;
23043
23044 static int __devinit check_pcibios(void)
23045 {
23046 @@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
23047 unsigned long flags, pcibios_entry;
23048
23049 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
23050 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
23051 + pci_indirect.address = pcibios_entry;
23052
23053 local_irq_save(flags);
23054 - __asm__(
23055 - "lcall *(%%edi); cld\n\t"
23056 + __asm__("movw %w6, %%ds\n\t"
23057 + "lcall *%%ss:(%%edi); cld\n\t"
23058 + "push %%ss\n\t"
23059 + "pop %%ds\n\t"
23060 "jc 1f\n\t"
23061 "xor %%ah, %%ah\n"
23062 "1:"
23063 @@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
23064 "=b" (ebx),
23065 "=c" (ecx)
23066 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
23067 - "D" (&pci_indirect)
23068 + "D" (&pci_indirect),
23069 + "r" (__PCIBIOS_DS)
23070 : "memory");
23071 local_irq_restore(flags);
23072
23073 @@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int se
23074
23075 switch (len) {
23076 case 1:
23077 - __asm__("lcall *(%%esi); cld\n\t"
23078 + __asm__("movw %w6, %%ds\n\t"
23079 + "lcall *%%ss:(%%esi); cld\n\t"
23080 + "push %%ss\n\t"
23081 + "pop %%ds\n\t"
23082 "jc 1f\n\t"
23083 "xor %%ah, %%ah\n"
23084 "1:"
23085 @@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int se
23086 : "1" (PCIBIOS_READ_CONFIG_BYTE),
23087 "b" (bx),
23088 "D" ((long)reg),
23089 - "S" (&pci_indirect));
23090 + "S" (&pci_indirect),
23091 + "r" (__PCIBIOS_DS));
23092 /*
23093 * Zero-extend the result beyond 8 bits, do not trust the
23094 * BIOS having done it:
23095 @@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int se
23096 *value &= 0xff;
23097 break;
23098 case 2:
23099 - __asm__("lcall *(%%esi); cld\n\t"
23100 + __asm__("movw %w6, %%ds\n\t"
23101 + "lcall *%%ss:(%%esi); cld\n\t"
23102 + "push %%ss\n\t"
23103 + "pop %%ds\n\t"
23104 "jc 1f\n\t"
23105 "xor %%ah, %%ah\n"
23106 "1:"
23107 @@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int se
23108 : "1" (PCIBIOS_READ_CONFIG_WORD),
23109 "b" (bx),
23110 "D" ((long)reg),
23111 - "S" (&pci_indirect));
23112 + "S" (&pci_indirect),
23113 + "r" (__PCIBIOS_DS));
23114 /*
23115 * Zero-extend the result beyond 16 bits, do not trust the
23116 * BIOS having done it:
23117 @@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int se
23118 *value &= 0xffff;
23119 break;
23120 case 4:
23121 - __asm__("lcall *(%%esi); cld\n\t"
23122 + __asm__("movw %w6, %%ds\n\t"
23123 + "lcall *%%ss:(%%esi); cld\n\t"
23124 + "push %%ss\n\t"
23125 + "pop %%ds\n\t"
23126 "jc 1f\n\t"
23127 "xor %%ah, %%ah\n"
23128 "1:"
23129 @@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int se
23130 : "1" (PCIBIOS_READ_CONFIG_DWORD),
23131 "b" (bx),
23132 "D" ((long)reg),
23133 - "S" (&pci_indirect));
23134 + "S" (&pci_indirect),
23135 + "r" (__PCIBIOS_DS));
23136 break;
23137 }
23138
23139 @@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int s
23140
23141 switch (len) {
23142 case 1:
23143 - __asm__("lcall *(%%esi); cld\n\t"
23144 + __asm__("movw %w6, %%ds\n\t"
23145 + "lcall *%%ss:(%%esi); cld\n\t"
23146 + "push %%ss\n\t"
23147 + "pop %%ds\n\t"
23148 "jc 1f\n\t"
23149 "xor %%ah, %%ah\n"
23150 "1:"
23151 @@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int s
23152 "c" (value),
23153 "b" (bx),
23154 "D" ((long)reg),
23155 - "S" (&pci_indirect));
23156 + "S" (&pci_indirect),
23157 + "r" (__PCIBIOS_DS));
23158 break;
23159 case 2:
23160 - __asm__("lcall *(%%esi); cld\n\t"
23161 + __asm__("movw %w6, %%ds\n\t"
23162 + "lcall *%%ss:(%%esi); cld\n\t"
23163 + "push %%ss\n\t"
23164 + "pop %%ds\n\t"
23165 "jc 1f\n\t"
23166 "xor %%ah, %%ah\n"
23167 "1:"
23168 @@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int s
23169 "c" (value),
23170 "b" (bx),
23171 "D" ((long)reg),
23172 - "S" (&pci_indirect));
23173 + "S" (&pci_indirect),
23174 + "r" (__PCIBIOS_DS));
23175 break;
23176 case 4:
23177 - __asm__("lcall *(%%esi); cld\n\t"
23178 + __asm__("movw %w6, %%ds\n\t"
23179 + "lcall *%%ss:(%%esi); cld\n\t"
23180 + "push %%ss\n\t"
23181 + "pop %%ds\n\t"
23182 "jc 1f\n\t"
23183 "xor %%ah, %%ah\n"
23184 "1:"
23185 @@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int s
23186 "c" (value),
23187 "b" (bx),
23188 "D" ((long)reg),
23189 - "S" (&pci_indirect));
23190 + "S" (&pci_indirect),
23191 + "r" (__PCIBIOS_DS));
23192 break;
23193 }
23194
23195 @@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int s
23196 * Function table for BIOS32 access
23197 */
23198
23199 -static struct pci_raw_ops pci_bios_access = {
23200 +static const struct pci_raw_ops pci_bios_access = {
23201 .read = pci_bios_read,
23202 .write = pci_bios_write
23203 };
23204 @@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_acces
23205 * Try to find PCI BIOS.
23206 */
23207
23208 -static struct pci_raw_ops * __devinit pci_find_bios(void)
23209 +static const struct pci_raw_ops * __devinit pci_find_bios(void)
23210 {
23211 union bios32 *check;
23212 unsigned char sum;
23213 @@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_i
23214
23215 DBG("PCI: Fetching IRQ routing table... ");
23216 __asm__("push %%es\n\t"
23217 + "movw %w8, %%ds\n\t"
23218 "push %%ds\n\t"
23219 "pop %%es\n\t"
23220 - "lcall *(%%esi); cld\n\t"
23221 + "lcall *%%ss:(%%esi); cld\n\t"
23222 "pop %%es\n\t"
23223 + "push %%ss\n\t"
23224 + "pop %%ds\n"
23225 "jc 1f\n\t"
23226 "xor %%ah, %%ah\n"
23227 "1:"
23228 @@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_i
23229 "1" (0),
23230 "D" ((long) &opt),
23231 "S" (&pci_indirect),
23232 - "m" (opt)
23233 + "m" (opt),
23234 + "r" (__PCIBIOS_DS)
23235 : "memory");
23236 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
23237 if (ret & 0xff00)
23238 @@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_d
23239 {
23240 int ret;
23241
23242 - __asm__("lcall *(%%esi); cld\n\t"
23243 + __asm__("movw %w5, %%ds\n\t"
23244 + "lcall *%%ss:(%%esi); cld\n\t"
23245 + "push %%ss\n\t"
23246 + "pop %%ds\n"
23247 "jc 1f\n\t"
23248 "xor %%ah, %%ah\n"
23249 "1:"
23250 @@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_d
23251 : "0" (PCIBIOS_SET_PCI_HW_INT),
23252 "b" ((dev->bus->number << 8) | dev->devfn),
23253 "c" ((irq << 8) | (pin + 10)),
23254 - "S" (&pci_indirect));
23255 + "S" (&pci_indirect),
23256 + "r" (__PCIBIOS_DS));
23257 return !(ret & 0xff00);
23258 }
23259 EXPORT_SYMBOL(pcibios_set_irq_routing);
23260 diff -urNp linux-2.6.32.44/arch/x86/power/cpu.c linux-2.6.32.44/arch/x86/power/cpu.c
23261 --- linux-2.6.32.44/arch/x86/power/cpu.c 2011-03-27 14:31:47.000000000 -0400
23262 +++ linux-2.6.32.44/arch/x86/power/cpu.c 2011-04-17 15:56:46.000000000 -0400
23263 @@ -129,7 +129,7 @@ static void do_fpu_end(void)
23264 static void fix_processor_context(void)
23265 {
23266 int cpu = smp_processor_id();
23267 - struct tss_struct *t = &per_cpu(init_tss, cpu);
23268 + struct tss_struct *t = init_tss + cpu;
23269
23270 set_tss_desc(cpu, t); /*
23271 * This just modifies memory; should not be
23272 @@ -139,7 +139,9 @@ static void fix_processor_context(void)
23273 */
23274
23275 #ifdef CONFIG_X86_64
23276 + pax_open_kernel();
23277 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
23278 + pax_close_kernel();
23279
23280 syscall_init(); /* This sets MSR_*STAR and related */
23281 #endif
23282 diff -urNp linux-2.6.32.44/arch/x86/vdso/Makefile linux-2.6.32.44/arch/x86/vdso/Makefile
23283 --- linux-2.6.32.44/arch/x86/vdso/Makefile 2011-03-27 14:31:47.000000000 -0400
23284 +++ linux-2.6.32.44/arch/x86/vdso/Makefile 2011-04-17 15:56:46.000000000 -0400
23285 @@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
23286 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
23287 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
23288
23289 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
23290 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
23291 GCOV_PROFILE := n
23292
23293 #
23294 diff -urNp linux-2.6.32.44/arch/x86/vdso/vclock_gettime.c linux-2.6.32.44/arch/x86/vdso/vclock_gettime.c
23295 --- linux-2.6.32.44/arch/x86/vdso/vclock_gettime.c 2011-03-27 14:31:47.000000000 -0400
23296 +++ linux-2.6.32.44/arch/x86/vdso/vclock_gettime.c 2011-04-17 15:56:46.000000000 -0400
23297 @@ -22,24 +22,48 @@
23298 #include <asm/hpet.h>
23299 #include <asm/unistd.h>
23300 #include <asm/io.h>
23301 +#include <asm/fixmap.h>
23302 #include "vextern.h"
23303
23304 #define gtod vdso_vsyscall_gtod_data
23305
23306 +notrace noinline long __vdso_fallback_time(long *t)
23307 +{
23308 + long secs;
23309 + asm volatile("syscall"
23310 + : "=a" (secs)
23311 + : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
23312 + return secs;
23313 +}
23314 +
23315 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
23316 {
23317 long ret;
23318 asm("syscall" : "=a" (ret) :
23319 - "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
23320 + "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
23321 return ret;
23322 }
23323
23324 +notrace static inline cycle_t __vdso_vread_hpet(void)
23325 +{
23326 + return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
23327 +}
23328 +
23329 +notrace static inline cycle_t __vdso_vread_tsc(void)
23330 +{
23331 + cycle_t ret = (cycle_t)vget_cycles();
23332 +
23333 + return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
23334 +}
23335 +
23336 notrace static inline long vgetns(void)
23337 {
23338 long v;
23339 - cycles_t (*vread)(void);
23340 - vread = gtod->clock.vread;
23341 - v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
23342 + if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
23343 + v = __vdso_vread_tsc();
23344 + else
23345 + v = __vdso_vread_hpet();
23346 + v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
23347 return (v * gtod->clock.mult) >> gtod->clock.shift;
23348 }
23349
23350 @@ -113,7 +137,9 @@ notrace static noinline int do_monotonic
23351
23352 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
23353 {
23354 - if (likely(gtod->sysctl_enabled))
23355 + if (likely(gtod->sysctl_enabled &&
23356 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
23357 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
23358 switch (clock) {
23359 case CLOCK_REALTIME:
23360 if (likely(gtod->clock.vread))
23361 @@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid
23362 int clock_gettime(clockid_t, struct timespec *)
23363 __attribute__((weak, alias("__vdso_clock_gettime")));
23364
23365 -notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
23366 +notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
23367 {
23368 long ret;
23369 - if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
23370 + asm("syscall" : "=a" (ret) :
23371 + "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
23372 + return ret;
23373 +}
23374 +
23375 +notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
23376 +{
23377 + if (likely(gtod->sysctl_enabled &&
23378 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
23379 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
23380 + {
23381 if (likely(tv != NULL)) {
23382 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
23383 offsetof(struct timespec, tv_nsec) ||
23384 @@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct t
23385 }
23386 return 0;
23387 }
23388 - asm("syscall" : "=a" (ret) :
23389 - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
23390 - return ret;
23391 + return __vdso_fallback_gettimeofday(tv, tz);
23392 }
23393 int gettimeofday(struct timeval *, struct timezone *)
23394 __attribute__((weak, alias("__vdso_gettimeofday")));
23395 diff -urNp linux-2.6.32.44/arch/x86/vdso/vdso32-setup.c linux-2.6.32.44/arch/x86/vdso/vdso32-setup.c
23396 --- linux-2.6.32.44/arch/x86/vdso/vdso32-setup.c 2011-03-27 14:31:47.000000000 -0400
23397 +++ linux-2.6.32.44/arch/x86/vdso/vdso32-setup.c 2011-04-23 12:56:10.000000000 -0400
23398 @@ -25,6 +25,7 @@
23399 #include <asm/tlbflush.h>
23400 #include <asm/vdso.h>
23401 #include <asm/proto.h>
23402 +#include <asm/mman.h>
23403
23404 enum {
23405 VDSO_DISABLED = 0,
23406 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
23407 void enable_sep_cpu(void)
23408 {
23409 int cpu = get_cpu();
23410 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
23411 + struct tss_struct *tss = init_tss + cpu;
23412
23413 if (!boot_cpu_has(X86_FEATURE_SEP)) {
23414 put_cpu();
23415 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
23416 gate_vma.vm_start = FIXADDR_USER_START;
23417 gate_vma.vm_end = FIXADDR_USER_END;
23418 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
23419 - gate_vma.vm_page_prot = __P101;
23420 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
23421 /*
23422 * Make sure the vDSO gets into every core dump.
23423 * Dumping its contents makes post-mortem fully interpretable later
23424 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
23425 if (compat)
23426 addr = VDSO_HIGH_BASE;
23427 else {
23428 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
23429 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
23430 if (IS_ERR_VALUE(addr)) {
23431 ret = addr;
23432 goto up_fail;
23433 }
23434 }
23435
23436 - current->mm->context.vdso = (void *)addr;
23437 + current->mm->context.vdso = addr;
23438
23439 if (compat_uses_vma || !compat) {
23440 /*
23441 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
23442 }
23443
23444 current_thread_info()->sysenter_return =
23445 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
23446 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
23447
23448 up_fail:
23449 if (ret)
23450 - current->mm->context.vdso = NULL;
23451 + current->mm->context.vdso = 0;
23452
23453 up_write(&mm->mmap_sem);
23454
23455 @@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
23456
23457 const char *arch_vma_name(struct vm_area_struct *vma)
23458 {
23459 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
23460 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
23461 return "[vdso]";
23462 +
23463 +#ifdef CONFIG_PAX_SEGMEXEC
23464 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
23465 + return "[vdso]";
23466 +#endif
23467 +
23468 return NULL;
23469 }
23470
23471 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
23472 struct mm_struct *mm = tsk->mm;
23473
23474 /* Check to see if this task was created in compat vdso mode */
23475 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
23476 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
23477 return &gate_vma;
23478 return NULL;
23479 }
23480 diff -urNp linux-2.6.32.44/arch/x86/vdso/vdso.lds.S linux-2.6.32.44/arch/x86/vdso/vdso.lds.S
23481 --- linux-2.6.32.44/arch/x86/vdso/vdso.lds.S 2011-03-27 14:31:47.000000000 -0400
23482 +++ linux-2.6.32.44/arch/x86/vdso/vdso.lds.S 2011-06-06 17:35:35.000000000 -0400
23483 @@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
23484 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
23485 #include "vextern.h"
23486 #undef VEXTERN
23487 +
23488 +#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
23489 +VEXTERN(fallback_gettimeofday)
23490 +VEXTERN(fallback_time)
23491 +VEXTERN(getcpu)
23492 +#undef VEXTERN
23493 diff -urNp linux-2.6.32.44/arch/x86/vdso/vextern.h linux-2.6.32.44/arch/x86/vdso/vextern.h
23494 --- linux-2.6.32.44/arch/x86/vdso/vextern.h 2011-03-27 14:31:47.000000000 -0400
23495 +++ linux-2.6.32.44/arch/x86/vdso/vextern.h 2011-04-17 15:56:46.000000000 -0400
23496 @@ -11,6 +11,5 @@
23497 put into vextern.h and be referenced as a pointer with vdso prefix.
23498 The main kernel later fills in the values. */
23499
23500 -VEXTERN(jiffies)
23501 VEXTERN(vgetcpu_mode)
23502 VEXTERN(vsyscall_gtod_data)
23503 diff -urNp linux-2.6.32.44/arch/x86/vdso/vma.c linux-2.6.32.44/arch/x86/vdso/vma.c
23504 --- linux-2.6.32.44/arch/x86/vdso/vma.c 2011-03-27 14:31:47.000000000 -0400
23505 +++ linux-2.6.32.44/arch/x86/vdso/vma.c 2011-04-17 15:56:46.000000000 -0400
23506 @@ -57,7 +57,7 @@ static int __init init_vdso_vars(void)
23507 if (!vbase)
23508 goto oom;
23509
23510 - if (memcmp(vbase, "\177ELF", 4)) {
23511 + if (memcmp(vbase, ELFMAG, SELFMAG)) {
23512 printk("VDSO: I'm broken; not ELF\n");
23513 vdso_enabled = 0;
23514 }
23515 @@ -66,6 +66,7 @@ static int __init init_vdso_vars(void)
23516 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
23517 #include "vextern.h"
23518 #undef VEXTERN
23519 + vunmap(vbase);
23520 return 0;
23521
23522 oom:
23523 @@ -116,7 +117,7 @@ int arch_setup_additional_pages(struct l
23524 goto up_fail;
23525 }
23526
23527 - current->mm->context.vdso = (void *)addr;
23528 + current->mm->context.vdso = addr;
23529
23530 ret = install_special_mapping(mm, addr, vdso_size,
23531 VM_READ|VM_EXEC|
23532 @@ -124,7 +125,7 @@ int arch_setup_additional_pages(struct l
23533 VM_ALWAYSDUMP,
23534 vdso_pages);
23535 if (ret) {
23536 - current->mm->context.vdso = NULL;
23537 + current->mm->context.vdso = 0;
23538 goto up_fail;
23539 }
23540
23541 @@ -132,10 +133,3 @@ up_fail:
23542 up_write(&mm->mmap_sem);
23543 return ret;
23544 }
23545 -
23546 -static __init int vdso_setup(char *s)
23547 -{
23548 - vdso_enabled = simple_strtoul(s, NULL, 0);
23549 - return 0;
23550 -}
23551 -__setup("vdso=", vdso_setup);
23552 diff -urNp linux-2.6.32.44/arch/x86/xen/enlighten.c linux-2.6.32.44/arch/x86/xen/enlighten.c
23553 --- linux-2.6.32.44/arch/x86/xen/enlighten.c 2011-03-27 14:31:47.000000000 -0400
23554 +++ linux-2.6.32.44/arch/x86/xen/enlighten.c 2011-05-22 23:02:03.000000000 -0400
23555 @@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
23556
23557 struct shared_info xen_dummy_shared_info;
23558
23559 -void *xen_initial_gdt;
23560 -
23561 /*
23562 * Point at some empty memory to start with. We map the real shared_info
23563 * page as soon as fixmap is up and running.
23564 @@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_des
23565
23566 preempt_disable();
23567
23568 - start = __get_cpu_var(idt_desc).address;
23569 + start = (unsigned long)__get_cpu_var(idt_desc).address;
23570 end = start + __get_cpu_var(idt_desc).size + 1;
23571
23572 xen_mc_flush();
23573 @@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic
23574 #endif
23575 };
23576
23577 -static void xen_reboot(int reason)
23578 +static __noreturn void xen_reboot(int reason)
23579 {
23580 struct sched_shutdown r = { .reason = reason };
23581
23582 @@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
23583 BUG();
23584 }
23585
23586 -static void xen_restart(char *msg)
23587 +static __noreturn void xen_restart(char *msg)
23588 {
23589 xen_reboot(SHUTDOWN_reboot);
23590 }
23591
23592 -static void xen_emergency_restart(void)
23593 +static __noreturn void xen_emergency_restart(void)
23594 {
23595 xen_reboot(SHUTDOWN_reboot);
23596 }
23597
23598 -static void xen_machine_halt(void)
23599 +static __noreturn void xen_machine_halt(void)
23600 {
23601 xen_reboot(SHUTDOWN_poweroff);
23602 }
23603 @@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(
23604 */
23605 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
23606
23607 -#ifdef CONFIG_X86_64
23608 /* Work out if we support NX */
23609 - check_efer();
23610 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23611 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
23612 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
23613 + unsigned l, h;
23614 +
23615 +#ifdef CONFIG_X86_PAE
23616 + nx_enabled = 1;
23617 +#endif
23618 + __supported_pte_mask |= _PAGE_NX;
23619 + rdmsr(MSR_EFER, l, h);
23620 + l |= EFER_NX;
23621 + wrmsr(MSR_EFER, l, h);
23622 + }
23623 #endif
23624
23625 xen_setup_features();
23626 @@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(
23627
23628 machine_ops = xen_machine_ops;
23629
23630 - /*
23631 - * The only reliable way to retain the initial address of the
23632 - * percpu gdt_page is to remember it here, so we can go and
23633 - * mark it RW later, when the initial percpu area is freed.
23634 - */
23635 - xen_initial_gdt = &per_cpu(gdt_page, 0);
23636 -
23637 xen_smp_init();
23638
23639 pgd = (pgd_t *)xen_start_info->pt_base;
23640 diff -urNp linux-2.6.32.44/arch/x86/xen/mmu.c linux-2.6.32.44/arch/x86/xen/mmu.c
23641 --- linux-2.6.32.44/arch/x86/xen/mmu.c 2011-07-13 17:23:04.000000000 -0400
23642 +++ linux-2.6.32.44/arch/x86/xen/mmu.c 2011-07-13 17:23:18.000000000 -0400
23643 @@ -1719,6 +1719,8 @@ __init pgd_t *xen_setup_kernel_pagetable
23644 convert_pfn_mfn(init_level4_pgt);
23645 convert_pfn_mfn(level3_ident_pgt);
23646 convert_pfn_mfn(level3_kernel_pgt);
23647 + convert_pfn_mfn(level3_vmalloc_pgt);
23648 + convert_pfn_mfn(level3_vmemmap_pgt);
23649
23650 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
23651 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
23652 @@ -1737,7 +1739,10 @@ __init pgd_t *xen_setup_kernel_pagetable
23653 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
23654 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
23655 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
23656 + set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
23657 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
23658 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
23659 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
23660 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
23661 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
23662
23663 diff -urNp linux-2.6.32.44/arch/x86/xen/smp.c linux-2.6.32.44/arch/x86/xen/smp.c
23664 --- linux-2.6.32.44/arch/x86/xen/smp.c 2011-03-27 14:31:47.000000000 -0400
23665 +++ linux-2.6.32.44/arch/x86/xen/smp.c 2011-05-11 18:25:15.000000000 -0400
23666 @@ -167,11 +167,6 @@ static void __init xen_smp_prepare_boot_
23667 {
23668 BUG_ON(smp_processor_id() != 0);
23669 native_smp_prepare_boot_cpu();
23670 -
23671 - /* We've switched to the "real" per-cpu gdt, so make sure the
23672 - old memory can be recycled */
23673 - make_lowmem_page_readwrite(xen_initial_gdt);
23674 -
23675 xen_setup_vcpu_info_placement();
23676 }
23677
23678 @@ -231,12 +226,12 @@ cpu_initialize_context(unsigned int cpu,
23679 gdt = get_cpu_gdt_table(cpu);
23680
23681 ctxt->flags = VGCF_IN_KERNEL;
23682 - ctxt->user_regs.ds = __USER_DS;
23683 - ctxt->user_regs.es = __USER_DS;
23684 + ctxt->user_regs.ds = __KERNEL_DS;
23685 + ctxt->user_regs.es = __KERNEL_DS;
23686 ctxt->user_regs.ss = __KERNEL_DS;
23687 #ifdef CONFIG_X86_32
23688 ctxt->user_regs.fs = __KERNEL_PERCPU;
23689 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
23690 + savesegment(gs, ctxt->user_regs.gs);
23691 #else
23692 ctxt->gs_base_kernel = per_cpu_offset(cpu);
23693 #endif
23694 @@ -287,13 +282,12 @@ static int __cpuinit xen_cpu_up(unsigned
23695 int rc;
23696
23697 per_cpu(current_task, cpu) = idle;
23698 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
23699 #ifdef CONFIG_X86_32
23700 irq_ctx_init(cpu);
23701 #else
23702 clear_tsk_thread_flag(idle, TIF_FORK);
23703 - per_cpu(kernel_stack, cpu) =
23704 - (unsigned long)task_stack_page(idle) -
23705 - KERNEL_STACK_OFFSET + THREAD_SIZE;
23706 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
23707 #endif
23708 xen_setup_runstate_info(cpu);
23709 xen_setup_timer(cpu);
23710 diff -urNp linux-2.6.32.44/arch/x86/xen/xen-asm_32.S linux-2.6.32.44/arch/x86/xen/xen-asm_32.S
23711 --- linux-2.6.32.44/arch/x86/xen/xen-asm_32.S 2011-03-27 14:31:47.000000000 -0400
23712 +++ linux-2.6.32.44/arch/x86/xen/xen-asm_32.S 2011-04-22 19:13:13.000000000 -0400
23713 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
23714 ESP_OFFSET=4 # bytes pushed onto stack
23715
23716 /*
23717 - * Store vcpu_info pointer for easy access. Do it this way to
23718 - * avoid having to reload %fs
23719 + * Store vcpu_info pointer for easy access.
23720 */
23721 #ifdef CONFIG_SMP
23722 - GET_THREAD_INFO(%eax)
23723 - movl TI_cpu(%eax), %eax
23724 - movl __per_cpu_offset(,%eax,4), %eax
23725 - mov per_cpu__xen_vcpu(%eax), %eax
23726 + push %fs
23727 + mov $(__KERNEL_PERCPU), %eax
23728 + mov %eax, %fs
23729 + mov PER_CPU_VAR(xen_vcpu), %eax
23730 + pop %fs
23731 #else
23732 movl per_cpu__xen_vcpu, %eax
23733 #endif
23734 diff -urNp linux-2.6.32.44/arch/x86/xen/xen-head.S linux-2.6.32.44/arch/x86/xen/xen-head.S
23735 --- linux-2.6.32.44/arch/x86/xen/xen-head.S 2011-03-27 14:31:47.000000000 -0400
23736 +++ linux-2.6.32.44/arch/x86/xen/xen-head.S 2011-04-17 15:56:46.000000000 -0400
23737 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
23738 #ifdef CONFIG_X86_32
23739 mov %esi,xen_start_info
23740 mov $init_thread_union+THREAD_SIZE,%esp
23741 +#ifdef CONFIG_SMP
23742 + movl $cpu_gdt_table,%edi
23743 + movl $__per_cpu_load,%eax
23744 + movw %ax,__KERNEL_PERCPU + 2(%edi)
23745 + rorl $16,%eax
23746 + movb %al,__KERNEL_PERCPU + 4(%edi)
23747 + movb %ah,__KERNEL_PERCPU + 7(%edi)
23748 + movl $__per_cpu_end - 1,%eax
23749 + subl $__per_cpu_start,%eax
23750 + movw %ax,__KERNEL_PERCPU + 0(%edi)
23751 +#endif
23752 #else
23753 mov %rsi,xen_start_info
23754 mov $init_thread_union+THREAD_SIZE,%rsp
23755 diff -urNp linux-2.6.32.44/arch/x86/xen/xen-ops.h linux-2.6.32.44/arch/x86/xen/xen-ops.h
23756 --- linux-2.6.32.44/arch/x86/xen/xen-ops.h 2011-03-27 14:31:47.000000000 -0400
23757 +++ linux-2.6.32.44/arch/x86/xen/xen-ops.h 2011-04-17 15:56:46.000000000 -0400
23758 @@ -10,8 +10,6 @@
23759 extern const char xen_hypervisor_callback[];
23760 extern const char xen_failsafe_callback[];
23761
23762 -extern void *xen_initial_gdt;
23763 -
23764 struct trap_info;
23765 void xen_copy_trap_info(struct trap_info *traps);
23766
23767 diff -urNp linux-2.6.32.44/block/blk-integrity.c linux-2.6.32.44/block/blk-integrity.c
23768 --- linux-2.6.32.44/block/blk-integrity.c 2011-03-27 14:31:47.000000000 -0400
23769 +++ linux-2.6.32.44/block/blk-integrity.c 2011-04-17 15:56:46.000000000 -0400
23770 @@ -278,7 +278,7 @@ static struct attribute *integrity_attrs
23771 NULL,
23772 };
23773
23774 -static struct sysfs_ops integrity_ops = {
23775 +static const struct sysfs_ops integrity_ops = {
23776 .show = &integrity_attr_show,
23777 .store = &integrity_attr_store,
23778 };
23779 diff -urNp linux-2.6.32.44/block/blk-iopoll.c linux-2.6.32.44/block/blk-iopoll.c
23780 --- linux-2.6.32.44/block/blk-iopoll.c 2011-03-27 14:31:47.000000000 -0400
23781 +++ linux-2.6.32.44/block/blk-iopoll.c 2011-04-17 15:56:46.000000000 -0400
23782 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
23783 }
23784 EXPORT_SYMBOL(blk_iopoll_complete);
23785
23786 -static void blk_iopoll_softirq(struct softirq_action *h)
23787 +static void blk_iopoll_softirq(void)
23788 {
23789 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
23790 int rearm = 0, budget = blk_iopoll_budget;
23791 diff -urNp linux-2.6.32.44/block/blk-map.c linux-2.6.32.44/block/blk-map.c
23792 --- linux-2.6.32.44/block/blk-map.c 2011-03-27 14:31:47.000000000 -0400
23793 +++ linux-2.6.32.44/block/blk-map.c 2011-04-18 16:57:33.000000000 -0400
23794 @@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct requ
23795 * direct dma. else, set up kernel bounce buffers
23796 */
23797 uaddr = (unsigned long) ubuf;
23798 - if (blk_rq_aligned(q, ubuf, len) && !map_data)
23799 + if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
23800 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
23801 else
23802 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
23803 @@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_q
23804 for (i = 0; i < iov_count; i++) {
23805 unsigned long uaddr = (unsigned long)iov[i].iov_base;
23806
23807 + if (!iov[i].iov_len)
23808 + return -EINVAL;
23809 +
23810 if (uaddr & queue_dma_alignment(q)) {
23811 unaligned = 1;
23812 break;
23813 }
23814 - if (!iov[i].iov_len)
23815 - return -EINVAL;
23816 }
23817
23818 if (unaligned || (q->dma_pad_mask & len) || map_data)
23819 @@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue
23820 if (!len || !kbuf)
23821 return -EINVAL;
23822
23823 - do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
23824 + do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
23825 if (do_copy)
23826 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
23827 else
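Two independent fixes in blk-map.c: the (__force void *) cast only silences the sparse address-space warning when the __user pointer is handed to blk_rq_aligned() for an arithmetic check, and the zero-length test is hoisted above the alignment test so an empty iovec is rejected with -EINVAL before the unaligned case can break out of the loop and fall through to the copy path. A compact sketch of the reordered validation, using a hypothetical helper:

    #include <linux/blkdev.h>
    #include <scsi/sg.h>

    /* Sketch: every segment is checked for a zero length before the
     * alignment test is allowed to terminate the scan. */
    static int demo_iov_precheck(struct request_queue *q,
                                 const struct sg_iovec *iov, int iov_count,
                                 int *unaligned)
    {
            int i;

            for (i = 0; i < iov_count; i++) {
                    unsigned long uaddr = (unsigned long)iov[i].iov_base;

                    if (!iov[i].iov_len)
                            return -EINVAL;     /* reject empty segments first */

                    if (uaddr & queue_dma_alignment(q)) {
                            *unaligned = 1;     /* caller uses the copy path */
                            break;
                    }
            }
            return 0;
    }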
23828 diff -urNp linux-2.6.32.44/block/blk-softirq.c linux-2.6.32.44/block/blk-softirq.c
23829 --- linux-2.6.32.44/block/blk-softirq.c 2011-03-27 14:31:47.000000000 -0400
23830 +++ linux-2.6.32.44/block/blk-softirq.c 2011-04-17 15:56:46.000000000 -0400
23831 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
23832 * Softirq action handler - move entries to local list and loop over them
23833 * while passing them to the queue registered handler.
23834 */
23835 -static void blk_done_softirq(struct softirq_action *h)
23836 +static void blk_done_softirq(void)
23837 {
23838 struct list_head *cpu_list, local_list;
23839
23840 diff -urNp linux-2.6.32.44/block/blk-sysfs.c linux-2.6.32.44/block/blk-sysfs.c
23841 --- linux-2.6.32.44/block/blk-sysfs.c 2011-05-10 22:12:01.000000000 -0400
23842 +++ linux-2.6.32.44/block/blk-sysfs.c 2011-05-10 22:12:26.000000000 -0400
23843 @@ -414,7 +414,7 @@ static void blk_release_queue(struct kob
23844 kmem_cache_free(blk_requestq_cachep, q);
23845 }
23846
23847 -static struct sysfs_ops queue_sysfs_ops = {
23848 +static const struct sysfs_ops queue_sysfs_ops = {
23849 .show = queue_attr_show,
23850 .store = queue_attr_store,
23851 };
23852 diff -urNp linux-2.6.32.44/block/bsg.c linux-2.6.32.44/block/bsg.c
23853 --- linux-2.6.32.44/block/bsg.c 2011-03-27 14:31:47.000000000 -0400
23854 +++ linux-2.6.32.44/block/bsg.c 2011-04-17 15:56:46.000000000 -0400
23855 @@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
23856 struct sg_io_v4 *hdr, struct bsg_device *bd,
23857 fmode_t has_write_perm)
23858 {
23859 + unsigned char tmpcmd[sizeof(rq->__cmd)];
23860 + unsigned char *cmdptr;
23861 +
23862 if (hdr->request_len > BLK_MAX_CDB) {
23863 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
23864 if (!rq->cmd)
23865 return -ENOMEM;
23866 - }
23867 + cmdptr = rq->cmd;
23868 + } else
23869 + cmdptr = tmpcmd;
23870
23871 - if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
23872 + if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
23873 hdr->request_len))
23874 return -EFAULT;
23875
23876 + if (cmdptr != rq->cmd)
23877 + memcpy(rq->cmd, cmdptr, hdr->request_len);
23878 +
23879 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
23880 if (blk_verify_command(rq->cmd, has_write_perm))
23881 return -EPERM;
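Rather than copy_from_user()ing the CDB straight into rq->cmd, which for short commands points at the __cmd array embedded in struct request, the code now copies into either the separately kzalloc()ed buffer or a correctly sized on-stack temporary and then memcpy()s into place. Presumably this keeps the destination of the user copy an object whose bounds PaX's usercopy checking can verify, rather than a pointer into the middle of a larger structure; block/scsi_ioctl.c below gets the same treatment twice. A condensed sketch of the idiom (hypothetical helper, simplified bounds handling):

    #include <linux/blkdev.h>
    #include <linux/string.h>
    #include <linux/uaccess.h>

    static int demo_fill_cmd_from_user(struct request *rq,
                                       const void __user *ucmd,
                                       unsigned int len)
    {
            unsigned char tmpcmd[sizeof(rq->__cmd)];
            unsigned char *cmdptr;

            if (rq->cmd != rq->__cmd)       /* long CDB, separately allocated */
                    cmdptr = rq->cmd;
            else                            /* short CDB, bounce via the stack */
                    cmdptr = tmpcmd;

            if (cmdptr == tmpcmd && len > sizeof(tmpcmd))
                    return -EINVAL;         /* callers are expected to bound len */

            if (copy_from_user(cmdptr, ucmd, len))
                    return -EFAULT;

            if (cmdptr != rq->cmd)
                    memcpy(rq->cmd, cmdptr, len);

            return 0;
    }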
23882 diff -urNp linux-2.6.32.44/block/elevator.c linux-2.6.32.44/block/elevator.c
23883 --- linux-2.6.32.44/block/elevator.c 2011-03-27 14:31:47.000000000 -0400
23884 +++ linux-2.6.32.44/block/elevator.c 2011-04-17 15:56:46.000000000 -0400
23885 @@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, str
23886 return error;
23887 }
23888
23889 -static struct sysfs_ops elv_sysfs_ops = {
23890 +static const struct sysfs_ops elv_sysfs_ops = {
23891 .show = elv_attr_show,
23892 .store = elv_attr_store,
23893 };
23894 diff -urNp linux-2.6.32.44/block/scsi_ioctl.c linux-2.6.32.44/block/scsi_ioctl.c
23895 --- linux-2.6.32.44/block/scsi_ioctl.c 2011-03-27 14:31:47.000000000 -0400
23896 +++ linux-2.6.32.44/block/scsi_ioctl.c 2011-04-23 13:28:22.000000000 -0400
23897 @@ -220,8 +220,20 @@ EXPORT_SYMBOL(blk_verify_command);
23898 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
23899 struct sg_io_hdr *hdr, fmode_t mode)
23900 {
23901 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
23902 + unsigned char tmpcmd[sizeof(rq->__cmd)];
23903 + unsigned char *cmdptr;
23904 +
23905 + if (rq->cmd != rq->__cmd)
23906 + cmdptr = rq->cmd;
23907 + else
23908 + cmdptr = tmpcmd;
23909 +
23910 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
23911 return -EFAULT;
23912 +
23913 + if (cmdptr != rq->cmd)
23914 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
23915 +
23916 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
23917 return -EPERM;
23918
23919 @@ -430,6 +442,8 @@ int sg_scsi_ioctl(struct request_queue *
23920 int err;
23921 unsigned int in_len, out_len, bytes, opcode, cmdlen;
23922 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
23923 + unsigned char tmpcmd[sizeof(rq->__cmd)];
23924 + unsigned char *cmdptr;
23925
23926 if (!sic)
23927 return -EINVAL;
23928 @@ -463,9 +477,18 @@ int sg_scsi_ioctl(struct request_queue *
23929 */
23930 err = -EFAULT;
23931 rq->cmd_len = cmdlen;
23932 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
23933 +
23934 + if (rq->cmd != rq->__cmd)
23935 + cmdptr = rq->cmd;
23936 + else
23937 + cmdptr = tmpcmd;
23938 +
23939 + if (copy_from_user(cmdptr, sic->data, cmdlen))
23940 goto error;
23941
23942 + if (rq->cmd != cmdptr)
23943 + memcpy(rq->cmd, cmdptr, cmdlen);
23944 +
23945 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
23946 goto error;
23947
23948 diff -urNp linux-2.6.32.44/crypto/cryptd.c linux-2.6.32.44/crypto/cryptd.c
23949 --- linux-2.6.32.44/crypto/cryptd.c 2011-03-27 14:31:47.000000000 -0400
23950 +++ linux-2.6.32.44/crypto/cryptd.c 2011-08-05 20:33:55.000000000 -0400
23951 @@ -214,7 +214,7 @@ static int cryptd_blkcipher_enqueue(stru
23952 struct cryptd_queue *queue;
23953
23954 queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
23955 - rctx->complete = req->base.complete;
23956 + *(void **)&rctx->complete = req->base.complete;
23957 req->base.complete = complete;
23958
23959 return cryptd_enqueue_request(queue, &req->base);
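rctx->complete is an ordinary function-pointer member, so the extra *(void **)& cast here is only needed if the surrounding context structure has been made read-only to normal assignment, presumably by this patch's automatic constification of function-pointer-only structs. The idiom in isolation, with made-up types:

    /* Illustrative only: forcing a store to a function-pointer member by
     * writing through a void ** alias.  Relies on function and data
     * pointers sharing a representation, which holds on the architectures
     * the kernel supports. */
    typedef void (*demo_completion_fn)(void *data, int err);

    struct demo_rctx {
            demo_completion_fn complete;
    };

    static void demo_save_completion(struct demo_rctx *rctx,
                                     demo_completion_fn fn)
    {
            *(void **)&rctx->complete = fn;
    }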
23960 diff -urNp linux-2.6.32.44/crypto/gf128mul.c linux-2.6.32.44/crypto/gf128mul.c
23961 --- linux-2.6.32.44/crypto/gf128mul.c 2011-03-27 14:31:47.000000000 -0400
23962 +++ linux-2.6.32.44/crypto/gf128mul.c 2011-07-06 19:53:33.000000000 -0400
23963 @@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
23964 for (i = 0; i < 7; ++i)
23965 gf128mul_x_lle(&p[i + 1], &p[i]);
23966
23967 - memset(r, 0, sizeof(r));
23968 + memset(r, 0, sizeof(*r));
23969 for (i = 0;;) {
23970 u8 ch = ((u8 *)b)[15 - i];
23971
23972 @@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
23973 for (i = 0; i < 7; ++i)
23974 gf128mul_x_bbe(&p[i + 1], &p[i]);
23975
23976 - memset(r, 0, sizeof(r));
23977 + memset(r, 0, sizeof(*r));
23978 for (i = 0;;) {
23979 u8 ch = ((u8 *)b)[i];
23980
23981 diff -urNp linux-2.6.32.44/crypto/md5.c linux-2.6.32.44/crypto/md5.c
23982 --- linux-2.6.32.44/crypto/md5.c 2011-03-27 14:31:47.000000000 -0400
23983 +++ linux-2.6.32.44/crypto/md5.c 2011-08-07 19:48:09.000000000 -0400
23984 @@ -20,6 +20,7 @@
23985 #include <linux/module.h>
23986 #include <linux/string.h>
23987 #include <linux/types.h>
23988 +#include <linux/cryptohash.h>
23989 #include <asm/byteorder.h>
23990
23991 #define MD5_DIGEST_SIZE 16
23992 @@ -27,103 +28,12 @@
23993 #define MD5_BLOCK_WORDS 16
23994 #define MD5_HASH_WORDS 4
23995
23996 -#define F1(x, y, z) (z ^ (x & (y ^ z)))
23997 -#define F2(x, y, z) F1(z, x, y)
23998 -#define F3(x, y, z) (x ^ y ^ z)
23999 -#define F4(x, y, z) (y ^ (x | ~z))
24000 -
24001 -#define MD5STEP(f, w, x, y, z, in, s) \
24002 - (w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x)
24003 -
24004 struct md5_ctx {
24005 u32 hash[MD5_HASH_WORDS];
24006 u32 block[MD5_BLOCK_WORDS];
24007 u64 byte_count;
24008 };
24009
24010 -static void md5_transform(u32 *hash, u32 const *in)
24011 -{
24012 - u32 a, b, c, d;
24013 -
24014 - a = hash[0];
24015 - b = hash[1];
24016 - c = hash[2];
24017 - d = hash[3];
24018 -
24019 - MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
24020 - MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
24021 - MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
24022 - MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
24023 - MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
24024 - MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
24025 - MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
24026 - MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
24027 - MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
24028 - MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
24029 - MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
24030 - MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
24031 - MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
24032 - MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
24033 - MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
24034 - MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
24035 -
24036 - MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
24037 - MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
24038 - MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
24039 - MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
24040 - MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
24041 - MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
24042 - MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
24043 - MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
24044 - MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
24045 - MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
24046 - MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
24047 - MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
24048 - MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
24049 - MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
24050 - MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
24051 - MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
24052 -
24053 - MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
24054 - MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
24055 - MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
24056 - MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
24057 - MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
24058 - MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
24059 - MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
24060 - MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
24061 - MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
24062 - MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
24063 - MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
24064 - MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
24065 - MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
24066 - MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
24067 - MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
24068 - MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
24069 -
24070 - MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
24071 - MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
24072 - MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
24073 - MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
24074 - MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
24075 - MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
24076 - MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
24077 - MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
24078 - MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
24079 - MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
24080 - MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
24081 - MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
24082 - MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
24083 - MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
24084 - MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
24085 - MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
24086 -
24087 - hash[0] += a;
24088 - hash[1] += b;
24089 - hash[2] += c;
24090 - hash[3] += d;
24091 -}
24092 -
24093 /* XXX: this stuff can be optimized */
24094 static inline void le32_to_cpu_array(u32 *buf, unsigned int words)
24095 {
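crypto/md5.c drops its private MD5 round macros and md5_transform() and includes <linux/cryptohash.h> instead. That only links if the patch provides a shared md5_transform() elsewhere (later mainline kernels keep one in lib/) and declares it in that header; neither piece is visible in this excerpt, so the declaration below is an assumption about what the header gains, not a quote from the patch:

    #include <linux/types.h>

    /* Assumed addition to include/linux/cryptohash.h elsewhere in the
     * patch: 4-word hash state, 16-word (64-byte) message block. */
    #define MD5_DIGEST_WORDS   4
    #define MD5_MESSAGE_BYTES  64

    void md5_transform(__u32 *hash, __u32 const *in);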
24096 diff -urNp linux-2.6.32.44/crypto/serpent.c linux-2.6.32.44/crypto/serpent.c
24097 --- linux-2.6.32.44/crypto/serpent.c 2011-03-27 14:31:47.000000000 -0400
24098 +++ linux-2.6.32.44/crypto/serpent.c 2011-05-16 21:46:57.000000000 -0400
24099 @@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
24100 u32 r0,r1,r2,r3,r4;
24101 int i;
24102
24103 + pax_track_stack();
24104 +
24105 /* Copy key, add padding */
24106
24107 for (i = 0; i < keylen; ++i)
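serpent_setkey() keeps a large expanded-key working area on the stack, and the added pax_track_stack() call is the hook PaX's stack-tracking instrumentation uses to note such deep stack usage so the kernel stack can be sanitized afterwards (ata_eh_report() in libata-eh.c later in this patch gets the same call). The helper is defined by other parts of the patch; when the feature is configured out it is presumably a no-op along these lines, which is a guess rather than the patch's actual definition:

    /* Hypothetical disabled-case fallback, not taken from this patch. */
    #ifndef CONFIG_PAX_MEMORY_STACKLEAK
    #define pax_track_stack() do { } while (0)
    #endif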
24108 diff -urNp linux-2.6.32.44/Documentation/dontdiff linux-2.6.32.44/Documentation/dontdiff
24109 --- linux-2.6.32.44/Documentation/dontdiff 2011-03-27 14:31:47.000000000 -0400
24110 +++ linux-2.6.32.44/Documentation/dontdiff 2011-05-18 20:09:36.000000000 -0400
24111 @@ -1,13 +1,16 @@
24112 *.a
24113 *.aux
24114 *.bin
24115 +*.cis
24116 *.cpio
24117 *.csp
24118 +*.dbg
24119 *.dsp
24120 *.dvi
24121 *.elf
24122 *.eps
24123 *.fw
24124 +*.gcno
24125 *.gen.S
24126 *.gif
24127 *.grep
24128 @@ -38,8 +41,10 @@
24129 *.tab.h
24130 *.tex
24131 *.ver
24132 +*.vim
24133 *.xml
24134 *_MODULES
24135 +*_reg_safe.h
24136 *_vga16.c
24137 *~
24138 *.9
24139 @@ -49,11 +54,16 @@
24140 53c700_d.h
24141 CVS
24142 ChangeSet
24143 +GPATH
24144 +GRTAGS
24145 +GSYMS
24146 +GTAGS
24147 Image
24148 Kerntypes
24149 Module.markers
24150 Module.symvers
24151 PENDING
24152 +PERF*
24153 SCCS
24154 System.map*
24155 TAGS
24156 @@ -76,7 +86,11 @@ btfixupprep
24157 build
24158 bvmlinux
24159 bzImage*
24160 +capability_names.h
24161 +capflags.c
24162 classlist.h*
24163 +clut_vga16.c
24164 +common-cmds.h
24165 comp*.log
24166 compile.h*
24167 conf
24168 @@ -103,13 +117,14 @@ gen_crc32table
24169 gen_init_cpio
24170 genksyms
24171 *_gray256.c
24172 +hash
24173 ihex2fw
24174 ikconfig.h*
24175 initramfs_data.cpio
24176 +initramfs_data.cpio.bz2
24177 initramfs_data.cpio.gz
24178 initramfs_list
24179 kallsyms
24180 -kconfig
24181 keywords.c
24182 ksym.c*
24183 ksym.h*
24184 @@ -133,7 +148,9 @@ mkboot
24185 mkbugboot
24186 mkcpustr
24187 mkdep
24188 +mkpiggy
24189 mkprep
24190 +mkregtable
24191 mktables
24192 mktree
24193 modpost
24194 @@ -149,6 +166,7 @@ patches*
24195 pca200e.bin
24196 pca200e_ecd.bin2
24197 piggy.gz
24198 +piggy.S
24199 piggyback
24200 pnmtologo
24201 ppc_defs.h*
24202 @@ -157,12 +175,15 @@ qconf
24203 raid6altivec*.c
24204 raid6int*.c
24205 raid6tables.c
24206 +regdb.c
24207 relocs
24208 +rlim_names.h
24209 series
24210 setup
24211 setup.bin
24212 setup.elf
24213 sImage
24214 +slabinfo
24215 sm_tbl*
24216 split-include
24217 syscalltab.h
24218 @@ -186,14 +207,20 @@ version.h*
24219 vmlinux
24220 vmlinux-*
24221 vmlinux.aout
24222 +vmlinux.bin.all
24223 +vmlinux.bin.bz2
24224 vmlinux.lds
24225 +vmlinux.relocs
24226 +voffset.h
24227 vsyscall.lds
24228 vsyscall_32.lds
24229 wanxlfw.inc
24230 uImage
24231 unifdef
24232 +utsrelease.h
24233 wakeup.bin
24234 wakeup.elf
24235 wakeup.lds
24236 zImage*
24237 zconf.hash.c
24238 +zoffset.h
24239 diff -urNp linux-2.6.32.44/Documentation/kernel-parameters.txt linux-2.6.32.44/Documentation/kernel-parameters.txt
24240 --- linux-2.6.32.44/Documentation/kernel-parameters.txt 2011-03-27 14:31:47.000000000 -0400
24241 +++ linux-2.6.32.44/Documentation/kernel-parameters.txt 2011-04-17 15:56:45.000000000 -0400
24242 @@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters.
24243 the specified number of seconds. This is to be used if
24244 your oopses keep scrolling off the screen.
24245
24246 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
24247 + virtualization environments that don't cope well with the
24248 + expand down segment used by UDEREF on X86-32 or the frequent
24249 + page table updates on X86-64.
24250 +
24251 + pax_softmode= 0/1 to disable/enable PaX softmode at boot.
24252 +
24253 pcbit= [HW,ISDN]
24254
24255 pcd. [PARIDE]
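These two entries document boot-time switches for PaX features added elsewhere in the patch: pax_nouderef turns UDEREF off entirely, and pax_softmode=1 boots with PaX in soft mode. Purely as a usage illustration (kernel image name and bootloader syntax are hypothetical, not from the patch), a GRUB-style kernel line using both would look like:

    kernel /boot/vmlinuz-2.6.32.44-grsec root=/dev/sda1 ro pax_softmode=1 pax_nouderef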
24256 diff -urNp linux-2.6.32.44/drivers/acpi/acpi_pad.c linux-2.6.32.44/drivers/acpi/acpi_pad.c
24257 --- linux-2.6.32.44/drivers/acpi/acpi_pad.c 2011-03-27 14:31:47.000000000 -0400
24258 +++ linux-2.6.32.44/drivers/acpi/acpi_pad.c 2011-04-17 15:56:46.000000000 -0400
24259 @@ -30,7 +30,7 @@
24260 #include <acpi/acpi_bus.h>
24261 #include <acpi/acpi_drivers.h>
24262
24263 -#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
24264 +#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
24265 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
24266 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
24267 static DEFINE_MUTEX(isolated_cpus_lock);
24268 diff -urNp linux-2.6.32.44/drivers/acpi/battery.c linux-2.6.32.44/drivers/acpi/battery.c
24269 --- linux-2.6.32.44/drivers/acpi/battery.c 2011-03-27 14:31:47.000000000 -0400
24270 +++ linux-2.6.32.44/drivers/acpi/battery.c 2011-04-17 15:56:46.000000000 -0400
24271 @@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
24272 }
24273
24274 static struct battery_file {
24275 - struct file_operations ops;
24276 + const struct file_operations ops;
24277 mode_t mode;
24278 const char *name;
24279 } acpi_battery_file[] = {
24280 diff -urNp linux-2.6.32.44/drivers/acpi/dock.c linux-2.6.32.44/drivers/acpi/dock.c
24281 --- linux-2.6.32.44/drivers/acpi/dock.c 2011-03-27 14:31:47.000000000 -0400
24282 +++ linux-2.6.32.44/drivers/acpi/dock.c 2011-04-17 15:56:46.000000000 -0400
24283 @@ -77,7 +77,7 @@ struct dock_dependent_device {
24284 struct list_head list;
24285 struct list_head hotplug_list;
24286 acpi_handle handle;
24287 - struct acpi_dock_ops *ops;
24288 + const struct acpi_dock_ops *ops;
24289 void *context;
24290 };
24291
24292 @@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifi
24293 * the dock driver after _DCK is executed.
24294 */
24295 int
24296 -register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
24297 +register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
24298 void *context)
24299 {
24300 struct dock_dependent_device *dd;
24301 diff -urNp linux-2.6.32.44/drivers/acpi/osl.c linux-2.6.32.44/drivers/acpi/osl.c
24302 --- linux-2.6.32.44/drivers/acpi/osl.c 2011-03-27 14:31:47.000000000 -0400
24303 +++ linux-2.6.32.44/drivers/acpi/osl.c 2011-04-17 15:56:46.000000000 -0400
24304 @@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_addres
24305 void __iomem *virt_addr;
24306
24307 virt_addr = ioremap(phys_addr, width);
24308 + if (!virt_addr)
24309 + return AE_NO_MEMORY;
24310 if (!value)
24311 value = &dummy;
24312
24313 @@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_addre
24314 void __iomem *virt_addr;
24315
24316 virt_addr = ioremap(phys_addr, width);
24317 + if (!virt_addr)
24318 + return AE_NO_MEMORY;
24319
24320 switch (width) {
24321 case 8:
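acpi_os_read_memory() and acpi_os_write_memory() previously dereferenced the ioremap() result unconditionally; ioremap() can fail and return NULL, so the added checks turn that into an AE_NO_MEMORY error instead of a NULL-pointer oops. The general shape of the pattern, as a hypothetical helper:

    #include <linux/acpi.h>
    #include <linux/io.h>

    /* Sketch: always test the ioremap() result before touching it. */
    static acpi_status demo_read_phys_u32(acpi_physical_address phys, u32 *value)
    {
            void __iomem *virt = ioremap(phys, sizeof(u32));

            if (!virt)
                    return AE_NO_MEMORY;

            *value = readl(virt);
            iounmap(virt);
            return AE_OK;
    }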
24322 diff -urNp linux-2.6.32.44/drivers/acpi/power_meter.c linux-2.6.32.44/drivers/acpi/power_meter.c
24323 --- linux-2.6.32.44/drivers/acpi/power_meter.c 2011-03-27 14:31:47.000000000 -0400
24324 +++ linux-2.6.32.44/drivers/acpi/power_meter.c 2011-04-17 15:56:46.000000000 -0400
24325 @@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *d
24326 return res;
24327
24328 temp /= 1000;
24329 - if (temp < 0)
24330 - return -EINVAL;
24331
24332 mutex_lock(&resource->lock);
24333 resource->trip[attr->index - 7] = temp;
24334 diff -urNp linux-2.6.32.44/drivers/acpi/proc.c linux-2.6.32.44/drivers/acpi/proc.c
24335 --- linux-2.6.32.44/drivers/acpi/proc.c 2011-03-27 14:31:47.000000000 -0400
24336 +++ linux-2.6.32.44/drivers/acpi/proc.c 2011-04-17 15:56:46.000000000 -0400
24337 @@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct f
24338 size_t count, loff_t * ppos)
24339 {
24340 struct list_head *node, *next;
24341 - char strbuf[5];
24342 - char str[5] = "";
24343 - unsigned int len = count;
24344 + char strbuf[5] = {0};
24345 struct acpi_device *found_dev = NULL;
24346
24347 - if (len > 4)
24348 - len = 4;
24349 - if (len < 0)
24350 - return -EFAULT;
24351 + if (count > 4)
24352 + count = 4;
24353
24354 - if (copy_from_user(strbuf, buffer, len))
24355 + if (copy_from_user(strbuf, buffer, count))
24356 return -EFAULT;
24357 - strbuf[len] = '\0';
24358 - sscanf(strbuf, "%s", str);
24359 + strbuf[count] = '\0';
24360
24361 mutex_lock(&acpi_device_lock);
24362 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
24363 @@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct f
24364 if (!dev->wakeup.flags.valid)
24365 continue;
24366
24367 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
24368 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
24369 dev->wakeup.state.enabled =
24370 dev->wakeup.state.enabled ? 0 : 1;
24371 found_dev = dev;
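The rewrite removes the dead `if (len < 0)` test (len was an unsigned int, so the branch could never fire), clamps the copy to four bytes, NUL-terminates the five-byte buffer, and compares against it directly instead of bouncing through a second buffer and sscanf(). The same reasoning applies to the power_meter.c hunk above and the processor_core.c hunk below, which drop comparisons on unsigned values that can never be negative. The pitfall on its own, as a trivial userspace program:

    #include <stdio.h>

    int main(void)
    {
            unsigned int len = 5;

            /* Always false: an unsigned value cannot be negative, so the
             * compiler may discard this branch entirely. */
            if (len < 0)
                    puts("unreachable");

            if (len > 4)
                    len = 4;        /* the clamp that actually matters */

            printf("len = %u\n", len);
            return 0;
    }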
24372 diff -urNp linux-2.6.32.44/drivers/acpi/processor_core.c linux-2.6.32.44/drivers/acpi/processor_core.c
24373 --- linux-2.6.32.44/drivers/acpi/processor_core.c 2011-03-27 14:31:47.000000000 -0400
24374 +++ linux-2.6.32.44/drivers/acpi/processor_core.c 2011-04-17 15:56:46.000000000 -0400
24375 @@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(
24376 return 0;
24377 }
24378
24379 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
24380 + BUG_ON(pr->id >= nr_cpu_ids);
24381
24382 /*
24383 * Buggy BIOS check
24384 diff -urNp linux-2.6.32.44/drivers/acpi/sbshc.c linux-2.6.32.44/drivers/acpi/sbshc.c
24385 --- linux-2.6.32.44/drivers/acpi/sbshc.c 2011-03-27 14:31:47.000000000 -0400
24386 +++ linux-2.6.32.44/drivers/acpi/sbshc.c 2011-04-17 15:56:46.000000000 -0400
24387 @@ -17,7 +17,7 @@
24388
24389 #define PREFIX "ACPI: "
24390
24391 -#define ACPI_SMB_HC_CLASS "smbus_host_controller"
24392 +#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
24393 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
24394
24395 struct acpi_smb_hc {
24396 diff -urNp linux-2.6.32.44/drivers/acpi/sleep.c linux-2.6.32.44/drivers/acpi/sleep.c
24397 --- linux-2.6.32.44/drivers/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
24398 +++ linux-2.6.32.44/drivers/acpi/sleep.c 2011-04-17 15:56:46.000000000 -0400
24399 @@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(susp
24400 }
24401 }
24402
24403 -static struct platform_suspend_ops acpi_suspend_ops = {
24404 +static const struct platform_suspend_ops acpi_suspend_ops = {
24405 .valid = acpi_suspend_state_valid,
24406 .begin = acpi_suspend_begin,
24407 .prepare_late = acpi_pm_prepare,
24408 @@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspen
24409 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
24410 * been requested.
24411 */
24412 -static struct platform_suspend_ops acpi_suspend_ops_old = {
24413 +static const struct platform_suspend_ops acpi_suspend_ops_old = {
24414 .valid = acpi_suspend_state_valid,
24415 .begin = acpi_suspend_begin_old,
24416 .prepare_late = acpi_pm_disable_gpes,
24417 @@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
24418 acpi_enable_all_runtime_gpes();
24419 }
24420
24421 -static struct platform_hibernation_ops acpi_hibernation_ops = {
24422 +static const struct platform_hibernation_ops acpi_hibernation_ops = {
24423 .begin = acpi_hibernation_begin,
24424 .end = acpi_pm_end,
24425 .pre_snapshot = acpi_hibernation_pre_snapshot,
24426 @@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot
24427 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
24428 * been requested.
24429 */
24430 -static struct platform_hibernation_ops acpi_hibernation_ops_old = {
24431 +static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
24432 .begin = acpi_hibernation_begin_old,
24433 .end = acpi_pm_end,
24434 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
24435 diff -urNp linux-2.6.32.44/drivers/acpi/video.c linux-2.6.32.44/drivers/acpi/video.c
24436 --- linux-2.6.32.44/drivers/acpi/video.c 2011-03-27 14:31:47.000000000 -0400
24437 +++ linux-2.6.32.44/drivers/acpi/video.c 2011-04-17 15:56:46.000000000 -0400
24438 @@ -359,7 +359,7 @@ static int acpi_video_set_brightness(str
24439 vd->brightness->levels[request_level]);
24440 }
24441
24442 -static struct backlight_ops acpi_backlight_ops = {
24443 +static const struct backlight_ops acpi_backlight_ops = {
24444 .get_brightness = acpi_video_get_brightness,
24445 .update_status = acpi_video_set_brightness,
24446 };
24447 diff -urNp linux-2.6.32.44/drivers/ata/ahci.c linux-2.6.32.44/drivers/ata/ahci.c
24448 --- linux-2.6.32.44/drivers/ata/ahci.c 2011-03-27 14:31:47.000000000 -0400
24449 +++ linux-2.6.32.44/drivers/ata/ahci.c 2011-04-23 12:56:10.000000000 -0400
24450 @@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sh
24451 .sdev_attrs = ahci_sdev_attrs,
24452 };
24453
24454 -static struct ata_port_operations ahci_ops = {
24455 +static const struct ata_port_operations ahci_ops = {
24456 .inherits = &sata_pmp_port_ops,
24457
24458 .qc_defer = sata_pmp_qc_defer_cmd_switch,
24459 @@ -424,17 +424,17 @@ static struct ata_port_operations ahci_o
24460 .port_stop = ahci_port_stop,
24461 };
24462
24463 -static struct ata_port_operations ahci_vt8251_ops = {
24464 +static const struct ata_port_operations ahci_vt8251_ops = {
24465 .inherits = &ahci_ops,
24466 .hardreset = ahci_vt8251_hardreset,
24467 };
24468
24469 -static struct ata_port_operations ahci_p5wdh_ops = {
24470 +static const struct ata_port_operations ahci_p5wdh_ops = {
24471 .inherits = &ahci_ops,
24472 .hardreset = ahci_p5wdh_hardreset,
24473 };
24474
24475 -static struct ata_port_operations ahci_sb600_ops = {
24476 +static const struct ata_port_operations ahci_sb600_ops = {
24477 .inherits = &ahci_ops,
24478 .softreset = ahci_sb600_softreset,
24479 .pmp_softreset = ahci_sb600_softreset,
24480 diff -urNp linux-2.6.32.44/drivers/ata/ata_generic.c linux-2.6.32.44/drivers/ata/ata_generic.c
24481 --- linux-2.6.32.44/drivers/ata/ata_generic.c 2011-03-27 14:31:47.000000000 -0400
24482 +++ linux-2.6.32.44/drivers/ata/ata_generic.c 2011-04-17 15:56:46.000000000 -0400
24483 @@ -104,7 +104,7 @@ static struct scsi_host_template generic
24484 ATA_BMDMA_SHT(DRV_NAME),
24485 };
24486
24487 -static struct ata_port_operations generic_port_ops = {
24488 +static const struct ata_port_operations generic_port_ops = {
24489 .inherits = &ata_bmdma_port_ops,
24490 .cable_detect = ata_cable_unknown,
24491 .set_mode = generic_set_mode,
24492 diff -urNp linux-2.6.32.44/drivers/ata/ata_piix.c linux-2.6.32.44/drivers/ata/ata_piix.c
24493 --- linux-2.6.32.44/drivers/ata/ata_piix.c 2011-03-27 14:31:47.000000000 -0400
24494 +++ linux-2.6.32.44/drivers/ata/ata_piix.c 2011-04-23 12:56:10.000000000 -0400
24495 @@ -318,7 +318,7 @@ static struct scsi_host_template piix_sh
24496 ATA_BMDMA_SHT(DRV_NAME),
24497 };
24498
24499 -static struct ata_port_operations piix_pata_ops = {
24500 +static const struct ata_port_operations piix_pata_ops = {
24501 .inherits = &ata_bmdma32_port_ops,
24502 .cable_detect = ata_cable_40wire,
24503 .set_piomode = piix_set_piomode,
24504 @@ -326,22 +326,22 @@ static struct ata_port_operations piix_p
24505 .prereset = piix_pata_prereset,
24506 };
24507
24508 -static struct ata_port_operations piix_vmw_ops = {
24509 +static const struct ata_port_operations piix_vmw_ops = {
24510 .inherits = &piix_pata_ops,
24511 .bmdma_status = piix_vmw_bmdma_status,
24512 };
24513
24514 -static struct ata_port_operations ich_pata_ops = {
24515 +static const struct ata_port_operations ich_pata_ops = {
24516 .inherits = &piix_pata_ops,
24517 .cable_detect = ich_pata_cable_detect,
24518 .set_dmamode = ich_set_dmamode,
24519 };
24520
24521 -static struct ata_port_operations piix_sata_ops = {
24522 +static const struct ata_port_operations piix_sata_ops = {
24523 .inherits = &ata_bmdma_port_ops,
24524 };
24525
24526 -static struct ata_port_operations piix_sidpr_sata_ops = {
24527 +static const struct ata_port_operations piix_sidpr_sata_ops = {
24528 .inherits = &piix_sata_ops,
24529 .hardreset = sata_std_hardreset,
24530 .scr_read = piix_sidpr_scr_read,
24531 diff -urNp linux-2.6.32.44/drivers/ata/libata-acpi.c linux-2.6.32.44/drivers/ata/libata-acpi.c
24532 --- linux-2.6.32.44/drivers/ata/libata-acpi.c 2011-03-27 14:31:47.000000000 -0400
24533 +++ linux-2.6.32.44/drivers/ata/libata-acpi.c 2011-04-17 15:56:46.000000000 -0400
24534 @@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_han
24535 ata_acpi_uevent(dev->link->ap, dev, event);
24536 }
24537
24538 -static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
24539 +static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
24540 .handler = ata_acpi_dev_notify_dock,
24541 .uevent = ata_acpi_dev_uevent,
24542 };
24543
24544 -static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
24545 +static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
24546 .handler = ata_acpi_ap_notify_dock,
24547 .uevent = ata_acpi_ap_uevent,
24548 };
24549 diff -urNp linux-2.6.32.44/drivers/ata/libata-core.c linux-2.6.32.44/drivers/ata/libata-core.c
24550 --- linux-2.6.32.44/drivers/ata/libata-core.c 2011-03-27 14:31:47.000000000 -0400
24551 +++ linux-2.6.32.44/drivers/ata/libata-core.c 2011-08-05 20:33:55.000000000 -0400
24552 @@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *
24553 struct ata_port *ap;
24554 unsigned int tag;
24555
24556 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24557 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24558 ap = qc->ap;
24559
24560 qc->flags = 0;
24561 @@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued
24562 struct ata_port *ap;
24563 struct ata_link *link;
24564
24565 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24566 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24567 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
24568 ap = qc->ap;
24569 link = qc->dev->link;
24570 @@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device
24571 * LOCKING:
24572 * None.
24573 */
24574 -static void ata_finalize_port_ops(struct ata_port_operations *ops)
24575 +static void ata_finalize_port_ops(const struct ata_port_operations *ops)
24576 {
24577 static DEFINE_SPINLOCK(lock);
24578 const struct ata_port_operations *cur;
24579 @@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct
24580 return;
24581
24582 spin_lock(&lock);
24583 + pax_open_kernel();
24584
24585 for (cur = ops->inherits; cur; cur = cur->inherits) {
24586 void **inherit = (void **)cur;
24587 @@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct
24588 if (IS_ERR(*pp))
24589 *pp = NULL;
24590
24591 - ops->inherits = NULL;
24592 + *(struct ata_port_operations **)&ops->inherits = NULL;
24593
24594 + pax_close_kernel();
24595 spin_unlock(&lock);
24596 }
24597
24598 @@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host
24599 */
24600 /* KILLME - the only user left is ipr */
24601 void ata_host_init(struct ata_host *host, struct device *dev,
24602 - unsigned long flags, struct ata_port_operations *ops)
24603 + unsigned long flags, const struct ata_port_operations *ops)
24604 {
24605 spin_lock_init(&host->lock);
24606 host->dev = dev;
24607 @@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(stru
24608 /* truly dummy */
24609 }
24610
24611 -struct ata_port_operations ata_dummy_port_ops = {
24612 +const struct ata_port_operations ata_dummy_port_ops = {
24613 .qc_prep = ata_noop_qc_prep,
24614 .qc_issue = ata_dummy_qc_issue,
24615 .error_handler = ata_dummy_error_handler,
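ata_finalize_port_ops() resolves inherited methods by writing into the ops table, but this patch turns every ata_port_operations into a const object. The writes are therefore bracketed with pax_open_kernel()/pax_close_kernel(), PaX helpers defined elsewhere in the patch that temporarily permit stores to otherwise read-only kernel data, and the one direct member assignment goes through a cast. A sketch of that idiom under those assumptions:

    #include <linux/libata.h>

    /* Sketch only: pax_open_kernel()/pax_close_kernel() come from the PaX
     * portions of this patch; here they bracket a deliberate one-off
     * update of a constified operations table. */
    static void demo_patch_const_ops(const struct ata_port_operations *ops)
    {
            pax_open_kernel();
            /* cast away const for the single field being rewritten */
            *(struct ata_port_operations **)&ops->inherits = NULL;
            pax_close_kernel();
    }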
24616 diff -urNp linux-2.6.32.44/drivers/ata/libata-eh.c linux-2.6.32.44/drivers/ata/libata-eh.c
24617 --- linux-2.6.32.44/drivers/ata/libata-eh.c 2011-08-09 18:35:28.000000000 -0400
24618 +++ linux-2.6.32.44/drivers/ata/libata-eh.c 2011-08-09 18:33:59.000000000 -0400
24619 @@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
24620 {
24621 struct ata_link *link;
24622
24623 + pax_track_stack();
24624 +
24625 ata_for_each_link(link, ap, HOST_FIRST)
24626 ata_eh_link_report(link);
24627 }
24628 @@ -3594,7 +3596,7 @@ void ata_do_eh(struct ata_port *ap, ata_
24629 */
24630 void ata_std_error_handler(struct ata_port *ap)
24631 {
24632 - struct ata_port_operations *ops = ap->ops;
24633 + const struct ata_port_operations *ops = ap->ops;
24634 ata_reset_fn_t hardreset = ops->hardreset;
24635
24636 /* ignore built-in hardreset if SCR access is not available */
24637 diff -urNp linux-2.6.32.44/drivers/ata/libata-pmp.c linux-2.6.32.44/drivers/ata/libata-pmp.c
24638 --- linux-2.6.32.44/drivers/ata/libata-pmp.c 2011-03-27 14:31:47.000000000 -0400
24639 +++ linux-2.6.32.44/drivers/ata/libata-pmp.c 2011-04-17 15:56:46.000000000 -0400
24640 @@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(str
24641 */
24642 static int sata_pmp_eh_recover(struct ata_port *ap)
24643 {
24644 - struct ata_port_operations *ops = ap->ops;
24645 + const struct ata_port_operations *ops = ap->ops;
24646 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
24647 struct ata_link *pmp_link = &ap->link;
24648 struct ata_device *pmp_dev = pmp_link->device;
24649 diff -urNp linux-2.6.32.44/drivers/ata/pata_acpi.c linux-2.6.32.44/drivers/ata/pata_acpi.c
24650 --- linux-2.6.32.44/drivers/ata/pata_acpi.c 2011-03-27 14:31:47.000000000 -0400
24651 +++ linux-2.6.32.44/drivers/ata/pata_acpi.c 2011-04-17 15:56:46.000000000 -0400
24652 @@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_s
24653 ATA_BMDMA_SHT(DRV_NAME),
24654 };
24655
24656 -static struct ata_port_operations pacpi_ops = {
24657 +static const struct ata_port_operations pacpi_ops = {
24658 .inherits = &ata_bmdma_port_ops,
24659 .qc_issue = pacpi_qc_issue,
24660 .cable_detect = pacpi_cable_detect,
24661 diff -urNp linux-2.6.32.44/drivers/ata/pata_ali.c linux-2.6.32.44/drivers/ata/pata_ali.c
24662 --- linux-2.6.32.44/drivers/ata/pata_ali.c 2011-03-27 14:31:47.000000000 -0400
24663 +++ linux-2.6.32.44/drivers/ata/pata_ali.c 2011-04-17 15:56:46.000000000 -0400
24664 @@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht
24665 * Port operations for PIO only ALi
24666 */
24667
24668 -static struct ata_port_operations ali_early_port_ops = {
24669 +static const struct ata_port_operations ali_early_port_ops = {
24670 .inherits = &ata_sff_port_ops,
24671 .cable_detect = ata_cable_40wire,
24672 .set_piomode = ali_set_piomode,
24673 @@ -382,7 +382,7 @@ static const struct ata_port_operations
24674 * Port operations for DMA capable ALi without cable
24675 * detect
24676 */
24677 -static struct ata_port_operations ali_20_port_ops = {
24678 +static const struct ata_port_operations ali_20_port_ops = {
24679 .inherits = &ali_dma_base_ops,
24680 .cable_detect = ata_cable_40wire,
24681 .mode_filter = ali_20_filter,
24682 @@ -393,7 +393,7 @@ static struct ata_port_operations ali_20
24683 /*
24684 * Port operations for DMA capable ALi with cable detect
24685 */
24686 -static struct ata_port_operations ali_c2_port_ops = {
24687 +static const struct ata_port_operations ali_c2_port_ops = {
24688 .inherits = &ali_dma_base_ops,
24689 .check_atapi_dma = ali_check_atapi_dma,
24690 .cable_detect = ali_c2_cable_detect,
24691 @@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2
24692 /*
24693 * Port operations for DMA capable ALi with cable detect
24694 */
24695 -static struct ata_port_operations ali_c4_port_ops = {
24696 +static const struct ata_port_operations ali_c4_port_ops = {
24697 .inherits = &ali_dma_base_ops,
24698 .check_atapi_dma = ali_check_atapi_dma,
24699 .cable_detect = ali_c2_cable_detect,
24700 @@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4
24701 /*
24702 * Port operations for DMA capable ALi with cable detect and LBA48
24703 */
24704 -static struct ata_port_operations ali_c5_port_ops = {
24705 +static const struct ata_port_operations ali_c5_port_ops = {
24706 .inherits = &ali_dma_base_ops,
24707 .check_atapi_dma = ali_check_atapi_dma,
24708 .dev_config = ali_warn_atapi_dma,
24709 diff -urNp linux-2.6.32.44/drivers/ata/pata_amd.c linux-2.6.32.44/drivers/ata/pata_amd.c
24710 --- linux-2.6.32.44/drivers/ata/pata_amd.c 2011-03-27 14:31:47.000000000 -0400
24711 +++ linux-2.6.32.44/drivers/ata/pata_amd.c 2011-04-17 15:56:46.000000000 -0400
24712 @@ -397,28 +397,28 @@ static const struct ata_port_operations
24713 .prereset = amd_pre_reset,
24714 };
24715
24716 -static struct ata_port_operations amd33_port_ops = {
24717 +static const struct ata_port_operations amd33_port_ops = {
24718 .inherits = &amd_base_port_ops,
24719 .cable_detect = ata_cable_40wire,
24720 .set_piomode = amd33_set_piomode,
24721 .set_dmamode = amd33_set_dmamode,
24722 };
24723
24724 -static struct ata_port_operations amd66_port_ops = {
24725 +static const struct ata_port_operations amd66_port_ops = {
24726 .inherits = &amd_base_port_ops,
24727 .cable_detect = ata_cable_unknown,
24728 .set_piomode = amd66_set_piomode,
24729 .set_dmamode = amd66_set_dmamode,
24730 };
24731
24732 -static struct ata_port_operations amd100_port_ops = {
24733 +static const struct ata_port_operations amd100_port_ops = {
24734 .inherits = &amd_base_port_ops,
24735 .cable_detect = ata_cable_unknown,
24736 .set_piomode = amd100_set_piomode,
24737 .set_dmamode = amd100_set_dmamode,
24738 };
24739
24740 -static struct ata_port_operations amd133_port_ops = {
24741 +static const struct ata_port_operations amd133_port_ops = {
24742 .inherits = &amd_base_port_ops,
24743 .cable_detect = amd_cable_detect,
24744 .set_piomode = amd133_set_piomode,
24745 @@ -433,13 +433,13 @@ static const struct ata_port_operations
24746 .host_stop = nv_host_stop,
24747 };
24748
24749 -static struct ata_port_operations nv100_port_ops = {
24750 +static const struct ata_port_operations nv100_port_ops = {
24751 .inherits = &nv_base_port_ops,
24752 .set_piomode = nv100_set_piomode,
24753 .set_dmamode = nv100_set_dmamode,
24754 };
24755
24756 -static struct ata_port_operations nv133_port_ops = {
24757 +static const struct ata_port_operations nv133_port_ops = {
24758 .inherits = &nv_base_port_ops,
24759 .set_piomode = nv133_set_piomode,
24760 .set_dmamode = nv133_set_dmamode,
24761 diff -urNp linux-2.6.32.44/drivers/ata/pata_artop.c linux-2.6.32.44/drivers/ata/pata_artop.c
24762 --- linux-2.6.32.44/drivers/ata/pata_artop.c 2011-03-27 14:31:47.000000000 -0400
24763 +++ linux-2.6.32.44/drivers/ata/pata_artop.c 2011-04-17 15:56:46.000000000 -0400
24764 @@ -311,7 +311,7 @@ static struct scsi_host_template artop_s
24765 ATA_BMDMA_SHT(DRV_NAME),
24766 };
24767
24768 -static struct ata_port_operations artop6210_ops = {
24769 +static const struct ata_port_operations artop6210_ops = {
24770 .inherits = &ata_bmdma_port_ops,
24771 .cable_detect = ata_cable_40wire,
24772 .set_piomode = artop6210_set_piomode,
24773 @@ -320,7 +320,7 @@ static struct ata_port_operations artop6
24774 .qc_defer = artop6210_qc_defer,
24775 };
24776
24777 -static struct ata_port_operations artop6260_ops = {
24778 +static const struct ata_port_operations artop6260_ops = {
24779 .inherits = &ata_bmdma_port_ops,
24780 .cable_detect = artop6260_cable_detect,
24781 .set_piomode = artop6260_set_piomode,
24782 diff -urNp linux-2.6.32.44/drivers/ata/pata_at32.c linux-2.6.32.44/drivers/ata/pata_at32.c
24783 --- linux-2.6.32.44/drivers/ata/pata_at32.c 2011-03-27 14:31:47.000000000 -0400
24784 +++ linux-2.6.32.44/drivers/ata/pata_at32.c 2011-04-17 15:56:46.000000000 -0400
24785 @@ -172,7 +172,7 @@ static struct scsi_host_template at32_sh
24786 ATA_PIO_SHT(DRV_NAME),
24787 };
24788
24789 -static struct ata_port_operations at32_port_ops = {
24790 +static const struct ata_port_operations at32_port_ops = {
24791 .inherits = &ata_sff_port_ops,
24792 .cable_detect = ata_cable_40wire,
24793 .set_piomode = pata_at32_set_piomode,
24794 diff -urNp linux-2.6.32.44/drivers/ata/pata_at91.c linux-2.6.32.44/drivers/ata/pata_at91.c
24795 --- linux-2.6.32.44/drivers/ata/pata_at91.c 2011-03-27 14:31:47.000000000 -0400
24796 +++ linux-2.6.32.44/drivers/ata/pata_at91.c 2011-04-17 15:56:46.000000000 -0400
24797 @@ -195,7 +195,7 @@ static struct scsi_host_template pata_at
24798 ATA_PIO_SHT(DRV_NAME),
24799 };
24800
24801 -static struct ata_port_operations pata_at91_port_ops = {
24802 +static const struct ata_port_operations pata_at91_port_ops = {
24803 .inherits = &ata_sff_port_ops,
24804
24805 .sff_data_xfer = pata_at91_data_xfer_noirq,
24806 diff -urNp linux-2.6.32.44/drivers/ata/pata_atiixp.c linux-2.6.32.44/drivers/ata/pata_atiixp.c
24807 --- linux-2.6.32.44/drivers/ata/pata_atiixp.c 2011-03-27 14:31:47.000000000 -0400
24808 +++ linux-2.6.32.44/drivers/ata/pata_atiixp.c 2011-04-17 15:56:46.000000000 -0400
24809 @@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_
24810 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24811 };
24812
24813 -static struct ata_port_operations atiixp_port_ops = {
24814 +static const struct ata_port_operations atiixp_port_ops = {
24815 .inherits = &ata_bmdma_port_ops,
24816
24817 .qc_prep = ata_sff_dumb_qc_prep,
24818 diff -urNp linux-2.6.32.44/drivers/ata/pata_atp867x.c linux-2.6.32.44/drivers/ata/pata_atp867x.c
24819 --- linux-2.6.32.44/drivers/ata/pata_atp867x.c 2011-03-27 14:31:47.000000000 -0400
24820 +++ linux-2.6.32.44/drivers/ata/pata_atp867x.c 2011-04-17 15:56:46.000000000 -0400
24821 @@ -274,7 +274,7 @@ static struct scsi_host_template atp867x
24822 ATA_BMDMA_SHT(DRV_NAME),
24823 };
24824
24825 -static struct ata_port_operations atp867x_ops = {
24826 +static const struct ata_port_operations atp867x_ops = {
24827 .inherits = &ata_bmdma_port_ops,
24828 .cable_detect = atp867x_cable_detect,
24829 .set_piomode = atp867x_set_piomode,
24830 diff -urNp linux-2.6.32.44/drivers/ata/pata_bf54x.c linux-2.6.32.44/drivers/ata/pata_bf54x.c
24831 --- linux-2.6.32.44/drivers/ata/pata_bf54x.c 2011-03-27 14:31:47.000000000 -0400
24832 +++ linux-2.6.32.44/drivers/ata/pata_bf54x.c 2011-04-17 15:56:46.000000000 -0400
24833 @@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sh
24834 .dma_boundary = ATA_DMA_BOUNDARY,
24835 };
24836
24837 -static struct ata_port_operations bfin_pata_ops = {
24838 +static const struct ata_port_operations bfin_pata_ops = {
24839 .inherits = &ata_sff_port_ops,
24840
24841 .set_piomode = bfin_set_piomode,
24842 diff -urNp linux-2.6.32.44/drivers/ata/pata_cmd640.c linux-2.6.32.44/drivers/ata/pata_cmd640.c
24843 --- linux-2.6.32.44/drivers/ata/pata_cmd640.c 2011-03-27 14:31:47.000000000 -0400
24844 +++ linux-2.6.32.44/drivers/ata/pata_cmd640.c 2011-04-17 15:56:46.000000000 -0400
24845 @@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_
24846 ATA_BMDMA_SHT(DRV_NAME),
24847 };
24848
24849 -static struct ata_port_operations cmd640_port_ops = {
24850 +static const struct ata_port_operations cmd640_port_ops = {
24851 .inherits = &ata_bmdma_port_ops,
24852 /* In theory xfer_noirq is not needed once we kill the prefetcher */
24853 .sff_data_xfer = ata_sff_data_xfer_noirq,
24854 diff -urNp linux-2.6.32.44/drivers/ata/pata_cmd64x.c linux-2.6.32.44/drivers/ata/pata_cmd64x.c
24855 --- linux-2.6.32.44/drivers/ata/pata_cmd64x.c 2011-06-25 12:55:34.000000000 -0400
24856 +++ linux-2.6.32.44/drivers/ata/pata_cmd64x.c 2011-06-25 12:56:37.000000000 -0400
24857 @@ -271,18 +271,18 @@ static const struct ata_port_operations
24858 .set_dmamode = cmd64x_set_dmamode,
24859 };
24860
24861 -static struct ata_port_operations cmd64x_port_ops = {
24862 +static const struct ata_port_operations cmd64x_port_ops = {
24863 .inherits = &cmd64x_base_ops,
24864 .cable_detect = ata_cable_40wire,
24865 };
24866
24867 -static struct ata_port_operations cmd646r1_port_ops = {
24868 +static const struct ata_port_operations cmd646r1_port_ops = {
24869 .inherits = &cmd64x_base_ops,
24870 .bmdma_stop = cmd646r1_bmdma_stop,
24871 .cable_detect = ata_cable_40wire,
24872 };
24873
24874 -static struct ata_port_operations cmd648_port_ops = {
24875 +static const struct ata_port_operations cmd648_port_ops = {
24876 .inherits = &cmd64x_base_ops,
24877 .bmdma_stop = cmd648_bmdma_stop,
24878 .cable_detect = cmd648_cable_detect,
24879 diff -urNp linux-2.6.32.44/drivers/ata/pata_cs5520.c linux-2.6.32.44/drivers/ata/pata_cs5520.c
24880 --- linux-2.6.32.44/drivers/ata/pata_cs5520.c 2011-03-27 14:31:47.000000000 -0400
24881 +++ linux-2.6.32.44/drivers/ata/pata_cs5520.c 2011-04-17 15:56:46.000000000 -0400
24882 @@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_
24883 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24884 };
24885
24886 -static struct ata_port_operations cs5520_port_ops = {
24887 +static const struct ata_port_operations cs5520_port_ops = {
24888 .inherits = &ata_bmdma_port_ops,
24889 .qc_prep = ata_sff_dumb_qc_prep,
24890 .cable_detect = ata_cable_40wire,
24891 diff -urNp linux-2.6.32.44/drivers/ata/pata_cs5530.c linux-2.6.32.44/drivers/ata/pata_cs5530.c
24892 --- linux-2.6.32.44/drivers/ata/pata_cs5530.c 2011-03-27 14:31:47.000000000 -0400
24893 +++ linux-2.6.32.44/drivers/ata/pata_cs5530.c 2011-04-17 15:56:46.000000000 -0400
24894 @@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_
24895 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24896 };
24897
24898 -static struct ata_port_operations cs5530_port_ops = {
24899 +static const struct ata_port_operations cs5530_port_ops = {
24900 .inherits = &ata_bmdma_port_ops,
24901
24902 .qc_prep = ata_sff_dumb_qc_prep,
24903 diff -urNp linux-2.6.32.44/drivers/ata/pata_cs5535.c linux-2.6.32.44/drivers/ata/pata_cs5535.c
24904 --- linux-2.6.32.44/drivers/ata/pata_cs5535.c 2011-03-27 14:31:47.000000000 -0400
24905 +++ linux-2.6.32.44/drivers/ata/pata_cs5535.c 2011-04-17 15:56:46.000000000 -0400
24906 @@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_
24907 ATA_BMDMA_SHT(DRV_NAME),
24908 };
24909
24910 -static struct ata_port_operations cs5535_port_ops = {
24911 +static const struct ata_port_operations cs5535_port_ops = {
24912 .inherits = &ata_bmdma_port_ops,
24913 .cable_detect = cs5535_cable_detect,
24914 .set_piomode = cs5535_set_piomode,
24915 diff -urNp linux-2.6.32.44/drivers/ata/pata_cs5536.c linux-2.6.32.44/drivers/ata/pata_cs5536.c
24916 --- linux-2.6.32.44/drivers/ata/pata_cs5536.c 2011-03-27 14:31:47.000000000 -0400
24917 +++ linux-2.6.32.44/drivers/ata/pata_cs5536.c 2011-04-17 15:56:46.000000000 -0400
24918 @@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_
24919 ATA_BMDMA_SHT(DRV_NAME),
24920 };
24921
24922 -static struct ata_port_operations cs5536_port_ops = {
24923 +static const struct ata_port_operations cs5536_port_ops = {
24924 .inherits = &ata_bmdma_port_ops,
24925 .cable_detect = cs5536_cable_detect,
24926 .set_piomode = cs5536_set_piomode,
24927 diff -urNp linux-2.6.32.44/drivers/ata/pata_cypress.c linux-2.6.32.44/drivers/ata/pata_cypress.c
24928 --- linux-2.6.32.44/drivers/ata/pata_cypress.c 2011-03-27 14:31:47.000000000 -0400
24929 +++ linux-2.6.32.44/drivers/ata/pata_cypress.c 2011-04-17 15:56:46.000000000 -0400
24930 @@ -113,7 +113,7 @@ static struct scsi_host_template cy82c69
24931 ATA_BMDMA_SHT(DRV_NAME),
24932 };
24933
24934 -static struct ata_port_operations cy82c693_port_ops = {
24935 +static const struct ata_port_operations cy82c693_port_ops = {
24936 .inherits = &ata_bmdma_port_ops,
24937 .cable_detect = ata_cable_40wire,
24938 .set_piomode = cy82c693_set_piomode,
24939 diff -urNp linux-2.6.32.44/drivers/ata/pata_efar.c linux-2.6.32.44/drivers/ata/pata_efar.c
24940 --- linux-2.6.32.44/drivers/ata/pata_efar.c 2011-03-27 14:31:47.000000000 -0400
24941 +++ linux-2.6.32.44/drivers/ata/pata_efar.c 2011-04-17 15:56:46.000000000 -0400
24942 @@ -222,7 +222,7 @@ static struct scsi_host_template efar_sh
24943 ATA_BMDMA_SHT(DRV_NAME),
24944 };
24945
24946 -static struct ata_port_operations efar_ops = {
24947 +static const struct ata_port_operations efar_ops = {
24948 .inherits = &ata_bmdma_port_ops,
24949 .cable_detect = efar_cable_detect,
24950 .set_piomode = efar_set_piomode,
24951 diff -urNp linux-2.6.32.44/drivers/ata/pata_hpt366.c linux-2.6.32.44/drivers/ata/pata_hpt366.c
24952 --- linux-2.6.32.44/drivers/ata/pata_hpt366.c 2011-06-25 12:55:34.000000000 -0400
24953 +++ linux-2.6.32.44/drivers/ata/pata_hpt366.c 2011-06-25 12:56:37.000000000 -0400
24954 @@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_
24955 * Configuration for HPT366/68
24956 */
24957
24958 -static struct ata_port_operations hpt366_port_ops = {
24959 +static const struct ata_port_operations hpt366_port_ops = {
24960 .inherits = &ata_bmdma_port_ops,
24961 .cable_detect = hpt36x_cable_detect,
24962 .mode_filter = hpt366_filter,
24963 diff -urNp linux-2.6.32.44/drivers/ata/pata_hpt37x.c linux-2.6.32.44/drivers/ata/pata_hpt37x.c
24964 --- linux-2.6.32.44/drivers/ata/pata_hpt37x.c 2011-06-25 12:55:34.000000000 -0400
24965 +++ linux-2.6.32.44/drivers/ata/pata_hpt37x.c 2011-06-25 12:56:37.000000000 -0400
24966 @@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_
24967 * Configuration for HPT370
24968 */
24969
24970 -static struct ata_port_operations hpt370_port_ops = {
24971 +static const struct ata_port_operations hpt370_port_ops = {
24972 .inherits = &ata_bmdma_port_ops,
24973
24974 .bmdma_stop = hpt370_bmdma_stop,
24975 @@ -591,7 +591,7 @@ static struct ata_port_operations hpt370
24976 * Configuration for HPT370A. Close to 370 but less filters
24977 */
24978
24979 -static struct ata_port_operations hpt370a_port_ops = {
24980 +static const struct ata_port_operations hpt370a_port_ops = {
24981 .inherits = &hpt370_port_ops,
24982 .mode_filter = hpt370a_filter,
24983 };
24984 @@ -601,7 +601,7 @@ static struct ata_port_operations hpt370
24985 * and DMA mode setting functionality.
24986 */
24987
24988 -static struct ata_port_operations hpt372_port_ops = {
24989 +static const struct ata_port_operations hpt372_port_ops = {
24990 .inherits = &ata_bmdma_port_ops,
24991
24992 .bmdma_stop = hpt37x_bmdma_stop,
24993 @@ -616,7 +616,7 @@ static struct ata_port_operations hpt372
24994 * but we have a different cable detection procedure for function 1.
24995 */
24996
24997 -static struct ata_port_operations hpt374_fn1_port_ops = {
24998 +static const struct ata_port_operations hpt374_fn1_port_ops = {
24999 .inherits = &hpt372_port_ops,
25000 .prereset = hpt374_fn1_pre_reset,
25001 };
25002 diff -urNp linux-2.6.32.44/drivers/ata/pata_hpt3x2n.c linux-2.6.32.44/drivers/ata/pata_hpt3x2n.c
25003 --- linux-2.6.32.44/drivers/ata/pata_hpt3x2n.c 2011-06-25 12:55:34.000000000 -0400
25004 +++ linux-2.6.32.44/drivers/ata/pata_hpt3x2n.c 2011-06-25 12:56:37.000000000 -0400
25005 @@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n
25006 * Configuration for HPT3x2n.
25007 */
25008
25009 -static struct ata_port_operations hpt3x2n_port_ops = {
25010 +static const struct ata_port_operations hpt3x2n_port_ops = {
25011 .inherits = &ata_bmdma_port_ops,
25012
25013 .bmdma_stop = hpt3x2n_bmdma_stop,
25014 diff -urNp linux-2.6.32.44/drivers/ata/pata_hpt3x3.c linux-2.6.32.44/drivers/ata/pata_hpt3x3.c
25015 --- linux-2.6.32.44/drivers/ata/pata_hpt3x3.c 2011-03-27 14:31:47.000000000 -0400
25016 +++ linux-2.6.32.44/drivers/ata/pata_hpt3x3.c 2011-04-17 15:56:46.000000000 -0400
25017 @@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_
25018 ATA_BMDMA_SHT(DRV_NAME),
25019 };
25020
25021 -static struct ata_port_operations hpt3x3_port_ops = {
25022 +static const struct ata_port_operations hpt3x3_port_ops = {
25023 .inherits = &ata_bmdma_port_ops,
25024 .cable_detect = ata_cable_40wire,
25025 .set_piomode = hpt3x3_set_piomode,
25026 diff -urNp linux-2.6.32.44/drivers/ata/pata_icside.c linux-2.6.32.44/drivers/ata/pata_icside.c
25027 --- linux-2.6.32.44/drivers/ata/pata_icside.c 2011-03-27 14:31:47.000000000 -0400
25028 +++ linux-2.6.32.44/drivers/ata/pata_icside.c 2011-04-17 15:56:46.000000000 -0400
25029 @@ -319,7 +319,7 @@ static void pata_icside_postreset(struct
25030 }
25031 }
25032
25033 -static struct ata_port_operations pata_icside_port_ops = {
25034 +static const struct ata_port_operations pata_icside_port_ops = {
25035 .inherits = &ata_sff_port_ops,
25036 /* no need to build any PRD tables for DMA */
25037 .qc_prep = ata_noop_qc_prep,
25038 diff -urNp linux-2.6.32.44/drivers/ata/pata_isapnp.c linux-2.6.32.44/drivers/ata/pata_isapnp.c
25039 --- linux-2.6.32.44/drivers/ata/pata_isapnp.c 2011-03-27 14:31:47.000000000 -0400
25040 +++ linux-2.6.32.44/drivers/ata/pata_isapnp.c 2011-04-17 15:56:46.000000000 -0400
25041 @@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_
25042 ATA_PIO_SHT(DRV_NAME),
25043 };
25044
25045 -static struct ata_port_operations isapnp_port_ops = {
25046 +static const struct ata_port_operations isapnp_port_ops = {
25047 .inherits = &ata_sff_port_ops,
25048 .cable_detect = ata_cable_40wire,
25049 };
25050
25051 -static struct ata_port_operations isapnp_noalt_port_ops = {
25052 +static const struct ata_port_operations isapnp_noalt_port_ops = {
25053 .inherits = &ata_sff_port_ops,
25054 .cable_detect = ata_cable_40wire,
25055 /* No altstatus so we don't want to use the lost interrupt poll */
25056 diff -urNp linux-2.6.32.44/drivers/ata/pata_it8213.c linux-2.6.32.44/drivers/ata/pata_it8213.c
25057 --- linux-2.6.32.44/drivers/ata/pata_it8213.c 2011-03-27 14:31:47.000000000 -0400
25058 +++ linux-2.6.32.44/drivers/ata/pata_it8213.c 2011-04-17 15:56:46.000000000 -0400
25059 @@ -234,7 +234,7 @@ static struct scsi_host_template it8213_
25060 };
25061
25062
25063 -static struct ata_port_operations it8213_ops = {
25064 +static const struct ata_port_operations it8213_ops = {
25065 .inherits = &ata_bmdma_port_ops,
25066 .cable_detect = it8213_cable_detect,
25067 .set_piomode = it8213_set_piomode,
25068 diff -urNp linux-2.6.32.44/drivers/ata/pata_it821x.c linux-2.6.32.44/drivers/ata/pata_it821x.c
25069 --- linux-2.6.32.44/drivers/ata/pata_it821x.c 2011-03-27 14:31:47.000000000 -0400
25070 +++ linux-2.6.32.44/drivers/ata/pata_it821x.c 2011-04-17 15:56:46.000000000 -0400
25071 @@ -800,7 +800,7 @@ static struct scsi_host_template it821x_
25072 ATA_BMDMA_SHT(DRV_NAME),
25073 };
25074
25075 -static struct ata_port_operations it821x_smart_port_ops = {
25076 +static const struct ata_port_operations it821x_smart_port_ops = {
25077 .inherits = &ata_bmdma_port_ops,
25078
25079 .check_atapi_dma= it821x_check_atapi_dma,
25080 @@ -814,7 +814,7 @@ static struct ata_port_operations it821x
25081 .port_start = it821x_port_start,
25082 };
25083
25084 -static struct ata_port_operations it821x_passthru_port_ops = {
25085 +static const struct ata_port_operations it821x_passthru_port_ops = {
25086 .inherits = &ata_bmdma_port_ops,
25087
25088 .check_atapi_dma= it821x_check_atapi_dma,
25089 @@ -830,7 +830,7 @@ static struct ata_port_operations it821x
25090 .port_start = it821x_port_start,
25091 };
25092
25093 -static struct ata_port_operations it821x_rdc_port_ops = {
25094 +static const struct ata_port_operations it821x_rdc_port_ops = {
25095 .inherits = &ata_bmdma_port_ops,
25096
25097 .check_atapi_dma= it821x_check_atapi_dma,
25098 diff -urNp linux-2.6.32.44/drivers/ata/pata_ixp4xx_cf.c linux-2.6.32.44/drivers/ata/pata_ixp4xx_cf.c
25099 --- linux-2.6.32.44/drivers/ata/pata_ixp4xx_cf.c 2011-03-27 14:31:47.000000000 -0400
25100 +++ linux-2.6.32.44/drivers/ata/pata_ixp4xx_cf.c 2011-04-17 15:56:46.000000000 -0400
25101 @@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_
25102 ATA_PIO_SHT(DRV_NAME),
25103 };
25104
25105 -static struct ata_port_operations ixp4xx_port_ops = {
25106 +static const struct ata_port_operations ixp4xx_port_ops = {
25107 .inherits = &ata_sff_port_ops,
25108 .sff_data_xfer = ixp4xx_mmio_data_xfer,
25109 .cable_detect = ata_cable_40wire,
25110 diff -urNp linux-2.6.32.44/drivers/ata/pata_jmicron.c linux-2.6.32.44/drivers/ata/pata_jmicron.c
25111 --- linux-2.6.32.44/drivers/ata/pata_jmicron.c 2011-03-27 14:31:47.000000000 -0400
25112 +++ linux-2.6.32.44/drivers/ata/pata_jmicron.c 2011-04-17 15:56:46.000000000 -0400
25113 @@ -111,7 +111,7 @@ static struct scsi_host_template jmicron
25114 ATA_BMDMA_SHT(DRV_NAME),
25115 };
25116
25117 -static struct ata_port_operations jmicron_ops = {
25118 +static const struct ata_port_operations jmicron_ops = {
25119 .inherits = &ata_bmdma_port_ops,
25120 .prereset = jmicron_pre_reset,
25121 };
25122 diff -urNp linux-2.6.32.44/drivers/ata/pata_legacy.c linux-2.6.32.44/drivers/ata/pata_legacy.c
25123 --- linux-2.6.32.44/drivers/ata/pata_legacy.c 2011-03-27 14:31:47.000000000 -0400
25124 +++ linux-2.6.32.44/drivers/ata/pata_legacy.c 2011-04-17 15:56:46.000000000 -0400
25125 @@ -106,7 +106,7 @@ struct legacy_probe {
25126
25127 struct legacy_controller {
25128 const char *name;
25129 - struct ata_port_operations *ops;
25130 + const struct ata_port_operations *ops;
25131 unsigned int pio_mask;
25132 unsigned int flags;
25133 unsigned int pflags;
25134 @@ -223,12 +223,12 @@ static const struct ata_port_operations
25135 * pio_mask as well.
25136 */
25137
25138 -static struct ata_port_operations simple_port_ops = {
25139 +static const struct ata_port_operations simple_port_ops = {
25140 .inherits = &legacy_base_port_ops,
25141 .sff_data_xfer = ata_sff_data_xfer_noirq,
25142 };
25143
25144 -static struct ata_port_operations legacy_port_ops = {
25145 +static const struct ata_port_operations legacy_port_ops = {
25146 .inherits = &legacy_base_port_ops,
25147 .sff_data_xfer = ata_sff_data_xfer_noirq,
25148 .set_mode = legacy_set_mode,
25149 @@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(st
25150 return buflen;
25151 }
25152
25153 -static struct ata_port_operations pdc20230_port_ops = {
25154 +static const struct ata_port_operations pdc20230_port_ops = {
25155 .inherits = &legacy_base_port_ops,
25156 .set_piomode = pdc20230_set_piomode,
25157 .sff_data_xfer = pdc_data_xfer_vlb,
25158 @@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct a
25159 ioread8(ap->ioaddr.status_addr);
25160 }
25161
25162 -static struct ata_port_operations ht6560a_port_ops = {
25163 +static const struct ata_port_operations ht6560a_port_ops = {
25164 .inherits = &legacy_base_port_ops,
25165 .set_piomode = ht6560a_set_piomode,
25166 };
25167 @@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct a
25168 ioread8(ap->ioaddr.status_addr);
25169 }
25170
25171 -static struct ata_port_operations ht6560b_port_ops = {
25172 +static const struct ata_port_operations ht6560b_port_ops = {
25173 .inherits = &legacy_base_port_ops,
25174 .set_piomode = ht6560b_set_piomode,
25175 };
25176 @@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(stru
25177 }
25178
25179
25180 -static struct ata_port_operations opti82c611a_port_ops = {
25181 +static const struct ata_port_operations opti82c611a_port_ops = {
25182 .inherits = &legacy_base_port_ops,
25183 .set_piomode = opti82c611a_set_piomode,
25184 };
25185 @@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(
25186 return ata_sff_qc_issue(qc);
25187 }
25188
25189 -static struct ata_port_operations opti82c46x_port_ops = {
25190 +static const struct ata_port_operations opti82c46x_port_ops = {
25191 .inherits = &legacy_base_port_ops,
25192 .set_piomode = opti82c46x_set_piomode,
25193 .qc_issue = opti82c46x_qc_issue,
25194 @@ -771,20 +771,20 @@ static int qdi_port(struct platform_devi
25195 return 0;
25196 }
25197
25198 -static struct ata_port_operations qdi6500_port_ops = {
25199 +static const struct ata_port_operations qdi6500_port_ops = {
25200 .inherits = &legacy_base_port_ops,
25201 .set_piomode = qdi6500_set_piomode,
25202 .qc_issue = qdi_qc_issue,
25203 .sff_data_xfer = vlb32_data_xfer,
25204 };
25205
25206 -static struct ata_port_operations qdi6580_port_ops = {
25207 +static const struct ata_port_operations qdi6580_port_ops = {
25208 .inherits = &legacy_base_port_ops,
25209 .set_piomode = qdi6580_set_piomode,
25210 .sff_data_xfer = vlb32_data_xfer,
25211 };
25212
25213 -static struct ata_port_operations qdi6580dp_port_ops = {
25214 +static const struct ata_port_operations qdi6580dp_port_ops = {
25215 .inherits = &legacy_base_port_ops,
25216 .set_piomode = qdi6580dp_set_piomode,
25217 .sff_data_xfer = vlb32_data_xfer,
25218 @@ -855,7 +855,7 @@ static int winbond_port(struct platform_
25219 return 0;
25220 }
25221
25222 -static struct ata_port_operations winbond_port_ops = {
25223 +static const struct ata_port_operations winbond_port_ops = {
25224 .inherits = &legacy_base_port_ops,
25225 .set_piomode = winbond_set_piomode,
25226 .sff_data_xfer = vlb32_data_xfer,
25227 @@ -978,7 +978,7 @@ static __init int legacy_init_one(struct
25228 int pio_modes = controller->pio_mask;
25229 unsigned long io = probe->port;
25230 u32 mask = (1 << probe->slot);
25231 - struct ata_port_operations *ops = controller->ops;
25232 + const struct ata_port_operations *ops = controller->ops;
25233 struct legacy_data *ld = &legacy_data[probe->slot];
25234 struct ata_host *host = NULL;
25235 struct ata_port *ap;
25236 diff -urNp linux-2.6.32.44/drivers/ata/pata_marvell.c linux-2.6.32.44/drivers/ata/pata_marvell.c
25237 --- linux-2.6.32.44/drivers/ata/pata_marvell.c 2011-03-27 14:31:47.000000000 -0400
25238 +++ linux-2.6.32.44/drivers/ata/pata_marvell.c 2011-04-17 15:56:46.000000000 -0400
25239 @@ -100,7 +100,7 @@ static struct scsi_host_template marvell
25240 ATA_BMDMA_SHT(DRV_NAME),
25241 };
25242
25243 -static struct ata_port_operations marvell_ops = {
25244 +static const struct ata_port_operations marvell_ops = {
25245 .inherits = &ata_bmdma_port_ops,
25246 .cable_detect = marvell_cable_detect,
25247 .prereset = marvell_pre_reset,
25248 diff -urNp linux-2.6.32.44/drivers/ata/pata_mpc52xx.c linux-2.6.32.44/drivers/ata/pata_mpc52xx.c
25249 --- linux-2.6.32.44/drivers/ata/pata_mpc52xx.c 2011-03-27 14:31:47.000000000 -0400
25250 +++ linux-2.6.32.44/drivers/ata/pata_mpc52xx.c 2011-04-17 15:56:46.000000000 -0400
25251 @@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx
25252 ATA_PIO_SHT(DRV_NAME),
25253 };
25254
25255 -static struct ata_port_operations mpc52xx_ata_port_ops = {
25256 +static const struct ata_port_operations mpc52xx_ata_port_ops = {
25257 .inherits = &ata_bmdma_port_ops,
25258 .sff_dev_select = mpc52xx_ata_dev_select,
25259 .set_piomode = mpc52xx_ata_set_piomode,
25260 diff -urNp linux-2.6.32.44/drivers/ata/pata_mpiix.c linux-2.6.32.44/drivers/ata/pata_mpiix.c
25261 --- linux-2.6.32.44/drivers/ata/pata_mpiix.c 2011-03-27 14:31:47.000000000 -0400
25262 +++ linux-2.6.32.44/drivers/ata/pata_mpiix.c 2011-04-17 15:56:46.000000000 -0400
25263 @@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_s
25264 ATA_PIO_SHT(DRV_NAME),
25265 };
25266
25267 -static struct ata_port_operations mpiix_port_ops = {
25268 +static const struct ata_port_operations mpiix_port_ops = {
25269 .inherits = &ata_sff_port_ops,
25270 .qc_issue = mpiix_qc_issue,
25271 .cable_detect = ata_cable_40wire,
25272 diff -urNp linux-2.6.32.44/drivers/ata/pata_netcell.c linux-2.6.32.44/drivers/ata/pata_netcell.c
25273 --- linux-2.6.32.44/drivers/ata/pata_netcell.c 2011-03-27 14:31:47.000000000 -0400
25274 +++ linux-2.6.32.44/drivers/ata/pata_netcell.c 2011-04-17 15:56:46.000000000 -0400
25275 @@ -34,7 +34,7 @@ static struct scsi_host_template netcell
25276 ATA_BMDMA_SHT(DRV_NAME),
25277 };
25278
25279 -static struct ata_port_operations netcell_ops = {
25280 +static const struct ata_port_operations netcell_ops = {
25281 .inherits = &ata_bmdma_port_ops,
25282 .cable_detect = ata_cable_80wire,
25283 .read_id = netcell_read_id,
25284 diff -urNp linux-2.6.32.44/drivers/ata/pata_ninja32.c linux-2.6.32.44/drivers/ata/pata_ninja32.c
25285 --- linux-2.6.32.44/drivers/ata/pata_ninja32.c 2011-03-27 14:31:47.000000000 -0400
25286 +++ linux-2.6.32.44/drivers/ata/pata_ninja32.c 2011-04-17 15:56:46.000000000 -0400
25287 @@ -81,7 +81,7 @@ static struct scsi_host_template ninja32
25288 ATA_BMDMA_SHT(DRV_NAME),
25289 };
25290
25291 -static struct ata_port_operations ninja32_port_ops = {
25292 +static const struct ata_port_operations ninja32_port_ops = {
25293 .inherits = &ata_bmdma_port_ops,
25294 .sff_dev_select = ninja32_dev_select,
25295 .cable_detect = ata_cable_40wire,
25296 diff -urNp linux-2.6.32.44/drivers/ata/pata_ns87410.c linux-2.6.32.44/drivers/ata/pata_ns87410.c
25297 --- linux-2.6.32.44/drivers/ata/pata_ns87410.c 2011-03-27 14:31:47.000000000 -0400
25298 +++ linux-2.6.32.44/drivers/ata/pata_ns87410.c 2011-04-17 15:56:46.000000000 -0400
25299 @@ -132,7 +132,7 @@ static struct scsi_host_template ns87410
25300 ATA_PIO_SHT(DRV_NAME),
25301 };
25302
25303 -static struct ata_port_operations ns87410_port_ops = {
25304 +static const struct ata_port_operations ns87410_port_ops = {
25305 .inherits = &ata_sff_port_ops,
25306 .qc_issue = ns87410_qc_issue,
25307 .cable_detect = ata_cable_40wire,
25308 diff -urNp linux-2.6.32.44/drivers/ata/pata_ns87415.c linux-2.6.32.44/drivers/ata/pata_ns87415.c
25309 --- linux-2.6.32.44/drivers/ata/pata_ns87415.c 2011-03-27 14:31:47.000000000 -0400
25310 +++ linux-2.6.32.44/drivers/ata/pata_ns87415.c 2011-04-17 15:56:46.000000000 -0400
25311 @@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct at
25312 }
25313 #endif /* 87560 SuperIO Support */
25314
25315 -static struct ata_port_operations ns87415_pata_ops = {
25316 +static const struct ata_port_operations ns87415_pata_ops = {
25317 .inherits = &ata_bmdma_port_ops,
25318
25319 .check_atapi_dma = ns87415_check_atapi_dma,
25320 @@ -313,7 +313,7 @@ static struct ata_port_operations ns8741
25321 };
25322
25323 #if defined(CONFIG_SUPERIO)
25324 -static struct ata_port_operations ns87560_pata_ops = {
25325 +static const struct ata_port_operations ns87560_pata_ops = {
25326 .inherits = &ns87415_pata_ops,
25327 .sff_tf_read = ns87560_tf_read,
25328 .sff_check_status = ns87560_check_status,
25329 diff -urNp linux-2.6.32.44/drivers/ata/pata_octeon_cf.c linux-2.6.32.44/drivers/ata/pata_octeon_cf.c
25330 --- linux-2.6.32.44/drivers/ata/pata_octeon_cf.c 2011-03-27 14:31:47.000000000 -0400
25331 +++ linux-2.6.32.44/drivers/ata/pata_octeon_cf.c 2011-04-17 15:56:46.000000000 -0400
25332 @@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(s
25333 return 0;
25334 }
25335
25336 +/* cannot be const */
25337 static struct ata_port_operations octeon_cf_ops = {
25338 .inherits = &ata_sff_port_ops,
25339 .check_atapi_dma = octeon_cf_check_atapi_dma,
25340 diff -urNp linux-2.6.32.44/drivers/ata/pata_oldpiix.c linux-2.6.32.44/drivers/ata/pata_oldpiix.c
25341 --- linux-2.6.32.44/drivers/ata/pata_oldpiix.c 2011-03-27 14:31:47.000000000 -0400
25342 +++ linux-2.6.32.44/drivers/ata/pata_oldpiix.c 2011-04-17 15:56:46.000000000 -0400
25343 @@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix
25344 ATA_BMDMA_SHT(DRV_NAME),
25345 };
25346
25347 -static struct ata_port_operations oldpiix_pata_ops = {
25348 +static const struct ata_port_operations oldpiix_pata_ops = {
25349 .inherits = &ata_bmdma_port_ops,
25350 .qc_issue = oldpiix_qc_issue,
25351 .cable_detect = ata_cable_40wire,
25352 diff -urNp linux-2.6.32.44/drivers/ata/pata_opti.c linux-2.6.32.44/drivers/ata/pata_opti.c
25353 --- linux-2.6.32.44/drivers/ata/pata_opti.c 2011-03-27 14:31:47.000000000 -0400
25354 +++ linux-2.6.32.44/drivers/ata/pata_opti.c 2011-04-17 15:56:46.000000000 -0400
25355 @@ -152,7 +152,7 @@ static struct scsi_host_template opti_sh
25356 ATA_PIO_SHT(DRV_NAME),
25357 };
25358
25359 -static struct ata_port_operations opti_port_ops = {
25360 +static const struct ata_port_operations opti_port_ops = {
25361 .inherits = &ata_sff_port_ops,
25362 .cable_detect = ata_cable_40wire,
25363 .set_piomode = opti_set_piomode,
25364 diff -urNp linux-2.6.32.44/drivers/ata/pata_optidma.c linux-2.6.32.44/drivers/ata/pata_optidma.c
25365 --- linux-2.6.32.44/drivers/ata/pata_optidma.c 2011-03-27 14:31:47.000000000 -0400
25366 +++ linux-2.6.32.44/drivers/ata/pata_optidma.c 2011-04-17 15:56:46.000000000 -0400
25367 @@ -337,7 +337,7 @@ static struct scsi_host_template optidma
25368 ATA_BMDMA_SHT(DRV_NAME),
25369 };
25370
25371 -static struct ata_port_operations optidma_port_ops = {
25372 +static const struct ata_port_operations optidma_port_ops = {
25373 .inherits = &ata_bmdma_port_ops,
25374 .cable_detect = ata_cable_40wire,
25375 .set_piomode = optidma_set_pio_mode,
25376 @@ -346,7 +346,7 @@ static struct ata_port_operations optidm
25377 .prereset = optidma_pre_reset,
25378 };
25379
25380 -static struct ata_port_operations optiplus_port_ops = {
25381 +static const struct ata_port_operations optiplus_port_ops = {
25382 .inherits = &optidma_port_ops,
25383 .set_piomode = optiplus_set_pio_mode,
25384 .set_dmamode = optiplus_set_dma_mode,
25385 diff -urNp linux-2.6.32.44/drivers/ata/pata_palmld.c linux-2.6.32.44/drivers/ata/pata_palmld.c
25386 --- linux-2.6.32.44/drivers/ata/pata_palmld.c 2011-03-27 14:31:47.000000000 -0400
25387 +++ linux-2.6.32.44/drivers/ata/pata_palmld.c 2011-04-17 15:56:46.000000000 -0400
25388 @@ -37,7 +37,7 @@ static struct scsi_host_template palmld_
25389 ATA_PIO_SHT(DRV_NAME),
25390 };
25391
25392 -static struct ata_port_operations palmld_port_ops = {
25393 +static const struct ata_port_operations palmld_port_ops = {
25394 .inherits = &ata_sff_port_ops,
25395 .sff_data_xfer = ata_sff_data_xfer_noirq,
25396 .cable_detect = ata_cable_40wire,
25397 diff -urNp linux-2.6.32.44/drivers/ata/pata_pcmcia.c linux-2.6.32.44/drivers/ata/pata_pcmcia.c
25398 --- linux-2.6.32.44/drivers/ata/pata_pcmcia.c 2011-03-27 14:31:47.000000000 -0400
25399 +++ linux-2.6.32.44/drivers/ata/pata_pcmcia.c 2011-04-17 15:56:46.000000000 -0400
25400 @@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_
25401 ATA_PIO_SHT(DRV_NAME),
25402 };
25403
25404 -static struct ata_port_operations pcmcia_port_ops = {
25405 +static const struct ata_port_operations pcmcia_port_ops = {
25406 .inherits = &ata_sff_port_ops,
25407 .sff_data_xfer = ata_sff_data_xfer_noirq,
25408 .cable_detect = ata_cable_40wire,
25409 .set_mode = pcmcia_set_mode,
25410 };
25411
25412 -static struct ata_port_operations pcmcia_8bit_port_ops = {
25413 +static const struct ata_port_operations pcmcia_8bit_port_ops = {
25414 .inherits = &ata_sff_port_ops,
25415 .sff_data_xfer = ata_data_xfer_8bit,
25416 .cable_detect = ata_cable_40wire,
25417 @@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia
25418 unsigned long io_base, ctl_base;
25419 void __iomem *io_addr, *ctl_addr;
25420 int n_ports = 1;
25421 - struct ata_port_operations *ops = &pcmcia_port_ops;
25422 + const struct ata_port_operations *ops = &pcmcia_port_ops;
25423
25424 info = kzalloc(sizeof(*info), GFP_KERNEL);
25425 if (info == NULL)
25426 diff -urNp linux-2.6.32.44/drivers/ata/pata_pdc2027x.c linux-2.6.32.44/drivers/ata/pata_pdc2027x.c
25427 --- linux-2.6.32.44/drivers/ata/pata_pdc2027x.c 2011-03-27 14:31:47.000000000 -0400
25428 +++ linux-2.6.32.44/drivers/ata/pata_pdc2027x.c 2011-04-17 15:56:46.000000000 -0400
25429 @@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027
25430 ATA_BMDMA_SHT(DRV_NAME),
25431 };
25432
25433 -static struct ata_port_operations pdc2027x_pata100_ops = {
25434 +static const struct ata_port_operations pdc2027x_pata100_ops = {
25435 .inherits = &ata_bmdma_port_ops,
25436 .check_atapi_dma = pdc2027x_check_atapi_dma,
25437 .cable_detect = pdc2027x_cable_detect,
25438 .prereset = pdc2027x_prereset,
25439 };
25440
25441 -static struct ata_port_operations pdc2027x_pata133_ops = {
25442 +static const struct ata_port_operations pdc2027x_pata133_ops = {
25443 .inherits = &pdc2027x_pata100_ops,
25444 .mode_filter = pdc2027x_mode_filter,
25445 .set_piomode = pdc2027x_set_piomode,
25446 diff -urNp linux-2.6.32.44/drivers/ata/pata_pdc202xx_old.c linux-2.6.32.44/drivers/ata/pata_pdc202xx_old.c
25447 --- linux-2.6.32.44/drivers/ata/pata_pdc202xx_old.c 2011-03-27 14:31:47.000000000 -0400
25448 +++ linux-2.6.32.44/drivers/ata/pata_pdc202xx_old.c 2011-04-17 15:56:46.000000000 -0400
25449 @@ -274,7 +274,7 @@ static struct scsi_host_template pdc202x
25450 ATA_BMDMA_SHT(DRV_NAME),
25451 };
25452
25453 -static struct ata_port_operations pdc2024x_port_ops = {
25454 +static const struct ata_port_operations pdc2024x_port_ops = {
25455 .inherits = &ata_bmdma_port_ops,
25456
25457 .cable_detect = ata_cable_40wire,
25458 @@ -284,7 +284,7 @@ static struct ata_port_operations pdc202
25459 .sff_exec_command = pdc202xx_exec_command,
25460 };
25461
25462 -static struct ata_port_operations pdc2026x_port_ops = {
25463 +static const struct ata_port_operations pdc2026x_port_ops = {
25464 .inherits = &pdc2024x_port_ops,
25465
25466 .check_atapi_dma = pdc2026x_check_atapi_dma,
25467 diff -urNp linux-2.6.32.44/drivers/ata/pata_platform.c linux-2.6.32.44/drivers/ata/pata_platform.c
25468 --- linux-2.6.32.44/drivers/ata/pata_platform.c 2011-03-27 14:31:47.000000000 -0400
25469 +++ linux-2.6.32.44/drivers/ata/pata_platform.c 2011-04-17 15:56:46.000000000 -0400
25470 @@ -48,7 +48,7 @@ static struct scsi_host_template pata_pl
25471 ATA_PIO_SHT(DRV_NAME),
25472 };
25473
25474 -static struct ata_port_operations pata_platform_port_ops = {
25475 +static const struct ata_port_operations pata_platform_port_ops = {
25476 .inherits = &ata_sff_port_ops,
25477 .sff_data_xfer = ata_sff_data_xfer_noirq,
25478 .cable_detect = ata_cable_unknown,
25479 diff -urNp linux-2.6.32.44/drivers/ata/pata_qdi.c linux-2.6.32.44/drivers/ata/pata_qdi.c
25480 --- linux-2.6.32.44/drivers/ata/pata_qdi.c 2011-03-27 14:31:47.000000000 -0400
25481 +++ linux-2.6.32.44/drivers/ata/pata_qdi.c 2011-04-17 15:56:46.000000000 -0400
25482 @@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht
25483 ATA_PIO_SHT(DRV_NAME),
25484 };
25485
25486 -static struct ata_port_operations qdi6500_port_ops = {
25487 +static const struct ata_port_operations qdi6500_port_ops = {
25488 .inherits = &ata_sff_port_ops,
25489 .qc_issue = qdi_qc_issue,
25490 .sff_data_xfer = qdi_data_xfer,
25491 @@ -165,7 +165,7 @@ static struct ata_port_operations qdi650
25492 .set_piomode = qdi6500_set_piomode,
25493 };
25494
25495 -static struct ata_port_operations qdi6580_port_ops = {
25496 +static const struct ata_port_operations qdi6580_port_ops = {
25497 .inherits = &qdi6500_port_ops,
25498 .set_piomode = qdi6580_set_piomode,
25499 };
25500 diff -urNp linux-2.6.32.44/drivers/ata/pata_radisys.c linux-2.6.32.44/drivers/ata/pata_radisys.c
25501 --- linux-2.6.32.44/drivers/ata/pata_radisys.c 2011-03-27 14:31:47.000000000 -0400
25502 +++ linux-2.6.32.44/drivers/ata/pata_radisys.c 2011-04-17 15:56:46.000000000 -0400
25503 @@ -187,7 +187,7 @@ static struct scsi_host_template radisys
25504 ATA_BMDMA_SHT(DRV_NAME),
25505 };
25506
25507 -static struct ata_port_operations radisys_pata_ops = {
25508 +static const struct ata_port_operations radisys_pata_ops = {
25509 .inherits = &ata_bmdma_port_ops,
25510 .qc_issue = radisys_qc_issue,
25511 .cable_detect = ata_cable_unknown,
25512 diff -urNp linux-2.6.32.44/drivers/ata/pata_rb532_cf.c linux-2.6.32.44/drivers/ata/pata_rb532_cf.c
25513 --- linux-2.6.32.44/drivers/ata/pata_rb532_cf.c 2011-03-27 14:31:47.000000000 -0400
25514 +++ linux-2.6.32.44/drivers/ata/pata_rb532_cf.c 2011-04-17 15:56:46.000000000 -0400
25515 @@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handle
25516 return IRQ_HANDLED;
25517 }
25518
25519 -static struct ata_port_operations rb532_pata_port_ops = {
25520 +static const struct ata_port_operations rb532_pata_port_ops = {
25521 .inherits = &ata_sff_port_ops,
25522 .sff_data_xfer = ata_sff_data_xfer32,
25523 };
25524 diff -urNp linux-2.6.32.44/drivers/ata/pata_rdc.c linux-2.6.32.44/drivers/ata/pata_rdc.c
25525 --- linux-2.6.32.44/drivers/ata/pata_rdc.c 2011-03-27 14:31:47.000000000 -0400
25526 +++ linux-2.6.32.44/drivers/ata/pata_rdc.c 2011-04-17 15:56:46.000000000 -0400
25527 @@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_p
25528 pci_write_config_byte(dev, 0x48, udma_enable);
25529 }
25530
25531 -static struct ata_port_operations rdc_pata_ops = {
25532 +static const struct ata_port_operations rdc_pata_ops = {
25533 .inherits = &ata_bmdma32_port_ops,
25534 .cable_detect = rdc_pata_cable_detect,
25535 .set_piomode = rdc_set_piomode,
25536 diff -urNp linux-2.6.32.44/drivers/ata/pata_rz1000.c linux-2.6.32.44/drivers/ata/pata_rz1000.c
25537 --- linux-2.6.32.44/drivers/ata/pata_rz1000.c 2011-03-27 14:31:47.000000000 -0400
25538 +++ linux-2.6.32.44/drivers/ata/pata_rz1000.c 2011-04-17 15:56:46.000000000 -0400
25539 @@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_
25540 ATA_PIO_SHT(DRV_NAME),
25541 };
25542
25543 -static struct ata_port_operations rz1000_port_ops = {
25544 +static const struct ata_port_operations rz1000_port_ops = {
25545 .inherits = &ata_sff_port_ops,
25546 .cable_detect = ata_cable_40wire,
25547 .set_mode = rz1000_set_mode,
25548 diff -urNp linux-2.6.32.44/drivers/ata/pata_sc1200.c linux-2.6.32.44/drivers/ata/pata_sc1200.c
25549 --- linux-2.6.32.44/drivers/ata/pata_sc1200.c 2011-03-27 14:31:47.000000000 -0400
25550 +++ linux-2.6.32.44/drivers/ata/pata_sc1200.c 2011-04-17 15:56:46.000000000 -0400
25551 @@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_
25552 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
25553 };
25554
25555 -static struct ata_port_operations sc1200_port_ops = {
25556 +static const struct ata_port_operations sc1200_port_ops = {
25557 .inherits = &ata_bmdma_port_ops,
25558 .qc_prep = ata_sff_dumb_qc_prep,
25559 .qc_issue = sc1200_qc_issue,
25560 diff -urNp linux-2.6.32.44/drivers/ata/pata_scc.c linux-2.6.32.44/drivers/ata/pata_scc.c
25561 --- linux-2.6.32.44/drivers/ata/pata_scc.c 2011-03-27 14:31:47.000000000 -0400
25562 +++ linux-2.6.32.44/drivers/ata/pata_scc.c 2011-04-17 15:56:46.000000000 -0400
25563 @@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht
25564 ATA_BMDMA_SHT(DRV_NAME),
25565 };
25566
25567 -static struct ata_port_operations scc_pata_ops = {
25568 +static const struct ata_port_operations scc_pata_ops = {
25569 .inherits = &ata_bmdma_port_ops,
25570
25571 .set_piomode = scc_set_piomode,
25572 diff -urNp linux-2.6.32.44/drivers/ata/pata_sch.c linux-2.6.32.44/drivers/ata/pata_sch.c
25573 --- linux-2.6.32.44/drivers/ata/pata_sch.c 2011-03-27 14:31:47.000000000 -0400
25574 +++ linux-2.6.32.44/drivers/ata/pata_sch.c 2011-04-17 15:56:46.000000000 -0400
25575 @@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht
25576 ATA_BMDMA_SHT(DRV_NAME),
25577 };
25578
25579 -static struct ata_port_operations sch_pata_ops = {
25580 +static const struct ata_port_operations sch_pata_ops = {
25581 .inherits = &ata_bmdma_port_ops,
25582 .cable_detect = ata_cable_unknown,
25583 .set_piomode = sch_set_piomode,
25584 diff -urNp linux-2.6.32.44/drivers/ata/pata_serverworks.c linux-2.6.32.44/drivers/ata/pata_serverworks.c
25585 --- linux-2.6.32.44/drivers/ata/pata_serverworks.c 2011-03-27 14:31:47.000000000 -0400
25586 +++ linux-2.6.32.44/drivers/ata/pata_serverworks.c 2011-04-17 15:56:46.000000000 -0400
25587 @@ -299,7 +299,7 @@ static struct scsi_host_template serverw
25588 ATA_BMDMA_SHT(DRV_NAME),
25589 };
25590
25591 -static struct ata_port_operations serverworks_osb4_port_ops = {
25592 +static const struct ata_port_operations serverworks_osb4_port_ops = {
25593 .inherits = &ata_bmdma_port_ops,
25594 .cable_detect = serverworks_cable_detect,
25595 .mode_filter = serverworks_osb4_filter,
25596 @@ -307,7 +307,7 @@ static struct ata_port_operations server
25597 .set_dmamode = serverworks_set_dmamode,
25598 };
25599
25600 -static struct ata_port_operations serverworks_csb_port_ops = {
25601 +static const struct ata_port_operations serverworks_csb_port_ops = {
25602 .inherits = &serverworks_osb4_port_ops,
25603 .mode_filter = serverworks_csb_filter,
25604 };
25605 diff -urNp linux-2.6.32.44/drivers/ata/pata_sil680.c linux-2.6.32.44/drivers/ata/pata_sil680.c
25606 --- linux-2.6.32.44/drivers/ata/pata_sil680.c 2011-06-25 12:55:34.000000000 -0400
25607 +++ linux-2.6.32.44/drivers/ata/pata_sil680.c 2011-06-25 12:56:37.000000000 -0400
25608 @@ -194,7 +194,7 @@ static struct scsi_host_template sil680_
25609 ATA_BMDMA_SHT(DRV_NAME),
25610 };
25611
25612 -static struct ata_port_operations sil680_port_ops = {
25613 +static const struct ata_port_operations sil680_port_ops = {
25614 .inherits = &ata_bmdma32_port_ops,
25615 .cable_detect = sil680_cable_detect,
25616 .set_piomode = sil680_set_piomode,
25617 diff -urNp linux-2.6.32.44/drivers/ata/pata_sis.c linux-2.6.32.44/drivers/ata/pata_sis.c
25618 --- linux-2.6.32.44/drivers/ata/pata_sis.c 2011-03-27 14:31:47.000000000 -0400
25619 +++ linux-2.6.32.44/drivers/ata/pata_sis.c 2011-04-17 15:56:46.000000000 -0400
25620 @@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht
25621 ATA_BMDMA_SHT(DRV_NAME),
25622 };
25623
25624 -static struct ata_port_operations sis_133_for_sata_ops = {
25625 +static const struct ata_port_operations sis_133_for_sata_ops = {
25626 .inherits = &ata_bmdma_port_ops,
25627 .set_piomode = sis_133_set_piomode,
25628 .set_dmamode = sis_133_set_dmamode,
25629 .cable_detect = sis_133_cable_detect,
25630 };
25631
25632 -static struct ata_port_operations sis_base_ops = {
25633 +static const struct ata_port_operations sis_base_ops = {
25634 .inherits = &ata_bmdma_port_ops,
25635 .prereset = sis_pre_reset,
25636 };
25637
25638 -static struct ata_port_operations sis_133_ops = {
25639 +static const struct ata_port_operations sis_133_ops = {
25640 .inherits = &sis_base_ops,
25641 .set_piomode = sis_133_set_piomode,
25642 .set_dmamode = sis_133_set_dmamode,
25643 .cable_detect = sis_133_cable_detect,
25644 };
25645
25646 -static struct ata_port_operations sis_133_early_ops = {
25647 +static const struct ata_port_operations sis_133_early_ops = {
25648 .inherits = &sis_base_ops,
25649 .set_piomode = sis_100_set_piomode,
25650 .set_dmamode = sis_133_early_set_dmamode,
25651 .cable_detect = sis_66_cable_detect,
25652 };
25653
25654 -static struct ata_port_operations sis_100_ops = {
25655 +static const struct ata_port_operations sis_100_ops = {
25656 .inherits = &sis_base_ops,
25657 .set_piomode = sis_100_set_piomode,
25658 .set_dmamode = sis_100_set_dmamode,
25659 .cable_detect = sis_66_cable_detect,
25660 };
25661
25662 -static struct ata_port_operations sis_66_ops = {
25663 +static const struct ata_port_operations sis_66_ops = {
25664 .inherits = &sis_base_ops,
25665 .set_piomode = sis_old_set_piomode,
25666 .set_dmamode = sis_66_set_dmamode,
25667 .cable_detect = sis_66_cable_detect,
25668 };
25669
25670 -static struct ata_port_operations sis_old_ops = {
25671 +static const struct ata_port_operations sis_old_ops = {
25672 .inherits = &sis_base_ops,
25673 .set_piomode = sis_old_set_piomode,
25674 .set_dmamode = sis_old_set_dmamode,
25675 diff -urNp linux-2.6.32.44/drivers/ata/pata_sl82c105.c linux-2.6.32.44/drivers/ata/pata_sl82c105.c
25676 --- linux-2.6.32.44/drivers/ata/pata_sl82c105.c 2011-03-27 14:31:47.000000000 -0400
25677 +++ linux-2.6.32.44/drivers/ata/pata_sl82c105.c 2011-04-17 15:56:46.000000000 -0400
25678 @@ -231,7 +231,7 @@ static struct scsi_host_template sl82c10
25679 ATA_BMDMA_SHT(DRV_NAME),
25680 };
25681
25682 -static struct ata_port_operations sl82c105_port_ops = {
25683 +static const struct ata_port_operations sl82c105_port_ops = {
25684 .inherits = &ata_bmdma_port_ops,
25685 .qc_defer = sl82c105_qc_defer,
25686 .bmdma_start = sl82c105_bmdma_start,
25687 diff -urNp linux-2.6.32.44/drivers/ata/pata_triflex.c linux-2.6.32.44/drivers/ata/pata_triflex.c
25688 --- linux-2.6.32.44/drivers/ata/pata_triflex.c 2011-03-27 14:31:47.000000000 -0400
25689 +++ linux-2.6.32.44/drivers/ata/pata_triflex.c 2011-04-17 15:56:46.000000000 -0400
25690 @@ -178,7 +178,7 @@ static struct scsi_host_template triflex
25691 ATA_BMDMA_SHT(DRV_NAME),
25692 };
25693
25694 -static struct ata_port_operations triflex_port_ops = {
25695 +static const struct ata_port_operations triflex_port_ops = {
25696 .inherits = &ata_bmdma_port_ops,
25697 .bmdma_start = triflex_bmdma_start,
25698 .bmdma_stop = triflex_bmdma_stop,
25699 diff -urNp linux-2.6.32.44/drivers/ata/pata_via.c linux-2.6.32.44/drivers/ata/pata_via.c
25700 --- linux-2.6.32.44/drivers/ata/pata_via.c 2011-03-27 14:31:47.000000000 -0400
25701 +++ linux-2.6.32.44/drivers/ata/pata_via.c 2011-04-17 15:56:46.000000000 -0400
25702 @@ -419,7 +419,7 @@ static struct scsi_host_template via_sht
25703 ATA_BMDMA_SHT(DRV_NAME),
25704 };
25705
25706 -static struct ata_port_operations via_port_ops = {
25707 +static const struct ata_port_operations via_port_ops = {
25708 .inherits = &ata_bmdma_port_ops,
25709 .cable_detect = via_cable_detect,
25710 .set_piomode = via_set_piomode,
25711 @@ -429,7 +429,7 @@ static struct ata_port_operations via_po
25712 .port_start = via_port_start,
25713 };
25714
25715 -static struct ata_port_operations via_port_ops_noirq = {
25716 +static const struct ata_port_operations via_port_ops_noirq = {
25717 .inherits = &via_port_ops,
25718 .sff_data_xfer = ata_sff_data_xfer_noirq,
25719 };
25720 diff -urNp linux-2.6.32.44/drivers/ata/pata_winbond.c linux-2.6.32.44/drivers/ata/pata_winbond.c
25721 --- linux-2.6.32.44/drivers/ata/pata_winbond.c 2011-03-27 14:31:47.000000000 -0400
25722 +++ linux-2.6.32.44/drivers/ata/pata_winbond.c 2011-04-17 15:56:46.000000000 -0400
25723 @@ -125,7 +125,7 @@ static struct scsi_host_template winbond
25724 ATA_PIO_SHT(DRV_NAME),
25725 };
25726
25727 -static struct ata_port_operations winbond_port_ops = {
25728 +static const struct ata_port_operations winbond_port_ops = {
25729 .inherits = &ata_sff_port_ops,
25730 .sff_data_xfer = winbond_data_xfer,
25731 .cable_detect = ata_cable_40wire,
25732 diff -urNp linux-2.6.32.44/drivers/ata/pdc_adma.c linux-2.6.32.44/drivers/ata/pdc_adma.c
25733 --- linux-2.6.32.44/drivers/ata/pdc_adma.c 2011-03-27 14:31:47.000000000 -0400
25734 +++ linux-2.6.32.44/drivers/ata/pdc_adma.c 2011-04-17 15:56:46.000000000 -0400
25735 @@ -145,7 +145,7 @@ static struct scsi_host_template adma_at
25736 .dma_boundary = ADMA_DMA_BOUNDARY,
25737 };
25738
25739 -static struct ata_port_operations adma_ata_ops = {
25740 +static const struct ata_port_operations adma_ata_ops = {
25741 .inherits = &ata_sff_port_ops,
25742
25743 .lost_interrupt = ATA_OP_NULL,
25744 diff -urNp linux-2.6.32.44/drivers/ata/sata_fsl.c linux-2.6.32.44/drivers/ata/sata_fsl.c
25745 --- linux-2.6.32.44/drivers/ata/sata_fsl.c 2011-03-27 14:31:47.000000000 -0400
25746 +++ linux-2.6.32.44/drivers/ata/sata_fsl.c 2011-04-17 15:56:46.000000000 -0400
25747 @@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fs
25748 .dma_boundary = ATA_DMA_BOUNDARY,
25749 };
25750
25751 -static struct ata_port_operations sata_fsl_ops = {
25752 +static const struct ata_port_operations sata_fsl_ops = {
25753 .inherits = &sata_pmp_port_ops,
25754
25755 .qc_defer = ata_std_qc_defer,
25756 diff -urNp linux-2.6.32.44/drivers/ata/sata_inic162x.c linux-2.6.32.44/drivers/ata/sata_inic162x.c
25757 --- linux-2.6.32.44/drivers/ata/sata_inic162x.c 2011-03-27 14:31:47.000000000 -0400
25758 +++ linux-2.6.32.44/drivers/ata/sata_inic162x.c 2011-04-17 15:56:46.000000000 -0400
25759 @@ -721,7 +721,7 @@ static int inic_port_start(struct ata_po
25760 return 0;
25761 }
25762
25763 -static struct ata_port_operations inic_port_ops = {
25764 +static const struct ata_port_operations inic_port_ops = {
25765 .inherits = &sata_port_ops,
25766
25767 .check_atapi_dma = inic_check_atapi_dma,
25768 diff -urNp linux-2.6.32.44/drivers/ata/sata_mv.c linux-2.6.32.44/drivers/ata/sata_mv.c
25769 --- linux-2.6.32.44/drivers/ata/sata_mv.c 2011-03-27 14:31:47.000000000 -0400
25770 +++ linux-2.6.32.44/drivers/ata/sata_mv.c 2011-04-17 15:56:46.000000000 -0400
25771 @@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht
25772 .dma_boundary = MV_DMA_BOUNDARY,
25773 };
25774
25775 -static struct ata_port_operations mv5_ops = {
25776 +static const struct ata_port_operations mv5_ops = {
25777 .inherits = &ata_sff_port_ops,
25778
25779 .lost_interrupt = ATA_OP_NULL,
25780 @@ -678,7 +678,7 @@ static struct ata_port_operations mv5_op
25781 .port_stop = mv_port_stop,
25782 };
25783
25784 -static struct ata_port_operations mv6_ops = {
25785 +static const struct ata_port_operations mv6_ops = {
25786 .inherits = &mv5_ops,
25787 .dev_config = mv6_dev_config,
25788 .scr_read = mv_scr_read,
25789 @@ -698,7 +698,7 @@ static struct ata_port_operations mv6_op
25790 .bmdma_status = mv_bmdma_status,
25791 };
25792
25793 -static struct ata_port_operations mv_iie_ops = {
25794 +static const struct ata_port_operations mv_iie_ops = {
25795 .inherits = &mv6_ops,
25796 .dev_config = ATA_OP_NULL,
25797 .qc_prep = mv_qc_prep_iie,
25798 diff -urNp linux-2.6.32.44/drivers/ata/sata_nv.c linux-2.6.32.44/drivers/ata/sata_nv.c
25799 --- linux-2.6.32.44/drivers/ata/sata_nv.c 2011-03-27 14:31:47.000000000 -0400
25800 +++ linux-2.6.32.44/drivers/ata/sata_nv.c 2011-04-17 15:56:46.000000000 -0400
25801 @@ -464,7 +464,7 @@ static struct scsi_host_template nv_swnc
25802 * cases. Define nv_hardreset() which only kicks in for post-boot
25803 * probing and use it for all variants.
25804 */
25805 -static struct ata_port_operations nv_generic_ops = {
25806 +static const struct ata_port_operations nv_generic_ops = {
25807 .inherits = &ata_bmdma_port_ops,
25808 .lost_interrupt = ATA_OP_NULL,
25809 .scr_read = nv_scr_read,
25810 @@ -472,20 +472,20 @@ static struct ata_port_operations nv_gen
25811 .hardreset = nv_hardreset,
25812 };
25813
25814 -static struct ata_port_operations nv_nf2_ops = {
25815 +static const struct ata_port_operations nv_nf2_ops = {
25816 .inherits = &nv_generic_ops,
25817 .freeze = nv_nf2_freeze,
25818 .thaw = nv_nf2_thaw,
25819 };
25820
25821 -static struct ata_port_operations nv_ck804_ops = {
25822 +static const struct ata_port_operations nv_ck804_ops = {
25823 .inherits = &nv_generic_ops,
25824 .freeze = nv_ck804_freeze,
25825 .thaw = nv_ck804_thaw,
25826 .host_stop = nv_ck804_host_stop,
25827 };
25828
25829 -static struct ata_port_operations nv_adma_ops = {
25830 +static const struct ata_port_operations nv_adma_ops = {
25831 .inherits = &nv_ck804_ops,
25832
25833 .check_atapi_dma = nv_adma_check_atapi_dma,
25834 @@ -509,7 +509,7 @@ static struct ata_port_operations nv_adm
25835 .host_stop = nv_adma_host_stop,
25836 };
25837
25838 -static struct ata_port_operations nv_swncq_ops = {
25839 +static const struct ata_port_operations nv_swncq_ops = {
25840 .inherits = &nv_generic_ops,
25841
25842 .qc_defer = ata_std_qc_defer,
25843 diff -urNp linux-2.6.32.44/drivers/ata/sata_promise.c linux-2.6.32.44/drivers/ata/sata_promise.c
25844 --- linux-2.6.32.44/drivers/ata/sata_promise.c 2011-03-27 14:31:47.000000000 -0400
25845 +++ linux-2.6.32.44/drivers/ata/sata_promise.c 2011-04-17 15:56:46.000000000 -0400
25846 @@ -195,7 +195,7 @@ static const struct ata_port_operations
25847 .error_handler = pdc_error_handler,
25848 };
25849
25850 -static struct ata_port_operations pdc_sata_ops = {
25851 +static const struct ata_port_operations pdc_sata_ops = {
25852 .inherits = &pdc_common_ops,
25853 .cable_detect = pdc_sata_cable_detect,
25854 .freeze = pdc_sata_freeze,
25855 @@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sa
25856
25857 /* First-generation chips need a more restrictive ->check_atapi_dma op,
25858 and ->freeze/thaw that ignore the hotplug controls. */
25859 -static struct ata_port_operations pdc_old_sata_ops = {
25860 +static const struct ata_port_operations pdc_old_sata_ops = {
25861 .inherits = &pdc_sata_ops,
25862 .freeze = pdc_freeze,
25863 .thaw = pdc_thaw,
25864 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
25865 };
25866
25867 -static struct ata_port_operations pdc_pata_ops = {
25868 +static const struct ata_port_operations pdc_pata_ops = {
25869 .inherits = &pdc_common_ops,
25870 .cable_detect = pdc_pata_cable_detect,
25871 .freeze = pdc_freeze,
25872 diff -urNp linux-2.6.32.44/drivers/ata/sata_qstor.c linux-2.6.32.44/drivers/ata/sata_qstor.c
25873 --- linux-2.6.32.44/drivers/ata/sata_qstor.c 2011-03-27 14:31:47.000000000 -0400
25874 +++ linux-2.6.32.44/drivers/ata/sata_qstor.c 2011-04-17 15:56:46.000000000 -0400
25875 @@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_
25876 .dma_boundary = QS_DMA_BOUNDARY,
25877 };
25878
25879 -static struct ata_port_operations qs_ata_ops = {
25880 +static const struct ata_port_operations qs_ata_ops = {
25881 .inherits = &ata_sff_port_ops,
25882
25883 .check_atapi_dma = qs_check_atapi_dma,
25884 diff -urNp linux-2.6.32.44/drivers/ata/sata_sil24.c linux-2.6.32.44/drivers/ata/sata_sil24.c
25885 --- linux-2.6.32.44/drivers/ata/sata_sil24.c 2011-03-27 14:31:47.000000000 -0400
25886 +++ linux-2.6.32.44/drivers/ata/sata_sil24.c 2011-04-17 15:56:46.000000000 -0400
25887 @@ -388,7 +388,7 @@ static struct scsi_host_template sil24_s
25888 .dma_boundary = ATA_DMA_BOUNDARY,
25889 };
25890
25891 -static struct ata_port_operations sil24_ops = {
25892 +static const struct ata_port_operations sil24_ops = {
25893 .inherits = &sata_pmp_port_ops,
25894
25895 .qc_defer = sil24_qc_defer,
25896 diff -urNp linux-2.6.32.44/drivers/ata/sata_sil.c linux-2.6.32.44/drivers/ata/sata_sil.c
25897 --- linux-2.6.32.44/drivers/ata/sata_sil.c 2011-03-27 14:31:47.000000000 -0400
25898 +++ linux-2.6.32.44/drivers/ata/sata_sil.c 2011-04-17 15:56:46.000000000 -0400
25899 @@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht
25900 .sg_tablesize = ATA_MAX_PRD
25901 };
25902
25903 -static struct ata_port_operations sil_ops = {
25904 +static const struct ata_port_operations sil_ops = {
25905 .inherits = &ata_bmdma32_port_ops,
25906 .dev_config = sil_dev_config,
25907 .set_mode = sil_set_mode,
25908 diff -urNp linux-2.6.32.44/drivers/ata/sata_sis.c linux-2.6.32.44/drivers/ata/sata_sis.c
25909 --- linux-2.6.32.44/drivers/ata/sata_sis.c 2011-03-27 14:31:47.000000000 -0400
25910 +++ linux-2.6.32.44/drivers/ata/sata_sis.c 2011-04-17 15:56:46.000000000 -0400
25911 @@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht
25912 ATA_BMDMA_SHT(DRV_NAME),
25913 };
25914
25915 -static struct ata_port_operations sis_ops = {
25916 +static const struct ata_port_operations sis_ops = {
25917 .inherits = &ata_bmdma_port_ops,
25918 .scr_read = sis_scr_read,
25919 .scr_write = sis_scr_write,
25920 diff -urNp linux-2.6.32.44/drivers/ata/sata_svw.c linux-2.6.32.44/drivers/ata/sata_svw.c
25921 --- linux-2.6.32.44/drivers/ata/sata_svw.c 2011-03-27 14:31:47.000000000 -0400
25922 +++ linux-2.6.32.44/drivers/ata/sata_svw.c 2011-04-17 15:56:46.000000000 -0400
25923 @@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata
25924 };
25925
25926
25927 -static struct ata_port_operations k2_sata_ops = {
25928 +static const struct ata_port_operations k2_sata_ops = {
25929 .inherits = &ata_bmdma_port_ops,
25930 .sff_tf_load = k2_sata_tf_load,
25931 .sff_tf_read = k2_sata_tf_read,
25932 diff -urNp linux-2.6.32.44/drivers/ata/sata_sx4.c linux-2.6.32.44/drivers/ata/sata_sx4.c
25933 --- linux-2.6.32.44/drivers/ata/sata_sx4.c 2011-03-27 14:31:47.000000000 -0400
25934 +++ linux-2.6.32.44/drivers/ata/sata_sx4.c 2011-04-17 15:56:46.000000000 -0400
25935 @@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sat
25936 };
25937
25938 /* TODO: inherit from base port_ops after converting to new EH */
25939 -static struct ata_port_operations pdc_20621_ops = {
25940 +static const struct ata_port_operations pdc_20621_ops = {
25941 .inherits = &ata_sff_port_ops,
25942
25943 .check_atapi_dma = pdc_check_atapi_dma,
25944 diff -urNp linux-2.6.32.44/drivers/ata/sata_uli.c linux-2.6.32.44/drivers/ata/sata_uli.c
25945 --- linux-2.6.32.44/drivers/ata/sata_uli.c 2011-03-27 14:31:47.000000000 -0400
25946 +++ linux-2.6.32.44/drivers/ata/sata_uli.c 2011-04-17 15:56:46.000000000 -0400
25947 @@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht
25948 ATA_BMDMA_SHT(DRV_NAME),
25949 };
25950
25951 -static struct ata_port_operations uli_ops = {
25952 +static const struct ata_port_operations uli_ops = {
25953 .inherits = &ata_bmdma_port_ops,
25954 .scr_read = uli_scr_read,
25955 .scr_write = uli_scr_write,
25956 diff -urNp linux-2.6.32.44/drivers/ata/sata_via.c linux-2.6.32.44/drivers/ata/sata_via.c
25957 --- linux-2.6.32.44/drivers/ata/sata_via.c 2011-05-10 22:12:01.000000000 -0400
25958 +++ linux-2.6.32.44/drivers/ata/sata_via.c 2011-05-10 22:15:08.000000000 -0400
25959 @@ -115,32 +115,32 @@ static struct scsi_host_template svia_sh
25960 ATA_BMDMA_SHT(DRV_NAME),
25961 };
25962
25963 -static struct ata_port_operations svia_base_ops = {
25964 +static const struct ata_port_operations svia_base_ops = {
25965 .inherits = &ata_bmdma_port_ops,
25966 .sff_tf_load = svia_tf_load,
25967 };
25968
25969 -static struct ata_port_operations vt6420_sata_ops = {
25970 +static const struct ata_port_operations vt6420_sata_ops = {
25971 .inherits = &svia_base_ops,
25972 .freeze = svia_noop_freeze,
25973 .prereset = vt6420_prereset,
25974 .bmdma_start = vt6420_bmdma_start,
25975 };
25976
25977 -static struct ata_port_operations vt6421_pata_ops = {
25978 +static const struct ata_port_operations vt6421_pata_ops = {
25979 .inherits = &svia_base_ops,
25980 .cable_detect = vt6421_pata_cable_detect,
25981 .set_piomode = vt6421_set_pio_mode,
25982 .set_dmamode = vt6421_set_dma_mode,
25983 };
25984
25985 -static struct ata_port_operations vt6421_sata_ops = {
25986 +static const struct ata_port_operations vt6421_sata_ops = {
25987 .inherits = &svia_base_ops,
25988 .scr_read = svia_scr_read,
25989 .scr_write = svia_scr_write,
25990 };
25991
25992 -static struct ata_port_operations vt8251_ops = {
25993 +static const struct ata_port_operations vt8251_ops = {
25994 .inherits = &svia_base_ops,
25995 .hardreset = sata_std_hardreset,
25996 .scr_read = vt8251_scr_read,
25997 diff -urNp linux-2.6.32.44/drivers/ata/sata_vsc.c linux-2.6.32.44/drivers/ata/sata_vsc.c
25998 --- linux-2.6.32.44/drivers/ata/sata_vsc.c 2011-03-27 14:31:47.000000000 -0400
25999 +++ linux-2.6.32.44/drivers/ata/sata_vsc.c 2011-04-17 15:56:46.000000000 -0400
26000 @@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sat
26001 };
26002
26003
26004 -static struct ata_port_operations vsc_sata_ops = {
26005 +static const struct ata_port_operations vsc_sata_ops = {
26006 .inherits = &ata_bmdma_port_ops,
26007 /* The IRQ handling is not quite standard SFF behaviour so we
26008 cannot use the default lost interrupt handler */
26009 diff -urNp linux-2.6.32.44/drivers/atm/adummy.c linux-2.6.32.44/drivers/atm/adummy.c
26010 --- linux-2.6.32.44/drivers/atm/adummy.c 2011-03-27 14:31:47.000000000 -0400
26011 +++ linux-2.6.32.44/drivers/atm/adummy.c 2011-04-17 15:56:46.000000000 -0400
26012 @@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct
26013 vcc->pop(vcc, skb);
26014 else
26015 dev_kfree_skb_any(skb);
26016 - atomic_inc(&vcc->stats->tx);
26017 + atomic_inc_unchecked(&vcc->stats->tx);
26018
26019 return 0;
26020 }
26021 diff -urNp linux-2.6.32.44/drivers/atm/ambassador.c linux-2.6.32.44/drivers/atm/ambassador.c
26022 --- linux-2.6.32.44/drivers/atm/ambassador.c 2011-03-27 14:31:47.000000000 -0400
26023 +++ linux-2.6.32.44/drivers/atm/ambassador.c 2011-04-17 15:56:46.000000000 -0400
26024 @@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev,
26025 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
26026
26027 // VC layer stats
26028 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26029 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26030
26031 // free the descriptor
26032 kfree (tx_descr);
26033 @@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev,
26034 dump_skb ("<<<", vc, skb);
26035
26036 // VC layer stats
26037 - atomic_inc(&atm_vcc->stats->rx);
26038 + atomic_inc_unchecked(&atm_vcc->stats->rx);
26039 __net_timestamp(skb);
26040 // end of our responsability
26041 atm_vcc->push (atm_vcc, skb);
26042 @@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev,
26043 } else {
26044 PRINTK (KERN_INFO, "dropped over-size frame");
26045 // should we count this?
26046 - atomic_inc(&atm_vcc->stats->rx_drop);
26047 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26048 }
26049
26050 } else {
26051 @@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * at
26052 }
26053
26054 if (check_area (skb->data, skb->len)) {
26055 - atomic_inc(&atm_vcc->stats->tx_err);
26056 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
26057 return -ENOMEM; // ?
26058 }
26059
26060 diff -urNp linux-2.6.32.44/drivers/atm/atmtcp.c linux-2.6.32.44/drivers/atm/atmtcp.c
26061 --- linux-2.6.32.44/drivers/atm/atmtcp.c 2011-03-27 14:31:47.000000000 -0400
26062 +++ linux-2.6.32.44/drivers/atm/atmtcp.c 2011-04-17 15:56:46.000000000 -0400
26063 @@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc
26064 if (vcc->pop) vcc->pop(vcc,skb);
26065 else dev_kfree_skb(skb);
26066 if (dev_data) return 0;
26067 - atomic_inc(&vcc->stats->tx_err);
26068 + atomic_inc_unchecked(&vcc->stats->tx_err);
26069 return -ENOLINK;
26070 }
26071 size = skb->len+sizeof(struct atmtcp_hdr);
26072 @@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc
26073 if (!new_skb) {
26074 if (vcc->pop) vcc->pop(vcc,skb);
26075 else dev_kfree_skb(skb);
26076 - atomic_inc(&vcc->stats->tx_err);
26077 + atomic_inc_unchecked(&vcc->stats->tx_err);
26078 return -ENOBUFS;
26079 }
26080 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
26081 @@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc
26082 if (vcc->pop) vcc->pop(vcc,skb);
26083 else dev_kfree_skb(skb);
26084 out_vcc->push(out_vcc,new_skb);
26085 - atomic_inc(&vcc->stats->tx);
26086 - atomic_inc(&out_vcc->stats->rx);
26087 + atomic_inc_unchecked(&vcc->stats->tx);
26088 + atomic_inc_unchecked(&out_vcc->stats->rx);
26089 return 0;
26090 }
26091
26092 @@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc
26093 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
26094 read_unlock(&vcc_sklist_lock);
26095 if (!out_vcc) {
26096 - atomic_inc(&vcc->stats->tx_err);
26097 + atomic_inc_unchecked(&vcc->stats->tx_err);
26098 goto done;
26099 }
26100 skb_pull(skb,sizeof(struct atmtcp_hdr));
26101 @@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc
26102 __net_timestamp(new_skb);
26103 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
26104 out_vcc->push(out_vcc,new_skb);
26105 - atomic_inc(&vcc->stats->tx);
26106 - atomic_inc(&out_vcc->stats->rx);
26107 + atomic_inc_unchecked(&vcc->stats->tx);
26108 + atomic_inc_unchecked(&out_vcc->stats->rx);
26109 done:
26110 if (vcc->pop) vcc->pop(vcc,skb);
26111 else dev_kfree_skb(skb);
26112 diff -urNp linux-2.6.32.44/drivers/atm/eni.c linux-2.6.32.44/drivers/atm/eni.c
26113 --- linux-2.6.32.44/drivers/atm/eni.c 2011-03-27 14:31:47.000000000 -0400
26114 +++ linux-2.6.32.44/drivers/atm/eni.c 2011-04-17 15:56:46.000000000 -0400
26115 @@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
26116 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
26117 vcc->dev->number);
26118 length = 0;
26119 - atomic_inc(&vcc->stats->rx_err);
26120 + atomic_inc_unchecked(&vcc->stats->rx_err);
26121 }
26122 else {
26123 length = ATM_CELL_SIZE-1; /* no HEC */
26124 @@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
26125 size);
26126 }
26127 eff = length = 0;
26128 - atomic_inc(&vcc->stats->rx_err);
26129 + atomic_inc_unchecked(&vcc->stats->rx_err);
26130 }
26131 else {
26132 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
26133 @@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
26134 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
26135 vcc->dev->number,vcc->vci,length,size << 2,descr);
26136 length = eff = 0;
26137 - atomic_inc(&vcc->stats->rx_err);
26138 + atomic_inc_unchecked(&vcc->stats->rx_err);
26139 }
26140 }
26141 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
26142 @@ -770,7 +770,7 @@ rx_dequeued++;
26143 vcc->push(vcc,skb);
26144 pushed++;
26145 }
26146 - atomic_inc(&vcc->stats->rx);
26147 + atomic_inc_unchecked(&vcc->stats->rx);
26148 }
26149 wake_up(&eni_dev->rx_wait);
26150 }
26151 @@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *d
26152 PCI_DMA_TODEVICE);
26153 if (vcc->pop) vcc->pop(vcc,skb);
26154 else dev_kfree_skb_irq(skb);
26155 - atomic_inc(&vcc->stats->tx);
26156 + atomic_inc_unchecked(&vcc->stats->tx);
26157 wake_up(&eni_dev->tx_wait);
26158 dma_complete++;
26159 }
26160 diff -urNp linux-2.6.32.44/drivers/atm/firestream.c linux-2.6.32.44/drivers/atm/firestream.c
26161 --- linux-2.6.32.44/drivers/atm/firestream.c 2011-03-27 14:31:47.000000000 -0400
26162 +++ linux-2.6.32.44/drivers/atm/firestream.c 2011-04-17 15:56:46.000000000 -0400
26163 @@ -748,7 +748,7 @@ static void process_txdone_queue (struct
26164 }
26165 }
26166
26167 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26168 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26169
26170 fs_dprintk (FS_DEBUG_TXMEM, "i");
26171 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
26172 @@ -815,7 +815,7 @@ static void process_incoming (struct fs_
26173 #endif
26174 skb_put (skb, qe->p1 & 0xffff);
26175 ATM_SKB(skb)->vcc = atm_vcc;
26176 - atomic_inc(&atm_vcc->stats->rx);
26177 + atomic_inc_unchecked(&atm_vcc->stats->rx);
26178 __net_timestamp(skb);
26179 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
26180 atm_vcc->push (atm_vcc, skb);
26181 @@ -836,12 +836,12 @@ static void process_incoming (struct fs_
26182 kfree (pe);
26183 }
26184 if (atm_vcc)
26185 - atomic_inc(&atm_vcc->stats->rx_drop);
26186 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26187 break;
26188 case 0x1f: /* Reassembly abort: no buffers. */
26189 /* Silently increment error counter. */
26190 if (atm_vcc)
26191 - atomic_inc(&atm_vcc->stats->rx_drop);
26192 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26193 break;
26194 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
26195 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
26196 diff -urNp linux-2.6.32.44/drivers/atm/fore200e.c linux-2.6.32.44/drivers/atm/fore200e.c
26197 --- linux-2.6.32.44/drivers/atm/fore200e.c 2011-03-27 14:31:47.000000000 -0400
26198 +++ linux-2.6.32.44/drivers/atm/fore200e.c 2011-04-17 15:56:46.000000000 -0400
26199 @@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200
26200 #endif
26201 /* check error condition */
26202 if (*entry->status & STATUS_ERROR)
26203 - atomic_inc(&vcc->stats->tx_err);
26204 + atomic_inc_unchecked(&vcc->stats->tx_err);
26205 else
26206 - atomic_inc(&vcc->stats->tx);
26207 + atomic_inc_unchecked(&vcc->stats->tx);
26208 }
26209 }
26210
26211 @@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore2
26212 if (skb == NULL) {
26213 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
26214
26215 - atomic_inc(&vcc->stats->rx_drop);
26216 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26217 return -ENOMEM;
26218 }
26219
26220 @@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore2
26221
26222 dev_kfree_skb_any(skb);
26223
26224 - atomic_inc(&vcc->stats->rx_drop);
26225 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26226 return -ENOMEM;
26227 }
26228
26229 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
26230
26231 vcc->push(vcc, skb);
26232 - atomic_inc(&vcc->stats->rx);
26233 + atomic_inc_unchecked(&vcc->stats->rx);
26234
26235 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
26236
26237 @@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200
26238 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
26239 fore200e->atm_dev->number,
26240 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
26241 - atomic_inc(&vcc->stats->rx_err);
26242 + atomic_inc_unchecked(&vcc->stats->rx_err);
26243 }
26244 }
26245
26246 @@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struc
26247 goto retry_here;
26248 }
26249
26250 - atomic_inc(&vcc->stats->tx_err);
26251 + atomic_inc_unchecked(&vcc->stats->tx_err);
26252
26253 fore200e->tx_sat++;
26254 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
26255 diff -urNp linux-2.6.32.44/drivers/atm/he.c linux-2.6.32.44/drivers/atm/he.c
26256 --- linux-2.6.32.44/drivers/atm/he.c 2011-03-27 14:31:47.000000000 -0400
26257 +++ linux-2.6.32.44/drivers/atm/he.c 2011-04-17 15:56:46.000000000 -0400
26258 @@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, i
26259
26260 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
26261 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
26262 - atomic_inc(&vcc->stats->rx_drop);
26263 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26264 goto return_host_buffers;
26265 }
26266
26267 @@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, i
26268 RBRQ_LEN_ERR(he_dev->rbrq_head)
26269 ? "LEN_ERR" : "",
26270 vcc->vpi, vcc->vci);
26271 - atomic_inc(&vcc->stats->rx_err);
26272 + atomic_inc_unchecked(&vcc->stats->rx_err);
26273 goto return_host_buffers;
26274 }
26275
26276 @@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, i
26277 vcc->push(vcc, skb);
26278 spin_lock(&he_dev->global_lock);
26279
26280 - atomic_inc(&vcc->stats->rx);
26281 + atomic_inc_unchecked(&vcc->stats->rx);
26282
26283 return_host_buffers:
26284 ++pdus_assembled;
26285 @@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
26286 tpd->vcc->pop(tpd->vcc, tpd->skb);
26287 else
26288 dev_kfree_skb_any(tpd->skb);
26289 - atomic_inc(&tpd->vcc->stats->tx_err);
26290 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
26291 }
26292 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
26293 return;
26294 @@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26295 vcc->pop(vcc, skb);
26296 else
26297 dev_kfree_skb_any(skb);
26298 - atomic_inc(&vcc->stats->tx_err);
26299 + atomic_inc_unchecked(&vcc->stats->tx_err);
26300 return -EINVAL;
26301 }
26302
26303 @@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26304 vcc->pop(vcc, skb);
26305 else
26306 dev_kfree_skb_any(skb);
26307 - atomic_inc(&vcc->stats->tx_err);
26308 + atomic_inc_unchecked(&vcc->stats->tx_err);
26309 return -EINVAL;
26310 }
26311 #endif
26312 @@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26313 vcc->pop(vcc, skb);
26314 else
26315 dev_kfree_skb_any(skb);
26316 - atomic_inc(&vcc->stats->tx_err);
26317 + atomic_inc_unchecked(&vcc->stats->tx_err);
26318 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26319 return -ENOMEM;
26320 }
26321 @@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26322 vcc->pop(vcc, skb);
26323 else
26324 dev_kfree_skb_any(skb);
26325 - atomic_inc(&vcc->stats->tx_err);
26326 + atomic_inc_unchecked(&vcc->stats->tx_err);
26327 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26328 return -ENOMEM;
26329 }
26330 @@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26331 __enqueue_tpd(he_dev, tpd, cid);
26332 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26333
26334 - atomic_inc(&vcc->stats->tx);
26335 + atomic_inc_unchecked(&vcc->stats->tx);
26336
26337 return 0;
26338 }
26339 diff -urNp linux-2.6.32.44/drivers/atm/horizon.c linux-2.6.32.44/drivers/atm/horizon.c
26340 --- linux-2.6.32.44/drivers/atm/horizon.c 2011-03-27 14:31:47.000000000 -0400
26341 +++ linux-2.6.32.44/drivers/atm/horizon.c 2011-04-17 15:56:46.000000000 -0400
26342 @@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev,
26343 {
26344 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
26345 // VC layer stats
26346 - atomic_inc(&vcc->stats->rx);
26347 + atomic_inc_unchecked(&vcc->stats->rx);
26348 __net_timestamp(skb);
26349 // end of our responsability
26350 vcc->push (vcc, skb);
26351 @@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const
26352 dev->tx_iovec = NULL;
26353
26354 // VC layer stats
26355 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26356 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26357
26358 // free the skb
26359 hrz_kfree_skb (skb);
26360 diff -urNp linux-2.6.32.44/drivers/atm/idt77252.c linux-2.6.32.44/drivers/atm/idt77252.c
26361 --- linux-2.6.32.44/drivers/atm/idt77252.c 2011-03-27 14:31:47.000000000 -0400
26362 +++ linux-2.6.32.44/drivers/atm/idt77252.c 2011-04-17 15:56:46.000000000 -0400
26363 @@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, str
26364 else
26365 dev_kfree_skb(skb);
26366
26367 - atomic_inc(&vcc->stats->tx);
26368 + atomic_inc_unchecked(&vcc->stats->tx);
26369 }
26370
26371 atomic_dec(&scq->used);
26372 @@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, st
26373 if ((sb = dev_alloc_skb(64)) == NULL) {
26374 printk("%s: Can't allocate buffers for aal0.\n",
26375 card->name);
26376 - atomic_add(i, &vcc->stats->rx_drop);
26377 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
26378 break;
26379 }
26380 if (!atm_charge(vcc, sb->truesize)) {
26381 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
26382 card->name);
26383 - atomic_add(i - 1, &vcc->stats->rx_drop);
26384 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
26385 dev_kfree_skb(sb);
26386 break;
26387 }
26388 @@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, st
26389 ATM_SKB(sb)->vcc = vcc;
26390 __net_timestamp(sb);
26391 vcc->push(vcc, sb);
26392 - atomic_inc(&vcc->stats->rx);
26393 + atomic_inc_unchecked(&vcc->stats->rx);
26394
26395 cell += ATM_CELL_PAYLOAD;
26396 }
26397 @@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, st
26398 "(CDC: %08x)\n",
26399 card->name, len, rpp->len, readl(SAR_REG_CDC));
26400 recycle_rx_pool_skb(card, rpp);
26401 - atomic_inc(&vcc->stats->rx_err);
26402 + atomic_inc_unchecked(&vcc->stats->rx_err);
26403 return;
26404 }
26405 if (stat & SAR_RSQE_CRC) {
26406 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
26407 recycle_rx_pool_skb(card, rpp);
26408 - atomic_inc(&vcc->stats->rx_err);
26409 + atomic_inc_unchecked(&vcc->stats->rx_err);
26410 return;
26411 }
26412 if (skb_queue_len(&rpp->queue) > 1) {
26413 @@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, st
26414 RXPRINTK("%s: Can't alloc RX skb.\n",
26415 card->name);
26416 recycle_rx_pool_skb(card, rpp);
26417 - atomic_inc(&vcc->stats->rx_err);
26418 + atomic_inc_unchecked(&vcc->stats->rx_err);
26419 return;
26420 }
26421 if (!atm_charge(vcc, skb->truesize)) {
26422 @@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, st
26423 __net_timestamp(skb);
26424
26425 vcc->push(vcc, skb);
26426 - atomic_inc(&vcc->stats->rx);
26427 + atomic_inc_unchecked(&vcc->stats->rx);
26428
26429 return;
26430 }
26431 @@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, st
26432 __net_timestamp(skb);
26433
26434 vcc->push(vcc, skb);
26435 - atomic_inc(&vcc->stats->rx);
26436 + atomic_inc_unchecked(&vcc->stats->rx);
26437
26438 if (skb->truesize > SAR_FB_SIZE_3)
26439 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
26440 @@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
26441 if (vcc->qos.aal != ATM_AAL0) {
26442 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
26443 card->name, vpi, vci);
26444 - atomic_inc(&vcc->stats->rx_drop);
26445 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26446 goto drop;
26447 }
26448
26449 if ((sb = dev_alloc_skb(64)) == NULL) {
26450 printk("%s: Can't allocate buffers for AAL0.\n",
26451 card->name);
26452 - atomic_inc(&vcc->stats->rx_err);
26453 + atomic_inc_unchecked(&vcc->stats->rx_err);
26454 goto drop;
26455 }
26456
26457 @@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
26458 ATM_SKB(sb)->vcc = vcc;
26459 __net_timestamp(sb);
26460 vcc->push(vcc, sb);
26461 - atomic_inc(&vcc->stats->rx);
26462 + atomic_inc_unchecked(&vcc->stats->rx);
26463
26464 drop:
26465 skb_pull(queue, 64);
26466 @@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26467
26468 if (vc == NULL) {
26469 printk("%s: NULL connection in send().\n", card->name);
26470 - atomic_inc(&vcc->stats->tx_err);
26471 + atomic_inc_unchecked(&vcc->stats->tx_err);
26472 dev_kfree_skb(skb);
26473 return -EINVAL;
26474 }
26475 if (!test_bit(VCF_TX, &vc->flags)) {
26476 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
26477 - atomic_inc(&vcc->stats->tx_err);
26478 + atomic_inc_unchecked(&vcc->stats->tx_err);
26479 dev_kfree_skb(skb);
26480 return -EINVAL;
26481 }
26482 @@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26483 break;
26484 default:
26485 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
26486 - atomic_inc(&vcc->stats->tx_err);
26487 + atomic_inc_unchecked(&vcc->stats->tx_err);
26488 dev_kfree_skb(skb);
26489 return -EINVAL;
26490 }
26491
26492 if (skb_shinfo(skb)->nr_frags != 0) {
26493 printk("%s: No scatter-gather yet.\n", card->name);
26494 - atomic_inc(&vcc->stats->tx_err);
26495 + atomic_inc_unchecked(&vcc->stats->tx_err);
26496 dev_kfree_skb(skb);
26497 return -EINVAL;
26498 }
26499 @@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26500
26501 err = queue_skb(card, vc, skb, oam);
26502 if (err) {
26503 - atomic_inc(&vcc->stats->tx_err);
26504 + atomic_inc_unchecked(&vcc->stats->tx_err);
26505 dev_kfree_skb(skb);
26506 return err;
26507 }
26508 @@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
26509 skb = dev_alloc_skb(64);
26510 if (!skb) {
26511 printk("%s: Out of memory in send_oam().\n", card->name);
26512 - atomic_inc(&vcc->stats->tx_err);
26513 + atomic_inc_unchecked(&vcc->stats->tx_err);
26514 return -ENOMEM;
26515 }
26516 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
26517 diff -urNp linux-2.6.32.44/drivers/atm/iphase.c linux-2.6.32.44/drivers/atm/iphase.c
26518 --- linux-2.6.32.44/drivers/atm/iphase.c 2011-03-27 14:31:47.000000000 -0400
26519 +++ linux-2.6.32.44/drivers/atm/iphase.c 2011-04-17 15:56:46.000000000 -0400
26520 @@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
26521 status = (u_short) (buf_desc_ptr->desc_mode);
26522 if (status & (RX_CER | RX_PTE | RX_OFL))
26523 {
26524 - atomic_inc(&vcc->stats->rx_err);
26525 + atomic_inc_unchecked(&vcc->stats->rx_err);
26526 IF_ERR(printk("IA: bad packet, dropping it");)
26527 if (status & RX_CER) {
26528 IF_ERR(printk(" cause: packet CRC error\n");)
26529 @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
26530 len = dma_addr - buf_addr;
26531 if (len > iadev->rx_buf_sz) {
26532 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
26533 - atomic_inc(&vcc->stats->rx_err);
26534 + atomic_inc_unchecked(&vcc->stats->rx_err);
26535 goto out_free_desc;
26536 }
26537
26538 @@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *
26539 ia_vcc = INPH_IA_VCC(vcc);
26540 if (ia_vcc == NULL)
26541 {
26542 - atomic_inc(&vcc->stats->rx_err);
26543 + atomic_inc_unchecked(&vcc->stats->rx_err);
26544 dev_kfree_skb_any(skb);
26545 atm_return(vcc, atm_guess_pdu2truesize(len));
26546 goto INCR_DLE;
26547 @@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *
26548 if ((length > iadev->rx_buf_sz) || (length >
26549 (skb->len - sizeof(struct cpcs_trailer))))
26550 {
26551 - atomic_inc(&vcc->stats->rx_err);
26552 + atomic_inc_unchecked(&vcc->stats->rx_err);
26553 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
26554 length, skb->len);)
26555 dev_kfree_skb_any(skb);
26556 @@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *
26557
26558 IF_RX(printk("rx_dle_intr: skb push");)
26559 vcc->push(vcc,skb);
26560 - atomic_inc(&vcc->stats->rx);
26561 + atomic_inc_unchecked(&vcc->stats->rx);
26562 iadev->rx_pkt_cnt++;
26563 }
26564 INCR_DLE:
26565 @@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev,
26566 {
26567 struct k_sonet_stats *stats;
26568 stats = &PRIV(_ia_dev[board])->sonet_stats;
26569 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
26570 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
26571 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
26572 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
26573 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
26574 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
26575 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
26576 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
26577 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
26578 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
26579 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
26580 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
26581 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
26582 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
26583 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
26584 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
26585 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
26586 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
26587 }
26588 ia_cmds.status = 0;
26589 break;
26590 @@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
26591 if ((desc == 0) || (desc > iadev->num_tx_desc))
26592 {
26593 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
26594 - atomic_inc(&vcc->stats->tx);
26595 + atomic_inc_unchecked(&vcc->stats->tx);
26596 if (vcc->pop)
26597 vcc->pop(vcc, skb);
26598 else
26599 @@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
26600 ATM_DESC(skb) = vcc->vci;
26601 skb_queue_tail(&iadev->tx_dma_q, skb);
26602
26603 - atomic_inc(&vcc->stats->tx);
26604 + atomic_inc_unchecked(&vcc->stats->tx);
26605 iadev->tx_pkt_cnt++;
26606 /* Increment transaction counter */
26607 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
26608
26609 #if 0
26610 /* add flow control logic */
26611 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
26612 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
26613 if (iavcc->vc_desc_cnt > 10) {
26614 vcc->tx_quota = vcc->tx_quota * 3 / 4;
26615 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
26616 diff -urNp linux-2.6.32.44/drivers/atm/lanai.c linux-2.6.32.44/drivers/atm/lanai.c
26617 --- linux-2.6.32.44/drivers/atm/lanai.c 2011-03-27 14:31:47.000000000 -0400
26618 +++ linux-2.6.32.44/drivers/atm/lanai.c 2011-04-17 15:56:46.000000000 -0400
26619 @@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct l
26620 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
26621 lanai_endtx(lanai, lvcc);
26622 lanai_free_skb(lvcc->tx.atmvcc, skb);
26623 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
26624 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
26625 }
26626
26627 /* Try to fill the buffer - don't call unless there is backlog */
26628 @@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc
26629 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
26630 __net_timestamp(skb);
26631 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
26632 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
26633 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
26634 out:
26635 lvcc->rx.buf.ptr = end;
26636 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
26637 @@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_d
26638 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
26639 "vcc %d\n", lanai->number, (unsigned int) s, vci);
26640 lanai->stats.service_rxnotaal5++;
26641 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26642 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26643 return 0;
26644 }
26645 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
26646 @@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_d
26647 int bytes;
26648 read_unlock(&vcc_sklist_lock);
26649 DPRINTK("got trashed rx pdu on vci %d\n", vci);
26650 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26651 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26652 lvcc->stats.x.aal5.service_trash++;
26653 bytes = (SERVICE_GET_END(s) * 16) -
26654 (((unsigned long) lvcc->rx.buf.ptr) -
26655 @@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_d
26656 }
26657 if (s & SERVICE_STREAM) {
26658 read_unlock(&vcc_sklist_lock);
26659 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26660 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26661 lvcc->stats.x.aal5.service_stream++;
26662 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
26663 "PDU on VCI %d!\n", lanai->number, vci);
26664 @@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_d
26665 return 0;
26666 }
26667 DPRINTK("got rx crc error on vci %d\n", vci);
26668 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26669 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26670 lvcc->stats.x.aal5.service_rxcrc++;
26671 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
26672 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
26673 diff -urNp linux-2.6.32.44/drivers/atm/nicstar.c linux-2.6.32.44/drivers/atm/nicstar.c
26674 --- linux-2.6.32.44/drivers/atm/nicstar.c 2011-03-27 14:31:47.000000000 -0400
26675 +++ linux-2.6.32.44/drivers/atm/nicstar.c 2011-04-17 15:56:46.000000000 -0400
26676 @@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc,
26677 if ((vc = (vc_map *) vcc->dev_data) == NULL)
26678 {
26679 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
26680 - atomic_inc(&vcc->stats->tx_err);
26681 + atomic_inc_unchecked(&vcc->stats->tx_err);
26682 dev_kfree_skb_any(skb);
26683 return -EINVAL;
26684 }
26685 @@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc,
26686 if (!vc->tx)
26687 {
26688 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
26689 - atomic_inc(&vcc->stats->tx_err);
26690 + atomic_inc_unchecked(&vcc->stats->tx_err);
26691 dev_kfree_skb_any(skb);
26692 return -EINVAL;
26693 }
26694 @@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc,
26695 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
26696 {
26697 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
26698 - atomic_inc(&vcc->stats->tx_err);
26699 + atomic_inc_unchecked(&vcc->stats->tx_err);
26700 dev_kfree_skb_any(skb);
26701 return -EINVAL;
26702 }
26703 @@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc,
26704 if (skb_shinfo(skb)->nr_frags != 0)
26705 {
26706 printk("nicstar%d: No scatter-gather yet.\n", card->index);
26707 - atomic_inc(&vcc->stats->tx_err);
26708 + atomic_inc_unchecked(&vcc->stats->tx_err);
26709 dev_kfree_skb_any(skb);
26710 return -EINVAL;
26711 }
26712 @@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc,
26713
26714 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
26715 {
26716 - atomic_inc(&vcc->stats->tx_err);
26717 + atomic_inc_unchecked(&vcc->stats->tx_err);
26718 dev_kfree_skb_any(skb);
26719 return -EIO;
26720 }
26721 - atomic_inc(&vcc->stats->tx);
26722 + atomic_inc_unchecked(&vcc->stats->tx);
26723
26724 return 0;
26725 }
26726 @@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_
26727 {
26728 printk("nicstar%d: Can't allocate buffers for aal0.\n",
26729 card->index);
26730 - atomic_add(i,&vcc->stats->rx_drop);
26731 + atomic_add_unchecked(i,&vcc->stats->rx_drop);
26732 break;
26733 }
26734 if (!atm_charge(vcc, sb->truesize))
26735 {
26736 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
26737 card->index);
26738 - atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
26739 + atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
26740 dev_kfree_skb_any(sb);
26741 break;
26742 }
26743 @@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_
26744 ATM_SKB(sb)->vcc = vcc;
26745 __net_timestamp(sb);
26746 vcc->push(vcc, sb);
26747 - atomic_inc(&vcc->stats->rx);
26748 + atomic_inc_unchecked(&vcc->stats->rx);
26749 cell += ATM_CELL_PAYLOAD;
26750 }
26751
26752 @@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_
26753 if (iovb == NULL)
26754 {
26755 printk("nicstar%d: Out of iovec buffers.\n", card->index);
26756 - atomic_inc(&vcc->stats->rx_drop);
26757 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26758 recycle_rx_buf(card, skb);
26759 return;
26760 }
26761 @@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_
26762 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
26763 {
26764 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
26765 - atomic_inc(&vcc->stats->rx_err);
26766 + atomic_inc_unchecked(&vcc->stats->rx_err);
26767 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
26768 NS_SKB(iovb)->iovcnt = 0;
26769 iovb->len = 0;
26770 @@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_
26771 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
26772 card->index);
26773 which_list(card, skb);
26774 - atomic_inc(&vcc->stats->rx_err);
26775 + atomic_inc_unchecked(&vcc->stats->rx_err);
26776 recycle_rx_buf(card, skb);
26777 vc->rx_iov = NULL;
26778 recycle_iov_buf(card, iovb);
26779 @@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_
26780 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
26781 card->index);
26782 which_list(card, skb);
26783 - atomic_inc(&vcc->stats->rx_err);
26784 + atomic_inc_unchecked(&vcc->stats->rx_err);
26785 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26786 NS_SKB(iovb)->iovcnt);
26787 vc->rx_iov = NULL;
26788 @@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_
26789 printk(" - PDU size mismatch.\n");
26790 else
26791 printk(".\n");
26792 - atomic_inc(&vcc->stats->rx_err);
26793 + atomic_inc_unchecked(&vcc->stats->rx_err);
26794 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26795 NS_SKB(iovb)->iovcnt);
26796 vc->rx_iov = NULL;
26797 @@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_
26798 if (!atm_charge(vcc, skb->truesize))
26799 {
26800 push_rxbufs(card, skb);
26801 - atomic_inc(&vcc->stats->rx_drop);
26802 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26803 }
26804 else
26805 {
26806 @@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_
26807 ATM_SKB(skb)->vcc = vcc;
26808 __net_timestamp(skb);
26809 vcc->push(vcc, skb);
26810 - atomic_inc(&vcc->stats->rx);
26811 + atomic_inc_unchecked(&vcc->stats->rx);
26812 }
26813 }
26814 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
26815 @@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_
26816 if (!atm_charge(vcc, sb->truesize))
26817 {
26818 push_rxbufs(card, sb);
26819 - atomic_inc(&vcc->stats->rx_drop);
26820 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26821 }
26822 else
26823 {
26824 @@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_
26825 ATM_SKB(sb)->vcc = vcc;
26826 __net_timestamp(sb);
26827 vcc->push(vcc, sb);
26828 - atomic_inc(&vcc->stats->rx);
26829 + atomic_inc_unchecked(&vcc->stats->rx);
26830 }
26831
26832 push_rxbufs(card, skb);
26833 @@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_
26834 if (!atm_charge(vcc, skb->truesize))
26835 {
26836 push_rxbufs(card, skb);
26837 - atomic_inc(&vcc->stats->rx_drop);
26838 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26839 }
26840 else
26841 {
26842 @@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_
26843 ATM_SKB(skb)->vcc = vcc;
26844 __net_timestamp(skb);
26845 vcc->push(vcc, skb);
26846 - atomic_inc(&vcc->stats->rx);
26847 + atomic_inc_unchecked(&vcc->stats->rx);
26848 }
26849
26850 push_rxbufs(card, sb);
26851 @@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_
26852 if (hb == NULL)
26853 {
26854 printk("nicstar%d: Out of huge buffers.\n", card->index);
26855 - atomic_inc(&vcc->stats->rx_drop);
26856 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26857 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26858 NS_SKB(iovb)->iovcnt);
26859 vc->rx_iov = NULL;
26860 @@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_
26861 }
26862 else
26863 dev_kfree_skb_any(hb);
26864 - atomic_inc(&vcc->stats->rx_drop);
26865 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26866 }
26867 else
26868 {
26869 @@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_
26870 #endif /* NS_USE_DESTRUCTORS */
26871 __net_timestamp(hb);
26872 vcc->push(vcc, hb);
26873 - atomic_inc(&vcc->stats->rx);
26874 + atomic_inc_unchecked(&vcc->stats->rx);
26875 }
26876 }
26877
26878 diff -urNp linux-2.6.32.44/drivers/atm/solos-pci.c linux-2.6.32.44/drivers/atm/solos-pci.c
26879 --- linux-2.6.32.44/drivers/atm/solos-pci.c 2011-04-17 17:00:52.000000000 -0400
26880 +++ linux-2.6.32.44/drivers/atm/solos-pci.c 2011-05-16 21:46:57.000000000 -0400
26881 @@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
26882 }
26883 atm_charge(vcc, skb->truesize);
26884 vcc->push(vcc, skb);
26885 - atomic_inc(&vcc->stats->rx);
26886 + atomic_inc_unchecked(&vcc->stats->rx);
26887 break;
26888
26889 case PKT_STATUS:
26890 @@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *
26891 char msg[500];
26892 char item[10];
26893
26894 + pax_track_stack();
26895 +
26896 len = buf->len;
26897 for (i = 0; i < len; i++){
26898 if(i % 8 == 0)
26899 @@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_car
26900 vcc = SKB_CB(oldskb)->vcc;
26901
26902 if (vcc) {
26903 - atomic_inc(&vcc->stats->tx);
26904 + atomic_inc_unchecked(&vcc->stats->tx);
26905 solos_pop(vcc, oldskb);
26906 } else
26907 dev_kfree_skb_irq(oldskb);
26908 diff -urNp linux-2.6.32.44/drivers/atm/suni.c linux-2.6.32.44/drivers/atm/suni.c
26909 --- linux-2.6.32.44/drivers/atm/suni.c 2011-03-27 14:31:47.000000000 -0400
26910 +++ linux-2.6.32.44/drivers/atm/suni.c 2011-04-17 15:56:46.000000000 -0400
26911 @@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
26912
26913
26914 #define ADD_LIMITED(s,v) \
26915 - atomic_add((v),&stats->s); \
26916 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
26917 + atomic_add_unchecked((v),&stats->s); \
26918 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
26919
26920
26921 static void suni_hz(unsigned long from_timer)
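
Editor's note: ADD_LIMITED saturates a SONET counter at INT_MAX instead of letting it wrap negative; the conversion only swaps in the unchecked primitives because the underlying fields become atomic_unchecked_t. As a hedged illustration, an invocation such as ADD_LIMITED(rx_cells, delta) expands to roughly:

/* expansion of ADD_LIMITED(rx_cells, delta) after the change above;
 * stats points at the device's struct k_sonet_stats, whose fields are
 * assumed to be converted to atomic_unchecked_t elsewhere in the patch */
atomic_add_unchecked(delta, &stats->rx_cells);
if (atomic_read_unchecked(&stats->rx_cells) < 0)
	atomic_set_unchecked(&stats->rx_cells, INT_MAX);
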
26922 diff -urNp linux-2.6.32.44/drivers/atm/uPD98402.c linux-2.6.32.44/drivers/atm/uPD98402.c
26923 --- linux-2.6.32.44/drivers/atm/uPD98402.c 2011-03-27 14:31:47.000000000 -0400
26924 +++ linux-2.6.32.44/drivers/atm/uPD98402.c 2011-04-17 15:56:46.000000000 -0400
26925 @@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *d
26926 struct sonet_stats tmp;
26927 int error = 0;
26928
26929 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26930 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26931 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
26932 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
26933 if (zero && !error) {
26934 @@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev
26935
26936
26937 #define ADD_LIMITED(s,v) \
26938 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
26939 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
26940 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26941 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
26942 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
26943 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26944
26945
26946 static void stat_event(struct atm_dev *dev)
26947 @@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev
26948 if (reason & uPD98402_INT_PFM) stat_event(dev);
26949 if (reason & uPD98402_INT_PCO) {
26950 (void) GET(PCOCR); /* clear interrupt cause */
26951 - atomic_add(GET(HECCT),
26952 + atomic_add_unchecked(GET(HECCT),
26953 &PRIV(dev)->sonet_stats.uncorr_hcs);
26954 }
26955 if ((reason & uPD98402_INT_RFO) &&
26956 @@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev
26957 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
26958 uPD98402_INT_LOS),PIMR); /* enable them */
26959 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
26960 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26961 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
26962 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
26963 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26964 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
26965 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
26966 return 0;
26967 }
26968
26969 diff -urNp linux-2.6.32.44/drivers/atm/zatm.c linux-2.6.32.44/drivers/atm/zatm.c
26970 --- linux-2.6.32.44/drivers/atm/zatm.c 2011-03-27 14:31:47.000000000 -0400
26971 +++ linux-2.6.32.44/drivers/atm/zatm.c 2011-04-17 15:56:46.000000000 -0400
26972 @@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
26973 }
26974 if (!size) {
26975 dev_kfree_skb_irq(skb);
26976 - if (vcc) atomic_inc(&vcc->stats->rx_err);
26977 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
26978 continue;
26979 }
26980 if (!atm_charge(vcc,skb->truesize)) {
26981 @@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
26982 skb->len = size;
26983 ATM_SKB(skb)->vcc = vcc;
26984 vcc->push(vcc,skb);
26985 - atomic_inc(&vcc->stats->rx);
26986 + atomic_inc_unchecked(&vcc->stats->rx);
26987 }
26988 zout(pos & 0xffff,MTA(mbx));
26989 #if 0 /* probably a stupid idea */
26990 @@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
26991 skb_queue_head(&zatm_vcc->backlog,skb);
26992 break;
26993 }
26994 - atomic_inc(&vcc->stats->tx);
26995 + atomic_inc_unchecked(&vcc->stats->tx);
26996 wake_up(&zatm_vcc->tx_wait);
26997 }
26998
26999 diff -urNp linux-2.6.32.44/drivers/base/bus.c linux-2.6.32.44/drivers/base/bus.c
27000 --- linux-2.6.32.44/drivers/base/bus.c 2011-03-27 14:31:47.000000000 -0400
27001 +++ linux-2.6.32.44/drivers/base/bus.c 2011-04-17 15:56:46.000000000 -0400
27002 @@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kob
27003 return ret;
27004 }
27005
27006 -static struct sysfs_ops driver_sysfs_ops = {
27007 +static const struct sysfs_ops driver_sysfs_ops = {
27008 .show = drv_attr_show,
27009 .store = drv_attr_store,
27010 };
27011 @@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kob
27012 return ret;
27013 }
27014
27015 -static struct sysfs_ops bus_sysfs_ops = {
27016 +static const struct sysfs_ops bus_sysfs_ops = {
27017 .show = bus_attr_show,
27018 .store = bus_attr_store,
27019 };
27020 @@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset
27021 return 0;
27022 }
27023
27024 -static struct kset_uevent_ops bus_uevent_ops = {
27025 +static const struct kset_uevent_ops bus_uevent_ops = {
27026 .filter = bus_uevent_filter,
27027 };
27028
27029 diff -urNp linux-2.6.32.44/drivers/base/class.c linux-2.6.32.44/drivers/base/class.c
27030 --- linux-2.6.32.44/drivers/base/class.c 2011-03-27 14:31:47.000000000 -0400
27031 +++ linux-2.6.32.44/drivers/base/class.c 2011-04-17 15:56:46.000000000 -0400
27032 @@ -63,7 +63,7 @@ static void class_release(struct kobject
27033 kfree(cp);
27034 }
27035
27036 -static struct sysfs_ops class_sysfs_ops = {
27037 +static const struct sysfs_ops class_sysfs_ops = {
27038 .show = class_attr_show,
27039 .store = class_attr_store,
27040 };
27041 diff -urNp linux-2.6.32.44/drivers/base/core.c linux-2.6.32.44/drivers/base/core.c
27042 --- linux-2.6.32.44/drivers/base/core.c 2011-03-27 14:31:47.000000000 -0400
27043 +++ linux-2.6.32.44/drivers/base/core.c 2011-04-17 15:56:46.000000000 -0400
27044 @@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kob
27045 return ret;
27046 }
27047
27048 -static struct sysfs_ops dev_sysfs_ops = {
27049 +static const struct sysfs_ops dev_sysfs_ops = {
27050 .show = dev_attr_show,
27051 .store = dev_attr_store,
27052 };
27053 @@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset,
27054 return retval;
27055 }
27056
27057 -static struct kset_uevent_ops device_uevent_ops = {
27058 +static const struct kset_uevent_ops device_uevent_ops = {
27059 .filter = dev_uevent_filter,
27060 .name = dev_uevent_name,
27061 .uevent = dev_uevent,
27062 diff -urNp linux-2.6.32.44/drivers/base/memory.c linux-2.6.32.44/drivers/base/memory.c
27063 --- linux-2.6.32.44/drivers/base/memory.c 2011-03-27 14:31:47.000000000 -0400
27064 +++ linux-2.6.32.44/drivers/base/memory.c 2011-04-17 15:56:46.000000000 -0400
27065 @@ -44,7 +44,7 @@ static int memory_uevent(struct kset *ks
27066 return retval;
27067 }
27068
27069 -static struct kset_uevent_ops memory_uevent_ops = {
27070 +static const struct kset_uevent_ops memory_uevent_ops = {
27071 .name = memory_uevent_name,
27072 .uevent = memory_uevent,
27073 };
27074 diff -urNp linux-2.6.32.44/drivers/base/sys.c linux-2.6.32.44/drivers/base/sys.c
27075 --- linux-2.6.32.44/drivers/base/sys.c 2011-03-27 14:31:47.000000000 -0400
27076 +++ linux-2.6.32.44/drivers/base/sys.c 2011-04-17 15:56:46.000000000 -0400
27077 @@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struc
27078 return -EIO;
27079 }
27080
27081 -static struct sysfs_ops sysfs_ops = {
27082 +static const struct sysfs_ops sysfs_ops = {
27083 .show = sysdev_show,
27084 .store = sysdev_store,
27085 };
27086 @@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct
27087 return -EIO;
27088 }
27089
27090 -static struct sysfs_ops sysfs_class_ops = {
27091 +static const struct sysfs_ops sysfs_class_ops = {
27092 .show = sysdev_class_show,
27093 .store = sysdev_class_store,
27094 };
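
Editor's note: the drivers/base changes are pure constification. The sysfs_ops and kset_uevent_ops tables contain nothing but function pointers, are filled in at compile time and are never written afterwards, so marking them const lets them live in read-only data. A minimal sketch of the pattern, with hypothetical callbacks:

static ssize_t example_show(struct kobject *kobj, struct attribute *attr,
			    char *buf);
static ssize_t example_store(struct kobject *kobj, struct attribute *attr,
			     const char *buf, size_t count);

/* the table is defined once and only ever read, so it can be placed in .rodata */
static const struct sysfs_ops example_sysfs_ops = {
	.show	= example_show,
	.store	= example_store,
};
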
27095 diff -urNp linux-2.6.32.44/drivers/block/cciss.c linux-2.6.32.44/drivers/block/cciss.c
27096 --- linux-2.6.32.44/drivers/block/cciss.c 2011-03-27 14:31:47.000000000 -0400
27097 +++ linux-2.6.32.44/drivers/block/cciss.c 2011-08-05 20:33:55.000000000 -0400
27098 @@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct
27099 int err;
27100 u32 cp;
27101
27102 + memset(&arg64, 0, sizeof(arg64));
27103 +
27104 err = 0;
27105 err |=
27106 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
27107 @@ -2852,7 +2854,7 @@ static unsigned long pollcomplete(int ct
27108 /* Wait (up to 20 seconds) for a command to complete */
27109
27110 for (i = 20 * HZ; i > 0; i--) {
27111 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
27112 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
27113 if (done == FIFO_EMPTY)
27114 schedule_timeout_uninterruptible(1);
27115 else
27116 @@ -2876,7 +2878,7 @@ static int sendcmd_core(ctlr_info_t *h,
27117 resend_cmd1:
27118
27119 /* Disable interrupt on the board. */
27120 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
27121 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
27122
27123 /* Make sure there is room in the command FIFO */
27124 /* Actually it should be completely empty at this time */
27125 @@ -2884,13 +2886,13 @@ resend_cmd1:
27126 /* tape side of the driver. */
27127 for (i = 200000; i > 0; i--) {
27128 /* if fifo isn't full go */
27129 - if (!(h->access.fifo_full(h)))
27130 + if (!(h->access->fifo_full(h)))
27131 break;
27132 udelay(10);
27133 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
27134 " waiting!\n", h->ctlr);
27135 }
27136 - h->access.submit_command(h, c); /* Send the cmd */
27137 + h->access->submit_command(h, c); /* Send the cmd */
27138 do {
27139 complete = pollcomplete(h->ctlr);
27140
27141 @@ -3023,7 +3025,7 @@ static void start_io(ctlr_info_t *h)
27142 while (!hlist_empty(&h->reqQ)) {
27143 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
27144 /* can't do anything if fifo is full */
27145 - if ((h->access.fifo_full(h))) {
27146 + if ((h->access->fifo_full(h))) {
27147 printk(KERN_WARNING "cciss: fifo full\n");
27148 break;
27149 }
27150 @@ -3033,7 +3035,7 @@ static void start_io(ctlr_info_t *h)
27151 h->Qdepth--;
27152
27153 /* Tell the controller execute command */
27154 - h->access.submit_command(h, c);
27155 + h->access->submit_command(h, c);
27156
27157 /* Put job onto the completed Q */
27158 addQ(&h->cmpQ, c);
27159 @@ -3393,17 +3395,17 @@ startio:
27160
27161 static inline unsigned long get_next_completion(ctlr_info_t *h)
27162 {
27163 - return h->access.command_completed(h);
27164 + return h->access->command_completed(h);
27165 }
27166
27167 static inline int interrupt_pending(ctlr_info_t *h)
27168 {
27169 - return h->access.intr_pending(h);
27170 + return h->access->intr_pending(h);
27171 }
27172
27173 static inline long interrupt_not_for_us(ctlr_info_t *h)
27174 {
27175 - return (((h->access.intr_pending(h) == 0) ||
27176 + return (((h->access->intr_pending(h) == 0) ||
27177 (h->interrupts_enabled == 0)));
27178 }
27179
27180 @@ -3892,7 +3894,7 @@ static int __devinit cciss_pci_init(ctlr
27181 */
27182 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
27183 c->product_name = products[prod_index].product_name;
27184 - c->access = *(products[prod_index].access);
27185 + c->access = products[prod_index].access;
27186 c->nr_cmds = c->max_commands - 4;
27187 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
27188 (readb(&c->cfgtable->Signature[1]) != 'I') ||
27189 @@ -4291,7 +4293,7 @@ static int __devinit cciss_init_one(stru
27190 }
27191
27192 /* make sure the board interrupts are off */
27193 - hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
27194 + hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_OFF);
27195 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
27196 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
27197 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
27198 @@ -4341,7 +4343,7 @@ static int __devinit cciss_init_one(stru
27199 cciss_scsi_setup(i);
27200
27201 /* Turn the interrupts on so we can service requests */
27202 - hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
27203 + hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_ON);
27204
27205 /* Get the firmware version */
27206 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
27207 diff -urNp linux-2.6.32.44/drivers/block/cciss.h linux-2.6.32.44/drivers/block/cciss.h
27208 --- linux-2.6.32.44/drivers/block/cciss.h 2011-08-09 18:35:28.000000000 -0400
27209 +++ linux-2.6.32.44/drivers/block/cciss.h 2011-08-09 18:33:59.000000000 -0400
27210 @@ -90,7 +90,7 @@ struct ctlr_info
27211 // information about each logical volume
27212 drive_info_struct *drv[CISS_MAX_LUN];
27213
27214 - struct access_method access;
27215 + struct access_method *access;
27216
27217 /* queue and queue Info */
27218 struct hlist_head reqQ;
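
Editor's note: the cciss change turns the embedded struct access_method (a table of submit_command/set_intr_mask/fifo_full/intr_pending/command_completed function pointers) into a pointer to the shared per-family table, so every call site moves from '.' to '->'. Keeping one shared table instead of a per-controller copy means the function-pointer table itself can remain read-only. The shape of the change, condensed from the hunks above:

struct ctlr_info {
	/* ... */
	struct access_method *access;	/* was: struct access_method access; */
};

/* probe time: store a pointer to the shared per-family table instead of
 * copying it into the controller */
h->access = products[prod_index].access;	/* was: *(products[prod_index].access) */

/* call sites dereference the shared table */
h->access->submit_command(h, c);
done = h->access->command_completed(h);
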
27219 diff -urNp linux-2.6.32.44/drivers/block/cpqarray.c linux-2.6.32.44/drivers/block/cpqarray.c
27220 --- linux-2.6.32.44/drivers/block/cpqarray.c 2011-03-27 14:31:47.000000000 -0400
27221 +++ linux-2.6.32.44/drivers/block/cpqarray.c 2011-08-05 20:33:55.000000000 -0400
27222 @@ -402,7 +402,7 @@ static int __init cpqarray_register_ctlr
27223 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
27224 goto Enomem4;
27225 }
27226 - hba[i]->access.set_intr_mask(hba[i], 0);
27227 + hba[i]->access->set_intr_mask(hba[i], 0);
27228 if (request_irq(hba[i]->intr, do_ida_intr,
27229 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
27230 {
27231 @@ -460,7 +460,7 @@ static int __init cpqarray_register_ctlr
27232 add_timer(&hba[i]->timer);
27233
27234 /* Enable IRQ now that spinlock and rate limit timer are set up */
27235 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
27236 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
27237
27238 for(j=0; j<NWD; j++) {
27239 struct gendisk *disk = ida_gendisk[i][j];
27240 @@ -695,7 +695,7 @@ DBGINFO(
27241 for(i=0; i<NR_PRODUCTS; i++) {
27242 if (board_id == products[i].board_id) {
27243 c->product_name = products[i].product_name;
27244 - c->access = *(products[i].access);
27245 + c->access = products[i].access;
27246 break;
27247 }
27248 }
27249 @@ -793,7 +793,7 @@ static int __init cpqarray_eisa_detect(v
27250 hba[ctlr]->intr = intr;
27251 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
27252 hba[ctlr]->product_name = products[j].product_name;
27253 - hba[ctlr]->access = *(products[j].access);
27254 + hba[ctlr]->access = products[j].access;
27255 hba[ctlr]->ctlr = ctlr;
27256 hba[ctlr]->board_id = board_id;
27257 hba[ctlr]->pci_dev = NULL; /* not PCI */
27258 @@ -896,6 +896,8 @@ static void do_ida_request(struct reques
27259 struct scatterlist tmp_sg[SG_MAX];
27260 int i, dir, seg;
27261
27262 + pax_track_stack();
27263 +
27264 if (blk_queue_plugged(q))
27265 goto startio;
27266
27267 @@ -968,7 +970,7 @@ static void start_io(ctlr_info_t *h)
27268
27269 while((c = h->reqQ) != NULL) {
27270 /* Can't do anything if we're busy */
27271 - if (h->access.fifo_full(h) == 0)
27272 + if (h->access->fifo_full(h) == 0)
27273 return;
27274
27275 /* Get the first entry from the request Q */
27276 @@ -976,7 +978,7 @@ static void start_io(ctlr_info_t *h)
27277 h->Qdepth--;
27278
27279 /* Tell the controller to do our bidding */
27280 - h->access.submit_command(h, c);
27281 + h->access->submit_command(h, c);
27282
27283 /* Get onto the completion Q */
27284 addQ(&h->cmpQ, c);
27285 @@ -1038,7 +1040,7 @@ static irqreturn_t do_ida_intr(int irq,
27286 unsigned long flags;
27287 __u32 a,a1;
27288
27289 - istat = h->access.intr_pending(h);
27290 + istat = h->access->intr_pending(h);
27291 /* Is this interrupt for us? */
27292 if (istat == 0)
27293 return IRQ_NONE;
27294 @@ -1049,7 +1051,7 @@ static irqreturn_t do_ida_intr(int irq,
27295 */
27296 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
27297 if (istat & FIFO_NOT_EMPTY) {
27298 - while((a = h->access.command_completed(h))) {
27299 + while((a = h->access->command_completed(h))) {
27300 a1 = a; a &= ~3;
27301 if ((c = h->cmpQ) == NULL)
27302 {
27303 @@ -1434,11 +1436,11 @@ static int sendcmd(
27304 /*
27305 * Disable interrupt
27306 */
27307 - info_p->access.set_intr_mask(info_p, 0);
27308 + info_p->access->set_intr_mask(info_p, 0);
27309 /* Make sure there is room in the command FIFO */
27310 /* Actually it should be completely empty at this time. */
27311 for (i = 200000; i > 0; i--) {
27312 - temp = info_p->access.fifo_full(info_p);
27313 + temp = info_p->access->fifo_full(info_p);
27314 if (temp != 0) {
27315 break;
27316 }
27317 @@ -1451,7 +1453,7 @@ DBG(
27318 /*
27319 * Send the cmd
27320 */
27321 - info_p->access.submit_command(info_p, c);
27322 + info_p->access->submit_command(info_p, c);
27323 complete = pollcomplete(ctlr);
27324
27325 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
27326 @@ -1534,9 +1536,9 @@ static int revalidate_allvol(ctlr_info_t
27327 * we check the new geometry. Then turn interrupts back on when
27328 * we're done.
27329 */
27330 - host->access.set_intr_mask(host, 0);
27331 + host->access->set_intr_mask(host, 0);
27332 getgeometry(ctlr);
27333 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
27334 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
27335
27336 for(i=0; i<NWD; i++) {
27337 struct gendisk *disk = ida_gendisk[ctlr][i];
27338 @@ -1576,7 +1578,7 @@ static int pollcomplete(int ctlr)
27339 /* Wait (up to 2 seconds) for a command to complete */
27340
27341 for (i = 200000; i > 0; i--) {
27342 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
27343 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
27344 if (done == 0) {
27345 udelay(10); /* a short fixed delay */
27346 } else
27347 diff -urNp linux-2.6.32.44/drivers/block/cpqarray.h linux-2.6.32.44/drivers/block/cpqarray.h
27348 --- linux-2.6.32.44/drivers/block/cpqarray.h 2011-03-27 14:31:47.000000000 -0400
27349 +++ linux-2.6.32.44/drivers/block/cpqarray.h 2011-08-05 20:33:55.000000000 -0400
27350 @@ -99,7 +99,7 @@ struct ctlr_info {
27351 drv_info_t drv[NWD];
27352 struct proc_dir_entry *proc;
27353
27354 - struct access_method access;
27355 + struct access_method *access;
27356
27357 cmdlist_t *reqQ;
27358 cmdlist_t *cmpQ;
27359 diff -urNp linux-2.6.32.44/drivers/block/DAC960.c linux-2.6.32.44/drivers/block/DAC960.c
27360 --- linux-2.6.32.44/drivers/block/DAC960.c 2011-03-27 14:31:47.000000000 -0400
27361 +++ linux-2.6.32.44/drivers/block/DAC960.c 2011-05-16 21:46:57.000000000 -0400
27362 @@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfigur
27363 unsigned long flags;
27364 int Channel, TargetID;
27365
27366 + pax_track_stack();
27367 +
27368 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
27369 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
27370 sizeof(DAC960_SCSI_Inquiry_T) +
27371 diff -urNp linux-2.6.32.44/drivers/block/nbd.c linux-2.6.32.44/drivers/block/nbd.c
27372 --- linux-2.6.32.44/drivers/block/nbd.c 2011-06-25 12:55:34.000000000 -0400
27373 +++ linux-2.6.32.44/drivers/block/nbd.c 2011-06-25 12:56:37.000000000 -0400
27374 @@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *
27375 struct kvec iov;
27376 sigset_t blocked, oldset;
27377
27378 + pax_track_stack();
27379 +
27380 if (unlikely(!sock)) {
27381 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
27382 lo->disk->disk_name, (send ? "send" : "recv"));
27383 @@ -569,6 +571,8 @@ static void do_nbd_request(struct reques
27384 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
27385 unsigned int cmd, unsigned long arg)
27386 {
27387 + pax_track_stack();
27388 +
27389 switch (cmd) {
27390 case NBD_DISCONNECT: {
27391 struct request sreq;
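
Editor's note: the pax_track_stack() insertions here (and in DAC960, cpqarray, solos-pci, istallion and the IPMI code elsewhere in this section) follow one rule: functions with unusually large stack frames get a call at the top so that, when PAX_MEMORY_STACKLEAK is enabled, the depth they reach is recorded and the used stack can later be sanitized; otherwise the call is a no-op. A hedged sketch of the idiom:

/* sketch only: pax_track_stack() itself is defined elsewhere in the patch
 * (a no-op unless PAX_MEMORY_STACKLEAK is enabled) */
static int example_big_frame(void)
{
	char scratch[1024];		/* large on-stack buffer */

	pax_track_stack();		/* record how deep this frame goes */

	memset(scratch, 0, sizeof(scratch));
	/* ... use scratch ... */
	return 0;
}
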
27392 diff -urNp linux-2.6.32.44/drivers/block/pktcdvd.c linux-2.6.32.44/drivers/block/pktcdvd.c
27393 --- linux-2.6.32.44/drivers/block/pktcdvd.c 2011-03-27 14:31:47.000000000 -0400
27394 +++ linux-2.6.32.44/drivers/block/pktcdvd.c 2011-04-17 15:56:46.000000000 -0400
27395 @@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kob
27396 return len;
27397 }
27398
27399 -static struct sysfs_ops kobj_pkt_ops = {
27400 +static const struct sysfs_ops kobj_pkt_ops = {
27401 .show = kobj_pkt_show,
27402 .store = kobj_pkt_store
27403 };
27404 diff -urNp linux-2.6.32.44/drivers/char/agp/frontend.c linux-2.6.32.44/drivers/char/agp/frontend.c
27405 --- linux-2.6.32.44/drivers/char/agp/frontend.c 2011-03-27 14:31:47.000000000 -0400
27406 +++ linux-2.6.32.44/drivers/char/agp/frontend.c 2011-04-17 15:56:46.000000000 -0400
27407 @@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct ag
27408 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
27409 return -EFAULT;
27410
27411 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
27412 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
27413 return -EFAULT;
27414
27415 client = agp_find_client_by_pid(reserve.pid);
27416 diff -urNp linux-2.6.32.44/drivers/char/briq_panel.c linux-2.6.32.44/drivers/char/briq_panel.c
27417 --- linux-2.6.32.44/drivers/char/briq_panel.c 2011-03-27 14:31:47.000000000 -0400
27418 +++ linux-2.6.32.44/drivers/char/briq_panel.c 2011-04-18 19:48:57.000000000 -0400
27419 @@ -10,6 +10,7 @@
27420 #include <linux/types.h>
27421 #include <linux/errno.h>
27422 #include <linux/tty.h>
27423 +#include <linux/mutex.h>
27424 #include <linux/timer.h>
27425 #include <linux/kernel.h>
27426 #include <linux/wait.h>
27427 @@ -36,6 +37,7 @@ static int vfd_is_open;
27428 static unsigned char vfd[40];
27429 static int vfd_cursor;
27430 static unsigned char ledpb, led;
27431 +static DEFINE_MUTEX(vfd_mutex);
27432
27433 static void update_vfd(void)
27434 {
27435 @@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct f
27436 if (!vfd_is_open)
27437 return -EBUSY;
27438
27439 + mutex_lock(&vfd_mutex);
27440 for (;;) {
27441 char c;
27442 if (!indx)
27443 break;
27444 - if (get_user(c, buf))
27445 + if (get_user(c, buf)) {
27446 + mutex_unlock(&vfd_mutex);
27447 return -EFAULT;
27448 + }
27449 if (esc) {
27450 set_led(c);
27451 esc = 0;
27452 @@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct f
27453 buf++;
27454 }
27455 update_vfd();
27456 + mutex_unlock(&vfd_mutex);
27457
27458 return len;
27459 }
27460 diff -urNp linux-2.6.32.44/drivers/char/genrtc.c linux-2.6.32.44/drivers/char/genrtc.c
27461 --- linux-2.6.32.44/drivers/char/genrtc.c 2011-03-27 14:31:47.000000000 -0400
27462 +++ linux-2.6.32.44/drivers/char/genrtc.c 2011-04-18 19:45:42.000000000 -0400
27463 @@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *i
27464 switch (cmd) {
27465
27466 case RTC_PLL_GET:
27467 + memset(&pll, 0, sizeof(pll));
27468 if (get_rtc_pll(&pll))
27469 return -EINVAL;
27470 else
27471 diff -urNp linux-2.6.32.44/drivers/char/hpet.c linux-2.6.32.44/drivers/char/hpet.c
27472 --- linux-2.6.32.44/drivers/char/hpet.c 2011-03-27 14:31:47.000000000 -0400
27473 +++ linux-2.6.32.44/drivers/char/hpet.c 2011-04-23 12:56:11.000000000 -0400
27474 @@ -430,7 +430,7 @@ static int hpet_release(struct inode *in
27475 return 0;
27476 }
27477
27478 -static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
27479 +static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
27480
27481 static int
27482 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
27483 @@ -565,7 +565,7 @@ static inline unsigned long hpet_time_di
27484 }
27485
27486 static int
27487 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
27488 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
27489 {
27490 struct hpet_timer __iomem *timer;
27491 struct hpet __iomem *hpet;
27492 @@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp,
27493 {
27494 struct hpet_info info;
27495
27496 + memset(&info, 0, sizeof(info));
27497 +
27498 if (devp->hd_ireqfreq)
27499 info.hi_ireqfreq =
27500 hpet_time_div(hpetp, devp->hd_ireqfreq);
27501 - else
27502 - info.hi_ireqfreq = 0;
27503 info.hi_flags =
27504 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
27505 info.hi_hpet = hpetp->hp_which;
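
Editor's note: the memset() additions in cciss_ioctl32_passthru, genrtc and here in hpet all close the same class of information leak: a structure assembled on the kernel stack and later copied to user space must be zeroed first, otherwise unwritten fields and padding bytes carry old stack contents out to user land. A generic sketch of the pattern (hypothetical fill function and variables, not the hpet code itself):

struct example_info info;

memset(&info, 0, sizeof(info));		/* clears unused fields and padding */
info.flags = read_hw_flags(dev);	/* hypothetical: fill in only what is known */
if (copy_to_user(uarg, &info, sizeof(info)))
	return -EFAULT;
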
27506 diff -urNp linux-2.6.32.44/drivers/char/hvc_beat.c linux-2.6.32.44/drivers/char/hvc_beat.c
27507 --- linux-2.6.32.44/drivers/char/hvc_beat.c 2011-03-27 14:31:47.000000000 -0400
27508 +++ linux-2.6.32.44/drivers/char/hvc_beat.c 2011-04-17 15:56:46.000000000 -0400
27509 @@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t v
27510 return cnt;
27511 }
27512
27513 -static struct hv_ops hvc_beat_get_put_ops = {
27514 +static const struct hv_ops hvc_beat_get_put_ops = {
27515 .get_chars = hvc_beat_get_chars,
27516 .put_chars = hvc_beat_put_chars,
27517 };
27518 diff -urNp linux-2.6.32.44/drivers/char/hvc_console.c linux-2.6.32.44/drivers/char/hvc_console.c
27519 --- linux-2.6.32.44/drivers/char/hvc_console.c 2011-03-27 14:31:47.000000000 -0400
27520 +++ linux-2.6.32.44/drivers/char/hvc_console.c 2011-04-17 15:56:46.000000000 -0400
27521 @@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_ind
27522 * console interfaces but can still be used as a tty device. This has to be
27523 * static because kmalloc will not work during early console init.
27524 */
27525 -static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
27526 +static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
27527 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
27528 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
27529
27530 @@ -247,7 +247,7 @@ static void destroy_hvc_struct(struct kr
27531 * vty adapters do NOT get an hvc_instantiate() callback since they
27532 * appear after early console init.
27533 */
27534 -int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
27535 +int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
27536 {
27537 struct hvc_struct *hp;
27538
27539 @@ -756,7 +756,7 @@ static const struct tty_operations hvc_o
27540 };
27541
27542 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
27543 - struct hv_ops *ops, int outbuf_size)
27544 + const struct hv_ops *ops, int outbuf_size)
27545 {
27546 struct hvc_struct *hp;
27547 int i;
27548 diff -urNp linux-2.6.32.44/drivers/char/hvc_console.h linux-2.6.32.44/drivers/char/hvc_console.h
27549 --- linux-2.6.32.44/drivers/char/hvc_console.h 2011-03-27 14:31:47.000000000 -0400
27550 +++ linux-2.6.32.44/drivers/char/hvc_console.h 2011-04-17 15:56:46.000000000 -0400
27551 @@ -55,7 +55,7 @@ struct hvc_struct {
27552 int outbuf_size;
27553 int n_outbuf;
27554 uint32_t vtermno;
27555 - struct hv_ops *ops;
27556 + const struct hv_ops *ops;
27557 int irq_requested;
27558 int data;
27559 struct winsize ws;
27560 @@ -76,11 +76,11 @@ struct hv_ops {
27561 };
27562
27563 /* Register a vterm and a slot index for use as a console (console_init) */
27564 -extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
27565 +extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
27566
27567 /* register a vterm for hvc tty operation (module_init or hotplug add) */
27568 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
27569 - struct hv_ops *ops, int outbuf_size);
27570 + const struct hv_ops *ops, int outbuf_size);
27571 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
27572 extern int hvc_remove(struct hvc_struct *hp);
27573
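
Editor's note: the hvc changes propagate const through an entire ops-table API rather than a single definition: the ops field in struct hvc_struct, the cons_ops[] array and the hvc_instantiate()/hvc_alloc() prototypes all take const pointers, and each backend (beat, iSeries, IUCV, RTAS, udbg, vio, Xen) marks its table const. A compressed sketch of propagating const through such an interface, using hypothetical names:

struct example_ops {
	int (*get_chars)(uint32_t vtermno, char *buf, int count);
	int (*put_chars)(uint32_t vtermno, const char *buf, int count);
};

/* backend side: the table is read-only */
static int backend_get(uint32_t v, char *buf, int count) { return 0; }
static int backend_put(uint32_t v, const char *buf, int count) { return count; }

static const struct example_ops backend_ops = {
	.get_chars = backend_get,
	.put_chars = backend_put,
};

/* core side: storage and registration accept only const pointers */
static const struct example_ops *registered[4];

int example_register(int index, const struct example_ops *ops)
{
	registered[index] = ops;
	return 0;
}
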
27574 diff -urNp linux-2.6.32.44/drivers/char/hvc_iseries.c linux-2.6.32.44/drivers/char/hvc_iseries.c
27575 --- linux-2.6.32.44/drivers/char/hvc_iseries.c 2011-03-27 14:31:47.000000000 -0400
27576 +++ linux-2.6.32.44/drivers/char/hvc_iseries.c 2011-04-17 15:56:46.000000000 -0400
27577 @@ -197,7 +197,7 @@ done:
27578 return sent;
27579 }
27580
27581 -static struct hv_ops hvc_get_put_ops = {
27582 +static const struct hv_ops hvc_get_put_ops = {
27583 .get_chars = get_chars,
27584 .put_chars = put_chars,
27585 .notifier_add = notifier_add_irq,
27586 diff -urNp linux-2.6.32.44/drivers/char/hvc_iucv.c linux-2.6.32.44/drivers/char/hvc_iucv.c
27587 --- linux-2.6.32.44/drivers/char/hvc_iucv.c 2011-03-27 14:31:47.000000000 -0400
27588 +++ linux-2.6.32.44/drivers/char/hvc_iucv.c 2011-04-17 15:56:46.000000000 -0400
27589 @@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(stru
27590
27591
27592 /* HVC operations */
27593 -static struct hv_ops hvc_iucv_ops = {
27594 +static const struct hv_ops hvc_iucv_ops = {
27595 .get_chars = hvc_iucv_get_chars,
27596 .put_chars = hvc_iucv_put_chars,
27597 .notifier_add = hvc_iucv_notifier_add,
27598 diff -urNp linux-2.6.32.44/drivers/char/hvc_rtas.c linux-2.6.32.44/drivers/char/hvc_rtas.c
27599 --- linux-2.6.32.44/drivers/char/hvc_rtas.c 2011-03-27 14:31:47.000000000 -0400
27600 +++ linux-2.6.32.44/drivers/char/hvc_rtas.c 2011-04-17 15:56:46.000000000 -0400
27601 @@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_
27602 return i;
27603 }
27604
27605 -static struct hv_ops hvc_rtas_get_put_ops = {
27606 +static const struct hv_ops hvc_rtas_get_put_ops = {
27607 .get_chars = hvc_rtas_read_console,
27608 .put_chars = hvc_rtas_write_console,
27609 };
27610 diff -urNp linux-2.6.32.44/drivers/char/hvcs.c linux-2.6.32.44/drivers/char/hvcs.c
27611 --- linux-2.6.32.44/drivers/char/hvcs.c 2011-03-27 14:31:47.000000000 -0400
27612 +++ linux-2.6.32.44/drivers/char/hvcs.c 2011-04-17 15:56:46.000000000 -0400
27613 @@ -82,6 +82,7 @@
27614 #include <asm/hvcserver.h>
27615 #include <asm/uaccess.h>
27616 #include <asm/vio.h>
27617 +#include <asm/local.h>
27618
27619 /*
27620 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
27621 @@ -269,7 +270,7 @@ struct hvcs_struct {
27622 unsigned int index;
27623
27624 struct tty_struct *tty;
27625 - int open_count;
27626 + local_t open_count;
27627
27628 /*
27629 * Used to tell the driver kernel_thread what operations need to take
27630 @@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(st
27631
27632 spin_lock_irqsave(&hvcsd->lock, flags);
27633
27634 - if (hvcsd->open_count > 0) {
27635 + if (local_read(&hvcsd->open_count) > 0) {
27636 spin_unlock_irqrestore(&hvcsd->lock, flags);
27637 printk(KERN_INFO "HVCS: vterm state unchanged. "
27638 "The hvcs device node is still in use.\n");
27639 @@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *
27640 if ((retval = hvcs_partner_connect(hvcsd)))
27641 goto error_release;
27642
27643 - hvcsd->open_count = 1;
27644 + local_set(&hvcsd->open_count, 1);
27645 hvcsd->tty = tty;
27646 tty->driver_data = hvcsd;
27647
27648 @@ -1169,7 +1170,7 @@ fast_open:
27649
27650 spin_lock_irqsave(&hvcsd->lock, flags);
27651 kref_get(&hvcsd->kref);
27652 - hvcsd->open_count++;
27653 + local_inc(&hvcsd->open_count);
27654 hvcsd->todo_mask |= HVCS_SCHED_READ;
27655 spin_unlock_irqrestore(&hvcsd->lock, flags);
27656
27657 @@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct
27658 hvcsd = tty->driver_data;
27659
27660 spin_lock_irqsave(&hvcsd->lock, flags);
27661 - if (--hvcsd->open_count == 0) {
27662 + if (local_dec_and_test(&hvcsd->open_count)) {
27663
27664 vio_disable_interrupts(hvcsd->vdev);
27665
27666 @@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct
27667 free_irq(irq, hvcsd);
27668 kref_put(&hvcsd->kref, destroy_hvcs_struct);
27669 return;
27670 - } else if (hvcsd->open_count < 0) {
27671 + } else if (local_read(&hvcsd->open_count) < 0) {
27672 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
27673 " is missmanaged.\n",
27674 - hvcsd->vdev->unit_address, hvcsd->open_count);
27675 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
27676 }
27677
27678 spin_unlock_irqrestore(&hvcsd->lock, flags);
27679 @@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struc
27680
27681 spin_lock_irqsave(&hvcsd->lock, flags);
27682 /* Preserve this so that we know how many kref refs to put */
27683 - temp_open_count = hvcsd->open_count;
27684 + temp_open_count = local_read(&hvcsd->open_count);
27685
27686 /*
27687 * Don't kref put inside the spinlock because the destruction
27688 @@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struc
27689 hvcsd->tty->driver_data = NULL;
27690 hvcsd->tty = NULL;
27691
27692 - hvcsd->open_count = 0;
27693 + local_set(&hvcsd->open_count, 0);
27694
27695 /* This will drop any buffered data on the floor which is OK in a hangup
27696 * scenario. */
27697 @@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct
27698 * the middle of a write operation? This is a crummy place to do this
27699 * but we want to keep it all in the spinlock.
27700 */
27701 - if (hvcsd->open_count <= 0) {
27702 + if (local_read(&hvcsd->open_count) <= 0) {
27703 spin_unlock_irqrestore(&hvcsd->lock, flags);
27704 return -ENODEV;
27705 }
27706 @@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_st
27707 {
27708 struct hvcs_struct *hvcsd = tty->driver_data;
27709
27710 - if (!hvcsd || hvcsd->open_count <= 0)
27711 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
27712 return 0;
27713
27714 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
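
Editor's note: in hvcs the per-port open count moves from a plain int to local_t, so increments, decrements and the final zero test become single atomic operations (local_inc, local_dec_and_test) and reads go through local_read, instead of read-modify-write sequences on a plain integer. A minimal sketch of the open/close idiom with this type, using hypothetical names:

#include <asm/local.h>

struct example_port {
	local_t open_count;
};

static void example_open(struct example_port *p)
{
	local_inc(&p->open_count);
}

static void example_close(struct example_port *p)
{
	if (local_dec_and_test(&p->open_count)) {
		/* last close: tear the port down */
	}
}
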
27715 diff -urNp linux-2.6.32.44/drivers/char/hvc_udbg.c linux-2.6.32.44/drivers/char/hvc_udbg.c
27716 --- linux-2.6.32.44/drivers/char/hvc_udbg.c 2011-03-27 14:31:47.000000000 -0400
27717 +++ linux-2.6.32.44/drivers/char/hvc_udbg.c 2011-04-17 15:56:46.000000000 -0400
27718 @@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno
27719 return i;
27720 }
27721
27722 -static struct hv_ops hvc_udbg_ops = {
27723 +static const struct hv_ops hvc_udbg_ops = {
27724 .get_chars = hvc_udbg_get,
27725 .put_chars = hvc_udbg_put,
27726 };
27727 diff -urNp linux-2.6.32.44/drivers/char/hvc_vio.c linux-2.6.32.44/drivers/char/hvc_vio.c
27728 --- linux-2.6.32.44/drivers/char/hvc_vio.c 2011-03-27 14:31:47.000000000 -0400
27729 +++ linux-2.6.32.44/drivers/char/hvc_vio.c 2011-04-17 15:56:46.000000000 -0400
27730 @@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t v
27731 return got;
27732 }
27733
27734 -static struct hv_ops hvc_get_put_ops = {
27735 +static const struct hv_ops hvc_get_put_ops = {
27736 .get_chars = filtered_get_chars,
27737 .put_chars = hvc_put_chars,
27738 .notifier_add = notifier_add_irq,
27739 diff -urNp linux-2.6.32.44/drivers/char/hvc_xen.c linux-2.6.32.44/drivers/char/hvc_xen.c
27740 --- linux-2.6.32.44/drivers/char/hvc_xen.c 2011-03-27 14:31:47.000000000 -0400
27741 +++ linux-2.6.32.44/drivers/char/hvc_xen.c 2011-04-17 15:56:46.000000000 -0400
27742 @@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno
27743 return recv;
27744 }
27745
27746 -static struct hv_ops hvc_ops = {
27747 +static const struct hv_ops hvc_ops = {
27748 .get_chars = read_console,
27749 .put_chars = write_console,
27750 .notifier_add = notifier_add_irq,
27751 diff -urNp linux-2.6.32.44/drivers/char/ipmi/ipmi_msghandler.c linux-2.6.32.44/drivers/char/ipmi/ipmi_msghandler.c
27752 --- linux-2.6.32.44/drivers/char/ipmi/ipmi_msghandler.c 2011-03-27 14:31:47.000000000 -0400
27753 +++ linux-2.6.32.44/drivers/char/ipmi/ipmi_msghandler.c 2011-05-16 21:46:57.000000000 -0400
27754 @@ -414,7 +414,7 @@ struct ipmi_smi {
27755 struct proc_dir_entry *proc_dir;
27756 char proc_dir_name[10];
27757
27758 - atomic_t stats[IPMI_NUM_STATS];
27759 + atomic_unchecked_t stats[IPMI_NUM_STATS];
27760
27761 /*
27762 * run_to_completion duplicate of smb_info, smi_info
27763 @@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
27764
27765
27766 #define ipmi_inc_stat(intf, stat) \
27767 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
27768 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
27769 #define ipmi_get_stat(intf, stat) \
27770 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
27771 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
27772
27773 static int is_lan_addr(struct ipmi_addr *addr)
27774 {
27775 @@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
27776 INIT_LIST_HEAD(&intf->cmd_rcvrs);
27777 init_waitqueue_head(&intf->waitq);
27778 for (i = 0; i < IPMI_NUM_STATS; i++)
27779 - atomic_set(&intf->stats[i], 0);
27780 + atomic_set_unchecked(&intf->stats[i], 0);
27781
27782 intf->proc_dir = NULL;
27783
27784 @@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
27785 struct ipmi_smi_msg smi_msg;
27786 struct ipmi_recv_msg recv_msg;
27787
27788 + pax_track_stack();
27789 +
27790 si = (struct ipmi_system_interface_addr *) &addr;
27791 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
27792 si->channel = IPMI_BMC_CHANNEL;
27793 diff -urNp linux-2.6.32.44/drivers/char/ipmi/ipmi_si_intf.c linux-2.6.32.44/drivers/char/ipmi/ipmi_si_intf.c
27794 --- linux-2.6.32.44/drivers/char/ipmi/ipmi_si_intf.c 2011-03-27 14:31:47.000000000 -0400
27795 +++ linux-2.6.32.44/drivers/char/ipmi/ipmi_si_intf.c 2011-04-17 15:56:46.000000000 -0400
27796 @@ -277,7 +277,7 @@ struct smi_info {
27797 unsigned char slave_addr;
27798
27799 /* Counters and things for the proc filesystem. */
27800 - atomic_t stats[SI_NUM_STATS];
27801 + atomic_unchecked_t stats[SI_NUM_STATS];
27802
27803 struct task_struct *thread;
27804
27805 @@ -285,9 +285,9 @@ struct smi_info {
27806 };
27807
27808 #define smi_inc_stat(smi, stat) \
27809 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
27810 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
27811 #define smi_get_stat(smi, stat) \
27812 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
27813 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
27814
27815 #define SI_MAX_PARMS 4
27816
27817 @@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info
27818 atomic_set(&new_smi->req_events, 0);
27819 new_smi->run_to_completion = 0;
27820 for (i = 0; i < SI_NUM_STATS; i++)
27821 - atomic_set(&new_smi->stats[i], 0);
27822 + atomic_set_unchecked(&new_smi->stats[i], 0);
27823
27824 new_smi->interrupt_disabled = 0;
27825 atomic_set(&new_smi->stop_operation, 0);
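
Editor's note: because both IPMI layers already funnel every statistics access through the ipmi_inc_stat/ipmi_get_stat and smi_inc_stat/smi_get_stat macros, converting the backing arrays to atomic_unchecked_t only touches the array declarations, the macro bodies and the initialisation loops; no call site changes. The same wrapper-macro technique in miniature, with hypothetical names:

enum { EX_STAT_SENT, EX_STAT_ERRORS, EX_NUM_STATS };

static atomic_unchecked_t ex_stats[EX_NUM_STATS];

#define ex_inc_stat(which) \
	atomic_inc_unchecked(&ex_stats[which])
#define ex_get_stat(which) \
	((unsigned int)atomic_read_unchecked(&ex_stats[which]))
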
27826 diff -urNp linux-2.6.32.44/drivers/char/istallion.c linux-2.6.32.44/drivers/char/istallion.c
27827 --- linux-2.6.32.44/drivers/char/istallion.c 2011-03-27 14:31:47.000000000 -0400
27828 +++ linux-2.6.32.44/drivers/char/istallion.c 2011-05-16 21:46:57.000000000 -0400
27829 @@ -187,7 +187,6 @@ static struct ktermios stli_deftermios
27830 * re-used for each stats call.
27831 */
27832 static comstats_t stli_comstats;
27833 -static combrd_t stli_brdstats;
27834 static struct asystats stli_cdkstats;
27835
27836 /*****************************************************************************/
27837 @@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __u
27838 {
27839 struct stlibrd *brdp;
27840 unsigned int i;
27841 + combrd_t stli_brdstats;
27842
27843 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
27844 return -EFAULT;
27845 @@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stl
27846 struct stliport stli_dummyport;
27847 struct stliport *portp;
27848
27849 + pax_track_stack();
27850 +
27851 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
27852 return -EFAULT;
27853 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
27854 @@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stli
27855 struct stlibrd stli_dummybrd;
27856 struct stlibrd *brdp;
27857
27858 + pax_track_stack();
27859 +
27860 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
27861 return -EFAULT;
27862 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
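The pax_track_stack() calls inserted above and in several drivers below are placed at the top of functions with unusually large on-stack objects. A hedged reading of the contract this patch assumes: the helper lets the PaX stack-sanitization code (PAX_MEMORY_STACKLEAK) account for deep stack usage, and it must cost nothing when that feature is disabled. A stand-in declaration under that assumption, not the patch's own:

#ifdef CONFIG_PAX_MEMORY_STACKLEAK
void pax_track_stack(void);			/* provided by the PaX runtime */
#else
static inline void pax_track_stack(void) { }	/* compiles away entirely */
#endif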
27863 diff -urNp linux-2.6.32.44/drivers/char/Kconfig linux-2.6.32.44/drivers/char/Kconfig
27864 --- linux-2.6.32.44/drivers/char/Kconfig 2011-03-27 14:31:47.000000000 -0400
27865 +++ linux-2.6.32.44/drivers/char/Kconfig 2011-04-18 19:20:15.000000000 -0400
27866 @@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
27867
27868 config DEVKMEM
27869 bool "/dev/kmem virtual device support"
27870 - default y
27871 + default n
27872 + depends on !GRKERNSEC_KMEM
27873 help
27874 Say Y here if you want to support the /dev/kmem device. The
27875 /dev/kmem device is rarely used, but can be used for certain
27876 @@ -1114,6 +1115,7 @@ config DEVPORT
27877 bool
27878 depends on !M68K
27879 depends on ISA || PCI
27880 + depends on !GRKERNSEC_KMEM
27881 default y
27882
27883 source "drivers/s390/char/Kconfig"
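The Kconfig hunks above flip DEVKMEM to default-off and make both DEVKMEM and DEVPORT unselectable whenever GRKERNSEC_KMEM is enabled. Purely as a hypothetical illustration (not part of the patch), the resulting mutual exclusion could be asserted at compile time like this:

#if defined(CONFIG_DEVKMEM) && defined(CONFIG_GRKERNSEC_KMEM)
#error "CONFIG_DEVKMEM cannot be enabled together with CONFIG_GRKERNSEC_KMEM"
#endif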
27884 diff -urNp linux-2.6.32.44/drivers/char/keyboard.c linux-2.6.32.44/drivers/char/keyboard.c
27885 --- linux-2.6.32.44/drivers/char/keyboard.c 2011-03-27 14:31:47.000000000 -0400
27886 +++ linux-2.6.32.44/drivers/char/keyboard.c 2011-04-17 15:56:46.000000000 -0400
27887 @@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, u
27888 kbd->kbdmode == VC_MEDIUMRAW) &&
27889 value != KVAL(K_SAK))
27890 return; /* SAK is allowed even in raw mode */
27891 +
27892 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
27893 + {
27894 + void *func = fn_handler[value];
27895 + if (func == fn_show_state || func == fn_show_ptregs ||
27896 + func == fn_show_mem)
27897 + return;
27898 + }
27899 +#endif
27900 +
27901 fn_handler[value](vc);
27902 }
27903
27904 @@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_
27905 .evbit = { BIT_MASK(EV_SND) },
27906 },
27907
27908 - { }, /* Terminating entry */
27909 + { 0 }, /* Terminating entry */
27910 };
27911
27912 MODULE_DEVICE_TABLE(input, kbd_ids);
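The keyboard.c hunk above suppresses the Show-State, Show-Registers and Show-Memory console keys when the grsecurity /proc restrictions are active, closing a path by which someone at the console could dump task and memory details that /proc would otherwise hide. A hedged, generic form of the same comparison (handler_leaks_info is my name; the fn_show_* symbols are the driver's existing static handlers):

static bool handler_leaks_info(void (*func)(struct vc_data *vc))
{
	return func == fn_show_state || func == fn_show_ptregs ||
	       func == fn_show_mem;
}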
27913 diff -urNp linux-2.6.32.44/drivers/char/mem.c linux-2.6.32.44/drivers/char/mem.c
27914 --- linux-2.6.32.44/drivers/char/mem.c 2011-03-27 14:31:47.000000000 -0400
27915 +++ linux-2.6.32.44/drivers/char/mem.c 2011-04-17 15:56:46.000000000 -0400
27916 @@ -18,6 +18,7 @@
27917 #include <linux/raw.h>
27918 #include <linux/tty.h>
27919 #include <linux/capability.h>
27920 +#include <linux/security.h>
27921 #include <linux/ptrace.h>
27922 #include <linux/device.h>
27923 #include <linux/highmem.h>
27924 @@ -35,6 +36,10 @@
27925 # include <linux/efi.h>
27926 #endif
27927
27928 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27929 +extern struct file_operations grsec_fops;
27930 +#endif
27931 +
27932 static inline unsigned long size_inside_page(unsigned long start,
27933 unsigned long size)
27934 {
27935 @@ -102,9 +107,13 @@ static inline int range_is_allowed(unsig
27936
27937 while (cursor < to) {
27938 if (!devmem_is_allowed(pfn)) {
27939 +#ifdef CONFIG_GRKERNSEC_KMEM
27940 + gr_handle_mem_readwrite(from, to);
27941 +#else
27942 printk(KERN_INFO
27943 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
27944 current->comm, from, to);
27945 +#endif
27946 return 0;
27947 }
27948 cursor += PAGE_SIZE;
27949 @@ -112,6 +121,11 @@ static inline int range_is_allowed(unsig
27950 }
27951 return 1;
27952 }
27953 +#elif defined(CONFIG_GRKERNSEC_KMEM)
27954 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27955 +{
27956 + return 0;
27957 +}
27958 #else
27959 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27960 {
27961 @@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * fi
27962 #endif
27963
27964 while (count > 0) {
27965 + char *temp;
27966 +
27967 /*
27968 * Handle first page in case it's not aligned
27969 */
27970 @@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * fi
27971 if (!ptr)
27972 return -EFAULT;
27973
27974 - if (copy_to_user(buf, ptr, sz)) {
27975 +#ifdef CONFIG_PAX_USERCOPY
27976 + temp = kmalloc(sz, GFP_KERNEL);
27977 + if (!temp) {
27978 + unxlate_dev_mem_ptr(p, ptr);
27979 + return -ENOMEM;
27980 + }
27981 + memcpy(temp, ptr, sz);
27982 +#else
27983 + temp = ptr;
27984 +#endif
27985 +
27986 + if (copy_to_user(buf, temp, sz)) {
27987 +
27988 +#ifdef CONFIG_PAX_USERCOPY
27989 + kfree(temp);
27990 +#endif
27991 +
27992 unxlate_dev_mem_ptr(p, ptr);
27993 return -EFAULT;
27994 }
27995
27996 +#ifdef CONFIG_PAX_USERCOPY
27997 + kfree(temp);
27998 +#endif
27999 +
28000 unxlate_dev_mem_ptr(p, ptr);
28001
28002 buf += sz;
28003 @@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *fi
28004 size_t count, loff_t *ppos)
28005 {
28006 unsigned long p = *ppos;
28007 - ssize_t low_count, read, sz;
28008 + ssize_t low_count, read, sz, err = 0;
28009 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
28010 - int err = 0;
28011
28012 read = 0;
28013 if (p < (unsigned long) high_memory) {
28014 @@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *fi
28015 }
28016 #endif
28017 while (low_count > 0) {
28018 + char *temp;
28019 +
28020 sz = size_inside_page(p, low_count);
28021
28022 /*
28023 @@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *fi
28024 */
28025 kbuf = xlate_dev_kmem_ptr((char *)p);
28026
28027 - if (copy_to_user(buf, kbuf, sz))
28028 +#ifdef CONFIG_PAX_USERCOPY
28029 + temp = kmalloc(sz, GFP_KERNEL);
28030 + if (!temp)
28031 + return -ENOMEM;
28032 + memcpy(temp, kbuf, sz);
28033 +#else
28034 + temp = kbuf;
28035 +#endif
28036 +
28037 + err = copy_to_user(buf, temp, sz);
28038 +
28039 +#ifdef CONFIG_PAX_USERCOPY
28040 + kfree(temp);
28041 +#endif
28042 +
28043 + if (err)
28044 return -EFAULT;
28045 buf += sz;
28046 p += sz;
28047 @@ -889,6 +941,9 @@ static const struct memdev {
28048 #ifdef CONFIG_CRASH_DUMP
28049 [12] = { "oldmem", 0, &oldmem_fops, NULL },
28050 #endif
28051 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
28052 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
28053 +#endif
28054 };
28055
28056 static int memory_open(struct inode *inode, struct file *filp)
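The read_mem()/read_kmem() changes above all follow one pattern: with PAX_USERCOPY enabled, copy_to_user() expects its kernel-side buffer to be a slab or stack object whose size it can verify, so data read straight out of a /dev/mem or /dev/kmem mapping is first bounced through a kmalloc'd buffer. A hedged sketch of the pattern as a standalone helper (bounce_copy_to_user is an illustrative name, not something the patch defines):

static ssize_t bounce_copy_to_user(void __user *to, const void *from, size_t sz)
{
	void *temp = kmalloc(sz, GFP_KERNEL);
	ssize_t ret = 0;

	if (!temp)
		return -ENOMEM;
	memcpy(temp, from, sz);		/* kernel-to-kernel copy, unchecked   */
	if (copy_to_user(to, temp, sz))	/* checked copy from a sized object   */
		ret = -EFAULT;
	kfree(temp);
	return ret;
}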
28057 diff -urNp linux-2.6.32.44/drivers/char/pcmcia/ipwireless/tty.c linux-2.6.32.44/drivers/char/pcmcia/ipwireless/tty.c
28058 --- linux-2.6.32.44/drivers/char/pcmcia/ipwireless/tty.c 2011-03-27 14:31:47.000000000 -0400
28059 +++ linux-2.6.32.44/drivers/char/pcmcia/ipwireless/tty.c 2011-04-17 15:56:46.000000000 -0400
28060 @@ -29,6 +29,7 @@
28061 #include <linux/tty_driver.h>
28062 #include <linux/tty_flip.h>
28063 #include <linux/uaccess.h>
28064 +#include <asm/local.h>
28065
28066 #include "tty.h"
28067 #include "network.h"
28068 @@ -51,7 +52,7 @@ struct ipw_tty {
28069 int tty_type;
28070 struct ipw_network *network;
28071 struct tty_struct *linux_tty;
28072 - int open_count;
28073 + local_t open_count;
28074 unsigned int control_lines;
28075 struct mutex ipw_tty_mutex;
28076 int tx_bytes_queued;
28077 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
28078 mutex_unlock(&tty->ipw_tty_mutex);
28079 return -ENODEV;
28080 }
28081 - if (tty->open_count == 0)
28082 + if (local_read(&tty->open_count) == 0)
28083 tty->tx_bytes_queued = 0;
28084
28085 - tty->open_count++;
28086 + local_inc(&tty->open_count);
28087
28088 tty->linux_tty = linux_tty;
28089 linux_tty->driver_data = tty;
28090 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
28091
28092 static void do_ipw_close(struct ipw_tty *tty)
28093 {
28094 - tty->open_count--;
28095 -
28096 - if (tty->open_count == 0) {
28097 + if (local_dec_return(&tty->open_count) == 0) {
28098 struct tty_struct *linux_tty = tty->linux_tty;
28099
28100 if (linux_tty != NULL) {
28101 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
28102 return;
28103
28104 mutex_lock(&tty->ipw_tty_mutex);
28105 - if (tty->open_count == 0) {
28106 + if (local_read(&tty->open_count) == 0) {
28107 mutex_unlock(&tty->ipw_tty_mutex);
28108 return;
28109 }
28110 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
28111 return;
28112 }
28113
28114 - if (!tty->open_count) {
28115 + if (!local_read(&tty->open_count)) {
28116 mutex_unlock(&tty->ipw_tty_mutex);
28117 return;
28118 }
28119 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
28120 return -ENODEV;
28121
28122 mutex_lock(&tty->ipw_tty_mutex);
28123 - if (!tty->open_count) {
28124 + if (!local_read(&tty->open_count)) {
28125 mutex_unlock(&tty->ipw_tty_mutex);
28126 return -EINVAL;
28127 }
28128 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
28129 if (!tty)
28130 return -ENODEV;
28131
28132 - if (!tty->open_count)
28133 + if (!local_read(&tty->open_count))
28134 return -EINVAL;
28135
28136 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
28137 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
28138 if (!tty)
28139 return 0;
28140
28141 - if (!tty->open_count)
28142 + if (!local_read(&tty->open_count))
28143 return 0;
28144
28145 return tty->tx_bytes_queued;
28146 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
28147 if (!tty)
28148 return -ENODEV;
28149
28150 - if (!tty->open_count)
28151 + if (!local_read(&tty->open_count))
28152 return -EINVAL;
28153
28154 return get_control_lines(tty);
28155 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
28156 if (!tty)
28157 return -ENODEV;
28158
28159 - if (!tty->open_count)
28160 + if (!local_read(&tty->open_count))
28161 return -EINVAL;
28162
28163 return set_control_lines(tty, set, clear);
28164 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
28165 if (!tty)
28166 return -ENODEV;
28167
28168 - if (!tty->open_count)
28169 + if (!local_read(&tty->open_count))
28170 return -EINVAL;
28171
28172 /* FIXME: Exactly how is the tty object locked here .. */
28173 @@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty
28174 against a parallel ioctl etc */
28175 mutex_lock(&ttyj->ipw_tty_mutex);
28176 }
28177 - while (ttyj->open_count)
28178 + while (local_read(&ttyj->open_count))
28179 do_ipw_close(ttyj);
28180 ipwireless_disassociate_network_ttys(network,
28181 ttyj->channel_idx);
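The open_count conversion above (repeated later for sonypi and the DRM core) replaces a plain int counter with a local_t and routes every access through local_inc()/local_dec_return()/local_read(), so the decrement-then-test pair collapses into one call and the counter type stays exempt from the overflow checking applied to atomic_t elsewhere in this patch. A minimal hedged illustration of the same accessors, using names of my own:

#include <asm/local.h>

static local_t example_open_count = LOCAL_INIT(0);

static void example_open(void)
{
	local_inc(&example_open_count);
}

static int example_last_close(void)
{
	/* true when this close drops the count to zero */
	return local_dec_return(&example_open_count) == 0;
}

static int example_is_open(void)
{
	return local_read(&example_open_count) != 0;
}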
28182 diff -urNp linux-2.6.32.44/drivers/char/pty.c linux-2.6.32.44/drivers/char/pty.c
28183 --- linux-2.6.32.44/drivers/char/pty.c 2011-03-27 14:31:47.000000000 -0400
28184 +++ linux-2.6.32.44/drivers/char/pty.c 2011-08-05 20:33:55.000000000 -0400
28185 @@ -736,8 +736,10 @@ static void __init unix98_pty_init(void)
28186 register_sysctl_table(pty_root_table);
28187
28188 /* Now create the /dev/ptmx special device */
28189 + pax_open_kernel();
28190 tty_default_fops(&ptmx_fops);
28191 - ptmx_fops.open = ptmx_open;
28192 + *(void **)&ptmx_fops.open = ptmx_open;
28193 + pax_close_kernel();
28194
28195 cdev_init(&ptmx_cdev, &ptmx_fops);
28196 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
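The ptmx_fops fixups above share an idiom that recurs in tty_io.c, virtio_console.c and the firewire core later in this patch: tables of function pointers are constified and live in read-only memory, so the few legitimate one-time writes at init are bracketed by pax_open_kernel()/pax_close_kernel() and go through a (void **) cast that strips the added const. A hedged, hypothetical example of the idiom (example_fops, example_open and example_init are illustrative names only):

#include <linux/fs.h>
#include <linux/init.h>

static int example_open(struct inode *inode, struct file *filp)
{
	return 0;
}

static struct file_operations example_fops;	/* constified by the PaX plugin */

static int __init example_init(void)
{
	pax_open_kernel();
	*(void **)&example_fops.open = example_open;	/* one-time fixup */
	pax_close_kernel();
	return 0;
}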
28197 diff -urNp linux-2.6.32.44/drivers/char/random.c linux-2.6.32.44/drivers/char/random.c
28198 --- linux-2.6.32.44/drivers/char/random.c 2011-03-27 14:31:47.000000000 -0400
28199 +++ linux-2.6.32.44/drivers/char/random.c 2011-08-07 19:48:09.000000000 -0400
28200 @@ -254,8 +254,13 @@
28201 /*
28202 * Configuration information
28203 */
28204 +#ifdef CONFIG_GRKERNSEC_RANDNET
28205 +#define INPUT_POOL_WORDS 512
28206 +#define OUTPUT_POOL_WORDS 128
28207 +#else
28208 #define INPUT_POOL_WORDS 128
28209 #define OUTPUT_POOL_WORDS 32
28210 +#endif
28211 #define SEC_XFER_SIZE 512
28212
28213 /*
28214 @@ -292,10 +297,17 @@ static struct poolinfo {
28215 int poolwords;
28216 int tap1, tap2, tap3, tap4, tap5;
28217 } poolinfo_table[] = {
28218 +#ifdef CONFIG_GRKERNSEC_RANDNET
28219 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
28220 + { 512, 411, 308, 208, 104, 1 },
28221 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
28222 + { 128, 103, 76, 51, 25, 1 },
28223 +#else
28224 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
28225 { 128, 103, 76, 51, 25, 1 },
28226 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
28227 { 32, 26, 20, 14, 7, 1 },
28228 +#endif
28229 #if 0
28230 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
28231 { 2048, 1638, 1231, 819, 411, 1 },
28232 @@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
28233 #include <linux/sysctl.h>
28234
28235 static int min_read_thresh = 8, min_write_thresh;
28236 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
28237 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
28238 static int max_write_thresh = INPUT_POOL_WORDS * 32;
28239 static char sysctl_bootid[16];
28240
28241 @@ -1339,330 +1351,14 @@ ctl_table random_table[] = {
28242 };
28243 #endif /* CONFIG_SYSCTL */
28244
28245 -/********************************************************************
28246 - *
28247 - * Random funtions for networking
28248 - *
28249 - ********************************************************************/
28250 -
28251 -/*
28252 - * TCP initial sequence number picking. This uses the random number
28253 - * generator to pick an initial secret value. This value is hashed
28254 - * along with the TCP endpoint information to provide a unique
28255 - * starting point for each pair of TCP endpoints. This defeats
28256 - * attacks which rely on guessing the initial TCP sequence number.
28257 - * This algorithm was suggested by Steve Bellovin.
28258 - *
28259 - * Using a very strong hash was taking an appreciable amount of the total
28260 - * TCP connection establishment time, so this is a weaker hash,
28261 - * compensated for by changing the secret periodically.
28262 - */
28263 -
28264 -/* F, G and H are basic MD4 functions: selection, majority, parity */
28265 -#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
28266 -#define G(x, y, z) (((x) & (y)) + (((x) ^ (y)) & (z)))
28267 -#define H(x, y, z) ((x) ^ (y) ^ (z))
28268 -
28269 -/*
28270 - * The generic round function. The application is so specific that
28271 - * we don't bother protecting all the arguments with parens, as is generally
28272 - * good macro practice, in favor of extra legibility.
28273 - * Rotation is separate from addition to prevent recomputation
28274 - */
28275 -#define ROUND(f, a, b, c, d, x, s) \
28276 - (a += f(b, c, d) + x, a = (a << s) | (a >> (32 - s)))
28277 -#define K1 0
28278 -#define K2 013240474631UL
28279 -#define K3 015666365641UL
28280 -
28281 -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
28282 -
28283 -static __u32 twothirdsMD4Transform(__u32 const buf[4], __u32 const in[12])
28284 -{
28285 - __u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3];
28286 -
28287 - /* Round 1 */
28288 - ROUND(F, a, b, c, d, in[ 0] + K1, 3);
28289 - ROUND(F, d, a, b, c, in[ 1] + K1, 7);
28290 - ROUND(F, c, d, a, b, in[ 2] + K1, 11);
28291 - ROUND(F, b, c, d, a, in[ 3] + K1, 19);
28292 - ROUND(F, a, b, c, d, in[ 4] + K1, 3);
28293 - ROUND(F, d, a, b, c, in[ 5] + K1, 7);
28294 - ROUND(F, c, d, a, b, in[ 6] + K1, 11);
28295 - ROUND(F, b, c, d, a, in[ 7] + K1, 19);
28296 - ROUND(F, a, b, c, d, in[ 8] + K1, 3);
28297 - ROUND(F, d, a, b, c, in[ 9] + K1, 7);
28298 - ROUND(F, c, d, a, b, in[10] + K1, 11);
28299 - ROUND(F, b, c, d, a, in[11] + K1, 19);
28300 -
28301 - /* Round 2 */
28302 - ROUND(G, a, b, c, d, in[ 1] + K2, 3);
28303 - ROUND(G, d, a, b, c, in[ 3] + K2, 5);
28304 - ROUND(G, c, d, a, b, in[ 5] + K2, 9);
28305 - ROUND(G, b, c, d, a, in[ 7] + K2, 13);
28306 - ROUND(G, a, b, c, d, in[ 9] + K2, 3);
28307 - ROUND(G, d, a, b, c, in[11] + K2, 5);
28308 - ROUND(G, c, d, a, b, in[ 0] + K2, 9);
28309 - ROUND(G, b, c, d, a, in[ 2] + K2, 13);
28310 - ROUND(G, a, b, c, d, in[ 4] + K2, 3);
28311 - ROUND(G, d, a, b, c, in[ 6] + K2, 5);
28312 - ROUND(G, c, d, a, b, in[ 8] + K2, 9);
28313 - ROUND(G, b, c, d, a, in[10] + K2, 13);
28314 -
28315 - /* Round 3 */
28316 - ROUND(H, a, b, c, d, in[ 3] + K3, 3);
28317 - ROUND(H, d, a, b, c, in[ 7] + K3, 9);
28318 - ROUND(H, c, d, a, b, in[11] + K3, 11);
28319 - ROUND(H, b, c, d, a, in[ 2] + K3, 15);
28320 - ROUND(H, a, b, c, d, in[ 6] + K3, 3);
28321 - ROUND(H, d, a, b, c, in[10] + K3, 9);
28322 - ROUND(H, c, d, a, b, in[ 1] + K3, 11);
28323 - ROUND(H, b, c, d, a, in[ 5] + K3, 15);
28324 - ROUND(H, a, b, c, d, in[ 9] + K3, 3);
28325 - ROUND(H, d, a, b, c, in[ 0] + K3, 9);
28326 - ROUND(H, c, d, a, b, in[ 4] + K3, 11);
28327 - ROUND(H, b, c, d, a, in[ 8] + K3, 15);
28328 -
28329 - return buf[1] + b; /* "most hashed" word */
28330 - /* Alternative: return sum of all words? */
28331 -}
28332 -#endif
28333 -
28334 -#undef ROUND
28335 -#undef F
28336 -#undef G
28337 -#undef H
28338 -#undef K1
28339 -#undef K2
28340 -#undef K3
28341 -
28342 -/* This should not be decreased so low that ISNs wrap too fast. */
28343 -#define REKEY_INTERVAL (300 * HZ)
28344 -/*
28345 - * Bit layout of the tcp sequence numbers (before adding current time):
28346 - * bit 24-31: increased after every key exchange
28347 - * bit 0-23: hash(source,dest)
28348 - *
28349 - * The implementation is similar to the algorithm described
28350 - * in the Appendix of RFC 1185, except that
28351 - * - it uses a 1 MHz clock instead of a 250 kHz clock
28352 - * - it performs a rekey every 5 minutes, which is equivalent
28353 - * to a (source,dest) tulple dependent forward jump of the
28354 - * clock by 0..2^(HASH_BITS+1)
28355 - *
28356 - * Thus the average ISN wraparound time is 68 minutes instead of
28357 - * 4.55 hours.
28358 - *
28359 - * SMP cleanup and lock avoidance with poor man's RCU.
28360 - * Manfred Spraul <manfred@colorfullife.com>
28361 - *
28362 - */
28363 -#define COUNT_BITS 8
28364 -#define COUNT_MASK ((1 << COUNT_BITS) - 1)
28365 -#define HASH_BITS 24
28366 -#define HASH_MASK ((1 << HASH_BITS) - 1)
28367 -
28368 -static struct keydata {
28369 - __u32 count; /* already shifted to the final position */
28370 - __u32 secret[12];
28371 -} ____cacheline_aligned ip_keydata[2];
28372 -
28373 -static unsigned int ip_cnt;
28374 -
28375 -static void rekey_seq_generator(struct work_struct *work);
28376 -
28377 -static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator);
28378 -
28379 -/*
28380 - * Lock avoidance:
28381 - * The ISN generation runs lockless - it's just a hash over random data.
28382 - * State changes happen every 5 minutes when the random key is replaced.
28383 - * Synchronization is performed by having two copies of the hash function
28384 - * state and rekey_seq_generator always updates the inactive copy.
28385 - * The copy is then activated by updating ip_cnt.
28386 - * The implementation breaks down if someone blocks the thread
28387 - * that processes SYN requests for more than 5 minutes. Should never
28388 - * happen, and even if that happens only a not perfectly compliant
28389 - * ISN is generated, nothing fatal.
28390 - */
28391 -static void rekey_seq_generator(struct work_struct *work)
28392 -{
28393 - struct keydata *keyptr = &ip_keydata[1 ^ (ip_cnt & 1)];
28394 -
28395 - get_random_bytes(keyptr->secret, sizeof(keyptr->secret));
28396 - keyptr->count = (ip_cnt & COUNT_MASK) << HASH_BITS;
28397 - smp_wmb();
28398 - ip_cnt++;
28399 - schedule_delayed_work(&rekey_work,
28400 - round_jiffies_relative(REKEY_INTERVAL));
28401 -}
28402 -
28403 -static inline struct keydata *get_keyptr(void)
28404 -{
28405 - struct keydata *keyptr = &ip_keydata[ip_cnt & 1];
28406 -
28407 - smp_rmb();
28408 -
28409 - return keyptr;
28410 -}
28411 -
28412 -static __init int seqgen_init(void)
28413 +static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
28414 +static int __init random_int_secret_init(void)
28415 {
28416 - rekey_seq_generator(NULL);
28417 + get_random_bytes(random_int_secret, sizeof(random_int_secret));
28418 return 0;
28419 }
28420 -late_initcall(seqgen_init);
28421 -
28422 -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
28423 -__u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
28424 - __be16 sport, __be16 dport)
28425 -{
28426 - __u32 seq;
28427 - __u32 hash[12];
28428 - struct keydata *keyptr = get_keyptr();
28429 -
28430 - /* The procedure is the same as for IPv4, but addresses are longer.
28431 - * Thus we must use twothirdsMD4Transform.
28432 - */
28433 -
28434 - memcpy(hash, saddr, 16);
28435 - hash[4] = ((__force u16)sport << 16) + (__force u16)dport;
28436 - memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7);
28437 -
28438 - seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK;
28439 - seq += keyptr->count;
28440 -
28441 - seq += ktime_to_ns(ktime_get_real());
28442 -
28443 - return seq;
28444 -}
28445 -EXPORT_SYMBOL(secure_tcpv6_sequence_number);
28446 -#endif
28447 -
28448 -/* The code below is shamelessly stolen from secure_tcp_sequence_number().
28449 - * All blames to Andrey V. Savochkin <saw@msu.ru>.
28450 - */
28451 -__u32 secure_ip_id(__be32 daddr)
28452 -{
28453 - struct keydata *keyptr;
28454 - __u32 hash[4];
28455 -
28456 - keyptr = get_keyptr();
28457 -
28458 - /*
28459 - * Pick a unique starting offset for each IP destination.
28460 - * The dest ip address is placed in the starting vector,
28461 - * which is then hashed with random data.
28462 - */
28463 - hash[0] = (__force __u32)daddr;
28464 - hash[1] = keyptr->secret[9];
28465 - hash[2] = keyptr->secret[10];
28466 - hash[3] = keyptr->secret[11];
28467 -
28468 - return half_md4_transform(hash, keyptr->secret);
28469 -}
28470 -
28471 -#ifdef CONFIG_INET
28472 -
28473 -__u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
28474 - __be16 sport, __be16 dport)
28475 -{
28476 - __u32 seq;
28477 - __u32 hash[4];
28478 - struct keydata *keyptr = get_keyptr();
28479 -
28480 - /*
28481 - * Pick a unique starting offset for each TCP connection endpoints
28482 - * (saddr, daddr, sport, dport).
28483 - * Note that the words are placed into the starting vector, which is
28484 - * then mixed with a partial MD4 over random data.
28485 - */
28486 - hash[0] = (__force u32)saddr;
28487 - hash[1] = (__force u32)daddr;
28488 - hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
28489 - hash[3] = keyptr->secret[11];
28490 -
28491 - seq = half_md4_transform(hash, keyptr->secret) & HASH_MASK;
28492 - seq += keyptr->count;
28493 - /*
28494 - * As close as possible to RFC 793, which
28495 - * suggests using a 250 kHz clock.
28496 - * Further reading shows this assumes 2 Mb/s networks.
28497 - * For 10 Mb/s Ethernet, a 1 MHz clock is appropriate.
28498 - * For 10 Gb/s Ethernet, a 1 GHz clock should be ok, but
28499 - * we also need to limit the resolution so that the u32 seq
28500 - * overlaps less than one time per MSL (2 minutes).
28501 - * Choosing a clock of 64 ns period is OK. (period of 274 s)
28502 - */
28503 - seq += ktime_to_ns(ktime_get_real()) >> 6;
28504 -
28505 - return seq;
28506 -}
28507 -
28508 -/* Generate secure starting point for ephemeral IPV4 transport port search */
28509 -u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
28510 -{
28511 - struct keydata *keyptr = get_keyptr();
28512 - u32 hash[4];
28513 -
28514 - /*
28515 - * Pick a unique starting offset for each ephemeral port search
28516 - * (saddr, daddr, dport) and 48bits of random data.
28517 - */
28518 - hash[0] = (__force u32)saddr;
28519 - hash[1] = (__force u32)daddr;
28520 - hash[2] = (__force u32)dport ^ keyptr->secret[10];
28521 - hash[3] = keyptr->secret[11];
28522 -
28523 - return half_md4_transform(hash, keyptr->secret);
28524 -}
28525 -EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral);
28526 -
28527 -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
28528 -u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
28529 - __be16 dport)
28530 -{
28531 - struct keydata *keyptr = get_keyptr();
28532 - u32 hash[12];
28533 -
28534 - memcpy(hash, saddr, 16);
28535 - hash[4] = (__force u32)dport;
28536 - memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7);
28537 -
28538 - return twothirdsMD4Transform((const __u32 *)daddr, hash);
28539 -}
28540 -#endif
28541 -
28542 -#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
28543 -/* Similar to secure_tcp_sequence_number but generate a 48 bit value
28544 - * bit's 32-47 increase every key exchange
28545 - * 0-31 hash(source, dest)
28546 - */
28547 -u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
28548 - __be16 sport, __be16 dport)
28549 -{
28550 - u64 seq;
28551 - __u32 hash[4];
28552 - struct keydata *keyptr = get_keyptr();
28553 -
28554 - hash[0] = (__force u32)saddr;
28555 - hash[1] = (__force u32)daddr;
28556 - hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
28557 - hash[3] = keyptr->secret[11];
28558 -
28559 - seq = half_md4_transform(hash, keyptr->secret);
28560 - seq |= ((u64)keyptr->count) << (32 - HASH_BITS);
28561 -
28562 - seq += ktime_to_ns(ktime_get_real());
28563 - seq &= (1ull << 48) - 1;
28564 -
28565 - return seq;
28566 -}
28567 -EXPORT_SYMBOL(secure_dccp_sequence_number);
28568 -#endif
28569 -
28570 -#endif /* CONFIG_INET */
28571
28572 +late_initcall(random_int_secret_init);
28573
28574 /*
28575 * Get a random word for internal kernel use only. Similar to urandom but
28576 @@ -1670,17 +1366,16 @@ EXPORT_SYMBOL(secure_dccp_sequence_numbe
28577 * value is not cryptographically secure but for several uses the cost of
28578 * depleting entropy is too high
28579 */
28580 -DEFINE_PER_CPU(__u32 [4], get_random_int_hash);
28581 +DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
28582 unsigned int get_random_int(void)
28583 {
28584 - struct keydata *keyptr;
28585 __u32 *hash = get_cpu_var(get_random_int_hash);
28586 - int ret;
28587 + unsigned int ret;
28588
28589 - keyptr = get_keyptr();
28590 hash[0] += current->pid + jiffies + get_cycles();
28591
28592 - ret = half_md4_transform(hash, keyptr->secret);
28593 + md5_transform(hash, random_int_secret);
28594 + ret = hash[0];
28595 put_cpu_var(get_random_int_hash);
28596
28597 return ret;
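Two separate things happen in random.c above: GRKERNSEC_RANDNET enlarges the entropy pools (512/128 words instead of 128/32, with max_read_thresh rebased on OUTPUT_POOL_WORDS), and the old MD4-based networking helpers (secure_tcp_sequence_number() and friends) are removed from this file while get_random_int() is rebuilt around a late_initcall-seeded random_int_secret mixed into a per-CPU hash by md5_transform(). Callers of get_random_int() are unaffected; a hedged usage example of that non-cryptographic interface:

/* cheap per-call randomness, e.g. for jitter or layout randomization;
 * not a substitute for get_random_bytes() where real strength is needed */
static unsigned int example_jitter(unsigned int max)
{
	return max ? get_random_int() % max : 0;
}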
28598 diff -urNp linux-2.6.32.44/drivers/char/rocket.c linux-2.6.32.44/drivers/char/rocket.c
28599 --- linux-2.6.32.44/drivers/char/rocket.c 2011-03-27 14:31:47.000000000 -0400
28600 +++ linux-2.6.32.44/drivers/char/rocket.c 2011-05-16 21:46:57.000000000 -0400
28601 @@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info
28602 struct rocket_ports tmp;
28603 int board;
28604
28605 + pax_track_stack();
28606 +
28607 if (!retports)
28608 return -EFAULT;
28609 memset(&tmp, 0, sizeof (tmp));
28610 diff -urNp linux-2.6.32.44/drivers/char/sonypi.c linux-2.6.32.44/drivers/char/sonypi.c
28611 --- linux-2.6.32.44/drivers/char/sonypi.c 2011-03-27 14:31:47.000000000 -0400
28612 +++ linux-2.6.32.44/drivers/char/sonypi.c 2011-04-17 15:56:46.000000000 -0400
28613 @@ -55,6 +55,7 @@
28614 #include <asm/uaccess.h>
28615 #include <asm/io.h>
28616 #include <asm/system.h>
28617 +#include <asm/local.h>
28618
28619 #include <linux/sonypi.h>
28620
28621 @@ -491,7 +492,7 @@ static struct sonypi_device {
28622 spinlock_t fifo_lock;
28623 wait_queue_head_t fifo_proc_list;
28624 struct fasync_struct *fifo_async;
28625 - int open_count;
28626 + local_t open_count;
28627 int model;
28628 struct input_dev *input_jog_dev;
28629 struct input_dev *input_key_dev;
28630 @@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, st
28631 static int sonypi_misc_release(struct inode *inode, struct file *file)
28632 {
28633 mutex_lock(&sonypi_device.lock);
28634 - sonypi_device.open_count--;
28635 + local_dec(&sonypi_device.open_count);
28636 mutex_unlock(&sonypi_device.lock);
28637 return 0;
28638 }
28639 @@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode
28640 lock_kernel();
28641 mutex_lock(&sonypi_device.lock);
28642 /* Flush input queue on first open */
28643 - if (!sonypi_device.open_count)
28644 + if (!local_read(&sonypi_device.open_count))
28645 kfifo_reset(sonypi_device.fifo);
28646 - sonypi_device.open_count++;
28647 + local_inc(&sonypi_device.open_count);
28648 mutex_unlock(&sonypi_device.lock);
28649 unlock_kernel();
28650 return 0;
28651 diff -urNp linux-2.6.32.44/drivers/char/stallion.c linux-2.6.32.44/drivers/char/stallion.c
28652 --- linux-2.6.32.44/drivers/char/stallion.c 2011-03-27 14:31:47.000000000 -0400
28653 +++ linux-2.6.32.44/drivers/char/stallion.c 2011-05-16 21:46:57.000000000 -0400
28654 @@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlp
28655 struct stlport stl_dummyport;
28656 struct stlport *portp;
28657
28658 + pax_track_stack();
28659 +
28660 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
28661 return -EFAULT;
28662 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
28663 diff -urNp linux-2.6.32.44/drivers/char/tpm/tpm_bios.c linux-2.6.32.44/drivers/char/tpm/tpm_bios.c
28664 --- linux-2.6.32.44/drivers/char/tpm/tpm_bios.c 2011-03-27 14:31:47.000000000 -0400
28665 +++ linux-2.6.32.44/drivers/char/tpm/tpm_bios.c 2011-04-17 15:56:46.000000000 -0400
28666 @@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start
28667 event = addr;
28668
28669 if ((event->event_type == 0 && event->event_size == 0) ||
28670 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
28671 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
28672 return NULL;
28673
28674 return addr;
28675 @@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(
28676 return NULL;
28677
28678 if ((event->event_type == 0 && event->event_size == 0) ||
28679 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
28680 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
28681 return NULL;
28682
28683 (*pos)++;
28684 @@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_
28685 int i;
28686
28687 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
28688 - seq_putc(m, data[i]);
28689 + if (!seq_putc(m, data[i]))
28690 + return -EFAULT;
28691
28692 return 0;
28693 }
28694 @@ -409,6 +410,11 @@ static int read_log(struct tpm_bios_log
28695 log->bios_event_log_end = log->bios_event_log + len;
28696
28697 virt = acpi_os_map_memory(start, len);
28698 + if (!virt) {
28699 + kfree(log->bios_event_log);
28700 + log->bios_event_log = NULL;
28701 + return -EFAULT;
28702 + }
28703
28704 memcpy(log->bios_event_log, virt, len);
28705
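Two hardening fixes sit in tpm_bios.c above: the event-log iterators rephrase their bounds check so the attacker-influenced event_size is compared against the remaining space rather than added to a pointer (where the sum could wrap), and read_log() now bails out cleanly if acpi_os_map_memory() returns NULL instead of memcpy'ing from it. A hedged, generic restatement of the overflow-safe comparison, assuming addr plus the fixed header is already known to lie below limit:

/* unsafe:  addr + header + event_size >= limit   (sum may wrap)        */
/* safe:    event_size >= limit - addr - header   (no untrusted add)    */
static int event_fits(const char *addr, const char *limit,
		      size_t header, size_t event_size)
{
	return event_size < (size_t)(limit - addr) - header;
}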
28706 diff -urNp linux-2.6.32.44/drivers/char/tpm/tpm.c linux-2.6.32.44/drivers/char/tpm/tpm.c
28707 --- linux-2.6.32.44/drivers/char/tpm/tpm.c 2011-04-17 17:00:52.000000000 -0400
28708 +++ linux-2.6.32.44/drivers/char/tpm/tpm.c 2011-05-16 21:46:57.000000000 -0400
28709 @@ -402,7 +402,7 @@ static ssize_t tpm_transmit(struct tpm_c
28710 chip->vendor.req_complete_val)
28711 goto out_recv;
28712
28713 - if ((status == chip->vendor.req_canceled)) {
28714 + if (status == chip->vendor.req_canceled) {
28715 dev_err(chip->dev, "Operation Canceled\n");
28716 rc = -ECANCELED;
28717 goto out;
28718 @@ -821,6 +821,8 @@ ssize_t tpm_show_pubek(struct device *de
28719
28720 struct tpm_chip *chip = dev_get_drvdata(dev);
28721
28722 + pax_track_stack();
28723 +
28724 tpm_cmd.header.in = tpm_readpubek_header;
28725 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
28726 "attempting to read the PUBEK");
28727 diff -urNp linux-2.6.32.44/drivers/char/tty_io.c linux-2.6.32.44/drivers/char/tty_io.c
28728 --- linux-2.6.32.44/drivers/char/tty_io.c 2011-03-27 14:31:47.000000000 -0400
28729 +++ linux-2.6.32.44/drivers/char/tty_io.c 2011-08-05 20:33:55.000000000 -0400
28730 @@ -2582,8 +2582,10 @@ long tty_ioctl(struct file *file, unsign
28731 return retval;
28732 }
28733
28734 +EXPORT_SYMBOL(tty_ioctl);
28735 +
28736 #ifdef CONFIG_COMPAT
28737 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
28738 +long tty_compat_ioctl(struct file *file, unsigned int cmd,
28739 unsigned long arg)
28740 {
28741 struct inode *inode = file->f_dentry->d_inode;
28742 @@ -2607,6 +2609,8 @@ static long tty_compat_ioctl(struct file
28743
28744 return retval;
28745 }
28746 +
28747 +EXPORT_SYMBOL(tty_compat_ioctl);
28748 #endif
28749
28750 /*
28751 @@ -3052,7 +3056,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
28752
28753 void tty_default_fops(struct file_operations *fops)
28754 {
28755 - *fops = tty_fops;
28756 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
28757 }
28758
28759 /*
28760 diff -urNp linux-2.6.32.44/drivers/char/tty_ldisc.c linux-2.6.32.44/drivers/char/tty_ldisc.c
28761 --- linux-2.6.32.44/drivers/char/tty_ldisc.c 2011-07-13 17:23:04.000000000 -0400
28762 +++ linux-2.6.32.44/drivers/char/tty_ldisc.c 2011-07-13 17:23:18.000000000 -0400
28763 @@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
28764 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
28765 struct tty_ldisc_ops *ldo = ld->ops;
28766
28767 - ldo->refcount--;
28768 + atomic_dec(&ldo->refcount);
28769 module_put(ldo->owner);
28770 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28771
28772 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
28773 spin_lock_irqsave(&tty_ldisc_lock, flags);
28774 tty_ldiscs[disc] = new_ldisc;
28775 new_ldisc->num = disc;
28776 - new_ldisc->refcount = 0;
28777 + atomic_set(&new_ldisc->refcount, 0);
28778 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28779
28780 return ret;
28781 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
28782 return -EINVAL;
28783
28784 spin_lock_irqsave(&tty_ldisc_lock, flags);
28785 - if (tty_ldiscs[disc]->refcount)
28786 + if (atomic_read(&tty_ldiscs[disc]->refcount))
28787 ret = -EBUSY;
28788 else
28789 tty_ldiscs[disc] = NULL;
28790 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
28791 if (ldops) {
28792 ret = ERR_PTR(-EAGAIN);
28793 if (try_module_get(ldops->owner)) {
28794 - ldops->refcount++;
28795 + atomic_inc(&ldops->refcount);
28796 ret = ldops;
28797 }
28798 }
28799 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
28800 unsigned long flags;
28801
28802 spin_lock_irqsave(&tty_ldisc_lock, flags);
28803 - ldops->refcount--;
28804 + atomic_dec(&ldops->refcount);
28805 module_put(ldops->owner);
28806 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28807 }
28808 diff -urNp linux-2.6.32.44/drivers/char/virtio_console.c linux-2.6.32.44/drivers/char/virtio_console.c
28809 --- linux-2.6.32.44/drivers/char/virtio_console.c 2011-03-27 14:31:47.000000000 -0400
28810 +++ linux-2.6.32.44/drivers/char/virtio_console.c 2011-08-05 20:33:55.000000000 -0400
28811 @@ -133,7 +133,9 @@ static int get_chars(u32 vtermno, char *
28812 * virtqueue, so we let the drivers do some boutique early-output thing. */
28813 int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
28814 {
28815 - virtio_cons.put_chars = put_chars;
28816 + pax_open_kernel();
28817 + *(void **)&virtio_cons.put_chars = put_chars;
28818 + pax_close_kernel();
28819 return hvc_instantiate(0, 0, &virtio_cons);
28820 }
28821
28822 @@ -213,11 +215,13 @@ static int __devinit virtcons_probe(stru
28823 out_vq = vqs[1];
28824
28825 /* Start using the new console output. */
28826 - virtio_cons.get_chars = get_chars;
28827 - virtio_cons.put_chars = put_chars;
28828 - virtio_cons.notifier_add = notifier_add_vio;
28829 - virtio_cons.notifier_del = notifier_del_vio;
28830 - virtio_cons.notifier_hangup = notifier_del_vio;
28831 + pax_open_kernel();
28832 + *(void **)&virtio_cons.get_chars = get_chars;
28833 + *(void **)&virtio_cons.put_chars = put_chars;
28834 + *(void **)&virtio_cons.notifier_add = notifier_add_vio;
28835 + *(void **)&virtio_cons.notifier_del = notifier_del_vio;
28836 + *(void **)&virtio_cons.notifier_hangup = notifier_del_vio;
28837 + pax_close_kernel();
28838
28839 /* The first argument of hvc_alloc() is the virtual console number, so
28840 * we use zero. The second argument is the parameter for the
28841 diff -urNp linux-2.6.32.44/drivers/char/vt.c linux-2.6.32.44/drivers/char/vt.c
28842 --- linux-2.6.32.44/drivers/char/vt.c 2011-03-27 14:31:47.000000000 -0400
28843 +++ linux-2.6.32.44/drivers/char/vt.c 2011-04-17 15:56:46.000000000 -0400
28844 @@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
28845
28846 static void notify_write(struct vc_data *vc, unsigned int unicode)
28847 {
28848 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
28849 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
28850 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
28851 }
28852
28853 diff -urNp linux-2.6.32.44/drivers/char/vt_ioctl.c linux-2.6.32.44/drivers/char/vt_ioctl.c
28854 --- linux-2.6.32.44/drivers/char/vt_ioctl.c 2011-03-27 14:31:47.000000000 -0400
28855 +++ linux-2.6.32.44/drivers/char/vt_ioctl.c 2011-04-17 15:56:46.000000000 -0400
28856 @@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
28857 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
28858 return -EFAULT;
28859
28860 - if (!capable(CAP_SYS_TTY_CONFIG))
28861 - perm = 0;
28862 -
28863 switch (cmd) {
28864 case KDGKBENT:
28865 key_map = key_maps[s];
28866 @@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __
28867 val = (i ? K_HOLE : K_NOSUCHMAP);
28868 return put_user(val, &user_kbe->kb_value);
28869 case KDSKBENT:
28870 + if (!capable(CAP_SYS_TTY_CONFIG))
28871 + perm = 0;
28872 +
28873 if (!perm)
28874 return -EPERM;
28875 +
28876 if (!i && v == K_NOSUCHMAP) {
28877 /* deallocate map */
28878 key_map = key_maps[s];
28879 @@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
28880 int i, j, k;
28881 int ret;
28882
28883 - if (!capable(CAP_SYS_TTY_CONFIG))
28884 - perm = 0;
28885 -
28886 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
28887 if (!kbs) {
28888 ret = -ENOMEM;
28889 @@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
28890 kfree(kbs);
28891 return ((p && *p) ? -EOVERFLOW : 0);
28892 case KDSKBSENT:
28893 + if (!capable(CAP_SYS_TTY_CONFIG))
28894 + perm = 0;
28895 +
28896 if (!perm) {
28897 ret = -EPERM;
28898 goto reterr;
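The vt_ioctl.c hunks above defer the CAP_SYS_TTY_CONFIG test from the top of do_kdsk_ioctl()/do_kdgkb_ioctl() to the write commands (KDSKBENT/KDSKBSENT) only, so read-only keymap queries no longer go through capable() and the bookkeeping that comes with a privilege check. A hedged, condensed illustration of the resulting shape (example_kbd_ioctl is my name):

static int example_kbd_ioctl(unsigned int cmd)
{
	switch (cmd) {
	case KDGKBENT:				/* read: no privilege needed */
		return 0;
	case KDSKBENT:				/* write: check only here    */
		if (!capable(CAP_SYS_TTY_CONFIG))
			return -EPERM;
		return 0;
	default:
		return -ENOIOCTLCMD;
	}
}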
28899 diff -urNp linux-2.6.32.44/drivers/cpufreq/cpufreq.c linux-2.6.32.44/drivers/cpufreq/cpufreq.c
28900 --- linux-2.6.32.44/drivers/cpufreq/cpufreq.c 2011-06-25 12:55:34.000000000 -0400
28901 +++ linux-2.6.32.44/drivers/cpufreq/cpufreq.c 2011-06-25 12:56:37.000000000 -0400
28902 @@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct
28903 complete(&policy->kobj_unregister);
28904 }
28905
28906 -static struct sysfs_ops sysfs_ops = {
28907 +static const struct sysfs_ops sysfs_ops = {
28908 .show = show,
28909 .store = store,
28910 };
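The cpufreq change above opens a long run of identical conversions (cpuidle, ioat, edac, edd, efivars, ibft, memmap and more below): sysfs_ops tables are never written after being defined, so they are declared const and can be placed in read-only memory. A hedged, hypothetical table using the 2.6.32-era show/store signatures:

static ssize_t example_show(struct kobject *kobj, struct attribute *attr,
			    char *buf)
{
	return 0;
}

static ssize_t example_store(struct kobject *kobj, struct attribute *attr,
			     const char *buf, size_t count)
{
	return count;
}

static const struct sysfs_ops example_sysfs_ops = {
	.show	= example_show,
	.store	= example_store,
};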
28911 diff -urNp linux-2.6.32.44/drivers/cpuidle/sysfs.c linux-2.6.32.44/drivers/cpuidle/sysfs.c
28912 --- linux-2.6.32.44/drivers/cpuidle/sysfs.c 2011-03-27 14:31:47.000000000 -0400
28913 +++ linux-2.6.32.44/drivers/cpuidle/sysfs.c 2011-04-17 15:56:46.000000000 -0400
28914 @@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobj
28915 return ret;
28916 }
28917
28918 -static struct sysfs_ops cpuidle_sysfs_ops = {
28919 +static const struct sysfs_ops cpuidle_sysfs_ops = {
28920 .show = cpuidle_show,
28921 .store = cpuidle_store,
28922 };
28923 @@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct
28924 return ret;
28925 }
28926
28927 -static struct sysfs_ops cpuidle_state_sysfs_ops = {
28928 +static const struct sysfs_ops cpuidle_state_sysfs_ops = {
28929 .show = cpuidle_state_show,
28930 };
28931
28932 @@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpui
28933 .release = cpuidle_state_sysfs_release,
28934 };
28935
28936 -static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
28937 +static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
28938 {
28939 kobject_put(&device->kobjs[i]->kobj);
28940 wait_for_completion(&device->kobjs[i]->kobj_unregister);
28941 diff -urNp linux-2.6.32.44/drivers/crypto/hifn_795x.c linux-2.6.32.44/drivers/crypto/hifn_795x.c
28942 --- linux-2.6.32.44/drivers/crypto/hifn_795x.c 2011-03-27 14:31:47.000000000 -0400
28943 +++ linux-2.6.32.44/drivers/crypto/hifn_795x.c 2011-05-16 21:46:57.000000000 -0400
28944 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
28945 0xCA, 0x34, 0x2B, 0x2E};
28946 struct scatterlist sg;
28947
28948 + pax_track_stack();
28949 +
28950 memset(src, 0, sizeof(src));
28951 memset(ctx.key, 0, sizeof(ctx.key));
28952
28953 diff -urNp linux-2.6.32.44/drivers/crypto/padlock-aes.c linux-2.6.32.44/drivers/crypto/padlock-aes.c
28954 --- linux-2.6.32.44/drivers/crypto/padlock-aes.c 2011-03-27 14:31:47.000000000 -0400
28955 +++ linux-2.6.32.44/drivers/crypto/padlock-aes.c 2011-05-16 21:46:57.000000000 -0400
28956 @@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm
28957 struct crypto_aes_ctx gen_aes;
28958 int cpu;
28959
28960 + pax_track_stack();
28961 +
28962 if (key_len % 8) {
28963 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
28964 return -EINVAL;
28965 diff -urNp linux-2.6.32.44/drivers/dma/ioat/dma.c linux-2.6.32.44/drivers/dma/ioat/dma.c
28966 --- linux-2.6.32.44/drivers/dma/ioat/dma.c 2011-03-27 14:31:47.000000000 -0400
28967 +++ linux-2.6.32.44/drivers/dma/ioat/dma.c 2011-04-17 15:56:46.000000000 -0400
28968 @@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, str
28969 return entry->show(&chan->common, page);
28970 }
28971
28972 -struct sysfs_ops ioat_sysfs_ops = {
28973 +const struct sysfs_ops ioat_sysfs_ops = {
28974 .show = ioat_attr_show,
28975 };
28976
28977 diff -urNp linux-2.6.32.44/drivers/dma/ioat/dma.h linux-2.6.32.44/drivers/dma/ioat/dma.h
28978 --- linux-2.6.32.44/drivers/dma/ioat/dma.h 2011-03-27 14:31:47.000000000 -0400
28979 +++ linux-2.6.32.44/drivers/dma/ioat/dma.h 2011-04-17 15:56:46.000000000 -0400
28980 @@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_c
28981 unsigned long *phys_complete);
28982 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
28983 void ioat_kobject_del(struct ioatdma_device *device);
28984 -extern struct sysfs_ops ioat_sysfs_ops;
28985 +extern const struct sysfs_ops ioat_sysfs_ops;
28986 extern struct ioat_sysfs_entry ioat_version_attr;
28987 extern struct ioat_sysfs_entry ioat_cap_attr;
28988 #endif /* IOATDMA_H */
28989 diff -urNp linux-2.6.32.44/drivers/edac/edac_device_sysfs.c linux-2.6.32.44/drivers/edac/edac_device_sysfs.c
28990 --- linux-2.6.32.44/drivers/edac/edac_device_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28991 +++ linux-2.6.32.44/drivers/edac/edac_device_sysfs.c 2011-04-17 15:56:46.000000000 -0400
28992 @@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(s
28993 }
28994
28995 /* edac_dev file operations for an 'ctl_info' */
28996 -static struct sysfs_ops device_ctl_info_ops = {
28997 +static const struct sysfs_ops device_ctl_info_ops = {
28998 .show = edac_dev_ctl_info_show,
28999 .store = edac_dev_ctl_info_store
29000 };
29001 @@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(s
29002 }
29003
29004 /* edac_dev file operations for an 'instance' */
29005 -static struct sysfs_ops device_instance_ops = {
29006 +static const struct sysfs_ops device_instance_ops = {
29007 .show = edac_dev_instance_show,
29008 .store = edac_dev_instance_store
29009 };
29010 @@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(stru
29011 }
29012
29013 /* edac_dev file operations for a 'block' */
29014 -static struct sysfs_ops device_block_ops = {
29015 +static const struct sysfs_ops device_block_ops = {
29016 .show = edac_dev_block_show,
29017 .store = edac_dev_block_store
29018 };
29019 diff -urNp linux-2.6.32.44/drivers/edac/edac_mc_sysfs.c linux-2.6.32.44/drivers/edac/edac_mc_sysfs.c
29020 --- linux-2.6.32.44/drivers/edac/edac_mc_sysfs.c 2011-03-27 14:31:47.000000000 -0400
29021 +++ linux-2.6.32.44/drivers/edac/edac_mc_sysfs.c 2011-04-17 15:56:46.000000000 -0400
29022 @@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kob
29023 return -EIO;
29024 }
29025
29026 -static struct sysfs_ops csrowfs_ops = {
29027 +static const struct sysfs_ops csrowfs_ops = {
29028 .show = csrowdev_show,
29029 .store = csrowdev_store
29030 };
29031 @@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobje
29032 }
29033
29034 /* Intermediate show/store table */
29035 -static struct sysfs_ops mci_ops = {
29036 +static const struct sysfs_ops mci_ops = {
29037 .show = mcidev_show,
29038 .store = mcidev_store
29039 };
29040 diff -urNp linux-2.6.32.44/drivers/edac/edac_pci_sysfs.c linux-2.6.32.44/drivers/edac/edac_pci_sysfs.c
29041 --- linux-2.6.32.44/drivers/edac/edac_pci_sysfs.c 2011-03-27 14:31:47.000000000 -0400
29042 +++ linux-2.6.32.44/drivers/edac/edac_pci_sysfs.c 2011-05-04 17:56:20.000000000 -0400
29043 @@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log
29044 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
29045 static int edac_pci_poll_msec = 1000; /* one second workq period */
29046
29047 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
29048 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
29049 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
29050 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
29051
29052 static struct kobject *edac_pci_top_main_kobj;
29053 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
29054 @@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(s
29055 }
29056
29057 /* fs_ops table */
29058 -static struct sysfs_ops pci_instance_ops = {
29059 +static const struct sysfs_ops pci_instance_ops = {
29060 .show = edac_pci_instance_show,
29061 .store = edac_pci_instance_store
29062 };
29063 @@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct
29064 return -EIO;
29065 }
29066
29067 -static struct sysfs_ops edac_pci_sysfs_ops = {
29068 +static const struct sysfs_ops edac_pci_sysfs_ops = {
29069 .show = edac_pci_dev_show,
29070 .store = edac_pci_dev_store
29071 };
29072 @@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(str
29073 edac_printk(KERN_CRIT, EDAC_PCI,
29074 "Signaled System Error on %s\n",
29075 pci_name(dev));
29076 - atomic_inc(&pci_nonparity_count);
29077 + atomic_inc_unchecked(&pci_nonparity_count);
29078 }
29079
29080 if (status & (PCI_STATUS_PARITY)) {
29081 @@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(str
29082 "Master Data Parity Error on %s\n",
29083 pci_name(dev));
29084
29085 - atomic_inc(&pci_parity_count);
29086 + atomic_inc_unchecked(&pci_parity_count);
29087 }
29088
29089 if (status & (PCI_STATUS_DETECTED_PARITY)) {
29090 @@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(str
29091 "Detected Parity Error on %s\n",
29092 pci_name(dev));
29093
29094 - atomic_inc(&pci_parity_count);
29095 + atomic_inc_unchecked(&pci_parity_count);
29096 }
29097 }
29098
29099 @@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(str
29100 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
29101 "Signaled System Error on %s\n",
29102 pci_name(dev));
29103 - atomic_inc(&pci_nonparity_count);
29104 + atomic_inc_unchecked(&pci_nonparity_count);
29105 }
29106
29107 if (status & (PCI_STATUS_PARITY)) {
29108 @@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(str
29109 "Master Data Parity Error on "
29110 "%s\n", pci_name(dev));
29111
29112 - atomic_inc(&pci_parity_count);
29113 + atomic_inc_unchecked(&pci_parity_count);
29114 }
29115
29116 if (status & (PCI_STATUS_DETECTED_PARITY)) {
29117 @@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(str
29118 "Detected Parity Error on %s\n",
29119 pci_name(dev));
29120
29121 - atomic_inc(&pci_parity_count);
29122 + atomic_inc_unchecked(&pci_parity_count);
29123 }
29124 }
29125 }
29126 @@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
29127 if (!check_pci_errors)
29128 return;
29129
29130 - before_count = atomic_read(&pci_parity_count);
29131 + before_count = atomic_read_unchecked(&pci_parity_count);
29132
29133 /* scan all PCI devices looking for a Parity Error on devices and
29134 * bridges.
29135 @@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
29136 /* Only if operator has selected panic on PCI Error */
29137 if (edac_pci_get_panic_on_pe()) {
29138 /* If the count is different 'after' from 'before' */
29139 - if (before_count != atomic_read(&pci_parity_count))
29140 + if (before_count != atomic_read_unchecked(&pci_parity_count))
29141 panic("EDAC: PCI Parity Error");
29142 }
29143 }
29144 diff -urNp linux-2.6.32.44/drivers/firewire/core-card.c linux-2.6.32.44/drivers/firewire/core-card.c
29145 --- linux-2.6.32.44/drivers/firewire/core-card.c 2011-03-27 14:31:47.000000000 -0400
29146 +++ linux-2.6.32.44/drivers/firewire/core-card.c 2011-08-05 20:33:55.000000000 -0400
29147 @@ -569,8 +569,10 @@ void fw_core_remove_card(struct fw_card
29148 mutex_unlock(&card_mutex);
29149
29150 /* Switch off most of the card driver interface. */
29151 - dummy_driver.free_iso_context = card->driver->free_iso_context;
29152 - dummy_driver.stop_iso = card->driver->stop_iso;
29153 + pax_open_kernel();
29154 + *(void **)&dummy_driver.free_iso_context = card->driver->free_iso_context;
29155 + *(void **)&dummy_driver.stop_iso = card->driver->stop_iso;
29156 + pax_close_kernel();
29157 card->driver = &dummy_driver;
29158
29159 fw_destroy_nodes(card);
29160 diff -urNp linux-2.6.32.44/drivers/firewire/core-cdev.c linux-2.6.32.44/drivers/firewire/core-cdev.c
29161 --- linux-2.6.32.44/drivers/firewire/core-cdev.c 2011-03-27 14:31:47.000000000 -0400
29162 +++ linux-2.6.32.44/drivers/firewire/core-cdev.c 2011-04-17 15:56:46.000000000 -0400
29163 @@ -1141,8 +1141,7 @@ static int init_iso_resource(struct clie
29164 int ret;
29165
29166 if ((request->channels == 0 && request->bandwidth == 0) ||
29167 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
29168 - request->bandwidth < 0)
29169 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
29170 return -EINVAL;
29171
29172 r = kmalloc(sizeof(*r), GFP_KERNEL);
29173 diff -urNp linux-2.6.32.44/drivers/firewire/core-transaction.c linux-2.6.32.44/drivers/firewire/core-transaction.c
29174 --- linux-2.6.32.44/drivers/firewire/core-transaction.c 2011-03-27 14:31:47.000000000 -0400
29175 +++ linux-2.6.32.44/drivers/firewire/core-transaction.c 2011-05-16 21:46:57.000000000 -0400
29176 @@ -36,6 +36,7 @@
29177 #include <linux/string.h>
29178 #include <linux/timer.h>
29179 #include <linux/types.h>
29180 +#include <linux/sched.h>
29181
29182 #include <asm/byteorder.h>
29183
29184 @@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *c
29185 struct transaction_callback_data d;
29186 struct fw_transaction t;
29187
29188 + pax_track_stack();
29189 +
29190 init_completion(&d.done);
29191 d.payload = payload;
29192 fw_send_request(card, &t, tcode, destination_id, generation, speed,
29193 diff -urNp linux-2.6.32.44/drivers/firmware/dmi_scan.c linux-2.6.32.44/drivers/firmware/dmi_scan.c
29194 --- linux-2.6.32.44/drivers/firmware/dmi_scan.c 2011-03-27 14:31:47.000000000 -0400
29195 +++ linux-2.6.32.44/drivers/firmware/dmi_scan.c 2011-04-17 15:56:46.000000000 -0400
29196 @@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
29197 }
29198 }
29199 else {
29200 - /*
29201 - * no iounmap() for that ioremap(); it would be a no-op, but
29202 - * it's so early in setup that sucker gets confused into doing
29203 - * what it shouldn't if we actually call it.
29204 - */
29205 p = dmi_ioremap(0xF0000, 0x10000);
29206 if (p == NULL)
29207 goto error;
29208 diff -urNp linux-2.6.32.44/drivers/firmware/edd.c linux-2.6.32.44/drivers/firmware/edd.c
29209 --- linux-2.6.32.44/drivers/firmware/edd.c 2011-03-27 14:31:47.000000000 -0400
29210 +++ linux-2.6.32.44/drivers/firmware/edd.c 2011-04-17 15:56:46.000000000 -0400
29211 @@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, str
29212 return ret;
29213 }
29214
29215 -static struct sysfs_ops edd_attr_ops = {
29216 +static const struct sysfs_ops edd_attr_ops = {
29217 .show = edd_attr_show,
29218 };
29219
29220 diff -urNp linux-2.6.32.44/drivers/firmware/efivars.c linux-2.6.32.44/drivers/firmware/efivars.c
29221 --- linux-2.6.32.44/drivers/firmware/efivars.c 2011-03-27 14:31:47.000000000 -0400
29222 +++ linux-2.6.32.44/drivers/firmware/efivars.c 2011-04-17 15:56:46.000000000 -0400
29223 @@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct
29224 return ret;
29225 }
29226
29227 -static struct sysfs_ops efivar_attr_ops = {
29228 +static const struct sysfs_ops efivar_attr_ops = {
29229 .show = efivar_attr_show,
29230 .store = efivar_attr_store,
29231 };
29232 diff -urNp linux-2.6.32.44/drivers/firmware/iscsi_ibft.c linux-2.6.32.44/drivers/firmware/iscsi_ibft.c
29233 --- linux-2.6.32.44/drivers/firmware/iscsi_ibft.c 2011-03-27 14:31:47.000000000 -0400
29234 +++ linux-2.6.32.44/drivers/firmware/iscsi_ibft.c 2011-04-17 15:56:46.000000000 -0400
29235 @@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struc
29236 return ret;
29237 }
29238
29239 -static struct sysfs_ops ibft_attr_ops = {
29240 +static const struct sysfs_ops ibft_attr_ops = {
29241 .show = ibft_show_attribute,
29242 };
29243
29244 diff -urNp linux-2.6.32.44/drivers/firmware/memmap.c linux-2.6.32.44/drivers/firmware/memmap.c
29245 --- linux-2.6.32.44/drivers/firmware/memmap.c 2011-03-27 14:31:47.000000000 -0400
29246 +++ linux-2.6.32.44/drivers/firmware/memmap.c 2011-04-17 15:56:46.000000000 -0400
29247 @@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
29248 NULL
29249 };
29250
29251 -static struct sysfs_ops memmap_attr_ops = {
29252 +static const struct sysfs_ops memmap_attr_ops = {
29253 .show = memmap_attr_show,
29254 };
29255
29256 diff -urNp linux-2.6.32.44/drivers/gpio/vr41xx_giu.c linux-2.6.32.44/drivers/gpio/vr41xx_giu.c
29257 --- linux-2.6.32.44/drivers/gpio/vr41xx_giu.c 2011-03-27 14:31:47.000000000 -0400
29258 +++ linux-2.6.32.44/drivers/gpio/vr41xx_giu.c 2011-05-04 17:56:28.000000000 -0400
29259 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
29260 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
29261 maskl, pendl, maskh, pendh);
29262
29263 - atomic_inc(&irq_err_count);
29264 + atomic_inc_unchecked(&irq_err_count);
29265
29266 return -EINVAL;
29267 }
29268 diff -urNp linux-2.6.32.44/drivers/gpu/drm/drm_crtc_helper.c linux-2.6.32.44/drivers/gpu/drm/drm_crtc_helper.c
29269 --- linux-2.6.32.44/drivers/gpu/drm/drm_crtc_helper.c 2011-03-27 14:31:47.000000000 -0400
29270 +++ linux-2.6.32.44/drivers/gpu/drm/drm_crtc_helper.c 2011-05-16 21:46:57.000000000 -0400
29271 @@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct d
29272 struct drm_crtc *tmp;
29273 int crtc_mask = 1;
29274
29275 - WARN(!crtc, "checking null crtc?");
29276 + BUG_ON(!crtc);
29277
29278 dev = crtc->dev;
29279
29280 @@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm
29281
29282 adjusted_mode = drm_mode_duplicate(dev, mode);
29283
29284 + pax_track_stack();
29285 +
29286 crtc->enabled = drm_helper_crtc_in_use(crtc);
29287
29288 if (!crtc->enabled)
29289 diff -urNp linux-2.6.32.44/drivers/gpu/drm/drm_drv.c linux-2.6.32.44/drivers/gpu/drm/drm_drv.c
29290 --- linux-2.6.32.44/drivers/gpu/drm/drm_drv.c 2011-03-27 14:31:47.000000000 -0400
29291 +++ linux-2.6.32.44/drivers/gpu/drm/drm_drv.c 2011-04-17 15:56:46.000000000 -0400
29292 @@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struc
29293 char *kdata = NULL;
29294
29295 atomic_inc(&dev->ioctl_count);
29296 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
29297 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
29298 ++file_priv->ioctl_count;
29299
29300 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
29301 diff -urNp linux-2.6.32.44/drivers/gpu/drm/drm_fops.c linux-2.6.32.44/drivers/gpu/drm/drm_fops.c
29302 --- linux-2.6.32.44/drivers/gpu/drm/drm_fops.c 2011-03-27 14:31:47.000000000 -0400
29303 +++ linux-2.6.32.44/drivers/gpu/drm/drm_fops.c 2011-04-17 15:56:46.000000000 -0400
29304 @@ -66,7 +66,7 @@ static int drm_setup(struct drm_device *
29305 }
29306
29307 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
29308 - atomic_set(&dev->counts[i], 0);
29309 + atomic_set_unchecked(&dev->counts[i], 0);
29310
29311 dev->sigdata.lock = NULL;
29312
29313 @@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct
29314
29315 retcode = drm_open_helper(inode, filp, dev);
29316 if (!retcode) {
29317 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
29318 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
29319 spin_lock(&dev->count_lock);
29320 - if (!dev->open_count++) {
29321 + if (local_inc_return(&dev->open_count) == 1) {
29322 spin_unlock(&dev->count_lock);
29323 retcode = drm_setup(dev);
29324 goto out;
29325 @@ -435,7 +435,7 @@ int drm_release(struct inode *inode, str
29326
29327 lock_kernel();
29328
29329 - DRM_DEBUG("open_count = %d\n", dev->open_count);
29330 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
29331
29332 if (dev->driver->preclose)
29333 dev->driver->preclose(dev, file_priv);
29334 @@ -447,7 +447,7 @@ int drm_release(struct inode *inode, str
29335 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
29336 task_pid_nr(current),
29337 (long)old_encode_dev(file_priv->minor->device),
29338 - dev->open_count);
29339 + local_read(&dev->open_count));
29340
29341 /* if the master has gone away we can't do anything with the lock */
29342 if (file_priv->minor->master)
29343 @@ -524,9 +524,9 @@ int drm_release(struct inode *inode, str
29344 * End inline drm_release
29345 */
29346
29347 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
29348 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
29349 spin_lock(&dev->count_lock);
29350 - if (!--dev->open_count) {
29351 + if (local_dec_and_test(&dev->open_count)) {
29352 if (atomic_read(&dev->ioctl_count)) {
29353 DRM_ERROR("Device busy: %d\n",
29354 atomic_read(&dev->ioctl_count));
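
In the drm_fops.c hunks above, dev->open_count also moves from a plain int to a local_t, with local_inc_return() == 1 identifying the first opener and local_dec_and_test() the last closer (the count_lock spinlock is still taken around the slow paths). Below is a userspace illustration of that first-open/last-close idiom in C11 atomics, not the kernel's local_t API; the function names are made up.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int open_count;

static void device_open(void)
{
    /* previous value 0 means this call took the count from 0 to 1 */
    if (atomic_fetch_add(&open_count, 1) == 0)
        puts("first opener: perform one-time setup");
}

static void device_release(void)
{
    /* previous value 1 means this call took the count from 1 back to 0 */
    if (atomic_fetch_sub(&open_count, 1) == 1)
        puts("last closer: perform teardown");
}

int main(void)
{
    device_open();
    device_open();
    device_release();
    device_release();
    return 0;
}
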
29355 diff -urNp linux-2.6.32.44/drivers/gpu/drm/drm_gem.c linux-2.6.32.44/drivers/gpu/drm/drm_gem.c
29356 --- linux-2.6.32.44/drivers/gpu/drm/drm_gem.c 2011-03-27 14:31:47.000000000 -0400
29357 +++ linux-2.6.32.44/drivers/gpu/drm/drm_gem.c 2011-04-17 15:56:46.000000000 -0400
29358 @@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
29359 spin_lock_init(&dev->object_name_lock);
29360 idr_init(&dev->object_name_idr);
29361 atomic_set(&dev->object_count, 0);
29362 - atomic_set(&dev->object_memory, 0);
29363 + atomic_set_unchecked(&dev->object_memory, 0);
29364 atomic_set(&dev->pin_count, 0);
29365 - atomic_set(&dev->pin_memory, 0);
29366 + atomic_set_unchecked(&dev->pin_memory, 0);
29367 atomic_set(&dev->gtt_count, 0);
29368 - atomic_set(&dev->gtt_memory, 0);
29369 + atomic_set_unchecked(&dev->gtt_memory, 0);
29370
29371 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
29372 if (!mm) {
29373 @@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *
29374 goto fput;
29375 }
29376 atomic_inc(&dev->object_count);
29377 - atomic_add(obj->size, &dev->object_memory);
29378 + atomic_add_unchecked(obj->size, &dev->object_memory);
29379 return obj;
29380 fput:
29381 fput(obj->filp);
29382 @@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
29383
29384 fput(obj->filp);
29385 atomic_dec(&dev->object_count);
29386 - atomic_sub(obj->size, &dev->object_memory);
29387 + atomic_sub_unchecked(obj->size, &dev->object_memory);
29388 kfree(obj);
29389 }
29390 EXPORT_SYMBOL(drm_gem_object_free);
29391 diff -urNp linux-2.6.32.44/drivers/gpu/drm/drm_info.c linux-2.6.32.44/drivers/gpu/drm/drm_info.c
29392 --- linux-2.6.32.44/drivers/gpu/drm/drm_info.c 2011-03-27 14:31:47.000000000 -0400
29393 +++ linux-2.6.32.44/drivers/gpu/drm/drm_info.c 2011-04-17 15:56:46.000000000 -0400
29394 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
29395 struct drm_local_map *map;
29396 struct drm_map_list *r_list;
29397
29398 - /* Hardcoded from _DRM_FRAME_BUFFER,
29399 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
29400 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
29401 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
29402 + static const char * const types[] = {
29403 + [_DRM_FRAME_BUFFER] = "FB",
29404 + [_DRM_REGISTERS] = "REG",
29405 + [_DRM_SHM] = "SHM",
29406 + [_DRM_AGP] = "AGP",
29407 + [_DRM_SCATTER_GATHER] = "SG",
29408 + [_DRM_CONSISTENT] = "PCI",
29409 + [_DRM_GEM] = "GEM" };
29410 const char *type;
29411 int i;
29412
29413 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
29414 map = r_list->map;
29415 if (!map)
29416 continue;
29417 - if (map->type < 0 || map->type > 5)
29418 + if (map->type >= ARRAY_SIZE(types))
29419 type = "??";
29420 else
29421 type = types[map->type];
29422 @@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file
29423 struct drm_device *dev = node->minor->dev;
29424
29425 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
29426 - seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
29427 + seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
29428 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
29429 - seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
29430 - seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
29431 + seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
29432 + seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
29433 seq_printf(m, "%d gtt total\n", dev->gtt_total);
29434 return 0;
29435 }
29436 @@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, voi
29437 mutex_lock(&dev->struct_mutex);
29438 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
29439 atomic_read(&dev->vma_count),
29440 +#ifdef CONFIG_GRKERNSEC_HIDESYM
29441 + NULL, 0);
29442 +#else
29443 high_memory, (u64)virt_to_phys(high_memory));
29444 +#endif
29445
29446 list_for_each_entry(pt, &dev->vmalist, head) {
29447 vma = pt->vma;
29448 @@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, voi
29449 continue;
29450 seq_printf(m,
29451 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
29452 - pt->pid, vma->vm_start, vma->vm_end,
29453 + pt->pid,
29454 +#ifdef CONFIG_GRKERNSEC_HIDESYM
29455 + 0, 0,
29456 +#else
29457 + vma->vm_start, vma->vm_end,
29458 +#endif
29459 vma->vm_flags & VM_READ ? 'r' : '-',
29460 vma->vm_flags & VM_WRITE ? 'w' : '-',
29461 vma->vm_flags & VM_EXEC ? 'x' : '-',
29462 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
29463 vma->vm_flags & VM_LOCKED ? 'l' : '-',
29464 vma->vm_flags & VM_IO ? 'i' : '-',
29465 +#ifdef CONFIG_GRKERNSEC_HIDESYM
29466 + 0);
29467 +#else
29468 vma->vm_pgoff);
29469 +#endif
29470
29471 #if defined(__i386__)
29472 pgprot = pgprot_val(vma->vm_page_prot);
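
The drm_vm_info() change above swaps the positional string array and the hard-coded map->type > 5 bound for a designated-initializer table indexed by the map-type constants plus an ARRAY_SIZE() check, which also makes room for the new GEM entry. A standalone sketch of that table style; the enum, macro and values are stand-ins, not the DRM definitions.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum map_type { MAP_FB, MAP_REG, MAP_SHM, MAP_AGP, MAP_SG, MAP_PCI, MAP_GEM };

static const char * const types[] = {
    [MAP_FB]  = "FB",
    [MAP_REG] = "REG",
    [MAP_SHM] = "SHM",
    [MAP_AGP] = "AGP",
    [MAP_SG]  = "SG",
    [MAP_PCI] = "PCI",
    [MAP_GEM] = "GEM",
};

static const char *type_name(unsigned int t)
{
    /* the bound follows the table size; a NULL check covers sparse gaps */
    if (t >= ARRAY_SIZE(types) || !types[t])
        return "??";
    return types[t];
}

int main(void)
{
    printf("%s %s\n", type_name(MAP_GEM), type_name(42));
    return 0;
}
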
29473 diff -urNp linux-2.6.32.44/drivers/gpu/drm/drm_ioctl.c linux-2.6.32.44/drivers/gpu/drm/drm_ioctl.c
29474 --- linux-2.6.32.44/drivers/gpu/drm/drm_ioctl.c 2011-03-27 14:31:47.000000000 -0400
29475 +++ linux-2.6.32.44/drivers/gpu/drm/drm_ioctl.c 2011-04-17 15:56:46.000000000 -0400
29476 @@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev,
29477 stats->data[i].value =
29478 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
29479 else
29480 - stats->data[i].value = atomic_read(&dev->counts[i]);
29481 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
29482 stats->data[i].type = dev->types[i];
29483 }
29484
29485 diff -urNp linux-2.6.32.44/drivers/gpu/drm/drm_lock.c linux-2.6.32.44/drivers/gpu/drm/drm_lock.c
29486 --- linux-2.6.32.44/drivers/gpu/drm/drm_lock.c 2011-03-27 14:31:47.000000000 -0400
29487 +++ linux-2.6.32.44/drivers/gpu/drm/drm_lock.c 2011-04-17 15:56:46.000000000 -0400
29488 @@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, voi
29489 if (drm_lock_take(&master->lock, lock->context)) {
29490 master->lock.file_priv = file_priv;
29491 master->lock.lock_time = jiffies;
29492 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
29493 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
29494 break; /* Got lock */
29495 }
29496
29497 @@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, v
29498 return -EINVAL;
29499 }
29500
29501 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
29502 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
29503
29504 /* kernel_context_switch isn't used by any of the x86 drm
29505 * modules but is required by the Sparc driver.
29506 diff -urNp linux-2.6.32.44/drivers/gpu/drm/i810/i810_dma.c linux-2.6.32.44/drivers/gpu/drm/i810/i810_dma.c
29507 --- linux-2.6.32.44/drivers/gpu/drm/i810/i810_dma.c 2011-03-27 14:31:47.000000000 -0400
29508 +++ linux-2.6.32.44/drivers/gpu/drm/i810/i810_dma.c 2011-04-17 15:56:46.000000000 -0400
29509 @@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_de
29510 dma->buflist[vertex->idx],
29511 vertex->discard, vertex->used);
29512
29513 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
29514 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
29515 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
29516 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
29517 sarea_priv->last_enqueue = dev_priv->counter - 1;
29518 sarea_priv->last_dispatch = (int)hw_status[5];
29519
29520 @@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device
29521 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
29522 mc->last_render);
29523
29524 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
29525 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
29526 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
29527 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
29528 sarea_priv->last_enqueue = dev_priv->counter - 1;
29529 sarea_priv->last_dispatch = (int)hw_status[5];
29530
29531 diff -urNp linux-2.6.32.44/drivers/gpu/drm/i810/i810_drv.h linux-2.6.32.44/drivers/gpu/drm/i810/i810_drv.h
29532 --- linux-2.6.32.44/drivers/gpu/drm/i810/i810_drv.h 2011-03-27 14:31:47.000000000 -0400
29533 +++ linux-2.6.32.44/drivers/gpu/drm/i810/i810_drv.h 2011-05-04 17:56:28.000000000 -0400
29534 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
29535 int page_flipping;
29536
29537 wait_queue_head_t irq_queue;
29538 - atomic_t irq_received;
29539 - atomic_t irq_emitted;
29540 + atomic_unchecked_t irq_received;
29541 + atomic_unchecked_t irq_emitted;
29542
29543 int front_offset;
29544 } drm_i810_private_t;
29545 diff -urNp linux-2.6.32.44/drivers/gpu/drm/i830/i830_drv.h linux-2.6.32.44/drivers/gpu/drm/i830/i830_drv.h
29546 --- linux-2.6.32.44/drivers/gpu/drm/i830/i830_drv.h 2011-03-27 14:31:47.000000000 -0400
29547 +++ linux-2.6.32.44/drivers/gpu/drm/i830/i830_drv.h 2011-05-04 17:56:28.000000000 -0400
29548 @@ -115,8 +115,8 @@ typedef struct drm_i830_private {
29549 int page_flipping;
29550
29551 wait_queue_head_t irq_queue;
29552 - atomic_t irq_received;
29553 - atomic_t irq_emitted;
29554 + atomic_unchecked_t irq_received;
29555 + atomic_unchecked_t irq_emitted;
29556
29557 int use_mi_batchbuffer_start;
29558
29559 diff -urNp linux-2.6.32.44/drivers/gpu/drm/i830/i830_irq.c linux-2.6.32.44/drivers/gpu/drm/i830/i830_irq.c
29560 --- linux-2.6.32.44/drivers/gpu/drm/i830/i830_irq.c 2011-03-27 14:31:47.000000000 -0400
29561 +++ linux-2.6.32.44/drivers/gpu/drm/i830/i830_irq.c 2011-05-04 17:56:28.000000000 -0400
29562 @@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_
29563
29564 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
29565
29566 - atomic_inc(&dev_priv->irq_received);
29567 + atomic_inc_unchecked(&dev_priv->irq_received);
29568 wake_up_interruptible(&dev_priv->irq_queue);
29569
29570 return IRQ_HANDLED;
29571 @@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_devi
29572
29573 DRM_DEBUG("%s\n", __func__);
29574
29575 - atomic_inc(&dev_priv->irq_emitted);
29576 + atomic_inc_unchecked(&dev_priv->irq_emitted);
29577
29578 BEGIN_LP_RING(2);
29579 OUT_RING(0);
29580 OUT_RING(GFX_OP_USER_INTERRUPT);
29581 ADVANCE_LP_RING();
29582
29583 - return atomic_read(&dev_priv->irq_emitted);
29584 + return atomic_read_unchecked(&dev_priv->irq_emitted);
29585 }
29586
29587 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
29588 @@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_devi
29589
29590 DRM_DEBUG("%s\n", __func__);
29591
29592 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
29593 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
29594 return 0;
29595
29596 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
29597 @@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_devi
29598
29599 for (;;) {
29600 __set_current_state(TASK_INTERRUPTIBLE);
29601 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
29602 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
29603 break;
29604 if ((signed)(end - jiffies) <= 0) {
29605 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
29606 @@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct d
29607 I830_WRITE16(I830REG_HWSTAM, 0xffff);
29608 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
29609 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
29610 - atomic_set(&dev_priv->irq_received, 0);
29611 - atomic_set(&dev_priv->irq_emitted, 0);
29612 + atomic_set_unchecked(&dev_priv->irq_received, 0);
29613 + atomic_set_unchecked(&dev_priv->irq_emitted, 0);
29614 init_waitqueue_head(&dev_priv->irq_queue);
29615 }
29616
29617 diff -urNp linux-2.6.32.44/drivers/gpu/drm/i915/dvo_ch7017.c linux-2.6.32.44/drivers/gpu/drm/i915/dvo_ch7017.c
29618 --- linux-2.6.32.44/drivers/gpu/drm/i915/dvo_ch7017.c 2011-03-27 14:31:47.000000000 -0400
29619 +++ linux-2.6.32.44/drivers/gpu/drm/i915/dvo_ch7017.c 2011-04-17 15:56:46.000000000 -0400
29620 @@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_
29621 }
29622 }
29623
29624 -struct intel_dvo_dev_ops ch7017_ops = {
29625 +const struct intel_dvo_dev_ops ch7017_ops = {
29626 .init = ch7017_init,
29627 .detect = ch7017_detect,
29628 .mode_valid = ch7017_mode_valid,
29629 diff -urNp linux-2.6.32.44/drivers/gpu/drm/i915/dvo_ch7xxx.c linux-2.6.32.44/drivers/gpu/drm/i915/dvo_ch7xxx.c
29630 --- linux-2.6.32.44/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-03-27 14:31:47.000000000 -0400
29631 +++ linux-2.6.32.44/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-04-17 15:56:46.000000000 -0400
29632 @@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_
29633 }
29634 }
29635
29636 -struct intel_dvo_dev_ops ch7xxx_ops = {
29637 +const struct intel_dvo_dev_ops ch7xxx_ops = {
29638 .init = ch7xxx_init,
29639 .detect = ch7xxx_detect,
29640 .mode_valid = ch7xxx_mode_valid,
29641 diff -urNp linux-2.6.32.44/drivers/gpu/drm/i915/dvo.h linux-2.6.32.44/drivers/gpu/drm/i915/dvo.h
29642 --- linux-2.6.32.44/drivers/gpu/drm/i915/dvo.h 2011-03-27 14:31:47.000000000 -0400
29643 +++ linux-2.6.32.44/drivers/gpu/drm/i915/dvo.h 2011-04-17 15:56:46.000000000 -0400
29644 @@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
29645 *
29646 * \return singly-linked list of modes or NULL if no modes found.
29647 */
29648 - struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
29649 + struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
29650
29651 /**
29652 * Clean up driver-specific bits of the output
29653 */
29654 - void (*destroy) (struct intel_dvo_device *dvo);
29655 + void (* const destroy) (struct intel_dvo_device *dvo);
29656
29657 /**
29658 * Debugging hook to dump device registers to log file
29659 */
29660 - void (*dump_regs)(struct intel_dvo_device *dvo);
29661 + void (* const dump_regs)(struct intel_dvo_device *dvo);
29662 };
29663
29664 -extern struct intel_dvo_dev_ops sil164_ops;
29665 -extern struct intel_dvo_dev_ops ch7xxx_ops;
29666 -extern struct intel_dvo_dev_ops ivch_ops;
29667 -extern struct intel_dvo_dev_ops tfp410_ops;
29668 -extern struct intel_dvo_dev_ops ch7017_ops;
29669 +extern const struct intel_dvo_dev_ops sil164_ops;
29670 +extern const struct intel_dvo_dev_ops ch7xxx_ops;
29671 +extern const struct intel_dvo_dev_ops ivch_ops;
29672 +extern const struct intel_dvo_dev_ops tfp410_ops;
29673 +extern const struct intel_dvo_dev_ops ch7017_ops;
29674
29675 #endif /* _INTEL_DVO_H */
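
The dvo.h hunk above makes the intel_dvo_dev_ops function-pointer members const, and the surrounding ch7017/ch7xxx (and following dvo) changes constify the ops instances themselves, so each dispatch table can be placed in read-only data and cannot be repointed at run time. A small self-contained example of the same construct; the struct and functions below are invented for illustration.

#include <stdio.h>

struct demo_ops {
    void (* const init)(void);
    void (* const destroy)(void);
};

static void demo_init(void)    { puts("init");    }
static void demo_destroy(void) { puts("destroy"); }

static const struct demo_ops demo_ops = {
    .init    = demo_init,
    .destroy = demo_destroy,
};

int main(void)
{
    demo_ops.init();
    demo_ops.destroy();
    /* demo_ops.init = demo_destroy;  -- rejected by the compiler: the table is read-only */
    return 0;
}
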
29676 diff -urNp linux-2.6.32.44/drivers/gpu/drm/i915/dvo_ivch.c linux-2.6.32.44/drivers/gpu/drm/i915/dvo_ivch.c
29677 --- linux-2.6.32.44/drivers/gpu/drm/i915/dvo_ivch.c 2011-03-27 14:31:47.000000000 -0400
29678 +++ linux-2.6.32.44/drivers/gpu/drm/i915/dvo_ivch.c 2011-04-17 15:56:46.000000000 -0400
29679 @@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dv
29680 }
29681 }
29682
29683 -struct intel_dvo_dev_ops ivch_ops= {
29684 +const struct intel_dvo_dev_ops ivch_ops= {
29685 .init = ivch_init,
29686 .dpms = ivch_dpms,
29687 .save = ivch_save,
29688 diff -urNp linux-2.6.32.44/drivers/gpu/drm/i915/dvo_sil164.c linux-2.6.32.44/drivers/gpu/drm/i915/dvo_sil164.c
29689 --- linux-2.6.32.44/drivers/gpu/drm/i915/dvo_sil164.c 2011-03-27 14:31:47.000000000 -0400
29690 +++ linux-2.6.32.44/drivers/gpu/drm/i915/dvo_sil164.c 2011-04-17 15:56:46.000000000 -0400
29691 @@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_
29692 }
29693 }
29694
29695 -struct intel_dvo_dev_ops sil164_ops = {
29696 +const struct intel_dvo_dev_ops sil164_ops = {
29697 .init = sil164_init,
29698 .detect = sil164_detect,
29699 .mode_valid = sil164_mode_valid,
29700 diff -urNp linux-2.6.32.44/drivers/gpu/drm/i915/dvo_tfp410.c linux-2.6.32.44/drivers/gpu/drm/i915/dvo_tfp410.c
29701 --- linux-2.6.32.44/drivers/gpu/drm/i915/dvo_tfp410.c 2011-03-27 14:31:47.000000000 -0400
29702 +++ linux-2.6.32.44/drivers/gpu/drm/i915/dvo_tfp410.c 2011-04-17 15:56:46.000000000 -0400
29703 @@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_
29704 }
29705 }
29706
29707 -struct intel_dvo_dev_ops tfp410_ops = {
29708 +const struct intel_dvo_dev_ops tfp410_ops = {
29709 .init = tfp410_init,
29710 .detect = tfp410_detect,
29711 .mode_valid = tfp410_mode_valid,
29712 diff -urNp linux-2.6.32.44/drivers/gpu/drm/i915/i915_debugfs.c linux-2.6.32.44/drivers/gpu/drm/i915/i915_debugfs.c
29713 --- linux-2.6.32.44/drivers/gpu/drm/i915/i915_debugfs.c 2011-03-27 14:31:47.000000000 -0400
29714 +++ linux-2.6.32.44/drivers/gpu/drm/i915/i915_debugfs.c 2011-05-04 17:56:28.000000000 -0400
29715 @@ -192,7 +192,7 @@ static int i915_interrupt_info(struct se
29716 I915_READ(GTIMR));
29717 }
29718 seq_printf(m, "Interrupts received: %d\n",
29719 - atomic_read(&dev_priv->irq_received));
29720 + atomic_read_unchecked(&dev_priv->irq_received));
29721 if (dev_priv->hw_status_page != NULL) {
29722 seq_printf(m, "Current sequence: %d\n",
29723 i915_get_gem_seqno(dev));
29724 diff -urNp linux-2.6.32.44/drivers/gpu/drm/i915/i915_drv.c linux-2.6.32.44/drivers/gpu/drm/i915/i915_drv.c
29725 --- linux-2.6.32.44/drivers/gpu/drm/i915/i915_drv.c 2011-03-27 14:31:47.000000000 -0400
29726 +++ linux-2.6.32.44/drivers/gpu/drm/i915/i915_drv.c 2011-04-17 15:56:46.000000000 -0400
29727 @@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
29728 return i915_resume(dev);
29729 }
29730
29731 -static struct vm_operations_struct i915_gem_vm_ops = {
29732 +static const struct vm_operations_struct i915_gem_vm_ops = {
29733 .fault = i915_gem_fault,
29734 .open = drm_gem_vm_open,
29735 .close = drm_gem_vm_close,
29736 diff -urNp linux-2.6.32.44/drivers/gpu/drm/i915/i915_drv.h linux-2.6.32.44/drivers/gpu/drm/i915/i915_drv.h
29737 --- linux-2.6.32.44/drivers/gpu/drm/i915/i915_drv.h 2011-03-27 14:31:47.000000000 -0400
29738 +++ linux-2.6.32.44/drivers/gpu/drm/i915/i915_drv.h 2011-08-05 20:33:55.000000000 -0400
29739 @@ -168,7 +168,7 @@ struct drm_i915_display_funcs {
29740 /* display clock increase/decrease */
29741 /* pll clock increase/decrease */
29742 /* clock gating init */
29743 -};
29744 +} __no_const;
29745
29746 typedef struct drm_i915_private {
29747 struct drm_device *dev;
29748 @@ -197,7 +197,7 @@ typedef struct drm_i915_private {
29749 int page_flipping;
29750
29751 wait_queue_head_t irq_queue;
29752 - atomic_t irq_received;
29753 + atomic_unchecked_t irq_received;
29754 /** Protects user_irq_refcount and irq_mask_reg */
29755 spinlock_t user_irq_lock;
29756 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
29757 diff -urNp linux-2.6.32.44/drivers/gpu/drm/i915/i915_gem.c linux-2.6.32.44/drivers/gpu/drm/i915/i915_gem.c
29758 --- linux-2.6.32.44/drivers/gpu/drm/i915/i915_gem.c 2011-03-27 14:31:47.000000000 -0400
29759 +++ linux-2.6.32.44/drivers/gpu/drm/i915/i915_gem.c 2011-04-17 15:56:46.000000000 -0400
29760 @@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_d
29761
29762 args->aper_size = dev->gtt_total;
29763 args->aper_available_size = (args->aper_size -
29764 - atomic_read(&dev->pin_memory));
29765 + atomic_read_unchecked(&dev->pin_memory));
29766
29767 return 0;
29768 }
29769 @@ -492,6 +492,11 @@ i915_gem_pread_ioctl(struct drm_device *
29770 return -EINVAL;
29771 }
29772
29773 + if (!access_ok(VERIFY_WRITE, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
29774 + drm_gem_object_unreference(obj);
29775 + return -EFAULT;
29776 + }
29777 +
29778 if (i915_gem_object_needs_bit17_swizzle(obj)) {
29779 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
29780 } else {
29781 @@ -965,6 +970,11 @@ i915_gem_pwrite_ioctl(struct drm_device
29782 return -EINVAL;
29783 }
29784
29785 + if (!access_ok(VERIFY_READ, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
29786 + drm_gem_object_unreference(obj);
29787 + return -EFAULT;
29788 + }
29789 +
29790 /* We can only do the GTT pwrite on untiled buffers, as otherwise
29791 * it would end up going through the fenced access, and we'll get
29792 * different detiling behavior between reading and writing.
29793 @@ -2054,7 +2064,7 @@ i915_gem_object_unbind(struct drm_gem_ob
29794
29795 if (obj_priv->gtt_space) {
29796 atomic_dec(&dev->gtt_count);
29797 - atomic_sub(obj->size, &dev->gtt_memory);
29798 + atomic_sub_unchecked(obj->size, &dev->gtt_memory);
29799
29800 drm_mm_put_block(obj_priv->gtt_space);
29801 obj_priv->gtt_space = NULL;
29802 @@ -2697,7 +2707,7 @@ i915_gem_object_bind_to_gtt(struct drm_g
29803 goto search_free;
29804 }
29805 atomic_inc(&dev->gtt_count);
29806 - atomic_add(obj->size, &dev->gtt_memory);
29807 + atomic_add_unchecked(obj->size, &dev->gtt_memory);
29808
29809 /* Assert that the object is not currently in any GPU domain. As it
29810 * wasn't in the GTT, there shouldn't be any way it could have been in
29811 @@ -3751,9 +3761,9 @@ i915_gem_execbuffer(struct drm_device *d
29812 "%d/%d gtt bytes\n",
29813 atomic_read(&dev->object_count),
29814 atomic_read(&dev->pin_count),
29815 - atomic_read(&dev->object_memory),
29816 - atomic_read(&dev->pin_memory),
29817 - atomic_read(&dev->gtt_memory),
29818 + atomic_read_unchecked(&dev->object_memory),
29819 + atomic_read_unchecked(&dev->pin_memory),
29820 + atomic_read_unchecked(&dev->gtt_memory),
29821 dev->gtt_total);
29822 }
29823 goto err;
29824 @@ -3985,7 +3995,7 @@ i915_gem_object_pin(struct drm_gem_objec
29825 */
29826 if (obj_priv->pin_count == 1) {
29827 atomic_inc(&dev->pin_count);
29828 - atomic_add(obj->size, &dev->pin_memory);
29829 + atomic_add_unchecked(obj->size, &dev->pin_memory);
29830 if (!obj_priv->active &&
29831 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
29832 !list_empty(&obj_priv->list))
29833 @@ -4018,7 +4028,7 @@ i915_gem_object_unpin(struct drm_gem_obj
29834 list_move_tail(&obj_priv->list,
29835 &dev_priv->mm.inactive_list);
29836 atomic_dec(&dev->pin_count);
29837 - atomic_sub(obj->size, &dev->pin_memory);
29838 + atomic_sub_unchecked(obj->size, &dev->pin_memory);
29839 }
29840 i915_verify_inactive(dev, __FILE__, __LINE__);
29841 }
29842 diff -urNp linux-2.6.32.44/drivers/gpu/drm/i915/i915_irq.c linux-2.6.32.44/drivers/gpu/drm/i915/i915_irq.c
29843 --- linux-2.6.32.44/drivers/gpu/drm/i915/i915_irq.c 2011-03-27 14:31:47.000000000 -0400
29844 +++ linux-2.6.32.44/drivers/gpu/drm/i915/i915_irq.c 2011-05-04 17:56:28.000000000 -0400
29845 @@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_
29846 int irq_received;
29847 int ret = IRQ_NONE;
29848
29849 - atomic_inc(&dev_priv->irq_received);
29850 + atomic_inc_unchecked(&dev_priv->irq_received);
29851
29852 if (IS_IGDNG(dev))
29853 return igdng_irq_handler(dev);
29854 @@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct d
29855 {
29856 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
29857
29858 - atomic_set(&dev_priv->irq_received, 0);
29859 + atomic_set_unchecked(&dev_priv->irq_received, 0);
29860
29861 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
29862 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
29863 diff -urNp linux-2.6.32.44/drivers/gpu/drm/i915/intel_sdvo.c linux-2.6.32.44/drivers/gpu/drm/i915/intel_sdvo.c
29864 --- linux-2.6.32.44/drivers/gpu/drm/i915/intel_sdvo.c 2011-03-27 14:31:47.000000000 -0400
29865 +++ linux-2.6.32.44/drivers/gpu/drm/i915/intel_sdvo.c 2011-08-05 20:33:55.000000000 -0400
29866 @@ -2795,7 +2795,9 @@ bool intel_sdvo_init(struct drm_device *
29867 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
29868
29869 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
29870 - intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
29871 + pax_open_kernel();
29872 + *(void **)&intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
29873 + pax_close_kernel();
29874
29875 /* Read the regs to test if we can talk to the device */
29876 for (i = 0; i < 0x40; i++) {
29877 diff -urNp linux-2.6.32.44/drivers/gpu/drm/mga/mga_drv.h linux-2.6.32.44/drivers/gpu/drm/mga/mga_drv.h
29878 --- linux-2.6.32.44/drivers/gpu/drm/mga/mga_drv.h 2011-03-27 14:31:47.000000000 -0400
29879 +++ linux-2.6.32.44/drivers/gpu/drm/mga/mga_drv.h 2011-05-04 17:56:28.000000000 -0400
29880 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
29881 u32 clear_cmd;
29882 u32 maccess;
29883
29884 - atomic_t vbl_received; /**< Number of vblanks received. */
29885 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
29886 wait_queue_head_t fence_queue;
29887 - atomic_t last_fence_retired;
29888 + atomic_unchecked_t last_fence_retired;
29889 u32 next_fence_to_post;
29890
29891 unsigned int fb_cpp;
29892 diff -urNp linux-2.6.32.44/drivers/gpu/drm/mga/mga_irq.c linux-2.6.32.44/drivers/gpu/drm/mga/mga_irq.c
29893 --- linux-2.6.32.44/drivers/gpu/drm/mga/mga_irq.c 2011-03-27 14:31:47.000000000 -0400
29894 +++ linux-2.6.32.44/drivers/gpu/drm/mga/mga_irq.c 2011-05-04 17:56:28.000000000 -0400
29895 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
29896 if (crtc != 0)
29897 return 0;
29898
29899 - return atomic_read(&dev_priv->vbl_received);
29900 + return atomic_read_unchecked(&dev_priv->vbl_received);
29901 }
29902
29903
29904 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
29905 /* VBLANK interrupt */
29906 if (status & MGA_VLINEPEN) {
29907 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
29908 - atomic_inc(&dev_priv->vbl_received);
29909 + atomic_inc_unchecked(&dev_priv->vbl_received);
29910 drm_handle_vblank(dev, 0);
29911 handled = 1;
29912 }
29913 @@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
29914 MGA_WRITE(MGA_PRIMEND, prim_end);
29915 }
29916
29917 - atomic_inc(&dev_priv->last_fence_retired);
29918 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
29919 DRM_WAKEUP(&dev_priv->fence_queue);
29920 handled = 1;
29921 }
29922 @@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_dev
29923 * using fences.
29924 */
29925 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
29926 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
29927 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
29928 - *sequence) <= (1 << 23)));
29929
29930 *sequence = cur_fence;
29931 diff -urNp linux-2.6.32.44/drivers/gpu/drm/r128/r128_cce.c linux-2.6.32.44/drivers/gpu/drm/r128/r128_cce.c
29932 --- linux-2.6.32.44/drivers/gpu/drm/r128/r128_cce.c 2011-03-27 14:31:47.000000000 -0400
29933 +++ linux-2.6.32.44/drivers/gpu/drm/r128/r128_cce.c 2011-05-04 17:56:28.000000000 -0400
29934 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
29935
29936 /* GH: Simple idle check.
29937 */
29938 - atomic_set(&dev_priv->idle_count, 0);
29939 + atomic_set_unchecked(&dev_priv->idle_count, 0);
29940
29941 /* We don't support anything other than bus-mastering ring mode,
29942 * but the ring can be in either AGP or PCI space for the ring
29943 diff -urNp linux-2.6.32.44/drivers/gpu/drm/r128/r128_drv.h linux-2.6.32.44/drivers/gpu/drm/r128/r128_drv.h
29944 --- linux-2.6.32.44/drivers/gpu/drm/r128/r128_drv.h 2011-03-27 14:31:47.000000000 -0400
29945 +++ linux-2.6.32.44/drivers/gpu/drm/r128/r128_drv.h 2011-05-04 17:56:28.000000000 -0400
29946 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
29947 int is_pci;
29948 unsigned long cce_buffers_offset;
29949
29950 - atomic_t idle_count;
29951 + atomic_unchecked_t idle_count;
29952
29953 int page_flipping;
29954 int current_page;
29955 u32 crtc_offset;
29956 u32 crtc_offset_cntl;
29957
29958 - atomic_t vbl_received;
29959 + atomic_unchecked_t vbl_received;
29960
29961 u32 color_fmt;
29962 unsigned int front_offset;
29963 diff -urNp linux-2.6.32.44/drivers/gpu/drm/r128/r128_irq.c linux-2.6.32.44/drivers/gpu/drm/r128/r128_irq.c
29964 --- linux-2.6.32.44/drivers/gpu/drm/r128/r128_irq.c 2011-03-27 14:31:47.000000000 -0400
29965 +++ linux-2.6.32.44/drivers/gpu/drm/r128/r128_irq.c 2011-05-04 17:56:28.000000000 -0400
29966 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
29967 if (crtc != 0)
29968 return 0;
29969
29970 - return atomic_read(&dev_priv->vbl_received);
29971 + return atomic_read_unchecked(&dev_priv->vbl_received);
29972 }
29973
29974 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
29975 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
29976 /* VBLANK interrupt */
29977 if (status & R128_CRTC_VBLANK_INT) {
29978 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
29979 - atomic_inc(&dev_priv->vbl_received);
29980 + atomic_inc_unchecked(&dev_priv->vbl_received);
29981 drm_handle_vblank(dev, 0);
29982 return IRQ_HANDLED;
29983 }
29984 diff -urNp linux-2.6.32.44/drivers/gpu/drm/r128/r128_state.c linux-2.6.32.44/drivers/gpu/drm/r128/r128_state.c
29985 --- linux-2.6.32.44/drivers/gpu/drm/r128/r128_state.c 2011-03-27 14:31:47.000000000 -0400
29986 +++ linux-2.6.32.44/drivers/gpu/drm/r128/r128_state.c 2011-05-04 17:56:28.000000000 -0400
29987 @@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_priv
29988
29989 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
29990 {
29991 - if (atomic_read(&dev_priv->idle_count) == 0) {
29992 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
29993 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
29994 } else {
29995 - atomic_set(&dev_priv->idle_count, 0);
29996 + atomic_set_unchecked(&dev_priv->idle_count, 0);
29997 }
29998 }
29999
30000 diff -urNp linux-2.6.32.44/drivers/gpu/drm/radeon/atom.c linux-2.6.32.44/drivers/gpu/drm/radeon/atom.c
30001 --- linux-2.6.32.44/drivers/gpu/drm/radeon/atom.c 2011-05-10 22:12:01.000000000 -0400
30002 +++ linux-2.6.32.44/drivers/gpu/drm/radeon/atom.c 2011-05-16 21:46:57.000000000 -0400
30003 @@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct c
30004 char name[512];
30005 int i;
30006
30007 + pax_track_stack();
30008 +
30009 ctx->card = card;
30010 ctx->bios = bios;
30011
30012 diff -urNp linux-2.6.32.44/drivers/gpu/drm/radeon/mkregtable.c linux-2.6.32.44/drivers/gpu/drm/radeon/mkregtable.c
30013 --- linux-2.6.32.44/drivers/gpu/drm/radeon/mkregtable.c 2011-03-27 14:31:47.000000000 -0400
30014 +++ linux-2.6.32.44/drivers/gpu/drm/radeon/mkregtable.c 2011-04-17 15:56:46.000000000 -0400
30015 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
30016 regex_t mask_rex;
30017 regmatch_t match[4];
30018 char buf[1024];
30019 - size_t end;
30020 + long end;
30021 int len;
30022 int done = 0;
30023 int r;
30024 unsigned o;
30025 struct offset *offset;
30026 char last_reg_s[10];
30027 - int last_reg;
30028 + unsigned long last_reg;
30029
30030 if (regcomp
30031 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
30032 diff -urNp linux-2.6.32.44/drivers/gpu/drm/radeon/radeon_atombios.c linux-2.6.32.44/drivers/gpu/drm/radeon/radeon_atombios.c
30033 --- linux-2.6.32.44/drivers/gpu/drm/radeon/radeon_atombios.c 2011-03-27 14:31:47.000000000 -0400
30034 +++ linux-2.6.32.44/drivers/gpu/drm/radeon/radeon_atombios.c 2011-05-16 21:46:57.000000000 -0400
30035 @@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from
30036 bool linkb;
30037 struct radeon_i2c_bus_rec ddc_bus;
30038
30039 + pax_track_stack();
30040 +
30041 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
30042
30043 if (data_offset == 0)
30044 @@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_o
30045 }
30046 }
30047
30048 -struct bios_connector {
30049 +static struct bios_connector {
30050 bool valid;
30051 uint16_t line_mux;
30052 uint16_t devices;
30053 int connector_type;
30054 struct radeon_i2c_bus_rec ddc_bus;
30055 -};
30056 +} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
30057
30058 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
30059 drm_device
30060 @@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from
30061 uint8_t dac;
30062 union atom_supported_devices *supported_devices;
30063 int i, j;
30064 - struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
30065
30066 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
30067
30068 diff -urNp linux-2.6.32.44/drivers/gpu/drm/radeon/radeon_display.c linux-2.6.32.44/drivers/gpu/drm/radeon/radeon_display.c
30069 --- linux-2.6.32.44/drivers/gpu/drm/radeon/radeon_display.c 2011-03-27 14:31:47.000000000 -0400
30070 +++ linux-2.6.32.44/drivers/gpu/drm/radeon/radeon_display.c 2011-04-17 15:56:46.000000000 -0400
30071 @@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pl
30072
30073 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
30074 error = freq - current_freq;
30075 - error = error < 0 ? 0xffffffff : error;
30076 + error = (int32_t)error < 0 ? 0xffffffff : error;
30077 } else
30078 error = abs(current_freq - freq);
30079 vco_diff = abs(vco - best_vco);
30080 diff -urNp linux-2.6.32.44/drivers/gpu/drm/radeon/radeon_drv.h linux-2.6.32.44/drivers/gpu/drm/radeon/radeon_drv.h
30081 --- linux-2.6.32.44/drivers/gpu/drm/radeon/radeon_drv.h 2011-03-27 14:31:47.000000000 -0400
30082 +++ linux-2.6.32.44/drivers/gpu/drm/radeon/radeon_drv.h 2011-05-04 17:56:28.000000000 -0400
30083 @@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
30084
30085 /* SW interrupt */
30086 wait_queue_head_t swi_queue;
30087 - atomic_t swi_emitted;
30088 + atomic_unchecked_t swi_emitted;
30089 int vblank_crtc;
30090 uint32_t irq_enable_reg;
30091 uint32_t r500_disp_irq_reg;
30092 diff -urNp linux-2.6.32.44/drivers/gpu/drm/radeon/radeon_fence.c linux-2.6.32.44/drivers/gpu/drm/radeon/radeon_fence.c
30093 --- linux-2.6.32.44/drivers/gpu/drm/radeon/radeon_fence.c 2011-03-27 14:31:47.000000000 -0400
30094 +++ linux-2.6.32.44/drivers/gpu/drm/radeon/radeon_fence.c 2011-05-04 17:56:28.000000000 -0400
30095 @@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_devi
30096 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
30097 return 0;
30098 }
30099 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
30100 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
30101 if (!rdev->cp.ready) {
30102 /* FIXME: cp is not running assume everythings is done right
30103 * away
30104 @@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct rade
30105 return r;
30106 }
30107 WREG32(rdev->fence_drv.scratch_reg, 0);
30108 - atomic_set(&rdev->fence_drv.seq, 0);
30109 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
30110 INIT_LIST_HEAD(&rdev->fence_drv.created);
30111 INIT_LIST_HEAD(&rdev->fence_drv.emited);
30112 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
30113 diff -urNp linux-2.6.32.44/drivers/gpu/drm/radeon/radeon.h linux-2.6.32.44/drivers/gpu/drm/radeon/radeon.h
30114 --- linux-2.6.32.44/drivers/gpu/drm/radeon/radeon.h 2011-03-27 14:31:47.000000000 -0400
30115 +++ linux-2.6.32.44/drivers/gpu/drm/radeon/radeon.h 2011-08-05 20:33:55.000000000 -0400
30116 @@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device
30117 */
30118 struct radeon_fence_driver {
30119 uint32_t scratch_reg;
30120 - atomic_t seq;
30121 + atomic_unchecked_t seq;
30122 uint32_t last_seq;
30123 unsigned long count_timeout;
30124 wait_queue_head_t queue;
30125 @@ -640,7 +640,7 @@ struct radeon_asic {
30126 uint32_t offset, uint32_t obj_size);
30127 int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
30128 void (*bandwidth_update)(struct radeon_device *rdev);
30129 -};
30130 +} __no_const;
30131
30132 /*
30133 * Asic structures
30134 diff -urNp linux-2.6.32.44/drivers/gpu/drm/radeon/radeon_ioc32.c linux-2.6.32.44/drivers/gpu/drm/radeon/radeon_ioc32.c
30135 --- linux-2.6.32.44/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-03-27 14:31:47.000000000 -0400
30136 +++ linux-2.6.32.44/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-04-23 13:57:24.000000000 -0400
30137 @@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(str
30138 request = compat_alloc_user_space(sizeof(*request));
30139 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
30140 || __put_user(req32.param, &request->param)
30141 - || __put_user((void __user *)(unsigned long)req32.value,
30142 + || __put_user((unsigned long)req32.value,
30143 &request->value))
30144 return -EFAULT;
30145
30146 diff -urNp linux-2.6.32.44/drivers/gpu/drm/radeon/radeon_irq.c linux-2.6.32.44/drivers/gpu/drm/radeon/radeon_irq.c
30147 --- linux-2.6.32.44/drivers/gpu/drm/radeon/radeon_irq.c 2011-03-27 14:31:47.000000000 -0400
30148 +++ linux-2.6.32.44/drivers/gpu/drm/radeon/radeon_irq.c 2011-05-04 17:56:28.000000000 -0400
30149 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
30150 unsigned int ret;
30151 RING_LOCALS;
30152
30153 - atomic_inc(&dev_priv->swi_emitted);
30154 - ret = atomic_read(&dev_priv->swi_emitted);
30155 + atomic_inc_unchecked(&dev_priv->swi_emitted);
30156 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
30157
30158 BEGIN_RING(4);
30159 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
30160 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
30161 drm_radeon_private_t *dev_priv =
30162 (drm_radeon_private_t *) dev->dev_private;
30163
30164 - atomic_set(&dev_priv->swi_emitted, 0);
30165 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
30166 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
30167
30168 dev->max_vblank_count = 0x001fffff;
30169 diff -urNp linux-2.6.32.44/drivers/gpu/drm/radeon/radeon_state.c linux-2.6.32.44/drivers/gpu/drm/radeon/radeon_state.c
30170 --- linux-2.6.32.44/drivers/gpu/drm/radeon/radeon_state.c 2011-03-27 14:31:47.000000000 -0400
30171 +++ linux-2.6.32.44/drivers/gpu/drm/radeon/radeon_state.c 2011-04-17 15:56:46.000000000 -0400
30172 @@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm
30173 {
30174 drm_radeon_private_t *dev_priv = dev->dev_private;
30175 drm_radeon_getparam_t *param = data;
30176 - int value;
30177 + int value = 0;
30178
30179 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
30180
30181 diff -urNp linux-2.6.32.44/drivers/gpu/drm/radeon/radeon_ttm.c linux-2.6.32.44/drivers/gpu/drm/radeon/radeon_ttm.c
30182 --- linux-2.6.32.44/drivers/gpu/drm/radeon/radeon_ttm.c 2011-03-27 14:31:47.000000000 -0400
30183 +++ linux-2.6.32.44/drivers/gpu/drm/radeon/radeon_ttm.c 2011-04-17 15:56:46.000000000 -0400
30184 @@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_devic
30185 DRM_INFO("radeon: ttm finalized\n");
30186 }
30187
30188 -static struct vm_operations_struct radeon_ttm_vm_ops;
30189 -static const struct vm_operations_struct *ttm_vm_ops = NULL;
30190 -
30191 -static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
30192 -{
30193 - struct ttm_buffer_object *bo;
30194 - int r;
30195 -
30196 - bo = (struct ttm_buffer_object *)vma->vm_private_data;
30197 - if (bo == NULL) {
30198 - return VM_FAULT_NOPAGE;
30199 - }
30200 - r = ttm_vm_ops->fault(vma, vmf);
30201 - return r;
30202 -}
30203 -
30204 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
30205 {
30206 struct drm_file *file_priv;
30207 struct radeon_device *rdev;
30208 - int r;
30209
30210 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
30211 return drm_mmap(filp, vma);
30212 @@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struc
30213
30214 file_priv = (struct drm_file *)filp->private_data;
30215 rdev = file_priv->minor->dev->dev_private;
30216 - if (rdev == NULL) {
30217 + if (!rdev)
30218 return -EINVAL;
30219 - }
30220 - r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
30221 - if (unlikely(r != 0)) {
30222 - return r;
30223 - }
30224 - if (unlikely(ttm_vm_ops == NULL)) {
30225 - ttm_vm_ops = vma->vm_ops;
30226 - radeon_ttm_vm_ops = *ttm_vm_ops;
30227 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
30228 - }
30229 - vma->vm_ops = &radeon_ttm_vm_ops;
30230 - return 0;
30231 + return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
30232 }
30233
30234
30235 diff -urNp linux-2.6.32.44/drivers/gpu/drm/radeon/rs690.c linux-2.6.32.44/drivers/gpu/drm/radeon/rs690.c
30236 --- linux-2.6.32.44/drivers/gpu/drm/radeon/rs690.c 2011-03-27 14:31:47.000000000 -0400
30237 +++ linux-2.6.32.44/drivers/gpu/drm/radeon/rs690.c 2011-04-17 15:56:46.000000000 -0400
30238 @@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct
30239 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
30240 rdev->pm.sideport_bandwidth.full)
30241 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
30242 - read_delay_latency.full = rfixed_const(370 * 800 * 1000);
30243 + read_delay_latency.full = rfixed_const(800 * 1000);
30244 read_delay_latency.full = rfixed_div(read_delay_latency,
30245 rdev->pm.igp_sideport_mclk);
30246 + a.full = rfixed_const(370);
30247 + read_delay_latency.full = rfixed_mul(read_delay_latency, a);
30248 } else {
30249 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
30250 rdev->pm.k8_bandwidth.full)
30251 diff -urNp linux-2.6.32.44/drivers/gpu/drm/ttm/ttm_bo.c linux-2.6.32.44/drivers/gpu/drm/ttm/ttm_bo.c
30252 --- linux-2.6.32.44/drivers/gpu/drm/ttm/ttm_bo.c 2011-03-27 14:31:47.000000000 -0400
30253 +++ linux-2.6.32.44/drivers/gpu/drm/ttm/ttm_bo.c 2011-04-23 12:56:11.000000000 -0400
30254 @@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_a
30255 NULL
30256 };
30257
30258 -static struct sysfs_ops ttm_bo_global_ops = {
30259 +static const struct sysfs_ops ttm_bo_global_ops = {
30260 .show = &ttm_bo_global_show
30261 };
30262
30263 diff -urNp linux-2.6.32.44/drivers/gpu/drm/ttm/ttm_bo_vm.c linux-2.6.32.44/drivers/gpu/drm/ttm/ttm_bo_vm.c
30264 --- linux-2.6.32.44/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-03-27 14:31:47.000000000 -0400
30265 +++ linux-2.6.32.44/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-04-17 15:56:46.000000000 -0400
30266 @@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_are
30267 {
30268 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
30269 vma->vm_private_data;
30270 - struct ttm_bo_device *bdev = bo->bdev;
30271 + struct ttm_bo_device *bdev;
30272 unsigned long bus_base;
30273 unsigned long bus_offset;
30274 unsigned long bus_size;
30275 @@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_are
30276 unsigned long address = (unsigned long)vmf->virtual_address;
30277 int retval = VM_FAULT_NOPAGE;
30278
30279 + if (!bo)
30280 + return VM_FAULT_NOPAGE;
30281 + bdev = bo->bdev;
30282 +
30283 /*
30284 * Work around locking order reversal in fault / nopfn
30285 * between mmap_sem and bo_reserve: Perform a trylock operation
30286 diff -urNp linux-2.6.32.44/drivers/gpu/drm/ttm/ttm_global.c linux-2.6.32.44/drivers/gpu/drm/ttm/ttm_global.c
30287 --- linux-2.6.32.44/drivers/gpu/drm/ttm/ttm_global.c 2011-03-27 14:31:47.000000000 -0400
30288 +++ linux-2.6.32.44/drivers/gpu/drm/ttm/ttm_global.c 2011-04-17 15:56:46.000000000 -0400
30289 @@ -36,7 +36,7 @@
30290 struct ttm_global_item {
30291 struct mutex mutex;
30292 void *object;
30293 - int refcount;
30294 + atomic_t refcount;
30295 };
30296
30297 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
30298 @@ -49,7 +49,7 @@ void ttm_global_init(void)
30299 struct ttm_global_item *item = &glob[i];
30300 mutex_init(&item->mutex);
30301 item->object = NULL;
30302 - item->refcount = 0;
30303 + atomic_set(&item->refcount, 0);
30304 }
30305 }
30306
30307 @@ -59,7 +59,7 @@ void ttm_global_release(void)
30308 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
30309 struct ttm_global_item *item = &glob[i];
30310 BUG_ON(item->object != NULL);
30311 - BUG_ON(item->refcount != 0);
30312 + BUG_ON(atomic_read(&item->refcount) != 0);
30313 }
30314 }
30315
30316 @@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_globa
30317 void *object;
30318
30319 mutex_lock(&item->mutex);
30320 - if (item->refcount == 0) {
30321 + if (atomic_read(&item->refcount) == 0) {
30322 item->object = kzalloc(ref->size, GFP_KERNEL);
30323 if (unlikely(item->object == NULL)) {
30324 ret = -ENOMEM;
30325 @@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_globa
30326 goto out_err;
30327
30328 }
30329 - ++item->refcount;
30330 + atomic_inc(&item->refcount);
30331 ref->object = item->object;
30332 object = item->object;
30333 mutex_unlock(&item->mutex);
30334 @@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_gl
30335 struct ttm_global_item *item = &glob[ref->global_type];
30336
30337 mutex_lock(&item->mutex);
30338 - BUG_ON(item->refcount == 0);
30339 + BUG_ON(atomic_read(&item->refcount) == 0);
30340 BUG_ON(ref->object != item->object);
30341 - if (--item->refcount == 0) {
30342 + if (atomic_dec_and_test(&item->refcount)) {
30343 ref->release(ref);
30344 item->object = NULL;
30345 }
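
In the ttm_global hunks above, the per-item reference count becomes an atomic_t while allocation and teardown of the shared object stay under the item mutex, with atomic_dec_and_test() deciding when the last reference is dropped. A userspace sketch of that shape using pthreads and C11 atomics rather than the kernel primitives; the names are illustrative.

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct global_item {
    pthread_mutex_t mutex;
    void *object;
    atomic_int refcount;
};

static struct global_item item = { .mutex = PTHREAD_MUTEX_INITIALIZER };

static void *item_ref(size_t size)
{
    void *object;

    pthread_mutex_lock(&item.mutex);
    if (atomic_load(&item.refcount) == 0)
        item.object = calloc(1, size);              /* first reference allocates */
    if (item.object)
        atomic_fetch_add(&item.refcount, 1);
    object = item.object;
    pthread_mutex_unlock(&item.mutex);
    return object;
}

static void item_unref(void)
{
    pthread_mutex_lock(&item.mutex);
    if (atomic_fetch_sub(&item.refcount, 1) == 1) { /* last reference frees */
        free(item.object);
        item.object = NULL;
    }
    pthread_mutex_unlock(&item.mutex);
}

int main(void)
{
    if (item_ref(128))
        item_unref();
    return 0;
}
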
30346 diff -urNp linux-2.6.32.44/drivers/gpu/drm/ttm/ttm_memory.c linux-2.6.32.44/drivers/gpu/drm/ttm/ttm_memory.c
30347 --- linux-2.6.32.44/drivers/gpu/drm/ttm/ttm_memory.c 2011-03-27 14:31:47.000000000 -0400
30348 +++ linux-2.6.32.44/drivers/gpu/drm/ttm/ttm_memory.c 2011-04-17 15:56:46.000000000 -0400
30349 @@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_at
30350 NULL
30351 };
30352
30353 -static struct sysfs_ops ttm_mem_zone_ops = {
30354 +static const struct sysfs_ops ttm_mem_zone_ops = {
30355 .show = &ttm_mem_zone_show,
30356 .store = &ttm_mem_zone_store
30357 };
30358 diff -urNp linux-2.6.32.44/drivers/gpu/drm/via/via_drv.h linux-2.6.32.44/drivers/gpu/drm/via/via_drv.h
30359 --- linux-2.6.32.44/drivers/gpu/drm/via/via_drv.h 2011-03-27 14:31:47.000000000 -0400
30360 +++ linux-2.6.32.44/drivers/gpu/drm/via/via_drv.h 2011-05-04 17:56:28.000000000 -0400
30361 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
30362 typedef uint32_t maskarray_t[5];
30363
30364 typedef struct drm_via_irq {
30365 - atomic_t irq_received;
30366 + atomic_unchecked_t irq_received;
30367 uint32_t pending_mask;
30368 uint32_t enable_mask;
30369 wait_queue_head_t irq_queue;
30370 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
30371 struct timeval last_vblank;
30372 int last_vblank_valid;
30373 unsigned usec_per_vblank;
30374 - atomic_t vbl_received;
30375 + atomic_unchecked_t vbl_received;
30376 drm_via_state_t hc_state;
30377 char pci_buf[VIA_PCI_BUF_SIZE];
30378 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
30379 diff -urNp linux-2.6.32.44/drivers/gpu/drm/via/via_irq.c linux-2.6.32.44/drivers/gpu/drm/via/via_irq.c
30380 --- linux-2.6.32.44/drivers/gpu/drm/via/via_irq.c 2011-03-27 14:31:47.000000000 -0400
30381 +++ linux-2.6.32.44/drivers/gpu/drm/via/via_irq.c 2011-05-04 17:56:28.000000000 -0400
30382 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
30383 if (crtc != 0)
30384 return 0;
30385
30386 - return atomic_read(&dev_priv->vbl_received);
30387 + return atomic_read_unchecked(&dev_priv->vbl_received);
30388 }
30389
30390 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30391 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
30392
30393 status = VIA_READ(VIA_REG_INTERRUPT);
30394 if (status & VIA_IRQ_VBLANK_PENDING) {
30395 - atomic_inc(&dev_priv->vbl_received);
30396 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
30397 + atomic_inc_unchecked(&dev_priv->vbl_received);
30398 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
30399 do_gettimeofday(&cur_vblank);
30400 if (dev_priv->last_vblank_valid) {
30401 dev_priv->usec_per_vblank =
30402 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
30403 dev_priv->last_vblank = cur_vblank;
30404 dev_priv->last_vblank_valid = 1;
30405 }
30406 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
30407 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
30408 DRM_DEBUG("US per vblank is: %u\n",
30409 dev_priv->usec_per_vblank);
30410 }
30411 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
30412
30413 for (i = 0; i < dev_priv->num_irqs; ++i) {
30414 if (status & cur_irq->pending_mask) {
30415 - atomic_inc(&cur_irq->irq_received);
30416 + atomic_inc_unchecked(&cur_irq->irq_received);
30417 DRM_WAKEUP(&cur_irq->irq_queue);
30418 handled = 1;
30419 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
30420 @@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device *
30421 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
30422 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
30423 masks[irq][4]));
30424 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
30425 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
30426 } else {
30427 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
30428 (((cur_irq_sequence =
30429 - atomic_read(&cur_irq->irq_received)) -
30430 + atomic_read_unchecked(&cur_irq->irq_received)) -
30431 *sequence) <= (1 << 23)));
30432 }
30433 *sequence = cur_irq_sequence;
30434 @@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct dr
30435 }
30436
30437 for (i = 0; i < dev_priv->num_irqs; ++i) {
30438 - atomic_set(&cur_irq->irq_received, 0);
30439 + atomic_set_unchecked(&cur_irq->irq_received, 0);
30440 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
30441 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
30442 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
30443 @@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev,
30444 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
30445 case VIA_IRQ_RELATIVE:
30446 irqwait->request.sequence +=
30447 - atomic_read(&cur_irq->irq_received);
30448 + atomic_read_unchecked(&cur_irq->irq_received);
30449 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
30450 case VIA_IRQ_ABSOLUTE:
30451 break;
30452 diff -urNp linux-2.6.32.44/drivers/hid/hid-core.c linux-2.6.32.44/drivers/hid/hid-core.c
30453 --- linux-2.6.32.44/drivers/hid/hid-core.c 2011-05-10 22:12:01.000000000 -0400
30454 +++ linux-2.6.32.44/drivers/hid/hid-core.c 2011-05-10 22:12:32.000000000 -0400
30455 @@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device
30456
30457 int hid_add_device(struct hid_device *hdev)
30458 {
30459 - static atomic_t id = ATOMIC_INIT(0);
30460 + static atomic_unchecked_t id = ATOMIC_INIT(0);
30461 int ret;
30462
30463 if (WARN_ON(hdev->status & HID_STAT_ADDED))
30464 @@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hd
30465 /* XXX hack, any other cleaner solution after the driver core
30466 * is converted to allow more than 20 bytes as the device name? */
30467 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
30468 - hdev->vendor, hdev->product, atomic_inc_return(&id));
30469 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
30470
30471 ret = device_add(&hdev->dev);
30472 if (!ret)
30473 diff -urNp linux-2.6.32.44/drivers/hid/usbhid/hiddev.c linux-2.6.32.44/drivers/hid/usbhid/hiddev.c
30474 --- linux-2.6.32.44/drivers/hid/usbhid/hiddev.c 2011-03-27 14:31:47.000000000 -0400
30475 +++ linux-2.6.32.44/drivers/hid/usbhid/hiddev.c 2011-04-17 15:56:46.000000000 -0400
30476 @@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *fi
30477 return put_user(HID_VERSION, (int __user *)arg);
30478
30479 case HIDIOCAPPLICATION:
30480 - if (arg < 0 || arg >= hid->maxapplication)
30481 + if (arg >= hid->maxapplication)
30482 return -EINVAL;
30483
30484 for (i = 0; i < hid->maxcollection; i++)
30485 diff -urNp linux-2.6.32.44/drivers/hwmon/lis3lv02d.c linux-2.6.32.44/drivers/hwmon/lis3lv02d.c
30486 --- linux-2.6.32.44/drivers/hwmon/lis3lv02d.c 2011-03-27 14:31:47.000000000 -0400
30487 +++ linux-2.6.32.44/drivers/hwmon/lis3lv02d.c 2011-05-04 17:56:28.000000000 -0400
30488 @@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(in
30489 * the lid is closed. This leads to interrupts as soon as a little move
30490 * is done.
30491 */
30492 - atomic_inc(&lis3_dev.count);
30493 + atomic_inc_unchecked(&lis3_dev.count);
30494
30495 wake_up_interruptible(&lis3_dev.misc_wait);
30496 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
30497 @@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct in
30498 if (test_and_set_bit(0, &lis3_dev.misc_opened))
30499 return -EBUSY; /* already open */
30500
30501 - atomic_set(&lis3_dev.count, 0);
30502 + atomic_set_unchecked(&lis3_dev.count, 0);
30503
30504 /*
30505 * The sensor can generate interrupts for free-fall and direction
30506 @@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struc
30507 add_wait_queue(&lis3_dev.misc_wait, &wait);
30508 while (true) {
30509 set_current_state(TASK_INTERRUPTIBLE);
30510 - data = atomic_xchg(&lis3_dev.count, 0);
30511 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
30512 if (data)
30513 break;
30514
30515 @@ -244,7 +244,7 @@ out:
30516 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
30517 {
30518 poll_wait(file, &lis3_dev.misc_wait, wait);
30519 - if (atomic_read(&lis3_dev.count))
30520 + if (atomic_read_unchecked(&lis3_dev.count))
30521 return POLLIN | POLLRDNORM;
30522 return 0;
30523 }
30524 diff -urNp linux-2.6.32.44/drivers/hwmon/lis3lv02d.h linux-2.6.32.44/drivers/hwmon/lis3lv02d.h
30525 --- linux-2.6.32.44/drivers/hwmon/lis3lv02d.h 2011-03-27 14:31:47.000000000 -0400
30526 +++ linux-2.6.32.44/drivers/hwmon/lis3lv02d.h 2011-05-04 17:56:28.000000000 -0400
30527 @@ -201,7 +201,7 @@ struct lis3lv02d {
30528
30529 struct input_polled_dev *idev; /* input device */
30530 struct platform_device *pdev; /* platform device */
30531 - atomic_t count; /* interrupt count after last read */
30532 + atomic_unchecked_t count; /* interrupt count after last read */
30533 int xcalib; /* calibrated null value for x */
30534 int ycalib; /* calibrated null value for y */
30535 int zcalib; /* calibrated null value for z */
30536 diff -urNp linux-2.6.32.44/drivers/hwmon/sht15.c linux-2.6.32.44/drivers/hwmon/sht15.c
30537 --- linux-2.6.32.44/drivers/hwmon/sht15.c 2011-03-27 14:31:47.000000000 -0400
30538 +++ linux-2.6.32.44/drivers/hwmon/sht15.c 2011-05-04 17:56:28.000000000 -0400
30539 @@ -112,7 +112,7 @@ struct sht15_data {
30540 int supply_uV;
30541 int supply_uV_valid;
30542 struct work_struct update_supply_work;
30543 - atomic_t interrupt_handled;
30544 + atomic_unchecked_t interrupt_handled;
30545 };
30546
30547 /**
30548 @@ -245,13 +245,13 @@ static inline int sht15_update_single_va
30549 return ret;
30550
30551 gpio_direction_input(data->pdata->gpio_data);
30552 - atomic_set(&data->interrupt_handled, 0);
30553 + atomic_set_unchecked(&data->interrupt_handled, 0);
30554
30555 enable_irq(gpio_to_irq(data->pdata->gpio_data));
30556 if (gpio_get_value(data->pdata->gpio_data) == 0) {
30557 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
30558 /* Only relevant if the interrupt hasn't occured. */
30559 - if (!atomic_read(&data->interrupt_handled))
30560 + if (!atomic_read_unchecked(&data->interrupt_handled))
30561 schedule_work(&data->read_work);
30562 }
30563 ret = wait_event_timeout(data->wait_queue,
30564 @@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired
30565 struct sht15_data *data = d;
30566 /* First disable the interrupt */
30567 disable_irq_nosync(irq);
30568 - atomic_inc(&data->interrupt_handled);
30569 + atomic_inc_unchecked(&data->interrupt_handled);
30570 /* Then schedule a reading work struct */
30571 if (data->flag != SHT15_READING_NOTHING)
30572 schedule_work(&data->read_work);
30573 @@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct wo
30574 here as could have gone low in meantime so verify
30575 it hasn't!
30576 */
30577 - atomic_set(&data->interrupt_handled, 0);
30578 + atomic_set_unchecked(&data->interrupt_handled, 0);
30579 enable_irq(gpio_to_irq(data->pdata->gpio_data));
30580 /* If still not occured or another handler has been scheduled */
30581 if (gpio_get_value(data->pdata->gpio_data)
30582 - || atomic_read(&data->interrupt_handled))
30583 + || atomic_read_unchecked(&data->interrupt_handled))
30584 return;
30585 }
30586 /* Read the data back from the device */
30587 diff -urNp linux-2.6.32.44/drivers/hwmon/w83791d.c linux-2.6.32.44/drivers/hwmon/w83791d.c
30588 --- linux-2.6.32.44/drivers/hwmon/w83791d.c 2011-03-27 14:31:47.000000000 -0400
30589 +++ linux-2.6.32.44/drivers/hwmon/w83791d.c 2011-04-17 15:56:46.000000000 -0400
30590 @@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_cli
30591 struct i2c_board_info *info);
30592 static int w83791d_remove(struct i2c_client *client);
30593
30594 -static int w83791d_read(struct i2c_client *client, u8 register);
30595 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
30596 +static int w83791d_read(struct i2c_client *client, u8 reg);
30597 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
30598 static struct w83791d_data *w83791d_update_device(struct device *dev);
30599
30600 #ifdef DEBUG
30601 diff -urNp linux-2.6.32.44/drivers/i2c/busses/i2c-amd756-s4882.c linux-2.6.32.44/drivers/i2c/busses/i2c-amd756-s4882.c
30602 --- linux-2.6.32.44/drivers/i2c/busses/i2c-amd756-s4882.c 2011-03-27 14:31:47.000000000 -0400
30603 +++ linux-2.6.32.44/drivers/i2c/busses/i2c-amd756-s4882.c 2011-08-05 20:33:55.000000000 -0400
30604 @@ -189,23 +189,23 @@ static int __init amd756_s4882_init(void
30605 }
30606
30607 /* Fill in the new structures */
30608 - s4882_algo[0] = *(amd756_smbus.algo);
30609 - s4882_algo[0].smbus_xfer = amd756_access_virt0;
30610 + memcpy((void *)&s4882_algo[0], amd756_smbus.algo, sizeof(s4882_algo[0]));
30611 + *(void **)&s4882_algo[0].smbus_xfer = amd756_access_virt0;
30612 s4882_adapter[0] = amd756_smbus;
30613 s4882_adapter[0].algo = s4882_algo;
30614 - s4882_adapter[0].dev.parent = amd756_smbus.dev.parent;
30615 + *(void **)&s4882_adapter[0].dev.parent = amd756_smbus.dev.parent;
30616 for (i = 1; i < 5; i++) {
30617 - s4882_algo[i] = *(amd756_smbus.algo);
30618 + memcpy((void *)&s4882_algo[i], amd756_smbus.algo, sizeof(s4882_algo[i]));
30619 s4882_adapter[i] = amd756_smbus;
30620 snprintf(s4882_adapter[i].name, sizeof(s4882_adapter[i].name),
30621 "SMBus 8111 adapter (CPU%d)", i-1);
30622 s4882_adapter[i].algo = s4882_algo+i;
30623 s4882_adapter[i].dev.parent = amd756_smbus.dev.parent;
30624 }
30625 - s4882_algo[1].smbus_xfer = amd756_access_virt1;
30626 - s4882_algo[2].smbus_xfer = amd756_access_virt2;
30627 - s4882_algo[3].smbus_xfer = amd756_access_virt3;
30628 - s4882_algo[4].smbus_xfer = amd756_access_virt4;
30629 + *(void **)&s4882_algo[1].smbus_xfer = amd756_access_virt1;
30630 + *(void **)&s4882_algo[2].smbus_xfer = amd756_access_virt2;
30631 + *(void **)&s4882_algo[3].smbus_xfer = amd756_access_virt3;
30632 + *(void **)&s4882_algo[4].smbus_xfer = amd756_access_virt4;
30633
30634 /* Register virtual adapters */
30635 for (i = 0; i < 5; i++) {
30636 diff -urNp linux-2.6.32.44/drivers/i2c/busses/i2c-nforce2-s4985.c linux-2.6.32.44/drivers/i2c/busses/i2c-nforce2-s4985.c
30637 --- linux-2.6.32.44/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-03-27 14:31:47.000000000 -0400
30638 +++ linux-2.6.32.44/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-08-05 20:33:55.000000000 -0400
30639 @@ -184,23 +184,23 @@ static int __init nforce2_s4985_init(voi
30640 }
30641
30642 /* Fill in the new structures */
30643 - s4985_algo[0] = *(nforce2_smbus->algo);
30644 - s4985_algo[0].smbus_xfer = nforce2_access_virt0;
30645 + memcpy((void *)&s4985_algo[0], nforce2_smbus->algo, sizeof(s4985_algo[0]));
30646 + *(void **)&s4985_algo[0].smbus_xfer = nforce2_access_virt0;
30647 s4985_adapter[0] = *nforce2_smbus;
30648 s4985_adapter[0].algo = s4985_algo;
30649 s4985_adapter[0].dev.parent = nforce2_smbus->dev.parent;
30650 for (i = 1; i < 5; i++) {
30651 - s4985_algo[i] = *(nforce2_smbus->algo);
30652 + memcpy((void *)&s4985_algo[i], nforce2_smbus->algo, sizeof(s4985_algo[i]));
30653 s4985_adapter[i] = *nforce2_smbus;
30654 snprintf(s4985_adapter[i].name, sizeof(s4985_adapter[i].name),
30655 "SMBus nForce2 adapter (CPU%d)", i - 1);
30656 s4985_adapter[i].algo = s4985_algo + i;
30657 s4985_adapter[i].dev.parent = nforce2_smbus->dev.parent;
30658 }
30659 - s4985_algo[1].smbus_xfer = nforce2_access_virt1;
30660 - s4985_algo[2].smbus_xfer = nforce2_access_virt2;
30661 - s4985_algo[3].smbus_xfer = nforce2_access_virt3;
30662 - s4985_algo[4].smbus_xfer = nforce2_access_virt4;
30663 + *(void **)&s4985_algo[1].smbus_xfer = nforce2_access_virt1;
30664 + *(void **)&s4985_algo[2].smbus_xfer = nforce2_access_virt2;
30665 + *(void **)&s4985_algo[3].smbus_xfer = nforce2_access_virt3;
30666 + *(void **)&s4985_algo[4].smbus_xfer = nforce2_access_virt4;
30667
30668 /* Register virtual adapters */
30669 for (i = 0; i < 5; i++) {
30670 diff -urNp linux-2.6.32.44/drivers/ide/ide-cd.c linux-2.6.32.44/drivers/ide/ide-cd.c
30671 --- linux-2.6.32.44/drivers/ide/ide-cd.c 2011-03-27 14:31:47.000000000 -0400
30672 +++ linux-2.6.32.44/drivers/ide/ide-cd.c 2011-04-17 15:56:46.000000000 -0400
30673 @@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_
30674 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
30675 if ((unsigned long)buf & alignment
30676 || blk_rq_bytes(rq) & q->dma_pad_mask
30677 - || object_is_on_stack(buf))
30678 + || object_starts_on_stack(buf))
30679 drive->dma = 0;
30680 }
30681 }
30682 diff -urNp linux-2.6.32.44/drivers/ide/ide-floppy.c linux-2.6.32.44/drivers/ide/ide-floppy.c
30683 --- linux-2.6.32.44/drivers/ide/ide-floppy.c 2011-03-27 14:31:47.000000000 -0400
30684 +++ linux-2.6.32.44/drivers/ide/ide-floppy.c 2011-05-16 21:46:57.000000000 -0400
30685 @@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_d
30686 u8 pc_buf[256], header_len, desc_cnt;
30687 int i, rc = 1, blocks, length;
30688
30689 + pax_track_stack();
30690 +
30691 ide_debug_log(IDE_DBG_FUNC, "enter");
30692
30693 drive->bios_cyl = 0;
30694 diff -urNp linux-2.6.32.44/drivers/ide/setup-pci.c linux-2.6.32.44/drivers/ide/setup-pci.c
30695 --- linux-2.6.32.44/drivers/ide/setup-pci.c 2011-03-27 14:31:47.000000000 -0400
30696 +++ linux-2.6.32.44/drivers/ide/setup-pci.c 2011-05-16 21:46:57.000000000 -0400
30697 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
30698 int ret, i, n_ports = dev2 ? 4 : 2;
30699 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
30700
30701 + pax_track_stack();
30702 +
30703 for (i = 0; i < n_ports / 2; i++) {
30704 ret = ide_setup_pci_controller(pdev[i], d, !i);
30705 if (ret < 0)
30706 diff -urNp linux-2.6.32.44/drivers/ieee1394/dv1394.c linux-2.6.32.44/drivers/ieee1394/dv1394.c
30707 --- linux-2.6.32.44/drivers/ieee1394/dv1394.c 2011-03-27 14:31:47.000000000 -0400
30708 +++ linux-2.6.32.44/drivers/ieee1394/dv1394.c 2011-04-23 12:56:11.000000000 -0400
30709 @@ -739,7 +739,7 @@ static void frame_prepare(struct video_c
30710 based upon DIF section and sequence
30711 */
30712
30713 -static void inline
30714 +static inline void
30715 frame_put_packet (struct frame *f, struct packet *p)
30716 {
30717 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
30718 diff -urNp linux-2.6.32.44/drivers/ieee1394/hosts.c linux-2.6.32.44/drivers/ieee1394/hosts.c
30719 --- linux-2.6.32.44/drivers/ieee1394/hosts.c 2011-03-27 14:31:47.000000000 -0400
30720 +++ linux-2.6.32.44/drivers/ieee1394/hosts.c 2011-04-17 15:56:46.000000000 -0400
30721 @@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso
30722 }
30723
30724 static struct hpsb_host_driver dummy_driver = {
30725 + .name = "dummy",
30726 .transmit_packet = dummy_transmit_packet,
30727 .devctl = dummy_devctl,
30728 .isoctl = dummy_isoctl
30729 diff -urNp linux-2.6.32.44/drivers/ieee1394/init_ohci1394_dma.c linux-2.6.32.44/drivers/ieee1394/init_ohci1394_dma.c
30730 --- linux-2.6.32.44/drivers/ieee1394/init_ohci1394_dma.c 2011-03-27 14:31:47.000000000 -0400
30731 +++ linux-2.6.32.44/drivers/ieee1394/init_ohci1394_dma.c 2011-04-17 15:56:46.000000000 -0400
30732 @@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_con
30733 for (func = 0; func < 8; func++) {
30734 u32 class = read_pci_config(num,slot,func,
30735 PCI_CLASS_REVISION);
30736 - if ((class == 0xffffffff))
30737 + if (class == 0xffffffff)
30738 continue; /* No device at this func */
30739
30740 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
30741 diff -urNp linux-2.6.32.44/drivers/ieee1394/ohci1394.c linux-2.6.32.44/drivers/ieee1394/ohci1394.c
30742 --- linux-2.6.32.44/drivers/ieee1394/ohci1394.c 2011-03-27 14:31:47.000000000 -0400
30743 +++ linux-2.6.32.44/drivers/ieee1394/ohci1394.c 2011-04-23 12:56:11.000000000 -0400
30744 @@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_
30745 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
30746
30747 /* Module Parameters */
30748 -static int phys_dma = 1;
30749 +static int phys_dma;
30750 module_param(phys_dma, int, 0444);
30751 -MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
30752 +MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
30753
30754 static void dma_trm_tasklet(unsigned long data);
30755 static void dma_trm_reset(struct dma_trm_ctx *d);
30756 diff -urNp linux-2.6.32.44/drivers/ieee1394/sbp2.c linux-2.6.32.44/drivers/ieee1394/sbp2.c
30757 --- linux-2.6.32.44/drivers/ieee1394/sbp2.c 2011-03-27 14:31:47.000000000 -0400
30758 +++ linux-2.6.32.44/drivers/ieee1394/sbp2.c 2011-04-23 12:56:11.000000000 -0400
30759 @@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 prot
30760 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
30761 MODULE_LICENSE("GPL");
30762
30763 -static int sbp2_module_init(void)
30764 +static int __init sbp2_module_init(void)
30765 {
30766 int ret;
30767
30768 diff -urNp linux-2.6.32.44/drivers/infiniband/core/cm.c linux-2.6.32.44/drivers/infiniband/core/cm.c
30769 --- linux-2.6.32.44/drivers/infiniband/core/cm.c 2011-03-27 14:31:47.000000000 -0400
30770 +++ linux-2.6.32.44/drivers/infiniband/core/cm.c 2011-04-17 15:56:46.000000000 -0400
30771 @@ -112,7 +112,7 @@ static char const counter_group_names[CM
30772
30773 struct cm_counter_group {
30774 struct kobject obj;
30775 - atomic_long_t counter[CM_ATTR_COUNT];
30776 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
30777 };
30778
30779 struct cm_counter_attribute {
30780 @@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm
30781 struct ib_mad_send_buf *msg = NULL;
30782 int ret;
30783
30784 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30785 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30786 counter[CM_REQ_COUNTER]);
30787
30788 /* Quick state check to discard duplicate REQs. */
30789 @@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm
30790 if (!cm_id_priv)
30791 return;
30792
30793 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30794 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30795 counter[CM_REP_COUNTER]);
30796 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
30797 if (ret)
30798 @@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work
30799 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
30800 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
30801 spin_unlock_irq(&cm_id_priv->lock);
30802 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30803 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30804 counter[CM_RTU_COUNTER]);
30805 goto out;
30806 }
30807 @@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_wor
30808 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
30809 dreq_msg->local_comm_id);
30810 if (!cm_id_priv) {
30811 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30812 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30813 counter[CM_DREQ_COUNTER]);
30814 cm_issue_drep(work->port, work->mad_recv_wc);
30815 return -EINVAL;
30816 @@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_wor
30817 case IB_CM_MRA_REP_RCVD:
30818 break;
30819 case IB_CM_TIMEWAIT:
30820 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30821 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30822 counter[CM_DREQ_COUNTER]);
30823 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
30824 goto unlock;
30825 @@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_wor
30826 cm_free_msg(msg);
30827 goto deref;
30828 case IB_CM_DREQ_RCVD:
30829 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30830 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30831 counter[CM_DREQ_COUNTER]);
30832 goto unlock;
30833 default:
30834 @@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work
30835 ib_modify_mad(cm_id_priv->av.port->mad_agent,
30836 cm_id_priv->msg, timeout)) {
30837 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
30838 - atomic_long_inc(&work->port->
30839 + atomic_long_inc_unchecked(&work->port->
30840 counter_group[CM_RECV_DUPLICATES].
30841 counter[CM_MRA_COUNTER]);
30842 goto out;
30843 @@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work
30844 break;
30845 case IB_CM_MRA_REQ_RCVD:
30846 case IB_CM_MRA_REP_RCVD:
30847 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30848 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30849 counter[CM_MRA_COUNTER]);
30850 /* fall through */
30851 default:
30852 @@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work
30853 case IB_CM_LAP_IDLE:
30854 break;
30855 case IB_CM_MRA_LAP_SENT:
30856 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30857 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30858 counter[CM_LAP_COUNTER]);
30859 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
30860 goto unlock;
30861 @@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work
30862 cm_free_msg(msg);
30863 goto deref;
30864 case IB_CM_LAP_RCVD:
30865 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30866 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30867 counter[CM_LAP_COUNTER]);
30868 goto unlock;
30869 default:
30870 @@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm
30871 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
30872 if (cur_cm_id_priv) {
30873 spin_unlock_irq(&cm.lock);
30874 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30875 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30876 counter[CM_SIDR_REQ_COUNTER]);
30877 goto out; /* Duplicate message. */
30878 }
30879 @@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_ma
30880 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
30881 msg->retries = 1;
30882
30883 - atomic_long_add(1 + msg->retries,
30884 + atomic_long_add_unchecked(1 + msg->retries,
30885 &port->counter_group[CM_XMIT].counter[attr_index]);
30886 if (msg->retries)
30887 - atomic_long_add(msg->retries,
30888 + atomic_long_add_unchecked(msg->retries,
30889 &port->counter_group[CM_XMIT_RETRIES].
30890 counter[attr_index]);
30891
30892 @@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_ma
30893 }
30894
30895 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
30896 - atomic_long_inc(&port->counter_group[CM_RECV].
30897 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
30898 counter[attr_id - CM_ATTR_ID_OFFSET]);
30899
30900 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
30901 @@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct ko
30902 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
30903
30904 return sprintf(buf, "%ld\n",
30905 - atomic_long_read(&group->counter[cm_attr->index]));
30906 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
30907 }
30908
30909 -static struct sysfs_ops cm_counter_ops = {
30910 +static const struct sysfs_ops cm_counter_ops = {
30911 .show = cm_show_counter
30912 };
30913
30914 diff -urNp linux-2.6.32.44/drivers/infiniband/core/fmr_pool.c linux-2.6.32.44/drivers/infiniband/core/fmr_pool.c
30915 --- linux-2.6.32.44/drivers/infiniband/core/fmr_pool.c 2011-03-27 14:31:47.000000000 -0400
30916 +++ linux-2.6.32.44/drivers/infiniband/core/fmr_pool.c 2011-05-04 17:56:28.000000000 -0400
30917 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
30918
30919 struct task_struct *thread;
30920
30921 - atomic_t req_ser;
30922 - atomic_t flush_ser;
30923 + atomic_unchecked_t req_ser;
30924 + atomic_unchecked_t flush_ser;
30925
30926 wait_queue_head_t force_wait;
30927 };
30928 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
30929 struct ib_fmr_pool *pool = pool_ptr;
30930
30931 do {
30932 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
30933 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
30934 ib_fmr_batch_release(pool);
30935
30936 - atomic_inc(&pool->flush_ser);
30937 + atomic_inc_unchecked(&pool->flush_ser);
30938 wake_up_interruptible(&pool->force_wait);
30939
30940 if (pool->flush_function)
30941 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
30942 }
30943
30944 set_current_state(TASK_INTERRUPTIBLE);
30945 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
30946 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
30947 !kthread_should_stop())
30948 schedule();
30949 __set_current_state(TASK_RUNNING);
30950 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
30951 pool->dirty_watermark = params->dirty_watermark;
30952 pool->dirty_len = 0;
30953 spin_lock_init(&pool->pool_lock);
30954 - atomic_set(&pool->req_ser, 0);
30955 - atomic_set(&pool->flush_ser, 0);
30956 + atomic_set_unchecked(&pool->req_ser, 0);
30957 + atomic_set_unchecked(&pool->flush_ser, 0);
30958 init_waitqueue_head(&pool->force_wait);
30959
30960 pool->thread = kthread_run(ib_fmr_cleanup_thread,
30961 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
30962 }
30963 spin_unlock_irq(&pool->pool_lock);
30964
30965 - serial = atomic_inc_return(&pool->req_ser);
30966 + serial = atomic_inc_return_unchecked(&pool->req_ser);
30967 wake_up_process(pool->thread);
30968
30969 if (wait_event_interruptible(pool->force_wait,
30970 - atomic_read(&pool->flush_ser) - serial >= 0))
30971 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
30972 return -EINTR;
30973
30974 return 0;
30975 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
30976 } else {
30977 list_add_tail(&fmr->list, &pool->dirty_list);
30978 if (++pool->dirty_len >= pool->dirty_watermark) {
30979 - atomic_inc(&pool->req_ser);
30980 + atomic_inc_unchecked(&pool->req_ser);
30981 wake_up_process(pool->thread);
30982 }
30983 }
30984 diff -urNp linux-2.6.32.44/drivers/infiniband/core/sysfs.c linux-2.6.32.44/drivers/infiniband/core/sysfs.c
30985 --- linux-2.6.32.44/drivers/infiniband/core/sysfs.c 2011-03-27 14:31:47.000000000 -0400
30986 +++ linux-2.6.32.44/drivers/infiniband/core/sysfs.c 2011-04-17 15:56:46.000000000 -0400
30987 @@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kob
30988 return port_attr->show(p, port_attr, buf);
30989 }
30990
30991 -static struct sysfs_ops port_sysfs_ops = {
30992 +static const struct sysfs_ops port_sysfs_ops = {
30993 .show = port_attr_show
30994 };
30995
30996 diff -urNp linux-2.6.32.44/drivers/infiniband/core/uverbs_marshall.c linux-2.6.32.44/drivers/infiniband/core/uverbs_marshall.c
30997 --- linux-2.6.32.44/drivers/infiniband/core/uverbs_marshall.c 2011-03-27 14:31:47.000000000 -0400
30998 +++ linux-2.6.32.44/drivers/infiniband/core/uverbs_marshall.c 2011-04-17 15:56:46.000000000 -0400
30999 @@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_u
31000 dst->grh.sgid_index = src->grh.sgid_index;
31001 dst->grh.hop_limit = src->grh.hop_limit;
31002 dst->grh.traffic_class = src->grh.traffic_class;
31003 + memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
31004 dst->dlid = src->dlid;
31005 dst->sl = src->sl;
31006 dst->src_path_bits = src->src_path_bits;
31007 dst->static_rate = src->static_rate;
31008 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
31009 dst->port_num = src->port_num;
31010 + dst->reserved = 0;
31011 }
31012 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
31013
31014 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
31015 struct ib_qp_attr *src)
31016 {
31017 + dst->qp_state = src->qp_state;
31018 dst->cur_qp_state = src->cur_qp_state;
31019 dst->path_mtu = src->path_mtu;
31020 dst->path_mig_state = src->path_mig_state;
31021 @@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_u
31022 dst->rnr_retry = src->rnr_retry;
31023 dst->alt_port_num = src->alt_port_num;
31024 dst->alt_timeout = src->alt_timeout;
31025 + memset(dst->reserved, 0, sizeof(dst->reserved));
31026 }
31027 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
31028
31029 diff -urNp linux-2.6.32.44/drivers/infiniband/hw/ipath/ipath_fs.c linux-2.6.32.44/drivers/infiniband/hw/ipath/ipath_fs.c
31030 --- linux-2.6.32.44/drivers/infiniband/hw/ipath/ipath_fs.c 2011-03-27 14:31:47.000000000 -0400
31031 +++ linux-2.6.32.44/drivers/infiniband/hw/ipath/ipath_fs.c 2011-05-16 21:46:57.000000000 -0400
31032 @@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(stru
31033 struct infinipath_counters counters;
31034 struct ipath_devdata *dd;
31035
31036 + pax_track_stack();
31037 +
31038 dd = file->f_path.dentry->d_inode->i_private;
31039 dd->ipath_f_read_counters(dd, &counters);
31040
31041 diff -urNp linux-2.6.32.44/drivers/infiniband/hw/nes/nes.c linux-2.6.32.44/drivers/infiniband/hw/nes/nes.c
31042 --- linux-2.6.32.44/drivers/infiniband/hw/nes/nes.c 2011-03-27 14:31:47.000000000 -0400
31043 +++ linux-2.6.32.44/drivers/infiniband/hw/nes/nes.c 2011-05-04 17:56:28.000000000 -0400
31044 @@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
31045 LIST_HEAD(nes_adapter_list);
31046 static LIST_HEAD(nes_dev_list);
31047
31048 -atomic_t qps_destroyed;
31049 +atomic_unchecked_t qps_destroyed;
31050
31051 static unsigned int ee_flsh_adapter;
31052 static unsigned int sysfs_nonidx_addr;
31053 @@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(str
31054 struct nes_adapter *nesadapter = nesdev->nesadapter;
31055 u32 qp_id;
31056
31057 - atomic_inc(&qps_destroyed);
31058 + atomic_inc_unchecked(&qps_destroyed);
31059
31060 /* Free the control structures */
31061
31062 diff -urNp linux-2.6.32.44/drivers/infiniband/hw/nes/nes_cm.c linux-2.6.32.44/drivers/infiniband/hw/nes/nes_cm.c
31063 --- linux-2.6.32.44/drivers/infiniband/hw/nes/nes_cm.c 2011-03-27 14:31:47.000000000 -0400
31064 +++ linux-2.6.32.44/drivers/infiniband/hw/nes/nes_cm.c 2011-05-04 17:56:28.000000000 -0400
31065 @@ -69,11 +69,11 @@ u32 cm_packets_received;
31066 u32 cm_listens_created;
31067 u32 cm_listens_destroyed;
31068 u32 cm_backlog_drops;
31069 -atomic_t cm_loopbacks;
31070 -atomic_t cm_nodes_created;
31071 -atomic_t cm_nodes_destroyed;
31072 -atomic_t cm_accel_dropped_pkts;
31073 -atomic_t cm_resets_recvd;
31074 +atomic_unchecked_t cm_loopbacks;
31075 +atomic_unchecked_t cm_nodes_created;
31076 +atomic_unchecked_t cm_nodes_destroyed;
31077 +atomic_unchecked_t cm_accel_dropped_pkts;
31078 +atomic_unchecked_t cm_resets_recvd;
31079
31080 static inline int mini_cm_accelerated(struct nes_cm_core *,
31081 struct nes_cm_node *);
31082 @@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
31083
31084 static struct nes_cm_core *g_cm_core;
31085
31086 -atomic_t cm_connects;
31087 -atomic_t cm_accepts;
31088 -atomic_t cm_disconnects;
31089 -atomic_t cm_closes;
31090 -atomic_t cm_connecteds;
31091 -atomic_t cm_connect_reqs;
31092 -atomic_t cm_rejects;
31093 +atomic_unchecked_t cm_connects;
31094 +atomic_unchecked_t cm_accepts;
31095 +atomic_unchecked_t cm_disconnects;
31096 +atomic_unchecked_t cm_closes;
31097 +atomic_unchecked_t cm_connecteds;
31098 +atomic_unchecked_t cm_connect_reqs;
31099 +atomic_unchecked_t cm_rejects;
31100
31101
31102 /**
31103 @@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(
31104 cm_node->rem_mac);
31105
31106 add_hte_node(cm_core, cm_node);
31107 - atomic_inc(&cm_nodes_created);
31108 + atomic_inc_unchecked(&cm_nodes_created);
31109
31110 return cm_node;
31111 }
31112 @@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm
31113 }
31114
31115 atomic_dec(&cm_core->node_cnt);
31116 - atomic_inc(&cm_nodes_destroyed);
31117 + atomic_inc_unchecked(&cm_nodes_destroyed);
31118 nesqp = cm_node->nesqp;
31119 if (nesqp) {
31120 nesqp->cm_node = NULL;
31121 @@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm
31122
31123 static void drop_packet(struct sk_buff *skb)
31124 {
31125 - atomic_inc(&cm_accel_dropped_pkts);
31126 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
31127 dev_kfree_skb_any(skb);
31128 }
31129
31130 @@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm
31131
31132 int reset = 0; /* whether to send reset in case of err.. */
31133 int passive_state;
31134 - atomic_inc(&cm_resets_recvd);
31135 + atomic_inc_unchecked(&cm_resets_recvd);
31136 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
31137 " refcnt=%d\n", cm_node, cm_node->state,
31138 atomic_read(&cm_node->ref_count));
31139 @@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_conne
31140 rem_ref_cm_node(cm_node->cm_core, cm_node);
31141 return NULL;
31142 }
31143 - atomic_inc(&cm_loopbacks);
31144 + atomic_inc_unchecked(&cm_loopbacks);
31145 loopbackremotenode->loopbackpartner = cm_node;
31146 loopbackremotenode->tcp_cntxt.rcv_wscale =
31147 NES_CM_DEFAULT_RCV_WND_SCALE;
31148 @@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_c
31149 add_ref_cm_node(cm_node);
31150 } else if (cm_node->state == NES_CM_STATE_TSA) {
31151 rem_ref_cm_node(cm_core, cm_node);
31152 - atomic_inc(&cm_accel_dropped_pkts);
31153 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
31154 dev_kfree_skb_any(skb);
31155 break;
31156 }
31157 @@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct ne
31158
31159 if ((cm_id) && (cm_id->event_handler)) {
31160 if (issue_disconn) {
31161 - atomic_inc(&cm_disconnects);
31162 + atomic_inc_unchecked(&cm_disconnects);
31163 cm_event.event = IW_CM_EVENT_DISCONNECT;
31164 cm_event.status = disconn_status;
31165 cm_event.local_addr = cm_id->local_addr;
31166 @@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct ne
31167 }
31168
31169 if (issue_close) {
31170 - atomic_inc(&cm_closes);
31171 + atomic_inc_unchecked(&cm_closes);
31172 nes_disconnect(nesqp, 1);
31173
31174 cm_id->provider_data = nesqp;
31175 @@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
31176
31177 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
31178 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
31179 - atomic_inc(&cm_accepts);
31180 + atomic_inc_unchecked(&cm_accepts);
31181
31182 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
31183 atomic_read(&nesvnic->netdev->refcnt));
31184 @@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
31185
31186 struct nes_cm_core *cm_core;
31187
31188 - atomic_inc(&cm_rejects);
31189 + atomic_inc_unchecked(&cm_rejects);
31190 cm_node = (struct nes_cm_node *) cm_id->provider_data;
31191 loopback = cm_node->loopbackpartner;
31192 cm_core = cm_node->cm_core;
31193 @@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id,
31194 ntohl(cm_id->local_addr.sin_addr.s_addr),
31195 ntohs(cm_id->local_addr.sin_port));
31196
31197 - atomic_inc(&cm_connects);
31198 + atomic_inc_unchecked(&cm_connects);
31199 nesqp->active_conn = 1;
31200
31201 /* cache the cm_id in the qp */
31202 @@ -3195,7 +3195,7 @@ static void cm_event_connected(struct ne
31203 if (nesqp->destroyed) {
31204 return;
31205 }
31206 - atomic_inc(&cm_connecteds);
31207 + atomic_inc_unchecked(&cm_connecteds);
31208 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
31209 " local port 0x%04X. jiffies = %lu.\n",
31210 nesqp->hwqp.qp_id,
31211 @@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm
31212
31213 ret = cm_id->event_handler(cm_id, &cm_event);
31214 cm_id->add_ref(cm_id);
31215 - atomic_inc(&cm_closes);
31216 + atomic_inc_unchecked(&cm_closes);
31217 cm_event.event = IW_CM_EVENT_CLOSE;
31218 cm_event.status = IW_CM_EVENT_STATUS_OK;
31219 cm_event.provider_data = cm_id->provider_data;
31220 @@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_
31221 return;
31222 cm_id = cm_node->cm_id;
31223
31224 - atomic_inc(&cm_connect_reqs);
31225 + atomic_inc_unchecked(&cm_connect_reqs);
31226 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
31227 cm_node, cm_id, jiffies);
31228
31229 @@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct n
31230 return;
31231 cm_id = cm_node->cm_id;
31232
31233 - atomic_inc(&cm_connect_reqs);
31234 + atomic_inc_unchecked(&cm_connect_reqs);
31235 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
31236 cm_node, cm_id, jiffies);
31237
31238 diff -urNp linux-2.6.32.44/drivers/infiniband/hw/nes/nes.h linux-2.6.32.44/drivers/infiniband/hw/nes/nes.h
31239 --- linux-2.6.32.44/drivers/infiniband/hw/nes/nes.h 2011-03-27 14:31:47.000000000 -0400
31240 +++ linux-2.6.32.44/drivers/infiniband/hw/nes/nes.h 2011-05-04 17:56:28.000000000 -0400
31241 @@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
31242 extern unsigned int wqm_quanta;
31243 extern struct list_head nes_adapter_list;
31244
31245 -extern atomic_t cm_connects;
31246 -extern atomic_t cm_accepts;
31247 -extern atomic_t cm_disconnects;
31248 -extern atomic_t cm_closes;
31249 -extern atomic_t cm_connecteds;
31250 -extern atomic_t cm_connect_reqs;
31251 -extern atomic_t cm_rejects;
31252 -extern atomic_t mod_qp_timouts;
31253 -extern atomic_t qps_created;
31254 -extern atomic_t qps_destroyed;
31255 -extern atomic_t sw_qps_destroyed;
31256 +extern atomic_unchecked_t cm_connects;
31257 +extern atomic_unchecked_t cm_accepts;
31258 +extern atomic_unchecked_t cm_disconnects;
31259 +extern atomic_unchecked_t cm_closes;
31260 +extern atomic_unchecked_t cm_connecteds;
31261 +extern atomic_unchecked_t cm_connect_reqs;
31262 +extern atomic_unchecked_t cm_rejects;
31263 +extern atomic_unchecked_t mod_qp_timouts;
31264 +extern atomic_unchecked_t qps_created;
31265 +extern atomic_unchecked_t qps_destroyed;
31266 +extern atomic_unchecked_t sw_qps_destroyed;
31267 extern u32 mh_detected;
31268 extern u32 mh_pauses_sent;
31269 extern u32 cm_packets_sent;
31270 @@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
31271 extern u32 cm_listens_created;
31272 extern u32 cm_listens_destroyed;
31273 extern u32 cm_backlog_drops;
31274 -extern atomic_t cm_loopbacks;
31275 -extern atomic_t cm_nodes_created;
31276 -extern atomic_t cm_nodes_destroyed;
31277 -extern atomic_t cm_accel_dropped_pkts;
31278 -extern atomic_t cm_resets_recvd;
31279 +extern atomic_unchecked_t cm_loopbacks;
31280 +extern atomic_unchecked_t cm_nodes_created;
31281 +extern atomic_unchecked_t cm_nodes_destroyed;
31282 +extern atomic_unchecked_t cm_accel_dropped_pkts;
31283 +extern atomic_unchecked_t cm_resets_recvd;
31284
31285 extern u32 int_mod_timer_init;
31286 extern u32 int_mod_cq_depth_256;
31287 diff -urNp linux-2.6.32.44/drivers/infiniband/hw/nes/nes_nic.c linux-2.6.32.44/drivers/infiniband/hw/nes/nes_nic.c
31288 --- linux-2.6.32.44/drivers/infiniband/hw/nes/nes_nic.c 2011-03-27 14:31:47.000000000 -0400
31289 +++ linux-2.6.32.44/drivers/infiniband/hw/nes/nes_nic.c 2011-05-04 17:56:28.000000000 -0400
31290 @@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats
31291 target_stat_values[++index] = mh_detected;
31292 target_stat_values[++index] = mh_pauses_sent;
31293 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
31294 - target_stat_values[++index] = atomic_read(&cm_connects);
31295 - target_stat_values[++index] = atomic_read(&cm_accepts);
31296 - target_stat_values[++index] = atomic_read(&cm_disconnects);
31297 - target_stat_values[++index] = atomic_read(&cm_connecteds);
31298 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
31299 - target_stat_values[++index] = atomic_read(&cm_rejects);
31300 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
31301 - target_stat_values[++index] = atomic_read(&qps_created);
31302 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
31303 - target_stat_values[++index] = atomic_read(&qps_destroyed);
31304 - target_stat_values[++index] = atomic_read(&cm_closes);
31305 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
31306 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
31307 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
31308 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
31309 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
31310 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
31311 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
31312 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
31313 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
31314 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
31315 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
31316 target_stat_values[++index] = cm_packets_sent;
31317 target_stat_values[++index] = cm_packets_bounced;
31318 target_stat_values[++index] = cm_packets_created;
31319 @@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats
31320 target_stat_values[++index] = cm_listens_created;
31321 target_stat_values[++index] = cm_listens_destroyed;
31322 target_stat_values[++index] = cm_backlog_drops;
31323 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
31324 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
31325 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
31326 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
31327 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
31328 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
31329 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
31330 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
31331 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
31332 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
31333 target_stat_values[++index] = int_mod_timer_init;
31334 target_stat_values[++index] = int_mod_cq_depth_1;
31335 target_stat_values[++index] = int_mod_cq_depth_4;
31336 diff -urNp linux-2.6.32.44/drivers/infiniband/hw/nes/nes_verbs.c linux-2.6.32.44/drivers/infiniband/hw/nes/nes_verbs.c
31337 --- linux-2.6.32.44/drivers/infiniband/hw/nes/nes_verbs.c 2011-03-27 14:31:47.000000000 -0400
31338 +++ linux-2.6.32.44/drivers/infiniband/hw/nes/nes_verbs.c 2011-05-04 17:56:28.000000000 -0400
31339 @@ -45,9 +45,9 @@
31340
31341 #include <rdma/ib_umem.h>
31342
31343 -atomic_t mod_qp_timouts;
31344 -atomic_t qps_created;
31345 -atomic_t sw_qps_destroyed;
31346 +atomic_unchecked_t mod_qp_timouts;
31347 +atomic_unchecked_t qps_created;
31348 +atomic_unchecked_t sw_qps_destroyed;
31349
31350 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
31351
31352 @@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struc
31353 if (init_attr->create_flags)
31354 return ERR_PTR(-EINVAL);
31355
31356 - atomic_inc(&qps_created);
31357 + atomic_inc_unchecked(&qps_created);
31358 switch (init_attr->qp_type) {
31359 case IB_QPT_RC:
31360 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
31361 @@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *
31362 struct iw_cm_event cm_event;
31363 int ret;
31364
31365 - atomic_inc(&sw_qps_destroyed);
31366 + atomic_inc_unchecked(&sw_qps_destroyed);
31367 nesqp->destroyed = 1;
31368
31369 /* Blow away the connection if it exists. */
31370 diff -urNp linux-2.6.32.44/drivers/input/gameport/gameport.c linux-2.6.32.44/drivers/input/gameport/gameport.c
31371 --- linux-2.6.32.44/drivers/input/gameport/gameport.c 2011-03-27 14:31:47.000000000 -0400
31372 +++ linux-2.6.32.44/drivers/input/gameport/gameport.c 2011-05-04 17:56:28.000000000 -0400
31373 @@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
31374 */
31375 static void gameport_init_port(struct gameport *gameport)
31376 {
31377 - static atomic_t gameport_no = ATOMIC_INIT(0);
31378 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
31379
31380 __module_get(THIS_MODULE);
31381
31382 mutex_init(&gameport->drv_mutex);
31383 device_initialize(&gameport->dev);
31384 - dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
31385 + dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
31386 gameport->dev.bus = &gameport_bus;
31387 gameport->dev.release = gameport_release_port;
31388 if (gameport->parent)
31389 diff -urNp linux-2.6.32.44/drivers/input/input.c linux-2.6.32.44/drivers/input/input.c
31390 --- linux-2.6.32.44/drivers/input/input.c 2011-03-27 14:31:47.000000000 -0400
31391 +++ linux-2.6.32.44/drivers/input/input.c 2011-05-04 17:56:28.000000000 -0400
31392 @@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
31393 */
31394 int input_register_device(struct input_dev *dev)
31395 {
31396 - static atomic_t input_no = ATOMIC_INIT(0);
31397 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
31398 struct input_handler *handler;
31399 const char *path;
31400 int error;
31401 @@ -1585,7 +1585,7 @@ int input_register_device(struct input_d
31402 dev->setkeycode = input_default_setkeycode;
31403
31404 dev_set_name(&dev->dev, "input%ld",
31405 - (unsigned long) atomic_inc_return(&input_no) - 1);
31406 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
31407
31408 error = device_add(&dev->dev);
31409 if (error)
31410 diff -urNp linux-2.6.32.44/drivers/input/joystick/sidewinder.c linux-2.6.32.44/drivers/input/joystick/sidewinder.c
31411 --- linux-2.6.32.44/drivers/input/joystick/sidewinder.c 2011-03-27 14:31:47.000000000 -0400
31412 +++ linux-2.6.32.44/drivers/input/joystick/sidewinder.c 2011-05-18 20:09:36.000000000 -0400
31413 @@ -30,6 +30,7 @@
31414 #include <linux/kernel.h>
31415 #include <linux/module.h>
31416 #include <linux/slab.h>
31417 +#include <linux/sched.h>
31418 #include <linux/init.h>
31419 #include <linux/input.h>
31420 #include <linux/gameport.h>
31421 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
31422 unsigned char buf[SW_LENGTH];
31423 int i;
31424
31425 + pax_track_stack();
31426 +
31427 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
31428
31429 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
31430 diff -urNp linux-2.6.32.44/drivers/input/joystick/xpad.c linux-2.6.32.44/drivers/input/joystick/xpad.c
31431 --- linux-2.6.32.44/drivers/input/joystick/xpad.c 2011-03-27 14:31:47.000000000 -0400
31432 +++ linux-2.6.32.44/drivers/input/joystick/xpad.c 2011-05-04 17:56:28.000000000 -0400
31433 @@ -621,7 +621,7 @@ static void xpad_led_set(struct led_clas
31434
31435 static int xpad_led_probe(struct usb_xpad *xpad)
31436 {
31437 - static atomic_t led_seq = ATOMIC_INIT(0);
31438 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
31439 long led_no;
31440 struct xpad_led *led;
31441 struct led_classdev *led_cdev;
31442 @@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpa
31443 if (!led)
31444 return -ENOMEM;
31445
31446 - led_no = (long)atomic_inc_return(&led_seq) - 1;
31447 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
31448
31449 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
31450 led->xpad = xpad;
31451 diff -urNp linux-2.6.32.44/drivers/input/serio/serio.c linux-2.6.32.44/drivers/input/serio/serio.c
31452 --- linux-2.6.32.44/drivers/input/serio/serio.c 2011-03-27 14:31:47.000000000 -0400
31453 +++ linux-2.6.32.44/drivers/input/serio/serio.c 2011-05-04 17:56:28.000000000 -0400
31454 @@ -527,7 +527,7 @@ static void serio_release_port(struct de
31455 */
31456 static void serio_init_port(struct serio *serio)
31457 {
31458 - static atomic_t serio_no = ATOMIC_INIT(0);
31459 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
31460
31461 __module_get(THIS_MODULE);
31462
31463 @@ -536,7 +536,7 @@ static void serio_init_port(struct serio
31464 mutex_init(&serio->drv_mutex);
31465 device_initialize(&serio->dev);
31466 dev_set_name(&serio->dev, "serio%ld",
31467 - (long)atomic_inc_return(&serio_no) - 1);
31468 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
31469 serio->dev.bus = &serio_bus;
31470 serio->dev.release = serio_release_port;
31471 if (serio->parent) {
31472 diff -urNp linux-2.6.32.44/drivers/isdn/gigaset/common.c linux-2.6.32.44/drivers/isdn/gigaset/common.c
31473 --- linux-2.6.32.44/drivers/isdn/gigaset/common.c 2011-03-27 14:31:47.000000000 -0400
31474 +++ linux-2.6.32.44/drivers/isdn/gigaset/common.c 2011-04-17 15:56:46.000000000 -0400
31475 @@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct
31476 cs->commands_pending = 0;
31477 cs->cur_at_seq = 0;
31478 cs->gotfwver = -1;
31479 - cs->open_count = 0;
31480 + local_set(&cs->open_count, 0);
31481 cs->dev = NULL;
31482 cs->tty = NULL;
31483 cs->tty_dev = NULL;
31484 diff -urNp linux-2.6.32.44/drivers/isdn/gigaset/gigaset.h linux-2.6.32.44/drivers/isdn/gigaset/gigaset.h
31485 --- linux-2.6.32.44/drivers/isdn/gigaset/gigaset.h 2011-03-27 14:31:47.000000000 -0400
31486 +++ linux-2.6.32.44/drivers/isdn/gigaset/gigaset.h 2011-04-17 15:56:46.000000000 -0400
31487 @@ -34,6 +34,7 @@
31488 #include <linux/tty_driver.h>
31489 #include <linux/list.h>
31490 #include <asm/atomic.h>
31491 +#include <asm/local.h>
31492
31493 #define GIG_VERSION {0,5,0,0}
31494 #define GIG_COMPAT {0,4,0,0}
31495 @@ -446,7 +447,7 @@ struct cardstate {
31496 spinlock_t cmdlock;
31497 unsigned curlen, cmdbytes;
31498
31499 - unsigned open_count;
31500 + local_t open_count;
31501 struct tty_struct *tty;
31502 struct tasklet_struct if_wake_tasklet;
31503 unsigned control_state;
31504 diff -urNp linux-2.6.32.44/drivers/isdn/gigaset/interface.c linux-2.6.32.44/drivers/isdn/gigaset/interface.c
31505 --- linux-2.6.32.44/drivers/isdn/gigaset/interface.c 2011-03-27 14:31:47.000000000 -0400
31506 +++ linux-2.6.32.44/drivers/isdn/gigaset/interface.c 2011-04-17 15:56:46.000000000 -0400
31507 @@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tt
31508 return -ERESTARTSYS; // FIXME -EINTR?
31509 tty->driver_data = cs;
31510
31511 - ++cs->open_count;
31512 -
31513 - if (cs->open_count == 1) {
31514 + if (local_inc_return(&cs->open_count) == 1) {
31515 spin_lock_irqsave(&cs->lock, flags);
31516 cs->tty = tty;
31517 spin_unlock_irqrestore(&cs->lock, flags);
31518 @@ -195,10 +193,10 @@ static void if_close(struct tty_struct *
31519
31520 if (!cs->connected)
31521 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
31522 - else if (!cs->open_count)
31523 + else if (!local_read(&cs->open_count))
31524 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31525 else {
31526 - if (!--cs->open_count) {
31527 + if (!local_dec_return(&cs->open_count)) {
31528 spin_lock_irqsave(&cs->lock, flags);
31529 cs->tty = NULL;
31530 spin_unlock_irqrestore(&cs->lock, flags);
31531 @@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *t
31532 if (!cs->connected) {
31533 gig_dbg(DEBUG_IF, "not connected");
31534 retval = -ENODEV;
31535 - } else if (!cs->open_count)
31536 + } else if (!local_read(&cs->open_count))
31537 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31538 else {
31539 retval = 0;
31540 @@ -361,7 +359,7 @@ static int if_write(struct tty_struct *t
31541 if (!cs->connected) {
31542 gig_dbg(DEBUG_IF, "not connected");
31543 retval = -ENODEV;
31544 - } else if (!cs->open_count)
31545 + } else if (!local_read(&cs->open_count))
31546 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31547 else if (cs->mstate != MS_LOCKED) {
31548 dev_warn(cs->dev, "can't write to unlocked device\n");
31549 @@ -395,7 +393,7 @@ static int if_write_room(struct tty_stru
31550 if (!cs->connected) {
31551 gig_dbg(DEBUG_IF, "not connected");
31552 retval = -ENODEV;
31553 - } else if (!cs->open_count)
31554 + } else if (!local_read(&cs->open_count))
31555 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31556 else if (cs->mstate != MS_LOCKED) {
31557 dev_warn(cs->dev, "can't write to unlocked device\n");
31558 @@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty
31559
31560 if (!cs->connected)
31561 gig_dbg(DEBUG_IF, "not connected");
31562 - else if (!cs->open_count)
31563 + else if (!local_read(&cs->open_count))
31564 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31565 else if (cs->mstate != MS_LOCKED)
31566 dev_warn(cs->dev, "can't write to unlocked device\n");
31567 @@ -453,7 +451,7 @@ static void if_throttle(struct tty_struc
31568
31569 if (!cs->connected)
31570 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
31571 - else if (!cs->open_count)
31572 + else if (!local_read(&cs->open_count))
31573 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31574 else {
31575 //FIXME
31576 @@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_str
31577
31578 if (!cs->connected)
31579 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
31580 - else if (!cs->open_count)
31581 + else if (!local_read(&cs->open_count))
31582 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31583 else {
31584 //FIXME
31585 @@ -510,7 +508,7 @@ static void if_set_termios(struct tty_st
31586 goto out;
31587 }
31588
31589 - if (!cs->open_count) {
31590 + if (!local_read(&cs->open_count)) {
31591 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31592 goto out;
31593 }
31594 diff -urNp linux-2.6.32.44/drivers/isdn/hardware/avm/b1.c linux-2.6.32.44/drivers/isdn/hardware/avm/b1.c
31595 --- linux-2.6.32.44/drivers/isdn/hardware/avm/b1.c 2011-03-27 14:31:47.000000000 -0400
31596 +++ linux-2.6.32.44/drivers/isdn/hardware/avm/b1.c 2011-04-17 15:56:46.000000000 -0400
31597 @@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capilo
31598 }
31599 if (left) {
31600 if (t4file->user) {
31601 - if (copy_from_user(buf, dp, left))
31602 + if (left > sizeof buf || copy_from_user(buf, dp, left))
31603 return -EFAULT;
31604 } else {
31605 memcpy(buf, dp, left);
31606 @@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capilo
31607 }
31608 if (left) {
31609 if (config->user) {
31610 - if (copy_from_user(buf, dp, left))
31611 + if (left > sizeof buf || copy_from_user(buf, dp, left))
31612 return -EFAULT;
31613 } else {
31614 memcpy(buf, dp, left);
31615 diff -urNp linux-2.6.32.44/drivers/isdn/hardware/eicon/capidtmf.c linux-2.6.32.44/drivers/isdn/hardware/eicon/capidtmf.c
31616 --- linux-2.6.32.44/drivers/isdn/hardware/eicon/capidtmf.c 2011-03-27 14:31:47.000000000 -0400
31617 +++ linux-2.6.32.44/drivers/isdn/hardware/eicon/capidtmf.c 2011-05-16 21:46:57.000000000 -0400
31618 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
31619 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
31620 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
31621
31622 + pax_track_stack();
31623
31624 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
31625 {
31626 diff -urNp linux-2.6.32.44/drivers/isdn/hardware/eicon/capifunc.c linux-2.6.32.44/drivers/isdn/hardware/eicon/capifunc.c
31627 --- linux-2.6.32.44/drivers/isdn/hardware/eicon/capifunc.c 2011-03-27 14:31:47.000000000 -0400
31628 +++ linux-2.6.32.44/drivers/isdn/hardware/eicon/capifunc.c 2011-05-16 21:46:57.000000000 -0400
31629 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
31630 IDI_SYNC_REQ req;
31631 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31632
31633 + pax_track_stack();
31634 +
31635 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31636
31637 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31638 diff -urNp linux-2.6.32.44/drivers/isdn/hardware/eicon/diddfunc.c linux-2.6.32.44/drivers/isdn/hardware/eicon/diddfunc.c
31639 --- linux-2.6.32.44/drivers/isdn/hardware/eicon/diddfunc.c 2011-03-27 14:31:47.000000000 -0400
31640 +++ linux-2.6.32.44/drivers/isdn/hardware/eicon/diddfunc.c 2011-05-16 21:46:57.000000000 -0400
31641 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
31642 IDI_SYNC_REQ req;
31643 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31644
31645 + pax_track_stack();
31646 +
31647 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31648
31649 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31650 diff -urNp linux-2.6.32.44/drivers/isdn/hardware/eicon/divasfunc.c linux-2.6.32.44/drivers/isdn/hardware/eicon/divasfunc.c
31651 --- linux-2.6.32.44/drivers/isdn/hardware/eicon/divasfunc.c 2011-03-27 14:31:47.000000000 -0400
31652 +++ linux-2.6.32.44/drivers/isdn/hardware/eicon/divasfunc.c 2011-05-16 21:46:57.000000000 -0400
31653 @@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_di
31654 IDI_SYNC_REQ req;
31655 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31656
31657 + pax_track_stack();
31658 +
31659 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31660
31661 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31662 diff -urNp linux-2.6.32.44/drivers/isdn/hardware/eicon/divasync.h linux-2.6.32.44/drivers/isdn/hardware/eicon/divasync.h
31663 --- linux-2.6.32.44/drivers/isdn/hardware/eicon/divasync.h 2011-03-27 14:31:47.000000000 -0400
31664 +++ linux-2.6.32.44/drivers/isdn/hardware/eicon/divasync.h 2011-08-05 20:33:55.000000000 -0400
31665 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
31666 } diva_didd_add_adapter_t;
31667 typedef struct _diva_didd_remove_adapter {
31668 IDI_CALL p_request;
31669 -} diva_didd_remove_adapter_t;
31670 +} __no_const diva_didd_remove_adapter_t;
31671 typedef struct _diva_didd_read_adapter_array {
31672 void * buffer;
31673 dword length;
31674 diff -urNp linux-2.6.32.44/drivers/isdn/hardware/eicon/idifunc.c linux-2.6.32.44/drivers/isdn/hardware/eicon/idifunc.c
31675 --- linux-2.6.32.44/drivers/isdn/hardware/eicon/idifunc.c 2011-03-27 14:31:47.000000000 -0400
31676 +++ linux-2.6.32.44/drivers/isdn/hardware/eicon/idifunc.c 2011-05-16 21:46:57.000000000 -0400
31677 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
31678 IDI_SYNC_REQ req;
31679 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31680
31681 + pax_track_stack();
31682 +
31683 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31684
31685 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31686 diff -urNp linux-2.6.32.44/drivers/isdn/hardware/eicon/message.c linux-2.6.32.44/drivers/isdn/hardware/eicon/message.c
31687 --- linux-2.6.32.44/drivers/isdn/hardware/eicon/message.c 2011-03-27 14:31:47.000000000 -0400
31688 +++ linux-2.6.32.44/drivers/isdn/hardware/eicon/message.c 2011-05-16 21:46:57.000000000 -0400
31689 @@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
31690 dword d;
31691 word w;
31692
31693 + pax_track_stack();
31694 +
31695 a = plci->adapter;
31696 Id = ((word)plci->Id<<8)|a->Id;
31697 PUT_WORD(&SS_Ind[4],0x0000);
31698 @@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE
31699 word j, n, w;
31700 dword d;
31701
31702 + pax_track_stack();
31703 +
31704
31705 for(i=0;i<8;i++) bp_parms[i].length = 0;
31706 for(i=0;i<2;i++) global_config[i].length = 0;
31707 @@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARS
31708 const byte llc3[] = {4,3,2,2,6,6,0};
31709 const byte header[] = {0,2,3,3,0,0,0};
31710
31711 + pax_track_stack();
31712 +
31713 for(i=0;i<8;i++) bp_parms[i].length = 0;
31714 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
31715 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
31716 @@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI
31717 word appl_number_group_type[MAX_APPL];
31718 PLCI *auxplci;
31719
31720 + pax_track_stack();
31721 +
31722 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
31723
31724 if(!a->group_optimization_enabled)
31725 diff -urNp linux-2.6.32.44/drivers/isdn/hardware/eicon/mntfunc.c linux-2.6.32.44/drivers/isdn/hardware/eicon/mntfunc.c
31726 --- linux-2.6.32.44/drivers/isdn/hardware/eicon/mntfunc.c 2011-03-27 14:31:47.000000000 -0400
31727 +++ linux-2.6.32.44/drivers/isdn/hardware/eicon/mntfunc.c 2011-05-16 21:46:57.000000000 -0400
31728 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
31729 IDI_SYNC_REQ req;
31730 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31731
31732 + pax_track_stack();
31733 +
31734 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31735
31736 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31737 diff -urNp linux-2.6.32.44/drivers/isdn/hardware/eicon/xdi_adapter.h linux-2.6.32.44/drivers/isdn/hardware/eicon/xdi_adapter.h
31738 --- linux-2.6.32.44/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-03-27 14:31:47.000000000 -0400
31739 +++ linux-2.6.32.44/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-08-05 20:33:55.000000000 -0400
31740 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
31741 typedef struct _diva_os_idi_adapter_interface {
31742 diva_init_card_proc_t cleanup_adapter_proc;
31743 diva_cmd_card_proc_t cmd_proc;
31744 -} diva_os_idi_adapter_interface_t;
31745 +} __no_const diva_os_idi_adapter_interface_t;
31746
31747 typedef struct _diva_os_xdi_adapter {
31748 struct list_head link;
31749 diff -urNp linux-2.6.32.44/drivers/isdn/i4l/isdn_common.c linux-2.6.32.44/drivers/isdn/i4l/isdn_common.c
31750 --- linux-2.6.32.44/drivers/isdn/i4l/isdn_common.c 2011-03-27 14:31:47.000000000 -0400
31751 +++ linux-2.6.32.44/drivers/isdn/i4l/isdn_common.c 2011-05-16 21:46:57.000000000 -0400
31752 @@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct f
31753 } iocpar;
31754 void __user *argp = (void __user *)arg;
31755
31756 + pax_track_stack();
31757 +
31758 #define name iocpar.name
31759 #define bname iocpar.bname
31760 #define iocts iocpar.iocts
31761 diff -urNp linux-2.6.32.44/drivers/isdn/icn/icn.c linux-2.6.32.44/drivers/isdn/icn/icn.c
31762 --- linux-2.6.32.44/drivers/isdn/icn/icn.c 2011-03-27 14:31:47.000000000 -0400
31763 +++ linux-2.6.32.44/drivers/isdn/icn/icn.c 2011-04-17 15:56:46.000000000 -0400
31764 @@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len
31765 if (count > len)
31766 count = len;
31767 if (user) {
31768 - if (copy_from_user(msg, buf, count))
31769 + if (count > sizeof msg || copy_from_user(msg, buf, count))
31770 return -EFAULT;
31771 } else
31772 memcpy(msg, buf, count);
31773 diff -urNp linux-2.6.32.44/drivers/isdn/mISDN/socket.c linux-2.6.32.44/drivers/isdn/mISDN/socket.c
31774 --- linux-2.6.32.44/drivers/isdn/mISDN/socket.c 2011-03-27 14:31:47.000000000 -0400
31775 +++ linux-2.6.32.44/drivers/isdn/mISDN/socket.c 2011-04-17 15:56:46.000000000 -0400
31776 @@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, uns
31777 if (dev) {
31778 struct mISDN_devinfo di;
31779
31780 + memset(&di, 0, sizeof(di));
31781 di.id = dev->id;
31782 di.Dprotocols = dev->Dprotocols;
31783 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
31784 @@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, uns
31785 if (dev) {
31786 struct mISDN_devinfo di;
31787
31788 + memset(&di, 0, sizeof(di));
31789 di.id = dev->id;
31790 di.Dprotocols = dev->Dprotocols;
31791 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
31792 diff -urNp linux-2.6.32.44/drivers/isdn/sc/interrupt.c linux-2.6.32.44/drivers/isdn/sc/interrupt.c
31793 --- linux-2.6.32.44/drivers/isdn/sc/interrupt.c 2011-03-27 14:31:47.000000000 -0400
31794 +++ linux-2.6.32.44/drivers/isdn/sc/interrupt.c 2011-04-17 15:56:46.000000000 -0400
31795 @@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy,
31796 }
31797 else if(callid>=0x0000 && callid<=0x7FFF)
31798 {
31799 + int len;
31800 +
31801 pr_debug("%s: Got Incoming Call\n",
31802 sc_adapter[card]->devicename);
31803 - strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
31804 - strcpy(setup.eazmsn,
31805 - sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
31806 + len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
31807 + sizeof(setup.phone));
31808 + if (len >= sizeof(setup.phone))
31809 + continue;
31810 + len = strlcpy(setup.eazmsn,
31811 + sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
31812 + sizeof(setup.eazmsn));
31813 + if (len >= sizeof(setup.eazmsn))
31814 + continue;
31815 setup.si1 = 7;
31816 setup.si2 = 0;
31817 setup.plan = 0;
31818 @@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy,
31819 * Handle a GetMyNumber Rsp
31820 */
31821 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
31822 - strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
31823 + strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
31824 + rcvmsg.msg_data.byte_array,
31825 + sizeof(rcvmsg.msg_data.byte_array));
31826 continue;
31827 }
31828
31829 diff -urNp linux-2.6.32.44/drivers/lguest/core.c linux-2.6.32.44/drivers/lguest/core.c
31830 --- linux-2.6.32.44/drivers/lguest/core.c 2011-03-27 14:31:47.000000000 -0400
31831 +++ linux-2.6.32.44/drivers/lguest/core.c 2011-04-17 15:56:46.000000000 -0400
31832 @@ -91,9 +91,17 @@ static __init int map_switcher(void)
31833 * it's worked so far. The end address needs +1 because __get_vm_area
31834 * allocates an extra guard page, so we need space for that.
31835 */
31836 +
31837 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31838 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
31839 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
31840 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
31841 +#else
31842 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
31843 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
31844 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
31845 +#endif
31846 +
31847 if (!switcher_vma) {
31848 err = -ENOMEM;
31849 printk("lguest: could not map switcher pages high\n");
31850 @@ -118,7 +126,7 @@ static __init int map_switcher(void)
31851 * Now the Switcher is mapped at the right address, we can't fail!
31852 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
31853 */
31854 - memcpy(switcher_vma->addr, start_switcher_text,
31855 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
31856 end_switcher_text - start_switcher_text);
31857
31858 printk(KERN_INFO "lguest: mapped switcher at %p\n",
31859 diff -urNp linux-2.6.32.44/drivers/lguest/x86/core.c linux-2.6.32.44/drivers/lguest/x86/core.c
31860 --- linux-2.6.32.44/drivers/lguest/x86/core.c 2011-03-27 14:31:47.000000000 -0400
31861 +++ linux-2.6.32.44/drivers/lguest/x86/core.c 2011-04-17 15:56:46.000000000 -0400
31862 @@ -59,7 +59,7 @@ static struct {
31863 /* Offset from where switcher.S was compiled to where we've copied it */
31864 static unsigned long switcher_offset(void)
31865 {
31866 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
31867 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
31868 }
31869
31870 /* This cpu's struct lguest_pages. */
31871 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
31872 * These copies are pretty cheap, so we do them unconditionally: */
31873 /* Save the current Host top-level page directory.
31874 */
31875 +
31876 +#ifdef CONFIG_PAX_PER_CPU_PGD
31877 + pages->state.host_cr3 = read_cr3();
31878 +#else
31879 pages->state.host_cr3 = __pa(current->mm->pgd);
31880 +#endif
31881 +
31882 /*
31883 * Set up the Guest's page tables to see this CPU's pages (and no
31884 * other CPU's pages).
31885 @@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
31886 * compiled-in switcher code and the high-mapped copy we just made.
31887 */
31888 for (i = 0; i < IDT_ENTRIES; i++)
31889 - default_idt_entries[i] += switcher_offset();
31890 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
31891
31892 /*
31893 * Set up the Switcher's per-cpu areas.
31894 @@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
31895 * it will be undisturbed when we switch. To change %cs and jump we
31896 * need this structure to feed to Intel's "lcall" instruction.
31897 */
31898 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
31899 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
31900 lguest_entry.segment = LGUEST_CS;
31901
31902 /*
31903 diff -urNp linux-2.6.32.44/drivers/lguest/x86/switcher_32.S linux-2.6.32.44/drivers/lguest/x86/switcher_32.S
31904 --- linux-2.6.32.44/drivers/lguest/x86/switcher_32.S 2011-03-27 14:31:47.000000000 -0400
31905 +++ linux-2.6.32.44/drivers/lguest/x86/switcher_32.S 2011-04-17 15:56:46.000000000 -0400
31906 @@ -87,6 +87,7 @@
31907 #include <asm/page.h>
31908 #include <asm/segment.h>
31909 #include <asm/lguest.h>
31910 +#include <asm/processor-flags.h>
31911
31912 // We mark the start of the code to copy
31913 // It's placed in .text tho it's never run here
31914 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
31915 // Changes type when we load it: damn Intel!
31916 // For after we switch over our page tables
31917 // That entry will be read-only: we'd crash.
31918 +
31919 +#ifdef CONFIG_PAX_KERNEXEC
31920 + mov %cr0, %edx
31921 + xor $X86_CR0_WP, %edx
31922 + mov %edx, %cr0
31923 +#endif
31924 +
31925 movl $(GDT_ENTRY_TSS*8), %edx
31926 ltr %dx
31927
31928 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
31929 // Let's clear it again for our return.
31930 // The GDT descriptor of the Host
31931 // Points to the table after two "size" bytes
31932 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
31933 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
31934 // Clear "used" from type field (byte 5, bit 2)
31935 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
31936 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
31937 +
31938 +#ifdef CONFIG_PAX_KERNEXEC
31939 + mov %cr0, %eax
31940 + xor $X86_CR0_WP, %eax
31941 + mov %eax, %cr0
31942 +#endif
31943
31944 // Once our page table's switched, the Guest is live!
31945 // The Host fades as we run this final step.
31946 @@ -295,13 +309,12 @@ deliver_to_host:
31947 // I consulted gcc, and it gave
31948 // These instructions, which I gladly credit:
31949 leal (%edx,%ebx,8), %eax
31950 - movzwl (%eax),%edx
31951 - movl 4(%eax), %eax
31952 - xorw %ax, %ax
31953 - orl %eax, %edx
31954 + movl 4(%eax), %edx
31955 + movw (%eax), %dx
31956 // Now the address of the handler's in %edx
31957 // We call it now: its "iret" drops us home.
31958 - jmp *%edx
31959 + ljmp $__KERNEL_CS, $1f
31960 +1: jmp *%edx
31961
31962 // Every interrupt can come to us here
31963 // But we must truly tell each apart.
31964 diff -urNp linux-2.6.32.44/drivers/macintosh/via-pmu-backlight.c linux-2.6.32.44/drivers/macintosh/via-pmu-backlight.c
31965 --- linux-2.6.32.44/drivers/macintosh/via-pmu-backlight.c 2011-03-27 14:31:47.000000000 -0400
31966 +++ linux-2.6.32.44/drivers/macintosh/via-pmu-backlight.c 2011-04-17 15:56:46.000000000 -0400
31967 @@ -15,7 +15,7 @@
31968
31969 #define MAX_PMU_LEVEL 0xFF
31970
31971 -static struct backlight_ops pmu_backlight_data;
31972 +static const struct backlight_ops pmu_backlight_data;
31973 static DEFINE_SPINLOCK(pmu_backlight_lock);
31974 static int sleeping, uses_pmu_bl;
31975 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
31976 @@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(
31977 return bd->props.brightness;
31978 }
31979
31980 -static struct backlight_ops pmu_backlight_data = {
31981 +static const struct backlight_ops pmu_backlight_data = {
31982 .get_brightness = pmu_backlight_get_brightness,
31983 .update_status = pmu_backlight_update_status,
31984
31985 diff -urNp linux-2.6.32.44/drivers/macintosh/via-pmu.c linux-2.6.32.44/drivers/macintosh/via-pmu.c
31986 --- linux-2.6.32.44/drivers/macintosh/via-pmu.c 2011-03-27 14:31:47.000000000 -0400
31987 +++ linux-2.6.32.44/drivers/macintosh/via-pmu.c 2011-04-17 15:56:46.000000000 -0400
31988 @@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state
31989 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
31990 }
31991
31992 -static struct platform_suspend_ops pmu_pm_ops = {
31993 +static const struct platform_suspend_ops pmu_pm_ops = {
31994 .enter = powerbook_sleep,
31995 .valid = pmu_sleep_valid,
31996 };
31997 diff -urNp linux-2.6.32.44/drivers/md/dm.c linux-2.6.32.44/drivers/md/dm.c
31998 --- linux-2.6.32.44/drivers/md/dm.c 2011-08-09 18:35:29.000000000 -0400
31999 +++ linux-2.6.32.44/drivers/md/dm.c 2011-08-09 18:33:59.000000000 -0400
32000 @@ -165,9 +165,9 @@ struct mapped_device {
32001 /*
32002 * Event handling.
32003 */
32004 - atomic_t event_nr;
32005 + atomic_unchecked_t event_nr;
32006 wait_queue_head_t eventq;
32007 - atomic_t uevent_seq;
32008 + atomic_unchecked_t uevent_seq;
32009 struct list_head uevent_list;
32010 spinlock_t uevent_lock; /* Protect access to uevent_list */
32011
32012 @@ -1776,8 +1776,8 @@ static struct mapped_device *alloc_dev(i
32013 rwlock_init(&md->map_lock);
32014 atomic_set(&md->holders, 1);
32015 atomic_set(&md->open_count, 0);
32016 - atomic_set(&md->event_nr, 0);
32017 - atomic_set(&md->uevent_seq, 0);
32018 + atomic_set_unchecked(&md->event_nr, 0);
32019 + atomic_set_unchecked(&md->uevent_seq, 0);
32020 INIT_LIST_HEAD(&md->uevent_list);
32021 spin_lock_init(&md->uevent_lock);
32022
32023 @@ -1927,7 +1927,7 @@ static void event_callback(void *context
32024
32025 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
32026
32027 - atomic_inc(&md->event_nr);
32028 + atomic_inc_unchecked(&md->event_nr);
32029 wake_up(&md->eventq);
32030 }
32031
32032 @@ -2562,18 +2562,18 @@ void dm_kobject_uevent(struct mapped_dev
32033
32034 uint32_t dm_next_uevent_seq(struct mapped_device *md)
32035 {
32036 - return atomic_add_return(1, &md->uevent_seq);
32037 + return atomic_add_return_unchecked(1, &md->uevent_seq);
32038 }
32039
32040 uint32_t dm_get_event_nr(struct mapped_device *md)
32041 {
32042 - return atomic_read(&md->event_nr);
32043 + return atomic_read_unchecked(&md->event_nr);
32044 }
32045
32046 int dm_wait_event(struct mapped_device *md, int event_nr)
32047 {
32048 return wait_event_interruptible(md->eventq,
32049 - (event_nr != atomic_read(&md->event_nr)));
32050 + (event_nr != atomic_read_unchecked(&md->event_nr)));
32051 }
32052
32053 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
32054 diff -urNp linux-2.6.32.44/drivers/md/dm-ioctl.c linux-2.6.32.44/drivers/md/dm-ioctl.c
32055 --- linux-2.6.32.44/drivers/md/dm-ioctl.c 2011-03-27 14:31:47.000000000 -0400
32056 +++ linux-2.6.32.44/drivers/md/dm-ioctl.c 2011-04-17 15:56:46.000000000 -0400
32057 @@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, str
32058 cmd == DM_LIST_VERSIONS_CMD)
32059 return 0;
32060
32061 - if ((cmd == DM_DEV_CREATE_CMD)) {
32062 + if (cmd == DM_DEV_CREATE_CMD) {
32063 if (!*param->name) {
32064 DMWARN("name not supplied when creating device");
32065 return -EINVAL;
32066 diff -urNp linux-2.6.32.44/drivers/md/dm-raid1.c linux-2.6.32.44/drivers/md/dm-raid1.c
32067 --- linux-2.6.32.44/drivers/md/dm-raid1.c 2011-03-27 14:31:47.000000000 -0400
32068 +++ linux-2.6.32.44/drivers/md/dm-raid1.c 2011-05-04 17:56:28.000000000 -0400
32069 @@ -41,7 +41,7 @@ enum dm_raid1_error {
32070
32071 struct mirror {
32072 struct mirror_set *ms;
32073 - atomic_t error_count;
32074 + atomic_unchecked_t error_count;
32075 unsigned long error_type;
32076 struct dm_dev *dev;
32077 sector_t offset;
32078 @@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m
32079 * simple way to tell if a device has encountered
32080 * errors.
32081 */
32082 - atomic_inc(&m->error_count);
32083 + atomic_inc_unchecked(&m->error_count);
32084
32085 if (test_and_set_bit(error_type, &m->error_type))
32086 return;
32087 @@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m
32088 }
32089
32090 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
32091 - if (!atomic_read(&new->error_count)) {
32092 + if (!atomic_read_unchecked(&new->error_count)) {
32093 set_default_mirror(new);
32094 break;
32095 }
32096 @@ -363,7 +363,7 @@ static struct mirror *choose_mirror(stru
32097 struct mirror *m = get_default_mirror(ms);
32098
32099 do {
32100 - if (likely(!atomic_read(&m->error_count)))
32101 + if (likely(!atomic_read_unchecked(&m->error_count)))
32102 return m;
32103
32104 if (m-- == ms->mirror)
32105 @@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
32106 {
32107 struct mirror *default_mirror = get_default_mirror(m->ms);
32108
32109 - return !atomic_read(&default_mirror->error_count);
32110 + return !atomic_read_unchecked(&default_mirror->error_count);
32111 }
32112
32113 static int mirror_available(struct mirror_set *ms, struct bio *bio)
32114 @@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *
32115 */
32116 if (likely(region_in_sync(ms, region, 1)))
32117 m = choose_mirror(ms, bio->bi_sector);
32118 - else if (m && atomic_read(&m->error_count))
32119 + else if (m && atomic_read_unchecked(&m->error_count))
32120 m = NULL;
32121
32122 if (likely(m))
32123 @@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set
32124 }
32125
32126 ms->mirror[mirror].ms = ms;
32127 - atomic_set(&(ms->mirror[mirror].error_count), 0);
32128 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
32129 ms->mirror[mirror].error_type = 0;
32130 ms->mirror[mirror].offset = offset;
32131
32132 @@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_targ
32133 */
32134 static char device_status_char(struct mirror *m)
32135 {
32136 - if (!atomic_read(&(m->error_count)))
32137 + if (!atomic_read_unchecked(&(m->error_count)))
32138 return 'A';
32139
32140 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
32141 diff -urNp linux-2.6.32.44/drivers/md/dm-stripe.c linux-2.6.32.44/drivers/md/dm-stripe.c
32142 --- linux-2.6.32.44/drivers/md/dm-stripe.c 2011-03-27 14:31:47.000000000 -0400
32143 +++ linux-2.6.32.44/drivers/md/dm-stripe.c 2011-05-04 17:56:28.000000000 -0400
32144 @@ -20,7 +20,7 @@ struct stripe {
32145 struct dm_dev *dev;
32146 sector_t physical_start;
32147
32148 - atomic_t error_count;
32149 + atomic_unchecked_t error_count;
32150 };
32151
32152 struct stripe_c {
32153 @@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *
32154 kfree(sc);
32155 return r;
32156 }
32157 - atomic_set(&(sc->stripe[i].error_count), 0);
32158 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
32159 }
32160
32161 ti->private = sc;
32162 @@ -257,7 +257,7 @@ static int stripe_status(struct dm_targe
32163 DMEMIT("%d ", sc->stripes);
32164 for (i = 0; i < sc->stripes; i++) {
32165 DMEMIT("%s ", sc->stripe[i].dev->name);
32166 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
32167 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
32168 'D' : 'A';
32169 }
32170 buffer[i] = '\0';
32171 @@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_targe
32172 */
32173 for (i = 0; i < sc->stripes; i++)
32174 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
32175 - atomic_inc(&(sc->stripe[i].error_count));
32176 - if (atomic_read(&(sc->stripe[i].error_count)) <
32177 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
32178 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
32179 DM_IO_ERROR_THRESHOLD)
32180 queue_work(kstriped, &sc->kstriped_ws);
32181 }
32182 diff -urNp linux-2.6.32.44/drivers/md/dm-sysfs.c linux-2.6.32.44/drivers/md/dm-sysfs.c
32183 --- linux-2.6.32.44/drivers/md/dm-sysfs.c 2011-03-27 14:31:47.000000000 -0400
32184 +++ linux-2.6.32.44/drivers/md/dm-sysfs.c 2011-04-17 15:56:46.000000000 -0400
32185 @@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
32186 NULL,
32187 };
32188
32189 -static struct sysfs_ops dm_sysfs_ops = {
32190 +static const struct sysfs_ops dm_sysfs_ops = {
32191 .show = dm_attr_show,
32192 };
32193
32194 diff -urNp linux-2.6.32.44/drivers/md/dm-table.c linux-2.6.32.44/drivers/md/dm-table.c
32195 --- linux-2.6.32.44/drivers/md/dm-table.c 2011-06-25 12:55:34.000000000 -0400
32196 +++ linux-2.6.32.44/drivers/md/dm-table.c 2011-06-25 12:56:37.000000000 -0400
32197 @@ -376,7 +376,7 @@ static int device_area_is_invalid(struct
32198 if (!dev_size)
32199 return 0;
32200
32201 - if ((start >= dev_size) || (start + len > dev_size)) {
32202 + if ((start >= dev_size) || (len > dev_size - start)) {
32203 DMWARN("%s: %s too small for target: "
32204 "start=%llu, len=%llu, dev_size=%llu",
32205 dm_device_name(ti->table->md), bdevname(bdev, b),
32206 diff -urNp linux-2.6.32.44/drivers/md/md.c linux-2.6.32.44/drivers/md/md.c
32207 --- linux-2.6.32.44/drivers/md/md.c 2011-07-13 17:23:04.000000000 -0400
32208 +++ linux-2.6.32.44/drivers/md/md.c 2011-07-13 17:23:18.000000000 -0400
32209 @@ -153,10 +153,10 @@ static int start_readonly;
32210 * start build, activate spare
32211 */
32212 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
32213 -static atomic_t md_event_count;
32214 +static atomic_unchecked_t md_event_count;
32215 void md_new_event(mddev_t *mddev)
32216 {
32217 - atomic_inc(&md_event_count);
32218 + atomic_inc_unchecked(&md_event_count);
32219 wake_up(&md_event_waiters);
32220 }
32221 EXPORT_SYMBOL_GPL(md_new_event);
32222 @@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
32223 */
32224 static void md_new_event_inintr(mddev_t *mddev)
32225 {
32226 - atomic_inc(&md_event_count);
32227 + atomic_inc_unchecked(&md_event_count);
32228 wake_up(&md_event_waiters);
32229 }
32230
32231 @@ -1218,7 +1218,7 @@ static int super_1_load(mdk_rdev_t *rdev
32232
32233 rdev->preferred_minor = 0xffff;
32234 rdev->data_offset = le64_to_cpu(sb->data_offset);
32235 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
32236 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
32237
32238 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
32239 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
32240 @@ -1392,7 +1392,7 @@ static void super_1_sync(mddev_t *mddev,
32241 else
32242 sb->resync_offset = cpu_to_le64(0);
32243
32244 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
32245 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
32246
32247 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
32248 sb->size = cpu_to_le64(mddev->dev_sectors);
32249 @@ -2214,7 +2214,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
32250 static ssize_t
32251 errors_show(mdk_rdev_t *rdev, char *page)
32252 {
32253 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
32254 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
32255 }
32256
32257 static ssize_t
32258 @@ -2223,7 +2223,7 @@ errors_store(mdk_rdev_t *rdev, const cha
32259 char *e;
32260 unsigned long n = simple_strtoul(buf, &e, 10);
32261 if (*buf && (*e == 0 || *e == '\n')) {
32262 - atomic_set(&rdev->corrected_errors, n);
32263 + atomic_set_unchecked(&rdev->corrected_errors, n);
32264 return len;
32265 }
32266 return -EINVAL;
32267 @@ -2517,7 +2517,7 @@ static void rdev_free(struct kobject *ko
32268 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
32269 kfree(rdev);
32270 }
32271 -static struct sysfs_ops rdev_sysfs_ops = {
32272 +static const struct sysfs_ops rdev_sysfs_ops = {
32273 .show = rdev_attr_show,
32274 .store = rdev_attr_store,
32275 };
32276 @@ -2566,8 +2566,8 @@ static mdk_rdev_t *md_import_device(dev_
32277 rdev->data_offset = 0;
32278 rdev->sb_events = 0;
32279 atomic_set(&rdev->nr_pending, 0);
32280 - atomic_set(&rdev->read_errors, 0);
32281 - atomic_set(&rdev->corrected_errors, 0);
32282 + atomic_set_unchecked(&rdev->read_errors, 0);
32283 + atomic_set_unchecked(&rdev->corrected_errors, 0);
32284
32285 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
32286 if (!size) {
32287 @@ -3887,7 +3887,7 @@ static void md_free(struct kobject *ko)
32288 kfree(mddev);
32289 }
32290
32291 -static struct sysfs_ops md_sysfs_ops = {
32292 +static const struct sysfs_ops md_sysfs_ops = {
32293 .show = md_attr_show,
32294 .store = md_attr_store,
32295 };
32296 @@ -4474,7 +4474,8 @@ out:
32297 err = 0;
32298 blk_integrity_unregister(disk);
32299 md_new_event(mddev);
32300 - sysfs_notify_dirent(mddev->sysfs_state);
32301 + if (mddev->sysfs_state)
32302 + sysfs_notify_dirent(mddev->sysfs_state);
32303 return err;
32304 }
32305
32306 @@ -5954,7 +5955,7 @@ static int md_seq_show(struct seq_file *
32307
32308 spin_unlock(&pers_lock);
32309 seq_printf(seq, "\n");
32310 - mi->event = atomic_read(&md_event_count);
32311 + mi->event = atomic_read_unchecked(&md_event_count);
32312 return 0;
32313 }
32314 if (v == (void*)2) {
32315 @@ -6043,7 +6044,7 @@ static int md_seq_show(struct seq_file *
32316 chunk_kb ? "KB" : "B");
32317 if (bitmap->file) {
32318 seq_printf(seq, ", file: ");
32319 - seq_path(seq, &bitmap->file->f_path, " \t\n");
32320 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
32321 }
32322
32323 seq_printf(seq, "\n");
32324 @@ -6077,7 +6078,7 @@ static int md_seq_open(struct inode *ino
32325 else {
32326 struct seq_file *p = file->private_data;
32327 p->private = mi;
32328 - mi->event = atomic_read(&md_event_count);
32329 + mi->event = atomic_read_unchecked(&md_event_count);
32330 }
32331 return error;
32332 }
32333 @@ -6093,7 +6094,7 @@ static unsigned int mdstat_poll(struct f
32334 /* always allow read */
32335 mask = POLLIN | POLLRDNORM;
32336
32337 - if (mi->event != atomic_read(&md_event_count))
32338 + if (mi->event != atomic_read_unchecked(&md_event_count))
32339 mask |= POLLERR | POLLPRI;
32340 return mask;
32341 }
32342 @@ -6137,7 +6138,7 @@ static int is_mddev_idle(mddev_t *mddev,
32343 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
32344 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
32345 (int)part_stat_read(&disk->part0, sectors[1]) -
32346 - atomic_read(&disk->sync_io);
32347 + atomic_read_unchecked(&disk->sync_io);
32348 /* sync IO will cause sync_io to increase before the disk_stats
32349 * as sync_io is counted when a request starts, and
32350 * disk_stats is counted when it completes.
32351 diff -urNp linux-2.6.32.44/drivers/md/md.h linux-2.6.32.44/drivers/md/md.h
32352 --- linux-2.6.32.44/drivers/md/md.h 2011-03-27 14:31:47.000000000 -0400
32353 +++ linux-2.6.32.44/drivers/md/md.h 2011-05-04 17:56:20.000000000 -0400
32354 @@ -94,10 +94,10 @@ struct mdk_rdev_s
32355 * only maintained for arrays that
32356 * support hot removal
32357 */
32358 - atomic_t read_errors; /* number of consecutive read errors that
32359 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
32360 * we have tried to ignore.
32361 */
32362 - atomic_t corrected_errors; /* number of corrected read errors,
32363 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
32364 * for reporting to userspace and storing
32365 * in superblock.
32366 */
32367 @@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_
32368
32369 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
32370 {
32371 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
32372 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
32373 }
32374
32375 struct mdk_personality
32376 diff -urNp linux-2.6.32.44/drivers/md/raid10.c linux-2.6.32.44/drivers/md/raid10.c
32377 --- linux-2.6.32.44/drivers/md/raid10.c 2011-03-27 14:31:47.000000000 -0400
32378 +++ linux-2.6.32.44/drivers/md/raid10.c 2011-05-04 17:56:28.000000000 -0400
32379 @@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bi
32380 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
32381 set_bit(R10BIO_Uptodate, &r10_bio->state);
32382 else {
32383 - atomic_add(r10_bio->sectors,
32384 + atomic_add_unchecked(r10_bio->sectors,
32385 &conf->mirrors[d].rdev->corrected_errors);
32386 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
32387 md_error(r10_bio->mddev,
32388 @@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
32389 test_bit(In_sync, &rdev->flags)) {
32390 atomic_inc(&rdev->nr_pending);
32391 rcu_read_unlock();
32392 - atomic_add(s, &rdev->corrected_errors);
32393 + atomic_add_unchecked(s, &rdev->corrected_errors);
32394 if (sync_page_io(rdev->bdev,
32395 r10_bio->devs[sl].addr +
32396 sect + rdev->data_offset,
32397 diff -urNp linux-2.6.32.44/drivers/md/raid1.c linux-2.6.32.44/drivers/md/raid1.c
32398 --- linux-2.6.32.44/drivers/md/raid1.c 2011-03-27 14:31:47.000000000 -0400
32399 +++ linux-2.6.32.44/drivers/md/raid1.c 2011-05-04 17:56:28.000000000 -0400
32400 @@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *
32401 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
32402 continue;
32403 rdev = conf->mirrors[d].rdev;
32404 - atomic_add(s, &rdev->corrected_errors);
32405 + atomic_add_unchecked(s, &rdev->corrected_errors);
32406 if (sync_page_io(rdev->bdev,
32407 sect + rdev->data_offset,
32408 s<<9,
32409 @@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf,
32410 /* Well, this device is dead */
32411 md_error(mddev, rdev);
32412 else {
32413 - atomic_add(s, &rdev->corrected_errors);
32414 + atomic_add_unchecked(s, &rdev->corrected_errors);
32415 printk(KERN_INFO
32416 "raid1:%s: read error corrected "
32417 "(%d sectors at %llu on %s)\n",
32418 diff -urNp linux-2.6.32.44/drivers/md/raid5.c linux-2.6.32.44/drivers/md/raid5.c
32419 --- linux-2.6.32.44/drivers/md/raid5.c 2011-06-25 12:55:34.000000000 -0400
32420 +++ linux-2.6.32.44/drivers/md/raid5.c 2011-06-25 12:58:39.000000000 -0400
32421 @@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_hea
32422 bi->bi_next = NULL;
32423 if ((rw & WRITE) &&
32424 test_bit(R5_ReWrite, &sh->dev[i].flags))
32425 - atomic_add(STRIPE_SECTORS,
32426 + atomic_add_unchecked(STRIPE_SECTORS,
32427 &rdev->corrected_errors);
32428 generic_make_request(bi);
32429 } else {
32430 @@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struc
32431 clear_bit(R5_ReadError, &sh->dev[i].flags);
32432 clear_bit(R5_ReWrite, &sh->dev[i].flags);
32433 }
32434 - if (atomic_read(&conf->disks[i].rdev->read_errors))
32435 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
32436 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
32437 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
32438 } else {
32439 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
32440 int retry = 0;
32441 rdev = conf->disks[i].rdev;
32442
32443 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
32444 - atomic_inc(&rdev->read_errors);
32445 + atomic_inc_unchecked(&rdev->read_errors);
32446 if (conf->mddev->degraded >= conf->max_degraded)
32447 printk_rl(KERN_WARNING
32448 "raid5:%s: read error not correctable "
32449 @@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struc
32450 (unsigned long long)(sh->sector
32451 + rdev->data_offset),
32452 bdn);
32453 - else if (atomic_read(&rdev->read_errors)
32454 + else if (atomic_read_unchecked(&rdev->read_errors)
32455 > conf->max_nr_stripes)
32456 printk(KERN_WARNING
32457 "raid5:%s: Too many read errors, failing device %s.\n",
32458 @@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct s
32459 sector_t r_sector;
32460 struct stripe_head sh2;
32461
32462 + pax_track_stack();
32463
32464 chunk_offset = sector_div(new_sector, sectors_per_chunk);
32465 stripe = new_sector;
32466 diff -urNp linux-2.6.32.44/drivers/media/common/saa7146_fops.c linux-2.6.32.44/drivers/media/common/saa7146_fops.c
32467 --- linux-2.6.32.44/drivers/media/common/saa7146_fops.c 2011-03-27 14:31:47.000000000 -0400
32468 +++ linux-2.6.32.44/drivers/media/common/saa7146_fops.c 2011-08-05 20:33:55.000000000 -0400
32469 @@ -458,7 +458,7 @@ int saa7146_vv_init(struct saa7146_dev*
32470 ERR(("out of memory. aborting.\n"));
32471 return -ENOMEM;
32472 }
32473 - ext_vv->ops = saa7146_video_ioctl_ops;
32474 + memcpy((void *)&ext_vv->ops, &saa7146_video_ioctl_ops, sizeof(saa7146_video_ioctl_ops));
32475 ext_vv->core_ops = &saa7146_video_ioctl_ops;
32476
32477 DEB_EE(("dev:%p\n",dev));
32478 diff -urNp linux-2.6.32.44/drivers/media/common/saa7146_hlp.c linux-2.6.32.44/drivers/media/common/saa7146_hlp.c
32479 --- linux-2.6.32.44/drivers/media/common/saa7146_hlp.c 2011-03-27 14:31:47.000000000 -0400
32480 +++ linux-2.6.32.44/drivers/media/common/saa7146_hlp.c 2011-05-16 21:46:57.000000000 -0400
32481 @@ -353,6 +353,8 @@ static void calculate_clipping_registers
32482
32483 int x[32], y[32], w[32], h[32];
32484
32485 + pax_track_stack();
32486 +
32487 /* clear out memory */
32488 memset(&line_list[0], 0x00, sizeof(u32)*32);
32489 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
32490 diff -urNp linux-2.6.32.44/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-2.6.32.44/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
32491 --- linux-2.6.32.44/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-03-27 14:31:47.000000000 -0400
32492 +++ linux-2.6.32.44/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-05-16 21:46:57.000000000 -0400
32493 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
32494 u8 buf[HOST_LINK_BUF_SIZE];
32495 int i;
32496
32497 + pax_track_stack();
32498 +
32499 dprintk("%s\n", __func__);
32500
32501 /* check if we have space for a link buf in the rx_buffer */
32502 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
32503 unsigned long timeout;
32504 int written;
32505
32506 + pax_track_stack();
32507 +
32508 dprintk("%s\n", __func__);
32509
32510 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
32511 diff -urNp linux-2.6.32.44/drivers/media/dvb/dvb-core/dvb_demux.h linux-2.6.32.44/drivers/media/dvb/dvb-core/dvb_demux.h
32512 --- linux-2.6.32.44/drivers/media/dvb/dvb-core/dvb_demux.h 2011-03-27 14:31:47.000000000 -0400
32513 +++ linux-2.6.32.44/drivers/media/dvb/dvb-core/dvb_demux.h 2011-08-05 20:33:55.000000000 -0400
32514 @@ -71,7 +71,7 @@ struct dvb_demux_feed {
32515 union {
32516 dmx_ts_cb ts;
32517 dmx_section_cb sec;
32518 - } cb;
32519 + } __no_const cb;
32520
32521 struct dvb_demux *demux;
32522 void *priv;
32523 diff -urNp linux-2.6.32.44/drivers/media/dvb/dvb-core/dvbdev.c linux-2.6.32.44/drivers/media/dvb/dvb-core/dvbdev.c
32524 --- linux-2.6.32.44/drivers/media/dvb/dvb-core/dvbdev.c 2011-03-27 14:31:47.000000000 -0400
32525 +++ linux-2.6.32.44/drivers/media/dvb/dvb-core/dvbdev.c 2011-08-05 20:33:55.000000000 -0400
32526 @@ -228,8 +228,8 @@ int dvb_register_device(struct dvb_adapt
32527 dvbdev->fops = dvbdevfops;
32528 init_waitqueue_head (&dvbdev->wait_queue);
32529
32530 - memcpy(dvbdevfops, template->fops, sizeof(struct file_operations));
32531 - dvbdevfops->owner = adap->module;
32532 + memcpy((void *)dvbdevfops, template->fops, sizeof(struct file_operations));
32533 + *(void **)&dvbdevfops->owner = adap->module;
32534
32535 list_add_tail (&dvbdev->list_head, &adap->device_list);
32536
32537 diff -urNp linux-2.6.32.44/drivers/media/dvb/dvb-usb/cxusb.c linux-2.6.32.44/drivers/media/dvb/dvb-usb/cxusb.c
32538 --- linux-2.6.32.44/drivers/media/dvb/dvb-usb/cxusb.c 2011-03-27 14:31:47.000000000 -0400
32539 +++ linux-2.6.32.44/drivers/media/dvb/dvb-usb/cxusb.c 2011-08-05 20:33:55.000000000 -0400
32540 @@ -1040,7 +1040,7 @@ static struct dib0070_config dib7070p_di
32541 struct dib0700_adapter_state {
32542 int (*set_param_save) (struct dvb_frontend *,
32543 struct dvb_frontend_parameters *);
32544 -};
32545 +} __no_const;
32546
32547 static int dib7070_set_param_override(struct dvb_frontend *fe,
32548 struct dvb_frontend_parameters *fep)
32549 diff -urNp linux-2.6.32.44/drivers/media/dvb/dvb-usb/dib0700_core.c linux-2.6.32.44/drivers/media/dvb/dvb-usb/dib0700_core.c
32550 --- linux-2.6.32.44/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-03-27 14:31:47.000000000 -0400
32551 +++ linux-2.6.32.44/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-05-16 21:46:57.000000000 -0400
32552 @@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb
32553
32554 u8 buf[260];
32555
32556 + pax_track_stack();
32557 +
32558 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
32559 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
32560
32561 diff -urNp linux-2.6.32.44/drivers/media/dvb/dvb-usb/dib0700_devices.c linux-2.6.32.44/drivers/media/dvb/dvb-usb/dib0700_devices.c
32562 --- linux-2.6.32.44/drivers/media/dvb/dvb-usb/dib0700_devices.c 2011-05-10 22:12:01.000000000 -0400
32563 +++ linux-2.6.32.44/drivers/media/dvb/dvb-usb/dib0700_devices.c 2011-08-05 20:33:55.000000000 -0400
32564 @@ -28,7 +28,7 @@ MODULE_PARM_DESC(force_lna_activation, "
32565
32566 struct dib0700_adapter_state {
32567 int (*set_param_save) (struct dvb_frontend *, struct dvb_frontend_parameters *);
32568 -};
32569 +} __no_const;
32570
32571 /* Hauppauge Nova-T 500 (aka Bristol)
32572 * has a LNA on GPIO0 which is enabled by setting 1 */
32573 diff -urNp linux-2.6.32.44/drivers/media/dvb/frontends/dib3000.h linux-2.6.32.44/drivers/media/dvb/frontends/dib3000.h
32574 --- linux-2.6.32.44/drivers/media/dvb/frontends/dib3000.h 2011-03-27 14:31:47.000000000 -0400
32575 +++ linux-2.6.32.44/drivers/media/dvb/frontends/dib3000.h 2011-08-05 20:33:55.000000000 -0400
32576 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
32577 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
32578 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
32579 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
32580 -};
32581 +} __no_const;
32582
32583 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
32584 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
32585 diff -urNp linux-2.6.32.44/drivers/media/dvb/frontends/or51211.c linux-2.6.32.44/drivers/media/dvb/frontends/or51211.c
32586 --- linux-2.6.32.44/drivers/media/dvb/frontends/or51211.c 2011-03-27 14:31:47.000000000 -0400
32587 +++ linux-2.6.32.44/drivers/media/dvb/frontends/or51211.c 2011-05-16 21:46:57.000000000 -0400
32588 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
32589 u8 tudata[585];
32590 int i;
32591
32592 + pax_track_stack();
32593 +
32594 dprintk("Firmware is %zd bytes\n",fw->size);
32595
32596 /* Get eprom data */
32597 diff -urNp linux-2.6.32.44/drivers/media/dvb/ttpci/av7110_v4l.c linux-2.6.32.44/drivers/media/dvb/ttpci/av7110_v4l.c
32598 --- linux-2.6.32.44/drivers/media/dvb/ttpci/av7110_v4l.c 2011-03-27 14:31:47.000000000 -0400
32599 +++ linux-2.6.32.44/drivers/media/dvb/ttpci/av7110_v4l.c 2011-08-05 20:33:55.000000000 -0400
32600 @@ -796,18 +796,18 @@ int av7110_init_v4l(struct av7110 *av711
32601 ERR(("cannot init capture device. skipping.\n"));
32602 return -ENODEV;
32603 }
32604 - vv_data->ops.vidioc_enum_input = vidioc_enum_input;
32605 - vv_data->ops.vidioc_g_input = vidioc_g_input;
32606 - vv_data->ops.vidioc_s_input = vidioc_s_input;
32607 - vv_data->ops.vidioc_g_tuner = vidioc_g_tuner;
32608 - vv_data->ops.vidioc_s_tuner = vidioc_s_tuner;
32609 - vv_data->ops.vidioc_g_frequency = vidioc_g_frequency;
32610 - vv_data->ops.vidioc_s_frequency = vidioc_s_frequency;
32611 - vv_data->ops.vidioc_g_audio = vidioc_g_audio;
32612 - vv_data->ops.vidioc_s_audio = vidioc_s_audio;
32613 - vv_data->ops.vidioc_g_sliced_vbi_cap = vidioc_g_sliced_vbi_cap;
32614 - vv_data->ops.vidioc_g_fmt_sliced_vbi_out = vidioc_g_fmt_sliced_vbi_out;
32615 - vv_data->ops.vidioc_s_fmt_sliced_vbi_out = vidioc_s_fmt_sliced_vbi_out;
32616 + *(void **)&vv_data->ops.vidioc_enum_input = vidioc_enum_input;
32617 + *(void **)&vv_data->ops.vidioc_g_input = vidioc_g_input;
32618 + *(void **)&vv_data->ops.vidioc_s_input = vidioc_s_input;
32619 + *(void **)&vv_data->ops.vidioc_g_tuner = vidioc_g_tuner;
32620 + *(void **)&vv_data->ops.vidioc_s_tuner = vidioc_s_tuner;
32621 + *(void **)&vv_data->ops.vidioc_g_frequency = vidioc_g_frequency;
32622 + *(void **)&vv_data->ops.vidioc_s_frequency = vidioc_s_frequency;
32623 + *(void **)&vv_data->ops.vidioc_g_audio = vidioc_g_audio;
32624 + *(void **)&vv_data->ops.vidioc_s_audio = vidioc_s_audio;
32625 + *(void **)&vv_data->ops.vidioc_g_sliced_vbi_cap = vidioc_g_sliced_vbi_cap;
32626 + *(void **)&vv_data->ops.vidioc_g_fmt_sliced_vbi_out = vidioc_g_fmt_sliced_vbi_out;
32627 + *(void **)&vv_data->ops.vidioc_s_fmt_sliced_vbi_out = vidioc_s_fmt_sliced_vbi_out;
32628
32629 if (saa7146_register_device(&av7110->v4l_dev, dev, "av7110", VFL_TYPE_GRABBER)) {
32630 ERR(("cannot register capture device. skipping.\n"));
32631 diff -urNp linux-2.6.32.44/drivers/media/dvb/ttpci/budget-av.c linux-2.6.32.44/drivers/media/dvb/ttpci/budget-av.c
32632 --- linux-2.6.32.44/drivers/media/dvb/ttpci/budget-av.c 2011-03-27 14:31:47.000000000 -0400
32633 +++ linux-2.6.32.44/drivers/media/dvb/ttpci/budget-av.c 2011-08-05 20:33:55.000000000 -0400
32634 @@ -1477,9 +1477,9 @@ static int budget_av_attach(struct saa71
32635 ERR(("cannot init vv subsystem.\n"));
32636 return err;
32637 }
32638 - vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32639 - vv_data.ops.vidioc_g_input = vidioc_g_input;
32640 - vv_data.ops.vidioc_s_input = vidioc_s_input;
32641 + *(void **)&vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32642 + *(void **)&vv_data.ops.vidioc_g_input = vidioc_g_input;
32643 + *(void **)&vv_data.ops.vidioc_s_input = vidioc_s_input;
32644
32645 if ((err = saa7146_register_device(&budget_av->vd, dev, "knc1", VFL_TYPE_GRABBER))) {
32646 /* fixme: proper cleanup here */
32647 diff -urNp linux-2.6.32.44/drivers/media/radio/radio-cadet.c linux-2.6.32.44/drivers/media/radio/radio-cadet.c
32648 --- linux-2.6.32.44/drivers/media/radio/radio-cadet.c 2011-03-27 14:31:47.000000000 -0400
32649 +++ linux-2.6.32.44/drivers/media/radio/radio-cadet.c 2011-04-17 15:56:46.000000000 -0400
32650 @@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *f
32651 while (i < count && dev->rdsin != dev->rdsout)
32652 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
32653
32654 - if (copy_to_user(data, readbuf, i))
32655 + if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
32656 return -EFAULT;
32657 return i;
32658 }
32659 diff -urNp linux-2.6.32.44/drivers/media/video/cx18/cx18-driver.c linux-2.6.32.44/drivers/media/video/cx18/cx18-driver.c
32660 --- linux-2.6.32.44/drivers/media/video/cx18/cx18-driver.c 2011-03-27 14:31:47.000000000 -0400
32661 +++ linux-2.6.32.44/drivers/media/video/cx18/cx18-driver.c 2011-05-16 21:46:57.000000000 -0400
32662 @@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl
32663
32664 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
32665
32666 -static atomic_t cx18_instance = ATOMIC_INIT(0);
32667 +static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
32668
32669 /* Parameter declarations */
32670 static int cardtype[CX18_MAX_CARDS];
32671 @@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
32672 struct i2c_client c;
32673 u8 eedata[256];
32674
32675 + pax_track_stack();
32676 +
32677 memset(&c, 0, sizeof(c));
32678 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
32679 c.adapter = &cx->i2c_adap[0];
32680 @@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct p
32681 struct cx18 *cx;
32682
32683 /* FIXME - module parameter arrays constrain max instances */
32684 - i = atomic_inc_return(&cx18_instance) - 1;
32685 + i = atomic_inc_return_unchecked(&cx18_instance) - 1;
32686 if (i >= CX18_MAX_CARDS) {
32687 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
32688 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
32689 diff -urNp linux-2.6.32.44/drivers/media/video/hexium_gemini.c linux-2.6.32.44/drivers/media/video/hexium_gemini.c
32690 --- linux-2.6.32.44/drivers/media/video/hexium_gemini.c 2011-03-27 14:31:47.000000000 -0400
32691 +++ linux-2.6.32.44/drivers/media/video/hexium_gemini.c 2011-08-05 20:33:55.000000000 -0400
32692 @@ -394,12 +394,12 @@ static int hexium_attach(struct saa7146_
32693 hexium->cur_input = 0;
32694
32695 saa7146_vv_init(dev, &vv_data);
32696 - vv_data.ops.vidioc_queryctrl = vidioc_queryctrl;
32697 - vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl;
32698 - vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl;
32699 - vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32700 - vv_data.ops.vidioc_g_input = vidioc_g_input;
32701 - vv_data.ops.vidioc_s_input = vidioc_s_input;
32702 + *(void **)&vv_data.ops.vidioc_queryctrl = vidioc_queryctrl;
32703 + *(void **)&vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl;
32704 + *(void **)&vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl;
32705 + *(void **)&vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32706 + *(void **)&vv_data.ops.vidioc_g_input = vidioc_g_input;
32707 + *(void **)&vv_data.ops.vidioc_s_input = vidioc_s_input;
32708 if (0 != saa7146_register_device(&hexium->video_dev, dev, "hexium gemini", VFL_TYPE_GRABBER)) {
32709 printk("hexium_gemini: cannot register capture v4l2 device. skipping.\n");
32710 return -1;
32711 diff -urNp linux-2.6.32.44/drivers/media/video/hexium_orion.c linux-2.6.32.44/drivers/media/video/hexium_orion.c
32712 --- linux-2.6.32.44/drivers/media/video/hexium_orion.c 2011-03-27 14:31:47.000000000 -0400
32713 +++ linux-2.6.32.44/drivers/media/video/hexium_orion.c 2011-08-05 20:33:55.000000000 -0400
32714 @@ -369,9 +369,9 @@ static int hexium_attach(struct saa7146_
32715 DEB_EE((".\n"));
32716
32717 saa7146_vv_init(dev, &vv_data);
32718 - vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32719 - vv_data.ops.vidioc_g_input = vidioc_g_input;
32720 - vv_data.ops.vidioc_s_input = vidioc_s_input;
32721 + *(void **)&vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32722 + *(void **)&vv_data.ops.vidioc_g_input = vidioc_g_input;
32723 + *(void **)&vv_data.ops.vidioc_s_input = vidioc_s_input;
32724 if (0 != saa7146_register_device(&hexium->video_dev, dev, "hexium orion", VFL_TYPE_GRABBER)) {
32725 printk("hexium_orion: cannot register capture v4l2 device. skipping.\n");
32726 return -1;
32727 diff -urNp linux-2.6.32.44/drivers/media/video/ivtv/ivtv-driver.c linux-2.6.32.44/drivers/media/video/ivtv/ivtv-driver.c
32728 --- linux-2.6.32.44/drivers/media/video/ivtv/ivtv-driver.c 2011-03-27 14:31:47.000000000 -0400
32729 +++ linux-2.6.32.44/drivers/media/video/ivtv/ivtv-driver.c 2011-05-04 17:56:28.000000000 -0400
32730 @@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl
32731 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
32732
32733 /* ivtv instance counter */
32734 -static atomic_t ivtv_instance = ATOMIC_INIT(0);
32735 +static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
32736
32737 /* Parameter declarations */
32738 static int cardtype[IVTV_MAX_CARDS];
32739 diff -urNp linux-2.6.32.44/drivers/media/video/mxb.c linux-2.6.32.44/drivers/media/video/mxb.c
32740 --- linux-2.6.32.44/drivers/media/video/mxb.c 2011-03-27 14:31:47.000000000 -0400
32741 +++ linux-2.6.32.44/drivers/media/video/mxb.c 2011-08-05 20:33:55.000000000 -0400
32742 @@ -703,23 +703,23 @@ static int mxb_attach(struct saa7146_dev
32743 already did this in "mxb_vl42_probe" */
32744
32745 saa7146_vv_init(dev, &vv_data);
32746 - vv_data.ops.vidioc_queryctrl = vidioc_queryctrl;
32747 - vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl;
32748 - vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl;
32749 - vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32750 - vv_data.ops.vidioc_g_input = vidioc_g_input;
32751 - vv_data.ops.vidioc_s_input = vidioc_s_input;
32752 - vv_data.ops.vidioc_g_tuner = vidioc_g_tuner;
32753 - vv_data.ops.vidioc_s_tuner = vidioc_s_tuner;
32754 - vv_data.ops.vidioc_g_frequency = vidioc_g_frequency;
32755 - vv_data.ops.vidioc_s_frequency = vidioc_s_frequency;
32756 - vv_data.ops.vidioc_g_audio = vidioc_g_audio;
32757 - vv_data.ops.vidioc_s_audio = vidioc_s_audio;
32758 + *(void **)&vv_data.ops.vidioc_queryctrl = vidioc_queryctrl;
32759 + *(void **)&vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl;
32760 + *(void **)&vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl;
32761 + *(void **)&vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32762 + *(void **)&vv_data.ops.vidioc_g_input = vidioc_g_input;
32763 + *(void **)&vv_data.ops.vidioc_s_input = vidioc_s_input;
32764 + *(void **)&vv_data.ops.vidioc_g_tuner = vidioc_g_tuner;
32765 + *(void **)&vv_data.ops.vidioc_s_tuner = vidioc_s_tuner;
32766 + *(void **)&vv_data.ops.vidioc_g_frequency = vidioc_g_frequency;
32767 + *(void **)&vv_data.ops.vidioc_s_frequency = vidioc_s_frequency;
32768 + *(void **)&vv_data.ops.vidioc_g_audio = vidioc_g_audio;
32769 + *(void **)&vv_data.ops.vidioc_s_audio = vidioc_s_audio;
32770 #ifdef CONFIG_VIDEO_ADV_DEBUG
32771 - vv_data.ops.vidioc_g_register = vidioc_g_register;
32772 - vv_data.ops.vidioc_s_register = vidioc_s_register;
32773 + *(void **)&vv_data.ops.vidioc_g_register = vidioc_g_register;
32774 + *(void **)&vv_data.ops.vidioc_s_register = vidioc_s_register;
32775 #endif
32776 - vv_data.ops.vidioc_default = vidioc_default;
32777 + *(void **)&vv_data.ops.vidioc_default = vidioc_default;
32778 if (saa7146_register_device(&mxb->video_dev, dev, "mxb", VFL_TYPE_GRABBER)) {
32779 ERR(("cannot register capture v4l2 device. skipping.\n"));
32780 return -1;
32781 diff -urNp linux-2.6.32.44/drivers/media/video/omap24xxcam.c linux-2.6.32.44/drivers/media/video/omap24xxcam.c
32782 --- linux-2.6.32.44/drivers/media/video/omap24xxcam.c 2011-03-27 14:31:47.000000000 -0400
32783 +++ linux-2.6.32.44/drivers/media/video/omap24xxcam.c 2011-05-04 17:56:28.000000000 -0400
32784 @@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(str
32785 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
32786
32787 do_gettimeofday(&vb->ts);
32788 - vb->field_count = atomic_add_return(2, &fh->field_count);
32789 + vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
32790 if (csr & csr_error) {
32791 vb->state = VIDEOBUF_ERROR;
32792 if (!atomic_read(&fh->cam->in_reset)) {
32793 diff -urNp linux-2.6.32.44/drivers/media/video/omap24xxcam.h linux-2.6.32.44/drivers/media/video/omap24xxcam.h
32794 --- linux-2.6.32.44/drivers/media/video/omap24xxcam.h 2011-03-27 14:31:47.000000000 -0400
32795 +++ linux-2.6.32.44/drivers/media/video/omap24xxcam.h 2011-05-04 17:56:28.000000000 -0400
32796 @@ -533,7 +533,7 @@ struct omap24xxcam_fh {
32797 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
32798 struct videobuf_queue vbq;
32799 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
32800 - atomic_t field_count; /* field counter for videobuf_buffer */
32801 + atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
32802 /* accessing cam here doesn't need serialisation: it's constant */
32803 struct omap24xxcam_device *cam;
32804 };
32805 diff -urNp linux-2.6.32.44/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-2.6.32.44/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
32806 --- linux-2.6.32.44/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-03-27 14:31:47.000000000 -0400
32807 +++ linux-2.6.32.44/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-05-16 21:46:57.000000000 -0400
32808 @@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
32809 u8 *eeprom;
32810 struct tveeprom tvdata;
32811
32812 + pax_track_stack();
32813 +
32814 memset(&tvdata,0,sizeof(tvdata));
32815
32816 eeprom = pvr2_eeprom_fetch(hdw);
32817 diff -urNp linux-2.6.32.44/drivers/media/video/saa7134/saa6752hs.c linux-2.6.32.44/drivers/media/video/saa7134/saa6752hs.c
32818 --- linux-2.6.32.44/drivers/media/video/saa7134/saa6752hs.c 2011-03-27 14:31:47.000000000 -0400
32819 +++ linux-2.6.32.44/drivers/media/video/saa7134/saa6752hs.c 2011-05-16 21:46:57.000000000 -0400
32820 @@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_su
32821 unsigned char localPAT[256];
32822 unsigned char localPMT[256];
32823
32824 + pax_track_stack();
32825 +
32826 /* Set video format - must be done first as it resets other settings */
32827 set_reg8(client, 0x41, h->video_format);
32828
32829 diff -urNp linux-2.6.32.44/drivers/media/video/saa7164/saa7164-cmd.c linux-2.6.32.44/drivers/media/video/saa7164/saa7164-cmd.c
32830 --- linux-2.6.32.44/drivers/media/video/saa7164/saa7164-cmd.c 2011-03-27 14:31:47.000000000 -0400
32831 +++ linux-2.6.32.44/drivers/media/video/saa7164/saa7164-cmd.c 2011-05-16 21:46:57.000000000 -0400
32832 @@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_d
32833 wait_queue_head_t *q = 0;
32834 dprintk(DBGLVL_CMD, "%s()\n", __func__);
32835
32836 + pax_track_stack();
32837 +
32838 /* While any outstand message on the bus exists... */
32839 do {
32840
32841 @@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
32842 u8 tmp[512];
32843 dprintk(DBGLVL_CMD, "%s()\n", __func__);
32844
32845 + pax_track_stack();
32846 +
32847 while (loop) {
32848
32849 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
32850 diff -urNp linux-2.6.32.44/drivers/media/video/usbvideo/ibmcam.c linux-2.6.32.44/drivers/media/video/usbvideo/ibmcam.c
32851 --- linux-2.6.32.44/drivers/media/video/usbvideo/ibmcam.c 2011-03-27 14:31:47.000000000 -0400
32852 +++ linux-2.6.32.44/drivers/media/video/usbvideo/ibmcam.c 2011-08-05 20:33:55.000000000 -0400
32853 @@ -3947,15 +3947,15 @@ static struct usb_device_id id_table[] =
32854 static int __init ibmcam_init(void)
32855 {
32856 struct usbvideo_cb cbTbl;
32857 - memset(&cbTbl, 0, sizeof(cbTbl));
32858 - cbTbl.probe = ibmcam_probe;
32859 - cbTbl.setupOnOpen = ibmcam_setup_on_open;
32860 - cbTbl.videoStart = ibmcam_video_start;
32861 - cbTbl.videoStop = ibmcam_video_stop;
32862 - cbTbl.processData = ibmcam_ProcessIsocData;
32863 - cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32864 - cbTbl.adjustPicture = ibmcam_adjust_picture;
32865 - cbTbl.getFPS = ibmcam_calculate_fps;
32866 + memset((void *)&cbTbl, 0, sizeof(cbTbl));
32867 + *(void **)&cbTbl.probe = ibmcam_probe;
32868 + *(void **)&cbTbl.setupOnOpen = ibmcam_setup_on_open;
32869 + *(void **)&cbTbl.videoStart = ibmcam_video_start;
32870 + *(void **)&cbTbl.videoStop = ibmcam_video_stop;
32871 + *(void **)&cbTbl.processData = ibmcam_ProcessIsocData;
32872 + *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32873 + *(void **)&cbTbl.adjustPicture = ibmcam_adjust_picture;
32874 + *(void **)&cbTbl.getFPS = ibmcam_calculate_fps;
32875 return usbvideo_register(
32876 &cams,
32877 MAX_IBMCAM,
32878 diff -urNp linux-2.6.32.44/drivers/media/video/usbvideo/konicawc.c linux-2.6.32.44/drivers/media/video/usbvideo/konicawc.c
32879 --- linux-2.6.32.44/drivers/media/video/usbvideo/konicawc.c 2011-03-27 14:31:47.000000000 -0400
32880 +++ linux-2.6.32.44/drivers/media/video/usbvideo/konicawc.c 2011-08-05 20:33:55.000000000 -0400
32881 @@ -225,7 +225,7 @@ static void konicawc_register_input(stru
32882 int error;
32883
32884 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
32885 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32886 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32887
32888 cam->input = input_dev = input_allocate_device();
32889 if (!input_dev) {
32890 @@ -935,16 +935,16 @@ static int __init konicawc_init(void)
32891 struct usbvideo_cb cbTbl;
32892 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
32893 DRIVER_DESC "\n");
32894 - memset(&cbTbl, 0, sizeof(cbTbl));
32895 - cbTbl.probe = konicawc_probe;
32896 - cbTbl.setupOnOpen = konicawc_setup_on_open;
32897 - cbTbl.processData = konicawc_process_isoc;
32898 - cbTbl.getFPS = konicawc_calculate_fps;
32899 - cbTbl.setVideoMode = konicawc_set_video_mode;
32900 - cbTbl.startDataPump = konicawc_start_data;
32901 - cbTbl.stopDataPump = konicawc_stop_data;
32902 - cbTbl.adjustPicture = konicawc_adjust_picture;
32903 - cbTbl.userFree = konicawc_free_uvd;
32904 + memset((void * )&cbTbl, 0, sizeof(cbTbl));
32905 + *(void **)&cbTbl.probe = konicawc_probe;
32906 + *(void **)&cbTbl.setupOnOpen = konicawc_setup_on_open;
32907 + *(void **)&cbTbl.processData = konicawc_process_isoc;
32908 + *(void **)&cbTbl.getFPS = konicawc_calculate_fps;
32909 + *(void **)&cbTbl.setVideoMode = konicawc_set_video_mode;
32910 + *(void **)&cbTbl.startDataPump = konicawc_start_data;
32911 + *(void **)&cbTbl.stopDataPump = konicawc_stop_data;
32912 + *(void **)&cbTbl.adjustPicture = konicawc_adjust_picture;
32913 + *(void **)&cbTbl.userFree = konicawc_free_uvd;
32914 return usbvideo_register(
32915 &cams,
32916 MAX_CAMERAS,
32917 diff -urNp linux-2.6.32.44/drivers/media/video/usbvideo/quickcam_messenger.c linux-2.6.32.44/drivers/media/video/usbvideo/quickcam_messenger.c
32918 --- linux-2.6.32.44/drivers/media/video/usbvideo/quickcam_messenger.c 2011-03-27 14:31:47.000000000 -0400
32919 +++ linux-2.6.32.44/drivers/media/video/usbvideo/quickcam_messenger.c 2011-04-17 15:56:46.000000000 -0400
32920 @@ -89,7 +89,7 @@ static void qcm_register_input(struct qc
32921 int error;
32922
32923 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
32924 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32925 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32926
32927 cam->input = input_dev = input_allocate_device();
32928 if (!input_dev) {
32929 diff -urNp linux-2.6.32.44/drivers/media/video/usbvideo/ultracam.c linux-2.6.32.44/drivers/media/video/usbvideo/ultracam.c
32930 --- linux-2.6.32.44/drivers/media/video/usbvideo/ultracam.c 2011-03-27 14:31:47.000000000 -0400
32931 +++ linux-2.6.32.44/drivers/media/video/usbvideo/ultracam.c 2011-08-05 20:33:55.000000000 -0400
32932 @@ -655,14 +655,14 @@ static int __init ultracam_init(void)
32933 {
32934 struct usbvideo_cb cbTbl;
32935 memset(&cbTbl, 0, sizeof(cbTbl));
32936 - cbTbl.probe = ultracam_probe;
32937 - cbTbl.setupOnOpen = ultracam_setup_on_open;
32938 - cbTbl.videoStart = ultracam_video_start;
32939 - cbTbl.videoStop = ultracam_video_stop;
32940 - cbTbl.processData = ultracam_ProcessIsocData;
32941 - cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32942 - cbTbl.adjustPicture = ultracam_adjust_picture;
32943 - cbTbl.getFPS = ultracam_calculate_fps;
32944 + *(void **)&cbTbl.probe = ultracam_probe;
32945 + *(void **)&cbTbl.setupOnOpen = ultracam_setup_on_open;
32946 + *(void **)&cbTbl.videoStart = ultracam_video_start;
32947 + *(void **)&cbTbl.videoStop = ultracam_video_stop;
32948 + *(void **)&cbTbl.processData = ultracam_ProcessIsocData;
32949 + *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32950 + *(void **)&cbTbl.adjustPicture = ultracam_adjust_picture;
32951 + *(void **)&cbTbl.getFPS = ultracam_calculate_fps;
32952 return usbvideo_register(
32953 &cams,
32954 MAX_CAMERAS,
32955 diff -urNp linux-2.6.32.44/drivers/media/video/usbvideo/usbvideo.c linux-2.6.32.44/drivers/media/video/usbvideo/usbvideo.c
32956 --- linux-2.6.32.44/drivers/media/video/usbvideo/usbvideo.c 2011-03-27 14:31:47.000000000 -0400
32957 +++ linux-2.6.32.44/drivers/media/video/usbvideo/usbvideo.c 2011-08-05 20:33:55.000000000 -0400
32958 @@ -697,15 +697,15 @@ int usbvideo_register(
32959 __func__, cams, base_size, num_cams);
32960
32961 /* Copy callbacks, apply defaults for those that are not set */
32962 - memmove(&cams->cb, cbTbl, sizeof(cams->cb));
32963 + memmove((void *)&cams->cb, cbTbl, sizeof(cams->cb));
32964 if (cams->cb.getFrame == NULL)
32965 - cams->cb.getFrame = usbvideo_GetFrame;
32966 + *(void **)&cams->cb.getFrame = usbvideo_GetFrame;
32967 if (cams->cb.disconnect == NULL)
32968 - cams->cb.disconnect = usbvideo_Disconnect;
32969 + *(void **)&cams->cb.disconnect = usbvideo_Disconnect;
32970 if (cams->cb.startDataPump == NULL)
32971 - cams->cb.startDataPump = usbvideo_StartDataPump;
32972 + *(void **)&cams->cb.startDataPump = usbvideo_StartDataPump;
32973 if (cams->cb.stopDataPump == NULL)
32974 - cams->cb.stopDataPump = usbvideo_StopDataPump;
32975 + *(void **)&cams->cb.stopDataPump = usbvideo_StopDataPump;
32976
32977 cams->num_cameras = num_cams;
32978 cams->cam = (struct uvd *) &cams[1];
32979 diff -urNp linux-2.6.32.44/drivers/media/video/usbvision/usbvision-core.c linux-2.6.32.44/drivers/media/video/usbvision/usbvision-core.c
32980 --- linux-2.6.32.44/drivers/media/video/usbvision/usbvision-core.c 2011-03-27 14:31:47.000000000 -0400
32981 +++ linux-2.6.32.44/drivers/media/video/usbvision/usbvision-core.c 2011-05-16 21:46:57.000000000 -0400
32982 @@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_c
32983 unsigned char rv, gv, bv;
32984 static unsigned char *Y, *U, *V;
32985
32986 + pax_track_stack();
32987 +
32988 frame = usbvision->curFrame;
32989 imageSize = frame->frmwidth * frame->frmheight;
32990 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
32991 diff -urNp linux-2.6.32.44/drivers/media/video/v4l2-device.c linux-2.6.32.44/drivers/media/video/v4l2-device.c
32992 --- linux-2.6.32.44/drivers/media/video/v4l2-device.c 2011-03-27 14:31:47.000000000 -0400
32993 +++ linux-2.6.32.44/drivers/media/video/v4l2-device.c 2011-05-04 17:56:28.000000000 -0400
32994 @@ -50,9 +50,9 @@ int v4l2_device_register(struct device *
32995 EXPORT_SYMBOL_GPL(v4l2_device_register);
32996
32997 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
32998 - atomic_t *instance)
32999 + atomic_unchecked_t *instance)
33000 {
33001 - int num = atomic_inc_return(instance) - 1;
33002 + int num = atomic_inc_return_unchecked(instance) - 1;
33003 int len = strlen(basename);
33004
33005 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
33006 diff -urNp linux-2.6.32.44/drivers/media/video/videobuf-dma-sg.c linux-2.6.32.44/drivers/media/video/videobuf-dma-sg.c
33007 --- linux-2.6.32.44/drivers/media/video/videobuf-dma-sg.c 2011-03-27 14:31:47.000000000 -0400
33008 +++ linux-2.6.32.44/drivers/media/video/videobuf-dma-sg.c 2011-05-16 21:46:57.000000000 -0400
33009 @@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
33010 {
33011 struct videobuf_queue q;
33012
33013 + pax_track_stack();
33014 +
33015 /* Required to make generic handler to call __videobuf_alloc */
33016 q.int_ops = &sg_ops;
33017
33018 diff -urNp linux-2.6.32.44/drivers/message/fusion/mptbase.c linux-2.6.32.44/drivers/message/fusion/mptbase.c
33019 --- linux-2.6.32.44/drivers/message/fusion/mptbase.c 2011-03-27 14:31:47.000000000 -0400
33020 +++ linux-2.6.32.44/drivers/message/fusion/mptbase.c 2011-04-17 15:56:46.000000000 -0400
33021 @@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **s
33022 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
33023 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
33024
33025 +#ifdef CONFIG_GRKERNSEC_HIDESYM
33026 + len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
33027 + NULL, NULL);
33028 +#else
33029 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
33030 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
33031 +#endif
33032 +
33033 /*
33034 * Rounding UP to nearest 4-kB boundary here...
33035 */
33036 diff -urNp linux-2.6.32.44/drivers/message/fusion/mptsas.c linux-2.6.32.44/drivers/message/fusion/mptsas.c
33037 --- linux-2.6.32.44/drivers/message/fusion/mptsas.c 2011-03-27 14:31:47.000000000 -0400
33038 +++ linux-2.6.32.44/drivers/message/fusion/mptsas.c 2011-04-17 15:56:46.000000000 -0400
33039 @@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devin
33040 return 0;
33041 }
33042
33043 +static inline void
33044 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
33045 +{
33046 + if (phy_info->port_details) {
33047 + phy_info->port_details->rphy = rphy;
33048 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
33049 + ioc->name, rphy));
33050 + }
33051 +
33052 + if (rphy) {
33053 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
33054 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
33055 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
33056 + ioc->name, rphy, rphy->dev.release));
33057 + }
33058 +}
33059 +
33060 /* no mutex */
33061 static void
33062 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
33063 @@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
33064 return NULL;
33065 }
33066
33067 -static inline void
33068 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
33069 -{
33070 - if (phy_info->port_details) {
33071 - phy_info->port_details->rphy = rphy;
33072 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
33073 - ioc->name, rphy));
33074 - }
33075 -
33076 - if (rphy) {
33077 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
33078 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
33079 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
33080 - ioc->name, rphy, rphy->dev.release));
33081 - }
33082 -}
33083 -
33084 static inline struct sas_port *
33085 mptsas_get_port(struct mptsas_phyinfo *phy_info)
33086 {
33087 diff -urNp linux-2.6.32.44/drivers/message/fusion/mptscsih.c linux-2.6.32.44/drivers/message/fusion/mptscsih.c
33088 --- linux-2.6.32.44/drivers/message/fusion/mptscsih.c 2011-03-27 14:31:47.000000000 -0400
33089 +++ linux-2.6.32.44/drivers/message/fusion/mptscsih.c 2011-04-17 15:56:46.000000000 -0400
33090 @@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
33091
33092 h = shost_priv(SChost);
33093
33094 - if (h) {
33095 - if (h->info_kbuf == NULL)
33096 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
33097 - return h->info_kbuf;
33098 - h->info_kbuf[0] = '\0';
33099 + if (!h)
33100 + return NULL;
33101
33102 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
33103 - h->info_kbuf[size-1] = '\0';
33104 - }
33105 + if (h->info_kbuf == NULL)
33106 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
33107 + return h->info_kbuf;
33108 + h->info_kbuf[0] = '\0';
33109 +
33110 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
33111 + h->info_kbuf[size-1] = '\0';
33112
33113 return h->info_kbuf;
33114 }
33115 diff -urNp linux-2.6.32.44/drivers/message/i2o/i2o_config.c linux-2.6.32.44/drivers/message/i2o/i2o_config.c
33116 --- linux-2.6.32.44/drivers/message/i2o/i2o_config.c 2011-03-27 14:31:47.000000000 -0400
33117 +++ linux-2.6.32.44/drivers/message/i2o/i2o_config.c 2011-05-16 21:46:57.000000000 -0400
33118 @@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned lon
33119 struct i2o_message *msg;
33120 unsigned int iop;
33121
33122 + pax_track_stack();
33123 +
33124 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
33125 return -EFAULT;
33126
33127 diff -urNp linux-2.6.32.44/drivers/message/i2o/i2o_proc.c linux-2.6.32.44/drivers/message/i2o/i2o_proc.c
33128 --- linux-2.6.32.44/drivers/message/i2o/i2o_proc.c 2011-03-27 14:31:47.000000000 -0400
33129 +++ linux-2.6.32.44/drivers/message/i2o/i2o_proc.c 2011-04-17 15:56:46.000000000 -0400
33130 @@ -259,13 +259,6 @@ static char *scsi_devices[] = {
33131 "Array Controller Device"
33132 };
33133
33134 -static char *chtostr(u8 * chars, int n)
33135 -{
33136 - char tmp[256];
33137 - tmp[0] = 0;
33138 - return strncat(tmp, (char *)chars, n);
33139 -}
33140 -
33141 static int i2o_report_query_status(struct seq_file *seq, int block_status,
33142 char *group)
33143 {
33144 @@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct
33145
33146 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
33147 seq_printf(seq, "%-#8x", ddm_table.module_id);
33148 - seq_printf(seq, "%-29s",
33149 - chtostr(ddm_table.module_name_version, 28));
33150 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
33151 seq_printf(seq, "%9d ", ddm_table.data_size);
33152 seq_printf(seq, "%8d", ddm_table.code_size);
33153
33154 @@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(s
33155
33156 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
33157 seq_printf(seq, "%-#8x", dst->module_id);
33158 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
33159 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
33160 + seq_printf(seq, "%-.28s", dst->module_name_version);
33161 + seq_printf(seq, "%-.8s", dst->date);
33162 seq_printf(seq, "%8d ", dst->module_size);
33163 seq_printf(seq, "%8d ", dst->mpb_size);
33164 seq_printf(seq, "0x%04x", dst->module_flags);
33165 @@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(str
33166 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
33167 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
33168 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
33169 - seq_printf(seq, "Vendor info : %s\n",
33170 - chtostr((u8 *) (work32 + 2), 16));
33171 - seq_printf(seq, "Product info : %s\n",
33172 - chtostr((u8 *) (work32 + 6), 16));
33173 - seq_printf(seq, "Description : %s\n",
33174 - chtostr((u8 *) (work32 + 10), 16));
33175 - seq_printf(seq, "Product rev. : %s\n",
33176 - chtostr((u8 *) (work32 + 14), 8));
33177 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
33178 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
33179 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
33180 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
33181
33182 seq_printf(seq, "Serial number : ");
33183 print_serial_number(seq, (u8 *) (work32 + 16),
33184 @@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(str
33185 }
33186
33187 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
33188 - seq_printf(seq, "Module name : %s\n",
33189 - chtostr(result.module_name, 24));
33190 - seq_printf(seq, "Module revision : %s\n",
33191 - chtostr(result.module_rev, 8));
33192 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
33193 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
33194
33195 seq_printf(seq, "Serial number : ");
33196 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
33197 @@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq
33198 return 0;
33199 }
33200
33201 - seq_printf(seq, "Device name : %s\n",
33202 - chtostr(result.device_name, 64));
33203 - seq_printf(seq, "Service name : %s\n",
33204 - chtostr(result.service_name, 64));
33205 - seq_printf(seq, "Physical name : %s\n",
33206 - chtostr(result.physical_location, 64));
33207 - seq_printf(seq, "Instance number : %s\n",
33208 - chtostr(result.instance_number, 4));
33209 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
33210 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
33211 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
33212 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
33213
33214 return 0;
33215 }
33216 diff -urNp linux-2.6.32.44/drivers/message/i2o/iop.c linux-2.6.32.44/drivers/message/i2o/iop.c
33217 --- linux-2.6.32.44/drivers/message/i2o/iop.c 2011-03-27 14:31:47.000000000 -0400
33218 +++ linux-2.6.32.44/drivers/message/i2o/iop.c 2011-05-04 17:56:28.000000000 -0400
33219 @@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
33220
33221 spin_lock_irqsave(&c->context_list_lock, flags);
33222
33223 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
33224 - atomic_inc(&c->context_list_counter);
33225 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
33226 + atomic_inc_unchecked(&c->context_list_counter);
33227
33228 - entry->context = atomic_read(&c->context_list_counter);
33229 + entry->context = atomic_read_unchecked(&c->context_list_counter);
33230
33231 list_add(&entry->list, &c->context_list);
33232
33233 @@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(voi
33234
33235 #if BITS_PER_LONG == 64
33236 spin_lock_init(&c->context_list_lock);
33237 - atomic_set(&c->context_list_counter, 0);
33238 + atomic_set_unchecked(&c->context_list_counter, 0);
33239 INIT_LIST_HEAD(&c->context_list);
33240 #endif
33241
33242 diff -urNp linux-2.6.32.44/drivers/mfd/wm8350-i2c.c linux-2.6.32.44/drivers/mfd/wm8350-i2c.c
33243 --- linux-2.6.32.44/drivers/mfd/wm8350-i2c.c 2011-03-27 14:31:47.000000000 -0400
33244 +++ linux-2.6.32.44/drivers/mfd/wm8350-i2c.c 2011-05-16 21:46:57.000000000 -0400
33245 @@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struc
33246 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
33247 int ret;
33248
33249 + pax_track_stack();
33250 +
33251 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
33252 return -EINVAL;
33253
33254 diff -urNp linux-2.6.32.44/drivers/misc/kgdbts.c linux-2.6.32.44/drivers/misc/kgdbts.c
33255 --- linux-2.6.32.44/drivers/misc/kgdbts.c 2011-03-27 14:31:47.000000000 -0400
33256 +++ linux-2.6.32.44/drivers/misc/kgdbts.c 2011-04-17 15:56:46.000000000 -0400
33257 @@ -118,7 +118,7 @@
33258 } while (0)
33259 #define MAX_CONFIG_LEN 40
33260
33261 -static struct kgdb_io kgdbts_io_ops;
33262 +static const struct kgdb_io kgdbts_io_ops;
33263 static char get_buf[BUFMAX];
33264 static int get_buf_cnt;
33265 static char put_buf[BUFMAX];
33266 @@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void
33267 module_put(THIS_MODULE);
33268 }
33269
33270 -static struct kgdb_io kgdbts_io_ops = {
33271 +static const struct kgdb_io kgdbts_io_ops = {
33272 .name = "kgdbts",
33273 .read_char = kgdbts_get_char,
33274 .write_char = kgdbts_put_char,
33275 diff -urNp linux-2.6.32.44/drivers/misc/sgi-gru/gruhandles.c linux-2.6.32.44/drivers/misc/sgi-gru/gruhandles.c
33276 --- linux-2.6.32.44/drivers/misc/sgi-gru/gruhandles.c 2011-03-27 14:31:47.000000000 -0400
33277 +++ linux-2.6.32.44/drivers/misc/sgi-gru/gruhandles.c 2011-04-17 15:56:46.000000000 -0400
33278 @@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistic
33279
33280 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
33281 {
33282 - atomic_long_inc(&mcs_op_statistics[op].count);
33283 - atomic_long_add(clks, &mcs_op_statistics[op].total);
33284 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
33285 + atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
33286 if (mcs_op_statistics[op].max < clks)
33287 mcs_op_statistics[op].max = clks;
33288 }
33289 diff -urNp linux-2.6.32.44/drivers/misc/sgi-gru/gruprocfs.c linux-2.6.32.44/drivers/misc/sgi-gru/gruprocfs.c
33290 --- linux-2.6.32.44/drivers/misc/sgi-gru/gruprocfs.c 2011-03-27 14:31:47.000000000 -0400
33291 +++ linux-2.6.32.44/drivers/misc/sgi-gru/gruprocfs.c 2011-04-17 15:56:46.000000000 -0400
33292 @@ -32,9 +32,9 @@
33293
33294 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
33295
33296 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
33297 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
33298 {
33299 - unsigned long val = atomic_long_read(v);
33300 + unsigned long val = atomic_long_read_unchecked(v);
33301
33302 if (val)
33303 seq_printf(s, "%16lu %s\n", val, id);
33304 @@ -136,8 +136,8 @@ static int mcs_statistics_show(struct se
33305 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
33306
33307 for (op = 0; op < mcsop_last; op++) {
33308 - count = atomic_long_read(&mcs_op_statistics[op].count);
33309 - total = atomic_long_read(&mcs_op_statistics[op].total);
33310 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
33311 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
33312 max = mcs_op_statistics[op].max;
33313 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
33314 count ? total / count : 0, max);
33315 diff -urNp linux-2.6.32.44/drivers/misc/sgi-gru/grutables.h linux-2.6.32.44/drivers/misc/sgi-gru/grutables.h
33316 --- linux-2.6.32.44/drivers/misc/sgi-gru/grutables.h 2011-03-27 14:31:47.000000000 -0400
33317 +++ linux-2.6.32.44/drivers/misc/sgi-gru/grutables.h 2011-04-17 15:56:46.000000000 -0400
33318 @@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
33319 * GRU statistics.
33320 */
33321 struct gru_stats_s {
33322 - atomic_long_t vdata_alloc;
33323 - atomic_long_t vdata_free;
33324 - atomic_long_t gts_alloc;
33325 - atomic_long_t gts_free;
33326 - atomic_long_t vdata_double_alloc;
33327 - atomic_long_t gts_double_allocate;
33328 - atomic_long_t assign_context;
33329 - atomic_long_t assign_context_failed;
33330 - atomic_long_t free_context;
33331 - atomic_long_t load_user_context;
33332 - atomic_long_t load_kernel_context;
33333 - atomic_long_t lock_kernel_context;
33334 - atomic_long_t unlock_kernel_context;
33335 - atomic_long_t steal_user_context;
33336 - atomic_long_t steal_kernel_context;
33337 - atomic_long_t steal_context_failed;
33338 - atomic_long_t nopfn;
33339 - atomic_long_t break_cow;
33340 - atomic_long_t asid_new;
33341 - atomic_long_t asid_next;
33342 - atomic_long_t asid_wrap;
33343 - atomic_long_t asid_reuse;
33344 - atomic_long_t intr;
33345 - atomic_long_t intr_mm_lock_failed;
33346 - atomic_long_t call_os;
33347 - atomic_long_t call_os_offnode_reference;
33348 - atomic_long_t call_os_check_for_bug;
33349 - atomic_long_t call_os_wait_queue;
33350 - atomic_long_t user_flush_tlb;
33351 - atomic_long_t user_unload_context;
33352 - atomic_long_t user_exception;
33353 - atomic_long_t set_context_option;
33354 - atomic_long_t migrate_check;
33355 - atomic_long_t migrated_retarget;
33356 - atomic_long_t migrated_unload;
33357 - atomic_long_t migrated_unload_delay;
33358 - atomic_long_t migrated_nopfn_retarget;
33359 - atomic_long_t migrated_nopfn_unload;
33360 - atomic_long_t tlb_dropin;
33361 - atomic_long_t tlb_dropin_fail_no_asid;
33362 - atomic_long_t tlb_dropin_fail_upm;
33363 - atomic_long_t tlb_dropin_fail_invalid;
33364 - atomic_long_t tlb_dropin_fail_range_active;
33365 - atomic_long_t tlb_dropin_fail_idle;
33366 - atomic_long_t tlb_dropin_fail_fmm;
33367 - atomic_long_t tlb_dropin_fail_no_exception;
33368 - atomic_long_t tlb_dropin_fail_no_exception_war;
33369 - atomic_long_t tfh_stale_on_fault;
33370 - atomic_long_t mmu_invalidate_range;
33371 - atomic_long_t mmu_invalidate_page;
33372 - atomic_long_t mmu_clear_flush_young;
33373 - atomic_long_t flush_tlb;
33374 - atomic_long_t flush_tlb_gru;
33375 - atomic_long_t flush_tlb_gru_tgh;
33376 - atomic_long_t flush_tlb_gru_zero_asid;
33377 -
33378 - atomic_long_t copy_gpa;
33379 -
33380 - atomic_long_t mesq_receive;
33381 - atomic_long_t mesq_receive_none;
33382 - atomic_long_t mesq_send;
33383 - atomic_long_t mesq_send_failed;
33384 - atomic_long_t mesq_noop;
33385 - atomic_long_t mesq_send_unexpected_error;
33386 - atomic_long_t mesq_send_lb_overflow;
33387 - atomic_long_t mesq_send_qlimit_reached;
33388 - atomic_long_t mesq_send_amo_nacked;
33389 - atomic_long_t mesq_send_put_nacked;
33390 - atomic_long_t mesq_qf_not_full;
33391 - atomic_long_t mesq_qf_locked;
33392 - atomic_long_t mesq_qf_noop_not_full;
33393 - atomic_long_t mesq_qf_switch_head_failed;
33394 - atomic_long_t mesq_qf_unexpected_error;
33395 - atomic_long_t mesq_noop_unexpected_error;
33396 - atomic_long_t mesq_noop_lb_overflow;
33397 - atomic_long_t mesq_noop_qlimit_reached;
33398 - atomic_long_t mesq_noop_amo_nacked;
33399 - atomic_long_t mesq_noop_put_nacked;
33400 + atomic_long_unchecked_t vdata_alloc;
33401 + atomic_long_unchecked_t vdata_free;
33402 + atomic_long_unchecked_t gts_alloc;
33403 + atomic_long_unchecked_t gts_free;
33404 + atomic_long_unchecked_t vdata_double_alloc;
33405 + atomic_long_unchecked_t gts_double_allocate;
33406 + atomic_long_unchecked_t assign_context;
33407 + atomic_long_unchecked_t assign_context_failed;
33408 + atomic_long_unchecked_t free_context;
33409 + atomic_long_unchecked_t load_user_context;
33410 + atomic_long_unchecked_t load_kernel_context;
33411 + atomic_long_unchecked_t lock_kernel_context;
33412 + atomic_long_unchecked_t unlock_kernel_context;
33413 + atomic_long_unchecked_t steal_user_context;
33414 + atomic_long_unchecked_t steal_kernel_context;
33415 + atomic_long_unchecked_t steal_context_failed;
33416 + atomic_long_unchecked_t nopfn;
33417 + atomic_long_unchecked_t break_cow;
33418 + atomic_long_unchecked_t asid_new;
33419 + atomic_long_unchecked_t asid_next;
33420 + atomic_long_unchecked_t asid_wrap;
33421 + atomic_long_unchecked_t asid_reuse;
33422 + atomic_long_unchecked_t intr;
33423 + atomic_long_unchecked_t intr_mm_lock_failed;
33424 + atomic_long_unchecked_t call_os;
33425 + atomic_long_unchecked_t call_os_offnode_reference;
33426 + atomic_long_unchecked_t call_os_check_for_bug;
33427 + atomic_long_unchecked_t call_os_wait_queue;
33428 + atomic_long_unchecked_t user_flush_tlb;
33429 + atomic_long_unchecked_t user_unload_context;
33430 + atomic_long_unchecked_t user_exception;
33431 + atomic_long_unchecked_t set_context_option;
33432 + atomic_long_unchecked_t migrate_check;
33433 + atomic_long_unchecked_t migrated_retarget;
33434 + atomic_long_unchecked_t migrated_unload;
33435 + atomic_long_unchecked_t migrated_unload_delay;
33436 + atomic_long_unchecked_t migrated_nopfn_retarget;
33437 + atomic_long_unchecked_t migrated_nopfn_unload;
33438 + atomic_long_unchecked_t tlb_dropin;
33439 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
33440 + atomic_long_unchecked_t tlb_dropin_fail_upm;
33441 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
33442 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
33443 + atomic_long_unchecked_t tlb_dropin_fail_idle;
33444 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
33445 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
33446 + atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
33447 + atomic_long_unchecked_t tfh_stale_on_fault;
33448 + atomic_long_unchecked_t mmu_invalidate_range;
33449 + atomic_long_unchecked_t mmu_invalidate_page;
33450 + atomic_long_unchecked_t mmu_clear_flush_young;
33451 + atomic_long_unchecked_t flush_tlb;
33452 + atomic_long_unchecked_t flush_tlb_gru;
33453 + atomic_long_unchecked_t flush_tlb_gru_tgh;
33454 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
33455 +
33456 + atomic_long_unchecked_t copy_gpa;
33457 +
33458 + atomic_long_unchecked_t mesq_receive;
33459 + atomic_long_unchecked_t mesq_receive_none;
33460 + atomic_long_unchecked_t mesq_send;
33461 + atomic_long_unchecked_t mesq_send_failed;
33462 + atomic_long_unchecked_t mesq_noop;
33463 + atomic_long_unchecked_t mesq_send_unexpected_error;
33464 + atomic_long_unchecked_t mesq_send_lb_overflow;
33465 + atomic_long_unchecked_t mesq_send_qlimit_reached;
33466 + atomic_long_unchecked_t mesq_send_amo_nacked;
33467 + atomic_long_unchecked_t mesq_send_put_nacked;
33468 + atomic_long_unchecked_t mesq_qf_not_full;
33469 + atomic_long_unchecked_t mesq_qf_locked;
33470 + atomic_long_unchecked_t mesq_qf_noop_not_full;
33471 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
33472 + atomic_long_unchecked_t mesq_qf_unexpected_error;
33473 + atomic_long_unchecked_t mesq_noop_unexpected_error;
33474 + atomic_long_unchecked_t mesq_noop_lb_overflow;
33475 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
33476 + atomic_long_unchecked_t mesq_noop_amo_nacked;
33477 + atomic_long_unchecked_t mesq_noop_put_nacked;
33478
33479 };
33480
33481 @@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start
33482 cchop_deallocate, tghop_invalidate, mcsop_last};
33483
33484 struct mcs_op_statistic {
33485 - atomic_long_t count;
33486 - atomic_long_t total;
33487 + atomic_long_unchecked_t count;
33488 + atomic_long_unchecked_t total;
33489 unsigned long max;
33490 };
33491
33492 @@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_st
33493
33494 #define STAT(id) do { \
33495 if (gru_options & OPT_STATS) \
33496 - atomic_long_inc(&gru_stats.id); \
33497 + atomic_long_inc_unchecked(&gru_stats.id); \
33498 } while (0)
33499
33500 #ifdef CONFIG_SGI_GRU_DEBUG
33501 diff -urNp linux-2.6.32.44/drivers/misc/sgi-xp/xpc.h linux-2.6.32.44/drivers/misc/sgi-xp/xpc.h
33502 --- linux-2.6.32.44/drivers/misc/sgi-xp/xpc.h 2011-03-27 14:31:47.000000000 -0400
33503 +++ linux-2.6.32.44/drivers/misc/sgi-xp/xpc.h 2011-08-05 20:33:55.000000000 -0400
33504 @@ -876,7 +876,7 @@ extern struct xpc_registration xpc_regis
33505 /* found in xpc_main.c */
33506 extern struct device *xpc_part;
33507 extern struct device *xpc_chan;
33508 -extern struct xpc_arch_operations xpc_arch_ops;
33509 +extern const struct xpc_arch_operations xpc_arch_ops;
33510 extern int xpc_disengage_timelimit;
33511 extern int xpc_disengage_timedout;
33512 extern int xpc_activate_IRQ_rcvd;
33513 diff -urNp linux-2.6.32.44/drivers/misc/sgi-xp/xpc_main.c linux-2.6.32.44/drivers/misc/sgi-xp/xpc_main.c
33514 --- linux-2.6.32.44/drivers/misc/sgi-xp/xpc_main.c 2011-03-27 14:31:47.000000000 -0400
33515 +++ linux-2.6.32.44/drivers/misc/sgi-xp/xpc_main.c 2011-08-05 20:33:55.000000000 -0400
33516 @@ -169,7 +169,7 @@ static struct notifier_block xpc_die_not
33517 .notifier_call = xpc_system_die,
33518 };
33519
33520 -struct xpc_arch_operations xpc_arch_ops;
33521 +const struct xpc_arch_operations xpc_arch_ops;
33522
33523 /*
33524 * Timer function to enforce the timelimit on the partition disengage.
33525 diff -urNp linux-2.6.32.44/drivers/misc/sgi-xp/xpc_sn2.c linux-2.6.32.44/drivers/misc/sgi-xp/xpc_sn2.c
33526 --- linux-2.6.32.44/drivers/misc/sgi-xp/xpc_sn2.c 2011-03-27 14:31:47.000000000 -0400
33527 +++ linux-2.6.32.44/drivers/misc/sgi-xp/xpc_sn2.c 2011-08-05 20:33:55.000000000 -0400
33528 @@ -2350,7 +2350,7 @@ xpc_received_payload_sn2(struct xpc_chan
33529 xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
33530 }
33531
33532 -static struct xpc_arch_operations xpc_arch_ops_sn2 = {
33533 +static const struct xpc_arch_operations xpc_arch_ops_sn2 = {
33534 .setup_partitions = xpc_setup_partitions_sn2,
33535 .teardown_partitions = xpc_teardown_partitions_sn2,
33536 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
33537 @@ -2413,7 +2413,9 @@ xpc_init_sn2(void)
33538 int ret;
33539 size_t buf_size;
33540
33541 - xpc_arch_ops = xpc_arch_ops_sn2;
33542 + pax_open_kernel();
33543 + memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_sn2, sizeof(xpc_arch_ops_sn2));
33544 + pax_close_kernel();
33545
33546 if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
33547 dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
33548 diff -urNp linux-2.6.32.44/drivers/misc/sgi-xp/xpc_uv.c linux-2.6.32.44/drivers/misc/sgi-xp/xpc_uv.c
33549 --- linux-2.6.32.44/drivers/misc/sgi-xp/xpc_uv.c 2011-03-27 14:31:47.000000000 -0400
33550 +++ linux-2.6.32.44/drivers/misc/sgi-xp/xpc_uv.c 2011-08-05 20:33:55.000000000 -0400
33551 @@ -1669,7 +1669,7 @@ xpc_received_payload_uv(struct xpc_chann
33552 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
33553 }
33554
33555 -static struct xpc_arch_operations xpc_arch_ops_uv = {
33556 +static const struct xpc_arch_operations xpc_arch_ops_uv = {
33557 .setup_partitions = xpc_setup_partitions_uv,
33558 .teardown_partitions = xpc_teardown_partitions_uv,
33559 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
33560 @@ -1729,7 +1729,9 @@ static struct xpc_arch_operations xpc_ar
33561 int
33562 xpc_init_uv(void)
33563 {
33564 - xpc_arch_ops = xpc_arch_ops_uv;
33565 + pax_open_kernel();
33566 + memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_uv, sizeof(xpc_arch_ops_uv));
33567 + pax_close_kernel();
33568
33569 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
33570 dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
33571 diff -urNp linux-2.6.32.44/drivers/misc/sgi-xp/xp.h linux-2.6.32.44/drivers/misc/sgi-xp/xp.h
33572 --- linux-2.6.32.44/drivers/misc/sgi-xp/xp.h 2011-03-27 14:31:47.000000000 -0400
33573 +++ linux-2.6.32.44/drivers/misc/sgi-xp/xp.h 2011-08-05 20:33:55.000000000 -0400
33574 @@ -289,7 +289,7 @@ struct xpc_interface {
33575 xpc_notify_func, void *);
33576 void (*received) (short, int, void *);
33577 enum xp_retval (*partid_to_nasids) (short, void *);
33578 -};
33579 +} __no_const;
33580
33581 extern struct xpc_interface xpc_interface;
33582
33583 diff -urNp linux-2.6.32.44/drivers/mtd/chips/cfi_cmdset_0001.c linux-2.6.32.44/drivers/mtd/chips/cfi_cmdset_0001.c
33584 --- linux-2.6.32.44/drivers/mtd/chips/cfi_cmdset_0001.c 2011-03-27 14:31:47.000000000 -0400
33585 +++ linux-2.6.32.44/drivers/mtd/chips/cfi_cmdset_0001.c 2011-05-16 21:46:57.000000000 -0400
33586 @@ -743,6 +743,8 @@ static int chip_ready (struct map_info *
33587 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
33588 unsigned long timeo = jiffies + HZ;
33589
33590 + pax_track_stack();
33591 +
33592 /* Prevent setting state FL_SYNCING for chip in suspended state. */
33593 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
33594 goto sleep;
33595 @@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(stru
33596 unsigned long initial_adr;
33597 int initial_len = len;
33598
33599 + pax_track_stack();
33600 +
33601 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
33602 adr += chip->start;
33603 initial_adr = adr;
33604 @@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(st
33605 int retries = 3;
33606 int ret;
33607
33608 + pax_track_stack();
33609 +
33610 adr += chip->start;
33611
33612 retry:
33613 diff -urNp linux-2.6.32.44/drivers/mtd/chips/cfi_cmdset_0020.c linux-2.6.32.44/drivers/mtd/chips/cfi_cmdset_0020.c
33614 --- linux-2.6.32.44/drivers/mtd/chips/cfi_cmdset_0020.c 2011-03-27 14:31:47.000000000 -0400
33615 +++ linux-2.6.32.44/drivers/mtd/chips/cfi_cmdset_0020.c 2011-05-16 21:46:57.000000000 -0400
33616 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
33617 unsigned long cmd_addr;
33618 struct cfi_private *cfi = map->fldrv_priv;
33619
33620 + pax_track_stack();
33621 +
33622 adr += chip->start;
33623
33624 /* Ensure cmd read/writes are aligned. */
33625 @@ -428,6 +430,8 @@ static inline int do_write_buffer(struct
33626 DECLARE_WAITQUEUE(wait, current);
33627 int wbufsize, z;
33628
33629 + pax_track_stack();
33630 +
33631 /* M58LW064A requires bus alignment for buffer wriets -- saw */
33632 if (adr & (map_bankwidth(map)-1))
33633 return -EINVAL;
33634 @@ -742,6 +746,8 @@ static inline int do_erase_oneblock(stru
33635 DECLARE_WAITQUEUE(wait, current);
33636 int ret = 0;
33637
33638 + pax_track_stack();
33639 +
33640 adr += chip->start;
33641
33642 /* Let's determine this according to the interleave only once */
33643 @@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struc
33644 unsigned long timeo = jiffies + HZ;
33645 DECLARE_WAITQUEUE(wait, current);
33646
33647 + pax_track_stack();
33648 +
33649 adr += chip->start;
33650
33651 /* Let's determine this according to the interleave only once */
33652 @@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(str
33653 unsigned long timeo = jiffies + HZ;
33654 DECLARE_WAITQUEUE(wait, current);
33655
33656 + pax_track_stack();
33657 +
33658 adr += chip->start;
33659
33660 /* Let's determine this according to the interleave only once */
33661 diff -urNp linux-2.6.32.44/drivers/mtd/devices/doc2000.c linux-2.6.32.44/drivers/mtd/devices/doc2000.c
33662 --- linux-2.6.32.44/drivers/mtd/devices/doc2000.c 2011-03-27 14:31:47.000000000 -0400
33663 +++ linux-2.6.32.44/drivers/mtd/devices/doc2000.c 2011-04-17 15:56:46.000000000 -0400
33664 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
33665
33666 /* The ECC will not be calculated correctly if less than 512 is written */
33667 /* DBB-
33668 - if (len != 0x200 && eccbuf)
33669 + if (len != 0x200)
33670 printk(KERN_WARNING
33671 "ECC needs a full sector write (adr: %lx size %lx)\n",
33672 (long) to, (long) len);
33673 diff -urNp linux-2.6.32.44/drivers/mtd/devices/doc2001.c linux-2.6.32.44/drivers/mtd/devices/doc2001.c
33674 --- linux-2.6.32.44/drivers/mtd/devices/doc2001.c 2011-03-27 14:31:47.000000000 -0400
33675 +++ linux-2.6.32.44/drivers/mtd/devices/doc2001.c 2011-04-17 15:56:46.000000000 -0400
33676 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
33677 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
33678
33679 /* Don't allow read past end of device */
33680 - if (from >= this->totlen)
33681 + if (from >= this->totlen || !len)
33682 return -EINVAL;
33683
33684 /* Don't allow a single read to cross a 512-byte block boundary */
33685 diff -urNp linux-2.6.32.44/drivers/mtd/ftl.c linux-2.6.32.44/drivers/mtd/ftl.c
33686 --- linux-2.6.32.44/drivers/mtd/ftl.c 2011-03-27 14:31:47.000000000 -0400
33687 +++ linux-2.6.32.44/drivers/mtd/ftl.c 2011-05-16 21:46:57.000000000 -0400
33688 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
33689 loff_t offset;
33690 uint16_t srcunitswap = cpu_to_le16(srcunit);
33691
33692 + pax_track_stack();
33693 +
33694 eun = &part->EUNInfo[srcunit];
33695 xfer = &part->XferInfo[xferunit];
33696 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
33697 diff -urNp linux-2.6.32.44/drivers/mtd/inftlcore.c linux-2.6.32.44/drivers/mtd/inftlcore.c
33698 --- linux-2.6.32.44/drivers/mtd/inftlcore.c 2011-03-27 14:31:47.000000000 -0400
33699 +++ linux-2.6.32.44/drivers/mtd/inftlcore.c 2011-05-16 21:46:57.000000000 -0400
33700 @@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLr
33701 struct inftl_oob oob;
33702 size_t retlen;
33703
33704 + pax_track_stack();
33705 +
33706 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
33707 "pending=%d)\n", inftl, thisVUC, pendingblock);
33708
33709 diff -urNp linux-2.6.32.44/drivers/mtd/inftlmount.c linux-2.6.32.44/drivers/mtd/inftlmount.c
33710 --- linux-2.6.32.44/drivers/mtd/inftlmount.c 2011-03-27 14:31:47.000000000 -0400
33711 +++ linux-2.6.32.44/drivers/mtd/inftlmount.c 2011-05-16 21:46:57.000000000 -0400
33712 @@ -54,6 +54,8 @@ static int find_boot_record(struct INFTL
33713 struct INFTLPartition *ip;
33714 size_t retlen;
33715
33716 + pax_track_stack();
33717 +
33718 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
33719
33720 /*
33721 diff -urNp linux-2.6.32.44/drivers/mtd/lpddr/qinfo_probe.c linux-2.6.32.44/drivers/mtd/lpddr/qinfo_probe.c
33722 --- linux-2.6.32.44/drivers/mtd/lpddr/qinfo_probe.c 2011-03-27 14:31:47.000000000 -0400
33723 +++ linux-2.6.32.44/drivers/mtd/lpddr/qinfo_probe.c 2011-05-16 21:46:57.000000000 -0400
33724 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
33725 {
33726 map_word pfow_val[4];
33727
33728 + pax_track_stack();
33729 +
33730 /* Check identification string */
33731 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
33732 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
33733 diff -urNp linux-2.6.32.44/drivers/mtd/mtdchar.c linux-2.6.32.44/drivers/mtd/mtdchar.c
33734 --- linux-2.6.32.44/drivers/mtd/mtdchar.c 2011-03-27 14:31:47.000000000 -0400
33735 +++ linux-2.6.32.44/drivers/mtd/mtdchar.c 2011-05-16 21:46:57.000000000 -0400
33736 @@ -460,6 +460,8 @@ static int mtd_ioctl(struct inode *inode
33737 u_long size;
33738 struct mtd_info_user info;
33739
33740 + pax_track_stack();
33741 +
33742 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
33743
33744 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
33745 diff -urNp linux-2.6.32.44/drivers/mtd/nftlcore.c linux-2.6.32.44/drivers/mtd/nftlcore.c
33746 --- linux-2.6.32.44/drivers/mtd/nftlcore.c 2011-03-27 14:31:47.000000000 -0400
33747 +++ linux-2.6.32.44/drivers/mtd/nftlcore.c 2011-05-16 21:46:57.000000000 -0400
33748 @@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLre
33749 int inplace = 1;
33750 size_t retlen;
33751
33752 + pax_track_stack();
33753 +
33754 memset(BlockMap, 0xff, sizeof(BlockMap));
33755 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
33756
33757 diff -urNp linux-2.6.32.44/drivers/mtd/nftlmount.c linux-2.6.32.44/drivers/mtd/nftlmount.c
33758 --- linux-2.6.32.44/drivers/mtd/nftlmount.c 2011-03-27 14:31:47.000000000 -0400
33759 +++ linux-2.6.32.44/drivers/mtd/nftlmount.c 2011-05-18 20:09:37.000000000 -0400
33760 @@ -23,6 +23,7 @@
33761 #include <asm/errno.h>
33762 #include <linux/delay.h>
33763 #include <linux/slab.h>
33764 +#include <linux/sched.h>
33765 #include <linux/mtd/mtd.h>
33766 #include <linux/mtd/nand.h>
33767 #include <linux/mtd/nftl.h>
33768 @@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLr
33769 struct mtd_info *mtd = nftl->mbd.mtd;
33770 unsigned int i;
33771
33772 + pax_track_stack();
33773 +
33774 /* Assume logical EraseSize == physical erasesize for starting the scan.
33775 We'll sort it out later if we find a MediaHeader which says otherwise */
33776 /* Actually, we won't. The new DiskOnChip driver has already scanned
33777 diff -urNp linux-2.6.32.44/drivers/mtd/ubi/build.c linux-2.6.32.44/drivers/mtd/ubi/build.c
33778 --- linux-2.6.32.44/drivers/mtd/ubi/build.c 2011-03-27 14:31:47.000000000 -0400
33779 +++ linux-2.6.32.44/drivers/mtd/ubi/build.c 2011-04-17 15:56:46.000000000 -0400
33780 @@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
33781 static int __init bytes_str_to_int(const char *str)
33782 {
33783 char *endp;
33784 - unsigned long result;
33785 + unsigned long result, scale = 1;
33786
33787 result = simple_strtoul(str, &endp, 0);
33788 if (str == endp || result >= INT_MAX) {
33789 @@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const
33790
33791 switch (*endp) {
33792 case 'G':
33793 - result *= 1024;
33794 + scale *= 1024;
33795 case 'M':
33796 - result *= 1024;
33797 + scale *= 1024;
33798 case 'K':
33799 - result *= 1024;
33800 + scale *= 1024;
33801 if (endp[1] == 'i' && endp[2] == 'B')
33802 endp += 2;
33803 case '\0':
33804 @@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const
33805 return -EINVAL;
33806 }
33807
33808 - return result;
33809 + if ((intoverflow_t)result*scale >= INT_MAX) {
33810 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
33811 + str);
33812 + return -EINVAL;
33813 + }
33814 +
33815 + return result*scale;
33816 }
33817
33818 /**
33819 diff -urNp linux-2.6.32.44/drivers/net/bnx2.c linux-2.6.32.44/drivers/net/bnx2.c
33820 --- linux-2.6.32.44/drivers/net/bnx2.c 2011-03-27 14:31:47.000000000 -0400
33821 +++ linux-2.6.32.44/drivers/net/bnx2.c 2011-05-16 21:46:57.000000000 -0400
33822 @@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
33823 int rc = 0;
33824 u32 magic, csum;
33825
33826 + pax_track_stack();
33827 +
33828 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
33829 goto test_nvram_done;
33830
33831 diff -urNp linux-2.6.32.44/drivers/net/cxgb3/l2t.h linux-2.6.32.44/drivers/net/cxgb3/l2t.h
33832 --- linux-2.6.32.44/drivers/net/cxgb3/l2t.h 2011-03-27 14:31:47.000000000 -0400
33833 +++ linux-2.6.32.44/drivers/net/cxgb3/l2t.h 2011-08-05 20:33:55.000000000 -0400
33834 @@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)
33835 */
33836 struct l2t_skb_cb {
33837 arp_failure_handler_func arp_failure_handler;
33838 -};
33839 +} __no_const;
33840
33841 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
33842
33843 diff -urNp linux-2.6.32.44/drivers/net/cxgb3/t3_hw.c linux-2.6.32.44/drivers/net/cxgb3/t3_hw.c
33844 --- linux-2.6.32.44/drivers/net/cxgb3/t3_hw.c 2011-03-27 14:31:47.000000000 -0400
33845 +++ linux-2.6.32.44/drivers/net/cxgb3/t3_hw.c 2011-05-16 21:46:57.000000000 -0400
33846 @@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter
33847 int i, addr, ret;
33848 struct t3_vpd vpd;
33849
33850 + pax_track_stack();
33851 +
33852 /*
33853 * Card information is normally at VPD_BASE but some early cards had
33854 * it at 0.
33855 diff -urNp linux-2.6.32.44/drivers/net/e1000e/82571.c linux-2.6.32.44/drivers/net/e1000e/82571.c
33856 --- linux-2.6.32.44/drivers/net/e1000e/82571.c 2011-03-27 14:31:47.000000000 -0400
33857 +++ linux-2.6.32.44/drivers/net/e1000e/82571.c 2011-08-05 20:33:55.000000000 -0400
33858 @@ -245,22 +245,22 @@ static s32 e1000_init_mac_params_82571(s
33859 /* check for link */
33860 switch (hw->phy.media_type) {
33861 case e1000_media_type_copper:
33862 - func->setup_physical_interface = e1000_setup_copper_link_82571;
33863 - func->check_for_link = e1000e_check_for_copper_link;
33864 - func->get_link_up_info = e1000e_get_speed_and_duplex_copper;
33865 + *(void **)&func->setup_physical_interface = e1000_setup_copper_link_82571;
33866 + *(void **)&func->check_for_link = e1000e_check_for_copper_link;
33867 + *(void **)&func->get_link_up_info = e1000e_get_speed_and_duplex_copper;
33868 break;
33869 case e1000_media_type_fiber:
33870 - func->setup_physical_interface =
33871 + *(void **)&func->setup_physical_interface =
33872 e1000_setup_fiber_serdes_link_82571;
33873 - func->check_for_link = e1000e_check_for_fiber_link;
33874 - func->get_link_up_info =
33875 + *(void **)&func->check_for_link = e1000e_check_for_fiber_link;
33876 + *(void **)&func->get_link_up_info =
33877 e1000e_get_speed_and_duplex_fiber_serdes;
33878 break;
33879 case e1000_media_type_internal_serdes:
33880 - func->setup_physical_interface =
33881 + *(void **)&func->setup_physical_interface =
33882 e1000_setup_fiber_serdes_link_82571;
33883 - func->check_for_link = e1000_check_for_serdes_link_82571;
33884 - func->get_link_up_info =
33885 + *(void **)&func->check_for_link = e1000_check_for_serdes_link_82571;
33886 + *(void **)&func->get_link_up_info =
33887 e1000e_get_speed_and_duplex_fiber_serdes;
33888 break;
33889 default:
33890 @@ -271,12 +271,12 @@ static s32 e1000_init_mac_params_82571(s
33891 switch (hw->mac.type) {
33892 case e1000_82574:
33893 case e1000_82583:
33894 - func->check_mng_mode = e1000_check_mng_mode_82574;
33895 - func->led_on = e1000_led_on_82574;
33896 + *(void **)&func->check_mng_mode = e1000_check_mng_mode_82574;
33897 + *(void **)&func->led_on = e1000_led_on_82574;
33898 break;
33899 default:
33900 - func->check_mng_mode = e1000e_check_mng_mode_generic;
33901 - func->led_on = e1000e_led_on_generic;
33902 + *(void **)&func->check_mng_mode = e1000e_check_mng_mode_generic;
33903 + *(void **)&func->led_on = e1000e_led_on_generic;
33904 break;
33905 }
33906
33907 @@ -1656,7 +1656,7 @@ static void e1000_clear_hw_cntrs_82571(s
33908 temp = er32(ICRXDMTC);
33909 }
33910
33911 -static struct e1000_mac_operations e82571_mac_ops = {
33912 +static const struct e1000_mac_operations e82571_mac_ops = {
33913 /* .check_mng_mode: mac type dependent */
33914 /* .check_for_link: media type dependent */
33915 .id_led_init = e1000e_id_led_init,
33916 @@ -1674,7 +1674,7 @@ static struct e1000_mac_operations e8257
33917 .setup_led = e1000e_setup_led_generic,
33918 };
33919
33920 -static struct e1000_phy_operations e82_phy_ops_igp = {
33921 +static const struct e1000_phy_operations e82_phy_ops_igp = {
33922 .acquire_phy = e1000_get_hw_semaphore_82571,
33923 .check_reset_block = e1000e_check_reset_block_generic,
33924 .commit_phy = NULL,
33925 @@ -1691,7 +1691,7 @@ static struct e1000_phy_operations e82_p
33926 .cfg_on_link_up = NULL,
33927 };
33928
33929 -static struct e1000_phy_operations e82_phy_ops_m88 = {
33930 +static const struct e1000_phy_operations e82_phy_ops_m88 = {
33931 .acquire_phy = e1000_get_hw_semaphore_82571,
33932 .check_reset_block = e1000e_check_reset_block_generic,
33933 .commit_phy = e1000e_phy_sw_reset,
33934 @@ -1708,7 +1708,7 @@ static struct e1000_phy_operations e82_p
33935 .cfg_on_link_up = NULL,
33936 };
33937
33938 -static struct e1000_phy_operations e82_phy_ops_bm = {
33939 +static const struct e1000_phy_operations e82_phy_ops_bm = {
33940 .acquire_phy = e1000_get_hw_semaphore_82571,
33941 .check_reset_block = e1000e_check_reset_block_generic,
33942 .commit_phy = e1000e_phy_sw_reset,
33943 @@ -1725,7 +1725,7 @@ static struct e1000_phy_operations e82_p
33944 .cfg_on_link_up = NULL,
33945 };
33946
33947 -static struct e1000_nvm_operations e82571_nvm_ops = {
33948 +static const struct e1000_nvm_operations e82571_nvm_ops = {
33949 .acquire_nvm = e1000_acquire_nvm_82571,
33950 .read_nvm = e1000e_read_nvm_eerd,
33951 .release_nvm = e1000_release_nvm_82571,
33952 diff -urNp linux-2.6.32.44/drivers/net/e1000e/e1000.h linux-2.6.32.44/drivers/net/e1000e/e1000.h
33953 --- linux-2.6.32.44/drivers/net/e1000e/e1000.h 2011-03-27 14:31:47.000000000 -0400
33954 +++ linux-2.6.32.44/drivers/net/e1000e/e1000.h 2011-04-17 15:56:46.000000000 -0400
33955 @@ -375,9 +375,9 @@ struct e1000_info {
33956 u32 pba;
33957 u32 max_hw_frame_size;
33958 s32 (*get_variants)(struct e1000_adapter *);
33959 - struct e1000_mac_operations *mac_ops;
33960 - struct e1000_phy_operations *phy_ops;
33961 - struct e1000_nvm_operations *nvm_ops;
33962 + const struct e1000_mac_operations *mac_ops;
33963 + const struct e1000_phy_operations *phy_ops;
33964 + const struct e1000_nvm_operations *nvm_ops;
33965 };
33966
33967 /* hardware capability, feature, and workaround flags */
33968 diff -urNp linux-2.6.32.44/drivers/net/e1000e/es2lan.c linux-2.6.32.44/drivers/net/e1000e/es2lan.c
33969 --- linux-2.6.32.44/drivers/net/e1000e/es2lan.c 2011-03-27 14:31:47.000000000 -0400
33970 +++ linux-2.6.32.44/drivers/net/e1000e/es2lan.c 2011-08-05 20:33:55.000000000 -0400
33971 @@ -229,16 +229,16 @@ static s32 e1000_init_mac_params_80003es
33972 /* check for link */
33973 switch (hw->phy.media_type) {
33974 case e1000_media_type_copper:
33975 - func->setup_physical_interface = e1000_setup_copper_link_80003es2lan;
33976 - func->check_for_link = e1000e_check_for_copper_link;
33977 + *(void **)&func->setup_physical_interface = e1000_setup_copper_link_80003es2lan;
33978 + *(void **)&func->check_for_link = e1000e_check_for_copper_link;
33979 break;
33980 case e1000_media_type_fiber:
33981 - func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
33982 - func->check_for_link = e1000e_check_for_fiber_link;
33983 + *(void **)&func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
33984 + *(void **)&func->check_for_link = e1000e_check_for_fiber_link;
33985 break;
33986 case e1000_media_type_internal_serdes:
33987 - func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
33988 - func->check_for_link = e1000e_check_for_serdes_link;
33989 + *(void **)&func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
33990 + *(void **)&func->check_for_link = e1000e_check_for_serdes_link;
33991 break;
33992 default:
33993 return -E1000_ERR_CONFIG;
33994 @@ -1365,7 +1365,7 @@ static void e1000_clear_hw_cntrs_80003es
33995 temp = er32(ICRXDMTC);
33996 }
33997
33998 -static struct e1000_mac_operations es2_mac_ops = {
33999 +static const struct e1000_mac_operations es2_mac_ops = {
34000 .id_led_init = e1000e_id_led_init,
34001 .check_mng_mode = e1000e_check_mng_mode_generic,
34002 /* check_for_link dependent on media type */
34003 @@ -1383,7 +1383,7 @@ static struct e1000_mac_operations es2_m
34004 .setup_led = e1000e_setup_led_generic,
34005 };
34006
34007 -static struct e1000_phy_operations es2_phy_ops = {
34008 +static const struct e1000_phy_operations es2_phy_ops = {
34009 .acquire_phy = e1000_acquire_phy_80003es2lan,
34010 .check_reset_block = e1000e_check_reset_block_generic,
34011 .commit_phy = e1000e_phy_sw_reset,
34012 @@ -1400,7 +1400,7 @@ static struct e1000_phy_operations es2_p
34013 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
34014 };
34015
34016 -static struct e1000_nvm_operations es2_nvm_ops = {
34017 +static const struct e1000_nvm_operations es2_nvm_ops = {
34018 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
34019 .read_nvm = e1000e_read_nvm_eerd,
34020 .release_nvm = e1000_release_nvm_80003es2lan,
34021 diff -urNp linux-2.6.32.44/drivers/net/e1000e/hw.h linux-2.6.32.44/drivers/net/e1000e/hw.h
34022 --- linux-2.6.32.44/drivers/net/e1000e/hw.h 2011-03-27 14:31:47.000000000 -0400
34023 +++ linux-2.6.32.44/drivers/net/e1000e/hw.h 2011-04-17 15:56:46.000000000 -0400
34024 @@ -756,34 +756,34 @@ struct e1000_mac_operations {
34025
34026 /* Function pointers for the PHY. */
34027 struct e1000_phy_operations {
34028 - s32 (*acquire_phy)(struct e1000_hw *);
34029 - s32 (*check_polarity)(struct e1000_hw *);
34030 - s32 (*check_reset_block)(struct e1000_hw *);
34031 - s32 (*commit_phy)(struct e1000_hw *);
34032 - s32 (*force_speed_duplex)(struct e1000_hw *);
34033 - s32 (*get_cfg_done)(struct e1000_hw *hw);
34034 - s32 (*get_cable_length)(struct e1000_hw *);
34035 - s32 (*get_phy_info)(struct e1000_hw *);
34036 - s32 (*read_phy_reg)(struct e1000_hw *, u32, u16 *);
34037 - s32 (*read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
34038 - void (*release_phy)(struct e1000_hw *);
34039 - s32 (*reset_phy)(struct e1000_hw *);
34040 - s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
34041 - s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
34042 - s32 (*write_phy_reg)(struct e1000_hw *, u32, u16);
34043 - s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
34044 - s32 (*cfg_on_link_up)(struct e1000_hw *);
34045 + s32 (* acquire_phy)(struct e1000_hw *);
34046 + s32 (* check_polarity)(struct e1000_hw *);
34047 + s32 (* check_reset_block)(struct e1000_hw *);
34048 + s32 (* commit_phy)(struct e1000_hw *);
34049 + s32 (* force_speed_duplex)(struct e1000_hw *);
34050 + s32 (* get_cfg_done)(struct e1000_hw *hw);
34051 + s32 (* get_cable_length)(struct e1000_hw *);
34052 + s32 (* get_phy_info)(struct e1000_hw *);
34053 + s32 (* read_phy_reg)(struct e1000_hw *, u32, u16 *);
34054 + s32 (* read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
34055 + void (* release_phy)(struct e1000_hw *);
34056 + s32 (* reset_phy)(struct e1000_hw *);
34057 + s32 (* set_d0_lplu_state)(struct e1000_hw *, bool);
34058 + s32 (* set_d3_lplu_state)(struct e1000_hw *, bool);
34059 + s32 (* write_phy_reg)(struct e1000_hw *, u32, u16);
34060 + s32 (* write_phy_reg_locked)(struct e1000_hw *, u32, u16);
34061 + s32 (* cfg_on_link_up)(struct e1000_hw *);
34062 };
34063
34064 /* Function pointers for the NVM. */
34065 struct e1000_nvm_operations {
34066 - s32 (*acquire_nvm)(struct e1000_hw *);
34067 - s32 (*read_nvm)(struct e1000_hw *, u16, u16, u16 *);
34068 - void (*release_nvm)(struct e1000_hw *);
34069 - s32 (*update_nvm)(struct e1000_hw *);
34070 - s32 (*valid_led_default)(struct e1000_hw *, u16 *);
34071 - s32 (*validate_nvm)(struct e1000_hw *);
34072 - s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
34073 + s32 (* const acquire_nvm)(struct e1000_hw *);
34074 + s32 (* const read_nvm)(struct e1000_hw *, u16, u16, u16 *);
34075 + void (* const release_nvm)(struct e1000_hw *);
34076 + s32 (* const update_nvm)(struct e1000_hw *);
34077 + s32 (* const valid_led_default)(struct e1000_hw *, u16 *);
34078 + s32 (* const validate_nvm)(struct e1000_hw *);
34079 + s32 (* const write_nvm)(struct e1000_hw *, u16, u16, u16 *);
34080 };
34081
34082 struct e1000_mac_info {
34083 diff -urNp linux-2.6.32.44/drivers/net/e1000e/ich8lan.c linux-2.6.32.44/drivers/net/e1000e/ich8lan.c
34084 --- linux-2.6.32.44/drivers/net/e1000e/ich8lan.c 2011-05-10 22:12:01.000000000 -0400
34085 +++ linux-2.6.32.44/drivers/net/e1000e/ich8lan.c 2011-08-05 20:33:55.000000000 -0400
34086 @@ -265,13 +265,13 @@ static s32 e1000_init_phy_params_pchlan(
34087 phy->addr = 1;
34088 phy->reset_delay_us = 100;
34089
34090 - phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
34091 - phy->ops.read_phy_reg = e1000_read_phy_reg_hv;
34092 - phy->ops.read_phy_reg_locked = e1000_read_phy_reg_hv_locked;
34093 - phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
34094 - phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
34095 - phy->ops.write_phy_reg = e1000_write_phy_reg_hv;
34096 - phy->ops.write_phy_reg_locked = e1000_write_phy_reg_hv_locked;
34097 + *(void **)&phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
34098 + *(void **)&phy->ops.read_phy_reg = e1000_read_phy_reg_hv;
34099 + *(void **)&phy->ops.read_phy_reg_locked = e1000_read_phy_reg_hv_locked;
34100 + *(void **)&phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
34101 + *(void **)&phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
34102 + *(void **)&phy->ops.write_phy_reg = e1000_write_phy_reg_hv;
34103 + *(void **)&phy->ops.write_phy_reg_locked = e1000_write_phy_reg_hv_locked;
34104 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
34105
34106 /*
34107 @@ -289,12 +289,12 @@ static s32 e1000_init_phy_params_pchlan(
34108 phy->type = e1000e_get_phy_type_from_id(phy->id);
34109
34110 if (phy->type == e1000_phy_82577) {
34111 - phy->ops.check_polarity = e1000_check_polarity_82577;
34112 - phy->ops.force_speed_duplex =
34113 + *(void **)&phy->ops.check_polarity = e1000_check_polarity_82577;
34114 + *(void **)&phy->ops.force_speed_duplex =
34115 e1000_phy_force_speed_duplex_82577;
34116 - phy->ops.get_cable_length = e1000_get_cable_length_82577;
34117 - phy->ops.get_phy_info = e1000_get_phy_info_82577;
34118 - phy->ops.commit_phy = e1000e_phy_sw_reset;
34119 + *(void **)&phy->ops.get_cable_length = e1000_get_cable_length_82577;
34120 + *(void **)&phy->ops.get_phy_info = e1000_get_phy_info_82577;
34121 + *(void **)&phy->ops.commit_phy = e1000e_phy_sw_reset;
34122 }
34123
34124 out:
34125 @@ -322,8 +322,8 @@ static s32 e1000_init_phy_params_ich8lan
34126 */
34127 ret_val = e1000e_determine_phy_address(hw);
34128 if (ret_val) {
34129 - hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm;
34130 - hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm;
34131 + *(void **)&hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm;
34132 + *(void **)&hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm;
34133 ret_val = e1000e_determine_phy_address(hw);
34134 if (ret_val)
34135 return ret_val;
34136 @@ -343,8 +343,8 @@ static s32 e1000_init_phy_params_ich8lan
34137 case IGP03E1000_E_PHY_ID:
34138 phy->type = e1000_phy_igp_3;
34139 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
34140 - phy->ops.read_phy_reg_locked = e1000e_read_phy_reg_igp_locked;
34141 - phy->ops.write_phy_reg_locked = e1000e_write_phy_reg_igp_locked;
34142 + *(void **)&phy->ops.read_phy_reg_locked = e1000e_read_phy_reg_igp_locked;
34143 + *(void **)&phy->ops.write_phy_reg_locked = e1000e_write_phy_reg_igp_locked;
34144 break;
34145 case IFE_E_PHY_ID:
34146 case IFE_PLUS_E_PHY_ID:
34147 @@ -355,16 +355,16 @@ static s32 e1000_init_phy_params_ich8lan
34148 case BME1000_E_PHY_ID:
34149 phy->type = e1000_phy_bm;
34150 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
34151 - hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm;
34152 - hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm;
34153 - hw->phy.ops.commit_phy = e1000e_phy_sw_reset;
34154 + *(void **)&hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm;
34155 + *(void **)&hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm;
34156 + *(void **)&hw->phy.ops.commit_phy = e1000e_phy_sw_reset;
34157 break;
34158 default:
34159 return -E1000_ERR_PHY;
34160 break;
34161 }
34162
34163 - phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
34164 + *(void **)&phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
34165
34166 return 0;
34167 }
34168 @@ -455,25 +455,25 @@ static s32 e1000_init_mac_params_ich8lan
34169 case e1000_ich9lan:
34170 case e1000_ich10lan:
34171 /* ID LED init */
34172 - mac->ops.id_led_init = e1000e_id_led_init;
34173 + *(void **)&mac->ops.id_led_init = e1000e_id_led_init;
34174 /* setup LED */
34175 - mac->ops.setup_led = e1000e_setup_led_generic;
34176 + *(void **)&mac->ops.setup_led = e1000e_setup_led_generic;
34177 /* cleanup LED */
34178 - mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
34179 + *(void **)&mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
34180 /* turn on/off LED */
34181 - mac->ops.led_on = e1000_led_on_ich8lan;
34182 - mac->ops.led_off = e1000_led_off_ich8lan;
34183 + *(void **)&mac->ops.led_on = e1000_led_on_ich8lan;
34184 + *(void **)&mac->ops.led_off = e1000_led_off_ich8lan;
34185 break;
34186 case e1000_pchlan:
34187 /* ID LED init */
34188 - mac->ops.id_led_init = e1000_id_led_init_pchlan;
34189 + *(void **)&mac->ops.id_led_init = e1000_id_led_init_pchlan;
34190 /* setup LED */
34191 - mac->ops.setup_led = e1000_setup_led_pchlan;
34192 + *(void **)&mac->ops.setup_led = e1000_setup_led_pchlan;
34193 /* cleanup LED */
34194 - mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
34195 + *(void **)&mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
34196 /* turn on/off LED */
34197 - mac->ops.led_on = e1000_led_on_pchlan;
34198 - mac->ops.led_off = e1000_led_off_pchlan;
34199 + *(void **)&mac->ops.led_on = e1000_led_on_pchlan;
34200 + *(void **)&mac->ops.led_off = e1000_led_off_pchlan;
34201 break;
34202 default:
34203 break;
34204 @@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan
34205 }
34206 }
34207
34208 -static struct e1000_mac_operations ich8_mac_ops = {
34209 +static const struct e1000_mac_operations ich8_mac_ops = {
34210 .id_led_init = e1000e_id_led_init,
34211 .check_mng_mode = e1000_check_mng_mode_ich8lan,
34212 .check_for_link = e1000_check_for_copper_link_ich8lan,
34213 @@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_
34214 /* id_led_init dependent on mac type */
34215 };
34216
34217 -static struct e1000_phy_operations ich8_phy_ops = {
34218 +static const struct e1000_phy_operations ich8_phy_ops = {
34219 .acquire_phy = e1000_acquire_swflag_ich8lan,
34220 .check_reset_block = e1000_check_reset_block_ich8lan,
34221 .commit_phy = NULL,
34222 @@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_
34223 .write_phy_reg = e1000e_write_phy_reg_igp,
34224 };
34225
34226 -static struct e1000_nvm_operations ich8_nvm_ops = {
34227 +static const struct e1000_nvm_operations ich8_nvm_ops = {
34228 .acquire_nvm = e1000_acquire_nvm_ich8lan,
34229 .read_nvm = e1000_read_nvm_ich8lan,
34230 .release_nvm = e1000_release_nvm_ich8lan,
34231 diff -urNp linux-2.6.32.44/drivers/net/e1000e/netdev.c linux-2.6.32.44/drivers/net/e1000e/netdev.c
34232 --- linux-2.6.32.44/drivers/net/e1000e/netdev.c 2011-03-27 14:31:47.000000000 -0400
34233 +++ linux-2.6.32.44/drivers/net/e1000e/netdev.c 2011-08-05 20:33:55.000000000 -0400
34234 @@ -5071,9 +5071,9 @@ static int __devinit e1000_probe(struct
34235
34236 err = -EIO;
34237
34238 - memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
34239 - memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
34240 - memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
34241 + memcpy((void *)&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
34242 + memcpy((void *)&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
34243 + memcpy((void *)&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
34244
34245 err = ei->get_variants(adapter);
34246 if (err)
34247 diff -urNp linux-2.6.32.44/drivers/net/hamradio/6pack.c linux-2.6.32.44/drivers/net/hamradio/6pack.c
34248 --- linux-2.6.32.44/drivers/net/hamradio/6pack.c 2011-07-13 17:23:04.000000000 -0400
34249 +++ linux-2.6.32.44/drivers/net/hamradio/6pack.c 2011-07-13 17:23:18.000000000 -0400
34250 @@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct t
34251 unsigned char buf[512];
34252 int count1;
34253
34254 + pax_track_stack();
34255 +
34256 if (!count)
34257 return;
34258
34259 diff -urNp linux-2.6.32.44/drivers/net/ibmveth.c linux-2.6.32.44/drivers/net/ibmveth.c
34260 --- linux-2.6.32.44/drivers/net/ibmveth.c 2011-03-27 14:31:47.000000000 -0400
34261 +++ linux-2.6.32.44/drivers/net/ibmveth.c 2011-04-17 15:56:46.000000000 -0400
34262 @@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attr
34263 NULL,
34264 };
34265
34266 -static struct sysfs_ops veth_pool_ops = {
34267 +static const struct sysfs_ops veth_pool_ops = {
34268 .show = veth_pool_show,
34269 .store = veth_pool_store,
34270 };
34271 diff -urNp linux-2.6.32.44/drivers/net/igb/e1000_82575.c linux-2.6.32.44/drivers/net/igb/e1000_82575.c
34272 --- linux-2.6.32.44/drivers/net/igb/e1000_82575.c 2011-03-27 14:31:47.000000000 -0400
34273 +++ linux-2.6.32.44/drivers/net/igb/e1000_82575.c 2011-08-05 20:33:55.000000000 -0400
34274 @@ -135,7 +135,7 @@ static s32 igb_get_invariants_82575(stru
34275 ? true : false;
34276
34277 /* physical interface link setup */
34278 - mac->ops.setup_physical_interface =
34279 + *(void **)&mac->ops.setup_physical_interface =
34280 (hw->phy.media_type == e1000_media_type_copper)
34281 ? igb_setup_copper_link_82575
34282 : igb_setup_serdes_link_82575;
34283 @@ -191,13 +191,13 @@ static s32 igb_get_invariants_82575(stru
34284
34285 /* PHY function pointers */
34286 if (igb_sgmii_active_82575(hw)) {
34287 - phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
34288 - phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
34289 - phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
34290 + *(void **)&phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
34291 + *(void **)&phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
34292 + *(void **)&phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
34293 } else {
34294 - phy->ops.reset = igb_phy_hw_reset;
34295 - phy->ops.read_reg = igb_read_phy_reg_igp;
34296 - phy->ops.write_reg = igb_write_phy_reg_igp;
34297 + *(void **)&phy->ops.reset = igb_phy_hw_reset;
34298 + *(void **)&phy->ops.read_reg = igb_read_phy_reg_igp;
34299 + *(void **)&phy->ops.write_reg = igb_write_phy_reg_igp;
34300 }
34301
34302 /* set lan id */
34303 @@ -213,17 +213,17 @@ static s32 igb_get_invariants_82575(stru
34304 switch (phy->id) {
34305 case M88E1111_I_PHY_ID:
34306 phy->type = e1000_phy_m88;
34307 - phy->ops.get_phy_info = igb_get_phy_info_m88;
34308 - phy->ops.get_cable_length = igb_get_cable_length_m88;
34309 - phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
34310 + *(void **)&phy->ops.get_phy_info = igb_get_phy_info_m88;
34311 + *(void **)&phy->ops.get_cable_length = igb_get_cable_length_m88;
34312 + *(void **)&phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
34313 break;
34314 case IGP03E1000_E_PHY_ID:
34315 phy->type = e1000_phy_igp_3;
34316 - phy->ops.get_phy_info = igb_get_phy_info_igp;
34317 - phy->ops.get_cable_length = igb_get_cable_length_igp_2;
34318 - phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
34319 - phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
34320 - phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
34321 + *(void **)&phy->ops.get_phy_info = igb_get_phy_info_igp;
34322 + *(void **)&phy->ops.get_cable_length = igb_get_cable_length_igp_2;
34323 + *(void **)&phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
34324 + *(void **)&phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
34325 + *(void **)&phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
34326 break;
34327 default:
34328 return -E1000_ERR_PHY;
34329 @@ -1410,7 +1410,7 @@ void igb_vmdq_set_replication_pf(struct
34330 wr32(E1000_VT_CTL, vt_ctl);
34331 }
34332
34333 -static struct e1000_mac_operations e1000_mac_ops_82575 = {
34334 +static const struct e1000_mac_operations e1000_mac_ops_82575 = {
34335 .reset_hw = igb_reset_hw_82575,
34336 .init_hw = igb_init_hw_82575,
34337 .check_for_link = igb_check_for_link_82575,
34338 @@ -1419,13 +1419,13 @@ static struct e1000_mac_operations e1000
34339 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
34340 };
34341
34342 -static struct e1000_phy_operations e1000_phy_ops_82575 = {
34343 +static const struct e1000_phy_operations e1000_phy_ops_82575 = {
34344 .acquire = igb_acquire_phy_82575,
34345 .get_cfg_done = igb_get_cfg_done_82575,
34346 .release = igb_release_phy_82575,
34347 };
34348
34349 -static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
34350 +static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
34351 .acquire = igb_acquire_nvm_82575,
34352 .read = igb_read_nvm_eerd,
34353 .release = igb_release_nvm_82575,
34354 diff -urNp linux-2.6.32.44/drivers/net/igb/e1000_hw.h linux-2.6.32.44/drivers/net/igb/e1000_hw.h
34355 --- linux-2.6.32.44/drivers/net/igb/e1000_hw.h 2011-03-27 14:31:47.000000000 -0400
34356 +++ linux-2.6.32.44/drivers/net/igb/e1000_hw.h 2011-04-17 15:56:46.000000000 -0400
34357 @@ -305,17 +305,17 @@ struct e1000_phy_operations {
34358 };
34359
34360 struct e1000_nvm_operations {
34361 - s32 (*acquire)(struct e1000_hw *);
34362 - s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
34363 - void (*release)(struct e1000_hw *);
34364 - s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
34365 + s32 (* const acquire)(struct e1000_hw *);
34366 + s32 (* const read)(struct e1000_hw *, u16, u16, u16 *);
34367 + void (* const release)(struct e1000_hw *);
34368 + s32 (* const write)(struct e1000_hw *, u16, u16, u16 *);
34369 };
34370
34371 struct e1000_info {
34372 s32 (*get_invariants)(struct e1000_hw *);
34373 - struct e1000_mac_operations *mac_ops;
34374 - struct e1000_phy_operations *phy_ops;
34375 - struct e1000_nvm_operations *nvm_ops;
34376 + const struct e1000_mac_operations *mac_ops;
34377 + const struct e1000_phy_operations *phy_ops;
34378 + const struct e1000_nvm_operations *nvm_ops;
34379 };
34380
34381 extern const struct e1000_info e1000_82575_info;
34382 diff -urNp linux-2.6.32.44/drivers/net/igb/e1000_mbx.c linux-2.6.32.44/drivers/net/igb/e1000_mbx.c
34383 --- linux-2.6.32.44/drivers/net/igb/e1000_mbx.c 2011-03-27 14:31:47.000000000 -0400
34384 +++ linux-2.6.32.44/drivers/net/igb/e1000_mbx.c 2011-08-05 20:33:55.000000000 -0400
34385 @@ -414,13 +414,13 @@ s32 igb_init_mbx_params_pf(struct e1000_
34386
34387 mbx->size = E1000_VFMAILBOX_SIZE;
34388
34389 - mbx->ops.read = igb_read_mbx_pf;
34390 - mbx->ops.write = igb_write_mbx_pf;
34391 - mbx->ops.read_posted = igb_read_posted_mbx;
34392 - mbx->ops.write_posted = igb_write_posted_mbx;
34393 - mbx->ops.check_for_msg = igb_check_for_msg_pf;
34394 - mbx->ops.check_for_ack = igb_check_for_ack_pf;
34395 - mbx->ops.check_for_rst = igb_check_for_rst_pf;
34396 + *(void **)&mbx->ops.read = igb_read_mbx_pf;
34397 + *(void **)&mbx->ops.write = igb_write_mbx_pf;
34398 + *(void **)&mbx->ops.read_posted = igb_read_posted_mbx;
34399 + *(void **)&mbx->ops.write_posted = igb_write_posted_mbx;
34400 + *(void **)&mbx->ops.check_for_msg = igb_check_for_msg_pf;
34401 + *(void **)&mbx->ops.check_for_ack = igb_check_for_ack_pf;
34402 + *(void **)&mbx->ops.check_for_rst = igb_check_for_rst_pf;
34403
34404 mbx->stats.msgs_tx = 0;
34405 mbx->stats.msgs_rx = 0;
34406 diff -urNp linux-2.6.32.44/drivers/net/igb/igb_main.c linux-2.6.32.44/drivers/net/igb/igb_main.c
34407 --- linux-2.6.32.44/drivers/net/igb/igb_main.c 2011-03-27 14:31:47.000000000 -0400
34408 +++ linux-2.6.32.44/drivers/net/igb/igb_main.c 2011-08-05 20:33:55.000000000 -0400
34409 @@ -1295,9 +1295,9 @@ static int __devinit igb_probe(struct pc
34410 /* setup the private structure */
34411 hw->back = adapter;
34412 /* Copy the default MAC, PHY and NVM function pointers */
34413 - memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
34414 - memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
34415 - memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
34416 + memcpy((void *)&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
34417 + memcpy((void *)&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
34418 + memcpy((void *)&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
34419 /* Initialize skew-specific constants */
34420 err = ei->get_invariants(hw);
34421 if (err)
34422 diff -urNp linux-2.6.32.44/drivers/net/igbvf/mbx.c linux-2.6.32.44/drivers/net/igbvf/mbx.c
34423 --- linux-2.6.32.44/drivers/net/igbvf/mbx.c 2011-03-27 14:31:47.000000000 -0400
34424 +++ linux-2.6.32.44/drivers/net/igbvf/mbx.c 2011-08-05 20:33:55.000000000 -0400
34425 @@ -331,13 +331,13 @@ s32 e1000_init_mbx_params_vf(struct e100
34426
34427 mbx->size = E1000_VFMAILBOX_SIZE;
34428
34429 - mbx->ops.read = e1000_read_mbx_vf;
34430 - mbx->ops.write = e1000_write_mbx_vf;
34431 - mbx->ops.read_posted = e1000_read_posted_mbx;
34432 - mbx->ops.write_posted = e1000_write_posted_mbx;
34433 - mbx->ops.check_for_msg = e1000_check_for_msg_vf;
34434 - mbx->ops.check_for_ack = e1000_check_for_ack_vf;
34435 - mbx->ops.check_for_rst = e1000_check_for_rst_vf;
34436 + *(void **)&mbx->ops.read = e1000_read_mbx_vf;
34437 + *(void **)&mbx->ops.write = e1000_write_mbx_vf;
34438 + *(void **)&mbx->ops.read_posted = e1000_read_posted_mbx;
34439 + *(void **)&mbx->ops.write_posted = e1000_write_posted_mbx;
34440 + *(void **)&mbx->ops.check_for_msg = e1000_check_for_msg_vf;
34441 + *(void **)&mbx->ops.check_for_ack = e1000_check_for_ack_vf;
34442 + *(void **)&mbx->ops.check_for_rst = e1000_check_for_rst_vf;
34443
34444 mbx->stats.msgs_tx = 0;
34445 mbx->stats.msgs_rx = 0;
34446 diff -urNp linux-2.6.32.44/drivers/net/igbvf/vf.c linux-2.6.32.44/drivers/net/igbvf/vf.c
34447 --- linux-2.6.32.44/drivers/net/igbvf/vf.c 2011-03-27 14:31:47.000000000 -0400
34448 +++ linux-2.6.32.44/drivers/net/igbvf/vf.c 2011-08-05 20:33:55.000000000 -0400
34449 @@ -55,21 +55,21 @@ static s32 e1000_init_mac_params_vf(stru
34450
34451 /* Function pointers */
34452 /* reset */
34453 - mac->ops.reset_hw = e1000_reset_hw_vf;
34454 + *(void **)&mac->ops.reset_hw = e1000_reset_hw_vf;
34455 /* hw initialization */
34456 - mac->ops.init_hw = e1000_init_hw_vf;
34457 + *(void **)&mac->ops.init_hw = e1000_init_hw_vf;
34458 /* check for link */
34459 - mac->ops.check_for_link = e1000_check_for_link_vf;
34460 + *(void **)&mac->ops.check_for_link = e1000_check_for_link_vf;
34461 /* link info */
34462 - mac->ops.get_link_up_info = e1000_get_link_up_info_vf;
34463 + *(void **)&mac->ops.get_link_up_info = e1000_get_link_up_info_vf;
34464 /* multicast address update */
34465 - mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_vf;
34466 + *(void **)&mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_vf;
34467 /* set mac address */
34468 - mac->ops.rar_set = e1000_rar_set_vf;
34469 + *(void **)&mac->ops.rar_set = e1000_rar_set_vf;
34470 /* read mac address */
34471 - mac->ops.read_mac_addr = e1000_read_mac_addr_vf;
34472 + *(void **)&mac->ops.read_mac_addr = e1000_read_mac_addr_vf;
34473 /* set vlan filter table array */
34474 - mac->ops.set_vfta = e1000_set_vfta_vf;
34475 + *(void **)&mac->ops.set_vfta = e1000_set_vfta_vf;
34476
34477 return E1000_SUCCESS;
34478 }
34479 @@ -80,8 +80,8 @@ static s32 e1000_init_mac_params_vf(stru
34480 **/
34481 void e1000_init_function_pointers_vf(struct e1000_hw *hw)
34482 {
34483 - hw->mac.ops.init_params = e1000_init_mac_params_vf;
34484 - hw->mbx.ops.init_params = e1000_init_mbx_params_vf;
34485 + *(void **)&hw->mac.ops.init_params = e1000_init_mac_params_vf;
34486 + *(void **)&hw->mbx.ops.init_params = e1000_init_mbx_params_vf;
34487 }
34488
34489 /**
34490 diff -urNp linux-2.6.32.44/drivers/net/iseries_veth.c linux-2.6.32.44/drivers/net/iseries_veth.c
34491 --- linux-2.6.32.44/drivers/net/iseries_veth.c 2011-03-27 14:31:47.000000000 -0400
34492 +++ linux-2.6.32.44/drivers/net/iseries_veth.c 2011-04-17 15:56:46.000000000 -0400
34493 @@ -384,7 +384,7 @@ static struct attribute *veth_cnx_defaul
34494 NULL
34495 };
34496
34497 -static struct sysfs_ops veth_cnx_sysfs_ops = {
34498 +static const struct sysfs_ops veth_cnx_sysfs_ops = {
34499 .show = veth_cnx_attribute_show
34500 };
34501
34502 @@ -441,7 +441,7 @@ static struct attribute *veth_port_defau
34503 NULL
34504 };
34505
34506 -static struct sysfs_ops veth_port_sysfs_ops = {
34507 +static const struct sysfs_ops veth_port_sysfs_ops = {
34508 .show = veth_port_attribute_show
34509 };
34510
34511 diff -urNp linux-2.6.32.44/drivers/net/ixgb/ixgb_main.c linux-2.6.32.44/drivers/net/ixgb/ixgb_main.c
34512 --- linux-2.6.32.44/drivers/net/ixgb/ixgb_main.c 2011-03-27 14:31:47.000000000 -0400
34513 +++ linux-2.6.32.44/drivers/net/ixgb/ixgb_main.c 2011-05-16 21:46:57.000000000 -0400
34514 @@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev
34515 u32 rctl;
34516 int i;
34517
34518 + pax_track_stack();
34519 +
34520 /* Check for Promiscuous and All Multicast modes */
34521
34522 rctl = IXGB_READ_REG(hw, RCTL);
34523 diff -urNp linux-2.6.32.44/drivers/net/ixgb/ixgb_param.c linux-2.6.32.44/drivers/net/ixgb/ixgb_param.c
34524 --- linux-2.6.32.44/drivers/net/ixgb/ixgb_param.c 2011-03-27 14:31:47.000000000 -0400
34525 +++ linux-2.6.32.44/drivers/net/ixgb/ixgb_param.c 2011-05-16 21:46:57.000000000 -0400
34526 @@ -260,6 +260,9 @@ void __devinit
34527 ixgb_check_options(struct ixgb_adapter *adapter)
34528 {
34529 int bd = adapter->bd_number;
34530 +
34531 + pax_track_stack();
34532 +
34533 if (bd >= IXGB_MAX_NIC) {
34534 printk(KERN_NOTICE
34535 "Warning: no configuration for board #%i\n", bd);
34536 diff -urNp linux-2.6.32.44/drivers/net/ixgbe/ixgbe_82598.c linux-2.6.32.44/drivers/net/ixgbe/ixgbe_82598.c
34537 --- linux-2.6.32.44/drivers/net/ixgbe/ixgbe_82598.c 2011-03-27 14:31:47.000000000 -0400
34538 +++ linux-2.6.32.44/drivers/net/ixgbe/ixgbe_82598.c 2011-08-05 20:33:55.000000000 -0400
34539 @@ -154,19 +154,19 @@ static s32 ixgbe_init_phy_ops_82598(stru
34540
34541 /* Overwrite the link function pointers if copper PHY */
34542 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
34543 - mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
34544 - mac->ops.get_link_capabilities =
34545 + *(void **)&mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
34546 + *(void **)&mac->ops.get_link_capabilities =
34547 &ixgbe_get_copper_link_capabilities_82598;
34548 }
34549
34550 switch (hw->phy.type) {
34551 case ixgbe_phy_tn:
34552 - phy->ops.check_link = &ixgbe_check_phy_link_tnx;
34553 - phy->ops.get_firmware_version =
34554 + *(void **)&phy->ops.check_link = &ixgbe_check_phy_link_tnx;
34555 + *(void **)&phy->ops.get_firmware_version =
34556 &ixgbe_get_phy_firmware_version_tnx;
34557 break;
34558 case ixgbe_phy_nl:
34559 - phy->ops.reset = &ixgbe_reset_phy_nl;
34560 + *(void **)&phy->ops.reset = &ixgbe_reset_phy_nl;
34561
34562 /* Call SFP+ identify routine to get the SFP+ module type */
34563 ret_val = phy->ops.identify_sfp(hw);
34564 diff -urNp linux-2.6.32.44/drivers/net/ixgbe/ixgbe_82599.c linux-2.6.32.44/drivers/net/ixgbe/ixgbe_82599.c
34565 --- linux-2.6.32.44/drivers/net/ixgbe/ixgbe_82599.c 2011-03-27 14:31:47.000000000 -0400
34566 +++ linux-2.6.32.44/drivers/net/ixgbe/ixgbe_82599.c 2011-08-05 20:33:55.000000000 -0400
34567 @@ -62,9 +62,9 @@ static void ixgbe_init_mac_link_ops_8259
34568 struct ixgbe_mac_info *mac = &hw->mac;
34569 if (hw->phy.multispeed_fiber) {
34570 /* Set up dual speed SFP+ support */
34571 - mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
34572 + *(void **)&mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
34573 } else {
34574 - mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
34575 + *(void **)&mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
34576 }
34577 }
34578
34579 @@ -76,7 +76,7 @@ static s32 ixgbe_setup_sfp_modules_82599
34580 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
34581 ixgbe_init_mac_link_ops_82599(hw);
34582
34583 - hw->phy.ops.reset = NULL;
34584 + *(void **)&hw->phy.ops.reset = NULL;
34585
34586 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
34587 &data_offset);
34588 @@ -171,16 +171,16 @@ static s32 ixgbe_init_phy_ops_82599(stru
34589
34590 /* If copper media, overwrite with copper function pointers */
34591 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
34592 - mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
34593 - mac->ops.get_link_capabilities =
34594 + *(void **)&mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
34595 + *(void **)&mac->ops.get_link_capabilities =
34596 &ixgbe_get_copper_link_capabilities_82599;
34597 }
34598
34599 /* Set necessary function pointers based on phy type */
34600 switch (hw->phy.type) {
34601 case ixgbe_phy_tn:
34602 - phy->ops.check_link = &ixgbe_check_phy_link_tnx;
34603 - phy->ops.get_firmware_version =
34604 + *(void **)&phy->ops.check_link = &ixgbe_check_phy_link_tnx;
34605 + *(void **)&phy->ops.get_firmware_version =
34606 &ixgbe_get_phy_firmware_version_tnx;
34607 break;
34608 default:
34609 diff -urNp linux-2.6.32.44/drivers/net/ixgbe/ixgbe_main.c linux-2.6.32.44/drivers/net/ixgbe/ixgbe_main.c
34610 --- linux-2.6.32.44/drivers/net/ixgbe/ixgbe_main.c 2011-03-27 14:31:47.000000000 -0400
34611 +++ linux-2.6.32.44/drivers/net/ixgbe/ixgbe_main.c 2011-08-05 20:33:55.000000000 -0400
34612 @@ -5638,18 +5638,18 @@ static int __devinit ixgbe_probe(struct
34613 adapter->bd_number = cards_found;
34614
34615 /* Setup hw api */
34616 - memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
34617 + memcpy((void *)&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
34618 hw->mac.type = ii->mac;
34619
34620 /* EEPROM */
34621 - memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
34622 + memcpy((void *)&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
34623 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
34624 /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
34625 if (!(eec & (1 << 8)))
34626 - hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
34627 + *(void **)&hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
34628
34629 /* PHY */
34630 - memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
34631 + memcpy((void *)&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
34632 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
34633 /* ixgbe_identify_phy_generic will set prtad and mmds properly */
34634 hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
34635 diff -urNp linux-2.6.32.44/drivers/net/mlx4/main.c linux-2.6.32.44/drivers/net/mlx4/main.c
34636 --- linux-2.6.32.44/drivers/net/mlx4/main.c 2011-03-27 14:31:47.000000000 -0400
34637 +++ linux-2.6.32.44/drivers/net/mlx4/main.c 2011-05-18 20:09:37.000000000 -0400
34638 @@ -38,6 +38,7 @@
34639 #include <linux/errno.h>
34640 #include <linux/pci.h>
34641 #include <linux/dma-mapping.h>
34642 +#include <linux/sched.h>
34643
34644 #include <linux/mlx4/device.h>
34645 #include <linux/mlx4/doorbell.h>
34646 @@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev
34647 u64 icm_size;
34648 int err;
34649
34650 + pax_track_stack();
34651 +
34652 err = mlx4_QUERY_FW(dev);
34653 if (err) {
34654 if (err == -EACCES)
34655 diff -urNp linux-2.6.32.44/drivers/net/niu.c linux-2.6.32.44/drivers/net/niu.c
34656 --- linux-2.6.32.44/drivers/net/niu.c 2011-05-10 22:12:01.000000000 -0400
34657 +++ linux-2.6.32.44/drivers/net/niu.c 2011-05-16 21:46:57.000000000 -0400
34658 @@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struc
34659 int i, num_irqs, err;
34660 u8 first_ldg;
34661
34662 + pax_track_stack();
34663 +
34664 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
34665 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
34666 ldg_num_map[i] = first_ldg + i;
34667 diff -urNp linux-2.6.32.44/drivers/net/pcnet32.c linux-2.6.32.44/drivers/net/pcnet32.c
34668 --- linux-2.6.32.44/drivers/net/pcnet32.c 2011-03-27 14:31:47.000000000 -0400
34669 +++ linux-2.6.32.44/drivers/net/pcnet32.c 2011-08-05 20:33:55.000000000 -0400
34670 @@ -79,7 +79,7 @@ static int cards_found;
34671 /*
34672 * VLB I/O addresses
34673 */
34674 -static unsigned int pcnet32_portlist[] __initdata =
34675 +static unsigned int pcnet32_portlist[] __devinitdata =
34676 { 0x300, 0x320, 0x340, 0x360, 0 };
34677
34678 static int pcnet32_debug = 0;
34679 @@ -267,7 +267,7 @@ struct pcnet32_private {
34680 struct sk_buff **rx_skbuff;
34681 dma_addr_t *tx_dma_addr;
34682 dma_addr_t *rx_dma_addr;
34683 - struct pcnet32_access a;
34684 + struct pcnet32_access *a;
34685 spinlock_t lock; /* Guard lock */
34686 unsigned int cur_rx, cur_tx; /* The next free ring entry */
34687 unsigned int rx_ring_size; /* current rx ring size */
34688 @@ -457,9 +457,9 @@ static void pcnet32_netif_start(struct n
34689 u16 val;
34690
34691 netif_wake_queue(dev);
34692 - val = lp->a.read_csr(ioaddr, CSR3);
34693 + val = lp->a->read_csr(ioaddr, CSR3);
34694 val &= 0x00ff;
34695 - lp->a.write_csr(ioaddr, CSR3, val);
34696 + lp->a->write_csr(ioaddr, CSR3, val);
34697 napi_enable(&lp->napi);
34698 }
34699
34700 @@ -744,7 +744,7 @@ static u32 pcnet32_get_link(struct net_d
34701 r = mii_link_ok(&lp->mii_if);
34702 } else if (lp->chip_version >= PCNET32_79C970A) {
34703 ulong ioaddr = dev->base_addr; /* card base I/O address */
34704 - r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
34705 + r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
34706 } else { /* can not detect link on really old chips */
34707 r = 1;
34708 }
34709 @@ -806,7 +806,7 @@ static int pcnet32_set_ringparam(struct
34710 pcnet32_netif_stop(dev);
34711
34712 spin_lock_irqsave(&lp->lock, flags);
34713 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34714 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34715
34716 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
34717
34718 @@ -886,7 +886,7 @@ static void pcnet32_ethtool_test(struct
34719 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
34720 {
34721 struct pcnet32_private *lp = netdev_priv(dev);
34722 - struct pcnet32_access *a = &lp->a; /* access to registers */
34723 + struct pcnet32_access *a = lp->a; /* access to registers */
34724 ulong ioaddr = dev->base_addr; /* card base I/O address */
34725 struct sk_buff *skb; /* sk buff */
34726 int x, i; /* counters */
34727 @@ -906,21 +906,21 @@ static int pcnet32_loopback_test(struct
34728 pcnet32_netif_stop(dev);
34729
34730 spin_lock_irqsave(&lp->lock, flags);
34731 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34732 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34733
34734 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
34735
34736 /* Reset the PCNET32 */
34737 - lp->a.reset(ioaddr);
34738 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34739 + lp->a->reset(ioaddr);
34740 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34741
34742 /* switch pcnet32 to 32bit mode */
34743 - lp->a.write_bcr(ioaddr, 20, 2);
34744 + lp->a->write_bcr(ioaddr, 20, 2);
34745
34746 /* purge & init rings but don't actually restart */
34747 pcnet32_restart(dev, 0x0000);
34748
34749 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34750 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34751
34752 /* Initialize Transmit buffers. */
34753 size = data_len + 15;
34754 @@ -966,10 +966,10 @@ static int pcnet32_loopback_test(struct
34755
34756 /* set int loopback in CSR15 */
34757 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
34758 - lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
34759 + lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
34760
34761 teststatus = cpu_to_le16(0x8000);
34762 - lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
34763 + lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
34764
34765 /* Check status of descriptors */
34766 for (x = 0; x < numbuffs; x++) {
34767 @@ -990,7 +990,7 @@ static int pcnet32_loopback_test(struct
34768 }
34769 }
34770
34771 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34772 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34773 wmb();
34774 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
34775 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
34776 @@ -1039,7 +1039,7 @@ static int pcnet32_loopback_test(struct
34777 pcnet32_restart(dev, CSR0_NORMAL);
34778 } else {
34779 pcnet32_purge_rx_ring(dev);
34780 - lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
34781 + lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
34782 }
34783 spin_unlock_irqrestore(&lp->lock, flags);
34784
34785 @@ -1049,7 +1049,7 @@ static int pcnet32_loopback_test(struct
34786 static void pcnet32_led_blink_callback(struct net_device *dev)
34787 {
34788 struct pcnet32_private *lp = netdev_priv(dev);
34789 - struct pcnet32_access *a = &lp->a;
34790 + struct pcnet32_access *a = lp->a;
34791 ulong ioaddr = dev->base_addr;
34792 unsigned long flags;
34793 int i;
34794 @@ -1066,7 +1066,7 @@ static void pcnet32_led_blink_callback(s
34795 static int pcnet32_phys_id(struct net_device *dev, u32 data)
34796 {
34797 struct pcnet32_private *lp = netdev_priv(dev);
34798 - struct pcnet32_access *a = &lp->a;
34799 + struct pcnet32_access *a = lp->a;
34800 ulong ioaddr = dev->base_addr;
34801 unsigned long flags;
34802 int i, regs[4];
34803 @@ -1112,7 +1112,7 @@ static int pcnet32_suspend(struct net_de
34804 {
34805 int csr5;
34806 struct pcnet32_private *lp = netdev_priv(dev);
34807 - struct pcnet32_access *a = &lp->a;
34808 + struct pcnet32_access *a = lp->a;
34809 ulong ioaddr = dev->base_addr;
34810 int ticks;
34811
34812 @@ -1388,8 +1388,8 @@ static int pcnet32_poll(struct napi_stru
34813 spin_lock_irqsave(&lp->lock, flags);
34814 if (pcnet32_tx(dev)) {
34815 /* reset the chip to clear the error condition, then restart */
34816 - lp->a.reset(ioaddr);
34817 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34818 + lp->a->reset(ioaddr);
34819 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34820 pcnet32_restart(dev, CSR0_START);
34821 netif_wake_queue(dev);
34822 }
34823 @@ -1401,12 +1401,12 @@ static int pcnet32_poll(struct napi_stru
34824 __napi_complete(napi);
34825
34826 /* clear interrupt masks */
34827 - val = lp->a.read_csr(ioaddr, CSR3);
34828 + val = lp->a->read_csr(ioaddr, CSR3);
34829 val &= 0x00ff;
34830 - lp->a.write_csr(ioaddr, CSR3, val);
34831 + lp->a->write_csr(ioaddr, CSR3, val);
34832
34833 /* Set interrupt enable. */
34834 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
34835 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
34836
34837 spin_unlock_irqrestore(&lp->lock, flags);
34838 }
34839 @@ -1429,7 +1429,7 @@ static void pcnet32_get_regs(struct net_
34840 int i, csr0;
34841 u16 *buff = ptr;
34842 struct pcnet32_private *lp = netdev_priv(dev);
34843 - struct pcnet32_access *a = &lp->a;
34844 + struct pcnet32_access *a = lp->a;
34845 ulong ioaddr = dev->base_addr;
34846 unsigned long flags;
34847
34848 @@ -1466,9 +1466,9 @@ static void pcnet32_get_regs(struct net_
34849 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
34850 if (lp->phymask & (1 << j)) {
34851 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
34852 - lp->a.write_bcr(ioaddr, 33,
34853 + lp->a->write_bcr(ioaddr, 33,
34854 (j << 5) | i);
34855 - *buff++ = lp->a.read_bcr(ioaddr, 34);
34856 + *buff++ = lp->a->read_bcr(ioaddr, 34);
34857 }
34858 }
34859 }
34860 @@ -1858,7 +1858,7 @@ pcnet32_probe1(unsigned long ioaddr, int
34861 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
34862 lp->options |= PCNET32_PORT_FD;
34863
34864 - lp->a = *a;
34865 + lp->a = a;
34866
34867 /* prior to register_netdev, dev->name is not yet correct */
34868 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
34869 @@ -1917,7 +1917,7 @@ pcnet32_probe1(unsigned long ioaddr, int
34870 if (lp->mii) {
34871 /* lp->phycount and lp->phymask are set to 0 by memset above */
34872
34873 - lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
34874 + lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
34875 /* scan for PHYs */
34876 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
34877 unsigned short id1, id2;
34878 @@ -1938,7 +1938,7 @@ pcnet32_probe1(unsigned long ioaddr, int
34879 "Found PHY %04x:%04x at address %d.\n",
34880 id1, id2, i);
34881 }
34882 - lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
34883 + lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
34884 if (lp->phycount > 1) {
34885 lp->options |= PCNET32_PORT_MII;
34886 }
34887 @@ -2109,10 +2109,10 @@ static int pcnet32_open(struct net_devic
34888 }
34889
34890 /* Reset the PCNET32 */
34891 - lp->a.reset(ioaddr);
34892 + lp->a->reset(ioaddr);
34893
34894 /* switch pcnet32 to 32bit mode */
34895 - lp->a.write_bcr(ioaddr, 20, 2);
34896 + lp->a->write_bcr(ioaddr, 20, 2);
34897
34898 if (netif_msg_ifup(lp))
34899 printk(KERN_DEBUG
34900 @@ -2122,14 +2122,14 @@ static int pcnet32_open(struct net_devic
34901 (u32) (lp->init_dma_addr));
34902
34903 /* set/reset autoselect bit */
34904 - val = lp->a.read_bcr(ioaddr, 2) & ~2;
34905 + val = lp->a->read_bcr(ioaddr, 2) & ~2;
34906 if (lp->options & PCNET32_PORT_ASEL)
34907 val |= 2;
34908 - lp->a.write_bcr(ioaddr, 2, val);
34909 + lp->a->write_bcr(ioaddr, 2, val);
34910
34911 /* handle full duplex setting */
34912 if (lp->mii_if.full_duplex) {
34913 - val = lp->a.read_bcr(ioaddr, 9) & ~3;
34914 + val = lp->a->read_bcr(ioaddr, 9) & ~3;
34915 if (lp->options & PCNET32_PORT_FD) {
34916 val |= 1;
34917 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
34918 @@ -2139,14 +2139,14 @@ static int pcnet32_open(struct net_devic
34919 if (lp->chip_version == 0x2627)
34920 val |= 3;
34921 }
34922 - lp->a.write_bcr(ioaddr, 9, val);
34923 + lp->a->write_bcr(ioaddr, 9, val);
34924 }
34925
34926 /* set/reset GPSI bit in test register */
34927 - val = lp->a.read_csr(ioaddr, 124) & ~0x10;
34928 + val = lp->a->read_csr(ioaddr, 124) & ~0x10;
34929 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
34930 val |= 0x10;
34931 - lp->a.write_csr(ioaddr, 124, val);
34932 + lp->a->write_csr(ioaddr, 124, val);
34933
34934 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
34935 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
34936 @@ -2167,24 +2167,24 @@ static int pcnet32_open(struct net_devic
34937 * duplex, and/or enable auto negotiation, and clear DANAS
34938 */
34939 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
34940 - lp->a.write_bcr(ioaddr, 32,
34941 - lp->a.read_bcr(ioaddr, 32) | 0x0080);
34942 + lp->a->write_bcr(ioaddr, 32,
34943 + lp->a->read_bcr(ioaddr, 32) | 0x0080);
34944 /* disable Auto Negotiation, set 10Mpbs, HD */
34945 - val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
34946 + val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
34947 if (lp->options & PCNET32_PORT_FD)
34948 val |= 0x10;
34949 if (lp->options & PCNET32_PORT_100)
34950 val |= 0x08;
34951 - lp->a.write_bcr(ioaddr, 32, val);
34952 + lp->a->write_bcr(ioaddr, 32, val);
34953 } else {
34954 if (lp->options & PCNET32_PORT_ASEL) {
34955 - lp->a.write_bcr(ioaddr, 32,
34956 - lp->a.read_bcr(ioaddr,
34957 + lp->a->write_bcr(ioaddr, 32,
34958 + lp->a->read_bcr(ioaddr,
34959 32) | 0x0080);
34960 /* enable auto negotiate, setup, disable fd */
34961 - val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
34962 + val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
34963 val |= 0x20;
34964 - lp->a.write_bcr(ioaddr, 32, val);
34965 + lp->a->write_bcr(ioaddr, 32, val);
34966 }
34967 }
34968 } else {
34969 @@ -2197,10 +2197,10 @@ static int pcnet32_open(struct net_devic
34970 * There is really no good other way to handle multiple PHYs
34971 * other than turning off all automatics
34972 */
34973 - val = lp->a.read_bcr(ioaddr, 2);
34974 - lp->a.write_bcr(ioaddr, 2, val & ~2);
34975 - val = lp->a.read_bcr(ioaddr, 32);
34976 - lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
34977 + val = lp->a->read_bcr(ioaddr, 2);
34978 + lp->a->write_bcr(ioaddr, 2, val & ~2);
34979 + val = lp->a->read_bcr(ioaddr, 32);
34980 + lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
34981
34982 if (!(lp->options & PCNET32_PORT_ASEL)) {
34983 /* setup ecmd */
34984 @@ -2210,7 +2210,7 @@ static int pcnet32_open(struct net_devic
34985 ecmd.speed =
34986 lp->
34987 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
34988 - bcr9 = lp->a.read_bcr(ioaddr, 9);
34989 + bcr9 = lp->a->read_bcr(ioaddr, 9);
34990
34991 if (lp->options & PCNET32_PORT_FD) {
34992 ecmd.duplex = DUPLEX_FULL;
34993 @@ -2219,7 +2219,7 @@ static int pcnet32_open(struct net_devic
34994 ecmd.duplex = DUPLEX_HALF;
34995 bcr9 |= ~(1 << 0);
34996 }
34997 - lp->a.write_bcr(ioaddr, 9, bcr9);
34998 + lp->a->write_bcr(ioaddr, 9, bcr9);
34999 }
35000
35001 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
35002 @@ -2252,9 +2252,9 @@ static int pcnet32_open(struct net_devic
35003
35004 #ifdef DO_DXSUFLO
35005 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
35006 - val = lp->a.read_csr(ioaddr, CSR3);
35007 + val = lp->a->read_csr(ioaddr, CSR3);
35008 val |= 0x40;
35009 - lp->a.write_csr(ioaddr, CSR3, val);
35010 + lp->a->write_csr(ioaddr, CSR3, val);
35011 }
35012 #endif
35013
35014 @@ -2270,11 +2270,11 @@ static int pcnet32_open(struct net_devic
35015 napi_enable(&lp->napi);
35016
35017 /* Re-initialize the PCNET32, and start it when done. */
35018 - lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
35019 - lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
35020 + lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
35021 + lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
35022
35023 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
35024 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
35025 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
35026 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
35027
35028 netif_start_queue(dev);
35029
35030 @@ -2286,20 +2286,20 @@ static int pcnet32_open(struct net_devic
35031
35032 i = 0;
35033 while (i++ < 100)
35034 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
35035 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
35036 break;
35037 /*
35038 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
35039 * reports that doing so triggers a bug in the '974.
35040 */
35041 - lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
35042 + lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
35043
35044 if (netif_msg_ifup(lp))
35045 printk(KERN_DEBUG
35046 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
35047 dev->name, i,
35048 (u32) (lp->init_dma_addr),
35049 - lp->a.read_csr(ioaddr, CSR0));
35050 + lp->a->read_csr(ioaddr, CSR0));
35051
35052 spin_unlock_irqrestore(&lp->lock, flags);
35053
35054 @@ -2313,7 +2313,7 @@ static int pcnet32_open(struct net_devic
35055 * Switch back to 16bit mode to avoid problems with dumb
35056 * DOS packet driver after a warm reboot
35057 */
35058 - lp->a.write_bcr(ioaddr, 20, 4);
35059 + lp->a->write_bcr(ioaddr, 20, 4);
35060
35061 err_free_irq:
35062 spin_unlock_irqrestore(&lp->lock, flags);
35063 @@ -2420,7 +2420,7 @@ static void pcnet32_restart(struct net_d
35064
35065 /* wait for stop */
35066 for (i = 0; i < 100; i++)
35067 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
35068 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
35069 break;
35070
35071 if (i >= 100 && netif_msg_drv(lp))
35072 @@ -2433,13 +2433,13 @@ static void pcnet32_restart(struct net_d
35073 return;
35074
35075 /* ReInit Ring */
35076 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
35077 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
35078 i = 0;
35079 while (i++ < 1000)
35080 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
35081 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
35082 break;
35083
35084 - lp->a.write_csr(ioaddr, CSR0, csr0_bits);
35085 + lp->a->write_csr(ioaddr, CSR0, csr0_bits);
35086 }
35087
35088 static void pcnet32_tx_timeout(struct net_device *dev)
35089 @@ -2452,8 +2452,8 @@ static void pcnet32_tx_timeout(struct ne
35090 if (pcnet32_debug & NETIF_MSG_DRV)
35091 printk(KERN_ERR
35092 "%s: transmit timed out, status %4.4x, resetting.\n",
35093 - dev->name, lp->a.read_csr(ioaddr, CSR0));
35094 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
35095 + dev->name, lp->a->read_csr(ioaddr, CSR0));
35096 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
35097 dev->stats.tx_errors++;
35098 if (netif_msg_tx_err(lp)) {
35099 int i;
35100 @@ -2497,7 +2497,7 @@ static netdev_tx_t pcnet32_start_xmit(st
35101 if (netif_msg_tx_queued(lp)) {
35102 printk(KERN_DEBUG
35103 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
35104 - dev->name, lp->a.read_csr(ioaddr, CSR0));
35105 + dev->name, lp->a->read_csr(ioaddr, CSR0));
35106 }
35107
35108 /* Default status -- will not enable Successful-TxDone
35109 @@ -2528,7 +2528,7 @@ static netdev_tx_t pcnet32_start_xmit(st
35110 dev->stats.tx_bytes += skb->len;
35111
35112 /* Trigger an immediate send poll. */
35113 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
35114 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
35115
35116 dev->trans_start = jiffies;
35117
35118 @@ -2555,18 +2555,18 @@ pcnet32_interrupt(int irq, void *dev_id)
35119
35120 spin_lock(&lp->lock);
35121
35122 - csr0 = lp->a.read_csr(ioaddr, CSR0);
35123 + csr0 = lp->a->read_csr(ioaddr, CSR0);
35124 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
35125 if (csr0 == 0xffff) {
35126 break; /* PCMCIA remove happened */
35127 }
35128 /* Acknowledge all of the current interrupt sources ASAP. */
35129 - lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
35130 + lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
35131
35132 if (netif_msg_intr(lp))
35133 printk(KERN_DEBUG
35134 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
35135 - dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
35136 + dev->name, csr0, lp->a->read_csr(ioaddr, CSR0));
35137
35138 /* Log misc errors. */
35139 if (csr0 & 0x4000)
35140 @@ -2595,19 +2595,19 @@ pcnet32_interrupt(int irq, void *dev_id)
35141 if (napi_schedule_prep(&lp->napi)) {
35142 u16 val;
35143 /* set interrupt masks */
35144 - val = lp->a.read_csr(ioaddr, CSR3);
35145 + val = lp->a->read_csr(ioaddr, CSR3);
35146 val |= 0x5f00;
35147 - lp->a.write_csr(ioaddr, CSR3, val);
35148 + lp->a->write_csr(ioaddr, CSR3, val);
35149
35150 __napi_schedule(&lp->napi);
35151 break;
35152 }
35153 - csr0 = lp->a.read_csr(ioaddr, CSR0);
35154 + csr0 = lp->a->read_csr(ioaddr, CSR0);
35155 }
35156
35157 if (netif_msg_intr(lp))
35158 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
35159 - dev->name, lp->a.read_csr(ioaddr, CSR0));
35160 + dev->name, lp->a->read_csr(ioaddr, CSR0));
35161
35162 spin_unlock(&lp->lock);
35163
35164 @@ -2627,21 +2627,21 @@ static int pcnet32_close(struct net_devi
35165
35166 spin_lock_irqsave(&lp->lock, flags);
35167
35168 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
35169 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
35170
35171 if (netif_msg_ifdown(lp))
35172 printk(KERN_DEBUG
35173 "%s: Shutting down ethercard, status was %2.2x.\n",
35174 - dev->name, lp->a.read_csr(ioaddr, CSR0));
35175 + dev->name, lp->a->read_csr(ioaddr, CSR0));
35176
35177 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
35178 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
35179 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
35180
35181 /*
35182 * Switch back to 16bit mode to avoid problems with dumb
35183 * DOS packet driver after a warm reboot
35184 */
35185 - lp->a.write_bcr(ioaddr, 20, 4);
35186 + lp->a->write_bcr(ioaddr, 20, 4);
35187
35188 spin_unlock_irqrestore(&lp->lock, flags);
35189
35190 @@ -2664,7 +2664,7 @@ static struct net_device_stats *pcnet32_
35191 unsigned long flags;
35192
35193 spin_lock_irqsave(&lp->lock, flags);
35194 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
35195 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
35196 spin_unlock_irqrestore(&lp->lock, flags);
35197
35198 return &dev->stats;
35199 @@ -2686,10 +2686,10 @@ static void pcnet32_load_multicast(struc
35200 if (dev->flags & IFF_ALLMULTI) {
35201 ib->filter[0] = cpu_to_le32(~0U);
35202 ib->filter[1] = cpu_to_le32(~0U);
35203 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
35204 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
35205 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
35206 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
35207 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
35208 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
35209 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
35210 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
35211 return;
35212 }
35213 /* clear the multicast filter */
35214 @@ -2710,7 +2710,7 @@ static void pcnet32_load_multicast(struc
35215 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
35216 }
35217 for (i = 0; i < 4; i++)
35218 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
35219 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
35220 le16_to_cpu(mcast_table[i]));
35221 return;
35222 }
35223 @@ -2726,7 +2726,7 @@ static void pcnet32_set_multicast_list(s
35224
35225 spin_lock_irqsave(&lp->lock, flags);
35226 suspended = pcnet32_suspend(dev, &flags, 0);
35227 - csr15 = lp->a.read_csr(ioaddr, CSR15);
35228 + csr15 = lp->a->read_csr(ioaddr, CSR15);
35229 if (dev->flags & IFF_PROMISC) {
35230 /* Log any net taps. */
35231 if (netif_msg_hw(lp))
35232 @@ -2735,21 +2735,21 @@ static void pcnet32_set_multicast_list(s
35233 lp->init_block->mode =
35234 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
35235 7);
35236 - lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
35237 + lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
35238 } else {
35239 lp->init_block->mode =
35240 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
35241 - lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
35242 + lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
35243 pcnet32_load_multicast(dev);
35244 }
35245
35246 if (suspended) {
35247 int csr5;
35248 /* clear SUSPEND (SPND) - CSR5 bit 0 */
35249 - csr5 = lp->a.read_csr(ioaddr, CSR5);
35250 - lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
35251 + csr5 = lp->a->read_csr(ioaddr, CSR5);
35252 + lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
35253 } else {
35254 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
35255 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
35256 pcnet32_restart(dev, CSR0_NORMAL);
35257 netif_wake_queue(dev);
35258 }
35259 @@ -2767,8 +2767,8 @@ static int mdio_read(struct net_device *
35260 if (!lp->mii)
35261 return 0;
35262
35263 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
35264 - val_out = lp->a.read_bcr(ioaddr, 34);
35265 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
35266 + val_out = lp->a->read_bcr(ioaddr, 34);
35267
35268 return val_out;
35269 }
35270 @@ -2782,8 +2782,8 @@ static void mdio_write(struct net_device
35271 if (!lp->mii)
35272 return;
35273
35274 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
35275 - lp->a.write_bcr(ioaddr, 34, val);
35276 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
35277 + lp->a->write_bcr(ioaddr, 34, val);
35278 }
35279
35280 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
35281 @@ -2862,7 +2862,7 @@ static void pcnet32_check_media(struct n
35282 curr_link = mii_link_ok(&lp->mii_if);
35283 } else {
35284 ulong ioaddr = dev->base_addr; /* card base I/O address */
35285 - curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
35286 + curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
35287 }
35288 if (!curr_link) {
35289 if (prev_link || verbose) {
35290 @@ -2887,13 +2887,13 @@ static void pcnet32_check_media(struct n
35291 (ecmd.duplex ==
35292 DUPLEX_FULL) ? "full" : "half");
35293 }
35294 - bcr9 = lp->a.read_bcr(dev->base_addr, 9);
35295 + bcr9 = lp->a->read_bcr(dev->base_addr, 9);
35296 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
35297 if (lp->mii_if.full_duplex)
35298 bcr9 |= (1 << 0);
35299 else
35300 bcr9 &= ~(1 << 0);
35301 - lp->a.write_bcr(dev->base_addr, 9, bcr9);
35302 + lp->a->write_bcr(dev->base_addr, 9, bcr9);
35303 }
35304 } else {
35305 if (netif_msg_link(lp))
35306 diff -urNp linux-2.6.32.44/drivers/net/tg3.h linux-2.6.32.44/drivers/net/tg3.h
35307 --- linux-2.6.32.44/drivers/net/tg3.h 2011-03-27 14:31:47.000000000 -0400
35308 +++ linux-2.6.32.44/drivers/net/tg3.h 2011-04-17 15:56:46.000000000 -0400
35309 @@ -95,6 +95,7 @@
35310 #define CHIPREV_ID_5750_A0 0x4000
35311 #define CHIPREV_ID_5750_A1 0x4001
35312 #define CHIPREV_ID_5750_A3 0x4003
35313 +#define CHIPREV_ID_5750_C1 0x4201
35314 #define CHIPREV_ID_5750_C2 0x4202
35315 #define CHIPREV_ID_5752_A0_HW 0x5000
35316 #define CHIPREV_ID_5752_A0 0x6000
35317 diff -urNp linux-2.6.32.44/drivers/net/tokenring/abyss.c linux-2.6.32.44/drivers/net/tokenring/abyss.c
35318 --- linux-2.6.32.44/drivers/net/tokenring/abyss.c 2011-03-27 14:31:47.000000000 -0400
35319 +++ linux-2.6.32.44/drivers/net/tokenring/abyss.c 2011-08-05 20:33:55.000000000 -0400
35320 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
35321
35322 static int __init abyss_init (void)
35323 {
35324 - abyss_netdev_ops = tms380tr_netdev_ops;
35325 + pax_open_kernel();
35326 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35327
35328 - abyss_netdev_ops.ndo_open = abyss_open;
35329 - abyss_netdev_ops.ndo_stop = abyss_close;
35330 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
35331 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
35332 + pax_close_kernel();
35333
35334 return pci_register_driver(&abyss_driver);
35335 }
35336 diff -urNp linux-2.6.32.44/drivers/net/tokenring/madgemc.c linux-2.6.32.44/drivers/net/tokenring/madgemc.c
35337 --- linux-2.6.32.44/drivers/net/tokenring/madgemc.c 2011-03-27 14:31:47.000000000 -0400
35338 +++ linux-2.6.32.44/drivers/net/tokenring/madgemc.c 2011-08-05 20:33:55.000000000 -0400
35339 @@ -755,9 +755,11 @@ static struct mca_driver madgemc_driver
35340
35341 static int __init madgemc_init (void)
35342 {
35343 - madgemc_netdev_ops = tms380tr_netdev_ops;
35344 - madgemc_netdev_ops.ndo_open = madgemc_open;
35345 - madgemc_netdev_ops.ndo_stop = madgemc_close;
35346 + pax_open_kernel();
35347 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35348 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
35349 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
35350 + pax_close_kernel();
35351
35352 return mca_register_driver (&madgemc_driver);
35353 }
35354 diff -urNp linux-2.6.32.44/drivers/net/tokenring/proteon.c linux-2.6.32.44/drivers/net/tokenring/proteon.c
35355 --- linux-2.6.32.44/drivers/net/tokenring/proteon.c 2011-03-27 14:31:47.000000000 -0400
35356 +++ linux-2.6.32.44/drivers/net/tokenring/proteon.c 2011-08-05 20:33:55.000000000 -0400
35357 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
35358 struct platform_device *pdev;
35359 int i, num = 0, err = 0;
35360
35361 - proteon_netdev_ops = tms380tr_netdev_ops;
35362 - proteon_netdev_ops.ndo_open = proteon_open;
35363 - proteon_netdev_ops.ndo_stop = tms380tr_close;
35364 + pax_open_kernel();
35365 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35366 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
35367 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
35368 + pax_close_kernel();
35369
35370 err = platform_driver_register(&proteon_driver);
35371 if (err)
35372 diff -urNp linux-2.6.32.44/drivers/net/tokenring/skisa.c linux-2.6.32.44/drivers/net/tokenring/skisa.c
35373 --- linux-2.6.32.44/drivers/net/tokenring/skisa.c 2011-03-27 14:31:47.000000000 -0400
35374 +++ linux-2.6.32.44/drivers/net/tokenring/skisa.c 2011-08-05 20:33:55.000000000 -0400
35375 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
35376 struct platform_device *pdev;
35377 int i, num = 0, err = 0;
35378
35379 - sk_isa_netdev_ops = tms380tr_netdev_ops;
35380 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
35381 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
35382 + pax_open_kernel();
35383 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35384 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
35385 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
35386 + pax_close_kernel();
35387
35388 err = platform_driver_register(&sk_isa_driver);
35389 if (err)
35390 diff -urNp linux-2.6.32.44/drivers/net/tulip/de2104x.c linux-2.6.32.44/drivers/net/tulip/de2104x.c
35391 --- linux-2.6.32.44/drivers/net/tulip/de2104x.c 2011-03-27 14:31:47.000000000 -0400
35392 +++ linux-2.6.32.44/drivers/net/tulip/de2104x.c 2011-05-16 21:46:57.000000000 -0400
35393 @@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_i
35394 struct de_srom_info_leaf *il;
35395 void *bufp;
35396
35397 + pax_track_stack();
35398 +
35399 /* download entire eeprom */
35400 for (i = 0; i < DE_EEPROM_WORDS; i++)
35401 ((__le16 *)ee_data)[i] =
35402 diff -urNp linux-2.6.32.44/drivers/net/tulip/de4x5.c linux-2.6.32.44/drivers/net/tulip/de4x5.c
35403 --- linux-2.6.32.44/drivers/net/tulip/de4x5.c 2011-03-27 14:31:47.000000000 -0400
35404 +++ linux-2.6.32.44/drivers/net/tulip/de4x5.c 2011-04-17 15:56:46.000000000 -0400
35405 @@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, stru
35406 for (i=0; i<ETH_ALEN; i++) {
35407 tmp.addr[i] = dev->dev_addr[i];
35408 }
35409 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
35410 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
35411 break;
35412
35413 case DE4X5_SET_HWADDR: /* Set the hardware address */
35414 @@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, stru
35415 spin_lock_irqsave(&lp->lock, flags);
35416 memcpy(&statbuf, &lp->pktStats, ioc->len);
35417 spin_unlock_irqrestore(&lp->lock, flags);
35418 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
35419 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
35420 return -EFAULT;
35421 break;
35422 }
35423 diff -urNp linux-2.6.32.44/drivers/net/usb/hso.c linux-2.6.32.44/drivers/net/usb/hso.c
35424 --- linux-2.6.32.44/drivers/net/usb/hso.c 2011-03-27 14:31:47.000000000 -0400
35425 +++ linux-2.6.32.44/drivers/net/usb/hso.c 2011-04-17 15:56:46.000000000 -0400
35426 @@ -71,7 +71,7 @@
35427 #include <asm/byteorder.h>
35428 #include <linux/serial_core.h>
35429 #include <linux/serial.h>
35430 -
35431 +#include <asm/local.h>
35432
35433 #define DRIVER_VERSION "1.2"
35434 #define MOD_AUTHOR "Option Wireless"
35435 @@ -258,7 +258,7 @@ struct hso_serial {
35436
35437 /* from usb_serial_port */
35438 struct tty_struct *tty;
35439 - int open_count;
35440 + local_t open_count;
35441 spinlock_t serial_lock;
35442
35443 int (*write_data) (struct hso_serial *serial);
35444 @@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_
35445 struct urb *urb;
35446
35447 urb = serial->rx_urb[0];
35448 - if (serial->open_count > 0) {
35449 + if (local_read(&serial->open_count) > 0) {
35450 count = put_rxbuf_data(urb, serial);
35451 if (count == -1)
35452 return;
35453 @@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_cal
35454 DUMP1(urb->transfer_buffer, urb->actual_length);
35455
35456 /* Anyone listening? */
35457 - if (serial->open_count == 0)
35458 + if (local_read(&serial->open_count) == 0)
35459 return;
35460
35461 if (status == 0) {
35462 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
35463 spin_unlock_irq(&serial->serial_lock);
35464
35465 /* check for port already opened, if not set the termios */
35466 - serial->open_count++;
35467 - if (serial->open_count == 1) {
35468 + if (local_inc_return(&serial->open_count) == 1) {
35469 tty->low_latency = 1;
35470 serial->rx_state = RX_IDLE;
35471 /* Force default termio settings */
35472 @@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_st
35473 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
35474 if (result) {
35475 hso_stop_serial_device(serial->parent);
35476 - serial->open_count--;
35477 + local_dec(&serial->open_count);
35478 kref_put(&serial->parent->ref, hso_serial_ref_free);
35479 }
35480 } else {
35481 @@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_
35482
35483 /* reset the rts and dtr */
35484 /* do the actual close */
35485 - serial->open_count--;
35486 + local_dec(&serial->open_count);
35487
35488 - if (serial->open_count <= 0) {
35489 - serial->open_count = 0;
35490 + if (local_read(&serial->open_count) <= 0) {
35491 + local_set(&serial->open_count, 0);
35492 spin_lock_irq(&serial->serial_lock);
35493 if (serial->tty == tty) {
35494 serial->tty->driver_data = NULL;
35495 @@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struc
35496
35497 /* the actual setup */
35498 spin_lock_irqsave(&serial->serial_lock, flags);
35499 - if (serial->open_count)
35500 + if (local_read(&serial->open_count))
35501 _hso_serial_set_termios(tty, old);
35502 else
35503 tty->termios = old;
35504 @@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interfa
35505 /* Start all serial ports */
35506 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
35507 if (serial_table[i] && (serial_table[i]->interface == iface)) {
35508 - if (dev2ser(serial_table[i])->open_count) {
35509 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
35510 result =
35511 hso_start_serial_device(serial_table[i], GFP_NOIO);
35512 hso_kick_transmit(dev2ser(serial_table[i]));
35513 diff -urNp linux-2.6.32.44/drivers/net/vxge/vxge-config.h linux-2.6.32.44/drivers/net/vxge/vxge-config.h
35514 --- linux-2.6.32.44/drivers/net/vxge/vxge-config.h 2011-03-27 14:31:47.000000000 -0400
35515 +++ linux-2.6.32.44/drivers/net/vxge/vxge-config.h 2011-08-05 20:33:55.000000000 -0400
35516 @@ -474,7 +474,7 @@ struct vxge_hw_uld_cbs {
35517 void (*link_down)(struct __vxge_hw_device *devh);
35518 void (*crit_err)(struct __vxge_hw_device *devh,
35519 enum vxge_hw_event type, u64 ext_data);
35520 -};
35521 +} __no_const;
35522
35523 /*
35524 * struct __vxge_hw_blockpool_entry - Block private data structure
35525 diff -urNp linux-2.6.32.44/drivers/net/vxge/vxge-main.c linux-2.6.32.44/drivers/net/vxge/vxge-main.c
35526 --- linux-2.6.32.44/drivers/net/vxge/vxge-main.c 2011-03-27 14:31:47.000000000 -0400
35527 +++ linux-2.6.32.44/drivers/net/vxge/vxge-main.c 2011-05-16 21:46:57.000000000 -0400
35528 @@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_T
35529 struct sk_buff *completed[NR_SKB_COMPLETED];
35530 int more;
35531
35532 + pax_track_stack();
35533 +
35534 do {
35535 more = 0;
35536 skb_ptr = completed;
35537 @@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_conf
35538 u8 mtable[256] = {0}; /* CPU to vpath mapping */
35539 int index;
35540
35541 + pax_track_stack();
35542 +
35543 /*
35544 * Filling
35545 * - itable with bucket numbers
35546 diff -urNp linux-2.6.32.44/drivers/net/vxge/vxge-traffic.h linux-2.6.32.44/drivers/net/vxge/vxge-traffic.h
35547 --- linux-2.6.32.44/drivers/net/vxge/vxge-traffic.h 2011-03-27 14:31:47.000000000 -0400
35548 +++ linux-2.6.32.44/drivers/net/vxge/vxge-traffic.h 2011-08-05 20:33:55.000000000 -0400
35549 @@ -2123,7 +2123,7 @@ struct vxge_hw_mempool_cbs {
35550 struct vxge_hw_mempool_dma *dma_object,
35551 u32 index,
35552 u32 is_last);
35553 -};
35554 +} __no_const;
35555
35556 void
35557 __vxge_hw_mempool_destroy(
35558 diff -urNp linux-2.6.32.44/drivers/net/wan/cycx_x25.c linux-2.6.32.44/drivers/net/wan/cycx_x25.c
35559 --- linux-2.6.32.44/drivers/net/wan/cycx_x25.c 2011-03-27 14:31:47.000000000 -0400
35560 +++ linux-2.6.32.44/drivers/net/wan/cycx_x25.c 2011-05-16 21:46:57.000000000 -0400
35561 @@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned
35562 unsigned char hex[1024],
35563 * phex = hex;
35564
35565 + pax_track_stack();
35566 +
35567 if (len >= (sizeof(hex) / 2))
35568 len = (sizeof(hex) / 2) - 1;
35569
35570 diff -urNp linux-2.6.32.44/drivers/net/wan/hdlc_x25.c linux-2.6.32.44/drivers/net/wan/hdlc_x25.c
35571 --- linux-2.6.32.44/drivers/net/wan/hdlc_x25.c 2011-03-27 14:31:47.000000000 -0400
35572 +++ linux-2.6.32.44/drivers/net/wan/hdlc_x25.c 2011-08-05 20:33:55.000000000 -0400
35573 @@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
35574
35575 static int x25_open(struct net_device *dev)
35576 {
35577 - struct lapb_register_struct cb;
35578 + static struct lapb_register_struct cb = {
35579 + .connect_confirmation = x25_connected,
35580 + .connect_indication = x25_connected,
35581 + .disconnect_confirmation = x25_disconnected,
35582 + .disconnect_indication = x25_disconnected,
35583 + .data_indication = x25_data_indication,
35584 + .data_transmit = x25_data_transmit
35585 + };
35586 int result;
35587
35588 - cb.connect_confirmation = x25_connected;
35589 - cb.connect_indication = x25_connected;
35590 - cb.disconnect_confirmation = x25_disconnected;
35591 - cb.disconnect_indication = x25_disconnected;
35592 - cb.data_indication = x25_data_indication;
35593 - cb.data_transmit = x25_data_transmit;
35594 -
35595 result = lapb_register(dev, &cb);
35596 if (result != LAPB_OK)
35597 return result;
35598 diff -urNp linux-2.6.32.44/drivers/net/wimax/i2400m/usb-fw.c linux-2.6.32.44/drivers/net/wimax/i2400m/usb-fw.c
35599 --- linux-2.6.32.44/drivers/net/wimax/i2400m/usb-fw.c 2011-03-27 14:31:47.000000000 -0400
35600 +++ linux-2.6.32.44/drivers/net/wimax/i2400m/usb-fw.c 2011-05-16 21:46:57.000000000 -0400
35601 @@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
35602 int do_autopm = 1;
35603 DECLARE_COMPLETION_ONSTACK(notif_completion);
35604
35605 + pax_track_stack();
35606 +
35607 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
35608 i2400m, ack, ack_size);
35609 BUG_ON(_ack == i2400m->bm_ack_buf);
35610 diff -urNp linux-2.6.32.44/drivers/net/wireless/airo.c linux-2.6.32.44/drivers/net/wireless/airo.c
35611 --- linux-2.6.32.44/drivers/net/wireless/airo.c 2011-03-27 14:31:47.000000000 -0400
35612 +++ linux-2.6.32.44/drivers/net/wireless/airo.c 2011-05-16 21:46:57.000000000 -0400
35613 @@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
35614 BSSListElement * loop_net;
35615 BSSListElement * tmp_net;
35616
35617 + pax_track_stack();
35618 +
35619 /* Blow away current list of scan results */
35620 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
35621 list_move_tail (&loop_net->list, &ai->network_free_list);
35622 @@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *
35623 WepKeyRid wkr;
35624 int rc;
35625
35626 + pax_track_stack();
35627 +
35628 memset( &mySsid, 0, sizeof( mySsid ) );
35629 kfree (ai->flash);
35630 ai->flash = NULL;
35631 @@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct i
35632 __le32 *vals = stats.vals;
35633 int len;
35634
35635 + pax_track_stack();
35636 +
35637 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
35638 return -ENOMEM;
35639 data = (struct proc_data *)file->private_data;
35640 @@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct ino
35641 /* If doLoseSync is not 1, we won't do a Lose Sync */
35642 int doLoseSync = -1;
35643
35644 + pax_track_stack();
35645 +
35646 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
35647 return -ENOMEM;
35648 data = (struct proc_data *)file->private_data;
35649 @@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_de
35650 int i;
35651 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
35652
35653 + pax_track_stack();
35654 +
35655 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
35656 if (!qual)
35657 return -ENOMEM;
35658 @@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(str
35659 CapabilityRid cap_rid;
35660 __le32 *vals = stats_rid.vals;
35661
35662 + pax_track_stack();
35663 +
35664 /* Get stats out of the card */
35665 clear_bit(JOB_WSTATS, &local->jobs);
35666 if (local->power.event) {
35667 diff -urNp linux-2.6.32.44/drivers/net/wireless/ath/ath5k/debug.c linux-2.6.32.44/drivers/net/wireless/ath/ath5k/debug.c
35668 --- linux-2.6.32.44/drivers/net/wireless/ath/ath5k/debug.c 2011-03-27 14:31:47.000000000 -0400
35669 +++ linux-2.6.32.44/drivers/net/wireless/ath/ath5k/debug.c 2011-05-16 21:46:57.000000000 -0400
35670 @@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct f
35671 unsigned int v;
35672 u64 tsf;
35673
35674 + pax_track_stack();
35675 +
35676 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
35677 len += snprintf(buf+len, sizeof(buf)-len,
35678 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
35679 @@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct fi
35680 unsigned int len = 0;
35681 unsigned int i;
35682
35683 + pax_track_stack();
35684 +
35685 len += snprintf(buf+len, sizeof(buf)-len,
35686 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
35687
35688 diff -urNp linux-2.6.32.44/drivers/net/wireless/ath/ath9k/debug.c linux-2.6.32.44/drivers/net/wireless/ath/ath9k/debug.c
35689 --- linux-2.6.32.44/drivers/net/wireless/ath/ath9k/debug.c 2011-03-27 14:31:47.000000000 -0400
35690 +++ linux-2.6.32.44/drivers/net/wireless/ath/ath9k/debug.c 2011-05-16 21:46:57.000000000 -0400
35691 @@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struc
35692 char buf[512];
35693 unsigned int len = 0;
35694
35695 + pax_track_stack();
35696 +
35697 len += snprintf(buf + len, sizeof(buf) - len,
35698 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
35699 len += snprintf(buf + len, sizeof(buf) - len,
35700 @@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct fi
35701 int i;
35702 u8 addr[ETH_ALEN];
35703
35704 + pax_track_stack();
35705 +
35706 len += snprintf(buf + len, sizeof(buf) - len,
35707 "primary: %s (%s chan=%d ht=%d)\n",
35708 wiphy_name(sc->pri_wiphy->hw->wiphy),
35709 diff -urNp linux-2.6.32.44/drivers/net/wireless/b43/debugfs.c linux-2.6.32.44/drivers/net/wireless/b43/debugfs.c
35710 --- linux-2.6.32.44/drivers/net/wireless/b43/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35711 +++ linux-2.6.32.44/drivers/net/wireless/b43/debugfs.c 2011-04-17 15:56:46.000000000 -0400
35712 @@ -43,7 +43,7 @@ static struct dentry *rootdir;
35713 struct b43_debugfs_fops {
35714 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
35715 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
35716 - struct file_operations fops;
35717 + const struct file_operations fops;
35718 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
35719 size_t file_struct_offset;
35720 };
35721 diff -urNp linux-2.6.32.44/drivers/net/wireless/b43legacy/debugfs.c linux-2.6.32.44/drivers/net/wireless/b43legacy/debugfs.c
35722 --- linux-2.6.32.44/drivers/net/wireless/b43legacy/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35723 +++ linux-2.6.32.44/drivers/net/wireless/b43legacy/debugfs.c 2011-04-17 15:56:46.000000000 -0400
35724 @@ -44,7 +44,7 @@ static struct dentry *rootdir;
35725 struct b43legacy_debugfs_fops {
35726 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
35727 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
35728 - struct file_operations fops;
35729 + const struct file_operations fops;
35730 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
35731 size_t file_struct_offset;
35732 /* Take wl->irq_lock before calling read/write? */
35733 diff -urNp linux-2.6.32.44/drivers/net/wireless/ipw2x00/ipw2100.c linux-2.6.32.44/drivers/net/wireless/ipw2x00/ipw2100.c
35734 --- linux-2.6.32.44/drivers/net/wireless/ipw2x00/ipw2100.c 2011-03-27 14:31:47.000000000 -0400
35735 +++ linux-2.6.32.44/drivers/net/wireless/ipw2x00/ipw2100.c 2011-05-16 21:46:57.000000000 -0400
35736 @@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2
35737 int err;
35738 DECLARE_SSID_BUF(ssid);
35739
35740 + pax_track_stack();
35741 +
35742 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
35743
35744 if (ssid_len)
35745 @@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw210
35746 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
35747 int err;
35748
35749 + pax_track_stack();
35750 +
35751 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
35752 idx, keylen, len);
35753
35754 diff -urNp linux-2.6.32.44/drivers/net/wireless/ipw2x00/libipw_rx.c linux-2.6.32.44/drivers/net/wireless/ipw2x00/libipw_rx.c
35755 --- linux-2.6.32.44/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-03-27 14:31:47.000000000 -0400
35756 +++ linux-2.6.32.44/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-05-16 21:46:57.000000000 -0400
35757 @@ -1566,6 +1566,8 @@ static void libipw_process_probe_respons
35758 unsigned long flags;
35759 DECLARE_SSID_BUF(ssid);
35760
35761 + pax_track_stack();
35762 +
35763 LIBIPW_DEBUG_SCAN("'%s' (%pM"
35764 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
35765 print_ssid(ssid, info_element->data, info_element->len),
35766 diff -urNp linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-1000.c linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-1000.c
35767 --- linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-03-27 14:31:47.000000000 -0400
35768 +++ linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-04-17 15:56:46.000000000 -0400
35769 @@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib =
35770 },
35771 };
35772
35773 -static struct iwl_ops iwl1000_ops = {
35774 +static const struct iwl_ops iwl1000_ops = {
35775 .ucode = &iwl5000_ucode,
35776 .lib = &iwl1000_lib,
35777 .hcmd = &iwl5000_hcmd,
35778 diff -urNp linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl3945-base.c linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl3945-base.c
35779 --- linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl3945-base.c 2011-03-27 14:31:47.000000000 -0400
35780 +++ linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl3945-base.c 2011-08-05 20:33:55.000000000 -0400
35781 @@ -3927,7 +3927,9 @@ static int iwl3945_pci_probe(struct pci_
35782 */
35783 if (iwl3945_mod_params.disable_hw_scan) {
35784 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
35785 - iwl3945_hw_ops.hw_scan = NULL;
35786 + pax_open_kernel();
35787 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
35788 + pax_close_kernel();
35789 }
35790
35791
35792 diff -urNp linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-3945.c linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-3945.c
35793 --- linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-03-27 14:31:47.000000000 -0400
35794 +++ linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-04-17 15:56:46.000000000 -0400
35795 @@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945
35796 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
35797 };
35798
35799 -static struct iwl_ops iwl3945_ops = {
35800 +static const struct iwl_ops iwl3945_ops = {
35801 .ucode = &iwl3945_ucode,
35802 .lib = &iwl3945_lib,
35803 .hcmd = &iwl3945_hcmd,
35804 diff -urNp linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-4965.c linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-4965.c
35805 --- linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-03-27 14:31:47.000000000 -0400
35806 +++ linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-04-17 15:56:46.000000000 -0400
35807 @@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib =
35808 },
35809 };
35810
35811 -static struct iwl_ops iwl4965_ops = {
35812 +static const struct iwl_ops iwl4965_ops = {
35813 .ucode = &iwl4965_ucode,
35814 .lib = &iwl4965_lib,
35815 .hcmd = &iwl4965_hcmd,
35816 diff -urNp linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-5000.c linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-5000.c
35817 --- linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-06-25 12:55:34.000000000 -0400
35818 +++ linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-06-25 12:56:37.000000000 -0400
35819 @@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib =
35820 },
35821 };
35822
35823 -struct iwl_ops iwl5000_ops = {
35824 +const struct iwl_ops iwl5000_ops = {
35825 .ucode = &iwl5000_ucode,
35826 .lib = &iwl5000_lib,
35827 .hcmd = &iwl5000_hcmd,
35828 .utils = &iwl5000_hcmd_utils,
35829 };
35830
35831 -static struct iwl_ops iwl5150_ops = {
35832 +static const struct iwl_ops iwl5150_ops = {
35833 .ucode = &iwl5000_ucode,
35834 .lib = &iwl5150_lib,
35835 .hcmd = &iwl5000_hcmd,
35836 diff -urNp linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-6000.c linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-6000.c
35837 --- linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-03-27 14:31:47.000000000 -0400
35838 +++ linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-04-17 15:56:46.000000000 -0400
35839 @@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000
35840 .calc_rssi = iwl5000_calc_rssi,
35841 };
35842
35843 -static struct iwl_ops iwl6000_ops = {
35844 +static const struct iwl_ops iwl6000_ops = {
35845 .ucode = &iwl5000_ucode,
35846 .lib = &iwl6000_lib,
35847 .hcmd = &iwl5000_hcmd,
35848 diff -urNp linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-agn.c linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-agn.c
35849 --- linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-03-27 14:31:47.000000000 -0400
35850 +++ linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-08-05 20:33:55.000000000 -0400
35851 @@ -2911,7 +2911,9 @@ static int iwl_pci_probe(struct pci_dev
35852 if (iwl_debug_level & IWL_DL_INFO)
35853 dev_printk(KERN_DEBUG, &(pdev->dev),
35854 "Disabling hw_scan\n");
35855 - iwl_hw_ops.hw_scan = NULL;
35856 + pax_open_kernel();
35857 + *(void **)&iwl_hw_ops.hw_scan = NULL;
35858 + pax_close_kernel();
35859 }
35860
35861 hw = iwl_alloc_all(cfg, &iwl_hw_ops);
35862 diff -urNp linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
35863 --- linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-03-27 14:31:47.000000000 -0400
35864 +++ linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-05-16 21:46:57.000000000 -0400
35865 @@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, s
35866 u8 active_index = 0;
35867 s32 tpt = 0;
35868
35869 + pax_track_stack();
35870 +
35871 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
35872
35873 if (!ieee80211_is_data(hdr->frame_control) ||
35874 @@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_
35875 u8 valid_tx_ant = 0;
35876 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
35877
35878 + pax_track_stack();
35879 +
35880 /* Override starting rate (index 0) if needed for debug purposes */
35881 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
35882
35883 diff -urNp linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-debugfs.c
35884 --- linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-03-27 14:31:47.000000000 -0400
35885 +++ linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-05-16 21:46:57.000000000 -0400
35886 @@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(str
35887 int pos = 0;
35888 const size_t bufsz = sizeof(buf);
35889
35890 + pax_track_stack();
35891 +
35892 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
35893 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
35894 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
35895 @@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
35896 const size_t bufsz = sizeof(buf);
35897 ssize_t ret;
35898
35899 + pax_track_stack();
35900 +
35901 for (i = 0; i < AC_NUM; i++) {
35902 pos += scnprintf(buf + pos, bufsz - pos,
35903 "\tcw_min\tcw_max\taifsn\ttxop\n");
35904 diff -urNp linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-debug.h linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-debug.h
35905 --- linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-03-27 14:31:47.000000000 -0400
35906 +++ linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-04-17 15:56:46.000000000 -0400
35907 @@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_pri
35908 #endif
35909
35910 #else
35911 -#define IWL_DEBUG(__priv, level, fmt, args...)
35912 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
35913 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
35914 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
35915 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
35916 void *p, u32 len)
35917 {}
35918 diff -urNp linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-dev.h linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-dev.h
35919 --- linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-03-27 14:31:47.000000000 -0400
35920 +++ linux-2.6.32.44/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-04-17 15:56:46.000000000 -0400
35921 @@ -68,7 +68,7 @@ struct iwl_tx_queue;
35922
35923 /* shared structures from iwl-5000.c */
35924 extern struct iwl_mod_params iwl50_mod_params;
35925 -extern struct iwl_ops iwl5000_ops;
35926 +extern const struct iwl_ops iwl5000_ops;
35927 extern struct iwl_ucode_ops iwl5000_ucode;
35928 extern struct iwl_lib_ops iwl5000_lib;
35929 extern struct iwl_hcmd_ops iwl5000_hcmd;
35930 diff -urNp linux-2.6.32.44/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-2.6.32.44/drivers/net/wireless/iwmc3200wifi/debugfs.c
35931 --- linux-2.6.32.44/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35932 +++ linux-2.6.32.44/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-05-16 21:46:57.000000000 -0400
35933 @@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
35934 int buf_len = 512;
35935 size_t len = 0;
35936
35937 + pax_track_stack();
35938 +
35939 if (*ppos != 0)
35940 return 0;
35941 if (count < sizeof(buf))
35942 diff -urNp linux-2.6.32.44/drivers/net/wireless/libertas/debugfs.c linux-2.6.32.44/drivers/net/wireless/libertas/debugfs.c
35943 --- linux-2.6.32.44/drivers/net/wireless/libertas/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35944 +++ linux-2.6.32.44/drivers/net/wireless/libertas/debugfs.c 2011-04-17 15:56:46.000000000 -0400
35945 @@ -708,7 +708,7 @@ out_unlock:
35946 struct lbs_debugfs_files {
35947 const char *name;
35948 int perm;
35949 - struct file_operations fops;
35950 + const struct file_operations fops;
35951 };
35952
35953 static const struct lbs_debugfs_files debugfs_files[] = {
35954 diff -urNp linux-2.6.32.44/drivers/net/wireless/rndis_wlan.c linux-2.6.32.44/drivers/net/wireless/rndis_wlan.c
35955 --- linux-2.6.32.44/drivers/net/wireless/rndis_wlan.c 2011-03-27 14:31:47.000000000 -0400
35956 +++ linux-2.6.32.44/drivers/net/wireless/rndis_wlan.c 2011-04-17 15:56:46.000000000 -0400
35957 @@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbn
35958
35959 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
35960
35961 - if (rts_threshold < 0 || rts_threshold > 2347)
35962 + if (rts_threshold > 2347)
35963 rts_threshold = 2347;
35964
35965 tmp = cpu_to_le32(rts_threshold);
35966 diff -urNp linux-2.6.32.44/drivers/oprofile/buffer_sync.c linux-2.6.32.44/drivers/oprofile/buffer_sync.c
35967 --- linux-2.6.32.44/drivers/oprofile/buffer_sync.c 2011-03-27 14:31:47.000000000 -0400
35968 +++ linux-2.6.32.44/drivers/oprofile/buffer_sync.c 2011-04-17 15:56:46.000000000 -0400
35969 @@ -341,7 +341,7 @@ static void add_data(struct op_entry *en
35970 if (cookie == NO_COOKIE)
35971 offset = pc;
35972 if (cookie == INVALID_COOKIE) {
35973 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35974 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35975 offset = pc;
35976 }
35977 if (cookie != last_cookie) {
35978 @@ -385,14 +385,14 @@ add_sample(struct mm_struct *mm, struct
35979 /* add userspace sample */
35980
35981 if (!mm) {
35982 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
35983 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
35984 return 0;
35985 }
35986
35987 cookie = lookup_dcookie(mm, s->eip, &offset);
35988
35989 if (cookie == INVALID_COOKIE) {
35990 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35991 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35992 return 0;
35993 }
35994
35995 @@ -561,7 +561,7 @@ void sync_buffer(int cpu)
35996 /* ignore backtraces if failed to add a sample */
35997 if (state == sb_bt_start) {
35998 state = sb_bt_ignore;
35999 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
36000 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
36001 }
36002 }
36003 release_mm(mm);
36004 diff -urNp linux-2.6.32.44/drivers/oprofile/event_buffer.c linux-2.6.32.44/drivers/oprofile/event_buffer.c
36005 --- linux-2.6.32.44/drivers/oprofile/event_buffer.c 2011-03-27 14:31:47.000000000 -0400
36006 +++ linux-2.6.32.44/drivers/oprofile/event_buffer.c 2011-04-17 15:56:46.000000000 -0400
36007 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
36008 }
36009
36010 if (buffer_pos == buffer_size) {
36011 - atomic_inc(&oprofile_stats.event_lost_overflow);
36012 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
36013 return;
36014 }
36015
36016 diff -urNp linux-2.6.32.44/drivers/oprofile/oprof.c linux-2.6.32.44/drivers/oprofile/oprof.c
36017 --- linux-2.6.32.44/drivers/oprofile/oprof.c 2011-03-27 14:31:47.000000000 -0400
36018 +++ linux-2.6.32.44/drivers/oprofile/oprof.c 2011-04-17 15:56:46.000000000 -0400
36019 @@ -110,7 +110,7 @@ static void switch_worker(struct work_st
36020 if (oprofile_ops.switch_events())
36021 return;
36022
36023 - atomic_inc(&oprofile_stats.multiplex_counter);
36024 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
36025 start_switch_worker();
36026 }
36027
36028 diff -urNp linux-2.6.32.44/drivers/oprofile/oprofilefs.c linux-2.6.32.44/drivers/oprofile/oprofilefs.c
36029 --- linux-2.6.32.44/drivers/oprofile/oprofilefs.c 2011-03-27 14:31:47.000000000 -0400
36030 +++ linux-2.6.32.44/drivers/oprofile/oprofilefs.c 2011-04-17 15:56:46.000000000 -0400
36031 @@ -187,7 +187,7 @@ static const struct file_operations atom
36032
36033
36034 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
36035 - char const *name, atomic_t *val)
36036 + char const *name, atomic_unchecked_t *val)
36037 {
36038 struct dentry *d = __oprofilefs_create_file(sb, root, name,
36039 &atomic_ro_fops, 0444);
36040 diff -urNp linux-2.6.32.44/drivers/oprofile/oprofile_stats.c linux-2.6.32.44/drivers/oprofile/oprofile_stats.c
36041 --- linux-2.6.32.44/drivers/oprofile/oprofile_stats.c 2011-03-27 14:31:47.000000000 -0400
36042 +++ linux-2.6.32.44/drivers/oprofile/oprofile_stats.c 2011-04-17 15:56:46.000000000 -0400
36043 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
36044 cpu_buf->sample_invalid_eip = 0;
36045 }
36046
36047 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
36048 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
36049 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
36050 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
36051 - atomic_set(&oprofile_stats.multiplex_counter, 0);
36052 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
36053 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
36054 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
36055 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
36056 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
36057 }
36058
36059
36060 diff -urNp linux-2.6.32.44/drivers/oprofile/oprofile_stats.h linux-2.6.32.44/drivers/oprofile/oprofile_stats.h
36061 --- linux-2.6.32.44/drivers/oprofile/oprofile_stats.h 2011-03-27 14:31:47.000000000 -0400
36062 +++ linux-2.6.32.44/drivers/oprofile/oprofile_stats.h 2011-04-17 15:56:46.000000000 -0400
36063 @@ -13,11 +13,11 @@
36064 #include <asm/atomic.h>
36065
36066 struct oprofile_stat_struct {
36067 - atomic_t sample_lost_no_mm;
36068 - atomic_t sample_lost_no_mapping;
36069 - atomic_t bt_lost_no_mapping;
36070 - atomic_t event_lost_overflow;
36071 - atomic_t multiplex_counter;
36072 + atomic_unchecked_t sample_lost_no_mm;
36073 + atomic_unchecked_t sample_lost_no_mapping;
36074 + atomic_unchecked_t bt_lost_no_mapping;
36075 + atomic_unchecked_t event_lost_overflow;
36076 + atomic_unchecked_t multiplex_counter;
36077 };
36078
36079 extern struct oprofile_stat_struct oprofile_stats;
36080 diff -urNp linux-2.6.32.44/drivers/parisc/pdc_stable.c linux-2.6.32.44/drivers/parisc/pdc_stable.c
36081 --- linux-2.6.32.44/drivers/parisc/pdc_stable.c 2011-03-27 14:31:47.000000000 -0400
36082 +++ linux-2.6.32.44/drivers/parisc/pdc_stable.c 2011-04-17 15:56:46.000000000 -0400
36083 @@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj
36084 return ret;
36085 }
36086
36087 -static struct sysfs_ops pdcspath_attr_ops = {
36088 +static const struct sysfs_ops pdcspath_attr_ops = {
36089 .show = pdcspath_attr_show,
36090 .store = pdcspath_attr_store,
36091 };
36092 diff -urNp linux-2.6.32.44/drivers/parport/procfs.c linux-2.6.32.44/drivers/parport/procfs.c
36093 --- linux-2.6.32.44/drivers/parport/procfs.c 2011-03-27 14:31:47.000000000 -0400
36094 +++ linux-2.6.32.44/drivers/parport/procfs.c 2011-04-17 15:56:46.000000000 -0400
36095 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
36096
36097 *ppos += len;
36098
36099 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
36100 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
36101 }
36102
36103 #ifdef CONFIG_PARPORT_1284
36104 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
36105
36106 *ppos += len;
36107
36108 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
36109 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
36110 }
36111 #endif /* IEEE1284.3 support. */
36112
36113 diff -urNp linux-2.6.32.44/drivers/pci/hotplug/acpiphp_glue.c linux-2.6.32.44/drivers/pci/hotplug/acpiphp_glue.c
36114 --- linux-2.6.32.44/drivers/pci/hotplug/acpiphp_glue.c 2011-03-27 14:31:47.000000000 -0400
36115 +++ linux-2.6.32.44/drivers/pci/hotplug/acpiphp_glue.c 2011-04-17 15:56:46.000000000 -0400
36116 @@ -111,7 +111,7 @@ static int post_dock_fixups(struct notif
36117 }
36118
36119
36120 -static struct acpi_dock_ops acpiphp_dock_ops = {
36121 +static const struct acpi_dock_ops acpiphp_dock_ops = {
36122 .handler = handle_hotplug_event_func,
36123 };
36124
36125 diff -urNp linux-2.6.32.44/drivers/pci/hotplug/cpci_hotplug.h linux-2.6.32.44/drivers/pci/hotplug/cpci_hotplug.h
36126 --- linux-2.6.32.44/drivers/pci/hotplug/cpci_hotplug.h 2011-03-27 14:31:47.000000000 -0400
36127 +++ linux-2.6.32.44/drivers/pci/hotplug/cpci_hotplug.h 2011-08-05 20:33:55.000000000 -0400
36128 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
36129 int (*hardware_test) (struct slot* slot, u32 value);
36130 u8 (*get_power) (struct slot* slot);
36131 int (*set_power) (struct slot* slot, int value);
36132 -};
36133 +} __no_const;
36134
36135 struct cpci_hp_controller {
36136 unsigned int irq;
36137 diff -urNp linux-2.6.32.44/drivers/pci/hotplug/cpqphp_nvram.c linux-2.6.32.44/drivers/pci/hotplug/cpqphp_nvram.c
36138 --- linux-2.6.32.44/drivers/pci/hotplug/cpqphp_nvram.c 2011-03-27 14:31:47.000000000 -0400
36139 +++ linux-2.6.32.44/drivers/pci/hotplug/cpqphp_nvram.c 2011-04-17 15:56:46.000000000 -0400
36140 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
36141
36142 void compaq_nvram_init (void __iomem *rom_start)
36143 {
36144 +
36145 +#ifndef CONFIG_PAX_KERNEXEC
36146 if (rom_start) {
36147 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
36148 }
36149 +#endif
36150 +
36151 dbg("int15 entry = %p\n", compaq_int15_entry_point);
36152
36153 /* initialize our int15 lock */
36154 diff -urNp linux-2.6.32.44/drivers/pci/hotplug/fakephp.c linux-2.6.32.44/drivers/pci/hotplug/fakephp.c
36155 --- linux-2.6.32.44/drivers/pci/hotplug/fakephp.c 2011-03-27 14:31:47.000000000 -0400
36156 +++ linux-2.6.32.44/drivers/pci/hotplug/fakephp.c 2011-04-17 15:56:46.000000000 -0400
36157 @@ -73,7 +73,7 @@ static void legacy_release(struct kobjec
36158 }
36159
36160 static struct kobj_type legacy_ktype = {
36161 - .sysfs_ops = &(struct sysfs_ops){
36162 + .sysfs_ops = &(const struct sysfs_ops){
36163 .store = legacy_store, .show = legacy_show
36164 },
36165 .release = &legacy_release,
36166 diff -urNp linux-2.6.32.44/drivers/pci/intel-iommu.c linux-2.6.32.44/drivers/pci/intel-iommu.c
36167 --- linux-2.6.32.44/drivers/pci/intel-iommu.c 2011-05-10 22:12:01.000000000 -0400
36168 +++ linux-2.6.32.44/drivers/pci/intel-iommu.c 2011-05-10 22:12:33.000000000 -0400
36169 @@ -2643,7 +2643,7 @@ error:
36170 return 0;
36171 }
36172
36173 -static dma_addr_t intel_map_page(struct device *dev, struct page *page,
36174 +dma_addr_t intel_map_page(struct device *dev, struct page *page,
36175 unsigned long offset, size_t size,
36176 enum dma_data_direction dir,
36177 struct dma_attrs *attrs)
36178 @@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain
36179 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
36180 }
36181
36182 -static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
36183 +void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
36184 size_t size, enum dma_data_direction dir,
36185 struct dma_attrs *attrs)
36186 {
36187 @@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct devi
36188 }
36189 }
36190
36191 -static void *intel_alloc_coherent(struct device *hwdev, size_t size,
36192 +void *intel_alloc_coherent(struct device *hwdev, size_t size,
36193 dma_addr_t *dma_handle, gfp_t flags)
36194 {
36195 void *vaddr;
36196 @@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct
36197 return NULL;
36198 }
36199
36200 -static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
36201 +void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
36202 dma_addr_t dma_handle)
36203 {
36204 int order;
36205 @@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct d
36206 free_pages((unsigned long)vaddr, order);
36207 }
36208
36209 -static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
36210 +void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
36211 int nelems, enum dma_data_direction dir,
36212 struct dma_attrs *attrs)
36213 {
36214 @@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(str
36215 return nelems;
36216 }
36217
36218 -static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
36219 +int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
36220 enum dma_data_direction dir, struct dma_attrs *attrs)
36221 {
36222 int i;
36223 @@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *h
36224 return nelems;
36225 }
36226
36227 -static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
36228 +int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
36229 {
36230 return !dma_addr;
36231 }
36232
36233 -struct dma_map_ops intel_dma_ops = {
36234 +const struct dma_map_ops intel_dma_ops = {
36235 .alloc_coherent = intel_alloc_coherent,
36236 .free_coherent = intel_free_coherent,
36237 .map_sg = intel_map_sg,
36238 diff -urNp linux-2.6.32.44/drivers/pci/pcie/aspm.c linux-2.6.32.44/drivers/pci/pcie/aspm.c
36239 --- linux-2.6.32.44/drivers/pci/pcie/aspm.c 2011-03-27 14:31:47.000000000 -0400
36240 +++ linux-2.6.32.44/drivers/pci/pcie/aspm.c 2011-04-17 15:56:46.000000000 -0400
36241 @@ -27,9 +27,9 @@
36242 #define MODULE_PARAM_PREFIX "pcie_aspm."
36243
36244 /* Note: those are not register definitions */
36245 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
36246 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
36247 -#define ASPM_STATE_L1 (4) /* L1 state */
36248 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
36249 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
36250 +#define ASPM_STATE_L1 (4U) /* L1 state */
36251 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
36252 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
36253
36254 diff -urNp linux-2.6.32.44/drivers/pci/probe.c linux-2.6.32.44/drivers/pci/probe.c
36255 --- linux-2.6.32.44/drivers/pci/probe.c 2011-03-27 14:31:47.000000000 -0400
36256 +++ linux-2.6.32.44/drivers/pci/probe.c 2011-04-17 15:56:46.000000000 -0400
36257 @@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(
36258 return ret;
36259 }
36260
36261 -static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
36262 +static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
36263 struct device_attribute *attr,
36264 char *buf)
36265 {
36266 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
36267 }
36268
36269 -static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
36270 +static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
36271 struct device_attribute *attr,
36272 char *buf)
36273 {
36274 diff -urNp linux-2.6.32.44/drivers/pci/proc.c linux-2.6.32.44/drivers/pci/proc.c
36275 --- linux-2.6.32.44/drivers/pci/proc.c 2011-03-27 14:31:47.000000000 -0400
36276 +++ linux-2.6.32.44/drivers/pci/proc.c 2011-04-17 15:56:46.000000000 -0400
36277 @@ -480,7 +480,16 @@ static const struct file_operations proc
36278 static int __init pci_proc_init(void)
36279 {
36280 struct pci_dev *dev = NULL;
36281 +
36282 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
36283 +#ifdef CONFIG_GRKERNSEC_PROC_USER
36284 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
36285 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
36286 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
36287 +#endif
36288 +#else
36289 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
36290 +#endif
36291 proc_create("devices", 0, proc_bus_pci_dir,
36292 &proc_bus_pci_dev_operations);
36293 proc_initialized = 1;
36294 diff -urNp linux-2.6.32.44/drivers/pci/slot.c linux-2.6.32.44/drivers/pci/slot.c
36295 --- linux-2.6.32.44/drivers/pci/slot.c 2011-03-27 14:31:47.000000000 -0400
36296 +++ linux-2.6.32.44/drivers/pci/slot.c 2011-04-17 15:56:46.000000000 -0400
36297 @@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struc
36298 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
36299 }
36300
36301 -static struct sysfs_ops pci_slot_sysfs_ops = {
36302 +static const struct sysfs_ops pci_slot_sysfs_ops = {
36303 .show = pci_slot_attr_show,
36304 .store = pci_slot_attr_store,
36305 };
36306 diff -urNp linux-2.6.32.44/drivers/pcmcia/pcmcia_ioctl.c linux-2.6.32.44/drivers/pcmcia/pcmcia_ioctl.c
36307 --- linux-2.6.32.44/drivers/pcmcia/pcmcia_ioctl.c 2011-03-27 14:31:47.000000000 -0400
36308 +++ linux-2.6.32.44/drivers/pcmcia/pcmcia_ioctl.c 2011-04-17 15:56:46.000000000 -0400
36309 @@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode
36310 return -EFAULT;
36311 }
36312 }
36313 - buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
36314 + buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
36315 if (!buf)
36316 return -ENOMEM;
36317
36318 diff -urNp linux-2.6.32.44/drivers/platform/x86/acer-wmi.c linux-2.6.32.44/drivers/platform/x86/acer-wmi.c
36319 --- linux-2.6.32.44/drivers/platform/x86/acer-wmi.c 2011-03-27 14:31:47.000000000 -0400
36320 +++ linux-2.6.32.44/drivers/platform/x86/acer-wmi.c 2011-04-17 15:56:46.000000000 -0400
36321 @@ -918,7 +918,7 @@ static int update_bl_status(struct backl
36322 return 0;
36323 }
36324
36325 -static struct backlight_ops acer_bl_ops = {
36326 +static const struct backlight_ops acer_bl_ops = {
36327 .get_brightness = read_brightness,
36328 .update_status = update_bl_status,
36329 };
36330 diff -urNp linux-2.6.32.44/drivers/platform/x86/asus_acpi.c linux-2.6.32.44/drivers/platform/x86/asus_acpi.c
36331 --- linux-2.6.32.44/drivers/platform/x86/asus_acpi.c 2011-03-27 14:31:47.000000000 -0400
36332 +++ linux-2.6.32.44/drivers/platform/x86/asus_acpi.c 2011-04-17 15:56:46.000000000 -0400
36333 @@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_
36334 return 0;
36335 }
36336
36337 -static struct backlight_ops asus_backlight_data = {
36338 +static const struct backlight_ops asus_backlight_data = {
36339 .get_brightness = read_brightness,
36340 .update_status = set_brightness_status,
36341 };
36342 diff -urNp linux-2.6.32.44/drivers/platform/x86/asus-laptop.c linux-2.6.32.44/drivers/platform/x86/asus-laptop.c
36343 --- linux-2.6.32.44/drivers/platform/x86/asus-laptop.c 2011-03-27 14:31:47.000000000 -0400
36344 +++ linux-2.6.32.44/drivers/platform/x86/asus-laptop.c 2011-04-17 15:56:46.000000000 -0400
36345 @@ -250,7 +250,7 @@ static struct backlight_device *asus_bac
36346 */
36347 static int read_brightness(struct backlight_device *bd);
36348 static int update_bl_status(struct backlight_device *bd);
36349 -static struct backlight_ops asusbl_ops = {
36350 +static const struct backlight_ops asusbl_ops = {
36351 .get_brightness = read_brightness,
36352 .update_status = update_bl_status,
36353 };
36354 diff -urNp linux-2.6.32.44/drivers/platform/x86/compal-laptop.c linux-2.6.32.44/drivers/platform/x86/compal-laptop.c
36355 --- linux-2.6.32.44/drivers/platform/x86/compal-laptop.c 2011-03-27 14:31:47.000000000 -0400
36356 +++ linux-2.6.32.44/drivers/platform/x86/compal-laptop.c 2011-04-17 15:56:46.000000000 -0400
36357 @@ -163,7 +163,7 @@ static int bl_update_status(struct backl
36358 return set_lcd_level(b->props.brightness);
36359 }
36360
36361 -static struct backlight_ops compalbl_ops = {
36362 +static const struct backlight_ops compalbl_ops = {
36363 .get_brightness = bl_get_brightness,
36364 .update_status = bl_update_status,
36365 };
36366 diff -urNp linux-2.6.32.44/drivers/platform/x86/dell-laptop.c linux-2.6.32.44/drivers/platform/x86/dell-laptop.c
36367 --- linux-2.6.32.44/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:01.000000000 -0400
36368 +++ linux-2.6.32.44/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:33.000000000 -0400
36369 @@ -318,7 +318,7 @@ static int dell_get_intensity(struct bac
36370 return buffer.output[1];
36371 }
36372
36373 -static struct backlight_ops dell_ops = {
36374 +static const struct backlight_ops dell_ops = {
36375 .get_brightness = dell_get_intensity,
36376 .update_status = dell_send_intensity,
36377 };
36378 diff -urNp linux-2.6.32.44/drivers/platform/x86/eeepc-laptop.c linux-2.6.32.44/drivers/platform/x86/eeepc-laptop.c
36379 --- linux-2.6.32.44/drivers/platform/x86/eeepc-laptop.c 2011-03-27 14:31:47.000000000 -0400
36380 +++ linux-2.6.32.44/drivers/platform/x86/eeepc-laptop.c 2011-04-17 15:56:46.000000000 -0400
36381 @@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device
36382 */
36383 static int read_brightness(struct backlight_device *bd);
36384 static int update_bl_status(struct backlight_device *bd);
36385 -static struct backlight_ops eeepcbl_ops = {
36386 +static const struct backlight_ops eeepcbl_ops = {
36387 .get_brightness = read_brightness,
36388 .update_status = update_bl_status,
36389 };
36390 diff -urNp linux-2.6.32.44/drivers/platform/x86/fujitsu-laptop.c linux-2.6.32.44/drivers/platform/x86/fujitsu-laptop.c
36391 --- linux-2.6.32.44/drivers/platform/x86/fujitsu-laptop.c 2011-03-27 14:31:47.000000000 -0400
36392 +++ linux-2.6.32.44/drivers/platform/x86/fujitsu-laptop.c 2011-04-17 15:56:46.000000000 -0400
36393 @@ -436,7 +436,7 @@ static int bl_update_status(struct backl
36394 return ret;
36395 }
36396
36397 -static struct backlight_ops fujitsubl_ops = {
36398 +static const struct backlight_ops fujitsubl_ops = {
36399 .get_brightness = bl_get_brightness,
36400 .update_status = bl_update_status,
36401 };
36402 diff -urNp linux-2.6.32.44/drivers/platform/x86/msi-laptop.c linux-2.6.32.44/drivers/platform/x86/msi-laptop.c
36403 --- linux-2.6.32.44/drivers/platform/x86/msi-laptop.c 2011-03-27 14:31:47.000000000 -0400
36404 +++ linux-2.6.32.44/drivers/platform/x86/msi-laptop.c 2011-04-17 15:56:46.000000000 -0400
36405 @@ -161,7 +161,7 @@ static int bl_update_status(struct backl
36406 return set_lcd_level(b->props.brightness);
36407 }
36408
36409 -static struct backlight_ops msibl_ops = {
36410 +static const struct backlight_ops msibl_ops = {
36411 .get_brightness = bl_get_brightness,
36412 .update_status = bl_update_status,
36413 };
36414 diff -urNp linux-2.6.32.44/drivers/platform/x86/panasonic-laptop.c linux-2.6.32.44/drivers/platform/x86/panasonic-laptop.c
36415 --- linux-2.6.32.44/drivers/platform/x86/panasonic-laptop.c 2011-03-27 14:31:47.000000000 -0400
36416 +++ linux-2.6.32.44/drivers/platform/x86/panasonic-laptop.c 2011-04-17 15:56:46.000000000 -0400
36417 @@ -352,7 +352,7 @@ static int bl_set_status(struct backligh
36418 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
36419 }
36420
36421 -static struct backlight_ops pcc_backlight_ops = {
36422 +static const struct backlight_ops pcc_backlight_ops = {
36423 .get_brightness = bl_get,
36424 .update_status = bl_set_status,
36425 };
36426 diff -urNp linux-2.6.32.44/drivers/platform/x86/sony-laptop.c linux-2.6.32.44/drivers/platform/x86/sony-laptop.c
36427 --- linux-2.6.32.44/drivers/platform/x86/sony-laptop.c 2011-03-27 14:31:47.000000000 -0400
36428 +++ linux-2.6.32.44/drivers/platform/x86/sony-laptop.c 2011-04-17 15:56:46.000000000 -0400
36429 @@ -850,7 +850,7 @@ static int sony_backlight_get_brightness
36430 }
36431
36432 static struct backlight_device *sony_backlight_device;
36433 -static struct backlight_ops sony_backlight_ops = {
36434 +static const struct backlight_ops sony_backlight_ops = {
36435 .update_status = sony_backlight_update_status,
36436 .get_brightness = sony_backlight_get_brightness,
36437 };
36438 diff -urNp linux-2.6.32.44/drivers/platform/x86/thinkpad_acpi.c linux-2.6.32.44/drivers/platform/x86/thinkpad_acpi.c
36439 --- linux-2.6.32.44/drivers/platform/x86/thinkpad_acpi.c 2011-03-27 14:31:47.000000000 -0400
36440 +++ linux-2.6.32.44/drivers/platform/x86/thinkpad_acpi.c 2011-08-05 20:33:55.000000000 -0400
36441 @@ -2137,7 +2137,7 @@ static int hotkey_mask_get(void)
36442 return 0;
36443 }
36444
36445 -void static hotkey_mask_warn_incomplete_mask(void)
36446 +static void hotkey_mask_warn_incomplete_mask(void)
36447 {
36448 /* log only what the user can fix... */
36449 const u32 wantedmask = hotkey_driver_mask &
36450 @@ -6122,7 +6122,7 @@ static void tpacpi_brightness_notify_cha
36451 BACKLIGHT_UPDATE_HOTKEY);
36452 }
36453
36454 -static struct backlight_ops ibm_backlight_data = {
36455 +static const struct backlight_ops ibm_backlight_data = {
36456 .get_brightness = brightness_get,
36457 .update_status = brightness_update_status,
36458 };
36459 diff -urNp linux-2.6.32.44/drivers/platform/x86/toshiba_acpi.c linux-2.6.32.44/drivers/platform/x86/toshiba_acpi.c
36460 --- linux-2.6.32.44/drivers/platform/x86/toshiba_acpi.c 2011-03-27 14:31:47.000000000 -0400
36461 +++ linux-2.6.32.44/drivers/platform/x86/toshiba_acpi.c 2011-04-17 15:56:46.000000000 -0400
36462 @@ -671,7 +671,7 @@ static acpi_status remove_device(void)
36463 return AE_OK;
36464 }
36465
36466 -static struct backlight_ops toshiba_backlight_data = {
36467 +static const struct backlight_ops toshiba_backlight_data = {
36468 .get_brightness = get_lcd,
36469 .update_status = set_lcd_status,
36470 };
36471 diff -urNp linux-2.6.32.44/drivers/pnp/pnpbios/bioscalls.c linux-2.6.32.44/drivers/pnp/pnpbios/bioscalls.c
36472 --- linux-2.6.32.44/drivers/pnp/pnpbios/bioscalls.c 2011-03-27 14:31:47.000000000 -0400
36473 +++ linux-2.6.32.44/drivers/pnp/pnpbios/bioscalls.c 2011-04-17 15:56:46.000000000 -0400
36474 @@ -60,7 +60,7 @@ do { \
36475 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
36476 } while(0)
36477
36478 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
36479 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
36480 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
36481
36482 /*
36483 @@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func
36484
36485 cpu = get_cpu();
36486 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
36487 +
36488 + pax_open_kernel();
36489 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
36490 + pax_close_kernel();
36491
36492 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
36493 spin_lock_irqsave(&pnp_bios_lock, flags);
36494 @@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func
36495 :"memory");
36496 spin_unlock_irqrestore(&pnp_bios_lock, flags);
36497
36498 + pax_open_kernel();
36499 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
36500 + pax_close_kernel();
36501 +
36502 put_cpu();
36503
36504 /* If we get here and this is set then the PnP BIOS faulted on us. */
36505 @@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 n
36506 return status;
36507 }
36508
36509 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
36510 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
36511 {
36512 int i;
36513
36514 @@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_i
36515 pnp_bios_callpoint.offset = header->fields.pm16offset;
36516 pnp_bios_callpoint.segment = PNP_CS16;
36517
36518 + pax_open_kernel();
36519 +
36520 for_each_possible_cpu(i) {
36521 struct desc_struct *gdt = get_cpu_gdt_table(i);
36522 if (!gdt)
36523 @@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_i
36524 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
36525 (unsigned long)__va(header->fields.pm16dseg));
36526 }
36527 +
36528 + pax_close_kernel();
36529 }
36530 diff -urNp linux-2.6.32.44/drivers/pnp/resource.c linux-2.6.32.44/drivers/pnp/resource.c
36531 --- linux-2.6.32.44/drivers/pnp/resource.c 2011-03-27 14:31:47.000000000 -0400
36532 +++ linux-2.6.32.44/drivers/pnp/resource.c 2011-04-17 15:56:46.000000000 -0400
36533 @@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
36534 return 1;
36535
36536 /* check if the resource is valid */
36537 - if (*irq < 0 || *irq > 15)
36538 + if (*irq > 15)
36539 return 0;
36540
36541 /* check if the resource is reserved */
36542 @@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
36543 return 1;
36544
36545 /* check if the resource is valid */
36546 - if (*dma < 0 || *dma == 4 || *dma > 7)
36547 + if (*dma == 4 || *dma > 7)
36548 return 0;
36549
36550 /* check if the resource is reserved */
36551 diff -urNp linux-2.6.32.44/drivers/power/bq27x00_battery.c linux-2.6.32.44/drivers/power/bq27x00_battery.c
36552 --- linux-2.6.32.44/drivers/power/bq27x00_battery.c 2011-03-27 14:31:47.000000000 -0400
36553 +++ linux-2.6.32.44/drivers/power/bq27x00_battery.c 2011-08-05 20:33:55.000000000 -0400
36554 @@ -44,7 +44,7 @@ struct bq27x00_device_info;
36555 struct bq27x00_access_methods {
36556 int (*read)(u8 reg, int *rt_value, int b_single,
36557 struct bq27x00_device_info *di);
36558 -};
36559 +} __no_const;
36560
36561 struct bq27x00_device_info {
36562 struct device *dev;
36563 diff -urNp linux-2.6.32.44/drivers/rtc/rtc-dev.c linux-2.6.32.44/drivers/rtc/rtc-dev.c
36564 --- linux-2.6.32.44/drivers/rtc/rtc-dev.c 2011-03-27 14:31:47.000000000 -0400
36565 +++ linux-2.6.32.44/drivers/rtc/rtc-dev.c 2011-04-17 15:56:46.000000000 -0400
36566 @@ -14,6 +14,7 @@
36567 #include <linux/module.h>
36568 #include <linux/rtc.h>
36569 #include <linux/sched.h>
36570 +#include <linux/grsecurity.h>
36571 #include "rtc-core.h"
36572
36573 static dev_t rtc_devt;
36574 @@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *f
36575 if (copy_from_user(&tm, uarg, sizeof(tm)))
36576 return -EFAULT;
36577
36578 + gr_log_timechange();
36579 +
36580 return rtc_set_time(rtc, &tm);
36581
36582 case RTC_PIE_ON:
36583 diff -urNp linux-2.6.32.44/drivers/s390/cio/qdio_perf.c linux-2.6.32.44/drivers/s390/cio/qdio_perf.c
36584 --- linux-2.6.32.44/drivers/s390/cio/qdio_perf.c 2011-03-27 14:31:47.000000000 -0400
36585 +++ linux-2.6.32.44/drivers/s390/cio/qdio_perf.c 2011-04-17 15:56:46.000000000 -0400
36586 @@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_
36587 static int qdio_perf_proc_show(struct seq_file *m, void *v)
36588 {
36589 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
36590 - (long)atomic_long_read(&perf_stats.qdio_int));
36591 + (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
36592 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
36593 - (long)atomic_long_read(&perf_stats.pci_int));
36594 + (long)atomic_long_read_unchecked(&perf_stats.pci_int));
36595 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
36596 - (long)atomic_long_read(&perf_stats.thin_int));
36597 + (long)atomic_long_read_unchecked(&perf_stats.thin_int));
36598 seq_printf(m, "\n");
36599 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
36600 - (long)atomic_long_read(&perf_stats.tasklet_inbound));
36601 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
36602 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
36603 - (long)atomic_long_read(&perf_stats.tasklet_outbound));
36604 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
36605 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
36606 - (long)atomic_long_read(&perf_stats.tasklet_thinint),
36607 - (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
36608 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
36609 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
36610 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
36611 - (long)atomic_long_read(&perf_stats.thinint_inbound),
36612 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
36613 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
36614 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
36615 seq_printf(m, "\n");
36616 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
36617 - (long)atomic_long_read(&perf_stats.siga_in));
36618 + (long)atomic_long_read_unchecked(&perf_stats.siga_in));
36619 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
36620 - (long)atomic_long_read(&perf_stats.siga_out));
36621 + (long)atomic_long_read_unchecked(&perf_stats.siga_out));
36622 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
36623 - (long)atomic_long_read(&perf_stats.siga_sync));
36624 + (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
36625 seq_printf(m, "\n");
36626 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
36627 - (long)atomic_long_read(&perf_stats.inbound_handler));
36628 + (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
36629 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
36630 - (long)atomic_long_read(&perf_stats.outbound_handler));
36631 + (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
36632 seq_printf(m, "\n");
36633 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
36634 - (long)atomic_long_read(&perf_stats.fast_requeue));
36635 + (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
36636 seq_printf(m, "Number of outbound target full condition\t: %li\n",
36637 - (long)atomic_long_read(&perf_stats.outbound_target_full));
36638 + (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
36639 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
36640 - (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
36641 + (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
36642 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
36643 - (long)atomic_long_read(&perf_stats.debug_stop_polling));
36644 + (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
36645 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
36646 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
36647 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
36648 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
36649 - (long)atomic_long_read(&perf_stats.debug_eqbs_all),
36650 - (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
36651 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
36652 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
36653 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
36654 - (long)atomic_long_read(&perf_stats.debug_sqbs_all),
36655 - (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
36656 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
36657 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
36658 seq_printf(m, "\n");
36659 return 0;
36660 }
36661 diff -urNp linux-2.6.32.44/drivers/s390/cio/qdio_perf.h linux-2.6.32.44/drivers/s390/cio/qdio_perf.h
36662 --- linux-2.6.32.44/drivers/s390/cio/qdio_perf.h 2011-03-27 14:31:47.000000000 -0400
36663 +++ linux-2.6.32.44/drivers/s390/cio/qdio_perf.h 2011-04-17 15:56:46.000000000 -0400
36664 @@ -13,46 +13,46 @@
36665
36666 struct qdio_perf_stats {
36667 /* interrupt handler calls */
36668 - atomic_long_t qdio_int;
36669 - atomic_long_t pci_int;
36670 - atomic_long_t thin_int;
36671 + atomic_long_unchecked_t qdio_int;
36672 + atomic_long_unchecked_t pci_int;
36673 + atomic_long_unchecked_t thin_int;
36674
36675 /* tasklet runs */
36676 - atomic_long_t tasklet_inbound;
36677 - atomic_long_t tasklet_outbound;
36678 - atomic_long_t tasklet_thinint;
36679 - atomic_long_t tasklet_thinint_loop;
36680 - atomic_long_t thinint_inbound;
36681 - atomic_long_t thinint_inbound_loop;
36682 - atomic_long_t thinint_inbound_loop2;
36683 + atomic_long_unchecked_t tasklet_inbound;
36684 + atomic_long_unchecked_t tasklet_outbound;
36685 + atomic_long_unchecked_t tasklet_thinint;
36686 + atomic_long_unchecked_t tasklet_thinint_loop;
36687 + atomic_long_unchecked_t thinint_inbound;
36688 + atomic_long_unchecked_t thinint_inbound_loop;
36689 + atomic_long_unchecked_t thinint_inbound_loop2;
36690
36691 /* signal adapter calls */
36692 - atomic_long_t siga_out;
36693 - atomic_long_t siga_in;
36694 - atomic_long_t siga_sync;
36695 + atomic_long_unchecked_t siga_out;
36696 + atomic_long_unchecked_t siga_in;
36697 + atomic_long_unchecked_t siga_sync;
36698
36699 /* misc */
36700 - atomic_long_t inbound_handler;
36701 - atomic_long_t outbound_handler;
36702 - atomic_long_t fast_requeue;
36703 - atomic_long_t outbound_target_full;
36704 + atomic_long_unchecked_t inbound_handler;
36705 + atomic_long_unchecked_t outbound_handler;
36706 + atomic_long_unchecked_t fast_requeue;
36707 + atomic_long_unchecked_t outbound_target_full;
36708
36709 /* for debugging */
36710 - atomic_long_t debug_tl_out_timer;
36711 - atomic_long_t debug_stop_polling;
36712 - atomic_long_t debug_eqbs_all;
36713 - atomic_long_t debug_eqbs_incomplete;
36714 - atomic_long_t debug_sqbs_all;
36715 - atomic_long_t debug_sqbs_incomplete;
36716 + atomic_long_unchecked_t debug_tl_out_timer;
36717 + atomic_long_unchecked_t debug_stop_polling;
36718 + atomic_long_unchecked_t debug_eqbs_all;
36719 + atomic_long_unchecked_t debug_eqbs_incomplete;
36720 + atomic_long_unchecked_t debug_sqbs_all;
36721 + atomic_long_unchecked_t debug_sqbs_incomplete;
36722 };
36723
36724 extern struct qdio_perf_stats perf_stats;
36725 extern int qdio_performance_stats;
36726
36727 -static inline void qdio_perf_stat_inc(atomic_long_t *count)
36728 +static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
36729 {
36730 if (qdio_performance_stats)
36731 - atomic_long_inc(count);
36732 + atomic_long_inc_unchecked(count);
36733 }
36734
36735 int qdio_setup_perf_stats(void);
36736 diff -urNp linux-2.6.32.44/drivers/scsi/aacraid/aacraid.h linux-2.6.32.44/drivers/scsi/aacraid/aacraid.h
36737 --- linux-2.6.32.44/drivers/scsi/aacraid/aacraid.h 2011-03-27 14:31:47.000000000 -0400
36738 +++ linux-2.6.32.44/drivers/scsi/aacraid/aacraid.h 2011-08-05 20:33:55.000000000 -0400
36739 @@ -471,7 +471,7 @@ struct adapter_ops
36740 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
36741 /* Administrative operations */
36742 int (*adapter_comm)(struct aac_dev * dev, int comm);
36743 -};
36744 +} __no_const;
36745
36746 /*
36747 * Define which interrupt handler needs to be installed
36748 diff -urNp linux-2.6.32.44/drivers/scsi/aacraid/commctrl.c linux-2.6.32.44/drivers/scsi/aacraid/commctrl.c
36749 --- linux-2.6.32.44/drivers/scsi/aacraid/commctrl.c 2011-03-27 14:31:47.000000000 -0400
36750 +++ linux-2.6.32.44/drivers/scsi/aacraid/commctrl.c 2011-05-16 21:46:57.000000000 -0400
36751 @@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_d
36752 u32 actual_fibsize64, actual_fibsize = 0;
36753 int i;
36754
36755 + pax_track_stack();
36756
36757 if (dev->in_reset) {
36758 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
36759 diff -urNp linux-2.6.32.44/drivers/scsi/aic94xx/aic94xx_init.c linux-2.6.32.44/drivers/scsi/aic94xx/aic94xx_init.c
36760 --- linux-2.6.32.44/drivers/scsi/aic94xx/aic94xx_init.c 2011-03-27 14:31:47.000000000 -0400
36761 +++ linux-2.6.32.44/drivers/scsi/aic94xx/aic94xx_init.c 2011-04-17 15:56:46.000000000 -0400
36762 @@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(stru
36763 flash_error_table[i].reason);
36764 }
36765
36766 -static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
36767 +static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
36768 asd_show_update_bios, asd_store_update_bios);
36769
36770 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
36771 diff -urNp linux-2.6.32.44/drivers/scsi/bfa/bfa_iocfc.h linux-2.6.32.44/drivers/scsi/bfa/bfa_iocfc.h
36772 --- linux-2.6.32.44/drivers/scsi/bfa/bfa_iocfc.h 2011-03-27 14:31:47.000000000 -0400
36773 +++ linux-2.6.32.44/drivers/scsi/bfa/bfa_iocfc.h 2011-08-05 20:33:55.000000000 -0400
36774 @@ -61,7 +61,7 @@ struct bfa_hwif_s {
36775 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
36776 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
36777 u32 *nvecs, u32 *maxvec);
36778 -};
36779 +} __no_const;
36780 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
36781
36782 struct bfa_iocfc_s {
36783 diff -urNp linux-2.6.32.44/drivers/scsi/bfa/bfa_ioc.h linux-2.6.32.44/drivers/scsi/bfa/bfa_ioc.h
36784 --- linux-2.6.32.44/drivers/scsi/bfa/bfa_ioc.h 2011-03-27 14:31:47.000000000 -0400
36785 +++ linux-2.6.32.44/drivers/scsi/bfa/bfa_ioc.h 2011-08-05 20:33:55.000000000 -0400
36786 @@ -127,7 +127,7 @@ struct bfa_ioc_cbfn_s {
36787 bfa_ioc_disable_cbfn_t disable_cbfn;
36788 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
36789 bfa_ioc_reset_cbfn_t reset_cbfn;
36790 -};
36791 +} __no_const;
36792
36793 /**
36794 * Heartbeat failure notification queue element.
36795 diff -urNp linux-2.6.32.44/drivers/scsi/BusLogic.c linux-2.6.32.44/drivers/scsi/BusLogic.c
36796 --- linux-2.6.32.44/drivers/scsi/BusLogic.c 2011-03-27 14:31:47.000000000 -0400
36797 +++ linux-2.6.32.44/drivers/scsi/BusLogic.c 2011-05-16 21:46:57.000000000 -0400
36798 @@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFla
36799 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
36800 *PrototypeHostAdapter)
36801 {
36802 + pax_track_stack();
36803 +
36804 /*
36805 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
36806 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
36807 diff -urNp linux-2.6.32.44/drivers/scsi/dpt_i2o.c linux-2.6.32.44/drivers/scsi/dpt_i2o.c
36808 --- linux-2.6.32.44/drivers/scsi/dpt_i2o.c 2011-03-27 14:31:47.000000000 -0400
36809 +++ linux-2.6.32.44/drivers/scsi/dpt_i2o.c 2011-05-16 21:46:57.000000000 -0400
36810 @@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* p
36811 dma_addr_t addr;
36812 ulong flags = 0;
36813
36814 + pax_track_stack();
36815 +
36816 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
36817 // get user msg size in u32s
36818 if(get_user(size, &user_msg[0])){
36819 @@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
36820 s32 rcode;
36821 dma_addr_t addr;
36822
36823 + pax_track_stack();
36824 +
36825 memset(msg, 0 , sizeof(msg));
36826 len = scsi_bufflen(cmd);
36827 direction = 0x00000000;
36828 diff -urNp linux-2.6.32.44/drivers/scsi/eata.c linux-2.6.32.44/drivers/scsi/eata.c
36829 --- linux-2.6.32.44/drivers/scsi/eata.c 2011-03-27 14:31:47.000000000 -0400
36830 +++ linux-2.6.32.44/drivers/scsi/eata.c 2011-05-16 21:46:57.000000000 -0400
36831 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
36832 struct hostdata *ha;
36833 char name[16];
36834
36835 + pax_track_stack();
36836 +
36837 sprintf(name, "%s%d", driver_name, j);
36838
36839 if (!request_region(port_base, REGION_SIZE, driver_name)) {
36840 diff -urNp linux-2.6.32.44/drivers/scsi/fcoe/libfcoe.c linux-2.6.32.44/drivers/scsi/fcoe/libfcoe.c
36841 --- linux-2.6.32.44/drivers/scsi/fcoe/libfcoe.c 2011-03-27 14:31:47.000000000 -0400
36842 +++ linux-2.6.32.44/drivers/scsi/fcoe/libfcoe.c 2011-05-16 21:46:57.000000000 -0400
36843 @@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fc
36844 size_t rlen;
36845 size_t dlen;
36846
36847 + pax_track_stack();
36848 +
36849 fiph = (struct fip_header *)skb->data;
36850 sub = fiph->fip_subcode;
36851 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
36852 diff -urNp linux-2.6.32.44/drivers/scsi/fnic/fnic_main.c linux-2.6.32.44/drivers/scsi/fnic/fnic_main.c
36853 --- linux-2.6.32.44/drivers/scsi/fnic/fnic_main.c 2011-03-27 14:31:47.000000000 -0400
36854 +++ linux-2.6.32.44/drivers/scsi/fnic/fnic_main.c 2011-08-05 20:33:55.000000000 -0400
36855 @@ -669,7 +669,7 @@ static int __devinit fnic_probe(struct p
36856 /* Start local port initiatialization */
36857
36858 lp->link_up = 0;
36859 - lp->tt = fnic_transport_template;
36860 + memcpy((void *)&lp->tt, &fnic_transport_template, sizeof(fnic_transport_template));
36861
36862 lp->max_retry_count = fnic->config.flogi_retries;
36863 lp->max_rport_retry_count = fnic->config.plogi_retries;
36864 diff -urNp linux-2.6.32.44/drivers/scsi/gdth.c linux-2.6.32.44/drivers/scsi/gdth.c
36865 --- linux-2.6.32.44/drivers/scsi/gdth.c 2011-03-27 14:31:47.000000000 -0400
36866 +++ linux-2.6.32.44/drivers/scsi/gdth.c 2011-05-16 21:46:57.000000000 -0400
36867 @@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
36868 ulong flags;
36869 gdth_ha_str *ha;
36870
36871 + pax_track_stack();
36872 +
36873 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
36874 return -EFAULT;
36875 ha = gdth_find_ha(ldrv.ionode);
36876 @@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg
36877 gdth_ha_str *ha;
36878 int rval;
36879
36880 + pax_track_stack();
36881 +
36882 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
36883 res.number >= MAX_HDRIVES)
36884 return -EFAULT;
36885 @@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg,
36886 gdth_ha_str *ha;
36887 int rval;
36888
36889 + pax_track_stack();
36890 +
36891 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
36892 return -EFAULT;
36893 ha = gdth_find_ha(gen.ionode);
36894 @@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
36895 int i;
36896 gdth_cmd_str gdtcmd;
36897 char cmnd[MAX_COMMAND_SIZE];
36898 +
36899 + pax_track_stack();
36900 +
36901 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
36902
36903 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
36904 diff -urNp linux-2.6.32.44/drivers/scsi/gdth_proc.c linux-2.6.32.44/drivers/scsi/gdth_proc.c
36905 --- linux-2.6.32.44/drivers/scsi/gdth_proc.c 2011-03-27 14:31:47.000000000 -0400
36906 +++ linux-2.6.32.44/drivers/scsi/gdth_proc.c 2011-05-16 21:46:57.000000000 -0400
36907 @@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi
36908 ulong64 paddr;
36909
36910 char cmnd[MAX_COMMAND_SIZE];
36911 +
36912 + pax_track_stack();
36913 +
36914 memset(cmnd, 0xff, 12);
36915 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
36916
36917 @@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,ch
36918 gdth_hget_str *phg;
36919 char cmnd[MAX_COMMAND_SIZE];
36920
36921 + pax_track_stack();
36922 +
36923 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
36924 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
36925 if (!gdtcmd || !estr)
36926 diff -urNp linux-2.6.32.44/drivers/scsi/hosts.c linux-2.6.32.44/drivers/scsi/hosts.c
36927 --- linux-2.6.32.44/drivers/scsi/hosts.c 2011-03-27 14:31:47.000000000 -0400
36928 +++ linux-2.6.32.44/drivers/scsi/hosts.c 2011-05-04 17:56:28.000000000 -0400
36929 @@ -40,7 +40,7 @@
36930 #include "scsi_logging.h"
36931
36932
36933 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
36934 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
36935
36936
36937 static void scsi_host_cls_release(struct device *dev)
36938 @@ -344,7 +344,7 @@ struct Scsi_Host *scsi_host_alloc(struct
36939 * subtract one because we increment first then return, but we need to
36940 * know what the next host number was before increment
36941 */
36942 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
36943 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
36944 shost->dma_channel = 0xff;
36945
36946 /* These three are default values which can be overridden */
36947 diff -urNp linux-2.6.32.44/drivers/scsi/ipr.c linux-2.6.32.44/drivers/scsi/ipr.c
36948 --- linux-2.6.32.44/drivers/scsi/ipr.c 2011-03-27 14:31:47.000000000 -0400
36949 +++ linux-2.6.32.44/drivers/scsi/ipr.c 2011-04-17 15:56:46.000000000 -0400
36950 @@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_q
36951 return true;
36952 }
36953
36954 -static struct ata_port_operations ipr_sata_ops = {
36955 +static const struct ata_port_operations ipr_sata_ops = {
36956 .phy_reset = ipr_ata_phy_reset,
36957 .hardreset = ipr_sata_reset,
36958 .post_internal_cmd = ipr_ata_post_internal,
36959 diff -urNp linux-2.6.32.44/drivers/scsi/ips.h linux-2.6.32.44/drivers/scsi/ips.h
36960 --- linux-2.6.32.44/drivers/scsi/ips.h 2011-03-27 14:31:47.000000000 -0400
36961 +++ linux-2.6.32.44/drivers/scsi/ips.h 2011-08-05 20:33:55.000000000 -0400
36962 @@ -1027,7 +1027,7 @@ typedef struct {
36963 int (*intr)(struct ips_ha *);
36964 void (*enableint)(struct ips_ha *);
36965 uint32_t (*statupd)(struct ips_ha *);
36966 -} ips_hw_func_t;
36967 +} __no_const ips_hw_func_t;
36968
36969 typedef struct ips_ha {
36970 uint8_t ha_id[IPS_MAX_CHANNELS+1];
36971 diff -urNp linux-2.6.32.44/drivers/scsi/libfc/fc_disc.c linux-2.6.32.44/drivers/scsi/libfc/fc_disc.c
36972 --- linux-2.6.32.44/drivers/scsi/libfc/fc_disc.c 2011-03-27 14:31:47.000000000 -0400
36973 +++ linux-2.6.32.44/drivers/scsi/libfc/fc_disc.c 2011-08-05 20:33:55.000000000 -0400
36974 @@ -715,16 +715,16 @@ int fc_disc_init(struct fc_lport *lport)
36975 struct fc_disc *disc;
36976
36977 if (!lport->tt.disc_start)
36978 - lport->tt.disc_start = fc_disc_start;
36979 + *(void **)&lport->tt.disc_start = fc_disc_start;
36980
36981 if (!lport->tt.disc_stop)
36982 - lport->tt.disc_stop = fc_disc_stop;
36983 + *(void **)&lport->tt.disc_stop = fc_disc_stop;
36984
36985 if (!lport->tt.disc_stop_final)
36986 - lport->tt.disc_stop_final = fc_disc_stop_final;
36987 + *(void **)&lport->tt.disc_stop_final = fc_disc_stop_final;
36988
36989 if (!lport->tt.disc_recv_req)
36990 - lport->tt.disc_recv_req = fc_disc_recv_req;
36991 + *(void **)&lport->tt.disc_recv_req = fc_disc_recv_req;
36992
36993 disc = &lport->disc;
36994 INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout);
36995 diff -urNp linux-2.6.32.44/drivers/scsi/libfc/fc_elsct.c linux-2.6.32.44/drivers/scsi/libfc/fc_elsct.c
36996 --- linux-2.6.32.44/drivers/scsi/libfc/fc_elsct.c 2011-03-27 14:31:47.000000000 -0400
36997 +++ linux-2.6.32.44/drivers/scsi/libfc/fc_elsct.c 2011-08-05 20:33:55.000000000 -0400
36998 @@ -67,7 +67,7 @@ static struct fc_seq *fc_elsct_send(stru
36999 int fc_elsct_init(struct fc_lport *lport)
37000 {
37001 if (!lport->tt.elsct_send)
37002 - lport->tt.elsct_send = fc_elsct_send;
37003 + *(void **)&lport->tt.elsct_send = fc_elsct_send;
37004
37005 return 0;
37006 }
37007 diff -urNp linux-2.6.32.44/drivers/scsi/libfc/fc_exch.c linux-2.6.32.44/drivers/scsi/libfc/fc_exch.c
37008 --- linux-2.6.32.44/drivers/scsi/libfc/fc_exch.c 2011-03-27 14:31:47.000000000 -0400
37009 +++ linux-2.6.32.44/drivers/scsi/libfc/fc_exch.c 2011-08-05 20:33:55.000000000 -0400
37010 @@ -86,12 +86,12 @@ struct fc_exch_mgr {
37011 * all together if not used XXX
37012 */
37013 struct {
37014 - atomic_t no_free_exch;
37015 - atomic_t no_free_exch_xid;
37016 - atomic_t xid_not_found;
37017 - atomic_t xid_busy;
37018 - atomic_t seq_not_found;
37019 - atomic_t non_bls_resp;
37020 + atomic_unchecked_t no_free_exch;
37021 + atomic_unchecked_t no_free_exch_xid;
37022 + atomic_unchecked_t xid_not_found;
37023 + atomic_unchecked_t xid_busy;
37024 + atomic_unchecked_t seq_not_found;
37025 + atomic_unchecked_t non_bls_resp;
37026 } stats;
37027 };
37028 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
37029 @@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(
37030 /* allocate memory for exchange */
37031 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
37032 if (!ep) {
37033 - atomic_inc(&mp->stats.no_free_exch);
37034 + atomic_inc_unchecked(&mp->stats.no_free_exch);
37035 goto out;
37036 }
37037 memset(ep, 0, sizeof(*ep));
37038 @@ -557,7 +557,7 @@ out:
37039 return ep;
37040 err:
37041 spin_unlock_bh(&pool->lock);
37042 - atomic_inc(&mp->stats.no_free_exch_xid);
37043 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
37044 mempool_free(ep, mp->ep_pool);
37045 return NULL;
37046 }
37047 @@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_look
37048 xid = ntohs(fh->fh_ox_id); /* we originated exch */
37049 ep = fc_exch_find(mp, xid);
37050 if (!ep) {
37051 - atomic_inc(&mp->stats.xid_not_found);
37052 + atomic_inc_unchecked(&mp->stats.xid_not_found);
37053 reject = FC_RJT_OX_ID;
37054 goto out;
37055 }
37056 @@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_look
37057 ep = fc_exch_find(mp, xid);
37058 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
37059 if (ep) {
37060 - atomic_inc(&mp->stats.xid_busy);
37061 + atomic_inc_unchecked(&mp->stats.xid_busy);
37062 reject = FC_RJT_RX_ID;
37063 goto rel;
37064 }
37065 @@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_look
37066 }
37067 xid = ep->xid; /* get our XID */
37068 } else if (!ep) {
37069 - atomic_inc(&mp->stats.xid_not_found);
37070 + atomic_inc_unchecked(&mp->stats.xid_not_found);
37071 reject = FC_RJT_RX_ID; /* XID not found */
37072 goto out;
37073 }
37074 @@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_look
37075 } else {
37076 sp = &ep->seq;
37077 if (sp->id != fh->fh_seq_id) {
37078 - atomic_inc(&mp->stats.seq_not_found);
37079 + atomic_inc_unchecked(&mp->stats.seq_not_found);
37080 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
37081 goto rel;
37082 }
37083 @@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct
37084
37085 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
37086 if (!ep) {
37087 - atomic_inc(&mp->stats.xid_not_found);
37088 + atomic_inc_unchecked(&mp->stats.xid_not_found);
37089 goto out;
37090 }
37091 if (ep->esb_stat & ESB_ST_COMPLETE) {
37092 - atomic_inc(&mp->stats.xid_not_found);
37093 + atomic_inc_unchecked(&mp->stats.xid_not_found);
37094 goto out;
37095 }
37096 if (ep->rxid == FC_XID_UNKNOWN)
37097 ep->rxid = ntohs(fh->fh_rx_id);
37098 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
37099 - atomic_inc(&mp->stats.xid_not_found);
37100 + atomic_inc_unchecked(&mp->stats.xid_not_found);
37101 goto rel;
37102 }
37103 if (ep->did != ntoh24(fh->fh_s_id) &&
37104 ep->did != FC_FID_FLOGI) {
37105 - atomic_inc(&mp->stats.xid_not_found);
37106 + atomic_inc_unchecked(&mp->stats.xid_not_found);
37107 goto rel;
37108 }
37109 sof = fr_sof(fp);
37110 @@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct
37111 } else {
37112 sp = &ep->seq;
37113 if (sp->id != fh->fh_seq_id) {
37114 - atomic_inc(&mp->stats.seq_not_found);
37115 + atomic_inc_unchecked(&mp->stats.seq_not_found);
37116 goto rel;
37117 }
37118 }
37119 @@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_
37120 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
37121
37122 if (!sp)
37123 - atomic_inc(&mp->stats.xid_not_found);
37124 + atomic_inc_unchecked(&mp->stats.xid_not_found);
37125 else
37126 - atomic_inc(&mp->stats.non_bls_resp);
37127 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
37128
37129 fc_frame_free(fp);
37130 }
37131 @@ -2027,25 +2027,25 @@ EXPORT_SYMBOL(fc_exch_recv);
37132 int fc_exch_init(struct fc_lport *lp)
37133 {
37134 if (!lp->tt.seq_start_next)
37135 - lp->tt.seq_start_next = fc_seq_start_next;
37136 + *(void **)&lp->tt.seq_start_next = fc_seq_start_next;
37137
37138 if (!lp->tt.exch_seq_send)
37139 - lp->tt.exch_seq_send = fc_exch_seq_send;
37140 + *(void **)&lp->tt.exch_seq_send = fc_exch_seq_send;
37141
37142 if (!lp->tt.seq_send)
37143 - lp->tt.seq_send = fc_seq_send;
37144 + *(void **)&lp->tt.seq_send = fc_seq_send;
37145
37146 if (!lp->tt.seq_els_rsp_send)
37147 - lp->tt.seq_els_rsp_send = fc_seq_els_rsp_send;
37148 + *(void **)&lp->tt.seq_els_rsp_send = fc_seq_els_rsp_send;
37149
37150 if (!lp->tt.exch_done)
37151 - lp->tt.exch_done = fc_exch_done;
37152 + *(void **)&lp->tt.exch_done = fc_exch_done;
37153
37154 if (!lp->tt.exch_mgr_reset)
37155 - lp->tt.exch_mgr_reset = fc_exch_mgr_reset;
37156 + *(void **)&lp->tt.exch_mgr_reset = fc_exch_mgr_reset;
37157
37158 if (!lp->tt.seq_exch_abort)
37159 - lp->tt.seq_exch_abort = fc_seq_exch_abort;
37160 + *(void **)&lp->tt.seq_exch_abort = fc_seq_exch_abort;
37161
37162 /*
37163 * Initialize fc_cpu_mask and fc_cpu_order. The
37164 diff -urNp linux-2.6.32.44/drivers/scsi/libfc/fc_fcp.c linux-2.6.32.44/drivers/scsi/libfc/fc_fcp.c
37165 --- linux-2.6.32.44/drivers/scsi/libfc/fc_fcp.c 2011-03-27 14:31:47.000000000 -0400
37166 +++ linux-2.6.32.44/drivers/scsi/libfc/fc_fcp.c 2011-08-05 20:33:55.000000000 -0400
37167 @@ -2105,13 +2105,13 @@ int fc_fcp_init(struct fc_lport *lp)
37168 struct fc_fcp_internal *si;
37169
37170 if (!lp->tt.fcp_cmd_send)
37171 - lp->tt.fcp_cmd_send = fc_fcp_cmd_send;
37172 + *(void **)&lp->tt.fcp_cmd_send = fc_fcp_cmd_send;
37173
37174 if (!lp->tt.fcp_cleanup)
37175 - lp->tt.fcp_cleanup = fc_fcp_cleanup;
37176 + *(void **)&lp->tt.fcp_cleanup = fc_fcp_cleanup;
37177
37178 if (!lp->tt.fcp_abort_io)
37179 - lp->tt.fcp_abort_io = fc_fcp_abort_io;
37180 + *(void **)&lp->tt.fcp_abort_io = fc_fcp_abort_io;
37181
37182 si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL);
37183 if (!si)
37184 diff -urNp linux-2.6.32.44/drivers/scsi/libfc/fc_lport.c linux-2.6.32.44/drivers/scsi/libfc/fc_lport.c
37185 --- linux-2.6.32.44/drivers/scsi/libfc/fc_lport.c 2011-03-27 14:31:47.000000000 -0400
37186 +++ linux-2.6.32.44/drivers/scsi/libfc/fc_lport.c 2011-08-05 20:33:55.000000000 -0400
37187 @@ -569,7 +569,7 @@ int fc_lport_destroy(struct fc_lport *lp
37188 mutex_lock(&lport->lp_mutex);
37189 lport->state = LPORT_ST_DISABLED;
37190 lport->link_up = 0;
37191 - lport->tt.frame_send = fc_frame_drop;
37192 + *(void **)&lport->tt.frame_send = fc_frame_drop;
37193 mutex_unlock(&lport->lp_mutex);
37194
37195 lport->tt.fcp_abort_io(lport);
37196 @@ -1477,10 +1477,10 @@ EXPORT_SYMBOL(fc_lport_config);
37197 int fc_lport_init(struct fc_lport *lport)
37198 {
37199 if (!lport->tt.lport_recv)
37200 - lport->tt.lport_recv = fc_lport_recv_req;
37201 + *(void **)&lport->tt.lport_recv = fc_lport_recv_req;
37202
37203 if (!lport->tt.lport_reset)
37204 - lport->tt.lport_reset = fc_lport_reset;
37205 + *(void **)&lport->tt.lport_reset = fc_lport_reset;
37206
37207 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
37208 fc_host_node_name(lport->host) = lport->wwnn;
37209 diff -urNp linux-2.6.32.44/drivers/scsi/libfc/fc_rport.c linux-2.6.32.44/drivers/scsi/libfc/fc_rport.c
37210 --- linux-2.6.32.44/drivers/scsi/libfc/fc_rport.c 2011-03-27 14:31:47.000000000 -0400
37211 +++ linux-2.6.32.44/drivers/scsi/libfc/fc_rport.c 2011-08-05 20:33:55.000000000 -0400
37212 @@ -1566,25 +1566,25 @@ static void fc_rport_flush_queue(void)
37213 int fc_rport_init(struct fc_lport *lport)
37214 {
37215 if (!lport->tt.rport_lookup)
37216 - lport->tt.rport_lookup = fc_rport_lookup;
37217 + *(void **)&lport->tt.rport_lookup = fc_rport_lookup;
37218
37219 if (!lport->tt.rport_create)
37220 - lport->tt.rport_create = fc_rport_create;
37221 + *(void **)&lport->tt.rport_create = fc_rport_create;
37222
37223 if (!lport->tt.rport_login)
37224 - lport->tt.rport_login = fc_rport_login;
37225 + *(void **)&lport->tt.rport_login = fc_rport_login;
37226
37227 if (!lport->tt.rport_logoff)
37228 - lport->tt.rport_logoff = fc_rport_logoff;
37229 + *(void **)&lport->tt.rport_logoff = fc_rport_logoff;
37230
37231 if (!lport->tt.rport_recv_req)
37232 - lport->tt.rport_recv_req = fc_rport_recv_req;
37233 + *(void **)&lport->tt.rport_recv_req = fc_rport_recv_req;
37234
37235 if (!lport->tt.rport_flush_queue)
37236 - lport->tt.rport_flush_queue = fc_rport_flush_queue;
37237 + *(void **)&lport->tt.rport_flush_queue = fc_rport_flush_queue;
37238
37239 if (!lport->tt.rport_destroy)
37240 - lport->tt.rport_destroy = fc_rport_destroy;
37241 + *(void **)&lport->tt.rport_destroy = fc_rport_destroy;
37242
37243 return 0;
37244 }
37245 diff -urNp linux-2.6.32.44/drivers/scsi/libsas/sas_ata.c linux-2.6.32.44/drivers/scsi/libsas/sas_ata.c
37246 --- linux-2.6.32.44/drivers/scsi/libsas/sas_ata.c 2011-03-27 14:31:47.000000000 -0400
37247 +++ linux-2.6.32.44/drivers/scsi/libsas/sas_ata.c 2011-04-23 12:56:11.000000000 -0400
37248 @@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_l
37249 }
37250 }
37251
37252 -static struct ata_port_operations sas_sata_ops = {
37253 +static const struct ata_port_operations sas_sata_ops = {
37254 .phy_reset = sas_ata_phy_reset,
37255 .post_internal_cmd = sas_ata_post_internal,
37256 .qc_defer = ata_std_qc_defer,
37257 diff -urNp linux-2.6.32.44/drivers/scsi/lpfc/lpfc_debugfs.c linux-2.6.32.44/drivers/scsi/lpfc/lpfc_debugfs.c
37258 --- linux-2.6.32.44/drivers/scsi/lpfc/lpfc_debugfs.c 2011-03-27 14:31:47.000000000 -0400
37259 +++ linux-2.6.32.44/drivers/scsi/lpfc/lpfc_debugfs.c 2011-05-16 21:46:57.000000000 -0400
37260 @@ -124,7 +124,7 @@ struct lpfc_debug {
37261 int len;
37262 };
37263
37264 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
37265 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
37266 static unsigned long lpfc_debugfs_start_time = 0L;
37267
37268 /**
37269 @@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
37270 lpfc_debugfs_enable = 0;
37271
37272 len = 0;
37273 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
37274 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
37275 (lpfc_debugfs_max_disc_trc - 1);
37276 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
37277 dtp = vport->disc_trc + i;
37278 @@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
37279 lpfc_debugfs_enable = 0;
37280
37281 len = 0;
37282 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
37283 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
37284 (lpfc_debugfs_max_slow_ring_trc - 1);
37285 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
37286 dtp = phba->slow_ring_trc + i;
37287 @@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
37288 uint32_t *ptr;
37289 char buffer[1024];
37290
37291 + pax_track_stack();
37292 +
37293 off = 0;
37294 spin_lock_irq(&phba->hbalock);
37295
37296 @@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
37297 !vport || !vport->disc_trc)
37298 return;
37299
37300 - index = atomic_inc_return(&vport->disc_trc_cnt) &
37301 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
37302 (lpfc_debugfs_max_disc_trc - 1);
37303 dtp = vport->disc_trc + index;
37304 dtp->fmt = fmt;
37305 dtp->data1 = data1;
37306 dtp->data2 = data2;
37307 dtp->data3 = data3;
37308 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
37309 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
37310 dtp->jif = jiffies;
37311 #endif
37312 return;
37313 @@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
37314 !phba || !phba->slow_ring_trc)
37315 return;
37316
37317 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
37318 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
37319 (lpfc_debugfs_max_slow_ring_trc - 1);
37320 dtp = phba->slow_ring_trc + index;
37321 dtp->fmt = fmt;
37322 dtp->data1 = data1;
37323 dtp->data2 = data2;
37324 dtp->data3 = data3;
37325 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
37326 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
37327 dtp->jif = jiffies;
37328 #endif
37329 return;
37330 @@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
37331 "slow_ring buffer\n");
37332 goto debug_failed;
37333 }
37334 - atomic_set(&phba->slow_ring_trc_cnt, 0);
37335 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
37336 memset(phba->slow_ring_trc, 0,
37337 (sizeof(struct lpfc_debugfs_trc) *
37338 lpfc_debugfs_max_slow_ring_trc));
37339 @@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
37340 "buffer\n");
37341 goto debug_failed;
37342 }
37343 - atomic_set(&vport->disc_trc_cnt, 0);
37344 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
37345
37346 snprintf(name, sizeof(name), "discovery_trace");
37347 vport->debug_disc_trc =
37348 diff -urNp linux-2.6.32.44/drivers/scsi/lpfc/lpfc.h linux-2.6.32.44/drivers/scsi/lpfc/lpfc.h
37349 --- linux-2.6.32.44/drivers/scsi/lpfc/lpfc.h 2011-03-27 14:31:47.000000000 -0400
37350 +++ linux-2.6.32.44/drivers/scsi/lpfc/lpfc.h 2011-05-04 17:56:28.000000000 -0400
37351 @@ -400,7 +400,7 @@ struct lpfc_vport {
37352 struct dentry *debug_nodelist;
37353 struct dentry *vport_debugfs_root;
37354 struct lpfc_debugfs_trc *disc_trc;
37355 - atomic_t disc_trc_cnt;
37356 + atomic_unchecked_t disc_trc_cnt;
37357 #endif
37358 uint8_t stat_data_enabled;
37359 uint8_t stat_data_blocked;
37360 @@ -725,8 +725,8 @@ struct lpfc_hba {
37361 struct timer_list fabric_block_timer;
37362 unsigned long bit_flags;
37363 #define FABRIC_COMANDS_BLOCKED 0
37364 - atomic_t num_rsrc_err;
37365 - atomic_t num_cmd_success;
37366 + atomic_unchecked_t num_rsrc_err;
37367 + atomic_unchecked_t num_cmd_success;
37368 unsigned long last_rsrc_error_time;
37369 unsigned long last_ramp_down_time;
37370 unsigned long last_ramp_up_time;
37371 @@ -740,7 +740,7 @@ struct lpfc_hba {
37372 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
37373 struct dentry *debug_slow_ring_trc;
37374 struct lpfc_debugfs_trc *slow_ring_trc;
37375 - atomic_t slow_ring_trc_cnt;
37376 + atomic_unchecked_t slow_ring_trc_cnt;
37377 #endif
37378
37379 /* Used for deferred freeing of ELS data buffers */
37380 diff -urNp linux-2.6.32.44/drivers/scsi/lpfc/lpfc_init.c linux-2.6.32.44/drivers/scsi/lpfc/lpfc_init.c
37381 --- linux-2.6.32.44/drivers/scsi/lpfc/lpfc_init.c 2011-03-27 14:31:47.000000000 -0400
37382 +++ linux-2.6.32.44/drivers/scsi/lpfc/lpfc_init.c 2011-08-05 20:33:55.000000000 -0400
37383 @@ -8021,8 +8021,10 @@ lpfc_init(void)
37384 printk(LPFC_COPYRIGHT "\n");
37385
37386 if (lpfc_enable_npiv) {
37387 - lpfc_transport_functions.vport_create = lpfc_vport_create;
37388 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
37389 + pax_open_kernel();
37390 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
37391 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
37392 + pax_close_kernel();
37393 }
37394 lpfc_transport_template =
37395 fc_attach_transport(&lpfc_transport_functions);
37396 diff -urNp linux-2.6.32.44/drivers/scsi/lpfc/lpfc_scsi.c linux-2.6.32.44/drivers/scsi/lpfc/lpfc_scsi.c
37397 --- linux-2.6.32.44/drivers/scsi/lpfc/lpfc_scsi.c 2011-03-27 14:31:47.000000000 -0400
37398 +++ linux-2.6.32.44/drivers/scsi/lpfc/lpfc_scsi.c 2011-05-04 17:56:28.000000000 -0400
37399 @@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
37400 uint32_t evt_posted;
37401
37402 spin_lock_irqsave(&phba->hbalock, flags);
37403 - atomic_inc(&phba->num_rsrc_err);
37404 + atomic_inc_unchecked(&phba->num_rsrc_err);
37405 phba->last_rsrc_error_time = jiffies;
37406
37407 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
37408 @@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
37409 unsigned long flags;
37410 struct lpfc_hba *phba = vport->phba;
37411 uint32_t evt_posted;
37412 - atomic_inc(&phba->num_cmd_success);
37413 + atomic_inc_unchecked(&phba->num_cmd_success);
37414
37415 if (vport->cfg_lun_queue_depth <= queue_depth)
37416 return;
37417 @@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
37418 int i;
37419 struct lpfc_rport_data *rdata;
37420
37421 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
37422 - num_cmd_success = atomic_read(&phba->num_cmd_success);
37423 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
37424 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
37425
37426 vports = lpfc_create_vport_work_array(phba);
37427 if (vports != NULL)
37428 @@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
37429 }
37430 }
37431 lpfc_destroy_vport_work_array(phba, vports);
37432 - atomic_set(&phba->num_rsrc_err, 0);
37433 - atomic_set(&phba->num_cmd_success, 0);
37434 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
37435 + atomic_set_unchecked(&phba->num_cmd_success, 0);
37436 }
37437
37438 /**
37439 @@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
37440 }
37441 }
37442 lpfc_destroy_vport_work_array(phba, vports);
37443 - atomic_set(&phba->num_rsrc_err, 0);
37444 - atomic_set(&phba->num_cmd_success, 0);
37445 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
37446 + atomic_set_unchecked(&phba->num_cmd_success, 0);
37447 }
37448
37449 /**
37450 diff -urNp linux-2.6.32.44/drivers/scsi/megaraid/megaraid_mbox.c linux-2.6.32.44/drivers/scsi/megaraid/megaraid_mbox.c
37451 --- linux-2.6.32.44/drivers/scsi/megaraid/megaraid_mbox.c 2011-03-27 14:31:47.000000000 -0400
37452 +++ linux-2.6.32.44/drivers/scsi/megaraid/megaraid_mbox.c 2011-05-16 21:46:57.000000000 -0400
37453 @@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
37454 int rval;
37455 int i;
37456
37457 + pax_track_stack();
37458 +
37459 // Allocate memory for the base list of scb for management module.
37460 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
37461
37462 diff -urNp linux-2.6.32.44/drivers/scsi/osd/osd_initiator.c linux-2.6.32.44/drivers/scsi/osd/osd_initiator.c
37463 --- linux-2.6.32.44/drivers/scsi/osd/osd_initiator.c 2011-03-27 14:31:47.000000000 -0400
37464 +++ linux-2.6.32.44/drivers/scsi/osd/osd_initiator.c 2011-05-16 21:46:57.000000000 -0400
37465 @@ -94,6 +94,8 @@ static int _osd_print_system_info(struct
37466 int nelem = ARRAY_SIZE(get_attrs), a = 0;
37467 int ret;
37468
37469 + pax_track_stack();
37470 +
37471 or = osd_start_request(od, GFP_KERNEL);
37472 if (!or)
37473 return -ENOMEM;
37474 diff -urNp linux-2.6.32.44/drivers/scsi/pmcraid.c linux-2.6.32.44/drivers/scsi/pmcraid.c
37475 --- linux-2.6.32.44/drivers/scsi/pmcraid.c 2011-08-09 18:35:29.000000000 -0400
37476 +++ linux-2.6.32.44/drivers/scsi/pmcraid.c 2011-08-09 18:33:59.000000000 -0400
37477 @@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct sc
37478 res->scsi_dev = scsi_dev;
37479 scsi_dev->hostdata = res;
37480 res->change_detected = 0;
37481 - atomic_set(&res->read_failures, 0);
37482 - atomic_set(&res->write_failures, 0);
37483 + atomic_set_unchecked(&res->read_failures, 0);
37484 + atomic_set_unchecked(&res->write_failures, 0);
37485 rc = 0;
37486 }
37487 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
37488 @@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct
37489
37490 /* If this was a SCSI read/write command keep count of errors */
37491 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
37492 - atomic_inc(&res->read_failures);
37493 + atomic_inc_unchecked(&res->read_failures);
37494 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
37495 - atomic_inc(&res->write_failures);
37496 + atomic_inc_unchecked(&res->write_failures);
37497
37498 if (!RES_IS_GSCSI(res->cfg_entry) &&
37499 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
37500 @@ -4116,7 +4116,7 @@ static void pmcraid_worker_function(stru
37501
37502 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
37503 /* add resources only after host is added into system */
37504 - if (!atomic_read(&pinstance->expose_resources))
37505 + if (!atomic_read_unchecked(&pinstance->expose_resources))
37506 return;
37507
37508 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
37509 @@ -4850,7 +4850,7 @@ static int __devinit pmcraid_init_instan
37510 init_waitqueue_head(&pinstance->reset_wait_q);
37511
37512 atomic_set(&pinstance->outstanding_cmds, 0);
37513 - atomic_set(&pinstance->expose_resources, 0);
37514 + atomic_set_unchecked(&pinstance->expose_resources, 0);
37515
37516 INIT_LIST_HEAD(&pinstance->free_res_q);
37517 INIT_LIST_HEAD(&pinstance->used_res_q);
37518 @@ -5502,7 +5502,7 @@ static int __devinit pmcraid_probe(
37519 /* Schedule worker thread to handle CCN and take care of adding and
37520 * removing devices to OS
37521 */
37522 - atomic_set(&pinstance->expose_resources, 1);
37523 + atomic_set_unchecked(&pinstance->expose_resources, 1);
37524 schedule_work(&pinstance->worker_q);
37525 return rc;
37526
37527 diff -urNp linux-2.6.32.44/drivers/scsi/pmcraid.h linux-2.6.32.44/drivers/scsi/pmcraid.h
37528 --- linux-2.6.32.44/drivers/scsi/pmcraid.h 2011-03-27 14:31:47.000000000 -0400
37529 +++ linux-2.6.32.44/drivers/scsi/pmcraid.h 2011-05-04 17:56:28.000000000 -0400
37530 @@ -690,7 +690,7 @@ struct pmcraid_instance {
37531 atomic_t outstanding_cmds;
37532
37533 /* should add/delete resources to mid-layer now ?*/
37534 - atomic_t expose_resources;
37535 + atomic_unchecked_t expose_resources;
37536
37537 /* Tasklet to handle deferred processing */
37538 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
37539 @@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
37540 struct list_head queue; /* link to "to be exposed" resources */
37541 struct pmcraid_config_table_entry cfg_entry;
37542 struct scsi_device *scsi_dev; /* Link scsi_device structure */
37543 - atomic_t read_failures; /* count of failed READ commands */
37544 - atomic_t write_failures; /* count of failed WRITE commands */
37545 + atomic_unchecked_t read_failures; /* count of failed READ commands */
37546 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
37547
37548 /* To indicate add/delete/modify during CCN */
37549 u8 change_detected;
37550 diff -urNp linux-2.6.32.44/drivers/scsi/qla2xxx/qla_def.h linux-2.6.32.44/drivers/scsi/qla2xxx/qla_def.h
37551 --- linux-2.6.32.44/drivers/scsi/qla2xxx/qla_def.h 2011-03-27 14:31:47.000000000 -0400
37552 +++ linux-2.6.32.44/drivers/scsi/qla2xxx/qla_def.h 2011-08-05 20:33:55.000000000 -0400
37553 @@ -2089,7 +2089,7 @@ struct isp_operations {
37554
37555 int (*get_flash_version) (struct scsi_qla_host *, void *);
37556 int (*start_scsi) (srb_t *);
37557 -};
37558 +} __no_const;
37559
37560 /* MSI-X Support *************************************************************/
37561
37562 diff -urNp linux-2.6.32.44/drivers/scsi/qla4xxx/ql4_def.h linux-2.6.32.44/drivers/scsi/qla4xxx/ql4_def.h
37563 --- linux-2.6.32.44/drivers/scsi/qla4xxx/ql4_def.h 2011-03-27 14:31:47.000000000 -0400
37564 +++ linux-2.6.32.44/drivers/scsi/qla4xxx/ql4_def.h 2011-05-04 17:56:28.000000000 -0400
37565 @@ -240,7 +240,7 @@ struct ddb_entry {
37566 atomic_t retry_relogin_timer; /* Min Time between relogins
37567 * (4000 only) */
37568 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
37569 - atomic_t relogin_retry_count; /* Num of times relogin has been
37570 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
37571 * retried */
37572
37573 uint16_t port;
37574 diff -urNp linux-2.6.32.44/drivers/scsi/qla4xxx/ql4_init.c linux-2.6.32.44/drivers/scsi/qla4xxx/ql4_init.c
37575 --- linux-2.6.32.44/drivers/scsi/qla4xxx/ql4_init.c 2011-03-27 14:31:47.000000000 -0400
37576 +++ linux-2.6.32.44/drivers/scsi/qla4xxx/ql4_init.c 2011-05-04 17:56:28.000000000 -0400
37577 @@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_
37578 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
37579 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
37580 atomic_set(&ddb_entry->relogin_timer, 0);
37581 - atomic_set(&ddb_entry->relogin_retry_count, 0);
37582 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
37583 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
37584 list_add_tail(&ddb_entry->list, &ha->ddb_list);
37585 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
37586 @@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct s
37587 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
37588 atomic_set(&ddb_entry->port_down_timer,
37589 ha->port_down_retry_count);
37590 - atomic_set(&ddb_entry->relogin_retry_count, 0);
37591 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
37592 atomic_set(&ddb_entry->relogin_timer, 0);
37593 clear_bit(DF_RELOGIN, &ddb_entry->flags);
37594 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
37595 diff -urNp linux-2.6.32.44/drivers/scsi/qla4xxx/ql4_os.c linux-2.6.32.44/drivers/scsi/qla4xxx/ql4_os.c
37596 --- linux-2.6.32.44/drivers/scsi/qla4xxx/ql4_os.c 2011-03-27 14:31:47.000000000 -0400
37597 +++ linux-2.6.32.44/drivers/scsi/qla4xxx/ql4_os.c 2011-05-04 17:56:28.000000000 -0400
37598 @@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_ql
37599 ddb_entry->fw_ddb_device_state ==
37600 DDB_DS_SESSION_FAILED) {
37601 /* Reset retry relogin timer */
37602 - atomic_inc(&ddb_entry->relogin_retry_count);
37603 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
37604 DEBUG2(printk("scsi%ld: index[%d] relogin"
37605 " timed out-retrying"
37606 " relogin (%d)\n",
37607 ha->host_no,
37608 ddb_entry->fw_ddb_index,
37609 - atomic_read(&ddb_entry->
37610 + atomic_read_unchecked(&ddb_entry->
37611 relogin_retry_count))
37612 );
37613 start_dpc++;
37614 diff -urNp linux-2.6.32.44/drivers/scsi/scsi.c linux-2.6.32.44/drivers/scsi/scsi.c
37615 --- linux-2.6.32.44/drivers/scsi/scsi.c 2011-03-27 14:31:47.000000000 -0400
37616 +++ linux-2.6.32.44/drivers/scsi/scsi.c 2011-05-04 17:56:28.000000000 -0400
37617 @@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
37618 unsigned long timeout;
37619 int rtn = 0;
37620
37621 - atomic_inc(&cmd->device->iorequest_cnt);
37622 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37623
37624 /* check if the device is still usable */
37625 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
37626 diff -urNp linux-2.6.32.44/drivers/scsi/scsi_debug.c linux-2.6.32.44/drivers/scsi/scsi_debug.c
37627 --- linux-2.6.32.44/drivers/scsi/scsi_debug.c 2011-03-27 14:31:47.000000000 -0400
37628 +++ linux-2.6.32.44/drivers/scsi/scsi_debug.c 2011-05-16 21:46:57.000000000 -0400
37629 @@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_
37630 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
37631 unsigned char *cmd = (unsigned char *)scp->cmnd;
37632
37633 + pax_track_stack();
37634 +
37635 if ((errsts = check_readiness(scp, 1, devip)))
37636 return errsts;
37637 memset(arr, 0, sizeof(arr));
37638 @@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cm
37639 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
37640 unsigned char *cmd = (unsigned char *)scp->cmnd;
37641
37642 + pax_track_stack();
37643 +
37644 if ((errsts = check_readiness(scp, 1, devip)))
37645 return errsts;
37646 memset(arr, 0, sizeof(arr));
37647 diff -urNp linux-2.6.32.44/drivers/scsi/scsi_lib.c linux-2.6.32.44/drivers/scsi/scsi_lib.c
37648 --- linux-2.6.32.44/drivers/scsi/scsi_lib.c 2011-05-10 22:12:01.000000000 -0400
37649 +++ linux-2.6.32.44/drivers/scsi/scsi_lib.c 2011-05-10 22:12:33.000000000 -0400
37650 @@ -1384,7 +1384,7 @@ static void scsi_kill_request(struct req
37651
37652 scsi_init_cmd_errh(cmd);
37653 cmd->result = DID_NO_CONNECT << 16;
37654 - atomic_inc(&cmd->device->iorequest_cnt);
37655 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37656
37657 /*
37658 * SCSI request completion path will do scsi_device_unbusy(),
37659 @@ -1415,9 +1415,9 @@ static void scsi_softirq_done(struct req
37660 */
37661 cmd->serial_number = 0;
37662
37663 - atomic_inc(&cmd->device->iodone_cnt);
37664 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
37665 if (cmd->result)
37666 - atomic_inc(&cmd->device->ioerr_cnt);
37667 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
37668
37669 disposition = scsi_decide_disposition(cmd);
37670 if (disposition != SUCCESS &&
37671 diff -urNp linux-2.6.32.44/drivers/scsi/scsi_sysfs.c linux-2.6.32.44/drivers/scsi/scsi_sysfs.c
37672 --- linux-2.6.32.44/drivers/scsi/scsi_sysfs.c 2011-06-25 12:55:34.000000000 -0400
37673 +++ linux-2.6.32.44/drivers/scsi/scsi_sysfs.c 2011-06-25 12:56:37.000000000 -0400
37674 @@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev,
37675 char *buf) \
37676 { \
37677 struct scsi_device *sdev = to_scsi_device(dev); \
37678 - unsigned long long count = atomic_read(&sdev->field); \
37679 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
37680 return snprintf(buf, 20, "0x%llx\n", count); \
37681 } \
37682 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
37683 diff -urNp linux-2.6.32.44/drivers/scsi/scsi_transport_fc.c linux-2.6.32.44/drivers/scsi/scsi_transport_fc.c
37684 --- linux-2.6.32.44/drivers/scsi/scsi_transport_fc.c 2011-03-27 14:31:47.000000000 -0400
37685 +++ linux-2.6.32.44/drivers/scsi/scsi_transport_fc.c 2011-05-04 17:56:28.000000000 -0400
37686 @@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
37687 * Netlink Infrastructure
37688 */
37689
37690 -static atomic_t fc_event_seq;
37691 +static atomic_unchecked_t fc_event_seq;
37692
37693 /**
37694 * fc_get_event_number - Obtain the next sequential FC event number
37695 @@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
37696 u32
37697 fc_get_event_number(void)
37698 {
37699 - return atomic_add_return(1, &fc_event_seq);
37700 + return atomic_add_return_unchecked(1, &fc_event_seq);
37701 }
37702 EXPORT_SYMBOL(fc_get_event_number);
37703
37704 @@ -641,7 +641,7 @@ static __init int fc_transport_init(void
37705 {
37706 int error;
37707
37708 - atomic_set(&fc_event_seq, 0);
37709 + atomic_set_unchecked(&fc_event_seq, 0);
37710
37711 error = transport_class_register(&fc_host_class);
37712 if (error)
37713 diff -urNp linux-2.6.32.44/drivers/scsi/scsi_transport_iscsi.c linux-2.6.32.44/drivers/scsi/scsi_transport_iscsi.c
37714 --- linux-2.6.32.44/drivers/scsi/scsi_transport_iscsi.c 2011-03-27 14:31:47.000000000 -0400
37715 +++ linux-2.6.32.44/drivers/scsi/scsi_transport_iscsi.c 2011-05-04 17:56:28.000000000 -0400
37716 @@ -81,7 +81,7 @@ struct iscsi_internal {
37717 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
37718 };
37719
37720 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
37721 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
37722 static struct workqueue_struct *iscsi_eh_timer_workq;
37723
37724 /*
37725 @@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_s
37726 int err;
37727
37728 ihost = shost->shost_data;
37729 - session->sid = atomic_add_return(1, &iscsi_session_nr);
37730 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
37731
37732 if (id == ISCSI_MAX_TARGET) {
37733 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
37734 @@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(v
37735 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
37736 ISCSI_TRANSPORT_VERSION);
37737
37738 - atomic_set(&iscsi_session_nr, 0);
37739 + atomic_set_unchecked(&iscsi_session_nr, 0);
37740
37741 err = class_register(&iscsi_transport_class);
37742 if (err)
37743 diff -urNp linux-2.6.32.44/drivers/scsi/scsi_transport_srp.c linux-2.6.32.44/drivers/scsi/scsi_transport_srp.c
37744 --- linux-2.6.32.44/drivers/scsi/scsi_transport_srp.c 2011-03-27 14:31:47.000000000 -0400
37745 +++ linux-2.6.32.44/drivers/scsi/scsi_transport_srp.c 2011-05-04 17:56:28.000000000 -0400
37746 @@ -33,7 +33,7 @@
37747 #include "scsi_transport_srp_internal.h"
37748
37749 struct srp_host_attrs {
37750 - atomic_t next_port_id;
37751 + atomic_unchecked_t next_port_id;
37752 };
37753 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
37754
37755 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
37756 struct Scsi_Host *shost = dev_to_shost(dev);
37757 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
37758
37759 - atomic_set(&srp_host->next_port_id, 0);
37760 + atomic_set_unchecked(&srp_host->next_port_id, 0);
37761 return 0;
37762 }
37763
37764 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
37765 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
37766 rport->roles = ids->roles;
37767
37768 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
37769 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
37770 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
37771
37772 transport_setup_device(&rport->dev);
37773 diff -urNp linux-2.6.32.44/drivers/scsi/sg.c linux-2.6.32.44/drivers/scsi/sg.c
37774 --- linux-2.6.32.44/drivers/scsi/sg.c 2011-03-27 14:31:47.000000000 -0400
37775 +++ linux-2.6.32.44/drivers/scsi/sg.c 2011-04-17 15:56:46.000000000 -0400
37776 @@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
37777 const struct file_operations * fops;
37778 };
37779
37780 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
37781 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
37782 {"allow_dio", &adio_fops},
37783 {"debug", &debug_fops},
37784 {"def_reserved_size", &dressz_fops},
37785 @@ -2307,7 +2307,7 @@ sg_proc_init(void)
37786 {
37787 int k, mask;
37788 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
37789 - struct sg_proc_leaf * leaf;
37790 + const struct sg_proc_leaf * leaf;
37791
37792 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
37793 if (!sg_proc_sgp)
37794 diff -urNp linux-2.6.32.44/drivers/scsi/sym53c8xx_2/sym_glue.c linux-2.6.32.44/drivers/scsi/sym53c8xx_2/sym_glue.c
37795 --- linux-2.6.32.44/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-03-27 14:31:47.000000000 -0400
37796 +++ linux-2.6.32.44/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-05-16 21:46:57.000000000 -0400
37797 @@ -1754,6 +1754,8 @@ static int __devinit sym2_probe(struct p
37798 int do_iounmap = 0;
37799 int do_disable_device = 1;
37800
37801 + pax_track_stack();
37802 +
37803 memset(&sym_dev, 0, sizeof(sym_dev));
37804 memset(&nvram, 0, sizeof(nvram));
37805 sym_dev.pdev = pdev;
37806 diff -urNp linux-2.6.32.44/drivers/serial/kgdboc.c linux-2.6.32.44/drivers/serial/kgdboc.c
37807 --- linux-2.6.32.44/drivers/serial/kgdboc.c 2011-03-27 14:31:47.000000000 -0400
37808 +++ linux-2.6.32.44/drivers/serial/kgdboc.c 2011-04-17 15:56:46.000000000 -0400
37809 @@ -18,7 +18,7 @@
37810
37811 #define MAX_CONFIG_LEN 40
37812
37813 -static struct kgdb_io kgdboc_io_ops;
37814 +static const struct kgdb_io kgdboc_io_ops;
37815
37816 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
37817 static int configured = -1;
37818 @@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void
37819 module_put(THIS_MODULE);
37820 }
37821
37822 -static struct kgdb_io kgdboc_io_ops = {
37823 +static const struct kgdb_io kgdboc_io_ops = {
37824 .name = "kgdboc",
37825 .read_char = kgdboc_get_char,
37826 .write_char = kgdboc_put_char,
37827 diff -urNp linux-2.6.32.44/drivers/spi/spi.c linux-2.6.32.44/drivers/spi/spi.c
37828 --- linux-2.6.32.44/drivers/spi/spi.c 2011-03-27 14:31:47.000000000 -0400
37829 +++ linux-2.6.32.44/drivers/spi/spi.c 2011-05-04 17:56:28.000000000 -0400
37830 @@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, str
37831 EXPORT_SYMBOL_GPL(spi_sync);
37832
37833 /* portable code must never pass more than 32 bytes */
37834 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
37835 +#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
37836
37837 static u8 *buf;
37838
37839 diff -urNp linux-2.6.32.44/drivers/ssb/driver_gige.c linux-2.6.32.44/drivers/ssb/driver_gige.c
37840 --- linux-2.6.32.44/drivers/ssb/driver_gige.c 2011-03-27 14:31:47.000000000 -0400
37841 +++ linux-2.6.32.44/drivers/ssb/driver_gige.c 2011-08-05 20:33:55.000000000 -0400
37842 @@ -180,8 +180,8 @@ static int ssb_gige_probe(struct ssb_dev
37843 dev->pci_controller.io_resource = &dev->io_resource;
37844 dev->pci_controller.mem_resource = &dev->mem_resource;
37845 dev->pci_controller.io_map_base = 0x800;
37846 - dev->pci_ops.read = ssb_gige_pci_read_config;
37847 - dev->pci_ops.write = ssb_gige_pci_write_config;
37848 + *(void **)&dev->pci_ops.read = ssb_gige_pci_read_config;
37849 + *(void **)&dev->pci_ops.write = ssb_gige_pci_write_config;
37850
37851 dev->io_resource.name = SSB_GIGE_IO_RES_NAME;
37852 dev->io_resource.start = 0x800;
37853 diff -urNp linux-2.6.32.44/drivers/staging/android/binder.c linux-2.6.32.44/drivers/staging/android/binder.c
37854 --- linux-2.6.32.44/drivers/staging/android/binder.c 2011-03-27 14:31:47.000000000 -0400
37855 +++ linux-2.6.32.44/drivers/staging/android/binder.c 2011-04-17 15:56:46.000000000 -0400
37856 @@ -2756,7 +2756,7 @@ static void binder_vma_close(struct vm_a
37857 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
37858 }
37859
37860 -static struct vm_operations_struct binder_vm_ops = {
37861 +static const struct vm_operations_struct binder_vm_ops = {
37862 .open = binder_vma_open,
37863 .close = binder_vma_close,
37864 };
37865 diff -urNp linux-2.6.32.44/drivers/staging/b3dfg/b3dfg.c linux-2.6.32.44/drivers/staging/b3dfg/b3dfg.c
37866 --- linux-2.6.32.44/drivers/staging/b3dfg/b3dfg.c 2011-03-27 14:31:47.000000000 -0400
37867 +++ linux-2.6.32.44/drivers/staging/b3dfg/b3dfg.c 2011-04-17 15:56:46.000000000 -0400
37868 @@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_are
37869 return VM_FAULT_NOPAGE;
37870 }
37871
37872 -static struct vm_operations_struct b3dfg_vm_ops = {
37873 +static const struct vm_operations_struct b3dfg_vm_ops = {
37874 .fault = b3dfg_vma_fault,
37875 };
37876
37877 @@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp,
37878 return r;
37879 }
37880
37881 -static struct file_operations b3dfg_fops = {
37882 +static const struct file_operations b3dfg_fops = {
37883 .owner = THIS_MODULE,
37884 .open = b3dfg_open,
37885 .release = b3dfg_release,
37886 diff -urNp linux-2.6.32.44/drivers/staging/comedi/comedi_fops.c linux-2.6.32.44/drivers/staging/comedi/comedi_fops.c
37887 --- linux-2.6.32.44/drivers/staging/comedi/comedi_fops.c 2011-08-09 18:35:29.000000000 -0400
37888 +++ linux-2.6.32.44/drivers/staging/comedi/comedi_fops.c 2011-08-09 18:34:00.000000000 -0400
37889 @@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct
37890 mutex_unlock(&dev->mutex);
37891 }
37892
37893 -static struct vm_operations_struct comedi_vm_ops = {
37894 +static const struct vm_operations_struct comedi_vm_ops = {
37895 .close = comedi_unmap,
37896 };
37897
37898 diff -urNp linux-2.6.32.44/drivers/staging/dream/qdsp5/adsp_driver.c linux-2.6.32.44/drivers/staging/dream/qdsp5/adsp_driver.c
37899 --- linux-2.6.32.44/drivers/staging/dream/qdsp5/adsp_driver.c 2011-03-27 14:31:47.000000000 -0400
37900 +++ linux-2.6.32.44/drivers/staging/dream/qdsp5/adsp_driver.c 2011-04-17 15:56:46.000000000 -0400
37901 @@ -576,7 +576,7 @@ static struct adsp_device *inode_to_devi
37902 static dev_t adsp_devno;
37903 static struct class *adsp_class;
37904
37905 -static struct file_operations adsp_fops = {
37906 +static const struct file_operations adsp_fops = {
37907 .owner = THIS_MODULE,
37908 .open = adsp_open,
37909 .unlocked_ioctl = adsp_ioctl,
37910 diff -urNp linux-2.6.32.44/drivers/staging/dream/qdsp5/audio_aac.c linux-2.6.32.44/drivers/staging/dream/qdsp5/audio_aac.c
37911 --- linux-2.6.32.44/drivers/staging/dream/qdsp5/audio_aac.c 2011-03-27 14:31:47.000000000 -0400
37912 +++ linux-2.6.32.44/drivers/staging/dream/qdsp5/audio_aac.c 2011-04-17 15:56:46.000000000 -0400
37913 @@ -1022,7 +1022,7 @@ done:
37914 return rc;
37915 }
37916
37917 -static struct file_operations audio_aac_fops = {
37918 +static const struct file_operations audio_aac_fops = {
37919 .owner = THIS_MODULE,
37920 .open = audio_open,
37921 .release = audio_release,
37922 diff -urNp linux-2.6.32.44/drivers/staging/dream/qdsp5/audio_amrnb.c linux-2.6.32.44/drivers/staging/dream/qdsp5/audio_amrnb.c
37923 --- linux-2.6.32.44/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-03-27 14:31:47.000000000 -0400
37924 +++ linux-2.6.32.44/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-04-17 15:56:46.000000000 -0400
37925 @@ -833,7 +833,7 @@ done:
37926 return rc;
37927 }
37928
37929 -static struct file_operations audio_amrnb_fops = {
37930 +static const struct file_operations audio_amrnb_fops = {
37931 .owner = THIS_MODULE,
37932 .open = audamrnb_open,
37933 .release = audamrnb_release,
37934 diff -urNp linux-2.6.32.44/drivers/staging/dream/qdsp5/audio_evrc.c linux-2.6.32.44/drivers/staging/dream/qdsp5/audio_evrc.c
37935 --- linux-2.6.32.44/drivers/staging/dream/qdsp5/audio_evrc.c 2011-03-27 14:31:47.000000000 -0400
37936 +++ linux-2.6.32.44/drivers/staging/dream/qdsp5/audio_evrc.c 2011-04-17 15:56:46.000000000 -0400
37937 @@ -805,7 +805,7 @@ dma_fail:
37938 return rc;
37939 }
37940
37941 -static struct file_operations audio_evrc_fops = {
37942 +static const struct file_operations audio_evrc_fops = {
37943 .owner = THIS_MODULE,
37944 .open = audevrc_open,
37945 .release = audevrc_release,
37946 diff -urNp linux-2.6.32.44/drivers/staging/dream/qdsp5/audio_in.c linux-2.6.32.44/drivers/staging/dream/qdsp5/audio_in.c
37947 --- linux-2.6.32.44/drivers/staging/dream/qdsp5/audio_in.c 2011-03-27 14:31:47.000000000 -0400
37948 +++ linux-2.6.32.44/drivers/staging/dream/qdsp5/audio_in.c 2011-04-17 15:56:46.000000000 -0400
37949 @@ -913,7 +913,7 @@ static int audpre_open(struct inode *ino
37950 return 0;
37951 }
37952
37953 -static struct file_operations audio_fops = {
37954 +static const struct file_operations audio_fops = {
37955 .owner = THIS_MODULE,
37956 .open = audio_in_open,
37957 .release = audio_in_release,
37958 @@ -922,7 +922,7 @@ static struct file_operations audio_fops
37959 .unlocked_ioctl = audio_in_ioctl,
37960 };
37961
37962 -static struct file_operations audpre_fops = {
37963 +static const struct file_operations audpre_fops = {
37964 .owner = THIS_MODULE,
37965 .open = audpre_open,
37966 .unlocked_ioctl = audpre_ioctl,
37967 diff -urNp linux-2.6.32.44/drivers/staging/dream/qdsp5/audio_mp3.c linux-2.6.32.44/drivers/staging/dream/qdsp5/audio_mp3.c
37968 --- linux-2.6.32.44/drivers/staging/dream/qdsp5/audio_mp3.c 2011-03-27 14:31:47.000000000 -0400
37969 +++ linux-2.6.32.44/drivers/staging/dream/qdsp5/audio_mp3.c 2011-04-17 15:56:46.000000000 -0400
37970 @@ -941,7 +941,7 @@ done:
37971 return rc;
37972 }
37973
37974 -static struct file_operations audio_mp3_fops = {
37975 +static const struct file_operations audio_mp3_fops = {
37976 .owner = THIS_MODULE,
37977 .open = audio_open,
37978 .release = audio_release,
37979 diff -urNp linux-2.6.32.44/drivers/staging/dream/qdsp5/audio_out.c linux-2.6.32.44/drivers/staging/dream/qdsp5/audio_out.c
37980 --- linux-2.6.32.44/drivers/staging/dream/qdsp5/audio_out.c 2011-03-27 14:31:47.000000000 -0400
37981 +++ linux-2.6.32.44/drivers/staging/dream/qdsp5/audio_out.c 2011-04-17 15:56:46.000000000 -0400
37982 @@ -810,7 +810,7 @@ static int audpp_open(struct inode *inod
37983 return 0;
37984 }
37985
37986 -static struct file_operations audio_fops = {
37987 +static const struct file_operations audio_fops = {
37988 .owner = THIS_MODULE,
37989 .open = audio_open,
37990 .release = audio_release,
37991 @@ -819,7 +819,7 @@ static struct file_operations audio_fops
37992 .unlocked_ioctl = audio_ioctl,
37993 };
37994
37995 -static struct file_operations audpp_fops = {
37996 +static const struct file_operations audpp_fops = {
37997 .owner = THIS_MODULE,
37998 .open = audpp_open,
37999 .unlocked_ioctl = audpp_ioctl,
38000 diff -urNp linux-2.6.32.44/drivers/staging/dream/qdsp5/audio_qcelp.c linux-2.6.32.44/drivers/staging/dream/qdsp5/audio_qcelp.c
38001 --- linux-2.6.32.44/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-03-27 14:31:47.000000000 -0400
38002 +++ linux-2.6.32.44/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-04-17 15:56:46.000000000 -0400
38003 @@ -816,7 +816,7 @@ err:
38004 return rc;
38005 }
38006
38007 -static struct file_operations audio_qcelp_fops = {
38008 +static const struct file_operations audio_qcelp_fops = {
38009 .owner = THIS_MODULE,
38010 .open = audqcelp_open,
38011 .release = audqcelp_release,
38012 diff -urNp linux-2.6.32.44/drivers/staging/dream/qdsp5/snd.c linux-2.6.32.44/drivers/staging/dream/qdsp5/snd.c
38013 --- linux-2.6.32.44/drivers/staging/dream/qdsp5/snd.c 2011-03-27 14:31:47.000000000 -0400
38014 +++ linux-2.6.32.44/drivers/staging/dream/qdsp5/snd.c 2011-04-17 15:56:46.000000000 -0400
38015 @@ -242,7 +242,7 @@ err:
38016 return rc;
38017 }
38018
38019 -static struct file_operations snd_fops = {
38020 +static const struct file_operations snd_fops = {
38021 .owner = THIS_MODULE,
38022 .open = snd_open,
38023 .release = snd_release,
38024 diff -urNp linux-2.6.32.44/drivers/staging/dream/smd/smd_qmi.c linux-2.6.32.44/drivers/staging/dream/smd/smd_qmi.c
38025 --- linux-2.6.32.44/drivers/staging/dream/smd/smd_qmi.c 2011-03-27 14:31:47.000000000 -0400
38026 +++ linux-2.6.32.44/drivers/staging/dream/smd/smd_qmi.c 2011-04-17 15:56:46.000000000 -0400
38027 @@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip,
38028 return 0;
38029 }
38030
38031 -static struct file_operations qmi_fops = {
38032 +static const struct file_operations qmi_fops = {
38033 .owner = THIS_MODULE,
38034 .read = qmi_read,
38035 .write = qmi_write,
38036 diff -urNp linux-2.6.32.44/drivers/staging/dream/smd/smd_rpcrouter_device.c linux-2.6.32.44/drivers/staging/dream/smd/smd_rpcrouter_device.c
38037 --- linux-2.6.32.44/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-03-27 14:31:47.000000000 -0400
38038 +++ linux-2.6.32.44/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-04-17 15:56:46.000000000 -0400
38039 @@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file
38040 return rc;
38041 }
38042
38043 -static struct file_operations rpcrouter_server_fops = {
38044 +static const struct file_operations rpcrouter_server_fops = {
38045 .owner = THIS_MODULE,
38046 .open = rpcrouter_open,
38047 .release = rpcrouter_release,
38048 @@ -224,7 +224,7 @@ static struct file_operations rpcrouter_
38049 .unlocked_ioctl = rpcrouter_ioctl,
38050 };
38051
38052 -static struct file_operations rpcrouter_router_fops = {
38053 +static const struct file_operations rpcrouter_router_fops = {
38054 .owner = THIS_MODULE,
38055 .open = rpcrouter_open,
38056 .release = rpcrouter_release,
38057 diff -urNp linux-2.6.32.44/drivers/staging/dst/dcore.c linux-2.6.32.44/drivers/staging/dst/dcore.c
38058 --- linux-2.6.32.44/drivers/staging/dst/dcore.c 2011-03-27 14:31:47.000000000 -0400
38059 +++ linux-2.6.32.44/drivers/staging/dst/dcore.c 2011-04-17 15:56:46.000000000 -0400
38060 @@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendi
38061 return 0;
38062 }
38063
38064 -static struct block_device_operations dst_blk_ops = {
38065 +static const struct block_device_operations dst_blk_ops = {
38066 .open = dst_bdev_open,
38067 .release = dst_bdev_release,
38068 .owner = THIS_MODULE,
38069 @@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(s
38070 n->size = ctl->size;
38071
38072 atomic_set(&n->refcnt, 1);
38073 - atomic_long_set(&n->gen, 0);
38074 + atomic_long_set_unchecked(&n->gen, 0);
38075 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
38076
38077 err = dst_node_sysfs_init(n);
38078 diff -urNp linux-2.6.32.44/drivers/staging/dst/trans.c linux-2.6.32.44/drivers/staging/dst/trans.c
38079 --- linux-2.6.32.44/drivers/staging/dst/trans.c 2011-03-27 14:31:47.000000000 -0400
38080 +++ linux-2.6.32.44/drivers/staging/dst/trans.c 2011-04-17 15:56:46.000000000 -0400
38081 @@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n,
38082 t->error = 0;
38083 t->retries = 0;
38084 atomic_set(&t->refcnt, 1);
38085 - t->gen = atomic_long_inc_return(&n->gen);
38086 + t->gen = atomic_long_inc_return_unchecked(&n->gen);
38087
38088 t->enc = bio_data_dir(bio);
38089 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
38090 diff -urNp linux-2.6.32.44/drivers/staging/et131x/et1310_tx.c linux-2.6.32.44/drivers/staging/et131x/et1310_tx.c
38091 --- linux-2.6.32.44/drivers/staging/et131x/et1310_tx.c 2011-03-27 14:31:47.000000000 -0400
38092 +++ linux-2.6.32.44/drivers/staging/et131x/et1310_tx.c 2011-05-04 17:56:28.000000000 -0400
38093 @@ -710,11 +710,11 @@ inline void et131x_free_send_packet(stru
38094 struct net_device_stats *stats = &etdev->net_stats;
38095
38096 if (pMpTcb->Flags & fMP_DEST_BROAD)
38097 - atomic_inc(&etdev->Stats.brdcstxmt);
38098 + atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
38099 else if (pMpTcb->Flags & fMP_DEST_MULTI)
38100 - atomic_inc(&etdev->Stats.multixmt);
38101 + atomic_inc_unchecked(&etdev->Stats.multixmt);
38102 else
38103 - atomic_inc(&etdev->Stats.unixmt);
38104 + atomic_inc_unchecked(&etdev->Stats.unixmt);
38105
38106 if (pMpTcb->Packet) {
38107 stats->tx_bytes += pMpTcb->Packet->len;
38108 diff -urNp linux-2.6.32.44/drivers/staging/et131x/et131x_adapter.h linux-2.6.32.44/drivers/staging/et131x/et131x_adapter.h
38109 --- linux-2.6.32.44/drivers/staging/et131x/et131x_adapter.h 2011-03-27 14:31:47.000000000 -0400
38110 +++ linux-2.6.32.44/drivers/staging/et131x/et131x_adapter.h 2011-05-04 17:56:28.000000000 -0400
38111 @@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
38112 * operations
38113 */
38114 u32 unircv; /* # multicast packets received */
38115 - atomic_t unixmt; /* # multicast packets for Tx */
38116 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
38117 u32 multircv; /* # multicast packets received */
38118 - atomic_t multixmt; /* # multicast packets for Tx */
38119 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
38120 u32 brdcstrcv; /* # broadcast packets received */
38121 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
38122 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
38123 u32 norcvbuf; /* # Rx packets discarded */
38124 u32 noxmtbuf; /* # Tx packets discarded */
38125
38126 diff -urNp linux-2.6.32.44/drivers/staging/go7007/go7007-v4l2.c linux-2.6.32.44/drivers/staging/go7007/go7007-v4l2.c
38127 --- linux-2.6.32.44/drivers/staging/go7007/go7007-v4l2.c 2011-03-27 14:31:47.000000000 -0400
38128 +++ linux-2.6.32.44/drivers/staging/go7007/go7007-v4l2.c 2011-04-17 15:56:46.000000000 -0400
38129 @@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_are
38130 return 0;
38131 }
38132
38133 -static struct vm_operations_struct go7007_vm_ops = {
38134 +static const struct vm_operations_struct go7007_vm_ops = {
38135 .open = go7007_vm_open,
38136 .close = go7007_vm_close,
38137 .fault = go7007_vm_fault,
38138 diff -urNp linux-2.6.32.44/drivers/staging/hv/blkvsc_drv.c linux-2.6.32.44/drivers/staging/hv/blkvsc_drv.c
38139 --- linux-2.6.32.44/drivers/staging/hv/blkvsc_drv.c 2011-03-27 14:31:47.000000000 -0400
38140 +++ linux-2.6.32.44/drivers/staging/hv/blkvsc_drv.c 2011-04-17 15:56:46.000000000 -0400
38141 @@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKV
38142 /* The one and only one */
38143 static struct blkvsc_driver_context g_blkvsc_drv;
38144
38145 -static struct block_device_operations block_ops = {
38146 +static const struct block_device_operations block_ops = {
38147 .owner = THIS_MODULE,
38148 .open = blkvsc_open,
38149 .release = blkvsc_release,
38150 diff -urNp linux-2.6.32.44/drivers/staging/hv/Channel.c linux-2.6.32.44/drivers/staging/hv/Channel.c
38151 --- linux-2.6.32.44/drivers/staging/hv/Channel.c 2011-04-17 17:00:52.000000000 -0400
38152 +++ linux-2.6.32.44/drivers/staging/hv/Channel.c 2011-05-04 17:56:28.000000000 -0400
38153 @@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vm
38154
38155 DPRINT_ENTER(VMBUS);
38156
38157 - nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
38158 - atomic_inc(&gVmbusConnection.NextGpadlHandle);
38159 + nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
38160 + atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
38161
38162 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
38163 ASSERT(msgInfo != NULL);
38164 diff -urNp linux-2.6.32.44/drivers/staging/hv/Hv.c linux-2.6.32.44/drivers/staging/hv/Hv.c
38165 --- linux-2.6.32.44/drivers/staging/hv/Hv.c 2011-03-27 14:31:47.000000000 -0400
38166 +++ linux-2.6.32.44/drivers/staging/hv/Hv.c 2011-04-17 15:56:46.000000000 -0400
38167 @@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, vo
38168 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
38169 u32 outputAddressHi = outputAddress >> 32;
38170 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
38171 - volatile void *hypercallPage = gHvContext.HypercallPage;
38172 + volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
38173
38174 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
38175 Control, Input, Output);
38176 diff -urNp linux-2.6.32.44/drivers/staging/hv/vmbus_drv.c linux-2.6.32.44/drivers/staging/hv/vmbus_drv.c
38177 --- linux-2.6.32.44/drivers/staging/hv/vmbus_drv.c 2011-03-27 14:31:47.000000000 -0400
38178 +++ linux-2.6.32.44/drivers/staging/hv/vmbus_drv.c 2011-05-04 17:56:28.000000000 -0400
38179 @@ -532,7 +532,7 @@ static int vmbus_child_device_register(s
38180 to_device_context(root_device_obj);
38181 struct device_context *child_device_ctx =
38182 to_device_context(child_device_obj);
38183 - static atomic_t device_num = ATOMIC_INIT(0);
38184 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
38185
38186 DPRINT_ENTER(VMBUS_DRV);
38187
38188 @@ -541,7 +541,7 @@ static int vmbus_child_device_register(s
38189
38190 /* Set the device name. Otherwise, device_register() will fail. */
38191 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
38192 - atomic_inc_return(&device_num));
38193 + atomic_inc_return_unchecked(&device_num));
38194
38195 /* The new device belongs to this bus */
38196 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
38197 diff -urNp linux-2.6.32.44/drivers/staging/hv/VmbusPrivate.h linux-2.6.32.44/drivers/staging/hv/VmbusPrivate.h
38198 --- linux-2.6.32.44/drivers/staging/hv/VmbusPrivate.h 2011-04-17 17:00:52.000000000 -0400
38199 +++ linux-2.6.32.44/drivers/staging/hv/VmbusPrivate.h 2011-05-04 17:56:28.000000000 -0400
38200 @@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
38201 struct VMBUS_CONNECTION {
38202 enum VMBUS_CONNECT_STATE ConnectState;
38203
38204 - atomic_t NextGpadlHandle;
38205 + atomic_unchecked_t NextGpadlHandle;
38206
38207 /*
38208 * Represents channel interrupts. Each bit position represents a
38209 diff -urNp linux-2.6.32.44/drivers/staging/octeon/ethernet.c linux-2.6.32.44/drivers/staging/octeon/ethernet.c
38210 --- linux-2.6.32.44/drivers/staging/octeon/ethernet.c 2011-03-27 14:31:47.000000000 -0400
38211 +++ linux-2.6.32.44/drivers/staging/octeon/ethernet.c 2011-05-04 17:56:28.000000000 -0400
38212 @@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_
38213 * since the RX tasklet also increments it.
38214 */
38215 #ifdef CONFIG_64BIT
38216 - atomic64_add(rx_status.dropped_packets,
38217 - (atomic64_t *)&priv->stats.rx_dropped);
38218 + atomic64_add_unchecked(rx_status.dropped_packets,
38219 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
38220 #else
38221 - atomic_add(rx_status.dropped_packets,
38222 - (atomic_t *)&priv->stats.rx_dropped);
38223 + atomic_add_unchecked(rx_status.dropped_packets,
38224 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
38225 #endif
38226 }
38227
38228 diff -urNp linux-2.6.32.44/drivers/staging/octeon/ethernet-rx.c linux-2.6.32.44/drivers/staging/octeon/ethernet-rx.c
38229 --- linux-2.6.32.44/drivers/staging/octeon/ethernet-rx.c 2011-03-27 14:31:47.000000000 -0400
38230 +++ linux-2.6.32.44/drivers/staging/octeon/ethernet-rx.c 2011-05-04 17:56:28.000000000 -0400
38231 @@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long un
38232 /* Increment RX stats for virtual ports */
38233 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
38234 #ifdef CONFIG_64BIT
38235 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
38236 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
38237 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
38238 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
38239 #else
38240 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
38241 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
38242 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
38243 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
38244 #endif
38245 }
38246 netif_receive_skb(skb);
38247 @@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long un
38248 dev->name);
38249 */
38250 #ifdef CONFIG_64BIT
38251 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
38252 + atomic64_add_unchecked(1, (atomic64_t *)&priv->stats.rx_dropped);
38253 #else
38254 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
38255 + atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped);
38256 #endif
38257 dev_kfree_skb_irq(skb);
38258 }
38259 diff -urNp linux-2.6.32.44/drivers/staging/panel/panel.c linux-2.6.32.44/drivers/staging/panel/panel.c
38260 --- linux-2.6.32.44/drivers/staging/panel/panel.c 2011-03-27 14:31:47.000000000 -0400
38261 +++ linux-2.6.32.44/drivers/staging/panel/panel.c 2011-04-17 15:56:46.000000000 -0400
38262 @@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *ino
38263 return 0;
38264 }
38265
38266 -static struct file_operations lcd_fops = {
38267 +static const struct file_operations lcd_fops = {
38268 .write = lcd_write,
38269 .open = lcd_open,
38270 .release = lcd_release,
38271 @@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *
38272 return 0;
38273 }
38274
38275 -static struct file_operations keypad_fops = {
38276 +static const struct file_operations keypad_fops = {
38277 .read = keypad_read, /* read */
38278 .open = keypad_open, /* open */
38279 .release = keypad_release, /* close */
38280 diff -urNp linux-2.6.32.44/drivers/staging/phison/phison.c linux-2.6.32.44/drivers/staging/phison/phison.c
38281 --- linux-2.6.32.44/drivers/staging/phison/phison.c 2011-03-27 14:31:47.000000000 -0400
38282 +++ linux-2.6.32.44/drivers/staging/phison/phison.c 2011-04-17 15:56:46.000000000 -0400
38283 @@ -43,7 +43,7 @@ static struct scsi_host_template phison_
38284 ATA_BMDMA_SHT(DRV_NAME),
38285 };
38286
38287 -static struct ata_port_operations phison_ops = {
38288 +static const struct ata_port_operations phison_ops = {
38289 .inherits = &ata_bmdma_port_ops,
38290 .prereset = phison_pre_reset,
38291 };
38292 diff -urNp linux-2.6.32.44/drivers/staging/poch/poch.c linux-2.6.32.44/drivers/staging/poch/poch.c
38293 --- linux-2.6.32.44/drivers/staging/poch/poch.c 2011-03-27 14:31:47.000000000 -0400
38294 +++ linux-2.6.32.44/drivers/staging/poch/poch.c 2011-04-17 15:56:46.000000000 -0400
38295 @@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inod
38296 return 0;
38297 }
38298
38299 -static struct file_operations poch_fops = {
38300 +static const struct file_operations poch_fops = {
38301 .owner = THIS_MODULE,
38302 .open = poch_open,
38303 .release = poch_release,
38304 diff -urNp linux-2.6.32.44/drivers/staging/pohmelfs/inode.c linux-2.6.32.44/drivers/staging/pohmelfs/inode.c
38305 --- linux-2.6.32.44/drivers/staging/pohmelfs/inode.c 2011-03-27 14:31:47.000000000 -0400
38306 +++ linux-2.6.32.44/drivers/staging/pohmelfs/inode.c 2011-05-04 17:56:20.000000000 -0400
38307 @@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct su
38308 mutex_init(&psb->mcache_lock);
38309 psb->mcache_root = RB_ROOT;
38310 psb->mcache_timeout = msecs_to_jiffies(5000);
38311 - atomic_long_set(&psb->mcache_gen, 0);
38312 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
38313
38314 psb->trans_max_pages = 100;
38315
38316 @@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct su
38317 INIT_LIST_HEAD(&psb->crypto_ready_list);
38318 INIT_LIST_HEAD(&psb->crypto_active_list);
38319
38320 - atomic_set(&psb->trans_gen, 1);
38321 + atomic_set_unchecked(&psb->trans_gen, 1);
38322 atomic_long_set(&psb->total_inodes, 0);
38323
38324 mutex_init(&psb->state_lock);
38325 diff -urNp linux-2.6.32.44/drivers/staging/pohmelfs/mcache.c linux-2.6.32.44/drivers/staging/pohmelfs/mcache.c
38326 --- linux-2.6.32.44/drivers/staging/pohmelfs/mcache.c 2011-03-27 14:31:47.000000000 -0400
38327 +++ linux-2.6.32.44/drivers/staging/pohmelfs/mcache.c 2011-04-17 15:56:46.000000000 -0400
38328 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
38329 m->data = data;
38330 m->start = start;
38331 m->size = size;
38332 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
38333 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
38334
38335 mutex_lock(&psb->mcache_lock);
38336 err = pohmelfs_mcache_insert(psb, m);
38337 diff -urNp linux-2.6.32.44/drivers/staging/pohmelfs/netfs.h linux-2.6.32.44/drivers/staging/pohmelfs/netfs.h
38338 --- linux-2.6.32.44/drivers/staging/pohmelfs/netfs.h 2011-03-27 14:31:47.000000000 -0400
38339 +++ linux-2.6.32.44/drivers/staging/pohmelfs/netfs.h 2011-05-04 17:56:20.000000000 -0400
38340 @@ -570,14 +570,14 @@ struct pohmelfs_config;
38341 struct pohmelfs_sb {
38342 struct rb_root mcache_root;
38343 struct mutex mcache_lock;
38344 - atomic_long_t mcache_gen;
38345 + atomic_long_unchecked_t mcache_gen;
38346 unsigned long mcache_timeout;
38347
38348 unsigned int idx;
38349
38350 unsigned int trans_retries;
38351
38352 - atomic_t trans_gen;
38353 + atomic_unchecked_t trans_gen;
38354
38355 unsigned int crypto_attached_size;
38356 unsigned int crypto_align_size;
38357 diff -urNp linux-2.6.32.44/drivers/staging/pohmelfs/trans.c linux-2.6.32.44/drivers/staging/pohmelfs/trans.c
38358 --- linux-2.6.32.44/drivers/staging/pohmelfs/trans.c 2011-03-27 14:31:47.000000000 -0400
38359 +++ linux-2.6.32.44/drivers/staging/pohmelfs/trans.c 2011-05-04 17:56:28.000000000 -0400
38360 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
38361 int err;
38362 struct netfs_cmd *cmd = t->iovec.iov_base;
38363
38364 - t->gen = atomic_inc_return(&psb->trans_gen);
38365 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
38366
38367 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
38368 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
38369 diff -urNp linux-2.6.32.44/drivers/staging/sep/sep_driver.c linux-2.6.32.44/drivers/staging/sep/sep_driver.c
38370 --- linux-2.6.32.44/drivers/staging/sep/sep_driver.c 2011-03-27 14:31:47.000000000 -0400
38371 +++ linux-2.6.32.44/drivers/staging/sep/sep_driver.c 2011-04-17 15:56:46.000000000 -0400
38372 @@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver
38373 static dev_t sep_devno;
38374
38375 /* the files operations structure of the driver */
38376 -static struct file_operations sep_file_operations = {
38377 +static const struct file_operations sep_file_operations = {
38378 .owner = THIS_MODULE,
38379 .ioctl = sep_ioctl,
38380 .poll = sep_poll,
38381 diff -urNp linux-2.6.32.44/drivers/staging/usbip/vhci.h linux-2.6.32.44/drivers/staging/usbip/vhci.h
38382 --- linux-2.6.32.44/drivers/staging/usbip/vhci.h 2011-03-27 14:31:47.000000000 -0400
38383 +++ linux-2.6.32.44/drivers/staging/usbip/vhci.h 2011-05-04 17:56:28.000000000 -0400
38384 @@ -92,7 +92,7 @@ struct vhci_hcd {
38385 unsigned resuming:1;
38386 unsigned long re_timeout;
38387
38388 - atomic_t seqnum;
38389 + atomic_unchecked_t seqnum;
38390
38391 /*
38392 * NOTE:
38393 diff -urNp linux-2.6.32.44/drivers/staging/usbip/vhci_hcd.c linux-2.6.32.44/drivers/staging/usbip/vhci_hcd.c
38394 --- linux-2.6.32.44/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:01.000000000 -0400
38395 +++ linux-2.6.32.44/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:33.000000000 -0400
38396 @@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
38397 return;
38398 }
38399
38400 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
38401 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
38402 if (priv->seqnum == 0xffff)
38403 usbip_uinfo("seqnum max\n");
38404
38405 @@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_h
38406 return -ENOMEM;
38407 }
38408
38409 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
38410 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
38411 if (unlink->seqnum == 0xffff)
38412 usbip_uinfo("seqnum max\n");
38413
38414 @@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hc
38415 vdev->rhport = rhport;
38416 }
38417
38418 - atomic_set(&vhci->seqnum, 0);
38419 + atomic_set_unchecked(&vhci->seqnum, 0);
38420 spin_lock_init(&vhci->lock);
38421
38422
38423 diff -urNp linux-2.6.32.44/drivers/staging/usbip/vhci_rx.c linux-2.6.32.44/drivers/staging/usbip/vhci_rx.c
38424 --- linux-2.6.32.44/drivers/staging/usbip/vhci_rx.c 2011-04-17 17:00:52.000000000 -0400
38425 +++ linux-2.6.32.44/drivers/staging/usbip/vhci_rx.c 2011-05-04 17:56:28.000000000 -0400
38426 @@ -78,7 +78,7 @@ static void vhci_recv_ret_submit(struct
38427 usbip_uerr("cannot find a urb of seqnum %u\n",
38428 pdu->base.seqnum);
38429 usbip_uinfo("max seqnum %d\n",
38430 - atomic_read(&the_controller->seqnum));
38431 + atomic_read_unchecked(&the_controller->seqnum));
38432 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
38433 return;
38434 }
38435 diff -urNp linux-2.6.32.44/drivers/staging/vme/devices/vme_user.c linux-2.6.32.44/drivers/staging/vme/devices/vme_user.c
38436 --- linux-2.6.32.44/drivers/staging/vme/devices/vme_user.c 2011-03-27 14:31:47.000000000 -0400
38437 +++ linux-2.6.32.44/drivers/staging/vme/devices/vme_user.c 2011-04-17 15:56:46.000000000 -0400
38438 @@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *
38439 static int __init vme_user_probe(struct device *, int, int);
38440 static int __exit vme_user_remove(struct device *, int, int);
38441
38442 -static struct file_operations vme_user_fops = {
38443 +static const struct file_operations vme_user_fops = {
38444 .open = vme_user_open,
38445 .release = vme_user_release,
38446 .read = vme_user_read,
38447 diff -urNp linux-2.6.32.44/drivers/telephony/ixj.c linux-2.6.32.44/drivers/telephony/ixj.c
38448 --- linux-2.6.32.44/drivers/telephony/ixj.c 2011-03-27 14:31:47.000000000 -0400
38449 +++ linux-2.6.32.44/drivers/telephony/ixj.c 2011-05-16 21:46:57.000000000 -0400
38450 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
38451 bool mContinue;
38452 char *pIn, *pOut;
38453
38454 + pax_track_stack();
38455 +
38456 if (!SCI_Prepare(j))
38457 return 0;
38458
38459 diff -urNp linux-2.6.32.44/drivers/uio/uio.c linux-2.6.32.44/drivers/uio/uio.c
38460 --- linux-2.6.32.44/drivers/uio/uio.c 2011-03-27 14:31:47.000000000 -0400
38461 +++ linux-2.6.32.44/drivers/uio/uio.c 2011-05-04 17:56:20.000000000 -0400
38462 @@ -23,6 +23,7 @@
38463 #include <linux/string.h>
38464 #include <linux/kobject.h>
38465 #include <linux/uio_driver.h>
38466 +#include <asm/local.h>
38467
38468 #define UIO_MAX_DEVICES 255
38469
38470 @@ -30,10 +31,10 @@ struct uio_device {
38471 struct module *owner;
38472 struct device *dev;
38473 int minor;
38474 - atomic_t event;
38475 + atomic_unchecked_t event;
38476 struct fasync_struct *async_queue;
38477 wait_queue_head_t wait;
38478 - int vma_count;
38479 + local_t vma_count;
38480 struct uio_info *info;
38481 struct kobject *map_dir;
38482 struct kobject *portio_dir;
38483 @@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobj
38484 return entry->show(mem, buf);
38485 }
38486
38487 -static struct sysfs_ops map_sysfs_ops = {
38488 +static const struct sysfs_ops map_sysfs_ops = {
38489 .show = map_type_show,
38490 };
38491
38492 @@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct k
38493 return entry->show(port, buf);
38494 }
38495
38496 -static struct sysfs_ops portio_sysfs_ops = {
38497 +static const struct sysfs_ops portio_sysfs_ops = {
38498 .show = portio_type_show,
38499 };
38500
38501 @@ -255,7 +256,7 @@ static ssize_t show_event(struct device
38502 struct uio_device *idev = dev_get_drvdata(dev);
38503 if (idev)
38504 return sprintf(buf, "%u\n",
38505 - (unsigned int)atomic_read(&idev->event));
38506 + (unsigned int)atomic_read_unchecked(&idev->event));
38507 else
38508 return -ENODEV;
38509 }
38510 @@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *i
38511 {
38512 struct uio_device *idev = info->uio_dev;
38513
38514 - atomic_inc(&idev->event);
38515 + atomic_inc_unchecked(&idev->event);
38516 wake_up_interruptible(&idev->wait);
38517 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
38518 }
38519 @@ -477,7 +478,7 @@ static int uio_open(struct inode *inode,
38520 }
38521
38522 listener->dev = idev;
38523 - listener->event_count = atomic_read(&idev->event);
38524 + listener->event_count = atomic_read_unchecked(&idev->event);
38525 filep->private_data = listener;
38526
38527 if (idev->info->open) {
38528 @@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file
38529 return -EIO;
38530
38531 poll_wait(filep, &idev->wait, wait);
38532 - if (listener->event_count != atomic_read(&idev->event))
38533 + if (listener->event_count != atomic_read_unchecked(&idev->event))
38534 return POLLIN | POLLRDNORM;
38535 return 0;
38536 }
38537 @@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *fil
38538 do {
38539 set_current_state(TASK_INTERRUPTIBLE);
38540
38541 - event_count = atomic_read(&idev->event);
38542 + event_count = atomic_read_unchecked(&idev->event);
38543 if (event_count != listener->event_count) {
38544 if (copy_to_user(buf, &event_count, count))
38545 retval = -EFAULT;
38546 @@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_
38547 static void uio_vma_open(struct vm_area_struct *vma)
38548 {
38549 struct uio_device *idev = vma->vm_private_data;
38550 - idev->vma_count++;
38551 + local_inc(&idev->vma_count);
38552 }
38553
38554 static void uio_vma_close(struct vm_area_struct *vma)
38555 {
38556 struct uio_device *idev = vma->vm_private_data;
38557 - idev->vma_count--;
38558 + local_dec(&idev->vma_count);
38559 }
38560
38561 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
38562 @@ -840,7 +841,7 @@ int __uio_register_device(struct module
38563 idev->owner = owner;
38564 idev->info = info;
38565 init_waitqueue_head(&idev->wait);
38566 - atomic_set(&idev->event, 0);
38567 + atomic_set_unchecked(&idev->event, 0);
38568
38569 ret = uio_get_minor(idev);
38570 if (ret)
38571 diff -urNp linux-2.6.32.44/drivers/usb/atm/usbatm.c linux-2.6.32.44/drivers/usb/atm/usbatm.c
38572 --- linux-2.6.32.44/drivers/usb/atm/usbatm.c 2011-03-27 14:31:47.000000000 -0400
38573 +++ linux-2.6.32.44/drivers/usb/atm/usbatm.c 2011-04-17 15:56:46.000000000 -0400
38574 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(stru
38575 if (printk_ratelimit())
38576 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
38577 __func__, vpi, vci);
38578 - atomic_inc(&vcc->stats->rx_err);
38579 + atomic_inc_unchecked(&vcc->stats->rx_err);
38580 return;
38581 }
38582
38583 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(stru
38584 if (length > ATM_MAX_AAL5_PDU) {
38585 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
38586 __func__, length, vcc);
38587 - atomic_inc(&vcc->stats->rx_err);
38588 + atomic_inc_unchecked(&vcc->stats->rx_err);
38589 goto out;
38590 }
38591
38592 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(stru
38593 if (sarb->len < pdu_length) {
38594 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
38595 __func__, pdu_length, sarb->len, vcc);
38596 - atomic_inc(&vcc->stats->rx_err);
38597 + atomic_inc_unchecked(&vcc->stats->rx_err);
38598 goto out;
38599 }
38600
38601 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
38602 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
38603 __func__, vcc);
38604 - atomic_inc(&vcc->stats->rx_err);
38605 + atomic_inc_unchecked(&vcc->stats->rx_err);
38606 goto out;
38607 }
38608
38609 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(stru
38610 if (printk_ratelimit())
38611 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
38612 __func__, length);
38613 - atomic_inc(&vcc->stats->rx_drop);
38614 + atomic_inc_unchecked(&vcc->stats->rx_drop);
38615 goto out;
38616 }
38617
38618 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(stru
38619
38620 vcc->push(vcc, skb);
38621
38622 - atomic_inc(&vcc->stats->rx);
38623 + atomic_inc_unchecked(&vcc->stats->rx);
38624 out:
38625 skb_trim(sarb, 0);
38626 }
38627 @@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned l
38628 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
38629
38630 usbatm_pop(vcc, skb);
38631 - atomic_inc(&vcc->stats->tx);
38632 + atomic_inc_unchecked(&vcc->stats->tx);
38633
38634 skb = skb_dequeue(&instance->sndqueue);
38635 }
38636 @@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct a
38637 if (!left--)
38638 return sprintf(page,
38639 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
38640 - atomic_read(&atm_dev->stats.aal5.tx),
38641 - atomic_read(&atm_dev->stats.aal5.tx_err),
38642 - atomic_read(&atm_dev->stats.aal5.rx),
38643 - atomic_read(&atm_dev->stats.aal5.rx_err),
38644 - atomic_read(&atm_dev->stats.aal5.rx_drop));
38645 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
38646 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
38647 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
38648 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
38649 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
38650
38651 if (!left--) {
38652 if (instance->disconnected)
38653 diff -urNp linux-2.6.32.44/drivers/usb/class/cdc-wdm.c linux-2.6.32.44/drivers/usb/class/cdc-wdm.c
38654 --- linux-2.6.32.44/drivers/usb/class/cdc-wdm.c 2011-03-27 14:31:47.000000000 -0400
38655 +++ linux-2.6.32.44/drivers/usb/class/cdc-wdm.c 2011-04-17 15:56:46.000000000 -0400
38656 @@ -314,7 +314,7 @@ static ssize_t wdm_write
38657 if (r < 0)
38658 goto outnp;
38659
38660 - if (!file->f_flags && O_NONBLOCK)
38661 + if (!(file->f_flags & O_NONBLOCK))
38662 r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
38663 &desc->flags));
38664 else
38665 diff -urNp linux-2.6.32.44/drivers/usb/core/hcd.c linux-2.6.32.44/drivers/usb/core/hcd.c
38666 --- linux-2.6.32.44/drivers/usb/core/hcd.c 2011-03-27 14:31:47.000000000 -0400
38667 +++ linux-2.6.32.44/drivers/usb/core/hcd.c 2011-04-17 15:56:46.000000000 -0400
38668 @@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutd
38669
38670 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
38671
38672 -struct usb_mon_operations *mon_ops;
38673 +const struct usb_mon_operations *mon_ops;
38674
38675 /*
38676 * The registration is unlocked.
38677 @@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
38678 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
38679 */
38680
38681 -int usb_mon_register (struct usb_mon_operations *ops)
38682 +int usb_mon_register (const struct usb_mon_operations *ops)
38683 {
38684
38685 if (mon_ops)
38686 diff -urNp linux-2.6.32.44/drivers/usb/core/hcd.h linux-2.6.32.44/drivers/usb/core/hcd.h
38687 --- linux-2.6.32.44/drivers/usb/core/hcd.h 2011-03-27 14:31:47.000000000 -0400
38688 +++ linux-2.6.32.44/drivers/usb/core/hcd.h 2011-04-17 15:56:46.000000000 -0400
38689 @@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) {
38690 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
38691
38692 struct usb_mon_operations {
38693 - void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
38694 - void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
38695 - void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
38696 + void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
38697 + void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
38698 + void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
38699 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
38700 };
38701
38702 -extern struct usb_mon_operations *mon_ops;
38703 +extern const struct usb_mon_operations *mon_ops;
38704
38705 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
38706 {
38707 @@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(s
38708 (*mon_ops->urb_complete)(bus, urb, status);
38709 }
38710
38711 -int usb_mon_register(struct usb_mon_operations *ops);
38712 +int usb_mon_register(const struct usb_mon_operations *ops);
38713 void usb_mon_deregister(void);
38714
38715 #else
38716 diff -urNp linux-2.6.32.44/drivers/usb/core/message.c linux-2.6.32.44/drivers/usb/core/message.c
38717 --- linux-2.6.32.44/drivers/usb/core/message.c 2011-03-27 14:31:47.000000000 -0400
38718 +++ linux-2.6.32.44/drivers/usb/core/message.c 2011-04-17 15:56:46.000000000 -0400
38719 @@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device
38720 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
38721 if (buf) {
38722 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
38723 - if (len > 0) {
38724 - smallbuf = kmalloc(++len, GFP_NOIO);
38725 + if (len++ > 0) {
38726 + smallbuf = kmalloc(len, GFP_NOIO);
38727 if (!smallbuf)
38728 return buf;
38729 memcpy(smallbuf, buf, len);
38730 diff -urNp linux-2.6.32.44/drivers/usb/misc/appledisplay.c linux-2.6.32.44/drivers/usb/misc/appledisplay.c
38731 --- linux-2.6.32.44/drivers/usb/misc/appledisplay.c 2011-03-27 14:31:47.000000000 -0400
38732 +++ linux-2.6.32.44/drivers/usb/misc/appledisplay.c 2011-04-17 15:56:46.000000000 -0400
38733 @@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightnes
38734 return pdata->msgdata[1];
38735 }
38736
38737 -static struct backlight_ops appledisplay_bl_data = {
38738 +static const struct backlight_ops appledisplay_bl_data = {
38739 .get_brightness = appledisplay_bl_get_brightness,
38740 .update_status = appledisplay_bl_update_status,
38741 };
38742 diff -urNp linux-2.6.32.44/drivers/usb/mon/mon_main.c linux-2.6.32.44/drivers/usb/mon/mon_main.c
38743 --- linux-2.6.32.44/drivers/usb/mon/mon_main.c 2011-03-27 14:31:47.000000000 -0400
38744 +++ linux-2.6.32.44/drivers/usb/mon/mon_main.c 2011-04-17 15:56:46.000000000 -0400
38745 @@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
38746 /*
38747 * Ops
38748 */
38749 -static struct usb_mon_operations mon_ops_0 = {
38750 +static const struct usb_mon_operations mon_ops_0 = {
38751 .urb_submit = mon_submit,
38752 .urb_submit_error = mon_submit_error,
38753 .urb_complete = mon_complete,
38754 diff -urNp linux-2.6.32.44/drivers/usb/wusbcore/wa-hc.h linux-2.6.32.44/drivers/usb/wusbcore/wa-hc.h
38755 --- linux-2.6.32.44/drivers/usb/wusbcore/wa-hc.h 2011-03-27 14:31:47.000000000 -0400
38756 +++ linux-2.6.32.44/drivers/usb/wusbcore/wa-hc.h 2011-05-04 17:56:28.000000000 -0400
38757 @@ -192,7 +192,7 @@ struct wahc {
38758 struct list_head xfer_delayed_list;
38759 spinlock_t xfer_list_lock;
38760 struct work_struct xfer_work;
38761 - atomic_t xfer_id_count;
38762 + atomic_unchecked_t xfer_id_count;
38763 };
38764
38765
38766 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
38767 INIT_LIST_HEAD(&wa->xfer_delayed_list);
38768 spin_lock_init(&wa->xfer_list_lock);
38769 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
38770 - atomic_set(&wa->xfer_id_count, 1);
38771 + atomic_set_unchecked(&wa->xfer_id_count, 1);
38772 }
38773
38774 /**
38775 diff -urNp linux-2.6.32.44/drivers/usb/wusbcore/wa-xfer.c linux-2.6.32.44/drivers/usb/wusbcore/wa-xfer.c
38776 --- linux-2.6.32.44/drivers/usb/wusbcore/wa-xfer.c 2011-03-27 14:31:47.000000000 -0400
38777 +++ linux-2.6.32.44/drivers/usb/wusbcore/wa-xfer.c 2011-05-04 17:56:28.000000000 -0400
38778 @@ -293,7 +293,7 @@ out:
38779 */
38780 static void wa_xfer_id_init(struct wa_xfer *xfer)
38781 {
38782 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
38783 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
38784 }
38785
38786 /*
38787 diff -urNp linux-2.6.32.44/drivers/uwb/wlp/messages.c linux-2.6.32.44/drivers/uwb/wlp/messages.c
38788 --- linux-2.6.32.44/drivers/uwb/wlp/messages.c 2011-03-27 14:31:47.000000000 -0400
38789 +++ linux-2.6.32.44/drivers/uwb/wlp/messages.c 2011-04-17 15:56:46.000000000 -0400
38790 @@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct
38791 size_t len = skb->len;
38792 size_t used;
38793 ssize_t result;
38794 - struct wlp_nonce enonce, rnonce;
38795 + struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
38796 enum wlp_assc_error assc_err;
38797 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
38798 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
38799 diff -urNp linux-2.6.32.44/drivers/uwb/wlp/sysfs.c linux-2.6.32.44/drivers/uwb/wlp/sysfs.c
38800 --- linux-2.6.32.44/drivers/uwb/wlp/sysfs.c 2011-03-27 14:31:47.000000000 -0400
38801 +++ linux-2.6.32.44/drivers/uwb/wlp/sysfs.c 2011-04-17 15:56:46.000000000 -0400
38802 @@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobjec
38803 return ret;
38804 }
38805
38806 -static
38807 -struct sysfs_ops wss_sysfs_ops = {
38808 +static const struct sysfs_ops wss_sysfs_ops = {
38809 .show = wlp_wss_attr_show,
38810 .store = wlp_wss_attr_store,
38811 };
38812 diff -urNp linux-2.6.32.44/drivers/video/atmel_lcdfb.c linux-2.6.32.44/drivers/video/atmel_lcdfb.c
38813 --- linux-2.6.32.44/drivers/video/atmel_lcdfb.c 2011-03-27 14:31:47.000000000 -0400
38814 +++ linux-2.6.32.44/drivers/video/atmel_lcdfb.c 2011-04-17 15:56:46.000000000 -0400
38815 @@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struc
38816 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
38817 }
38818
38819 -static struct backlight_ops atmel_lcdc_bl_ops = {
38820 +static const struct backlight_ops atmel_lcdc_bl_ops = {
38821 .update_status = atmel_bl_update_status,
38822 .get_brightness = atmel_bl_get_brightness,
38823 };
38824 diff -urNp linux-2.6.32.44/drivers/video/aty/aty128fb.c linux-2.6.32.44/drivers/video/aty/aty128fb.c
38825 --- linux-2.6.32.44/drivers/video/aty/aty128fb.c 2011-03-27 14:31:47.000000000 -0400
38826 +++ linux-2.6.32.44/drivers/video/aty/aty128fb.c 2011-04-17 15:56:46.000000000 -0400
38827 @@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(stru
38828 return bd->props.brightness;
38829 }
38830
38831 -static struct backlight_ops aty128_bl_data = {
38832 +static const struct backlight_ops aty128_bl_data = {
38833 .get_brightness = aty128_bl_get_brightness,
38834 .update_status = aty128_bl_update_status,
38835 };
38836 diff -urNp linux-2.6.32.44/drivers/video/aty/atyfb_base.c linux-2.6.32.44/drivers/video/aty/atyfb_base.c
38837 --- linux-2.6.32.44/drivers/video/aty/atyfb_base.c 2011-03-27 14:31:47.000000000 -0400
38838 +++ linux-2.6.32.44/drivers/video/aty/atyfb_base.c 2011-04-17 15:56:46.000000000 -0400
38839 @@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct
38840 return bd->props.brightness;
38841 }
38842
38843 -static struct backlight_ops aty_bl_data = {
38844 +static const struct backlight_ops aty_bl_data = {
38845 .get_brightness = aty_bl_get_brightness,
38846 .update_status = aty_bl_update_status,
38847 };
38848 diff -urNp linux-2.6.32.44/drivers/video/aty/radeon_backlight.c linux-2.6.32.44/drivers/video/aty/radeon_backlight.c
38849 --- linux-2.6.32.44/drivers/video/aty/radeon_backlight.c 2011-03-27 14:31:47.000000000 -0400
38850 +++ linux-2.6.32.44/drivers/video/aty/radeon_backlight.c 2011-04-17 15:56:46.000000000 -0400
38851 @@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(stru
38852 return bd->props.brightness;
38853 }
38854
38855 -static struct backlight_ops radeon_bl_data = {
38856 +static const struct backlight_ops radeon_bl_data = {
38857 .get_brightness = radeon_bl_get_brightness,
38858 .update_status = radeon_bl_update_status,
38859 };
38860 diff -urNp linux-2.6.32.44/drivers/video/backlight/adp5520_bl.c linux-2.6.32.44/drivers/video/backlight/adp5520_bl.c
38861 --- linux-2.6.32.44/drivers/video/backlight/adp5520_bl.c 2011-03-27 14:31:47.000000000 -0400
38862 +++ linux-2.6.32.44/drivers/video/backlight/adp5520_bl.c 2011-04-17 15:56:46.000000000 -0400
38863 @@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(str
38864 return error ? data->current_brightness : reg_val;
38865 }
38866
38867 -static struct backlight_ops adp5520_bl_ops = {
38868 +static const struct backlight_ops adp5520_bl_ops = {
38869 .update_status = adp5520_bl_update_status,
38870 .get_brightness = adp5520_bl_get_brightness,
38871 };
38872 diff -urNp linux-2.6.32.44/drivers/video/backlight/adx_bl.c linux-2.6.32.44/drivers/video/backlight/adx_bl.c
38873 --- linux-2.6.32.44/drivers/video/backlight/adx_bl.c 2011-03-27 14:31:47.000000000 -0400
38874 +++ linux-2.6.32.44/drivers/video/backlight/adx_bl.c 2011-04-17 15:56:46.000000000 -0400
38875 @@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct
38876 return 1;
38877 }
38878
38879 -static struct backlight_ops adx_backlight_ops = {
38880 +static const struct backlight_ops adx_backlight_ops = {
38881 .options = 0,
38882 .update_status = adx_backlight_update_status,
38883 .get_brightness = adx_backlight_get_brightness,
38884 diff -urNp linux-2.6.32.44/drivers/video/backlight/atmel-pwm-bl.c linux-2.6.32.44/drivers/video/backlight/atmel-pwm-bl.c
38885 --- linux-2.6.32.44/drivers/video/backlight/atmel-pwm-bl.c 2011-03-27 14:31:47.000000000 -0400
38886 +++ linux-2.6.32.44/drivers/video/backlight/atmel-pwm-bl.c 2011-04-17 15:56:46.000000000 -0400
38887 @@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct
38888 return pwm_channel_enable(&pwmbl->pwmc);
38889 }
38890
38891 -static struct backlight_ops atmel_pwm_bl_ops = {
38892 +static const struct backlight_ops atmel_pwm_bl_ops = {
38893 .get_brightness = atmel_pwm_bl_get_intensity,
38894 .update_status = atmel_pwm_bl_set_intensity,
38895 };
38896 diff -urNp linux-2.6.32.44/drivers/video/backlight/backlight.c linux-2.6.32.44/drivers/video/backlight/backlight.c
38897 --- linux-2.6.32.44/drivers/video/backlight/backlight.c 2011-03-27 14:31:47.000000000 -0400
38898 +++ linux-2.6.32.44/drivers/video/backlight/backlight.c 2011-04-17 15:56:46.000000000 -0400
38899 @@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
38900 * ERR_PTR() or a pointer to the newly allocated device.
38901 */
38902 struct backlight_device *backlight_device_register(const char *name,
38903 - struct device *parent, void *devdata, struct backlight_ops *ops)
38904 + struct device *parent, void *devdata, const struct backlight_ops *ops)
38905 {
38906 struct backlight_device *new_bd;
38907 int rc;
38908 diff -urNp linux-2.6.32.44/drivers/video/backlight/corgi_lcd.c linux-2.6.32.44/drivers/video/backlight/corgi_lcd.c
38909 --- linux-2.6.32.44/drivers/video/backlight/corgi_lcd.c 2011-03-27 14:31:47.000000000 -0400
38910 +++ linux-2.6.32.44/drivers/video/backlight/corgi_lcd.c 2011-04-17 15:56:46.000000000 -0400
38911 @@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit
38912 }
38913 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
38914
38915 -static struct backlight_ops corgi_bl_ops = {
38916 +static const struct backlight_ops corgi_bl_ops = {
38917 .get_brightness = corgi_bl_get_intensity,
38918 .update_status = corgi_bl_update_status,
38919 };
38920 diff -urNp linux-2.6.32.44/drivers/video/backlight/cr_bllcd.c linux-2.6.32.44/drivers/video/backlight/cr_bllcd.c
38921 --- linux-2.6.32.44/drivers/video/backlight/cr_bllcd.c 2011-03-27 14:31:47.000000000 -0400
38922 +++ linux-2.6.32.44/drivers/video/backlight/cr_bllcd.c 2011-04-17 15:56:46.000000000 -0400
38923 @@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(st
38924 return intensity;
38925 }
38926
38927 -static struct backlight_ops cr_backlight_ops = {
38928 +static const struct backlight_ops cr_backlight_ops = {
38929 .get_brightness = cr_backlight_get_intensity,
38930 .update_status = cr_backlight_set_intensity,
38931 };
38932 diff -urNp linux-2.6.32.44/drivers/video/backlight/da903x_bl.c linux-2.6.32.44/drivers/video/backlight/da903x_bl.c
38933 --- linux-2.6.32.44/drivers/video/backlight/da903x_bl.c 2011-03-27 14:31:47.000000000 -0400
38934 +++ linux-2.6.32.44/drivers/video/backlight/da903x_bl.c 2011-04-17 15:56:46.000000000 -0400
38935 @@ -94,7 +94,7 @@ static int da903x_backlight_get_brightne
38936 return data->current_brightness;
38937 }
38938
38939 -static struct backlight_ops da903x_backlight_ops = {
38940 +static const struct backlight_ops da903x_backlight_ops = {
38941 .update_status = da903x_backlight_update_status,
38942 .get_brightness = da903x_backlight_get_brightness,
38943 };
38944 diff -urNp linux-2.6.32.44/drivers/video/backlight/generic_bl.c linux-2.6.32.44/drivers/video/backlight/generic_bl.c
38945 --- linux-2.6.32.44/drivers/video/backlight/generic_bl.c 2011-03-27 14:31:47.000000000 -0400
38946 +++ linux-2.6.32.44/drivers/video/backlight/generic_bl.c 2011-04-17 15:56:46.000000000 -0400
38947 @@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
38948 }
38949 EXPORT_SYMBOL(corgibl_limit_intensity);
38950
38951 -static struct backlight_ops genericbl_ops = {
38952 +static const struct backlight_ops genericbl_ops = {
38953 .options = BL_CORE_SUSPENDRESUME,
38954 .get_brightness = genericbl_get_intensity,
38955 .update_status = genericbl_send_intensity,
38956 diff -urNp linux-2.6.32.44/drivers/video/backlight/hp680_bl.c linux-2.6.32.44/drivers/video/backlight/hp680_bl.c
38957 --- linux-2.6.32.44/drivers/video/backlight/hp680_bl.c 2011-03-27 14:31:47.000000000 -0400
38958 +++ linux-2.6.32.44/drivers/video/backlight/hp680_bl.c 2011-04-17 15:56:46.000000000 -0400
38959 @@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct
38960 return current_intensity;
38961 }
38962
38963 -static struct backlight_ops hp680bl_ops = {
38964 +static const struct backlight_ops hp680bl_ops = {
38965 .get_brightness = hp680bl_get_intensity,
38966 .update_status = hp680bl_set_intensity,
38967 };
38968 diff -urNp linux-2.6.32.44/drivers/video/backlight/jornada720_bl.c linux-2.6.32.44/drivers/video/backlight/jornada720_bl.c
38969 --- linux-2.6.32.44/drivers/video/backlight/jornada720_bl.c 2011-03-27 14:31:47.000000000 -0400
38970 +++ linux-2.6.32.44/drivers/video/backlight/jornada720_bl.c 2011-04-17 15:56:46.000000000 -0400
38971 @@ -93,7 +93,7 @@ out:
38972 return ret;
38973 }
38974
38975 -static struct backlight_ops jornada_bl_ops = {
38976 +static const struct backlight_ops jornada_bl_ops = {
38977 .get_brightness = jornada_bl_get_brightness,
38978 .update_status = jornada_bl_update_status,
38979 .options = BL_CORE_SUSPENDRESUME,
38980 diff -urNp linux-2.6.32.44/drivers/video/backlight/kb3886_bl.c linux-2.6.32.44/drivers/video/backlight/kb3886_bl.c
38981 --- linux-2.6.32.44/drivers/video/backlight/kb3886_bl.c 2011-03-27 14:31:47.000000000 -0400
38982 +++ linux-2.6.32.44/drivers/video/backlight/kb3886_bl.c 2011-04-17 15:56:46.000000000 -0400
38983 @@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct
38984 return kb3886bl_intensity;
38985 }
38986
38987 -static struct backlight_ops kb3886bl_ops = {
38988 +static const struct backlight_ops kb3886bl_ops = {
38989 .get_brightness = kb3886bl_get_intensity,
38990 .update_status = kb3886bl_send_intensity,
38991 };
38992 diff -urNp linux-2.6.32.44/drivers/video/backlight/locomolcd.c linux-2.6.32.44/drivers/video/backlight/locomolcd.c
38993 --- linux-2.6.32.44/drivers/video/backlight/locomolcd.c 2011-03-27 14:31:47.000000000 -0400
38994 +++ linux-2.6.32.44/drivers/video/backlight/locomolcd.c 2011-04-17 15:56:46.000000000 -0400
38995 @@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struc
38996 return current_intensity;
38997 }
38998
38999 -static struct backlight_ops locomobl_data = {
39000 +static const struct backlight_ops locomobl_data = {
39001 .get_brightness = locomolcd_get_intensity,
39002 .update_status = locomolcd_set_intensity,
39003 };
39004 diff -urNp linux-2.6.32.44/drivers/video/backlight/mbp_nvidia_bl.c linux-2.6.32.44/drivers/video/backlight/mbp_nvidia_bl.c
39005 --- linux-2.6.32.44/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:01.000000000 -0400
39006 +++ linux-2.6.32.44/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:33.000000000 -0400
39007 @@ -33,7 +33,7 @@ struct dmi_match_data {
39008 unsigned long iostart;
39009 unsigned long iolen;
39010 /* Backlight operations structure. */
39011 - struct backlight_ops backlight_ops;
39012 + const struct backlight_ops backlight_ops;
39013 };
39014
39015 /* Module parameters. */
39016 diff -urNp linux-2.6.32.44/drivers/video/backlight/omap1_bl.c linux-2.6.32.44/drivers/video/backlight/omap1_bl.c
39017 --- linux-2.6.32.44/drivers/video/backlight/omap1_bl.c 2011-03-27 14:31:47.000000000 -0400
39018 +++ linux-2.6.32.44/drivers/video/backlight/omap1_bl.c 2011-04-17 15:56:46.000000000 -0400
39019 @@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct b
39020 return bl->current_intensity;
39021 }
39022
39023 -static struct backlight_ops omapbl_ops = {
39024 +static const struct backlight_ops omapbl_ops = {
39025 .get_brightness = omapbl_get_intensity,
39026 .update_status = omapbl_update_status,
39027 };
39028 diff -urNp linux-2.6.32.44/drivers/video/backlight/progear_bl.c linux-2.6.32.44/drivers/video/backlight/progear_bl.c
39029 --- linux-2.6.32.44/drivers/video/backlight/progear_bl.c 2011-03-27 14:31:47.000000000 -0400
39030 +++ linux-2.6.32.44/drivers/video/backlight/progear_bl.c 2011-04-17 15:56:46.000000000 -0400
39031 @@ -54,7 +54,7 @@ static int progearbl_get_intensity(struc
39032 return intensity - HW_LEVEL_MIN;
39033 }
39034
39035 -static struct backlight_ops progearbl_ops = {
39036 +static const struct backlight_ops progearbl_ops = {
39037 .get_brightness = progearbl_get_intensity,
39038 .update_status = progearbl_set_intensity,
39039 };
39040 diff -urNp linux-2.6.32.44/drivers/video/backlight/pwm_bl.c linux-2.6.32.44/drivers/video/backlight/pwm_bl.c
39041 --- linux-2.6.32.44/drivers/video/backlight/pwm_bl.c 2011-03-27 14:31:47.000000000 -0400
39042 +++ linux-2.6.32.44/drivers/video/backlight/pwm_bl.c 2011-04-17 15:56:46.000000000 -0400
39043 @@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(
39044 return bl->props.brightness;
39045 }
39046
39047 -static struct backlight_ops pwm_backlight_ops = {
39048 +static const struct backlight_ops pwm_backlight_ops = {
39049 .update_status = pwm_backlight_update_status,
39050 .get_brightness = pwm_backlight_get_brightness,
39051 };
39052 diff -urNp linux-2.6.32.44/drivers/video/backlight/tosa_bl.c linux-2.6.32.44/drivers/video/backlight/tosa_bl.c
39053 --- linux-2.6.32.44/drivers/video/backlight/tosa_bl.c 2011-03-27 14:31:47.000000000 -0400
39054 +++ linux-2.6.32.44/drivers/video/backlight/tosa_bl.c 2011-04-17 15:56:46.000000000 -0400
39055 @@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct
39056 return props->brightness;
39057 }
39058
39059 -static struct backlight_ops bl_ops = {
39060 +static const struct backlight_ops bl_ops = {
39061 .get_brightness = tosa_bl_get_brightness,
39062 .update_status = tosa_bl_update_status,
39063 };
39064 diff -urNp linux-2.6.32.44/drivers/video/backlight/wm831x_bl.c linux-2.6.32.44/drivers/video/backlight/wm831x_bl.c
39065 --- linux-2.6.32.44/drivers/video/backlight/wm831x_bl.c 2011-03-27 14:31:47.000000000 -0400
39066 +++ linux-2.6.32.44/drivers/video/backlight/wm831x_bl.c 2011-04-17 15:56:46.000000000 -0400
39067 @@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightne
39068 return data->current_brightness;
39069 }
39070
39071 -static struct backlight_ops wm831x_backlight_ops = {
39072 +static const struct backlight_ops wm831x_backlight_ops = {
39073 .options = BL_CORE_SUSPENDRESUME,
39074 .update_status = wm831x_backlight_update_status,
39075 .get_brightness = wm831x_backlight_get_brightness,
39076 diff -urNp linux-2.6.32.44/drivers/video/bf54x-lq043fb.c linux-2.6.32.44/drivers/video/bf54x-lq043fb.c
39077 --- linux-2.6.32.44/drivers/video/bf54x-lq043fb.c 2011-03-27 14:31:47.000000000 -0400
39078 +++ linux-2.6.32.44/drivers/video/bf54x-lq043fb.c 2011-04-17 15:56:46.000000000 -0400
39079 @@ -463,7 +463,7 @@ static int bl_get_brightness(struct back
39080 return 0;
39081 }
39082
39083 -static struct backlight_ops bfin_lq043fb_bl_ops = {
39084 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
39085 .get_brightness = bl_get_brightness,
39086 };
39087
39088 diff -urNp linux-2.6.32.44/drivers/video/bfin-t350mcqb-fb.c linux-2.6.32.44/drivers/video/bfin-t350mcqb-fb.c
39089 --- linux-2.6.32.44/drivers/video/bfin-t350mcqb-fb.c 2011-03-27 14:31:47.000000000 -0400
39090 +++ linux-2.6.32.44/drivers/video/bfin-t350mcqb-fb.c 2011-04-17 15:56:46.000000000 -0400
39091 @@ -381,7 +381,7 @@ static int bl_get_brightness(struct back
39092 return 0;
39093 }
39094
39095 -static struct backlight_ops bfin_lq043fb_bl_ops = {
39096 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
39097 .get_brightness = bl_get_brightness,
39098 };
39099
39100 diff -urNp linux-2.6.32.44/drivers/video/fbcmap.c linux-2.6.32.44/drivers/video/fbcmap.c
39101 --- linux-2.6.32.44/drivers/video/fbcmap.c 2011-03-27 14:31:47.000000000 -0400
39102 +++ linux-2.6.32.44/drivers/video/fbcmap.c 2011-04-17 15:56:46.000000000 -0400
39103 @@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user
39104 rc = -ENODEV;
39105 goto out;
39106 }
39107 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
39108 - !info->fbops->fb_setcmap)) {
39109 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
39110 rc = -EINVAL;
39111 goto out1;
39112 }
39113 diff -urNp linux-2.6.32.44/drivers/video/fbmem.c linux-2.6.32.44/drivers/video/fbmem.c
39114 --- linux-2.6.32.44/drivers/video/fbmem.c 2011-03-27 14:31:47.000000000 -0400
39115 +++ linux-2.6.32.44/drivers/video/fbmem.c 2011-05-16 21:46:57.000000000 -0400
39116 @@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_in
39117 image->dx += image->width + 8;
39118 }
39119 } else if (rotate == FB_ROTATE_UD) {
39120 - for (x = 0; x < num && image->dx >= 0; x++) {
39121 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
39122 info->fbops->fb_imageblit(info, image);
39123 image->dx -= image->width + 8;
39124 }
39125 @@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_in
39126 image->dy += image->height + 8;
39127 }
39128 } else if (rotate == FB_ROTATE_CCW) {
39129 - for (x = 0; x < num && image->dy >= 0; x++) {
39130 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
39131 info->fbops->fb_imageblit(info, image);
39132 image->dy -= image->height + 8;
39133 }
39134 @@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct
39135 int flags = info->flags;
39136 int ret = 0;
39137
39138 + pax_track_stack();
39139 +
39140 if (var->activate & FB_ACTIVATE_INV_MODE) {
39141 struct fb_videomode mode1, mode2;
39142
39143 @@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *
39144 void __user *argp = (void __user *)arg;
39145 long ret = 0;
39146
39147 + pax_track_stack();
39148 +
39149 switch (cmd) {
39150 case FBIOGET_VSCREENINFO:
39151 if (!lock_fb_info(info))
39152 @@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *
39153 return -EFAULT;
39154 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
39155 return -EINVAL;
39156 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
39157 + if (con2fb.framebuffer >= FB_MAX)
39158 return -EINVAL;
39159 if (!registered_fb[con2fb.framebuffer])
39160 request_module("fb%d", con2fb.framebuffer);
39161 diff -urNp linux-2.6.32.44/drivers/video/i810/i810_accel.c linux-2.6.32.44/drivers/video/i810/i810_accel.c
39162 --- linux-2.6.32.44/drivers/video/i810/i810_accel.c 2011-03-27 14:31:47.000000000 -0400
39163 +++ linux-2.6.32.44/drivers/video/i810/i810_accel.c 2011-04-17 15:56:46.000000000 -0400
39164 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct
39165 }
39166 }
39167 printk("ringbuffer lockup!!!\n");
39168 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
39169 i810_report_error(mmio);
39170 par->dev_flags |= LOCKUP;
39171 info->pixmap.scan_align = 1;
39172 diff -urNp linux-2.6.32.44/drivers/video/nvidia/nv_backlight.c linux-2.6.32.44/drivers/video/nvidia/nv_backlight.c
39173 --- linux-2.6.32.44/drivers/video/nvidia/nv_backlight.c 2011-03-27 14:31:47.000000000 -0400
39174 +++ linux-2.6.32.44/drivers/video/nvidia/nv_backlight.c 2011-04-17 15:56:46.000000000 -0400
39175 @@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(stru
39176 return bd->props.brightness;
39177 }
39178
39179 -static struct backlight_ops nvidia_bl_ops = {
39180 +static const struct backlight_ops nvidia_bl_ops = {
39181 .get_brightness = nvidia_bl_get_brightness,
39182 .update_status = nvidia_bl_update_status,
39183 };
39184 diff -urNp linux-2.6.32.44/drivers/video/riva/fbdev.c linux-2.6.32.44/drivers/video/riva/fbdev.c
39185 --- linux-2.6.32.44/drivers/video/riva/fbdev.c 2011-03-27 14:31:47.000000000 -0400
39186 +++ linux-2.6.32.44/drivers/video/riva/fbdev.c 2011-04-17 15:56:46.000000000 -0400
39187 @@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct
39188 return bd->props.brightness;
39189 }
39190
39191 -static struct backlight_ops riva_bl_ops = {
39192 +static const struct backlight_ops riva_bl_ops = {
39193 .get_brightness = riva_bl_get_brightness,
39194 .update_status = riva_bl_update_status,
39195 };
39196 diff -urNp linux-2.6.32.44/drivers/video/uvesafb.c linux-2.6.32.44/drivers/video/uvesafb.c
39197 --- linux-2.6.32.44/drivers/video/uvesafb.c 2011-03-27 14:31:47.000000000 -0400
39198 +++ linux-2.6.32.44/drivers/video/uvesafb.c 2011-04-17 15:56:46.000000000 -0400
39199 @@ -18,6 +18,7 @@
39200 #include <linux/fb.h>
39201 #include <linux/io.h>
39202 #include <linux/mutex.h>
39203 +#include <linux/moduleloader.h>
39204 #include <video/edid.h>
39205 #include <video/uvesafb.h>
39206 #ifdef CONFIG_X86
39207 @@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
39208 NULL,
39209 };
39210
39211 - return call_usermodehelper(v86d_path, argv, envp, 1);
39212 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
39213 }
39214
39215 /*
39216 @@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(
39217 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
39218 par->pmi_setpal = par->ypan = 0;
39219 } else {
39220 +
39221 +#ifdef CONFIG_PAX_KERNEXEC
39222 +#ifdef CONFIG_MODULES
39223 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
39224 +#endif
39225 + if (!par->pmi_code) {
39226 + par->pmi_setpal = par->ypan = 0;
39227 + return 0;
39228 + }
39229 +#endif
39230 +
39231 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
39232 + task->t.regs.edi);
39233 +
39234 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39235 + pax_open_kernel();
39236 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
39237 + pax_close_kernel();
39238 +
39239 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
39240 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
39241 +#else
39242 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
39243 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
39244 +#endif
39245 +
39246 printk(KERN_INFO "uvesafb: protected mode interface info at "
39247 "%04x:%04x\n",
39248 (u16)task->t.regs.es, (u16)task->t.regs.edi);
39249 @@ -1799,6 +1822,11 @@ out:
39250 if (par->vbe_modes)
39251 kfree(par->vbe_modes);
39252
39253 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39254 + if (par->pmi_code)
39255 + module_free_exec(NULL, par->pmi_code);
39256 +#endif
39257 +
39258 framebuffer_release(info);
39259 return err;
39260 }
39261 @@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platfor
39262 kfree(par->vbe_state_orig);
39263 if (par->vbe_state_saved)
39264 kfree(par->vbe_state_saved);
39265 +
39266 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39267 + if (par->pmi_code)
39268 + module_free_exec(NULL, par->pmi_code);
39269 +#endif
39270 +
39271 }
39272
39273 framebuffer_release(info);
39274 diff -urNp linux-2.6.32.44/drivers/video/vesafb.c linux-2.6.32.44/drivers/video/vesafb.c
39275 --- linux-2.6.32.44/drivers/video/vesafb.c 2011-03-27 14:31:47.000000000 -0400
39276 +++ linux-2.6.32.44/drivers/video/vesafb.c 2011-08-05 20:33:55.000000000 -0400
39277 @@ -9,6 +9,7 @@
39278 */
39279
39280 #include <linux/module.h>
39281 +#include <linux/moduleloader.h>
39282 #include <linux/kernel.h>
39283 #include <linux/errno.h>
39284 #include <linux/string.h>
39285 @@ -53,8 +54,8 @@ static int vram_remap __initdata; /*
39286 static int vram_total __initdata; /* Set total amount of memory */
39287 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
39288 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
39289 -static void (*pmi_start)(void) __read_mostly;
39290 -static void (*pmi_pal) (void) __read_mostly;
39291 +static void (*pmi_start)(void) __read_only;
39292 +static void (*pmi_pal) (void) __read_only;
39293 static int depth __read_mostly;
39294 static int vga_compat __read_mostly;
39295 /* --------------------------------------------------------------------- */
39296 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
39297 unsigned int size_vmode;
39298 unsigned int size_remap;
39299 unsigned int size_total;
39300 + void *pmi_code = NULL;
39301
39302 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
39303 return -ENODEV;
39304 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
39305 size_remap = size_total;
39306 vesafb_fix.smem_len = size_remap;
39307
39308 -#ifndef __i386__
39309 - screen_info.vesapm_seg = 0;
39310 -#endif
39311 -
39312 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
39313 printk(KERN_WARNING
39314 "vesafb: cannot reserve video memory at 0x%lx\n",
39315 @@ -315,9 +313,21 @@ static int __init vesafb_probe(struct pl
39316 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
39317 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
39318
39319 +#ifdef __i386__
39320 +
39321 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39322 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
39323 + if (!pmi_code)
39324 +#elif !defined(CONFIG_PAX_KERNEXEC)
39325 + if (0)
39326 +#endif
39327 +
39328 +#endif
39329 + screen_info.vesapm_seg = 0;
39330 +
39331 if (screen_info.vesapm_seg) {
39332 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
39333 - screen_info.vesapm_seg,screen_info.vesapm_off);
39334 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
39335 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
39336 }
39337
39338 if (screen_info.vesapm_seg < 0xc000)
39339 @@ -325,9 +335,25 @@ static int __init vesafb_probe(struct pl
39340
39341 if (ypan || pmi_setpal) {
39342 unsigned short *pmi_base;
39343 +
39344 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
39345 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
39346 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
39347 +
39348 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39349 + pax_open_kernel();
39350 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
39351 +#else
39352 + pmi_code = pmi_base;
39353 +#endif
39354 +
39355 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
39356 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
39357 +
39358 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39359 + pmi_start = ktva_ktla(pmi_start);
39360 + pmi_pal = ktva_ktla(pmi_pal);
39361 + pax_close_kernel();
39362 +#endif
39363 +
39364 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
39365 if (pmi_base[3]) {
39366 printk(KERN_INFO "vesafb: pmi: ports = ");
39367 @@ -469,6 +495,11 @@ static int __init vesafb_probe(struct pl
39368 info->node, info->fix.id);
39369 return 0;
39370 err:
39371 +
39372 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39373 + module_free_exec(NULL, pmi_code);
39374 +#endif
39375 +
39376 if (info->screen_base)
39377 iounmap(info->screen_base);
39378 framebuffer_release(info);
39379 diff -urNp linux-2.6.32.44/drivers/xen/sys-hypervisor.c linux-2.6.32.44/drivers/xen/sys-hypervisor.c
39380 --- linux-2.6.32.44/drivers/xen/sys-hypervisor.c 2011-03-27 14:31:47.000000000 -0400
39381 +++ linux-2.6.32.44/drivers/xen/sys-hypervisor.c 2011-04-17 15:56:46.000000000 -0400
39382 @@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct ko
39383 return 0;
39384 }
39385
39386 -static struct sysfs_ops hyp_sysfs_ops = {
39387 +static const struct sysfs_ops hyp_sysfs_ops = {
39388 .show = hyp_sysfs_show,
39389 .store = hyp_sysfs_store,
39390 };
39391 diff -urNp linux-2.6.32.44/fs/9p/vfs_inode.c linux-2.6.32.44/fs/9p/vfs_inode.c
39392 --- linux-2.6.32.44/fs/9p/vfs_inode.c 2011-03-27 14:31:47.000000000 -0400
39393 +++ linux-2.6.32.44/fs/9p/vfs_inode.c 2011-04-17 15:56:46.000000000 -0400
39394 @@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct
39395 static void
39396 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
39397 {
39398 - char *s = nd_get_link(nd);
39399 + const char *s = nd_get_link(nd);
39400
39401 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
39402 IS_ERR(s) ? "<error>" : s);
39403 diff -urNp linux-2.6.32.44/fs/aio.c linux-2.6.32.44/fs/aio.c
39404 --- linux-2.6.32.44/fs/aio.c 2011-03-27 14:31:47.000000000 -0400
39405 +++ linux-2.6.32.44/fs/aio.c 2011-06-04 20:40:21.000000000 -0400
39406 @@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx
39407 size += sizeof(struct io_event) * nr_events;
39408 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
39409
39410 - if (nr_pages < 0)
39411 + if (nr_pages <= 0)
39412 return -EINVAL;
39413
39414 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
39415 @@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ct
39416 struct aio_timeout to;
39417 int retry = 0;
39418
39419 + pax_track_stack();
39420 +
39421 /* needed to zero any padding within an entry (there shouldn't be
39422 * any, but C is fun!
39423 */
39424 @@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *i
39425 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
39426 {
39427 ssize_t ret;
39428 + struct iovec iovstack;
39429
39430 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
39431 kiocb->ki_nbytes, 1,
39432 - &kiocb->ki_inline_vec, &kiocb->ki_iovec);
39433 + &iovstack, &kiocb->ki_iovec);
39434 if (ret < 0)
39435 goto out;
39436
39437 + if (kiocb->ki_iovec == &iovstack) {
39438 + kiocb->ki_inline_vec = iovstack;
39439 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
39440 + }
39441 kiocb->ki_nr_segs = kiocb->ki_nbytes;
39442 kiocb->ki_cur_seg = 0;
39443 /* ki_nbytes/left now reflect bytes instead of segs */
39444 diff -urNp linux-2.6.32.44/fs/attr.c linux-2.6.32.44/fs/attr.c
39445 --- linux-2.6.32.44/fs/attr.c 2011-03-27 14:31:47.000000000 -0400
39446 +++ linux-2.6.32.44/fs/attr.c 2011-04-17 15:56:46.000000000 -0400
39447 @@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode
39448 unsigned long limit;
39449
39450 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
39451 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
39452 if (limit != RLIM_INFINITY && offset > limit)
39453 goto out_sig;
39454 if (offset > inode->i_sb->s_maxbytes)
39455 diff -urNp linux-2.6.32.44/fs/autofs/root.c linux-2.6.32.44/fs/autofs/root.c
39456 --- linux-2.6.32.44/fs/autofs/root.c 2011-03-27 14:31:47.000000000 -0400
39457 +++ linux-2.6.32.44/fs/autofs/root.c 2011-04-17 15:56:46.000000000 -0400
39458 @@ -299,7 +299,8 @@ static int autofs_root_symlink(struct in
39459 set_bit(n,sbi->symlink_bitmap);
39460 sl = &sbi->symlink[n];
39461 sl->len = strlen(symname);
39462 - sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
39463 + slsize = sl->len+1;
39464 + sl->data = kmalloc(slsize, GFP_KERNEL);
39465 if (!sl->data) {
39466 clear_bit(n,sbi->symlink_bitmap);
39467 unlock_kernel();
39468 diff -urNp linux-2.6.32.44/fs/autofs4/symlink.c linux-2.6.32.44/fs/autofs4/symlink.c
39469 --- linux-2.6.32.44/fs/autofs4/symlink.c 2011-03-27 14:31:47.000000000 -0400
39470 +++ linux-2.6.32.44/fs/autofs4/symlink.c 2011-04-17 15:56:46.000000000 -0400
39471 @@ -15,7 +15,7 @@
39472 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
39473 {
39474 struct autofs_info *ino = autofs4_dentry_ino(dentry);
39475 - nd_set_link(nd, (char *)ino->u.symlink);
39476 + nd_set_link(nd, ino->u.symlink);
39477 return NULL;
39478 }
39479
39480 diff -urNp linux-2.6.32.44/fs/befs/linuxvfs.c linux-2.6.32.44/fs/befs/linuxvfs.c
39481 --- linux-2.6.32.44/fs/befs/linuxvfs.c 2011-03-27 14:31:47.000000000 -0400
39482 +++ linux-2.6.32.44/fs/befs/linuxvfs.c 2011-04-17 15:56:46.000000000 -0400
39483 @@ -493,7 +493,7 @@ static void befs_put_link(struct dentry
39484 {
39485 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
39486 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
39487 - char *link = nd_get_link(nd);
39488 + const char *link = nd_get_link(nd);
39489 if (!IS_ERR(link))
39490 kfree(link);
39491 }
39492 diff -urNp linux-2.6.32.44/fs/binfmt_aout.c linux-2.6.32.44/fs/binfmt_aout.c
39493 --- linux-2.6.32.44/fs/binfmt_aout.c 2011-03-27 14:31:47.000000000 -0400
39494 +++ linux-2.6.32.44/fs/binfmt_aout.c 2011-04-17 15:56:46.000000000 -0400
39495 @@ -16,6 +16,7 @@
39496 #include <linux/string.h>
39497 #include <linux/fs.h>
39498 #include <linux/file.h>
39499 +#include <linux/security.h>
39500 #include <linux/stat.h>
39501 #include <linux/fcntl.h>
39502 #include <linux/ptrace.h>
39503 @@ -102,6 +103,8 @@ static int aout_core_dump(long signr, st
39504 #endif
39505 # define START_STACK(u) (u.start_stack)
39506
39507 + memset(&dump, 0, sizeof(dump));
39508 +
39509 fs = get_fs();
39510 set_fs(KERNEL_DS);
39511 has_dumped = 1;
39512 @@ -113,10 +116,12 @@ static int aout_core_dump(long signr, st
39513
39514 /* If the size of the dump file exceeds the rlimit, then see what would happen
39515 if we wrote the stack, but not the data area. */
39516 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
39517 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
39518 dump.u_dsize = 0;
39519
39520 /* Make sure we have enough room to write the stack and data areas. */
39521 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
39522 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
39523 dump.u_ssize = 0;
39524
39525 @@ -146,9 +151,7 @@ static int aout_core_dump(long signr, st
39526 dump_size = dump.u_ssize << PAGE_SHIFT;
39527 DUMP_WRITE(dump_start,dump_size);
39528 }
39529 -/* Finally dump the task struct. Not be used by gdb, but could be useful */
39530 - set_fs(KERNEL_DS);
39531 - DUMP_WRITE(current,sizeof(*current));
39532 +/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
39533 end_coredump:
39534 set_fs(fs);
39535 return has_dumped;
39536 @@ -249,6 +252,8 @@ static int load_aout_binary(struct linux
39537 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
39538 if (rlim >= RLIM_INFINITY)
39539 rlim = ~0;
39540 +
39541 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
39542 if (ex.a_data + ex.a_bss > rlim)
39543 return -ENOMEM;
39544
39545 @@ -277,6 +282,27 @@ static int load_aout_binary(struct linux
39546 install_exec_creds(bprm);
39547 current->flags &= ~PF_FORKNOEXEC;
39548
39549 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39550 + current->mm->pax_flags = 0UL;
39551 +#endif
39552 +
39553 +#ifdef CONFIG_PAX_PAGEEXEC
39554 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
39555 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
39556 +
39557 +#ifdef CONFIG_PAX_EMUTRAMP
39558 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
39559 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
39560 +#endif
39561 +
39562 +#ifdef CONFIG_PAX_MPROTECT
39563 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
39564 + current->mm->pax_flags |= MF_PAX_MPROTECT;
39565 +#endif
39566 +
39567 + }
39568 +#endif
39569 +
39570 if (N_MAGIC(ex) == OMAGIC) {
39571 unsigned long text_addr, map_size;
39572 loff_t pos;
39573 @@ -349,7 +375,7 @@ static int load_aout_binary(struct linux
39574
39575 down_write(&current->mm->mmap_sem);
39576 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
39577 - PROT_READ | PROT_WRITE | PROT_EXEC,
39578 + PROT_READ | PROT_WRITE,
39579 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
39580 fd_offset + ex.a_text);
39581 up_write(&current->mm->mmap_sem);
39582 diff -urNp linux-2.6.32.44/fs/binfmt_elf.c linux-2.6.32.44/fs/binfmt_elf.c
39583 --- linux-2.6.32.44/fs/binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
39584 +++ linux-2.6.32.44/fs/binfmt_elf.c 2011-05-16 21:46:57.000000000 -0400
39585 @@ -50,6 +50,10 @@ static int elf_core_dump(long signr, str
39586 #define elf_core_dump NULL
39587 #endif
39588
39589 +#ifdef CONFIG_PAX_MPROTECT
39590 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
39591 +#endif
39592 +
39593 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
39594 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
39595 #else
39596 @@ -69,6 +73,11 @@ static struct linux_binfmt elf_format =
39597 .load_binary = load_elf_binary,
39598 .load_shlib = load_elf_library,
39599 .core_dump = elf_core_dump,
39600 +
39601 +#ifdef CONFIG_PAX_MPROTECT
39602 + .handle_mprotect= elf_handle_mprotect,
39603 +#endif
39604 +
39605 .min_coredump = ELF_EXEC_PAGESIZE,
39606 .hasvdso = 1
39607 };
39608 @@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
39609
39610 static int set_brk(unsigned long start, unsigned long end)
39611 {
39612 + unsigned long e = end;
39613 +
39614 start = ELF_PAGEALIGN(start);
39615 end = ELF_PAGEALIGN(end);
39616 if (end > start) {
39617 @@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
39618 if (BAD_ADDR(addr))
39619 return addr;
39620 }
39621 - current->mm->start_brk = current->mm->brk = end;
39622 + current->mm->start_brk = current->mm->brk = e;
39623 return 0;
39624 }
39625
39626 @@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
39627 elf_addr_t __user *u_rand_bytes;
39628 const char *k_platform = ELF_PLATFORM;
39629 const char *k_base_platform = ELF_BASE_PLATFORM;
39630 - unsigned char k_rand_bytes[16];
39631 + u32 k_rand_bytes[4];
39632 int items;
39633 elf_addr_t *elf_info;
39634 int ei_index = 0;
39635 const struct cred *cred = current_cred();
39636 struct vm_area_struct *vma;
39637 + unsigned long saved_auxv[AT_VECTOR_SIZE];
39638 +
39639 + pax_track_stack();
39640
39641 /*
39642 * In some cases (e.g. Hyper-Threading), we want to avoid L1
39643 @@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
39644 * Generate 16 random bytes for userspace PRNG seeding.
39645 */
39646 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
39647 - u_rand_bytes = (elf_addr_t __user *)
39648 - STACK_ALLOC(p, sizeof(k_rand_bytes));
39649 + srandom32(k_rand_bytes[0] ^ random32());
39650 + srandom32(k_rand_bytes[1] ^ random32());
39651 + srandom32(k_rand_bytes[2] ^ random32());
39652 + srandom32(k_rand_bytes[3] ^ random32());
39653 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
39654 + u_rand_bytes = (elf_addr_t __user *) p;
39655 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
39656 return -EFAULT;
39657
39658 @@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
39659 return -EFAULT;
39660 current->mm->env_end = p;
39661
39662 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
39663 +
39664 /* Put the elf_info on the stack in the right place. */
39665 sp = (elf_addr_t __user *)envp + 1;
39666 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
39667 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
39668 return -EFAULT;
39669 return 0;
39670 }
39671 @@ -385,10 +405,10 @@ static unsigned long load_elf_interp(str
39672 {
39673 struct elf_phdr *elf_phdata;
39674 struct elf_phdr *eppnt;
39675 - unsigned long load_addr = 0;
39676 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
39677 int load_addr_set = 0;
39678 unsigned long last_bss = 0, elf_bss = 0;
39679 - unsigned long error = ~0UL;
39680 + unsigned long error = -EINVAL;
39681 unsigned long total_size;
39682 int retval, i, size;
39683
39684 @@ -434,6 +454,11 @@ static unsigned long load_elf_interp(str
39685 goto out_close;
39686 }
39687
39688 +#ifdef CONFIG_PAX_SEGMEXEC
39689 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
39690 + pax_task_size = SEGMEXEC_TASK_SIZE;
39691 +#endif
39692 +
39693 eppnt = elf_phdata;
39694 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
39695 if (eppnt->p_type == PT_LOAD) {
39696 @@ -477,8 +502,8 @@ static unsigned long load_elf_interp(str
39697 k = load_addr + eppnt->p_vaddr;
39698 if (BAD_ADDR(k) ||
39699 eppnt->p_filesz > eppnt->p_memsz ||
39700 - eppnt->p_memsz > TASK_SIZE ||
39701 - TASK_SIZE - eppnt->p_memsz < k) {
39702 + eppnt->p_memsz > pax_task_size ||
39703 + pax_task_size - eppnt->p_memsz < k) {
39704 error = -ENOMEM;
39705 goto out_close;
39706 }
39707 @@ -532,6 +557,194 @@ out:
39708 return error;
39709 }
39710
39711 +#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
39712 +static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
39713 +{
39714 + unsigned long pax_flags = 0UL;
39715 +
39716 +#ifdef CONFIG_PAX_PAGEEXEC
39717 + if (elf_phdata->p_flags & PF_PAGEEXEC)
39718 + pax_flags |= MF_PAX_PAGEEXEC;
39719 +#endif
39720 +
39721 +#ifdef CONFIG_PAX_SEGMEXEC
39722 + if (elf_phdata->p_flags & PF_SEGMEXEC)
39723 + pax_flags |= MF_PAX_SEGMEXEC;
39724 +#endif
39725 +
39726 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39727 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39728 + if (nx_enabled)
39729 + pax_flags &= ~MF_PAX_SEGMEXEC;
39730 + else
39731 + pax_flags &= ~MF_PAX_PAGEEXEC;
39732 + }
39733 +#endif
39734 +
39735 +#ifdef CONFIG_PAX_EMUTRAMP
39736 + if (elf_phdata->p_flags & PF_EMUTRAMP)
39737 + pax_flags |= MF_PAX_EMUTRAMP;
39738 +#endif
39739 +
39740 +#ifdef CONFIG_PAX_MPROTECT
39741 + if (elf_phdata->p_flags & PF_MPROTECT)
39742 + pax_flags |= MF_PAX_MPROTECT;
39743 +#endif
39744 +
39745 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39746 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
39747 + pax_flags |= MF_PAX_RANDMMAP;
39748 +#endif
39749 +
39750 + return pax_flags;
39751 +}
39752 +#endif
39753 +
39754 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
39755 +static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
39756 +{
39757 + unsigned long pax_flags = 0UL;
39758 +
39759 +#ifdef CONFIG_PAX_PAGEEXEC
39760 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
39761 + pax_flags |= MF_PAX_PAGEEXEC;
39762 +#endif
39763 +
39764 +#ifdef CONFIG_PAX_SEGMEXEC
39765 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
39766 + pax_flags |= MF_PAX_SEGMEXEC;
39767 +#endif
39768 +
39769 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39770 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39771 + if (nx_enabled)
39772 + pax_flags &= ~MF_PAX_SEGMEXEC;
39773 + else
39774 + pax_flags &= ~MF_PAX_PAGEEXEC;
39775 + }
39776 +#endif
39777 +
39778 +#ifdef CONFIG_PAX_EMUTRAMP
39779 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
39780 + pax_flags |= MF_PAX_EMUTRAMP;
39781 +#endif
39782 +
39783 +#ifdef CONFIG_PAX_MPROTECT
39784 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
39785 + pax_flags |= MF_PAX_MPROTECT;
39786 +#endif
39787 +
39788 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39789 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
39790 + pax_flags |= MF_PAX_RANDMMAP;
39791 +#endif
39792 +
39793 + return pax_flags;
39794 +}
39795 +#endif
39796 +
39797 +#ifdef CONFIG_PAX_EI_PAX
39798 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
39799 +{
39800 + unsigned long pax_flags = 0UL;
39801 +
39802 +#ifdef CONFIG_PAX_PAGEEXEC
39803 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
39804 + pax_flags |= MF_PAX_PAGEEXEC;
39805 +#endif
39806 +
39807 +#ifdef CONFIG_PAX_SEGMEXEC
39808 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
39809 + pax_flags |= MF_PAX_SEGMEXEC;
39810 +#endif
39811 +
39812 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39813 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39814 + if (nx_enabled)
39815 + pax_flags &= ~MF_PAX_SEGMEXEC;
39816 + else
39817 + pax_flags &= ~MF_PAX_PAGEEXEC;
39818 + }
39819 +#endif
39820 +
39821 +#ifdef CONFIG_PAX_EMUTRAMP
39822 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
39823 + pax_flags |= MF_PAX_EMUTRAMP;
39824 +#endif
39825 +
39826 +#ifdef CONFIG_PAX_MPROTECT
39827 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
39828 + pax_flags |= MF_PAX_MPROTECT;
39829 +#endif
39830 +
39831 +#ifdef CONFIG_PAX_ASLR
39832 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
39833 + pax_flags |= MF_PAX_RANDMMAP;
39834 +#endif
39835 +
39836 + return pax_flags;
39837 +}
39838 +#endif
39839 +
39840 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
39841 +static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
39842 +{
39843 + unsigned long pax_flags = 0UL;
39844 +
39845 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
39846 + unsigned long i;
39847 + int found_flags = 0;
39848 +#endif
39849 +
39850 +#ifdef CONFIG_PAX_EI_PAX
39851 + pax_flags = pax_parse_ei_pax(elf_ex);
39852 +#endif
39853 +
39854 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
39855 + for (i = 0UL; i < elf_ex->e_phnum; i++)
39856 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
39857 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
39858 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
39859 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
39860 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
39861 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
39862 + return -EINVAL;
39863 +
39864 +#ifdef CONFIG_PAX_SOFTMODE
39865 + if (pax_softmode)
39866 + pax_flags = pax_parse_softmode(&elf_phdata[i]);
39867 + else
39868 +#endif
39869 +
39870 + pax_flags = pax_parse_hardmode(&elf_phdata[i]);
39871 + found_flags = 1;
39872 + break;
39873 + }
39874 +#endif
39875 +
39876 +#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
39877 + if (found_flags == 0) {
39878 + struct elf_phdr phdr;
39879 + memset(&phdr, 0, sizeof(phdr));
39880 + phdr.p_flags = PF_NOEMUTRAMP;
39881 +#ifdef CONFIG_PAX_SOFTMODE
39882 + if (pax_softmode)
39883 + pax_flags = pax_parse_softmode(&phdr);
39884 + else
39885 +#endif
39886 + pax_flags = pax_parse_hardmode(&phdr);
39887 + }
39888 +#endif
39889 +
39890 +
39891 + if (0 > pax_check_flags(&pax_flags))
39892 + return -EINVAL;
39893 +
39894 + current->mm->pax_flags = pax_flags;
39895 + return 0;
39896 +}
39897 +#endif
39898 +
39899 /*
39900 * These are the functions used to load ELF style executables and shared
39901 * libraries. There is no binary dependent code anywhere else.
39902 @@ -548,6 +761,11 @@ static unsigned long randomize_stack_top
39903 {
39904 unsigned int random_variable = 0;
39905
39906 +#ifdef CONFIG_PAX_RANDUSTACK
39907 + if (randomize_va_space)
39908 + return stack_top - current->mm->delta_stack;
39909 +#endif
39910 +
39911 if ((current->flags & PF_RANDOMIZE) &&
39912 !(current->personality & ADDR_NO_RANDOMIZE)) {
39913 random_variable = get_random_int() & STACK_RND_MASK;
39914 @@ -566,7 +784,7 @@ static int load_elf_binary(struct linux_
39915 unsigned long load_addr = 0, load_bias = 0;
39916 int load_addr_set = 0;
39917 char * elf_interpreter = NULL;
39918 - unsigned long error;
39919 + unsigned long error = 0;
39920 struct elf_phdr *elf_ppnt, *elf_phdata;
39921 unsigned long elf_bss, elf_brk;
39922 int retval, i;
39923 @@ -576,11 +794,11 @@ static int load_elf_binary(struct linux_
39924 unsigned long start_code, end_code, start_data, end_data;
39925 unsigned long reloc_func_desc = 0;
39926 int executable_stack = EXSTACK_DEFAULT;
39927 - unsigned long def_flags = 0;
39928 struct {
39929 struct elfhdr elf_ex;
39930 struct elfhdr interp_elf_ex;
39931 } *loc;
39932 + unsigned long pax_task_size = TASK_SIZE;
39933
39934 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
39935 if (!loc) {
39936 @@ -718,11 +936,80 @@ static int load_elf_binary(struct linux_
39937
39938 /* OK, This is the point of no return */
39939 current->flags &= ~PF_FORKNOEXEC;
39940 - current->mm->def_flags = def_flags;
39941 +
39942 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39943 + current->mm->pax_flags = 0UL;
39944 +#endif
39945 +
39946 +#ifdef CONFIG_PAX_DLRESOLVE
39947 + current->mm->call_dl_resolve = 0UL;
39948 +#endif
39949 +
39950 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
39951 + current->mm->call_syscall = 0UL;
39952 +#endif
39953 +
39954 +#ifdef CONFIG_PAX_ASLR
39955 + current->mm->delta_mmap = 0UL;
39956 + current->mm->delta_stack = 0UL;
39957 +#endif
39958 +
39959 + current->mm->def_flags = 0;
39960 +
39961 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
39962 + if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
39963 + send_sig(SIGKILL, current, 0);
39964 + goto out_free_dentry;
39965 + }
39966 +#endif
39967 +
39968 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
39969 + pax_set_initial_flags(bprm);
39970 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
39971 + if (pax_set_initial_flags_func)
39972 + (pax_set_initial_flags_func)(bprm);
39973 +#endif
39974 +
39975 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
39976 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
39977 + current->mm->context.user_cs_limit = PAGE_SIZE;
39978 + current->mm->def_flags |= VM_PAGEEXEC;
39979 + }
39980 +#endif
39981 +
39982 +#ifdef CONFIG_PAX_SEGMEXEC
39983 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
39984 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
39985 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
39986 + pax_task_size = SEGMEXEC_TASK_SIZE;
39987 + }
39988 +#endif
39989 +
39990 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
39991 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39992 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
39993 + put_cpu();
39994 + }
39995 +#endif
39996
39997 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
39998 may depend on the personality. */
39999 SET_PERSONALITY(loc->elf_ex);
40000 +
40001 +#ifdef CONFIG_PAX_ASLR
40002 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
40003 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
40004 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
40005 + }
40006 +#endif
40007 +
40008 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
40009 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40010 + executable_stack = EXSTACK_DISABLE_X;
40011 + current->personality &= ~READ_IMPLIES_EXEC;
40012 + } else
40013 +#endif
40014 +
40015 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
40016 current->personality |= READ_IMPLIES_EXEC;
40017
40018 @@ -804,6 +1091,20 @@ static int load_elf_binary(struct linux_
40019 #else
40020 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
40021 #endif
40022 +
40023 +#ifdef CONFIG_PAX_RANDMMAP
40024 + /* PaX: randomize base address at the default exe base if requested */
40025 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
40026 +#ifdef CONFIG_SPARC64
40027 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
40028 +#else
40029 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
40030 +#endif
40031 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
40032 + elf_flags |= MAP_FIXED;
40033 + }
40034 +#endif
40035 +
40036 }
40037
40038 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
40039 @@ -836,9 +1137,9 @@ static int load_elf_binary(struct linux_
40040 * allowed task size. Note that p_filesz must always be
40041 * <= p_memsz so it is only necessary to check p_memsz.
40042 */
40043 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40044 - elf_ppnt->p_memsz > TASK_SIZE ||
40045 - TASK_SIZE - elf_ppnt->p_memsz < k) {
40046 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40047 + elf_ppnt->p_memsz > pax_task_size ||
40048 + pax_task_size - elf_ppnt->p_memsz < k) {
40049 /* set_brk can never work. Avoid overflows. */
40050 send_sig(SIGKILL, current, 0);
40051 retval = -EINVAL;
40052 @@ -866,6 +1167,11 @@ static int load_elf_binary(struct linux_
40053 start_data += load_bias;
40054 end_data += load_bias;
40055
40056 +#ifdef CONFIG_PAX_RANDMMAP
40057 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
40058 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
40059 +#endif
40060 +
40061 /* Calling set_brk effectively mmaps the pages that we need
40062 * for the bss and break sections. We must do this before
40063 * mapping in the interpreter, to make sure it doesn't wind
40064 @@ -877,9 +1183,11 @@ static int load_elf_binary(struct linux_
40065 goto out_free_dentry;
40066 }
40067 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
40068 - send_sig(SIGSEGV, current, 0);
40069 - retval = -EFAULT; /* Nobody gets to see this, but.. */
40070 - goto out_free_dentry;
40071 + /*
40072 + * This bss-zeroing can fail if the ELF
40073 + * file specifies odd protections. So
40074 + * we don't check the return value
40075 + */
40076 }
40077
40078 if (elf_interpreter) {
40079 @@ -1112,8 +1420,10 @@ static int dump_seek(struct file *file,
40080 unsigned long n = off;
40081 if (n > PAGE_SIZE)
40082 n = PAGE_SIZE;
40083 - if (!dump_write(file, buf, n))
40084 + if (!dump_write(file, buf, n)) {
40085 + free_page((unsigned long)buf);
40086 return 0;
40087 + }
40088 off -= n;
40089 }
40090 free_page((unsigned long)buf);
40091 @@ -1125,7 +1435,7 @@ static int dump_seek(struct file *file,
40092 * Decide what to dump of a segment, part, all or none.
40093 */
40094 static unsigned long vma_dump_size(struct vm_area_struct *vma,
40095 - unsigned long mm_flags)
40096 + unsigned long mm_flags, long signr)
40097 {
40098 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
40099
40100 @@ -1159,7 +1469,7 @@ static unsigned long vma_dump_size(struc
40101 if (vma->vm_file == NULL)
40102 return 0;
40103
40104 - if (FILTER(MAPPED_PRIVATE))
40105 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
40106 goto whole;
40107
40108 /*
40109 @@ -1255,8 +1565,11 @@ static int writenote(struct memelfnote *
40110 #undef DUMP_WRITE
40111
40112 #define DUMP_WRITE(addr, nr) \
40113 + do { \
40114 + gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
40115 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
40116 - goto end_coredump;
40117 + goto end_coredump; \
40118 + } while (0);
40119
40120 static void fill_elf_header(struct elfhdr *elf, int segs,
40121 u16 machine, u32 flags, u8 osabi)
40122 @@ -1385,9 +1698,9 @@ static void fill_auxv_note(struct memelf
40123 {
40124 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
40125 int i = 0;
40126 - do
40127 + do {
40128 i += 2;
40129 - while (auxv[i - 2] != AT_NULL);
40130 + } while (auxv[i - 2] != AT_NULL);
40131 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
40132 }
40133
40134 @@ -1973,7 +2286,7 @@ static int elf_core_dump(long signr, str
40135 phdr.p_offset = offset;
40136 phdr.p_vaddr = vma->vm_start;
40137 phdr.p_paddr = 0;
40138 - phdr.p_filesz = vma_dump_size(vma, mm_flags);
40139 + phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
40140 phdr.p_memsz = vma->vm_end - vma->vm_start;
40141 offset += phdr.p_filesz;
40142 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
40143 @@ -2006,7 +2319,7 @@ static int elf_core_dump(long signr, str
40144 unsigned long addr;
40145 unsigned long end;
40146
40147 - end = vma->vm_start + vma_dump_size(vma, mm_flags);
40148 + end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
40149
40150 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
40151 struct page *page;
40152 @@ -2015,6 +2328,7 @@ static int elf_core_dump(long signr, str
40153 page = get_dump_page(addr);
40154 if (page) {
40155 void *kaddr = kmap(page);
40156 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
40157 stop = ((size += PAGE_SIZE) > limit) ||
40158 !dump_write(file, kaddr, PAGE_SIZE);
40159 kunmap(page);
40160 @@ -2042,6 +2356,97 @@ out:
40161
40162 #endif /* USE_ELF_CORE_DUMP */
40163
40164 +#ifdef CONFIG_PAX_MPROTECT
40165 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
40166 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
40167 + * we'll remove VM_MAYWRITE for good on RELRO segments.
40168 + *
40169 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
40170 + * basis because we want to allow the common case and not the special ones.
40171 + */
40172 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
40173 +{
40174 + struct elfhdr elf_h;
40175 + struct elf_phdr elf_p;
40176 + unsigned long i;
40177 + unsigned long oldflags;
40178 + bool is_textrel_rw, is_textrel_rx, is_relro;
40179 +
40180 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
40181 + return;
40182 +
40183 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
40184 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
40185 +
40186 +#ifdef CONFIG_PAX_ELFRELOCS
40187 + /* possible TEXTREL */
40188 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
40189 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
40190 +#else
40191 + is_textrel_rw = false;
40192 + is_textrel_rx = false;
40193 +#endif
40194 +
40195 + /* possible RELRO */
40196 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
40197 +
40198 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
40199 + return;
40200 +
40201 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
40202 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
40203 +
40204 +#ifdef CONFIG_PAX_ETEXECRELOCS
40205 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40206 +#else
40207 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
40208 +#endif
40209 +
40210 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40211 + !elf_check_arch(&elf_h) ||
40212 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
40213 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
40214 + return;
40215 +
40216 + for (i = 0UL; i < elf_h.e_phnum; i++) {
40217 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
40218 + return;
40219 + switch (elf_p.p_type) {
40220 + case PT_DYNAMIC:
40221 + if (!is_textrel_rw && !is_textrel_rx)
40222 + continue;
40223 + i = 0UL;
40224 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
40225 + elf_dyn dyn;
40226 +
40227 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
40228 + return;
40229 + if (dyn.d_tag == DT_NULL)
40230 + return;
40231 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
40232 + gr_log_textrel(vma);
40233 + if (is_textrel_rw)
40234 + vma->vm_flags |= VM_MAYWRITE;
40235 + else
40236 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
40237 + vma->vm_flags &= ~VM_MAYWRITE;
40238 + return;
40239 + }
40240 + i++;
40241 + }
40242 + return;
40243 +
40244 + case PT_GNU_RELRO:
40245 + if (!is_relro)
40246 + continue;
40247 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
40248 + vma->vm_flags &= ~VM_MAYWRITE;
40249 + return;
40250 + }
40251 + }
40252 +}
40253 +#endif
40254 +
40255 static int __init init_elf_binfmt(void)
40256 {
40257 return register_binfmt(&elf_format);
40258 diff -urNp linux-2.6.32.44/fs/binfmt_flat.c linux-2.6.32.44/fs/binfmt_flat.c
40259 --- linux-2.6.32.44/fs/binfmt_flat.c 2011-03-27 14:31:47.000000000 -0400
40260 +++ linux-2.6.32.44/fs/binfmt_flat.c 2011-04-17 15:56:46.000000000 -0400
40261 @@ -564,7 +564,9 @@ static int load_flat_file(struct linux_b
40262 realdatastart = (unsigned long) -ENOMEM;
40263 printk("Unable to allocate RAM for process data, errno %d\n",
40264 (int)-realdatastart);
40265 + down_write(&current->mm->mmap_sem);
40266 do_munmap(current->mm, textpos, text_len);
40267 + up_write(&current->mm->mmap_sem);
40268 ret = realdatastart;
40269 goto err;
40270 }
40271 @@ -588,8 +590,10 @@ static int load_flat_file(struct linux_b
40272 }
40273 if (IS_ERR_VALUE(result)) {
40274 printk("Unable to read data+bss, errno %d\n", (int)-result);
40275 + down_write(&current->mm->mmap_sem);
40276 do_munmap(current->mm, textpos, text_len);
40277 do_munmap(current->mm, realdatastart, data_len + extra);
40278 + up_write(&current->mm->mmap_sem);
40279 ret = result;
40280 goto err;
40281 }
40282 @@ -658,8 +662,10 @@ static int load_flat_file(struct linux_b
40283 }
40284 if (IS_ERR_VALUE(result)) {
40285 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
40286 + down_write(&current->mm->mmap_sem);
40287 do_munmap(current->mm, textpos, text_len + data_len + extra +
40288 MAX_SHARED_LIBS * sizeof(unsigned long));
40289 + up_write(&current->mm->mmap_sem);
40290 ret = result;
40291 goto err;
40292 }
40293 diff -urNp linux-2.6.32.44/fs/bio.c linux-2.6.32.44/fs/bio.c
40294 --- linux-2.6.32.44/fs/bio.c 2011-03-27 14:31:47.000000000 -0400
40295 +++ linux-2.6.32.44/fs/bio.c 2011-04-17 15:56:46.000000000 -0400
40296 @@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_cr
40297
40298 i = 0;
40299 while (i < bio_slab_nr) {
40300 - struct bio_slab *bslab = &bio_slabs[i];
40301 + bslab = &bio_slabs[i];
40302
40303 if (!bslab->slab && entry == -1)
40304 entry = i;
40305 @@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct b
40306 const int read = bio_data_dir(bio) == READ;
40307 struct bio_map_data *bmd = bio->bi_private;
40308 int i;
40309 - char *p = bmd->sgvecs[0].iov_base;
40310 + char *p = (__force char *)bmd->sgvecs[0].iov_base;
40311
40312 __bio_for_each_segment(bvec, bio, i, 0) {
40313 char *addr = page_address(bvec->bv_page);
40314 diff -urNp linux-2.6.32.44/fs/block_dev.c linux-2.6.32.44/fs/block_dev.c
40315 --- linux-2.6.32.44/fs/block_dev.c 2011-08-09 18:35:29.000000000 -0400
40316 +++ linux-2.6.32.44/fs/block_dev.c 2011-08-09 18:34:00.000000000 -0400
40317 @@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev,
40318 else if (bdev->bd_contains == bdev)
40319 res = 0; /* is a whole device which isn't held */
40320
40321 - else if (bdev->bd_contains->bd_holder == bd_claim)
40322 + else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
40323 res = 0; /* is a partition of a device that is being partitioned */
40324 else if (bdev->bd_contains->bd_holder != NULL)
40325 res = -EBUSY; /* is a partition of a held device */
40326 diff -urNp linux-2.6.32.44/fs/btrfs/ctree.c linux-2.6.32.44/fs/btrfs/ctree.c
40327 --- linux-2.6.32.44/fs/btrfs/ctree.c 2011-03-27 14:31:47.000000000 -0400
40328 +++ linux-2.6.32.44/fs/btrfs/ctree.c 2011-04-17 15:56:46.000000000 -0400
40329 @@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(st
40330 free_extent_buffer(buf);
40331 add_root_to_dirty_list(root);
40332 } else {
40333 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
40334 - parent_start = parent->start;
40335 - else
40336 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
40337 + if (parent)
40338 + parent_start = parent->start;
40339 + else
40340 + parent_start = 0;
40341 + } else
40342 parent_start = 0;
40343
40344 WARN_ON(trans->transid != btrfs_header_generation(parent));
40345 @@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_tran
40346
40347 ret = 0;
40348 if (slot == 0) {
40349 - struct btrfs_disk_key disk_key;
40350 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
40351 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
40352 }
40353 diff -urNp linux-2.6.32.44/fs/btrfs/disk-io.c linux-2.6.32.44/fs/btrfs/disk-io.c
40354 --- linux-2.6.32.44/fs/btrfs/disk-io.c 2011-04-17 17:00:52.000000000 -0400
40355 +++ linux-2.6.32.44/fs/btrfs/disk-io.c 2011-04-17 17:03:11.000000000 -0400
40356 @@ -39,7 +39,7 @@
40357 #include "tree-log.h"
40358 #include "free-space-cache.h"
40359
40360 -static struct extent_io_ops btree_extent_io_ops;
40361 +static const struct extent_io_ops btree_extent_io_ops;
40362 static void end_workqueue_fn(struct btrfs_work *work);
40363 static void free_fs_root(struct btrfs_root *root);
40364
40365 @@ -2607,7 +2607,7 @@ out:
40366 return 0;
40367 }
40368
40369 -static struct extent_io_ops btree_extent_io_ops = {
40370 +static const struct extent_io_ops btree_extent_io_ops = {
40371 .write_cache_pages_lock_hook = btree_lock_page_hook,
40372 .readpage_end_io_hook = btree_readpage_end_io_hook,
40373 .submit_bio_hook = btree_submit_bio_hook,
40374 diff -urNp linux-2.6.32.44/fs/btrfs/extent_io.h linux-2.6.32.44/fs/btrfs/extent_io.h
40375 --- linux-2.6.32.44/fs/btrfs/extent_io.h 2011-03-27 14:31:47.000000000 -0400
40376 +++ linux-2.6.32.44/fs/btrfs/extent_io.h 2011-04-17 15:56:46.000000000 -0400
40377 @@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(s
40378 struct bio *bio, int mirror_num,
40379 unsigned long bio_flags);
40380 struct extent_io_ops {
40381 - int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
40382 + int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
40383 u64 start, u64 end, int *page_started,
40384 unsigned long *nr_written);
40385 - int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
40386 - int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
40387 + int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
40388 + int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
40389 extent_submit_bio_hook_t *submit_bio_hook;
40390 - int (*merge_bio_hook)(struct page *page, unsigned long offset,
40391 + int (* const merge_bio_hook)(struct page *page, unsigned long offset,
40392 size_t size, struct bio *bio,
40393 unsigned long bio_flags);
40394 - int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
40395 - int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
40396 + int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
40397 + int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
40398 u64 start, u64 end,
40399 struct extent_state *state);
40400 - int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
40401 + int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
40402 u64 start, u64 end,
40403 struct extent_state *state);
40404 - int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
40405 + int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
40406 struct extent_state *state);
40407 - int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
40408 + int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
40409 struct extent_state *state, int uptodate);
40410 - int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
40411 + int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
40412 unsigned long old, unsigned long bits);
40413 - int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
40414 + int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
40415 unsigned long bits);
40416 - int (*merge_extent_hook)(struct inode *inode,
40417 + int (* const merge_extent_hook)(struct inode *inode,
40418 struct extent_state *new,
40419 struct extent_state *other);
40420 - int (*split_extent_hook)(struct inode *inode,
40421 + int (* const split_extent_hook)(struct inode *inode,
40422 struct extent_state *orig, u64 split);
40423 - int (*write_cache_pages_lock_hook)(struct page *page);
40424 + int (* const write_cache_pages_lock_hook)(struct page *page);
40425 };
40426
40427 struct extent_io_tree {
40428 @@ -88,7 +88,7 @@ struct extent_io_tree {
40429 u64 dirty_bytes;
40430 spinlock_t lock;
40431 spinlock_t buffer_lock;
40432 - struct extent_io_ops *ops;
40433 + const struct extent_io_ops *ops;
40434 };
40435
40436 struct extent_state {
40437 diff -urNp linux-2.6.32.44/fs/btrfs/extent-tree.c linux-2.6.32.44/fs/btrfs/extent-tree.c
40438 --- linux-2.6.32.44/fs/btrfs/extent-tree.c 2011-03-27 14:31:47.000000000 -0400
40439 +++ linux-2.6.32.44/fs/btrfs/extent-tree.c 2011-06-12 06:39:08.000000000 -0400
40440 @@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(
40441 u64 group_start = group->key.objectid;
40442 new_extents = kmalloc(sizeof(*new_extents),
40443 GFP_NOFS);
40444 + if (!new_extents) {
40445 + ret = -ENOMEM;
40446 + goto out;
40447 + }
40448 nr_extents = 1;
40449 ret = get_new_locations(reloc_inode,
40450 extent_key,
40451 diff -urNp linux-2.6.32.44/fs/btrfs/free-space-cache.c linux-2.6.32.44/fs/btrfs/free-space-cache.c
40452 --- linux-2.6.32.44/fs/btrfs/free-space-cache.c 2011-03-27 14:31:47.000000000 -0400
40453 +++ linux-2.6.32.44/fs/btrfs/free-space-cache.c 2011-04-17 15:56:46.000000000 -0400
40454 @@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
40455
40456 while(1) {
40457 if (entry->bytes < bytes || entry->offset < min_start) {
40458 - struct rb_node *node;
40459 -
40460 node = rb_next(&entry->offset_index);
40461 if (!node)
40462 break;
40463 @@ -1226,7 +1224,7 @@ again:
40464 */
40465 while (entry->bitmap || found_bitmap ||
40466 (!entry->bitmap && entry->bytes < min_bytes)) {
40467 - struct rb_node *node = rb_next(&entry->offset_index);
40468 + node = rb_next(&entry->offset_index);
40469
40470 if (entry->bitmap && entry->bytes > bytes + empty_size) {
40471 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
40472 diff -urNp linux-2.6.32.44/fs/btrfs/inode.c linux-2.6.32.44/fs/btrfs/inode.c
40473 --- linux-2.6.32.44/fs/btrfs/inode.c 2011-03-27 14:31:47.000000000 -0400
40474 +++ linux-2.6.32.44/fs/btrfs/inode.c 2011-06-12 06:39:58.000000000 -0400
40475 @@ -63,7 +63,7 @@ static const struct inode_operations btr
40476 static const struct address_space_operations btrfs_aops;
40477 static const struct address_space_operations btrfs_symlink_aops;
40478 static const struct file_operations btrfs_dir_file_operations;
40479 -static struct extent_io_ops btrfs_extent_io_ops;
40480 +static const struct extent_io_ops btrfs_extent_io_ops;
40481
40482 static struct kmem_cache *btrfs_inode_cachep;
40483 struct kmem_cache *btrfs_trans_handle_cachep;
40484 @@ -925,6 +925,7 @@ static int cow_file_range_async(struct i
40485 1, 0, NULL, GFP_NOFS);
40486 while (start < end) {
40487 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
40488 + BUG_ON(!async_cow);
40489 async_cow->inode = inode;
40490 async_cow->root = root;
40491 async_cow->locked_page = locked_page;
40492 @@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(st
40493 inline_size = btrfs_file_extent_inline_item_len(leaf,
40494 btrfs_item_nr(leaf, path->slots[0]));
40495 tmp = kmalloc(inline_size, GFP_NOFS);
40496 + if (!tmp)
40497 + return -ENOMEM;
40498 ptr = btrfs_file_extent_inline_start(item);
40499
40500 read_extent_buffer(leaf, tmp, ptr, inline_size);
40501 @@ -5410,7 +5413,7 @@ fail:
40502 return -ENOMEM;
40503 }
40504
40505 -static int btrfs_getattr(struct vfsmount *mnt,
40506 +int btrfs_getattr(struct vfsmount *mnt,
40507 struct dentry *dentry, struct kstat *stat)
40508 {
40509 struct inode *inode = dentry->d_inode;
40510 @@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount
40511 return 0;
40512 }
40513
40514 +EXPORT_SYMBOL(btrfs_getattr);
40515 +
40516 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
40517 +{
40518 + return BTRFS_I(inode)->root->anon_super.s_dev;
40519 +}
40520 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
40521 +
40522 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
40523 struct inode *new_dir, struct dentry *new_dentry)
40524 {
40525 @@ -5972,7 +5983,7 @@ static const struct file_operations btrf
40526 .fsync = btrfs_sync_file,
40527 };
40528
40529 -static struct extent_io_ops btrfs_extent_io_ops = {
40530 +static const struct extent_io_ops btrfs_extent_io_ops = {
40531 .fill_delalloc = run_delalloc_range,
40532 .submit_bio_hook = btrfs_submit_bio_hook,
40533 .merge_bio_hook = btrfs_merge_bio_hook,
40534 diff -urNp linux-2.6.32.44/fs/btrfs/relocation.c linux-2.6.32.44/fs/btrfs/relocation.c
40535 --- linux-2.6.32.44/fs/btrfs/relocation.c 2011-03-27 14:31:47.000000000 -0400
40536 +++ linux-2.6.32.44/fs/btrfs/relocation.c 2011-04-17 15:56:46.000000000 -0400
40537 @@ -884,7 +884,7 @@ static int __update_reloc_root(struct bt
40538 }
40539 spin_unlock(&rc->reloc_root_tree.lock);
40540
40541 - BUG_ON((struct btrfs_root *)node->data != root);
40542 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
40543
40544 if (!del) {
40545 spin_lock(&rc->reloc_root_tree.lock);
40546 diff -urNp linux-2.6.32.44/fs/btrfs/sysfs.c linux-2.6.32.44/fs/btrfs/sysfs.c
40547 --- linux-2.6.32.44/fs/btrfs/sysfs.c 2011-03-27 14:31:47.000000000 -0400
40548 +++ linux-2.6.32.44/fs/btrfs/sysfs.c 2011-04-17 15:56:46.000000000 -0400
40549 @@ -164,12 +164,12 @@ static void btrfs_root_release(struct ko
40550 complete(&root->kobj_unregister);
40551 }
40552
40553 -static struct sysfs_ops btrfs_super_attr_ops = {
40554 +static const struct sysfs_ops btrfs_super_attr_ops = {
40555 .show = btrfs_super_attr_show,
40556 .store = btrfs_super_attr_store,
40557 };
40558
40559 -static struct sysfs_ops btrfs_root_attr_ops = {
40560 +static const struct sysfs_ops btrfs_root_attr_ops = {
40561 .show = btrfs_root_attr_show,
40562 .store = btrfs_root_attr_store,
40563 };
40564 diff -urNp linux-2.6.32.44/fs/buffer.c linux-2.6.32.44/fs/buffer.c
40565 --- linux-2.6.32.44/fs/buffer.c 2011-03-27 14:31:47.000000000 -0400
40566 +++ linux-2.6.32.44/fs/buffer.c 2011-04-17 15:56:46.000000000 -0400
40567 @@ -25,6 +25,7 @@
40568 #include <linux/percpu.h>
40569 #include <linux/slab.h>
40570 #include <linux/capability.h>
40571 +#include <linux/security.h>
40572 #include <linux/blkdev.h>
40573 #include <linux/file.h>
40574 #include <linux/quotaops.h>
40575 diff -urNp linux-2.6.32.44/fs/cachefiles/bind.c linux-2.6.32.44/fs/cachefiles/bind.c
40576 --- linux-2.6.32.44/fs/cachefiles/bind.c 2011-03-27 14:31:47.000000000 -0400
40577 +++ linux-2.6.32.44/fs/cachefiles/bind.c 2011-04-17 15:56:46.000000000 -0400
40578 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
40579 args);
40580
40581 /* start by checking things over */
40582 - ASSERT(cache->fstop_percent >= 0 &&
40583 - cache->fstop_percent < cache->fcull_percent &&
40584 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
40585 cache->fcull_percent < cache->frun_percent &&
40586 cache->frun_percent < 100);
40587
40588 - ASSERT(cache->bstop_percent >= 0 &&
40589 - cache->bstop_percent < cache->bcull_percent &&
40590 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
40591 cache->bcull_percent < cache->brun_percent &&
40592 cache->brun_percent < 100);
40593
40594 diff -urNp linux-2.6.32.44/fs/cachefiles/daemon.c linux-2.6.32.44/fs/cachefiles/daemon.c
40595 --- linux-2.6.32.44/fs/cachefiles/daemon.c 2011-03-27 14:31:47.000000000 -0400
40596 +++ linux-2.6.32.44/fs/cachefiles/daemon.c 2011-04-17 15:56:46.000000000 -0400
40597 @@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(s
40598 if (test_bit(CACHEFILES_DEAD, &cache->flags))
40599 return -EIO;
40600
40601 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
40602 + if (datalen > PAGE_SIZE - 1)
40603 return -EOPNOTSUPP;
40604
40605 /* drag the command string into the kernel so we can parse it */
40606 @@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struc
40607 if (args[0] != '%' || args[1] != '\0')
40608 return -EINVAL;
40609
40610 - if (fstop < 0 || fstop >= cache->fcull_percent)
40611 + if (fstop >= cache->fcull_percent)
40612 return cachefiles_daemon_range_error(cache, args);
40613
40614 cache->fstop_percent = fstop;
40615 @@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struc
40616 if (args[0] != '%' || args[1] != '\0')
40617 return -EINVAL;
40618
40619 - if (bstop < 0 || bstop >= cache->bcull_percent)
40620 + if (bstop >= cache->bcull_percent)
40621 return cachefiles_daemon_range_error(cache, args);
40622
40623 cache->bstop_percent = bstop;
40624 diff -urNp linux-2.6.32.44/fs/cachefiles/internal.h linux-2.6.32.44/fs/cachefiles/internal.h
40625 --- linux-2.6.32.44/fs/cachefiles/internal.h 2011-03-27 14:31:47.000000000 -0400
40626 +++ linux-2.6.32.44/fs/cachefiles/internal.h 2011-05-04 17:56:28.000000000 -0400
40627 @@ -56,7 +56,7 @@ struct cachefiles_cache {
40628 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
40629 struct rb_root active_nodes; /* active nodes (can't be culled) */
40630 rwlock_t active_lock; /* lock for active_nodes */
40631 - atomic_t gravecounter; /* graveyard uniquifier */
40632 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
40633 unsigned frun_percent; /* when to stop culling (% files) */
40634 unsigned fcull_percent; /* when to start culling (% files) */
40635 unsigned fstop_percent; /* when to stop allocating (% files) */
40636 @@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struc
40637 * proc.c
40638 */
40639 #ifdef CONFIG_CACHEFILES_HISTOGRAM
40640 -extern atomic_t cachefiles_lookup_histogram[HZ];
40641 -extern atomic_t cachefiles_mkdir_histogram[HZ];
40642 -extern atomic_t cachefiles_create_histogram[HZ];
40643 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40644 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40645 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
40646
40647 extern int __init cachefiles_proc_init(void);
40648 extern void cachefiles_proc_cleanup(void);
40649 static inline
40650 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
40651 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
40652 {
40653 unsigned long jif = jiffies - start_jif;
40654 if (jif >= HZ)
40655 jif = HZ - 1;
40656 - atomic_inc(&histogram[jif]);
40657 + atomic_inc_unchecked(&histogram[jif]);
40658 }
40659
40660 #else
40661 diff -urNp linux-2.6.32.44/fs/cachefiles/namei.c linux-2.6.32.44/fs/cachefiles/namei.c
40662 --- linux-2.6.32.44/fs/cachefiles/namei.c 2011-03-27 14:31:47.000000000 -0400
40663 +++ linux-2.6.32.44/fs/cachefiles/namei.c 2011-05-04 17:56:28.000000000 -0400
40664 @@ -250,7 +250,7 @@ try_again:
40665 /* first step is to make up a grave dentry in the graveyard */
40666 sprintf(nbuffer, "%08x%08x",
40667 (uint32_t) get_seconds(),
40668 - (uint32_t) atomic_inc_return(&cache->gravecounter));
40669 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
40670
40671 /* do the multiway lock magic */
40672 trap = lock_rename(cache->graveyard, dir);
40673 diff -urNp linux-2.6.32.44/fs/cachefiles/proc.c linux-2.6.32.44/fs/cachefiles/proc.c
40674 --- linux-2.6.32.44/fs/cachefiles/proc.c 2011-03-27 14:31:47.000000000 -0400
40675 +++ linux-2.6.32.44/fs/cachefiles/proc.c 2011-05-04 17:56:28.000000000 -0400
40676 @@ -14,9 +14,9 @@
40677 #include <linux/seq_file.h>
40678 #include "internal.h"
40679
40680 -atomic_t cachefiles_lookup_histogram[HZ];
40681 -atomic_t cachefiles_mkdir_histogram[HZ];
40682 -atomic_t cachefiles_create_histogram[HZ];
40683 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40684 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40685 +atomic_unchecked_t cachefiles_create_histogram[HZ];
40686
40687 /*
40688 * display the latency histogram
40689 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
40690 return 0;
40691 default:
40692 index = (unsigned long) v - 3;
40693 - x = atomic_read(&cachefiles_lookup_histogram[index]);
40694 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
40695 - z = atomic_read(&cachefiles_create_histogram[index]);
40696 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
40697 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
40698 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
40699 if (x == 0 && y == 0 && z == 0)
40700 return 0;
40701
40702 diff -urNp linux-2.6.32.44/fs/cachefiles/rdwr.c linux-2.6.32.44/fs/cachefiles/rdwr.c
40703 --- linux-2.6.32.44/fs/cachefiles/rdwr.c 2011-03-27 14:31:47.000000000 -0400
40704 +++ linux-2.6.32.44/fs/cachefiles/rdwr.c 2011-04-17 15:56:46.000000000 -0400
40705 @@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache
40706 old_fs = get_fs();
40707 set_fs(KERNEL_DS);
40708 ret = file->f_op->write(
40709 - file, (const void __user *) data, len, &pos);
40710 + file, (__force const void __user *) data, len, &pos);
40711 set_fs(old_fs);
40712 kunmap(page);
40713 if (ret != len)
40714 diff -urNp linux-2.6.32.44/fs/cifs/cifs_debug.c linux-2.6.32.44/fs/cifs/cifs_debug.c
40715 --- linux-2.6.32.44/fs/cifs/cifs_debug.c 2011-03-27 14:31:47.000000000 -0400
40716 +++ linux-2.6.32.44/fs/cifs/cifs_debug.c 2011-05-04 17:56:28.000000000 -0400
40717 @@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(str
40718 tcon = list_entry(tmp3,
40719 struct cifsTconInfo,
40720 tcon_list);
40721 - atomic_set(&tcon->num_smbs_sent, 0);
40722 - atomic_set(&tcon->num_writes, 0);
40723 - atomic_set(&tcon->num_reads, 0);
40724 - atomic_set(&tcon->num_oplock_brks, 0);
40725 - atomic_set(&tcon->num_opens, 0);
40726 - atomic_set(&tcon->num_posixopens, 0);
40727 - atomic_set(&tcon->num_posixmkdirs, 0);
40728 - atomic_set(&tcon->num_closes, 0);
40729 - atomic_set(&tcon->num_deletes, 0);
40730 - atomic_set(&tcon->num_mkdirs, 0);
40731 - atomic_set(&tcon->num_rmdirs, 0);
40732 - atomic_set(&tcon->num_renames, 0);
40733 - atomic_set(&tcon->num_t2renames, 0);
40734 - atomic_set(&tcon->num_ffirst, 0);
40735 - atomic_set(&tcon->num_fnext, 0);
40736 - atomic_set(&tcon->num_fclose, 0);
40737 - atomic_set(&tcon->num_hardlinks, 0);
40738 - atomic_set(&tcon->num_symlinks, 0);
40739 - atomic_set(&tcon->num_locks, 0);
40740 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
40741 + atomic_set_unchecked(&tcon->num_writes, 0);
40742 + atomic_set_unchecked(&tcon->num_reads, 0);
40743 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
40744 + atomic_set_unchecked(&tcon->num_opens, 0);
40745 + atomic_set_unchecked(&tcon->num_posixopens, 0);
40746 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
40747 + atomic_set_unchecked(&tcon->num_closes, 0);
40748 + atomic_set_unchecked(&tcon->num_deletes, 0);
40749 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
40750 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
40751 + atomic_set_unchecked(&tcon->num_renames, 0);
40752 + atomic_set_unchecked(&tcon->num_t2renames, 0);
40753 + atomic_set_unchecked(&tcon->num_ffirst, 0);
40754 + atomic_set_unchecked(&tcon->num_fnext, 0);
40755 + atomic_set_unchecked(&tcon->num_fclose, 0);
40756 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
40757 + atomic_set_unchecked(&tcon->num_symlinks, 0);
40758 + atomic_set_unchecked(&tcon->num_locks, 0);
40759 }
40760 }
40761 }
40762 @@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct s
40763 if (tcon->need_reconnect)
40764 seq_puts(m, "\tDISCONNECTED ");
40765 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
40766 - atomic_read(&tcon->num_smbs_sent),
40767 - atomic_read(&tcon->num_oplock_brks));
40768 + atomic_read_unchecked(&tcon->num_smbs_sent),
40769 + atomic_read_unchecked(&tcon->num_oplock_brks));
40770 seq_printf(m, "\nReads: %d Bytes: %lld",
40771 - atomic_read(&tcon->num_reads),
40772 + atomic_read_unchecked(&tcon->num_reads),
40773 (long long)(tcon->bytes_read));
40774 seq_printf(m, "\nWrites: %d Bytes: %lld",
40775 - atomic_read(&tcon->num_writes),
40776 + atomic_read_unchecked(&tcon->num_writes),
40777 (long long)(tcon->bytes_written));
40778 seq_printf(m, "\nFlushes: %d",
40779 - atomic_read(&tcon->num_flushes));
40780 + atomic_read_unchecked(&tcon->num_flushes));
40781 seq_printf(m, "\nLocks: %d HardLinks: %d "
40782 "Symlinks: %d",
40783 - atomic_read(&tcon->num_locks),
40784 - atomic_read(&tcon->num_hardlinks),
40785 - atomic_read(&tcon->num_symlinks));
40786 + atomic_read_unchecked(&tcon->num_locks),
40787 + atomic_read_unchecked(&tcon->num_hardlinks),
40788 + atomic_read_unchecked(&tcon->num_symlinks));
40789 seq_printf(m, "\nOpens: %d Closes: %d "
40790 "Deletes: %d",
40791 - atomic_read(&tcon->num_opens),
40792 - atomic_read(&tcon->num_closes),
40793 - atomic_read(&tcon->num_deletes));
40794 + atomic_read_unchecked(&tcon->num_opens),
40795 + atomic_read_unchecked(&tcon->num_closes),
40796 + atomic_read_unchecked(&tcon->num_deletes));
40797 seq_printf(m, "\nPosix Opens: %d "
40798 "Posix Mkdirs: %d",
40799 - atomic_read(&tcon->num_posixopens),
40800 - atomic_read(&tcon->num_posixmkdirs));
40801 + atomic_read_unchecked(&tcon->num_posixopens),
40802 + atomic_read_unchecked(&tcon->num_posixmkdirs));
40803 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
40804 - atomic_read(&tcon->num_mkdirs),
40805 - atomic_read(&tcon->num_rmdirs));
40806 + atomic_read_unchecked(&tcon->num_mkdirs),
40807 + atomic_read_unchecked(&tcon->num_rmdirs));
40808 seq_printf(m, "\nRenames: %d T2 Renames %d",
40809 - atomic_read(&tcon->num_renames),
40810 - atomic_read(&tcon->num_t2renames));
40811 + atomic_read_unchecked(&tcon->num_renames),
40812 + atomic_read_unchecked(&tcon->num_t2renames));
40813 seq_printf(m, "\nFindFirst: %d FNext %d "
40814 "FClose %d",
40815 - atomic_read(&tcon->num_ffirst),
40816 - atomic_read(&tcon->num_fnext),
40817 - atomic_read(&tcon->num_fclose));
40818 + atomic_read_unchecked(&tcon->num_ffirst),
40819 + atomic_read_unchecked(&tcon->num_fnext),
40820 + atomic_read_unchecked(&tcon->num_fclose));
40821 }
40822 }
40823 }
40824 diff -urNp linux-2.6.32.44/fs/cifs/cifsglob.h linux-2.6.32.44/fs/cifs/cifsglob.h
40825 --- linux-2.6.32.44/fs/cifs/cifsglob.h 2011-08-09 18:35:29.000000000 -0400
40826 +++ linux-2.6.32.44/fs/cifs/cifsglob.h 2011-08-09 18:34:00.000000000 -0400
40827 @@ -252,28 +252,28 @@ struct cifsTconInfo {
40828 __u16 Flags; /* optional support bits */
40829 enum statusEnum tidStatus;
40830 #ifdef CONFIG_CIFS_STATS
40831 - atomic_t num_smbs_sent;
40832 - atomic_t num_writes;
40833 - atomic_t num_reads;
40834 - atomic_t num_flushes;
40835 - atomic_t num_oplock_brks;
40836 - atomic_t num_opens;
40837 - atomic_t num_closes;
40838 - atomic_t num_deletes;
40839 - atomic_t num_mkdirs;
40840 - atomic_t num_posixopens;
40841 - atomic_t num_posixmkdirs;
40842 - atomic_t num_rmdirs;
40843 - atomic_t num_renames;
40844 - atomic_t num_t2renames;
40845 - atomic_t num_ffirst;
40846 - atomic_t num_fnext;
40847 - atomic_t num_fclose;
40848 - atomic_t num_hardlinks;
40849 - atomic_t num_symlinks;
40850 - atomic_t num_locks;
40851 - atomic_t num_acl_get;
40852 - atomic_t num_acl_set;
40853 + atomic_unchecked_t num_smbs_sent;
40854 + atomic_unchecked_t num_writes;
40855 + atomic_unchecked_t num_reads;
40856 + atomic_unchecked_t num_flushes;
40857 + atomic_unchecked_t num_oplock_brks;
40858 + atomic_unchecked_t num_opens;
40859 + atomic_unchecked_t num_closes;
40860 + atomic_unchecked_t num_deletes;
40861 + atomic_unchecked_t num_mkdirs;
40862 + atomic_unchecked_t num_posixopens;
40863 + atomic_unchecked_t num_posixmkdirs;
40864 + atomic_unchecked_t num_rmdirs;
40865 + atomic_unchecked_t num_renames;
40866 + atomic_unchecked_t num_t2renames;
40867 + atomic_unchecked_t num_ffirst;
40868 + atomic_unchecked_t num_fnext;
40869 + atomic_unchecked_t num_fclose;
40870 + atomic_unchecked_t num_hardlinks;
40871 + atomic_unchecked_t num_symlinks;
40872 + atomic_unchecked_t num_locks;
40873 + atomic_unchecked_t num_acl_get;
40874 + atomic_unchecked_t num_acl_set;
40875 #ifdef CONFIG_CIFS_STATS2
40876 unsigned long long time_writes;
40877 unsigned long long time_reads;
40878 @@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const st
40879 }
40880
40881 #ifdef CONFIG_CIFS_STATS
40882 -#define cifs_stats_inc atomic_inc
40883 +#define cifs_stats_inc atomic_inc_unchecked
40884
40885 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
40886 unsigned int bytes)
40887 diff -urNp linux-2.6.32.44/fs/cifs/link.c linux-2.6.32.44/fs/cifs/link.c
40888 --- linux-2.6.32.44/fs/cifs/link.c 2011-03-27 14:31:47.000000000 -0400
40889 +++ linux-2.6.32.44/fs/cifs/link.c 2011-04-17 15:56:46.000000000 -0400
40890 @@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct
40891
40892 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
40893 {
40894 - char *p = nd_get_link(nd);
40895 + const char *p = nd_get_link(nd);
40896 if (!IS_ERR(p))
40897 kfree(p);
40898 }
40899 diff -urNp linux-2.6.32.44/fs/coda/cache.c linux-2.6.32.44/fs/coda/cache.c
40900 --- linux-2.6.32.44/fs/coda/cache.c 2011-03-27 14:31:47.000000000 -0400
40901 +++ linux-2.6.32.44/fs/coda/cache.c 2011-05-04 17:56:28.000000000 -0400
40902 @@ -24,14 +24,14 @@
40903 #include <linux/coda_fs_i.h>
40904 #include <linux/coda_cache.h>
40905
40906 -static atomic_t permission_epoch = ATOMIC_INIT(0);
40907 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
40908
40909 /* replace or extend an acl cache hit */
40910 void coda_cache_enter(struct inode *inode, int mask)
40911 {
40912 struct coda_inode_info *cii = ITOC(inode);
40913
40914 - cii->c_cached_epoch = atomic_read(&permission_epoch);
40915 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
40916 if (cii->c_uid != current_fsuid()) {
40917 cii->c_uid = current_fsuid();
40918 cii->c_cached_perm = mask;
40919 @@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inod
40920 void coda_cache_clear_inode(struct inode *inode)
40921 {
40922 struct coda_inode_info *cii = ITOC(inode);
40923 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
40924 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
40925 }
40926
40927 /* remove all acl caches */
40928 void coda_cache_clear_all(struct super_block *sb)
40929 {
40930 - atomic_inc(&permission_epoch);
40931 + atomic_inc_unchecked(&permission_epoch);
40932 }
40933
40934
40935 @@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode
40936
40937 hit = (mask & cii->c_cached_perm) == mask &&
40938 cii->c_uid == current_fsuid() &&
40939 - cii->c_cached_epoch == atomic_read(&permission_epoch);
40940 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
40941
40942 return hit;
40943 }
40944 diff -urNp linux-2.6.32.44/fs/compat_binfmt_elf.c linux-2.6.32.44/fs/compat_binfmt_elf.c
40945 --- linux-2.6.32.44/fs/compat_binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
40946 +++ linux-2.6.32.44/fs/compat_binfmt_elf.c 2011-04-17 15:56:46.000000000 -0400
40947 @@ -29,10 +29,12 @@
40948 #undef elfhdr
40949 #undef elf_phdr
40950 #undef elf_note
40951 +#undef elf_dyn
40952 #undef elf_addr_t
40953 #define elfhdr elf32_hdr
40954 #define elf_phdr elf32_phdr
40955 #define elf_note elf32_note
40956 +#define elf_dyn Elf32_Dyn
40957 #define elf_addr_t Elf32_Addr
40958
40959 /*
40960 diff -urNp linux-2.6.32.44/fs/compat.c linux-2.6.32.44/fs/compat.c
40961 --- linux-2.6.32.44/fs/compat.c 2011-04-17 17:00:52.000000000 -0400
40962 +++ linux-2.6.32.44/fs/compat.c 2011-05-16 21:46:57.000000000 -0400
40963 @@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
40964
40965 struct compat_readdir_callback {
40966 struct compat_old_linux_dirent __user *dirent;
40967 + struct file * file;
40968 int result;
40969 };
40970
40971 @@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf
40972 buf->result = -EOVERFLOW;
40973 return -EOVERFLOW;
40974 }
40975 +
40976 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40977 + return 0;
40978 +
40979 buf->result++;
40980 dirent = buf->dirent;
40981 if (!access_ok(VERIFY_WRITE, dirent,
40982 @@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(u
40983
40984 buf.result = 0;
40985 buf.dirent = dirent;
40986 + buf.file = file;
40987
40988 error = vfs_readdir(file, compat_fillonedir, &buf);
40989 if (buf.result)
40990 @@ -899,6 +905,7 @@ struct compat_linux_dirent {
40991 struct compat_getdents_callback {
40992 struct compat_linux_dirent __user *current_dir;
40993 struct compat_linux_dirent __user *previous;
40994 + struct file * file;
40995 int count;
40996 int error;
40997 };
40998 @@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, c
40999 buf->error = -EOVERFLOW;
41000 return -EOVERFLOW;
41001 }
41002 +
41003 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41004 + return 0;
41005 +
41006 dirent = buf->previous;
41007 if (dirent) {
41008 if (__put_user(offset, &dirent->d_off))
41009 @@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsi
41010 buf.previous = NULL;
41011 buf.count = count;
41012 buf.error = 0;
41013 + buf.file = file;
41014
41015 error = vfs_readdir(file, compat_filldir, &buf);
41016 if (error >= 0)
41017 @@ -987,6 +999,7 @@ out:
41018 struct compat_getdents_callback64 {
41019 struct linux_dirent64 __user *current_dir;
41020 struct linux_dirent64 __user *previous;
41021 + struct file * file;
41022 int count;
41023 int error;
41024 };
41025 @@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf
41026 buf->error = -EINVAL; /* only used if we fail.. */
41027 if (reclen > buf->count)
41028 return -EINVAL;
41029 +
41030 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41031 + return 0;
41032 +
41033 dirent = buf->previous;
41034
41035 if (dirent) {
41036 @@ -1054,6 +1071,7 @@ asmlinkage long compat_sys_getdents64(un
41037 buf.previous = NULL;
41038 buf.count = count;
41039 buf.error = 0;
41040 + buf.file = file;
41041
41042 error = vfs_readdir(file, compat_filldir64, &buf);
41043 if (error >= 0)
41044 @@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(in
41045 * verify all the pointers
41046 */
41047 ret = -EINVAL;
41048 - if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
41049 + if (nr_segs > UIO_MAXIOV)
41050 goto out;
41051 if (!file->f_op)
41052 goto out;
41053 @@ -1463,6 +1481,11 @@ int compat_do_execve(char * filename,
41054 compat_uptr_t __user *envp,
41055 struct pt_regs * regs)
41056 {
41057 +#ifdef CONFIG_GRKERNSEC
41058 + struct file *old_exec_file;
41059 + struct acl_subject_label *old_acl;
41060 + struct rlimit old_rlim[RLIM_NLIMITS];
41061 +#endif
41062 struct linux_binprm *bprm;
41063 struct file *file;
41064 struct files_struct *displaced;
41065 @@ -1499,6 +1522,19 @@ int compat_do_execve(char * filename,
41066 bprm->filename = filename;
41067 bprm->interp = filename;
41068
41069 + if (gr_process_user_ban()) {
41070 + retval = -EPERM;
41071 + goto out_file;
41072 + }
41073 +
41074 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
41075 + retval = -EAGAIN;
41076 + if (gr_handle_nproc())
41077 + goto out_file;
41078 + retval = -EACCES;
41079 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
41080 + goto out_file;
41081 +
41082 retval = bprm_mm_init(bprm);
41083 if (retval)
41084 goto out_file;
41085 @@ -1528,9 +1564,40 @@ int compat_do_execve(char * filename,
41086 if (retval < 0)
41087 goto out;
41088
41089 + if (!gr_tpe_allow(file)) {
41090 + retval = -EACCES;
41091 + goto out;
41092 + }
41093 +
41094 + if (gr_check_crash_exec(file)) {
41095 + retval = -EACCES;
41096 + goto out;
41097 + }
41098 +
41099 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
41100 +
41101 + gr_handle_exec_args_compat(bprm, argv);
41102 +
41103 +#ifdef CONFIG_GRKERNSEC
41104 + old_acl = current->acl;
41105 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
41106 + old_exec_file = current->exec_file;
41107 + get_file(file);
41108 + current->exec_file = file;
41109 +#endif
41110 +
41111 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
41112 + bprm->unsafe & LSM_UNSAFE_SHARE);
41113 + if (retval < 0)
41114 + goto out_fail;
41115 +
41116 retval = search_binary_handler(bprm, regs);
41117 if (retval < 0)
41118 - goto out;
41119 + goto out_fail;
41120 +#ifdef CONFIG_GRKERNSEC
41121 + if (old_exec_file)
41122 + fput(old_exec_file);
41123 +#endif
41124
41125 /* execve succeeded */
41126 current->fs->in_exec = 0;
41127 @@ -1541,6 +1608,14 @@ int compat_do_execve(char * filename,
41128 put_files_struct(displaced);
41129 return retval;
41130
41131 +out_fail:
41132 +#ifdef CONFIG_GRKERNSEC
41133 + current->acl = old_acl;
41134 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
41135 + fput(current->exec_file);
41136 + current->exec_file = old_exec_file;
41137 +#endif
41138 +
41139 out:
41140 if (bprm->mm) {
41141 acct_arg_size(bprm, 0);
41142 @@ -1711,6 +1786,8 @@ int compat_core_sys_select(int n, compat
41143 struct fdtable *fdt;
41144 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
41145
41146 + pax_track_stack();
41147 +
41148 if (n < 0)
41149 goto out_nofds;
41150
41151 diff -urNp linux-2.6.32.44/fs/compat_ioctl.c linux-2.6.32.44/fs/compat_ioctl.c
41152 --- linux-2.6.32.44/fs/compat_ioctl.c 2011-03-27 14:31:47.000000000 -0400
41153 +++ linux-2.6.32.44/fs/compat_ioctl.c 2011-04-23 12:56:11.000000000 -0400
41154 @@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsi
41155 up = (struct compat_video_spu_palette __user *) arg;
41156 err = get_user(palp, &up->palette);
41157 err |= get_user(length, &up->length);
41158 + if (err)
41159 + return -EFAULT;
41160
41161 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
41162 err = put_user(compat_ptr(palp), &up_native->palette);
41163 diff -urNp linux-2.6.32.44/fs/configfs/dir.c linux-2.6.32.44/fs/configfs/dir.c
41164 --- linux-2.6.32.44/fs/configfs/dir.c 2011-03-27 14:31:47.000000000 -0400
41165 +++ linux-2.6.32.44/fs/configfs/dir.c 2011-05-11 18:25:15.000000000 -0400
41166 @@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file
41167 }
41168 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
41169 struct configfs_dirent *next;
41170 - const char * name;
41171 + const unsigned char * name;
41172 + char d_name[sizeof(next->s_dentry->d_iname)];
41173 int len;
41174
41175 next = list_entry(p, struct configfs_dirent,
41176 @@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file
41177 continue;
41178
41179 name = configfs_get_name(next);
41180 - len = strlen(name);
41181 + if (next->s_dentry && name == next->s_dentry->d_iname) {
41182 + len = next->s_dentry->d_name.len;
41183 + memcpy(d_name, name, len);
41184 + name = d_name;
41185 + } else
41186 + len = strlen(name);
41187 if (next->s_dentry)
41188 ino = next->s_dentry->d_inode->i_ino;
41189 else
41190 diff -urNp linux-2.6.32.44/fs/dcache.c linux-2.6.32.44/fs/dcache.c
41191 --- linux-2.6.32.44/fs/dcache.c 2011-03-27 14:31:47.000000000 -0400
41192 +++ linux-2.6.32.44/fs/dcache.c 2011-04-23 13:32:21.000000000 -0400
41193 @@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
41194
41195 static struct kmem_cache *dentry_cache __read_mostly;
41196
41197 -#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
41198 -
41199 /*
41200 * This is the single most critical data structure when it comes
41201 * to the dcache: the hashtable for lookups. Somebody should try
41202 @@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned lon
41203 mempages -= reserve;
41204
41205 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
41206 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
41207 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
41208
41209 dcache_init();
41210 inode_init();
41211 diff -urNp linux-2.6.32.44/fs/dlm/lockspace.c linux-2.6.32.44/fs/dlm/lockspace.c
41212 --- linux-2.6.32.44/fs/dlm/lockspace.c 2011-03-27 14:31:47.000000000 -0400
41213 +++ linux-2.6.32.44/fs/dlm/lockspace.c 2011-04-17 15:56:46.000000000 -0400
41214 @@ -148,7 +148,7 @@ static void lockspace_kobj_release(struc
41215 kfree(ls);
41216 }
41217
41218 -static struct sysfs_ops dlm_attr_ops = {
41219 +static const struct sysfs_ops dlm_attr_ops = {
41220 .show = dlm_attr_show,
41221 .store = dlm_attr_store,
41222 };
41223 diff -urNp linux-2.6.32.44/fs/ecryptfs/inode.c linux-2.6.32.44/fs/ecryptfs/inode.c
41224 --- linux-2.6.32.44/fs/ecryptfs/inode.c 2011-03-27 14:31:47.000000000 -0400
41225 +++ linux-2.6.32.44/fs/ecryptfs/inode.c 2011-04-17 15:56:46.000000000 -0400
41226 @@ -660,7 +660,7 @@ static int ecryptfs_readlink_lower(struc
41227 old_fs = get_fs();
41228 set_fs(get_ds());
41229 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
41230 - (char __user *)lower_buf,
41231 + (__force char __user *)lower_buf,
41232 lower_bufsiz);
41233 set_fs(old_fs);
41234 if (rc < 0)
41235 @@ -706,7 +706,7 @@ static void *ecryptfs_follow_link(struct
41236 }
41237 old_fs = get_fs();
41238 set_fs(get_ds());
41239 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
41240 + rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
41241 set_fs(old_fs);
41242 if (rc < 0)
41243 goto out_free;
41244 diff -urNp linux-2.6.32.44/fs/exec.c linux-2.6.32.44/fs/exec.c
41245 --- linux-2.6.32.44/fs/exec.c 2011-06-25 12:55:34.000000000 -0400
41246 +++ linux-2.6.32.44/fs/exec.c 2011-07-06 19:53:33.000000000 -0400
41247 @@ -56,12 +56,24 @@
41248 #include <linux/fsnotify.h>
41249 #include <linux/fs_struct.h>
41250 #include <linux/pipe_fs_i.h>
41251 +#include <linux/random.h>
41252 +#include <linux/seq_file.h>
41253 +
41254 +#ifdef CONFIG_PAX_REFCOUNT
41255 +#include <linux/kallsyms.h>
41256 +#include <linux/kdebug.h>
41257 +#endif
41258
41259 #include <asm/uaccess.h>
41260 #include <asm/mmu_context.h>
41261 #include <asm/tlb.h>
41262 #include "internal.h"
41263
41264 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
41265 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
41266 +EXPORT_SYMBOL(pax_set_initial_flags_func);
41267 +#endif
41268 +
41269 int core_uses_pid;
41270 char core_pattern[CORENAME_MAX_SIZE] = "core";
41271 unsigned int core_pipe_limit;
41272 @@ -115,7 +127,7 @@ SYSCALL_DEFINE1(uselib, const char __use
41273 goto out;
41274
41275 file = do_filp_open(AT_FDCWD, tmp,
41276 - O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
41277 + O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
41278 MAY_READ | MAY_EXEC | MAY_OPEN);
41279 putname(tmp);
41280 error = PTR_ERR(file);
41281 @@ -178,18 +190,10 @@ struct page *get_arg_page(struct linux_b
41282 int write)
41283 {
41284 struct page *page;
41285 - int ret;
41286
41287 -#ifdef CONFIG_STACK_GROWSUP
41288 - if (write) {
41289 - ret = expand_stack_downwards(bprm->vma, pos);
41290 - if (ret < 0)
41291 - return NULL;
41292 - }
41293 -#endif
41294 - ret = get_user_pages(current, bprm->mm, pos,
41295 - 1, write, 1, &page, NULL);
41296 - if (ret <= 0)
41297 + if (0 > expand_stack_downwards(bprm->vma, pos))
41298 + return NULL;
41299 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
41300 return NULL;
41301
41302 if (write) {
41303 @@ -263,6 +267,11 @@ static int __bprm_mm_init(struct linux_b
41304 vma->vm_end = STACK_TOP_MAX;
41305 vma->vm_start = vma->vm_end - PAGE_SIZE;
41306 vma->vm_flags = VM_STACK_FLAGS;
41307 +
41308 +#ifdef CONFIG_PAX_SEGMEXEC
41309 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
41310 +#endif
41311 +
41312 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
41313
41314 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
41315 @@ -276,6 +285,12 @@ static int __bprm_mm_init(struct linux_b
41316 mm->stack_vm = mm->total_vm = 1;
41317 up_write(&mm->mmap_sem);
41318 bprm->p = vma->vm_end - sizeof(void *);
41319 +
41320 +#ifdef CONFIG_PAX_RANDUSTACK
41321 + if (randomize_va_space)
41322 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
41323 +#endif
41324 +
41325 return 0;
41326 err:
41327 up_write(&mm->mmap_sem);
41328 @@ -510,7 +525,7 @@ int copy_strings_kernel(int argc,char **
41329 int r;
41330 mm_segment_t oldfs = get_fs();
41331 set_fs(KERNEL_DS);
41332 - r = copy_strings(argc, (char __user * __user *)argv, bprm);
41333 + r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
41334 set_fs(oldfs);
41335 return r;
41336 }
41337 @@ -540,7 +555,8 @@ static int shift_arg_pages(struct vm_are
41338 unsigned long new_end = old_end - shift;
41339 struct mmu_gather *tlb;
41340
41341 - BUG_ON(new_start > new_end);
41342 + if (new_start >= new_end || new_start < mmap_min_addr)
41343 + return -ENOMEM;
41344
41345 /*
41346 * ensure there are no vmas between where we want to go
41347 @@ -549,6 +565,10 @@ static int shift_arg_pages(struct vm_are
41348 if (vma != find_vma(mm, new_start))
41349 return -EFAULT;
41350
41351 +#ifdef CONFIG_PAX_SEGMEXEC
41352 + BUG_ON(pax_find_mirror_vma(vma));
41353 +#endif
41354 +
41355 /*
41356 * cover the whole range: [new_start, old_end)
41357 */
41358 @@ -630,10 +650,6 @@ int setup_arg_pages(struct linux_binprm
41359 stack_top = arch_align_stack(stack_top);
41360 stack_top = PAGE_ALIGN(stack_top);
41361
41362 - if (unlikely(stack_top < mmap_min_addr) ||
41363 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
41364 - return -ENOMEM;
41365 -
41366 stack_shift = vma->vm_end - stack_top;
41367
41368 bprm->p -= stack_shift;
41369 @@ -645,6 +661,14 @@ int setup_arg_pages(struct linux_binprm
41370 bprm->exec -= stack_shift;
41371
41372 down_write(&mm->mmap_sem);
41373 +
41374 + /* Move stack pages down in memory. */
41375 + if (stack_shift) {
41376 + ret = shift_arg_pages(vma, stack_shift);
41377 + if (ret)
41378 + goto out_unlock;
41379 + }
41380 +
41381 vm_flags = VM_STACK_FLAGS;
41382
41383 /*
41384 @@ -658,19 +682,24 @@ int setup_arg_pages(struct linux_binprm
41385 vm_flags &= ~VM_EXEC;
41386 vm_flags |= mm->def_flags;
41387
41388 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41389 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41390 + vm_flags &= ~VM_EXEC;
41391 +
41392 +#ifdef CONFIG_PAX_MPROTECT
41393 + if (mm->pax_flags & MF_PAX_MPROTECT)
41394 + vm_flags &= ~VM_MAYEXEC;
41395 +#endif
41396 +
41397 + }
41398 +#endif
41399 +
41400 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
41401 vm_flags);
41402 if (ret)
41403 goto out_unlock;
41404 BUG_ON(prev != vma);
41405
41406 - /* Move stack pages down in memory. */
41407 - if (stack_shift) {
41408 - ret = shift_arg_pages(vma, stack_shift);
41409 - if (ret)
41410 - goto out_unlock;
41411 - }
41412 -
41413 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
41414 stack_size = vma->vm_end - vma->vm_start;
41415 /*
41416 @@ -707,7 +736,7 @@ struct file *open_exec(const char *name)
41417 int err;
41418
41419 file = do_filp_open(AT_FDCWD, name,
41420 - O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
41421 + O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
41422 MAY_EXEC | MAY_OPEN);
41423 if (IS_ERR(file))
41424 goto out;
41425 @@ -744,7 +773,7 @@ int kernel_read(struct file *file, loff_
41426 old_fs = get_fs();
41427 set_fs(get_ds());
41428 /* The cast to a user pointer is valid due to the set_fs() */
41429 - result = vfs_read(file, (void __user *)addr, count, &pos);
41430 + result = vfs_read(file, (__force void __user *)addr, count, &pos);
41431 set_fs(old_fs);
41432 return result;
41433 }
41434 @@ -1152,7 +1181,7 @@ int check_unsafe_exec(struct linux_binpr
41435 }
41436 rcu_read_unlock();
41437
41438 - if (p->fs->users > n_fs) {
41439 + if (atomic_read(&p->fs->users) > n_fs) {
41440 bprm->unsafe |= LSM_UNSAFE_SHARE;
41441 } else {
41442 res = -EAGAIN;
41443 @@ -1347,6 +1376,11 @@ int do_execve(char * filename,
41444 char __user *__user *envp,
41445 struct pt_regs * regs)
41446 {
41447 +#ifdef CONFIG_GRKERNSEC
41448 + struct file *old_exec_file;
41449 + struct acl_subject_label *old_acl;
41450 + struct rlimit old_rlim[RLIM_NLIMITS];
41451 +#endif
41452 struct linux_binprm *bprm;
41453 struct file *file;
41454 struct files_struct *displaced;
41455 @@ -1383,6 +1417,23 @@ int do_execve(char * filename,
41456 bprm->filename = filename;
41457 bprm->interp = filename;
41458
41459 + if (gr_process_user_ban()) {
41460 + retval = -EPERM;
41461 + goto out_file;
41462 + }
41463 +
41464 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
41465 +
41466 + if (gr_handle_nproc()) {
41467 + retval = -EAGAIN;
41468 + goto out_file;
41469 + }
41470 +
41471 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
41472 + retval = -EACCES;
41473 + goto out_file;
41474 + }
41475 +
41476 retval = bprm_mm_init(bprm);
41477 if (retval)
41478 goto out_file;
41479 @@ -1412,10 +1463,41 @@ int do_execve(char * filename,
41480 if (retval < 0)
41481 goto out;
41482
41483 + if (!gr_tpe_allow(file)) {
41484 + retval = -EACCES;
41485 + goto out;
41486 + }
41487 +
41488 + if (gr_check_crash_exec(file)) {
41489 + retval = -EACCES;
41490 + goto out;
41491 + }
41492 +
41493 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
41494 +
41495 + gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
41496 +
41497 +#ifdef CONFIG_GRKERNSEC
41498 + old_acl = current->acl;
41499 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
41500 + old_exec_file = current->exec_file;
41501 + get_file(file);
41502 + current->exec_file = file;
41503 +#endif
41504 +
41505 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
41506 + bprm->unsafe & LSM_UNSAFE_SHARE);
41507 + if (retval < 0)
41508 + goto out_fail;
41509 +
41510 current->flags &= ~PF_KTHREAD;
41511 retval = search_binary_handler(bprm,regs);
41512 if (retval < 0)
41513 - goto out;
41514 + goto out_fail;
41515 +#ifdef CONFIG_GRKERNSEC
41516 + if (old_exec_file)
41517 + fput(old_exec_file);
41518 +#endif
41519
41520 /* execve succeeded */
41521 current->fs->in_exec = 0;
41522 @@ -1426,6 +1508,14 @@ int do_execve(char * filename,
41523 put_files_struct(displaced);
41524 return retval;
41525
41526 +out_fail:
41527 +#ifdef CONFIG_GRKERNSEC
41528 + current->acl = old_acl;
41529 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
41530 + fput(current->exec_file);
41531 + current->exec_file = old_exec_file;
41532 +#endif
41533 +
41534 out:
41535 if (bprm->mm) {
41536 acct_arg_size(bprm, 0);
41537 @@ -1591,6 +1681,220 @@ out:
41538 return ispipe;
41539 }
41540
41541 +int pax_check_flags(unsigned long *flags)
41542 +{
41543 + int retval = 0;
41544 +
41545 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
41546 + if (*flags & MF_PAX_SEGMEXEC)
41547 + {
41548 + *flags &= ~MF_PAX_SEGMEXEC;
41549 + retval = -EINVAL;
41550 + }
41551 +#endif
41552 +
41553 + if ((*flags & MF_PAX_PAGEEXEC)
41554 +
41555 +#ifdef CONFIG_PAX_PAGEEXEC
41556 + && (*flags & MF_PAX_SEGMEXEC)
41557 +#endif
41558 +
41559 + )
41560 + {
41561 + *flags &= ~MF_PAX_PAGEEXEC;
41562 + retval = -EINVAL;
41563 + }
41564 +
41565 + if ((*flags & MF_PAX_MPROTECT)
41566 +
41567 +#ifdef CONFIG_PAX_MPROTECT
41568 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41569 +#endif
41570 +
41571 + )
41572 + {
41573 + *flags &= ~MF_PAX_MPROTECT;
41574 + retval = -EINVAL;
41575 + }
41576 +
41577 + if ((*flags & MF_PAX_EMUTRAMP)
41578 +
41579 +#ifdef CONFIG_PAX_EMUTRAMP
41580 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41581 +#endif
41582 +
41583 + )
41584 + {
41585 + *flags &= ~MF_PAX_EMUTRAMP;
41586 + retval = -EINVAL;
41587 + }
41588 +
41589 + return retval;
41590 +}
41591 +
41592 +EXPORT_SYMBOL(pax_check_flags);
41593 +
41594 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41595 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
41596 +{
41597 + struct task_struct *tsk = current;
41598 + struct mm_struct *mm = current->mm;
41599 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
41600 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
41601 + char *path_exec = NULL;
41602 + char *path_fault = NULL;
41603 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
41604 +
41605 + if (buffer_exec && buffer_fault) {
41606 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
41607 +
41608 + down_read(&mm->mmap_sem);
41609 + vma = mm->mmap;
41610 + while (vma && (!vma_exec || !vma_fault)) {
41611 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
41612 + vma_exec = vma;
41613 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
41614 + vma_fault = vma;
41615 + vma = vma->vm_next;
41616 + }
41617 + if (vma_exec) {
41618 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
41619 + if (IS_ERR(path_exec))
41620 + path_exec = "<path too long>";
41621 + else {
41622 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
41623 + if (path_exec) {
41624 + *path_exec = 0;
41625 + path_exec = buffer_exec;
41626 + } else
41627 + path_exec = "<path too long>";
41628 + }
41629 + }
41630 + if (vma_fault) {
41631 + start = vma_fault->vm_start;
41632 + end = vma_fault->vm_end;
41633 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
41634 + if (vma_fault->vm_file) {
41635 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
41636 + if (IS_ERR(path_fault))
41637 + path_fault = "<path too long>";
41638 + else {
41639 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
41640 + if (path_fault) {
41641 + *path_fault = 0;
41642 + path_fault = buffer_fault;
41643 + } else
41644 + path_fault = "<path too long>";
41645 + }
41646 + } else
41647 + path_fault = "<anonymous mapping>";
41648 + }
41649 + up_read(&mm->mmap_sem);
41650 + }
41651 + if (tsk->signal->curr_ip)
41652 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
41653 + else
41654 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
41655 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
41656 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
41657 + task_uid(tsk), task_euid(tsk), pc, sp);
41658 + free_page((unsigned long)buffer_exec);
41659 + free_page((unsigned long)buffer_fault);
41660 + pax_report_insns(pc, sp);
41661 + do_coredump(SIGKILL, SIGKILL, regs);
41662 +}
41663 +#endif
41664 +
41665 +#ifdef CONFIG_PAX_REFCOUNT
41666 +void pax_report_refcount_overflow(struct pt_regs *regs)
41667 +{
41668 + if (current->signal->curr_ip)
41669 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
41670 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
41671 + else
41672 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
41673 + current->comm, task_pid_nr(current), current_uid(), current_euid());
41674 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
41675 + show_regs(regs);
41676 + force_sig_specific(SIGKILL, current);
41677 +}
41678 +#endif
41679 +
41680 +#ifdef CONFIG_PAX_USERCOPY
41681 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
41682 +int object_is_on_stack(const void *obj, unsigned long len)
41683 +{
41684 + const void * const stack = task_stack_page(current);
41685 + const void * const stackend = stack + THREAD_SIZE;
41686 +
41687 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41688 + const void *frame = NULL;
41689 + const void *oldframe;
41690 +#endif
41691 +
41692 + if (obj + len < obj)
41693 + return -1;
41694 +
41695 + if (obj + len <= stack || stackend <= obj)
41696 + return 0;
41697 +
41698 + if (obj < stack || stackend < obj + len)
41699 + return -1;
41700 +
41701 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41702 + oldframe = __builtin_frame_address(1);
41703 + if (oldframe)
41704 + frame = __builtin_frame_address(2);
41705 + /*
41706 + low ----------------------------------------------> high
41707 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
41708 + ^----------------^
41709 + allow copies only within here
41710 + */
41711 + while (stack <= frame && frame < stackend) {
41712 + /* if obj + len extends past the last frame, this
41713 + check won't pass and the next frame will be 0,
41714 + causing us to bail out and correctly report
41715 + the copy as invalid
41716 + */
41717 + if (obj + len <= frame)
41718 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
41719 + oldframe = frame;
41720 + frame = *(const void * const *)frame;
41721 + }
41722 + return -1;
41723 +#else
41724 + return 1;
41725 +#endif
41726 +}
41727 +
41728 +
41729 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
41730 +{
41731 + if (current->signal->curr_ip)
41732 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41733 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41734 + else
41735 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41736 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41737 +
41738 + dump_stack();
41739 + gr_handle_kernel_exploit();
41740 + do_group_exit(SIGKILL);
41741 +}
41742 +#endif
41743 +
41744 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
41745 +void pax_track_stack(void)
41746 +{
41747 + unsigned long sp = (unsigned long)&sp;
41748 + if (sp < current_thread_info()->lowest_stack &&
41749 + sp > (unsigned long)task_stack_page(current))
41750 + current_thread_info()->lowest_stack = sp;
41751 +}
41752 +EXPORT_SYMBOL(pax_track_stack);
41753 +#endif
41754 +
41755 static int zap_process(struct task_struct *start)
41756 {
41757 struct task_struct *t;
41758 @@ -1793,17 +2097,17 @@ static void wait_for_dump_helpers(struct
41759 pipe = file->f_path.dentry->d_inode->i_pipe;
41760
41761 pipe_lock(pipe);
41762 - pipe->readers++;
41763 - pipe->writers--;
41764 + atomic_inc(&pipe->readers);
41765 + atomic_dec(&pipe->writers);
41766
41767 - while ((pipe->readers > 1) && (!signal_pending(current))) {
41768 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
41769 wake_up_interruptible_sync(&pipe->wait);
41770 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
41771 pipe_wait(pipe);
41772 }
41773
41774 - pipe->readers--;
41775 - pipe->writers++;
41776 + atomic_dec(&pipe->readers);
41777 + atomic_inc(&pipe->writers);
41778 pipe_unlock(pipe);
41779
41780 }
41781 @@ -1826,10 +2130,13 @@ void do_coredump(long signr, int exit_co
41782 char **helper_argv = NULL;
41783 int helper_argc = 0;
41784 int dump_count = 0;
41785 - static atomic_t core_dump_count = ATOMIC_INIT(0);
41786 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
41787
41788 audit_core_dumps(signr);
41789
41790 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
41791 + gr_handle_brute_attach(current, mm->flags);
41792 +
41793 binfmt = mm->binfmt;
41794 if (!binfmt || !binfmt->core_dump)
41795 goto fail;
41796 @@ -1874,6 +2181,8 @@ void do_coredump(long signr, int exit_co
41797 */
41798 clear_thread_flag(TIF_SIGPENDING);
41799
41800 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
41801 +
41802 /*
41803 * lock_kernel() because format_corename() is controlled by sysctl, which
41804 * uses lock_kernel()
41805 @@ -1908,7 +2217,7 @@ void do_coredump(long signr, int exit_co
41806 goto fail_unlock;
41807 }
41808
41809 - dump_count = atomic_inc_return(&core_dump_count);
41810 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
41811 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
41812 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
41813 task_tgid_vnr(current), current->comm);
41814 @@ -1972,7 +2281,7 @@ close_fail:
41815 filp_close(file, NULL);
41816 fail_dropcount:
41817 if (dump_count)
41818 - atomic_dec(&core_dump_count);
41819 + atomic_dec_unchecked(&core_dump_count);
41820 fail_unlock:
41821 if (helper_argv)
41822 argv_free(helper_argv);
41823 diff -urNp linux-2.6.32.44/fs/ext2/balloc.c linux-2.6.32.44/fs/ext2/balloc.c
41824 --- linux-2.6.32.44/fs/ext2/balloc.c 2011-03-27 14:31:47.000000000 -0400
41825 +++ linux-2.6.32.44/fs/ext2/balloc.c 2011-04-17 15:56:46.000000000 -0400
41826 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
41827
41828 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
41829 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
41830 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
41831 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
41832 sbi->s_resuid != current_fsuid() &&
41833 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
41834 return 0;
41835 diff -urNp linux-2.6.32.44/fs/ext3/balloc.c linux-2.6.32.44/fs/ext3/balloc.c
41836 --- linux-2.6.32.44/fs/ext3/balloc.c 2011-03-27 14:31:47.000000000 -0400
41837 +++ linux-2.6.32.44/fs/ext3/balloc.c 2011-04-17 15:56:46.000000000 -0400
41838 @@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct e
41839
41840 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
41841 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
41842 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
41843 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
41844 sbi->s_resuid != current_fsuid() &&
41845 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
41846 return 0;
41847 diff -urNp linux-2.6.32.44/fs/ext4/balloc.c linux-2.6.32.44/fs/ext4/balloc.c
41848 --- linux-2.6.32.44/fs/ext4/balloc.c 2011-03-27 14:31:47.000000000 -0400
41849 +++ linux-2.6.32.44/fs/ext4/balloc.c 2011-04-17 15:56:46.000000000 -0400
41850 @@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_
41851 /* Hm, nope. Are (enough) root reserved blocks available? */
41852 if (sbi->s_resuid == current_fsuid() ||
41853 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
41854 - capable(CAP_SYS_RESOURCE)) {
41855 + capable_nolog(CAP_SYS_RESOURCE)) {
41856 if (free_blocks >= (nblocks + dirty_blocks))
41857 return 1;
41858 }
41859 diff -urNp linux-2.6.32.44/fs/ext4/ext4.h linux-2.6.32.44/fs/ext4/ext4.h
41860 --- linux-2.6.32.44/fs/ext4/ext4.h 2011-03-27 14:31:47.000000000 -0400
41861 +++ linux-2.6.32.44/fs/ext4/ext4.h 2011-04-17 15:56:46.000000000 -0400
41862 @@ -1078,19 +1078,19 @@ struct ext4_sb_info {
41863
41864 /* stats for buddy allocator */
41865 spinlock_t s_mb_pa_lock;
41866 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
41867 - atomic_t s_bal_success; /* we found long enough chunks */
41868 - atomic_t s_bal_allocated; /* in blocks */
41869 - atomic_t s_bal_ex_scanned; /* total extents scanned */
41870 - atomic_t s_bal_goals; /* goal hits */
41871 - atomic_t s_bal_breaks; /* too long searches */
41872 - atomic_t s_bal_2orders; /* 2^order hits */
41873 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
41874 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
41875 + atomic_unchecked_t s_bal_allocated; /* in blocks */
41876 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
41877 + atomic_unchecked_t s_bal_goals; /* goal hits */
41878 + atomic_unchecked_t s_bal_breaks; /* too long searches */
41879 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
41880 spinlock_t s_bal_lock;
41881 unsigned long s_mb_buddies_generated;
41882 unsigned long long s_mb_generation_time;
41883 - atomic_t s_mb_lost_chunks;
41884 - atomic_t s_mb_preallocated;
41885 - atomic_t s_mb_discarded;
41886 + atomic_unchecked_t s_mb_lost_chunks;
41887 + atomic_unchecked_t s_mb_preallocated;
41888 + atomic_unchecked_t s_mb_discarded;
41889 atomic_t s_lock_busy;
41890
41891 /* locality groups */
41892 diff -urNp linux-2.6.32.44/fs/ext4/mballoc.c linux-2.6.32.44/fs/ext4/mballoc.c
41893 --- linux-2.6.32.44/fs/ext4/mballoc.c 2011-06-25 12:55:34.000000000 -0400
41894 +++ linux-2.6.32.44/fs/ext4/mballoc.c 2011-06-25 12:56:37.000000000 -0400
41895 @@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ex
41896 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
41897
41898 if (EXT4_SB(sb)->s_mb_stats)
41899 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
41900 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
41901
41902 break;
41903 }
41904 @@ -2131,7 +2131,7 @@ repeat:
41905 ac->ac_status = AC_STATUS_CONTINUE;
41906 ac->ac_flags |= EXT4_MB_HINT_FIRST;
41907 cr = 3;
41908 - atomic_inc(&sbi->s_mb_lost_chunks);
41909 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
41910 goto repeat;
41911 }
41912 }
41913 @@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struc
41914 ext4_grpblk_t counters[16];
41915 } sg;
41916
41917 + pax_track_stack();
41918 +
41919 group--;
41920 if (group == 0)
41921 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
41922 @@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *
41923 if (sbi->s_mb_stats) {
41924 printk(KERN_INFO
41925 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
41926 - atomic_read(&sbi->s_bal_allocated),
41927 - atomic_read(&sbi->s_bal_reqs),
41928 - atomic_read(&sbi->s_bal_success));
41929 + atomic_read_unchecked(&sbi->s_bal_allocated),
41930 + atomic_read_unchecked(&sbi->s_bal_reqs),
41931 + atomic_read_unchecked(&sbi->s_bal_success));
41932 printk(KERN_INFO
41933 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
41934 "%u 2^N hits, %u breaks, %u lost\n",
41935 - atomic_read(&sbi->s_bal_ex_scanned),
41936 - atomic_read(&sbi->s_bal_goals),
41937 - atomic_read(&sbi->s_bal_2orders),
41938 - atomic_read(&sbi->s_bal_breaks),
41939 - atomic_read(&sbi->s_mb_lost_chunks));
41940 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
41941 + atomic_read_unchecked(&sbi->s_bal_goals),
41942 + atomic_read_unchecked(&sbi->s_bal_2orders),
41943 + atomic_read_unchecked(&sbi->s_bal_breaks),
41944 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
41945 printk(KERN_INFO
41946 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
41947 sbi->s_mb_buddies_generated++,
41948 sbi->s_mb_generation_time);
41949 printk(KERN_INFO
41950 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
41951 - atomic_read(&sbi->s_mb_preallocated),
41952 - atomic_read(&sbi->s_mb_discarded));
41953 + atomic_read_unchecked(&sbi->s_mb_preallocated),
41954 + atomic_read_unchecked(&sbi->s_mb_discarded));
41955 }
41956
41957 free_percpu(sbi->s_locality_groups);
41958 @@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct
41959 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
41960
41961 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
41962 - atomic_inc(&sbi->s_bal_reqs);
41963 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
41964 + atomic_inc_unchecked(&sbi->s_bal_reqs);
41965 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
41966 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
41967 - atomic_inc(&sbi->s_bal_success);
41968 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
41969 + atomic_inc_unchecked(&sbi->s_bal_success);
41970 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
41971 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
41972 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
41973 - atomic_inc(&sbi->s_bal_goals);
41974 + atomic_inc_unchecked(&sbi->s_bal_goals);
41975 if (ac->ac_found > sbi->s_mb_max_to_scan)
41976 - atomic_inc(&sbi->s_bal_breaks);
41977 + atomic_inc_unchecked(&sbi->s_bal_breaks);
41978 }
41979
41980 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
41981 @@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
41982 trace_ext4_mb_new_inode_pa(ac, pa);
41983
41984 ext4_mb_use_inode_pa(ac, pa);
41985 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41986 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41987
41988 ei = EXT4_I(ac->ac_inode);
41989 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
41990 @@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
41991 trace_ext4_mb_new_group_pa(ac, pa);
41992
41993 ext4_mb_use_group_pa(ac, pa);
41994 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41995 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41996
41997 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
41998 lg = ac->ac_lg;
41999 @@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
42000 * from the bitmap and continue.
42001 */
42002 }
42003 - atomic_add(free, &sbi->s_mb_discarded);
42004 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
42005
42006 return err;
42007 }
42008 @@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_bud
42009 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
42010 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
42011 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
42012 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42013 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42014
42015 if (ac) {
42016 ac->ac_sb = sb;
42017 diff -urNp linux-2.6.32.44/fs/ext4/super.c linux-2.6.32.44/fs/ext4/super.c
42018 --- linux-2.6.32.44/fs/ext4/super.c 2011-03-27 14:31:47.000000000 -0400
42019 +++ linux-2.6.32.44/fs/ext4/super.c 2011-04-17 15:56:46.000000000 -0400
42020 @@ -2287,7 +2287,7 @@ static void ext4_sb_release(struct kobje
42021 }
42022
42023
42024 -static struct sysfs_ops ext4_attr_ops = {
42025 +static const struct sysfs_ops ext4_attr_ops = {
42026 .show = ext4_attr_show,
42027 .store = ext4_attr_store,
42028 };
42029 diff -urNp linux-2.6.32.44/fs/fcntl.c linux-2.6.32.44/fs/fcntl.c
42030 --- linux-2.6.32.44/fs/fcntl.c 2011-03-27 14:31:47.000000000 -0400
42031 +++ linux-2.6.32.44/fs/fcntl.c 2011-04-17 15:56:46.000000000 -0400
42032 @@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct
42033 if (err)
42034 return err;
42035
42036 + if (gr_handle_chroot_fowner(pid, type))
42037 + return -ENOENT;
42038 + if (gr_check_protected_task_fowner(pid, type))
42039 + return -EACCES;
42040 +
42041 f_modown(filp, pid, type, force);
42042 return 0;
42043 }
42044 @@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned in
42045 switch (cmd) {
42046 case F_DUPFD:
42047 case F_DUPFD_CLOEXEC:
42048 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
42049 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
42050 break;
42051 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
42052 diff -urNp linux-2.6.32.44/fs/fifo.c linux-2.6.32.44/fs/fifo.c
42053 --- linux-2.6.32.44/fs/fifo.c 2011-03-27 14:31:47.000000000 -0400
42054 +++ linux-2.6.32.44/fs/fifo.c 2011-04-17 15:56:46.000000000 -0400
42055 @@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode
42056 */
42057 filp->f_op = &read_pipefifo_fops;
42058 pipe->r_counter++;
42059 - if (pipe->readers++ == 0)
42060 + if (atomic_inc_return(&pipe->readers) == 1)
42061 wake_up_partner(inode);
42062
42063 - if (!pipe->writers) {
42064 + if (!atomic_read(&pipe->writers)) {
42065 if ((filp->f_flags & O_NONBLOCK)) {
42066 /* suppress POLLHUP until we have
42067 * seen a writer */
42068 @@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode
42069 * errno=ENXIO when there is no process reading the FIFO.
42070 */
42071 ret = -ENXIO;
42072 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
42073 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
42074 goto err;
42075
42076 filp->f_op = &write_pipefifo_fops;
42077 pipe->w_counter++;
42078 - if (!pipe->writers++)
42079 + if (atomic_inc_return(&pipe->writers) == 1)
42080 wake_up_partner(inode);
42081
42082 - if (!pipe->readers) {
42083 + if (!atomic_read(&pipe->readers)) {
42084 wait_for_partner(inode, &pipe->r_counter);
42085 if (signal_pending(current))
42086 goto err_wr;
42087 @@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode
42088 */
42089 filp->f_op = &rdwr_pipefifo_fops;
42090
42091 - pipe->readers++;
42092 - pipe->writers++;
42093 + atomic_inc(&pipe->readers);
42094 + atomic_inc(&pipe->writers);
42095 pipe->r_counter++;
42096 pipe->w_counter++;
42097 - if (pipe->readers == 1 || pipe->writers == 1)
42098 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
42099 wake_up_partner(inode);
42100 break;
42101
42102 @@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode
42103 return 0;
42104
42105 err_rd:
42106 - if (!--pipe->readers)
42107 + if (atomic_dec_and_test(&pipe->readers))
42108 wake_up_interruptible(&pipe->wait);
42109 ret = -ERESTARTSYS;
42110 goto err;
42111
42112 err_wr:
42113 - if (!--pipe->writers)
42114 + if (atomic_dec_and_test(&pipe->writers))
42115 wake_up_interruptible(&pipe->wait);
42116 ret = -ERESTARTSYS;
42117 goto err;
42118
42119 err:
42120 - if (!pipe->readers && !pipe->writers)
42121 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
42122 free_pipe_info(inode);
42123
42124 err_nocleanup:
42125 diff -urNp linux-2.6.32.44/fs/file.c linux-2.6.32.44/fs/file.c
42126 --- linux-2.6.32.44/fs/file.c 2011-03-27 14:31:47.000000000 -0400
42127 +++ linux-2.6.32.44/fs/file.c 2011-04-17 15:56:46.000000000 -0400
42128 @@ -14,6 +14,7 @@
42129 #include <linux/slab.h>
42130 #include <linux/vmalloc.h>
42131 #include <linux/file.h>
42132 +#include <linux/security.h>
42133 #include <linux/fdtable.h>
42134 #include <linux/bitops.h>
42135 #include <linux/interrupt.h>
42136 @@ -257,6 +258,8 @@ int expand_files(struct files_struct *fi
42137 * N.B. For clone tasks sharing a files structure, this test
42138 * will limit the total number of files that can be opened.
42139 */
42140 +
42141 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
42142 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
42143 return -EMFILE;
42144
42145 diff -urNp linux-2.6.32.44/fs/filesystems.c linux-2.6.32.44/fs/filesystems.c
42146 --- linux-2.6.32.44/fs/filesystems.c 2011-03-27 14:31:47.000000000 -0400
42147 +++ linux-2.6.32.44/fs/filesystems.c 2011-04-17 15:56:46.000000000 -0400
42148 @@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(con
42149 int len = dot ? dot - name : strlen(name);
42150
42151 fs = __get_fs_type(name, len);
42152 +
42153 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
42154 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
42155 +#else
42156 if (!fs && (request_module("%.*s", len, name) == 0))
42157 +#endif
42158 fs = __get_fs_type(name, len);
42159
42160 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
42161 diff -urNp linux-2.6.32.44/fs/fscache/cookie.c linux-2.6.32.44/fs/fscache/cookie.c
42162 --- linux-2.6.32.44/fs/fscache/cookie.c 2011-03-27 14:31:47.000000000 -0400
42163 +++ linux-2.6.32.44/fs/fscache/cookie.c 2011-05-04 17:56:28.000000000 -0400
42164 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
42165 parent ? (char *) parent->def->name : "<no-parent>",
42166 def->name, netfs_data);
42167
42168 - fscache_stat(&fscache_n_acquires);
42169 + fscache_stat_unchecked(&fscache_n_acquires);
42170
42171 /* if there's no parent cookie, then we don't create one here either */
42172 if (!parent) {
42173 - fscache_stat(&fscache_n_acquires_null);
42174 + fscache_stat_unchecked(&fscache_n_acquires_null);
42175 _leave(" [no parent]");
42176 return NULL;
42177 }
42178 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
42179 /* allocate and initialise a cookie */
42180 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
42181 if (!cookie) {
42182 - fscache_stat(&fscache_n_acquires_oom);
42183 + fscache_stat_unchecked(&fscache_n_acquires_oom);
42184 _leave(" [ENOMEM]");
42185 return NULL;
42186 }
42187 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
42188
42189 switch (cookie->def->type) {
42190 case FSCACHE_COOKIE_TYPE_INDEX:
42191 - fscache_stat(&fscache_n_cookie_index);
42192 + fscache_stat_unchecked(&fscache_n_cookie_index);
42193 break;
42194 case FSCACHE_COOKIE_TYPE_DATAFILE:
42195 - fscache_stat(&fscache_n_cookie_data);
42196 + fscache_stat_unchecked(&fscache_n_cookie_data);
42197 break;
42198 default:
42199 - fscache_stat(&fscache_n_cookie_special);
42200 + fscache_stat_unchecked(&fscache_n_cookie_special);
42201 break;
42202 }
42203
42204 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
42205 if (fscache_acquire_non_index_cookie(cookie) < 0) {
42206 atomic_dec(&parent->n_children);
42207 __fscache_cookie_put(cookie);
42208 - fscache_stat(&fscache_n_acquires_nobufs);
42209 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
42210 _leave(" = NULL");
42211 return NULL;
42212 }
42213 }
42214
42215 - fscache_stat(&fscache_n_acquires_ok);
42216 + fscache_stat_unchecked(&fscache_n_acquires_ok);
42217 _leave(" = %p", cookie);
42218 return cookie;
42219 }
42220 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
42221 cache = fscache_select_cache_for_object(cookie->parent);
42222 if (!cache) {
42223 up_read(&fscache_addremove_sem);
42224 - fscache_stat(&fscache_n_acquires_no_cache);
42225 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
42226 _leave(" = -ENOMEDIUM [no cache]");
42227 return -ENOMEDIUM;
42228 }
42229 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
42230 object = cache->ops->alloc_object(cache, cookie);
42231 fscache_stat_d(&fscache_n_cop_alloc_object);
42232 if (IS_ERR(object)) {
42233 - fscache_stat(&fscache_n_object_no_alloc);
42234 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
42235 ret = PTR_ERR(object);
42236 goto error;
42237 }
42238
42239 - fscache_stat(&fscache_n_object_alloc);
42240 + fscache_stat_unchecked(&fscache_n_object_alloc);
42241
42242 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
42243
42244 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
42245 struct fscache_object *object;
42246 struct hlist_node *_p;
42247
42248 - fscache_stat(&fscache_n_updates);
42249 + fscache_stat_unchecked(&fscache_n_updates);
42250
42251 if (!cookie) {
42252 - fscache_stat(&fscache_n_updates_null);
42253 + fscache_stat_unchecked(&fscache_n_updates_null);
42254 _leave(" [no cookie]");
42255 return;
42256 }
42257 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
42258 struct fscache_object *object;
42259 unsigned long event;
42260
42261 - fscache_stat(&fscache_n_relinquishes);
42262 + fscache_stat_unchecked(&fscache_n_relinquishes);
42263 if (retire)
42264 - fscache_stat(&fscache_n_relinquishes_retire);
42265 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
42266
42267 if (!cookie) {
42268 - fscache_stat(&fscache_n_relinquishes_null);
42269 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
42270 _leave(" [no cookie]");
42271 return;
42272 }
42273 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
42274
42275 /* wait for the cookie to finish being instantiated (or to fail) */
42276 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
42277 - fscache_stat(&fscache_n_relinquishes_waitcrt);
42278 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
42279 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
42280 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
42281 }
42282 diff -urNp linux-2.6.32.44/fs/fscache/internal.h linux-2.6.32.44/fs/fscache/internal.h
42283 --- linux-2.6.32.44/fs/fscache/internal.h 2011-03-27 14:31:47.000000000 -0400
42284 +++ linux-2.6.32.44/fs/fscache/internal.h 2011-05-04 17:56:28.000000000 -0400
42285 @@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
42286 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
42287 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
42288
42289 -extern atomic_t fscache_n_op_pend;
42290 -extern atomic_t fscache_n_op_run;
42291 -extern atomic_t fscache_n_op_enqueue;
42292 -extern atomic_t fscache_n_op_deferred_release;
42293 -extern atomic_t fscache_n_op_release;
42294 -extern atomic_t fscache_n_op_gc;
42295 -extern atomic_t fscache_n_op_cancelled;
42296 -extern atomic_t fscache_n_op_rejected;
42297 -
42298 -extern atomic_t fscache_n_attr_changed;
42299 -extern atomic_t fscache_n_attr_changed_ok;
42300 -extern atomic_t fscache_n_attr_changed_nobufs;
42301 -extern atomic_t fscache_n_attr_changed_nomem;
42302 -extern atomic_t fscache_n_attr_changed_calls;
42303 -
42304 -extern atomic_t fscache_n_allocs;
42305 -extern atomic_t fscache_n_allocs_ok;
42306 -extern atomic_t fscache_n_allocs_wait;
42307 -extern atomic_t fscache_n_allocs_nobufs;
42308 -extern atomic_t fscache_n_allocs_intr;
42309 -extern atomic_t fscache_n_allocs_object_dead;
42310 -extern atomic_t fscache_n_alloc_ops;
42311 -extern atomic_t fscache_n_alloc_op_waits;
42312 -
42313 -extern atomic_t fscache_n_retrievals;
42314 -extern atomic_t fscache_n_retrievals_ok;
42315 -extern atomic_t fscache_n_retrievals_wait;
42316 -extern atomic_t fscache_n_retrievals_nodata;
42317 -extern atomic_t fscache_n_retrievals_nobufs;
42318 -extern atomic_t fscache_n_retrievals_intr;
42319 -extern atomic_t fscache_n_retrievals_nomem;
42320 -extern atomic_t fscache_n_retrievals_object_dead;
42321 -extern atomic_t fscache_n_retrieval_ops;
42322 -extern atomic_t fscache_n_retrieval_op_waits;
42323 -
42324 -extern atomic_t fscache_n_stores;
42325 -extern atomic_t fscache_n_stores_ok;
42326 -extern atomic_t fscache_n_stores_again;
42327 -extern atomic_t fscache_n_stores_nobufs;
42328 -extern atomic_t fscache_n_stores_oom;
42329 -extern atomic_t fscache_n_store_ops;
42330 -extern atomic_t fscache_n_store_calls;
42331 -extern atomic_t fscache_n_store_pages;
42332 -extern atomic_t fscache_n_store_radix_deletes;
42333 -extern atomic_t fscache_n_store_pages_over_limit;
42334 -
42335 -extern atomic_t fscache_n_store_vmscan_not_storing;
42336 -extern atomic_t fscache_n_store_vmscan_gone;
42337 -extern atomic_t fscache_n_store_vmscan_busy;
42338 -extern atomic_t fscache_n_store_vmscan_cancelled;
42339 -
42340 -extern atomic_t fscache_n_marks;
42341 -extern atomic_t fscache_n_uncaches;
42342 -
42343 -extern atomic_t fscache_n_acquires;
42344 -extern atomic_t fscache_n_acquires_null;
42345 -extern atomic_t fscache_n_acquires_no_cache;
42346 -extern atomic_t fscache_n_acquires_ok;
42347 -extern atomic_t fscache_n_acquires_nobufs;
42348 -extern atomic_t fscache_n_acquires_oom;
42349 -
42350 -extern atomic_t fscache_n_updates;
42351 -extern atomic_t fscache_n_updates_null;
42352 -extern atomic_t fscache_n_updates_run;
42353 -
42354 -extern atomic_t fscache_n_relinquishes;
42355 -extern atomic_t fscache_n_relinquishes_null;
42356 -extern atomic_t fscache_n_relinquishes_waitcrt;
42357 -extern atomic_t fscache_n_relinquishes_retire;
42358 -
42359 -extern atomic_t fscache_n_cookie_index;
42360 -extern atomic_t fscache_n_cookie_data;
42361 -extern atomic_t fscache_n_cookie_special;
42362 -
42363 -extern atomic_t fscache_n_object_alloc;
42364 -extern atomic_t fscache_n_object_no_alloc;
42365 -extern atomic_t fscache_n_object_lookups;
42366 -extern atomic_t fscache_n_object_lookups_negative;
42367 -extern atomic_t fscache_n_object_lookups_positive;
42368 -extern atomic_t fscache_n_object_lookups_timed_out;
42369 -extern atomic_t fscache_n_object_created;
42370 -extern atomic_t fscache_n_object_avail;
42371 -extern atomic_t fscache_n_object_dead;
42372 -
42373 -extern atomic_t fscache_n_checkaux_none;
42374 -extern atomic_t fscache_n_checkaux_okay;
42375 -extern atomic_t fscache_n_checkaux_update;
42376 -extern atomic_t fscache_n_checkaux_obsolete;
42377 +extern atomic_unchecked_t fscache_n_op_pend;
42378 +extern atomic_unchecked_t fscache_n_op_run;
42379 +extern atomic_unchecked_t fscache_n_op_enqueue;
42380 +extern atomic_unchecked_t fscache_n_op_deferred_release;
42381 +extern atomic_unchecked_t fscache_n_op_release;
42382 +extern atomic_unchecked_t fscache_n_op_gc;
42383 +extern atomic_unchecked_t fscache_n_op_cancelled;
42384 +extern atomic_unchecked_t fscache_n_op_rejected;
42385 +
42386 +extern atomic_unchecked_t fscache_n_attr_changed;
42387 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
42388 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
42389 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
42390 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
42391 +
42392 +extern atomic_unchecked_t fscache_n_allocs;
42393 +extern atomic_unchecked_t fscache_n_allocs_ok;
42394 +extern atomic_unchecked_t fscache_n_allocs_wait;
42395 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
42396 +extern atomic_unchecked_t fscache_n_allocs_intr;
42397 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
42398 +extern atomic_unchecked_t fscache_n_alloc_ops;
42399 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
42400 +
42401 +extern atomic_unchecked_t fscache_n_retrievals;
42402 +extern atomic_unchecked_t fscache_n_retrievals_ok;
42403 +extern atomic_unchecked_t fscache_n_retrievals_wait;
42404 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
42405 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
42406 +extern atomic_unchecked_t fscache_n_retrievals_intr;
42407 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
42408 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
42409 +extern atomic_unchecked_t fscache_n_retrieval_ops;
42410 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
42411 +
42412 +extern atomic_unchecked_t fscache_n_stores;
42413 +extern atomic_unchecked_t fscache_n_stores_ok;
42414 +extern atomic_unchecked_t fscache_n_stores_again;
42415 +extern atomic_unchecked_t fscache_n_stores_nobufs;
42416 +extern atomic_unchecked_t fscache_n_stores_oom;
42417 +extern atomic_unchecked_t fscache_n_store_ops;
42418 +extern atomic_unchecked_t fscache_n_store_calls;
42419 +extern atomic_unchecked_t fscache_n_store_pages;
42420 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
42421 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
42422 +
42423 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
42424 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
42425 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
42426 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
42427 +
42428 +extern atomic_unchecked_t fscache_n_marks;
42429 +extern atomic_unchecked_t fscache_n_uncaches;
42430 +
42431 +extern atomic_unchecked_t fscache_n_acquires;
42432 +extern atomic_unchecked_t fscache_n_acquires_null;
42433 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
42434 +extern atomic_unchecked_t fscache_n_acquires_ok;
42435 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
42436 +extern atomic_unchecked_t fscache_n_acquires_oom;
42437 +
42438 +extern atomic_unchecked_t fscache_n_updates;
42439 +extern atomic_unchecked_t fscache_n_updates_null;
42440 +extern atomic_unchecked_t fscache_n_updates_run;
42441 +
42442 +extern atomic_unchecked_t fscache_n_relinquishes;
42443 +extern atomic_unchecked_t fscache_n_relinquishes_null;
42444 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
42445 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
42446 +
42447 +extern atomic_unchecked_t fscache_n_cookie_index;
42448 +extern atomic_unchecked_t fscache_n_cookie_data;
42449 +extern atomic_unchecked_t fscache_n_cookie_special;
42450 +
42451 +extern atomic_unchecked_t fscache_n_object_alloc;
42452 +extern atomic_unchecked_t fscache_n_object_no_alloc;
42453 +extern atomic_unchecked_t fscache_n_object_lookups;
42454 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
42455 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
42456 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
42457 +extern atomic_unchecked_t fscache_n_object_created;
42458 +extern atomic_unchecked_t fscache_n_object_avail;
42459 +extern atomic_unchecked_t fscache_n_object_dead;
42460 +
42461 +extern atomic_unchecked_t fscache_n_checkaux_none;
42462 +extern atomic_unchecked_t fscache_n_checkaux_okay;
42463 +extern atomic_unchecked_t fscache_n_checkaux_update;
42464 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
42465
42466 extern atomic_t fscache_n_cop_alloc_object;
42467 extern atomic_t fscache_n_cop_lookup_object;
42468 @@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t
42469 atomic_inc(stat);
42470 }
42471
42472 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
42473 +{
42474 + atomic_inc_unchecked(stat);
42475 +}
42476 +
42477 static inline void fscache_stat_d(atomic_t *stat)
42478 {
42479 atomic_dec(stat);
42480 @@ -259,6 +264,7 @@ extern const struct file_operations fsca
42481
42482 #define __fscache_stat(stat) (NULL)
42483 #define fscache_stat(stat) do {} while (0)
42484 +#define fscache_stat_unchecked(stat) do {} while (0)
42485 #define fscache_stat_d(stat) do {} while (0)
42486 #endif
42487
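The internal.h hunk above is the core of the fscache portion of this patch: every pure statistics counter (the fscache_n_* externs) is retyped from atomic_t to the PaX-specific atomic_unchecked_t, a fscache_stat_unchecked() helper is added beside fscache_stat(), and the stats-disabled build gets a matching empty macro, while the fscache_n_cop_* counters stay plain atomic_t. The point is that PaX's REFCOUNT hardening overflow-checks atomic_t operations, and counters that may legitimately wrap are moved out of its way. A minimal user-space sketch of that checked/unchecked split follows; the type and function names are illustrative stand-ins, not the kernel's or PaX's actual definitions.

#include <stdatomic.h>
#include <stdio.h>

/* Stand-ins for atomic_t and atomic_unchecked_t: in the real patch only the
 * kernel's atomic_t operations carry PaX's overflow checking, while the
 * _unchecked twin wraps silently, which is what pure statistics want. */
typedef struct { _Atomic long counter; } checked_stat_t;
typedef struct { _Atomic long counter; } unchecked_stat_t;

static inline void stat_inc(checked_stat_t *stat)
{
	/* the real checked increment would trap or saturate on overflow */
	atomic_fetch_add_explicit(&stat->counter, 1, memory_order_relaxed);
}

static inline void stat_inc_unchecked(unchecked_stat_t *stat)
{
	/* plain wrapping increment, harmless for event counters */
	atomic_fetch_add_explicit(&stat->counter, 1, memory_order_relaxed);
}

static checked_stat_t n_cop_alloc_object;   /* like the fscache_n_cop_* counters left as atomic_t */
static unchecked_stat_t n_acquires_ok;      /* like fscache_n_acquires_ok after the patch */

int main(void)
{
	stat_inc(&n_cop_alloc_object);
	stat_inc_unchecked(&n_acquires_ok);
	printf("cop alloc = %ld, acquires ok = %ld\n",
	       atomic_load_explicit(&n_cop_alloc_object.counter, memory_order_relaxed),
	       atomic_load_explicit(&n_acquires_ok.counter, memory_order_relaxed));
	return 0;
}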
42488 diff -urNp linux-2.6.32.44/fs/fscache/object.c linux-2.6.32.44/fs/fscache/object.c
42489 --- linux-2.6.32.44/fs/fscache/object.c 2011-03-27 14:31:47.000000000 -0400
42490 +++ linux-2.6.32.44/fs/fscache/object.c 2011-05-04 17:56:28.000000000 -0400
42491 @@ -144,7 +144,7 @@ static void fscache_object_state_machine
42492 /* update the object metadata on disk */
42493 case FSCACHE_OBJECT_UPDATING:
42494 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
42495 - fscache_stat(&fscache_n_updates_run);
42496 + fscache_stat_unchecked(&fscache_n_updates_run);
42497 fscache_stat(&fscache_n_cop_update_object);
42498 object->cache->ops->update_object(object);
42499 fscache_stat_d(&fscache_n_cop_update_object);
42500 @@ -233,7 +233,7 @@ static void fscache_object_state_machine
42501 spin_lock(&object->lock);
42502 object->state = FSCACHE_OBJECT_DEAD;
42503 spin_unlock(&object->lock);
42504 - fscache_stat(&fscache_n_object_dead);
42505 + fscache_stat_unchecked(&fscache_n_object_dead);
42506 goto terminal_transit;
42507
42508 /* handle the parent cache of this object being withdrawn from
42509 @@ -248,7 +248,7 @@ static void fscache_object_state_machine
42510 spin_lock(&object->lock);
42511 object->state = FSCACHE_OBJECT_DEAD;
42512 spin_unlock(&object->lock);
42513 - fscache_stat(&fscache_n_object_dead);
42514 + fscache_stat_unchecked(&fscache_n_object_dead);
42515 goto terminal_transit;
42516
42517 /* complain about the object being woken up once it is
42518 @@ -492,7 +492,7 @@ static void fscache_lookup_object(struct
42519 parent->cookie->def->name, cookie->def->name,
42520 object->cache->tag->name);
42521
42522 - fscache_stat(&fscache_n_object_lookups);
42523 + fscache_stat_unchecked(&fscache_n_object_lookups);
42524 fscache_stat(&fscache_n_cop_lookup_object);
42525 ret = object->cache->ops->lookup_object(object);
42526 fscache_stat_d(&fscache_n_cop_lookup_object);
42527 @@ -503,7 +503,7 @@ static void fscache_lookup_object(struct
42528 if (ret == -ETIMEDOUT) {
42529 /* probably stuck behind another object, so move this one to
42530 * the back of the queue */
42531 - fscache_stat(&fscache_n_object_lookups_timed_out);
42532 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
42533 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
42534 }
42535
42536 @@ -526,7 +526,7 @@ void fscache_object_lookup_negative(stru
42537
42538 spin_lock(&object->lock);
42539 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
42540 - fscache_stat(&fscache_n_object_lookups_negative);
42541 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
42542
42543 /* transit here to allow write requests to begin stacking up
42544 * and read requests to begin returning ENODATA */
42545 @@ -572,7 +572,7 @@ void fscache_obtained_object(struct fsca
42546 * result, in which case there may be data available */
42547 spin_lock(&object->lock);
42548 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
42549 - fscache_stat(&fscache_n_object_lookups_positive);
42550 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
42551
42552 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
42553
42554 @@ -586,7 +586,7 @@ void fscache_obtained_object(struct fsca
42555 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
42556 } else {
42557 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
42558 - fscache_stat(&fscache_n_object_created);
42559 + fscache_stat_unchecked(&fscache_n_object_created);
42560
42561 object->state = FSCACHE_OBJECT_AVAILABLE;
42562 spin_unlock(&object->lock);
42563 @@ -633,7 +633,7 @@ static void fscache_object_available(str
42564 fscache_enqueue_dependents(object);
42565
42566 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
42567 - fscache_stat(&fscache_n_object_avail);
42568 + fscache_stat_unchecked(&fscache_n_object_avail);
42569
42570 _leave("");
42571 }
42572 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
42573 enum fscache_checkaux result;
42574
42575 if (!object->cookie->def->check_aux) {
42576 - fscache_stat(&fscache_n_checkaux_none);
42577 + fscache_stat_unchecked(&fscache_n_checkaux_none);
42578 return FSCACHE_CHECKAUX_OKAY;
42579 }
42580
42581 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
42582 switch (result) {
42583 /* entry okay as is */
42584 case FSCACHE_CHECKAUX_OKAY:
42585 - fscache_stat(&fscache_n_checkaux_okay);
42586 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
42587 break;
42588
42589 /* entry requires update */
42590 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
42591 - fscache_stat(&fscache_n_checkaux_update);
42592 + fscache_stat_unchecked(&fscache_n_checkaux_update);
42593 break;
42594
42595 /* entry requires deletion */
42596 case FSCACHE_CHECKAUX_OBSOLETE:
42597 - fscache_stat(&fscache_n_checkaux_obsolete);
42598 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
42599 break;
42600
42601 default:
42602 diff -urNp linux-2.6.32.44/fs/fscache/operation.c linux-2.6.32.44/fs/fscache/operation.c
42603 --- linux-2.6.32.44/fs/fscache/operation.c 2011-03-27 14:31:47.000000000 -0400
42604 +++ linux-2.6.32.44/fs/fscache/operation.c 2011-05-04 17:56:28.000000000 -0400
42605 @@ -16,7 +16,7 @@
42606 #include <linux/seq_file.h>
42607 #include "internal.h"
42608
42609 -atomic_t fscache_op_debug_id;
42610 +atomic_unchecked_t fscache_op_debug_id;
42611 EXPORT_SYMBOL(fscache_op_debug_id);
42612
42613 /**
42614 @@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fs
42615 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
42616 ASSERTCMP(atomic_read(&op->usage), >, 0);
42617
42618 - fscache_stat(&fscache_n_op_enqueue);
42619 + fscache_stat_unchecked(&fscache_n_op_enqueue);
42620 switch (op->flags & FSCACHE_OP_TYPE) {
42621 case FSCACHE_OP_FAST:
42622 _debug("queue fast");
42623 @@ -76,7 +76,7 @@ static void fscache_run_op(struct fscach
42624 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
42625 if (op->processor)
42626 fscache_enqueue_operation(op);
42627 - fscache_stat(&fscache_n_op_run);
42628 + fscache_stat_unchecked(&fscache_n_op_run);
42629 }
42630
42631 /*
42632 @@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct f
42633 if (object->n_ops > 0) {
42634 atomic_inc(&op->usage);
42635 list_add_tail(&op->pend_link, &object->pending_ops);
42636 - fscache_stat(&fscache_n_op_pend);
42637 + fscache_stat_unchecked(&fscache_n_op_pend);
42638 } else if (!list_empty(&object->pending_ops)) {
42639 atomic_inc(&op->usage);
42640 list_add_tail(&op->pend_link, &object->pending_ops);
42641 - fscache_stat(&fscache_n_op_pend);
42642 + fscache_stat_unchecked(&fscache_n_op_pend);
42643 fscache_start_operations(object);
42644 } else {
42645 ASSERTCMP(object->n_in_progress, ==, 0);
42646 @@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct f
42647 object->n_exclusive++; /* reads and writes must wait */
42648 atomic_inc(&op->usage);
42649 list_add_tail(&op->pend_link, &object->pending_ops);
42650 - fscache_stat(&fscache_n_op_pend);
42651 + fscache_stat_unchecked(&fscache_n_op_pend);
42652 ret = 0;
42653 } else {
42654 /* not allowed to submit ops in any other state */
42655 @@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_obj
42656 if (object->n_exclusive > 0) {
42657 atomic_inc(&op->usage);
42658 list_add_tail(&op->pend_link, &object->pending_ops);
42659 - fscache_stat(&fscache_n_op_pend);
42660 + fscache_stat_unchecked(&fscache_n_op_pend);
42661 } else if (!list_empty(&object->pending_ops)) {
42662 atomic_inc(&op->usage);
42663 list_add_tail(&op->pend_link, &object->pending_ops);
42664 - fscache_stat(&fscache_n_op_pend);
42665 + fscache_stat_unchecked(&fscache_n_op_pend);
42666 fscache_start_operations(object);
42667 } else {
42668 ASSERTCMP(object->n_exclusive, ==, 0);
42669 @@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_obj
42670 object->n_ops++;
42671 atomic_inc(&op->usage);
42672 list_add_tail(&op->pend_link, &object->pending_ops);
42673 - fscache_stat(&fscache_n_op_pend);
42674 + fscache_stat_unchecked(&fscache_n_op_pend);
42675 ret = 0;
42676 } else if (object->state == FSCACHE_OBJECT_DYING ||
42677 object->state == FSCACHE_OBJECT_LC_DYING ||
42678 object->state == FSCACHE_OBJECT_WITHDRAWING) {
42679 - fscache_stat(&fscache_n_op_rejected);
42680 + fscache_stat_unchecked(&fscache_n_op_rejected);
42681 ret = -ENOBUFS;
42682 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
42683 fscache_report_unexpected_submission(object, op, ostate);
42684 @@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_ope
42685
42686 ret = -EBUSY;
42687 if (!list_empty(&op->pend_link)) {
42688 - fscache_stat(&fscache_n_op_cancelled);
42689 + fscache_stat_unchecked(&fscache_n_op_cancelled);
42690 list_del_init(&op->pend_link);
42691 object->n_ops--;
42692 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
42693 @@ -344,7 +344,7 @@ void fscache_put_operation(struct fscach
42694 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
42695 BUG();
42696
42697 - fscache_stat(&fscache_n_op_release);
42698 + fscache_stat_unchecked(&fscache_n_op_release);
42699
42700 if (op->release) {
42701 op->release(op);
42702 @@ -361,7 +361,7 @@ void fscache_put_operation(struct fscach
42703 * lock, and defer it otherwise */
42704 if (!spin_trylock(&object->lock)) {
42705 _debug("defer put");
42706 - fscache_stat(&fscache_n_op_deferred_release);
42707 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
42708
42709 cache = object->cache;
42710 spin_lock(&cache->op_gc_list_lock);
42711 @@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_st
42712
42713 _debug("GC DEFERRED REL OBJ%x OP%x",
42714 object->debug_id, op->debug_id);
42715 - fscache_stat(&fscache_n_op_gc);
42716 + fscache_stat_unchecked(&fscache_n_op_gc);
42717
42718 ASSERTCMP(atomic_read(&op->usage), ==, 0);
42719
42720 diff -urNp linux-2.6.32.44/fs/fscache/page.c linux-2.6.32.44/fs/fscache/page.c
42721 --- linux-2.6.32.44/fs/fscache/page.c 2011-03-27 14:31:47.000000000 -0400
42722 +++ linux-2.6.32.44/fs/fscache/page.c 2011-05-04 17:56:28.000000000 -0400
42723 @@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct
42724 val = radix_tree_lookup(&cookie->stores, page->index);
42725 if (!val) {
42726 rcu_read_unlock();
42727 - fscache_stat(&fscache_n_store_vmscan_not_storing);
42728 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
42729 __fscache_uncache_page(cookie, page);
42730 return true;
42731 }
42732 @@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct
42733 spin_unlock(&cookie->stores_lock);
42734
42735 if (xpage) {
42736 - fscache_stat(&fscache_n_store_vmscan_cancelled);
42737 - fscache_stat(&fscache_n_store_radix_deletes);
42738 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
42739 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
42740 ASSERTCMP(xpage, ==, page);
42741 } else {
42742 - fscache_stat(&fscache_n_store_vmscan_gone);
42743 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
42744 }
42745
42746 wake_up_bit(&cookie->flags, 0);
42747 @@ -106,7 +106,7 @@ page_busy:
42748 /* we might want to wait here, but that could deadlock the allocator as
42749 * the slow-work threads writing to the cache may all end up sleeping
42750 * on memory allocation */
42751 - fscache_stat(&fscache_n_store_vmscan_busy);
42752 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
42753 return false;
42754 }
42755 EXPORT_SYMBOL(__fscache_maybe_release_page);
42756 @@ -130,7 +130,7 @@ static void fscache_end_page_write(struc
42757 FSCACHE_COOKIE_STORING_TAG);
42758 if (!radix_tree_tag_get(&cookie->stores, page->index,
42759 FSCACHE_COOKIE_PENDING_TAG)) {
42760 - fscache_stat(&fscache_n_store_radix_deletes);
42761 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
42762 xpage = radix_tree_delete(&cookie->stores, page->index);
42763 }
42764 spin_unlock(&cookie->stores_lock);
42765 @@ -151,7 +151,7 @@ static void fscache_attr_changed_op(stru
42766
42767 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
42768
42769 - fscache_stat(&fscache_n_attr_changed_calls);
42770 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
42771
42772 if (fscache_object_is_active(object)) {
42773 fscache_set_op_state(op, "CallFS");
42774 @@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscach
42775
42776 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42777
42778 - fscache_stat(&fscache_n_attr_changed);
42779 + fscache_stat_unchecked(&fscache_n_attr_changed);
42780
42781 op = kzalloc(sizeof(*op), GFP_KERNEL);
42782 if (!op) {
42783 - fscache_stat(&fscache_n_attr_changed_nomem);
42784 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
42785 _leave(" = -ENOMEM");
42786 return -ENOMEM;
42787 }
42788 @@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscach
42789 if (fscache_submit_exclusive_op(object, op) < 0)
42790 goto nobufs;
42791 spin_unlock(&cookie->lock);
42792 - fscache_stat(&fscache_n_attr_changed_ok);
42793 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
42794 fscache_put_operation(op);
42795 _leave(" = 0");
42796 return 0;
42797 @@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscach
42798 nobufs:
42799 spin_unlock(&cookie->lock);
42800 kfree(op);
42801 - fscache_stat(&fscache_n_attr_changed_nobufs);
42802 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
42803 _leave(" = %d", -ENOBUFS);
42804 return -ENOBUFS;
42805 }
42806 @@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache
42807 /* allocate a retrieval operation and attempt to submit it */
42808 op = kzalloc(sizeof(*op), GFP_NOIO);
42809 if (!op) {
42810 - fscache_stat(&fscache_n_retrievals_nomem);
42811 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42812 return NULL;
42813 }
42814
42815 @@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_loo
42816 return 0;
42817 }
42818
42819 - fscache_stat(&fscache_n_retrievals_wait);
42820 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
42821
42822 jif = jiffies;
42823 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
42824 fscache_wait_bit_interruptible,
42825 TASK_INTERRUPTIBLE) != 0) {
42826 - fscache_stat(&fscache_n_retrievals_intr);
42827 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
42828 _leave(" = -ERESTARTSYS");
42829 return -ERESTARTSYS;
42830 }
42831 @@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_loo
42832 */
42833 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
42834 struct fscache_retrieval *op,
42835 - atomic_t *stat_op_waits,
42836 - atomic_t *stat_object_dead)
42837 + atomic_unchecked_t *stat_op_waits,
42838 + atomic_unchecked_t *stat_object_dead)
42839 {
42840 int ret;
42841
42842 @@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_ac
42843 goto check_if_dead;
42844
42845 _debug(">>> WT");
42846 - fscache_stat(stat_op_waits);
42847 + fscache_stat_unchecked(stat_op_waits);
42848 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
42849 fscache_wait_bit_interruptible,
42850 TASK_INTERRUPTIBLE) < 0) {
42851 @@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_ac
42852
42853 check_if_dead:
42854 if (unlikely(fscache_object_is_dead(object))) {
42855 - fscache_stat(stat_object_dead);
42856 + fscache_stat_unchecked(stat_object_dead);
42857 return -ENOBUFS;
42858 }
42859 return 0;
42860 @@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct
42861
42862 _enter("%p,%p,,,", cookie, page);
42863
42864 - fscache_stat(&fscache_n_retrievals);
42865 + fscache_stat_unchecked(&fscache_n_retrievals);
42866
42867 if (hlist_empty(&cookie->backing_objects))
42868 goto nobufs;
42869 @@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct
42870 goto nobufs_unlock;
42871 spin_unlock(&cookie->lock);
42872
42873 - fscache_stat(&fscache_n_retrieval_ops);
42874 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
42875
42876 /* pin the netfs read context in case we need to do the actual netfs
42877 * read because we've encountered a cache read failure */
42878 @@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct
42879
42880 error:
42881 if (ret == -ENOMEM)
42882 - fscache_stat(&fscache_n_retrievals_nomem);
42883 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42884 else if (ret == -ERESTARTSYS)
42885 - fscache_stat(&fscache_n_retrievals_intr);
42886 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
42887 else if (ret == -ENODATA)
42888 - fscache_stat(&fscache_n_retrievals_nodata);
42889 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
42890 else if (ret < 0)
42891 - fscache_stat(&fscache_n_retrievals_nobufs);
42892 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42893 else
42894 - fscache_stat(&fscache_n_retrievals_ok);
42895 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
42896
42897 fscache_put_retrieval(op);
42898 _leave(" = %d", ret);
42899 @@ -453,7 +453,7 @@ nobufs_unlock:
42900 spin_unlock(&cookie->lock);
42901 kfree(op);
42902 nobufs:
42903 - fscache_stat(&fscache_n_retrievals_nobufs);
42904 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42905 _leave(" = -ENOBUFS");
42906 return -ENOBUFS;
42907 }
42908 @@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct
42909
42910 _enter("%p,,%d,,,", cookie, *nr_pages);
42911
42912 - fscache_stat(&fscache_n_retrievals);
42913 + fscache_stat_unchecked(&fscache_n_retrievals);
42914
42915 if (hlist_empty(&cookie->backing_objects))
42916 goto nobufs;
42917 @@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct
42918 goto nobufs_unlock;
42919 spin_unlock(&cookie->lock);
42920
42921 - fscache_stat(&fscache_n_retrieval_ops);
42922 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
42923
42924 /* pin the netfs read context in case we need to do the actual netfs
42925 * read because we've encountered a cache read failure */
42926 @@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct
42927
42928 error:
42929 if (ret == -ENOMEM)
42930 - fscache_stat(&fscache_n_retrievals_nomem);
42931 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42932 else if (ret == -ERESTARTSYS)
42933 - fscache_stat(&fscache_n_retrievals_intr);
42934 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
42935 else if (ret == -ENODATA)
42936 - fscache_stat(&fscache_n_retrievals_nodata);
42937 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
42938 else if (ret < 0)
42939 - fscache_stat(&fscache_n_retrievals_nobufs);
42940 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42941 else
42942 - fscache_stat(&fscache_n_retrievals_ok);
42943 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
42944
42945 fscache_put_retrieval(op);
42946 _leave(" = %d", ret);
42947 @@ -570,7 +570,7 @@ nobufs_unlock:
42948 spin_unlock(&cookie->lock);
42949 kfree(op);
42950 nobufs:
42951 - fscache_stat(&fscache_n_retrievals_nobufs);
42952 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42953 _leave(" = -ENOBUFS");
42954 return -ENOBUFS;
42955 }
42956 @@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_
42957
42958 _enter("%p,%p,,,", cookie, page);
42959
42960 - fscache_stat(&fscache_n_allocs);
42961 + fscache_stat_unchecked(&fscache_n_allocs);
42962
42963 if (hlist_empty(&cookie->backing_objects))
42964 goto nobufs;
42965 @@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_
42966 goto nobufs_unlock;
42967 spin_unlock(&cookie->lock);
42968
42969 - fscache_stat(&fscache_n_alloc_ops);
42970 + fscache_stat_unchecked(&fscache_n_alloc_ops);
42971
42972 ret = fscache_wait_for_retrieval_activation(
42973 object, op,
42974 @@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_
42975
42976 error:
42977 if (ret == -ERESTARTSYS)
42978 - fscache_stat(&fscache_n_allocs_intr);
42979 + fscache_stat_unchecked(&fscache_n_allocs_intr);
42980 else if (ret < 0)
42981 - fscache_stat(&fscache_n_allocs_nobufs);
42982 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
42983 else
42984 - fscache_stat(&fscache_n_allocs_ok);
42985 + fscache_stat_unchecked(&fscache_n_allocs_ok);
42986
42987 fscache_put_retrieval(op);
42988 _leave(" = %d", ret);
42989 @@ -651,7 +651,7 @@ nobufs_unlock:
42990 spin_unlock(&cookie->lock);
42991 kfree(op);
42992 nobufs:
42993 - fscache_stat(&fscache_n_allocs_nobufs);
42994 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
42995 _leave(" = -ENOBUFS");
42996 return -ENOBUFS;
42997 }
42998 @@ -694,7 +694,7 @@ static void fscache_write_op(struct fsca
42999
43000 spin_lock(&cookie->stores_lock);
43001
43002 - fscache_stat(&fscache_n_store_calls);
43003 + fscache_stat_unchecked(&fscache_n_store_calls);
43004
43005 /* find a page to store */
43006 page = NULL;
43007 @@ -705,7 +705,7 @@ static void fscache_write_op(struct fsca
43008 page = results[0];
43009 _debug("gang %d [%lx]", n, page->index);
43010 if (page->index > op->store_limit) {
43011 - fscache_stat(&fscache_n_store_pages_over_limit);
43012 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
43013 goto superseded;
43014 }
43015
43016 @@ -721,7 +721,7 @@ static void fscache_write_op(struct fsca
43017
43018 if (page) {
43019 fscache_set_op_state(&op->op, "Store");
43020 - fscache_stat(&fscache_n_store_pages);
43021 + fscache_stat_unchecked(&fscache_n_store_pages);
43022 fscache_stat(&fscache_n_cop_write_page);
43023 ret = object->cache->ops->write_page(op, page);
43024 fscache_stat_d(&fscache_n_cop_write_page);
43025 @@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_
43026 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43027 ASSERT(PageFsCache(page));
43028
43029 - fscache_stat(&fscache_n_stores);
43030 + fscache_stat_unchecked(&fscache_n_stores);
43031
43032 op = kzalloc(sizeof(*op), GFP_NOIO);
43033 if (!op)
43034 @@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_
43035 spin_unlock(&cookie->stores_lock);
43036 spin_unlock(&object->lock);
43037
43038 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
43039 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
43040 op->store_limit = object->store_limit;
43041
43042 if (fscache_submit_op(object, &op->op) < 0)
43043 @@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_
43044
43045 spin_unlock(&cookie->lock);
43046 radix_tree_preload_end();
43047 - fscache_stat(&fscache_n_store_ops);
43048 - fscache_stat(&fscache_n_stores_ok);
43049 + fscache_stat_unchecked(&fscache_n_store_ops);
43050 + fscache_stat_unchecked(&fscache_n_stores_ok);
43051
43052 /* the slow work queue now carries its own ref on the object */
43053 fscache_put_operation(&op->op);
43054 @@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_
43055 return 0;
43056
43057 already_queued:
43058 - fscache_stat(&fscache_n_stores_again);
43059 + fscache_stat_unchecked(&fscache_n_stores_again);
43060 already_pending:
43061 spin_unlock(&cookie->stores_lock);
43062 spin_unlock(&object->lock);
43063 spin_unlock(&cookie->lock);
43064 radix_tree_preload_end();
43065 kfree(op);
43066 - fscache_stat(&fscache_n_stores_ok);
43067 + fscache_stat_unchecked(&fscache_n_stores_ok);
43068 _leave(" = 0");
43069 return 0;
43070
43071 @@ -886,14 +886,14 @@ nobufs:
43072 spin_unlock(&cookie->lock);
43073 radix_tree_preload_end();
43074 kfree(op);
43075 - fscache_stat(&fscache_n_stores_nobufs);
43076 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
43077 _leave(" = -ENOBUFS");
43078 return -ENOBUFS;
43079
43080 nomem_free:
43081 kfree(op);
43082 nomem:
43083 - fscache_stat(&fscache_n_stores_oom);
43084 + fscache_stat_unchecked(&fscache_n_stores_oom);
43085 _leave(" = -ENOMEM");
43086 return -ENOMEM;
43087 }
43088 @@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscac
43089 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43090 ASSERTCMP(page, !=, NULL);
43091
43092 - fscache_stat(&fscache_n_uncaches);
43093 + fscache_stat_unchecked(&fscache_n_uncaches);
43094
43095 /* cache withdrawal may beat us to it */
43096 if (!PageFsCache(page))
43097 @@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fs
43098 unsigned long loop;
43099
43100 #ifdef CONFIG_FSCACHE_STATS
43101 - atomic_add(pagevec->nr, &fscache_n_marks);
43102 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
43103 #endif
43104
43105 for (loop = 0; loop < pagevec->nr; loop++) {
43106 diff -urNp linux-2.6.32.44/fs/fscache/stats.c linux-2.6.32.44/fs/fscache/stats.c
43107 --- linux-2.6.32.44/fs/fscache/stats.c 2011-03-27 14:31:47.000000000 -0400
43108 +++ linux-2.6.32.44/fs/fscache/stats.c 2011-05-04 17:56:28.000000000 -0400
43109 @@ -18,95 +18,95 @@
43110 /*
43111 * operation counters
43112 */
43113 -atomic_t fscache_n_op_pend;
43114 -atomic_t fscache_n_op_run;
43115 -atomic_t fscache_n_op_enqueue;
43116 -atomic_t fscache_n_op_requeue;
43117 -atomic_t fscache_n_op_deferred_release;
43118 -atomic_t fscache_n_op_release;
43119 -atomic_t fscache_n_op_gc;
43120 -atomic_t fscache_n_op_cancelled;
43121 -atomic_t fscache_n_op_rejected;
43122 -
43123 -atomic_t fscache_n_attr_changed;
43124 -atomic_t fscache_n_attr_changed_ok;
43125 -atomic_t fscache_n_attr_changed_nobufs;
43126 -atomic_t fscache_n_attr_changed_nomem;
43127 -atomic_t fscache_n_attr_changed_calls;
43128 -
43129 -atomic_t fscache_n_allocs;
43130 -atomic_t fscache_n_allocs_ok;
43131 -atomic_t fscache_n_allocs_wait;
43132 -atomic_t fscache_n_allocs_nobufs;
43133 -atomic_t fscache_n_allocs_intr;
43134 -atomic_t fscache_n_allocs_object_dead;
43135 -atomic_t fscache_n_alloc_ops;
43136 -atomic_t fscache_n_alloc_op_waits;
43137 -
43138 -atomic_t fscache_n_retrievals;
43139 -atomic_t fscache_n_retrievals_ok;
43140 -atomic_t fscache_n_retrievals_wait;
43141 -atomic_t fscache_n_retrievals_nodata;
43142 -atomic_t fscache_n_retrievals_nobufs;
43143 -atomic_t fscache_n_retrievals_intr;
43144 -atomic_t fscache_n_retrievals_nomem;
43145 -atomic_t fscache_n_retrievals_object_dead;
43146 -atomic_t fscache_n_retrieval_ops;
43147 -atomic_t fscache_n_retrieval_op_waits;
43148 -
43149 -atomic_t fscache_n_stores;
43150 -atomic_t fscache_n_stores_ok;
43151 -atomic_t fscache_n_stores_again;
43152 -atomic_t fscache_n_stores_nobufs;
43153 -atomic_t fscache_n_stores_oom;
43154 -atomic_t fscache_n_store_ops;
43155 -atomic_t fscache_n_store_calls;
43156 -atomic_t fscache_n_store_pages;
43157 -atomic_t fscache_n_store_radix_deletes;
43158 -atomic_t fscache_n_store_pages_over_limit;
43159 -
43160 -atomic_t fscache_n_store_vmscan_not_storing;
43161 -atomic_t fscache_n_store_vmscan_gone;
43162 -atomic_t fscache_n_store_vmscan_busy;
43163 -atomic_t fscache_n_store_vmscan_cancelled;
43164 -
43165 -atomic_t fscache_n_marks;
43166 -atomic_t fscache_n_uncaches;
43167 -
43168 -atomic_t fscache_n_acquires;
43169 -atomic_t fscache_n_acquires_null;
43170 -atomic_t fscache_n_acquires_no_cache;
43171 -atomic_t fscache_n_acquires_ok;
43172 -atomic_t fscache_n_acquires_nobufs;
43173 -atomic_t fscache_n_acquires_oom;
43174 -
43175 -atomic_t fscache_n_updates;
43176 -atomic_t fscache_n_updates_null;
43177 -atomic_t fscache_n_updates_run;
43178 -
43179 -atomic_t fscache_n_relinquishes;
43180 -atomic_t fscache_n_relinquishes_null;
43181 -atomic_t fscache_n_relinquishes_waitcrt;
43182 -atomic_t fscache_n_relinquishes_retire;
43183 -
43184 -atomic_t fscache_n_cookie_index;
43185 -atomic_t fscache_n_cookie_data;
43186 -atomic_t fscache_n_cookie_special;
43187 -
43188 -atomic_t fscache_n_object_alloc;
43189 -atomic_t fscache_n_object_no_alloc;
43190 -atomic_t fscache_n_object_lookups;
43191 -atomic_t fscache_n_object_lookups_negative;
43192 -atomic_t fscache_n_object_lookups_positive;
43193 -atomic_t fscache_n_object_lookups_timed_out;
43194 -atomic_t fscache_n_object_created;
43195 -atomic_t fscache_n_object_avail;
43196 -atomic_t fscache_n_object_dead;
43197 -
43198 -atomic_t fscache_n_checkaux_none;
43199 -atomic_t fscache_n_checkaux_okay;
43200 -atomic_t fscache_n_checkaux_update;
43201 -atomic_t fscache_n_checkaux_obsolete;
43202 +atomic_unchecked_t fscache_n_op_pend;
43203 +atomic_unchecked_t fscache_n_op_run;
43204 +atomic_unchecked_t fscache_n_op_enqueue;
43205 +atomic_unchecked_t fscache_n_op_requeue;
43206 +atomic_unchecked_t fscache_n_op_deferred_release;
43207 +atomic_unchecked_t fscache_n_op_release;
43208 +atomic_unchecked_t fscache_n_op_gc;
43209 +atomic_unchecked_t fscache_n_op_cancelled;
43210 +atomic_unchecked_t fscache_n_op_rejected;
43211 +
43212 +atomic_unchecked_t fscache_n_attr_changed;
43213 +atomic_unchecked_t fscache_n_attr_changed_ok;
43214 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
43215 +atomic_unchecked_t fscache_n_attr_changed_nomem;
43216 +atomic_unchecked_t fscache_n_attr_changed_calls;
43217 +
43218 +atomic_unchecked_t fscache_n_allocs;
43219 +atomic_unchecked_t fscache_n_allocs_ok;
43220 +atomic_unchecked_t fscache_n_allocs_wait;
43221 +atomic_unchecked_t fscache_n_allocs_nobufs;
43222 +atomic_unchecked_t fscache_n_allocs_intr;
43223 +atomic_unchecked_t fscache_n_allocs_object_dead;
43224 +atomic_unchecked_t fscache_n_alloc_ops;
43225 +atomic_unchecked_t fscache_n_alloc_op_waits;
43226 +
43227 +atomic_unchecked_t fscache_n_retrievals;
43228 +atomic_unchecked_t fscache_n_retrievals_ok;
43229 +atomic_unchecked_t fscache_n_retrievals_wait;
43230 +atomic_unchecked_t fscache_n_retrievals_nodata;
43231 +atomic_unchecked_t fscache_n_retrievals_nobufs;
43232 +atomic_unchecked_t fscache_n_retrievals_intr;
43233 +atomic_unchecked_t fscache_n_retrievals_nomem;
43234 +atomic_unchecked_t fscache_n_retrievals_object_dead;
43235 +atomic_unchecked_t fscache_n_retrieval_ops;
43236 +atomic_unchecked_t fscache_n_retrieval_op_waits;
43237 +
43238 +atomic_unchecked_t fscache_n_stores;
43239 +atomic_unchecked_t fscache_n_stores_ok;
43240 +atomic_unchecked_t fscache_n_stores_again;
43241 +atomic_unchecked_t fscache_n_stores_nobufs;
43242 +atomic_unchecked_t fscache_n_stores_oom;
43243 +atomic_unchecked_t fscache_n_store_ops;
43244 +atomic_unchecked_t fscache_n_store_calls;
43245 +atomic_unchecked_t fscache_n_store_pages;
43246 +atomic_unchecked_t fscache_n_store_radix_deletes;
43247 +atomic_unchecked_t fscache_n_store_pages_over_limit;
43248 +
43249 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
43250 +atomic_unchecked_t fscache_n_store_vmscan_gone;
43251 +atomic_unchecked_t fscache_n_store_vmscan_busy;
43252 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
43253 +
43254 +atomic_unchecked_t fscache_n_marks;
43255 +atomic_unchecked_t fscache_n_uncaches;
43256 +
43257 +atomic_unchecked_t fscache_n_acquires;
43258 +atomic_unchecked_t fscache_n_acquires_null;
43259 +atomic_unchecked_t fscache_n_acquires_no_cache;
43260 +atomic_unchecked_t fscache_n_acquires_ok;
43261 +atomic_unchecked_t fscache_n_acquires_nobufs;
43262 +atomic_unchecked_t fscache_n_acquires_oom;
43263 +
43264 +atomic_unchecked_t fscache_n_updates;
43265 +atomic_unchecked_t fscache_n_updates_null;
43266 +atomic_unchecked_t fscache_n_updates_run;
43267 +
43268 +atomic_unchecked_t fscache_n_relinquishes;
43269 +atomic_unchecked_t fscache_n_relinquishes_null;
43270 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
43271 +atomic_unchecked_t fscache_n_relinquishes_retire;
43272 +
43273 +atomic_unchecked_t fscache_n_cookie_index;
43274 +atomic_unchecked_t fscache_n_cookie_data;
43275 +atomic_unchecked_t fscache_n_cookie_special;
43276 +
43277 +atomic_unchecked_t fscache_n_object_alloc;
43278 +atomic_unchecked_t fscache_n_object_no_alloc;
43279 +atomic_unchecked_t fscache_n_object_lookups;
43280 +atomic_unchecked_t fscache_n_object_lookups_negative;
43281 +atomic_unchecked_t fscache_n_object_lookups_positive;
43282 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
43283 +atomic_unchecked_t fscache_n_object_created;
43284 +atomic_unchecked_t fscache_n_object_avail;
43285 +atomic_unchecked_t fscache_n_object_dead;
43286 +
43287 +atomic_unchecked_t fscache_n_checkaux_none;
43288 +atomic_unchecked_t fscache_n_checkaux_okay;
43289 +atomic_unchecked_t fscache_n_checkaux_update;
43290 +atomic_unchecked_t fscache_n_checkaux_obsolete;
43291
43292 atomic_t fscache_n_cop_alloc_object;
43293 atomic_t fscache_n_cop_lookup_object;
43294 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
43295 seq_puts(m, "FS-Cache statistics\n");
43296
43297 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
43298 - atomic_read(&fscache_n_cookie_index),
43299 - atomic_read(&fscache_n_cookie_data),
43300 - atomic_read(&fscache_n_cookie_special));
43301 + atomic_read_unchecked(&fscache_n_cookie_index),
43302 + atomic_read_unchecked(&fscache_n_cookie_data),
43303 + atomic_read_unchecked(&fscache_n_cookie_special));
43304
43305 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
43306 - atomic_read(&fscache_n_object_alloc),
43307 - atomic_read(&fscache_n_object_no_alloc),
43308 - atomic_read(&fscache_n_object_avail),
43309 - atomic_read(&fscache_n_object_dead));
43310 + atomic_read_unchecked(&fscache_n_object_alloc),
43311 + atomic_read_unchecked(&fscache_n_object_no_alloc),
43312 + atomic_read_unchecked(&fscache_n_object_avail),
43313 + atomic_read_unchecked(&fscache_n_object_dead));
43314 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
43315 - atomic_read(&fscache_n_checkaux_none),
43316 - atomic_read(&fscache_n_checkaux_okay),
43317 - atomic_read(&fscache_n_checkaux_update),
43318 - atomic_read(&fscache_n_checkaux_obsolete));
43319 + atomic_read_unchecked(&fscache_n_checkaux_none),
43320 + atomic_read_unchecked(&fscache_n_checkaux_okay),
43321 + atomic_read_unchecked(&fscache_n_checkaux_update),
43322 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
43323
43324 seq_printf(m, "Pages : mrk=%u unc=%u\n",
43325 - atomic_read(&fscache_n_marks),
43326 - atomic_read(&fscache_n_uncaches));
43327 + atomic_read_unchecked(&fscache_n_marks),
43328 + atomic_read_unchecked(&fscache_n_uncaches));
43329
43330 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
43331 " oom=%u\n",
43332 - atomic_read(&fscache_n_acquires),
43333 - atomic_read(&fscache_n_acquires_null),
43334 - atomic_read(&fscache_n_acquires_no_cache),
43335 - atomic_read(&fscache_n_acquires_ok),
43336 - atomic_read(&fscache_n_acquires_nobufs),
43337 - atomic_read(&fscache_n_acquires_oom));
43338 + atomic_read_unchecked(&fscache_n_acquires),
43339 + atomic_read_unchecked(&fscache_n_acquires_null),
43340 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
43341 + atomic_read_unchecked(&fscache_n_acquires_ok),
43342 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
43343 + atomic_read_unchecked(&fscache_n_acquires_oom));
43344
43345 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
43346 - atomic_read(&fscache_n_object_lookups),
43347 - atomic_read(&fscache_n_object_lookups_negative),
43348 - atomic_read(&fscache_n_object_lookups_positive),
43349 - atomic_read(&fscache_n_object_lookups_timed_out),
43350 - atomic_read(&fscache_n_object_created));
43351 + atomic_read_unchecked(&fscache_n_object_lookups),
43352 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
43353 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
43354 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
43355 + atomic_read_unchecked(&fscache_n_object_created));
43356
43357 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
43358 - atomic_read(&fscache_n_updates),
43359 - atomic_read(&fscache_n_updates_null),
43360 - atomic_read(&fscache_n_updates_run));
43361 + atomic_read_unchecked(&fscache_n_updates),
43362 + atomic_read_unchecked(&fscache_n_updates_null),
43363 + atomic_read_unchecked(&fscache_n_updates_run));
43364
43365 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
43366 - atomic_read(&fscache_n_relinquishes),
43367 - atomic_read(&fscache_n_relinquishes_null),
43368 - atomic_read(&fscache_n_relinquishes_waitcrt),
43369 - atomic_read(&fscache_n_relinquishes_retire));
43370 + atomic_read_unchecked(&fscache_n_relinquishes),
43371 + atomic_read_unchecked(&fscache_n_relinquishes_null),
43372 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
43373 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
43374
43375 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
43376 - atomic_read(&fscache_n_attr_changed),
43377 - atomic_read(&fscache_n_attr_changed_ok),
43378 - atomic_read(&fscache_n_attr_changed_nobufs),
43379 - atomic_read(&fscache_n_attr_changed_nomem),
43380 - atomic_read(&fscache_n_attr_changed_calls));
43381 + atomic_read_unchecked(&fscache_n_attr_changed),
43382 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
43383 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
43384 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
43385 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
43386
43387 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
43388 - atomic_read(&fscache_n_allocs),
43389 - atomic_read(&fscache_n_allocs_ok),
43390 - atomic_read(&fscache_n_allocs_wait),
43391 - atomic_read(&fscache_n_allocs_nobufs),
43392 - atomic_read(&fscache_n_allocs_intr));
43393 + atomic_read_unchecked(&fscache_n_allocs),
43394 + atomic_read_unchecked(&fscache_n_allocs_ok),
43395 + atomic_read_unchecked(&fscache_n_allocs_wait),
43396 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
43397 + atomic_read_unchecked(&fscache_n_allocs_intr));
43398 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
43399 - atomic_read(&fscache_n_alloc_ops),
43400 - atomic_read(&fscache_n_alloc_op_waits),
43401 - atomic_read(&fscache_n_allocs_object_dead));
43402 + atomic_read_unchecked(&fscache_n_alloc_ops),
43403 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
43404 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
43405
43406 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
43407 " int=%u oom=%u\n",
43408 - atomic_read(&fscache_n_retrievals),
43409 - atomic_read(&fscache_n_retrievals_ok),
43410 - atomic_read(&fscache_n_retrievals_wait),
43411 - atomic_read(&fscache_n_retrievals_nodata),
43412 - atomic_read(&fscache_n_retrievals_nobufs),
43413 - atomic_read(&fscache_n_retrievals_intr),
43414 - atomic_read(&fscache_n_retrievals_nomem));
43415 + atomic_read_unchecked(&fscache_n_retrievals),
43416 + atomic_read_unchecked(&fscache_n_retrievals_ok),
43417 + atomic_read_unchecked(&fscache_n_retrievals_wait),
43418 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
43419 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
43420 + atomic_read_unchecked(&fscache_n_retrievals_intr),
43421 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
43422 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
43423 - atomic_read(&fscache_n_retrieval_ops),
43424 - atomic_read(&fscache_n_retrieval_op_waits),
43425 - atomic_read(&fscache_n_retrievals_object_dead));
43426 + atomic_read_unchecked(&fscache_n_retrieval_ops),
43427 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
43428 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
43429
43430 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
43431 - atomic_read(&fscache_n_stores),
43432 - atomic_read(&fscache_n_stores_ok),
43433 - atomic_read(&fscache_n_stores_again),
43434 - atomic_read(&fscache_n_stores_nobufs),
43435 - atomic_read(&fscache_n_stores_oom));
43436 + atomic_read_unchecked(&fscache_n_stores),
43437 + atomic_read_unchecked(&fscache_n_stores_ok),
43438 + atomic_read_unchecked(&fscache_n_stores_again),
43439 + atomic_read_unchecked(&fscache_n_stores_nobufs),
43440 + atomic_read_unchecked(&fscache_n_stores_oom));
43441 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
43442 - atomic_read(&fscache_n_store_ops),
43443 - atomic_read(&fscache_n_store_calls),
43444 - atomic_read(&fscache_n_store_pages),
43445 - atomic_read(&fscache_n_store_radix_deletes),
43446 - atomic_read(&fscache_n_store_pages_over_limit));
43447 + atomic_read_unchecked(&fscache_n_store_ops),
43448 + atomic_read_unchecked(&fscache_n_store_calls),
43449 + atomic_read_unchecked(&fscache_n_store_pages),
43450 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
43451 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
43452
43453 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
43454 - atomic_read(&fscache_n_store_vmscan_not_storing),
43455 - atomic_read(&fscache_n_store_vmscan_gone),
43456 - atomic_read(&fscache_n_store_vmscan_busy),
43457 - atomic_read(&fscache_n_store_vmscan_cancelled));
43458 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
43459 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
43460 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
43461 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
43462
43463 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
43464 - atomic_read(&fscache_n_op_pend),
43465 - atomic_read(&fscache_n_op_run),
43466 - atomic_read(&fscache_n_op_enqueue),
43467 - atomic_read(&fscache_n_op_cancelled),
43468 - atomic_read(&fscache_n_op_rejected));
43469 + atomic_read_unchecked(&fscache_n_op_pend),
43470 + atomic_read_unchecked(&fscache_n_op_run),
43471 + atomic_read_unchecked(&fscache_n_op_enqueue),
43472 + atomic_read_unchecked(&fscache_n_op_cancelled),
43473 + atomic_read_unchecked(&fscache_n_op_rejected));
43474 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
43475 - atomic_read(&fscache_n_op_deferred_release),
43476 - atomic_read(&fscache_n_op_release),
43477 - atomic_read(&fscache_n_op_gc));
43478 + atomic_read_unchecked(&fscache_n_op_deferred_release),
43479 + atomic_read_unchecked(&fscache_n_op_release),
43480 + atomic_read_unchecked(&fscache_n_op_gc));
43481
43482 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
43483 atomic_read(&fscache_n_cop_alloc_object),
43484 diff -urNp linux-2.6.32.44/fs/fs_struct.c linux-2.6.32.44/fs/fs_struct.c
43485 --- linux-2.6.32.44/fs/fs_struct.c 2011-03-27 14:31:47.000000000 -0400
43486 +++ linux-2.6.32.44/fs/fs_struct.c 2011-04-17 15:56:46.000000000 -0400
43487 @@ -4,6 +4,7 @@
43488 #include <linux/path.h>
43489 #include <linux/slab.h>
43490 #include <linux/fs_struct.h>
43491 +#include <linux/grsecurity.h>
43492
43493 /*
43494 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
43495 @@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, s
43496 old_root = fs->root;
43497 fs->root = *path;
43498 path_get(path);
43499 + gr_set_chroot_entries(current, path);
43500 write_unlock(&fs->lock);
43501 if (old_root.dentry)
43502 path_put(&old_root);
43503 @@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_roo
43504 && fs->root.mnt == old_root->mnt) {
43505 path_get(new_root);
43506 fs->root = *new_root;
43507 + gr_set_chroot_entries(p, new_root);
43508 count++;
43509 }
43510 if (fs->pwd.dentry == old_root->dentry
43511 @@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
43512 task_lock(tsk);
43513 write_lock(&fs->lock);
43514 tsk->fs = NULL;
43515 - kill = !--fs->users;
43516 + gr_clear_chroot_entries(tsk);
43517 + kill = !atomic_dec_return(&fs->users);
43518 write_unlock(&fs->lock);
43519 task_unlock(tsk);
43520 if (kill)
43521 @@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct
43522 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
43523 /* We don't need to lock fs - think why ;-) */
43524 if (fs) {
43525 - fs->users = 1;
43526 + atomic_set(&fs->users, 1);
43527 fs->in_exec = 0;
43528 rwlock_init(&fs->lock);
43529 fs->umask = old->umask;
43530 @@ -127,8 +131,9 @@ int unshare_fs_struct(void)
43531
43532 task_lock(current);
43533 write_lock(&fs->lock);
43534 - kill = !--fs->users;
43535 + kill = !atomic_dec_return(&fs->users);
43536 current->fs = new_fs;
43537 + gr_set_chroot_entries(current, &new_fs->root);
43538 write_unlock(&fs->lock);
43539 task_unlock(current);
43540
43541 @@ -147,7 +152,7 @@ EXPORT_SYMBOL(current_umask);
43542
43543 /* to be mentioned only in INIT_TASK */
43544 struct fs_struct init_fs = {
43545 - .users = 1,
43546 + .users = ATOMIC_INIT(1),
43547 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
43548 .umask = 0022,
43549 };
43550 @@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
43551 task_lock(current);
43552
43553 write_lock(&init_fs.lock);
43554 - init_fs.users++;
43555 + atomic_inc(&init_fs.users);
43556 write_unlock(&init_fs.lock);
43557
43558 write_lock(&fs->lock);
43559 current->fs = &init_fs;
43560 - kill = !--fs->users;
43561 + gr_set_chroot_entries(current, &current->fs->root);
43562 + kill = !atomic_dec_return(&fs->users);
43563 write_unlock(&fs->lock);
43564
43565 task_unlock(current);
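The fs_struct.c hunks above do two things: they wire in grsecurity's chroot bookkeeping through gr_set_chroot_entries()/gr_clear_chroot_entries() (declared in the newly included <linux/grsecurity.h>), and they convert fs->users from a plain int manipulated as "kill = !--fs->users" under the lock into an atomic_t driven by atomic_inc()/atomic_dec_return(). A rough user-space analogue of that last-reference test, with C11 atomics standing in for atomic_t:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* stand-in for the patched struct fs_struct, whose 'users' field becomes atomic_t */
struct fs_struct_like {
	_Atomic int users;
};

/* mirrors kill = !atomic_dec_return(&fs->users): true when the last ref was dropped */
static bool drop_fs_ref(struct fs_struct_like *fs)
{
	return atomic_fetch_sub_explicit(&fs->users, 1, memory_order_acq_rel) == 1;
}

int main(void)
{
	struct fs_struct_like fs = { 2 };   /* like .users = ATOMIC_INIT(1) plus one extra ref */

	printf("killed after first put:  %d\n", drop_fs_ref(&fs));   /* 0 */
	printf("killed after second put: %d\n", drop_fs_ref(&fs));   /* 1 */
	return 0;
}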
43566 diff -urNp linux-2.6.32.44/fs/fuse/cuse.c linux-2.6.32.44/fs/fuse/cuse.c
43567 --- linux-2.6.32.44/fs/fuse/cuse.c 2011-03-27 14:31:47.000000000 -0400
43568 +++ linux-2.6.32.44/fs/fuse/cuse.c 2011-08-05 20:33:55.000000000 -0400
43569 @@ -576,10 +576,12 @@ static int __init cuse_init(void)
43570 INIT_LIST_HEAD(&cuse_conntbl[i]);
43571
43572 /* inherit and extend fuse_dev_operations */
43573 - cuse_channel_fops = fuse_dev_operations;
43574 - cuse_channel_fops.owner = THIS_MODULE;
43575 - cuse_channel_fops.open = cuse_channel_open;
43576 - cuse_channel_fops.release = cuse_channel_release;
43577 + pax_open_kernel();
43578 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
43579 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
43580 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
43581 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
43582 + pax_close_kernel();
43583
43584 cuse_class = class_create(THIS_MODULE, "cuse");
43585 if (IS_ERR(cuse_class))
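Under grsecurity/PaX constification, cuse_channel_fops ends up in read-only memory, so the cuse_init() hunk above can no longer assign its fields directly; it brackets the writes with pax_open_kernel()/pax_close_kernel() and stores through casted pointers instead. The mechanism behind those two calls is architecture-specific and not visible in this hunk; the sketch below is only a user-space analogue of the same "briefly make it writable, patch it, lock it again" idea, using mprotect() on a page-aligned table and assuming 4 KiB pages. All names are invented for illustration.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

struct file_ops_like {                  /* stand-in for struct file_operations */
	void *owner;
	int (*open)(void);
	int (*release)(void);
};

static int cuse_open_like(void)    { return 0; }
static int cuse_release_like(void) { return 0; }

/* give the table its own 4 KiB-aligned slot so protection changes stay local */
static struct file_ops_like channel_fops __attribute__((aligned(4096)));

static const struct file_ops_like base_fops = { 0 };   /* plays fuse_dev_operations */

int main(void)
{
	size_t pagesz = (size_t)sysconf(_SC_PAGESIZE);

	/* analogue of pax_open_kernel(): make the table writable for a moment */
	if (mprotect(&channel_fops, pagesz, PROT_READ | PROT_WRITE) != 0)
		return 1;

	memcpy(&channel_fops, &base_fops, sizeof(base_fops));   /* inherit ... */
	channel_fops.open = cuse_open_like;                     /* ... and extend */
	channel_fops.release = cuse_release_like;

	/* analogue of pax_close_kernel(): back to read-only */
	if (mprotect(&channel_fops, pagesz, PROT_READ) != 0)
		return 1;

	printf("open hook installed: %d\n", channel_fops.open == cuse_open_like);
	return 0;
}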
43586 diff -urNp linux-2.6.32.44/fs/fuse/dev.c linux-2.6.32.44/fs/fuse/dev.c
43587 --- linux-2.6.32.44/fs/fuse/dev.c 2011-03-27 14:31:47.000000000 -0400
43588 +++ linux-2.6.32.44/fs/fuse/dev.c 2011-08-05 20:33:55.000000000 -0400
43589 @@ -885,7 +885,7 @@ static int fuse_notify_inval_entry(struc
43590 {
43591 struct fuse_notify_inval_entry_out outarg;
43592 int err = -EINVAL;
43593 - char buf[FUSE_NAME_MAX+1];
43594 + char *buf = NULL;
43595 struct qstr name;
43596
43597 if (size < sizeof(outarg))
43598 @@ -899,6 +899,11 @@ static int fuse_notify_inval_entry(struc
43599 if (outarg.namelen > FUSE_NAME_MAX)
43600 goto err;
43601
43602 + err = -ENOMEM;
43603 + buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
43604 + if (!buf)
43605 + goto err;
43606 +
43607 name.name = buf;
43608 name.len = outarg.namelen;
43609 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
43610 @@ -910,17 +915,15 @@ static int fuse_notify_inval_entry(struc
43611
43612 down_read(&fc->killsb);
43613 err = -ENOENT;
43614 - if (!fc->sb)
43615 - goto err_unlock;
43616 -
43617 - err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
43618 -
43619 -err_unlock:
43620 + if (fc->sb)
43621 + err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
43622 up_read(&fc->killsb);
43623 + kfree(buf);
43624 return err;
43625
43626 err:
43627 fuse_copy_finish(cs);
43628 + kfree(buf);
43629 return err;
43630 }
43631
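The fuse/dev.c change is stack hardening rather than an atomic conversion: fuse_notify_inval_entry() used to keep a FUSE_NAME_MAX+1 byte name buffer on the kernel stack, and the patch moves it to kmalloc()/kfree(), freeing it on every exit path (kfree(NULL) is a no-op, so the shared error label stays safe) and folding the old err_unlock label into a simple "if (fc->sb)" test. The same shape in plain user-space C, with made-up names, sizes and error codes:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NAME_MAX_LIKE 1024          /* stands in for FUSE_NAME_MAX */

/* copies a length-checked name into a heap buffer instead of a large stack array */
static int handle_name(const char *src, size_t len)
{
	char *buf = NULL;           /* NULL so the shared exit path may always free it */
	int err = -1;               /* think -EINVAL */

	if (len > NAME_MAX_LIKE)
		goto out;

	err = -2;                   /* think -ENOMEM */
	buf = malloc(NAME_MAX_LIKE + 1);
	if (!buf)
		goto out;

	memcpy(buf, src, len);
	buf[len] = '\0';
	printf("invalidate entry \"%s\"\n", buf);
	err = 0;
out:
	free(buf);                  /* free(NULL) is a no-op, like kfree(NULL) */
	return err;
}

int main(void)
{
	return handle_name("example", strlen("example")) ? 1 : 0;
}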
43632 diff -urNp linux-2.6.32.44/fs/fuse/dir.c linux-2.6.32.44/fs/fuse/dir.c
43633 --- linux-2.6.32.44/fs/fuse/dir.c 2011-03-27 14:31:47.000000000 -0400
43634 +++ linux-2.6.32.44/fs/fuse/dir.c 2011-04-17 15:56:46.000000000 -0400
43635 @@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *de
43636 return link;
43637 }
43638
43639 -static void free_link(char *link)
43640 +static void free_link(const char *link)
43641 {
43642 if (!IS_ERR(link))
43643 free_page((unsigned long) link);
43644 diff -urNp linux-2.6.32.44/fs/gfs2/ops_inode.c linux-2.6.32.44/fs/gfs2/ops_inode.c
43645 --- linux-2.6.32.44/fs/gfs2/ops_inode.c 2011-03-27 14:31:47.000000000 -0400
43646 +++ linux-2.6.32.44/fs/gfs2/ops_inode.c 2011-05-16 21:46:57.000000000 -0400
43647 @@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odi
43648 unsigned int x;
43649 int error;
43650
43651 + pax_track_stack();
43652 +
43653 if (ndentry->d_inode) {
43654 nip = GFS2_I(ndentry->d_inode);
43655 if (ip == nip)
43656 diff -urNp linux-2.6.32.44/fs/gfs2/sys.c linux-2.6.32.44/fs/gfs2/sys.c
43657 --- linux-2.6.32.44/fs/gfs2/sys.c 2011-03-27 14:31:47.000000000 -0400
43658 +++ linux-2.6.32.44/fs/gfs2/sys.c 2011-04-17 15:56:46.000000000 -0400
43659 @@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct ko
43660 return a->store ? a->store(sdp, buf, len) : len;
43661 }
43662
43663 -static struct sysfs_ops gfs2_attr_ops = {
43664 +static const struct sysfs_ops gfs2_attr_ops = {
43665 .show = gfs2_attr_show,
43666 .store = gfs2_attr_store,
43667 };
43668 @@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset
43669 return 0;
43670 }
43671
43672 -static struct kset_uevent_ops gfs2_uevent_ops = {
43673 +static const struct kset_uevent_ops gfs2_uevent_ops = {
43674 .uevent = gfs2_uevent,
43675 };
43676
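The gfs2/sys.c hunk belongs to the patch's broader constification effort: sysfs_ops and kset_uevent_ops tables that are never modified at run time are declared const so their function pointers land in read-only memory. This is the flip side of the cuse_init() case earlier, where a table that does need patching gets the pax_open_kernel() treatment. A trivial stand-alone illustration of a constified ops table, with invented names:

#include <stdio.h>

struct attr_ops_like {                    /* stands in for struct sysfs_ops */
	long (*show)(char *buf);
	long (*store)(const char *buf, unsigned long len);
};

static long demo_show(char *buf)
{
	return (long)sprintf(buf, "demo\n");
}

static long demo_store(const char *buf, unsigned long len)
{
	(void)buf;
	return (long)len;                 /* pretend the whole buffer was consumed */
}

/* 'static const' puts the pointer table in .rodata, as with gfs2_attr_ops above */
static const struct attr_ops_like demo_ops = {
	.show  = demo_show,
	.store = demo_store,
};

int main(void)
{
	char buf[16];

	demo_ops.show(buf);
	printf("%s", buf);
	return demo_ops.store(buf, 5) == 5 ? 0 : 1;
}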
43677 diff -urNp linux-2.6.32.44/fs/hfsplus/catalog.c linux-2.6.32.44/fs/hfsplus/catalog.c
43678 --- linux-2.6.32.44/fs/hfsplus/catalog.c 2011-03-27 14:31:47.000000000 -0400
43679 +++ linux-2.6.32.44/fs/hfsplus/catalog.c 2011-05-16 21:46:57.000000000 -0400
43680 @@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block
43681 int err;
43682 u16 type;
43683
43684 + pax_track_stack();
43685 +
43686 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
43687 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
43688 if (err)
43689 @@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct
43690 int entry_size;
43691 int err;
43692
43693 + pax_track_stack();
43694 +
43695 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
43696 sb = dir->i_sb;
43697 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
43698 @@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
43699 int entry_size, type;
43700 int err = 0;
43701
43702 + pax_track_stack();
43703 +
43704 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
43705 dst_dir->i_ino, dst_name->name);
43706 sb = src_dir->i_sb;
43707 diff -urNp linux-2.6.32.44/fs/hfsplus/dir.c linux-2.6.32.44/fs/hfsplus/dir.c
43708 --- linux-2.6.32.44/fs/hfsplus/dir.c 2011-03-27 14:31:47.000000000 -0400
43709 +++ linux-2.6.32.44/fs/hfsplus/dir.c 2011-05-16 21:46:57.000000000 -0400
43710 @@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *
43711 struct hfsplus_readdir_data *rd;
43712 u16 type;
43713
43714 + pax_track_stack();
43715 +
43716 if (filp->f_pos >= inode->i_size)
43717 return 0;
43718
43719 diff -urNp linux-2.6.32.44/fs/hfsplus/inode.c linux-2.6.32.44/fs/hfsplus/inode.c
43720 --- linux-2.6.32.44/fs/hfsplus/inode.c 2011-03-27 14:31:47.000000000 -0400
43721 +++ linux-2.6.32.44/fs/hfsplus/inode.c 2011-05-16 21:46:57.000000000 -0400
43722 @@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode
43723 int res = 0;
43724 u16 type;
43725
43726 + pax_track_stack();
43727 +
43728 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
43729
43730 HFSPLUS_I(inode).dev = 0;
43731 @@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode
43732 struct hfs_find_data fd;
43733 hfsplus_cat_entry entry;
43734
43735 + pax_track_stack();
43736 +
43737 if (HFSPLUS_IS_RSRC(inode))
43738 main_inode = HFSPLUS_I(inode).rsrc_inode;
43739
43740 diff -urNp linux-2.6.32.44/fs/hfsplus/ioctl.c linux-2.6.32.44/fs/hfsplus/ioctl.c
43741 --- linux-2.6.32.44/fs/hfsplus/ioctl.c 2011-03-27 14:31:47.000000000 -0400
43742 +++ linux-2.6.32.44/fs/hfsplus/ioctl.c 2011-05-16 21:46:57.000000000 -0400
43743 @@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dent
43744 struct hfsplus_cat_file *file;
43745 int res;
43746
43747 + pax_track_stack();
43748 +
43749 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
43750 return -EOPNOTSUPP;
43751
43752 @@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *
43753 struct hfsplus_cat_file *file;
43754 ssize_t res = 0;
43755
43756 + pax_track_stack();
43757 +
43758 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
43759 return -EOPNOTSUPP;
43760
43761 diff -urNp linux-2.6.32.44/fs/hfsplus/super.c linux-2.6.32.44/fs/hfsplus/super.c
43762 --- linux-2.6.32.44/fs/hfsplus/super.c 2011-03-27 14:31:47.000000000 -0400
43763 +++ linux-2.6.32.44/fs/hfsplus/super.c 2011-05-16 21:46:57.000000000 -0400
43764 @@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct sup
43765 struct nls_table *nls = NULL;
43766 int err = -EINVAL;
43767
43768 + pax_track_stack();
43769 +
43770 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
43771 if (!sbi)
43772 return -ENOMEM;
43773 diff -urNp linux-2.6.32.44/fs/hugetlbfs/inode.c linux-2.6.32.44/fs/hugetlbfs/inode.c
43774 --- linux-2.6.32.44/fs/hugetlbfs/inode.c 2011-03-27 14:31:47.000000000 -0400
43775 +++ linux-2.6.32.44/fs/hugetlbfs/inode.c 2011-04-17 15:56:46.000000000 -0400
43776 @@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs
43777 .kill_sb = kill_litter_super,
43778 };
43779
43780 -static struct vfsmount *hugetlbfs_vfsmount;
43781 +struct vfsmount *hugetlbfs_vfsmount;
43782
43783 static int can_do_hugetlb_shm(void)
43784 {
43785 diff -urNp linux-2.6.32.44/fs/ioctl.c linux-2.6.32.44/fs/ioctl.c
43786 --- linux-2.6.32.44/fs/ioctl.c 2011-03-27 14:31:47.000000000 -0400
43787 +++ linux-2.6.32.44/fs/ioctl.c 2011-04-17 15:56:46.000000000 -0400
43788 @@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiema
43789 u64 phys, u64 len, u32 flags)
43790 {
43791 struct fiemap_extent extent;
43792 - struct fiemap_extent *dest = fieinfo->fi_extents_start;
43793 + struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
43794
43795 /* only count the extents */
43796 if (fieinfo->fi_extents_max == 0) {
43797 @@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *fil
43798
43799 fieinfo.fi_flags = fiemap.fm_flags;
43800 fieinfo.fi_extents_max = fiemap.fm_extent_count;
43801 - fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
43802 + fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
43803
43804 if (fiemap.fm_extent_count != 0 &&
43805 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
43806 @@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *fil
43807 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
43808 fiemap.fm_flags = fieinfo.fi_flags;
43809 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
43810 - if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
43811 + if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
43812 error = -EFAULT;
43813
43814 return error;
43815 diff -urNp linux-2.6.32.44/fs/jbd/checkpoint.c linux-2.6.32.44/fs/jbd/checkpoint.c
43816 --- linux-2.6.32.44/fs/jbd/checkpoint.c 2011-03-27 14:31:47.000000000 -0400
43817 +++ linux-2.6.32.44/fs/jbd/checkpoint.c 2011-05-16 21:46:57.000000000 -0400
43818 @@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal
43819 tid_t this_tid;
43820 int result;
43821
43822 + pax_track_stack();
43823 +
43824 jbd_debug(1, "Start checkpoint\n");
43825
43826 /*
43827 diff -urNp linux-2.6.32.44/fs/jffs2/compr_rtime.c linux-2.6.32.44/fs/jffs2/compr_rtime.c
43828 --- linux-2.6.32.44/fs/jffs2/compr_rtime.c 2011-03-27 14:31:47.000000000 -0400
43829 +++ linux-2.6.32.44/fs/jffs2/compr_rtime.c 2011-05-16 21:46:57.000000000 -0400
43830 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
43831 int outpos = 0;
43832 int pos=0;
43833
43834 + pax_track_stack();
43835 +
43836 memset(positions,0,sizeof(positions));
43837
43838 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
43839 @@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsign
43840 int outpos = 0;
43841 int pos=0;
43842
43843 + pax_track_stack();
43844 +
43845 memset(positions,0,sizeof(positions));
43846
43847 while (outpos<destlen) {
43848 diff -urNp linux-2.6.32.44/fs/jffs2/compr_rubin.c linux-2.6.32.44/fs/jffs2/compr_rubin.c
43849 --- linux-2.6.32.44/fs/jffs2/compr_rubin.c 2011-03-27 14:31:47.000000000 -0400
43850 +++ linux-2.6.32.44/fs/jffs2/compr_rubin.c 2011-05-16 21:46:57.000000000 -0400
43851 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
43852 int ret;
43853 uint32_t mysrclen, mydstlen;
43854
43855 + pax_track_stack();
43856 +
43857 mysrclen = *sourcelen;
43858 mydstlen = *dstlen - 8;
43859
43860 diff -urNp linux-2.6.32.44/fs/jffs2/erase.c linux-2.6.32.44/fs/jffs2/erase.c
43861 --- linux-2.6.32.44/fs/jffs2/erase.c 2011-03-27 14:31:47.000000000 -0400
43862 +++ linux-2.6.32.44/fs/jffs2/erase.c 2011-04-17 15:56:46.000000000 -0400
43863 @@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(stru
43864 struct jffs2_unknown_node marker = {
43865 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
43866 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
43867 - .totlen = cpu_to_je32(c->cleanmarker_size)
43868 + .totlen = cpu_to_je32(c->cleanmarker_size),
43869 + .hdr_crc = cpu_to_je32(0)
43870 };
43871
43872 jffs2_prealloc_raw_node_refs(c, jeb, 1);
43873 diff -urNp linux-2.6.32.44/fs/jffs2/wbuf.c linux-2.6.32.44/fs/jffs2/wbuf.c
43874 --- linux-2.6.32.44/fs/jffs2/wbuf.c 2011-03-27 14:31:47.000000000 -0400
43875 +++ linux-2.6.32.44/fs/jffs2/wbuf.c 2011-04-17 15:56:46.000000000 -0400
43876 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
43877 {
43878 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
43879 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
43880 - .totlen = constant_cpu_to_je32(8)
43881 + .totlen = constant_cpu_to_je32(8),
43882 + .hdr_crc = constant_cpu_to_je32(0)
43883 };
43884
43885 /*
43886 diff -urNp linux-2.6.32.44/fs/jffs2/xattr.c linux-2.6.32.44/fs/jffs2/xattr.c
43887 --- linux-2.6.32.44/fs/jffs2/xattr.c 2011-03-27 14:31:47.000000000 -0400
43888 +++ linux-2.6.32.44/fs/jffs2/xattr.c 2011-05-16 21:46:57.000000000 -0400
43889 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
43890
43891 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
43892
43893 + pax_track_stack();
43894 +
43895 /* Phase.1 : Merge same xref */
43896 for (i=0; i < XREF_TMPHASH_SIZE; i++)
43897 xref_tmphash[i] = NULL;
43898 diff -urNp linux-2.6.32.44/fs/jfs/super.c linux-2.6.32.44/fs/jfs/super.c
43899 --- linux-2.6.32.44/fs/jfs/super.c 2011-03-27 14:31:47.000000000 -0400
43900 +++ linux-2.6.32.44/fs/jfs/super.c 2011-06-07 18:06:04.000000000 -0400
43901 @@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
43902
43903 jfs_inode_cachep =
43904 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
43905 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
43906 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
43907 init_once);
43908 if (jfs_inode_cachep == NULL)
43909 return -ENOMEM;
43910 diff -urNp linux-2.6.32.44/fs/Kconfig.binfmt linux-2.6.32.44/fs/Kconfig.binfmt
43911 --- linux-2.6.32.44/fs/Kconfig.binfmt 2011-03-27 14:31:47.000000000 -0400
43912 +++ linux-2.6.32.44/fs/Kconfig.binfmt 2011-04-17 15:56:46.000000000 -0400
43913 @@ -86,7 +86,7 @@ config HAVE_AOUT
43914
43915 config BINFMT_AOUT
43916 tristate "Kernel support for a.out and ECOFF binaries"
43917 - depends on HAVE_AOUT
43918 + depends on HAVE_AOUT && BROKEN
43919 ---help---
43920 A.out (Assembler.OUTput) is a set of formats for libraries and
43921 executables used in the earliest versions of UNIX. Linux used
43922 diff -urNp linux-2.6.32.44/fs/libfs.c linux-2.6.32.44/fs/libfs.c
43923 --- linux-2.6.32.44/fs/libfs.c 2011-03-27 14:31:47.000000000 -0400
43924 +++ linux-2.6.32.44/fs/libfs.c 2011-05-11 18:25:15.000000000 -0400
43925 @@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, v
43926
43927 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
43928 struct dentry *next;
43929 + char d_name[sizeof(next->d_iname)];
43930 + const unsigned char *name;
43931 +
43932 next = list_entry(p, struct dentry, d_u.d_child);
43933 if (d_unhashed(next) || !next->d_inode)
43934 continue;
43935
43936 spin_unlock(&dcache_lock);
43937 - if (filldir(dirent, next->d_name.name,
43938 + name = next->d_name.name;
43939 + if (name == next->d_iname) {
43940 + memcpy(d_name, name, next->d_name.len);
43941 + name = d_name;
43942 + }
43943 + if (filldir(dirent, name,
43944 next->d_name.len, filp->f_pos,
43945 next->d_inode->i_ino,
43946 dt_type(next->d_inode)) < 0)
43947 diff -urNp linux-2.6.32.44/fs/lockd/clntproc.c linux-2.6.32.44/fs/lockd/clntproc.c
43948 --- linux-2.6.32.44/fs/lockd/clntproc.c 2011-03-27 14:31:47.000000000 -0400
43949 +++ linux-2.6.32.44/fs/lockd/clntproc.c 2011-05-16 21:46:57.000000000 -0400
43950 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
43951 /*
43952 * Cookie counter for NLM requests
43953 */
43954 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
43955 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
43956
43957 void nlmclnt_next_cookie(struct nlm_cookie *c)
43958 {
43959 - u32 cookie = atomic_inc_return(&nlm_cookie);
43960 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
43961
43962 memcpy(c->data, &cookie, 4);
43963 c->len=4;
43964 @@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
43965 struct nlm_rqst reqst, *req;
43966 int status;
43967
43968 + pax_track_stack();
43969 +
43970 req = &reqst;
43971 memset(req, 0, sizeof(*req));
43972 locks_init_lock(&req->a_args.lock.fl);
43973 diff -urNp linux-2.6.32.44/fs/lockd/svc.c linux-2.6.32.44/fs/lockd/svc.c
43974 --- linux-2.6.32.44/fs/lockd/svc.c 2011-03-27 14:31:47.000000000 -0400
43975 +++ linux-2.6.32.44/fs/lockd/svc.c 2011-04-17 15:56:46.000000000 -0400
43976 @@ -43,7 +43,7 @@
43977
43978 static struct svc_program nlmsvc_program;
43979
43980 -struct nlmsvc_binding * nlmsvc_ops;
43981 +const struct nlmsvc_binding * nlmsvc_ops;
43982 EXPORT_SYMBOL_GPL(nlmsvc_ops);
43983
43984 static DEFINE_MUTEX(nlmsvc_mutex);
43985 diff -urNp linux-2.6.32.44/fs/locks.c linux-2.6.32.44/fs/locks.c
43986 --- linux-2.6.32.44/fs/locks.c 2011-03-27 14:31:47.000000000 -0400
43987 +++ linux-2.6.32.44/fs/locks.c 2011-07-06 19:47:11.000000000 -0400
43988 @@ -145,10 +145,28 @@ static LIST_HEAD(blocked_list);
43989
43990 static struct kmem_cache *filelock_cache __read_mostly;
43991
43992 +static void locks_init_lock_always(struct file_lock *fl)
43993 +{
43994 + fl->fl_next = NULL;
43995 + fl->fl_fasync = NULL;
43996 + fl->fl_owner = NULL;
43997 + fl->fl_pid = 0;
43998 + fl->fl_nspid = NULL;
43999 + fl->fl_file = NULL;
44000 + fl->fl_flags = 0;
44001 + fl->fl_type = 0;
44002 + fl->fl_start = fl->fl_end = 0;
44003 +}
44004 +
44005 /* Allocate an empty lock structure. */
44006 static struct file_lock *locks_alloc_lock(void)
44007 {
44008 - return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
44009 + struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
44010 +
44011 + if (fl)
44012 + locks_init_lock_always(fl);
44013 +
44014 + return fl;
44015 }
44016
44017 void locks_release_private(struct file_lock *fl)
44018 @@ -183,17 +201,9 @@ void locks_init_lock(struct file_lock *f
44019 INIT_LIST_HEAD(&fl->fl_link);
44020 INIT_LIST_HEAD(&fl->fl_block);
44021 init_waitqueue_head(&fl->fl_wait);
44022 - fl->fl_next = NULL;
44023 - fl->fl_fasync = NULL;
44024 - fl->fl_owner = NULL;
44025 - fl->fl_pid = 0;
44026 - fl->fl_nspid = NULL;
44027 - fl->fl_file = NULL;
44028 - fl->fl_flags = 0;
44029 - fl->fl_type = 0;
44030 - fl->fl_start = fl->fl_end = 0;
44031 fl->fl_ops = NULL;
44032 fl->fl_lmops = NULL;
44033 + locks_init_lock_always(fl);
44034 }
44035
44036 EXPORT_SYMBOL(locks_init_lock);
44037 @@ -2007,16 +2017,16 @@ void locks_remove_flock(struct file *fil
44038 return;
44039
44040 if (filp->f_op && filp->f_op->flock) {
44041 - struct file_lock fl = {
44042 + struct file_lock flock = {
44043 .fl_pid = current->tgid,
44044 .fl_file = filp,
44045 .fl_flags = FL_FLOCK,
44046 .fl_type = F_UNLCK,
44047 .fl_end = OFFSET_MAX,
44048 };
44049 - filp->f_op->flock(filp, F_SETLKW, &fl);
44050 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
44051 - fl.fl_ops->fl_release_private(&fl);
44052 + filp->f_op->flock(filp, F_SETLKW, &flock);
44053 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
44054 + flock.fl_ops->fl_release_private(&flock);
44055 }
44056
44057 lock_kernel();
44058 diff -urNp linux-2.6.32.44/fs/mbcache.c linux-2.6.32.44/fs/mbcache.c
44059 --- linux-2.6.32.44/fs/mbcache.c 2011-03-27 14:31:47.000000000 -0400
44060 +++ linux-2.6.32.44/fs/mbcache.c 2011-08-05 20:33:55.000000000 -0400
44061 @@ -266,9 +266,9 @@ mb_cache_create(const char *name, struct
44062 if (!cache)
44063 goto fail;
44064 cache->c_name = name;
44065 - cache->c_op.free = NULL;
44066 + *(void **)&cache->c_op.free = NULL;
44067 if (cache_op)
44068 - cache->c_op.free = cache_op->free;
44069 + *(void **)&cache->c_op.free = cache_op->free;
44070 atomic_set(&cache->c_entry_count, 0);
44071 cache->c_bucket_bits = bucket_bits;
44072 #ifdef MB_CACHE_INDEXES_COUNT
44073 diff -urNp linux-2.6.32.44/fs/namei.c linux-2.6.32.44/fs/namei.c
44074 --- linux-2.6.32.44/fs/namei.c 2011-03-27 14:31:47.000000000 -0400
44075 +++ linux-2.6.32.44/fs/namei.c 2011-05-16 21:46:57.000000000 -0400
44076 @@ -224,14 +224,6 @@ int generic_permission(struct inode *ino
44077 return ret;
44078
44079 /*
44080 - * Read/write DACs are always overridable.
44081 - * Executable DACs are overridable if at least one exec bit is set.
44082 - */
44083 - if (!(mask & MAY_EXEC) || execute_ok(inode))
44084 - if (capable(CAP_DAC_OVERRIDE))
44085 - return 0;
44086 -
44087 - /*
44088 * Searching includes executable on directories, else just read.
44089 */
44090 mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
44091 @@ -239,6 +231,14 @@ int generic_permission(struct inode *ino
44092 if (capable(CAP_DAC_READ_SEARCH))
44093 return 0;
44094
44095 + /*
44096 + * Read/write DACs are always overridable.
44097 + * Executable DACs are overridable if at least one exec bit is set.
44098 + */
44099 + if (!(mask & MAY_EXEC) || execute_ok(inode))
44100 + if (capable(CAP_DAC_OVERRIDE))
44101 + return 0;
44102 +
44103 return -EACCES;
44104 }
44105
44106 @@ -458,7 +458,8 @@ static int exec_permission_lite(struct i
44107 if (!ret)
44108 goto ok;
44109
44110 - if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
44111 + if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
44112 + capable(CAP_DAC_OVERRIDE))
44113 goto ok;
44114
44115 return ret;
44116 @@ -638,7 +639,7 @@ static __always_inline int __do_follow_l
44117 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
44118 error = PTR_ERR(cookie);
44119 if (!IS_ERR(cookie)) {
44120 - char *s = nd_get_link(nd);
44121 + const char *s = nd_get_link(nd);
44122 error = 0;
44123 if (s)
44124 error = __vfs_follow_link(nd, s);
44125 @@ -669,6 +670,13 @@ static inline int do_follow_link(struct
44126 err = security_inode_follow_link(path->dentry, nd);
44127 if (err)
44128 goto loop;
44129 +
44130 + if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
44131 + path->dentry->d_inode, path->dentry, nd->path.mnt)) {
44132 + err = -EACCES;
44133 + goto loop;
44134 + }
44135 +
44136 current->link_count++;
44137 current->total_link_count++;
44138 nd->depth++;
44139 @@ -1016,11 +1024,18 @@ return_reval:
44140 break;
44141 }
44142 return_base:
44143 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44144 + path_put(&nd->path);
44145 + return -ENOENT;
44146 + }
44147 return 0;
44148 out_dput:
44149 path_put_conditional(&next, nd);
44150 break;
44151 }
44152 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
44153 + err = -ENOENT;
44154 +
44155 path_put(&nd->path);
44156 return_err:
44157 return err;
44158 @@ -1091,13 +1106,20 @@ static int do_path_lookup(int dfd, const
44159 int retval = path_init(dfd, name, flags, nd);
44160 if (!retval)
44161 retval = path_walk(name, nd);
44162 - if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
44163 - nd->path.dentry->d_inode))
44164 - audit_inode(name, nd->path.dentry);
44165 +
44166 + if (likely(!retval)) {
44167 + if (nd->path.dentry && nd->path.dentry->d_inode) {
44168 + if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
44169 + retval = -ENOENT;
44170 + if (!audit_dummy_context())
44171 + audit_inode(name, nd->path.dentry);
44172 + }
44173 + }
44174 if (nd->root.mnt) {
44175 path_put(&nd->root);
44176 nd->root.mnt = NULL;
44177 }
44178 +
44179 return retval;
44180 }
44181
44182 @@ -1576,6 +1598,20 @@ int may_open(struct path *path, int acc_
44183 if (error)
44184 goto err_out;
44185
44186 +
44187 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
44188 + error = -EPERM;
44189 + goto err_out;
44190 + }
44191 + if (gr_handle_rawio(inode)) {
44192 + error = -EPERM;
44193 + goto err_out;
44194 + }
44195 + if (!gr_acl_handle_open(dentry, path->mnt, flag)) {
44196 + error = -EACCES;
44197 + goto err_out;
44198 + }
44199 +
44200 if (flag & O_TRUNC) {
44201 error = get_write_access(inode);
44202 if (error)
44203 @@ -1621,12 +1657,19 @@ static int __open_namei_create(struct na
44204 int error;
44205 struct dentry *dir = nd->path.dentry;
44206
44207 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, nd->path.mnt, flag, mode)) {
44208 + error = -EACCES;
44209 + goto out_unlock;
44210 + }
44211 +
44212 if (!IS_POSIXACL(dir->d_inode))
44213 mode &= ~current_umask();
44214 error = security_path_mknod(&nd->path, path->dentry, mode, 0);
44215 if (error)
44216 goto out_unlock;
44217 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
44218 + if (!error)
44219 + gr_handle_create(path->dentry, nd->path.mnt);
44220 out_unlock:
44221 mutex_unlock(&dir->d_inode->i_mutex);
44222 dput(nd->path.dentry);
44223 @@ -1709,6 +1752,22 @@ struct file *do_filp_open(int dfd, const
44224 &nd, flag);
44225 if (error)
44226 return ERR_PTR(error);
44227 +
44228 + if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
44229 + error = -EPERM;
44230 + goto exit;
44231 + }
44232 +
44233 + if (gr_handle_rawio(nd.path.dentry->d_inode)) {
44234 + error = -EPERM;
44235 + goto exit;
44236 + }
44237 +
44238 + if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, flag)) {
44239 + error = -EACCES;
44240 + goto exit;
44241 + }
44242 +
44243 goto ok;
44244 }
44245
44246 @@ -1795,6 +1854,14 @@ do_last:
44247 /*
44248 * It already exists.
44249 */
44250 +
44251 + /* only check if O_CREAT is specified, all other checks need
44252 + to go into may_open */
44253 + if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
44254 + error = -EACCES;
44255 + goto exit_mutex_unlock;
44256 + }
44257 +
44258 mutex_unlock(&dir->d_inode->i_mutex);
44259 audit_inode(pathname, path.dentry);
44260
44261 @@ -1887,6 +1954,13 @@ do_link:
44262 error = security_inode_follow_link(path.dentry, &nd);
44263 if (error)
44264 goto exit_dput;
44265 +
44266 + if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
44267 + path.dentry, nd.path.mnt)) {
44268 + error = -EACCES;
44269 + goto exit_dput;
44270 + }
44271 +
44272 error = __do_follow_link(&path, &nd);
44273 if (error) {
44274 /* Does someone understand code flow here? Or it is only
44275 @@ -2061,6 +2135,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
44276 error = may_mknod(mode);
44277 if (error)
44278 goto out_dput;
44279 +
44280 + if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
44281 + error = -EPERM;
44282 + goto out_dput;
44283 + }
44284 +
44285 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
44286 + error = -EACCES;
44287 + goto out_dput;
44288 + }
44289 +
44290 error = mnt_want_write(nd.path.mnt);
44291 if (error)
44292 goto out_dput;
44293 @@ -2081,6 +2166,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
44294 }
44295 out_drop_write:
44296 mnt_drop_write(nd.path.mnt);
44297 +
44298 + if (!error)
44299 + gr_handle_create(dentry, nd.path.mnt);
44300 out_dput:
44301 dput(dentry);
44302 out_unlock:
44303 @@ -2134,6 +2222,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
44304 if (IS_ERR(dentry))
44305 goto out_unlock;
44306
44307 + if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
44308 + error = -EACCES;
44309 + goto out_dput;
44310 + }
44311 +
44312 if (!IS_POSIXACL(nd.path.dentry->d_inode))
44313 mode &= ~current_umask();
44314 error = mnt_want_write(nd.path.mnt);
44315 @@ -2145,6 +2238,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
44316 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
44317 out_drop_write:
44318 mnt_drop_write(nd.path.mnt);
44319 +
44320 + if (!error)
44321 + gr_handle_create(dentry, nd.path.mnt);
44322 +
44323 out_dput:
44324 dput(dentry);
44325 out_unlock:
44326 @@ -2226,6 +2323,8 @@ static long do_rmdir(int dfd, const char
44327 char * name;
44328 struct dentry *dentry;
44329 struct nameidata nd;
44330 + ino_t saved_ino = 0;
44331 + dev_t saved_dev = 0;
44332
44333 error = user_path_parent(dfd, pathname, &nd, &name);
44334 if (error)
44335 @@ -2250,6 +2349,19 @@ static long do_rmdir(int dfd, const char
44336 error = PTR_ERR(dentry);
44337 if (IS_ERR(dentry))
44338 goto exit2;
44339 +
44340 + if (dentry->d_inode != NULL) {
44341 + if (dentry->d_inode->i_nlink <= 1) {
44342 + saved_ino = dentry->d_inode->i_ino;
44343 + saved_dev = gr_get_dev_from_dentry(dentry);
44344 + }
44345 +
44346 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
44347 + error = -EACCES;
44348 + goto exit3;
44349 + }
44350 + }
44351 +
44352 error = mnt_want_write(nd.path.mnt);
44353 if (error)
44354 goto exit3;
44355 @@ -2257,6 +2369,8 @@ static long do_rmdir(int dfd, const char
44356 if (error)
44357 goto exit4;
44358 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
44359 + if (!error && (saved_dev || saved_ino))
44360 + gr_handle_delete(saved_ino, saved_dev);
44361 exit4:
44362 mnt_drop_write(nd.path.mnt);
44363 exit3:
44364 @@ -2318,6 +2432,8 @@ static long do_unlinkat(int dfd, const c
44365 struct dentry *dentry;
44366 struct nameidata nd;
44367 struct inode *inode = NULL;
44368 + ino_t saved_ino = 0;
44369 + dev_t saved_dev = 0;
44370
44371 error = user_path_parent(dfd, pathname, &nd, &name);
44372 if (error)
44373 @@ -2337,8 +2453,19 @@ static long do_unlinkat(int dfd, const c
44374 if (nd.last.name[nd.last.len])
44375 goto slashes;
44376 inode = dentry->d_inode;
44377 - if (inode)
44378 + if (inode) {
44379 + if (inode->i_nlink <= 1) {
44380 + saved_ino = inode->i_ino;
44381 + saved_dev = gr_get_dev_from_dentry(dentry);
44382 + }
44383 +
44384 atomic_inc(&inode->i_count);
44385 +
44386 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
44387 + error = -EACCES;
44388 + goto exit2;
44389 + }
44390 + }
44391 error = mnt_want_write(nd.path.mnt);
44392 if (error)
44393 goto exit2;
44394 @@ -2346,6 +2473,8 @@ static long do_unlinkat(int dfd, const c
44395 if (error)
44396 goto exit3;
44397 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
44398 + if (!error && (saved_ino || saved_dev))
44399 + gr_handle_delete(saved_ino, saved_dev);
44400 exit3:
44401 mnt_drop_write(nd.path.mnt);
44402 exit2:
44403 @@ -2424,6 +2553,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
44404 if (IS_ERR(dentry))
44405 goto out_unlock;
44406
44407 + if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
44408 + error = -EACCES;
44409 + goto out_dput;
44410 + }
44411 +
44412 error = mnt_want_write(nd.path.mnt);
44413 if (error)
44414 goto out_dput;
44415 @@ -2431,6 +2565,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
44416 if (error)
44417 goto out_drop_write;
44418 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
44419 + if (!error)
44420 + gr_handle_create(dentry, nd.path.mnt);
44421 out_drop_write:
44422 mnt_drop_write(nd.path.mnt);
44423 out_dput:
44424 @@ -2524,6 +2660,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
44425 error = PTR_ERR(new_dentry);
44426 if (IS_ERR(new_dentry))
44427 goto out_unlock;
44428 +
44429 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
44430 + old_path.dentry->d_inode,
44431 + old_path.dentry->d_inode->i_mode, to)) {
44432 + error = -EACCES;
44433 + goto out_dput;
44434 + }
44435 +
44436 + if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
44437 + old_path.dentry, old_path.mnt, to)) {
44438 + error = -EACCES;
44439 + goto out_dput;
44440 + }
44441 +
44442 error = mnt_want_write(nd.path.mnt);
44443 if (error)
44444 goto out_dput;
44445 @@ -2531,6 +2681,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
44446 if (error)
44447 goto out_drop_write;
44448 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
44449 + if (!error)
44450 + gr_handle_create(new_dentry, nd.path.mnt);
44451 out_drop_write:
44452 mnt_drop_write(nd.path.mnt);
44453 out_dput:
44454 @@ -2708,6 +2860,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44455 char *to;
44456 int error;
44457
44458 + pax_track_stack();
44459 +
44460 error = user_path_parent(olddfd, oldname, &oldnd, &from);
44461 if (error)
44462 goto exit;
44463 @@ -2764,6 +2918,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44464 if (new_dentry == trap)
44465 goto exit5;
44466
44467 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
44468 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
44469 + to);
44470 + if (error)
44471 + goto exit5;
44472 +
44473 error = mnt_want_write(oldnd.path.mnt);
44474 if (error)
44475 goto exit5;
44476 @@ -2773,6 +2933,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44477 goto exit6;
44478 error = vfs_rename(old_dir->d_inode, old_dentry,
44479 new_dir->d_inode, new_dentry);
44480 + if (!error)
44481 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
44482 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
44483 exit6:
44484 mnt_drop_write(oldnd.path.mnt);
44485 exit5:
44486 @@ -2798,6 +2961,8 @@ SYSCALL_DEFINE2(rename, const char __use
44487
44488 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
44489 {
44490 + char tmpbuf[64];
44491 + const char *newlink;
44492 int len;
44493
44494 len = PTR_ERR(link);
44495 @@ -2807,7 +2972,14 @@ int vfs_readlink(struct dentry *dentry,
44496 len = strlen(link);
44497 if (len > (unsigned) buflen)
44498 len = buflen;
44499 - if (copy_to_user(buffer, link, len))
44500 +
44501 + if (len < sizeof(tmpbuf)) {
44502 + memcpy(tmpbuf, link, len);
44503 + newlink = tmpbuf;
44504 + } else
44505 + newlink = link;
44506 +
44507 + if (copy_to_user(buffer, newlink, len))
44508 len = -EFAULT;
44509 out:
44510 return len;
44511 diff -urNp linux-2.6.32.44/fs/namespace.c linux-2.6.32.44/fs/namespace.c
44512 --- linux-2.6.32.44/fs/namespace.c 2011-03-27 14:31:47.000000000 -0400
44513 +++ linux-2.6.32.44/fs/namespace.c 2011-04-17 15:56:46.000000000 -0400
44514 @@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mn
44515 if (!(sb->s_flags & MS_RDONLY))
44516 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
44517 up_write(&sb->s_umount);
44518 +
44519 + gr_log_remount(mnt->mnt_devname, retval);
44520 +
44521 return retval;
44522 }
44523
44524 @@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mn
44525 security_sb_umount_busy(mnt);
44526 up_write(&namespace_sem);
44527 release_mounts(&umount_list);
44528 +
44529 + gr_log_unmount(mnt->mnt_devname, retval);
44530 +
44531 return retval;
44532 }
44533
44534 @@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_
44535 if (retval)
44536 goto dput_out;
44537
44538 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
44539 + retval = -EPERM;
44540 + goto dput_out;
44541 + }
44542 +
44543 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
44544 + retval = -EPERM;
44545 + goto dput_out;
44546 + }
44547 +
44548 if (flags & MS_REMOUNT)
44549 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
44550 data_page);
44551 @@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_
44552 dev_name, data_page);
44553 dput_out:
44554 path_put(&path);
44555 +
44556 + gr_log_mount(dev_name, dir_name, retval);
44557 +
44558 return retval;
44559 }
44560
44561 @@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char _
44562 goto out1;
44563 }
44564
44565 + if (gr_handle_chroot_pivot()) {
44566 + error = -EPERM;
44567 + path_put(&old);
44568 + goto out1;
44569 + }
44570 +
44571 read_lock(&current->fs->lock);
44572 root = current->fs->root;
44573 path_get(&current->fs->root);
44574 diff -urNp linux-2.6.32.44/fs/ncpfs/dir.c linux-2.6.32.44/fs/ncpfs/dir.c
44575 --- linux-2.6.32.44/fs/ncpfs/dir.c 2011-03-27 14:31:47.000000000 -0400
44576 +++ linux-2.6.32.44/fs/ncpfs/dir.c 2011-05-16 21:46:57.000000000 -0400
44577 @@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *den
44578 int res, val = 0, len;
44579 __u8 __name[NCP_MAXPATHLEN + 1];
44580
44581 + pax_track_stack();
44582 +
44583 parent = dget_parent(dentry);
44584 dir = parent->d_inode;
44585
44586 @@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct
44587 int error, res, len;
44588 __u8 __name[NCP_MAXPATHLEN + 1];
44589
44590 + pax_track_stack();
44591 +
44592 lock_kernel();
44593 error = -EIO;
44594 if (!ncp_conn_valid(server))
44595 @@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, st
44596 int error, result, len;
44597 int opmode;
44598 __u8 __name[NCP_MAXPATHLEN + 1];
44599 -
44600 +
44601 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
44602 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
44603
44604 + pax_track_stack();
44605 +
44606 error = -EIO;
44607 lock_kernel();
44608 if (!ncp_conn_valid(server))
44609 @@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir,
44610 int error, len;
44611 __u8 __name[NCP_MAXPATHLEN + 1];
44612
44613 + pax_track_stack();
44614 +
44615 DPRINTK("ncp_mkdir: making %s/%s\n",
44616 dentry->d_parent->d_name.name, dentry->d_name.name);
44617
44618 @@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir,
44619 if (!ncp_conn_valid(server))
44620 goto out;
44621
44622 + pax_track_stack();
44623 +
44624 ncp_age_dentry(server, dentry);
44625 len = sizeof(__name);
44626 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
44627 @@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_
44628 int old_len, new_len;
44629 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
44630
44631 + pax_track_stack();
44632 +
44633 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
44634 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
44635 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
44636 diff -urNp linux-2.6.32.44/fs/ncpfs/inode.c linux-2.6.32.44/fs/ncpfs/inode.c
44637 --- linux-2.6.32.44/fs/ncpfs/inode.c 2011-03-27 14:31:47.000000000 -0400
44638 +++ linux-2.6.32.44/fs/ncpfs/inode.c 2011-05-16 21:46:57.000000000 -0400
44639 @@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_b
44640 #endif
44641 struct ncp_entry_info finfo;
44642
44643 + pax_track_stack();
44644 +
44645 data.wdog_pid = NULL;
44646 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
44647 if (!server)
44648 diff -urNp linux-2.6.32.44/fs/nfs/inode.c linux-2.6.32.44/fs/nfs/inode.c
44649 --- linux-2.6.32.44/fs/nfs/inode.c 2011-05-10 22:12:01.000000000 -0400
44650 +++ linux-2.6.32.44/fs/nfs/inode.c 2011-07-06 19:53:33.000000000 -0400
44651 @@ -156,7 +156,7 @@ static void nfs_zap_caches_locked(struct
44652 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
44653 nfsi->attrtimeo_timestamp = jiffies;
44654
44655 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
44656 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
44657 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
44658 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
44659 else
44660 @@ -973,16 +973,16 @@ static int nfs_size_need_update(const st
44661 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
44662 }
44663
44664 -static atomic_long_t nfs_attr_generation_counter;
44665 +static atomic_long_unchecked_t nfs_attr_generation_counter;
44666
44667 static unsigned long nfs_read_attr_generation_counter(void)
44668 {
44669 - return atomic_long_read(&nfs_attr_generation_counter);
44670 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
44671 }
44672
44673 unsigned long nfs_inc_attr_generation_counter(void)
44674 {
44675 - return atomic_long_inc_return(&nfs_attr_generation_counter);
44676 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
44677 }
44678
44679 void nfs_fattr_init(struct nfs_fattr *fattr)
44680 diff -urNp linux-2.6.32.44/fs/nfsd/lockd.c linux-2.6.32.44/fs/nfsd/lockd.c
44681 --- linux-2.6.32.44/fs/nfsd/lockd.c 2011-04-17 17:00:52.000000000 -0400
44682 +++ linux-2.6.32.44/fs/nfsd/lockd.c 2011-04-17 17:03:15.000000000 -0400
44683 @@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
44684 fput(filp);
44685 }
44686
44687 -static struct nlmsvc_binding nfsd_nlm_ops = {
44688 +static const struct nlmsvc_binding nfsd_nlm_ops = {
44689 .fopen = nlm_fopen, /* open file for locking */
44690 .fclose = nlm_fclose, /* close file */
44691 };
44692 diff -urNp linux-2.6.32.44/fs/nfsd/nfs4state.c linux-2.6.32.44/fs/nfsd/nfs4state.c
44693 --- linux-2.6.32.44/fs/nfsd/nfs4state.c 2011-03-27 14:31:47.000000000 -0400
44694 +++ linux-2.6.32.44/fs/nfsd/nfs4state.c 2011-05-16 21:46:57.000000000 -0400
44695 @@ -3457,6 +3457,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
44696 unsigned int cmd;
44697 int err;
44698
44699 + pax_track_stack();
44700 +
44701 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
44702 (long long) lock->lk_offset,
44703 (long long) lock->lk_length);
44704 diff -urNp linux-2.6.32.44/fs/nfsd/nfs4xdr.c linux-2.6.32.44/fs/nfsd/nfs4xdr.c
44705 --- linux-2.6.32.44/fs/nfsd/nfs4xdr.c 2011-03-27 14:31:47.000000000 -0400
44706 +++ linux-2.6.32.44/fs/nfsd/nfs4xdr.c 2011-05-16 21:46:57.000000000 -0400
44707 @@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
44708 struct nfsd4_compoundres *resp = rqstp->rq_resp;
44709 u32 minorversion = resp->cstate.minorversion;
44710
44711 + pax_track_stack();
44712 +
44713 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
44714 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
44715 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
44716 diff -urNp linux-2.6.32.44/fs/nfsd/vfs.c linux-2.6.32.44/fs/nfsd/vfs.c
44717 --- linux-2.6.32.44/fs/nfsd/vfs.c 2011-05-10 22:12:01.000000000 -0400
44718 +++ linux-2.6.32.44/fs/nfsd/vfs.c 2011-05-10 22:12:33.000000000 -0400
44719 @@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
44720 } else {
44721 oldfs = get_fs();
44722 set_fs(KERNEL_DS);
44723 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
44724 + host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
44725 set_fs(oldfs);
44726 }
44727
44728 @@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
44729
44730 /* Write the data. */
44731 oldfs = get_fs(); set_fs(KERNEL_DS);
44732 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
44733 + host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
44734 set_fs(oldfs);
44735 if (host_err < 0)
44736 goto out_nfserr;
44737 @@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
44738 */
44739
44740 oldfs = get_fs(); set_fs(KERNEL_DS);
44741 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
44742 + host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
44743 set_fs(oldfs);
44744
44745 if (host_err < 0)
44746 diff -urNp linux-2.6.32.44/fs/nilfs2/ioctl.c linux-2.6.32.44/fs/nilfs2/ioctl.c
44747 --- linux-2.6.32.44/fs/nilfs2/ioctl.c 2011-03-27 14:31:47.000000000 -0400
44748 +++ linux-2.6.32.44/fs/nilfs2/ioctl.c 2011-05-04 17:56:28.000000000 -0400
44749 @@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(st
44750 unsigned int cmd, void __user *argp)
44751 {
44752 struct nilfs_argv argv[5];
44753 - const static size_t argsz[5] = {
44754 + static const size_t argsz[5] = {
44755 sizeof(struct nilfs_vdesc),
44756 sizeof(struct nilfs_period),
44757 sizeof(__u64),
44758 diff -urNp linux-2.6.32.44/fs/notify/dnotify/dnotify.c linux-2.6.32.44/fs/notify/dnotify/dnotify.c
44759 --- linux-2.6.32.44/fs/notify/dnotify/dnotify.c 2011-03-27 14:31:47.000000000 -0400
44760 +++ linux-2.6.32.44/fs/notify/dnotify/dnotify.c 2011-04-17 15:56:46.000000000 -0400
44761 @@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsn
44762 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
44763 }
44764
44765 -static struct fsnotify_ops dnotify_fsnotify_ops = {
44766 +static const struct fsnotify_ops dnotify_fsnotify_ops = {
44767 .handle_event = dnotify_handle_event,
44768 .should_send_event = dnotify_should_send_event,
44769 .free_group_priv = NULL,
44770 diff -urNp linux-2.6.32.44/fs/notify/notification.c linux-2.6.32.44/fs/notify/notification.c
44771 --- linux-2.6.32.44/fs/notify/notification.c 2011-03-27 14:31:47.000000000 -0400
44772 +++ linux-2.6.32.44/fs/notify/notification.c 2011-05-04 17:56:28.000000000 -0400
44773 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
44774 * get set to 0 so it will never get 'freed'
44775 */
44776 static struct fsnotify_event q_overflow_event;
44777 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44778 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44779
44780 /**
44781 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
44782 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
44783 */
44784 u32 fsnotify_get_cookie(void)
44785 {
44786 - return atomic_inc_return(&fsnotify_sync_cookie);
44787 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
44788 }
44789 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
44790
44791 diff -urNp linux-2.6.32.44/fs/ntfs/dir.c linux-2.6.32.44/fs/ntfs/dir.c
44792 --- linux-2.6.32.44/fs/ntfs/dir.c 2011-03-27 14:31:47.000000000 -0400
44793 +++ linux-2.6.32.44/fs/ntfs/dir.c 2011-04-17 15:56:46.000000000 -0400
44794 @@ -1328,7 +1328,7 @@ find_next_index_buffer:
44795 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
44796 ~(s64)(ndir->itype.index.block_size - 1)));
44797 /* Bounds checks. */
44798 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44799 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44800 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
44801 "inode 0x%lx or driver bug.", vdir->i_ino);
44802 goto err_out;
44803 diff -urNp linux-2.6.32.44/fs/ntfs/file.c linux-2.6.32.44/fs/ntfs/file.c
44804 --- linux-2.6.32.44/fs/ntfs/file.c 2011-03-27 14:31:47.000000000 -0400
44805 +++ linux-2.6.32.44/fs/ntfs/file.c 2011-04-17 15:56:46.000000000 -0400
44806 @@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_
44807 #endif /* NTFS_RW */
44808 };
44809
44810 -const struct file_operations ntfs_empty_file_ops = {};
44811 +const struct file_operations ntfs_empty_file_ops __read_only;
44812
44813 -const struct inode_operations ntfs_empty_inode_ops = {};
44814 +const struct inode_operations ntfs_empty_inode_ops __read_only;
44815 diff -urNp linux-2.6.32.44/fs/ocfs2/cluster/masklog.c linux-2.6.32.44/fs/ocfs2/cluster/masklog.c
44816 --- linux-2.6.32.44/fs/ocfs2/cluster/masklog.c 2011-03-27 14:31:47.000000000 -0400
44817 +++ linux-2.6.32.44/fs/ocfs2/cluster/masklog.c 2011-04-17 15:56:46.000000000 -0400
44818 @@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject
44819 return mlog_mask_store(mlog_attr->mask, buf, count);
44820 }
44821
44822 -static struct sysfs_ops mlog_attr_ops = {
44823 +static const struct sysfs_ops mlog_attr_ops = {
44824 .show = mlog_show,
44825 .store = mlog_store,
44826 };
44827 diff -urNp linux-2.6.32.44/fs/ocfs2/localalloc.c linux-2.6.32.44/fs/ocfs2/localalloc.c
44828 --- linux-2.6.32.44/fs/ocfs2/localalloc.c 2011-03-27 14:31:47.000000000 -0400
44829 +++ linux-2.6.32.44/fs/ocfs2/localalloc.c 2011-04-17 15:56:46.000000000 -0400
44830 @@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_windo
44831 goto bail;
44832 }
44833
44834 - atomic_inc(&osb->alloc_stats.moves);
44835 + atomic_inc_unchecked(&osb->alloc_stats.moves);
44836
44837 status = 0;
44838 bail:
44839 diff -urNp linux-2.6.32.44/fs/ocfs2/namei.c linux-2.6.32.44/fs/ocfs2/namei.c
44840 --- linux-2.6.32.44/fs/ocfs2/namei.c 2011-03-27 14:31:47.000000000 -0400
44841 +++ linux-2.6.32.44/fs/ocfs2/namei.c 2011-05-16 21:46:57.000000000 -0400
44842 @@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *ol
44843 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
44844 struct ocfs2_dir_lookup_result target_insert = { NULL, };
44845
44846 + pax_track_stack();
44847 +
44848 /* At some point it might be nice to break this function up a
44849 * bit. */
44850
44851 diff -urNp linux-2.6.32.44/fs/ocfs2/ocfs2.h linux-2.6.32.44/fs/ocfs2/ocfs2.h
44852 --- linux-2.6.32.44/fs/ocfs2/ocfs2.h 2011-03-27 14:31:47.000000000 -0400
44853 +++ linux-2.6.32.44/fs/ocfs2/ocfs2.h 2011-04-17 15:56:46.000000000 -0400
44854 @@ -217,11 +217,11 @@ enum ocfs2_vol_state
44855
44856 struct ocfs2_alloc_stats
44857 {
44858 - atomic_t moves;
44859 - atomic_t local_data;
44860 - atomic_t bitmap_data;
44861 - atomic_t bg_allocs;
44862 - atomic_t bg_extends;
44863 + atomic_unchecked_t moves;
44864 + atomic_unchecked_t local_data;
44865 + atomic_unchecked_t bitmap_data;
44866 + atomic_unchecked_t bg_allocs;
44867 + atomic_unchecked_t bg_extends;
44868 };
44869
44870 enum ocfs2_local_alloc_state
44871 diff -urNp linux-2.6.32.44/fs/ocfs2/suballoc.c linux-2.6.32.44/fs/ocfs2/suballoc.c
44872 --- linux-2.6.32.44/fs/ocfs2/suballoc.c 2011-03-27 14:31:47.000000000 -0400
44873 +++ linux-2.6.32.44/fs/ocfs2/suballoc.c 2011-04-17 15:56:46.000000000 -0400
44874 @@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(s
44875 mlog_errno(status);
44876 goto bail;
44877 }
44878 - atomic_inc(&osb->alloc_stats.bg_extends);
44879 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
44880
44881 /* You should never ask for this much metadata */
44882 BUG_ON(bits_wanted >
44883 @@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_su
44884 mlog_errno(status);
44885 goto bail;
44886 }
44887 - atomic_inc(&osb->alloc_stats.bg_allocs);
44888 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
44889
44890 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
44891 ac->ac_bits_given += (*num_bits);
44892 @@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_s
44893 mlog_errno(status);
44894 goto bail;
44895 }
44896 - atomic_inc(&osb->alloc_stats.bg_allocs);
44897 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
44898
44899 BUG_ON(num_bits != 1);
44900
44901 @@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
44902 cluster_start,
44903 num_clusters);
44904 if (!status)
44905 - atomic_inc(&osb->alloc_stats.local_data);
44906 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
44907 } else {
44908 if (min_clusters > (osb->bitmap_cpg - 1)) {
44909 /* The only paths asking for contiguousness
44910 @@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
44911 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
44912 bg_blkno,
44913 bg_bit_off);
44914 - atomic_inc(&osb->alloc_stats.bitmap_data);
44915 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
44916 }
44917 }
44918 if (status < 0) {
44919 diff -urNp linux-2.6.32.44/fs/ocfs2/super.c linux-2.6.32.44/fs/ocfs2/super.c
44920 --- linux-2.6.32.44/fs/ocfs2/super.c 2011-03-27 14:31:47.000000000 -0400
44921 +++ linux-2.6.32.44/fs/ocfs2/super.c 2011-04-17 15:56:46.000000000 -0400
44922 @@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
44923 "%10s => GlobalAllocs: %d LocalAllocs: %d "
44924 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
44925 "Stats",
44926 - atomic_read(&osb->alloc_stats.bitmap_data),
44927 - atomic_read(&osb->alloc_stats.local_data),
44928 - atomic_read(&osb->alloc_stats.bg_allocs),
44929 - atomic_read(&osb->alloc_stats.moves),
44930 - atomic_read(&osb->alloc_stats.bg_extends));
44931 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
44932 + atomic_read_unchecked(&osb->alloc_stats.local_data),
44933 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
44934 + atomic_read_unchecked(&osb->alloc_stats.moves),
44935 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
44936
44937 out += snprintf(buf + out, len - out,
44938 "%10s => State: %u Descriptor: %llu Size: %u bits "
44939 @@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct
44940 spin_lock_init(&osb->osb_xattr_lock);
44941 ocfs2_init_inode_steal_slot(osb);
44942
44943 - atomic_set(&osb->alloc_stats.moves, 0);
44944 - atomic_set(&osb->alloc_stats.local_data, 0);
44945 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
44946 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
44947 - atomic_set(&osb->alloc_stats.bg_extends, 0);
44948 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
44949 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
44950 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
44951 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
44952 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
44953
44954 /* Copy the blockcheck stats from the superblock probe */
44955 osb->osb_ecc_stats = *stats;
44956 diff -urNp linux-2.6.32.44/fs/open.c linux-2.6.32.44/fs/open.c
44957 --- linux-2.6.32.44/fs/open.c 2011-03-27 14:31:47.000000000 -0400
44958 +++ linux-2.6.32.44/fs/open.c 2011-04-17 15:56:46.000000000 -0400
44959 @@ -275,6 +275,10 @@ static long do_sys_truncate(const char _
44960 error = locks_verify_truncate(inode, NULL, length);
44961 if (!error)
44962 error = security_path_truncate(&path, length, 0);
44963 +
44964 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
44965 + error = -EACCES;
44966 +
44967 if (!error) {
44968 vfs_dq_init(inode);
44969 error = do_truncate(path.dentry, length, 0, NULL);
44970 @@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
44971 if (__mnt_is_readonly(path.mnt))
44972 res = -EROFS;
44973
44974 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
44975 + res = -EACCES;
44976 +
44977 out_path_release:
44978 path_put(&path);
44979 out:
44980 @@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user
44981 if (error)
44982 goto dput_and_out;
44983
44984 + gr_log_chdir(path.dentry, path.mnt);
44985 +
44986 set_fs_pwd(current->fs, &path);
44987
44988 dput_and_out:
44989 @@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
44990 goto out_putf;
44991
44992 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
44993 +
44994 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
44995 + error = -EPERM;
44996 +
44997 + if (!error)
44998 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
44999 +
45000 if (!error)
45001 set_fs_pwd(current->fs, &file->f_path);
45002 out_putf:
45003 @@ -588,7 +604,18 @@ SYSCALL_DEFINE1(chroot, const char __use
45004 if (!capable(CAP_SYS_CHROOT))
45005 goto dput_and_out;
45006
45007 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
45008 + goto dput_and_out;
45009 +
45010 + if (gr_handle_chroot_caps(&path)) {
45011 + error = -ENOMEM;
45012 + goto dput_and_out;
45013 + }
45014 +
45015 set_fs_root(current->fs, &path);
45016 +
45017 + gr_handle_chroot_chdir(&path);
45018 +
45019 error = 0;
45020 dput_and_out:
45021 path_put(&path);
45022 @@ -616,12 +643,27 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
45023 err = mnt_want_write_file(file);
45024 if (err)
45025 goto out_putf;
45026 +
45027 mutex_lock(&inode->i_mutex);
45028 +
45029 + if (!gr_acl_handle_fchmod(dentry, file->f_path.mnt, mode)) {
45030 + err = -EACCES;
45031 + goto out_unlock;
45032 + }
45033 +
45034 if (mode == (mode_t) -1)
45035 mode = inode->i_mode;
45036 +
45037 + if (gr_handle_chroot_chmod(dentry, file->f_path.mnt, mode)) {
45038 + err = -EPERM;
45039 + goto out_unlock;
45040 + }
45041 +
45042 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
45043 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
45044 err = notify_change(dentry, &newattrs);
45045 +
45046 +out_unlock:
45047 mutex_unlock(&inode->i_mutex);
45048 mnt_drop_write(file->f_path.mnt);
45049 out_putf:
45050 @@ -645,12 +687,27 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
45051 error = mnt_want_write(path.mnt);
45052 if (error)
45053 goto dput_and_out;
45054 +
45055 mutex_lock(&inode->i_mutex);
45056 +
45057 + if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
45058 + error = -EACCES;
45059 + goto out_unlock;
45060 + }
45061 +
45062 if (mode == (mode_t) -1)
45063 mode = inode->i_mode;
45064 +
45065 + if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
45066 + error = -EACCES;
45067 + goto out_unlock;
45068 + }
45069 +
45070 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
45071 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
45072 error = notify_change(path.dentry, &newattrs);
45073 +
45074 +out_unlock:
45075 mutex_unlock(&inode->i_mutex);
45076 mnt_drop_write(path.mnt);
45077 dput_and_out:
45078 @@ -664,12 +721,15 @@ SYSCALL_DEFINE2(chmod, const char __user
45079 return sys_fchmodat(AT_FDCWD, filename, mode);
45080 }
45081
45082 -static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
45083 +static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
45084 {
45085 struct inode *inode = dentry->d_inode;
45086 int error;
45087 struct iattr newattrs;
45088
45089 + if (!gr_acl_handle_chown(dentry, mnt))
45090 + return -EACCES;
45091 +
45092 newattrs.ia_valid = ATTR_CTIME;
45093 if (user != (uid_t) -1) {
45094 newattrs.ia_valid |= ATTR_UID;
45095 @@ -700,7 +760,7 @@ SYSCALL_DEFINE3(chown, const char __user
45096 error = mnt_want_write(path.mnt);
45097 if (error)
45098 goto out_release;
45099 - error = chown_common(path.dentry, user, group);
45100 + error = chown_common(path.dentry, user, group, path.mnt);
45101 mnt_drop_write(path.mnt);
45102 out_release:
45103 path_put(&path);
45104 @@ -725,7 +785,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, cons
45105 error = mnt_want_write(path.mnt);
45106 if (error)
45107 goto out_release;
45108 - error = chown_common(path.dentry, user, group);
45109 + error = chown_common(path.dentry, user, group, path.mnt);
45110 mnt_drop_write(path.mnt);
45111 out_release:
45112 path_put(&path);
45113 @@ -744,7 +804,7 @@ SYSCALL_DEFINE3(lchown, const char __use
45114 error = mnt_want_write(path.mnt);
45115 if (error)
45116 goto out_release;
45117 - error = chown_common(path.dentry, user, group);
45118 + error = chown_common(path.dentry, user, group, path.mnt);
45119 mnt_drop_write(path.mnt);
45120 out_release:
45121 path_put(&path);
45122 @@ -767,7 +827,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd
45123 goto out_fput;
45124 dentry = file->f_path.dentry;
45125 audit_inode(NULL, dentry);
45126 - error = chown_common(dentry, user, group);
45127 + error = chown_common(dentry, user, group, file->f_path.mnt);
45128 mnt_drop_write(file->f_path.mnt);
45129 out_fput:
45130 fput(file);
45131 @@ -1036,7 +1096,10 @@ long do_sys_open(int dfd, const char __u
45132 if (!IS_ERR(tmp)) {
45133 fd = get_unused_fd_flags(flags);
45134 if (fd >= 0) {
45135 - struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
45136 + struct file *f;
45137 + /* don't allow to be set by userland */
45138 + flags &= ~FMODE_GREXEC;
45139 + f = do_filp_open(dfd, tmp, flags, mode, 0);
45140 if (IS_ERR(f)) {
45141 put_unused_fd(fd);
45142 fd = PTR_ERR(f);
45143 diff -urNp linux-2.6.32.44/fs/partitions/ldm.c linux-2.6.32.44/fs/partitions/ldm.c
45144 --- linux-2.6.32.44/fs/partitions/ldm.c 2011-06-25 12:55:34.000000000 -0400
45145 +++ linux-2.6.32.44/fs/partitions/ldm.c 2011-06-25 12:56:37.000000000 -0400
45146 @@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
45147 ldm_error ("A VBLK claims to have %d parts.", num);
45148 return false;
45149 }
45150 +
45151 if (rec >= num) {
45152 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
45153 return false;
45154 @@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
45155 goto found;
45156 }
45157
45158 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
45159 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
45160 if (!f) {
45161 ldm_crit ("Out of memory.");
45162 return false;
45163 diff -urNp linux-2.6.32.44/fs/partitions/mac.c linux-2.6.32.44/fs/partitions/mac.c
45164 --- linux-2.6.32.44/fs/partitions/mac.c 2011-03-27 14:31:47.000000000 -0400
45165 +++ linux-2.6.32.44/fs/partitions/mac.c 2011-04-17 15:56:46.000000000 -0400
45166 @@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitio
45167 return 0; /* not a MacOS disk */
45168 }
45169 blocks_in_map = be32_to_cpu(part->map_count);
45170 + printk(" [mac]");
45171 if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
45172 put_dev_sector(sect);
45173 return 0;
45174 }
45175 - printk(" [mac]");
45176 for (slot = 1; slot <= blocks_in_map; ++slot) {
45177 int pos = slot * secsize;
45178 put_dev_sector(sect);
45179 diff -urNp linux-2.6.32.44/fs/pipe.c linux-2.6.32.44/fs/pipe.c
45180 --- linux-2.6.32.44/fs/pipe.c 2011-03-27 14:31:47.000000000 -0400
45181 +++ linux-2.6.32.44/fs/pipe.c 2011-04-23 13:37:17.000000000 -0400
45182 @@ -401,9 +401,9 @@ redo:
45183 }
45184 if (bufs) /* More to do? */
45185 continue;
45186 - if (!pipe->writers)
45187 + if (!atomic_read(&pipe->writers))
45188 break;
45189 - if (!pipe->waiting_writers) {
45190 + if (!atomic_read(&pipe->waiting_writers)) {
45191 /* syscall merging: Usually we must not sleep
45192 * if O_NONBLOCK is set, or if we got some data.
45193 * But if a writer sleeps in kernel space, then
45194 @@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const str
45195 mutex_lock(&inode->i_mutex);
45196 pipe = inode->i_pipe;
45197
45198 - if (!pipe->readers) {
45199 + if (!atomic_read(&pipe->readers)) {
45200 send_sig(SIGPIPE, current, 0);
45201 ret = -EPIPE;
45202 goto out;
45203 @@ -511,7 +511,7 @@ redo1:
45204 for (;;) {
45205 int bufs;
45206
45207 - if (!pipe->readers) {
45208 + if (!atomic_read(&pipe->readers)) {
45209 send_sig(SIGPIPE, current, 0);
45210 if (!ret)
45211 ret = -EPIPE;
45212 @@ -597,9 +597,9 @@ redo2:
45213 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
45214 do_wakeup = 0;
45215 }
45216 - pipe->waiting_writers++;
45217 + atomic_inc(&pipe->waiting_writers);
45218 pipe_wait(pipe);
45219 - pipe->waiting_writers--;
45220 + atomic_dec(&pipe->waiting_writers);
45221 }
45222 out:
45223 mutex_unlock(&inode->i_mutex);
45224 @@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table
45225 mask = 0;
45226 if (filp->f_mode & FMODE_READ) {
45227 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
45228 - if (!pipe->writers && filp->f_version != pipe->w_counter)
45229 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
45230 mask |= POLLHUP;
45231 }
45232
45233 @@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table
45234 * Most Unices do not set POLLERR for FIFOs but on Linux they
45235 * behave exactly like pipes for poll().
45236 */
45237 - if (!pipe->readers)
45238 + if (!atomic_read(&pipe->readers))
45239 mask |= POLLERR;
45240 }
45241
45242 @@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int de
45243
45244 mutex_lock(&inode->i_mutex);
45245 pipe = inode->i_pipe;
45246 - pipe->readers -= decr;
45247 - pipe->writers -= decw;
45248 + atomic_sub(decr, &pipe->readers);
45249 + atomic_sub(decw, &pipe->writers);
45250
45251 - if (!pipe->readers && !pipe->writers) {
45252 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
45253 free_pipe_info(inode);
45254 } else {
45255 wake_up_interruptible_sync(&pipe->wait);
45256 @@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, stru
45257
45258 if (inode->i_pipe) {
45259 ret = 0;
45260 - inode->i_pipe->readers++;
45261 + atomic_inc(&inode->i_pipe->readers);
45262 }
45263
45264 mutex_unlock(&inode->i_mutex);
45265 @@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, str
45266
45267 if (inode->i_pipe) {
45268 ret = 0;
45269 - inode->i_pipe->writers++;
45270 + atomic_inc(&inode->i_pipe->writers);
45271 }
45272
45273 mutex_unlock(&inode->i_mutex);
45274 @@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, stru
45275 if (inode->i_pipe) {
45276 ret = 0;
45277 if (filp->f_mode & FMODE_READ)
45278 - inode->i_pipe->readers++;
45279 + atomic_inc(&inode->i_pipe->readers);
45280 if (filp->f_mode & FMODE_WRITE)
45281 - inode->i_pipe->writers++;
45282 + atomic_inc(&inode->i_pipe->writers);
45283 }
45284
45285 mutex_unlock(&inode->i_mutex);
45286 @@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
45287 inode->i_pipe = NULL;
45288 }
45289
45290 -static struct vfsmount *pipe_mnt __read_mostly;
45291 +struct vfsmount *pipe_mnt __read_mostly;
45292 static int pipefs_delete_dentry(struct dentry *dentry)
45293 {
45294 /*
45295 @@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(voi
45296 goto fail_iput;
45297 inode->i_pipe = pipe;
45298
45299 - pipe->readers = pipe->writers = 1;
45300 + atomic_set(&pipe->readers, 1);
45301 + atomic_set(&pipe->writers, 1);
45302 inode->i_fop = &rdwr_pipefifo_fops;
45303
45304 /*
45305 diff -urNp linux-2.6.32.44/fs/proc/array.c linux-2.6.32.44/fs/proc/array.c
45306 --- linux-2.6.32.44/fs/proc/array.c 2011-03-27 14:31:47.000000000 -0400
45307 +++ linux-2.6.32.44/fs/proc/array.c 2011-05-16 21:46:57.000000000 -0400
45308 @@ -60,6 +60,7 @@
45309 #include <linux/tty.h>
45310 #include <linux/string.h>
45311 #include <linux/mman.h>
45312 +#include <linux/grsecurity.h>
45313 #include <linux/proc_fs.h>
45314 #include <linux/ioport.h>
45315 #include <linux/uaccess.h>
45316 @@ -321,6 +322,21 @@ static inline void task_context_switch_c
45317 p->nivcsw);
45318 }
45319
45320 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45321 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
45322 +{
45323 + if (p->mm)
45324 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
45325 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
45326 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
45327 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
45328 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
45329 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
45330 + else
45331 + seq_printf(m, "PaX:\t-----\n");
45332 +}
45333 +#endif
45334 +
45335 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45336 struct pid *pid, struct task_struct *task)
45337 {
45338 @@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m,
45339 task_cap(m, task);
45340 cpuset_task_status_allowed(m, task);
45341 task_context_switch_counts(m, task);
45342 +
45343 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45344 + task_pax(m, task);
45345 +#endif
45346 +
45347 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
45348 + task_grsec_rbac(m, task);
45349 +#endif
45350 +
45351 return 0;
45352 }
45353
45354 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45355 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45356 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
45357 + _mm->pax_flags & MF_PAX_SEGMEXEC))
45358 +#endif
45359 +
45360 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45361 struct pid *pid, struct task_struct *task, int whole)
45362 {
45363 @@ -358,9 +389,11 @@ static int do_task_stat(struct seq_file
45364 cputime_t cutime, cstime, utime, stime;
45365 cputime_t cgtime, gtime;
45366 unsigned long rsslim = 0;
45367 - char tcomm[sizeof(task->comm)];
45368 + char tcomm[sizeof(task->comm)] = { 0 };
45369 unsigned long flags;
45370
45371 + pax_track_stack();
45372 +
45373 state = *get_task_state(task);
45374 vsize = eip = esp = 0;
45375 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
45376 @@ -433,6 +466,19 @@ static int do_task_stat(struct seq_file
45377 gtime = task_gtime(task);
45378 }
45379
45380 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45381 + if (PAX_RAND_FLAGS(mm)) {
45382 + eip = 0;
45383 + esp = 0;
45384 + wchan = 0;
45385 + }
45386 +#endif
45387 +#ifdef CONFIG_GRKERNSEC_HIDESYM
45388 + wchan = 0;
45389 + eip =0;
45390 + esp =0;
45391 +#endif
45392 +
45393 /* scale priority and nice values from timeslices to -20..20 */
45394 /* to make it look like a "normal" Unix priority/nice value */
45395 priority = task_prio(task);
45396 @@ -473,9 +519,15 @@ static int do_task_stat(struct seq_file
45397 vsize,
45398 mm ? get_mm_rss(mm) : 0,
45399 rsslim,
45400 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45401 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
45402 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
45403 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
45404 +#else
45405 mm ? (permitted ? mm->start_code : 1) : 0,
45406 mm ? (permitted ? mm->end_code : 1) : 0,
45407 (permitted && mm) ? mm->start_stack : 0,
45408 +#endif
45409 esp,
45410 eip,
45411 /* The signal information here is obsolete.
45412 @@ -528,3 +580,18 @@ int proc_pid_statm(struct seq_file *m, s
45413
45414 return 0;
45415 }
45416 +
45417 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45418 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
45419 +{
45420 + u32 curr_ip = 0;
45421 + unsigned long flags;
45422 +
45423 + if (lock_task_sighand(task, &flags)) {
45424 + curr_ip = task->signal->curr_ip;
45425 + unlock_task_sighand(task, &flags);
45426 + }
45427 +
45428 + return sprintf(buffer, "%pI4\n", &curr_ip);
45429 +}
45430 +#endif
45431 diff -urNp linux-2.6.32.44/fs/proc/base.c linux-2.6.32.44/fs/proc/base.c
45432 --- linux-2.6.32.44/fs/proc/base.c 2011-08-09 18:35:30.000000000 -0400
45433 +++ linux-2.6.32.44/fs/proc/base.c 2011-08-09 18:34:33.000000000 -0400
45434 @@ -102,6 +102,22 @@ struct pid_entry {
45435 union proc_op op;
45436 };
45437
45438 +struct getdents_callback {
45439 + struct linux_dirent __user * current_dir;
45440 + struct linux_dirent __user * previous;
45441 + struct file * file;
45442 + int count;
45443 + int error;
45444 +};
45445 +
45446 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
45447 + loff_t offset, u64 ino, unsigned int d_type)
45448 +{
45449 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
45450 + buf->error = -EINVAL;
45451 + return 0;
45452 +}
45453 +
45454 #define NOD(NAME, MODE, IOP, FOP, OP) { \
45455 .name = (NAME), \
45456 .len = sizeof(NAME) - 1, \
45457 @@ -213,6 +229,9 @@ static int check_mem_permission(struct t
45458 if (task == current)
45459 return 0;
45460
45461 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
45462 + return -EPERM;
45463 +
45464 /*
45465 * If current is actively ptrace'ing, and would also be
45466 * permitted to freshly attach with ptrace now, permit it.
45467 @@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_
45468 if (!mm->arg_end)
45469 goto out_mm; /* Shh! No looking before we're done */
45470
45471 + if (gr_acl_handle_procpidmem(task))
45472 + goto out_mm;
45473 +
45474 len = mm->arg_end - mm->arg_start;
45475
45476 if (len > PAGE_SIZE)
45477 @@ -287,12 +309,28 @@ out:
45478 return res;
45479 }
45480
45481 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45482 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45483 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
45484 + _mm->pax_flags & MF_PAX_SEGMEXEC))
45485 +#endif
45486 +
45487 static int proc_pid_auxv(struct task_struct *task, char *buffer)
45488 {
45489 int res = 0;
45490 struct mm_struct *mm = get_task_mm(task);
45491 if (mm) {
45492 unsigned int nwords = 0;
45493 +
45494 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45495 + /* allow if we're currently ptracing this task */
45496 + if (PAX_RAND_FLAGS(mm) &&
45497 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
45498 + mmput(mm);
45499 + return res;
45500 + }
45501 +#endif
45502 +
45503 do {
45504 nwords += 2;
45505 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
45506 @@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_str
45507 }
45508
45509
45510 -#ifdef CONFIG_KALLSYMS
45511 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45512 /*
45513 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
45514 * Returns the resolved symbol. If that fails, simply return the address.
45515 @@ -328,7 +366,7 @@ static int proc_pid_wchan(struct task_st
45516 }
45517 #endif /* CONFIG_KALLSYMS */
45518
45519 -#ifdef CONFIG_STACKTRACE
45520 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45521
45522 #define MAX_STACK_TRACE_DEPTH 64
45523
45524 @@ -522,7 +560,7 @@ static int proc_pid_limits(struct task_s
45525 return count;
45526 }
45527
45528 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45529 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45530 static int proc_pid_syscall(struct task_struct *task, char *buffer)
45531 {
45532 long nr;
45533 @@ -547,7 +585,7 @@ static int proc_pid_syscall(struct task_
45534 /************************************************************************/
45535
45536 /* permission checks */
45537 -static int proc_fd_access_allowed(struct inode *inode)
45538 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
45539 {
45540 struct task_struct *task;
45541 int allowed = 0;
45542 @@ -557,7 +595,10 @@ static int proc_fd_access_allowed(struct
45543 */
45544 task = get_proc_task(inode);
45545 if (task) {
45546 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45547 + if (log)
45548 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
45549 + else
45550 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45551 put_task_struct(task);
45552 }
45553 return allowed;
45554 @@ -936,6 +977,9 @@ static ssize_t environ_read(struct file
45555 if (!task)
45556 goto out_no_task;
45557
45558 + if (gr_acl_handle_procpidmem(task))
45559 + goto out;
45560 +
45561 if (!ptrace_may_access(task, PTRACE_MODE_READ))
45562 goto out;
45563
45564 @@ -1350,7 +1394,7 @@ static void *proc_pid_follow_link(struct
45565 path_put(&nd->path);
45566
45567 /* Are we allowed to snoop on the tasks file descriptors? */
45568 - if (!proc_fd_access_allowed(inode))
45569 + if (!proc_fd_access_allowed(inode,0))
45570 goto out;
45571
45572 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
45573 @@ -1390,8 +1434,18 @@ static int proc_pid_readlink(struct dent
45574 struct path path;
45575
45576 /* Are we allowed to snoop on the tasks file descriptors? */
45577 - if (!proc_fd_access_allowed(inode))
45578 - goto out;
45579 + /* logging this is needed for learning on chromium to work properly,
45580 + but we don't want to flood the logs from 'ps' which does a readlink
45581 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
45582 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
45583 + */
45584 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
45585 + if (!proc_fd_access_allowed(inode,0))
45586 + goto out;
45587 + } else {
45588 + if (!proc_fd_access_allowed(inode,1))
45589 + goto out;
45590 + }
45591
45592 error = PROC_I(inode)->op.proc_get_link(inode, &path);
45593 if (error)
45594 @@ -1456,7 +1510,11 @@ static struct inode *proc_pid_make_inode
45595 rcu_read_lock();
45596 cred = __task_cred(task);
45597 inode->i_uid = cred->euid;
45598 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45599 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45600 +#else
45601 inode->i_gid = cred->egid;
45602 +#endif
45603 rcu_read_unlock();
45604 }
45605 security_task_to_inode(task, inode);
45606 @@ -1474,6 +1532,9 @@ static int pid_getattr(struct vfsmount *
45607 struct inode *inode = dentry->d_inode;
45608 struct task_struct *task;
45609 const struct cred *cred;
45610 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45611 + const struct cred *tmpcred = current_cred();
45612 +#endif
45613
45614 generic_fillattr(inode, stat);
45615
45616 @@ -1481,13 +1542,41 @@ static int pid_getattr(struct vfsmount *
45617 stat->uid = 0;
45618 stat->gid = 0;
45619 task = pid_task(proc_pid(inode), PIDTYPE_PID);
45620 +
45621 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
45622 + rcu_read_unlock();
45623 + return -ENOENT;
45624 + }
45625 +
45626 if (task) {
45627 + cred = __task_cred(task);
45628 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45629 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
45630 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45631 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45632 +#endif
45633 + ) {
45634 +#endif
45635 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45636 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45637 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45638 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45639 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45640 +#endif
45641 task_dumpable(task)) {
45642 - cred = __task_cred(task);
45643 stat->uid = cred->euid;
45644 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45645 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
45646 +#else
45647 stat->gid = cred->egid;
45648 +#endif
45649 }
45650 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45651 + } else {
45652 + rcu_read_unlock();
45653 + return -ENOENT;
45654 + }
45655 +#endif
45656 }
45657 rcu_read_unlock();
45658 return 0;
45659 @@ -1518,11 +1607,20 @@ static int pid_revalidate(struct dentry
45660
45661 if (task) {
45662 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45663 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45664 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45665 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45666 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45667 +#endif
45668 task_dumpable(task)) {
45669 rcu_read_lock();
45670 cred = __task_cred(task);
45671 inode->i_uid = cred->euid;
45672 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45673 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45674 +#else
45675 inode->i_gid = cred->egid;
45676 +#endif
45677 rcu_read_unlock();
45678 } else {
45679 inode->i_uid = 0;
45680 @@ -1643,7 +1741,8 @@ static int proc_fd_info(struct inode *in
45681 int fd = proc_fd(inode);
45682
45683 if (task) {
45684 - files = get_files_struct(task);
45685 + if (!gr_acl_handle_procpidmem(task))
45686 + files = get_files_struct(task);
45687 put_task_struct(task);
45688 }
45689 if (files) {
45690 @@ -1895,12 +1994,22 @@ static const struct file_operations proc
45691 static int proc_fd_permission(struct inode *inode, int mask)
45692 {
45693 int rv;
45694 + struct task_struct *task;
45695
45696 rv = generic_permission(inode, mask, NULL);
45697 - if (rv == 0)
45698 - return 0;
45699 +
45700 if (task_pid(current) == proc_pid(inode))
45701 rv = 0;
45702 +
45703 + task = get_proc_task(inode);
45704 + if (task == NULL)
45705 + return rv;
45706 +
45707 + if (gr_acl_handle_procpidmem(task))
45708 + rv = -EACCES;
45709 +
45710 + put_task_struct(task);
45711 +
45712 return rv;
45713 }
45714
45715 @@ -2009,6 +2118,9 @@ static struct dentry *proc_pident_lookup
45716 if (!task)
45717 goto out_no_task;
45718
45719 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45720 + goto out;
45721 +
45722 /*
45723 * Yes, it does not scale. And it should not. Don't add
45724 * new entries into /proc/<tgid>/ without very good reasons.
45725 @@ -2053,6 +2165,9 @@ static int proc_pident_readdir(struct fi
45726 if (!task)
45727 goto out_no_task;
45728
45729 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45730 + goto out;
45731 +
45732 ret = 0;
45733 i = filp->f_pos;
45734 switch (i) {
45735 @@ -2320,7 +2435,7 @@ static void *proc_self_follow_link(struc
45736 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
45737 void *cookie)
45738 {
45739 - char *s = nd_get_link(nd);
45740 + const char *s = nd_get_link(nd);
45741 if (!IS_ERR(s))
45742 __putname(s);
45743 }
45744 @@ -2522,7 +2637,7 @@ static const struct pid_entry tgid_base_
45745 #ifdef CONFIG_SCHED_DEBUG
45746 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
45747 #endif
45748 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45749 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45750 INF("syscall", S_IRUSR, proc_pid_syscall),
45751 #endif
45752 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45753 @@ -2547,10 +2662,10 @@ static const struct pid_entry tgid_base_
45754 #ifdef CONFIG_SECURITY
45755 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45756 #endif
45757 -#ifdef CONFIG_KALLSYMS
45758 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45759 INF("wchan", S_IRUGO, proc_pid_wchan),
45760 #endif
45761 -#ifdef CONFIG_STACKTRACE
45762 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45763 ONE("stack", S_IRUSR, proc_pid_stack),
45764 #endif
45765 #ifdef CONFIG_SCHEDSTATS
45766 @@ -2580,6 +2695,9 @@ static const struct pid_entry tgid_base_
45767 #ifdef CONFIG_TASK_IO_ACCOUNTING
45768 INF("io", S_IRUSR, proc_tgid_io_accounting),
45769 #endif
45770 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45771 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
45772 +#endif
45773 };
45774
45775 static int proc_tgid_base_readdir(struct file * filp,
45776 @@ -2704,7 +2822,14 @@ static struct dentry *proc_pid_instantia
45777 if (!inode)
45778 goto out;
45779
45780 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45781 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
45782 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45783 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45784 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
45785 +#else
45786 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
45787 +#endif
45788 inode->i_op = &proc_tgid_base_inode_operations;
45789 inode->i_fop = &proc_tgid_base_operations;
45790 inode->i_flags|=S_IMMUTABLE;
45791 @@ -2746,7 +2871,11 @@ struct dentry *proc_pid_lookup(struct in
45792 if (!task)
45793 goto out;
45794
45795 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45796 + goto out_put_task;
45797 +
45798 result = proc_pid_instantiate(dir, dentry, task, NULL);
45799 +out_put_task:
45800 put_task_struct(task);
45801 out:
45802 return result;
45803 @@ -2811,6 +2940,11 @@ int proc_pid_readdir(struct file * filp,
45804 {
45805 unsigned int nr;
45806 struct task_struct *reaper;
45807 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45808 + const struct cred *tmpcred = current_cred();
45809 + const struct cred *itercred;
45810 +#endif
45811 + filldir_t __filldir = filldir;
45812 struct tgid_iter iter;
45813 struct pid_namespace *ns;
45814
45815 @@ -2834,8 +2968,27 @@ int proc_pid_readdir(struct file * filp,
45816 for (iter = next_tgid(ns, iter);
45817 iter.task;
45818 iter.tgid += 1, iter = next_tgid(ns, iter)) {
45819 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45820 + rcu_read_lock();
45821 + itercred = __task_cred(iter.task);
45822 +#endif
45823 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
45824 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45825 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
45826 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45827 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45828 +#endif
45829 + )
45830 +#endif
45831 + )
45832 + __filldir = &gr_fake_filldir;
45833 + else
45834 + __filldir = filldir;
45835 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45836 + rcu_read_unlock();
45837 +#endif
45838 filp->f_pos = iter.tgid + TGID_OFFSET;
45839 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
45840 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
45841 put_task_struct(iter.task);
45842 goto out;
45843 }
45844 @@ -2861,7 +3014,7 @@ static const struct pid_entry tid_base_s
45845 #ifdef CONFIG_SCHED_DEBUG
45846 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
45847 #endif
45848 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45849 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45850 INF("syscall", S_IRUSR, proc_pid_syscall),
45851 #endif
45852 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45853 @@ -2885,10 +3038,10 @@ static const struct pid_entry tid_base_s
45854 #ifdef CONFIG_SECURITY
45855 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45856 #endif
45857 -#ifdef CONFIG_KALLSYMS
45858 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45859 INF("wchan", S_IRUGO, proc_pid_wchan),
45860 #endif
45861 -#ifdef CONFIG_STACKTRACE
45862 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45863 ONE("stack", S_IRUSR, proc_pid_stack),
45864 #endif
45865 #ifdef CONFIG_SCHEDSTATS
45866 diff -urNp linux-2.6.32.44/fs/proc/cmdline.c linux-2.6.32.44/fs/proc/cmdline.c
45867 --- linux-2.6.32.44/fs/proc/cmdline.c 2011-03-27 14:31:47.000000000 -0400
45868 +++ linux-2.6.32.44/fs/proc/cmdline.c 2011-04-17 15:56:46.000000000 -0400
45869 @@ -23,7 +23,11 @@ static const struct file_operations cmdl
45870
45871 static int __init proc_cmdline_init(void)
45872 {
45873 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
45874 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
45875 +#else
45876 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
45877 +#endif
45878 return 0;
45879 }
45880 module_init(proc_cmdline_init);
45881 diff -urNp linux-2.6.32.44/fs/proc/devices.c linux-2.6.32.44/fs/proc/devices.c
45882 --- linux-2.6.32.44/fs/proc/devices.c 2011-03-27 14:31:47.000000000 -0400
45883 +++ linux-2.6.32.44/fs/proc/devices.c 2011-04-17 15:56:46.000000000 -0400
45884 @@ -64,7 +64,11 @@ static const struct file_operations proc
45885
45886 static int __init proc_devices_init(void)
45887 {
45888 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
45889 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
45890 +#else
45891 proc_create("devices", 0, NULL, &proc_devinfo_operations);
45892 +#endif
45893 return 0;
45894 }
45895 module_init(proc_devices_init);
45896 diff -urNp linux-2.6.32.44/fs/proc/inode.c linux-2.6.32.44/fs/proc/inode.c
45897 --- linux-2.6.32.44/fs/proc/inode.c 2011-03-27 14:31:47.000000000 -0400
45898 +++ linux-2.6.32.44/fs/proc/inode.c 2011-04-17 15:56:46.000000000 -0400
45899 @@ -457,7 +457,11 @@ struct inode *proc_get_inode(struct supe
45900 if (de->mode) {
45901 inode->i_mode = de->mode;
45902 inode->i_uid = de->uid;
45903 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45904 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45905 +#else
45906 inode->i_gid = de->gid;
45907 +#endif
45908 }
45909 if (de->size)
45910 inode->i_size = de->size;
45911 diff -urNp linux-2.6.32.44/fs/proc/internal.h linux-2.6.32.44/fs/proc/internal.h
45912 --- linux-2.6.32.44/fs/proc/internal.h 2011-03-27 14:31:47.000000000 -0400
45913 +++ linux-2.6.32.44/fs/proc/internal.h 2011-04-17 15:56:46.000000000 -0400
45914 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
45915 struct pid *pid, struct task_struct *task);
45916 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45917 struct pid *pid, struct task_struct *task);
45918 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45919 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
45920 +#endif
45921 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
45922
45923 extern const struct file_operations proc_maps_operations;
45924 diff -urNp linux-2.6.32.44/fs/proc/Kconfig linux-2.6.32.44/fs/proc/Kconfig
45925 --- linux-2.6.32.44/fs/proc/Kconfig 2011-03-27 14:31:47.000000000 -0400
45926 +++ linux-2.6.32.44/fs/proc/Kconfig 2011-04-17 15:56:46.000000000 -0400
45927 @@ -30,12 +30,12 @@ config PROC_FS
45928
45929 config PROC_KCORE
45930 bool "/proc/kcore support" if !ARM
45931 - depends on PROC_FS && MMU
45932 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
45933
45934 config PROC_VMCORE
45935 bool "/proc/vmcore support (EXPERIMENTAL)"
45936 - depends on PROC_FS && CRASH_DUMP
45937 - default y
45938 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
45939 + default n
45940 help
45941 Exports the dump image of crashed kernel in ELF format.
45942
45943 @@ -59,8 +59,8 @@ config PROC_SYSCTL
45944 limited in memory.
45945
45946 config PROC_PAGE_MONITOR
45947 - default y
45948 - depends on PROC_FS && MMU
45949 + default n
45950 + depends on PROC_FS && MMU && !GRKERNSEC
45951 bool "Enable /proc page monitoring" if EMBEDDED
45952 help
45953 Various /proc files exist to monitor process memory utilization:
45954 diff -urNp linux-2.6.32.44/fs/proc/kcore.c linux-2.6.32.44/fs/proc/kcore.c
45955 --- linux-2.6.32.44/fs/proc/kcore.c 2011-03-27 14:31:47.000000000 -0400
45956 +++ linux-2.6.32.44/fs/proc/kcore.c 2011-05-16 21:46:57.000000000 -0400
45957 @@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bu
45958 off_t offset = 0;
45959 struct kcore_list *m;
45960
45961 + pax_track_stack();
45962 +
45963 /* setup ELF header */
45964 elf = (struct elfhdr *) bufp;
45965 bufp += sizeof(struct elfhdr);
45966 @@ -477,9 +479,10 @@ read_kcore(struct file *file, char __use
45967 * the addresses in the elf_phdr on our list.
45968 */
45969 start = kc_offset_to_vaddr(*fpos - elf_buflen);
45970 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
45971 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
45972 + if (tsz > buflen)
45973 tsz = buflen;
45974 -
45975 +
45976 while (buflen) {
45977 struct kcore_list *m;
45978
45979 @@ -508,20 +511,23 @@ read_kcore(struct file *file, char __use
45980 kfree(elf_buf);
45981 } else {
45982 if (kern_addr_valid(start)) {
45983 - unsigned long n;
45984 + char *elf_buf;
45985 + mm_segment_t oldfs;
45986
45987 - n = copy_to_user(buffer, (char *)start, tsz);
45988 - /*
45989 - * We cannot distingush between fault on source
45990 - * and fault on destination. When this happens
45991 - * we clear too and hope it will trigger the
45992 - * EFAULT again.
45993 - */
45994 - if (n) {
45995 - if (clear_user(buffer + tsz - n,
45996 - n))
45997 + elf_buf = kmalloc(tsz, GFP_KERNEL);
45998 + if (!elf_buf)
45999 + return -ENOMEM;
46000 + oldfs = get_fs();
46001 + set_fs(KERNEL_DS);
46002 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
46003 + set_fs(oldfs);
46004 + if (copy_to_user(buffer, elf_buf, tsz)) {
46005 + kfree(elf_buf);
46006 return -EFAULT;
46007 + }
46008 }
46009 + set_fs(oldfs);
46010 + kfree(elf_buf);
46011 } else {
46012 if (clear_user(buffer, tsz))
46013 return -EFAULT;
46014 @@ -541,6 +547,9 @@ read_kcore(struct file *file, char __use
46015
46016 static int open_kcore(struct inode *inode, struct file *filp)
46017 {
46018 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
46019 + return -EPERM;
46020 +#endif
46021 if (!capable(CAP_SYS_RAWIO))
46022 return -EPERM;
46023 if (kcore_need_update)
46024 diff -urNp linux-2.6.32.44/fs/proc/meminfo.c linux-2.6.32.44/fs/proc/meminfo.c
46025 --- linux-2.6.32.44/fs/proc/meminfo.c 2011-03-27 14:31:47.000000000 -0400
46026 +++ linux-2.6.32.44/fs/proc/meminfo.c 2011-05-16 21:46:57.000000000 -0400
46027 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
46028 unsigned long pages[NR_LRU_LISTS];
46029 int lru;
46030
46031 + pax_track_stack();
46032 +
46033 /*
46034 * display in kilobytes.
46035 */
46036 @@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_
46037 vmi.used >> 10,
46038 vmi.largest_chunk >> 10
46039 #ifdef CONFIG_MEMORY_FAILURE
46040 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
46041 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
46042 #endif
46043 );
46044
46045 diff -urNp linux-2.6.32.44/fs/proc/nommu.c linux-2.6.32.44/fs/proc/nommu.c
46046 --- linux-2.6.32.44/fs/proc/nommu.c 2011-03-27 14:31:47.000000000 -0400
46047 +++ linux-2.6.32.44/fs/proc/nommu.c 2011-04-17 15:56:46.000000000 -0400
46048 @@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_
46049 if (len < 1)
46050 len = 1;
46051 seq_printf(m, "%*c", len, ' ');
46052 - seq_path(m, &file->f_path, "");
46053 + seq_path(m, &file->f_path, "\n\\");
46054 }
46055
46056 seq_putc(m, '\n');
46057 diff -urNp linux-2.6.32.44/fs/proc/proc_net.c linux-2.6.32.44/fs/proc/proc_net.c
46058 --- linux-2.6.32.44/fs/proc/proc_net.c 2011-03-27 14:31:47.000000000 -0400
46059 +++ linux-2.6.32.44/fs/proc/proc_net.c 2011-04-17 15:56:46.000000000 -0400
46060 @@ -104,6 +104,17 @@ static struct net *get_proc_task_net(str
46061 struct task_struct *task;
46062 struct nsproxy *ns;
46063 struct net *net = NULL;
46064 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46065 + const struct cred *cred = current_cred();
46066 +#endif
46067 +
46068 +#ifdef CONFIG_GRKERNSEC_PROC_USER
46069 + if (cred->fsuid)
46070 + return net;
46071 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46072 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
46073 + return net;
46074 +#endif
46075
46076 rcu_read_lock();
46077 task = pid_task(proc_pid(dir), PIDTYPE_PID);
46078 diff -urNp linux-2.6.32.44/fs/proc/proc_sysctl.c linux-2.6.32.44/fs/proc/proc_sysctl.c
46079 --- linux-2.6.32.44/fs/proc/proc_sysctl.c 2011-03-27 14:31:47.000000000 -0400
46080 +++ linux-2.6.32.44/fs/proc/proc_sysctl.c 2011-04-17 15:56:46.000000000 -0400
46081 @@ -7,6 +7,8 @@
46082 #include <linux/security.h>
46083 #include "internal.h"
46084
46085 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
46086 +
46087 static const struct dentry_operations proc_sys_dentry_operations;
46088 static const struct file_operations proc_sys_file_operations;
46089 static const struct inode_operations proc_sys_inode_operations;
46090 @@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(st
46091 if (!p)
46092 goto out;
46093
46094 + if (gr_handle_sysctl(p, MAY_EXEC))
46095 + goto out;
46096 +
46097 err = ERR_PTR(-ENOMEM);
46098 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
46099 if (h)
46100 @@ -228,6 +233,9 @@ static int scan(struct ctl_table_header
46101 if (*pos < file->f_pos)
46102 continue;
46103
46104 + if (gr_handle_sysctl(table, 0))
46105 + continue;
46106 +
46107 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
46108 if (res)
46109 return res;
46110 @@ -344,6 +352,9 @@ static int proc_sys_getattr(struct vfsmo
46111 if (IS_ERR(head))
46112 return PTR_ERR(head);
46113
46114 + if (table && gr_handle_sysctl(table, MAY_EXEC))
46115 + return -ENOENT;
46116 +
46117 generic_fillattr(inode, stat);
46118 if (table)
46119 stat->mode = (stat->mode & S_IFMT) | table->mode;
46120 diff -urNp linux-2.6.32.44/fs/proc/root.c linux-2.6.32.44/fs/proc/root.c
46121 --- linux-2.6.32.44/fs/proc/root.c 2011-03-27 14:31:47.000000000 -0400
46122 +++ linux-2.6.32.44/fs/proc/root.c 2011-04-17 15:56:46.000000000 -0400
46123 @@ -134,7 +134,15 @@ void __init proc_root_init(void)
46124 #ifdef CONFIG_PROC_DEVICETREE
46125 proc_device_tree_init();
46126 #endif
46127 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
46128 +#ifdef CONFIG_GRKERNSEC_PROC_USER
46129 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
46130 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46131 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
46132 +#endif
46133 +#else
46134 proc_mkdir("bus", NULL);
46135 +#endif
46136 proc_sys_init();
46137 }
46138
46139 diff -urNp linux-2.6.32.44/fs/proc/task_mmu.c linux-2.6.32.44/fs/proc/task_mmu.c
46140 --- linux-2.6.32.44/fs/proc/task_mmu.c 2011-03-27 14:31:47.000000000 -0400
46141 +++ linux-2.6.32.44/fs/proc/task_mmu.c 2011-04-23 13:38:09.000000000 -0400
46142 @@ -46,15 +46,26 @@ void task_mem(struct seq_file *m, struct
46143 "VmStk:\t%8lu kB\n"
46144 "VmExe:\t%8lu kB\n"
46145 "VmLib:\t%8lu kB\n"
46146 - "VmPTE:\t%8lu kB\n",
46147 - hiwater_vm << (PAGE_SHIFT-10),
46148 + "VmPTE:\t%8lu kB\n"
46149 +
46150 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46151 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
46152 +#endif
46153 +
46154 + ,hiwater_vm << (PAGE_SHIFT-10),
46155 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
46156 mm->locked_vm << (PAGE_SHIFT-10),
46157 hiwater_rss << (PAGE_SHIFT-10),
46158 total_rss << (PAGE_SHIFT-10),
46159 data << (PAGE_SHIFT-10),
46160 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
46161 - (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
46162 + (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
46163 +
46164 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46165 + , mm->context.user_cs_base, mm->context.user_cs_limit
46166 +#endif
46167 +
46168 + );
46169 }
46170
46171 unsigned long task_vsize(struct mm_struct *mm)
46172 @@ -175,7 +186,8 @@ static void m_stop(struct seq_file *m, v
46173 struct proc_maps_private *priv = m->private;
46174 struct vm_area_struct *vma = v;
46175
46176 - vma_stop(priv, vma);
46177 + if (!IS_ERR(vma))
46178 + vma_stop(priv, vma);
46179 if (priv->task)
46180 put_task_struct(priv->task);
46181 }
46182 @@ -199,6 +211,12 @@ static int do_maps_open(struct inode *in
46183 return ret;
46184 }
46185
46186 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46187 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
46188 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
46189 + _mm->pax_flags & MF_PAX_SEGMEXEC))
46190 +#endif
46191 +
46192 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46193 {
46194 struct mm_struct *mm = vma->vm_mm;
46195 @@ -206,7 +224,6 @@ static void show_map_vma(struct seq_file
46196 int flags = vma->vm_flags;
46197 unsigned long ino = 0;
46198 unsigned long long pgoff = 0;
46199 - unsigned long start;
46200 dev_t dev = 0;
46201 int len;
46202
46203 @@ -217,20 +234,23 @@ static void show_map_vma(struct seq_file
46204 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
46205 }
46206
46207 - /* We don't show the stack guard page in /proc/maps */
46208 - start = vma->vm_start;
46209 - if (vma->vm_flags & VM_GROWSDOWN)
46210 - if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
46211 - start += PAGE_SIZE;
46212 -
46213 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
46214 - start,
46215 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46216 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
46217 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
46218 +#else
46219 + vma->vm_start,
46220 vma->vm_end,
46221 +#endif
46222 flags & VM_READ ? 'r' : '-',
46223 flags & VM_WRITE ? 'w' : '-',
46224 flags & VM_EXEC ? 'x' : '-',
46225 flags & VM_MAYSHARE ? 's' : 'p',
46226 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46227 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
46228 +#else
46229 pgoff,
46230 +#endif
46231 MAJOR(dev), MINOR(dev), ino, &len);
46232
46233 /*
46234 @@ -239,7 +259,7 @@ static void show_map_vma(struct seq_file
46235 */
46236 if (file) {
46237 pad_len_spaces(m, len);
46238 - seq_path(m, &file->f_path, "\n");
46239 + seq_path(m, &file->f_path, "\n\\");
46240 } else {
46241 const char *name = arch_vma_name(vma);
46242 if (!name) {
46243 @@ -247,8 +267,9 @@ static void show_map_vma(struct seq_file
46244 if (vma->vm_start <= mm->brk &&
46245 vma->vm_end >= mm->start_brk) {
46246 name = "[heap]";
46247 - } else if (vma->vm_start <= mm->start_stack &&
46248 - vma->vm_end >= mm->start_stack) {
46249 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
46250 + (vma->vm_start <= mm->start_stack &&
46251 + vma->vm_end >= mm->start_stack)) {
46252 name = "[stack]";
46253 }
46254 } else {
46255 @@ -391,9 +412,16 @@ static int show_smap(struct seq_file *m,
46256 };
46257
46258 memset(&mss, 0, sizeof mss);
46259 - mss.vma = vma;
46260 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46261 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46262 +
46263 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46264 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
46265 +#endif
46266 + mss.vma = vma;
46267 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46268 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46269 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46270 + }
46271 +#endif
46272
46273 show_map_vma(m, vma);
46274
46275 @@ -409,7 +437,11 @@ static int show_smap(struct seq_file *m,
46276 "Swap: %8lu kB\n"
46277 "KernelPageSize: %8lu kB\n"
46278 "MMUPageSize: %8lu kB\n",
46279 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46280 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
46281 +#else
46282 (vma->vm_end - vma->vm_start) >> 10,
46283 +#endif
46284 mss.resident >> 10,
46285 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
46286 mss.shared_clean >> 10,
46287 diff -urNp linux-2.6.32.44/fs/proc/task_nommu.c linux-2.6.32.44/fs/proc/task_nommu.c
46288 --- linux-2.6.32.44/fs/proc/task_nommu.c 2011-03-27 14:31:47.000000000 -0400
46289 +++ linux-2.6.32.44/fs/proc/task_nommu.c 2011-04-17 15:56:46.000000000 -0400
46290 @@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct
46291 else
46292 bytes += kobjsize(mm);
46293
46294 - if (current->fs && current->fs->users > 1)
46295 + if (current->fs && atomic_read(&current->fs->users) > 1)
46296 sbytes += kobjsize(current->fs);
46297 else
46298 bytes += kobjsize(current->fs);
46299 @@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_fil
46300 if (len < 1)
46301 len = 1;
46302 seq_printf(m, "%*c", len, ' ');
46303 - seq_path(m, &file->f_path, "");
46304 + seq_path(m, &file->f_path, "\n\\");
46305 }
46306
46307 seq_putc(m, '\n');
46308 diff -urNp linux-2.6.32.44/fs/readdir.c linux-2.6.32.44/fs/readdir.c
46309 --- linux-2.6.32.44/fs/readdir.c 2011-03-27 14:31:47.000000000 -0400
46310 +++ linux-2.6.32.44/fs/readdir.c 2011-04-17 15:56:46.000000000 -0400
46311 @@ -16,6 +16,7 @@
46312 #include <linux/security.h>
46313 #include <linux/syscalls.h>
46314 #include <linux/unistd.h>
46315 +#include <linux/namei.h>
46316
46317 #include <asm/uaccess.h>
46318
46319 @@ -67,6 +68,7 @@ struct old_linux_dirent {
46320
46321 struct readdir_callback {
46322 struct old_linux_dirent __user * dirent;
46323 + struct file * file;
46324 int result;
46325 };
46326
46327 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
46328 buf->result = -EOVERFLOW;
46329 return -EOVERFLOW;
46330 }
46331 +
46332 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46333 + return 0;
46334 +
46335 buf->result++;
46336 dirent = buf->dirent;
46337 if (!access_ok(VERIFY_WRITE, dirent,
46338 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
46339
46340 buf.result = 0;
46341 buf.dirent = dirent;
46342 + buf.file = file;
46343
46344 error = vfs_readdir(file, fillonedir, &buf);
46345 if (buf.result)
46346 @@ -142,6 +149,7 @@ struct linux_dirent {
46347 struct getdents_callback {
46348 struct linux_dirent __user * current_dir;
46349 struct linux_dirent __user * previous;
46350 + struct file * file;
46351 int count;
46352 int error;
46353 };
46354 @@ -162,6 +170,10 @@ static int filldir(void * __buf, const c
46355 buf->error = -EOVERFLOW;
46356 return -EOVERFLOW;
46357 }
46358 +
46359 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46360 + return 0;
46361 +
46362 dirent = buf->previous;
46363 if (dirent) {
46364 if (__put_user(offset, &dirent->d_off))
46365 @@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
46366 buf.previous = NULL;
46367 buf.count = count;
46368 buf.error = 0;
46369 + buf.file = file;
46370
46371 error = vfs_readdir(file, filldir, &buf);
46372 if (error >= 0)
46373 @@ -228,6 +241,7 @@ out:
46374 struct getdents_callback64 {
46375 struct linux_dirent64 __user * current_dir;
46376 struct linux_dirent64 __user * previous;
46377 + struct file *file;
46378 int count;
46379 int error;
46380 };
46381 @@ -242,6 +256,10 @@ static int filldir64(void * __buf, const
46382 buf->error = -EINVAL; /* only used if we fail.. */
46383 if (reclen > buf->count)
46384 return -EINVAL;
46385 +
46386 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46387 + return 0;
46388 +
46389 dirent = buf->previous;
46390 if (dirent) {
46391 if (__put_user(offset, &dirent->d_off))
46392 @@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
46393
46394 buf.current_dir = dirent;
46395 buf.previous = NULL;
46396 + buf.file = file;
46397 buf.count = count;
46398 buf.error = 0;
46399
46400 diff -urNp linux-2.6.32.44/fs/reiserfs/dir.c linux-2.6.32.44/fs/reiserfs/dir.c
46401 --- linux-2.6.32.44/fs/reiserfs/dir.c 2011-03-27 14:31:47.000000000 -0400
46402 +++ linux-2.6.32.44/fs/reiserfs/dir.c 2011-05-16 21:46:57.000000000 -0400
46403 @@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
46404 struct reiserfs_dir_entry de;
46405 int ret = 0;
46406
46407 + pax_track_stack();
46408 +
46409 reiserfs_write_lock(inode->i_sb);
46410
46411 reiserfs_check_lock_depth(inode->i_sb, "readdir");
46412 diff -urNp linux-2.6.32.44/fs/reiserfs/do_balan.c linux-2.6.32.44/fs/reiserfs/do_balan.c
46413 --- linux-2.6.32.44/fs/reiserfs/do_balan.c 2011-03-27 14:31:47.000000000 -0400
46414 +++ linux-2.6.32.44/fs/reiserfs/do_balan.c 2011-04-17 15:56:46.000000000 -0400
46415 @@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb,
46416 return;
46417 }
46418
46419 - atomic_inc(&(fs_generation(tb->tb_sb)));
46420 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
46421 do_balance_starts(tb);
46422
46423 /* balance leaf returns 0 except if combining L R and S into
46424 diff -urNp linux-2.6.32.44/fs/reiserfs/item_ops.c linux-2.6.32.44/fs/reiserfs/item_ops.c
46425 --- linux-2.6.32.44/fs/reiserfs/item_ops.c 2011-03-27 14:31:47.000000000 -0400
46426 +++ linux-2.6.32.44/fs/reiserfs/item_ops.c 2011-04-17 15:56:46.000000000 -0400
46427 @@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_i
46428 vi->vi_index, vi->vi_type, vi->vi_ih);
46429 }
46430
46431 -static struct item_operations stat_data_ops = {
46432 +static const struct item_operations stat_data_ops = {
46433 .bytes_number = sd_bytes_number,
46434 .decrement_key = sd_decrement_key,
46435 .is_left_mergeable = sd_is_left_mergeable,
46436 @@ -196,7 +196,7 @@ static void direct_print_vi(struct virtu
46437 vi->vi_index, vi->vi_type, vi->vi_ih);
46438 }
46439
46440 -static struct item_operations direct_ops = {
46441 +static const struct item_operations direct_ops = {
46442 .bytes_number = direct_bytes_number,
46443 .decrement_key = direct_decrement_key,
46444 .is_left_mergeable = direct_is_left_mergeable,
46445 @@ -341,7 +341,7 @@ static void indirect_print_vi(struct vir
46446 vi->vi_index, vi->vi_type, vi->vi_ih);
46447 }
46448
46449 -static struct item_operations indirect_ops = {
46450 +static const struct item_operations indirect_ops = {
46451 .bytes_number = indirect_bytes_number,
46452 .decrement_key = indirect_decrement_key,
46453 .is_left_mergeable = indirect_is_left_mergeable,
46454 @@ -628,7 +628,7 @@ static void direntry_print_vi(struct vir
46455 printk("\n");
46456 }
46457
46458 -static struct item_operations direntry_ops = {
46459 +static const struct item_operations direntry_ops = {
46460 .bytes_number = direntry_bytes_number,
46461 .decrement_key = direntry_decrement_key,
46462 .is_left_mergeable = direntry_is_left_mergeable,
46463 @@ -724,7 +724,7 @@ static void errcatch_print_vi(struct vir
46464 "Invalid item type observed, run fsck ASAP");
46465 }
46466
46467 -static struct item_operations errcatch_ops = {
46468 +static const struct item_operations errcatch_ops = {
46469 errcatch_bytes_number,
46470 errcatch_decrement_key,
46471 errcatch_is_left_mergeable,
46472 @@ -746,7 +746,7 @@ static struct item_operations errcatch_o
46473 #error Item types must use disk-format assigned values.
46474 #endif
46475
46476 -struct item_operations *item_ops[TYPE_ANY + 1] = {
46477 +const struct item_operations * const item_ops[TYPE_ANY + 1] = {
46478 &stat_data_ops,
46479 &indirect_ops,
46480 &direct_ops,
46481 diff -urNp linux-2.6.32.44/fs/reiserfs/journal.c linux-2.6.32.44/fs/reiserfs/journal.c
46482 --- linux-2.6.32.44/fs/reiserfs/journal.c 2011-03-27 14:31:47.000000000 -0400
46483 +++ linux-2.6.32.44/fs/reiserfs/journal.c 2011-05-16 21:46:57.000000000 -0400
46484 @@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_brea
46485 struct buffer_head *bh;
46486 int i, j;
46487
46488 + pax_track_stack();
46489 +
46490 bh = __getblk(dev, block, bufsize);
46491 if (buffer_uptodate(bh))
46492 return (bh);
46493 diff -urNp linux-2.6.32.44/fs/reiserfs/namei.c linux-2.6.32.44/fs/reiserfs/namei.c
46494 --- linux-2.6.32.44/fs/reiserfs/namei.c 2011-03-27 14:31:47.000000000 -0400
46495 +++ linux-2.6.32.44/fs/reiserfs/namei.c 2011-05-16 21:46:57.000000000 -0400
46496 @@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode
46497 unsigned long savelink = 1;
46498 struct timespec ctime;
46499
46500 + pax_track_stack();
46501 +
46502 /* three balancings: (1) old name removal, (2) new name insertion
46503 and (3) maybe "save" link insertion
46504 stat data updates: (1) old directory,
46505 diff -urNp linux-2.6.32.44/fs/reiserfs/procfs.c linux-2.6.32.44/fs/reiserfs/procfs.c
46506 --- linux-2.6.32.44/fs/reiserfs/procfs.c 2011-03-27 14:31:47.000000000 -0400
46507 +++ linux-2.6.32.44/fs/reiserfs/procfs.c 2011-05-16 21:46:57.000000000 -0400
46508 @@ -123,7 +123,7 @@ static int show_super(struct seq_file *m
46509 "SMALL_TAILS " : "NO_TAILS ",
46510 replay_only(sb) ? "REPLAY_ONLY " : "",
46511 convert_reiserfs(sb) ? "CONV " : "",
46512 - atomic_read(&r->s_generation_counter),
46513 + atomic_read_unchecked(&r->s_generation_counter),
46514 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
46515 SF(s_do_balance), SF(s_unneeded_left_neighbor),
46516 SF(s_good_search_by_key_reada), SF(s_bmaps),
46517 @@ -309,6 +309,8 @@ static int show_journal(struct seq_file
46518 struct journal_params *jp = &rs->s_v1.s_journal;
46519 char b[BDEVNAME_SIZE];
46520
46521 + pax_track_stack();
46522 +
46523 seq_printf(m, /* on-disk fields */
46524 "jp_journal_1st_block: \t%i\n"
46525 "jp_journal_dev: \t%s[%x]\n"
46526 diff -urNp linux-2.6.32.44/fs/reiserfs/stree.c linux-2.6.32.44/fs/reiserfs/stree.c
46527 --- linux-2.6.32.44/fs/reiserfs/stree.c 2011-03-27 14:31:47.000000000 -0400
46528 +++ linux-2.6.32.44/fs/reiserfs/stree.c 2011-05-16 21:46:57.000000000 -0400
46529 @@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs
46530 int iter = 0;
46531 #endif
46532
46533 + pax_track_stack();
46534 +
46535 BUG_ON(!th->t_trans_id);
46536
46537 init_tb_struct(th, &s_del_balance, sb, path,
46538 @@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct r
46539 int retval;
46540 int quota_cut_bytes = 0;
46541
46542 + pax_track_stack();
46543 +
46544 BUG_ON(!th->t_trans_id);
46545
46546 le_key2cpu_key(&cpu_key, key);
46547 @@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiser
46548 int quota_cut_bytes;
46549 loff_t tail_pos = 0;
46550
46551 + pax_track_stack();
46552 +
46553 BUG_ON(!th->t_trans_id);
46554
46555 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
46556 @@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reis
46557 int retval;
46558 int fs_gen;
46559
46560 + pax_track_stack();
46561 +
46562 BUG_ON(!th->t_trans_id);
46563
46564 fs_gen = get_generation(inode->i_sb);
46565 @@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs
46566 int fs_gen = 0;
46567 int quota_bytes = 0;
46568
46569 + pax_track_stack();
46570 +
46571 BUG_ON(!th->t_trans_id);
46572
46573 if (inode) { /* Do we count quotas for item? */
46574 diff -urNp linux-2.6.32.44/fs/reiserfs/super.c linux-2.6.32.44/fs/reiserfs/super.c
46575 --- linux-2.6.32.44/fs/reiserfs/super.c 2011-03-27 14:31:47.000000000 -0400
46576 +++ linux-2.6.32.44/fs/reiserfs/super.c 2011-05-16 21:46:57.000000000 -0400
46577 @@ -912,6 +912,8 @@ static int reiserfs_parse_options(struct
46578 {.option_name = NULL}
46579 };
46580
46581 + pax_track_stack();
46582 +
46583 *blocks = 0;
46584 if (!options || !*options)
46585 /* use default configuration: create tails, journaling on, no
46586 diff -urNp linux-2.6.32.44/fs/select.c linux-2.6.32.44/fs/select.c
46587 --- linux-2.6.32.44/fs/select.c 2011-03-27 14:31:47.000000000 -0400
46588 +++ linux-2.6.32.44/fs/select.c 2011-05-16 21:46:57.000000000 -0400
46589 @@ -20,6 +20,7 @@
46590 #include <linux/module.h>
46591 #include <linux/slab.h>
46592 #include <linux/poll.h>
46593 +#include <linux/security.h>
46594 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
46595 #include <linux/file.h>
46596 #include <linux/fdtable.h>
46597 @@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, s
46598 int retval, i, timed_out = 0;
46599 unsigned long slack = 0;
46600
46601 + pax_track_stack();
46602 +
46603 rcu_read_lock();
46604 retval = max_select_fd(n, fds);
46605 rcu_read_unlock();
46606 @@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user
46607 /* Allocate small arguments on the stack to save memory and be faster */
46608 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
46609
46610 + pax_track_stack();
46611 +
46612 ret = -EINVAL;
46613 if (n < 0)
46614 goto out_nofds;
46615 @@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *uf
46616 struct poll_list *walk = head;
46617 unsigned long todo = nfds;
46618
46619 + pax_track_stack();
46620 +
46621 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
46622 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
46623 return -EINVAL;
46624
46625 diff -urNp linux-2.6.32.44/fs/seq_file.c linux-2.6.32.44/fs/seq_file.c
46626 --- linux-2.6.32.44/fs/seq_file.c 2011-03-27 14:31:47.000000000 -0400
46627 +++ linux-2.6.32.44/fs/seq_file.c 2011-08-05 20:33:55.000000000 -0400
46628 @@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
46629 return 0;
46630 }
46631 if (!m->buf) {
46632 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46633 + m->size = PAGE_SIZE;
46634 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46635 if (!m->buf)
46636 return -ENOMEM;
46637 }
46638 @@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
46639 Eoverflow:
46640 m->op->stop(m, p);
46641 kfree(m->buf);
46642 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46643 + m->size <<= 1;
46644 + m->buf = kmalloc(m->size, GFP_KERNEL);
46645 return !m->buf ? -ENOMEM : -EAGAIN;
46646 }
46647
46648 @@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
46649 m->version = file->f_version;
46650 /* grab buffer if we didn't have one */
46651 if (!m->buf) {
46652 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46653 + m->size = PAGE_SIZE;
46654 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46655 if (!m->buf)
46656 goto Enomem;
46657 }
46658 @@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
46659 goto Fill;
46660 m->op->stop(m, p);
46661 kfree(m->buf);
46662 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46663 + m->size <<= 1;
46664 + m->buf = kmalloc(m->size, GFP_KERNEL);
46665 if (!m->buf)
46666 goto Enomem;
46667 m->count = 0;
46668 @@ -555,10 +559,10 @@ int single_open(struct file *file, int (
46669 int res = -ENOMEM;
46670
46671 if (op) {
46672 - op->start = single_start;
46673 - op->next = single_next;
46674 - op->stop = single_stop;
46675 - op->show = show;
46676 + *(void **)&op->start = single_start;
46677 + *(void **)&op->next = single_next;
46678 + *(void **)&op->stop = single_stop;
46679 + *(void **)&op->show = show;
46680 res = seq_open(file, op);
46681 if (!res)
46682 ((struct seq_file *)file->private_data)->private = data;
46683 diff -urNp linux-2.6.32.44/fs/smbfs/proc.c linux-2.6.32.44/fs/smbfs/proc.c
46684 --- linux-2.6.32.44/fs/smbfs/proc.c 2011-03-27 14:31:47.000000000 -0400
46685 +++ linux-2.6.32.44/fs/smbfs/proc.c 2011-08-05 20:33:55.000000000 -0400
46686 @@ -266,9 +266,9 @@ int smb_setcodepage(struct smb_sb_info *
46687
46688 out:
46689 if (server->local_nls != NULL && server->remote_nls != NULL)
46690 - server->ops->convert = convert_cp;
46691 + *(void **)&server->ops->convert = convert_cp;
46692 else
46693 - server->ops->convert = convert_memcpy;
46694 + *(void **)&server->ops->convert = convert_memcpy;
46695
46696 smb_unlock_server(server);
46697 return n;
46698 @@ -933,9 +933,9 @@ smb_newconn(struct smb_sb_info *server,
46699
46700 /* FIXME: the win9x code wants to modify these ... (seek/trunc bug) */
46701 if (server->mnt->flags & SMB_MOUNT_OLDATTR) {
46702 - server->ops->getattr = smb_proc_getattr_core;
46703 + *(void **)&server->ops->getattr = smb_proc_getattr_core;
46704 } else if (server->mnt->flags & SMB_MOUNT_DIRATTR) {
46705 - server->ops->getattr = smb_proc_getattr_ff;
46706 + *(void **)&server->ops->getattr = smb_proc_getattr_ff;
46707 }
46708
46709 /* Decode server capabilities */
46710 @@ -3439,7 +3439,7 @@ out:
46711 static void
46712 install_ops(struct smb_ops *dst, struct smb_ops *src)
46713 {
46714 - memcpy(dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
46715 + memcpy((void *)dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
46716 }
46717
46718 /* < LANMAN2 */
46719 diff -urNp linux-2.6.32.44/fs/smbfs/symlink.c linux-2.6.32.44/fs/smbfs/symlink.c
46720 --- linux-2.6.32.44/fs/smbfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
46721 +++ linux-2.6.32.44/fs/smbfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
46722 @@ -55,7 +55,7 @@ static void *smb_follow_link(struct dent
46723
46724 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46725 {
46726 - char *s = nd_get_link(nd);
46727 + const char *s = nd_get_link(nd);
46728 if (!IS_ERR(s))
46729 __putname(s);
46730 }
46731 diff -urNp linux-2.6.32.44/fs/splice.c linux-2.6.32.44/fs/splice.c
46732 --- linux-2.6.32.44/fs/splice.c 2011-03-27 14:31:47.000000000 -0400
46733 +++ linux-2.6.32.44/fs/splice.c 2011-05-16 21:46:57.000000000 -0400
46734 @@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode
46735 pipe_lock(pipe);
46736
46737 for (;;) {
46738 - if (!pipe->readers) {
46739 + if (!atomic_read(&pipe->readers)) {
46740 send_sig(SIGPIPE, current, 0);
46741 if (!ret)
46742 ret = -EPIPE;
46743 @@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode
46744 do_wakeup = 0;
46745 }
46746
46747 - pipe->waiting_writers++;
46748 + atomic_inc(&pipe->waiting_writers);
46749 pipe_wait(pipe);
46750 - pipe->waiting_writers--;
46751 + atomic_dec(&pipe->waiting_writers);
46752 }
46753
46754 pipe_unlock(pipe);
46755 @@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *
46756 .spd_release = spd_release_page,
46757 };
46758
46759 + pax_track_stack();
46760 +
46761 index = *ppos >> PAGE_CACHE_SHIFT;
46762 loff = *ppos & ~PAGE_CACHE_MASK;
46763 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
46764 @@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file
46765 old_fs = get_fs();
46766 set_fs(get_ds());
46767 /* The cast to a user pointer is valid due to the set_fs() */
46768 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
46769 + res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
46770 set_fs(old_fs);
46771
46772 return res;
46773 @@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file
46774 old_fs = get_fs();
46775 set_fs(get_ds());
46776 /* The cast to a user pointer is valid due to the set_fs() */
46777 - res = vfs_write(file, (const char __user *)buf, count, &pos);
46778 + res = vfs_write(file, (__force const char __user *)buf, count, &pos);
46779 set_fs(old_fs);
46780
46781 return res;
46782 @@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct
46783 .spd_release = spd_release_page,
46784 };
46785
46786 + pax_track_stack();
46787 +
46788 index = *ppos >> PAGE_CACHE_SHIFT;
46789 offset = *ppos & ~PAGE_CACHE_MASK;
46790 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
46791 @@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct
46792 goto err;
46793
46794 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
46795 - vec[i].iov_base = (void __user *) page_address(page);
46796 + vec[i].iov_base = (__force void __user *) page_address(page);
46797 vec[i].iov_len = this_len;
46798 pages[i] = page;
46799 spd.nr_pages++;
46800 @@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
46801 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
46802 {
46803 while (!pipe->nrbufs) {
46804 - if (!pipe->writers)
46805 + if (!atomic_read(&pipe->writers))
46806 return 0;
46807
46808 - if (!pipe->waiting_writers && sd->num_spliced)
46809 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
46810 return 0;
46811
46812 if (sd->flags & SPLICE_F_NONBLOCK)
46813 @@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct fi
46814 * out of the pipe right after the splice_to_pipe(). So set
46815 * PIPE_READERS appropriately.
46816 */
46817 - pipe->readers = 1;
46818 + atomic_set(&pipe->readers, 1);
46819
46820 current->splice_pipe = pipe;
46821 }
46822 @@ -1592,6 +1596,8 @@ static long vmsplice_to_pipe(struct file
46823 .spd_release = spd_release_page,
46824 };
46825
46826 + pax_track_stack();
46827 +
46828 pipe = pipe_info(file->f_path.dentry->d_inode);
46829 if (!pipe)
46830 return -EBADF;
46831 @@ -1700,9 +1706,9 @@ static int ipipe_prep(struct pipe_inode_
46832 ret = -ERESTARTSYS;
46833 break;
46834 }
46835 - if (!pipe->writers)
46836 + if (!atomic_read(&pipe->writers))
46837 break;
46838 - if (!pipe->waiting_writers) {
46839 + if (!atomic_read(&pipe->waiting_writers)) {
46840 if (flags & SPLICE_F_NONBLOCK) {
46841 ret = -EAGAIN;
46842 break;
46843 @@ -1734,7 +1740,7 @@ static int opipe_prep(struct pipe_inode_
46844 pipe_lock(pipe);
46845
46846 while (pipe->nrbufs >= PIPE_BUFFERS) {
46847 - if (!pipe->readers) {
46848 + if (!atomic_read(&pipe->readers)) {
46849 send_sig(SIGPIPE, current, 0);
46850 ret = -EPIPE;
46851 break;
46852 @@ -1747,9 +1753,9 @@ static int opipe_prep(struct pipe_inode_
46853 ret = -ERESTARTSYS;
46854 break;
46855 }
46856 - pipe->waiting_writers++;
46857 + atomic_inc(&pipe->waiting_writers);
46858 pipe_wait(pipe);
46859 - pipe->waiting_writers--;
46860 + atomic_dec(&pipe->waiting_writers);
46861 }
46862
46863 pipe_unlock(pipe);
46864 @@ -1785,14 +1791,14 @@ retry:
46865 pipe_double_lock(ipipe, opipe);
46866
46867 do {
46868 - if (!opipe->readers) {
46869 + if (!atomic_read(&opipe->readers)) {
46870 send_sig(SIGPIPE, current, 0);
46871 if (!ret)
46872 ret = -EPIPE;
46873 break;
46874 }
46875
46876 - if (!ipipe->nrbufs && !ipipe->writers)
46877 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
46878 break;
46879
46880 /*
46881 @@ -1892,7 +1898,7 @@ static int link_pipe(struct pipe_inode_i
46882 pipe_double_lock(ipipe, opipe);
46883
46884 do {
46885 - if (!opipe->readers) {
46886 + if (!atomic_read(&opipe->readers)) {
46887 send_sig(SIGPIPE, current, 0);
46888 if (!ret)
46889 ret = -EPIPE;
46890 @@ -1937,7 +1943,7 @@ static int link_pipe(struct pipe_inode_i
46891 * return EAGAIN if we have the potential of some data in the
46892 * future, otherwise just return 0
46893 */
46894 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
46895 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
46896 ret = -EAGAIN;
46897
46898 pipe_unlock(ipipe);
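The fs/splice.c hunks above convert the pipe bookkeeping fields (pipe->readers, pipe->writers, pipe->waiting_writers) from plain integer accesses to atomic_t operations, in line with the patch-wide hardening of shared kernel counters. The following user-space sketch only illustrates the general property relied on here, namely that atomic increments cannot lose concurrent updates the way unlocked ++/-- can; it is not kernel code, and the thread count and iteration count are arbitrary. Build with -pthread.

/*
 * Sketch: a plain shared counter loses updates under concurrency,
 * an atomic one does not.  Illustration only; not from the patch.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define ITERS 1000000

static int plain_counter;          /* like the old int fields      */
static atomic_int atomic_counter;  /* like the new atomic_t fields */

static void *bump(void *arg)
{
	(void)arg;
	for (int i = 0; i < ITERS; i++) {
		plain_counter++;                      /* racy read-modify-write */
		atomic_fetch_add(&atomic_counter, 1); /* never loses a count    */
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, bump, NULL);
	pthread_create(&b, NULL, bump, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* plain_counter typically ends up below 2*ITERS; the atomic count never does */
	printf("plain  = %d\natomic = %d (expected %d)\n",
	       plain_counter, atomic_load(&atomic_counter), 2 * ITERS);
	return 0;
}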
46899 diff -urNp linux-2.6.32.44/fs/sysfs/file.c linux-2.6.32.44/fs/sysfs/file.c
46900 --- linux-2.6.32.44/fs/sysfs/file.c 2011-03-27 14:31:47.000000000 -0400
46901 +++ linux-2.6.32.44/fs/sysfs/file.c 2011-05-04 17:56:20.000000000 -0400
46902 @@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
46903
46904 struct sysfs_open_dirent {
46905 atomic_t refcnt;
46906 - atomic_t event;
46907 + atomic_unchecked_t event;
46908 wait_queue_head_t poll;
46909 struct list_head buffers; /* goes through sysfs_buffer.list */
46910 };
46911 @@ -53,7 +53,7 @@ struct sysfs_buffer {
46912 size_t count;
46913 loff_t pos;
46914 char * page;
46915 - struct sysfs_ops * ops;
46916 + const struct sysfs_ops * ops;
46917 struct mutex mutex;
46918 int needs_read_fill;
46919 int event;
46920 @@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentr
46921 {
46922 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
46923 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
46924 - struct sysfs_ops * ops = buffer->ops;
46925 + const struct sysfs_ops * ops = buffer->ops;
46926 int ret = 0;
46927 ssize_t count;
46928
46929 @@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentr
46930 if (!sysfs_get_active_two(attr_sd))
46931 return -ENODEV;
46932
46933 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
46934 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
46935 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
46936
46937 sysfs_put_active_two(attr_sd);
46938 @@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentr
46939 {
46940 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
46941 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
46942 - struct sysfs_ops * ops = buffer->ops;
46943 + const struct sysfs_ops * ops = buffer->ops;
46944 int rc;
46945
46946 /* need attr_sd for attr and ops, its parent for kobj */
46947 @@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct
46948 return -ENOMEM;
46949
46950 atomic_set(&new_od->refcnt, 0);
46951 - atomic_set(&new_od->event, 1);
46952 + atomic_set_unchecked(&new_od->event, 1);
46953 init_waitqueue_head(&new_od->poll);
46954 INIT_LIST_HEAD(&new_od->buffers);
46955 goto retry;
46956 @@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode
46957 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
46958 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
46959 struct sysfs_buffer *buffer;
46960 - struct sysfs_ops *ops;
46961 + const struct sysfs_ops *ops;
46962 int error = -EACCES;
46963 char *p;
46964
46965 @@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct fi
46966
46967 sysfs_put_active_two(attr_sd);
46968
46969 - if (buffer->event != atomic_read(&od->event))
46970 + if (buffer->event != atomic_read_unchecked(&od->event))
46971 goto trigger;
46972
46973 return DEFAULT_POLLMASK;
46974 @@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_di
46975
46976 od = sd->s_attr.open;
46977 if (od) {
46978 - atomic_inc(&od->event);
46979 + atomic_inc_unchecked(&od->event);
46980 wake_up_interruptible(&od->poll);
46981 }
46982
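The sysfs changes above switch the open-dirent event counter from atomic_t to atomic_unchecked_t with the matching *_unchecked accessors. Under the PaX reference-counter overflow protection this patch set builds on, counters that may legitimately wrap are marked unchecked so a benign wrap-around is not flagged as an overflow; the sysfs event field is such a counter, since sysfs_poll() only compares it for inequality. The stand-alone snippet below illustrates why wrap-around is harmless for a pure sequence number; the variable names are invented for the illustration.

/*
 * Sketch: an event sequence counter whose only use is an inequality
 * test, so unsigned wrap-around does not break the consumer.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t published = UINT32_MAX;  /* counter about to wrap           */
	uint32_t seen = published;        /* last value observed by a poller */

	published++;                      /* wraps to 0: still a new value   */

	if (seen != published)
		puts("poller still notices the new event after the wrap");
	return 0;
}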
46983 diff -urNp linux-2.6.32.44/fs/sysfs/mount.c linux-2.6.32.44/fs/sysfs/mount.c
46984 --- linux-2.6.32.44/fs/sysfs/mount.c 2011-03-27 14:31:47.000000000 -0400
46985 +++ linux-2.6.32.44/fs/sysfs/mount.c 2011-04-17 15:56:46.000000000 -0400
46986 @@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
46987 .s_name = "",
46988 .s_count = ATOMIC_INIT(1),
46989 .s_flags = SYSFS_DIR,
46990 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
46991 + .s_mode = S_IFDIR | S_IRWXU,
46992 +#else
46993 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
46994 +#endif
46995 .s_ino = 1,
46996 };
46997
46998 diff -urNp linux-2.6.32.44/fs/sysfs/symlink.c linux-2.6.32.44/fs/sysfs/symlink.c
46999 --- linux-2.6.32.44/fs/sysfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
47000 +++ linux-2.6.32.44/fs/sysfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
47001 @@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct de
47002
47003 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
47004 {
47005 - char *page = nd_get_link(nd);
47006 + const char *page = nd_get_link(nd);
47007 if (!IS_ERR(page))
47008 free_page((unsigned long)page);
47009 }
47010 diff -urNp linux-2.6.32.44/fs/udf/balloc.c linux-2.6.32.44/fs/udf/balloc.c
47011 --- linux-2.6.32.44/fs/udf/balloc.c 2011-03-27 14:31:47.000000000 -0400
47012 +++ linux-2.6.32.44/fs/udf/balloc.c 2011-04-17 15:56:46.000000000 -0400
47013 @@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struc
47014
47015 mutex_lock(&sbi->s_alloc_mutex);
47016 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
47017 - if (bloc->logicalBlockNum < 0 ||
47018 - (bloc->logicalBlockNum + count) >
47019 - partmap->s_partition_len) {
47020 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
47021 udf_debug("%d < %d || %d + %d > %d\n",
47022 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
47023 count, partmap->s_partition_len);
47024 @@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct
47025
47026 mutex_lock(&sbi->s_alloc_mutex);
47027 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
47028 - if (bloc->logicalBlockNum < 0 ||
47029 - (bloc->logicalBlockNum + count) >
47030 - partmap->s_partition_len) {
47031 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
47032 udf_debug("%d < %d || %d + %d > %d\n",
47033 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
47034 partmap->s_partition_len);
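Both fs/udf/balloc.c hunks drop the bloc->logicalBlockNum < 0 half of the range check. Assuming logicalBlockNum is the unsigned 32-bit field defined in this kernel's struct kernel_lb_addr, that comparison can never be true, so the dropped test was dead code (and a source of compiler warnings); the upper-bound check against s_partition_len is kept. A minimal stand-alone demonstration of the unsigned comparison, not kernel code:

/*
 * Sketch: with an unsigned type a "< 0" test is always false, so the
 * branch is unreachable and compilers typically warn (-Wtype-limits).
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t block = UINT32_MAX;  /* even a "negative-looking" bit pattern is a large positive */

	if (block < 0)                /* always false for unsigned operands */
		puts("never printed");
	else
		printf("block = %u; the < 0 test is dead code\n", (unsigned)block);
	return 0;
}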
47035 diff -urNp linux-2.6.32.44/fs/udf/inode.c linux-2.6.32.44/fs/udf/inode.c
47036 --- linux-2.6.32.44/fs/udf/inode.c 2011-03-27 14:31:47.000000000 -0400
47037 +++ linux-2.6.32.44/fs/udf/inode.c 2011-05-16 21:46:57.000000000 -0400
47038 @@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(
47039 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
47040 int lastblock = 0;
47041
47042 + pax_track_stack();
47043 +
47044 prev_epos.offset = udf_file_entry_alloc_offset(inode);
47045 prev_epos.block = iinfo->i_location;
47046 prev_epos.bh = NULL;
47047 diff -urNp linux-2.6.32.44/fs/udf/misc.c linux-2.6.32.44/fs/udf/misc.c
47048 --- linux-2.6.32.44/fs/udf/misc.c 2011-03-27 14:31:47.000000000 -0400
47049 +++ linux-2.6.32.44/fs/udf/misc.c 2011-04-23 12:56:11.000000000 -0400
47050 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
47051
47052 u8 udf_tag_checksum(const struct tag *t)
47053 {
47054 - u8 *data = (u8 *)t;
47055 + const u8 *data = (const u8 *)t;
47056 u8 checksum = 0;
47057 int i;
47058 for (i = 0; i < sizeof(struct tag); ++i)
47059 diff -urNp linux-2.6.32.44/fs/utimes.c linux-2.6.32.44/fs/utimes.c
47060 --- linux-2.6.32.44/fs/utimes.c 2011-03-27 14:31:47.000000000 -0400
47061 +++ linux-2.6.32.44/fs/utimes.c 2011-04-17 15:56:46.000000000 -0400
47062 @@ -1,6 +1,7 @@
47063 #include <linux/compiler.h>
47064 #include <linux/file.h>
47065 #include <linux/fs.h>
47066 +#include <linux/security.h>
47067 #include <linux/linkage.h>
47068 #include <linux/mount.h>
47069 #include <linux/namei.h>
47070 @@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
47071 goto mnt_drop_write_and_out;
47072 }
47073 }
47074 +
47075 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
47076 + error = -EACCES;
47077 + goto mnt_drop_write_and_out;
47078 + }
47079 +
47080 mutex_lock(&inode->i_mutex);
47081 error = notify_change(path->dentry, &newattrs);
47082 mutex_unlock(&inode->i_mutex);
47083 diff -urNp linux-2.6.32.44/fs/xattr_acl.c linux-2.6.32.44/fs/xattr_acl.c
47084 --- linux-2.6.32.44/fs/xattr_acl.c 2011-03-27 14:31:47.000000000 -0400
47085 +++ linux-2.6.32.44/fs/xattr_acl.c 2011-04-17 15:56:46.000000000 -0400
47086 @@ -17,8 +17,8 @@
47087 struct posix_acl *
47088 posix_acl_from_xattr(const void *value, size_t size)
47089 {
47090 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
47091 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
47092 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
47093 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
47094 int count;
47095 struct posix_acl *acl;
47096 struct posix_acl_entry *acl_e;
47097 diff -urNp linux-2.6.32.44/fs/xattr.c linux-2.6.32.44/fs/xattr.c
47098 --- linux-2.6.32.44/fs/xattr.c 2011-03-27 14:31:47.000000000 -0400
47099 +++ linux-2.6.32.44/fs/xattr.c 2011-04-17 15:56:46.000000000 -0400
47100 @@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
47101 * Extended attribute SET operations
47102 */
47103 static long
47104 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
47105 +setxattr(struct path *path, const char __user *name, const void __user *value,
47106 size_t size, int flags)
47107 {
47108 int error;
47109 @@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __
47110 return PTR_ERR(kvalue);
47111 }
47112
47113 - error = vfs_setxattr(d, kname, kvalue, size, flags);
47114 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
47115 + error = -EACCES;
47116 + goto out;
47117 + }
47118 +
47119 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
47120 +out:
47121 kfree(kvalue);
47122 return error;
47123 }
47124 @@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
47125 return error;
47126 error = mnt_want_write(path.mnt);
47127 if (!error) {
47128 - error = setxattr(path.dentry, name, value, size, flags);
47129 + error = setxattr(&path, name, value, size, flags);
47130 mnt_drop_write(path.mnt);
47131 }
47132 path_put(&path);
47133 @@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
47134 return error;
47135 error = mnt_want_write(path.mnt);
47136 if (!error) {
47137 - error = setxattr(path.dentry, name, value, size, flags);
47138 + error = setxattr(&path, name, value, size, flags);
47139 mnt_drop_write(path.mnt);
47140 }
47141 path_put(&path);
47142 @@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
47143 const void __user *,value, size_t, size, int, flags)
47144 {
47145 struct file *f;
47146 - struct dentry *dentry;
47147 int error = -EBADF;
47148
47149 f = fget(fd);
47150 if (!f)
47151 return error;
47152 - dentry = f->f_path.dentry;
47153 - audit_inode(NULL, dentry);
47154 + audit_inode(NULL, f->f_path.dentry);
47155 error = mnt_want_write_file(f);
47156 if (!error) {
47157 - error = setxattr(dentry, name, value, size, flags);
47158 + error = setxattr(&f->f_path, name, value, size, flags);
47159 mnt_drop_write(f->f_path.mnt);
47160 }
47161 fput(f);
47162 diff -urNp linux-2.6.32.44/fs/xfs/linux-2.6/xfs_ioctl32.c linux-2.6.32.44/fs/xfs/linux-2.6/xfs_ioctl32.c
47163 --- linux-2.6.32.44/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-03-27 14:31:47.000000000 -0400
47164 +++ linux-2.6.32.44/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-04-17 15:56:46.000000000 -0400
47165 @@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
47166 xfs_fsop_geom_t fsgeo;
47167 int error;
47168
47169 + memset(&fsgeo, 0, sizeof(fsgeo));
47170 error = xfs_fs_geometry(mp, &fsgeo, 3);
47171 if (error)
47172 return -error;
47173 diff -urNp linux-2.6.32.44/fs/xfs/linux-2.6/xfs_ioctl.c linux-2.6.32.44/fs/xfs/linux-2.6/xfs_ioctl.c
47174 --- linux-2.6.32.44/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 17:00:52.000000000 -0400
47175 +++ linux-2.6.32.44/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 20:07:09.000000000 -0400
47176 @@ -134,7 +134,7 @@ xfs_find_handle(
47177 }
47178
47179 error = -EFAULT;
47180 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
47181 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
47182 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
47183 goto out_put;
47184
47185 @@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
47186 if (IS_ERR(dentry))
47187 return PTR_ERR(dentry);
47188
47189 - kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
47190 + kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
47191 if (!kbuf)
47192 goto out_dput;
47193
47194 @@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
47195 xfs_mount_t *mp,
47196 void __user *arg)
47197 {
47198 - xfs_fsop_geom_t fsgeo;
47199 + xfs_fsop_geom_t fsgeo;
47200 int error;
47201
47202 error = xfs_fs_geometry(mp, &fsgeo, 3);
47203 diff -urNp linux-2.6.32.44/fs/xfs/linux-2.6/xfs_iops.c linux-2.6.32.44/fs/xfs/linux-2.6/xfs_iops.c
47204 --- linux-2.6.32.44/fs/xfs/linux-2.6/xfs_iops.c 2011-03-27 14:31:47.000000000 -0400
47205 +++ linux-2.6.32.44/fs/xfs/linux-2.6/xfs_iops.c 2011-04-17 15:56:46.000000000 -0400
47206 @@ -468,7 +468,7 @@ xfs_vn_put_link(
47207 struct nameidata *nd,
47208 void *p)
47209 {
47210 - char *s = nd_get_link(nd);
47211 + const char *s = nd_get_link(nd);
47212
47213 if (!IS_ERR(s))
47214 kfree(s);
47215 diff -urNp linux-2.6.32.44/fs/xfs/xfs_bmap.c linux-2.6.32.44/fs/xfs/xfs_bmap.c
47216 --- linux-2.6.32.44/fs/xfs/xfs_bmap.c 2011-03-27 14:31:47.000000000 -0400
47217 +++ linux-2.6.32.44/fs/xfs/xfs_bmap.c 2011-04-17 15:56:46.000000000 -0400
47218 @@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
47219 int nmap,
47220 int ret_nmap);
47221 #else
47222 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
47223 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
47224 #endif /* DEBUG */
47225
47226 #if defined(XFS_RW_TRACE)
47227 diff -urNp linux-2.6.32.44/fs/xfs/xfs_dir2_sf.c linux-2.6.32.44/fs/xfs/xfs_dir2_sf.c
47228 --- linux-2.6.32.44/fs/xfs/xfs_dir2_sf.c 2011-03-27 14:31:47.000000000 -0400
47229 +++ linux-2.6.32.44/fs/xfs/xfs_dir2_sf.c 2011-04-18 22:07:30.000000000 -0400
47230 @@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
47231 }
47232
47233 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
47234 - if (filldir(dirent, sfep->name, sfep->namelen,
47235 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
47236 + char name[sfep->namelen];
47237 + memcpy(name, sfep->name, sfep->namelen);
47238 + if (filldir(dirent, name, sfep->namelen,
47239 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
47240 + *offset = off & 0x7fffffff;
47241 + return 0;
47242 + }
47243 + } else if (filldir(dirent, sfep->name, sfep->namelen,
47244 off & 0x7fffffff, ino, DT_UNKNOWN)) {
47245 *offset = off & 0x7fffffff;
47246 return 0;
47247 diff -urNp linux-2.6.32.44/grsecurity/gracl_alloc.c linux-2.6.32.44/grsecurity/gracl_alloc.c
47248 --- linux-2.6.32.44/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
47249 +++ linux-2.6.32.44/grsecurity/gracl_alloc.c 2011-04-17 15:56:46.000000000 -0400
47250 @@ -0,0 +1,105 @@
47251 +#include <linux/kernel.h>
47252 +#include <linux/mm.h>
47253 +#include <linux/slab.h>
47254 +#include <linux/vmalloc.h>
47255 +#include <linux/gracl.h>
47256 +#include <linux/grsecurity.h>
47257 +
47258 +static unsigned long alloc_stack_next = 1;
47259 +static unsigned long alloc_stack_size = 1;
47260 +static void **alloc_stack;
47261 +
47262 +static __inline__ int
47263 +alloc_pop(void)
47264 +{
47265 + if (alloc_stack_next == 1)
47266 + return 0;
47267 +
47268 + kfree(alloc_stack[alloc_stack_next - 2]);
47269 +
47270 + alloc_stack_next--;
47271 +
47272 + return 1;
47273 +}
47274 +
47275 +static __inline__ int
47276 +alloc_push(void *buf)
47277 +{
47278 + if (alloc_stack_next >= alloc_stack_size)
47279 + return 1;
47280 +
47281 + alloc_stack[alloc_stack_next - 1] = buf;
47282 +
47283 + alloc_stack_next++;
47284 +
47285 + return 0;
47286 +}
47287 +
47288 +void *
47289 +acl_alloc(unsigned long len)
47290 +{
47291 + void *ret = NULL;
47292 +
47293 + if (!len || len > PAGE_SIZE)
47294 + goto out;
47295 +
47296 + ret = kmalloc(len, GFP_KERNEL);
47297 +
47298 + if (ret) {
47299 + if (alloc_push(ret)) {
47300 + kfree(ret);
47301 + ret = NULL;
47302 + }
47303 + }
47304 +
47305 +out:
47306 + return ret;
47307 +}
47308 +
47309 +void *
47310 +acl_alloc_num(unsigned long num, unsigned long len)
47311 +{
47312 + if (!len || (num > (PAGE_SIZE / len)))
47313 + return NULL;
47314 +
47315 + return acl_alloc(num * len);
47316 +}
47317 +
47318 +void
47319 +acl_free_all(void)
47320 +{
47321 + if (gr_acl_is_enabled() || !alloc_stack)
47322 + return;
47323 +
47324 + while (alloc_pop()) ;
47325 +
47326 + if (alloc_stack) {
47327 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
47328 + kfree(alloc_stack);
47329 + else
47330 + vfree(alloc_stack);
47331 + }
47332 +
47333 + alloc_stack = NULL;
47334 + alloc_stack_size = 1;
47335 + alloc_stack_next = 1;
47336 +
47337 + return;
47338 +}
47339 +
47340 +int
47341 +acl_alloc_stack_init(unsigned long size)
47342 +{
47343 + if ((size * sizeof (void *)) <= PAGE_SIZE)
47344 + alloc_stack =
47345 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
47346 + else
47347 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
47348 +
47349 + alloc_stack_size = size;
47350 +
47351 + if (!alloc_stack)
47352 + return 0;
47353 + else
47354 + return 1;
47355 +}
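The new grsecurity/gracl_alloc.c above is a small bookkeeping allocator for the RBAC policy: acl_alloc_stack_init() sizes a pointer stack up front (kmalloc or vmalloc depending on how large it is), acl_alloc() records every successful kmalloc on that stack, and acl_free_all() later tears the whole policy down in one pass. The user-space sketch below mirrors that shape with malloc/free; the pool_* names are invented for the illustration and do not appear in the patch.

/*
 * Sketch of the allocation-stack pattern: record every allocation so a
 * single teardown call can free them all.  Illustration only.
 */
#include <stdio.h>
#include <stdlib.h>

static void **pool;       /* recorded allocations     */
static size_t pool_cap;   /* capacity chosen up front */
static size_t pool_used;  /* number of live entries   */

static int pool_init(size_t cap)
{
	pool = calloc(cap, sizeof(*pool));
	pool_cap = cap;
	return pool != NULL;
}

static void *pool_alloc(size_t len)
{
	void *p;

	if (pool_used >= pool_cap)  /* mirrors alloc_push() failing */
		return NULL;
	p = malloc(len);
	if (p)
		pool[pool_used++] = p;
	return p;
}

static void pool_free_all(void)
{
	while (pool_used)           /* mirrors the alloc_pop() loop */
		free(pool[--pool_used]);
	free(pool);
	pool = NULL;
	pool_cap = 0;
}

int main(void)
{
	if (!pool_init(8))
		return 1;
	pool_alloc(16);
	pool_alloc(32);
	printf("recorded %zu allocations\n", pool_used);
	pool_free_all();
	return 0;
}

The kernel version additionally caps each acl_alloc() at PAGE_SIZE and chooses kmalloc or vmalloc for the pointer stack by comparing its size to PAGE_SIZE; those details are left out of the sketch.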
47356 diff -urNp linux-2.6.32.44/grsecurity/gracl.c linux-2.6.32.44/grsecurity/gracl.c
47357 --- linux-2.6.32.44/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
47358 +++ linux-2.6.32.44/grsecurity/gracl.c 2011-07-14 20:02:48.000000000 -0400
47359 @@ -0,0 +1,4082 @@
47360 +#include <linux/kernel.h>
47361 +#include <linux/module.h>
47362 +#include <linux/sched.h>
47363 +#include <linux/mm.h>
47364 +#include <linux/file.h>
47365 +#include <linux/fs.h>
47366 +#include <linux/namei.h>
47367 +#include <linux/mount.h>
47368 +#include <linux/tty.h>
47369 +#include <linux/proc_fs.h>
47370 +#include <linux/smp_lock.h>
47371 +#include <linux/slab.h>
47372 +#include <linux/vmalloc.h>
47373 +#include <linux/types.h>
47374 +#include <linux/sysctl.h>
47375 +#include <linux/netdevice.h>
47376 +#include <linux/ptrace.h>
47377 +#include <linux/gracl.h>
47378 +#include <linux/gralloc.h>
47379 +#include <linux/grsecurity.h>
47380 +#include <linux/grinternal.h>
47381 +#include <linux/pid_namespace.h>
47382 +#include <linux/fdtable.h>
47383 +#include <linux/percpu.h>
47384 +
47385 +#include <asm/uaccess.h>
47386 +#include <asm/errno.h>
47387 +#include <asm/mman.h>
47388 +
47389 +static struct acl_role_db acl_role_set;
47390 +static struct name_db name_set;
47391 +static struct inodev_db inodev_set;
47392 +
47393 +/* for keeping track of userspace pointers used for subjects, so we
47394 + can share references in the kernel as well
47395 +*/
47396 +
47397 +static struct dentry *real_root;
47398 +static struct vfsmount *real_root_mnt;
47399 +
47400 +static struct acl_subj_map_db subj_map_set;
47401 +
47402 +static struct acl_role_label *default_role;
47403 +
47404 +static struct acl_role_label *role_list;
47405 +
47406 +static u16 acl_sp_role_value;
47407 +
47408 +extern char *gr_shared_page[4];
47409 +static DEFINE_MUTEX(gr_dev_mutex);
47410 +DEFINE_RWLOCK(gr_inode_lock);
47411 +
47412 +struct gr_arg *gr_usermode;
47413 +
47414 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
47415 +
47416 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
47417 +extern void gr_clear_learn_entries(void);
47418 +
47419 +#ifdef CONFIG_GRKERNSEC_RESLOG
47420 +extern void gr_log_resource(const struct task_struct *task,
47421 + const int res, const unsigned long wanted, const int gt);
47422 +#endif
47423 +
47424 +unsigned char *gr_system_salt;
47425 +unsigned char *gr_system_sum;
47426 +
47427 +static struct sprole_pw **acl_special_roles = NULL;
47428 +static __u16 num_sprole_pws = 0;
47429 +
47430 +static struct acl_role_label *kernel_role = NULL;
47431 +
47432 +static unsigned int gr_auth_attempts = 0;
47433 +static unsigned long gr_auth_expires = 0UL;
47434 +
47435 +#ifdef CONFIG_NET
47436 +extern struct vfsmount *sock_mnt;
47437 +#endif
47438 +extern struct vfsmount *pipe_mnt;
47439 +extern struct vfsmount *shm_mnt;
47440 +#ifdef CONFIG_HUGETLBFS
47441 +extern struct vfsmount *hugetlbfs_vfsmount;
47442 +#endif
47443 +
47444 +static struct acl_object_label *fakefs_obj_rw;
47445 +static struct acl_object_label *fakefs_obj_rwx;
47446 +
47447 +extern int gr_init_uidset(void);
47448 +extern void gr_free_uidset(void);
47449 +extern void gr_remove_uid(uid_t uid);
47450 +extern int gr_find_uid(uid_t uid);
47451 +
47452 +__inline__ int
47453 +gr_acl_is_enabled(void)
47454 +{
47455 + return (gr_status & GR_READY);
47456 +}
47457 +
47458 +#ifdef CONFIG_BTRFS_FS
47459 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
47460 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
47461 +#endif
47462 +
47463 +static inline dev_t __get_dev(const struct dentry *dentry)
47464 +{
47465 +#ifdef CONFIG_BTRFS_FS
47466 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
47467 + return get_btrfs_dev_from_inode(dentry->d_inode);
47468 + else
47469 +#endif
47470 + return dentry->d_inode->i_sb->s_dev;
47471 +}
47472 +
47473 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
47474 +{
47475 + return __get_dev(dentry);
47476 +}
47477 +
47478 +static char gr_task_roletype_to_char(struct task_struct *task)
47479 +{
47480 + switch (task->role->roletype &
47481 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
47482 + GR_ROLE_SPECIAL)) {
47483 + case GR_ROLE_DEFAULT:
47484 + return 'D';
47485 + case GR_ROLE_USER:
47486 + return 'U';
47487 + case GR_ROLE_GROUP:
47488 + return 'G';
47489 + case GR_ROLE_SPECIAL:
47490 + return 'S';
47491 + }
47492 +
47493 + return 'X';
47494 +}
47495 +
47496 +char gr_roletype_to_char(void)
47497 +{
47498 + return gr_task_roletype_to_char(current);
47499 +}
47500 +
47501 +__inline__ int
47502 +gr_acl_tpe_check(void)
47503 +{
47504 + if (unlikely(!(gr_status & GR_READY)))
47505 + return 0;
47506 + if (current->role->roletype & GR_ROLE_TPE)
47507 + return 1;
47508 + else
47509 + return 0;
47510 +}
47511 +
47512 +int
47513 +gr_handle_rawio(const struct inode *inode)
47514 +{
47515 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
47516 + if (inode && S_ISBLK(inode->i_mode) &&
47517 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
47518 + !capable(CAP_SYS_RAWIO))
47519 + return 1;
47520 +#endif
47521 + return 0;
47522 +}
47523 +
47524 +static int
47525 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
47526 +{
47527 + if (likely(lena != lenb))
47528 + return 0;
47529 +
47530 + return !memcmp(a, b, lena);
47531 +}
47532 +
47533 +/* this must be called with vfsmount_lock and dcache_lock held */
47534 +
47535 +static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
47536 + struct dentry *root, struct vfsmount *rootmnt,
47537 + char *buffer, int buflen)
47538 +{
47539 + char * end = buffer+buflen;
47540 + char * retval;
47541 + int namelen;
47542 +
47543 + *--end = '\0';
47544 + buflen--;
47545 +
47546 + if (buflen < 1)
47547 + goto Elong;
47548 + /* Get '/' right */
47549 + retval = end-1;
47550 + *retval = '/';
47551 +
47552 + for (;;) {
47553 + struct dentry * parent;
47554 +
47555 + if (dentry == root && vfsmnt == rootmnt)
47556 + break;
47557 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
47558 + /* Global root? */
47559 + if (vfsmnt->mnt_parent == vfsmnt)
47560 + goto global_root;
47561 + dentry = vfsmnt->mnt_mountpoint;
47562 + vfsmnt = vfsmnt->mnt_parent;
47563 + continue;
47564 + }
47565 + parent = dentry->d_parent;
47566 + prefetch(parent);
47567 + namelen = dentry->d_name.len;
47568 + buflen -= namelen + 1;
47569 + if (buflen < 0)
47570 + goto Elong;
47571 + end -= namelen;
47572 + memcpy(end, dentry->d_name.name, namelen);
47573 + *--end = '/';
47574 + retval = end;
47575 + dentry = parent;
47576 + }
47577 +
47578 +out:
47579 + return retval;
47580 +
47581 +global_root:
47582 + namelen = dentry->d_name.len;
47583 + buflen -= namelen;
47584 + if (buflen < 0)
47585 + goto Elong;
47586 + retval -= namelen-1; /* hit the slash */
47587 + memcpy(retval, dentry->d_name.name, namelen);
47588 + goto out;
47589 +Elong:
47590 + retval = ERR_PTR(-ENAMETOOLONG);
47591 + goto out;
47592 +}
47593 +
47594 +static char *
47595 +gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
47596 + struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
47597 +{
47598 + char *retval;
47599 +
47600 + retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
47601 + if (unlikely(IS_ERR(retval)))
47602 + retval = strcpy(buf, "<path too long>");
47603 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
47604 + retval[1] = '\0';
47605 +
47606 + return retval;
47607 +}
47608 +
47609 +static char *
47610 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
47611 + char *buf, int buflen)
47612 +{
47613 + char *res;
47614 +
47615 + /* we can use real_root, real_root_mnt, because this is only called
47616 + by the RBAC system */
47617 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
47618 +
47619 + return res;
47620 +}
47621 +
47622 +static char *
47623 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
47624 + char *buf, int buflen)
47625 +{
47626 + char *res;
47627 + struct dentry *root;
47628 + struct vfsmount *rootmnt;
47629 + struct task_struct *reaper = &init_task;
47630 +
47631 + /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
47632 + read_lock(&reaper->fs->lock);
47633 + root = dget(reaper->fs->root.dentry);
47634 + rootmnt = mntget(reaper->fs->root.mnt);
47635 + read_unlock(&reaper->fs->lock);
47636 +
47637 + spin_lock(&dcache_lock);
47638 + spin_lock(&vfsmount_lock);
47639 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
47640 + spin_unlock(&vfsmount_lock);
47641 + spin_unlock(&dcache_lock);
47642 +
47643 + dput(root);
47644 + mntput(rootmnt);
47645 + return res;
47646 +}
47647 +
47648 +static char *
47649 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
47650 +{
47651 + char *ret;
47652 + spin_lock(&dcache_lock);
47653 + spin_lock(&vfsmount_lock);
47654 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
47655 + PAGE_SIZE);
47656 + spin_unlock(&vfsmount_lock);
47657 + spin_unlock(&dcache_lock);
47658 + return ret;
47659 +}
47660 +
47661 +char *
47662 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
47663 +{
47664 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
47665 + PAGE_SIZE);
47666 +}
47667 +
47668 +char *
47669 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
47670 +{
47671 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
47672 + PAGE_SIZE);
47673 +}
47674 +
47675 +char *
47676 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
47677 +{
47678 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
47679 + PAGE_SIZE);
47680 +}
47681 +
47682 +char *
47683 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
47684 +{
47685 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
47686 + PAGE_SIZE);
47687 +}
47688 +
47689 +char *
47690 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
47691 +{
47692 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
47693 + PAGE_SIZE);
47694 +}
47695 +
47696 +__inline__ __u32
47697 +to_gr_audit(const __u32 reqmode)
47698 +{
47699 + /* masks off auditable permission flags, then shifts them to create
47700 + auditing flags, and adds the special case of append auditing if
47701 + we're requesting write */
47702 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
47703 +}
47704 +
47705 +struct acl_subject_label *
47706 +lookup_subject_map(const struct acl_subject_label *userp)
47707 +{
47708 + unsigned int index = shash(userp, subj_map_set.s_size);
47709 + struct subject_map *match;
47710 +
47711 + match = subj_map_set.s_hash[index];
47712 +
47713 + while (match && match->user != userp)
47714 + match = match->next;
47715 +
47716 + if (match != NULL)
47717 + return match->kernel;
47718 + else
47719 + return NULL;
47720 +}
47721 +
47722 +static void
47723 +insert_subj_map_entry(struct subject_map *subjmap)
47724 +{
47725 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
47726 + struct subject_map **curr;
47727 +
47728 + subjmap->prev = NULL;
47729 +
47730 + curr = &subj_map_set.s_hash[index];
47731 + if (*curr != NULL)
47732 + (*curr)->prev = subjmap;
47733 +
47734 + subjmap->next = *curr;
47735 + *curr = subjmap;
47736 +
47737 + return;
47738 +}
47739 +
47740 +static struct acl_role_label *
47741 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
47742 + const gid_t gid)
47743 +{
47744 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
47745 + struct acl_role_label *match;
47746 + struct role_allowed_ip *ipp;
47747 + unsigned int x;
47748 + u32 curr_ip = task->signal->curr_ip;
47749 +
47750 + task->signal->saved_ip = curr_ip;
47751 +
47752 + match = acl_role_set.r_hash[index];
47753 +
47754 + while (match) {
47755 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
47756 + for (x = 0; x < match->domain_child_num; x++) {
47757 + if (match->domain_children[x] == uid)
47758 + goto found;
47759 + }
47760 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
47761 + break;
47762 + match = match->next;
47763 + }
47764 +found:
47765 + if (match == NULL) {
47766 + try_group:
47767 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
47768 + match = acl_role_set.r_hash[index];
47769 +
47770 + while (match) {
47771 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
47772 + for (x = 0; x < match->domain_child_num; x++) {
47773 + if (match->domain_children[x] == gid)
47774 + goto found2;
47775 + }
47776 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
47777 + break;
47778 + match = match->next;
47779 + }
47780 +found2:
47781 + if (match == NULL)
47782 + match = default_role;
47783 + if (match->allowed_ips == NULL)
47784 + return match;
47785 + else {
47786 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47787 + if (likely
47788 + ((ntohl(curr_ip) & ipp->netmask) ==
47789 + (ntohl(ipp->addr) & ipp->netmask)))
47790 + return match;
47791 + }
47792 + match = default_role;
47793 + }
47794 + } else if (match->allowed_ips == NULL) {
47795 + return match;
47796 + } else {
47797 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47798 + if (likely
47799 + ((ntohl(curr_ip) & ipp->netmask) ==
47800 + (ntohl(ipp->addr) & ipp->netmask)))
47801 + return match;
47802 + }
47803 + goto try_group;
47804 + }
47805 +
47806 + return match;
47807 +}
47808 +
47809 +struct acl_subject_label *
47810 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
47811 + const struct acl_role_label *role)
47812 +{
47813 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
47814 + struct acl_subject_label *match;
47815 +
47816 + match = role->subj_hash[index];
47817 +
47818 + while (match && (match->inode != ino || match->device != dev ||
47819 + (match->mode & GR_DELETED))) {
47820 + match = match->next;
47821 + }
47822 +
47823 + if (match && !(match->mode & GR_DELETED))
47824 + return match;
47825 + else
47826 + return NULL;
47827 +}
47828 +
47829 +struct acl_subject_label *
47830 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
47831 + const struct acl_role_label *role)
47832 +{
47833 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
47834 + struct acl_subject_label *match;
47835 +
47836 + match = role->subj_hash[index];
47837 +
47838 + while (match && (match->inode != ino || match->device != dev ||
47839 + !(match->mode & GR_DELETED))) {
47840 + match = match->next;
47841 + }
47842 +
47843 + if (match && (match->mode & GR_DELETED))
47844 + return match;
47845 + else
47846 + return NULL;
47847 +}
47848 +
47849 +static struct acl_object_label *
47850 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
47851 + const struct acl_subject_label *subj)
47852 +{
47853 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47854 + struct acl_object_label *match;
47855 +
47856 + match = subj->obj_hash[index];
47857 +
47858 + while (match && (match->inode != ino || match->device != dev ||
47859 + (match->mode & GR_DELETED))) {
47860 + match = match->next;
47861 + }
47862 +
47863 + if (match && !(match->mode & GR_DELETED))
47864 + return match;
47865 + else
47866 + return NULL;
47867 +}
47868 +
47869 +static struct acl_object_label *
47870 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
47871 + const struct acl_subject_label *subj)
47872 +{
47873 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47874 + struct acl_object_label *match;
47875 +
47876 + match = subj->obj_hash[index];
47877 +
47878 + while (match && (match->inode != ino || match->device != dev ||
47879 + !(match->mode & GR_DELETED))) {
47880 + match = match->next;
47881 + }
47882 +
47883 + if (match && (match->mode & GR_DELETED))
47884 + return match;
47885 +
47886 + match = subj->obj_hash[index];
47887 +
47888 + while (match && (match->inode != ino || match->device != dev ||
47889 + (match->mode & GR_DELETED))) {
47890 + match = match->next;
47891 + }
47892 +
47893 + if (match && !(match->mode & GR_DELETED))
47894 + return match;
47895 + else
47896 + return NULL;
47897 +}
47898 +
47899 +static struct name_entry *
47900 +lookup_name_entry(const char *name)
47901 +{
47902 + unsigned int len = strlen(name);
47903 + unsigned int key = full_name_hash(name, len);
47904 + unsigned int index = key % name_set.n_size;
47905 + struct name_entry *match;
47906 +
47907 + match = name_set.n_hash[index];
47908 +
47909 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
47910 + match = match->next;
47911 +
47912 + return match;
47913 +}
47914 +
47915 +static struct name_entry *
47916 +lookup_name_entry_create(const char *name)
47917 +{
47918 + unsigned int len = strlen(name);
47919 + unsigned int key = full_name_hash(name, len);
47920 + unsigned int index = key % name_set.n_size;
47921 + struct name_entry *match;
47922 +
47923 + match = name_set.n_hash[index];
47924 +
47925 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
47926 + !match->deleted))
47927 + match = match->next;
47928 +
47929 + if (match && match->deleted)
47930 + return match;
47931 +
47932 + match = name_set.n_hash[index];
47933 +
47934 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
47935 + match->deleted))
47936 + match = match->next;
47937 +
47938 + if (match && !match->deleted)
47939 + return match;
47940 + else
47941 + return NULL;
47942 +}
47943 +
47944 +static struct inodev_entry *
47945 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
47946 +{
47947 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
47948 + struct inodev_entry *match;
47949 +
47950 + match = inodev_set.i_hash[index];
47951 +
47952 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
47953 + match = match->next;
47954 +
47955 + return match;
47956 +}
47957 +
47958 +static void
47959 +insert_inodev_entry(struct inodev_entry *entry)
47960 +{
47961 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
47962 + inodev_set.i_size);
47963 + struct inodev_entry **curr;
47964 +
47965 + entry->prev = NULL;
47966 +
47967 + curr = &inodev_set.i_hash[index];
47968 + if (*curr != NULL)
47969 + (*curr)->prev = entry;
47970 +
47971 + entry->next = *curr;
47972 + *curr = entry;
47973 +
47974 + return;
47975 +}
47976 +
47977 +static void
47978 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
47979 +{
47980 + unsigned int index =
47981 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
47982 + struct acl_role_label **curr;
47983 + struct acl_role_label *tmp;
47984 +
47985 + curr = &acl_role_set.r_hash[index];
47986 +
47987 + /* if role was already inserted due to domains and already has
47988 + a role in the same bucket as it attached, then we need to
47989 + combine these two buckets
47990 + */
47991 + if (role->next) {
47992 + tmp = role->next;
47993 + while (tmp->next)
47994 + tmp = tmp->next;
47995 + tmp->next = *curr;
47996 + } else
47997 + role->next = *curr;
47998 + *curr = role;
47999 +
48000 + return;
48001 +}
48002 +
48003 +static void
48004 +insert_acl_role_label(struct acl_role_label *role)
48005 +{
48006 + int i;
48007 +
48008 + if (role_list == NULL) {
48009 + role_list = role;
48010 + role->prev = NULL;
48011 + } else {
48012 + role->prev = role_list;
48013 + role_list = role;
48014 + }
48015 +
48016 + /* used for hash chains */
48017 + role->next = NULL;
48018 +
48019 + if (role->roletype & GR_ROLE_DOMAIN) {
48020 + for (i = 0; i < role->domain_child_num; i++)
48021 + __insert_acl_role_label(role, role->domain_children[i]);
48022 + } else
48023 + __insert_acl_role_label(role, role->uidgid);
48024 +}
48025 +
48026 +static int
48027 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
48028 +{
48029 + struct name_entry **curr, *nentry;
48030 + struct inodev_entry *ientry;
48031 + unsigned int len = strlen(name);
48032 + unsigned int key = full_name_hash(name, len);
48033 + unsigned int index = key % name_set.n_size;
48034 +
48035 + curr = &name_set.n_hash[index];
48036 +
48037 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
48038 + curr = &((*curr)->next);
48039 +
48040 + if (*curr != NULL)
48041 + return 1;
48042 +
48043 + nentry = acl_alloc(sizeof (struct name_entry));
48044 + if (nentry == NULL)
48045 + return 0;
48046 + ientry = acl_alloc(sizeof (struct inodev_entry));
48047 + if (ientry == NULL)
48048 + return 0;
48049 + ientry->nentry = nentry;
48050 +
48051 + nentry->key = key;
48052 + nentry->name = name;
48053 + nentry->inode = inode;
48054 + nentry->device = device;
48055 + nentry->len = len;
48056 + nentry->deleted = deleted;
48057 +
48058 + nentry->prev = NULL;
48059 + curr = &name_set.n_hash[index];
48060 + if (*curr != NULL)
48061 + (*curr)->prev = nentry;
48062 + nentry->next = *curr;
48063 + *curr = nentry;
48064 +
48065 + /* insert us into the table searchable by inode/dev */
48066 + insert_inodev_entry(ientry);
48067 +
48068 + return 1;
48069 +}
48070 +
48071 +static void
48072 +insert_acl_obj_label(struct acl_object_label *obj,
48073 + struct acl_subject_label *subj)
48074 +{
48075 + unsigned int index =
48076 + fhash(obj->inode, obj->device, subj->obj_hash_size);
48077 + struct acl_object_label **curr;
48078 +
48079 +
48080 + obj->prev = NULL;
48081 +
48082 + curr = &subj->obj_hash[index];
48083 + if (*curr != NULL)
48084 + (*curr)->prev = obj;
48085 +
48086 + obj->next = *curr;
48087 + *curr = obj;
48088 +
48089 + return;
48090 +}
48091 +
48092 +static void
48093 +insert_acl_subj_label(struct acl_subject_label *obj,
48094 + struct acl_role_label *role)
48095 +{
48096 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
48097 + struct acl_subject_label **curr;
48098 +
48099 + obj->prev = NULL;
48100 +
48101 + curr = &role->subj_hash[index];
48102 + if (*curr != NULL)
48103 + (*curr)->prev = obj;
48104 +
48105 + obj->next = *curr;
48106 + *curr = obj;
48107 +
48108 + return;
48109 +}
48110 +
48111 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
48112 +
48113 +static void *
48114 +create_table(__u32 * len, int elementsize)
48115 +{
48116 + unsigned int table_sizes[] = {
48117 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
48118 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
48119 + 4194301, 8388593, 16777213, 33554393, 67108859
48120 + };
48121 + void *newtable = NULL;
48122 + unsigned int pwr = 0;
48123 +
48124 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
48125 + table_sizes[pwr] <= *len)
48126 + pwr++;
48127 +
48128 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
48129 + return newtable;
48130 +
48131 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
48132 + newtable =
48133 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
48134 + else
48135 + newtable = vmalloc(table_sizes[pwr] * elementsize);
48136 +
48137 + *len = table_sizes[pwr];
48138 +
48139 + return newtable;
48140 +}
48141 +
48142 +static int
48143 +init_variables(const struct gr_arg *arg)
48144 +{
48145 + struct task_struct *reaper = &init_task;
48146 + unsigned int stacksize;
48147 +
48148 + subj_map_set.s_size = arg->role_db.num_subjects;
48149 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
48150 + name_set.n_size = arg->role_db.num_objects;
48151 + inodev_set.i_size = arg->role_db.num_objects;
48152 +
48153 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
48154 + !name_set.n_size || !inodev_set.i_size)
48155 + return 1;
48156 +
48157 + if (!gr_init_uidset())
48158 + return 1;
48159 +
48160 + /* set up the stack that holds allocation info */
48161 +
48162 + stacksize = arg->role_db.num_pointers + 5;
48163 +
48164 + if (!acl_alloc_stack_init(stacksize))
48165 + return 1;
48166 +
48167 + /* grab reference for the real root dentry and vfsmount */
48168 + read_lock(&reaper->fs->lock);
48169 + real_root = dget(reaper->fs->root.dentry);
48170 + real_root_mnt = mntget(reaper->fs->root.mnt);
48171 + read_unlock(&reaper->fs->lock);
48172 +
48173 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
48174 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
48175 +#endif
48176 +
48177 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
48178 + if (fakefs_obj_rw == NULL)
48179 + return 1;
48180 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
48181 +
48182 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
48183 + if (fakefs_obj_rwx == NULL)
48184 + return 1;
48185 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
48186 +
48187 + subj_map_set.s_hash =
48188 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
48189 + acl_role_set.r_hash =
48190 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
48191 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
48192 + inodev_set.i_hash =
48193 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
48194 +
48195 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
48196 + !name_set.n_hash || !inodev_set.i_hash)
48197 + return 1;
48198 +
48199 + memset(subj_map_set.s_hash, 0,
48200 + sizeof(struct subject_map *) * subj_map_set.s_size);
48201 + memset(acl_role_set.r_hash, 0,
48202 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
48203 + memset(name_set.n_hash, 0,
48204 + sizeof (struct name_entry *) * name_set.n_size);
48205 + memset(inodev_set.i_hash, 0,
48206 + sizeof (struct inodev_entry *) * inodev_set.i_size);
48207 +
48208 + return 0;
48209 +}
48210 +
48211 +/* free information not needed after startup
48212 + currently contains user->kernel pointer mappings for subjects
48213 +*/
48214 +
48215 +static void
48216 +free_init_variables(void)
48217 +{
48218 + __u32 i;
48219 +
48220 + if (subj_map_set.s_hash) {
48221 + for (i = 0; i < subj_map_set.s_size; i++) {
48222 + if (subj_map_set.s_hash[i]) {
48223 + kfree(subj_map_set.s_hash[i]);
48224 + subj_map_set.s_hash[i] = NULL;
48225 + }
48226 + }
48227 +
48228 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
48229 + PAGE_SIZE)
48230 + kfree(subj_map_set.s_hash);
48231 + else
48232 + vfree(subj_map_set.s_hash);
48233 + }
48234 +
48235 + return;
48236 +}
48237 +
48238 +static void
48239 +free_variables(void)
48240 +{
48241 + struct acl_subject_label *s;
48242 + struct acl_role_label *r;
48243 + struct task_struct *task, *task2;
48244 + unsigned int x;
48245 +
48246 + gr_clear_learn_entries();
48247 +
48248 + read_lock(&tasklist_lock);
48249 + do_each_thread(task2, task) {
48250 + task->acl_sp_role = 0;
48251 + task->acl_role_id = 0;
48252 + task->acl = NULL;
48253 + task->role = NULL;
48254 + } while_each_thread(task2, task);
48255 + read_unlock(&tasklist_lock);
48256 +
48257 + /* release the reference to the real root dentry and vfsmount */
48258 + if (real_root)
48259 + dput(real_root);
48260 + real_root = NULL;
48261 + if (real_root_mnt)
48262 + mntput(real_root_mnt);
48263 + real_root_mnt = NULL;
48264 +
48265 + /* free all object hash tables */
48266 +
48267 + FOR_EACH_ROLE_START(r)
48268 + if (r->subj_hash == NULL)
48269 + goto next_role;
48270 + FOR_EACH_SUBJECT_START(r, s, x)
48271 + if (s->obj_hash == NULL)
48272 + break;
48273 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
48274 + kfree(s->obj_hash);
48275 + else
48276 + vfree(s->obj_hash);
48277 + FOR_EACH_SUBJECT_END(s, x)
48278 + FOR_EACH_NESTED_SUBJECT_START(r, s)
48279 + if (s->obj_hash == NULL)
48280 + break;
48281 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
48282 + kfree(s->obj_hash);
48283 + else
48284 + vfree(s->obj_hash);
48285 + FOR_EACH_NESTED_SUBJECT_END(s)
48286 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
48287 + kfree(r->subj_hash);
48288 + else
48289 + vfree(r->subj_hash);
48290 + r->subj_hash = NULL;
48291 +next_role:
48292 + FOR_EACH_ROLE_END(r)
48293 +
48294 + acl_free_all();
48295 +
48296 + if (acl_role_set.r_hash) {
48297 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
48298 + PAGE_SIZE)
48299 + kfree(acl_role_set.r_hash);
48300 + else
48301 + vfree(acl_role_set.r_hash);
48302 + }
48303 + if (name_set.n_hash) {
48304 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
48305 + PAGE_SIZE)
48306 + kfree(name_set.n_hash);
48307 + else
48308 + vfree(name_set.n_hash);
48309 + }
48310 +
48311 + if (inodev_set.i_hash) {
48312 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
48313 + PAGE_SIZE)
48314 + kfree(inodev_set.i_hash);
48315 + else
48316 + vfree(inodev_set.i_hash);
48317 + }
48318 +
48319 + gr_free_uidset();
48320 +
48321 + memset(&name_set, 0, sizeof (struct name_db));
48322 + memset(&inodev_set, 0, sizeof (struct inodev_db));
48323 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
48324 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
48325 +
48326 + default_role = NULL;
48327 + role_list = NULL;
48328 +
48329 + return;
48330 +}
48331 +
48332 +static __u32
48333 +count_user_objs(struct acl_object_label *userp)
48334 +{
48335 + struct acl_object_label o_tmp;
48336 + __u32 num = 0;
48337 +
48338 + while (userp) {
48339 + if (copy_from_user(&o_tmp, userp,
48340 + sizeof (struct acl_object_label)))
48341 + break;
48342 +
48343 + userp = o_tmp.prev;
48344 + num++;
48345 + }
48346 +
48347 + return num;
48348 +}
48349 +
48350 +static struct acl_subject_label *
48351 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
48352 +
48353 +static int
48354 +copy_user_glob(struct acl_object_label *obj)
48355 +{
48356 + struct acl_object_label *g_tmp, **guser;
48357 + unsigned int len;
48358 + char *tmp;
48359 +
48360 + if (obj->globbed == NULL)
48361 + return 0;
48362 +
48363 + guser = &obj->globbed;
48364 + while (*guser) {
48365 + g_tmp = (struct acl_object_label *)
48366 + acl_alloc(sizeof (struct acl_object_label));
48367 + if (g_tmp == NULL)
48368 + return -ENOMEM;
48369 +
48370 + if (copy_from_user(g_tmp, *guser,
48371 + sizeof (struct acl_object_label)))
48372 + return -EFAULT;
48373 +
48374 + len = strnlen_user(g_tmp->filename, PATH_MAX);
48375 +
48376 + if (!len || len >= PATH_MAX)
48377 + return -EINVAL;
48378 +
48379 + if ((tmp = (char *) acl_alloc(len)) == NULL)
48380 + return -ENOMEM;
48381 +
48382 + if (copy_from_user(tmp, g_tmp->filename, len))
48383 + return -EFAULT;
48384 + tmp[len-1] = '\0';
48385 + g_tmp->filename = tmp;
48386 +
48387 + *guser = g_tmp;
48388 + guser = &(g_tmp->next);
48389 + }
48390 +
48391 + return 0;
48392 +}
48393 +
48394 +static int
48395 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
48396 + struct acl_role_label *role)
48397 +{
48398 + struct acl_object_label *o_tmp;
48399 + unsigned int len;
48400 + int ret;
48401 + char *tmp;
48402 +
48403 + while (userp) {
48404 + if ((o_tmp = (struct acl_object_label *)
48405 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
48406 + return -ENOMEM;
48407 +
48408 + if (copy_from_user(o_tmp, userp,
48409 + sizeof (struct acl_object_label)))
48410 + return -EFAULT;
48411 +
48412 + userp = o_tmp->prev;
48413 +
48414 + len = strnlen_user(o_tmp->filename, PATH_MAX);
48415 +
48416 + if (!len || len >= PATH_MAX)
48417 + return -EINVAL;
48418 +
48419 + if ((tmp = (char *) acl_alloc(len)) == NULL)
48420 + return -ENOMEM;
48421 +
48422 + if (copy_from_user(tmp, o_tmp->filename, len))
48423 + return -EFAULT;
48424 + tmp[len-1] = '\0';
48425 + o_tmp->filename = tmp;
48426 +
48427 + insert_acl_obj_label(o_tmp, subj);
48428 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
48429 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
48430 + return -ENOMEM;
48431 +
48432 + ret = copy_user_glob(o_tmp);
48433 + if (ret)
48434 + return ret;
48435 +
48436 + if (o_tmp->nested) {
48437 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
48438 + if (IS_ERR(o_tmp->nested))
48439 + return PTR_ERR(o_tmp->nested);
48440 +
48441 + /* insert into nested subject list */
48442 + o_tmp->nested->next = role->hash->first;
48443 + role->hash->first = o_tmp->nested;
48444 + }
48445 + }
48446 +
48447 + return 0;
48448 +}
48449 +
48450 +static __u32
48451 +count_user_subjs(struct acl_subject_label *userp)
48452 +{
48453 + struct acl_subject_label s_tmp;
48454 + __u32 num = 0;
48455 +
48456 + while (userp) {
48457 + if (copy_from_user(&s_tmp, userp,
48458 + sizeof (struct acl_subject_label)))
48459 + break;
48460 +
48461 + userp = s_tmp.prev;
48462 + /* do not count nested subjects against this count, since
48463 + they are not included in the hash table, but are
48464 + attached to objects. We have already counted
48465 + the subjects in userspace for the allocation
48466 + stack
48467 + */
48468 + if (!(s_tmp.mode & GR_NESTED))
48469 + num++;
48470 + }
48471 +
48472 + return num;
48473 +}
48474 +
48475 +static int
48476 +copy_user_allowedips(struct acl_role_label *rolep)
48477 +{
48478 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
48479 +
48480 + ruserip = rolep->allowed_ips;
48481 +
48482 + while (ruserip) {
48483 + rlast = rtmp;
48484 +
48485 + if ((rtmp = (struct role_allowed_ip *)
48486 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
48487 + return -ENOMEM;
48488 +
48489 + if (copy_from_user(rtmp, ruserip,
48490 + sizeof (struct role_allowed_ip)))
48491 + return -EFAULT;
48492 +
48493 + ruserip = rtmp->prev;
48494 +
48495 + if (!rlast) {
48496 + rtmp->prev = NULL;
48497 + rolep->allowed_ips = rtmp;
48498 + } else {
48499 + rlast->next = rtmp;
48500 + rtmp->prev = rlast;
48501 + }
48502 +
48503 + if (!ruserip)
48504 + rtmp->next = NULL;
48505 + }
48506 +
48507 + return 0;
48508 +}
48509 +
48510 +static int
48511 +copy_user_transitions(struct acl_role_label *rolep)
48512 +{
48513 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
48514 +
48515 + unsigned int len;
48516 + char *tmp;
48517 +
48518 + rusertp = rolep->transitions;
48519 +
48520 + while (rusertp) {
48521 + rlast = rtmp;
48522 +
48523 + if ((rtmp = (struct role_transition *)
48524 + acl_alloc(sizeof (struct role_transition))) == NULL)
48525 + return -ENOMEM;
48526 +
48527 + if (copy_from_user(rtmp, rusertp,
48528 + sizeof (struct role_transition)))
48529 + return -EFAULT;
48530 +
48531 + rusertp = rtmp->prev;
48532 +
48533 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
48534 +
48535 + if (!len || len >= GR_SPROLE_LEN)
48536 + return -EINVAL;
48537 +
48538 + if ((tmp = (char *) acl_alloc(len)) == NULL)
48539 + return -ENOMEM;
48540 +
48541 + if (copy_from_user(tmp, rtmp->rolename, len))
48542 + return -EFAULT;
48543 + tmp[len-1] = '\0';
48544 + rtmp->rolename = tmp;
48545 +
48546 + if (!rlast) {
48547 + rtmp->prev = NULL;
48548 + rolep->transitions = rtmp;
48549 + } else {
48550 + rlast->next = rtmp;
48551 + rtmp->prev = rlast;
48552 + }
48553 +
48554 + if (!rusertp)
48555 + rtmp->next = NULL;
48556 + }
48557 +
48558 + return 0;
48559 +}
48560 +
48561 +static struct acl_subject_label *
48562 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
48563 +{
48564 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
48565 + unsigned int len;
48566 + char *tmp;
48567 + __u32 num_objs;
48568 + struct acl_ip_label **i_tmp, *i_utmp2;
48569 + struct gr_hash_struct ghash;
48570 + struct subject_map *subjmap;
48571 + unsigned int i_num;
48572 + int err;
48573 +
48574 + s_tmp = lookup_subject_map(userp);
48575 +
48576 + /* we've already copied this subject into the kernel, just return
48577 + the reference to it, and don't copy it over again
48578 + */
48579 + if (s_tmp)
48580 + return(s_tmp);
48581 +
48582 + if ((s_tmp = (struct acl_subject_label *)
48583 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
48584 + return ERR_PTR(-ENOMEM);
48585 +
48586 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
48587 + if (subjmap == NULL)
48588 + return ERR_PTR(-ENOMEM);
48589 +
48590 + subjmap->user = userp;
48591 + subjmap->kernel = s_tmp;
48592 + insert_subj_map_entry(subjmap);
48593 +
48594 + if (copy_from_user(s_tmp, userp,
48595 + sizeof (struct acl_subject_label)))
48596 + return ERR_PTR(-EFAULT);
48597 +
48598 + len = strnlen_user(s_tmp->filename, PATH_MAX);
48599 +
48600 + if (!len || len >= PATH_MAX)
48601 + return ERR_PTR(-EINVAL);
48602 +
48603 + if ((tmp = (char *) acl_alloc(len)) == NULL)
48604 + return ERR_PTR(-ENOMEM);
48605 +
48606 + if (copy_from_user(tmp, s_tmp->filename, len))
48607 + return ERR_PTR(-EFAULT);
48608 + tmp[len-1] = '\0';
48609 + s_tmp->filename = tmp;
48610 +
48611 + if (!strcmp(s_tmp->filename, "/"))
48612 + role->root_label = s_tmp;
48613 +
48614 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
48615 + return ERR_PTR(-EFAULT);
48616 +
48617 + /* copy user and group transition tables */
48618 +
48619 + if (s_tmp->user_trans_num) {
48620 + uid_t *uidlist;
48621 +
48622 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
48623 + if (uidlist == NULL)
48624 + return ERR_PTR(-ENOMEM);
48625 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
48626 + return ERR_PTR(-EFAULT);
48627 +
48628 + s_tmp->user_transitions = uidlist;
48629 + }
48630 +
48631 + if (s_tmp->group_trans_num) {
48632 + gid_t *gidlist;
48633 +
48634 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
48635 + if (gidlist == NULL)
48636 + return ERR_PTR(-ENOMEM);
48637 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
48638 + return ERR_PTR(-EFAULT);
48639 +
48640 + s_tmp->group_transitions = gidlist;
48641 + }
48642 +
48643 + /* set up object hash table */
48644 + num_objs = count_user_objs(ghash.first);
48645 +
48646 + s_tmp->obj_hash_size = num_objs;
48647 + s_tmp->obj_hash =
48648 + (struct acl_object_label **)
48649 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
48650 +
48651 + if (!s_tmp->obj_hash)
48652 + return ERR_PTR(-ENOMEM);
48653 +
48654 + memset(s_tmp->obj_hash, 0,
48655 + s_tmp->obj_hash_size *
48656 + sizeof (struct acl_object_label *));
48657 +
48658 + /* add in objects */
48659 + err = copy_user_objs(ghash.first, s_tmp, role);
48660 +
48661 + if (err)
48662 + return ERR_PTR(err);
48663 +
48664 + /* set pointer for parent subject */
48665 + if (s_tmp->parent_subject) {
48666 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
48667 +
48668 + if (IS_ERR(s_tmp2))
48669 + return s_tmp2;
48670 +
48671 + s_tmp->parent_subject = s_tmp2;
48672 + }
48673 +
48674 + /* add in ip acls */
48675 +
48676 + if (!s_tmp->ip_num) {
48677 + s_tmp->ips = NULL;
48678 + goto insert;
48679 + }
48680 +
48681 + i_tmp =
48682 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
48683 + sizeof (struct acl_ip_label *));
48684 +
48685 + if (!i_tmp)
48686 + return ERR_PTR(-ENOMEM);
48687 +
48688 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
48689 + *(i_tmp + i_num) =
48690 + (struct acl_ip_label *)
48691 + acl_alloc(sizeof (struct acl_ip_label));
48692 + if (!*(i_tmp + i_num))
48693 + return ERR_PTR(-ENOMEM);
48694 +
48695 + if (copy_from_user
48696 + (&i_utmp2, s_tmp->ips + i_num,
48697 + sizeof (struct acl_ip_label *)))
48698 + return ERR_PTR(-EFAULT);
48699 +
48700 + if (copy_from_user
48701 + (*(i_tmp + i_num), i_utmp2,
48702 + sizeof (struct acl_ip_label)))
48703 + return ERR_PTR(-EFAULT);
48704 +
48705 + if ((*(i_tmp + i_num))->iface == NULL)
48706 + continue;
48707 +
48708 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
48709 + if (!len || len >= IFNAMSIZ)
48710 + return ERR_PTR(-EINVAL);
48711 + tmp = acl_alloc(len);
48712 + if (tmp == NULL)
48713 + return ERR_PTR(-ENOMEM);
48714 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
48715 + return ERR_PTR(-EFAULT);
48716 + (*(i_tmp + i_num))->iface = tmp;
48717 + }
48718 +
48719 + s_tmp->ips = i_tmp;
48720 +
48721 +insert:
48722 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
48723 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
48724 + return ERR_PTR(-ENOMEM);
48725 +
48726 + return s_tmp;
48727 +}
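do_copy_user_subj() above, and copy_user_acl() further below, repeat a bounded string copy for every filename, role name and interface name: measure with strnlen_user(), allocate exactly that many bytes, copy, and force a terminating NUL. A minimal userspace sketch of that sequence, illustrative only: strnlen()/malloc()/memcpy() stand in for strnlen_user()/acl_alloc()/copy_from_user(), and maxlen plays the role of PATH_MAX, GR_SPROLE_LEN or IFNAMSIZ.

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    static char *copy_bounded_string(const char *src, size_t maxlen, int *err)
    {
        /* length including the terminating NUL, as strnlen_user() reports it */
        size_t len = strnlen(src, maxlen) + 1;
        char *dst;

        /* no NUL found within maxlen: reject, much like the length checks above */
        if (len > maxlen) {
            *err = -EINVAL;
            return NULL;
        }
        dst = malloc(len);
        if (dst == NULL) {
            *err = -ENOMEM;
            return NULL;
        }
        memcpy(dst, src, len);
        dst[len - 1] = '\0';    /* force termination, as the patch does after every copy */
        *err = 0;
        return dst;
    }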
48728 +
48729 +static int
48730 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
48731 +{
48732 + struct acl_subject_label s_pre;
48733 + struct acl_subject_label * ret;
48734 + int err;
48735 +
48736 + while (userp) {
48737 + if (copy_from_user(&s_pre, userp,
48738 + sizeof (struct acl_subject_label)))
48739 + return -EFAULT;
48740 +
48741 + /* do not add nested subjects here, add
48742 + while parsing objects
48743 + */
48744 +
48745 + if (s_pre.mode & GR_NESTED) {
48746 + userp = s_pre.prev;
48747 + continue;
48748 + }
48749 +
48750 + ret = do_copy_user_subj(userp, role);
48751 +
48752 + err = PTR_ERR(ret);
48753 + if (IS_ERR(ret))
48754 + return err;
48755 +
48756 + insert_acl_subj_label(ret, role);
48757 +
48758 + userp = s_pre.prev;
48759 + }
48760 +
48761 + return 0;
48762 +}
48763 +
48764 +static int
48765 +copy_user_acl(struct gr_arg *arg)
48766 +{
48767 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
48768 + struct sprole_pw *sptmp;
48769 + struct gr_hash_struct *ghash;
48770 + uid_t *domainlist;
48771 + unsigned int r_num;
48772 + unsigned int len;
48773 + char *tmp;
48774 + int err = 0;
48775 + __u16 i;
48776 + __u32 num_subjs;
48777 +
48778 + /* we need a default and kernel role */
48779 + if (arg->role_db.num_roles < 2)
48780 + return -EINVAL;
48781 +
48782 + /* copy special role authentication info from userspace */
48783 +
48784 + num_sprole_pws = arg->num_sprole_pws;
48785 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
48786 +
48787 + if (!acl_special_roles) {
48788 + err = -ENOMEM;
48789 + goto cleanup;
48790 + }
48791 +
48792 + for (i = 0; i < num_sprole_pws; i++) {
48793 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
48794 + if (!sptmp) {
48795 + err = -ENOMEM;
48796 + goto cleanup;
48797 + }
48798 + if (copy_from_user(sptmp, arg->sprole_pws + i,
48799 + sizeof (struct sprole_pw))) {
48800 + err = -EFAULT;
48801 + goto cleanup;
48802 + }
48803 +
48804 + len =
48805 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
48806 +
48807 + if (!len || len >= GR_SPROLE_LEN) {
48808 + err = -EINVAL;
48809 + goto cleanup;
48810 + }
48811 +
48812 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
48813 + err = -ENOMEM;
48814 + goto cleanup;
48815 + }
48816 +
48817 + if (copy_from_user(tmp, sptmp->rolename, len)) {
48818 + err = -EFAULT;
48819 + goto cleanup;
48820 + }
48821 + tmp[len-1] = '\0';
48822 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
48823 + printk(KERN_ALERT "Copying special role %s\n", tmp);
48824 +#endif
48825 + sptmp->rolename = tmp;
48826 + acl_special_roles[i] = sptmp;
48827 + }
48828 +
48829 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
48830 +
48831 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
48832 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
48833 +
48834 + if (!r_tmp) {
48835 + err = -ENOMEM;
48836 + goto cleanup;
48837 + }
48838 +
48839 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
48840 + sizeof (struct acl_role_label *))) {
48841 + err = -EFAULT;
48842 + goto cleanup;
48843 + }
48844 +
48845 + if (copy_from_user(r_tmp, r_utmp2,
48846 + sizeof (struct acl_role_label))) {
48847 + err = -EFAULT;
48848 + goto cleanup;
48849 + }
48850 +
48851 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
48852 +
48853 +		if (!len || len >= GR_SPROLE_LEN) {
48854 + err = -EINVAL;
48855 + goto cleanup;
48856 + }
48857 +
48858 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
48859 + err = -ENOMEM;
48860 + goto cleanup;
48861 + }
48862 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
48863 + err = -EFAULT;
48864 + goto cleanup;
48865 + }
48866 + tmp[len-1] = '\0';
48867 + r_tmp->rolename = tmp;
48868 +
48869 + if (!strcmp(r_tmp->rolename, "default")
48870 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
48871 + default_role = r_tmp;
48872 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
48873 + kernel_role = r_tmp;
48874 + }
48875 +
48876 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
48877 + err = -ENOMEM;
48878 + goto cleanup;
48879 + }
48880 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
48881 + err = -EFAULT;
48882 + goto cleanup;
48883 + }
48884 +
48885 + r_tmp->hash = ghash;
48886 +
48887 + num_subjs = count_user_subjs(r_tmp->hash->first);
48888 +
48889 + r_tmp->subj_hash_size = num_subjs;
48890 + r_tmp->subj_hash =
48891 + (struct acl_subject_label **)
48892 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
48893 +
48894 + if (!r_tmp->subj_hash) {
48895 + err = -ENOMEM;
48896 + goto cleanup;
48897 + }
48898 +
48899 + err = copy_user_allowedips(r_tmp);
48900 + if (err)
48901 + goto cleanup;
48902 +
48903 + /* copy domain info */
48904 + if (r_tmp->domain_children != NULL) {
48905 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
48906 + if (domainlist == NULL) {
48907 + err = -ENOMEM;
48908 + goto cleanup;
48909 + }
48910 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
48911 + err = -EFAULT;
48912 + goto cleanup;
48913 + }
48914 + r_tmp->domain_children = domainlist;
48915 + }
48916 +
48917 + err = copy_user_transitions(r_tmp);
48918 + if (err)
48919 + goto cleanup;
48920 +
48921 + memset(r_tmp->subj_hash, 0,
48922 + r_tmp->subj_hash_size *
48923 + sizeof (struct acl_subject_label *));
48924 +
48925 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
48926 +
48927 + if (err)
48928 + goto cleanup;
48929 +
48930 + /* set nested subject list to null */
48931 + r_tmp->hash->first = NULL;
48932 +
48933 + insert_acl_role_label(r_tmp);
48934 + }
48935 +
48936 + goto return_err;
48937 + cleanup:
48938 + free_variables();
48939 + return_err:
48940 + return err;
48941 +
48942 +}
48943 +
48944 +static int
48945 +gracl_init(struct gr_arg *args)
48946 +{
48947 + int error = 0;
48948 +
48949 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
48950 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
48951 +
48952 + if (init_variables(args)) {
48953 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
48954 + error = -ENOMEM;
48955 + free_variables();
48956 + goto out;
48957 + }
48958 +
48959 + error = copy_user_acl(args);
48960 + free_init_variables();
48961 + if (error) {
48962 + free_variables();
48963 + goto out;
48964 + }
48965 +
48966 + if ((error = gr_set_acls(0))) {
48967 + free_variables();
48968 + goto out;
48969 + }
48970 +
48971 + pax_open_kernel();
48972 + gr_status |= GR_READY;
48973 + pax_close_kernel();
48974 +
48975 + out:
48976 + return error;
48977 +}
48978 +
48979 +/* derived from glibc fnmatch(); returns 0 on match, 1 on no match */
48980 +
48981 +static int
48982 +glob_match(const char *p, const char *n)
48983 +{
48984 + char c;
48985 +
48986 + while ((c = *p++) != '\0') {
48987 + switch (c) {
48988 + case '?':
48989 + if (*n == '\0')
48990 + return 1;
48991 + else if (*n == '/')
48992 + return 1;
48993 + break;
48994 + case '\\':
48995 + if (*n != c)
48996 + return 1;
48997 + break;
48998 + case '*':
48999 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
49000 + if (*n == '/')
49001 + return 1;
49002 + else if (c == '?') {
49003 + if (*n == '\0')
49004 + return 1;
49005 + else
49006 + ++n;
49007 + }
49008 + }
49009 + if (c == '\0') {
49010 + return 0;
49011 + } else {
49012 + const char *endp;
49013 +
49014 + if ((endp = strchr(n, '/')) == NULL)
49015 + endp = n + strlen(n);
49016 +
49017 + if (c == '[') {
49018 + for (--p; n < endp; ++n)
49019 + if (!glob_match(p, n))
49020 + return 0;
49021 + } else if (c == '/') {
49022 + while (*n != '\0' && *n != '/')
49023 + ++n;
49024 + if (*n == '/' && !glob_match(p, n + 1))
49025 + return 0;
49026 + } else {
49027 + for (--p; n < endp; ++n)
49028 + if (*n == c && !glob_match(p, n))
49029 + return 0;
49030 + }
49031 +
49032 + return 1;
49033 + }
49034 + case '[':
49035 + {
49036 + int not;
49037 + char cold;
49038 +
49039 + if (*n == '\0' || *n == '/')
49040 + return 1;
49041 +
49042 + not = (*p == '!' || *p == '^');
49043 + if (not)
49044 + ++p;
49045 +
49046 + c = *p++;
49047 + for (;;) {
49048 + unsigned char fn = (unsigned char)*n;
49049 +
49050 + if (c == '\0')
49051 + return 1;
49052 + else {
49053 + if (c == fn)
49054 + goto matched;
49055 + cold = c;
49056 + c = *p++;
49057 +
49058 + if (c == '-' && *p != ']') {
49059 + unsigned char cend = *p++;
49060 +
49061 + if (cend == '\0')
49062 + return 1;
49063 +
49064 + if (cold <= fn && fn <= cend)
49065 + goto matched;
49066 +
49067 + c = *p++;
49068 + }
49069 + }
49070 +
49071 + if (c == ']')
49072 + break;
49073 + }
49074 + if (!not)
49075 + return 1;
49076 + break;
49077 + matched:
49078 + while (c != ']') {
49079 + if (c == '\0')
49080 + return 1;
49081 +
49082 + c = *p++;
49083 + }
49084 + if (not)
49085 + return 1;
49086 + }
49087 + break;
49088 + default:
49089 + if (c != *n)
49090 + return 1;
49091 + }
49092 +
49093 + ++n;
49094 + }
49095 +
49096 + if (*n == '\0')
49097 + return 0;
49098 +
49099 + if (*n == '/')
49100 + return 0;
49101 +
49102 + return 1;
49103 +}
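glob_match() above keeps the glibc fnmatch() return convention (0 means match, non-zero means no match) and, much like fnmatch() with FNM_PATHNAME, never lets '?' or a bracket set match a '/'. One deliberate difference: when the pattern runs out on a component boundary, glob_match() still reports a match, so a rule also covers paths below the directory it matched. A quick userspace feel for the shared part of those semantics, using the standard fnmatch(3):

    #include <fnmatch.h>
    #include <stdio.h>

    int main(void)
    {
        /* prints 0: '*' fills in exactly one path component */
        printf("%d\n", fnmatch("/home/*/.ssh", "/home/alice/.ssh", FNM_PATHNAME));
        /* prints non-zero: '?' is not allowed to match the '/' in "a/b" */
        printf("%d\n", fnmatch("/tmp/????", "/tmp/a/b", FNM_PATHNAME));
        return 0;
    }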
49104 +
49105 +static struct acl_object_label *
49106 +chk_glob_label(struct acl_object_label *globbed,
49107 + struct dentry *dentry, struct vfsmount *mnt, char **path)
49108 +{
49109 + struct acl_object_label *tmp;
49110 +
49111 + if (*path == NULL)
49112 + *path = gr_to_filename_nolock(dentry, mnt);
49113 +
49114 + tmp = globbed;
49115 +
49116 + while (tmp) {
49117 + if (!glob_match(tmp->filename, *path))
49118 + return tmp;
49119 + tmp = tmp->next;
49120 + }
49121 +
49122 + return NULL;
49123 +}
49124 +
49125 +static struct acl_object_label *
49126 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
49127 + const ino_t curr_ino, const dev_t curr_dev,
49128 + const struct acl_subject_label *subj, char **path, const int checkglob)
49129 +{
49130 + struct acl_subject_label *tmpsubj;
49131 + struct acl_object_label *retval;
49132 + struct acl_object_label *retval2;
49133 +
49134 + tmpsubj = (struct acl_subject_label *) subj;
49135 + read_lock(&gr_inode_lock);
49136 + do {
49137 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
49138 + if (retval) {
49139 + if (checkglob && retval->globbed) {
49140 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
49141 + (struct vfsmount *)orig_mnt, path);
49142 + if (retval2)
49143 + retval = retval2;
49144 + }
49145 + break;
49146 + }
49147 + } while ((tmpsubj = tmpsubj->parent_subject));
49148 + read_unlock(&gr_inode_lock);
49149 +
49150 + return retval;
49151 +}
49152 +
49153 +static __inline__ struct acl_object_label *
49154 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
49155 + const struct dentry *curr_dentry,
49156 + const struct acl_subject_label *subj, char **path, const int checkglob)
49157 +{
49158 + int newglob = checkglob;
49159 +
49160 +	/* if we aren't checking a subdirectory of the original path yet, don't do glob checking,
49161 +	   as we don't want a / * rule to match instead of the / object.
49162 +	   Don't do this for create lookups that call this function, though, since they're looking
49163 +	   up on the parent and thus need globbing checks on all paths
49164 +	*/
49165 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
49166 + newglob = GR_NO_GLOB;
49167 +
49168 + return __full_lookup(orig_dentry, orig_mnt,
49169 + curr_dentry->d_inode->i_ino,
49170 + __get_dev(curr_dentry), subj, path, newglob);
49171 +}
49172 +
49173 +static struct acl_object_label *
49174 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49175 + const struct acl_subject_label *subj, char *path, const int checkglob)
49176 +{
49177 + struct dentry *dentry = (struct dentry *) l_dentry;
49178 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
49179 + struct acl_object_label *retval;
49180 +
49181 + spin_lock(&dcache_lock);
49182 + spin_lock(&vfsmount_lock);
49183 +
49184 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
49185 +#ifdef CONFIG_NET
49186 + mnt == sock_mnt ||
49187 +#endif
49188 +#ifdef CONFIG_HUGETLBFS
49189 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
49190 +#endif
49191 + /* ignore Eric Biederman */
49192 + IS_PRIVATE(l_dentry->d_inode))) {
49193 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
49194 + goto out;
49195 + }
49196 +
49197 + for (;;) {
49198 + if (dentry == real_root && mnt == real_root_mnt)
49199 + break;
49200 +
49201 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
49202 + if (mnt->mnt_parent == mnt)
49203 + break;
49204 +
49205 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
49206 + if (retval != NULL)
49207 + goto out;
49208 +
49209 + dentry = mnt->mnt_mountpoint;
49210 + mnt = mnt->mnt_parent;
49211 + continue;
49212 + }
49213 +
49214 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
49215 + if (retval != NULL)
49216 + goto out;
49217 +
49218 + dentry = dentry->d_parent;
49219 + }
49220 +
49221 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
49222 +
49223 + if (retval == NULL)
49224 + retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
49225 +out:
49226 + spin_unlock(&vfsmount_lock);
49227 + spin_unlock(&dcache_lock);
49228 +
49229 + BUG_ON(retval == NULL);
49230 +
49231 + return retval;
49232 +}
49233 +
49234 +static __inline__ struct acl_object_label *
49235 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49236 + const struct acl_subject_label *subj)
49237 +{
49238 + char *path = NULL;
49239 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
49240 +}
49241 +
49242 +static __inline__ struct acl_object_label *
49243 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49244 + const struct acl_subject_label *subj)
49245 +{
49246 + char *path = NULL;
49247 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
49248 +}
49249 +
49250 +static __inline__ struct acl_object_label *
49251 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49252 + const struct acl_subject_label *subj, char *path)
49253 +{
49254 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
49255 +}
49256 +
49257 +static struct acl_subject_label *
49258 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49259 + const struct acl_role_label *role)
49260 +{
49261 + struct dentry *dentry = (struct dentry *) l_dentry;
49262 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
49263 + struct acl_subject_label *retval;
49264 +
49265 + spin_lock(&dcache_lock);
49266 + spin_lock(&vfsmount_lock);
49267 +
49268 + for (;;) {
49269 + if (dentry == real_root && mnt == real_root_mnt)
49270 + break;
49271 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
49272 + if (mnt->mnt_parent == mnt)
49273 + break;
49274 +
49275 + read_lock(&gr_inode_lock);
49276 + retval =
49277 + lookup_acl_subj_label(dentry->d_inode->i_ino,
49278 + __get_dev(dentry), role);
49279 + read_unlock(&gr_inode_lock);
49280 + if (retval != NULL)
49281 + goto out;
49282 +
49283 + dentry = mnt->mnt_mountpoint;
49284 + mnt = mnt->mnt_parent;
49285 + continue;
49286 + }
49287 +
49288 + read_lock(&gr_inode_lock);
49289 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
49290 + __get_dev(dentry), role);
49291 + read_unlock(&gr_inode_lock);
49292 + if (retval != NULL)
49293 + goto out;
49294 +
49295 + dentry = dentry->d_parent;
49296 + }
49297 +
49298 + read_lock(&gr_inode_lock);
49299 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
49300 + __get_dev(dentry), role);
49301 + read_unlock(&gr_inode_lock);
49302 +
49303 + if (unlikely(retval == NULL)) {
49304 + read_lock(&gr_inode_lock);
49305 + retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
49306 + __get_dev(real_root), role);
49307 + read_unlock(&gr_inode_lock);
49308 + }
49309 +out:
49310 + spin_unlock(&vfsmount_lock);
49311 + spin_unlock(&dcache_lock);
49312 +
49313 + BUG_ON(retval == NULL);
49314 +
49315 + return retval;
49316 +}
49317 +
49318 +static void
49319 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
49320 +{
49321 + struct task_struct *task = current;
49322 + const struct cred *cred = current_cred();
49323 +
49324 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
49325 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
49326 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
49327 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
49328 +
49329 + return;
49330 +}
49331 +
49332 +static void
49333 +gr_log_learn_sysctl(const char *path, const __u32 mode)
49334 +{
49335 + struct task_struct *task = current;
49336 + const struct cred *cred = current_cred();
49337 +
49338 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
49339 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
49340 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
49341 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
49342 +
49343 + return;
49344 +}
49345 +
49346 +static void
49347 +gr_log_learn_id_change(const char type, const unsigned int real,
49348 + const unsigned int effective, const unsigned int fs)
49349 +{
49350 + struct task_struct *task = current;
49351 + const struct cred *cred = current_cred();
49352 +
49353 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
49354 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
49355 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
49356 + type, real, effective, fs, &task->signal->saved_ip);
49357 +
49358 + return;
49359 +}
49360 +
49361 +__u32
49362 +gr_check_link(const struct dentry * new_dentry,
49363 + const struct dentry * parent_dentry,
49364 + const struct vfsmount * parent_mnt,
49365 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
49366 +{
49367 + struct acl_object_label *obj;
49368 + __u32 oldmode, newmode;
49369 + __u32 needmode;
49370 +
49371 + if (unlikely(!(gr_status & GR_READY)))
49372 + return (GR_CREATE | GR_LINK);
49373 +
49374 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
49375 + oldmode = obj->mode;
49376 +
49377 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49378 + oldmode |= (GR_CREATE | GR_LINK);
49379 +
49380 + needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
49381 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
49382 + needmode |= GR_SETID | GR_AUDIT_SETID;
49383 +
49384 + newmode =
49385 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
49386 + oldmode | needmode);
49387 +
49388 + needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
49389 + GR_SETID | GR_READ | GR_FIND | GR_DELETE |
49390 + GR_INHERIT | GR_AUDIT_INHERIT);
49391 +
49392 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
49393 + goto bad;
49394 +
49395 + if ((oldmode & needmode) != needmode)
49396 + goto bad;
49397 +
49398 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
49399 + if ((newmode & needmode) != needmode)
49400 + goto bad;
49401 +
49402 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
49403 + return newmode;
49404 +bad:
49405 + needmode = oldmode;
49406 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
49407 + needmode |= GR_SETID;
49408 +
49409 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
49410 + gr_log_learn(old_dentry, old_mnt, needmode);
49411 + return (GR_CREATE | GR_LINK);
49412 + } else if (newmode & GR_SUPPRESS)
49413 + return GR_SUPPRESS;
49414 + else
49415 + return 0;
49416 +}
49417 +
49418 +__u32
49419 +gr_search_file(const struct dentry * dentry, const __u32 mode,
49420 + const struct vfsmount * mnt)
49421 +{
49422 + __u32 retval = mode;
49423 + struct acl_subject_label *curracl;
49424 + struct acl_object_label *currobj;
49425 +
49426 + if (unlikely(!(gr_status & GR_READY)))
49427 + return (mode & ~GR_AUDITS);
49428 +
49429 + curracl = current->acl;
49430 +
49431 + currobj = chk_obj_label(dentry, mnt, curracl);
49432 + retval = currobj->mode & mode;
49433 +
49434 + /* if we're opening a specified transfer file for writing
49435 + (e.g. /dev/initctl), then transfer our role to init
49436 + */
49437 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
49438 + current->role->roletype & GR_ROLE_PERSIST)) {
49439 + struct task_struct *task = init_pid_ns.child_reaper;
49440 +
49441 + if (task->role != current->role) {
49442 + task->acl_sp_role = 0;
49443 + task->acl_role_id = current->acl_role_id;
49444 + task->role = current->role;
49445 + rcu_read_lock();
49446 + read_lock(&grsec_exec_file_lock);
49447 + gr_apply_subject_to_task(task);
49448 + read_unlock(&grsec_exec_file_lock);
49449 + rcu_read_unlock();
49450 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
49451 + }
49452 + }
49453 +
49454 + if (unlikely
49455 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
49456 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
49457 + __u32 new_mode = mode;
49458 +
49459 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
49460 +
49461 + retval = new_mode;
49462 +
49463 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
49464 + new_mode |= GR_INHERIT;
49465 +
49466 + if (!(mode & GR_NOLEARN))
49467 + gr_log_learn(dentry, mnt, new_mode);
49468 + }
49469 +
49470 + return retval;
49471 +}
49472 +
49473 +__u32
49474 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
49475 + const struct vfsmount * mnt, const __u32 mode)
49476 +{
49477 + struct name_entry *match;
49478 + struct acl_object_label *matchpo;
49479 + struct acl_subject_label *curracl;
49480 + char *path;
49481 + __u32 retval;
49482 +
49483 + if (unlikely(!(gr_status & GR_READY)))
49484 + return (mode & ~GR_AUDITS);
49485 +
49486 + preempt_disable();
49487 + path = gr_to_filename_rbac(new_dentry, mnt);
49488 + match = lookup_name_entry_create(path);
49489 +
49490 + if (!match)
49491 + goto check_parent;
49492 +
49493 + curracl = current->acl;
49494 +
49495 + read_lock(&gr_inode_lock);
49496 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
49497 + read_unlock(&gr_inode_lock);
49498 +
49499 + if (matchpo) {
49500 + if ((matchpo->mode & mode) !=
49501 + (mode & ~(GR_AUDITS | GR_SUPPRESS))
49502 + && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
49503 + __u32 new_mode = mode;
49504 +
49505 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
49506 +
49507 + gr_log_learn(new_dentry, mnt, new_mode);
49508 +
49509 + preempt_enable();
49510 + return new_mode;
49511 + }
49512 + preempt_enable();
49513 + return (matchpo->mode & mode);
49514 + }
49515 +
49516 + check_parent:
49517 + curracl = current->acl;
49518 +
49519 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
49520 + retval = matchpo->mode & mode;
49521 +
49522 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
49523 + && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
49524 + __u32 new_mode = mode;
49525 +
49526 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
49527 +
49528 + gr_log_learn(new_dentry, mnt, new_mode);
49529 + preempt_enable();
49530 + return new_mode;
49531 + }
49532 +
49533 + preempt_enable();
49534 + return retval;
49535 +}
49536 +
49537 +int
49538 +gr_check_hidden_task(const struct task_struct *task)
49539 +{
49540 + if (unlikely(!(gr_status & GR_READY)))
49541 + return 0;
49542 +
49543 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
49544 + return 1;
49545 +
49546 + return 0;
49547 +}
49548 +
49549 +int
49550 +gr_check_protected_task(const struct task_struct *task)
49551 +{
49552 + if (unlikely(!(gr_status & GR_READY) || !task))
49553 + return 0;
49554 +
49555 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
49556 + task->acl != current->acl)
49557 + return 1;
49558 +
49559 + return 0;
49560 +}
49561 +
49562 +int
49563 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
49564 +{
49565 + struct task_struct *p;
49566 + int ret = 0;
49567 +
49568 + if (unlikely(!(gr_status & GR_READY) || !pid))
49569 + return ret;
49570 +
49571 + read_lock(&tasklist_lock);
49572 + do_each_pid_task(pid, type, p) {
49573 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
49574 + p->acl != current->acl) {
49575 + ret = 1;
49576 + goto out;
49577 + }
49578 + } while_each_pid_task(pid, type, p);
49579 +out:
49580 + read_unlock(&tasklist_lock);
49581 +
49582 + return ret;
49583 +}
49584 +
49585 +void
49586 +gr_copy_label(struct task_struct *tsk)
49587 +{
49588 + tsk->signal->used_accept = 0;
49589 + tsk->acl_sp_role = 0;
49590 + tsk->acl_role_id = current->acl_role_id;
49591 + tsk->acl = current->acl;
49592 + tsk->role = current->role;
49593 + tsk->signal->curr_ip = current->signal->curr_ip;
49594 + tsk->signal->saved_ip = current->signal->saved_ip;
49595 + if (current->exec_file)
49596 + get_file(current->exec_file);
49597 + tsk->exec_file = current->exec_file;
49598 + tsk->is_writable = current->is_writable;
49599 + if (unlikely(current->signal->used_accept)) {
49600 + current->signal->curr_ip = 0;
49601 + current->signal->saved_ip = 0;
49602 + }
49603 +
49604 + return;
49605 +}
49606 +
49607 +static void
49608 +gr_set_proc_res(struct task_struct *task)
49609 +{
49610 + struct acl_subject_label *proc;
49611 + unsigned short i;
49612 +
49613 + proc = task->acl;
49614 +
49615 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
49616 + return;
49617 +
49618 + for (i = 0; i < RLIM_NLIMITS; i++) {
49619 + if (!(proc->resmask & (1 << i)))
49620 + continue;
49621 +
49622 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
49623 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
49624 + }
49625 +
49626 + return;
49627 +}
49628 +
49629 +extern int __gr_process_user_ban(struct user_struct *user);
49630 +
49631 +int
49632 +gr_check_user_change(int real, int effective, int fs)
49633 +{
49634 + unsigned int i;
49635 + __u16 num;
49636 + uid_t *uidlist;
49637 + int curuid;
49638 + int realok = 0;
49639 + int effectiveok = 0;
49640 + int fsok = 0;
49641 +
49642 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
49643 + struct user_struct *user;
49644 +
49645 + if (real == -1)
49646 + goto skipit;
49647 +
49648 + user = find_user(real);
49649 + if (user == NULL)
49650 + goto skipit;
49651 +
49652 + if (__gr_process_user_ban(user)) {
49653 + /* for find_user */
49654 + free_uid(user);
49655 + return 1;
49656 + }
49657 +
49658 + /* for find_user */
49659 + free_uid(user);
49660 +
49661 +skipit:
49662 +#endif
49663 +
49664 + if (unlikely(!(gr_status & GR_READY)))
49665 + return 0;
49666 +
49667 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49668 + gr_log_learn_id_change('u', real, effective, fs);
49669 +
49670 + num = current->acl->user_trans_num;
49671 + uidlist = current->acl->user_transitions;
49672 +
49673 + if (uidlist == NULL)
49674 + return 0;
49675 +
49676 + if (real == -1)
49677 + realok = 1;
49678 + if (effective == -1)
49679 + effectiveok = 1;
49680 + if (fs == -1)
49681 + fsok = 1;
49682 +
49683 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
49684 + for (i = 0; i < num; i++) {
49685 + curuid = (int)uidlist[i];
49686 + if (real == curuid)
49687 + realok = 1;
49688 + if (effective == curuid)
49689 + effectiveok = 1;
49690 + if (fs == curuid)
49691 + fsok = 1;
49692 + }
49693 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
49694 + for (i = 0; i < num; i++) {
49695 + curuid = (int)uidlist[i];
49696 + if (real == curuid)
49697 + break;
49698 + if (effective == curuid)
49699 + break;
49700 + if (fs == curuid)
49701 + break;
49702 + }
49703 + /* not in deny list */
49704 + if (i == num) {
49705 + realok = 1;
49706 + effectiveok = 1;
49707 + fsok = 1;
49708 + }
49709 + }
49710 +
49711 + if (realok && effectiveok && fsok)
49712 + return 0;
49713 + else {
49714 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
49715 + return 1;
49716 + }
49717 +}
49718 +
49719 +int
49720 +gr_check_group_change(int real, int effective, int fs)
49721 +{
49722 + unsigned int i;
49723 + __u16 num;
49724 + gid_t *gidlist;
49725 + int curgid;
49726 + int realok = 0;
49727 + int effectiveok = 0;
49728 + int fsok = 0;
49729 +
49730 + if (unlikely(!(gr_status & GR_READY)))
49731 + return 0;
49732 +
49733 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49734 + gr_log_learn_id_change('g', real, effective, fs);
49735 +
49736 + num = current->acl->group_trans_num;
49737 + gidlist = current->acl->group_transitions;
49738 +
49739 + if (gidlist == NULL)
49740 + return 0;
49741 +
49742 + if (real == -1)
49743 + realok = 1;
49744 + if (effective == -1)
49745 + effectiveok = 1;
49746 + if (fs == -1)
49747 + fsok = 1;
49748 +
49749 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
49750 + for (i = 0; i < num; i++) {
49751 + curgid = (int)gidlist[i];
49752 + if (real == curgid)
49753 + realok = 1;
49754 + if (effective == curgid)
49755 + effectiveok = 1;
49756 + if (fs == curgid)
49757 + fsok = 1;
49758 + }
49759 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
49760 + for (i = 0; i < num; i++) {
49761 + curgid = (int)gidlist[i];
49762 + if (real == curgid)
49763 + break;
49764 + if (effective == curgid)
49765 + break;
49766 + if (fs == curgid)
49767 + break;
49768 + }
49769 + /* not in deny list */
49770 + if (i == num) {
49771 + realok = 1;
49772 + effectiveok = 1;
49773 + fsok = 1;
49774 + }
49775 + }
49776 +
49777 + if (realok && effectiveok && fsok)
49778 + return 0;
49779 + else {
49780 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
49781 + return 1;
49782 + }
49783 +}
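gr_check_user_change() and gr_check_group_change() above apply the same policy: with GR_ID_ALLOW the transition list enumerates the only IDs a subject may switch to, with GR_ID_DENY it enumerates the forbidden ones, and an ID of -1 means "not being changed". A minimal sketch of that decision for a single requested ID, illustrative only; the names below are stand-ins rather than grsecurity symbols.

    #include <stddef.h>

    #define ID_ALLOW 1    /* list enumerates the only permitted IDs */
    #define ID_DENY  2    /* list enumerates the forbidden IDs */

    /* returns 0 when the requested ID change is permitted, 1 when it is denied,
     * mirroring the 0/1 convention of the two functions above */
    static int id_change_denied(int id, const int *list, size_t num, int type)
    {
        size_t i;

        if (list == NULL || id == -1)    /* no transition policy, or ID left unchanged */
            return 0;

        if (type == ID_ALLOW) {
            for (i = 0; i < num; i++)
                if (id == list[i])
                    return 0;            /* explicitly allowed */
            return 1;                    /* not on the allow list */
        }
        if (type == ID_DENY) {
            for (i = 0; i < num; i++)
                if (id == list[i])
                    return 1;            /* explicitly denied */
        }
        return 0;                        /* not on the deny list (or no list type set) */
    }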
49784 +
49785 +void
49786 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
49787 +{
49788 + struct acl_role_label *role = task->role;
49789 + struct acl_subject_label *subj = NULL;
49790 + struct acl_object_label *obj;
49791 + struct file *filp;
49792 +
49793 + if (unlikely(!(gr_status & GR_READY)))
49794 + return;
49795 +
49796 + filp = task->exec_file;
49797 +
49798 + /* kernel process, we'll give them the kernel role */
49799 + if (unlikely(!filp)) {
49800 + task->role = kernel_role;
49801 + task->acl = kernel_role->root_label;
49802 + return;
49803 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
49804 + role = lookup_acl_role_label(task, uid, gid);
49805 +
49806 + /* perform subject lookup in possibly new role
49807 + we can use this result below in the case where role == task->role
49808 + */
49809 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
49810 +
49811 +	/* if we changed uid/gid but ended up in the same role
49812 +	   and are using inheritance, don't lose the inherited subject:
49813 +	   if the current subject is other than what a normal lookup
49814 +	   would yield, we arrived at it via inheritance, so don't
49815 +	   lose that subject
49816 +	 */
49817 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
49818 + (subj == task->acl)))
49819 + task->acl = subj;
49820 +
49821 + task->role = role;
49822 +
49823 + task->is_writable = 0;
49824 +
49825 + /* ignore additional mmap checks for processes that are writable
49826 + by the default ACL */
49827 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49828 + if (unlikely(obj->mode & GR_WRITE))
49829 + task->is_writable = 1;
49830 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
49831 + if (unlikely(obj->mode & GR_WRITE))
49832 + task->is_writable = 1;
49833 +
49834 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49835 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49836 +#endif
49837 +
49838 + gr_set_proc_res(task);
49839 +
49840 + return;
49841 +}
49842 +
49843 +int
49844 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
49845 + const int unsafe_share)
49846 +{
49847 + struct task_struct *task = current;
49848 + struct acl_subject_label *newacl;
49849 + struct acl_object_label *obj;
49850 + __u32 retmode;
49851 +
49852 + if (unlikely(!(gr_status & GR_READY)))
49853 + return 0;
49854 +
49855 + newacl = chk_subj_label(dentry, mnt, task->role);
49856 +
49857 + task_lock(task);
49858 + if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
49859 + !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
49860 + !(task->role->roletype & GR_ROLE_GOD) &&
49861 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
49862 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
49863 + task_unlock(task);
49864 + if (unsafe_share)
49865 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
49866 + else
49867 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
49868 + return -EACCES;
49869 + }
49870 + task_unlock(task);
49871 +
49872 + obj = chk_obj_label(dentry, mnt, task->acl);
49873 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
49874 +
49875 + if (!(task->acl->mode & GR_INHERITLEARN) &&
49876 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
49877 + if (obj->nested)
49878 + task->acl = obj->nested;
49879 + else
49880 + task->acl = newacl;
49881 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
49882 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
49883 +
49884 + task->is_writable = 0;
49885 +
49886 + /* ignore additional mmap checks for processes that are writable
49887 + by the default ACL */
49888 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
49889 + if (unlikely(obj->mode & GR_WRITE))
49890 + task->is_writable = 1;
49891 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
49892 + if (unlikely(obj->mode & GR_WRITE))
49893 + task->is_writable = 1;
49894 +
49895 + gr_set_proc_res(task);
49896 +
49897 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49898 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49899 +#endif
49900 + return 0;
49901 +}
49902 +
49903 +/* always called with valid inodev ptr */
49904 +static void
49905 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
49906 +{
49907 + struct acl_object_label *matchpo;
49908 + struct acl_subject_label *matchps;
49909 + struct acl_subject_label *subj;
49910 + struct acl_role_label *role;
49911 + unsigned int x;
49912 +
49913 + FOR_EACH_ROLE_START(role)
49914 + FOR_EACH_SUBJECT_START(role, subj, x)
49915 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
49916 + matchpo->mode |= GR_DELETED;
49917 + FOR_EACH_SUBJECT_END(subj,x)
49918 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
49919 + if (subj->inode == ino && subj->device == dev)
49920 + subj->mode |= GR_DELETED;
49921 + FOR_EACH_NESTED_SUBJECT_END(subj)
49922 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
49923 + matchps->mode |= GR_DELETED;
49924 + FOR_EACH_ROLE_END(role)
49925 +
49926 + inodev->nentry->deleted = 1;
49927 +
49928 + return;
49929 +}
49930 +
49931 +void
49932 +gr_handle_delete(const ino_t ino, const dev_t dev)
49933 +{
49934 + struct inodev_entry *inodev;
49935 +
49936 + if (unlikely(!(gr_status & GR_READY)))
49937 + return;
49938 +
49939 + write_lock(&gr_inode_lock);
49940 + inodev = lookup_inodev_entry(ino, dev);
49941 + if (inodev != NULL)
49942 + do_handle_delete(inodev, ino, dev);
49943 + write_unlock(&gr_inode_lock);
49944 +
49945 + return;
49946 +}
49947 +
49948 +static void
49949 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
49950 + const ino_t newinode, const dev_t newdevice,
49951 + struct acl_subject_label *subj)
49952 +{
49953 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
49954 + struct acl_object_label *match;
49955 +
49956 + match = subj->obj_hash[index];
49957 +
49958 + while (match && (match->inode != oldinode ||
49959 + match->device != olddevice ||
49960 + !(match->mode & GR_DELETED)))
49961 + match = match->next;
49962 +
49963 + if (match && (match->inode == oldinode)
49964 + && (match->device == olddevice)
49965 + && (match->mode & GR_DELETED)) {
49966 + if (match->prev == NULL) {
49967 + subj->obj_hash[index] = match->next;
49968 + if (match->next != NULL)
49969 + match->next->prev = NULL;
49970 + } else {
49971 + match->prev->next = match->next;
49972 + if (match->next != NULL)
49973 + match->next->prev = match->prev;
49974 + }
49975 + match->prev = NULL;
49976 + match->next = NULL;
49977 + match->inode = newinode;
49978 + match->device = newdevice;
49979 + match->mode &= ~GR_DELETED;
49980 +
49981 + insert_acl_obj_label(match, subj);
49982 + }
49983 +
49984 + return;
49985 +}
49986 +
49987 +static void
49988 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
49989 + const ino_t newinode, const dev_t newdevice,
49990 + struct acl_role_label *role)
49991 +{
49992 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
49993 + struct acl_subject_label *match;
49994 +
49995 + match = role->subj_hash[index];
49996 +
49997 + while (match && (match->inode != oldinode ||
49998 + match->device != olddevice ||
49999 + !(match->mode & GR_DELETED)))
50000 + match = match->next;
50001 +
50002 + if (match && (match->inode == oldinode)
50003 + && (match->device == olddevice)
50004 + && (match->mode & GR_DELETED)) {
50005 + if (match->prev == NULL) {
50006 + role->subj_hash[index] = match->next;
50007 + if (match->next != NULL)
50008 + match->next->prev = NULL;
50009 + } else {
50010 + match->prev->next = match->next;
50011 + if (match->next != NULL)
50012 + match->next->prev = match->prev;
50013 + }
50014 + match->prev = NULL;
50015 + match->next = NULL;
50016 + match->inode = newinode;
50017 + match->device = newdevice;
50018 + match->mode &= ~GR_DELETED;
50019 +
50020 + insert_acl_subj_label(match, role);
50021 + }
50022 +
50023 + return;
50024 +}
50025 +
50026 +static void
50027 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
50028 + const ino_t newinode, const dev_t newdevice)
50029 +{
50030 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
50031 + struct inodev_entry *match;
50032 +
50033 + match = inodev_set.i_hash[index];
50034 +
50035 + while (match && (match->nentry->inode != oldinode ||
50036 + match->nentry->device != olddevice || !match->nentry->deleted))
50037 + match = match->next;
50038 +
50039 + if (match && (match->nentry->inode == oldinode)
50040 + && (match->nentry->device == olddevice) &&
50041 + match->nentry->deleted) {
50042 + if (match->prev == NULL) {
50043 + inodev_set.i_hash[index] = match->next;
50044 + if (match->next != NULL)
50045 + match->next->prev = NULL;
50046 + } else {
50047 + match->prev->next = match->next;
50048 + if (match->next != NULL)
50049 + match->next->prev = match->prev;
50050 + }
50051 + match->prev = NULL;
50052 + match->next = NULL;
50053 + match->nentry->inode = newinode;
50054 + match->nentry->device = newdevice;
50055 + match->nentry->deleted = 0;
50056 +
50057 + insert_inodev_entry(match);
50058 + }
50059 +
50060 + return;
50061 +}
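update_acl_obj_label(), update_acl_subj_label() and update_inodev_entry() above all perform the same surgery: find the GR_DELETED entry for the old inode/device pair in its hash chain, unlink it, stamp it with the new identity and re-insert it under the new key. The unlink step, reduced to a bare doubly linked node, illustrative only ("bucket" stands in for the obj_hash/subj_hash/i_hash slot):

    struct hnode {
        struct hnode *prev, *next;
    };

    /* detach node from the chain rooted at *bucket; the caller then updates
     * the node's key and re-inserts it, as the three helpers above do */
    static void unlink_from_bucket(struct hnode **bucket, struct hnode *node)
    {
        if (node->prev == NULL) {            /* node is first in the bucket */
            *bucket = node->next;
            if (node->next != NULL)
                node->next->prev = NULL;
        } else {
            node->prev->next = node->next;
            if (node->next != NULL)
                node->next->prev = node->prev;
        }
        node->prev = NULL;
        node->next = NULL;
    }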
50062 +
50063 +static void
50064 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
50065 + const struct vfsmount *mnt)
50066 +{
50067 + struct acl_subject_label *subj;
50068 + struct acl_role_label *role;
50069 + unsigned int x;
50070 + ino_t inode = dentry->d_inode->i_ino;
50071 + dev_t dev = __get_dev(dentry);
50072 +
50073 + FOR_EACH_ROLE_START(role)
50074 + update_acl_subj_label(matchn->inode, matchn->device,
50075 + inode, dev, role);
50076 +
50077 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
50078 + if ((subj->inode == inode) && (subj->device == dev)) {
50079 + subj->inode = inode;
50080 + subj->device = dev;
50081 + }
50082 + FOR_EACH_NESTED_SUBJECT_END(subj)
50083 + FOR_EACH_SUBJECT_START(role, subj, x)
50084 + update_acl_obj_label(matchn->inode, matchn->device,
50085 + inode, dev, subj);
50086 + FOR_EACH_SUBJECT_END(subj,x)
50087 + FOR_EACH_ROLE_END(role)
50088 +
50089 + update_inodev_entry(matchn->inode, matchn->device, inode, dev);
50090 +
50091 + return;
50092 +}
50093 +
50094 +void
50095 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
50096 +{
50097 + struct name_entry *matchn;
50098 +
50099 + if (unlikely(!(gr_status & GR_READY)))
50100 + return;
50101 +
50102 + preempt_disable();
50103 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
50104 +
50105 + if (unlikely((unsigned long)matchn)) {
50106 + write_lock(&gr_inode_lock);
50107 + do_handle_create(matchn, dentry, mnt);
50108 + write_unlock(&gr_inode_lock);
50109 + }
50110 + preempt_enable();
50111 +
50112 + return;
50113 +}
50114 +
50115 +void
50116 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
50117 + struct dentry *old_dentry,
50118 + struct dentry *new_dentry,
50119 + struct vfsmount *mnt, const __u8 replace)
50120 +{
50121 + struct name_entry *matchn;
50122 + struct inodev_entry *inodev;
50123 + ino_t oldinode = old_dentry->d_inode->i_ino;
50124 + dev_t olddev = __get_dev(old_dentry);
50125 +
50126 + /* vfs_rename swaps the name and parent link for old_dentry and
50127 + new_dentry
50128 + at this point, old_dentry has the new name, parent link, and inode
50129 + for the renamed file
50130 + if a file is being replaced by a rename, new_dentry has the inode
50131 + and name for the replaced file
50132 + */
50133 +
50134 + if (unlikely(!(gr_status & GR_READY)))
50135 + return;
50136 +
50137 + preempt_disable();
50138 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
50139 +
50140 + /* we wouldn't have to check d_inode if it weren't for
50141 + NFS silly-renaming
50142 + */
50143 +
50144 + write_lock(&gr_inode_lock);
50145 + if (unlikely(replace && new_dentry->d_inode)) {
50146 + ino_t newinode = new_dentry->d_inode->i_ino;
50147 + dev_t newdev = __get_dev(new_dentry);
50148 + inodev = lookup_inodev_entry(newinode, newdev);
50149 + if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
50150 + do_handle_delete(inodev, newinode, newdev);
50151 + }
50152 +
50153 + inodev = lookup_inodev_entry(oldinode, olddev);
50154 + if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
50155 + do_handle_delete(inodev, oldinode, olddev);
50156 +
50157 + if (unlikely((unsigned long)matchn))
50158 + do_handle_create(matchn, old_dentry, mnt);
50159 +
50160 + write_unlock(&gr_inode_lock);
50161 + preempt_enable();
50162 +
50163 + return;
50164 +}
50165 +
50166 +static int
50167 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
50168 + unsigned char **sum)
50169 +{
50170 + struct acl_role_label *r;
50171 + struct role_allowed_ip *ipp;
50172 + struct role_transition *trans;
50173 + unsigned int i;
50174 + int found = 0;
50175 + u32 curr_ip = current->signal->curr_ip;
50176 +
50177 + current->signal->saved_ip = curr_ip;
50178 +
50179 + /* check transition table */
50180 +
50181 + for (trans = current->role->transitions; trans; trans = trans->next) {
50182 + if (!strcmp(rolename, trans->rolename)) {
50183 + found = 1;
50184 + break;
50185 + }
50186 + }
50187 +
50188 + if (!found)
50189 + return 0;
50190 +
50191 + /* handle special roles that do not require authentication
50192 + and check ip */
50193 +
50194 + FOR_EACH_ROLE_START(r)
50195 + if (!strcmp(rolename, r->rolename) &&
50196 + (r->roletype & GR_ROLE_SPECIAL)) {
50197 + found = 0;
50198 + if (r->allowed_ips != NULL) {
50199 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
50200 + if ((ntohl(curr_ip) & ipp->netmask) ==
50201 + (ntohl(ipp->addr) & ipp->netmask))
50202 + found = 1;
50203 + }
50204 + } else
50205 + found = 2;
50206 + if (!found)
50207 + return 0;
50208 +
50209 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
50210 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
50211 + *salt = NULL;
50212 + *sum = NULL;
50213 + return 1;
50214 + }
50215 + }
50216 + FOR_EACH_ROLE_END(r)
50217 +
50218 + for (i = 0; i < num_sprole_pws; i++) {
50219 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
50220 + *salt = acl_special_roles[i]->salt;
50221 + *sum = acl_special_roles[i]->sum;
50222 + return 1;
50223 + }
50224 + }
50225 +
50226 + return 0;
50227 +}
50228 +
50229 +static void
50230 +assign_special_role(char *rolename)
50231 +{
50232 + struct acl_object_label *obj;
50233 + struct acl_role_label *r;
50234 + struct acl_role_label *assigned = NULL;
50235 + struct task_struct *tsk;
50236 + struct file *filp;
50237 +
50238 + FOR_EACH_ROLE_START(r)
50239 + if (!strcmp(rolename, r->rolename) &&
50240 + (r->roletype & GR_ROLE_SPECIAL)) {
50241 + assigned = r;
50242 + break;
50243 + }
50244 + FOR_EACH_ROLE_END(r)
50245 +
50246 + if (!assigned)
50247 + return;
50248 +
50249 + read_lock(&tasklist_lock);
50250 + read_lock(&grsec_exec_file_lock);
50251 +
50252 + tsk = current->real_parent;
50253 + if (tsk == NULL)
50254 + goto out_unlock;
50255 +
50256 + filp = tsk->exec_file;
50257 + if (filp == NULL)
50258 + goto out_unlock;
50259 +
50260 + tsk->is_writable = 0;
50261 +
50262 + tsk->acl_sp_role = 1;
50263 + tsk->acl_role_id = ++acl_sp_role_value;
50264 + tsk->role = assigned;
50265 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
50266 +
50267 + /* ignore additional mmap checks for processes that are writable
50268 + by the default ACL */
50269 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50270 + if (unlikely(obj->mode & GR_WRITE))
50271 + tsk->is_writable = 1;
50272 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
50273 + if (unlikely(obj->mode & GR_WRITE))
50274 + tsk->is_writable = 1;
50275 +
50276 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50277 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
50278 +#endif
50279 +
50280 +out_unlock:
50281 + read_unlock(&grsec_exec_file_lock);
50282 + read_unlock(&tasklist_lock);
50283 + return;
50284 +}
50285 +
50286 +int gr_check_secure_terminal(struct task_struct *task)
50287 +{
50288 + struct task_struct *p, *p2, *p3;
50289 + struct files_struct *files;
50290 + struct fdtable *fdt;
50291 + struct file *our_file = NULL, *file;
50292 + int i;
50293 +
50294 + if (task->signal->tty == NULL)
50295 + return 1;
50296 +
50297 + files = get_files_struct(task);
50298 + if (files != NULL) {
50299 + rcu_read_lock();
50300 + fdt = files_fdtable(files);
50301 + for (i=0; i < fdt->max_fds; i++) {
50302 + file = fcheck_files(files, i);
50303 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
50304 + get_file(file);
50305 + our_file = file;
50306 + }
50307 + }
50308 + rcu_read_unlock();
50309 + put_files_struct(files);
50310 + }
50311 +
50312 + if (our_file == NULL)
50313 + return 1;
50314 +
50315 + read_lock(&tasklist_lock);
50316 + do_each_thread(p2, p) {
50317 + files = get_files_struct(p);
50318 + if (files == NULL ||
50319 + (p->signal && p->signal->tty == task->signal->tty)) {
50320 + if (files != NULL)
50321 + put_files_struct(files);
50322 + continue;
50323 + }
50324 + rcu_read_lock();
50325 + fdt = files_fdtable(files);
50326 + for (i=0; i < fdt->max_fds; i++) {
50327 + file = fcheck_files(files, i);
50328 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
50329 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
50330 + p3 = task;
50331 + while (p3->pid > 0) {
50332 + if (p3 == p)
50333 + break;
50334 + p3 = p3->real_parent;
50335 + }
50336 + if (p3 == p)
50337 + break;
50338 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
50339 + gr_handle_alertkill(p);
50340 + rcu_read_unlock();
50341 + put_files_struct(files);
50342 + read_unlock(&tasklist_lock);
50343 + fput(our_file);
50344 + return 0;
50345 + }
50346 + }
50347 + rcu_read_unlock();
50348 + put_files_struct(files);
50349 + } while_each_thread(p2, p);
50350 + read_unlock(&tasklist_lock);
50351 +
50352 + fput(our_file);
50353 + return 1;
50354 +}
50355 +
50356 +ssize_t
50357 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
50358 +{
50359 + struct gr_arg_wrapper uwrap;
50360 + unsigned char *sprole_salt = NULL;
50361 + unsigned char *sprole_sum = NULL;
50362 + int error = sizeof (struct gr_arg_wrapper);
50363 + int error2 = 0;
50364 +
50365 + mutex_lock(&gr_dev_mutex);
50366 +
50367 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
50368 + error = -EPERM;
50369 + goto out;
50370 + }
50371 +
50372 + if (count != sizeof (struct gr_arg_wrapper)) {
50373 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
50374 + error = -EINVAL;
50375 + goto out;
50376 + }
50377 +
50378 +
50379 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
50380 + gr_auth_expires = 0;
50381 + gr_auth_attempts = 0;
50382 + }
50383 +
50384 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
50385 + error = -EFAULT;
50386 + goto out;
50387 + }
50388 +
50389 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
50390 + error = -EINVAL;
50391 + goto out;
50392 + }
50393 +
50394 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
50395 + error = -EFAULT;
50396 + goto out;
50397 + }
50398 +
50399 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
50400 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
50401 + time_after(gr_auth_expires, get_seconds())) {
50402 + error = -EBUSY;
50403 + goto out;
50404 + }
50405 +
50406 +	/* if a non-root user is trying to do anything other than use a
50407 +	   special role, do not attempt authentication and do not count it
50408 +	   towards the authentication lockout
50409 +	 */
50410 +
50411 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
50412 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
50413 + current_uid()) {
50414 + error = -EPERM;
50415 + goto out;
50416 + }
50417 +
50418 + /* ensure pw and special role name are null terminated */
50419 +
50420 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
50421 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
50422 +
50423 +	/* Okay.
50424 +	 * We have enough of the argument structure (we have yet
50425 +	 * to copy_from_user the tables themselves).  Copy the tables
50426 +	 * only if we need them, i.e. for loading operations. */
50427 +
50428 + switch (gr_usermode->mode) {
50429 + case GR_STATUS:
50430 + if (gr_status & GR_READY) {
50431 + error = 1;
50432 + if (!gr_check_secure_terminal(current))
50433 + error = 3;
50434 + } else
50435 + error = 2;
50436 + goto out;
50437 + case GR_SHUTDOWN:
50438 + if ((gr_status & GR_READY)
50439 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50440 + pax_open_kernel();
50441 + gr_status &= ~GR_READY;
50442 + pax_close_kernel();
50443 +
50444 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
50445 + free_variables();
50446 + memset(gr_usermode, 0, sizeof (struct gr_arg));
50447 + memset(gr_system_salt, 0, GR_SALT_LEN);
50448 + memset(gr_system_sum, 0, GR_SHA_LEN);
50449 + } else if (gr_status & GR_READY) {
50450 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
50451 + error = -EPERM;
50452 + } else {
50453 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
50454 + error = -EAGAIN;
50455 + }
50456 + break;
50457 + case GR_ENABLE:
50458 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
50459 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
50460 + else {
50461 + if (gr_status & GR_READY)
50462 + error = -EAGAIN;
50463 + else
50464 + error = error2;
50465 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
50466 + }
50467 + break;
50468 + case GR_RELOAD:
50469 + if (!(gr_status & GR_READY)) {
50470 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
50471 + error = -EAGAIN;
50472 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50473 + lock_kernel();
50474 +
50475 + pax_open_kernel();
50476 + gr_status &= ~GR_READY;
50477 + pax_close_kernel();
50478 +
50479 + free_variables();
50480 + if (!(error2 = gracl_init(gr_usermode))) {
50481 + unlock_kernel();
50482 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
50483 + } else {
50484 + unlock_kernel();
50485 + error = error2;
50486 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
50487 + }
50488 + } else {
50489 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
50490 + error = -EPERM;
50491 + }
50492 + break;
50493 + case GR_SEGVMOD:
50494 + if (unlikely(!(gr_status & GR_READY))) {
50495 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
50496 + error = -EAGAIN;
50497 + break;
50498 + }
50499 +
50500 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50501 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
50502 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
50503 + struct acl_subject_label *segvacl;
50504 + segvacl =
50505 + lookup_acl_subj_label(gr_usermode->segv_inode,
50506 + gr_usermode->segv_device,
50507 + current->role);
50508 + if (segvacl) {
50509 + segvacl->crashes = 0;
50510 + segvacl->expires = 0;
50511 + }
50512 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
50513 + gr_remove_uid(gr_usermode->segv_uid);
50514 + }
50515 + } else {
50516 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
50517 + error = -EPERM;
50518 + }
50519 + break;
50520 + case GR_SPROLE:
50521 + case GR_SPROLEPAM:
50522 + if (unlikely(!(gr_status & GR_READY))) {
50523 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
50524 + error = -EAGAIN;
50525 + break;
50526 + }
50527 +
50528 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
50529 + current->role->expires = 0;
50530 + current->role->auth_attempts = 0;
50531 + }
50532 +
50533 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
50534 + time_after(current->role->expires, get_seconds())) {
50535 + error = -EBUSY;
50536 + goto out;
50537 + }
50538 +
50539 + if (lookup_special_role_auth
50540 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
50541 + && ((!sprole_salt && !sprole_sum)
50542 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
50543 + char *p = "";
50544 + assign_special_role(gr_usermode->sp_role);
50545 + read_lock(&tasklist_lock);
50546 + if (current->real_parent)
50547 + p = current->real_parent->role->rolename;
50548 + read_unlock(&tasklist_lock);
50549 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
50550 + p, acl_sp_role_value);
50551 + } else {
50552 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
50553 + error = -EPERM;
50554 + if(!(current->role->auth_attempts++))
50555 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
50556 +
50557 + goto out;
50558 + }
50559 + break;
50560 + case GR_UNSPROLE:
50561 + if (unlikely(!(gr_status & GR_READY))) {
50562 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
50563 + error = -EAGAIN;
50564 + break;
50565 + }
50566 +
50567 + if (current->role->roletype & GR_ROLE_SPECIAL) {
50568 + char *p = "";
50569 + int i = 0;
50570 +
50571 + read_lock(&tasklist_lock);
50572 + if (current->real_parent) {
50573 + p = current->real_parent->role->rolename;
50574 + i = current->real_parent->acl_role_id;
50575 + }
50576 + read_unlock(&tasklist_lock);
50577 +
50578 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
50579 + gr_set_acls(1);
50580 + } else {
50581 + error = -EPERM;
50582 + goto out;
50583 + }
50584 + break;
50585 + default:
50586 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
50587 + error = -EINVAL;
50588 + break;
50589 + }
50590 +
50591 + if (error != -EPERM)
50592 + goto out;
50593 +
50594 + if(!(gr_auth_attempts++))
50595 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
50596 +
50597 + out:
50598 + mutex_unlock(&gr_dev_mutex);
50599 + return error;
50600 +}
50601 +
50602 +/* must be called with
50603 + rcu_read_lock();
50604 + read_lock(&tasklist_lock);
50605 + read_lock(&grsec_exec_file_lock);
50606 +*/
50607 +int gr_apply_subject_to_task(struct task_struct *task)
50608 +{
50609 + struct acl_object_label *obj;
50610 + char *tmpname;
50611 + struct acl_subject_label *tmpsubj;
50612 + struct file *filp;
50613 + struct name_entry *nmatch;
50614 +
50615 + filp = task->exec_file;
50616 + if (filp == NULL)
50617 + return 0;
50618 +
50619 +	/* the following is to apply the correct subject
50620 +	   to binaries that were already running when the
50621 +	   RBAC system was enabled, or whose on-disk files
50622 +	   have been replaced or deleted since their execution
50623 +	   -----
50624 +	   when the RBAC system starts, the inode/dev taken
50625 +	   from exec_file will be one the RBAC system is
50626 +	   unaware of; it only knows the inode/dev of the
50627 +	   file currently present on disk, or the absence
50628 +	   of it.
50629 +	*/
50630 + preempt_disable();
50631 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
50632 +
50633 + nmatch = lookup_name_entry(tmpname);
50634 + preempt_enable();
50635 + tmpsubj = NULL;
50636 + if (nmatch) {
50637 + if (nmatch->deleted)
50638 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
50639 + else
50640 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
50641 + if (tmpsubj != NULL)
50642 + task->acl = tmpsubj;
50643 + }
50644 + if (tmpsubj == NULL)
50645 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
50646 + task->role);
50647 + if (task->acl) {
50648 + task->is_writable = 0;
50649 + /* ignore additional mmap checks for processes that are writable
50650 + by the default ACL */
50651 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50652 + if (unlikely(obj->mode & GR_WRITE))
50653 + task->is_writable = 1;
50654 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
50655 + if (unlikely(obj->mode & GR_WRITE))
50656 + task->is_writable = 1;
50657 +
50658 + gr_set_proc_res(task);
50659 +
50660 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50661 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
50662 +#endif
50663 + } else {
50664 + return 1;
50665 + }
50666 +
50667 + return 0;
50668 +}
50669 +
50670 +int
50671 +gr_set_acls(const int type)
50672 +{
50673 + struct task_struct *task, *task2;
50674 + struct acl_role_label *role = current->role;
50675 + __u16 acl_role_id = current->acl_role_id;
50676 + const struct cred *cred;
50677 + int ret;
50678 +
50679 + rcu_read_lock();
50680 + read_lock(&tasklist_lock);
50681 + read_lock(&grsec_exec_file_lock);
50682 + do_each_thread(task2, task) {
50683 +		/* check to see if we're called from the exit handler;
50684 +		   if so, only replace ACLs that have inherited the admin
50685 +		   ACL */
50686 +
50687 + if (type && (task->role != role ||
50688 + task->acl_role_id != acl_role_id))
50689 + continue;
50690 +
50691 + task->acl_role_id = 0;
50692 + task->acl_sp_role = 0;
50693 +
50694 + if (task->exec_file) {
50695 + cred = __task_cred(task);
50696 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
50697 +
50698 + ret = gr_apply_subject_to_task(task);
50699 + if (ret) {
50700 + read_unlock(&grsec_exec_file_lock);
50701 + read_unlock(&tasklist_lock);
50702 + rcu_read_unlock();
50703 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
50704 + return ret;
50705 + }
50706 + } else {
50707 + // it's a kernel process
50708 + task->role = kernel_role;
50709 + task->acl = kernel_role->root_label;
50710 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
50711 + task->acl->mode &= ~GR_PROCFIND;
50712 +#endif
50713 + }
50714 + } while_each_thread(task2, task);
50715 + read_unlock(&grsec_exec_file_lock);
50716 + read_unlock(&tasklist_lock);
50717 + rcu_read_unlock();
50718 +
50719 + return 0;
50720 +}
50721 +
50722 +void
50723 +gr_learn_resource(const struct task_struct *task,
50724 + const int res, const unsigned long wanted, const int gt)
50725 +{
50726 + struct acl_subject_label *acl;
50727 + const struct cred *cred;
50728 +
50729 + if (unlikely((gr_status & GR_READY) &&
50730 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
50731 + goto skip_reslog;
50732 +
50733 +#ifdef CONFIG_GRKERNSEC_RESLOG
50734 + gr_log_resource(task, res, wanted, gt);
50735 +#endif
50736 + skip_reslog:
50737 +
50738 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
50739 + return;
50740 +
50741 + acl = task->acl;
50742 +
50743 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
50744 + !(acl->resmask & (1 << (unsigned short) res))))
50745 + return;
50746 +
50747 + if (wanted >= acl->res[res].rlim_cur) {
50748 + unsigned long res_add;
50749 +
50750 + res_add = wanted;
50751 + switch (res) {
50752 + case RLIMIT_CPU:
50753 + res_add += GR_RLIM_CPU_BUMP;
50754 + break;
50755 + case RLIMIT_FSIZE:
50756 + res_add += GR_RLIM_FSIZE_BUMP;
50757 + break;
50758 + case RLIMIT_DATA:
50759 + res_add += GR_RLIM_DATA_BUMP;
50760 + break;
50761 + case RLIMIT_STACK:
50762 + res_add += GR_RLIM_STACK_BUMP;
50763 + break;
50764 + case RLIMIT_CORE:
50765 + res_add += GR_RLIM_CORE_BUMP;
50766 + break;
50767 + case RLIMIT_RSS:
50768 + res_add += GR_RLIM_RSS_BUMP;
50769 + break;
50770 + case RLIMIT_NPROC:
50771 + res_add += GR_RLIM_NPROC_BUMP;
50772 + break;
50773 + case RLIMIT_NOFILE:
50774 + res_add += GR_RLIM_NOFILE_BUMP;
50775 + break;
50776 + case RLIMIT_MEMLOCK:
50777 + res_add += GR_RLIM_MEMLOCK_BUMP;
50778 + break;
50779 + case RLIMIT_AS:
50780 + res_add += GR_RLIM_AS_BUMP;
50781 + break;
50782 + case RLIMIT_LOCKS:
50783 + res_add += GR_RLIM_LOCKS_BUMP;
50784 + break;
50785 + case RLIMIT_SIGPENDING:
50786 + res_add += GR_RLIM_SIGPENDING_BUMP;
50787 + break;
50788 + case RLIMIT_MSGQUEUE:
50789 + res_add += GR_RLIM_MSGQUEUE_BUMP;
50790 + break;
50791 + case RLIMIT_NICE:
50792 + res_add += GR_RLIM_NICE_BUMP;
50793 + break;
50794 + case RLIMIT_RTPRIO:
50795 + res_add += GR_RLIM_RTPRIO_BUMP;
50796 + break;
50797 + case RLIMIT_RTTIME:
50798 + res_add += GR_RLIM_RTTIME_BUMP;
50799 + break;
50800 + }
50801 +
50802 + acl->res[res].rlim_cur = res_add;
50803 +
50804 + if (wanted > acl->res[res].rlim_max)
50805 + acl->res[res].rlim_max = res_add;
50806 +
50807 + /* only log the subject filename, since resource logging is supported for
50808 + single-subject learning only */
50809 + rcu_read_lock();
50810 + cred = __task_cred(task);
50811 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
50812 + task->role->roletype, cred->uid, cred->gid, acl->filename,
50813 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
50814 + "", (unsigned long) res, &task->signal->saved_ip);
50815 + rcu_read_unlock();
50816 + }
50817 +
50818 + return;
50819 +}
50820 +
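
For illustration only, a minimal userspace sketch of the learning-mode rlimit bump applied by gr_learn_resource() above; the struct, function names and bump value here are hypothetical stand-ins, not the kernel's types or the GR_RLIM_*_BUMP constants defined elsewhere in this patch:

/* standalone sketch: raise a learned limit to the observed need plus slack */
#include <stdio.h>

struct learned_rlim {
	unsigned long rlim_cur;
	unsigned long rlim_max;
};

static void learn_bump(struct learned_rlim *r, unsigned long wanted,
		       unsigned long bump)
{
	unsigned long res_add = wanted + bump;	/* slack above the observed need */

	if (wanted < r->rlim_cur)
		return;				/* already covered, nothing to learn */

	r->rlim_cur = res_add;			/* raise the learned soft limit */
	if (wanted > r->rlim_max)
		r->rlim_max = res_add;		/* and the hard limit if it was exceeded */
}

int main(void)
{
	struct learned_rlim nofile = { .rlim_cur = 1024, .rlim_max = 4096 };

	learn_bump(&nofile, 5000, 256);		/* 256 is a placeholder bump value */
	printf("cur=%lu max=%lu\n", nofile.rlim_cur, nofile.rlim_max);
	return 0;
}

The slack above the observed need keeps the learned policy from being an exact fit that the next, slightly larger request would immediately trip over.
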
50821 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
50822 +void
50823 +pax_set_initial_flags(struct linux_binprm *bprm)
50824 +{
50825 + struct task_struct *task = current;
50826 + struct acl_subject_label *proc;
50827 + unsigned long flags;
50828 +
50829 + if (unlikely(!(gr_status & GR_READY)))
50830 + return;
50831 +
50832 + flags = pax_get_flags(task);
50833 +
50834 + proc = task->acl;
50835 +
50836 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
50837 + flags &= ~MF_PAX_PAGEEXEC;
50838 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
50839 + flags &= ~MF_PAX_SEGMEXEC;
50840 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
50841 + flags &= ~MF_PAX_RANDMMAP;
50842 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
50843 + flags &= ~MF_PAX_EMUTRAMP;
50844 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
50845 + flags &= ~MF_PAX_MPROTECT;
50846 +
50847 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
50848 + flags |= MF_PAX_PAGEEXEC;
50849 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
50850 + flags |= MF_PAX_SEGMEXEC;
50851 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
50852 + flags |= MF_PAX_RANDMMAP;
50853 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
50854 + flags |= MF_PAX_EMUTRAMP;
50855 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
50856 + flags |= MF_PAX_MPROTECT;
50857 +
50858 + pax_set_flags(task, flags);
50859 +
50860 + return;
50861 +}
50862 +#endif
50863 +
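
A hedged sketch of the clear-then-set flag translation performed by pax_set_initial_flags() above; the SUBJ_*/TASK_* bit values below are invented for illustration and do not correspond to the real GR_PAX_* or MF_PAX_* definitions:

/* standalone sketch: per-subject policy bits override inherited task flags */
#include <stdio.h>

#define SUBJ_DISABLE_PAGEEXEC (1u << 0)
#define SUBJ_ENABLE_PAGEEXEC  (1u << 1)
#define SUBJ_DISABLE_MPROTECT (1u << 2)
#define SUBJ_ENABLE_MPROTECT  (1u << 3)

#define TASK_PAGEEXEC (1u << 0)
#define TASK_MPROTECT (1u << 1)

static unsigned int apply_policy(unsigned int task_flags, unsigned int subj_flags)
{
	/* clear any feature the subject explicitly disables */
	if (subj_flags & SUBJ_DISABLE_PAGEEXEC)
		task_flags &= ~TASK_PAGEEXEC;
	if (subj_flags & SUBJ_DISABLE_MPROTECT)
		task_flags &= ~TASK_MPROTECT;

	/* then set any feature the subject explicitly enables */
	if (subj_flags & SUBJ_ENABLE_PAGEEXEC)
		task_flags |= TASK_PAGEEXEC;
	if (subj_flags & SUBJ_ENABLE_MPROTECT)
		task_flags |= TASK_MPROTECT;

	return task_flags;
}

int main(void)
{
	unsigned int flags = TASK_MPROTECT;	/* inherited defaults */

	flags = apply_policy(flags, SUBJ_ENABLE_PAGEEXEC | SUBJ_DISABLE_MPROTECT);
	printf("flags=%#x\n", flags);		/* PAGEEXEC on, MPROTECT off */
	return 0;
}
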
50864 +#ifdef CONFIG_SYSCTL
50865 +/* Eric Biederman likes breaking userland ABI and every inode-based security
50866 + system to save 35kb of memory */
50867 +
50868 +/* we modify the passed in filename, but adjust it back before returning */
50869 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
50870 +{
50871 + struct name_entry *nmatch;
50872 + char *p, *lastp = NULL;
50873 + struct acl_object_label *obj = NULL, *tmp;
50874 + struct acl_subject_label *tmpsubj;
50875 + char c = '\0';
50876 +
50877 + read_lock(&gr_inode_lock);
50878 +
50879 + p = name + len - 1;
50880 + do {
50881 + nmatch = lookup_name_entry(name);
50882 + if (lastp != NULL)
50883 + *lastp = c;
50884 +
50885 + if (nmatch == NULL)
50886 + goto next_component;
50887 + tmpsubj = current->acl;
50888 + do {
50889 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
50890 + if (obj != NULL) {
50891 + tmp = obj->globbed;
50892 + while (tmp) {
50893 + if (!glob_match(tmp->filename, name)) {
50894 + obj = tmp;
50895 + goto found_obj;
50896 + }
50897 + tmp = tmp->next;
50898 + }
50899 + goto found_obj;
50900 + }
50901 + } while ((tmpsubj = tmpsubj->parent_subject));
50902 +next_component:
50903 + /* end case */
50904 + if (p == name)
50905 + break;
50906 +
50907 + while (*p != '/')
50908 + p--;
50909 + if (p == name)
50910 + lastp = p + 1;
50911 + else {
50912 + lastp = p;
50913 + p--;
50914 + }
50915 + c = *lastp;
50916 + *lastp = '\0';
50917 + } while (1);
50918 +found_obj:
50919 + read_unlock(&gr_inode_lock);
50920 + /* obj returned will always be non-null */
50921 + return obj;
50922 +}
50923 +
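
A standalone sketch of the longest-prefix lookup order used by gr_lookup_by_name() above: the full path is tried first, then truncated one component at a time, so the most specific policy object wins and "/" acts as the final fallback. The lookup itself is stubbed out; lookup_stub() and longest_prefix_walk() are hypothetical names:

#include <stdio.h>
#include <string.h>

static int lookup_stub(const char *path)
{
	printf("lookup: %s\n", path);
	return 0;			/* pretend nothing matched */
}

static void longest_prefix_walk(char *name)
{
	char *p = name + strlen(name) - 1;
	char *lastp = NULL;
	char c = '\0';

	for (;;) {
		lookup_stub(name);
		if (lastp != NULL)	/* restore the byte we truncated */
			*lastp = c;
		if (p == name)
			break;
		while (*p != '/')
			p--;
		lastp = (p == name) ? p + 1 : p;
		if (p != name)
			p--;
		c = *lastp;
		*lastp = '\0';		/* truncate the last component */
	}
}

int main(void)
{
	char path[] = "/proc/sys/kernel/modprobe";

	/* prints the full path, then /proc/sys/kernel, /proc/sys, /proc, / */
	longest_prefix_walk(path);
	return 0;
}
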
50924 +/* returns 0 when allowing, non-zero on error.
50925 +   An op of 0 is used for readdir, so we don't log the names of hidden files.
50926 +*/
50927 +__u32
50928 +gr_handle_sysctl(const struct ctl_table *table, const int op)
50929 +{
50930 + ctl_table *tmp;
50931 + const char *proc_sys = "/proc/sys";
50932 + char *path;
50933 + struct acl_object_label *obj;
50934 + unsigned short len = 0, pos = 0, depth = 0, i;
50935 + __u32 err = 0;
50936 + __u32 mode = 0;
50937 +
50938 + if (unlikely(!(gr_status & GR_READY)))
50939 + return 0;
50940 +
50941 +	/* for now, ignore operations on non-sysctl entries if it's not a
50942 +	   readdir */
50943 + if (table->child != NULL && op != 0)
50944 + return 0;
50945 +
50946 + mode |= GR_FIND;
50947 + /* it's only a read if it's an entry, read on dirs is for readdir */
50948 + if (op & MAY_READ)
50949 + mode |= GR_READ;
50950 + if (op & MAY_WRITE)
50951 + mode |= GR_WRITE;
50952 +
50953 + preempt_disable();
50954 +
50955 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
50956 +
50957 +	/* it's only a read/write if it's an actual entry, not a dir
50958 +	   (dirs are opened only for readdir)
50959 +	*/
50960 +
50961 + /* convert the requested sysctl entry into a pathname */
50962 +
50963 + for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
50964 + len += strlen(tmp->procname);
50965 + len++;
50966 + depth++;
50967 + }
50968 +
50969 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
50970 + /* deny */
50971 + goto out;
50972 + }
50973 +
50974 + memset(path, 0, PAGE_SIZE);
50975 +
50976 + memcpy(path, proc_sys, strlen(proc_sys));
50977 +
50978 + pos += strlen(proc_sys);
50979 +
50980 + for (; depth > 0; depth--) {
50981 + path[pos] = '/';
50982 + pos++;
50983 + for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
50984 + if (depth == i) {
50985 + memcpy(path + pos, tmp->procname,
50986 + strlen(tmp->procname));
50987 + pos += strlen(tmp->procname);
50988 + }
50989 + i++;
50990 + }
50991 + }
50992 +
50993 + obj = gr_lookup_by_name(path, pos);
50994 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
50995 +
50996 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
50997 + ((err & mode) != mode))) {
50998 + __u32 new_mode = mode;
50999 +
51000 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
51001 +
51002 + err = 0;
51003 + gr_log_learn_sysctl(path, new_mode);
51004 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
51005 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
51006 + err = -ENOENT;
51007 + } else if (!(err & GR_FIND)) {
51008 + err = -ENOENT;
51009 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
51010 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
51011 + path, (mode & GR_READ) ? " reading" : "",
51012 + (mode & GR_WRITE) ? " writing" : "");
51013 + err = -EACCES;
51014 + } else if ((err & mode) != mode) {
51015 + err = -EACCES;
51016 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
51017 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
51018 + path, (mode & GR_READ) ? " reading" : "",
51019 + (mode & GR_WRITE) ? " writing" : "");
51020 + err = 0;
51021 + } else
51022 + err = 0;
51023 +
51024 + out:
51025 + preempt_enable();
51026 +
51027 + return err;
51028 +}
51029 +#endif
51030 +
51031 +int
51032 +gr_handle_proc_ptrace(struct task_struct *task)
51033 +{
51034 + struct file *filp;
51035 + struct task_struct *tmp = task;
51036 + struct task_struct *curtemp = current;
51037 + __u32 retmode;
51038 +
51039 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
51040 + if (unlikely(!(gr_status & GR_READY)))
51041 + return 0;
51042 +#endif
51043 +
51044 + read_lock(&tasklist_lock);
51045 + read_lock(&grsec_exec_file_lock);
51046 + filp = task->exec_file;
51047 +
51048 + while (tmp->pid > 0) {
51049 + if (tmp == curtemp)
51050 + break;
51051 + tmp = tmp->real_parent;
51052 + }
51053 +
51054 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
51055 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
51056 + read_unlock(&grsec_exec_file_lock);
51057 + read_unlock(&tasklist_lock);
51058 + return 1;
51059 + }
51060 +
51061 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
51062 + if (!(gr_status & GR_READY)) {
51063 + read_unlock(&grsec_exec_file_lock);
51064 + read_unlock(&tasklist_lock);
51065 + return 0;
51066 + }
51067 +#endif
51068 +
51069 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
51070 + read_unlock(&grsec_exec_file_lock);
51071 + read_unlock(&tasklist_lock);
51072 +
51073 + if (retmode & GR_NOPTRACE)
51074 + return 1;
51075 +
51076 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
51077 + && (current->acl != task->acl || (current->acl != current->role->root_label
51078 + && current->pid != task->pid)))
51079 + return 1;
51080 +
51081 + return 0;
51082 +}
51083 +
51084 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
51085 +{
51086 + if (unlikely(!(gr_status & GR_READY)))
51087 + return;
51088 +
51089 + if (!(current->role->roletype & GR_ROLE_GOD))
51090 + return;
51091 +
51092 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
51093 + p->role->rolename, gr_task_roletype_to_char(p),
51094 + p->acl->filename);
51095 +}
51096 +
51097 +int
51098 +gr_handle_ptrace(struct task_struct *task, const long request)
51099 +{
51100 + struct task_struct *tmp = task;
51101 + struct task_struct *curtemp = current;
51102 + __u32 retmode;
51103 +
51104 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
51105 + if (unlikely(!(gr_status & GR_READY)))
51106 + return 0;
51107 +#endif
51108 +
51109 + read_lock(&tasklist_lock);
51110 + while (tmp->pid > 0) {
51111 + if (tmp == curtemp)
51112 + break;
51113 + tmp = tmp->real_parent;
51114 + }
51115 +
51116 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
51117 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
51118 + read_unlock(&tasklist_lock);
51119 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
51120 + return 1;
51121 + }
51122 + read_unlock(&tasklist_lock);
51123 +
51124 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
51125 + if (!(gr_status & GR_READY))
51126 + return 0;
51127 +#endif
51128 +
51129 + read_lock(&grsec_exec_file_lock);
51130 + if (unlikely(!task->exec_file)) {
51131 + read_unlock(&grsec_exec_file_lock);
51132 + return 0;
51133 + }
51134 +
51135 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
51136 + read_unlock(&grsec_exec_file_lock);
51137 +
51138 + if (retmode & GR_NOPTRACE) {
51139 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
51140 + return 1;
51141 + }
51142 +
51143 + if (retmode & GR_PTRACERD) {
51144 + switch (request) {
51145 + case PTRACE_POKETEXT:
51146 + case PTRACE_POKEDATA:
51147 + case PTRACE_POKEUSR:
51148 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
51149 + case PTRACE_SETREGS:
51150 + case PTRACE_SETFPREGS:
51151 +#endif
51152 +#ifdef CONFIG_X86
51153 + case PTRACE_SETFPXREGS:
51154 +#endif
51155 +#ifdef CONFIG_ALTIVEC
51156 + case PTRACE_SETVRREGS:
51157 +#endif
51158 + return 1;
51159 + default:
51160 + return 0;
51161 + }
51162 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
51163 + !(current->role->roletype & GR_ROLE_GOD) &&
51164 + (current->acl != task->acl)) {
51165 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
51166 + return 1;
51167 + }
51168 +
51169 + return 0;
51170 +}
51171 +
51172 +static int is_writable_mmap(const struct file *filp)
51173 +{
51174 + struct task_struct *task = current;
51175 + struct acl_object_label *obj, *obj2;
51176 +
51177 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
51178 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
51179 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
51180 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
51181 + task->role->root_label);
51182 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
51183 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
51184 + return 1;
51185 + }
51186 + }
51187 + return 0;
51188 +}
51189 +
51190 +int
51191 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
51192 +{
51193 + __u32 mode;
51194 +
51195 + if (unlikely(!file || !(prot & PROT_EXEC)))
51196 + return 1;
51197 +
51198 + if (is_writable_mmap(file))
51199 + return 0;
51200 +
51201 + mode =
51202 + gr_search_file(file->f_path.dentry,
51203 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
51204 + file->f_path.mnt);
51205 +
51206 + if (!gr_tpe_allow(file))
51207 + return 0;
51208 +
51209 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
51210 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
51211 + return 0;
51212 + } else if (unlikely(!(mode & GR_EXEC))) {
51213 + return 0;
51214 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
51215 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
51216 + return 1;
51217 + }
51218 +
51219 + return 1;
51220 +}
51221 +
51222 +int
51223 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
51224 +{
51225 + __u32 mode;
51226 +
51227 + if (unlikely(!file || !(prot & PROT_EXEC)))
51228 + return 1;
51229 +
51230 + if (is_writable_mmap(file))
51231 + return 0;
51232 +
51233 + mode =
51234 + gr_search_file(file->f_path.dentry,
51235 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
51236 + file->f_path.mnt);
51237 +
51238 + if (!gr_tpe_allow(file))
51239 + return 0;
51240 +
51241 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
51242 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
51243 + return 0;
51244 + } else if (unlikely(!(mode & GR_EXEC))) {
51245 + return 0;
51246 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
51247 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
51248 + return 1;
51249 + }
51250 +
51251 + return 1;
51252 +}
51253 +
51254 +void
51255 +gr_acl_handle_psacct(struct task_struct *task, const long code)
51256 +{
51257 + unsigned long runtime;
51258 + unsigned long cputime;
51259 + unsigned int wday, cday;
51260 + __u8 whr, chr;
51261 + __u8 wmin, cmin;
51262 + __u8 wsec, csec;
51263 + struct timespec timeval;
51264 +
51265 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
51266 + !(task->acl->mode & GR_PROCACCT)))
51267 + return;
51268 +
51269 + do_posix_clock_monotonic_gettime(&timeval);
51270 + runtime = timeval.tv_sec - task->start_time.tv_sec;
51271 + wday = runtime / (3600 * 24);
51272 + runtime -= wday * (3600 * 24);
51273 + whr = runtime / 3600;
51274 + runtime -= whr * 3600;
51275 + wmin = runtime / 60;
51276 + runtime -= wmin * 60;
51277 + wsec = runtime;
51278 +
51279 + cputime = (task->utime + task->stime) / HZ;
51280 + cday = cputime / (3600 * 24);
51281 + cputime -= cday * (3600 * 24);
51282 + chr = cputime / 3600;
51283 + cputime -= chr * 3600;
51284 + cmin = cputime / 60;
51285 + cputime -= cmin * 60;
51286 + csec = cputime;
51287 +
51288 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
51289 +
51290 + return;
51291 +}
51292 +
51293 +void gr_set_kernel_label(struct task_struct *task)
51294 +{
51295 + if (gr_status & GR_READY) {
51296 + task->role = kernel_role;
51297 + task->acl = kernel_role->root_label;
51298 + }
51299 + return;
51300 +}
51301 +
51302 +#ifdef CONFIG_TASKSTATS
51303 +int gr_is_taskstats_denied(int pid)
51304 +{
51305 + struct task_struct *task;
51306 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51307 + const struct cred *cred;
51308 +#endif
51309 + int ret = 0;
51310 +
51311 + /* restrict taskstats viewing to un-chrooted root users
51312 + who have the 'view' subject flag if the RBAC system is enabled
51313 + */
51314 +
51315 + rcu_read_lock();
51316 + read_lock(&tasklist_lock);
51317 + task = find_task_by_vpid(pid);
51318 + if (task) {
51319 +#ifdef CONFIG_GRKERNSEC_CHROOT
51320 + if (proc_is_chrooted(task))
51321 + ret = -EACCES;
51322 +#endif
51323 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51324 + cred = __task_cred(task);
51325 +#ifdef CONFIG_GRKERNSEC_PROC_USER
51326 + if (cred->uid != 0)
51327 + ret = -EACCES;
51328 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51329 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
51330 + ret = -EACCES;
51331 +#endif
51332 +#endif
51333 + if (gr_status & GR_READY) {
51334 + if (!(task->acl->mode & GR_VIEW))
51335 + ret = -EACCES;
51336 + }
51337 + } else
51338 + ret = -ENOENT;
51339 +
51340 + read_unlock(&tasklist_lock);
51341 + rcu_read_unlock();
51342 +
51343 + return ret;
51344 +}
51345 +#endif
51346 +
51347 +/* AUXV entries are filled via a descendant of search_binary_handler
51348 + after we've already applied the subject for the target
51349 +*/
51350 +int gr_acl_enable_at_secure(void)
51351 +{
51352 + if (unlikely(!(gr_status & GR_READY)))
51353 + return 0;
51354 +
51355 + if (current->acl->mode & GR_ATSECURE)
51356 + return 1;
51357 +
51358 + return 0;
51359 +}
51360 +
51361 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
51362 +{
51363 + struct task_struct *task = current;
51364 + struct dentry *dentry = file->f_path.dentry;
51365 + struct vfsmount *mnt = file->f_path.mnt;
51366 + struct acl_object_label *obj, *tmp;
51367 + struct acl_subject_label *subj;
51368 + unsigned int bufsize;
51369 + int is_not_root;
51370 + char *path;
51371 + dev_t dev = __get_dev(dentry);
51372 +
51373 + if (unlikely(!(gr_status & GR_READY)))
51374 + return 1;
51375 +
51376 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
51377 + return 1;
51378 +
51379 + /* ignore Eric Biederman */
51380 + if (IS_PRIVATE(dentry->d_inode))
51381 + return 1;
51382 +
51383 + subj = task->acl;
51384 + do {
51385 + obj = lookup_acl_obj_label(ino, dev, subj);
51386 + if (obj != NULL)
51387 + return (obj->mode & GR_FIND) ? 1 : 0;
51388 + } while ((subj = subj->parent_subject));
51389 +
51390 +	/* this is purely an optimization, since we're looking for an object
51391 +	   for the directory we're doing a readdir on;
51392 +	   if it's possible for any globbed object to match the entry we're
51393 +	   filling into the directory, then the object we find here will be
51394 +	   an anchor point with attached globbed objects
51395 +	*/
51396 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
51397 + if (obj->globbed == NULL)
51398 + return (obj->mode & GR_FIND) ? 1 : 0;
51399 +
51400 + is_not_root = ((obj->filename[0] == '/') &&
51401 + (obj->filename[1] == '\0')) ? 0 : 1;
51402 + bufsize = PAGE_SIZE - namelen - is_not_root;
51403 +
51404 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
51405 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
51406 + return 1;
51407 +
51408 + preempt_disable();
51409 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
51410 + bufsize);
51411 +
51412 + bufsize = strlen(path);
51413 +
51414 + /* if base is "/", don't append an additional slash */
51415 + if (is_not_root)
51416 + *(path + bufsize) = '/';
51417 + memcpy(path + bufsize + is_not_root, name, namelen);
51418 + *(path + bufsize + namelen + is_not_root) = '\0';
51419 +
51420 + tmp = obj->globbed;
51421 + while (tmp) {
51422 + if (!glob_match(tmp->filename, path)) {
51423 + preempt_enable();
51424 + return (tmp->mode & GR_FIND) ? 1 : 0;
51425 + }
51426 + tmp = tmp->next;
51427 + }
51428 + preempt_enable();
51429 + return (obj->mode & GR_FIND) ? 1 : 0;
51430 +}
51431 +
51432 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
51433 +EXPORT_SYMBOL(gr_acl_is_enabled);
51434 +#endif
51435 +EXPORT_SYMBOL(gr_learn_resource);
51436 +EXPORT_SYMBOL(gr_set_kernel_label);
51437 +#ifdef CONFIG_SECURITY
51438 +EXPORT_SYMBOL(gr_check_user_change);
51439 +EXPORT_SYMBOL(gr_check_group_change);
51440 +#endif
51441 +
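
A minimal sketch of the real_parent ancestry walk that gr_handle_proc_ptrace() and gr_handle_ptrace() above both rely on; fake_task and tracer_is_ancestor() are hypothetical, and the RBAC state and exec_file locking that surround the walk in the patch are omitted here:

#include <stdio.h>

struct fake_task {
	int pid;
	const struct fake_task *real_parent;
};

/* returns 1 if tracer is the task itself or one of its ancestors */
static int tracer_is_ancestor(const struct fake_task *task,
			      const struct fake_task *tracer)
{
	const struct fake_task *tmp = task;

	while (tmp->pid > 0) {
		if (tmp == tracer)
			return 1;
		tmp = tmp->real_parent;
	}
	return 0;	/* walked to pid 0 without meeting the tracer */
}

int main(void)
{
	struct fake_task idle  = { 0, NULL };
	struct fake_task init  = { 1, &idle };
	struct fake_task shell = { 100, &init };
	struct fake_task child = { 200, &shell };

	printf("%d %d\n", tracer_is_ancestor(&child, &shell),	/* 1 */
			  tracer_is_ancestor(&shell, &child));	/* 0 */
	return 0;
}
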
51442 diff -urNp linux-2.6.32.44/grsecurity/gracl_cap.c linux-2.6.32.44/grsecurity/gracl_cap.c
51443 --- linux-2.6.32.44/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
51444 +++ linux-2.6.32.44/grsecurity/gracl_cap.c 2011-04-17 15:56:46.000000000 -0400
51445 @@ -0,0 +1,138 @@
51446 +#include <linux/kernel.h>
51447 +#include <linux/module.h>
51448 +#include <linux/sched.h>
51449 +#include <linux/gracl.h>
51450 +#include <linux/grsecurity.h>
51451 +#include <linux/grinternal.h>
51452 +
51453 +static const char *captab_log[] = {
51454 + "CAP_CHOWN",
51455 + "CAP_DAC_OVERRIDE",
51456 + "CAP_DAC_READ_SEARCH",
51457 + "CAP_FOWNER",
51458 + "CAP_FSETID",
51459 + "CAP_KILL",
51460 + "CAP_SETGID",
51461 + "CAP_SETUID",
51462 + "CAP_SETPCAP",
51463 + "CAP_LINUX_IMMUTABLE",
51464 + "CAP_NET_BIND_SERVICE",
51465 + "CAP_NET_BROADCAST",
51466 + "CAP_NET_ADMIN",
51467 + "CAP_NET_RAW",
51468 + "CAP_IPC_LOCK",
51469 + "CAP_IPC_OWNER",
51470 + "CAP_SYS_MODULE",
51471 + "CAP_SYS_RAWIO",
51472 + "CAP_SYS_CHROOT",
51473 + "CAP_SYS_PTRACE",
51474 + "CAP_SYS_PACCT",
51475 + "CAP_SYS_ADMIN",
51476 + "CAP_SYS_BOOT",
51477 + "CAP_SYS_NICE",
51478 + "CAP_SYS_RESOURCE",
51479 + "CAP_SYS_TIME",
51480 + "CAP_SYS_TTY_CONFIG",
51481 + "CAP_MKNOD",
51482 + "CAP_LEASE",
51483 + "CAP_AUDIT_WRITE",
51484 + "CAP_AUDIT_CONTROL",
51485 + "CAP_SETFCAP",
51486 + "CAP_MAC_OVERRIDE",
51487 + "CAP_MAC_ADMIN"
51488 +};
51489 +
51490 +EXPORT_SYMBOL(gr_is_capable);
51491 +EXPORT_SYMBOL(gr_is_capable_nolog);
51492 +
51493 +int
51494 +gr_is_capable(const int cap)
51495 +{
51496 + struct task_struct *task = current;
51497 + const struct cred *cred = current_cred();
51498 + struct acl_subject_label *curracl;
51499 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
51500 + kernel_cap_t cap_audit = __cap_empty_set;
51501 +
51502 + if (!gr_acl_is_enabled())
51503 + return 1;
51504 +
51505 + curracl = task->acl;
51506 +
51507 + cap_drop = curracl->cap_lower;
51508 + cap_mask = curracl->cap_mask;
51509 + cap_audit = curracl->cap_invert_audit;
51510 +
51511 + while ((curracl = curracl->parent_subject)) {
51512 +		/* if the cap isn't specified in the current computed mask but is specified in the
51513 +		   current level subject's mask, adopt that subject's setting: mark the cap in the
51514 +		   computed mask, and if that subject lowers the cap, add it to the set of dropped
51515 +		   capabilities (and to the audited set if that subject inverts its auditing)
51516 +		*/
51517 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
51518 + cap_raise(cap_mask, cap);
51519 + if (cap_raised(curracl->cap_lower, cap))
51520 + cap_raise(cap_drop, cap);
51521 + if (cap_raised(curracl->cap_invert_audit, cap))
51522 + cap_raise(cap_audit, cap);
51523 + }
51524 + }
51525 +
51526 + if (!cap_raised(cap_drop, cap)) {
51527 + if (cap_raised(cap_audit, cap))
51528 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
51529 + return 1;
51530 + }
51531 +
51532 + curracl = task->acl;
51533 +
51534 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
51535 + && cap_raised(cred->cap_effective, cap)) {
51536 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
51537 + task->role->roletype, cred->uid,
51538 + cred->gid, task->exec_file ?
51539 + gr_to_filename(task->exec_file->f_path.dentry,
51540 + task->exec_file->f_path.mnt) : curracl->filename,
51541 + curracl->filename, 0UL,
51542 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
51543 + return 1;
51544 + }
51545 +
51546 + if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
51547 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
51548 + return 0;
51549 +}
51550 +
51551 +int
51552 +gr_is_capable_nolog(const int cap)
51553 +{
51554 + struct acl_subject_label *curracl;
51555 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
51556 +
51557 + if (!gr_acl_is_enabled())
51558 + return 1;
51559 +
51560 + curracl = current->acl;
51561 +
51562 + cap_drop = curracl->cap_lower;
51563 + cap_mask = curracl->cap_mask;
51564 +
51565 + while ((curracl = curracl->parent_subject)) {
51566 +		/* if the cap isn't specified in the current computed mask but is specified in the
51567 +		   current level subject's mask, adopt that subject's setting: mark the cap in the
51568 +		   computed mask, and if that subject lowers the cap, add it to the set of dropped
51569 +		   capabilities
51570 +		*/
51571 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
51572 + cap_raise(cap_mask, cap);
51573 + if (cap_raised(curracl->cap_lower, cap))
51574 + cap_raise(cap_drop, cap);
51575 + }
51576 + }
51577 +
51578 + if (!cap_raised(cap_drop, cap))
51579 + return 1;
51580 +
51581 + return 0;
51582 +}
51583 +
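
A minimal sketch of the subject-inheritance rule used by gr_is_capable() and gr_is_capable_nolog() above, for a single capability bit: the nearest subject in the parent chain whose mask mentions the capability decides whether it is dropped. Plain unsigned bitmasks stand in for kernel_cap_t; fake_subject and cap_allowed() are hypothetical names:

#include <stdio.h>

struct fake_subject {
	unsigned int cap_mask;		/* caps this subject has an opinion about */
	unsigned int cap_lower;		/* caps this subject drops */
	const struct fake_subject *parent;
};

static int cap_allowed(const struct fake_subject *subj, unsigned int cap_bit)
{
	unsigned int mask = subj->cap_mask;
	unsigned int drop = subj->cap_lower;

	for (subj = subj->parent; subj; subj = subj->parent) {
		/* adopt an ancestor's decision only if nothing closer decided */
		if (!(mask & cap_bit) && (subj->cap_mask & cap_bit)) {
			mask |= cap_bit;
			if (subj->cap_lower & cap_bit)
				drop |= cap_bit;
		}
	}
	return !(drop & cap_bit);
}

int main(void)
{
	const unsigned int CAP_X = 1u << 3;
	struct fake_subject root  = { .cap_mask = CAP_X, .cap_lower = CAP_X, .parent = NULL };
	struct fake_subject child = { .cap_mask = 0,     .cap_lower = 0,     .parent = &root };

	/* child says nothing about CAP_X, so the parent's drop applies */
	printf("%d\n", cap_allowed(&child, CAP_X));	/* 0 */
	return 0;
}
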
51584 diff -urNp linux-2.6.32.44/grsecurity/gracl_fs.c linux-2.6.32.44/grsecurity/gracl_fs.c
51585 --- linux-2.6.32.44/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
51586 +++ linux-2.6.32.44/grsecurity/gracl_fs.c 2011-04-17 15:56:46.000000000 -0400
51587 @@ -0,0 +1,431 @@
51588 +#include <linux/kernel.h>
51589 +#include <linux/sched.h>
51590 +#include <linux/types.h>
51591 +#include <linux/fs.h>
51592 +#include <linux/file.h>
51593 +#include <linux/stat.h>
51594 +#include <linux/grsecurity.h>
51595 +#include <linux/grinternal.h>
51596 +#include <linux/gracl.h>
51597 +
51598 +__u32
51599 +gr_acl_handle_hidden_file(const struct dentry * dentry,
51600 + const struct vfsmount * mnt)
51601 +{
51602 + __u32 mode;
51603 +
51604 + if (unlikely(!dentry->d_inode))
51605 + return GR_FIND;
51606 +
51607 + mode =
51608 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
51609 +
51610 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
51611 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
51612 + return mode;
51613 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
51614 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
51615 + return 0;
51616 + } else if (unlikely(!(mode & GR_FIND)))
51617 + return 0;
51618 +
51619 + return GR_FIND;
51620 +}
51621 +
51622 +__u32
51623 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
51624 + const int fmode)
51625 +{
51626 + __u32 reqmode = GR_FIND;
51627 + __u32 mode;
51628 +
51629 + if (unlikely(!dentry->d_inode))
51630 + return reqmode;
51631 +
51632 + if (unlikely(fmode & O_APPEND))
51633 + reqmode |= GR_APPEND;
51634 + else if (unlikely(fmode & FMODE_WRITE))
51635 + reqmode |= GR_WRITE;
51636 + if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
51637 + reqmode |= GR_READ;
51638 + if ((fmode & FMODE_GREXEC) && (fmode & FMODE_EXEC))
51639 + reqmode &= ~GR_READ;
51640 + mode =
51641 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
51642 + mnt);
51643 +
51644 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51645 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
51646 + reqmode & GR_READ ? " reading" : "",
51647 + reqmode & GR_WRITE ? " writing" : reqmode &
51648 + GR_APPEND ? " appending" : "");
51649 + return reqmode;
51650 + } else
51651 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51652 + {
51653 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
51654 + reqmode & GR_READ ? " reading" : "",
51655 + reqmode & GR_WRITE ? " writing" : reqmode &
51656 + GR_APPEND ? " appending" : "");
51657 + return 0;
51658 + } else if (unlikely((mode & reqmode) != reqmode))
51659 + return 0;
51660 +
51661 + return reqmode;
51662 +}
51663 +
51664 +__u32
51665 +gr_acl_handle_creat(const struct dentry * dentry,
51666 + const struct dentry * p_dentry,
51667 + const struct vfsmount * p_mnt, const int fmode,
51668 + const int imode)
51669 +{
51670 + __u32 reqmode = GR_WRITE | GR_CREATE;
51671 + __u32 mode;
51672 +
51673 + if (unlikely(fmode & O_APPEND))
51674 + reqmode |= GR_APPEND;
51675 + if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
51676 + reqmode |= GR_READ;
51677 + if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
51678 + reqmode |= GR_SETID;
51679 +
51680 + mode =
51681 + gr_check_create(dentry, p_dentry, p_mnt,
51682 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
51683 +
51684 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51685 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
51686 + reqmode & GR_READ ? " reading" : "",
51687 + reqmode & GR_WRITE ? " writing" : reqmode &
51688 + GR_APPEND ? " appending" : "");
51689 + return reqmode;
51690 + } else
51691 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51692 + {
51693 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
51694 + reqmode & GR_READ ? " reading" : "",
51695 + reqmode & GR_WRITE ? " writing" : reqmode &
51696 + GR_APPEND ? " appending" : "");
51697 + return 0;
51698 + } else if (unlikely((mode & reqmode) != reqmode))
51699 + return 0;
51700 +
51701 + return reqmode;
51702 +}
51703 +
51704 +__u32
51705 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
51706 + const int fmode)
51707 +{
51708 + __u32 mode, reqmode = GR_FIND;
51709 +
51710 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
51711 + reqmode |= GR_EXEC;
51712 + if (fmode & S_IWOTH)
51713 + reqmode |= GR_WRITE;
51714 + if (fmode & S_IROTH)
51715 + reqmode |= GR_READ;
51716 +
51717 + mode =
51718 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
51719 + mnt);
51720 +
51721 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51722 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
51723 + reqmode & GR_READ ? " reading" : "",
51724 + reqmode & GR_WRITE ? " writing" : "",
51725 + reqmode & GR_EXEC ? " executing" : "");
51726 + return reqmode;
51727 + } else
51728 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51729 + {
51730 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
51731 + reqmode & GR_READ ? " reading" : "",
51732 + reqmode & GR_WRITE ? " writing" : "",
51733 + reqmode & GR_EXEC ? " executing" : "");
51734 + return 0;
51735 + } else if (unlikely((mode & reqmode) != reqmode))
51736 + return 0;
51737 +
51738 + return reqmode;
51739 +}
51740 +
51741 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
51742 +{
51743 + __u32 mode;
51744 +
51745 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
51746 +
51747 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
51748 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
51749 + return mode;
51750 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
51751 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
51752 + return 0;
51753 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
51754 + return 0;
51755 +
51756 + return (reqmode);
51757 +}
51758 +
51759 +__u32
51760 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
51761 +{
51762 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
51763 +}
51764 +
51765 +__u32
51766 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
51767 +{
51768 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
51769 +}
51770 +
51771 +__u32
51772 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
51773 +{
51774 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
51775 +}
51776 +
51777 +__u32
51778 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
51779 +{
51780 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
51781 +}
51782 +
51783 +__u32
51784 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
51785 + mode_t mode)
51786 +{
51787 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
51788 + return 1;
51789 +
51790 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51791 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51792 + GR_FCHMOD_ACL_MSG);
51793 + } else {
51794 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
51795 + }
51796 +}
51797 +
51798 +__u32
51799 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
51800 + mode_t mode)
51801 +{
51802 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51803 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51804 + GR_CHMOD_ACL_MSG);
51805 + } else {
51806 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
51807 + }
51808 +}
51809 +
51810 +__u32
51811 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
51812 +{
51813 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
51814 +}
51815 +
51816 +__u32
51817 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
51818 +{
51819 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
51820 +}
51821 +
51822 +__u32
51823 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
51824 +{
51825 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
51826 +}
51827 +
51828 +__u32
51829 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
51830 +{
51831 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
51832 + GR_UNIXCONNECT_ACL_MSG);
51833 +}
51834 +
51835 +/* hardlinks require at minimum create permission;
51836 +   any additional privilege required is based on the
51837 +   privilege of the file being linked to
51838 +*/
51839 +__u32
51840 +gr_acl_handle_link(const struct dentry * new_dentry,
51841 + const struct dentry * parent_dentry,
51842 + const struct vfsmount * parent_mnt,
51843 + const struct dentry * old_dentry,
51844 + const struct vfsmount * old_mnt, const char *to)
51845 +{
51846 + __u32 mode;
51847 + __u32 needmode = GR_CREATE | GR_LINK;
51848 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
51849 +
51850 + mode =
51851 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
51852 + old_mnt);
51853 +
51854 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
51855 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51856 + return mode;
51857 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51858 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51859 + return 0;
51860 + } else if (unlikely((mode & needmode) != needmode))
51861 + return 0;
51862 +
51863 + return 1;
51864 +}
51865 +
51866 +__u32
51867 +gr_acl_handle_symlink(const struct dentry * new_dentry,
51868 + const struct dentry * parent_dentry,
51869 + const struct vfsmount * parent_mnt, const char *from)
51870 +{
51871 + __u32 needmode = GR_WRITE | GR_CREATE;
51872 + __u32 mode;
51873 +
51874 + mode =
51875 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
51876 + GR_CREATE | GR_AUDIT_CREATE |
51877 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
51878 +
51879 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
51880 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
51881 + return mode;
51882 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51883 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
51884 + return 0;
51885 + } else if (unlikely((mode & needmode) != needmode))
51886 + return 0;
51887 +
51888 + return (GR_WRITE | GR_CREATE);
51889 +}
51890 +
51891 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
51892 +{
51893 + __u32 mode;
51894 +
51895 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
51896 +
51897 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
51898 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
51899 + return mode;
51900 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
51901 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
51902 + return 0;
51903 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
51904 + return 0;
51905 +
51906 + return (reqmode);
51907 +}
51908 +
51909 +__u32
51910 +gr_acl_handle_mknod(const struct dentry * new_dentry,
51911 + const struct dentry * parent_dentry,
51912 + const struct vfsmount * parent_mnt,
51913 + const int mode)
51914 +{
51915 + __u32 reqmode = GR_WRITE | GR_CREATE;
51916 + if (unlikely(mode & (S_ISUID | S_ISGID)))
51917 + reqmode |= GR_SETID;
51918 +
51919 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
51920 + reqmode, GR_MKNOD_ACL_MSG);
51921 +}
51922 +
51923 +__u32
51924 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
51925 + const struct dentry *parent_dentry,
51926 + const struct vfsmount *parent_mnt)
51927 +{
51928 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
51929 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
51930 +}
51931 +
51932 +#define RENAME_CHECK_SUCCESS(old, new) \
51933 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
51934 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
51935 +
51936 +int
51937 +gr_acl_handle_rename(struct dentry *new_dentry,
51938 + struct dentry *parent_dentry,
51939 + const struct vfsmount *parent_mnt,
51940 + struct dentry *old_dentry,
51941 + struct inode *old_parent_inode,
51942 + struct vfsmount *old_mnt, const char *newname)
51943 +{
51944 + __u32 comp1, comp2;
51945 + int error = 0;
51946 +
51947 + if (unlikely(!gr_acl_is_enabled()))
51948 + return 0;
51949 +
51950 + if (!new_dentry->d_inode) {
51951 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
51952 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
51953 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
51954 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
51955 + GR_DELETE | GR_AUDIT_DELETE |
51956 + GR_AUDIT_READ | GR_AUDIT_WRITE |
51957 + GR_SUPPRESS, old_mnt);
51958 + } else {
51959 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
51960 + GR_CREATE | GR_DELETE |
51961 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
51962 + GR_AUDIT_READ | GR_AUDIT_WRITE |
51963 + GR_SUPPRESS, parent_mnt);
51964 + comp2 =
51965 + gr_search_file(old_dentry,
51966 + GR_READ | GR_WRITE | GR_AUDIT_READ |
51967 + GR_DELETE | GR_AUDIT_DELETE |
51968 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
51969 + }
51970 +
51971 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
51972 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
51973 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
51974 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
51975 + && !(comp2 & GR_SUPPRESS)) {
51976 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
51977 + error = -EACCES;
51978 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
51979 + error = -EACCES;
51980 +
51981 + return error;
51982 +}
51983 +
51984 +void
51985 +gr_acl_handle_exit(void)
51986 +{
51987 + u16 id;
51988 + char *rolename;
51989 + struct file *exec_file;
51990 +
51991 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
51992 + !(current->role->roletype & GR_ROLE_PERSIST))) {
51993 + id = current->acl_role_id;
51994 + rolename = current->role->rolename;
51995 + gr_set_acls(1);
51996 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
51997 + }
51998 +
51999 + write_lock(&grsec_exec_file_lock);
52000 + exec_file = current->exec_file;
52001 + current->exec_file = NULL;
52002 + write_unlock(&grsec_exec_file_lock);
52003 +
52004 + if (exec_file)
52005 + fput(exec_file);
52006 +}
52007 +
52008 +int
52009 +gr_acl_handle_procpidmem(const struct task_struct *task)
52010 +{
52011 + if (unlikely(!gr_acl_is_enabled()))
52012 + return 0;
52013 +
52014 + if (task != current && task->acl->mode & GR_PROTPROCFD)
52015 + return -EACCES;
52016 +
52017 + return 0;
52018 +}
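
A hedged sketch of the grant/audit/suppress decision shared by generic_fs_handler() and the other gracl_fs.c checks above; the flag values and helper name below are invented for illustration only:

#include <stdio.h>

#define REQ_READ   (1u << 0)
#define REQ_WRITE  (1u << 1)
#define FLG_AUDIT  (1u << 2)	/* log successful access */
#define FLG_SUPPR  (1u << 3)	/* keep denials out of the log */

static int decide(unsigned int granted, unsigned int reqmode)
{
	if ((granted & reqmode) == reqmode) {
		if (granted & FLG_AUDIT)
			printf("audit: allowed\n");
		return 1;				/* allow */
	}
	if (!(granted & FLG_SUPPR))
		printf("log: denied\n");
	return 0;					/* deny */
}

int main(void)
{
	decide(REQ_READ | FLG_AUDIT, REQ_READ);			/* allow, audited */
	decide(REQ_READ, REQ_READ | REQ_WRITE);			/* deny, logged */
	decide(REQ_READ | FLG_SUPPR, REQ_READ | REQ_WRITE);	/* deny, silent */
	return 0;
}
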
52019 diff -urNp linux-2.6.32.44/grsecurity/gracl_ip.c linux-2.6.32.44/grsecurity/gracl_ip.c
52020 --- linux-2.6.32.44/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
52021 +++ linux-2.6.32.44/grsecurity/gracl_ip.c 2011-04-17 15:56:46.000000000 -0400
52022 @@ -0,0 +1,382 @@
52023 +#include <linux/kernel.h>
52024 +#include <asm/uaccess.h>
52025 +#include <asm/errno.h>
52026 +#include <net/sock.h>
52027 +#include <linux/file.h>
52028 +#include <linux/fs.h>
52029 +#include <linux/net.h>
52030 +#include <linux/in.h>
52031 +#include <linux/skbuff.h>
52032 +#include <linux/ip.h>
52033 +#include <linux/udp.h>
52034 +#include <linux/smp_lock.h>
52035 +#include <linux/types.h>
52036 +#include <linux/sched.h>
52037 +#include <linux/netdevice.h>
52038 +#include <linux/inetdevice.h>
52039 +#include <linux/gracl.h>
52040 +#include <linux/grsecurity.h>
52041 +#include <linux/grinternal.h>
52042 +
52043 +#define GR_BIND 0x01
52044 +#define GR_CONNECT 0x02
52045 +#define GR_INVERT 0x04
52046 +#define GR_BINDOVERRIDE 0x08
52047 +#define GR_CONNECTOVERRIDE 0x10
52048 +#define GR_SOCK_FAMILY 0x20
52049 +
52050 +static const char * gr_protocols[IPPROTO_MAX] = {
52051 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
52052 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
52053 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
52054 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
52055 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
52056 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
52057 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
52058 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
52059 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
52060 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
52061 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
52062 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
52063 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
52064 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
52065 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
52066 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
52067 +	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
52068 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
52069 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
52070 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
52071 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
52072 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
52073 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
52074 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
52075 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
52076 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
52077 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
52078 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
52079 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
52080 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
52081 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
52082 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
52083 + };
52084 +
52085 +static const char * gr_socktypes[SOCK_MAX] = {
52086 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
52087 + "unknown:7", "unknown:8", "unknown:9", "packet"
52088 + };
52089 +
52090 +static const char * gr_sockfamilies[AF_MAX+1] = {
52091 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
52092 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
52093 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
52094 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
52095 + };
52096 +
52097 +const char *
52098 +gr_proto_to_name(unsigned char proto)
52099 +{
52100 + return gr_protocols[proto];
52101 +}
52102 +
52103 +const char *
52104 +gr_socktype_to_name(unsigned char type)
52105 +{
52106 + return gr_socktypes[type];
52107 +}
52108 +
52109 +const char *
52110 +gr_sockfamily_to_name(unsigned char family)
52111 +{
52112 + return gr_sockfamilies[family];
52113 +}
52114 +
52115 +int
52116 +gr_search_socket(const int domain, const int type, const int protocol)
52117 +{
52118 + struct acl_subject_label *curr;
52119 + const struct cred *cred = current_cred();
52120 +
52121 + if (unlikely(!gr_acl_is_enabled()))
52122 + goto exit;
52123 +
52124 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
52125 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
52126 + goto exit; // let the kernel handle it
52127 +
52128 + curr = current->acl;
52129 +
52130 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
52131 +		/* the family is allowed; if this is PF_INET, allow it only if
52132 +		   the extra sock type/protocol checks pass */
52133 + if (domain == PF_INET)
52134 + goto inet_check;
52135 + goto exit;
52136 + } else {
52137 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
52138 + __u32 fakeip = 0;
52139 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
52140 + current->role->roletype, cred->uid,
52141 + cred->gid, current->exec_file ?
52142 + gr_to_filename(current->exec_file->f_path.dentry,
52143 + current->exec_file->f_path.mnt) :
52144 + curr->filename, curr->filename,
52145 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
52146 + &current->signal->saved_ip);
52147 + goto exit;
52148 + }
52149 + goto exit_fail;
52150 + }
52151 +
52152 +inet_check:
52153 + /* the rest of this checking is for IPv4 only */
52154 + if (!curr->ips)
52155 + goto exit;
52156 +
52157 + if ((curr->ip_type & (1 << type)) &&
52158 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
52159 + goto exit;
52160 +
52161 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
52162 +		/* we don't place acls on raw sockets, and sometimes
52163 + dgram/ip sockets are opened for ioctl and not
52164 + bind/connect, so we'll fake a bind learn log */
52165 + if (type == SOCK_RAW || type == SOCK_PACKET) {
52166 + __u32 fakeip = 0;
52167 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
52168 + current->role->roletype, cred->uid,
52169 + cred->gid, current->exec_file ?
52170 + gr_to_filename(current->exec_file->f_path.dentry,
52171 + current->exec_file->f_path.mnt) :
52172 + curr->filename, curr->filename,
52173 + &fakeip, 0, type,
52174 + protocol, GR_CONNECT, &current->signal->saved_ip);
52175 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
52176 + __u32 fakeip = 0;
52177 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
52178 + current->role->roletype, cred->uid,
52179 + cred->gid, current->exec_file ?
52180 + gr_to_filename(current->exec_file->f_path.dentry,
52181 + current->exec_file->f_path.mnt) :
52182 + curr->filename, curr->filename,
52183 + &fakeip, 0, type,
52184 + protocol, GR_BIND, &current->signal->saved_ip);
52185 + }
52186 + /* we'll log when they use connect or bind */
52187 + goto exit;
52188 + }
52189 +
52190 +exit_fail:
52191 + if (domain == PF_INET)
52192 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
52193 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
52194 + else
52195 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
52196 + gr_socktype_to_name(type), protocol);
52197 +
52198 + return 0;
52199 +exit:
52200 + return 1;
52201 +}
52202 +
52203 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
52204 +{
52205 + if ((ip->mode & mode) &&
52206 + (ip_port >= ip->low) &&
52207 + (ip_port <= ip->high) &&
52208 + ((ntohl(ip_addr) & our_netmask) ==
52209 + (ntohl(our_addr) & our_netmask))
52210 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
52211 + && (ip->type & (1 << type))) {
52212 + if (ip->mode & GR_INVERT)
52213 + return 2; // specifically denied
52214 + else
52215 + return 1; // allowed
52216 + }
52217 +
52218 + return 0; // not specifically allowed, may continue parsing
52219 +}
52220 +
52221 +static int
52222 +gr_search_connectbind(const int full_mode, struct sock *sk,
52223 + struct sockaddr_in *addr, const int type)
52224 +{
52225 + char iface[IFNAMSIZ] = {0};
52226 + struct acl_subject_label *curr;
52227 + struct acl_ip_label *ip;
52228 + struct inet_sock *isk;
52229 + struct net_device *dev;
52230 + struct in_device *idev;
52231 + unsigned long i;
52232 + int ret;
52233 + int mode = full_mode & (GR_BIND | GR_CONNECT);
52234 + __u32 ip_addr = 0;
52235 + __u32 our_addr;
52236 + __u32 our_netmask;
52237 + char *p;
52238 + __u16 ip_port = 0;
52239 + const struct cred *cred = current_cred();
52240 +
52241 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
52242 + return 0;
52243 +
52244 + curr = current->acl;
52245 + isk = inet_sk(sk);
52246 +
52247 +	/* INADDR_ANY overriding for binds; inaddr_any_override is already in network order */
52248 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
52249 + addr->sin_addr.s_addr = curr->inaddr_any_override;
52250 + if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
52251 + struct sockaddr_in saddr;
52252 + int err;
52253 +
52254 + saddr.sin_family = AF_INET;
52255 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
52256 + saddr.sin_port = isk->sport;
52257 +
52258 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
52259 + if (err)
52260 + return err;
52261 +
52262 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
52263 + if (err)
52264 + return err;
52265 + }
52266 +
52267 + if (!curr->ips)
52268 + return 0;
52269 +
52270 + ip_addr = addr->sin_addr.s_addr;
52271 + ip_port = ntohs(addr->sin_port);
52272 +
52273 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
52274 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
52275 + current->role->roletype, cred->uid,
52276 + cred->gid, current->exec_file ?
52277 + gr_to_filename(current->exec_file->f_path.dentry,
52278 + current->exec_file->f_path.mnt) :
52279 + curr->filename, curr->filename,
52280 + &ip_addr, ip_port, type,
52281 + sk->sk_protocol, mode, &current->signal->saved_ip);
52282 + return 0;
52283 + }
52284 +
52285 + for (i = 0; i < curr->ip_num; i++) {
52286 + ip = *(curr->ips + i);
52287 + if (ip->iface != NULL) {
52288 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
52289 + p = strchr(iface, ':');
52290 + if (p != NULL)
52291 + *p = '\0';
52292 + dev = dev_get_by_name(sock_net(sk), iface);
52293 + if (dev == NULL)
52294 + continue;
52295 + idev = in_dev_get(dev);
52296 + if (idev == NULL) {
52297 + dev_put(dev);
52298 + continue;
52299 + }
52300 + rcu_read_lock();
52301 + for_ifa(idev) {
52302 + if (!strcmp(ip->iface, ifa->ifa_label)) {
52303 + our_addr = ifa->ifa_address;
52304 + our_netmask = 0xffffffff;
52305 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
52306 + if (ret == 1) {
52307 + rcu_read_unlock();
52308 + in_dev_put(idev);
52309 + dev_put(dev);
52310 + return 0;
52311 + } else if (ret == 2) {
52312 + rcu_read_unlock();
52313 + in_dev_put(idev);
52314 + dev_put(dev);
52315 + goto denied;
52316 + }
52317 + }
52318 + } endfor_ifa(idev);
52319 + rcu_read_unlock();
52320 + in_dev_put(idev);
52321 + dev_put(dev);
52322 + } else {
52323 + our_addr = ip->addr;
52324 + our_netmask = ip->netmask;
52325 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
52326 + if (ret == 1)
52327 + return 0;
52328 + else if (ret == 2)
52329 + goto denied;
52330 + }
52331 + }
52332 +
52333 +denied:
52334 + if (mode == GR_BIND)
52335 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
52336 + else if (mode == GR_CONNECT)
52337 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
52338 +
52339 + return -EACCES;
52340 +}
52341 +
52342 +int
52343 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
52344 +{
52345 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
52346 +}
52347 +
52348 +int
52349 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
52350 +{
52351 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
52352 +}
52353 +
52354 +int gr_search_listen(struct socket *sock)
52355 +{
52356 + struct sock *sk = sock->sk;
52357 + struct sockaddr_in addr;
52358 +
52359 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
52360 + addr.sin_port = inet_sk(sk)->sport;
52361 +
52362 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
52363 +}
52364 +
52365 +int gr_search_accept(struct socket *sock)
52366 +{
52367 + struct sock *sk = sock->sk;
52368 + struct sockaddr_in addr;
52369 +
52370 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
52371 + addr.sin_port = inet_sk(sk)->sport;
52372 +
52373 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
52374 +}
52375 +
52376 +int
52377 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
52378 +{
52379 + if (addr)
52380 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
52381 + else {
52382 + struct sockaddr_in sin;
52383 + const struct inet_sock *inet = inet_sk(sk);
52384 +
52385 + sin.sin_addr.s_addr = inet->daddr;
52386 + sin.sin_port = inet->dport;
52387 +
52388 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
52389 + }
52390 +}
52391 +
52392 +int
52393 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
52394 +{
52395 + struct sockaddr_in sin;
52396 +
52397 + if (unlikely(skb->len < sizeof (struct udphdr)))
52398 + return 0; // skip this packet
52399 +
52400 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
52401 + sin.sin_port = udp_hdr(skb)->source;
52402 +
52403 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
52404 +}
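
The check_ip_policy()/gr_search_connectbind() pair above reduces each ACL entry to a single predicate: mode bit, port range, netmask match, protocol bitmap, and socket-type bitmap. Below is a minimal userspace sketch of that predicate only; the struct ip_rule type and the GR_CONNECT/GR_BIND/GR_INVERT values here are simplified stand-ins for illustration, not the kernel's acl_ip_label or the real flag values.

/* ip_policy_demo.c - standalone sketch of the matching done by check_ip_policy().
 * struct ip_rule and the GR_* constants are illustrative stand-ins, not the
 * kernel's acl_ip_label or its real flag values. */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define GR_CONNECT 0x1
#define GR_BIND    0x2
#define GR_INVERT  0x4

struct ip_rule {
	uint32_t addr;        /* rule address, network byte order */
	uint32_t netmask;     /* host byte order, as in the kernel code */
	uint16_t low, high;   /* allowed port range */
	uint32_t mode;        /* GR_CONNECT | GR_BIND | GR_INVERT ... */
	uint32_t proto[8];    /* bitmap over 256 IP protocols */
	uint32_t type;        /* bitmap over socket types */
};

/* Mirrors the three-way result of check_ip_policy():
 * 1 = allowed, 2 = explicitly denied (GR_INVERT), 0 = no match, keep scanning. */
static int match_rule(const struct ip_rule *r, uint32_t ip_addr, uint16_t port,
		      uint8_t protocol, int mode, int type)
{
	if ((r->mode & mode) &&
	    port >= r->low && port <= r->high &&
	    ((ntohl(ip_addr) & r->netmask) == (ntohl(r->addr) & r->netmask)) &&
	    (r->proto[protocol / 32] & (1U << (protocol % 32))) &&
	    (r->type & (1U << type)))
		return (r->mode & GR_INVERT) ? 2 : 1;
	return 0;
}

int main(void)
{
	struct ip_rule r = {0};

	r.addr = inet_addr("192.168.1.0");
	r.netmask = 0xffffff00;              /* /24 */
	r.low = 1; r.high = 1023;
	r.mode = GR_CONNECT;
	r.proto[6 / 32] |= 1U << (6 % 32);   /* IPPROTO_TCP */
	r.type = 1U << 1;                    /* SOCK_STREAM == 1 on Linux */

	printf("connect 192.168.1.10:80/tcp -> %d\n",
	       match_rule(&r, inet_addr("192.168.1.10"), 80, 6, GR_CONNECT, 1));
	printf("connect 10.0.0.1:80/tcp    -> %d\n",
	       match_rule(&r, inet_addr("10.0.0.1"), 80, 6, GR_CONNECT, 1));
	return 0;
}

Built with a plain `cc ip_policy_demo.c`, this prints 1 for the in-subnet connect and 0 for the out-of-subnet one, mirroring the allow / keep-scanning results of the kernel loop.
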
52405 diff -urNp linux-2.6.32.44/grsecurity/gracl_learn.c linux-2.6.32.44/grsecurity/gracl_learn.c
52406 --- linux-2.6.32.44/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
52407 +++ linux-2.6.32.44/grsecurity/gracl_learn.c 2011-07-14 21:02:03.000000000 -0400
52408 @@ -0,0 +1,208 @@
52409 +#include <linux/kernel.h>
52410 +#include <linux/mm.h>
52411 +#include <linux/sched.h>
52412 +#include <linux/poll.h>
52413 +#include <linux/smp_lock.h>
52414 +#include <linux/string.h>
52415 +#include <linux/file.h>
52416 +#include <linux/types.h>
52417 +#include <linux/vmalloc.h>
52418 +#include <linux/grinternal.h>
52419 +
52420 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
52421 + size_t count, loff_t *ppos);
52422 +extern int gr_acl_is_enabled(void);
52423 +
52424 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
52425 +static int gr_learn_attached;
52426 +
52427 +/* use a 512k buffer */
52428 +#define LEARN_BUFFER_SIZE (512 * 1024)
52429 +
52430 +static DEFINE_SPINLOCK(gr_learn_lock);
52431 +static DEFINE_MUTEX(gr_learn_user_mutex);
52432 +
52433 +/* we need to maintain two buffers: the kernel context of grlearn
52434 +   holds a mutex around the copy to userspace, while the other kernel
52435 +   contexts take a spinlock when copying into the buffer, since they cannot sleep
52436 +*/
52437 +static char *learn_buffer;
52438 +static char *learn_buffer_user;
52439 +static int learn_buffer_len;
52440 +static int learn_buffer_user_len;
52441 +
52442 +static ssize_t
52443 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
52444 +{
52445 + DECLARE_WAITQUEUE(wait, current);
52446 + ssize_t retval = 0;
52447 +
52448 + add_wait_queue(&learn_wait, &wait);
52449 + set_current_state(TASK_INTERRUPTIBLE);
52450 + do {
52451 + mutex_lock(&gr_learn_user_mutex);
52452 + spin_lock(&gr_learn_lock);
52453 + if (learn_buffer_len)
52454 + break;
52455 + spin_unlock(&gr_learn_lock);
52456 + mutex_unlock(&gr_learn_user_mutex);
52457 + if (file->f_flags & O_NONBLOCK) {
52458 + retval = -EAGAIN;
52459 + goto out;
52460 + }
52461 + if (signal_pending(current)) {
52462 + retval = -ERESTARTSYS;
52463 + goto out;
52464 + }
52465 +
52466 + schedule();
52467 + } while (1);
52468 +
52469 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
52470 + learn_buffer_user_len = learn_buffer_len;
52471 + retval = learn_buffer_len;
52472 + learn_buffer_len = 0;
52473 +
52474 + spin_unlock(&gr_learn_lock);
52475 +
52476 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
52477 + retval = -EFAULT;
52478 +
52479 + mutex_unlock(&gr_learn_user_mutex);
52480 +out:
52481 + set_current_state(TASK_RUNNING);
52482 + remove_wait_queue(&learn_wait, &wait);
52483 + return retval;
52484 +}
52485 +
52486 +static unsigned int
52487 +poll_learn(struct file * file, poll_table * wait)
52488 +{
52489 + poll_wait(file, &learn_wait, wait);
52490 +
52491 + if (learn_buffer_len)
52492 + return (POLLIN | POLLRDNORM);
52493 +
52494 + return 0;
52495 +}
52496 +
52497 +void
52498 +gr_clear_learn_entries(void)
52499 +{
52500 + char *tmp;
52501 +
52502 + mutex_lock(&gr_learn_user_mutex);
52503 + spin_lock(&gr_learn_lock);
52504 + tmp = learn_buffer;
52505 + learn_buffer = NULL;
52506 + spin_unlock(&gr_learn_lock);
52507 + if (tmp)
52508 + vfree(tmp);
52509 + if (learn_buffer_user != NULL) {
52510 + vfree(learn_buffer_user);
52511 + learn_buffer_user = NULL;
52512 + }
52513 + learn_buffer_len = 0;
52514 + mutex_unlock(&gr_learn_user_mutex);
52515 +
52516 + return;
52517 +}
52518 +
52519 +void
52520 +gr_add_learn_entry(const char *fmt, ...)
52521 +{
52522 + va_list args;
52523 + unsigned int len;
52524 +
52525 + if (!gr_learn_attached)
52526 + return;
52527 +
52528 + spin_lock(&gr_learn_lock);
52529 +
52530 +	/* leave a gap at the end so we know when the buffer is "full" without
52531 +	   having to compute the exact length of the string we're trying to append
52532 +	*/
52533 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
52534 + spin_unlock(&gr_learn_lock);
52535 + wake_up_interruptible(&learn_wait);
52536 + return;
52537 + }
52538 + if (learn_buffer == NULL) {
52539 + spin_unlock(&gr_learn_lock);
52540 + return;
52541 + }
52542 +
52543 + va_start(args, fmt);
52544 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
52545 + va_end(args);
52546 +
52547 + learn_buffer_len += len + 1;
52548 +
52549 + spin_unlock(&gr_learn_lock);
52550 + wake_up_interruptible(&learn_wait);
52551 +
52552 + return;
52553 +}
52554 +
52555 +static int
52556 +open_learn(struct inode *inode, struct file *file)
52557 +{
52558 + if (file->f_mode & FMODE_READ && gr_learn_attached)
52559 + return -EBUSY;
52560 + if (file->f_mode & FMODE_READ) {
52561 + int retval = 0;
52562 + mutex_lock(&gr_learn_user_mutex);
52563 + if (learn_buffer == NULL)
52564 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
52565 + if (learn_buffer_user == NULL)
52566 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
52567 + if (learn_buffer == NULL) {
52568 + retval = -ENOMEM;
52569 + goto out_error;
52570 + }
52571 + if (learn_buffer_user == NULL) {
52572 + retval = -ENOMEM;
52573 + goto out_error;
52574 + }
52575 + learn_buffer_len = 0;
52576 + learn_buffer_user_len = 0;
52577 + gr_learn_attached = 1;
52578 +out_error:
52579 + mutex_unlock(&gr_learn_user_mutex);
52580 + return retval;
52581 + }
52582 + return 0;
52583 +}
52584 +
52585 +static int
52586 +close_learn(struct inode *inode, struct file *file)
52587 +{
52588 + if (file->f_mode & FMODE_READ) {
52589 + char *tmp = NULL;
52590 + mutex_lock(&gr_learn_user_mutex);
52591 + spin_lock(&gr_learn_lock);
52592 + tmp = learn_buffer;
52593 + learn_buffer = NULL;
52594 + spin_unlock(&gr_learn_lock);
52595 + if (tmp)
52596 + vfree(tmp);
52597 + if (learn_buffer_user != NULL) {
52598 + vfree(learn_buffer_user);
52599 + learn_buffer_user = NULL;
52600 + }
52601 + learn_buffer_len = 0;
52602 + learn_buffer_user_len = 0;
52603 + gr_learn_attached = 0;
52604 + mutex_unlock(&gr_learn_user_mutex);
52605 + }
52606 +
52607 + return 0;
52608 +}
52609 +
52610 +const struct file_operations grsec_fops = {
52611 + .read = read_learn,
52612 + .write = write_grsec_handler,
52613 + .open = open_learn,
52614 + .release = close_learn,
52615 + .poll = poll_learn,
52616 +};
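
gr_add_learn_entry() above appends NUL-separated records into one large buffer and reserves a tail gap so the writer can treat the buffer as "full" without measuring each record first; read_learn() then drains the accumulated records through a second buffer. The single-threaded sketch below illustrates only the append-and-walk part of that scheme; the buffer sizes and names are illustrative and all locking is omitted, so this is not the kernel implementation.

/* learn_buf_demo.c - single-threaded sketch of the append pattern used by
 * gr_add_learn_entry(): records are vsnprintf'd into one buffer, separated
 * by their terminating NULs, with a reserved tail gap. Sizes are illustrative;
 * the kernel uses a 512 KB buffer plus a spinlock and mutex. */
#include <stdio.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>

#define BUF_SIZE 4096
#define TAIL_GAP 256

static char *buf;
static size_t buf_len;

static void add_entry(const char *fmt, ...)
{
	va_list args;
	int len;

	/* same idea as the kernel check: refuse once we are inside the gap */
	if (buf_len > BUF_SIZE - TAIL_GAP)
		return;

	va_start(args, fmt);
	len = vsnprintf(buf + buf_len, BUF_SIZE - buf_len, fmt, args);
	va_end(args);

	buf_len += len + 1;	/* keep the NUL as a record separator */
}

int main(void)
{
	size_t off = 0;

	buf = calloc(1, BUF_SIZE);
	if (!buf)
		return 1;

	add_entry("subject:%s mode:%x", "/usr/bin/foo", 0x5);
	add_entry("ip:%s port:%d", "127.0.0.1", 22);

	/* reader side: walk the NUL-separated records */
	while (off < buf_len) {
		printf("record: %s\n", buf + off);
		off += strlen(buf + off) + 1;
	}
	free(buf);
	return 0;
}
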
52617 diff -urNp linux-2.6.32.44/grsecurity/gracl_res.c linux-2.6.32.44/grsecurity/gracl_res.c
52618 --- linux-2.6.32.44/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
52619 +++ linux-2.6.32.44/grsecurity/gracl_res.c 2011-04-17 15:56:46.000000000 -0400
52620 @@ -0,0 +1,67 @@
52621 +#include <linux/kernel.h>
52622 +#include <linux/sched.h>
52623 +#include <linux/gracl.h>
52624 +#include <linux/grinternal.h>
52625 +
52626 +static const char *restab_log[] = {
52627 + [RLIMIT_CPU] = "RLIMIT_CPU",
52628 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
52629 + [RLIMIT_DATA] = "RLIMIT_DATA",
52630 + [RLIMIT_STACK] = "RLIMIT_STACK",
52631 + [RLIMIT_CORE] = "RLIMIT_CORE",
52632 + [RLIMIT_RSS] = "RLIMIT_RSS",
52633 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
52634 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
52635 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
52636 + [RLIMIT_AS] = "RLIMIT_AS",
52637 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
52638 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
52639 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
52640 + [RLIMIT_NICE] = "RLIMIT_NICE",
52641 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
52642 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
52643 + [GR_CRASH_RES] = "RLIMIT_CRASH"
52644 +};
52645 +
52646 +void
52647 +gr_log_resource(const struct task_struct *task,
52648 + const int res, const unsigned long wanted, const int gt)
52649 +{
52650 + const struct cred *cred;
52651 + unsigned long rlim;
52652 +
52653 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
52654 + return;
52655 +
52656 +	// resource not yet supported
52657 + if (unlikely(!restab_log[res]))
52658 + return;
52659 +
52660 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
52661 + rlim = task->signal->rlim[res].rlim_max;
52662 + else
52663 + rlim = task->signal->rlim[res].rlim_cur;
52664 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
52665 + return;
52666 +
52667 + rcu_read_lock();
52668 + cred = __task_cred(task);
52669 +
52670 + if (res == RLIMIT_NPROC &&
52671 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
52672 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
52673 + goto out_rcu_unlock;
52674 + else if (res == RLIMIT_MEMLOCK &&
52675 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
52676 + goto out_rcu_unlock;
52677 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
52678 + goto out_rcu_unlock;
52679 + rcu_read_unlock();
52680 +
52681 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
52682 +
52683 + return;
52684 +out_rcu_unlock:
52685 + rcu_read_unlock();
52686 + return;
52687 +}
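
restab_log[] above relies on C99 designated initializers: any resource number not named in the table stays NULL, which is exactly what the `!restab_log[res]` guard in gr_log_resource() catches. A small standalone illustration of that pattern follows; the table size and the indices left out are made up for the example.

/* restab_demo.c - sketch of why gr_log_resource() checks !restab_log[res]:
 * with designated initializers, any index not named in the table is
 * zero-initialized, so an unlisted resource number simply maps to NULL. */
#include <stdio.h>

static const char *restab_log[16] = {
	[0] = "RLIMIT_CPU",
	[1] = "RLIMIT_FSIZE",
	[9] = "RLIMIT_AS",
	/* indices 2-8 and 10-15 are deliberately left NULL */
};

static void log_resource(int res)
{
	if (!restab_log[res]) {		/* resource not yet supported */
		printf("res %d: skipped\n", res);
		return;
	}
	printf("res %d: %s\n", res, restab_log[res]);
}

int main(void)
{
	log_resource(0);
	log_resource(5);
	log_resource(9);
	return 0;
}
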
52688 diff -urNp linux-2.6.32.44/grsecurity/gracl_segv.c linux-2.6.32.44/grsecurity/gracl_segv.c
52689 --- linux-2.6.32.44/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
52690 +++ linux-2.6.32.44/grsecurity/gracl_segv.c 2011-04-17 15:56:46.000000000 -0400
52691 @@ -0,0 +1,284 @@
52692 +#include <linux/kernel.h>
52693 +#include <linux/mm.h>
52694 +#include <asm/uaccess.h>
52695 +#include <asm/errno.h>
52696 +#include <asm/mman.h>
52697 +#include <net/sock.h>
52698 +#include <linux/file.h>
52699 +#include <linux/fs.h>
52700 +#include <linux/net.h>
52701 +#include <linux/in.h>
52702 +#include <linux/smp_lock.h>
52703 +#include <linux/slab.h>
52704 +#include <linux/types.h>
52705 +#include <linux/sched.h>
52706 +#include <linux/timer.h>
52707 +#include <linux/gracl.h>
52708 +#include <linux/grsecurity.h>
52709 +#include <linux/grinternal.h>
52710 +
52711 +static struct crash_uid *uid_set;
52712 +static unsigned short uid_used;
52713 +static DEFINE_SPINLOCK(gr_uid_lock);
52714 +extern rwlock_t gr_inode_lock;
52715 +extern struct acl_subject_label *
52716 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
52717 + struct acl_role_label *role);
52718 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
52719 +
52720 +int
52721 +gr_init_uidset(void)
52722 +{
52723 + uid_set =
52724 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
52725 + uid_used = 0;
52726 +
52727 + return uid_set ? 1 : 0;
52728 +}
52729 +
52730 +void
52731 +gr_free_uidset(void)
52732 +{
52733 + if (uid_set)
52734 + kfree(uid_set);
52735 +
52736 + return;
52737 +}
52738 +
52739 +int
52740 +gr_find_uid(const uid_t uid)
52741 +{
52742 + struct crash_uid *tmp = uid_set;
52743 + uid_t buid;
52744 + int low = 0, high = uid_used - 1, mid;
52745 +
52746 + while (high >= low) {
52747 + mid = (low + high) >> 1;
52748 + buid = tmp[mid].uid;
52749 + if (buid == uid)
52750 + return mid;
52751 + if (buid > uid)
52752 + high = mid - 1;
52753 + if (buid < uid)
52754 + low = mid + 1;
52755 + }
52756 +
52757 + return -1;
52758 +}
52759 +
52760 +static __inline__ void
52761 +gr_insertsort(void)
52762 +{
52763 + unsigned short i, j;
52764 + struct crash_uid index;
52765 +
52766 + for (i = 1; i < uid_used; i++) {
52767 + index = uid_set[i];
52768 + j = i;
52769 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
52770 + uid_set[j] = uid_set[j - 1];
52771 + j--;
52772 + }
52773 + uid_set[j] = index;
52774 + }
52775 +
52776 + return;
52777 +}
52778 +
52779 +static __inline__ void
52780 +gr_insert_uid(const uid_t uid, const unsigned long expires)
52781 +{
52782 + int loc;
52783 +
52784 + if (uid_used == GR_UIDTABLE_MAX)
52785 + return;
52786 +
52787 + loc = gr_find_uid(uid);
52788 +
52789 + if (loc >= 0) {
52790 + uid_set[loc].expires = expires;
52791 + return;
52792 + }
52793 +
52794 + uid_set[uid_used].uid = uid;
52795 + uid_set[uid_used].expires = expires;
52796 + uid_used++;
52797 +
52798 + gr_insertsort();
52799 +
52800 + return;
52801 +}
52802 +
52803 +void
52804 +gr_remove_uid(const unsigned short loc)
52805 +{
52806 + unsigned short i;
52807 +
52808 + for (i = loc + 1; i < uid_used; i++)
52809 + uid_set[i - 1] = uid_set[i];
52810 +
52811 + uid_used--;
52812 +
52813 + return;
52814 +}
52815 +
52816 +int
52817 +gr_check_crash_uid(const uid_t uid)
52818 +{
52819 + int loc;
52820 + int ret = 0;
52821 +
52822 + if (unlikely(!gr_acl_is_enabled()))
52823 + return 0;
52824 +
52825 + spin_lock(&gr_uid_lock);
52826 + loc = gr_find_uid(uid);
52827 +
52828 + if (loc < 0)
52829 + goto out_unlock;
52830 +
52831 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
52832 + gr_remove_uid(loc);
52833 + else
52834 + ret = 1;
52835 +
52836 +out_unlock:
52837 + spin_unlock(&gr_uid_lock);
52838 + return ret;
52839 +}
52840 +
52841 +static __inline__ int
52842 +proc_is_setxid(const struct cred *cred)
52843 +{
52844 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
52845 + cred->uid != cred->fsuid)
52846 + return 1;
52847 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
52848 + cred->gid != cred->fsgid)
52849 + return 1;
52850 +
52851 + return 0;
52852 +}
52853 +
52854 +void
52855 +gr_handle_crash(struct task_struct *task, const int sig)
52856 +{
52857 + struct acl_subject_label *curr;
52858 + struct acl_subject_label *curr2;
52859 + struct task_struct *tsk, *tsk2;
52860 + const struct cred *cred;
52861 + const struct cred *cred2;
52862 +
52863 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
52864 + return;
52865 +
52866 + if (unlikely(!gr_acl_is_enabled()))
52867 + return;
52868 +
52869 + curr = task->acl;
52870 +
52871 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
52872 + return;
52873 +
52874 + if (time_before_eq(curr->expires, get_seconds())) {
52875 + curr->expires = 0;
52876 + curr->crashes = 0;
52877 + }
52878 +
52879 + curr->crashes++;
52880 +
52881 + if (!curr->expires)
52882 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
52883 +
52884 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
52885 + time_after(curr->expires, get_seconds())) {
52886 + rcu_read_lock();
52887 + cred = __task_cred(task);
52888 + if (cred->uid && proc_is_setxid(cred)) {
52889 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
52890 + spin_lock(&gr_uid_lock);
52891 + gr_insert_uid(cred->uid, curr->expires);
52892 + spin_unlock(&gr_uid_lock);
52893 + curr->expires = 0;
52894 + curr->crashes = 0;
52895 + read_lock(&tasklist_lock);
52896 + do_each_thread(tsk2, tsk) {
52897 + cred2 = __task_cred(tsk);
52898 + if (tsk != task && cred2->uid == cred->uid)
52899 + gr_fake_force_sig(SIGKILL, tsk);
52900 + } while_each_thread(tsk2, tsk);
52901 + read_unlock(&tasklist_lock);
52902 + } else {
52903 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
52904 + read_lock(&tasklist_lock);
52905 + do_each_thread(tsk2, tsk) {
52906 + if (likely(tsk != task)) {
52907 + curr2 = tsk->acl;
52908 +
52909 + if (curr2->device == curr->device &&
52910 + curr2->inode == curr->inode)
52911 + gr_fake_force_sig(SIGKILL, tsk);
52912 + }
52913 + } while_each_thread(tsk2, tsk);
52914 + read_unlock(&tasklist_lock);
52915 + }
52916 + rcu_read_unlock();
52917 + }
52918 +
52919 + return;
52920 +}
52921 +
52922 +int
52923 +gr_check_crash_exec(const struct file *filp)
52924 +{
52925 + struct acl_subject_label *curr;
52926 +
52927 + if (unlikely(!gr_acl_is_enabled()))
52928 + return 0;
52929 +
52930 + read_lock(&gr_inode_lock);
52931 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
52932 + filp->f_path.dentry->d_inode->i_sb->s_dev,
52933 + current->role);
52934 + read_unlock(&gr_inode_lock);
52935 +
52936 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
52937 + (!curr->crashes && !curr->expires))
52938 + return 0;
52939 +
52940 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
52941 + time_after(curr->expires, get_seconds()))
52942 + return 1;
52943 + else if (time_before_eq(curr->expires, get_seconds())) {
52944 + curr->crashes = 0;
52945 + curr->expires = 0;
52946 + }
52947 +
52948 + return 0;
52949 +}
52950 +
52951 +void
52952 +gr_handle_alertkill(struct task_struct *task)
52953 +{
52954 + struct acl_subject_label *curracl;
52955 + __u32 curr_ip;
52956 + struct task_struct *p, *p2;
52957 +
52958 + if (unlikely(!gr_acl_is_enabled()))
52959 + return;
52960 +
52961 + curracl = task->acl;
52962 + curr_ip = task->signal->curr_ip;
52963 +
52964 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
52965 + read_lock(&tasklist_lock);
52966 + do_each_thread(p2, p) {
52967 + if (p->signal->curr_ip == curr_ip)
52968 + gr_fake_force_sig(SIGKILL, p);
52969 + } while_each_thread(p2, p);
52970 + read_unlock(&tasklist_lock);
52971 + } else if (curracl->mode & GR_KILLPROC)
52972 + gr_fake_force_sig(SIGKILL, task);
52973 +
52974 + return;
52975 +}
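
gracl_segv.c above keeps the crash-UID table as a flat array sorted by UID: gr_find_uid() binary-searches it, and gr_insert_uid()/gr_insertsort() re-sort after each append. The userspace sketch below mirrors that logic with an illustrative table size and without the spinlock or expiry handling.

/* uidset_demo.c - userspace sketch of the crash-UID table: a flat array
 * sorted by UID, searched with a binary search and re-sorted with an
 * insertion sort after each append. TABLE_MAX is illustrative. */
#include <stdio.h>

#define TABLE_MAX 64

struct crash_uid {
	unsigned int uid;
	unsigned long expires;
};

static struct crash_uid uid_set[TABLE_MAX];
static unsigned short uid_used;

static int find_uid(unsigned int uid)
{
	int low = 0, high = uid_used - 1, mid;

	while (high >= low) {
		mid = (low + high) >> 1;
		if (uid_set[mid].uid == uid)
			return mid;
		if (uid_set[mid].uid > uid)
			high = mid - 1;
		else
			low = mid + 1;
	}
	return -1;
}

static void insert_uid(unsigned int uid, unsigned long expires)
{
	int loc = find_uid(uid);
	unsigned short i, j;
	struct crash_uid tmp;

	if (loc >= 0) {			/* already present: refresh expiry */
		uid_set[loc].expires = expires;
		return;
	}
	if (uid_used == TABLE_MAX)
		return;

	uid_set[uid_used].uid = uid;
	uid_set[uid_used].expires = expires;
	uid_used++;

	/* insertion sort, as in gr_insertsort() */
	for (i = 1; i < uid_used; i++) {
		tmp = uid_set[i];
		j = i;
		while (j > 0 && uid_set[j - 1].uid > tmp.uid) {
			uid_set[j] = uid_set[j - 1];
			j--;
		}
		uid_set[j] = tmp;
	}
}

int main(void)
{
	insert_uid(1000, 300);
	insert_uid(33, 60);
	insert_uid(500, 120);
	printf("uid 500 at index %d, uid 42 at index %d\n",
	       find_uid(500), find_uid(42));
	return 0;
}
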
52976 diff -urNp linux-2.6.32.44/grsecurity/gracl_shm.c linux-2.6.32.44/grsecurity/gracl_shm.c
52977 --- linux-2.6.32.44/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
52978 +++ linux-2.6.32.44/grsecurity/gracl_shm.c 2011-04-17 15:56:46.000000000 -0400
52979 @@ -0,0 +1,40 @@
52980 +#include <linux/kernel.h>
52981 +#include <linux/mm.h>
52982 +#include <linux/sched.h>
52983 +#include <linux/file.h>
52984 +#include <linux/ipc.h>
52985 +#include <linux/gracl.h>
52986 +#include <linux/grsecurity.h>
52987 +#include <linux/grinternal.h>
52988 +
52989 +int
52990 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
52991 + const time_t shm_createtime, const uid_t cuid, const int shmid)
52992 +{
52993 + struct task_struct *task;
52994 +
52995 + if (!gr_acl_is_enabled())
52996 + return 1;
52997 +
52998 + rcu_read_lock();
52999 + read_lock(&tasklist_lock);
53000 +
53001 + task = find_task_by_vpid(shm_cprid);
53002 +
53003 + if (unlikely(!task))
53004 + task = find_task_by_vpid(shm_lapid);
53005 +
53006 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
53007 + (task->pid == shm_lapid)) &&
53008 + (task->acl->mode & GR_PROTSHM) &&
53009 + (task->acl != current->acl))) {
53010 + read_unlock(&tasklist_lock);
53011 + rcu_read_unlock();
53012 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
53013 + return 0;
53014 + }
53015 + read_unlock(&tasklist_lock);
53016 + rcu_read_unlock();
53017 +
53018 + return 1;
53019 +}
53020 diff -urNp linux-2.6.32.44/grsecurity/grsec_chdir.c linux-2.6.32.44/grsecurity/grsec_chdir.c
53021 --- linux-2.6.32.44/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
53022 +++ linux-2.6.32.44/grsecurity/grsec_chdir.c 2011-04-17 15:56:46.000000000 -0400
53023 @@ -0,0 +1,19 @@
53024 +#include <linux/kernel.h>
53025 +#include <linux/sched.h>
53026 +#include <linux/fs.h>
53027 +#include <linux/file.h>
53028 +#include <linux/grsecurity.h>
53029 +#include <linux/grinternal.h>
53030 +
53031 +void
53032 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
53033 +{
53034 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
53035 + if ((grsec_enable_chdir && grsec_enable_group &&
53036 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
53037 + !grsec_enable_group)) {
53038 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
53039 + }
53040 +#endif
53041 + return;
53042 +}
53043 diff -urNp linux-2.6.32.44/grsecurity/grsec_chroot.c linux-2.6.32.44/grsecurity/grsec_chroot.c
53044 --- linux-2.6.32.44/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
53045 +++ linux-2.6.32.44/grsecurity/grsec_chroot.c 2011-07-18 17:14:10.000000000 -0400
53046 @@ -0,0 +1,384 @@
53047 +#include <linux/kernel.h>
53048 +#include <linux/module.h>
53049 +#include <linux/sched.h>
53050 +#include <linux/file.h>
53051 +#include <linux/fs.h>
53052 +#include <linux/mount.h>
53053 +#include <linux/types.h>
53054 +#include <linux/pid_namespace.h>
53055 +#include <linux/grsecurity.h>
53056 +#include <linux/grinternal.h>
53057 +
53058 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
53059 +{
53060 +#ifdef CONFIG_GRKERNSEC
53061 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
53062 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
53063 + task->gr_is_chrooted = 1;
53064 + else
53065 + task->gr_is_chrooted = 0;
53066 +
53067 + task->gr_chroot_dentry = path->dentry;
53068 +#endif
53069 + return;
53070 +}
53071 +
53072 +void gr_clear_chroot_entries(struct task_struct *task)
53073 +{
53074 +#ifdef CONFIG_GRKERNSEC
53075 + task->gr_is_chrooted = 0;
53076 + task->gr_chroot_dentry = NULL;
53077 +#endif
53078 + return;
53079 +}
53080 +
53081 +int
53082 +gr_handle_chroot_unix(const pid_t pid)
53083 +{
53084 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
53085 + struct task_struct *p;
53086 +
53087 + if (unlikely(!grsec_enable_chroot_unix))
53088 + return 1;
53089 +
53090 + if (likely(!proc_is_chrooted(current)))
53091 + return 1;
53092 +
53093 + rcu_read_lock();
53094 + read_lock(&tasklist_lock);
53095 +
53096 + p = find_task_by_vpid_unrestricted(pid);
53097 + if (unlikely(p && !have_same_root(current, p))) {
53098 + read_unlock(&tasklist_lock);
53099 + rcu_read_unlock();
53100 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
53101 + return 0;
53102 + }
53103 + read_unlock(&tasklist_lock);
53104 + rcu_read_unlock();
53105 +#endif
53106 + return 1;
53107 +}
53108 +
53109 +int
53110 +gr_handle_chroot_nice(void)
53111 +{
53112 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
53113 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
53114 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
53115 + return -EPERM;
53116 + }
53117 +#endif
53118 + return 0;
53119 +}
53120 +
53121 +int
53122 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
53123 +{
53124 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
53125 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
53126 + && proc_is_chrooted(current)) {
53127 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
53128 + return -EACCES;
53129 + }
53130 +#endif
53131 + return 0;
53132 +}
53133 +
53134 +int
53135 +gr_handle_chroot_rawio(const struct inode *inode)
53136 +{
53137 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53138 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
53139 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
53140 + return 1;
53141 +#endif
53142 + return 0;
53143 +}
53144 +
53145 +int
53146 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
53147 +{
53148 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53149 + struct task_struct *p;
53150 + int ret = 0;
53151 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
53152 + return ret;
53153 +
53154 + read_lock(&tasklist_lock);
53155 + do_each_pid_task(pid, type, p) {
53156 + if (!have_same_root(current, p)) {
53157 + ret = 1;
53158 + goto out;
53159 + }
53160 + } while_each_pid_task(pid, type, p);
53161 +out:
53162 + read_unlock(&tasklist_lock);
53163 + return ret;
53164 +#endif
53165 + return 0;
53166 +}
53167 +
53168 +int
53169 +gr_pid_is_chrooted(struct task_struct *p)
53170 +{
53171 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53172 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
53173 + return 0;
53174 +
53175 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
53176 + !have_same_root(current, p)) {
53177 + return 1;
53178 + }
53179 +#endif
53180 + return 0;
53181 +}
53182 +
53183 +EXPORT_SYMBOL(gr_pid_is_chrooted);
53184 +
53185 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
53186 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
53187 +{
53188 + struct dentry *dentry = (struct dentry *)u_dentry;
53189 + struct vfsmount *mnt = (struct vfsmount *)u_mnt;
53190 + struct dentry *realroot;
53191 + struct vfsmount *realrootmnt;
53192 + struct dentry *currentroot;
53193 + struct vfsmount *currentmnt;
53194 + struct task_struct *reaper = &init_task;
53195 + int ret = 1;
53196 +
53197 + read_lock(&reaper->fs->lock);
53198 + realrootmnt = mntget(reaper->fs->root.mnt);
53199 + realroot = dget(reaper->fs->root.dentry);
53200 + read_unlock(&reaper->fs->lock);
53201 +
53202 + read_lock(&current->fs->lock);
53203 + currentmnt = mntget(current->fs->root.mnt);
53204 + currentroot = dget(current->fs->root.dentry);
53205 + read_unlock(&current->fs->lock);
53206 +
53207 + spin_lock(&dcache_lock);
53208 + for (;;) {
53209 + if (unlikely((dentry == realroot && mnt == realrootmnt)
53210 + || (dentry == currentroot && mnt == currentmnt)))
53211 + break;
53212 + if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
53213 + if (mnt->mnt_parent == mnt)
53214 + break;
53215 + dentry = mnt->mnt_mountpoint;
53216 + mnt = mnt->mnt_parent;
53217 + continue;
53218 + }
53219 + dentry = dentry->d_parent;
53220 + }
53221 + spin_unlock(&dcache_lock);
53222 +
53223 + dput(currentroot);
53224 + mntput(currentmnt);
53225 +
53226 + /* access is outside of chroot */
53227 + if (dentry == realroot && mnt == realrootmnt)
53228 + ret = 0;
53229 +
53230 + dput(realroot);
53231 + mntput(realrootmnt);
53232 + return ret;
53233 +}
53234 +#endif
53235 +
53236 +int
53237 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
53238 +{
53239 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
53240 + if (!grsec_enable_chroot_fchdir)
53241 + return 1;
53242 +
53243 + if (!proc_is_chrooted(current))
53244 + return 1;
53245 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
53246 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
53247 + return 0;
53248 + }
53249 +#endif
53250 + return 1;
53251 +}
53252 +
53253 +int
53254 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
53255 + const time_t shm_createtime)
53256 +{
53257 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
53258 + struct task_struct *p;
53259 + time_t starttime;
53260 +
53261 + if (unlikely(!grsec_enable_chroot_shmat))
53262 + return 1;
53263 +
53264 + if (likely(!proc_is_chrooted(current)))
53265 + return 1;
53266 +
53267 + rcu_read_lock();
53268 + read_lock(&tasklist_lock);
53269 +
53270 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
53271 + starttime = p->start_time.tv_sec;
53272 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
53273 + if (have_same_root(current, p)) {
53274 + goto allow;
53275 + } else {
53276 + read_unlock(&tasklist_lock);
53277 + rcu_read_unlock();
53278 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
53279 + return 0;
53280 + }
53281 + }
53282 +		/* creator exited or pid was reused; fall through to the next check */
53283 + }
53284 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
53285 + if (unlikely(!have_same_root(current, p))) {
53286 + read_unlock(&tasklist_lock);
53287 + rcu_read_unlock();
53288 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
53289 + return 0;
53290 + }
53291 + }
53292 +
53293 +allow:
53294 + read_unlock(&tasklist_lock);
53295 + rcu_read_unlock();
53296 +#endif
53297 + return 1;
53298 +}
53299 +
53300 +void
53301 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
53302 +{
53303 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
53304 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
53305 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
53306 +#endif
53307 + return;
53308 +}
53309 +
53310 +int
53311 +gr_handle_chroot_mknod(const struct dentry *dentry,
53312 + const struct vfsmount *mnt, const int mode)
53313 +{
53314 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
53315 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
53316 + proc_is_chrooted(current)) {
53317 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
53318 + return -EPERM;
53319 + }
53320 +#endif
53321 + return 0;
53322 +}
53323 +
53324 +int
53325 +gr_handle_chroot_mount(const struct dentry *dentry,
53326 + const struct vfsmount *mnt, const char *dev_name)
53327 +{
53328 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
53329 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
53330 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
53331 + return -EPERM;
53332 + }
53333 +#endif
53334 + return 0;
53335 +}
53336 +
53337 +int
53338 +gr_handle_chroot_pivot(void)
53339 +{
53340 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
53341 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
53342 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
53343 + return -EPERM;
53344 + }
53345 +#endif
53346 + return 0;
53347 +}
53348 +
53349 +int
53350 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
53351 +{
53352 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
53353 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
53354 + !gr_is_outside_chroot(dentry, mnt)) {
53355 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
53356 + return -EPERM;
53357 + }
53358 +#endif
53359 + return 0;
53360 +}
53361 +
53362 +int
53363 +gr_handle_chroot_caps(struct path *path)
53364 +{
53365 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53366 + if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
53367 + (init_task.fs->root.dentry != path->dentry) &&
53368 + (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
53369 +
53370 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
53371 + const struct cred *old = current_cred();
53372 + struct cred *new = prepare_creds();
53373 + if (new == NULL)
53374 + return 1;
53375 +
53376 + new->cap_permitted = cap_drop(old->cap_permitted,
53377 + chroot_caps);
53378 + new->cap_inheritable = cap_drop(old->cap_inheritable,
53379 + chroot_caps);
53380 + new->cap_effective = cap_drop(old->cap_effective,
53381 + chroot_caps);
53382 +
53383 + commit_creds(new);
53384 +
53385 + return 0;
53386 + }
53387 +#endif
53388 + return 0;
53389 +}
53390 +
53391 +int
53392 +gr_handle_chroot_sysctl(const int op)
53393 +{
53394 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
53395 + if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
53396 + && (op & MAY_WRITE))
53397 + return -EACCES;
53398 +#endif
53399 + return 0;
53400 +}
53401 +
53402 +void
53403 +gr_handle_chroot_chdir(struct path *path)
53404 +{
53405 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
53406 + if (grsec_enable_chroot_chdir)
53407 + set_fs_pwd(current->fs, path);
53408 +#endif
53409 + return;
53410 +}
53411 +
53412 +int
53413 +gr_handle_chroot_chmod(const struct dentry *dentry,
53414 + const struct vfsmount *mnt, const int mode)
53415 +{
53416 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
53417 + /* allow chmod +s on directories, but not on files */
53418 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
53419 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
53420 + proc_is_chrooted(current)) {
53421 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
53422 + return -EPERM;
53423 + }
53424 +#endif
53425 + return 0;
53426 +}
53427 +
53428 +#ifdef CONFIG_SECURITY
53429 +EXPORT_SYMBOL(gr_handle_chroot_caps);
53430 +#endif
53431 diff -urNp linux-2.6.32.44/grsecurity/grsec_disabled.c linux-2.6.32.44/grsecurity/grsec_disabled.c
53432 --- linux-2.6.32.44/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
53433 +++ linux-2.6.32.44/grsecurity/grsec_disabled.c 2011-04-17 15:56:46.000000000 -0400
53434 @@ -0,0 +1,447 @@
53435 +#include <linux/kernel.h>
53436 +#include <linux/module.h>
53437 +#include <linux/sched.h>
53438 +#include <linux/file.h>
53439 +#include <linux/fs.h>
53440 +#include <linux/kdev_t.h>
53441 +#include <linux/net.h>
53442 +#include <linux/in.h>
53443 +#include <linux/ip.h>
53444 +#include <linux/skbuff.h>
53445 +#include <linux/sysctl.h>
53446 +
53447 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
53448 +void
53449 +pax_set_initial_flags(struct linux_binprm *bprm)
53450 +{
53451 + return;
53452 +}
53453 +#endif
53454 +
53455 +#ifdef CONFIG_SYSCTL
53456 +__u32
53457 +gr_handle_sysctl(const struct ctl_table * table, const int op)
53458 +{
53459 + return 0;
53460 +}
53461 +#endif
53462 +
53463 +#ifdef CONFIG_TASKSTATS
53464 +int gr_is_taskstats_denied(int pid)
53465 +{
53466 + return 0;
53467 +}
53468 +#endif
53469 +
53470 +int
53471 +gr_acl_is_enabled(void)
53472 +{
53473 + return 0;
53474 +}
53475 +
53476 +int
53477 +gr_handle_rawio(const struct inode *inode)
53478 +{
53479 + return 0;
53480 +}
53481 +
53482 +void
53483 +gr_acl_handle_psacct(struct task_struct *task, const long code)
53484 +{
53485 + return;
53486 +}
53487 +
53488 +int
53489 +gr_handle_ptrace(struct task_struct *task, const long request)
53490 +{
53491 + return 0;
53492 +}
53493 +
53494 +int
53495 +gr_handle_proc_ptrace(struct task_struct *task)
53496 +{
53497 + return 0;
53498 +}
53499 +
53500 +void
53501 +gr_learn_resource(const struct task_struct *task,
53502 + const int res, const unsigned long wanted, const int gt)
53503 +{
53504 + return;
53505 +}
53506 +
53507 +int
53508 +gr_set_acls(const int type)
53509 +{
53510 + return 0;
53511 +}
53512 +
53513 +int
53514 +gr_check_hidden_task(const struct task_struct *tsk)
53515 +{
53516 + return 0;
53517 +}
53518 +
53519 +int
53520 +gr_check_protected_task(const struct task_struct *task)
53521 +{
53522 + return 0;
53523 +}
53524 +
53525 +int
53526 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
53527 +{
53528 + return 0;
53529 +}
53530 +
53531 +void
53532 +gr_copy_label(struct task_struct *tsk)
53533 +{
53534 + return;
53535 +}
53536 +
53537 +void
53538 +gr_set_pax_flags(struct task_struct *task)
53539 +{
53540 + return;
53541 +}
53542 +
53543 +int
53544 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
53545 + const int unsafe_share)
53546 +{
53547 + return 0;
53548 +}
53549 +
53550 +void
53551 +gr_handle_delete(const ino_t ino, const dev_t dev)
53552 +{
53553 + return;
53554 +}
53555 +
53556 +void
53557 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
53558 +{
53559 + return;
53560 +}
53561 +
53562 +void
53563 +gr_handle_crash(struct task_struct *task, const int sig)
53564 +{
53565 + return;
53566 +}
53567 +
53568 +int
53569 +gr_check_crash_exec(const struct file *filp)
53570 +{
53571 + return 0;
53572 +}
53573 +
53574 +int
53575 +gr_check_crash_uid(const uid_t uid)
53576 +{
53577 + return 0;
53578 +}
53579 +
53580 +void
53581 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53582 + struct dentry *old_dentry,
53583 + struct dentry *new_dentry,
53584 + struct vfsmount *mnt, const __u8 replace)
53585 +{
53586 + return;
53587 +}
53588 +
53589 +int
53590 +gr_search_socket(const int family, const int type, const int protocol)
53591 +{
53592 + return 1;
53593 +}
53594 +
53595 +int
53596 +gr_search_connectbind(const int mode, const struct socket *sock,
53597 + const struct sockaddr_in *addr)
53598 +{
53599 + return 0;
53600 +}
53601 +
53602 +int
53603 +gr_is_capable(const int cap)
53604 +{
53605 + return 1;
53606 +}
53607 +
53608 +int
53609 +gr_is_capable_nolog(const int cap)
53610 +{
53611 + return 1;
53612 +}
53613 +
53614 +void
53615 +gr_handle_alertkill(struct task_struct *task)
53616 +{
53617 + return;
53618 +}
53619 +
53620 +__u32
53621 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
53622 +{
53623 + return 1;
53624 +}
53625 +
53626 +__u32
53627 +gr_acl_handle_hidden_file(const struct dentry * dentry,
53628 + const struct vfsmount * mnt)
53629 +{
53630 + return 1;
53631 +}
53632 +
53633 +__u32
53634 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
53635 + const int fmode)
53636 +{
53637 + return 1;
53638 +}
53639 +
53640 +__u32
53641 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
53642 +{
53643 + return 1;
53644 +}
53645 +
53646 +__u32
53647 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
53648 +{
53649 + return 1;
53650 +}
53651 +
53652 +int
53653 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
53654 + unsigned int *vm_flags)
53655 +{
53656 + return 1;
53657 +}
53658 +
53659 +__u32
53660 +gr_acl_handle_truncate(const struct dentry * dentry,
53661 + const struct vfsmount * mnt)
53662 +{
53663 + return 1;
53664 +}
53665 +
53666 +__u32
53667 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
53668 +{
53669 + return 1;
53670 +}
53671 +
53672 +__u32
53673 +gr_acl_handle_access(const struct dentry * dentry,
53674 + const struct vfsmount * mnt, const int fmode)
53675 +{
53676 + return 1;
53677 +}
53678 +
53679 +__u32
53680 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
53681 + mode_t mode)
53682 +{
53683 + return 1;
53684 +}
53685 +
53686 +__u32
53687 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
53688 + mode_t mode)
53689 +{
53690 + return 1;
53691 +}
53692 +
53693 +__u32
53694 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
53695 +{
53696 + return 1;
53697 +}
53698 +
53699 +__u32
53700 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
53701 +{
53702 + return 1;
53703 +}
53704 +
53705 +void
53706 +grsecurity_init(void)
53707 +{
53708 + return;
53709 +}
53710 +
53711 +__u32
53712 +gr_acl_handle_mknod(const struct dentry * new_dentry,
53713 + const struct dentry * parent_dentry,
53714 + const struct vfsmount * parent_mnt,
53715 + const int mode)
53716 +{
53717 + return 1;
53718 +}
53719 +
53720 +__u32
53721 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
53722 + const struct dentry * parent_dentry,
53723 + const struct vfsmount * parent_mnt)
53724 +{
53725 + return 1;
53726 +}
53727 +
53728 +__u32
53729 +gr_acl_handle_symlink(const struct dentry * new_dentry,
53730 + const struct dentry * parent_dentry,
53731 + const struct vfsmount * parent_mnt, const char *from)
53732 +{
53733 + return 1;
53734 +}
53735 +
53736 +__u32
53737 +gr_acl_handle_link(const struct dentry * new_dentry,
53738 + const struct dentry * parent_dentry,
53739 + const struct vfsmount * parent_mnt,
53740 + const struct dentry * old_dentry,
53741 + const struct vfsmount * old_mnt, const char *to)
53742 +{
53743 + return 1;
53744 +}
53745 +
53746 +int
53747 +gr_acl_handle_rename(const struct dentry *new_dentry,
53748 + const struct dentry *parent_dentry,
53749 + const struct vfsmount *parent_mnt,
53750 + const struct dentry *old_dentry,
53751 + const struct inode *old_parent_inode,
53752 + const struct vfsmount *old_mnt, const char *newname)
53753 +{
53754 + return 0;
53755 +}
53756 +
53757 +int
53758 +gr_acl_handle_filldir(const struct file *file, const char *name,
53759 + const int namelen, const ino_t ino)
53760 +{
53761 + return 1;
53762 +}
53763 +
53764 +int
53765 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
53766 + const time_t shm_createtime, const uid_t cuid, const int shmid)
53767 +{
53768 + return 1;
53769 +}
53770 +
53771 +int
53772 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
53773 +{
53774 + return 0;
53775 +}
53776 +
53777 +int
53778 +gr_search_accept(const struct socket *sock)
53779 +{
53780 + return 0;
53781 +}
53782 +
53783 +int
53784 +gr_search_listen(const struct socket *sock)
53785 +{
53786 + return 0;
53787 +}
53788 +
53789 +int
53790 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
53791 +{
53792 + return 0;
53793 +}
53794 +
53795 +__u32
53796 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
53797 +{
53798 + return 1;
53799 +}
53800 +
53801 +__u32
53802 +gr_acl_handle_creat(const struct dentry * dentry,
53803 + const struct dentry * p_dentry,
53804 + const struct vfsmount * p_mnt, const int fmode,
53805 + const int imode)
53806 +{
53807 + return 1;
53808 +}
53809 +
53810 +void
53811 +gr_acl_handle_exit(void)
53812 +{
53813 + return;
53814 +}
53815 +
53816 +int
53817 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
53818 +{
53819 + return 1;
53820 +}
53821 +
53822 +void
53823 +gr_set_role_label(const uid_t uid, const gid_t gid)
53824 +{
53825 + return;
53826 +}
53827 +
53828 +int
53829 +gr_acl_handle_procpidmem(const struct task_struct *task)
53830 +{
53831 + return 0;
53832 +}
53833 +
53834 +int
53835 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
53836 +{
53837 + return 0;
53838 +}
53839 +
53840 +int
53841 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
53842 +{
53843 + return 0;
53844 +}
53845 +
53846 +void
53847 +gr_set_kernel_label(struct task_struct *task)
53848 +{
53849 + return;
53850 +}
53851 +
53852 +int
53853 +gr_check_user_change(int real, int effective, int fs)
53854 +{
53855 + return 0;
53856 +}
53857 +
53858 +int
53859 +gr_check_group_change(int real, int effective, int fs)
53860 +{
53861 + return 0;
53862 +}
53863 +
53864 +int gr_acl_enable_at_secure(void)
53865 +{
53866 + return 0;
53867 +}
53868 +
53869 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
53870 +{
53871 + return dentry->d_inode->i_sb->s_dev;
53872 +}
53873 +
53874 +EXPORT_SYMBOL(gr_is_capable);
53875 +EXPORT_SYMBOL(gr_is_capable_nolog);
53876 +EXPORT_SYMBOL(gr_learn_resource);
53877 +EXPORT_SYMBOL(gr_set_kernel_label);
53878 +#ifdef CONFIG_SECURITY
53879 +EXPORT_SYMBOL(gr_check_user_change);
53880 +EXPORT_SYMBOL(gr_check_group_change);
53881 +#endif
53882 diff -urNp linux-2.6.32.44/grsecurity/grsec_exec.c linux-2.6.32.44/grsecurity/grsec_exec.c
53883 --- linux-2.6.32.44/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
53884 +++ linux-2.6.32.44/grsecurity/grsec_exec.c 2011-04-17 15:56:46.000000000 -0400
53885 @@ -0,0 +1,148 @@
53886 +#include <linux/kernel.h>
53887 +#include <linux/sched.h>
53888 +#include <linux/file.h>
53889 +#include <linux/binfmts.h>
53890 +#include <linux/smp_lock.h>
53891 +#include <linux/fs.h>
53892 +#include <linux/types.h>
53893 +#include <linux/grdefs.h>
53894 +#include <linux/grinternal.h>
53895 +#include <linux/capability.h>
53896 +#include <linux/compat.h>
53897 +
53898 +#include <asm/uaccess.h>
53899 +
53900 +#ifdef CONFIG_GRKERNSEC_EXECLOG
53901 +static char gr_exec_arg_buf[132];
53902 +static DEFINE_MUTEX(gr_exec_arg_mutex);
53903 +#endif
53904 +
53905 +int
53906 +gr_handle_nproc(void)
53907 +{
53908 +#ifdef CONFIG_GRKERNSEC_EXECVE
53909 + const struct cred *cred = current_cred();
53910 + if (grsec_enable_execve && cred->user &&
53911 + (atomic_read(&cred->user->processes) >
53912 + current->signal->rlim[RLIMIT_NPROC].rlim_cur) &&
53913 + !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE)) {
53914 + gr_log_noargs(GR_DONT_AUDIT, GR_NPROC_MSG);
53915 + return -EAGAIN;
53916 + }
53917 +#endif
53918 + return 0;
53919 +}
53920 +
53921 +void
53922 +gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
53923 +{
53924 +#ifdef CONFIG_GRKERNSEC_EXECLOG
53925 + char *grarg = gr_exec_arg_buf;
53926 + unsigned int i, x, execlen = 0;
53927 + char c;
53928 +
53929 + if (!((grsec_enable_execlog && grsec_enable_group &&
53930 + in_group_p(grsec_audit_gid))
53931 + || (grsec_enable_execlog && !grsec_enable_group)))
53932 + return;
53933 +
53934 + mutex_lock(&gr_exec_arg_mutex);
53935 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
53936 +
53937 + if (unlikely(argv == NULL))
53938 + goto log;
53939 +
53940 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
53941 + const char __user *p;
53942 + unsigned int len;
53943 +
53944 + if (copy_from_user(&p, argv + i, sizeof(p)))
53945 + goto log;
53946 + if (!p)
53947 + goto log;
53948 + len = strnlen_user(p, 128 - execlen);
53949 + if (len > 128 - execlen)
53950 + len = 128 - execlen;
53951 + else if (len > 0)
53952 + len--;
53953 + if (copy_from_user(grarg + execlen, p, len))
53954 + goto log;
53955 +
53956 + /* rewrite unprintable characters */
53957 + for (x = 0; x < len; x++) {
53958 + c = *(grarg + execlen + x);
53959 + if (c < 32 || c > 126)
53960 + *(grarg + execlen + x) = ' ';
53961 + }
53962 +
53963 + execlen += len;
53964 + *(grarg + execlen) = ' ';
53965 + *(grarg + execlen + 1) = '\0';
53966 + execlen++;
53967 + }
53968 +
53969 + log:
53970 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
53971 + bprm->file->f_path.mnt, grarg);
53972 + mutex_unlock(&gr_exec_arg_mutex);
53973 +#endif
53974 + return;
53975 +}
53976 +
53977 +#ifdef CONFIG_COMPAT
53978 +void
53979 +gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
53980 +{
53981 +#ifdef CONFIG_GRKERNSEC_EXECLOG
53982 + char *grarg = gr_exec_arg_buf;
53983 + unsigned int i, x, execlen = 0;
53984 + char c;
53985 +
53986 + if (!((grsec_enable_execlog && grsec_enable_group &&
53987 + in_group_p(grsec_audit_gid))
53988 + || (grsec_enable_execlog && !grsec_enable_group)))
53989 + return;
53990 +
53991 + mutex_lock(&gr_exec_arg_mutex);
53992 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
53993 +
53994 + if (unlikely(argv == NULL))
53995 + goto log;
53996 +
53997 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
53998 + compat_uptr_t p;
53999 + unsigned int len;
54000 +
54001 + if (get_user(p, argv + i))
54002 + goto log;
54003 + len = strnlen_user(compat_ptr(p), 128 - execlen);
54004 + if (len > 128 - execlen)
54005 + len = 128 - execlen;
54006 + else if (len > 0)
54007 + len--;
54008 + else
54009 + goto log;
54010 + if (copy_from_user(grarg + execlen, compat_ptr(p), len))
54011 + goto log;
54012 +
54013 + /* rewrite unprintable characters */
54014 + for (x = 0; x < len; x++) {
54015 + c = *(grarg + execlen + x);
54016 + if (c < 32 || c > 126)
54017 + *(grarg + execlen + x) = ' ';
54018 + }
54019 +
54020 + execlen += len;
54021 + *(grarg + execlen) = ' ';
54022 + *(grarg + execlen + 1) = '\0';
54023 + execlen++;
54024 + }
54025 +
54026 + log:
54027 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
54028 + bprm->file->f_path.mnt, grarg);
54029 + mutex_unlock(&gr_exec_arg_mutex);
54030 +#endif
54031 + return;
54032 +}
54033 +#endif
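
gr_handle_exec_args() above flattens the exec arguments into a small fixed buffer, truncating at 128 bytes of payload and rewriting unprintable bytes to spaces before the line is logged. The sketch below reproduces that flattening for an in-process argv; the buffer sizes match the kernel's, but the copy_from_user() handling and the mutex are omitted, so treat it as an illustration only.

/* execlog_demo.c - userspace sketch of the argv flattening done by
 * gr_handle_exec_args(): argument strings are packed into a small fixed
 * buffer, truncated as needed, and unprintable bytes are rewritten to
 * spaces. Works on an in-process argv instead of __user pointers. */
#include <stdio.h>
#include <string.h>

#define ARG_BUF 132
#define ARG_MAX_PAYLOAD 128

static void flatten_args(char *out, char *const argv[], int argc)
{
	unsigned int i, x, execlen = 0, len;

	memset(out, 0, ARG_BUF);

	for (i = 0; i < (unsigned int)argc && execlen < ARG_MAX_PAYLOAD; i++) {
		len = strlen(argv[i]);
		if (len > ARG_MAX_PAYLOAD - execlen)
			len = ARG_MAX_PAYLOAD - execlen;
		memcpy(out + execlen, argv[i], len);

		/* rewrite unprintable characters, as in the kernel code */
		for (x = 0; x < len; x++)
			if (out[execlen + x] < 32 || out[execlen + x] > 126)
				out[execlen + x] = ' ';

		execlen += len;
		out[execlen] = ' ';
		out[execlen + 1] = '\0';
		execlen++;
	}
}

int main(void)
{
	char *argv[] = { "/bin/echo", "hello", "wor\tld" };
	char line[ARG_BUF];

	flatten_args(line, argv, 3);
	printf("exec: %s\n", line);
	return 0;
}
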
54034 diff -urNp linux-2.6.32.44/grsecurity/grsec_fifo.c linux-2.6.32.44/grsecurity/grsec_fifo.c
54035 --- linux-2.6.32.44/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
54036 +++ linux-2.6.32.44/grsecurity/grsec_fifo.c 2011-04-17 15:56:46.000000000 -0400
54037 @@ -0,0 +1,24 @@
54038 +#include <linux/kernel.h>
54039 +#include <linux/sched.h>
54040 +#include <linux/fs.h>
54041 +#include <linux/file.h>
54042 +#include <linux/grinternal.h>
54043 +
54044 +int
54045 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
54046 + const struct dentry *dir, const int flag, const int acc_mode)
54047 +{
54048 +#ifdef CONFIG_GRKERNSEC_FIFO
54049 + const struct cred *cred = current_cred();
54050 +
54051 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
54052 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
54053 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
54054 + (cred->fsuid != dentry->d_inode->i_uid)) {
54055 + if (!inode_permission(dentry->d_inode, acc_mode))
54056 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
54057 + return -EACCES;
54058 + }
54059 +#endif
54060 + return 0;
54061 +}
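
gr_handle_fifo() above refuses to open a FIFO that sits in a sticky directory when the FIFO is owned by neither the opener nor the directory owner. The sketch below expresses the same condition with stat(2) against on-disk paths; it is only an approximation for illustration, since it uses the real uid rather than the fsuid and ignores the O_EXCL case.

/* fifo_check_demo.c - userspace approximation of the gr_handle_fifo()
 * condition: a FIFO in a sticky directory, owned by neither the directory
 * owner nor the opener, would be refused. */
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

static int fifo_would_be_denied(const char *fifo_path, const char *dir_path)
{
	struct stat fs, ds;

	if (stat(fifo_path, &fs) || stat(dir_path, &ds))
		return 0;	/* cannot tell; let the open proceed */

	return S_ISFIFO(fs.st_mode) &&
	       (ds.st_mode & S_ISVTX) &&
	       fs.st_uid != ds.st_uid &&
	       fs.st_uid != getuid();
}

int main(int argc, char *argv[])
{
	if (argc != 3) {
		fprintf(stderr, "usage: %s <fifo> <dir>\n", argv[0]);
		return 1;
	}
	printf("%s\n", fifo_would_be_denied(argv[1], argv[2]) ?
	       "would be denied" : "would be allowed");
	return 0;
}
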
54062 diff -urNp linux-2.6.32.44/grsecurity/grsec_fork.c linux-2.6.32.44/grsecurity/grsec_fork.c
54063 --- linux-2.6.32.44/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
54064 +++ linux-2.6.32.44/grsecurity/grsec_fork.c 2011-04-17 15:56:46.000000000 -0400
54065 @@ -0,0 +1,23 @@
54066 +#include <linux/kernel.h>
54067 +#include <linux/sched.h>
54068 +#include <linux/grsecurity.h>
54069 +#include <linux/grinternal.h>
54070 +#include <linux/errno.h>
54071 +
54072 +void
54073 +gr_log_forkfail(const int retval)
54074 +{
54075 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
54076 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
54077 + switch (retval) {
54078 + case -EAGAIN:
54079 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
54080 + break;
54081 + case -ENOMEM:
54082 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
54083 + break;
54084 + }
54085 + }
54086 +#endif
54087 + return;
54088 +}
54089 diff -urNp linux-2.6.32.44/grsecurity/grsec_init.c linux-2.6.32.44/grsecurity/grsec_init.c
54090 --- linux-2.6.32.44/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
54091 +++ linux-2.6.32.44/grsecurity/grsec_init.c 2011-06-29 19:35:26.000000000 -0400
54092 @@ -0,0 +1,274 @@
54093 +#include <linux/kernel.h>
54094 +#include <linux/sched.h>
54095 +#include <linux/mm.h>
54096 +#include <linux/smp_lock.h>
54097 +#include <linux/gracl.h>
54098 +#include <linux/slab.h>
54099 +#include <linux/vmalloc.h>
54100 +#include <linux/percpu.h>
54101 +#include <linux/module.h>
54102 +
54103 +int grsec_enable_brute;
54104 +int grsec_enable_link;
54105 +int grsec_enable_dmesg;
54106 +int grsec_enable_harden_ptrace;
54107 +int grsec_enable_fifo;
54108 +int grsec_enable_execve;
54109 +int grsec_enable_execlog;
54110 +int grsec_enable_signal;
54111 +int grsec_enable_forkfail;
54112 +int grsec_enable_audit_ptrace;
54113 +int grsec_enable_time;
54114 +int grsec_enable_audit_textrel;
54115 +int grsec_enable_group;
54116 +int grsec_audit_gid;
54117 +int grsec_enable_chdir;
54118 +int grsec_enable_mount;
54119 +int grsec_enable_rofs;
54120 +int grsec_enable_chroot_findtask;
54121 +int grsec_enable_chroot_mount;
54122 +int grsec_enable_chroot_shmat;
54123 +int grsec_enable_chroot_fchdir;
54124 +int grsec_enable_chroot_double;
54125 +int grsec_enable_chroot_pivot;
54126 +int grsec_enable_chroot_chdir;
54127 +int grsec_enable_chroot_chmod;
54128 +int grsec_enable_chroot_mknod;
54129 +int grsec_enable_chroot_nice;
54130 +int grsec_enable_chroot_execlog;
54131 +int grsec_enable_chroot_caps;
54132 +int grsec_enable_chroot_sysctl;
54133 +int grsec_enable_chroot_unix;
54134 +int grsec_enable_tpe;
54135 +int grsec_tpe_gid;
54136 +int grsec_enable_blackhole;
54137 +#ifdef CONFIG_IPV6_MODULE
54138 +EXPORT_SYMBOL(grsec_enable_blackhole);
54139 +#endif
54140 +int grsec_lastack_retries;
54141 +int grsec_enable_tpe_all;
54142 +int grsec_enable_tpe_invert;
54143 +int grsec_enable_socket_all;
54144 +int grsec_socket_all_gid;
54145 +int grsec_enable_socket_client;
54146 +int grsec_socket_client_gid;
54147 +int grsec_enable_socket_server;
54148 +int grsec_socket_server_gid;
54149 +int grsec_resource_logging;
54150 +int grsec_disable_privio;
54151 +int grsec_enable_log_rwxmaps;
54152 +int grsec_lock;
54153 +
54154 +DEFINE_SPINLOCK(grsec_alert_lock);
54155 +unsigned long grsec_alert_wtime = 0;
54156 +unsigned long grsec_alert_fyet = 0;
54157 +
54158 +DEFINE_SPINLOCK(grsec_audit_lock);
54159 +
54160 +DEFINE_RWLOCK(grsec_exec_file_lock);
54161 +
54162 +char *gr_shared_page[4];
54163 +
54164 +char *gr_alert_log_fmt;
54165 +char *gr_audit_log_fmt;
54166 +char *gr_alert_log_buf;
54167 +char *gr_audit_log_buf;
54168 +
54169 +extern struct gr_arg *gr_usermode;
54170 +extern unsigned char *gr_system_salt;
54171 +extern unsigned char *gr_system_sum;
54172 +
54173 +void __init
54174 +grsecurity_init(void)
54175 +{
54176 + int j;
54177 + /* create the per-cpu shared pages */
54178 +
54179 +#ifdef CONFIG_X86
54180 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
54181 +#endif
54182 +
54183 + for (j = 0; j < 4; j++) {
54184 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
54185 + if (gr_shared_page[j] == NULL) {
54186 + panic("Unable to allocate grsecurity shared page");
54187 + return;
54188 + }
54189 + }
54190 +
54191 + /* allocate log buffers */
54192 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
54193 + if (!gr_alert_log_fmt) {
54194 + panic("Unable to allocate grsecurity alert log format buffer");
54195 + return;
54196 + }
54197 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
54198 + if (!gr_audit_log_fmt) {
54199 + panic("Unable to allocate grsecurity audit log format buffer");
54200 + return;
54201 + }
54202 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
54203 + if (!gr_alert_log_buf) {
54204 + panic("Unable to allocate grsecurity alert log buffer");
54205 + return;
54206 + }
54207 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
54208 + if (!gr_audit_log_buf) {
54209 + panic("Unable to allocate grsecurity audit log buffer");
54210 + return;
54211 + }
54212 +
54213 + /* allocate memory for authentication structure */
54214 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
54215 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
54216 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
54217 +
54218 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
54219 + panic("Unable to allocate grsecurity authentication structure");
54220 + return;
54221 + }
54222 +
54223 +
54224 +#ifdef CONFIG_GRKERNSEC_IO
54225 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
54226 + grsec_disable_privio = 1;
54227 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
54228 + grsec_disable_privio = 1;
54229 +#else
54230 + grsec_disable_privio = 0;
54231 +#endif
54232 +#endif
54233 +
54234 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
54235 + /* for backward compatibility, tpe_invert always defaults to on if
54236 + enabled in the kernel
54237 + */
54238 + grsec_enable_tpe_invert = 1;
54239 +#endif
54240 +
54241 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
54242 +#ifndef CONFIG_GRKERNSEC_SYSCTL
54243 + grsec_lock = 1;
54244 +#endif
54245 +
54246 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
54247 + grsec_enable_audit_textrel = 1;
54248 +#endif
54249 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54250 + grsec_enable_log_rwxmaps = 1;
54251 +#endif
54252 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
54253 + grsec_enable_group = 1;
54254 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
54255 +#endif
54256 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
54257 + grsec_enable_chdir = 1;
54258 +#endif
54259 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
54260 + grsec_enable_harden_ptrace = 1;
54261 +#endif
54262 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54263 + grsec_enable_mount = 1;
54264 +#endif
54265 +#ifdef CONFIG_GRKERNSEC_LINK
54266 + grsec_enable_link = 1;
54267 +#endif
54268 +#ifdef CONFIG_GRKERNSEC_BRUTE
54269 + grsec_enable_brute = 1;
54270 +#endif
54271 +#ifdef CONFIG_GRKERNSEC_DMESG
54272 + grsec_enable_dmesg = 1;
54273 +#endif
54274 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
54275 + grsec_enable_blackhole = 1;
54276 + grsec_lastack_retries = 4;
54277 +#endif
54278 +#ifdef CONFIG_GRKERNSEC_FIFO
54279 + grsec_enable_fifo = 1;
54280 +#endif
54281 +#ifdef CONFIG_GRKERNSEC_EXECVE
54282 + grsec_enable_execve = 1;
54283 +#endif
54284 +#ifdef CONFIG_GRKERNSEC_EXECLOG
54285 + grsec_enable_execlog = 1;
54286 +#endif
54287 +#ifdef CONFIG_GRKERNSEC_SIGNAL
54288 + grsec_enable_signal = 1;
54289 +#endif
54290 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
54291 + grsec_enable_forkfail = 1;
54292 +#endif
54293 +#ifdef CONFIG_GRKERNSEC_TIME
54294 + grsec_enable_time = 1;
54295 +#endif
54296 +#ifdef CONFIG_GRKERNSEC_RESLOG
54297 + grsec_resource_logging = 1;
54298 +#endif
54299 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
54300 + grsec_enable_chroot_findtask = 1;
54301 +#endif
54302 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
54303 + grsec_enable_chroot_unix = 1;
54304 +#endif
54305 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
54306 + grsec_enable_chroot_mount = 1;
54307 +#endif
54308 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
54309 + grsec_enable_chroot_fchdir = 1;
54310 +#endif
54311 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
54312 + grsec_enable_chroot_shmat = 1;
54313 +#endif
54314 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
54315 + grsec_enable_audit_ptrace = 1;
54316 +#endif
54317 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
54318 + grsec_enable_chroot_double = 1;
54319 +#endif
54320 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
54321 + grsec_enable_chroot_pivot = 1;
54322 +#endif
54323 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
54324 + grsec_enable_chroot_chdir = 1;
54325 +#endif
54326 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
54327 + grsec_enable_chroot_chmod = 1;
54328 +#endif
54329 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
54330 + grsec_enable_chroot_mknod = 1;
54331 +#endif
54332 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54333 + grsec_enable_chroot_nice = 1;
54334 +#endif
54335 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
54336 + grsec_enable_chroot_execlog = 1;
54337 +#endif
54338 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54339 + grsec_enable_chroot_caps = 1;
54340 +#endif
54341 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
54342 + grsec_enable_chroot_sysctl = 1;
54343 +#endif
54344 +#ifdef CONFIG_GRKERNSEC_TPE
54345 + grsec_enable_tpe = 1;
54346 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
54347 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
54348 + grsec_enable_tpe_all = 1;
54349 +#endif
54350 +#endif
54351 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
54352 + grsec_enable_socket_all = 1;
54353 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
54354 +#endif
54355 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
54356 + grsec_enable_socket_client = 1;
54357 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
54358 +#endif
54359 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
54360 + grsec_enable_socket_server = 1;
54361 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
54362 +#endif
54363 +#endif
54364 +
54365 + return;
54366 +}
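
[Note on grsec_init.c] After allocating the per-cpu shared pages and log buffers, grsecurity_init() derives the boot-time default of every tunable from its Kconfig option, but only when the sysctl interface is compiled out or CONFIG_GRKERNSEC_SYSCTL_ON forces the features on. The tiny standalone model below shows that compile-time-default pattern for one representative flag; it is illustrative only, grsecurity_init_model is a hypothetical name, and it compiles with or without the -D options shown in the comment.

#include <stdio.h>

/* Model of the pattern above: a tunable defaults to 0 and is switched on
 * at boot only when its compile-time option is set, and only when the
 * sysctl interface is absent or forced on.  Build with, for example:
 *     cc -DCONFIG_GRKERNSEC_DMESG -DCONFIG_GRKERNSEC_SYSCTL_ON demo.c   */
static int grsec_enable_dmesg;

static void grsecurity_init_model(void)
{
#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
#ifdef CONFIG_GRKERNSEC_DMESG
        grsec_enable_dmesg = 1;
#endif
#endif
}

int main(void)
{
        grsecurity_init_model();
        printf("dmesg restriction default: %d\n", grsec_enable_dmesg);
        return 0;
}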
54367 diff -urNp linux-2.6.32.44/grsecurity/grsec_link.c linux-2.6.32.44/grsecurity/grsec_link.c
54368 --- linux-2.6.32.44/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
54369 +++ linux-2.6.32.44/grsecurity/grsec_link.c 2011-04-17 15:56:46.000000000 -0400
54370 @@ -0,0 +1,43 @@
54371 +#include <linux/kernel.h>
54372 +#include <linux/sched.h>
54373 +#include <linux/fs.h>
54374 +#include <linux/file.h>
54375 +#include <linux/grinternal.h>
54376 +
54377 +int
54378 +gr_handle_follow_link(const struct inode *parent,
54379 + const struct inode *inode,
54380 + const struct dentry *dentry, const struct vfsmount *mnt)
54381 +{
54382 +#ifdef CONFIG_GRKERNSEC_LINK
54383 + const struct cred *cred = current_cred();
54384 +
54385 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
54386 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
54387 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
54388 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
54389 + return -EACCES;
54390 + }
54391 +#endif
54392 + return 0;
54393 +}
54394 +
54395 +int
54396 +gr_handle_hardlink(const struct dentry *dentry,
54397 + const struct vfsmount *mnt,
54398 + struct inode *inode, const int mode, const char *to)
54399 +{
54400 +#ifdef CONFIG_GRKERNSEC_LINK
54401 + const struct cred *cred = current_cred();
54402 +
54403 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
54404 + (!S_ISREG(mode) || (mode & S_ISUID) ||
54405 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
54406 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
54407 + !capable(CAP_FOWNER) && cred->uid) {
54408 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
54409 + return -EPERM;
54410 + }
54411 +#endif
54412 + return 0;
54413 +}
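
[Note on grsec_link.c] gr_handle_follow_link() refuses to follow a symlink that sits in a sticky, world-writable directory (e.g. /tmp) when the link is owned by neither the directory owner nor the process's fsuid, which defeats the usual symlink-race attacks; gr_handle_hardlink() applies a similar ownership rule to hard links. A small userspace sketch of the symlink predicate, assuming stat(2)/lstat(2) stand in for the parent and link inodes and geteuid() stands in for fsuid (follow_link_denied is a hypothetical name):

#include <stdio.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

/* Refuse to follow a symlink in a sticky, world-writable directory when
 * it is owned by neither the directory owner nor the caller. */
static bool follow_link_denied(const struct stat *dir,
                               const struct stat *link, uid_t fsuid)
{
        return S_ISLNK(link->st_mode) &&
               (dir->st_mode & S_ISVTX) &&
               (dir->st_mode & S_IWOTH) &&
               dir->st_uid != link->st_uid &&
               link->st_uid != fsuid;
}

int main(int argc, char *argv[])
{
        struct stat dir, lnk;

        if (argc != 3 || stat(argv[1], &dir) || lstat(argv[2], &lnk)) {
                fprintf(stderr, "usage: %s <dir> <symlink>\n", argv[0]);
                return 1;
        }
        printf("%s\n", follow_link_denied(&dir, &lnk, geteuid()) ?
               "would be denied" : "would be allowed");
        return 0;
}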
54414 diff -urNp linux-2.6.32.44/grsecurity/grsec_log.c linux-2.6.32.44/grsecurity/grsec_log.c
54415 --- linux-2.6.32.44/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
54416 +++ linux-2.6.32.44/grsecurity/grsec_log.c 2011-05-10 21:58:49.000000000 -0400
54417 @@ -0,0 +1,310 @@
54418 +#include <linux/kernel.h>
54419 +#include <linux/sched.h>
54420 +#include <linux/file.h>
54421 +#include <linux/tty.h>
54422 +#include <linux/fs.h>
54423 +#include <linux/grinternal.h>
54424 +
54425 +#ifdef CONFIG_TREE_PREEMPT_RCU
54426 +#define DISABLE_PREEMPT() preempt_disable()
54427 +#define ENABLE_PREEMPT() preempt_enable()
54428 +#else
54429 +#define DISABLE_PREEMPT()
54430 +#define ENABLE_PREEMPT()
54431 +#endif
54432 +
54433 +#define BEGIN_LOCKS(x) \
54434 + DISABLE_PREEMPT(); \
54435 + rcu_read_lock(); \
54436 + read_lock(&tasklist_lock); \
54437 + read_lock(&grsec_exec_file_lock); \
54438 + if (x != GR_DO_AUDIT) \
54439 + spin_lock(&grsec_alert_lock); \
54440 + else \
54441 + spin_lock(&grsec_audit_lock)
54442 +
54443 +#define END_LOCKS(x) \
54444 + if (x != GR_DO_AUDIT) \
54445 + spin_unlock(&grsec_alert_lock); \
54446 + else \
54447 + spin_unlock(&grsec_audit_lock); \
54448 + read_unlock(&grsec_exec_file_lock); \
54449 + read_unlock(&tasklist_lock); \
54450 + rcu_read_unlock(); \
54451 + ENABLE_PREEMPT(); \
54452 + if (x == GR_DONT_AUDIT) \
54453 + gr_handle_alertkill(current)
54454 +
54455 +enum {
54456 + FLOODING,
54457 + NO_FLOODING
54458 +};
54459 +
54460 +extern char *gr_alert_log_fmt;
54461 +extern char *gr_audit_log_fmt;
54462 +extern char *gr_alert_log_buf;
54463 +extern char *gr_audit_log_buf;
54464 +
54465 +static int gr_log_start(int audit)
54466 +{
54467 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
54468 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
54469 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54470 +
54471 + if (audit == GR_DO_AUDIT)
54472 + goto set_fmt;
54473 +
54474 + if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
54475 + grsec_alert_wtime = jiffies;
54476 + grsec_alert_fyet = 0;
54477 + } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
54478 + grsec_alert_fyet++;
54479 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
54480 + grsec_alert_wtime = jiffies;
54481 + grsec_alert_fyet++;
54482 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
54483 + return FLOODING;
54484 + } else return FLOODING;
54485 +
54486 +set_fmt:
54487 + memset(buf, 0, PAGE_SIZE);
54488 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
54489 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
54490 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
54491 + } else if (current->signal->curr_ip) {
54492 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
54493 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
54494 + } else if (gr_acl_is_enabled()) {
54495 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
54496 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
54497 + } else {
54498 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
54499 + strcpy(buf, fmt);
54500 + }
54501 +
54502 + return NO_FLOODING;
54503 +}
54504 +
54505 +static void gr_log_middle(int audit, const char *msg, va_list ap)
54506 + __attribute__ ((format (printf, 2, 0)));
54507 +
54508 +static void gr_log_middle(int audit, const char *msg, va_list ap)
54509 +{
54510 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54511 + unsigned int len = strlen(buf);
54512 +
54513 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
54514 +
54515 + return;
54516 +}
54517 +
54518 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
54519 + __attribute__ ((format (printf, 2, 3)));
54520 +
54521 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
54522 +{
54523 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54524 + unsigned int len = strlen(buf);
54525 + va_list ap;
54526 +
54527 + va_start(ap, msg);
54528 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
54529 + va_end(ap);
54530 +
54531 + return;
54532 +}
54533 +
54534 +static void gr_log_end(int audit)
54535 +{
54536 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54537 + unsigned int len = strlen(buf);
54538 +
54539 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
54540 + printk("%s\n", buf);
54541 +
54542 + return;
54543 +}
54544 +
54545 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
54546 +{
54547 + int logtype;
54548 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
54549 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
54550 + void *voidptr = NULL;
54551 + int num1 = 0, num2 = 0;
54552 + unsigned long ulong1 = 0, ulong2 = 0;
54553 + struct dentry *dentry = NULL;
54554 + struct vfsmount *mnt = NULL;
54555 + struct file *file = NULL;
54556 + struct task_struct *task = NULL;
54557 + const struct cred *cred, *pcred;
54558 + va_list ap;
54559 +
54560 + BEGIN_LOCKS(audit);
54561 + logtype = gr_log_start(audit);
54562 + if (logtype == FLOODING) {
54563 + END_LOCKS(audit);
54564 + return;
54565 + }
54566 + va_start(ap, argtypes);
54567 + switch (argtypes) {
54568 + case GR_TTYSNIFF:
54569 + task = va_arg(ap, struct task_struct *);
54570 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
54571 + break;
54572 + case GR_SYSCTL_HIDDEN:
54573 + str1 = va_arg(ap, char *);
54574 + gr_log_middle_varargs(audit, msg, result, str1);
54575 + break;
54576 + case GR_RBAC:
54577 + dentry = va_arg(ap, struct dentry *);
54578 + mnt = va_arg(ap, struct vfsmount *);
54579 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
54580 + break;
54581 + case GR_RBAC_STR:
54582 + dentry = va_arg(ap, struct dentry *);
54583 + mnt = va_arg(ap, struct vfsmount *);
54584 + str1 = va_arg(ap, char *);
54585 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
54586 + break;
54587 + case GR_STR_RBAC:
54588 + str1 = va_arg(ap, char *);
54589 + dentry = va_arg(ap, struct dentry *);
54590 + mnt = va_arg(ap, struct vfsmount *);
54591 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
54592 + break;
54593 + case GR_RBAC_MODE2:
54594 + dentry = va_arg(ap, struct dentry *);
54595 + mnt = va_arg(ap, struct vfsmount *);
54596 + str1 = va_arg(ap, char *);
54597 + str2 = va_arg(ap, char *);
54598 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
54599 + break;
54600 + case GR_RBAC_MODE3:
54601 + dentry = va_arg(ap, struct dentry *);
54602 + mnt = va_arg(ap, struct vfsmount *);
54603 + str1 = va_arg(ap, char *);
54604 + str2 = va_arg(ap, char *);
54605 + str3 = va_arg(ap, char *);
54606 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
54607 + break;
54608 + case GR_FILENAME:
54609 + dentry = va_arg(ap, struct dentry *);
54610 + mnt = va_arg(ap, struct vfsmount *);
54611 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
54612 + break;
54613 + case GR_STR_FILENAME:
54614 + str1 = va_arg(ap, char *);
54615 + dentry = va_arg(ap, struct dentry *);
54616 + mnt = va_arg(ap, struct vfsmount *);
54617 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
54618 + break;
54619 + case GR_FILENAME_STR:
54620 + dentry = va_arg(ap, struct dentry *);
54621 + mnt = va_arg(ap, struct vfsmount *);
54622 + str1 = va_arg(ap, char *);
54623 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
54624 + break;
54625 + case GR_FILENAME_TWO_INT:
54626 + dentry = va_arg(ap, struct dentry *);
54627 + mnt = va_arg(ap, struct vfsmount *);
54628 + num1 = va_arg(ap, int);
54629 + num2 = va_arg(ap, int);
54630 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
54631 + break;
54632 + case GR_FILENAME_TWO_INT_STR:
54633 + dentry = va_arg(ap, struct dentry *);
54634 + mnt = va_arg(ap, struct vfsmount *);
54635 + num1 = va_arg(ap, int);
54636 + num2 = va_arg(ap, int);
54637 + str1 = va_arg(ap, char *);
54638 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
54639 + break;
54640 + case GR_TEXTREL:
54641 + file = va_arg(ap, struct file *);
54642 + ulong1 = va_arg(ap, unsigned long);
54643 + ulong2 = va_arg(ap, unsigned long);
54644 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
54645 + break;
54646 + case GR_PTRACE:
54647 + task = va_arg(ap, struct task_struct *);
54648 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
54649 + break;
54650 + case GR_RESOURCE:
54651 + task = va_arg(ap, struct task_struct *);
54652 + cred = __task_cred(task);
54653 + pcred = __task_cred(task->real_parent);
54654 + ulong1 = va_arg(ap, unsigned long);
54655 + str1 = va_arg(ap, char *);
54656 + ulong2 = va_arg(ap, unsigned long);
54657 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54658 + break;
54659 + case GR_CAP:
54660 + task = va_arg(ap, struct task_struct *);
54661 + cred = __task_cred(task);
54662 + pcred = __task_cred(task->real_parent);
54663 + str1 = va_arg(ap, char *);
54664 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54665 + break;
54666 + case GR_SIG:
54667 + str1 = va_arg(ap, char *);
54668 + voidptr = va_arg(ap, void *);
54669 + gr_log_middle_varargs(audit, msg, str1, voidptr);
54670 + break;
54671 + case GR_SIG2:
54672 + task = va_arg(ap, struct task_struct *);
54673 + cred = __task_cred(task);
54674 + pcred = __task_cred(task->real_parent);
54675 + num1 = va_arg(ap, int);
54676 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54677 + break;
54678 + case GR_CRASH1:
54679 + task = va_arg(ap, struct task_struct *);
54680 + cred = __task_cred(task);
54681 + pcred = __task_cred(task->real_parent);
54682 + ulong1 = va_arg(ap, unsigned long);
54683 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
54684 + break;
54685 + case GR_CRASH2:
54686 + task = va_arg(ap, struct task_struct *);
54687 + cred = __task_cred(task);
54688 + pcred = __task_cred(task->real_parent);
54689 + ulong1 = va_arg(ap, unsigned long);
54690 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
54691 + break;
54692 + case GR_RWXMAP:
54693 + file = va_arg(ap, struct file *);
54694 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
54695 + break;
54696 + case GR_PSACCT:
54697 + {
54698 + unsigned int wday, cday;
54699 + __u8 whr, chr;
54700 + __u8 wmin, cmin;
54701 + __u8 wsec, csec;
54702 + char cur_tty[64] = { 0 };
54703 + char parent_tty[64] = { 0 };
54704 +
54705 + task = va_arg(ap, struct task_struct *);
54706 + wday = va_arg(ap, unsigned int);
54707 + cday = va_arg(ap, unsigned int);
54708 + whr = va_arg(ap, int);
54709 + chr = va_arg(ap, int);
54710 + wmin = va_arg(ap, int);
54711 + cmin = va_arg(ap, int);
54712 + wsec = va_arg(ap, int);
54713 + csec = va_arg(ap, int);
54714 + ulong1 = va_arg(ap, unsigned long);
54715 + cred = __task_cred(task);
54716 + pcred = __task_cred(task->real_parent);
54717 +
54718 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54719 + }
54720 + break;
54721 + default:
54722 + gr_log_middle(audit, msg, ap);
54723 + }
54724 + va_end(ap);
54725 + gr_log_end(audit);
54726 + END_LOCKS(audit);
54727 +}
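
[Note on grsec_log.c] gr_log_start() rate-limits KERN_ALERT messages: within a CONFIG_GRKERNSEC_FLOODTIME-second window it allows a burst of CONFIG_GRKERNSEC_FLOODBURST alerts, then prints one "logging disabled" notice and suppresses the rest until a new window opens; audit messages bypass the limiter. The sketch below models the same window/burst arithmetic in userspace, replacing jiffies/HZ with time(2) and using made-up constants; it is not the kernel code itself.

#include <stdio.h>
#include <time.h>

#define FLOODTIME  10   /* seconds; stands in for CONFIG_GRKERNSEC_FLOODTIME */
#define FLOODBURST 4    /* alerts per window; stands in for CONFIG_GRKERNSEC_FLOODBURST */

static time_t alert_wtime;
static unsigned long alert_fyet;

/* Returns 1 when the alert may be emitted, 0 when the burst for the
 * current window is exhausted, mirroring gr_log_start()'s branches. */
static int alert_allowed(time_t now)
{
        if (!alert_wtime || now - alert_wtime > FLOODTIME) {
                alert_wtime = now;              /* new window, first alert is free */
                alert_fyet = 0;
        } else if (now - alert_wtime < FLOODTIME && alert_fyet < FLOODBURST) {
                alert_fyet++;                   /* still inside the burst */
        } else if (alert_fyet == FLOODBURST) {
                alert_wtime = now;              /* announce suppression exactly once */
                alert_fyet++;
                printf("more alerts, logging disabled for %d seconds\n", FLOODTIME);
                return 0;
        } else {
                return 0;                       /* already suppressed */
        }
        return 1;
}

int main(void)
{
        for (int i = 0; i < 8; i++)
                printf("alert %d: %s\n", i,
                       alert_allowed(time(NULL)) ? "logged" : "dropped");
        return 0;
}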
54728 diff -urNp linux-2.6.32.44/grsecurity/grsec_mem.c linux-2.6.32.44/grsecurity/grsec_mem.c
54729 --- linux-2.6.32.44/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
54730 +++ linux-2.6.32.44/grsecurity/grsec_mem.c 2011-04-17 15:56:46.000000000 -0400
54731 @@ -0,0 +1,33 @@
54732 +#include <linux/kernel.h>
54733 +#include <linux/sched.h>
54734 +#include <linux/mm.h>
54735 +#include <linux/mman.h>
54736 +#include <linux/grinternal.h>
54737 +
54738 +void
54739 +gr_handle_ioperm(void)
54740 +{
54741 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
54742 + return;
54743 +}
54744 +
54745 +void
54746 +gr_handle_iopl(void)
54747 +{
54748 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
54749 + return;
54750 +}
54751 +
54752 +void
54753 +gr_handle_mem_readwrite(u64 from, u64 to)
54754 +{
54755 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
54756 + return;
54757 +}
54758 +
54759 +void
54760 +gr_handle_vm86(void)
54761 +{
54762 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
54763 + return;
54764 +}
54765 diff -urNp linux-2.6.32.44/grsecurity/grsec_mount.c linux-2.6.32.44/grsecurity/grsec_mount.c
54766 --- linux-2.6.32.44/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
54767 +++ linux-2.6.32.44/grsecurity/grsec_mount.c 2011-06-20 19:47:03.000000000 -0400
54768 @@ -0,0 +1,62 @@
54769 +#include <linux/kernel.h>
54770 +#include <linux/sched.h>
54771 +#include <linux/mount.h>
54772 +#include <linux/grsecurity.h>
54773 +#include <linux/grinternal.h>
54774 +
54775 +void
54776 +gr_log_remount(const char *devname, const int retval)
54777 +{
54778 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54779 + if (grsec_enable_mount && (retval >= 0))
54780 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
54781 +#endif
54782 + return;
54783 +}
54784 +
54785 +void
54786 +gr_log_unmount(const char *devname, const int retval)
54787 +{
54788 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54789 + if (grsec_enable_mount && (retval >= 0))
54790 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
54791 +#endif
54792 + return;
54793 +}
54794 +
54795 +void
54796 +gr_log_mount(const char *from, const char *to, const int retval)
54797 +{
54798 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54799 + if (grsec_enable_mount && (retval >= 0))
54800 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
54801 +#endif
54802 + return;
54803 +}
54804 +
54805 +int
54806 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
54807 +{
54808 +#ifdef CONFIG_GRKERNSEC_ROFS
54809 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
54810 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
54811 + return -EPERM;
54812 + } else
54813 + return 0;
54814 +#endif
54815 + return 0;
54816 +}
54817 +
54818 +int
54819 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
54820 +{
54821 +#ifdef CONFIG_GRKERNSEC_ROFS
54822 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
54823 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
54824 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
54825 + return -EPERM;
54826 + } else
54827 + return 0;
54828 +#endif
54829 + return 0;
54830 +}
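
[Note on grsec_mount.c] Once the romount_protect switch is on, gr_handle_rofs_mount() rejects any new mount that is not read-only and gr_handle_rofs_blockwrite() rejects writable opens of block devices, so already-mounted filesystems cannot be modified out from under the kernel. A compact userspace model of the two predicates; the MODEL_* flag constants stand in for the kernel's MNT_READONLY and MAY_WRITE, and the function names are hypothetical.

#include <stdio.h>
#include <stdbool.h>

#define MODEL_MNT_READONLY 0x1  /* stands in for MNT_READONLY */
#define MODEL_MAY_WRITE    0x2  /* stands in for MAY_WRITE */

static bool rofs_enabled = true;        /* models grsec_enable_rofs */

/* New mounts must be read-only once the switch is on. */
static bool rofs_mount_denied(int mnt_flags)
{
        return rofs_enabled && !(mnt_flags & MODEL_MNT_READONLY);
}

/* Writable opens of block devices are refused under the same switch. */
static bool rofs_blockwrite_denied(int acc_mode, bool is_block_device)
{
        return rofs_enabled && (acc_mode & MODEL_MAY_WRITE) && is_block_device;
}

int main(void)
{
        printf("rw mount denied:       %d\n", rofs_mount_denied(0));
        printf("ro mount denied:       %d\n", rofs_mount_denied(MODEL_MNT_READONLY));
        printf("blockdev write denied: %d\n",
               rofs_blockwrite_denied(MODEL_MAY_WRITE, true));
        return 0;
}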
54831 diff -urNp linux-2.6.32.44/grsecurity/grsec_pax.c linux-2.6.32.44/grsecurity/grsec_pax.c
54832 --- linux-2.6.32.44/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
54833 +++ linux-2.6.32.44/grsecurity/grsec_pax.c 2011-04-17 15:56:46.000000000 -0400
54834 @@ -0,0 +1,36 @@
54835 +#include <linux/kernel.h>
54836 +#include <linux/sched.h>
54837 +#include <linux/mm.h>
54838 +#include <linux/file.h>
54839 +#include <linux/grinternal.h>
54840 +#include <linux/grsecurity.h>
54841 +
54842 +void
54843 +gr_log_textrel(struct vm_area_struct * vma)
54844 +{
54845 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
54846 + if (grsec_enable_audit_textrel)
54847 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
54848 +#endif
54849 + return;
54850 +}
54851 +
54852 +void
54853 +gr_log_rwxmmap(struct file *file)
54854 +{
54855 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54856 + if (grsec_enable_log_rwxmaps)
54857 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
54858 +#endif
54859 + return;
54860 +}
54861 +
54862 +void
54863 +gr_log_rwxmprotect(struct file *file)
54864 +{
54865 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54866 + if (grsec_enable_log_rwxmaps)
54867 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
54868 +#endif
54869 + return;
54870 +}
54871 diff -urNp linux-2.6.32.44/grsecurity/grsec_ptrace.c linux-2.6.32.44/grsecurity/grsec_ptrace.c
54872 --- linux-2.6.32.44/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
54873 +++ linux-2.6.32.44/grsecurity/grsec_ptrace.c 2011-04-17 15:56:46.000000000 -0400
54874 @@ -0,0 +1,14 @@
54875 +#include <linux/kernel.h>
54876 +#include <linux/sched.h>
54877 +#include <linux/grinternal.h>
54878 +#include <linux/grsecurity.h>
54879 +
54880 +void
54881 +gr_audit_ptrace(struct task_struct *task)
54882 +{
54883 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
54884 + if (grsec_enable_audit_ptrace)
54885 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
54886 +#endif
54887 + return;
54888 +}
54889 diff -urNp linux-2.6.32.44/grsecurity/grsec_sig.c linux-2.6.32.44/grsecurity/grsec_sig.c
54890 --- linux-2.6.32.44/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
54891 +++ linux-2.6.32.44/grsecurity/grsec_sig.c 2011-06-29 19:40:31.000000000 -0400
54892 @@ -0,0 +1,205 @@
54893 +#include <linux/kernel.h>
54894 +#include <linux/sched.h>
54895 +#include <linux/delay.h>
54896 +#include <linux/grsecurity.h>
54897 +#include <linux/grinternal.h>
54898 +#include <linux/hardirq.h>
54899 +
54900 +char *signames[] = {
54901 + [SIGSEGV] = "Segmentation fault",
54902 + [SIGILL] = "Illegal instruction",
54903 + [SIGABRT] = "Abort",
54904 + [SIGBUS] = "Invalid alignment/Bus error"
54905 +};
54906 +
54907 +void
54908 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
54909 +{
54910 +#ifdef CONFIG_GRKERNSEC_SIGNAL
54911 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
54912 + (sig == SIGABRT) || (sig == SIGBUS))) {
54913 + if (t->pid == current->pid) {
54914 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
54915 + } else {
54916 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
54917 + }
54918 + }
54919 +#endif
54920 + return;
54921 +}
54922 +
54923 +int
54924 +gr_handle_signal(const struct task_struct *p, const int sig)
54925 +{
54926 +#ifdef CONFIG_GRKERNSEC
54927 + if (current->pid > 1 && gr_check_protected_task(p)) {
54928 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
54929 + return -EPERM;
54930 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
54931 + return -EPERM;
54932 + }
54933 +#endif
54934 + return 0;
54935 +}
54936 +
54937 +#ifdef CONFIG_GRKERNSEC
54938 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
54939 +
54940 +int gr_fake_force_sig(int sig, struct task_struct *t)
54941 +{
54942 + unsigned long int flags;
54943 + int ret, blocked, ignored;
54944 + struct k_sigaction *action;
54945 +
54946 + spin_lock_irqsave(&t->sighand->siglock, flags);
54947 + action = &t->sighand->action[sig-1];
54948 + ignored = action->sa.sa_handler == SIG_IGN;
54949 + blocked = sigismember(&t->blocked, sig);
54950 + if (blocked || ignored) {
54951 + action->sa.sa_handler = SIG_DFL;
54952 + if (blocked) {
54953 + sigdelset(&t->blocked, sig);
54954 + recalc_sigpending_and_wake(t);
54955 + }
54956 + }
54957 + if (action->sa.sa_handler == SIG_DFL)
54958 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
54959 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
54960 +
54961 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
54962 +
54963 + return ret;
54964 +}
54965 +#endif
54966 +
54967 +#ifdef CONFIG_GRKERNSEC_BRUTE
54968 +#define GR_USER_BAN_TIME (15 * 60)
54969 +
54970 +static int __get_dumpable(unsigned long mm_flags)
54971 +{
54972 + int ret;
54973 +
54974 + ret = mm_flags & MMF_DUMPABLE_MASK;
54975 + return (ret >= 2) ? 2 : ret;
54976 +}
54977 +#endif
54978 +
54979 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
54980 +{
54981 +#ifdef CONFIG_GRKERNSEC_BRUTE
54982 + uid_t uid = 0;
54983 +
54984 + if (!grsec_enable_brute)
54985 + return;
54986 +
54987 + rcu_read_lock();
54988 + read_lock(&tasklist_lock);
54989 + read_lock(&grsec_exec_file_lock);
54990 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
54991 + p->real_parent->brute = 1;
54992 + else {
54993 + const struct cred *cred = __task_cred(p), *cred2;
54994 + struct task_struct *tsk, *tsk2;
54995 +
54996 + if (!__get_dumpable(mm_flags) && cred->uid) {
54997 + struct user_struct *user;
54998 +
54999 + uid = cred->uid;
55000 +
55001 +                       /* this reference is dropped (free_uid) at the next exec once the ban expires */
55002 + user = find_user(uid);
55003 + if (user == NULL)
55004 + goto unlock;
55005 + user->banned = 1;
55006 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
55007 + if (user->ban_expires == ~0UL)
55008 + user->ban_expires--;
55009 +
55010 + do_each_thread(tsk2, tsk) {
55011 + cred2 = __task_cred(tsk);
55012 + if (tsk != p && cred2->uid == uid)
55013 + gr_fake_force_sig(SIGKILL, tsk);
55014 + } while_each_thread(tsk2, tsk);
55015 + }
55016 + }
55017 +unlock:
55018 + read_unlock(&grsec_exec_file_lock);
55019 + read_unlock(&tasklist_lock);
55020 + rcu_read_unlock();
55021 +
55022 + if (uid)
55023 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
55024 +#endif
55025 + return;
55026 +}
55027 +
55028 +void gr_handle_brute_check(void)
55029 +{
55030 +#ifdef CONFIG_GRKERNSEC_BRUTE
55031 + if (current->brute)
55032 + msleep(30 * 1000);
55033 +#endif
55034 + return;
55035 +}
55036 +
55037 +void gr_handle_kernel_exploit(void)
55038 +{
55039 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
55040 + const struct cred *cred;
55041 + struct task_struct *tsk, *tsk2;
55042 + struct user_struct *user;
55043 + uid_t uid;
55044 +
55045 + if (in_irq() || in_serving_softirq() || in_nmi())
55046 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
55047 +
55048 + uid = current_uid();
55049 +
55050 + if (uid == 0)
55051 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
55052 + else {
55053 + /* kill all the processes of this user, hold a reference
55054 + to their creds struct, and prevent them from creating
55055 + another process until system reset
55056 + */
55057 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
55058 + /* we intentionally leak this ref */
55059 + user = get_uid(current->cred->user);
55060 + if (user) {
55061 + user->banned = 1;
55062 + user->ban_expires = ~0UL;
55063 + }
55064 +
55065 + read_lock(&tasklist_lock);
55066 + do_each_thread(tsk2, tsk) {
55067 + cred = __task_cred(tsk);
55068 + if (cred->uid == uid)
55069 + gr_fake_force_sig(SIGKILL, tsk);
55070 + } while_each_thread(tsk2, tsk);
55071 + read_unlock(&tasklist_lock);
55072 + }
55073 +#endif
55074 +}
55075 +
55076 +int __gr_process_user_ban(struct user_struct *user)
55077 +{
55078 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
55079 + if (unlikely(user->banned)) {
55080 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
55081 + user->banned = 0;
55082 + user->ban_expires = 0;
55083 + free_uid(user);
55084 + } else
55085 + return -EPERM;
55086 + }
55087 +#endif
55088 + return 0;
55089 +}
55090 +
55091 +int gr_process_user_ban(void)
55092 +{
55093 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
55094 + return __gr_process_user_ban(current->cred->user);
55095 +#endif
55096 + return 0;
55097 +}
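
[Note on grsec_sig.c] gr_handle_brute_attach() bans a uid for GR_USER_BAN_TIME (15 minutes) after a suspicious crash of a non-dumpable process and kills that user's tasks; gr_handle_kernel_exploit() bans the uid permanently by setting ban_expires to ~0UL; __gr_process_user_ban() then either lifts an expired ban or keeps returning -EPERM. The sketch below models only that expiry bookkeeping in userspace, with time(2) and >= standing in for get_seconds() and time_after_eq(); struct banned_user and process_user_ban are hypothetical names.

#include <stdio.h>
#include <time.h>
#include <stdbool.h>

#define USER_BAN_TIME (15 * 60)         /* mirrors GR_USER_BAN_TIME */

struct banned_user {
        bool banned;
        time_t ban_expires;             /* (time_t)-1 means "never", like ~0UL */
};

/* A banned user is refused until the ban expires; a permanent ban
 * (ban_expires == -1) is never lifted, matching the kernel-exploit path. */
static int process_user_ban(struct banned_user *u, time_t now)
{
        if (u->banned) {
                if (u->ban_expires != (time_t)-1 && now >= u->ban_expires) {
                        u->banned = false;
                        u->ban_expires = 0;
                } else {
                        return -1;      /* still banned */
                }
        }
        return 0;
}

int main(void)
{
        struct banned_user u = { .banned = true,
                                 .ban_expires = time(NULL) + USER_BAN_TIME };

        printf("now:          %s\n",
               process_user_ban(&u, time(NULL)) ? "banned" : "ok");
        printf("after expiry: %s\n",
               process_user_ban(&u, time(NULL) + USER_BAN_TIME + 1) ? "banned" : "ok");
        return 0;
}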
55098 diff -urNp linux-2.6.32.44/grsecurity/grsec_sock.c linux-2.6.32.44/grsecurity/grsec_sock.c
55099 --- linux-2.6.32.44/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
55100 +++ linux-2.6.32.44/grsecurity/grsec_sock.c 2011-04-17 15:56:46.000000000 -0400
55101 @@ -0,0 +1,275 @@
55102 +#include <linux/kernel.h>
55103 +#include <linux/module.h>
55104 +#include <linux/sched.h>
55105 +#include <linux/file.h>
55106 +#include <linux/net.h>
55107 +#include <linux/in.h>
55108 +#include <linux/ip.h>
55109 +#include <net/sock.h>
55110 +#include <net/inet_sock.h>
55111 +#include <linux/grsecurity.h>
55112 +#include <linux/grinternal.h>
55113 +#include <linux/gracl.h>
55114 +
55115 +kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
55116 +EXPORT_SYMBOL(gr_cap_rtnetlink);
55117 +
55118 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
55119 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
55120 +
55121 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
55122 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
55123 +
55124 +#ifdef CONFIG_UNIX_MODULE
55125 +EXPORT_SYMBOL(gr_acl_handle_unix);
55126 +EXPORT_SYMBOL(gr_acl_handle_mknod);
55127 +EXPORT_SYMBOL(gr_handle_chroot_unix);
55128 +EXPORT_SYMBOL(gr_handle_create);
55129 +#endif
55130 +
55131 +#ifdef CONFIG_GRKERNSEC
55132 +#define gr_conn_table_size 32749
55133 +struct conn_table_entry {
55134 + struct conn_table_entry *next;
55135 + struct signal_struct *sig;
55136 +};
55137 +
55138 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
55139 +DEFINE_SPINLOCK(gr_conn_table_lock);
55140 +
55141 +extern const char * gr_socktype_to_name(unsigned char type);
55142 +extern const char * gr_proto_to_name(unsigned char proto);
55143 +extern const char * gr_sockfamily_to_name(unsigned char family);
55144 +
55145 +static __inline__ int
55146 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
55147 +{
55148 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
55149 +}
55150 +
55151 +static __inline__ int
55152 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
55153 + __u16 sport, __u16 dport)
55154 +{
55155 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
55156 + sig->gr_sport == sport && sig->gr_dport == dport))
55157 + return 1;
55158 + else
55159 + return 0;
55160 +}
55161 +
55162 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
55163 +{
55164 + struct conn_table_entry **match;
55165 + unsigned int index;
55166 +
55167 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
55168 + sig->gr_sport, sig->gr_dport,
55169 + gr_conn_table_size);
55170 +
55171 + newent->sig = sig;
55172 +
55173 + match = &gr_conn_table[index];
55174 + newent->next = *match;
55175 + *match = newent;
55176 +
55177 + return;
55178 +}
55179 +
55180 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
55181 +{
55182 + struct conn_table_entry *match, *last = NULL;
55183 + unsigned int index;
55184 +
55185 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
55186 + sig->gr_sport, sig->gr_dport,
55187 + gr_conn_table_size);
55188 +
55189 + match = gr_conn_table[index];
55190 + while (match && !conn_match(match->sig,
55191 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
55192 + sig->gr_dport)) {
55193 + last = match;
55194 + match = match->next;
55195 + }
55196 +
55197 + if (match) {
55198 + if (last)
55199 + last->next = match->next;
55200 + else
55201 + gr_conn_table[index] = NULL;
55202 + kfree(match);
55203 + }
55204 +
55205 + return;
55206 +}
55207 +
55208 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
55209 + __u16 sport, __u16 dport)
55210 +{
55211 + struct conn_table_entry *match;
55212 + unsigned int index;
55213 +
55214 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
55215 +
55216 + match = gr_conn_table[index];
55217 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
55218 + match = match->next;
55219 +
55220 + if (match)
55221 + return match->sig;
55222 + else
55223 + return NULL;
55224 +}
55225 +
55226 +#endif
55227 +
55228 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
55229 +{
55230 +#ifdef CONFIG_GRKERNSEC
55231 + struct signal_struct *sig = task->signal;
55232 + struct conn_table_entry *newent;
55233 +
55234 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
55235 + if (newent == NULL)
55236 + return;
55237 + /* no bh lock needed since we are called with bh disabled */
55238 + spin_lock(&gr_conn_table_lock);
55239 + gr_del_task_from_ip_table_nolock(sig);
55240 + sig->gr_saddr = inet->rcv_saddr;
55241 + sig->gr_daddr = inet->daddr;
55242 + sig->gr_sport = inet->sport;
55243 + sig->gr_dport = inet->dport;
55244 + gr_add_to_task_ip_table_nolock(sig, newent);
55245 + spin_unlock(&gr_conn_table_lock);
55246 +#endif
55247 + return;
55248 +}
55249 +
55250 +void gr_del_task_from_ip_table(struct task_struct *task)
55251 +{
55252 +#ifdef CONFIG_GRKERNSEC
55253 + spin_lock_bh(&gr_conn_table_lock);
55254 + gr_del_task_from_ip_table_nolock(task->signal);
55255 + spin_unlock_bh(&gr_conn_table_lock);
55256 +#endif
55257 + return;
55258 +}
55259 +
55260 +void
55261 +gr_attach_curr_ip(const struct sock *sk)
55262 +{
55263 +#ifdef CONFIG_GRKERNSEC
55264 + struct signal_struct *p, *set;
55265 + const struct inet_sock *inet = inet_sk(sk);
55266 +
55267 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
55268 + return;
55269 +
55270 + set = current->signal;
55271 +
55272 + spin_lock_bh(&gr_conn_table_lock);
55273 + p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
55274 + inet->dport, inet->sport);
55275 + if (unlikely(p != NULL)) {
55276 + set->curr_ip = p->curr_ip;
55277 + set->used_accept = 1;
55278 + gr_del_task_from_ip_table_nolock(p);
55279 + spin_unlock_bh(&gr_conn_table_lock);
55280 + return;
55281 + }
55282 + spin_unlock_bh(&gr_conn_table_lock);
55283 +
55284 + set->curr_ip = inet->daddr;
55285 + set->used_accept = 1;
55286 +#endif
55287 + return;
55288 +}
55289 +
55290 +int
55291 +gr_handle_sock_all(const int family, const int type, const int protocol)
55292 +{
55293 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
55294 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
55295 + (family != AF_UNIX)) {
55296 + if (family == AF_INET)
55297 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
55298 + else
55299 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
55300 + return -EACCES;
55301 + }
55302 +#endif
55303 + return 0;
55304 +}
55305 +
55306 +int
55307 +gr_handle_sock_server(const struct sockaddr *sck)
55308 +{
55309 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55310 + if (grsec_enable_socket_server &&
55311 + in_group_p(grsec_socket_server_gid) &&
55312 + sck && (sck->sa_family != AF_UNIX) &&
55313 + (sck->sa_family != AF_LOCAL)) {
55314 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
55315 + return -EACCES;
55316 + }
55317 +#endif
55318 + return 0;
55319 +}
55320 +
55321 +int
55322 +gr_handle_sock_server_other(const struct sock *sck)
55323 +{
55324 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55325 + if (grsec_enable_socket_server &&
55326 + in_group_p(grsec_socket_server_gid) &&
55327 + sck && (sck->sk_family != AF_UNIX) &&
55328 + (sck->sk_family != AF_LOCAL)) {
55329 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
55330 + return -EACCES;
55331 + }
55332 +#endif
55333 + return 0;
55334 +}
55335 +
55336 +int
55337 +gr_handle_sock_client(const struct sockaddr *sck)
55338 +{
55339 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55340 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
55341 + sck && (sck->sa_family != AF_UNIX) &&
55342 + (sck->sa_family != AF_LOCAL)) {
55343 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
55344 + return -EACCES;
55345 + }
55346 +#endif
55347 + return 0;
55348 +}
55349 +
55350 +kernel_cap_t
55351 +gr_cap_rtnetlink(struct sock *sock)
55352 +{
55353 +#ifdef CONFIG_GRKERNSEC
55354 + if (!gr_acl_is_enabled())
55355 + return current_cap();
55356 + else if (sock->sk_protocol == NETLINK_ISCSI &&
55357 + cap_raised(current_cap(), CAP_SYS_ADMIN) &&
55358 + gr_is_capable(CAP_SYS_ADMIN))
55359 + return current_cap();
55360 + else if (sock->sk_protocol == NETLINK_AUDIT &&
55361 + cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
55362 + gr_is_capable(CAP_AUDIT_WRITE) &&
55363 + cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
55364 + gr_is_capable(CAP_AUDIT_CONTROL))
55365 + return current_cap();
55366 + else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
55367 + ((sock->sk_protocol == NETLINK_ROUTE) ?
55368 + gr_is_capable_nolog(CAP_NET_ADMIN) :
55369 + gr_is_capable(CAP_NET_ADMIN)))
55370 + return current_cap();
55371 + else
55372 + return __cap_empty_set;
55373 +#else
55374 + return current_cap();
55375 +#endif
55376 +}
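
[Note on grsec_sock.c] The connection table above is a 32749-bucket chained hash keyed by the (saddr, daddr, sport, dport) tuple of the socket's network-byte-order fields; gr_update_task_in_ip_table() files the listening task under that key and gr_attach_curr_ip() later uses it to copy the peer's IP into the accepting task's signal_struct (curr_ip) so log lines can name the remote host. The sketch below mirrors only the conn_hash() bucket computation in userspace; the constant matches gr_conn_table_size and inet_addr()/htons() stand in for the inet_sock fields.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define CONN_TABLE_SIZE 32749   /* same prime as gr_conn_table_size */

/* Bucket index for a connection 4-tuple, as in conn_hash(). */
static unsigned int conn_hash(uint32_t saddr, uint32_t daddr,
                              uint16_t sport, uint16_t dport)
{
        return (daddr + saddr + ((uint32_t)sport << 8) +
                ((uint32_t)dport << 16)) % CONN_TABLE_SIZE;
}

int main(void)
{
        uint32_t saddr = inet_addr("192.168.1.10");
        uint32_t daddr = inet_addr("10.0.0.1");

        printf("bucket: %u\n", conn_hash(saddr, daddr, htons(44321), htons(80)));
        return 0;
}

Because the hash is a plain modular sum over a prime table size, collisions are handled by the singly linked chains shown in gr_add_to_task_ip_table_nolock()/gr_del_task_from_ip_table_nolock() above.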
55377 diff -urNp linux-2.6.32.44/grsecurity/grsec_sysctl.c linux-2.6.32.44/grsecurity/grsec_sysctl.c
55378 --- linux-2.6.32.44/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
55379 +++ linux-2.6.32.44/grsecurity/grsec_sysctl.c 2011-06-29 19:37:19.000000000 -0400
55380 @@ -0,0 +1,489 @@
55381 +#include <linux/kernel.h>
55382 +#include <linux/sched.h>
55383 +#include <linux/sysctl.h>
55384 +#include <linux/grsecurity.h>
55385 +#include <linux/grinternal.h>
55386 +
55387 +int
55388 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
55389 +{
55390 +#ifdef CONFIG_GRKERNSEC_SYSCTL
55391 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
55392 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
55393 + return -EACCES;
55394 + }
55395 +#endif
55396 + return 0;
55397 +}
55398 +
55399 +#ifdef CONFIG_GRKERNSEC_ROFS
55400 +static int __maybe_unused one = 1;
55401 +#endif
55402 +
55403 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
55404 +ctl_table grsecurity_table[] = {
55405 +#ifdef CONFIG_GRKERNSEC_SYSCTL
55406 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
55407 +#ifdef CONFIG_GRKERNSEC_IO
55408 + {
55409 + .ctl_name = CTL_UNNUMBERED,
55410 + .procname = "disable_priv_io",
55411 + .data = &grsec_disable_privio,
55412 + .maxlen = sizeof(int),
55413 + .mode = 0600,
55414 + .proc_handler = &proc_dointvec,
55415 + },
55416 +#endif
55417 +#endif
55418 +#ifdef CONFIG_GRKERNSEC_LINK
55419 + {
55420 + .ctl_name = CTL_UNNUMBERED,
55421 + .procname = "linking_restrictions",
55422 + .data = &grsec_enable_link,
55423 + .maxlen = sizeof(int),
55424 + .mode = 0600,
55425 + .proc_handler = &proc_dointvec,
55426 + },
55427 +#endif
55428 +#ifdef CONFIG_GRKERNSEC_BRUTE
55429 + {
55430 + .ctl_name = CTL_UNNUMBERED,
55431 + .procname = "deter_bruteforce",
55432 + .data = &grsec_enable_brute,
55433 + .maxlen = sizeof(int),
55434 + .mode = 0600,
55435 + .proc_handler = &proc_dointvec,
55436 + },
55437 +#endif
55438 +#ifdef CONFIG_GRKERNSEC_FIFO
55439 + {
55440 + .ctl_name = CTL_UNNUMBERED,
55441 + .procname = "fifo_restrictions",
55442 + .data = &grsec_enable_fifo,
55443 + .maxlen = sizeof(int),
55444 + .mode = 0600,
55445 + .proc_handler = &proc_dointvec,
55446 + },
55447 +#endif
55448 +#ifdef CONFIG_GRKERNSEC_EXECVE
55449 + {
55450 + .ctl_name = CTL_UNNUMBERED,
55451 + .procname = "execve_limiting",
55452 + .data = &grsec_enable_execve,
55453 + .maxlen = sizeof(int),
55454 + .mode = 0600,
55455 + .proc_handler = &proc_dointvec,
55456 + },
55457 +#endif
55458 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
55459 + {
55460 + .ctl_name = CTL_UNNUMBERED,
55461 + .procname = "ip_blackhole",
55462 + .data = &grsec_enable_blackhole,
55463 + .maxlen = sizeof(int),
55464 + .mode = 0600,
55465 + .proc_handler = &proc_dointvec,
55466 + },
55467 + {
55468 + .ctl_name = CTL_UNNUMBERED,
55469 + .procname = "lastack_retries",
55470 + .data = &grsec_lastack_retries,
55471 + .maxlen = sizeof(int),
55472 + .mode = 0600,
55473 + .proc_handler = &proc_dointvec,
55474 + },
55475 +#endif
55476 +#ifdef CONFIG_GRKERNSEC_EXECLOG
55477 + {
55478 + .ctl_name = CTL_UNNUMBERED,
55479 + .procname = "exec_logging",
55480 + .data = &grsec_enable_execlog,
55481 + .maxlen = sizeof(int),
55482 + .mode = 0600,
55483 + .proc_handler = &proc_dointvec,
55484 + },
55485 +#endif
55486 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55487 + {
55488 + .ctl_name = CTL_UNNUMBERED,
55489 + .procname = "rwxmap_logging",
55490 + .data = &grsec_enable_log_rwxmaps,
55491 + .maxlen = sizeof(int),
55492 + .mode = 0600,
55493 + .proc_handler = &proc_dointvec,
55494 + },
55495 +#endif
55496 +#ifdef CONFIG_GRKERNSEC_SIGNAL
55497 + {
55498 + .ctl_name = CTL_UNNUMBERED,
55499 + .procname = "signal_logging",
55500 + .data = &grsec_enable_signal,
55501 + .maxlen = sizeof(int),
55502 + .mode = 0600,
55503 + .proc_handler = &proc_dointvec,
55504 + },
55505 +#endif
55506 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
55507 + {
55508 + .ctl_name = CTL_UNNUMBERED,
55509 + .procname = "forkfail_logging",
55510 + .data = &grsec_enable_forkfail,
55511 + .maxlen = sizeof(int),
55512 + .mode = 0600,
55513 + .proc_handler = &proc_dointvec,
55514 + },
55515 +#endif
55516 +#ifdef CONFIG_GRKERNSEC_TIME
55517 + {
55518 + .ctl_name = CTL_UNNUMBERED,
55519 + .procname = "timechange_logging",
55520 + .data = &grsec_enable_time,
55521 + .maxlen = sizeof(int),
55522 + .mode = 0600,
55523 + .proc_handler = &proc_dointvec,
55524 + },
55525 +#endif
55526 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
55527 + {
55528 + .ctl_name = CTL_UNNUMBERED,
55529 + .procname = "chroot_deny_shmat",
55530 + .data = &grsec_enable_chroot_shmat,
55531 + .maxlen = sizeof(int),
55532 + .mode = 0600,
55533 + .proc_handler = &proc_dointvec,
55534 + },
55535 +#endif
55536 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
55537 + {
55538 + .ctl_name = CTL_UNNUMBERED,
55539 + .procname = "chroot_deny_unix",
55540 + .data = &grsec_enable_chroot_unix,
55541 + .maxlen = sizeof(int),
55542 + .mode = 0600,
55543 + .proc_handler = &proc_dointvec,
55544 + },
55545 +#endif
55546 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
55547 + {
55548 + .ctl_name = CTL_UNNUMBERED,
55549 + .procname = "chroot_deny_mount",
55550 + .data = &grsec_enable_chroot_mount,
55551 + .maxlen = sizeof(int),
55552 + .mode = 0600,
55553 + .proc_handler = &proc_dointvec,
55554 + },
55555 +#endif
55556 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
55557 + {
55558 + .ctl_name = CTL_UNNUMBERED,
55559 + .procname = "chroot_deny_fchdir",
55560 + .data = &grsec_enable_chroot_fchdir,
55561 + .maxlen = sizeof(int),
55562 + .mode = 0600,
55563 + .proc_handler = &proc_dointvec,
55564 + },
55565 +#endif
55566 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
55567 + {
55568 + .ctl_name = CTL_UNNUMBERED,
55569 + .procname = "chroot_deny_chroot",
55570 + .data = &grsec_enable_chroot_double,
55571 + .maxlen = sizeof(int),
55572 + .mode = 0600,
55573 + .proc_handler = &proc_dointvec,
55574 + },
55575 +#endif
55576 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
55577 + {
55578 + .ctl_name = CTL_UNNUMBERED,
55579 + .procname = "chroot_deny_pivot",
55580 + .data = &grsec_enable_chroot_pivot,
55581 + .maxlen = sizeof(int),
55582 + .mode = 0600,
55583 + .proc_handler = &proc_dointvec,
55584 + },
55585 +#endif
55586 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
55587 + {
55588 + .ctl_name = CTL_UNNUMBERED,
55589 + .procname = "chroot_enforce_chdir",
55590 + .data = &grsec_enable_chroot_chdir,
55591 + .maxlen = sizeof(int),
55592 + .mode = 0600,
55593 + .proc_handler = &proc_dointvec,
55594 + },
55595 +#endif
55596 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
55597 + {
55598 + .ctl_name = CTL_UNNUMBERED,
55599 + .procname = "chroot_deny_chmod",
55600 + .data = &grsec_enable_chroot_chmod,
55601 + .maxlen = sizeof(int),
55602 + .mode = 0600,
55603 + .proc_handler = &proc_dointvec,
55604 + },
55605 +#endif
55606 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
55607 + {
55608 + .ctl_name = CTL_UNNUMBERED,
55609 + .procname = "chroot_deny_mknod",
55610 + .data = &grsec_enable_chroot_mknod,
55611 + .maxlen = sizeof(int),
55612 + .mode = 0600,
55613 + .proc_handler = &proc_dointvec,
55614 + },
55615 +#endif
55616 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55617 + {
55618 + .ctl_name = CTL_UNNUMBERED,
55619 + .procname = "chroot_restrict_nice",
55620 + .data = &grsec_enable_chroot_nice,
55621 + .maxlen = sizeof(int),
55622 + .mode = 0600,
55623 + .proc_handler = &proc_dointvec,
55624 + },
55625 +#endif
55626 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
55627 + {
55628 + .ctl_name = CTL_UNNUMBERED,
55629 + .procname = "chroot_execlog",
55630 + .data = &grsec_enable_chroot_execlog,
55631 + .maxlen = sizeof(int),
55632 + .mode = 0600,
55633 + .proc_handler = &proc_dointvec,
55634 + },
55635 +#endif
55636 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55637 + {
55638 + .ctl_name = CTL_UNNUMBERED,
55639 + .procname = "chroot_caps",
55640 + .data = &grsec_enable_chroot_caps,
55641 + .maxlen = sizeof(int),
55642 + .mode = 0600,
55643 + .proc_handler = &proc_dointvec,
55644 + },
55645 +#endif
55646 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
55647 + {
55648 + .ctl_name = CTL_UNNUMBERED,
55649 + .procname = "chroot_deny_sysctl",
55650 + .data = &grsec_enable_chroot_sysctl,
55651 + .maxlen = sizeof(int),
55652 + .mode = 0600,
55653 + .proc_handler = &proc_dointvec,
55654 + },
55655 +#endif
55656 +#ifdef CONFIG_GRKERNSEC_TPE
55657 + {
55658 + .ctl_name = CTL_UNNUMBERED,
55659 + .procname = "tpe",
55660 + .data = &grsec_enable_tpe,
55661 + .maxlen = sizeof(int),
55662 + .mode = 0600,
55663 + .proc_handler = &proc_dointvec,
55664 + },
55665 + {
55666 + .ctl_name = CTL_UNNUMBERED,
55667 + .procname = "tpe_gid",
55668 + .data = &grsec_tpe_gid,
55669 + .maxlen = sizeof(int),
55670 + .mode = 0600,
55671 + .proc_handler = &proc_dointvec,
55672 + },
55673 +#endif
55674 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55675 + {
55676 + .ctl_name = CTL_UNNUMBERED,
55677 + .procname = "tpe_invert",
55678 + .data = &grsec_enable_tpe_invert,
55679 + .maxlen = sizeof(int),
55680 + .mode = 0600,
55681 + .proc_handler = &proc_dointvec,
55682 + },
55683 +#endif
55684 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
55685 + {
55686 + .ctl_name = CTL_UNNUMBERED,
55687 + .procname = "tpe_restrict_all",
55688 + .data = &grsec_enable_tpe_all,
55689 + .maxlen = sizeof(int),
55690 + .mode = 0600,
55691 + .proc_handler = &proc_dointvec,
55692 + },
55693 +#endif
55694 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
55695 + {
55696 + .ctl_name = CTL_UNNUMBERED,
55697 + .procname = "socket_all",
55698 + .data = &grsec_enable_socket_all,
55699 + .maxlen = sizeof(int),
55700 + .mode = 0600,
55701 + .proc_handler = &proc_dointvec,
55702 + },
55703 + {
55704 + .ctl_name = CTL_UNNUMBERED,
55705 + .procname = "socket_all_gid",
55706 + .data = &grsec_socket_all_gid,
55707 + .maxlen = sizeof(int),
55708 + .mode = 0600,
55709 + .proc_handler = &proc_dointvec,
55710 + },
55711 +#endif
55712 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55713 + {
55714 + .ctl_name = CTL_UNNUMBERED,
55715 + .procname = "socket_client",
55716 + .data = &grsec_enable_socket_client,
55717 + .maxlen = sizeof(int),
55718 + .mode = 0600,
55719 + .proc_handler = &proc_dointvec,
55720 + },
55721 + {
55722 + .ctl_name = CTL_UNNUMBERED,
55723 + .procname = "socket_client_gid",
55724 + .data = &grsec_socket_client_gid,
55725 + .maxlen = sizeof(int),
55726 + .mode = 0600,
55727 + .proc_handler = &proc_dointvec,
55728 + },
55729 +#endif
55730 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55731 + {
55732 + .ctl_name = CTL_UNNUMBERED,
55733 + .procname = "socket_server",
55734 + .data = &grsec_enable_socket_server,
55735 + .maxlen = sizeof(int),
55736 + .mode = 0600,
55737 + .proc_handler = &proc_dointvec,
55738 + },
55739 + {
55740 + .ctl_name = CTL_UNNUMBERED,
55741 + .procname = "socket_server_gid",
55742 + .data = &grsec_socket_server_gid,
55743 + .maxlen = sizeof(int),
55744 + .mode = 0600,
55745 + .proc_handler = &proc_dointvec,
55746 + },
55747 +#endif
55748 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
55749 + {
55750 + .ctl_name = CTL_UNNUMBERED,
55751 + .procname = "audit_group",
55752 + .data = &grsec_enable_group,
55753 + .maxlen = sizeof(int),
55754 + .mode = 0600,
55755 + .proc_handler = &proc_dointvec,
55756 + },
55757 + {
55758 + .ctl_name = CTL_UNNUMBERED,
55759 + .procname = "audit_gid",
55760 + .data = &grsec_audit_gid,
55761 + .maxlen = sizeof(int),
55762 + .mode = 0600,
55763 + .proc_handler = &proc_dointvec,
55764 + },
55765 +#endif
55766 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55767 + {
55768 + .ctl_name = CTL_UNNUMBERED,
55769 + .procname = "audit_chdir",
55770 + .data = &grsec_enable_chdir,
55771 + .maxlen = sizeof(int),
55772 + .mode = 0600,
55773 + .proc_handler = &proc_dointvec,
55774 + },
55775 +#endif
55776 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55777 + {
55778 + .ctl_name = CTL_UNNUMBERED,
55779 + .procname = "audit_mount",
55780 + .data = &grsec_enable_mount,
55781 + .maxlen = sizeof(int),
55782 + .mode = 0600,
55783 + .proc_handler = &proc_dointvec,
55784 + },
55785 +#endif
55786 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55787 + {
55788 + .ctl_name = CTL_UNNUMBERED,
55789 + .procname = "audit_textrel",
55790 + .data = &grsec_enable_audit_textrel,
55791 + .maxlen = sizeof(int),
55792 + .mode = 0600,
55793 + .proc_handler = &proc_dointvec,
55794 + },
55795 +#endif
55796 +#ifdef CONFIG_GRKERNSEC_DMESG
55797 + {
55798 + .ctl_name = CTL_UNNUMBERED,
55799 + .procname = "dmesg",
55800 + .data = &grsec_enable_dmesg,
55801 + .maxlen = sizeof(int),
55802 + .mode = 0600,
55803 + .proc_handler = &proc_dointvec,
55804 + },
55805 +#endif
55806 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55807 + {
55808 + .ctl_name = CTL_UNNUMBERED,
55809 + .procname = "chroot_findtask",
55810 + .data = &grsec_enable_chroot_findtask,
55811 + .maxlen = sizeof(int),
55812 + .mode = 0600,
55813 + .proc_handler = &proc_dointvec,
55814 + },
55815 +#endif
55816 +#ifdef CONFIG_GRKERNSEC_RESLOG
55817 + {
55818 + .ctl_name = CTL_UNNUMBERED,
55819 + .procname = "resource_logging",
55820 + .data = &grsec_resource_logging,
55821 + .maxlen = sizeof(int),
55822 + .mode = 0600,
55823 + .proc_handler = &proc_dointvec,
55824 + },
55825 +#endif
55826 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55827 + {
55828 + .ctl_name = CTL_UNNUMBERED,
55829 + .procname = "audit_ptrace",
55830 + .data = &grsec_enable_audit_ptrace,
55831 + .maxlen = sizeof(int),
55832 + .mode = 0600,
55833 + .proc_handler = &proc_dointvec,
55834 + },
55835 +#endif
55836 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55837 + {
55838 + .ctl_name = CTL_UNNUMBERED,
55839 + .procname = "harden_ptrace",
55840 + .data = &grsec_enable_harden_ptrace,
55841 + .maxlen = sizeof(int),
55842 + .mode = 0600,
55843 + .proc_handler = &proc_dointvec,
55844 + },
55845 +#endif
55846 + {
55847 + .ctl_name = CTL_UNNUMBERED,
55848 + .procname = "grsec_lock",
55849 + .data = &grsec_lock,
55850 + .maxlen = sizeof(int),
55851 + .mode = 0600,
55852 + .proc_handler = &proc_dointvec,
55853 + },
55854 +#endif
55855 +#ifdef CONFIG_GRKERNSEC_ROFS
55856 + {
55857 + .ctl_name = CTL_UNNUMBERED,
55858 + .procname = "romount_protect",
55859 + .data = &grsec_enable_rofs,
55860 + .maxlen = sizeof(int),
55861 + .mode = 0600,
55862 + .proc_handler = &proc_dointvec_minmax,
55863 + .extra1 = &one,
55864 + .extra2 = &one,
55865 + },
55866 +#endif
55867 + { .ctl_name = 0 }
55868 +};
55869 +#endif
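The table that ends above uses the 2.6.32-era ctl_table layout (.ctl_name, .procname, .data, .maxlen, .mode, .proc_handler). As a rough, hypothetical sketch of how a table like this is normally exposed with that API, the fragment below registers a single integer toggle; the example_* names are invented for illustration, and the patch itself wires grsecurity_table into the kernel's sysctl tree so the entries appear under /proc/sys/kernel/grsecurity, as the Kconfig help later in this patch describes.

#include <linux/sysctl.h>

static int example_flag;

static struct ctl_table example_table[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "example_flag",
		.data		= &example_flag,
		.maxlen		= sizeof(int),
		.mode		= 0600,
		.proc_handler	= &proc_dointvec,
	},
	{ .ctl_name = 0 }
};

static struct ctl_table example_dir[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "example",
		.mode		= 0500,
		.child		= example_table,
	},
	{ .ctl_name = 0 }
};

static struct ctl_table_header *example_header;

/* creates /proc/sys/example/example_flag, readable and writable by root only */
static int __init example_sysctl_init(void)
{
	example_header = register_sysctl_table(example_dir);
	return example_header ? 0 : -ENOMEM;
}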
55870 diff -urNp linux-2.6.32.44/grsecurity/grsec_time.c linux-2.6.32.44/grsecurity/grsec_time.c
55871 --- linux-2.6.32.44/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
55872 +++ linux-2.6.32.44/grsecurity/grsec_time.c 2011-04-17 15:56:46.000000000 -0400
55873 @@ -0,0 +1,16 @@
55874 +#include <linux/kernel.h>
55875 +#include <linux/sched.h>
55876 +#include <linux/grinternal.h>
55877 +#include <linux/module.h>
55878 +
55879 +void
55880 +gr_log_timechange(void)
55881 +{
55882 +#ifdef CONFIG_GRKERNSEC_TIME
55883 + if (grsec_enable_time)
55884 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
55885 +#endif
55886 + return;
55887 +}
55888 +
55889 +EXPORT_SYMBOL(gr_log_timechange);
55890 diff -urNp linux-2.6.32.44/grsecurity/grsec_tpe.c linux-2.6.32.44/grsecurity/grsec_tpe.c
55891 --- linux-2.6.32.44/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
55892 +++ linux-2.6.32.44/grsecurity/grsec_tpe.c 2011-04-17 15:56:46.000000000 -0400
55893 @@ -0,0 +1,39 @@
55894 +#include <linux/kernel.h>
55895 +#include <linux/sched.h>
55896 +#include <linux/file.h>
55897 +#include <linux/fs.h>
55898 +#include <linux/grinternal.h>
55899 +
55900 +extern int gr_acl_tpe_check(void);
55901 +
55902 +int
55903 +gr_tpe_allow(const struct file *file)
55904 +{
55905 +#ifdef CONFIG_GRKERNSEC
55906 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
55907 + const struct cred *cred = current_cred();
55908 +
55909 + if (cred->uid && ((grsec_enable_tpe &&
55910 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55911 + ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
55912 + (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
55913 +#else
55914 + in_group_p(grsec_tpe_gid)
55915 +#endif
55916 + ) || gr_acl_tpe_check()) &&
55917 + (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
55918 + (inode->i_mode & S_IWOTH))))) {
55919 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55920 + return 0;
55921 + }
55922 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
55923 + if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
55924 + ((inode->i_uid && (inode->i_uid != cred->uid)) ||
55925 + (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
55926 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55927 + return 0;
55928 + }
55929 +#endif
55930 +#endif
55931 + return 1;
55932 +}
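Restated outside the kernel, the directory test that gr_tpe_allow() applies to the parent directory of the file being executed comes down to the check below. This is an illustrative userspace sketch only, using stat(2) fields in place of the inode fields used above.

#include <stdbool.h>
#include <sys/stat.h>

/*
 * Untrusted users may only execute out of directories that are owned by
 * root and not writable by group or other; this mirrors the i_uid/i_mode
 * test in gr_tpe_allow().
 */
static bool tpe_dir_trusted(const struct stat *dir)
{
	if (dir->st_uid != 0)
		return false;			/* not owned by root */
	if (dir->st_mode & (S_IWGRP | S_IWOTH))
		return false;			/* group- or world-writable */
	return true;
}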
55933 diff -urNp linux-2.6.32.44/grsecurity/grsum.c linux-2.6.32.44/grsecurity/grsum.c
55934 --- linux-2.6.32.44/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
55935 +++ linux-2.6.32.44/grsecurity/grsum.c 2011-04-17 15:56:46.000000000 -0400
55936 @@ -0,0 +1,61 @@
55937 +#include <linux/err.h>
55938 +#include <linux/kernel.h>
55939 +#include <linux/sched.h>
55940 +#include <linux/mm.h>
55941 +#include <linux/scatterlist.h>
55942 +#include <linux/crypto.h>
55943 +#include <linux/gracl.h>
55944 +
55945 +
55946 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
55947 +#error "crypto and sha256 must be built into the kernel"
55948 +#endif
55949 +
55950 +int
55951 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
55952 +{
55953 + char *p;
55954 + struct crypto_hash *tfm;
55955 + struct hash_desc desc;
55956 + struct scatterlist sg;
55957 + unsigned char temp_sum[GR_SHA_LEN];
55958 + volatile int retval = 0;
55959 + volatile int dummy = 0;
55960 + unsigned int i;
55961 +
55962 + sg_init_table(&sg, 1);
55963 +
55964 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
55965 + if (IS_ERR(tfm)) {
55966 + /* should never happen, since sha256 should be built in */
55967 + return 1;
55968 + }
55969 +
55970 + desc.tfm = tfm;
55971 + desc.flags = 0;
55972 +
55973 + crypto_hash_init(&desc);
55974 +
55975 + p = salt;
55976 + sg_set_buf(&sg, p, GR_SALT_LEN);
55977 + crypto_hash_update(&desc, &sg, sg.length);
55978 +
55979 + p = entry->pw;
55980 + sg_set_buf(&sg, p, strlen(p));
55981 +
55982 + crypto_hash_update(&desc, &sg, sg.length);
55983 +
55984 + crypto_hash_final(&desc, temp_sum);
55985 +
55986 + memset(entry->pw, 0, GR_PW_LEN);
55987 +
55988 + for (i = 0; i < GR_SHA_LEN; i++)
55989 + if (sum[i] != temp_sum[i])
55990 + retval = 1;
55991 + else
55992 + dummy = 1; // waste a cycle
55993 +
55994 + crypto_free_hash(tfm);
55995 +
55996 + return retval;
55997 +}
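The byte loop at the end of chkpw() deliberately avoids returning early, so the comparison takes the same time whether the sums differ in the first byte or the last. The same idea as a standalone helper (a sketch only, not part of the patch):

#include <stddef.h>

/* Returns 0 when the buffers are equal; runtime does not depend on contents. */
static int const_time_memcmp(const unsigned char *a, const unsigned char *b,
			     size_t len)
{
	unsigned char diff = 0;
	size_t i;

	for (i = 0; i < len; i++)
		diff |= a[i] ^ b[i];	/* accumulate differences, never branch out */

	return diff;
}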
55998 diff -urNp linux-2.6.32.44/grsecurity/Kconfig linux-2.6.32.44/grsecurity/Kconfig
55999 --- linux-2.6.32.44/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
56000 +++ linux-2.6.32.44/grsecurity/Kconfig 2011-07-06 19:57:57.000000000 -0400
56001 @@ -0,0 +1,1047 @@
56002 +#
56003 +# grsecurity configuration
56004 +#
56005 +
56006 +menu "Grsecurity"
56007 +
56008 +config GRKERNSEC
56009 + bool "Grsecurity"
56010 + select CRYPTO
56011 + select CRYPTO_SHA256
56012 + help
56013 + If you say Y here, you will be able to configure many features
56014 + that will enhance the security of your system. It is highly
56015 + recommended that you say Y here and read through the help
56016 + for each option so that you fully understand the features and
56017 + can evaluate their usefulness for your machine.
56018 +
56019 +choice
56020 + prompt "Security Level"
56021 + depends on GRKERNSEC
56022 + default GRKERNSEC_CUSTOM
56023 +
56024 +config GRKERNSEC_LOW
56025 + bool "Low"
56026 + select GRKERNSEC_LINK
56027 + select GRKERNSEC_FIFO
56028 + select GRKERNSEC_EXECVE
56029 + select GRKERNSEC_RANDNET
56030 + select GRKERNSEC_DMESG
56031 + select GRKERNSEC_CHROOT
56032 + select GRKERNSEC_CHROOT_CHDIR
56033 +
56034 + help
56035 + If you choose this option, several of the grsecurity options will
56036 + be enabled that will give you greater protection against a number
56037 + of attacks, while assuring that none of your software will have any
56038 + conflicts with the additional security measures. If you run a lot
56039 + of unusual software, or you are having problems with the higher
56040 + security levels, you should say Y here. With this option, the
56041 + following features are enabled:
56042 +
56043 + - Linking restrictions
56044 + - FIFO restrictions
56045 + - Enforcing RLIMIT_NPROC on execve
56046 + - Restricted dmesg
56047 + - Enforced chdir("/") on chroot
56048 + - Runtime module disabling
56049 +
56050 +config GRKERNSEC_MEDIUM
56051 + bool "Medium"
56052 + select PAX
56053 + select PAX_EI_PAX
56054 + select PAX_PT_PAX_FLAGS
56055 + select PAX_HAVE_ACL_FLAGS
56056 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
56057 + select GRKERNSEC_CHROOT
56058 + select GRKERNSEC_CHROOT_SYSCTL
56059 + select GRKERNSEC_LINK
56060 + select GRKERNSEC_FIFO
56061 + select GRKERNSEC_EXECVE
56062 + select GRKERNSEC_DMESG
56063 + select GRKERNSEC_RANDNET
56064 + select GRKERNSEC_FORKFAIL
56065 + select GRKERNSEC_TIME
56066 + select GRKERNSEC_SIGNAL
56067 + select GRKERNSEC_CHROOT
56068 + select GRKERNSEC_CHROOT_UNIX
56069 + select GRKERNSEC_CHROOT_MOUNT
56070 + select GRKERNSEC_CHROOT_PIVOT
56071 + select GRKERNSEC_CHROOT_DOUBLE
56072 + select GRKERNSEC_CHROOT_CHDIR
56073 + select GRKERNSEC_CHROOT_MKNOD
56074 + select GRKERNSEC_PROC
56075 + select GRKERNSEC_PROC_USERGROUP
56076 + select PAX_RANDUSTACK
56077 + select PAX_ASLR
56078 + select PAX_RANDMMAP
56079 + select PAX_REFCOUNT if (X86 || SPARC64)
56080 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
56081 +
56082 + help
56083 + If you say Y here, several features in addition to those included
56084 + in the low additional security level will be enabled. These
56085 + features provide even more security to your system, though in rare
56086 + cases they may be incompatible with very old or poorly written
56087 + software. If you enable this option, make sure that your auth
56088 + service (identd) is running as gid 1001. With this option,
56089 + the following features (in addition to those provided in the
56090 + low additional security level) will be enabled:
56091 +
56092 + - Failed fork logging
56093 + - Time change logging
56094 + - Signal logging
56095 + - Deny mounts in chroot
56096 + - Deny double chrooting
56097 + - Deny sysctl writes in chroot
56098 + - Deny mknod in chroot
56099 + - Deny access to abstract AF_UNIX sockets out of chroot
56100 + - Deny pivot_root in chroot
56101 + - Denied writes of /dev/kmem, /dev/mem, and /dev/port
56102 + - /proc restrictions with special GID set to 10 (usually wheel)
56103 + - Address Space Layout Randomization (ASLR)
56104 + - Prevent exploitation of most refcount overflows
56105 + - Bounds checking of copying between the kernel and userland
56106 +
56107 +config GRKERNSEC_HIGH
56108 + bool "High"
56109 + select GRKERNSEC_LINK
56110 + select GRKERNSEC_FIFO
56111 + select GRKERNSEC_EXECVE
56112 + select GRKERNSEC_DMESG
56113 + select GRKERNSEC_FORKFAIL
56114 + select GRKERNSEC_TIME
56115 + select GRKERNSEC_SIGNAL
56116 + select GRKERNSEC_CHROOT
56117 + select GRKERNSEC_CHROOT_SHMAT
56118 + select GRKERNSEC_CHROOT_UNIX
56119 + select GRKERNSEC_CHROOT_MOUNT
56120 + select GRKERNSEC_CHROOT_FCHDIR
56121 + select GRKERNSEC_CHROOT_PIVOT
56122 + select GRKERNSEC_CHROOT_DOUBLE
56123 + select GRKERNSEC_CHROOT_CHDIR
56124 + select GRKERNSEC_CHROOT_MKNOD
56125 + select GRKERNSEC_CHROOT_CAPS
56126 + select GRKERNSEC_CHROOT_SYSCTL
56127 + select GRKERNSEC_CHROOT_FINDTASK
56128 + select GRKERNSEC_SYSFS_RESTRICT
56129 + select GRKERNSEC_PROC
56130 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
56131 + select GRKERNSEC_HIDESYM
56132 + select GRKERNSEC_BRUTE
56133 + select GRKERNSEC_PROC_USERGROUP
56134 + select GRKERNSEC_KMEM
56135 + select GRKERNSEC_RESLOG
56136 + select GRKERNSEC_RANDNET
56137 + select GRKERNSEC_PROC_ADD
56138 + select GRKERNSEC_CHROOT_CHMOD
56139 + select GRKERNSEC_CHROOT_NICE
56140 + select GRKERNSEC_AUDIT_MOUNT
56141 + select GRKERNSEC_MODHARDEN if (MODULES)
56142 + select GRKERNSEC_HARDEN_PTRACE
56143 + select GRKERNSEC_VM86 if (X86_32)
56144 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
56145 + select PAX
56146 + select PAX_RANDUSTACK
56147 + select PAX_ASLR
56148 + select PAX_RANDMMAP
56149 + select PAX_NOEXEC
56150 + select PAX_MPROTECT
56151 + select PAX_EI_PAX
56152 + select PAX_PT_PAX_FLAGS
56153 + select PAX_HAVE_ACL_FLAGS
56154 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
56155 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
56156 + select PAX_RANDKSTACK if (X86_TSC && X86)
56157 + select PAX_SEGMEXEC if (X86_32)
56158 + select PAX_PAGEEXEC
56159 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
56160 + select PAX_EMUTRAMP if (PARISC)
56161 + select PAX_EMUSIGRT if (PARISC)
56162 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
56163 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
56164 + select PAX_REFCOUNT if (X86 || SPARC64)
56165 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
56166 + help
56167 + If you say Y here, many of the features of grsecurity will be
56168 + enabled, which will protect you against many kinds of attacks
56169 + against your system. The heightened security comes at a cost
56170 + of an increased chance of incompatibilities with rare software
56171 + on your machine. Since this security level enables PaX, you should
56172 + view <http://pax.grsecurity.net> and read about the PaX
56173 + project. While you are there, download chpax and run it on
56174 + binaries that cause problems with PaX. Also remember that
56175 + since the /proc restrictions are enabled, you must run your
56176 + identd as gid 1001. This security level enables the following
56177 + features in addition to those listed in the low and medium
56178 + security levels:
56179 +
56180 + - Additional /proc restrictions
56181 + - Chmod restrictions in chroot
56182 + - No signals, ptrace, or viewing of processes outside of chroot
56183 + - Capability restrictions in chroot
56184 + - Deny fchdir out of chroot
56185 + - Priority restrictions in chroot
56186 + - Segmentation-based implementation of PaX
56187 + - Mprotect restrictions
56188 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
56189 + - Kernel stack randomization
56190 + - Mount/unmount/remount logging
56191 + - Kernel symbol hiding
56192 + - Prevention of memory exhaustion-based exploits
56193 + - Hardening of module auto-loading
56194 + - Ptrace restrictions
56195 + - Restricted vm86 mode
56196 + - Restricted sysfs/debugfs
56197 + - Active kernel exploit response
56198 +
56199 +config GRKERNSEC_CUSTOM
56200 + bool "Custom"
56201 + help
56202 + If you say Y here, you will be able to configure every grsecurity
56203 + option, which allows you to enable many more features that aren't
56204 + covered in the basic security levels. These additional features
56205 + include TPE, socket restrictions, and the sysctl system for
56206 + grsecurity. It is advised that you read through the help for
56207 + each option to determine its usefulness in your situation.
56208 +
56209 +endchoice
56210 +
56211 +menu "Address Space Protection"
56212 +depends on GRKERNSEC
56213 +
56214 +config GRKERNSEC_KMEM
56215 + bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
56216 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
56217 + help
56218 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
56219 + be written to via mmap or otherwise to modify the running kernel.
56220 + /dev/port will also not be allowed to be opened. If you have module
56221 + support disabled, enabling this will close up four ways that are
56222 + currently used to insert malicious code into the running kernel.
56223 + Even with all these features enabled, we still highly recommend that
56224 + you use the RBAC system, as it is still possible for an attacker to
56225 + modify the running kernel through privileged I/O granted by ioperm/iopl.
56226 + If you are not using XFree86, you may be able to stop this additional
56227 + case by enabling the 'Disable privileged I/O' option. Though nothing
56228 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
56229 + but only to video memory, which is the only writing we allow in this
56230 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
56231 + not be allowed to be mprotected with PROT_WRITE later.
56232 + It is highly recommended that you say Y here if you meet all the
56233 + conditions above.
56234 +
56235 +config GRKERNSEC_VM86
56236 + bool "Restrict VM86 mode"
56237 + depends on X86_32
56238 +
56239 + help
56240 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
56241 + make use of a special execution mode on 32bit x86 processors called
56242 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
56243 + video cards and will still work with this option enabled. The purpose
56244 + of the option is to prevent exploitation of emulation errors in
56245 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
56246 + Nearly all users should be able to enable this option.
56247 +
56248 +config GRKERNSEC_IO
56249 + bool "Disable privileged I/O"
56250 + depends on X86
56251 + select RTC_CLASS
56252 + select RTC_INTF_DEV
56253 + select RTC_DRV_CMOS
56254 +
56255 + help
56256 + If you say Y here, all ioperm and iopl calls will return an error.
56257 + Ioperm and iopl can be used to modify the running kernel.
56258 + Unfortunately, some programs need this access to operate properly,
56259 + the most notable of which are XFree86 and hwclock. hwclock can be
56260 + remedied by having RTC support in the kernel, so real-time
56261 + clock support is enabled if this option is enabled, to ensure
56262 + that hwclock operates correctly. XFree86 still will not
56263 + operate correctly with this option enabled, so DO NOT CHOOSE Y
56264 + IF YOU USE XFree86. If you use XFree86 and you still want to
56265 + protect your kernel against modification, use the RBAC system.
56266 +
56267 +config GRKERNSEC_PROC_MEMMAP
56268 + bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
56269 + default y if (PAX_NOEXEC || PAX_ASLR)
56270 + depends on PAX_NOEXEC || PAX_ASLR
56271 + help
56272 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
56273 + give no information about the addresses of the task's mappings if
56274 + PaX features that rely on random addresses are enabled on the task.
56275 + If you use PaX it is greatly recommended that you say Y here as it
56276 + closes up a hole that makes the full ASLR useless for suid
56277 + binaries.
56278 +
56279 +config GRKERNSEC_BRUTE
56280 + bool "Deter exploit bruteforcing"
56281 + help
56282 + If you say Y here, attempts to bruteforce exploits against forking
56283 + daemons such as apache or sshd, as well as against suid/sgid binaries
56284 + will be deterred. When a child of a forking daemon is killed by PaX
56285 + or crashes due to an illegal instruction or other suspicious signal,
56286 + the parent process will be delayed 30 seconds upon every subsequent
56287 + fork until the administrator is able to assess the situation and
56288 + restart the daemon.
56289 + In the suid/sgid case, the attempt is logged, the user has all their
56290 + processes terminated, and they are prevented from executing any further
56291 + processes for 15 minutes.
56292 + It is recommended that you also enable signal logging in the auditing
56293 + section so that logs are generated when a process triggers a suspicious
56294 + signal.
56295 + If the sysctl option is enabled, a sysctl option with name
56296 + "deter_bruteforce" is created.
56297 +
56298 +config GRKERNSEC_MODHARDEN
56299 + bool "Harden module auto-loading"
56300 + depends on MODULES
56301 + help
56302 + If you say Y here, module auto-loading in response to use of some
56303 + feature implemented by an unloaded module will be restricted to
56304 + root users. Enabling this option helps defend against attacks
56305 + by unprivileged users who abuse the auto-loading behavior to
56306 + cause a vulnerable module to load that is then exploited.
56307 +
56308 + If this option prevents a legitimate use of auto-loading for a
56309 + non-root user, the administrator can execute modprobe manually
56310 + with the exact name of the module mentioned in the alert log.
56311 + Alternatively, the administrator can add the module to the list
56312 + of modules loaded at boot by modifying init scripts.
56313 +
56314 + Modification of init scripts will most likely be needed on
56315 + Ubuntu servers with encrypted home directory support enabled,
56316 + as the first non-root user logging in will cause the ecb(aes),
56317 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
56318 +
56319 +config GRKERNSEC_HIDESYM
56320 + bool "Hide kernel symbols"
56321 + help
56322 + If you say Y here, getting information on loaded modules, and
56323 + displaying all kernel symbols through a syscall will be restricted
56324 + to users with CAP_SYS_MODULE. For software compatibility reasons,
56325 + /proc/kallsyms will be restricted to the root user. The RBAC
56326 + system can hide that entry even from root.
56327 +
56328 + This option also prevents leaking of kernel addresses through
56329 + several /proc entries.
56330 +
56331 + Note that this option is only effective provided the following
56332 + conditions are met:
56333 + 1) The kernel using grsecurity is not precompiled by some distribution
56334 + 2) You have also enabled GRKERNSEC_DMESG
56335 + 3) You are using the RBAC system and hiding other files such as your
56336 + kernel image and System.map. Alternatively, enabling this option
56337 + causes the permissions on /boot, /lib/modules, and the kernel
56338 + source directory to change at compile time to prevent
56339 + reading by non-root users.
56340 + If the above conditions are met, this option will aid in providing a
56341 + useful protection against local kernel exploitation of overflows
56342 + and arbitrary read/write vulnerabilities.
56343 +
56344 +config GRKERNSEC_KERN_LOCKOUT
56345 + bool "Active kernel exploit response"
56346 + depends on X86 || ARM || PPC || SPARC
56347 + help
56348 + If you say Y here, when a PaX alert is triggered due to suspicious
56349 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
56350 + or an OOPS occurs due to bad memory accesses, instead of just
56351 + terminating the offending process (and potentially allowing
56352 + a subsequent exploit from the same user), we will take one of two
56353 + actions:
56354 + If the user was root, we will panic the system
56355 + If the user was non-root, we will log the attempt, terminate
56356 + all processes owned by the user, then prevent them from creating
56357 + any new processes until the system is restarted
56358 + This deters repeated kernel exploitation/bruteforcing attempts
56359 + and is useful for later forensics.
56360 +
56361 +endmenu
56362 +menu "Role Based Access Control Options"
56363 +depends on GRKERNSEC
56364 +
56365 +config GRKERNSEC_RBAC_DEBUG
56366 + bool
56367 +
56368 +config GRKERNSEC_NO_RBAC
56369 + bool "Disable RBAC system"
56370 + help
56371 + If you say Y here, the /dev/grsec device will be removed from the kernel,
56372 + preventing the RBAC system from being enabled. You should only say Y
56373 + here if you have no intention of using the RBAC system, so as to prevent
56374 + an attacker with root access from misusing the RBAC system to hide files
56375 + and processes when loadable module support and /dev/[k]mem have been
56376 + locked down.
56377 +
56378 +config GRKERNSEC_ACL_HIDEKERN
56379 + bool "Hide kernel processes"
56380 + help
56381 + If you say Y here, all kernel threads will be hidden from all
56382 + processes but those whose subject has the "view hidden processes"
56383 + flag.
56384 +
56385 +config GRKERNSEC_ACL_MAXTRIES
56386 + int "Maximum tries before password lockout"
56387 + default 3
56388 + help
56389 + This option enforces the maximum number of times a user can attempt
56390 + to authorize themselves with the grsecurity RBAC system before being
56391 + denied the ability to attempt authorization again for a specified time.
56392 + The lower the number, the harder it will be to brute-force a password.
56393 +
56394 +config GRKERNSEC_ACL_TIMEOUT
56395 + int "Time to wait after max password tries, in seconds"
56396 + default 30
56397 + help
56398 + This option specifies the time the user must wait after attempting to
56399 + authorize to the RBAC system with the maximum number of invalid
56400 + passwords. The higher the number, the harder it will be to brute-force
56401 + a password.
56402 +
56403 +endmenu
56404 +menu "Filesystem Protections"
56405 +depends on GRKERNSEC
56406 +
56407 +config GRKERNSEC_PROC
56408 + bool "Proc restrictions"
56409 + help
56410 + If you say Y here, the permissions of the /proc filesystem
56411 + will be altered to enhance system security and privacy. You MUST
56412 + choose either a user only restriction or a user and group restriction.
56413 + Depending upon the option you choose, you can either restrict users to
56414 + see only the processes they themselves run, or choose a group that can
56415 + view all processes and files normally restricted to root if you do not
56416 + choose the "restrict to user only" option. NOTE: If you're running identd as
56417 + a non-root user, you will have to run it as the group you specify here.
56418 +
56419 +config GRKERNSEC_PROC_USER
56420 + bool "Restrict /proc to user only"
56421 + depends on GRKERNSEC_PROC
56422 + help
56423 + If you say Y here, non-root users will only be able to view their own
56424 + processes, and will be restricted from viewing network-related information
56425 + and kernel symbol and module information.
56426 +
56427 +config GRKERNSEC_PROC_USERGROUP
56428 + bool "Allow special group"
56429 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
56430 + help
56431 + If you say Y here, you will be able to select a group that will be
56432 + able to view all processes and network-related information. If you've
56433 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
56434 + remain hidden. This option is useful if you want to run identd as
56435 + a non-root user.
56436 +
56437 +config GRKERNSEC_PROC_GID
56438 + int "GID for special group"
56439 + depends on GRKERNSEC_PROC_USERGROUP
56440 + default 1001
56441 +
56442 +config GRKERNSEC_PROC_ADD
56443 + bool "Additional restrictions"
56444 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
56445 + help
56446 + If you say Y here, additional restrictions will be placed on
56447 + /proc that keep normal users from viewing device information and
56448 + slabinfo information that could be useful for exploits.
56449 +
56450 +config GRKERNSEC_LINK
56451 + bool "Linking restrictions"
56452 + help
56453 + If you say Y here, /tmp race exploits will be prevented, since users
56454 + will no longer be able to follow symlinks owned by other users in
56455 + world-writable +t directories (e.g. /tmp), unless the owner of the
56456 + symlink is the owner of the directory. Users will also not be
56457 + able to hardlink to files they do not own. If the sysctl option is
56458 + enabled, a sysctl option with name "linking_restrictions" is created.
56459 +
56460 +config GRKERNSEC_FIFO
56461 + bool "FIFO restrictions"
56462 + help
56463 + If you say Y here, users will not be able to write to FIFOs they don't
56464 + own in world-writable +t directories (e.g. /tmp), unless the owner of
56465 + the FIFO is the same as the owner of the directory it's held in. If the sysctl
56466 + option is enabled, a sysctl option with name "fifo_restrictions" is
56467 + created.
56468 +
56469 +config GRKERNSEC_SYSFS_RESTRICT
56470 + bool "Sysfs/debugfs restriction"
56471 + depends on SYSFS
56472 + help
56473 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
56474 + any filesystem normally mounted under it (e.g. debugfs) will only
56475 + be accessible by root. These filesystems generally provide access
56476 + to hardware and debug information that isn't appropriate for unprivileged
56477 + users of the system. Sysfs and debugfs have also become a large source
56478 + of new vulnerabilities, ranging from infoleaks to local compromise.
56479 + There has been very little oversight with an eye toward security involved
56480 + in adding new exporters of information to these filesystems, so their
56481 + use is discouraged.
56482 + This option is equivalent to a chmod 0700 of the mount paths.
56483 +
56484 +config GRKERNSEC_ROFS
56485 + bool "Runtime read-only mount protection"
56486 + help
56487 + If you say Y here, a sysctl option with name "romount_protect" will
56488 + be created. By setting this option to 1 at runtime, filesystems
56489 + will be protected in the following ways:
56490 + * No new writable mounts will be allowed
56491 + * Existing read-only mounts won't be able to be remounted read/write
56492 + * Write operations will be denied on all block devices
56493 + This option acts independently of grsec_lock: once it is set to 1,
56494 + it cannot be turned off. Therefore, please be mindful of the resulting
56495 + behavior if this option is enabled in an init script on a read-only
56496 + filesystem. This feature is mainly intended for secure embedded systems.
56497 +
56498 +config GRKERNSEC_CHROOT
56499 + bool "Chroot jail restrictions"
56500 + help
56501 + If you say Y here, you will be able to choose several options that will
56502 + make breaking out of a chrooted jail much more difficult. If you
56503 + encounter no software incompatibilities with the following options, it
56504 + is recommended that you enable each one.
56505 +
56506 +config GRKERNSEC_CHROOT_MOUNT
56507 + bool "Deny mounts"
56508 + depends on GRKERNSEC_CHROOT
56509 + help
56510 + If you say Y here, processes inside a chroot will not be able to
56511 + mount or remount filesystems. If the sysctl option is enabled, a
56512 + sysctl option with name "chroot_deny_mount" is created.
56513 +
56514 +config GRKERNSEC_CHROOT_DOUBLE
56515 + bool "Deny double-chroots"
56516 + depends on GRKERNSEC_CHROOT
56517 + help
56518 + If you say Y here, processes inside a chroot will not be able to chroot
56519 + again outside the chroot. This is a widely used method of breaking
56520 + out of a chroot jail and should not be allowed. If the sysctl
56521 + option is enabled, a sysctl option with name
56522 + "chroot_deny_chroot" is created.
56523 +
56524 +config GRKERNSEC_CHROOT_PIVOT
56525 + bool "Deny pivot_root in chroot"
56526 + depends on GRKERNSEC_CHROOT
56527 + help
56528 + If you say Y here, processes inside a chroot will not be able to use
56529 + a function called pivot_root() that was introduced in Linux 2.3.41. It
56530 + works similarly to chroot in that it changes the root filesystem. This
56531 + function could be misused in a chrooted process to attempt to break out
56532 + of the chroot, and therefore should not be allowed. If the sysctl
56533 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
56534 + created.
56535 +
56536 +config GRKERNSEC_CHROOT_CHDIR
56537 + bool "Enforce chdir(\"/\") on all chroots"
56538 + depends on GRKERNSEC_CHROOT
56539 + help
56540 + If you say Y here, the current working directory of all newly-chrooted
56541 + applications will be set to the root directory of the chroot.
56542 + The man page on chroot(2) states:
56543 + Note that this call does not change the current working
56544 + directory, so that `.' can be outside the tree rooted at
56545 + `/'. In particular, the super-user can escape from a
56546 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
56547 +
56548 + It is recommended that you say Y here, since it's not known to break
56549 + any software. If the sysctl option is enabled, a sysctl option with
56550 + name "chroot_enforce_chdir" is created.
56551 +
56552 +config GRKERNSEC_CHROOT_CHMOD
56553 + bool "Deny (f)chmod +s"
56554 + depends on GRKERNSEC_CHROOT
56555 + help
56556 + If you say Y here, processes inside a chroot will not be able to chmod
56557 + or fchmod files to make them have suid or sgid bits. This protects
56558 + against another published method of breaking a chroot. If the sysctl
56559 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
56560 + created.
56561 +
56562 +config GRKERNSEC_CHROOT_FCHDIR
56563 + bool "Deny fchdir out of chroot"
56564 + depends on GRKERNSEC_CHROOT
56565 + help
56566 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
56567 + to a file descriptor of the chrooting process that points to a directory
56568 + outside the filesystem will be stopped. If the sysctl option
56569 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
56570 +
56571 +config GRKERNSEC_CHROOT_MKNOD
56572 + bool "Deny mknod"
56573 + depends on GRKERNSEC_CHROOT
56574 + help
56575 + If you say Y here, processes inside a chroot will not be allowed to
56576 + mknod. The problem with using mknod inside a chroot is that it
56577 + would allow an attacker to create a device entry that is the same
56578 + as one on the physical root of your system, which could range from
56579 + anything from the console device to a device for your harddrive (which
56580 + they could then use to wipe the drive or steal data). It is recommended
56581 + that you say Y here, unless you run into software incompatibilities.
56582 + If the sysctl option is enabled, a sysctl option with name
56583 + "chroot_deny_mknod" is created.
56584 +
56585 +config GRKERNSEC_CHROOT_SHMAT
56586 + bool "Deny shmat() out of chroot"
56587 + depends on GRKERNSEC_CHROOT
56588 + help
56589 + If you say Y here, processes inside a chroot will not be able to attach
56590 + to shared memory segments that were created outside of the chroot jail.
56591 + It is recommended that you say Y here. If the sysctl option is enabled,
56592 + a sysctl option with name "chroot_deny_shmat" is created.
56593 +
56594 +config GRKERNSEC_CHROOT_UNIX
56595 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
56596 + depends on GRKERNSEC_CHROOT
56597 + help
56598 + If you say Y here, processes inside a chroot will not be able to
56599 + connect to abstract (meaning not belonging to a filesystem) Unix
56600 + domain sockets that were bound outside of a chroot. It is recommended
56601 + that you say Y here. If the sysctl option is enabled, a sysctl option
56602 + with name "chroot_deny_unix" is created.
56603 +
56604 +config GRKERNSEC_CHROOT_FINDTASK
56605 + bool "Protect outside processes"
56606 + depends on GRKERNSEC_CHROOT
56607 + help
56608 + If you say Y here, processes inside a chroot will not be able to
56609 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
56610 + getsid, or view any process outside of the chroot. If the sysctl
56611 + option is enabled, a sysctl option with name "chroot_findtask" is
56612 + created.
56613 +
56614 +config GRKERNSEC_CHROOT_NICE
56615 + bool "Restrict priority changes"
56616 + depends on GRKERNSEC_CHROOT
56617 + help
56618 + If you say Y here, processes inside a chroot will not be able to raise
56619 + the priority of processes in the chroot, or alter the priority of
56620 + processes outside the chroot. This provides more security than simply
56621 + removing CAP_SYS_NICE from the process' capability set. If the
56622 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
56623 + is created.
56624 +
56625 +config GRKERNSEC_CHROOT_SYSCTL
56626 + bool "Deny sysctl writes"
56627 + depends on GRKERNSEC_CHROOT
56628 + help
56629 + If you say Y here, an attacker in a chroot will not be able to
56630 + write to sysctl entries, either by sysctl(2) or through a /proc
56631 + interface. It is strongly recommended that you say Y here. If the
56632 + sysctl option is enabled, a sysctl option with name
56633 + "chroot_deny_sysctl" is created.
56634 +
56635 +config GRKERNSEC_CHROOT_CAPS
56636 + bool "Capability restrictions"
56637 + depends on GRKERNSEC_CHROOT
56638 + help
56639 + If you say Y here, the capabilities on all root processes within a
56640 + chroot jail will be lowered to stop module insertion, raw i/o,
56641 + system and net admin tasks, rebooting the system, modifying immutable
56642 + files, modifying IPC owned by another, and changing the system time.
56643 + This is left as an option because it can break some apps. Disable this
56644 + if your chrooted apps are having problems performing those kinds of
56645 + tasks. If the sysctl option is enabled, a sysctl option with
56646 + name "chroot_caps" is created.
56647 +
56648 +endmenu
56649 +menu "Kernel Auditing"
56650 +depends on GRKERNSEC
56651 +
56652 +config GRKERNSEC_AUDIT_GROUP
56653 + bool "Single group for auditing"
56654 + help
56655 + If you say Y here, the exec, chdir, and (un)mount logging features
56656 + will only operate on a group you specify. This option is recommended
56657 + if you only want to watch certain users instead of having a large
56658 + amount of logs from the entire system. If the sysctl option is enabled,
56659 + a sysctl option with name "audit_group" is created.
56660 +
56661 +config GRKERNSEC_AUDIT_GID
56662 + int "GID for auditing"
56663 + depends on GRKERNSEC_AUDIT_GROUP
56664 + default 1007
56665 +
56666 +config GRKERNSEC_EXECLOG
56667 + bool "Exec logging"
56668 + help
56669 + If you say Y here, all execve() calls will be logged (since the
56670 + other exec*() calls are frontends to execve(), all execution
56671 + will be logged). Useful for shell-servers that like to keep track
56672 + of their users. If the sysctl option is enabled, a sysctl option with
56673 + name "exec_logging" is created.
56674 + WARNING: This option when enabled will produce a LOT of logs, especially
56675 + on an active system.
56676 +
56677 +config GRKERNSEC_RESLOG
56678 + bool "Resource logging"
56679 + help
56680 + If you say Y here, all attempts to overstep resource limits will
56681 + be logged with the resource name, the requested size, and the current
56682 + limit. It is highly recommended that you say Y here. If the sysctl
56683 + option is enabled, a sysctl option with name "resource_logging" is
56684 + created. If the RBAC system is enabled, the sysctl value is ignored.
56685 +
56686 +config GRKERNSEC_CHROOT_EXECLOG
56687 + bool "Log execs within chroot"
56688 + help
56689 + If you say Y here, all executions inside a chroot jail will be logged
56690 + to syslog. This can cause a large amount of logs if certain
56691 + applications (eg. djb's daemontools) are installed on the system, and
56692 + is therefore left as an option. If the sysctl option is enabled, a
56693 + sysctl option with name "chroot_execlog" is created.
56694 +
56695 +config GRKERNSEC_AUDIT_PTRACE
56696 + bool "Ptrace logging"
56697 + help
56698 + If you say Y here, all attempts to attach to a process via ptrace
56699 + will be logged. If the sysctl option is enabled, a sysctl option
56700 + with name "audit_ptrace" is created.
56701 +
56702 +config GRKERNSEC_AUDIT_CHDIR
56703 + bool "Chdir logging"
56704 + help
56705 + If you say Y here, all chdir() calls will be logged. If the sysctl
56706 + option is enabled, a sysctl option with name "audit_chdir" is created.
56707 +
56708 +config GRKERNSEC_AUDIT_MOUNT
56709 + bool "(Un)Mount logging"
56710 + help
56711 + If you say Y here, all mounts and unmounts will be logged. If the
56712 + sysctl option is enabled, a sysctl option with name "audit_mount" is
56713 + created.
56714 +
56715 +config GRKERNSEC_SIGNAL
56716 + bool "Signal logging"
56717 + help
56718 + If you say Y here, certain important signals will be logged, such as
56719 + SIGSEGV, which will as a result inform you when an error in a program
56720 + has occurred, which in some cases could indicate a possible exploit attempt.
56721 + If the sysctl option is enabled, a sysctl option with name
56722 + "signal_logging" is created.
56723 +
56724 +config GRKERNSEC_FORKFAIL
56725 + bool "Fork failure logging"
56726 + help
56727 + If you say Y here, all failed fork() attempts will be logged.
56728 + This could suggest a fork bomb, or someone attempting to overstep
56729 + their process limit. If the sysctl option is enabled, a sysctl option
56730 + with name "forkfail_logging" is created.
56731 +
56732 +config GRKERNSEC_TIME
56733 + bool "Time change logging"
56734 + help
56735 + If you say Y here, any changes of the system clock will be logged.
56736 + If the sysctl option is enabled, a sysctl option with name
56737 + "timechange_logging" is created.
56738 +
56739 +config GRKERNSEC_PROC_IPADDR
56740 + bool "/proc/<pid>/ipaddr support"
56741 + help
56742 + If you say Y here, a new entry will be added to each /proc/<pid>
56743 + directory that contains the IP address of the person using the task.
56744 + The IP is carried across local TCP and AF_UNIX stream sockets.
56745 + This information can be useful for IDS/IPSes to perform remote response
56746 + to a local attack. The entry is readable by only the owner of the
56747 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
56748 + the RBAC system), and thus does not create privacy concerns.
56749 +
56750 +config GRKERNSEC_RWXMAP_LOG
56751 + bool 'Denied RWX mmap/mprotect logging'
56752 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
56753 + help
56754 + If you say Y here, calls to mmap() and mprotect() with explicit
56755 + usage of PROT_WRITE and PROT_EXEC together will be logged when
56756 + denied by the PAX_MPROTECT feature. If the sysctl option is
56757 + enabled, a sysctl option with name "rwxmap_logging" is created.
56758 +
56759 +config GRKERNSEC_AUDIT_TEXTREL
56760 + bool 'ELF text relocations logging (READ HELP)'
56761 + depends on PAX_MPROTECT
56762 + help
56763 + If you say Y here, text relocations will be logged with the filename
56764 + of the offending library or binary. The purpose of the feature is
56765 + to help Linux distribution developers get rid of libraries and
56766 + binaries that need text relocations which hinder the future progress
56767 + of PaX. Only Linux distribution developers should say Y here, and
56768 + never on a production machine, as this option creates an information
56769 + leak that could aid an attacker in defeating the randomization of
56770 + a single memory region. If the sysctl option is enabled, a sysctl
56771 + option with name "audit_textrel" is created.
56772 +
56773 +endmenu
56774 +
56775 +menu "Executable Protections"
56776 +depends on GRKERNSEC
56777 +
56778 +config GRKERNSEC_EXECVE
56779 + bool "Enforce RLIMIT_NPROC on execs"
56780 + help
56781 + If you say Y here, users with a resource limit on processes will
56782 + have the value checked during execve() calls. The current system
56783 + only checks the system limit during fork() calls. If the sysctl option
56784 + is enabled, a sysctl option with name "execve_limiting" is created.
56785 +
56786 +config GRKERNSEC_DMESG
56787 + bool "Dmesg(8) restriction"
56788 + help
56789 + If you say Y here, non-root users will not be able to use dmesg(8)
56790 + to view up to the last 4kb of messages in the kernel's log buffer.
56791 + The kernel's log buffer often contains kernel addresses and other
56792 + identifying information useful to an attacker in fingerprinting a
56793 + system for a targeted exploit.
56794 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
56795 + created.
56796 +
56797 +config GRKERNSEC_HARDEN_PTRACE
56798 + bool "Deter ptrace-based process snooping"
56799 + help
56800 + If you say Y here, TTY sniffers and other malicious monitoring
56801 + programs implemented through ptrace will be defeated. If you
56802 + have been using the RBAC system, this option has already been
56803 + enabled for several years for all users, with the ability to make
56804 + fine-grained exceptions.
56805 +
56806 + This option only affects the ability of non-root users to ptrace
56807 + processes that are not a descendant of the ptracing process.
56808 + This means that strace ./binary and gdb ./binary will still work,
56809 + but attaching to arbitrary processes will not. If the sysctl
56810 + option is enabled, a sysctl option with name "harden_ptrace" is
56811 + created.
56812 +
56813 +config GRKERNSEC_TPE
56814 + bool "Trusted Path Execution (TPE)"
56815 + help
56816 + If you say Y here, you will be able to choose a gid to add to the
56817 + supplementary groups of users you want to mark as "untrusted."
56818 + These users will not be able to execute any files that are not in
56819 + root-owned directories writable only by root. If the sysctl option
56820 + is enabled, a sysctl option with name "tpe" is created.
56821 +
56822 +config GRKERNSEC_TPE_ALL
56823 + bool "Partially restrict all non-root users"
56824 + depends on GRKERNSEC_TPE
56825 + help
56826 + If you say Y here, all non-root users will be covered under
56827 + a weaker TPE restriction. This is separate from, and in addition to,
56828 + the main TPE options that you have selected elsewhere. Thus, if a
56829 + "trusted" GID is chosen, this restriction applies to even that GID.
56830 + Under this restriction, all non-root users will only be allowed to
56831 + execute files in directories they own that are not group or
56832 + world-writable, or in directories owned by root and writable only by
56833 + root. If the sysctl option is enabled, a sysctl option with name
56834 + "tpe_restrict_all" is created.
56835 +
56836 +config GRKERNSEC_TPE_INVERT
56837 + bool "Invert GID option"
56838 + depends on GRKERNSEC_TPE
56839 + help
56840 + If you say Y here, the group you specify in the TPE configuration will
56841 + decide what group TPE restrictions will be *disabled* for. This
56842 + option is useful if you want TPE restrictions to be applied to most
56843 + users on the system. If the sysctl option is enabled, a sysctl option
56844 + with name "tpe_invert" is created. Unlike other sysctl options, this
56845 + entry will default to on for backward-compatibility.
56846 +
56847 +config GRKERNSEC_TPE_GID
56848 + int "GID for untrusted users"
56849 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
56850 + default 1005
56851 + help
56852 + Setting this GID determines what group TPE restrictions will be
56853 + *enabled* for. If the sysctl option is enabled, a sysctl option
56854 + with name "tpe_gid" is created.
56855 +
56856 +config GRKERNSEC_TPE_GID
56857 + int "GID for trusted users"
56858 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
56859 + default 1005
56860 + help
56861 + Setting this GID determines what group TPE restrictions will be
56862 + *disabled* for. If the sysctl option is enabled, a sysctl option
56863 + with name "tpe_gid" is created.
56864 +
56865 +endmenu
56866 +menu "Network Protections"
56867 +depends on GRKERNSEC
56868 +
56869 +config GRKERNSEC_RANDNET
56870 + bool "Larger entropy pools"
56871 + help
56872 + If you say Y here, the entropy pools used for many features of Linux
56873 + and grsecurity will be doubled in size. Since several grsecurity
56874 + features use additional randomness, it is recommended that you say Y
56875 + here. Saying Y here has a similar effect as modifying
56876 + /proc/sys/kernel/random/poolsize.
56877 +
56878 +config GRKERNSEC_BLACKHOLE
56879 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
56880 + help
56881 + If you say Y here, neither TCP resets nor ICMP
56882 + destination-unreachable packets will be sent in response to packets
56883 + sent to ports for which no associated listening process exists.
56884 + This feature supports both IPV4 and IPV6 and exempts the
56885 + loopback interface from blackholing. Enabling this feature
56886 + makes a host more resilient to DoS attacks and reduces network
56887 + visibility against scanners.
56888 +
56889 + The blackhole feature as-implemented is equivalent to the FreeBSD
56890 + blackhole feature, as it prevents RST responses to all packets, not
56891 + just SYNs. Under most application behavior this causes no
56892 + problems, but applications (like haproxy) may not close certain
56893 + connections in a way that cleanly terminates them on the remote
56894 + end, leaving the remote host in LAST_ACK state. Because of this
56895 + side-effect and to prevent intentional LAST_ACK DoSes, this
56896 + feature also adds automatic mitigation against such attacks.
56897 + The mitigation drastically reduces the amount of time a socket
56898 + can spend in LAST_ACK state. If you're using haproxy and not
56899 + all servers it connects to have this option enabled, consider
56900 + disabling this feature on the haproxy host.
56901 +
56902 + If the sysctl option is enabled, two sysctl options with names
56903 + "ip_blackhole" and "lastack_retries" will be created.
56904 + While "ip_blackhole" takes the standard zero/non-zero on/off
56905 + toggle, "lastack_retries" uses the same kinds of values as
56906 + "tcp_retries1" and "tcp_retries2". The default value of 4
56907 + prevents a socket from lasting more than 45 seconds in LAST_ACK
56908 + state.
56909 +
56910 +config GRKERNSEC_SOCKET
56911 + bool "Socket restrictions"
56912 + help
56913 + If you say Y here, you will be able to choose from several options.
56914 + If you assign a GID on your system and add it to the supplementary
56915 + groups of users you want to restrict socket access to, this patch
56916 + will perform up to three things, based on the option(s) you choose.
56917 +
56918 +config GRKERNSEC_SOCKET_ALL
56919 + bool "Deny any sockets to group"
56920 + depends on GRKERNSEC_SOCKET
56921 + help
56922 + If you say Y here, you will be able to choose a GID whose users will
56923 + be unable to connect to other hosts from your machine or run server
56924 + applications from your machine. If the sysctl option is enabled, a
56925 + sysctl option with name "socket_all" is created.
56926 +
56927 +config GRKERNSEC_SOCKET_ALL_GID
56928 + int "GID to deny all sockets for"
56929 + depends on GRKERNSEC_SOCKET_ALL
56930 + default 1004
56931 + help
56932 + Here you can choose the GID to disable socket access for. Remember to
56933 + add the users you want socket access disabled for to the GID
56934 + specified here. If the sysctl option is enabled, a sysctl option
56935 + with name "socket_all_gid" is created.
56936 +
56937 +config GRKERNSEC_SOCKET_CLIENT
56938 + bool "Deny client sockets to group"
56939 + depends on GRKERNSEC_SOCKET
56940 + help
56941 + If you say Y here, you will be able to choose a GID whose users will
56942 + be unable to connect to other hosts from your machine, but will be
56943 + able to run servers. If this option is enabled, all users in the group
56944 + you specify will have to use passive mode when initiating ftp transfers
56945 + from the shell on your machine. If the sysctl option is enabled, a
56946 + sysctl option with name "socket_client" is created.
56947 +
56948 +config GRKERNSEC_SOCKET_CLIENT_GID
56949 + int "GID to deny client sockets for"
56950 + depends on GRKERNSEC_SOCKET_CLIENT
56951 + default 1003
56952 + help
56953 + Here you can choose the GID to disable client socket access for.
56954 + Remember to add the users you want client socket access disabled for to
56955 + the GID specified here. If the sysctl option is enabled, a sysctl
56956 + option with name "socket_client_gid" is created.
56957 +
56958 +config GRKERNSEC_SOCKET_SERVER
56959 + bool "Deny server sockets to group"
56960 + depends on GRKERNSEC_SOCKET
56961 + help
56962 + If you say Y here, you will be able to choose a GID whose users will
56963 + be unable to run server applications from your machine. If the sysctl
56964 + option is enabled, a sysctl option with name "socket_server" is created.
56965 +
56966 +config GRKERNSEC_SOCKET_SERVER_GID
56967 + int "GID to deny server sockets for"
56968 + depends on GRKERNSEC_SOCKET_SERVER
56969 + default 1002
56970 + help
56971 + Here you can choose the GID to disable server socket access for.
56972 + Remember to add the users you want server socket access disabled for to
56973 + the GID specified here. If the sysctl option is enabled, a sysctl
56974 + option with name "socket_server_gid" is created.
56975 +
56976 +endmenu
56977 +menu "Sysctl support"
56978 +depends on GRKERNSEC && SYSCTL
56979 +
56980 +config GRKERNSEC_SYSCTL
56981 + bool "Sysctl support"
56982 + help
56983 + If you say Y here, you will be able to change the options that
56984 + grsecurity runs with at bootup, without having to recompile your
56985 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
56986 + to enable (1) or disable (0) various features. All the sysctl entries
56987 + are mutable until the "grsec_lock" entry is set to a non-zero value.
56988 + All features enabled in the kernel configuration are disabled at boot
56989 + if you do not say Y to the "Turn on features by default" option.
56990 + All options should be set at startup, and the grsec_lock entry should
56991 + be set to a non-zero value after all the options are set.
56992 + *THIS IS EXTREMELY IMPORTANT*
56993 +
56994 +config GRKERNSEC_SYSCTL_DISTRO
56995 + bool "Extra sysctl support for distro makers (READ HELP)"
56996 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
56997 + help
56998 + If you say Y here, additional sysctl options will be created
56999 + for features that affect processes running as root. Therefore,
57000 + it is critical when using this option that the grsec_lock entry be
57001 + enabled after boot. Only distros with prebuilt kernel packages
57002 + with this option enabled that can ensure grsec_lock is enabled
57003 + after boot should use this option.
57004 + *Failure to set grsec_lock after boot makes all grsec features
57005 + this option covers useless*
57006 +
57007 + Currently this option creates the following sysctl entries:
57008 + "Disable Privileged I/O": "disable_priv_io"
57009 +
57010 +config GRKERNSEC_SYSCTL_ON
57011 + bool "Turn on features by default"
57012 + depends on GRKERNSEC_SYSCTL
57013 + help
57014 + If you say Y here, instead of having all features enabled in the
57015 + kernel configuration disabled at boot time, the features will be
57016 + enabled at boot time. It is recommended you say Y here unless
57017 + there is some reason you would want all sysctl-tunable features to
57018 + be disabled by default. As mentioned elsewhere, it is important
57019 + to enable the grsec_lock entry once you have finished modifying
57020 + the sysctl entries.
57021 +
57022 +endmenu
57023 +menu "Logging Options"
57024 +depends on GRKERNSEC
57025 +
57026 +config GRKERNSEC_FLOODTIME
57027 + int "Seconds in between log messages (minimum)"
57028 + default 10
57029 + help
57030 + This option allows you to enforce the number of seconds between
57031 + grsecurity log messages. The default should be suitable for most
57032 + people, however, if you choose to change it, choose a value small enough
57033 + to allow informative logs to be produced, but large enough to
57034 + prevent flooding.
57035 +
57036 +config GRKERNSEC_FLOODBURST
57037 + int "Number of messages in a burst (maximum)"
57038 + default 4
57039 + help
57040 + This option allows you to choose the maximum number of messages allowed
57041 + within the flood time interval you chose in a separate option. The
57042 + default should be suitable for most people, however if you find that
57043 + many of your logs are being interpreted as flooding, you may want to
57044 + raise this value.
57045 +
57046 +endmenu
57047 +
57048 +endmenu
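Putting the GRKERNSEC_SYSCTL help above into practice, a minimal boot-time helper would enable the desired toggles from the sysctl table earlier in this patch and set grsec_lock last, since the entries stop being mutable once grsec_lock is non-zero. The following is an illustrative userspace sketch (assuming GRKERNSEC_SYSCTL is enabled, GRKERNSEC_HARDEN_PTRACE is built in, and the program runs as root), not something shipped in the patch:

#include <stdio.h>

static int write_grsec_sysctl(const char *name, const char *value)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(value, f);
	return fclose(f);
}

int main(void)
{
	write_grsec_sysctl("harden_ptrace", "1");	/* one toggle from the table above */
	write_grsec_sysctl("grsec_lock", "1");		/* last: freezes all grsecurity sysctls */
	return 0;
}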
57049 diff -urNp linux-2.6.32.44/grsecurity/Makefile linux-2.6.32.44/grsecurity/Makefile
57050 --- linux-2.6.32.44/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
57051 +++ linux-2.6.32.44/grsecurity/Makefile 2011-05-24 20:27:46.000000000 -0400
57052 @@ -0,0 +1,33 @@
57053 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
57054 +# during 2001-2009 it was completely redesigned by Brad Spengler
57055 +# into an RBAC system
57056 +#
57057 +# All code in this directory and various hooks inserted throughout the kernel
57058 +# are copyright Brad Spengler - Open Source Security, Inc., and released
57059 +# under the GPL v2 or higher
57060 +
57061 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
57062 + grsec_mount.o grsec_sig.o grsec_sock.o grsec_sysctl.o \
57063 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
57064 +
57065 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
57066 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
57067 + gracl_learn.o grsec_log.o
57068 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
57069 +
57070 +ifdef CONFIG_NET
57071 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
57072 +endif
57073 +
57074 +ifndef CONFIG_GRKERNSEC
57075 +obj-y += grsec_disabled.o
57076 +endif
57077 +
57078 +ifdef CONFIG_GRKERNSEC_HIDESYM
57079 +extra-y := grsec_hidesym.o
57080 +$(obj)/grsec_hidesym.o:
57081 + @-chmod -f 500 /boot
57082 + @-chmod -f 500 /lib/modules
57083 + @-chmod -f 700 .
57084 + @echo ' grsec: protected kernel image paths'
57085 +endif
57086 diff -urNp linux-2.6.32.44/include/acpi/acpi_bus.h linux-2.6.32.44/include/acpi/acpi_bus.h
57087 --- linux-2.6.32.44/include/acpi/acpi_bus.h 2011-03-27 14:31:47.000000000 -0400
57088 +++ linux-2.6.32.44/include/acpi/acpi_bus.h 2011-08-05 20:33:55.000000000 -0400
57089 @@ -107,7 +107,7 @@ struct acpi_device_ops {
57090 acpi_op_bind bind;
57091 acpi_op_unbind unbind;
57092 acpi_op_notify notify;
57093 -};
57094 +} __no_const;
57095
57096 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
57097
57098 diff -urNp linux-2.6.32.44/include/acpi/acpi_drivers.h linux-2.6.32.44/include/acpi/acpi_drivers.h
57099 --- linux-2.6.32.44/include/acpi/acpi_drivers.h 2011-03-27 14:31:47.000000000 -0400
57100 +++ linux-2.6.32.44/include/acpi/acpi_drivers.h 2011-04-17 15:56:46.000000000 -0400
57101 @@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acp
57102 Dock Station
57103 -------------------------------------------------------------------------- */
57104 struct acpi_dock_ops {
57105 - acpi_notify_handler handler;
57106 - acpi_notify_handler uevent;
57107 + const acpi_notify_handler handler;
57108 + const acpi_notify_handler uevent;
57109 };
57110
57111 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
57112 @@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle ha
57113 extern int register_dock_notifier(struct notifier_block *nb);
57114 extern void unregister_dock_notifier(struct notifier_block *nb);
57115 extern int register_hotplug_dock_device(acpi_handle handle,
57116 - struct acpi_dock_ops *ops,
57117 + const struct acpi_dock_ops *ops,
57118 void *context);
57119 extern void unregister_hotplug_dock_device(acpi_handle handle);
57120 #else
57121 @@ -144,7 +144,7 @@ static inline void unregister_dock_notif
57122 {
57123 }
57124 static inline int register_hotplug_dock_device(acpi_handle handle,
57125 - struct acpi_dock_ops *ops,
57126 + const struct acpi_dock_ops *ops,
57127 void *context)
57128 {
57129 return -ENODEV;
57130 diff -urNp linux-2.6.32.44/include/asm-generic/atomic-long.h linux-2.6.32.44/include/asm-generic/atomic-long.h
57131 --- linux-2.6.32.44/include/asm-generic/atomic-long.h 2011-03-27 14:31:47.000000000 -0400
57132 +++ linux-2.6.32.44/include/asm-generic/atomic-long.h 2011-07-13 22:21:25.000000000 -0400
57133 @@ -22,6 +22,12 @@
57134
57135 typedef atomic64_t atomic_long_t;
57136
57137 +#ifdef CONFIG_PAX_REFCOUNT
57138 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
57139 +#else
57140 +typedef atomic64_t atomic_long_unchecked_t;
57141 +#endif
57142 +
57143 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
57144
57145 static inline long atomic_long_read(atomic_long_t *l)
57146 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
57147 return (long)atomic64_read(v);
57148 }
57149
57150 +#ifdef CONFIG_PAX_REFCOUNT
57151 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
57152 +{
57153 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57154 +
57155 + return (long)atomic64_read_unchecked(v);
57156 +}
57157 +#endif
57158 +
57159 static inline void atomic_long_set(atomic_long_t *l, long i)
57160 {
57161 atomic64_t *v = (atomic64_t *)l;
57162 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
57163 atomic64_set(v, i);
57164 }
57165
57166 +#ifdef CONFIG_PAX_REFCOUNT
57167 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
57168 +{
57169 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57170 +
57171 + atomic64_set_unchecked(v, i);
57172 +}
57173 +#endif
57174 +
57175 static inline void atomic_long_inc(atomic_long_t *l)
57176 {
57177 atomic64_t *v = (atomic64_t *)l;
57178 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
57179 atomic64_inc(v);
57180 }
57181
57182 +#ifdef CONFIG_PAX_REFCOUNT
57183 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
57184 +{
57185 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57186 +
57187 + atomic64_inc_unchecked(v);
57188 +}
57189 +#endif
57190 +
57191 static inline void atomic_long_dec(atomic_long_t *l)
57192 {
57193 atomic64_t *v = (atomic64_t *)l;
57194 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
57195 atomic64_dec(v);
57196 }
57197
57198 +#ifdef CONFIG_PAX_REFCOUNT
57199 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
57200 +{
57201 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57202 +
57203 + atomic64_dec_unchecked(v);
57204 +}
57205 +#endif
57206 +
57207 static inline void atomic_long_add(long i, atomic_long_t *l)
57208 {
57209 atomic64_t *v = (atomic64_t *)l;
57210 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long
57211 atomic64_add(i, v);
57212 }
57213
57214 +#ifdef CONFIG_PAX_REFCOUNT
57215 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57216 +{
57217 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57218 +
57219 + atomic64_add_unchecked(i, v);
57220 +}
57221 +#endif
57222 +
57223 static inline void atomic_long_sub(long i, atomic_long_t *l)
57224 {
57225 atomic64_t *v = (atomic64_t *)l;
57226 @@ -115,6 +166,15 @@ static inline long atomic_long_inc_retur
57227 return (long)atomic64_inc_return(v);
57228 }
57229
57230 +#ifdef CONFIG_PAX_REFCOUNT
57231 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57232 +{
57233 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57234 +
57235 + return (long)atomic64_inc_return_unchecked(v);
57236 +}
57237 +#endif
57238 +
57239 static inline long atomic_long_dec_return(atomic_long_t *l)
57240 {
57241 atomic64_t *v = (atomic64_t *)l;
57242 @@ -140,6 +200,12 @@ static inline long atomic_long_add_unles
57243
57244 typedef atomic_t atomic_long_t;
57245
57246 +#ifdef CONFIG_PAX_REFCOUNT
57247 +typedef atomic_unchecked_t atomic_long_unchecked_t;
57248 +#else
57249 +typedef atomic_t atomic_long_unchecked_t;
57250 +#endif
57251 +
57252 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
57253 static inline long atomic_long_read(atomic_long_t *l)
57254 {
57255 @@ -148,6 +214,15 @@ static inline long atomic_long_read(atom
57256 return (long)atomic_read(v);
57257 }
57258
57259 +#ifdef CONFIG_PAX_REFCOUNT
57260 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
57261 +{
57262 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57263 +
57264 + return (long)atomic_read_unchecked(v);
57265 +}
57266 +#endif
57267 +
57268 static inline void atomic_long_set(atomic_long_t *l, long i)
57269 {
57270 atomic_t *v = (atomic_t *)l;
57271 @@ -155,6 +230,15 @@ static inline void atomic_long_set(atomi
57272 atomic_set(v, i);
57273 }
57274
57275 +#ifdef CONFIG_PAX_REFCOUNT
57276 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
57277 +{
57278 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57279 +
57280 + atomic_set_unchecked(v, i);
57281 +}
57282 +#endif
57283 +
57284 static inline void atomic_long_inc(atomic_long_t *l)
57285 {
57286 atomic_t *v = (atomic_t *)l;
57287 @@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomi
57288 atomic_inc(v);
57289 }
57290
57291 +#ifdef CONFIG_PAX_REFCOUNT
57292 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
57293 +{
57294 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57295 +
57296 + atomic_inc_unchecked(v);
57297 +}
57298 +#endif
57299 +
57300 static inline void atomic_long_dec(atomic_long_t *l)
57301 {
57302 atomic_t *v = (atomic_t *)l;
57303 @@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomi
57304 atomic_dec(v);
57305 }
57306
57307 +#ifdef CONFIG_PAX_REFCOUNT
57308 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
57309 +{
57310 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57311 +
57312 + atomic_dec_unchecked(v);
57313 +}
57314 +#endif
57315 +
57316 static inline void atomic_long_add(long i, atomic_long_t *l)
57317 {
57318 atomic_t *v = (atomic_t *)l;
57319 @@ -176,6 +278,15 @@ static inline void atomic_long_add(long
57320 atomic_add(i, v);
57321 }
57322
57323 +#ifdef CONFIG_PAX_REFCOUNT
57324 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57325 +{
57326 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57327 +
57328 + atomic_add_unchecked(i, v);
57329 +}
57330 +#endif
57331 +
57332 static inline void atomic_long_sub(long i, atomic_long_t *l)
57333 {
57334 atomic_t *v = (atomic_t *)l;
57335 @@ -232,6 +343,15 @@ static inline long atomic_long_inc_retur
57336 return (long)atomic_inc_return(v);
57337 }
57338
57339 +#ifdef CONFIG_PAX_REFCOUNT
57340 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57341 +{
57342 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57343 +
57344 + return (long)atomic_inc_return_unchecked(v);
57345 +}
57346 +#endif
57347 +
57348 static inline long atomic_long_dec_return(atomic_long_t *l)
57349 {
57350 atomic_t *v = (atomic_t *)l;
57351 @@ -255,4 +375,47 @@ static inline long atomic_long_add_unles
57352
57353 #endif /* BITS_PER_LONG == 64 */
57354
57355 +#ifdef CONFIG_PAX_REFCOUNT
57356 +static inline void pax_refcount_needs_these_functions(void)
57357 +{
57358 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
57359 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
57360 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
57361 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
57362 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
57363 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
57364 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
57365 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
57366 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
57367 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
57368 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
57369 +
57370 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
57371 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
57372 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
57373 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
57374 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
57375 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
57376 +}
57377 +#else
57378 +#define atomic_read_unchecked(v) atomic_read(v)
57379 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
57380 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
57381 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
57382 +#define atomic_inc_unchecked(v) atomic_inc(v)
57383 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
57384 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
57385 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
57386 +#define atomic_dec_unchecked(v) atomic_dec(v)
57387 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
57388 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
57389 +
57390 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
57391 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
57392 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
57393 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
57394 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
57395 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
57396 +#endif
57397 +
57398 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
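
The *_unchecked types and helpers added to atomic-long.h exist so that counters which are allowed to wrap (statistics, debug IDs) can opt out of the overflow detection PAX_REFCOUNT applies to ordinary atomic_t/atomic_long_t reference counts; when PAX_REFCOUNT is off, the #else block simply maps the new names back onto the plain atomics. The following is a conceptual userspace sketch of why two counter kinds exist, with a plain int standing in for atomic_t; it is not the PaX implementation, which uses architecture-specific atomic instructions.

/* Conceptual illustration of checked vs. unchecked increments. */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* "checked": refuse to wrap a reference count past INT_MAX */
static void refcount_inc_checked(int *v)
{
	if (*v == INT_MAX) {
		fprintf(stderr, "refcount overflow detected, aborting\n");
		abort();	/* the kernel would instead kill the offending task */
	}
	(*v)++;
}

/* "unchecked": statistics counter, wrapping is harmless and intended */
static void stat_inc_unchecked(unsigned int *v)
{
	(*v)++;
}

int main(void)
{
	int refs = INT_MAX - 1;
	unsigned int stats = UINT_MAX;

	refcount_inc_checked(&refs);	/* ok: INT_MAX - 1 -> INT_MAX */
	stat_inc_unchecked(&stats);	/* wraps to 0, by design */
	printf("refs=%d stats=%u\n", refs, stats);
	refcount_inc_checked(&refs);	/* overflow attempt: detected, aborts */
	return 0;
}
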
57399 diff -urNp linux-2.6.32.44/include/asm-generic/cache.h linux-2.6.32.44/include/asm-generic/cache.h
57400 --- linux-2.6.32.44/include/asm-generic/cache.h 2011-03-27 14:31:47.000000000 -0400
57401 +++ linux-2.6.32.44/include/asm-generic/cache.h 2011-07-06 19:53:33.000000000 -0400
57402 @@ -6,7 +6,7 @@
57403 * cache lines need to provide their own cache.h.
57404 */
57405
57406 -#define L1_CACHE_SHIFT 5
57407 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
57408 +#define L1_CACHE_SHIFT 5UL
57409 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
57410
57411 #endif /* __ASM_GENERIC_CACHE_H */
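
The UL suffixes above change only the type of the constants: 1 << 5 is an int, while 1UL << 5 is an unsigned long, so any expression built from L1_CACHE_BYTES is evaluated in unsigned long arithmetic. A small standalone sketch of the difference; the macro names are reused here purely for illustration.

/* The type of a shift expression follows its (promoted) left operand. */
#include <stdio.h>

#define L1_CACHE_BYTES_INT  (1 << 5)    /* type: int */
#define L1_CACHE_BYTES_UL   (1UL << 5)  /* type: unsigned long */

int main(void)
{
	printf("sizeof(int form)   = %zu\n", sizeof(L1_CACHE_BYTES_INT));
	printf("sizeof(ulong form) = %zu\n", sizeof(L1_CACHE_BYTES_UL));

	/* Signedness also differs: comparing against -1 converts -1 to
	 * ULONG_MAX in the unsigned long case (expect a -Wsign-compare
	 * warning, which is exactly the point being demonstrated). */
	printf("-1 < int form?   %d\n", -1 < L1_CACHE_BYTES_INT);  /* 1 */
	printf("-1 < ulong form? %d\n", -1 < L1_CACHE_BYTES_UL);   /* 0 */
	return 0;
}
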
57412 diff -urNp linux-2.6.32.44/include/asm-generic/dma-mapping-common.h linux-2.6.32.44/include/asm-generic/dma-mapping-common.h
57413 --- linux-2.6.32.44/include/asm-generic/dma-mapping-common.h 2011-03-27 14:31:47.000000000 -0400
57414 +++ linux-2.6.32.44/include/asm-generic/dma-mapping-common.h 2011-04-17 15:56:46.000000000 -0400
57415 @@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_
57416 enum dma_data_direction dir,
57417 struct dma_attrs *attrs)
57418 {
57419 - struct dma_map_ops *ops = get_dma_ops(dev);
57420 + const struct dma_map_ops *ops = get_dma_ops(dev);
57421 dma_addr_t addr;
57422
57423 kmemcheck_mark_initialized(ptr, size);
57424 @@ -30,7 +30,7 @@ static inline void dma_unmap_single_attr
57425 enum dma_data_direction dir,
57426 struct dma_attrs *attrs)
57427 {
57428 - struct dma_map_ops *ops = get_dma_ops(dev);
57429 + const struct dma_map_ops *ops = get_dma_ops(dev);
57430
57431 BUG_ON(!valid_dma_direction(dir));
57432 if (ops->unmap_page)
57433 @@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struc
57434 int nents, enum dma_data_direction dir,
57435 struct dma_attrs *attrs)
57436 {
57437 - struct dma_map_ops *ops = get_dma_ops(dev);
57438 + const struct dma_map_ops *ops = get_dma_ops(dev);
57439 int i, ents;
57440 struct scatterlist *s;
57441
57442 @@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(st
57443 int nents, enum dma_data_direction dir,
57444 struct dma_attrs *attrs)
57445 {
57446 - struct dma_map_ops *ops = get_dma_ops(dev);
57447 + const struct dma_map_ops *ops = get_dma_ops(dev);
57448
57449 BUG_ON(!valid_dma_direction(dir));
57450 debug_dma_unmap_sg(dev, sg, nents, dir);
57451 @@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(st
57452 size_t offset, size_t size,
57453 enum dma_data_direction dir)
57454 {
57455 - struct dma_map_ops *ops = get_dma_ops(dev);
57456 + const struct dma_map_ops *ops = get_dma_ops(dev);
57457 dma_addr_t addr;
57458
57459 kmemcheck_mark_initialized(page_address(page) + offset, size);
57460 @@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(st
57461 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
57462 size_t size, enum dma_data_direction dir)
57463 {
57464 - struct dma_map_ops *ops = get_dma_ops(dev);
57465 + const struct dma_map_ops *ops = get_dma_ops(dev);
57466
57467 BUG_ON(!valid_dma_direction(dir));
57468 if (ops->unmap_page)
57469 @@ -97,7 +97,7 @@ static inline void dma_sync_single_for_c
57470 size_t size,
57471 enum dma_data_direction dir)
57472 {
57473 - struct dma_map_ops *ops = get_dma_ops(dev);
57474 + const struct dma_map_ops *ops = get_dma_ops(dev);
57475
57476 BUG_ON(!valid_dma_direction(dir));
57477 if (ops->sync_single_for_cpu)
57478 @@ -109,7 +109,7 @@ static inline void dma_sync_single_for_d
57479 dma_addr_t addr, size_t size,
57480 enum dma_data_direction dir)
57481 {
57482 - struct dma_map_ops *ops = get_dma_ops(dev);
57483 + const struct dma_map_ops *ops = get_dma_ops(dev);
57484
57485 BUG_ON(!valid_dma_direction(dir));
57486 if (ops->sync_single_for_device)
57487 @@ -123,7 +123,7 @@ static inline void dma_sync_single_range
57488 size_t size,
57489 enum dma_data_direction dir)
57490 {
57491 - struct dma_map_ops *ops = get_dma_ops(dev);
57492 + const struct dma_map_ops *ops = get_dma_ops(dev);
57493
57494 BUG_ON(!valid_dma_direction(dir));
57495 if (ops->sync_single_range_for_cpu) {
57496 @@ -140,7 +140,7 @@ static inline void dma_sync_single_range
57497 size_t size,
57498 enum dma_data_direction dir)
57499 {
57500 - struct dma_map_ops *ops = get_dma_ops(dev);
57501 + const struct dma_map_ops *ops = get_dma_ops(dev);
57502
57503 BUG_ON(!valid_dma_direction(dir));
57504 if (ops->sync_single_range_for_device) {
57505 @@ -155,7 +155,7 @@ static inline void
57506 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
57507 int nelems, enum dma_data_direction dir)
57508 {
57509 - struct dma_map_ops *ops = get_dma_ops(dev);
57510 + const struct dma_map_ops *ops = get_dma_ops(dev);
57511
57512 BUG_ON(!valid_dma_direction(dir));
57513 if (ops->sync_sg_for_cpu)
57514 @@ -167,7 +167,7 @@ static inline void
57515 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
57516 int nelems, enum dma_data_direction dir)
57517 {
57518 - struct dma_map_ops *ops = get_dma_ops(dev);
57519 + const struct dma_map_ops *ops = get_dma_ops(dev);
57520
57521 BUG_ON(!valid_dma_direction(dir));
57522 if (ops->sync_sg_for_device)
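
Constifying the ops pointers here (and the ops structures themselves elsewhere in these headers) lets the compiler place function-pointer tables in read-only memory and reject writes through them, which is the purpose of the const/__no_const churn throughout this part of the patch. A small standalone sketch of the pattern, with made-up structure and function names:

/* Function-pointer tables as const objects: accidental writes are rejected
 * at compile time and the table can live in a read-only section. */
#include <stdio.h>

struct demo_ops {
	int (*open)(const char *name);
	void (*close)(void);
};

static int demo_open(const char *name)
{
	printf("open %s\n", name);
	return 0;
}

static void demo_close(void)
{
	printf("close\n");
}

/* const instance: ends up in .rodata */
static const struct demo_ops demo_ops = {
	.open  = demo_open,
	.close = demo_close,
};

static void use(const struct demo_ops *ops)
{
	ops->open("example");
	ops->close();
	/* ops->open = NULL;  <-- would not compile: assignment of member
	 *                        of read-only object */
}

int main(void)
{
	use(&demo_ops);
	return 0;
}
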
57523 diff -urNp linux-2.6.32.44/include/asm-generic/futex.h linux-2.6.32.44/include/asm-generic/futex.h
57524 --- linux-2.6.32.44/include/asm-generic/futex.h 2011-03-27 14:31:47.000000000 -0400
57525 +++ linux-2.6.32.44/include/asm-generic/futex.h 2011-04-17 15:56:46.000000000 -0400
57526 @@ -6,7 +6,7 @@
57527 #include <asm/errno.h>
57528
57529 static inline int
57530 -futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
57531 +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
57532 {
57533 int op = (encoded_op >> 28) & 7;
57534 int cmp = (encoded_op >> 24) & 15;
57535 @@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op,
57536 }
57537
57538 static inline int
57539 -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
57540 +futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
57541 {
57542 return -ENOSYS;
57543 }
57544 diff -urNp linux-2.6.32.44/include/asm-generic/int-l64.h linux-2.6.32.44/include/asm-generic/int-l64.h
57545 --- linux-2.6.32.44/include/asm-generic/int-l64.h 2011-03-27 14:31:47.000000000 -0400
57546 +++ linux-2.6.32.44/include/asm-generic/int-l64.h 2011-04-17 15:56:46.000000000 -0400
57547 @@ -46,6 +46,8 @@ typedef unsigned int u32;
57548 typedef signed long s64;
57549 typedef unsigned long u64;
57550
57551 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
57552 +
57553 #define S8_C(x) x
57554 #define U8_C(x) x ## U
57555 #define S16_C(x) x
57556 diff -urNp linux-2.6.32.44/include/asm-generic/int-ll64.h linux-2.6.32.44/include/asm-generic/int-ll64.h
57557 --- linux-2.6.32.44/include/asm-generic/int-ll64.h 2011-03-27 14:31:47.000000000 -0400
57558 +++ linux-2.6.32.44/include/asm-generic/int-ll64.h 2011-04-17 15:56:46.000000000 -0400
57559 @@ -51,6 +51,8 @@ typedef unsigned int u32;
57560 typedef signed long long s64;
57561 typedef unsigned long long u64;
57562
57563 +typedef unsigned long long intoverflow_t;
57564 +
57565 #define S8_C(x) x
57566 #define U8_C(x) x ## U
57567 #define S16_C(x) x
57568 diff -urNp linux-2.6.32.44/include/asm-generic/kmap_types.h linux-2.6.32.44/include/asm-generic/kmap_types.h
57569 --- linux-2.6.32.44/include/asm-generic/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
57570 +++ linux-2.6.32.44/include/asm-generic/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
57571 @@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
57572 KMAP_D(16) KM_IRQ_PTE,
57573 KMAP_D(17) KM_NMI,
57574 KMAP_D(18) KM_NMI_PTE,
57575 -KMAP_D(19) KM_TYPE_NR
57576 +KMAP_D(19) KM_CLEARPAGE,
57577 +KMAP_D(20) KM_TYPE_NR
57578 };
57579
57580 #undef KMAP_D
57581 diff -urNp linux-2.6.32.44/include/asm-generic/pgtable.h linux-2.6.32.44/include/asm-generic/pgtable.h
57582 --- linux-2.6.32.44/include/asm-generic/pgtable.h 2011-03-27 14:31:47.000000000 -0400
57583 +++ linux-2.6.32.44/include/asm-generic/pgtable.h 2011-04-17 15:56:46.000000000 -0400
57584 @@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_ar
57585 unsigned long size);
57586 #endif
57587
57588 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
57589 +static inline unsigned long pax_open_kernel(void) { return 0; }
57590 +#endif
57591 +
57592 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
57593 +static inline unsigned long pax_close_kernel(void) { return 0; }
57594 +#endif
57595 +
57596 #endif /* !__ASSEMBLY__ */
57597
57598 #endif /* _ASM_GENERIC_PGTABLE_H */
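
pax_open_kernel()/pax_close_kernel() bracket the rare writes the kernel must make to otherwise read-only data; architectures with real constification support provide working versions, and the no-op stubs above let generic code compile unchanged. The following is a rough userspace analogue of the bracketing idea, using mprotect() on an mmap'ed page; it is an analogy only, not how the kernel versions are implemented.

/* Userspace analogue: keep a page read-only, briefly flip it to
 * read-write around an intentional update, then flip it back. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static void *page;
static long pagesz;

static void open_kernel(void)	/* analogue of pax_open_kernel() */
{
	if (mprotect(page, pagesz, PROT_READ | PROT_WRITE))
		perror("mprotect rw");
}

static void close_kernel(void)	/* analogue of pax_close_kernel() */
{
	if (mprotect(page, pagesz, PROT_READ))
		perror("mprotect ro");
}

int main(void)
{
	pagesz = sysconf(_SC_PAGESIZE);
	page = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (page == MAP_FAILED)
		return 1;

	strcpy(page, "initial");
	close_kernel();			/* data is now read-only */

	open_kernel();			/* intentional, bracketed update */
	strcpy(page, "updated");
	close_kernel();

	puts((const char *)page);	/* reading is always allowed */
	return 0;
}
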
57599 diff -urNp linux-2.6.32.44/include/asm-generic/pgtable-nopmd.h linux-2.6.32.44/include/asm-generic/pgtable-nopmd.h
57600 --- linux-2.6.32.44/include/asm-generic/pgtable-nopmd.h 2011-03-27 14:31:47.000000000 -0400
57601 +++ linux-2.6.32.44/include/asm-generic/pgtable-nopmd.h 2011-04-17 15:56:46.000000000 -0400
57602 @@ -1,14 +1,19 @@
57603 #ifndef _PGTABLE_NOPMD_H
57604 #define _PGTABLE_NOPMD_H
57605
57606 -#ifndef __ASSEMBLY__
57607 -
57608 #include <asm-generic/pgtable-nopud.h>
57609
57610 -struct mm_struct;
57611 -
57612 #define __PAGETABLE_PMD_FOLDED
57613
57614 +#define PMD_SHIFT PUD_SHIFT
57615 +#define PTRS_PER_PMD 1
57616 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
57617 +#define PMD_MASK (~(PMD_SIZE-1))
57618 +
57619 +#ifndef __ASSEMBLY__
57620 +
57621 +struct mm_struct;
57622 +
57623 /*
57624 * Having the pmd type consist of a pud gets the size right, and allows
57625 * us to conceptually access the pud entry that this pmd is folded into
57626 @@ -16,11 +21,6 @@ struct mm_struct;
57627 */
57628 typedef struct { pud_t pud; } pmd_t;
57629
57630 -#define PMD_SHIFT PUD_SHIFT
57631 -#define PTRS_PER_PMD 1
57632 -#define PMD_SIZE (1UL << PMD_SHIFT)
57633 -#define PMD_MASK (~(PMD_SIZE-1))
57634 -
57635 /*
57636 * The "pud_xxx()" functions here are trivial for a folded two-level
57637 * setup: the pmd is never bad, and a pmd always exists (as it's folded
57638 diff -urNp linux-2.6.32.44/include/asm-generic/pgtable-nopud.h linux-2.6.32.44/include/asm-generic/pgtable-nopud.h
57639 --- linux-2.6.32.44/include/asm-generic/pgtable-nopud.h 2011-03-27 14:31:47.000000000 -0400
57640 +++ linux-2.6.32.44/include/asm-generic/pgtable-nopud.h 2011-04-17 15:56:46.000000000 -0400
57641 @@ -1,10 +1,15 @@
57642 #ifndef _PGTABLE_NOPUD_H
57643 #define _PGTABLE_NOPUD_H
57644
57645 -#ifndef __ASSEMBLY__
57646 -
57647 #define __PAGETABLE_PUD_FOLDED
57648
57649 +#define PUD_SHIFT PGDIR_SHIFT
57650 +#define PTRS_PER_PUD 1
57651 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
57652 +#define PUD_MASK (~(PUD_SIZE-1))
57653 +
57654 +#ifndef __ASSEMBLY__
57655 +
57656 /*
57657 * Having the pud type consist of a pgd gets the size right, and allows
57658 * us to conceptually access the pgd entry that this pud is folded into
57659 @@ -12,11 +17,6 @@
57660 */
57661 typedef struct { pgd_t pgd; } pud_t;
57662
57663 -#define PUD_SHIFT PGDIR_SHIFT
57664 -#define PTRS_PER_PUD 1
57665 -#define PUD_SIZE (1UL << PUD_SHIFT)
57666 -#define PUD_MASK (~(PUD_SIZE-1))
57667 -
57668 /*
57669 * The "pgd_xxx()" functions here are trivial for a folded two-level
57670 * setup: the pud is never bad, and a pud always exists (as it's folded
57671 diff -urNp linux-2.6.32.44/include/asm-generic/vmlinux.lds.h linux-2.6.32.44/include/asm-generic/vmlinux.lds.h
57672 --- linux-2.6.32.44/include/asm-generic/vmlinux.lds.h 2011-03-27 14:31:47.000000000 -0400
57673 +++ linux-2.6.32.44/include/asm-generic/vmlinux.lds.h 2011-04-17 15:56:46.000000000 -0400
57674 @@ -199,6 +199,7 @@
57675 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
57676 VMLINUX_SYMBOL(__start_rodata) = .; \
57677 *(.rodata) *(.rodata.*) \
57678 + *(.data.read_only) \
57679 *(__vermagic) /* Kernel version magic */ \
57680 *(__markers_strings) /* Markers: strings */ \
57681 *(__tracepoints_strings)/* Tracepoints: strings */ \
57682 @@ -656,22 +657,24 @@
57683 * section in the linker script will go there too. @phdr should have
57684 * a leading colon.
57685 *
57686 - * Note that this macros defines __per_cpu_load as an absolute symbol.
57687 + * Note that this macro defines per_cpu_load as an absolute symbol.
57688 * If there is no need to put the percpu section at a predetermined
57689 * address, use PERCPU().
57690 */
57691 #define PERCPU_VADDR(vaddr, phdr) \
57692 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
57693 - .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
57694 + per_cpu_load = .; \
57695 + .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
57696 - LOAD_OFFSET) { \
57697 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
57698 VMLINUX_SYMBOL(__per_cpu_start) = .; \
57699 *(.data.percpu.first) \
57700 - *(.data.percpu.page_aligned) \
57701 *(.data.percpu) \
57702 + . = ALIGN(PAGE_SIZE); \
57703 + *(.data.percpu.page_aligned) \
57704 *(.data.percpu.shared_aligned) \
57705 VMLINUX_SYMBOL(__per_cpu_end) = .; \
57706 } phdr \
57707 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
57708 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
57709
57710 /**
57711 * PERCPU - define output section for percpu area, simple version
57712 diff -urNp linux-2.6.32.44/include/drm/drm_crtc_helper.h linux-2.6.32.44/include/drm/drm_crtc_helper.h
57713 --- linux-2.6.32.44/include/drm/drm_crtc_helper.h 2011-03-27 14:31:47.000000000 -0400
57714 +++ linux-2.6.32.44/include/drm/drm_crtc_helper.h 2011-08-05 20:33:55.000000000 -0400
57715 @@ -64,7 +64,7 @@ struct drm_crtc_helper_funcs {
57716
57717 /* reload the current crtc LUT */
57718 void (*load_lut)(struct drm_crtc *crtc);
57719 -};
57720 +} __no_const;
57721
57722 struct drm_encoder_helper_funcs {
57723 void (*dpms)(struct drm_encoder *encoder, int mode);
57724 @@ -85,7 +85,7 @@ struct drm_encoder_helper_funcs {
57725 struct drm_connector *connector);
57726 /* disable encoder when not in use - more explicit than dpms off */
57727 void (*disable)(struct drm_encoder *encoder);
57728 -};
57729 +} __no_const;
57730
57731 struct drm_connector_helper_funcs {
57732 int (*get_modes)(struct drm_connector *connector);
57733 diff -urNp linux-2.6.32.44/include/drm/drmP.h linux-2.6.32.44/include/drm/drmP.h
57734 --- linux-2.6.32.44/include/drm/drmP.h 2011-03-27 14:31:47.000000000 -0400
57735 +++ linux-2.6.32.44/include/drm/drmP.h 2011-04-17 15:56:46.000000000 -0400
57736 @@ -71,6 +71,7 @@
57737 #include <linux/workqueue.h>
57738 #include <linux/poll.h>
57739 #include <asm/pgalloc.h>
57740 +#include <asm/local.h>
57741 #include "drm.h"
57742
57743 #include <linux/idr.h>
57744 @@ -814,7 +815,7 @@ struct drm_driver {
57745 void (*vgaarb_irq)(struct drm_device *dev, bool state);
57746
57747 /* Driver private ops for this object */
57748 - struct vm_operations_struct *gem_vm_ops;
57749 + const struct vm_operations_struct *gem_vm_ops;
57750
57751 int major;
57752 int minor;
57753 @@ -917,7 +918,7 @@ struct drm_device {
57754
57755 /** \name Usage Counters */
57756 /*@{ */
57757 - int open_count; /**< Outstanding files open */
57758 + local_t open_count; /**< Outstanding files open */
57759 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
57760 atomic_t vma_count; /**< Outstanding vma areas open */
57761 int buf_use; /**< Buffers in use -- cannot alloc */
57762 @@ -928,7 +929,7 @@ struct drm_device {
57763 /*@{ */
57764 unsigned long counters;
57765 enum drm_stat_type types[15];
57766 - atomic_t counts[15];
57767 + atomic_unchecked_t counts[15];
57768 /*@} */
57769
57770 struct list_head filelist;
57771 @@ -1016,7 +1017,7 @@ struct drm_device {
57772 struct pci_controller *hose;
57773 #endif
57774 struct drm_sg_mem *sg; /**< Scatter gather memory */
57775 - unsigned int num_crtcs; /**< Number of CRTCs on this device */
57776 + unsigned int num_crtcs; /**< Number of CRTCs on this device */
57777 void *dev_private; /**< device private data */
57778 void *mm_private;
57779 struct address_space *dev_mapping;
57780 @@ -1042,11 +1043,11 @@ struct drm_device {
57781 spinlock_t object_name_lock;
57782 struct idr object_name_idr;
57783 atomic_t object_count;
57784 - atomic_t object_memory;
57785 + atomic_unchecked_t object_memory;
57786 atomic_t pin_count;
57787 - atomic_t pin_memory;
57788 + atomic_unchecked_t pin_memory;
57789 atomic_t gtt_count;
57790 - atomic_t gtt_memory;
57791 + atomic_unchecked_t gtt_memory;
57792 uint32_t gtt_total;
57793 uint32_t invalidate_domains; /* domains pending invalidation */
57794 uint32_t flush_domains; /* domains pending flush */
57795 diff -urNp linux-2.6.32.44/include/drm/ttm/ttm_memory.h linux-2.6.32.44/include/drm/ttm/ttm_memory.h
57796 --- linux-2.6.32.44/include/drm/ttm/ttm_memory.h 2011-03-27 14:31:47.000000000 -0400
57797 +++ linux-2.6.32.44/include/drm/ttm/ttm_memory.h 2011-08-05 20:33:55.000000000 -0400
57798 @@ -47,7 +47,7 @@
57799
57800 struct ttm_mem_shrink {
57801 int (*do_shrink) (struct ttm_mem_shrink *);
57802 -};
57803 +} __no_const;
57804
57805 /**
57806 * struct ttm_mem_global - Global memory accounting structure.
57807 diff -urNp linux-2.6.32.44/include/linux/a.out.h linux-2.6.32.44/include/linux/a.out.h
57808 --- linux-2.6.32.44/include/linux/a.out.h 2011-03-27 14:31:47.000000000 -0400
57809 +++ linux-2.6.32.44/include/linux/a.out.h 2011-04-17 15:56:46.000000000 -0400
57810 @@ -39,6 +39,14 @@ enum machine_type {
57811 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
57812 };
57813
57814 +/* Constants for the N_FLAGS field */
57815 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57816 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
57817 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
57818 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
57819 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57820 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57821 +
57822 #if !defined (N_MAGIC)
57823 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
57824 #endif
57825 diff -urNp linux-2.6.32.44/include/linux/atmdev.h linux-2.6.32.44/include/linux/atmdev.h
57826 --- linux-2.6.32.44/include/linux/atmdev.h 2011-03-27 14:31:47.000000000 -0400
57827 +++ linux-2.6.32.44/include/linux/atmdev.h 2011-04-17 15:56:46.000000000 -0400
57828 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
57829 #endif
57830
57831 struct k_atm_aal_stats {
57832 -#define __HANDLE_ITEM(i) atomic_t i
57833 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
57834 __AAL_STAT_ITEMS
57835 #undef __HANDLE_ITEM
57836 };
57837 diff -urNp linux-2.6.32.44/include/linux/backlight.h linux-2.6.32.44/include/linux/backlight.h
57838 --- linux-2.6.32.44/include/linux/backlight.h 2011-03-27 14:31:47.000000000 -0400
57839 +++ linux-2.6.32.44/include/linux/backlight.h 2011-04-17 15:56:46.000000000 -0400
57840 @@ -36,18 +36,18 @@ struct backlight_device;
57841 struct fb_info;
57842
57843 struct backlight_ops {
57844 - unsigned int options;
57845 + const unsigned int options;
57846
57847 #define BL_CORE_SUSPENDRESUME (1 << 0)
57848
57849 /* Notify the backlight driver some property has changed */
57850 - int (*update_status)(struct backlight_device *);
57851 + int (* const update_status)(struct backlight_device *);
57852 /* Return the current backlight brightness (accounting for power,
57853 fb_blank etc.) */
57854 - int (*get_brightness)(struct backlight_device *);
57855 + int (* const get_brightness)(struct backlight_device *);
57856 /* Check if given framebuffer device is the one bound to this backlight;
57857 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
57858 - int (*check_fb)(struct fb_info *);
57859 + int (* const check_fb)(struct fb_info *);
57860 };
57861
57862 /* This structure defines all the properties of a backlight */
57863 @@ -86,7 +86,7 @@ struct backlight_device {
57864 registered this device has been unloaded, and if class_get_devdata()
57865 points to something in the body of that driver, it is also invalid. */
57866 struct mutex ops_lock;
57867 - struct backlight_ops *ops;
57868 + const struct backlight_ops *ops;
57869
57870 /* The framebuffer notifier block */
57871 struct notifier_block fb_notif;
57872 @@ -103,7 +103,7 @@ static inline void backlight_update_stat
57873 }
57874
57875 extern struct backlight_device *backlight_device_register(const char *name,
57876 - struct device *dev, void *devdata, struct backlight_ops *ops);
57877 + struct device *dev, void *devdata, const struct backlight_ops *ops);
57878 extern void backlight_device_unregister(struct backlight_device *bd);
57879 extern void backlight_force_update(struct backlight_device *bd,
57880 enum backlight_update_reason reason);
57881 diff -urNp linux-2.6.32.44/include/linux/binfmts.h linux-2.6.32.44/include/linux/binfmts.h
57882 --- linux-2.6.32.44/include/linux/binfmts.h 2011-04-17 17:00:52.000000000 -0400
57883 +++ linux-2.6.32.44/include/linux/binfmts.h 2011-04-17 15:56:46.000000000 -0400
57884 @@ -83,6 +83,7 @@ struct linux_binfmt {
57885 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
57886 int (*load_shlib)(struct file *);
57887 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
57888 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
57889 unsigned long min_coredump; /* minimal dump size */
57890 int hasvdso;
57891 };
57892 diff -urNp linux-2.6.32.44/include/linux/blkdev.h linux-2.6.32.44/include/linux/blkdev.h
57893 --- linux-2.6.32.44/include/linux/blkdev.h 2011-03-27 14:31:47.000000000 -0400
57894 +++ linux-2.6.32.44/include/linux/blkdev.h 2011-04-17 15:56:46.000000000 -0400
57895 @@ -1265,19 +1265,19 @@ static inline int blk_integrity_rq(struc
57896 #endif /* CONFIG_BLK_DEV_INTEGRITY */
57897
57898 struct block_device_operations {
57899 - int (*open) (struct block_device *, fmode_t);
57900 - int (*release) (struct gendisk *, fmode_t);
57901 - int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57902 - int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57903 - int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57904 - int (*direct_access) (struct block_device *, sector_t,
57905 + int (* const open) (struct block_device *, fmode_t);
57906 + int (* const release) (struct gendisk *, fmode_t);
57907 + int (* const locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57908 + int (* const ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57909 + int (* const compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57910 + int (* const direct_access) (struct block_device *, sector_t,
57911 void **, unsigned long *);
57912 - int (*media_changed) (struct gendisk *);
57913 - unsigned long long (*set_capacity) (struct gendisk *,
57914 + int (* const media_changed) (struct gendisk *);
57915 + unsigned long long (* const set_capacity) (struct gendisk *,
57916 unsigned long long);
57917 - int (*revalidate_disk) (struct gendisk *);
57918 - int (*getgeo)(struct block_device *, struct hd_geometry *);
57919 - struct module *owner;
57920 + int (* const revalidate_disk) (struct gendisk *);
57921 + int (* const getgeo)(struct block_device *, struct hd_geometry *);
57922 + struct module * const owner;
57923 };
57924
57925 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
57926 diff -urNp linux-2.6.32.44/include/linux/blktrace_api.h linux-2.6.32.44/include/linux/blktrace_api.h
57927 --- linux-2.6.32.44/include/linux/blktrace_api.h 2011-03-27 14:31:47.000000000 -0400
57928 +++ linux-2.6.32.44/include/linux/blktrace_api.h 2011-05-04 17:56:28.000000000 -0400
57929 @@ -160,7 +160,7 @@ struct blk_trace {
57930 struct dentry *dir;
57931 struct dentry *dropped_file;
57932 struct dentry *msg_file;
57933 - atomic_t dropped;
57934 + atomic_unchecked_t dropped;
57935 };
57936
57937 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
57938 diff -urNp linux-2.6.32.44/include/linux/byteorder/little_endian.h linux-2.6.32.44/include/linux/byteorder/little_endian.h
57939 --- linux-2.6.32.44/include/linux/byteorder/little_endian.h 2011-03-27 14:31:47.000000000 -0400
57940 +++ linux-2.6.32.44/include/linux/byteorder/little_endian.h 2011-04-17 15:56:46.000000000 -0400
57941 @@ -42,51 +42,51 @@
57942
57943 static inline __le64 __cpu_to_le64p(const __u64 *p)
57944 {
57945 - return (__force __le64)*p;
57946 + return (__force const __le64)*p;
57947 }
57948 static inline __u64 __le64_to_cpup(const __le64 *p)
57949 {
57950 - return (__force __u64)*p;
57951 + return (__force const __u64)*p;
57952 }
57953 static inline __le32 __cpu_to_le32p(const __u32 *p)
57954 {
57955 - return (__force __le32)*p;
57956 + return (__force const __le32)*p;
57957 }
57958 static inline __u32 __le32_to_cpup(const __le32 *p)
57959 {
57960 - return (__force __u32)*p;
57961 + return (__force const __u32)*p;
57962 }
57963 static inline __le16 __cpu_to_le16p(const __u16 *p)
57964 {
57965 - return (__force __le16)*p;
57966 + return (__force const __le16)*p;
57967 }
57968 static inline __u16 __le16_to_cpup(const __le16 *p)
57969 {
57970 - return (__force __u16)*p;
57971 + return (__force const __u16)*p;
57972 }
57973 static inline __be64 __cpu_to_be64p(const __u64 *p)
57974 {
57975 - return (__force __be64)__swab64p(p);
57976 + return (__force const __be64)__swab64p(p);
57977 }
57978 static inline __u64 __be64_to_cpup(const __be64 *p)
57979 {
57980 - return __swab64p((__u64 *)p);
57981 + return __swab64p((const __u64 *)p);
57982 }
57983 static inline __be32 __cpu_to_be32p(const __u32 *p)
57984 {
57985 - return (__force __be32)__swab32p(p);
57986 + return (__force const __be32)__swab32p(p);
57987 }
57988 static inline __u32 __be32_to_cpup(const __be32 *p)
57989 {
57990 - return __swab32p((__u32 *)p);
57991 + return __swab32p((const __u32 *)p);
57992 }
57993 static inline __be16 __cpu_to_be16p(const __u16 *p)
57994 {
57995 - return (__force __be16)__swab16p(p);
57996 + return (__force const __be16)__swab16p(p);
57997 }
57998 static inline __u16 __be16_to_cpup(const __be16 *p)
57999 {
58000 - return __swab16p((__u16 *)p);
58001 + return __swab16p((const __u16 *)p);
58002 }
58003 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
58004 #define __le64_to_cpus(x) do { (void)(x); } while (0)
58005 diff -urNp linux-2.6.32.44/include/linux/cache.h linux-2.6.32.44/include/linux/cache.h
58006 --- linux-2.6.32.44/include/linux/cache.h 2011-03-27 14:31:47.000000000 -0400
58007 +++ linux-2.6.32.44/include/linux/cache.h 2011-04-17 15:56:46.000000000 -0400
58008 @@ -16,6 +16,10 @@
58009 #define __read_mostly
58010 #endif
58011
58012 +#ifndef __read_only
58013 +#define __read_only __read_mostly
58014 +#endif
58015 +
58016 #ifndef ____cacheline_aligned
58017 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
58018 #endif
58019 diff -urNp linux-2.6.32.44/include/linux/capability.h linux-2.6.32.44/include/linux/capability.h
58020 --- linux-2.6.32.44/include/linux/capability.h 2011-03-27 14:31:47.000000000 -0400
58021 +++ linux-2.6.32.44/include/linux/capability.h 2011-04-17 15:56:46.000000000 -0400
58022 @@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff
58023 (security_real_capable_noaudit((t), (cap)) == 0)
58024
58025 extern int capable(int cap);
58026 +int capable_nolog(int cap);
58027
58028 /* audit system wants to get cap info from files as well */
58029 struct dentry;
58030 diff -urNp linux-2.6.32.44/include/linux/compiler-gcc4.h linux-2.6.32.44/include/linux/compiler-gcc4.h
58031 --- linux-2.6.32.44/include/linux/compiler-gcc4.h 2011-03-27 14:31:47.000000000 -0400
58032 +++ linux-2.6.32.44/include/linux/compiler-gcc4.h 2011-08-05 20:33:55.000000000 -0400
58033 @@ -36,4 +36,13 @@
58034 the kernel context */
58035 #define __cold __attribute__((__cold__))
58036
58037 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
58038 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
58039 +#define __bos0(ptr) __bos((ptr), 0)
58040 +#define __bos1(ptr) __bos((ptr), 1)
58041 +
58042 +#if __GNUC_MINOR__ >= 5
58043 +#define __no_const __attribute__((no_const))
58044 +#endif
58045 +
58046 #endif
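
The __bos()/__bos0()/__bos1() macros added above are thin wrappers around GCC's __builtin_object_size(), which fortified copy routines use to compare a destination's known size against the requested length. A minimal standalone sketch of what the builtin reports follows (gcc/clang only; the wrapper names are reused for illustration, and the struct is invented).

/* __builtin_object_size(ptr, type) returns the number of bytes known to
 * remain in the object ptr points into, or (size_t)-1 when unknown.
 * Compile with -O2; without optimisation it may fall back to (size_t)-1. */
#include <stdio.h>
#include <stdlib.h>

#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
#define __bos0(ptr)     __bos((ptr), 0)
#define __bos1(ptr)     __bos((ptr), 1)

struct packet {
	char header[8];
	char payload[56];
};

int main(void)
{
	struct packet p;
	char *heap = malloc(32);

	printf("whole struct : %zu\n", __bos0(&p));		/* 64 */
	printf("header field : %zu\n", __bos1(p.header));	/* 8  */
	printf("heap pointer : %zu\n", __bos0(heap));		/* 32, or -1 if unknown */

	free(heap);
	return 0;
}
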
58047 diff -urNp linux-2.6.32.44/include/linux/compiler.h linux-2.6.32.44/include/linux/compiler.h
58048 --- linux-2.6.32.44/include/linux/compiler.h 2011-03-27 14:31:47.000000000 -0400
58049 +++ linux-2.6.32.44/include/linux/compiler.h 2011-08-05 20:33:55.000000000 -0400
58050 @@ -247,6 +247,10 @@ void ftrace_likely_update(struct ftrace_
58051 # define __attribute_const__ /* unimplemented */
58052 #endif
58053
58054 +#ifndef __no_const
58055 +# define __no_const
58056 +#endif
58057 +
58058 /*
58059 * Tell gcc if a function is cold. The compiler will assume any path
58060 * directly leading to the call is unlikely.
58061 @@ -256,6 +260,22 @@ void ftrace_likely_update(struct ftrace_
58062 #define __cold
58063 #endif
58064
58065 +#ifndef __alloc_size
58066 +#define __alloc_size(...)
58067 +#endif
58068 +
58069 +#ifndef __bos
58070 +#define __bos(ptr, arg)
58071 +#endif
58072 +
58073 +#ifndef __bos0
58074 +#define __bos0(ptr)
58075 +#endif
58076 +
58077 +#ifndef __bos1
58078 +#define __bos1(ptr)
58079 +#endif
58080 +
58081 /* Simple shorthand for a section definition */
58082 #ifndef __section
58083 # define __section(S) __attribute__ ((__section__(#S)))
58084 @@ -278,6 +298,7 @@ void ftrace_likely_update(struct ftrace_
58085 * use is to mediate communication between process-level code and irq/NMI
58086 * handlers, all running on the same CPU.
58087 */
58088 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
58089 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
58090 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
58091
58092 #endif /* __LINUX_COMPILER_H */
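
The hunk above narrows ACCESS_ONCE() to a const-qualified volatile read and adds ACCESS_ONCE_RW() for the sites that genuinely need a forced store; both are about preventing the compiler from caching or merging the access, not about atomicity. A standalone sketch using the two macros exactly as defined above, in a flag-polling loop (build with -pthread; this mirrors the kernel idiom and is not a portable C11 synchronization primitive):

/* ACCESS_ONCE forces a fresh volatile read each time; ACCESS_ONCE_RW is the
 * variant usable as a store target. typeof is a GNU extension, as in the kernel. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define ACCESS_ONCE(x)    (*(volatile const typeof(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))

static int done;

static void *worker(void *arg)
{
	(void)arg;
	usleep(1000);
	ACCESS_ONCE_RW(done) = 1;	/* forced store, not cached away */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	while (!ACCESS_ONCE(done))	/* re-read from memory on every pass */
		;
	pthread_join(t, NULL);
	puts("flag observed");
	return 0;
}
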
58093 diff -urNp linux-2.6.32.44/include/linux/crypto.h linux-2.6.32.44/include/linux/crypto.h
58094 --- linux-2.6.32.44/include/linux/crypto.h 2011-03-27 14:31:47.000000000 -0400
58095 +++ linux-2.6.32.44/include/linux/crypto.h 2011-08-05 20:33:55.000000000 -0400
58096 @@ -394,7 +394,7 @@ struct cipher_tfm {
58097 const u8 *key, unsigned int keylen);
58098 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
58099 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
58100 -};
58101 +} __no_const;
58102
58103 struct hash_tfm {
58104 int (*init)(struct hash_desc *desc);
58105 @@ -415,13 +415,13 @@ struct compress_tfm {
58106 int (*cot_decompress)(struct crypto_tfm *tfm,
58107 const u8 *src, unsigned int slen,
58108 u8 *dst, unsigned int *dlen);
58109 -};
58110 +} __no_const;
58111
58112 struct rng_tfm {
58113 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
58114 unsigned int dlen);
58115 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
58116 -};
58117 +} __no_const;
58118
58119 #define crt_ablkcipher crt_u.ablkcipher
58120 #define crt_aead crt_u.aead
58121 diff -urNp linux-2.6.32.44/include/linux/cryptohash.h linux-2.6.32.44/include/linux/cryptohash.h
58122 --- linux-2.6.32.44/include/linux/cryptohash.h 2011-03-27 14:31:47.000000000 -0400
58123 +++ linux-2.6.32.44/include/linux/cryptohash.h 2011-08-07 19:48:09.000000000 -0400
58124 @@ -7,6 +7,11 @@
58125 void sha_init(__u32 *buf);
58126 void sha_transform(__u32 *digest, const char *data, __u32 *W);
58127
58128 +#define MD5_DIGEST_WORDS 4
58129 +#define MD5_MESSAGE_BYTES 64
58130 +
58131 +void md5_transform(__u32 *hash, __u32 const *in);
58132 +
58133 __u32 half_md4_transform(__u32 buf[4], __u32 const in[8]);
58134
58135 #endif
58136 diff -urNp linux-2.6.32.44/include/linux/dcache.h linux-2.6.32.44/include/linux/dcache.h
58137 --- linux-2.6.32.44/include/linux/dcache.h 2011-03-27 14:31:47.000000000 -0400
58138 +++ linux-2.6.32.44/include/linux/dcache.h 2011-04-23 13:34:46.000000000 -0400
58139 @@ -119,6 +119,8 @@ struct dentry {
58140 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
58141 };
58142
58143 +#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
58144 +
58145 /*
58146 * dentry->d_lock spinlock nesting subclasses:
58147 *
58148 diff -urNp linux-2.6.32.44/include/linux/decompress/mm.h linux-2.6.32.44/include/linux/decompress/mm.h
58149 --- linux-2.6.32.44/include/linux/decompress/mm.h 2011-03-27 14:31:47.000000000 -0400
58150 +++ linux-2.6.32.44/include/linux/decompress/mm.h 2011-04-17 15:56:46.000000000 -0400
58151 @@ -78,7 +78,7 @@ static void free(void *where)
58152 * warnings when not needed (indeed large_malloc / large_free are not
58153 * needed by inflate */
58154
58155 -#define malloc(a) kmalloc(a, GFP_KERNEL)
58156 +#define malloc(a) kmalloc((a), GFP_KERNEL)
58157 #define free(a) kfree(a)
58158
58159 #define large_malloc(a) vmalloc(a)
58160 diff -urNp linux-2.6.32.44/include/linux/dma-mapping.h linux-2.6.32.44/include/linux/dma-mapping.h
58161 --- linux-2.6.32.44/include/linux/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
58162 +++ linux-2.6.32.44/include/linux/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
58163 @@ -16,50 +16,50 @@ enum dma_data_direction {
58164 };
58165
58166 struct dma_map_ops {
58167 - void* (*alloc_coherent)(struct device *dev, size_t size,
58168 + void* (* const alloc_coherent)(struct device *dev, size_t size,
58169 dma_addr_t *dma_handle, gfp_t gfp);
58170 - void (*free_coherent)(struct device *dev, size_t size,
58171 + void (* const free_coherent)(struct device *dev, size_t size,
58172 void *vaddr, dma_addr_t dma_handle);
58173 - dma_addr_t (*map_page)(struct device *dev, struct page *page,
58174 + dma_addr_t (* const map_page)(struct device *dev, struct page *page,
58175 unsigned long offset, size_t size,
58176 enum dma_data_direction dir,
58177 struct dma_attrs *attrs);
58178 - void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
58179 + void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
58180 size_t size, enum dma_data_direction dir,
58181 struct dma_attrs *attrs);
58182 - int (*map_sg)(struct device *dev, struct scatterlist *sg,
58183 + int (* const map_sg)(struct device *dev, struct scatterlist *sg,
58184 int nents, enum dma_data_direction dir,
58185 struct dma_attrs *attrs);
58186 - void (*unmap_sg)(struct device *dev,
58187 + void (* const unmap_sg)(struct device *dev,
58188 struct scatterlist *sg, int nents,
58189 enum dma_data_direction dir,
58190 struct dma_attrs *attrs);
58191 - void (*sync_single_for_cpu)(struct device *dev,
58192 + void (* const sync_single_for_cpu)(struct device *dev,
58193 dma_addr_t dma_handle, size_t size,
58194 enum dma_data_direction dir);
58195 - void (*sync_single_for_device)(struct device *dev,
58196 + void (* const sync_single_for_device)(struct device *dev,
58197 dma_addr_t dma_handle, size_t size,
58198 enum dma_data_direction dir);
58199 - void (*sync_single_range_for_cpu)(struct device *dev,
58200 + void (* const sync_single_range_for_cpu)(struct device *dev,
58201 dma_addr_t dma_handle,
58202 unsigned long offset,
58203 size_t size,
58204 enum dma_data_direction dir);
58205 - void (*sync_single_range_for_device)(struct device *dev,
58206 + void (* const sync_single_range_for_device)(struct device *dev,
58207 dma_addr_t dma_handle,
58208 unsigned long offset,
58209 size_t size,
58210 enum dma_data_direction dir);
58211 - void (*sync_sg_for_cpu)(struct device *dev,
58212 + void (* const sync_sg_for_cpu)(struct device *dev,
58213 struct scatterlist *sg, int nents,
58214 enum dma_data_direction dir);
58215 - void (*sync_sg_for_device)(struct device *dev,
58216 + void (* const sync_sg_for_device)(struct device *dev,
58217 struct scatterlist *sg, int nents,
58218 enum dma_data_direction dir);
58219 - int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
58220 - int (*dma_supported)(struct device *dev, u64 mask);
58221 + int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
58222 + int (* const dma_supported)(struct device *dev, u64 mask);
58223 int (*set_dma_mask)(struct device *dev, u64 mask);
58224 - int is_phys;
58225 + const int is_phys;
58226 };
58227
58228 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
58229 diff -urNp linux-2.6.32.44/include/linux/dst.h linux-2.6.32.44/include/linux/dst.h
58230 --- linux-2.6.32.44/include/linux/dst.h 2011-03-27 14:31:47.000000000 -0400
58231 +++ linux-2.6.32.44/include/linux/dst.h 2011-04-17 15:56:46.000000000 -0400
58232 @@ -380,7 +380,7 @@ struct dst_node
58233 struct thread_pool *pool;
58234
58235 /* Transaction IDs live here */
58236 - atomic_long_t gen;
58237 + atomic_long_unchecked_t gen;
58238
58239 /*
58240 * How frequently and how many times transaction
58241 diff -urNp linux-2.6.32.44/include/linux/elf.h linux-2.6.32.44/include/linux/elf.h
58242 --- linux-2.6.32.44/include/linux/elf.h 2011-03-27 14:31:47.000000000 -0400
58243 +++ linux-2.6.32.44/include/linux/elf.h 2011-04-17 15:56:46.000000000 -0400
58244 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
58245 #define PT_GNU_EH_FRAME 0x6474e550
58246
58247 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
58248 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
58249 +
58250 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
58251 +
58252 +/* Constants for the e_flags field */
58253 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
58254 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
58255 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
58256 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
58257 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
58258 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
58259
58260 /* These constants define the different elf file types */
58261 #define ET_NONE 0
58262 @@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
58263 #define DT_DEBUG 21
58264 #define DT_TEXTREL 22
58265 #define DT_JMPREL 23
58266 +#define DT_FLAGS 30
58267 + #define DF_TEXTREL 0x00000004
58268 #define DT_ENCODING 32
58269 #define OLD_DT_LOOS 0x60000000
58270 #define DT_LOOS 0x6000000d
58271 @@ -230,6 +243,19 @@ typedef struct elf64_hdr {
58272 #define PF_W 0x2
58273 #define PF_X 0x1
58274
58275 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
58276 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
58277 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
58278 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
58279 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
58280 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
58281 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
58282 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
58283 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
58284 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
58285 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
58286 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
58287 +
58288 typedef struct elf32_phdr{
58289 Elf32_Word p_type;
58290 Elf32_Off p_offset;
58291 @@ -322,6 +348,8 @@ typedef struct elf64_shdr {
58292 #define EI_OSABI 7
58293 #define EI_PAD 8
58294
58295 +#define EI_PAX 14
58296 +
58297 #define ELFMAG0 0x7f /* EI_MAG */
58298 #define ELFMAG1 'E'
58299 #define ELFMAG2 'L'
58300 @@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
58301 #define elf_phdr elf32_phdr
58302 #define elf_note elf32_note
58303 #define elf_addr_t Elf32_Off
58304 +#define elf_dyn Elf32_Dyn
58305
58306 #else
58307
58308 @@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
58309 #define elf_phdr elf64_phdr
58310 #define elf_note elf64_note
58311 #define elf_addr_t Elf64_Off
58312 +#define elf_dyn Elf64_Dyn
58313
58314 #endif
58315
58316 diff -urNp linux-2.6.32.44/include/linux/fscache-cache.h linux-2.6.32.44/include/linux/fscache-cache.h
58317 --- linux-2.6.32.44/include/linux/fscache-cache.h 2011-03-27 14:31:47.000000000 -0400
58318 +++ linux-2.6.32.44/include/linux/fscache-cache.h 2011-05-04 17:56:28.000000000 -0400
58319 @@ -116,7 +116,7 @@ struct fscache_operation {
58320 #endif
58321 };
58322
58323 -extern atomic_t fscache_op_debug_id;
58324 +extern atomic_unchecked_t fscache_op_debug_id;
58325 extern const struct slow_work_ops fscache_op_slow_work_ops;
58326
58327 extern void fscache_enqueue_operation(struct fscache_operation *);
58328 @@ -134,7 +134,7 @@ static inline void fscache_operation_ini
58329 fscache_operation_release_t release)
58330 {
58331 atomic_set(&op->usage, 1);
58332 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
58333 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
58334 op->release = release;
58335 INIT_LIST_HEAD(&op->pend_link);
58336 fscache_set_op_state(op, "Init");
58337 diff -urNp linux-2.6.32.44/include/linux/fs.h linux-2.6.32.44/include/linux/fs.h
58338 --- linux-2.6.32.44/include/linux/fs.h 2011-07-13 17:23:04.000000000 -0400
58339 +++ linux-2.6.32.44/include/linux/fs.h 2011-08-05 20:33:55.000000000 -0400
58340 @@ -90,6 +90,11 @@ struct inodes_stat_t {
58341 /* Expect random access pattern */
58342 #define FMODE_RANDOM ((__force fmode_t)4096)
58343
58344 +/* Hack for grsec so as not to require read permission simply to execute
58345 + * a binary
58346 + */
58347 +#define FMODE_GREXEC ((__force fmode_t)0x2000000)
58348 +
58349 /*
58350 * The below are the various read and write types that we support. Some of
58351 * them include behavioral modifiers that send information down to the
58352 @@ -568,41 +573,41 @@ typedef int (*read_actor_t)(read_descrip
58353 unsigned long, unsigned long);
58354
58355 struct address_space_operations {
58356 - int (*writepage)(struct page *page, struct writeback_control *wbc);
58357 - int (*readpage)(struct file *, struct page *);
58358 - void (*sync_page)(struct page *);
58359 + int (* const writepage)(struct page *page, struct writeback_control *wbc);
58360 + int (* const readpage)(struct file *, struct page *);
58361 + void (* const sync_page)(struct page *);
58362
58363 /* Write back some dirty pages from this mapping. */
58364 - int (*writepages)(struct address_space *, struct writeback_control *);
58365 + int (* const writepages)(struct address_space *, struct writeback_control *);
58366
58367 /* Set a page dirty. Return true if this dirtied it */
58368 - int (*set_page_dirty)(struct page *page);
58369 + int (* const set_page_dirty)(struct page *page);
58370
58371 - int (*readpages)(struct file *filp, struct address_space *mapping,
58372 + int (* const readpages)(struct file *filp, struct address_space *mapping,
58373 struct list_head *pages, unsigned nr_pages);
58374
58375 - int (*write_begin)(struct file *, struct address_space *mapping,
58376 + int (* const write_begin)(struct file *, struct address_space *mapping,
58377 loff_t pos, unsigned len, unsigned flags,
58378 struct page **pagep, void **fsdata);
58379 - int (*write_end)(struct file *, struct address_space *mapping,
58380 + int (* const write_end)(struct file *, struct address_space *mapping,
58381 loff_t pos, unsigned len, unsigned copied,
58382 struct page *page, void *fsdata);
58383
58384 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
58385 - sector_t (*bmap)(struct address_space *, sector_t);
58386 - void (*invalidatepage) (struct page *, unsigned long);
58387 - int (*releasepage) (struct page *, gfp_t);
58388 - ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
58389 + sector_t (* const bmap)(struct address_space *, sector_t);
58390 + void (* const invalidatepage) (struct page *, unsigned long);
58391 + int (* const releasepage) (struct page *, gfp_t);
58392 + ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
58393 loff_t offset, unsigned long nr_segs);
58394 - int (*get_xip_mem)(struct address_space *, pgoff_t, int,
58395 + int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
58396 void **, unsigned long *);
58397 /* migrate the contents of a page to the specified target */
58398 - int (*migratepage) (struct address_space *,
58399 + int (* const migratepage) (struct address_space *,
58400 struct page *, struct page *);
58401 - int (*launder_page) (struct page *);
58402 - int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
58403 + int (* const launder_page) (struct page *);
58404 + int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
58405 unsigned long);
58406 - int (*error_remove_page)(struct address_space *, struct page *);
58407 + int (* const error_remove_page)(struct address_space *, struct page *);
58408 };
58409
58410 /*
58411 @@ -1031,19 +1036,19 @@ static inline int file_check_writeable(s
58412 typedef struct files_struct *fl_owner_t;
58413
58414 struct file_lock_operations {
58415 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
58416 - void (*fl_release_private)(struct file_lock *);
58417 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
58418 + void (* const fl_release_private)(struct file_lock *);
58419 };
58420
58421 struct lock_manager_operations {
58422 - int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
58423 - void (*fl_notify)(struct file_lock *); /* unblock callback */
58424 - int (*fl_grant)(struct file_lock *, struct file_lock *, int);
58425 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
58426 - void (*fl_release_private)(struct file_lock *);
58427 - void (*fl_break)(struct file_lock *);
58428 - int (*fl_mylease)(struct file_lock *, struct file_lock *);
58429 - int (*fl_change)(struct file_lock **, int);
58430 + int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
58431 + void (* const fl_notify)(struct file_lock *); /* unblock callback */
58432 + int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
58433 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
58434 + void (* const fl_release_private)(struct file_lock *);
58435 + void (* const fl_break)(struct file_lock *);
58436 + int (* const fl_mylease)(struct file_lock *, struct file_lock *);
58437 + int (* const fl_change)(struct file_lock **, int);
58438 };
58439
58440 struct lock_manager {
58441 @@ -1442,7 +1447,7 @@ struct fiemap_extent_info {
58442 unsigned int fi_flags; /* Flags as passed from user */
58443 unsigned int fi_extents_mapped; /* Number of mapped extents */
58444 unsigned int fi_extents_max; /* Size of fiemap_extent array */
58445 - struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
58446 + struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
58447 * array */
58448 };
58449 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
58450 @@ -1486,7 +1491,7 @@ struct block_device_operations;
58451 * can be called without the big kernel lock held in all filesystems.
58452 */
58453 struct file_operations {
58454 - struct module *owner;
58455 + struct module * const owner;
58456 loff_t (*llseek) (struct file *, loff_t, int);
58457 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
58458 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
58459 @@ -1559,30 +1564,30 @@ extern ssize_t vfs_writev(struct file *,
58460 unsigned long, loff_t *);
58461
58462 struct super_operations {
58463 - struct inode *(*alloc_inode)(struct super_block *sb);
58464 - void (*destroy_inode)(struct inode *);
58465 + struct inode *(* const alloc_inode)(struct super_block *sb);
58466 + void (* const destroy_inode)(struct inode *);
58467
58468 - void (*dirty_inode) (struct inode *);
58469 - int (*write_inode) (struct inode *, int);
58470 - void (*drop_inode) (struct inode *);
58471 - void (*delete_inode) (struct inode *);
58472 - void (*put_super) (struct super_block *);
58473 - void (*write_super) (struct super_block *);
58474 - int (*sync_fs)(struct super_block *sb, int wait);
58475 - int (*freeze_fs) (struct super_block *);
58476 - int (*unfreeze_fs) (struct super_block *);
58477 - int (*statfs) (struct dentry *, struct kstatfs *);
58478 - int (*remount_fs) (struct super_block *, int *, char *);
58479 - void (*clear_inode) (struct inode *);
58480 - void (*umount_begin) (struct super_block *);
58481 + void (* const dirty_inode) (struct inode *);
58482 + int (* const write_inode) (struct inode *, int);
58483 + void (* const drop_inode) (struct inode *);
58484 + void (* const delete_inode) (struct inode *);
58485 + void (* const put_super) (struct super_block *);
58486 + void (* const write_super) (struct super_block *);
58487 + int (* const sync_fs)(struct super_block *sb, int wait);
58488 + int (* const freeze_fs) (struct super_block *);
58489 + int (* const unfreeze_fs) (struct super_block *);
58490 + int (* const statfs) (struct dentry *, struct kstatfs *);
58491 + int (* const remount_fs) (struct super_block *, int *, char *);
58492 + void (* const clear_inode) (struct inode *);
58493 + void (* const umount_begin) (struct super_block *);
58494
58495 - int (*show_options)(struct seq_file *, struct vfsmount *);
58496 - int (*show_stats)(struct seq_file *, struct vfsmount *);
58497 + int (* const show_options)(struct seq_file *, struct vfsmount *);
58498 + int (* const show_stats)(struct seq_file *, struct vfsmount *);
58499 #ifdef CONFIG_QUOTA
58500 - ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
58501 - ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
58502 + ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
58503 + ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
58504 #endif
58505 - int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
58506 + int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
58507 };
58508
58509 /*
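
Editor's note on the fs.h hunks above: turning the members of address_space_operations, file_lock_operations, lock_manager_operations and super_operations into const function pointers means an ops table can only be populated by its initializer; nothing can retarget a hook afterwards. A minimal, self-contained C sketch of that property (hypothetical struct and function names, not taken from the kernel):

    #include <stdio.h>

    struct ops {
            int (* const read)(int);    /* const members: bound by the initializer, never reassigned */
            int (* const write)(int);
    };

    static int my_read(int x)  { return x + 1; }
    static int my_write(int x) { return x - 1; }

    static const struct ops example_ops = {
            .read  = my_read,
            .write = my_write,
    };

    int main(void)
    {
            /* example_ops.read = my_write;  would not compile: assignment of read-only member */
            printf("%d %d\n", example_ops.read(1), example_ops.write(1));
            return 0;
    }

With plain, non-const members the commented-out assignment would compile, which is presumably the write primitive the constification is meant to remove.
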
58510 diff -urNp linux-2.6.32.44/include/linux/fs_struct.h linux-2.6.32.44/include/linux/fs_struct.h
58511 --- linux-2.6.32.44/include/linux/fs_struct.h 2011-03-27 14:31:47.000000000 -0400
58512 +++ linux-2.6.32.44/include/linux/fs_struct.h 2011-04-17 15:56:46.000000000 -0400
58513 @@ -4,7 +4,7 @@
58514 #include <linux/path.h>
58515
58516 struct fs_struct {
58517 - int users;
58518 + atomic_t users;
58519 rwlock_t lock;
58520 int umask;
58521 int in_exec;
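
Editor's note: the fs_struct hunk replaces a plain int reference count with atomic_t so concurrent get/put paths can update it without racing (the users of the field are converted elsewhere in the patch). A rough userspace analogy using C11 atomics rather than the kernel's atomic_t API:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct fs_like {
            atomic_int users;           /* plays the role of the new atomic_t users */
    };

    static struct fs_like *fs_get(struct fs_like *fs)
    {
            atomic_fetch_add(&fs->users, 1);
            return fs;
    }

    static void fs_put(struct fs_like *fs)
    {
            /* the thread that drops the last reference frees the object */
            if (atomic_fetch_sub(&fs->users, 1) == 1) {
                    printf("last reference dropped\n");
                    free(fs);
            }
    }

    int main(void)
    {
            struct fs_like *fs = malloc(sizeof(*fs));

            if (!fs)
                    return 1;
            atomic_init(&fs->users, 1);
            fs_get(fs);                 /* e.g. a second task sharing the structure */
            fs_put(fs);
            fs_put(fs);                 /* last put frees */
            return 0;
    }
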
58522 diff -urNp linux-2.6.32.44/include/linux/ftrace_event.h linux-2.6.32.44/include/linux/ftrace_event.h
58523 --- linux-2.6.32.44/include/linux/ftrace_event.h 2011-03-27 14:31:47.000000000 -0400
58524 +++ linux-2.6.32.44/include/linux/ftrace_event.h 2011-05-04 17:56:28.000000000 -0400
58525 @@ -163,7 +163,7 @@ extern int trace_define_field(struct ftr
58526 int filter_type);
58527 extern int trace_define_common_fields(struct ftrace_event_call *call);
58528
58529 -#define is_signed_type(type) (((type)(-1)) < 0)
58530 +#define is_signed_type(type) (((type)(-1)) < (type)1)
58531
58532 int trace_set_clr_event(const char *system, const char *event, int set);
58533
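
Editor's note: the is_signed_type() rewrite keeps the same truth value for integer types but compares against (type)1 instead of the constant 0, presumably to avoid "comparison is always false" warnings when the macro is instantiated with unsigned types. A quick standalone check:

    #include <stdio.h>

    #define is_signed_type(type) (((type)(-1)) < (type)1)

    int main(void)
    {
            /* -1 cast to a signed type stays negative and compares below (type)1;
             * cast to an unsigned type it wraps to the maximum value and does not */
            printf("int:           %d\n", is_signed_type(int));           /* 1 */
            printf("unsigned int:  %d\n", is_signed_type(unsigned int));  /* 0 */
            printf("char (varies): %d\n", is_signed_type(char));          /* implementation-defined */
            return 0;
    }
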
58534 diff -urNp linux-2.6.32.44/include/linux/genhd.h linux-2.6.32.44/include/linux/genhd.h
58535 --- linux-2.6.32.44/include/linux/genhd.h 2011-03-27 14:31:47.000000000 -0400
58536 +++ linux-2.6.32.44/include/linux/genhd.h 2011-04-17 15:56:46.000000000 -0400
58537 @@ -161,7 +161,7 @@ struct gendisk {
58538
58539 struct timer_rand_state *random;
58540
58541 - atomic_t sync_io; /* RAID */
58542 + atomic_unchecked_t sync_io; /* RAID */
58543 struct work_struct async_notify;
58544 #ifdef CONFIG_BLK_DEV_INTEGRITY
58545 struct blk_integrity *integrity;
58546 diff -urNp linux-2.6.32.44/include/linux/gracl.h linux-2.6.32.44/include/linux/gracl.h
58547 --- linux-2.6.32.44/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
58548 +++ linux-2.6.32.44/include/linux/gracl.h 2011-04-17 15:56:46.000000000 -0400
58549 @@ -0,0 +1,317 @@
58550 +#ifndef GR_ACL_H
58551 +#define GR_ACL_H
58552 +
58553 +#include <linux/grdefs.h>
58554 +#include <linux/resource.h>
58555 +#include <linux/capability.h>
58556 +#include <linux/dcache.h>
58557 +#include <asm/resource.h>
58558 +
58559 +/* Major status information */
58560 +
58561 +#define GR_VERSION "grsecurity 2.2.2"
58562 +#define GRSECURITY_VERSION 0x2202
58563 +
58564 +enum {
58565 + GR_SHUTDOWN = 0,
58566 + GR_ENABLE = 1,
58567 + GR_SPROLE = 2,
58568 + GR_RELOAD = 3,
58569 + GR_SEGVMOD = 4,
58570 + GR_STATUS = 5,
58571 + GR_UNSPROLE = 6,
58572 + GR_PASSSET = 7,
58573 + GR_SPROLEPAM = 8,
58574 +};
58575 +
58576 +/* Password setup definitions
58577 + * kernel/grhash.c */
58578 +enum {
58579 + GR_PW_LEN = 128,
58580 + GR_SALT_LEN = 16,
58581 + GR_SHA_LEN = 32,
58582 +};
58583 +
58584 +enum {
58585 + GR_SPROLE_LEN = 64,
58586 +};
58587 +
58588 +enum {
58589 + GR_NO_GLOB = 0,
58590 + GR_REG_GLOB,
58591 + GR_CREATE_GLOB
58592 +};
58593 +
58594 +#define GR_NLIMITS 32
58595 +
58596 +/* Begin Data Structures */
58597 +
58598 +struct sprole_pw {
58599 + unsigned char *rolename;
58600 + unsigned char salt[GR_SALT_LEN];
58601 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
58602 +};
58603 +
58604 +struct name_entry {
58605 + __u32 key;
58606 + ino_t inode;
58607 + dev_t device;
58608 + char *name;
58609 + __u16 len;
58610 + __u8 deleted;
58611 + struct name_entry *prev;
58612 + struct name_entry *next;
58613 +};
58614 +
58615 +struct inodev_entry {
58616 + struct name_entry *nentry;
58617 + struct inodev_entry *prev;
58618 + struct inodev_entry *next;
58619 +};
58620 +
58621 +struct acl_role_db {
58622 + struct acl_role_label **r_hash;
58623 + __u32 r_size;
58624 +};
58625 +
58626 +struct inodev_db {
58627 + struct inodev_entry **i_hash;
58628 + __u32 i_size;
58629 +};
58630 +
58631 +struct name_db {
58632 + struct name_entry **n_hash;
58633 + __u32 n_size;
58634 +};
58635 +
58636 +struct crash_uid {
58637 + uid_t uid;
58638 + unsigned long expires;
58639 +};
58640 +
58641 +struct gr_hash_struct {
58642 + void **table;
58643 + void **nametable;
58644 + void *first;
58645 + __u32 table_size;
58646 + __u32 used_size;
58647 + int type;
58648 +};
58649 +
58650 +/* Userspace Grsecurity ACL data structures */
58651 +
58652 +struct acl_subject_label {
58653 + char *filename;
58654 + ino_t inode;
58655 + dev_t device;
58656 + __u32 mode;
58657 + kernel_cap_t cap_mask;
58658 + kernel_cap_t cap_lower;
58659 + kernel_cap_t cap_invert_audit;
58660 +
58661 + struct rlimit res[GR_NLIMITS];
58662 + __u32 resmask;
58663 +
58664 + __u8 user_trans_type;
58665 + __u8 group_trans_type;
58666 + uid_t *user_transitions;
58667 + gid_t *group_transitions;
58668 + __u16 user_trans_num;
58669 + __u16 group_trans_num;
58670 +
58671 + __u32 sock_families[2];
58672 + __u32 ip_proto[8];
58673 + __u32 ip_type;
58674 + struct acl_ip_label **ips;
58675 + __u32 ip_num;
58676 + __u32 inaddr_any_override;
58677 +
58678 + __u32 crashes;
58679 + unsigned long expires;
58680 +
58681 + struct acl_subject_label *parent_subject;
58682 + struct gr_hash_struct *hash;
58683 + struct acl_subject_label *prev;
58684 + struct acl_subject_label *next;
58685 +
58686 + struct acl_object_label **obj_hash;
58687 + __u32 obj_hash_size;
58688 + __u16 pax_flags;
58689 +};
58690 +
58691 +struct role_allowed_ip {
58692 + __u32 addr;
58693 + __u32 netmask;
58694 +
58695 + struct role_allowed_ip *prev;
58696 + struct role_allowed_ip *next;
58697 +};
58698 +
58699 +struct role_transition {
58700 + char *rolename;
58701 +
58702 + struct role_transition *prev;
58703 + struct role_transition *next;
58704 +};
58705 +
58706 +struct acl_role_label {
58707 + char *rolename;
58708 + uid_t uidgid;
58709 + __u16 roletype;
58710 +
58711 + __u16 auth_attempts;
58712 + unsigned long expires;
58713 +
58714 + struct acl_subject_label *root_label;
58715 + struct gr_hash_struct *hash;
58716 +
58717 + struct acl_role_label *prev;
58718 + struct acl_role_label *next;
58719 +
58720 + struct role_transition *transitions;
58721 + struct role_allowed_ip *allowed_ips;
58722 + uid_t *domain_children;
58723 + __u16 domain_child_num;
58724 +
58725 + struct acl_subject_label **subj_hash;
58726 + __u32 subj_hash_size;
58727 +};
58728 +
58729 +struct user_acl_role_db {
58730 + struct acl_role_label **r_table;
58731 + __u32 num_pointers; /* Number of allocations to track */
58732 + __u32 num_roles; /* Number of roles */
58733 + __u32 num_domain_children; /* Number of domain children */
58734 + __u32 num_subjects; /* Number of subjects */
58735 + __u32 num_objects; /* Number of objects */
58736 +};
58737 +
58738 +struct acl_object_label {
58739 + char *filename;
58740 + ino_t inode;
58741 + dev_t device;
58742 + __u32 mode;
58743 +
58744 + struct acl_subject_label *nested;
58745 + struct acl_object_label *globbed;
58746 +
58747 + /* next two structures not used */
58748 +
58749 + struct acl_object_label *prev;
58750 + struct acl_object_label *next;
58751 +};
58752 +
58753 +struct acl_ip_label {
58754 + char *iface;
58755 + __u32 addr;
58756 + __u32 netmask;
58757 + __u16 low, high;
58758 + __u8 mode;
58759 + __u32 type;
58760 + __u32 proto[8];
58761 +
58762 + /* next two structures not used */
58763 +
58764 + struct acl_ip_label *prev;
58765 + struct acl_ip_label *next;
58766 +};
58767 +
58768 +struct gr_arg {
58769 + struct user_acl_role_db role_db;
58770 + unsigned char pw[GR_PW_LEN];
58771 + unsigned char salt[GR_SALT_LEN];
58772 + unsigned char sum[GR_SHA_LEN];
58773 + unsigned char sp_role[GR_SPROLE_LEN];
58774 + struct sprole_pw *sprole_pws;
58775 + dev_t segv_device;
58776 + ino_t segv_inode;
58777 + uid_t segv_uid;
58778 + __u16 num_sprole_pws;
58779 + __u16 mode;
58780 +};
58781 +
58782 +struct gr_arg_wrapper {
58783 + struct gr_arg *arg;
58784 + __u32 version;
58785 + __u32 size;
58786 +};
58787 +
58788 +struct subject_map {
58789 + struct acl_subject_label *user;
58790 + struct acl_subject_label *kernel;
58791 + struct subject_map *prev;
58792 + struct subject_map *next;
58793 +};
58794 +
58795 +struct acl_subj_map_db {
58796 + struct subject_map **s_hash;
58797 + __u32 s_size;
58798 +};
58799 +
58800 +/* End Data Structures Section */
58801 +
58802 +/* Hash functions generated by empirical testing by Brad Spengler.
58803 + Makes good use of the low bits of the inode. Generally 0-1 iterations
58804 + of the loop for a successful match, 0-3 for an unsuccessful match.
58805 + Shift/add algorithm with modulus of table size and an XOR. */
58806 +
58807 +static __inline__ unsigned int
58808 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
58809 +{
58810 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
58811 +}
58812 +
58813 +static __inline__ unsigned int
58814 +shash(const struct acl_subject_label *userp, const unsigned int sz)
58815 +{
58816 + return ((const unsigned long)userp % sz);
58817 +}
58818 +
58819 +static __inline__ unsigned int
58820 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
58821 +{
58822 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
58823 +}
58824 +
58825 +static __inline__ unsigned int
58826 +nhash(const char *name, const __u16 len, const unsigned int sz)
58827 +{
58828 + return full_name_hash((const unsigned char *)name, len) % sz;
58829 +}
58830 +
58831 +#define FOR_EACH_ROLE_START(role) \
58832 + role = role_list; \
58833 + while (role) {
58834 +
58835 +#define FOR_EACH_ROLE_END(role) \
58836 + role = role->prev; \
58837 + }
58838 +
58839 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
58840 + subj = NULL; \
58841 + iter = 0; \
58842 + while (iter < role->subj_hash_size) { \
58843 + if (subj == NULL) \
58844 + subj = role->subj_hash[iter]; \
58845 + if (subj == NULL) { \
58846 + iter++; \
58847 + continue; \
58848 + }
58849 +
58850 +#define FOR_EACH_SUBJECT_END(subj,iter) \
58851 + subj = subj->next; \
58852 + if (subj == NULL) \
58853 + iter++; \
58854 + }
58855 +
58856 +
58857 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
58858 + subj = role->hash->first; \
58859 + while (subj != NULL) {
58860 +
58861 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
58862 + subj = subj->next; \
58863 + }
58864 +
58865 +#endif
58866 +
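
Editor's note: gracl.h pairs the inline hash functions above with chained hash tables (subj_hash, obj_hash, and so on), and the FOR_EACH_* macros walk each bucket's chain through the next pointers. A compressed userspace sketch of that lookup pattern, reusing the fhash() formula; the table and struct names here are illustrative only:

    #include <stdio.h>
    #include <stdlib.h>

    /* same shift/add-and-XOR shape as fhash() in the header above */
    static unsigned int fhash(unsigned long ino, unsigned long dev, unsigned int sz)
    {
            return (unsigned int)(((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
    }

    struct obj {
            unsigned long ino, dev;
            struct obj *next;           /* collision chain within one bucket */
    };

    struct obj_table {
            struct obj **hash;
            unsigned int size;
    };

    static struct obj *lookup(const struct obj_table *t, unsigned long ino, unsigned long dev)
    {
            struct obj *o = t->hash[fhash(ino, dev, t->size)];

            /* walk the chain, much as the FOR_EACH_SUBJECT_* macros do bucket by bucket */
            while (o && (o->ino != ino || o->dev != dev))
                    o = o->next;
            return o;
    }

    int main(void)
    {
            struct obj_table t = { calloc(32, sizeof(struct obj *)), 32 };
            struct obj a = { 42, 8, NULL };

            if (!t.hash)
                    return 1;
            t.hash[fhash(a.ino, a.dev, t.size)] = &a;
            printf("found: %s\n", lookup(&t, 42, 8) ? "yes" : "no");
            free(t.hash);
            return 0;
    }
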
58867 diff -urNp linux-2.6.32.44/include/linux/gralloc.h linux-2.6.32.44/include/linux/gralloc.h
58868 --- linux-2.6.32.44/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
58869 +++ linux-2.6.32.44/include/linux/gralloc.h 2011-04-17 15:56:46.000000000 -0400
58870 @@ -0,0 +1,9 @@
58871 +#ifndef __GRALLOC_H
58872 +#define __GRALLOC_H
58873 +
58874 +void acl_free_all(void);
58875 +int acl_alloc_stack_init(unsigned long size);
58876 +void *acl_alloc(unsigned long len);
58877 +void *acl_alloc_num(unsigned long num, unsigned long len);
58878 +
58879 +#endif
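
Editor's note: gralloc.h exposes an allocate-many, free-everything-at-once interface; the real implementation lives elsewhere in the patch. Purely to illustrate why a single acl_free_all() can tear down an entire loaded policy, here is one plausible userspace shape of such an allocator (an assumption, not the patch's actual code):

    #include <limits.h>
    #include <stdlib.h>

    /* toy arena with the same interface shape: every acl_alloc() is recorded so one
     * acl_free_all() releases the whole loaded policy in a single sweep */
    static void **alloc_log;
    static unsigned long alloc_log_size, alloc_log_next;

    int acl_alloc_stack_init(unsigned long size)
    {
            alloc_log = calloc(size, sizeof(void *));
            alloc_log_size = alloc_log ? size : 0;
            alloc_log_next = 0;
            return alloc_log != NULL;
    }

    void *acl_alloc(unsigned long len)
    {
            void *p;

            if (!len || alloc_log_next >= alloc_log_size)
                    return NULL;
            p = calloc(1, len);
            if (p)
                    alloc_log[alloc_log_next++] = p;
            return p;
    }

    void *acl_alloc_num(unsigned long num, unsigned long len)
    {
            if (num && len > ULONG_MAX / num)   /* reject multiplication overflow */
                    return NULL;
            return acl_alloc(num * len);
    }

    void acl_free_all(void)
    {
            while (alloc_log_next)
                    free(alloc_log[--alloc_log_next]);
            free(alloc_log);
            alloc_log = NULL;
            alloc_log_size = 0;
    }

    int main(void)
    {
            if (!acl_alloc_stack_init(16))
                    return 1;
            (void)acl_alloc(64);
            (void)acl_alloc_num(4, 32);
            acl_free_all();
            return 0;
    }
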
58880 diff -urNp linux-2.6.32.44/include/linux/grdefs.h linux-2.6.32.44/include/linux/grdefs.h
58881 --- linux-2.6.32.44/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
58882 +++ linux-2.6.32.44/include/linux/grdefs.h 2011-06-11 16:20:26.000000000 -0400
58883 @@ -0,0 +1,140 @@
58884 +#ifndef GRDEFS_H
58885 +#define GRDEFS_H
58886 +
58887 +/* Begin grsecurity status declarations */
58888 +
58889 +enum {
58890 + GR_READY = 0x01,
58891 + GR_STATUS_INIT = 0x00 // disabled state
58892 +};
58893 +
58894 +/* Begin ACL declarations */
58895 +
58896 +/* Role flags */
58897 +
58898 +enum {
58899 + GR_ROLE_USER = 0x0001,
58900 + GR_ROLE_GROUP = 0x0002,
58901 + GR_ROLE_DEFAULT = 0x0004,
58902 + GR_ROLE_SPECIAL = 0x0008,
58903 + GR_ROLE_AUTH = 0x0010,
58904 + GR_ROLE_NOPW = 0x0020,
58905 + GR_ROLE_GOD = 0x0040,
58906 + GR_ROLE_LEARN = 0x0080,
58907 + GR_ROLE_TPE = 0x0100,
58908 + GR_ROLE_DOMAIN = 0x0200,
58909 + GR_ROLE_PAM = 0x0400,
58910 + GR_ROLE_PERSIST = 0x0800
58911 +};
58912 +
58913 +/* ACL Subject and Object mode flags */
58914 +enum {
58915 + GR_DELETED = 0x80000000
58916 +};
58917 +
58918 +/* ACL Object-only mode flags */
58919 +enum {
58920 + GR_READ = 0x00000001,
58921 + GR_APPEND = 0x00000002,
58922 + GR_WRITE = 0x00000004,
58923 + GR_EXEC = 0x00000008,
58924 + GR_FIND = 0x00000010,
58925 + GR_INHERIT = 0x00000020,
58926 + GR_SETID = 0x00000040,
58927 + GR_CREATE = 0x00000080,
58928 + GR_DELETE = 0x00000100,
58929 + GR_LINK = 0x00000200,
58930 + GR_AUDIT_READ = 0x00000400,
58931 + GR_AUDIT_APPEND = 0x00000800,
58932 + GR_AUDIT_WRITE = 0x00001000,
58933 + GR_AUDIT_EXEC = 0x00002000,
58934 + GR_AUDIT_FIND = 0x00004000,
58935 + GR_AUDIT_INHERIT= 0x00008000,
58936 + GR_AUDIT_SETID = 0x00010000,
58937 + GR_AUDIT_CREATE = 0x00020000,
58938 + GR_AUDIT_DELETE = 0x00040000,
58939 + GR_AUDIT_LINK = 0x00080000,
58940 + GR_PTRACERD = 0x00100000,
58941 + GR_NOPTRACE = 0x00200000,
58942 + GR_SUPPRESS = 0x00400000,
58943 + GR_NOLEARN = 0x00800000,
58944 + GR_INIT_TRANSFER= 0x01000000
58945 +};
58946 +
58947 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
58948 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
58949 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
58950 +
58951 +/* ACL subject-only mode flags */
58952 +enum {
58953 + GR_KILL = 0x00000001,
58954 + GR_VIEW = 0x00000002,
58955 + GR_PROTECTED = 0x00000004,
58956 + GR_LEARN = 0x00000008,
58957 + GR_OVERRIDE = 0x00000010,
58958 + /* just a placeholder, this mode is only used in userspace */
58959 + GR_DUMMY = 0x00000020,
58960 + GR_PROTSHM = 0x00000040,
58961 + GR_KILLPROC = 0x00000080,
58962 + GR_KILLIPPROC = 0x00000100,
58963 + /* just a placeholder, this mode is only used in userspace */
58964 + GR_NOTROJAN = 0x00000200,
58965 + GR_PROTPROCFD = 0x00000400,
58966 + GR_PROCACCT = 0x00000800,
58967 + GR_RELAXPTRACE = 0x00001000,
58968 + GR_NESTED = 0x00002000,
58969 + GR_INHERITLEARN = 0x00004000,
58970 + GR_PROCFIND = 0x00008000,
58971 + GR_POVERRIDE = 0x00010000,
58972 + GR_KERNELAUTH = 0x00020000,
58973 + GR_ATSECURE = 0x00040000,
58974 + GR_SHMEXEC = 0x00080000
58975 +};
58976 +
58977 +enum {
58978 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
58979 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
58980 + GR_PAX_ENABLE_MPROTECT = 0x0004,
58981 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
58982 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
58983 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
58984 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
58985 + GR_PAX_DISABLE_MPROTECT = 0x0400,
58986 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
58987 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
58988 +};
58989 +
58990 +enum {
58991 + GR_ID_USER = 0x01,
58992 + GR_ID_GROUP = 0x02,
58993 +};
58994 +
58995 +enum {
58996 + GR_ID_ALLOW = 0x01,
58997 + GR_ID_DENY = 0x02,
58998 +};
58999 +
59000 +#define GR_CRASH_RES 31
59001 +#define GR_UIDTABLE_MAX 500
59002 +
59003 +/* begin resource learning section */
59004 +enum {
59005 + GR_RLIM_CPU_BUMP = 60,
59006 + GR_RLIM_FSIZE_BUMP = 50000,
59007 + GR_RLIM_DATA_BUMP = 10000,
59008 + GR_RLIM_STACK_BUMP = 1000,
59009 + GR_RLIM_CORE_BUMP = 10000,
59010 + GR_RLIM_RSS_BUMP = 500000,
59011 + GR_RLIM_NPROC_BUMP = 1,
59012 + GR_RLIM_NOFILE_BUMP = 5,
59013 + GR_RLIM_MEMLOCK_BUMP = 50000,
59014 + GR_RLIM_AS_BUMP = 500000,
59015 + GR_RLIM_LOCKS_BUMP = 2,
59016 + GR_RLIM_SIGPENDING_BUMP = 5,
59017 + GR_RLIM_MSGQUEUE_BUMP = 10000,
59018 + GR_RLIM_NICE_BUMP = 1,
59019 + GR_RLIM_RTPRIO_BUMP = 1,
59020 + GR_RLIM_RTTIME_BUMP = 1000000
59021 +};
59022 +
59023 +#endif
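
Editor's note: the GR_AUDIT_* object flags in grdefs.h sit exactly ten bits above their base flags (GR_READ is 0x1, GR_AUDIT_READ is 0x400, and so on), so a requested access mode can be mapped to its audit counterpart with a shift; to_gr_audit() in grinternal.h presumably relies on this layout. A small standalone check of that relationship (to_audit() here is a hypothetical helper, not the patch's function):

    #include <assert.h>
    #include <stdio.h>

    enum {
            GR_READ         = 0x00000001,
            GR_APPEND       = 0x00000002,
            GR_WRITE        = 0x00000004,
            GR_AUDIT_READ   = 0x00000400,
            GR_AUDIT_APPEND = 0x00000800,
            GR_AUDIT_WRITE  = 0x00001000,
    };

    /* hypothetical helper: each GR_AUDIT_* flag is its base flag shifted left by 10 */
    static unsigned int to_audit(unsigned int reqmode)
    {
            return reqmode << 10;
    }

    int main(void)
    {
            unsigned int req = GR_READ | GR_WRITE;

            assert(to_audit(GR_READ) == GR_AUDIT_READ);
            assert(to_audit(req) == (GR_AUDIT_READ | GR_AUDIT_WRITE));
            printf("requested 0x%x -> audit 0x%x\n", req, to_audit(req));
            return 0;
    }
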
59024 diff -urNp linux-2.6.32.44/include/linux/grinternal.h linux-2.6.32.44/include/linux/grinternal.h
59025 --- linux-2.6.32.44/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
59026 +++ linux-2.6.32.44/include/linux/grinternal.h 2011-07-14 20:35:29.000000000 -0400
59027 @@ -0,0 +1,218 @@
59028 +#ifndef __GRINTERNAL_H
59029 +#define __GRINTERNAL_H
59030 +
59031 +#ifdef CONFIG_GRKERNSEC
59032 +
59033 +#include <linux/fs.h>
59034 +#include <linux/mnt_namespace.h>
59035 +#include <linux/nsproxy.h>
59036 +#include <linux/gracl.h>
59037 +#include <linux/grdefs.h>
59038 +#include <linux/grmsg.h>
59039 +
59040 +void gr_add_learn_entry(const char *fmt, ...)
59041 + __attribute__ ((format (printf, 1, 2)));
59042 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
59043 + const struct vfsmount *mnt);
59044 +__u32 gr_check_create(const struct dentry *new_dentry,
59045 + const struct dentry *parent,
59046 + const struct vfsmount *mnt, const __u32 mode);
59047 +int gr_check_protected_task(const struct task_struct *task);
59048 +__u32 to_gr_audit(const __u32 reqmode);
59049 +int gr_set_acls(const int type);
59050 +int gr_apply_subject_to_task(struct task_struct *task);
59051 +int gr_acl_is_enabled(void);
59052 +char gr_roletype_to_char(void);
59053 +
59054 +void gr_handle_alertkill(struct task_struct *task);
59055 +char *gr_to_filename(const struct dentry *dentry,
59056 + const struct vfsmount *mnt);
59057 +char *gr_to_filename1(const struct dentry *dentry,
59058 + const struct vfsmount *mnt);
59059 +char *gr_to_filename2(const struct dentry *dentry,
59060 + const struct vfsmount *mnt);
59061 +char *gr_to_filename3(const struct dentry *dentry,
59062 + const struct vfsmount *mnt);
59063 +
59064 +extern int grsec_enable_harden_ptrace;
59065 +extern int grsec_enable_link;
59066 +extern int grsec_enable_fifo;
59067 +extern int grsec_enable_execve;
59068 +extern int grsec_enable_shm;
59069 +extern int grsec_enable_execlog;
59070 +extern int grsec_enable_signal;
59071 +extern int grsec_enable_audit_ptrace;
59072 +extern int grsec_enable_forkfail;
59073 +extern int grsec_enable_time;
59074 +extern int grsec_enable_rofs;
59075 +extern int grsec_enable_chroot_shmat;
59076 +extern int grsec_enable_chroot_mount;
59077 +extern int grsec_enable_chroot_double;
59078 +extern int grsec_enable_chroot_pivot;
59079 +extern int grsec_enable_chroot_chdir;
59080 +extern int grsec_enable_chroot_chmod;
59081 +extern int grsec_enable_chroot_mknod;
59082 +extern int grsec_enable_chroot_fchdir;
59083 +extern int grsec_enable_chroot_nice;
59084 +extern int grsec_enable_chroot_execlog;
59085 +extern int grsec_enable_chroot_caps;
59086 +extern int grsec_enable_chroot_sysctl;
59087 +extern int grsec_enable_chroot_unix;
59088 +extern int grsec_enable_tpe;
59089 +extern int grsec_tpe_gid;
59090 +extern int grsec_enable_tpe_all;
59091 +extern int grsec_enable_tpe_invert;
59092 +extern int grsec_enable_socket_all;
59093 +extern int grsec_socket_all_gid;
59094 +extern int grsec_enable_socket_client;
59095 +extern int grsec_socket_client_gid;
59096 +extern int grsec_enable_socket_server;
59097 +extern int grsec_socket_server_gid;
59098 +extern int grsec_audit_gid;
59099 +extern int grsec_enable_group;
59100 +extern int grsec_enable_audit_textrel;
59101 +extern int grsec_enable_log_rwxmaps;
59102 +extern int grsec_enable_mount;
59103 +extern int grsec_enable_chdir;
59104 +extern int grsec_resource_logging;
59105 +extern int grsec_enable_blackhole;
59106 +extern int grsec_lastack_retries;
59107 +extern int grsec_enable_brute;
59108 +extern int grsec_lock;
59109 +
59110 +extern spinlock_t grsec_alert_lock;
59111 +extern unsigned long grsec_alert_wtime;
59112 +extern unsigned long grsec_alert_fyet;
59113 +
59114 +extern spinlock_t grsec_audit_lock;
59115 +
59116 +extern rwlock_t grsec_exec_file_lock;
59117 +
59118 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
59119 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
59120 + (tsk)->exec_file->f_vfsmnt) : "/")
59121 +
59122 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
59123 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
59124 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
59125 +
59126 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
59127 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
59128 + (tsk)->exec_file->f_vfsmnt) : "/")
59129 +
59130 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
59131 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
59132 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
59133 +
59134 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
59135 +
59136 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
59137 +
59138 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
59139 + (task)->pid, (cred)->uid, \
59140 + (cred)->euid, (cred)->gid, (cred)->egid, \
59141 + gr_parent_task_fullpath(task), \
59142 + (task)->real_parent->comm, (task)->real_parent->pid, \
59143 + (pcred)->uid, (pcred)->euid, \
59144 + (pcred)->gid, (pcred)->egid
59145 +
59146 +#define GR_CHROOT_CAPS {{ \
59147 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
59148 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
59149 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
59150 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
59151 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
59152 + CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
59153 +
59154 +#define security_learn(normal_msg,args...) \
59155 +({ \
59156 + read_lock(&grsec_exec_file_lock); \
59157 + gr_add_learn_entry(normal_msg "\n", ## args); \
59158 + read_unlock(&grsec_exec_file_lock); \
59159 +})
59160 +
59161 +enum {
59162 + GR_DO_AUDIT,
59163 + GR_DONT_AUDIT,
59164 + GR_DONT_AUDIT_GOOD
59165 +};
59166 +
59167 +enum {
59168 + GR_TTYSNIFF,
59169 + GR_RBAC,
59170 + GR_RBAC_STR,
59171 + GR_STR_RBAC,
59172 + GR_RBAC_MODE2,
59173 + GR_RBAC_MODE3,
59174 + GR_FILENAME,
59175 + GR_SYSCTL_HIDDEN,
59176 + GR_NOARGS,
59177 + GR_ONE_INT,
59178 + GR_ONE_INT_TWO_STR,
59179 + GR_ONE_STR,
59180 + GR_STR_INT,
59181 + GR_TWO_STR_INT,
59182 + GR_TWO_INT,
59183 + GR_TWO_U64,
59184 + GR_THREE_INT,
59185 + GR_FIVE_INT_TWO_STR,
59186 + GR_TWO_STR,
59187 + GR_THREE_STR,
59188 + GR_FOUR_STR,
59189 + GR_STR_FILENAME,
59190 + GR_FILENAME_STR,
59191 + GR_FILENAME_TWO_INT,
59192 + GR_FILENAME_TWO_INT_STR,
59193 + GR_TEXTREL,
59194 + GR_PTRACE,
59195 + GR_RESOURCE,
59196 + GR_CAP,
59197 + GR_SIG,
59198 + GR_SIG2,
59199 + GR_CRASH1,
59200 + GR_CRASH2,
59201 + GR_PSACCT,
59202 + GR_RWXMAP
59203 +};
59204 +
59205 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
59206 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
59207 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
59208 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
59209 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
59210 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
59211 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
59212 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
59213 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
59214 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
59215 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
59216 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
59217 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
59218 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
59219 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
59220 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
59221 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
59222 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
59223 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
59224 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
59225 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
59226 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
59227 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
59228 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
59229 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
59230 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
59231 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
59232 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
59233 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
59234 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
59235 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
59236 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
59237 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
59238 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
59239 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
59240 +
59241 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
59242 +
59243 +#endif
59244 +
59245 +#endif
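
Editor's note: every gr_log_*() wrapper above funnels into gr_log_varargs() with an argtypes discriminator that tells the sink how to pull its arguments back off the va_list. The real sink is implemented elsewhere in the patch; the following is only a minimal sketch of that dispatch pattern, with made-up message types:

    #include <stdarg.h>
    #include <stdio.h>

    enum { LOG_NOARGS, LOG_ONE_INT, LOG_ONE_STR, LOG_STR_INT };

    /* the discriminator tells the sink how to consume the va_list,
     * mirroring how the gr_log_*() wrappers tag their calls */
    static void log_varargs(const char *msg, int argtypes, ...)
    {
            va_list ap;

            va_start(ap, argtypes);
            switch (argtypes) {
            case LOG_NOARGS:
                    printf("%s", msg);
                    break;
            case LOG_ONE_INT:
                    printf(msg, va_arg(ap, int));
                    break;
            case LOG_ONE_STR:
                    printf(msg, va_arg(ap, const char *));
                    break;
            case LOG_STR_INT: {
                    const char *s = va_arg(ap, const char *);   /* fixed order: string, then int */
                    int n = va_arg(ap, int);

                    printf(msg, s, n);
                    break;
            }
            }
            va_end(ap);
    }

    int main(void)
    {
            log_varargs("time set\n", LOG_NOARGS);
            log_varargs("failed fork with errno %d\n", LOG_ONE_INT, 12);
            log_varargs("chdir to %s by pid %d\n", LOG_STR_INT, "/tmp", 4242);
            return 0;
    }
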
59246 diff -urNp linux-2.6.32.44/include/linux/grmsg.h linux-2.6.32.44/include/linux/grmsg.h
59247 --- linux-2.6.32.44/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
59248 +++ linux-2.6.32.44/include/linux/grmsg.h 2011-04-17 15:56:46.000000000 -0400
59249 @@ -0,0 +1,108 @@
59250 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
59251 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
59252 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
59253 +#define GR_STOPMOD_MSG "denied modification of module state by "
59254 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
59255 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
59256 +#define GR_IOPERM_MSG "denied use of ioperm() by "
59257 +#define GR_IOPL_MSG "denied use of iopl() by "
59258 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
59259 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
59260 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
59261 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
59262 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
59263 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
59264 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
59265 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
59266 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
59267 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
59268 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
59269 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
59270 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
59271 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
59272 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
59273 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
59274 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
59275 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
59276 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
59277 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
59278 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
59279 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
59280 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
59281 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
59282 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
59283 +#define GR_NPROC_MSG "denied overstep of process limit by "
59284 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
59285 +#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
59286 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
59287 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
59288 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
59289 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
59290 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
59291 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
59292 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
59293 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
59294 +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
59295 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
59296 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
59297 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
59298 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
59299 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
59300 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
59301 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
59302 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
59303 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
59304 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
59305 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
59306 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
59307 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
59308 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
59309 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
59310 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
59311 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
59312 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
59313 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
59314 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
59315 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
59316 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
59317 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
59318 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
59319 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
59320 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
59321 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
59322 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
59323 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
59324 +#define GR_NICE_CHROOT_MSG "denied priority change by "
59325 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
59326 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
59327 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
59328 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
59329 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
59330 +#define GR_TIME_MSG "time set by "
59331 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
59332 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
59333 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
59334 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
59335 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
59336 +#define GR_BIND_MSG "denied bind() by "
59337 +#define GR_CONNECT_MSG "denied connect() by "
59338 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
59339 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
59340 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
59341 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
59342 +#define GR_CAP_ACL_MSG "use of %s denied for "
59343 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
59344 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
59345 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
59346 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
59347 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
59348 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
59349 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
59350 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
59351 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
59352 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
59353 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
59354 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
59355 +#define GR_VM86_MSG "denied use of vm86 by "
59356 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
59357 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
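
Editor's note: many of these formats splice DEFAULTSECMSG in via C's adjacent-string-literal concatenation, and the trailing "by " is completed at log time with the credential block built by DEFAULTSECARGS in grinternal.h. A trivial illustration of that concatenation (the format here is abbreviated and the values are made up):

    #include <stdio.h>

    #define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u"
    #define SIGLOG_MSG    "signal %d sent to " DEFAULTSECMSG " by "

    int main(void)
    {
            /* the preprocessor pastes the pieces into one format string */
            printf(SIGLOG_MSG "%s\n",
                   9, "/usr/bin/victim", "victim", 1234, 1000u, 1000u, "attacker");
            return 0;
    }
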
59358 diff -urNp linux-2.6.32.44/include/linux/grsecurity.h linux-2.6.32.44/include/linux/grsecurity.h
59359 --- linux-2.6.32.44/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
59360 +++ linux-2.6.32.44/include/linux/grsecurity.h 2011-08-05 19:53:46.000000000 -0400
59361 @@ -0,0 +1,218 @@
59362 +#ifndef GR_SECURITY_H
59363 +#define GR_SECURITY_H
59364 +#include <linux/fs.h>
59365 +#include <linux/fs_struct.h>
59366 +#include <linux/binfmts.h>
59367 +#include <linux/gracl.h>
59368 +#include <linux/compat.h>
59369 +
59370 +/* notify of brain-dead configs */
59371 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59372 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
59373 +#endif
59374 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
59375 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
59376 +#endif
59377 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
59378 +#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
59379 +#endif
59380 +#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
59381 +#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
59382 +#endif
59383 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
59384 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
59385 +#endif
59386 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
59387 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
59388 +#endif
59389 +
59390 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
59391 +void gr_handle_brute_check(void);
59392 +void gr_handle_kernel_exploit(void);
59393 +int gr_process_user_ban(void);
59394 +
59395 +char gr_roletype_to_char(void);
59396 +
59397 +int gr_acl_enable_at_secure(void);
59398 +
59399 +int gr_check_user_change(int real, int effective, int fs);
59400 +int gr_check_group_change(int real, int effective, int fs);
59401 +
59402 +void gr_del_task_from_ip_table(struct task_struct *p);
59403 +
59404 +int gr_pid_is_chrooted(struct task_struct *p);
59405 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
59406 +int gr_handle_chroot_nice(void);
59407 +int gr_handle_chroot_sysctl(const int op);
59408 +int gr_handle_chroot_setpriority(struct task_struct *p,
59409 + const int niceval);
59410 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
59411 +int gr_handle_chroot_chroot(const struct dentry *dentry,
59412 + const struct vfsmount *mnt);
59413 +int gr_handle_chroot_caps(struct path *path);
59414 +void gr_handle_chroot_chdir(struct path *path);
59415 +int gr_handle_chroot_chmod(const struct dentry *dentry,
59416 + const struct vfsmount *mnt, const int mode);
59417 +int gr_handle_chroot_mknod(const struct dentry *dentry,
59418 + const struct vfsmount *mnt, const int mode);
59419 +int gr_handle_chroot_mount(const struct dentry *dentry,
59420 + const struct vfsmount *mnt,
59421 + const char *dev_name);
59422 +int gr_handle_chroot_pivot(void);
59423 +int gr_handle_chroot_unix(const pid_t pid);
59424 +
59425 +int gr_handle_rawio(const struct inode *inode);
59426 +int gr_handle_nproc(void);
59427 +
59428 +void gr_handle_ioperm(void);
59429 +void gr_handle_iopl(void);
59430 +
59431 +int gr_tpe_allow(const struct file *file);
59432 +
59433 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
59434 +void gr_clear_chroot_entries(struct task_struct *task);
59435 +
59436 +void gr_log_forkfail(const int retval);
59437 +void gr_log_timechange(void);
59438 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
59439 +void gr_log_chdir(const struct dentry *dentry,
59440 + const struct vfsmount *mnt);
59441 +void gr_log_chroot_exec(const struct dentry *dentry,
59442 + const struct vfsmount *mnt);
59443 +void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
59444 +#ifdef CONFIG_COMPAT
59445 +void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
59446 +#endif
59447 +void gr_log_remount(const char *devname, const int retval);
59448 +void gr_log_unmount(const char *devname, const int retval);
59449 +void gr_log_mount(const char *from, const char *to, const int retval);
59450 +void gr_log_textrel(struct vm_area_struct *vma);
59451 +void gr_log_rwxmmap(struct file *file);
59452 +void gr_log_rwxmprotect(struct file *file);
59453 +
59454 +int gr_handle_follow_link(const struct inode *parent,
59455 + const struct inode *inode,
59456 + const struct dentry *dentry,
59457 + const struct vfsmount *mnt);
59458 +int gr_handle_fifo(const struct dentry *dentry,
59459 + const struct vfsmount *mnt,
59460 + const struct dentry *dir, const int flag,
59461 + const int acc_mode);
59462 +int gr_handle_hardlink(const struct dentry *dentry,
59463 + const struct vfsmount *mnt,
59464 + struct inode *inode,
59465 + const int mode, const char *to);
59466 +
59467 +int gr_is_capable(const int cap);
59468 +int gr_is_capable_nolog(const int cap);
59469 +void gr_learn_resource(const struct task_struct *task, const int limit,
59470 + const unsigned long wanted, const int gt);
59471 +void gr_copy_label(struct task_struct *tsk);
59472 +void gr_handle_crash(struct task_struct *task, const int sig);
59473 +int gr_handle_signal(const struct task_struct *p, const int sig);
59474 +int gr_check_crash_uid(const uid_t uid);
59475 +int gr_check_protected_task(const struct task_struct *task);
59476 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
59477 +int gr_acl_handle_mmap(const struct file *file,
59478 + const unsigned long prot);
59479 +int gr_acl_handle_mprotect(const struct file *file,
59480 + const unsigned long prot);
59481 +int gr_check_hidden_task(const struct task_struct *tsk);
59482 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
59483 + const struct vfsmount *mnt);
59484 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
59485 + const struct vfsmount *mnt);
59486 +__u32 gr_acl_handle_access(const struct dentry *dentry,
59487 + const struct vfsmount *mnt, const int fmode);
59488 +__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
59489 + const struct vfsmount *mnt, mode_t mode);
59490 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
59491 + const struct vfsmount *mnt, mode_t mode);
59492 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
59493 + const struct vfsmount *mnt);
59494 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
59495 + const struct vfsmount *mnt);
59496 +int gr_handle_ptrace(struct task_struct *task, const long request);
59497 +int gr_handle_proc_ptrace(struct task_struct *task);
59498 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
59499 + const struct vfsmount *mnt);
59500 +int gr_check_crash_exec(const struct file *filp);
59501 +int gr_acl_is_enabled(void);
59502 +void gr_set_kernel_label(struct task_struct *task);
59503 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
59504 + const gid_t gid);
59505 +int gr_set_proc_label(const struct dentry *dentry,
59506 + const struct vfsmount *mnt,
59507 + const int unsafe_share);
59508 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
59509 + const struct vfsmount *mnt);
59510 +__u32 gr_acl_handle_open(const struct dentry *dentry,
59511 + const struct vfsmount *mnt, const int fmode);
59512 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
59513 + const struct dentry *p_dentry,
59514 + const struct vfsmount *p_mnt, const int fmode,
59515 + const int imode);
59516 +void gr_handle_create(const struct dentry *dentry,
59517 + const struct vfsmount *mnt);
59518 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
59519 + const struct dentry *parent_dentry,
59520 + const struct vfsmount *parent_mnt,
59521 + const int mode);
59522 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
59523 + const struct dentry *parent_dentry,
59524 + const struct vfsmount *parent_mnt);
59525 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
59526 + const struct vfsmount *mnt);
59527 +void gr_handle_delete(const ino_t ino, const dev_t dev);
59528 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
59529 + const struct vfsmount *mnt);
59530 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
59531 + const struct dentry *parent_dentry,
59532 + const struct vfsmount *parent_mnt,
59533 + const char *from);
59534 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
59535 + const struct dentry *parent_dentry,
59536 + const struct vfsmount *parent_mnt,
59537 + const struct dentry *old_dentry,
59538 + const struct vfsmount *old_mnt, const char *to);
59539 +int gr_acl_handle_rename(struct dentry *new_dentry,
59540 + struct dentry *parent_dentry,
59541 + const struct vfsmount *parent_mnt,
59542 + struct dentry *old_dentry,
59543 + struct inode *old_parent_inode,
59544 + struct vfsmount *old_mnt, const char *newname);
59545 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
59546 + struct dentry *old_dentry,
59547 + struct dentry *new_dentry,
59548 + struct vfsmount *mnt, const __u8 replace);
59549 +__u32 gr_check_link(const struct dentry *new_dentry,
59550 + const struct dentry *parent_dentry,
59551 + const struct vfsmount *parent_mnt,
59552 + const struct dentry *old_dentry,
59553 + const struct vfsmount *old_mnt);
59554 +int gr_acl_handle_filldir(const struct file *file, const char *name,
59555 + const unsigned int namelen, const ino_t ino);
59556 +
59557 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
59558 + const struct vfsmount *mnt);
59559 +void gr_acl_handle_exit(void);
59560 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
59561 +int gr_acl_handle_procpidmem(const struct task_struct *task);
59562 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
59563 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
59564 +void gr_audit_ptrace(struct task_struct *task);
59565 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
59566 +
59567 +#ifdef CONFIG_GRKERNSEC
59568 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
59569 +void gr_handle_vm86(void);
59570 +void gr_handle_mem_readwrite(u64 from, u64 to);
59571 +
59572 +extern int grsec_enable_dmesg;
59573 +extern int grsec_disable_privio;
59574 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59575 +extern int grsec_enable_chroot_findtask;
59576 +#endif
59577 +#endif
59578 +
59579 +#endif
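
Editor's note: the #error block at the top of grsecurity.h is a compile-time configuration sanity check; impossible or self-defeating Kconfig combinations are rejected before the kernel ever builds. The same preprocessor pattern in a tiny standalone form (feature names are made up):

    #include <stdio.h>

    #define FEATURE_A         1
    #define FEATURE_A_BACKEND 1   /* comment this out to see the guard fire at compile time */

    #if defined(FEATURE_A) && !defined(FEATURE_A_BACKEND)
    #error "FEATURE_A enabled, but no backend for it is enabled."
    #endif

    int main(void)
    {
            puts("configuration is consistent");
            return 0;
    }
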
59580 diff -urNp linux-2.6.32.44/include/linux/hdpu_features.h linux-2.6.32.44/include/linux/hdpu_features.h
59581 --- linux-2.6.32.44/include/linux/hdpu_features.h 2011-03-27 14:31:47.000000000 -0400
59582 +++ linux-2.6.32.44/include/linux/hdpu_features.h 2011-04-17 15:56:46.000000000 -0400
59583 @@ -3,7 +3,7 @@
59584 struct cpustate_t {
59585 spinlock_t lock;
59586 int excl;
59587 - int open_count;
59588 + atomic_t open_count;
59589 unsigned char cached_val;
59590 int inited;
59591 unsigned long *set_addr;
59592 diff -urNp linux-2.6.32.44/include/linux/highmem.h linux-2.6.32.44/include/linux/highmem.h
59593 --- linux-2.6.32.44/include/linux/highmem.h 2011-03-27 14:31:47.000000000 -0400
59594 +++ linux-2.6.32.44/include/linux/highmem.h 2011-04-17 15:56:46.000000000 -0400
59595 @@ -137,6 +137,18 @@ static inline void clear_highpage(struct
59596 kunmap_atomic(kaddr, KM_USER0);
59597 }
59598
59599 +static inline void sanitize_highpage(struct page *page)
59600 +{
59601 + void *kaddr;
59602 + unsigned long flags;
59603 +
59604 + local_irq_save(flags);
59605 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
59606 + clear_page(kaddr);
59607 + kunmap_atomic(kaddr, KM_CLEARPAGE);
59608 + local_irq_restore(flags);
59609 +}
59610 +
59611 static inline void zero_user_segments(struct page *page,
59612 unsigned start1, unsigned end1,
59613 unsigned start2, unsigned end2)
59614 diff -urNp linux-2.6.32.44/include/linux/i2o.h linux-2.6.32.44/include/linux/i2o.h
59615 --- linux-2.6.32.44/include/linux/i2o.h 2011-03-27 14:31:47.000000000 -0400
59616 +++ linux-2.6.32.44/include/linux/i2o.h 2011-05-04 17:56:28.000000000 -0400
59617 @@ -564,7 +564,7 @@ struct i2o_controller {
59618 struct i2o_device *exec; /* Executive */
59619 #if BITS_PER_LONG == 64
59620 spinlock_t context_list_lock; /* lock for context_list */
59621 - atomic_t context_list_counter; /* needed for unique contexts */
59622 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
59623 struct list_head context_list; /* list of context id's
59624 and pointers */
59625 #endif
59626 diff -urNp linux-2.6.32.44/include/linux/init_task.h linux-2.6.32.44/include/linux/init_task.h
59627 --- linux-2.6.32.44/include/linux/init_task.h 2011-03-27 14:31:47.000000000 -0400
59628 +++ linux-2.6.32.44/include/linux/init_task.h 2011-05-18 20:44:59.000000000 -0400
59629 @@ -83,6 +83,12 @@ extern struct group_info init_groups;
59630 #define INIT_IDS
59631 #endif
59632
59633 +#ifdef CONFIG_X86
59634 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
59635 +#else
59636 +#define INIT_TASK_THREAD_INFO
59637 +#endif
59638 +
59639 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
59640 /*
59641 * Because of the reduced scope of CAP_SETPCAP when filesystem
59642 @@ -156,6 +162,7 @@ extern struct cred init_cred;
59643 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
59644 .comm = "swapper", \
59645 .thread = INIT_THREAD, \
59646 + INIT_TASK_THREAD_INFO \
59647 .fs = &init_fs, \
59648 .files = &init_files, \
59649 .signal = &init_signals, \
59650 diff -urNp linux-2.6.32.44/include/linux/intel-iommu.h linux-2.6.32.44/include/linux/intel-iommu.h
59651 --- linux-2.6.32.44/include/linux/intel-iommu.h 2011-03-27 14:31:47.000000000 -0400
59652 +++ linux-2.6.32.44/include/linux/intel-iommu.h 2011-08-05 20:33:55.000000000 -0400
59653 @@ -296,7 +296,7 @@ struct iommu_flush {
59654 u8 fm, u64 type);
59655 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
59656 unsigned int size_order, u64 type);
59657 -};
59658 +} __no_const;
59659
59660 enum {
59661 SR_DMAR_FECTL_REG,
59662 diff -urNp linux-2.6.32.44/include/linux/interrupt.h linux-2.6.32.44/include/linux/interrupt.h
59663 --- linux-2.6.32.44/include/linux/interrupt.h 2011-06-25 12:55:35.000000000 -0400
59664 +++ linux-2.6.32.44/include/linux/interrupt.h 2011-06-25 12:56:37.000000000 -0400
59665 @@ -363,7 +363,7 @@ enum
59666 /* map softirq index to softirq name. update 'softirq_to_name' in
59667 * kernel/softirq.c when adding a new softirq.
59668 */
59669 -extern char *softirq_to_name[NR_SOFTIRQS];
59670 +extern const char * const softirq_to_name[NR_SOFTIRQS];
59671
59672 /* softirq mask and active fields moved to irq_cpustat_t in
59673 * asm/hardirq.h to get better cache usage. KAO
59674 @@ -371,12 +371,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
59675
59676 struct softirq_action
59677 {
59678 - void (*action)(struct softirq_action *);
59679 + void (*action)(void);
59680 };
59681
59682 asmlinkage void do_softirq(void);
59683 asmlinkage void __do_softirq(void);
59684 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
59685 +extern void open_softirq(int nr, void (*action)(void));
59686 extern void softirq_init(void);
59687 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
59688 extern void raise_softirq_irqoff(unsigned int nr);
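
Editor's note: the interrupt.h hunk drops the largely unused struct softirq_action * argument from softirq handlers, so both open_softirq() and the handlers themselves take the simpler void (*)(void) shape (the in-tree handlers and callers are adjusted in the rest of the patch). A self-contained userspace mimic of the patched registration interface, not actual kernel code:

    #include <stdio.h>

    /* userspace mimic of the patched interface: handlers take no argument at all */
    typedef void (*softirq_handler_t)(void);

    #define NR_SLOTS 4
    static softirq_handler_t handlers[NR_SLOTS];

    static void open_softirq_like(int nr, softirq_handler_t action)
    {
            handlers[nr] = action;
    }

    static void demo_action(void)
    {
            puts("softirq-style handler ran");
    }

    int main(void)
    {
            int nr;

            open_softirq_like(0, demo_action);
            /* what __do_softirq() does conceptually: call each registered, pending handler */
            for (nr = 0; nr < NR_SLOTS; nr++)
                    if (handlers[nr])
                            handlers[nr]();
            return 0;
    }
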
59689 diff -urNp linux-2.6.32.44/include/linux/irq.h linux-2.6.32.44/include/linux/irq.h
59690 --- linux-2.6.32.44/include/linux/irq.h 2011-03-27 14:31:47.000000000 -0400
59691 +++ linux-2.6.32.44/include/linux/irq.h 2011-04-17 15:56:46.000000000 -0400
59692 @@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq,
59693 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
59694 bool boot)
59695 {
59696 +#ifdef CONFIG_CPUMASK_OFFSTACK
59697 gfp_t gfp = GFP_ATOMIC;
59698
59699 if (boot)
59700 gfp = GFP_NOWAIT;
59701
59702 -#ifdef CONFIG_CPUMASK_OFFSTACK
59703 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
59704 return false;
59705
59706 diff -urNp linux-2.6.32.44/include/linux/kallsyms.h linux-2.6.32.44/include/linux/kallsyms.h
59707 --- linux-2.6.32.44/include/linux/kallsyms.h 2011-03-27 14:31:47.000000000 -0400
59708 +++ linux-2.6.32.44/include/linux/kallsyms.h 2011-04-17 15:56:46.000000000 -0400
59709 @@ -15,7 +15,8 @@
59710
59711 struct module;
59712
59713 -#ifdef CONFIG_KALLSYMS
59714 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
59715 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59716 /* Lookup the address for a symbol. Returns 0 if not found. */
59717 unsigned long kallsyms_lookup_name(const char *name);
59718
59719 @@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(un
59720 /* Stupid that this does nothing, but I didn't create this mess. */
59721 #define __print_symbol(fmt, addr)
59722 #endif /*CONFIG_KALLSYMS*/
59723 +#else /* when included by kallsyms.c, vsnprintf.c, or
59724 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
59725 +extern void __print_symbol(const char *fmt, unsigned long address);
59726 +extern int sprint_symbol(char *buffer, unsigned long address);
59727 +const char *kallsyms_lookup(unsigned long addr,
59728 + unsigned long *symbolsize,
59729 + unsigned long *offset,
59730 + char **modname, char *namebuf);
59731 +#endif
59732
59733 /* This macro allows us to keep printk typechecking */
59734 static void __check_printsym_format(const char *fmt, ...)
59735 diff -urNp linux-2.6.32.44/include/linux/kgdb.h linux-2.6.32.44/include/linux/kgdb.h
59736 --- linux-2.6.32.44/include/linux/kgdb.h 2011-03-27 14:31:47.000000000 -0400
59737 +++ linux-2.6.32.44/include/linux/kgdb.h 2011-08-05 20:33:55.000000000 -0400
59738 @@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
59739
59740 extern int kgdb_connected;
59741
59742 -extern atomic_t kgdb_setting_breakpoint;
59743 -extern atomic_t kgdb_cpu_doing_single_step;
59744 +extern atomic_unchecked_t kgdb_setting_breakpoint;
59745 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
59746
59747 extern struct task_struct *kgdb_usethread;
59748 extern struct task_struct *kgdb_contthread;
59749 @@ -226,8 +226,8 @@ extern int kgdb_arch_remove_breakpoint(u
59750 * hardware debug registers.
59751 */
59752 struct kgdb_arch {
59753 - unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
59754 - unsigned long flags;
59755 + const unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
59756 + const unsigned long flags;
59757
59758 int (*set_breakpoint)(unsigned long, char *);
59759 int (*remove_breakpoint)(unsigned long, char *);
59760 @@ -251,20 +251,20 @@ struct kgdb_arch {
59761 */
59762 struct kgdb_io {
59763 const char *name;
59764 - int (*read_char) (void);
59765 - void (*write_char) (u8);
59766 - void (*flush) (void);
59767 - int (*init) (void);
59768 - void (*pre_exception) (void);
59769 - void (*post_exception) (void);
59770 + int (* const read_char) (void);
59771 + void (* const write_char) (u8);
59772 + void (* const flush) (void);
59773 + int (* const init) (void);
59774 + void (* const pre_exception) (void);
59775 + void (* const post_exception) (void);
59776 };
59777
59778 -extern struct kgdb_arch arch_kgdb_ops;
59779 +extern const struct kgdb_arch arch_kgdb_ops;
59780
59781 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
59782
59783 -extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
59784 -extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
59785 +extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
59786 +extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
59787
59788 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
59789 extern int kgdb_mem2hex(char *mem, char *buf, int count);
59790 diff -urNp linux-2.6.32.44/include/linux/kmod.h linux-2.6.32.44/include/linux/kmod.h
59791 --- linux-2.6.32.44/include/linux/kmod.h 2011-03-27 14:31:47.000000000 -0400
59792 +++ linux-2.6.32.44/include/linux/kmod.h 2011-04-17 15:56:46.000000000 -0400
59793 @@ -31,6 +31,8 @@
59794 * usually useless though. */
59795 extern int __request_module(bool wait, const char *name, ...) \
59796 __attribute__((format(printf, 2, 3)));
59797 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
59798 + __attribute__((format(printf, 3, 4)));
59799 #define request_module(mod...) __request_module(true, mod)
59800 #define request_module_nowait(mod...) __request_module(false, mod)
59801 #define try_then_request_module(x, mod...) \
59802 diff -urNp linux-2.6.32.44/include/linux/kobject.h linux-2.6.32.44/include/linux/kobject.h
59803 --- linux-2.6.32.44/include/linux/kobject.h 2011-03-27 14:31:47.000000000 -0400
59804 +++ linux-2.6.32.44/include/linux/kobject.h 2011-04-17 15:56:46.000000000 -0400
59805 @@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kob
59806
59807 struct kobj_type {
59808 void (*release)(struct kobject *kobj);
59809 - struct sysfs_ops *sysfs_ops;
59810 + const struct sysfs_ops *sysfs_ops;
59811 struct attribute **default_attrs;
59812 };
59813
59814 @@ -118,9 +118,9 @@ struct kobj_uevent_env {
59815 };
59816
59817 struct kset_uevent_ops {
59818 - int (*filter)(struct kset *kset, struct kobject *kobj);
59819 - const char *(*name)(struct kset *kset, struct kobject *kobj);
59820 - int (*uevent)(struct kset *kset, struct kobject *kobj,
59821 + int (* const filter)(struct kset *kset, struct kobject *kobj);
59822 + const char *(* const name)(struct kset *kset, struct kobject *kobj);
59823 + int (* const uevent)(struct kset *kset, struct kobject *kobj,
59824 struct kobj_uevent_env *env);
59825 };
59826
59827 @@ -132,7 +132,7 @@ struct kobj_attribute {
59828 const char *buf, size_t count);
59829 };
59830
59831 -extern struct sysfs_ops kobj_sysfs_ops;
59832 +extern const struct sysfs_ops kobj_sysfs_ops;
59833
59834 /**
59835 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
59836 @@ -155,14 +155,14 @@ struct kset {
59837 struct list_head list;
59838 spinlock_t list_lock;
59839 struct kobject kobj;
59840 - struct kset_uevent_ops *uevent_ops;
59841 + const struct kset_uevent_ops *uevent_ops;
59842 };
59843
59844 extern void kset_init(struct kset *kset);
59845 extern int __must_check kset_register(struct kset *kset);
59846 extern void kset_unregister(struct kset *kset);
59847 extern struct kset * __must_check kset_create_and_add(const char *name,
59848 - struct kset_uevent_ops *u,
59849 + const struct kset_uevent_ops *u,
59850 struct kobject *parent_kobj);
59851
59852 static inline struct kset *to_kset(struct kobject *kobj)
59853 diff -urNp linux-2.6.32.44/include/linux/kvm_host.h linux-2.6.32.44/include/linux/kvm_host.h
59854 --- linux-2.6.32.44/include/linux/kvm_host.h 2011-03-27 14:31:47.000000000 -0400
59855 +++ linux-2.6.32.44/include/linux/kvm_host.h 2011-04-17 15:56:46.000000000 -0400
59856 @@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
59857 void vcpu_load(struct kvm_vcpu *vcpu);
59858 void vcpu_put(struct kvm_vcpu *vcpu);
59859
59860 -int kvm_init(void *opaque, unsigned int vcpu_size,
59861 +int kvm_init(const void *opaque, unsigned int vcpu_size,
59862 struct module *module);
59863 void kvm_exit(void);
59864
59865 @@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
59866 struct kvm_guest_debug *dbg);
59867 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
59868
59869 -int kvm_arch_init(void *opaque);
59870 +int kvm_arch_init(const void *opaque);
59871 void kvm_arch_exit(void);
59872
59873 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
59874 diff -urNp linux-2.6.32.44/include/linux/libata.h linux-2.6.32.44/include/linux/libata.h
59875 --- linux-2.6.32.44/include/linux/libata.h 2011-03-27 14:31:47.000000000 -0400
59876 +++ linux-2.6.32.44/include/linux/libata.h 2011-08-05 20:33:55.000000000 -0400
59877 @@ -525,11 +525,11 @@ struct ata_ioports {
59878
59879 struct ata_host {
59880 spinlock_t lock;
59881 - struct device *dev;
59882 + struct device *dev;
59883 void __iomem * const *iomap;
59884 unsigned int n_ports;
59885 void *private_data;
59886 - struct ata_port_operations *ops;
59887 + const struct ata_port_operations *ops;
59888 unsigned long flags;
59889 #ifdef CONFIG_ATA_ACPI
59890 acpi_handle acpi_handle;
59891 @@ -710,7 +710,7 @@ struct ata_link {
59892
59893 struct ata_port {
59894 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
59895 - struct ata_port_operations *ops;
59896 + const struct ata_port_operations *ops;
59897 spinlock_t *lock;
59898 /* Flags owned by the EH context. Only EH should touch these once the
59899 port is active */
59900 @@ -883,7 +883,7 @@ struct ata_port_operations {
59901 * ->inherits must be the last field and all the preceding
59902 * fields must be pointers.
59903 */
59904 - const struct ata_port_operations *inherits;
59905 + const struct ata_port_operations * const inherits;
59906 };
59907
59908 struct ata_port_info {
59909 @@ -892,7 +892,7 @@ struct ata_port_info {
59910 unsigned long pio_mask;
59911 unsigned long mwdma_mask;
59912 unsigned long udma_mask;
59913 - struct ata_port_operations *port_ops;
59914 + const struct ata_port_operations *port_ops;
59915 void *private_data;
59916 };
59917
59918 @@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timi
59919 extern const unsigned long sata_deb_timing_hotplug[];
59920 extern const unsigned long sata_deb_timing_long[];
59921
59922 -extern struct ata_port_operations ata_dummy_port_ops;
59923 +extern const struct ata_port_operations ata_dummy_port_ops;
59924 extern const struct ata_port_info ata_dummy_port_info;
59925
59926 static inline const unsigned long *
59927 @@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_
59928 struct scsi_host_template *sht);
59929 extern void ata_host_detach(struct ata_host *host);
59930 extern void ata_host_init(struct ata_host *, struct device *,
59931 - unsigned long, struct ata_port_operations *);
59932 + unsigned long, const struct ata_port_operations *);
59933 extern int ata_scsi_detect(struct scsi_host_template *sht);
59934 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
59935 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
59936 diff -urNp linux-2.6.32.44/include/linux/lockd/bind.h linux-2.6.32.44/include/linux/lockd/bind.h
59937 --- linux-2.6.32.44/include/linux/lockd/bind.h 2011-03-27 14:31:47.000000000 -0400
59938 +++ linux-2.6.32.44/include/linux/lockd/bind.h 2011-04-17 15:56:46.000000000 -0400
59939 @@ -23,13 +23,13 @@ struct svc_rqst;
59940 * This is the set of functions for lockd->nfsd communication
59941 */
59942 struct nlmsvc_binding {
59943 - __be32 (*fopen)(struct svc_rqst *,
59944 + __be32 (* const fopen)(struct svc_rqst *,
59945 struct nfs_fh *,
59946 struct file **);
59947 - void (*fclose)(struct file *);
59948 + void (* const fclose)(struct file *);
59949 };
59950
59951 -extern struct nlmsvc_binding * nlmsvc_ops;
59952 +extern const struct nlmsvc_binding * nlmsvc_ops;
59953
59954 /*
59955 * Similar to nfs_client_initdata, but without the NFS-specific
59956 diff -urNp linux-2.6.32.44/include/linux/mca.h linux-2.6.32.44/include/linux/mca.h
59957 --- linux-2.6.32.44/include/linux/mca.h 2011-03-27 14:31:47.000000000 -0400
59958 +++ linux-2.6.32.44/include/linux/mca.h 2011-08-05 20:33:55.000000000 -0400
59959 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
59960 int region);
59961 void * (*mca_transform_memory)(struct mca_device *,
59962 void *memory);
59963 -};
59964 +} __no_const;
59965
59966 struct mca_bus {
59967 u64 default_dma_mask;
59968 diff -urNp linux-2.6.32.44/include/linux/memory.h linux-2.6.32.44/include/linux/memory.h
59969 --- linux-2.6.32.44/include/linux/memory.h 2011-03-27 14:31:47.000000000 -0400
59970 +++ linux-2.6.32.44/include/linux/memory.h 2011-08-05 20:33:55.000000000 -0400
59971 @@ -108,7 +108,7 @@ struct memory_accessor {
59972 size_t count);
59973 ssize_t (*write)(struct memory_accessor *, const char *buf,
59974 off_t offset, size_t count);
59975 -};
59976 +} __no_const;
59977
59978 /*
59979 * Kernel text modification mutex, used for code patching. Users of this lock
59980 diff -urNp linux-2.6.32.44/include/linux/mm.h linux-2.6.32.44/include/linux/mm.h
59981 --- linux-2.6.32.44/include/linux/mm.h 2011-03-27 14:31:47.000000000 -0400
59982 +++ linux-2.6.32.44/include/linux/mm.h 2011-04-17 15:56:46.000000000 -0400
59983 @@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void
59984
59985 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
59986 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
59987 +
59988 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
59989 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
59990 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
59991 +#else
59992 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
59993 +#endif
59994 +
59995 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
59996 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
59997
59998 @@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
59999 int set_page_dirty_lock(struct page *page);
60000 int clear_page_dirty_for_io(struct page *page);
60001
60002 -/* Is the vma a continuation of the stack vma above it? */
60003 -static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
60004 -{
60005 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
60006 -}
60007 -
60008 extern unsigned long move_page_tables(struct vm_area_struct *vma,
60009 unsigned long old_addr, struct vm_area_struct *new_vma,
60010 unsigned long new_addr, unsigned long len);
60011 @@ -890,6 +891,8 @@ struct shrinker {
60012 extern void register_shrinker(struct shrinker *);
60013 extern void unregister_shrinker(struct shrinker *);
60014
60015 +pgprot_t vm_get_page_prot(unsigned long vm_flags);
60016 +
60017 int vma_wants_writenotify(struct vm_area_struct *vma);
60018
60019 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
60020 @@ -1162,6 +1165,7 @@ out:
60021 }
60022
60023 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
60024 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
60025
60026 extern unsigned long do_brk(unsigned long, unsigned long);
60027
60028 @@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(
60029 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
60030 struct vm_area_struct **pprev);
60031
60032 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
60033 +extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
60034 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
60035 +
60036 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
60037 NULL if none. Assume start_addr < end_addr. */
60038 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
60039 @@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(st
60040 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
60041 }
60042
60043 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
60044 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
60045 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
60046 unsigned long pfn, unsigned long size, pgprot_t);
60047 @@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long
60048 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
60049 extern int sysctl_memory_failure_early_kill;
60050 extern int sysctl_memory_failure_recovery;
60051 -extern atomic_long_t mce_bad_pages;
60052 +extern atomic_long_unchecked_t mce_bad_pages;
60053 +
60054 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
60055 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
60056 +#else
60057 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
60058 +#endif
60059
60060 #endif /* __KERNEL__ */
60061 #endif /* _LINUX_MM_H */
60062 diff -urNp linux-2.6.32.44/include/linux/mm_types.h linux-2.6.32.44/include/linux/mm_types.h
60063 --- linux-2.6.32.44/include/linux/mm_types.h 2011-03-27 14:31:47.000000000 -0400
60064 +++ linux-2.6.32.44/include/linux/mm_types.h 2011-04-17 15:56:46.000000000 -0400
60065 @@ -186,6 +186,8 @@ struct vm_area_struct {
60066 #ifdef CONFIG_NUMA
60067 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
60068 #endif
60069 +
60070 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
60071 };
60072
60073 struct core_thread {
60074 @@ -287,6 +289,24 @@ struct mm_struct {
60075 #ifdef CONFIG_MMU_NOTIFIER
60076 struct mmu_notifier_mm *mmu_notifier_mm;
60077 #endif
60078 +
60079 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60080 + unsigned long pax_flags;
60081 +#endif
60082 +
60083 +#ifdef CONFIG_PAX_DLRESOLVE
60084 + unsigned long call_dl_resolve;
60085 +#endif
60086 +
60087 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
60088 + unsigned long call_syscall;
60089 +#endif
60090 +
60091 +#ifdef CONFIG_PAX_ASLR
60092 + unsigned long delta_mmap; /* randomized offset */
60093 + unsigned long delta_stack; /* randomized offset */
60094 +#endif
60095 +
60096 };
60097
60098 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
60099 diff -urNp linux-2.6.32.44/include/linux/mmu_notifier.h linux-2.6.32.44/include/linux/mmu_notifier.h
60100 --- linux-2.6.32.44/include/linux/mmu_notifier.h 2011-03-27 14:31:47.000000000 -0400
60101 +++ linux-2.6.32.44/include/linux/mmu_notifier.h 2011-04-17 15:56:46.000000000 -0400
60102 @@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destr
60103 */
60104 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
60105 ({ \
60106 - pte_t __pte; \
60107 + pte_t ___pte; \
60108 struct vm_area_struct *___vma = __vma; \
60109 unsigned long ___address = __address; \
60110 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
60111 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
60112 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
60113 - __pte; \
60114 + ___pte; \
60115 })
60116
60117 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
60118 diff -urNp linux-2.6.32.44/include/linux/mmzone.h linux-2.6.32.44/include/linux/mmzone.h
60119 --- linux-2.6.32.44/include/linux/mmzone.h 2011-03-27 14:31:47.000000000 -0400
60120 +++ linux-2.6.32.44/include/linux/mmzone.h 2011-04-17 15:56:46.000000000 -0400
60121 @@ -350,7 +350,7 @@ struct zone {
60122 unsigned long flags; /* zone flags, see below */
60123
60124 /* Zone statistics */
60125 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60126 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60127
60128 /*
60129 * prev_priority holds the scanning priority for this zone. It is
60130 diff -urNp linux-2.6.32.44/include/linux/mod_devicetable.h linux-2.6.32.44/include/linux/mod_devicetable.h
60131 --- linux-2.6.32.44/include/linux/mod_devicetable.h 2011-03-27 14:31:47.000000000 -0400
60132 +++ linux-2.6.32.44/include/linux/mod_devicetable.h 2011-04-17 15:56:46.000000000 -0400
60133 @@ -12,7 +12,7 @@
60134 typedef unsigned long kernel_ulong_t;
60135 #endif
60136
60137 -#define PCI_ANY_ID (~0)
60138 +#define PCI_ANY_ID ((__u16)~0)
60139
60140 struct pci_device_id {
60141 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
60142 @@ -131,7 +131,7 @@ struct usb_device_id {
60143 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
60144 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
60145
60146 -#define HID_ANY_ID (~0)
60147 +#define HID_ANY_ID (~0U)
60148
60149 struct hid_device_id {
60150 __u16 bus;
60151 diff -urNp linux-2.6.32.44/include/linux/module.h linux-2.6.32.44/include/linux/module.h
60152 --- linux-2.6.32.44/include/linux/module.h 2011-03-27 14:31:47.000000000 -0400
60153 +++ linux-2.6.32.44/include/linux/module.h 2011-08-05 20:33:55.000000000 -0400
60154 @@ -16,6 +16,7 @@
60155 #include <linux/kobject.h>
60156 #include <linux/moduleparam.h>
60157 #include <linux/tracepoint.h>
60158 +#include <linux/fs.h>
60159
60160 #include <asm/local.h>
60161 #include <asm/module.h>
60162 @@ -287,16 +288,16 @@ struct module
60163 int (*init)(void);
60164
60165 /* If this is non-NULL, vfree after init() returns */
60166 - void *module_init;
60167 + void *module_init_rx, *module_init_rw;
60168
60169 /* Here is the actual code + data, vfree'd on unload. */
60170 - void *module_core;
60171 + void *module_core_rx, *module_core_rw;
60172
60173 /* Here are the sizes of the init and core sections */
60174 - unsigned int init_size, core_size;
60175 + unsigned int init_size_rw, core_size_rw;
60176
60177 /* The size of the executable code in each section. */
60178 - unsigned int init_text_size, core_text_size;
60179 + unsigned int init_size_rx, core_size_rx;
60180
60181 /* Arch-specific module values */
60182 struct mod_arch_specific arch;
60183 @@ -345,6 +346,10 @@ struct module
60184 #ifdef CONFIG_EVENT_TRACING
60185 struct ftrace_event_call *trace_events;
60186 unsigned int num_trace_events;
60187 + struct file_operations trace_id;
60188 + struct file_operations trace_enable;
60189 + struct file_operations trace_format;
60190 + struct file_operations trace_filter;
60191 #endif
60192 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
60193 unsigned long *ftrace_callsites;
60194 @@ -393,16 +398,46 @@ struct module *__module_address(unsigned
60195 bool is_module_address(unsigned long addr);
60196 bool is_module_text_address(unsigned long addr);
60197
60198 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
60199 +{
60200 +
60201 +#ifdef CONFIG_PAX_KERNEXEC
60202 + if (ktla_ktva(addr) >= (unsigned long)start &&
60203 + ktla_ktva(addr) < (unsigned long)start + size)
60204 + return 1;
60205 +#endif
60206 +
60207 + return ((void *)addr >= start && (void *)addr < start + size);
60208 +}
60209 +
60210 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
60211 +{
60212 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
60213 +}
60214 +
60215 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
60216 +{
60217 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
60218 +}
60219 +
60220 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
60221 +{
60222 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
60223 +}
60224 +
60225 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
60226 +{
60227 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
60228 +}
60229 +
60230 static inline int within_module_core(unsigned long addr, struct module *mod)
60231 {
60232 - return (unsigned long)mod->module_core <= addr &&
60233 - addr < (unsigned long)mod->module_core + mod->core_size;
60234 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
60235 }
60236
60237 static inline int within_module_init(unsigned long addr, struct module *mod)
60238 {
60239 - return (unsigned long)mod->module_init <= addr &&
60240 - addr < (unsigned long)mod->module_init + mod->init_size;
60241 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
60242 }
60243
60244 /* Search for module by name: must hold module_mutex. */
60245 diff -urNp linux-2.6.32.44/include/linux/moduleloader.h linux-2.6.32.44/include/linux/moduleloader.h
60246 --- linux-2.6.32.44/include/linux/moduleloader.h 2011-03-27 14:31:47.000000000 -0400
60247 +++ linux-2.6.32.44/include/linux/moduleloader.h 2011-04-17 15:56:46.000000000 -0400
60248 @@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
60249 sections. Returns NULL on failure. */
60250 void *module_alloc(unsigned long size);
60251
60252 +#ifdef CONFIG_PAX_KERNEXEC
60253 +void *module_alloc_exec(unsigned long size);
60254 +#else
60255 +#define module_alloc_exec(x) module_alloc(x)
60256 +#endif
60257 +
60258 /* Free memory returned from module_alloc. */
60259 void module_free(struct module *mod, void *module_region);
60260
60261 +#ifdef CONFIG_PAX_KERNEXEC
60262 +void module_free_exec(struct module *mod, void *module_region);
60263 +#else
60264 +#define module_free_exec(x, y) module_free((x), (y))
60265 +#endif
60266 +
60267 /* Apply the given relocation to the (simplified) ELF. Return -error
60268 or 0. */
60269 int apply_relocate(Elf_Shdr *sechdrs,
60270 diff -urNp linux-2.6.32.44/include/linux/moduleparam.h linux-2.6.32.44/include/linux/moduleparam.h
60271 --- linux-2.6.32.44/include/linux/moduleparam.h 2011-03-27 14:31:47.000000000 -0400
60272 +++ linux-2.6.32.44/include/linux/moduleparam.h 2011-04-17 15:56:46.000000000 -0400
60273 @@ -132,7 +132,7 @@ struct kparam_array
60274
60275 /* Actually copy string: maxlen param is usually sizeof(string). */
60276 #define module_param_string(name, string, len, perm) \
60277 - static const struct kparam_string __param_string_##name \
60278 + static const struct kparam_string __param_string_##name __used \
60279 = { len, string }; \
60280 __module_param_call(MODULE_PARAM_PREFIX, name, \
60281 param_set_copystring, param_get_string, \
60282 @@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffe
60283
60284 /* Comma-separated array: *nump is set to number they actually specified. */
60285 #define module_param_array_named(name, array, type, nump, perm) \
60286 - static const struct kparam_array __param_arr_##name \
60287 + static const struct kparam_array __param_arr_##name __used \
60288 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
60289 sizeof(array[0]), array }; \
60290 __module_param_call(MODULE_PARAM_PREFIX, name, \
60291 diff -urNp linux-2.6.32.44/include/linux/mutex.h linux-2.6.32.44/include/linux/mutex.h
60292 --- linux-2.6.32.44/include/linux/mutex.h 2011-03-27 14:31:47.000000000 -0400
60293 +++ linux-2.6.32.44/include/linux/mutex.h 2011-04-17 15:56:46.000000000 -0400
60294 @@ -51,7 +51,7 @@ struct mutex {
60295 spinlock_t wait_lock;
60296 struct list_head wait_list;
60297 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
60298 - struct thread_info *owner;
60299 + struct task_struct *owner;
60300 #endif
60301 #ifdef CONFIG_DEBUG_MUTEXES
60302 const char *name;
60303 diff -urNp linux-2.6.32.44/include/linux/namei.h linux-2.6.32.44/include/linux/namei.h
60304 --- linux-2.6.32.44/include/linux/namei.h 2011-03-27 14:31:47.000000000 -0400
60305 +++ linux-2.6.32.44/include/linux/namei.h 2011-04-17 15:56:46.000000000 -0400
60306 @@ -22,7 +22,7 @@ struct nameidata {
60307 unsigned int flags;
60308 int last_type;
60309 unsigned depth;
60310 - char *saved_names[MAX_NESTED_LINKS + 1];
60311 + const char *saved_names[MAX_NESTED_LINKS + 1];
60312
60313 /* Intent data */
60314 union {
60315 @@ -84,12 +84,12 @@ extern int follow_up(struct path *);
60316 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
60317 extern void unlock_rename(struct dentry *, struct dentry *);
60318
60319 -static inline void nd_set_link(struct nameidata *nd, char *path)
60320 +static inline void nd_set_link(struct nameidata *nd, const char *path)
60321 {
60322 nd->saved_names[nd->depth] = path;
60323 }
60324
60325 -static inline char *nd_get_link(struct nameidata *nd)
60326 +static inline const char *nd_get_link(const struct nameidata *nd)
60327 {
60328 return nd->saved_names[nd->depth];
60329 }
60330 diff -urNp linux-2.6.32.44/include/linux/netfilter/xt_gradm.h linux-2.6.32.44/include/linux/netfilter/xt_gradm.h
60331 --- linux-2.6.32.44/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
60332 +++ linux-2.6.32.44/include/linux/netfilter/xt_gradm.h 2011-04-17 15:56:46.000000000 -0400
60333 @@ -0,0 +1,9 @@
60334 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
60335 +#define _LINUX_NETFILTER_XT_GRADM_H 1
60336 +
60337 +struct xt_gradm_mtinfo {
60338 + __u16 flags;
60339 + __u16 invflags;
60340 +};
60341 +
60342 +#endif
60343 diff -urNp linux-2.6.32.44/include/linux/nodemask.h linux-2.6.32.44/include/linux/nodemask.h
60344 --- linux-2.6.32.44/include/linux/nodemask.h 2011-03-27 14:31:47.000000000 -0400
60345 +++ linux-2.6.32.44/include/linux/nodemask.h 2011-04-17 15:56:46.000000000 -0400
60346 @@ -464,11 +464,11 @@ static inline int num_node_state(enum no
60347
60348 #define any_online_node(mask) \
60349 ({ \
60350 - int node; \
60351 - for_each_node_mask(node, (mask)) \
60352 - if (node_online(node)) \
60353 + int __node; \
60354 + for_each_node_mask(__node, (mask)) \
60355 + if (node_online(__node)) \
60356 break; \
60357 - node; \
60358 + __node; \
60359 })
60360
60361 #define num_online_nodes() num_node_state(N_ONLINE)
60362 diff -urNp linux-2.6.32.44/include/linux/oprofile.h linux-2.6.32.44/include/linux/oprofile.h
60363 --- linux-2.6.32.44/include/linux/oprofile.h 2011-03-27 14:31:47.000000000 -0400
60364 +++ linux-2.6.32.44/include/linux/oprofile.h 2011-04-17 15:56:46.000000000 -0400
60365 @@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super
60366 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
60367 char const * name, ulong * val);
60368
60369 -/** Create a file for read-only access to an atomic_t. */
60370 +/** Create a file for read-only access to an atomic_unchecked_t. */
60371 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
60372 - char const * name, atomic_t * val);
60373 + char const * name, atomic_unchecked_t * val);
60374
60375 /** create a directory */
60376 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
60377 diff -urNp linux-2.6.32.44/include/linux/perf_event.h linux-2.6.32.44/include/linux/perf_event.h
60378 --- linux-2.6.32.44/include/linux/perf_event.h 2011-03-27 14:31:47.000000000 -0400
60379 +++ linux-2.6.32.44/include/linux/perf_event.h 2011-05-04 17:56:28.000000000 -0400
60380 @@ -476,7 +476,7 @@ struct hw_perf_event {
60381 struct hrtimer hrtimer;
60382 };
60383 };
60384 - atomic64_t prev_count;
60385 + atomic64_unchecked_t prev_count;
60386 u64 sample_period;
60387 u64 last_period;
60388 atomic64_t period_left;
60389 @@ -557,7 +557,7 @@ struct perf_event {
60390 const struct pmu *pmu;
60391
60392 enum perf_event_active_state state;
60393 - atomic64_t count;
60394 + atomic64_unchecked_t count;
60395
60396 /*
60397 * These are the total time in nanoseconds that the event
60398 @@ -595,8 +595,8 @@ struct perf_event {
60399 * These accumulate total time (in nanoseconds) that children
60400 * events have been enabled and running, respectively.
60401 */
60402 - atomic64_t child_total_time_enabled;
60403 - atomic64_t child_total_time_running;
60404 + atomic64_unchecked_t child_total_time_enabled;
60405 + atomic64_unchecked_t child_total_time_running;
60406
60407 /*
60408 * Protect attach/detach and child_list:
60409 diff -urNp linux-2.6.32.44/include/linux/pipe_fs_i.h linux-2.6.32.44/include/linux/pipe_fs_i.h
60410 --- linux-2.6.32.44/include/linux/pipe_fs_i.h 2011-03-27 14:31:47.000000000 -0400
60411 +++ linux-2.6.32.44/include/linux/pipe_fs_i.h 2011-04-17 15:56:46.000000000 -0400
60412 @@ -46,9 +46,9 @@ struct pipe_inode_info {
60413 wait_queue_head_t wait;
60414 unsigned int nrbufs, curbuf;
60415 struct page *tmp_page;
60416 - unsigned int readers;
60417 - unsigned int writers;
60418 - unsigned int waiting_writers;
60419 + atomic_t readers;
60420 + atomic_t writers;
60421 + atomic_t waiting_writers;
60422 unsigned int r_counter;
60423 unsigned int w_counter;
60424 struct fasync_struct *fasync_readers;
60425 diff -urNp linux-2.6.32.44/include/linux/poison.h linux-2.6.32.44/include/linux/poison.h
60426 --- linux-2.6.32.44/include/linux/poison.h 2011-03-27 14:31:47.000000000 -0400
60427 +++ linux-2.6.32.44/include/linux/poison.h 2011-04-17 15:56:46.000000000 -0400
60428 @@ -19,8 +19,8 @@
60429 * under normal circumstances, used to verify that nobody uses
60430 * non-initialized list entries.
60431 */
60432 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
60433 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
60434 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
60435 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
60436
60437 /********** include/linux/timer.h **********/
60438 /*
60439 diff -urNp linux-2.6.32.44/include/linux/posix-timers.h linux-2.6.32.44/include/linux/posix-timers.h
60440 --- linux-2.6.32.44/include/linux/posix-timers.h 2011-03-27 14:31:47.000000000 -0400
60441 +++ linux-2.6.32.44/include/linux/posix-timers.h 2011-08-05 20:33:55.000000000 -0400
60442 @@ -67,7 +67,7 @@ struct k_itimer {
60443 };
60444
60445 struct k_clock {
60446 - int res; /* in nanoseconds */
60447 + const int res; /* in nanoseconds */
60448 int (*clock_getres) (const clockid_t which_clock, struct timespec *tp);
60449 int (*clock_set) (const clockid_t which_clock, struct timespec * tp);
60450 int (*clock_get) (const clockid_t which_clock, struct timespec * tp);
60451 diff -urNp linux-2.6.32.44/include/linux/preempt.h linux-2.6.32.44/include/linux/preempt.h
60452 --- linux-2.6.32.44/include/linux/preempt.h 2011-03-27 14:31:47.000000000 -0400
60453 +++ linux-2.6.32.44/include/linux/preempt.h 2011-08-05 20:33:55.000000000 -0400
60454 @@ -110,7 +110,7 @@ struct preempt_ops {
60455 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
60456 void (*sched_out)(struct preempt_notifier *notifier,
60457 struct task_struct *next);
60458 -};
60459 +} __no_const;
60460
60461 /**
60462 * preempt_notifier - key for installing preemption notifiers
60463 diff -urNp linux-2.6.32.44/include/linux/proc_fs.h linux-2.6.32.44/include/linux/proc_fs.h
60464 --- linux-2.6.32.44/include/linux/proc_fs.h 2011-03-27 14:31:47.000000000 -0400
60465 +++ linux-2.6.32.44/include/linux/proc_fs.h 2011-08-05 20:33:55.000000000 -0400
60466 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
60467 return proc_create_data(name, mode, parent, proc_fops, NULL);
60468 }
60469
60470 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
60471 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
60472 +{
60473 +#ifdef CONFIG_GRKERNSEC_PROC_USER
60474 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
60475 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60476 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
60477 +#else
60478 + return proc_create_data(name, mode, parent, proc_fops, NULL);
60479 +#endif
60480 +}
60481 +
60482 +
60483 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
60484 mode_t mode, struct proc_dir_entry *base,
60485 read_proc_t *read_proc, void * data)
60486 @@ -256,7 +269,7 @@ union proc_op {
60487 int (*proc_show)(struct seq_file *m,
60488 struct pid_namespace *ns, struct pid *pid,
60489 struct task_struct *task);
60490 -};
60491 +} __no_const;
60492
60493 struct ctl_table_header;
60494 struct ctl_table;
60495 diff -urNp linux-2.6.32.44/include/linux/ptrace.h linux-2.6.32.44/include/linux/ptrace.h
60496 --- linux-2.6.32.44/include/linux/ptrace.h 2011-03-27 14:31:47.000000000 -0400
60497 +++ linux-2.6.32.44/include/linux/ptrace.h 2011-04-17 15:56:46.000000000 -0400
60498 @@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_
60499 extern void exit_ptrace(struct task_struct *tracer);
60500 #define PTRACE_MODE_READ 1
60501 #define PTRACE_MODE_ATTACH 2
60502 -/* Returns 0 on success, -errno on denial. */
60503 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
60504 /* Returns true on success, false on denial. */
60505 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
60506 +/* Returns true on success, false on denial. */
60507 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
60508
60509 static inline int ptrace_reparented(struct task_struct *child)
60510 {
60511 diff -urNp linux-2.6.32.44/include/linux/random.h linux-2.6.32.44/include/linux/random.h
60512 --- linux-2.6.32.44/include/linux/random.h 2011-03-27 14:31:47.000000000 -0400
60513 +++ linux-2.6.32.44/include/linux/random.h 2011-08-07 19:48:09.000000000 -0400
60514 @@ -53,17 +53,6 @@ extern void add_interrupt_randomness(int
60515 extern void get_random_bytes(void *buf, int nbytes);
60516 void generate_random_uuid(unsigned char uuid_out[16]);
60517
60518 -extern __u32 secure_ip_id(__be32 daddr);
60519 -extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
60520 -extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
60521 - __be16 dport);
60522 -extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
60523 - __be16 sport, __be16 dport);
60524 -extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
60525 - __be16 sport, __be16 dport);
60526 -extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
60527 - __be16 sport, __be16 dport);
60528 -
60529 #ifndef MODULE
60530 extern const struct file_operations random_fops, urandom_fops;
60531 #endif
60532 @@ -74,6 +63,11 @@ unsigned long randomize_range(unsigned l
60533 u32 random32(void);
60534 void srandom32(u32 seed);
60535
60536 +static inline unsigned long pax_get_random_long(void)
60537 +{
60538 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
60539 +}
60540 +
60541 #endif /* __KERNEL___ */
60542
60543 #endif /* _LINUX_RANDOM_H */
60544 diff -urNp linux-2.6.32.44/include/linux/reboot.h linux-2.6.32.44/include/linux/reboot.h
60545 --- linux-2.6.32.44/include/linux/reboot.h 2011-03-27 14:31:47.000000000 -0400
60546 +++ linux-2.6.32.44/include/linux/reboot.h 2011-05-22 23:02:06.000000000 -0400
60547 @@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
60548 * Architecture-specific implementations of sys_reboot commands.
60549 */
60550
60551 -extern void machine_restart(char *cmd);
60552 -extern void machine_halt(void);
60553 -extern void machine_power_off(void);
60554 +extern void machine_restart(char *cmd) __noreturn;
60555 +extern void machine_halt(void) __noreturn;
60556 +extern void machine_power_off(void) __noreturn;
60557
60558 extern void machine_shutdown(void);
60559 struct pt_regs;
60560 @@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
60561 */
60562
60563 extern void kernel_restart_prepare(char *cmd);
60564 -extern void kernel_restart(char *cmd);
60565 -extern void kernel_halt(void);
60566 -extern void kernel_power_off(void);
60567 +extern void kernel_restart(char *cmd) __noreturn;
60568 +extern void kernel_halt(void) __noreturn;
60569 +extern void kernel_power_off(void) __noreturn;
60570
60571 void ctrl_alt_del(void);
60572
60573 @@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
60574 * Emergency restart, callable from an interrupt handler.
60575 */
60576
60577 -extern void emergency_restart(void);
60578 +extern void emergency_restart(void) __noreturn;
60579 #include <asm/emergency-restart.h>
60580
60581 #endif
60582 diff -urNp linux-2.6.32.44/include/linux/reiserfs_fs.h linux-2.6.32.44/include/linux/reiserfs_fs.h
60583 --- linux-2.6.32.44/include/linux/reiserfs_fs.h 2011-03-27 14:31:47.000000000 -0400
60584 +++ linux-2.6.32.44/include/linux/reiserfs_fs.h 2011-04-17 15:56:46.000000000 -0400
60585 @@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset
60586 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
60587
60588 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
60589 -#define get_generation(s) atomic_read (&fs_generation(s))
60590 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
60591 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
60592 #define __fs_changed(gen,s) (gen != get_generation (s))
60593 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
60594 @@ -1534,24 +1534,24 @@ static inline struct super_block *sb_fro
60595 */
60596
60597 struct item_operations {
60598 - int (*bytes_number) (struct item_head * ih, int block_size);
60599 - void (*decrement_key) (struct cpu_key *);
60600 - int (*is_left_mergeable) (struct reiserfs_key * ih,
60601 + int (* const bytes_number) (struct item_head * ih, int block_size);
60602 + void (* const decrement_key) (struct cpu_key *);
60603 + int (* const is_left_mergeable) (struct reiserfs_key * ih,
60604 unsigned long bsize);
60605 - void (*print_item) (struct item_head *, char *item);
60606 - void (*check_item) (struct item_head *, char *item);
60607 + void (* const print_item) (struct item_head *, char *item);
60608 + void (* const check_item) (struct item_head *, char *item);
60609
60610 - int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
60611 + int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
60612 int is_affected, int insert_size);
60613 - int (*check_left) (struct virtual_item * vi, int free,
60614 + int (* const check_left) (struct virtual_item * vi, int free,
60615 int start_skip, int end_skip);
60616 - int (*check_right) (struct virtual_item * vi, int free);
60617 - int (*part_size) (struct virtual_item * vi, int from, int to);
60618 - int (*unit_num) (struct virtual_item * vi);
60619 - void (*print_vi) (struct virtual_item * vi);
60620 + int (* const check_right) (struct virtual_item * vi, int free);
60621 + int (* const part_size) (struct virtual_item * vi, int from, int to);
60622 + int (* const unit_num) (struct virtual_item * vi);
60623 + void (* const print_vi) (struct virtual_item * vi);
60624 };
60625
60626 -extern struct item_operations *item_ops[TYPE_ANY + 1];
60627 +extern const struct item_operations * const item_ops[TYPE_ANY + 1];
60628
60629 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
60630 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
60631 diff -urNp linux-2.6.32.44/include/linux/reiserfs_fs_sb.h linux-2.6.32.44/include/linux/reiserfs_fs_sb.h
60632 --- linux-2.6.32.44/include/linux/reiserfs_fs_sb.h 2011-03-27 14:31:47.000000000 -0400
60633 +++ linux-2.6.32.44/include/linux/reiserfs_fs_sb.h 2011-04-17 15:56:46.000000000 -0400
60634 @@ -377,7 +377,7 @@ struct reiserfs_sb_info {
60635 /* Comment? -Hans */
60636 wait_queue_head_t s_wait;
60637 /* To be obsoleted soon by per buffer seals.. -Hans */
60638 - atomic_t s_generation_counter; // increased by one every time the
60639 + atomic_unchecked_t s_generation_counter; // increased by one every time the
60640 // tree gets re-balanced
60641 unsigned long s_properties; /* File system properties. Currently holds
60642 on-disk FS format */
60643 diff -urNp linux-2.6.32.44/include/linux/relay.h linux-2.6.32.44/include/linux/relay.h
60644 --- linux-2.6.32.44/include/linux/relay.h 2011-03-27 14:31:47.000000000 -0400
60645 +++ linux-2.6.32.44/include/linux/relay.h 2011-08-05 20:33:55.000000000 -0400
60646 @@ -159,7 +159,7 @@ struct rchan_callbacks
60647 * The callback should return 0 if successful, negative if not.
60648 */
60649 int (*remove_buf_file)(struct dentry *dentry);
60650 -};
60651 +} __no_const;
60652
60653 /*
60654 * CONFIG_RELAY kernel API, kernel/relay.c
60655 diff -urNp linux-2.6.32.44/include/linux/sched.h linux-2.6.32.44/include/linux/sched.h
60656 --- linux-2.6.32.44/include/linux/sched.h 2011-03-27 14:31:47.000000000 -0400
60657 +++ linux-2.6.32.44/include/linux/sched.h 2011-08-05 20:33:55.000000000 -0400
60658 @@ -101,6 +101,7 @@ struct bio;
60659 struct fs_struct;
60660 struct bts_context;
60661 struct perf_event_context;
60662 +struct linux_binprm;
60663
60664 /*
60665 * List of flags we want to share for kernel threads,
60666 @@ -350,7 +351,7 @@ extern signed long schedule_timeout_kill
60667 extern signed long schedule_timeout_uninterruptible(signed long timeout);
60668 asmlinkage void __schedule(void);
60669 asmlinkage void schedule(void);
60670 -extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
60671 +extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
60672
60673 struct nsproxy;
60674 struct user_namespace;
60675 @@ -371,9 +372,12 @@ struct user_namespace;
60676 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
60677
60678 extern int sysctl_max_map_count;
60679 +extern unsigned long sysctl_heap_stack_gap;
60680
60681 #include <linux/aio.h>
60682
60683 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
60684 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
60685 extern unsigned long
60686 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
60687 unsigned long, unsigned long);
60688 @@ -666,6 +670,16 @@ struct signal_struct {
60689 struct tty_audit_buf *tty_audit_buf;
60690 #endif
60691
60692 +#ifdef CONFIG_GRKERNSEC
60693 + u32 curr_ip;
60694 + u32 saved_ip;
60695 + u32 gr_saddr;
60696 + u32 gr_daddr;
60697 + u16 gr_sport;
60698 + u16 gr_dport;
60699 + u8 used_accept:1;
60700 +#endif
60701 +
60702 int oom_adj; /* OOM kill score adjustment (bit shift) */
60703 };
60704
60705 @@ -723,6 +737,11 @@ struct user_struct {
60706 struct key *session_keyring; /* UID's default session keyring */
60707 #endif
60708
60709 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
60710 + unsigned int banned;
60711 + unsigned long ban_expires;
60712 +#endif
60713 +
60714 /* Hash table maintenance information */
60715 struct hlist_node uidhash_node;
60716 uid_t uid;
60717 @@ -1328,8 +1347,8 @@ struct task_struct {
60718 struct list_head thread_group;
60719
60720 struct completion *vfork_done; /* for vfork() */
60721 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
60722 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60723 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
60724 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60725
60726 cputime_t utime, stime, utimescaled, stimescaled;
60727 cputime_t gtime;
60728 @@ -1343,16 +1362,6 @@ struct task_struct {
60729 struct task_cputime cputime_expires;
60730 struct list_head cpu_timers[3];
60731
60732 -/* process credentials */
60733 - const struct cred *real_cred; /* objective and real subjective task
60734 - * credentials (COW) */
60735 - const struct cred *cred; /* effective (overridable) subjective task
60736 - * credentials (COW) */
60737 - struct mutex cred_guard_mutex; /* guard against foreign influences on
60738 - * credential calculations
60739 - * (notably. ptrace) */
60740 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60741 -
60742 char comm[TASK_COMM_LEN]; /* executable name excluding path
60743 - access with [gs]et_task_comm (which lock
60744 it with task_lock())
60745 @@ -1369,6 +1378,10 @@ struct task_struct {
60746 #endif
60747 /* CPU-specific state of this task */
60748 struct thread_struct thread;
60749 +/* thread_info moved to task_struct */
60750 +#ifdef CONFIG_X86
60751 + struct thread_info tinfo;
60752 +#endif
60753 /* filesystem information */
60754 struct fs_struct *fs;
60755 /* open file information */
60756 @@ -1436,6 +1449,15 @@ struct task_struct {
60757 int hardirq_context;
60758 int softirq_context;
60759 #endif
60760 +
60761 +/* process credentials */
60762 + const struct cred *real_cred; /* objective and real subjective task
60763 + * credentials (COW) */
60764 + struct mutex cred_guard_mutex; /* guard against foreign influences on
60765 + * credential calculations
60766 + * (notably. ptrace) */
60767 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60768 +
60769 #ifdef CONFIG_LOCKDEP
60770 # define MAX_LOCK_DEPTH 48UL
60771 u64 curr_chain_key;
60772 @@ -1456,6 +1478,9 @@ struct task_struct {
60773
60774 struct backing_dev_info *backing_dev_info;
60775
60776 + const struct cred *cred; /* effective (overridable) subjective task
60777 + * credentials (COW) */
60778 +
60779 struct io_context *io_context;
60780
60781 unsigned long ptrace_message;
60782 @@ -1519,6 +1544,21 @@ struct task_struct {
60783 unsigned long default_timer_slack_ns;
60784
60785 struct list_head *scm_work_list;
60786 +
60787 +#ifdef CONFIG_GRKERNSEC
60788 + /* grsecurity */
60789 + struct dentry *gr_chroot_dentry;
60790 + struct acl_subject_label *acl;
60791 + struct acl_role_label *role;
60792 + struct file *exec_file;
60793 + u16 acl_role_id;
60794 + /* is this the task that authenticated to the special role */
60795 + u8 acl_sp_role;
60796 + u8 is_writable;
60797 + u8 brute;
60798 + u8 gr_is_chrooted;
60799 +#endif
60800 +
60801 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
60802 /* Index of current stored adress in ret_stack */
60803 int curr_ret_stack;
60804 @@ -1542,6 +1582,57 @@ struct task_struct {
60805 #endif /* CONFIG_TRACING */
60806 };
60807
60808 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
60809 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
60810 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
60811 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
60812 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
60813 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
60814 +
60815 +#ifdef CONFIG_PAX_SOFTMODE
60816 +extern int pax_softmode;
60817 +#endif
60818 +
60819 +extern int pax_check_flags(unsigned long *);
60820 +
60821 +/* if tsk != current then task_lock must be held on it */
60822 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60823 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
60824 +{
60825 + if (likely(tsk->mm))
60826 + return tsk->mm->pax_flags;
60827 + else
60828 + return 0UL;
60829 +}
60830 +
60831 +/* if tsk != current then task_lock must be held on it */
60832 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
60833 +{
60834 + if (likely(tsk->mm)) {
60835 + tsk->mm->pax_flags = flags;
60836 + return 0;
60837 + }
60838 + return -EINVAL;
60839 +}
60840 +#endif
60841 +
60842 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60843 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
60844 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
60845 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60846 +#endif
60847 +
60848 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
60849 +extern void pax_report_insns(void *pc, void *sp);
60850 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
60851 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
60852 +
60853 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
60854 +extern void pax_track_stack(void);
60855 +#else
60856 +static inline void pax_track_stack(void) {}
60857 +#endif
60858 +
60859 /* Future-safe accessor for struct task_struct's cpus_allowed. */
60860 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
60861
60862 @@ -1978,7 +2069,9 @@ void yield(void);
60863 extern struct exec_domain default_exec_domain;
60864
60865 union thread_union {
60866 +#ifndef CONFIG_X86
60867 struct thread_info thread_info;
60868 +#endif
60869 unsigned long stack[THREAD_SIZE/sizeof(long)];
60870 };
60871
60872 @@ -2011,6 +2104,7 @@ extern struct pid_namespace init_pid_ns;
60873 */
60874
60875 extern struct task_struct *find_task_by_vpid(pid_t nr);
60876 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
60877 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
60878 struct pid_namespace *ns);
60879
60880 @@ -2155,7 +2249,7 @@ extern void __cleanup_sighand(struct sig
60881 extern void exit_itimers(struct signal_struct *);
60882 extern void flush_itimer_signals(void);
60883
60884 -extern NORET_TYPE void do_group_exit(int);
60885 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
60886
60887 extern void daemonize(const char *, ...);
60888 extern int allow_signal(int);
60889 @@ -2284,13 +2378,17 @@ static inline unsigned long *end_of_stac
60890
60891 #endif
60892
60893 -static inline int object_is_on_stack(void *obj)
60894 +static inline int object_starts_on_stack(void *obj)
60895 {
60896 - void *stack = task_stack_page(current);
60897 + const void *stack = task_stack_page(current);
60898
60899 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
60900 }
60901
60902 +#ifdef CONFIG_PAX_USERCOPY
60903 +extern int object_is_on_stack(const void *obj, unsigned long len);
60904 +#endif
60905 +
60906 extern void thread_info_cache_init(void);
60907
60908 #ifdef CONFIG_DEBUG_STACK_USAGE
60909 diff -urNp linux-2.6.32.44/include/linux/screen_info.h linux-2.6.32.44/include/linux/screen_info.h
60910 --- linux-2.6.32.44/include/linux/screen_info.h 2011-03-27 14:31:47.000000000 -0400
60911 +++ linux-2.6.32.44/include/linux/screen_info.h 2011-04-17 15:56:46.000000000 -0400
60912 @@ -42,7 +42,8 @@ struct screen_info {
60913 __u16 pages; /* 0x32 */
60914 __u16 vesa_attributes; /* 0x34 */
60915 __u32 capabilities; /* 0x36 */
60916 - __u8 _reserved[6]; /* 0x3a */
60917 + __u16 vesapm_size; /* 0x3a */
60918 + __u8 _reserved[4]; /* 0x3c */
60919 } __attribute__((packed));
60920
60921 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
60922 diff -urNp linux-2.6.32.44/include/linux/security.h linux-2.6.32.44/include/linux/security.h
60923 --- linux-2.6.32.44/include/linux/security.h 2011-03-27 14:31:47.000000000 -0400
60924 +++ linux-2.6.32.44/include/linux/security.h 2011-04-17 15:56:46.000000000 -0400
60925 @@ -34,6 +34,7 @@
60926 #include <linux/key.h>
60927 #include <linux/xfrm.h>
60928 #include <linux/gfp.h>
60929 +#include <linux/grsecurity.h>
60930 #include <net/flow.h>
60931
60932 /* Maximum number of letters for an LSM name string */
60933 diff -urNp linux-2.6.32.44/include/linux/shm.h linux-2.6.32.44/include/linux/shm.h
60934 --- linux-2.6.32.44/include/linux/shm.h 2011-03-27 14:31:47.000000000 -0400
60935 +++ linux-2.6.32.44/include/linux/shm.h 2011-04-17 15:56:46.000000000 -0400
60936 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
60937 pid_t shm_cprid;
60938 pid_t shm_lprid;
60939 struct user_struct *mlock_user;
60940 +#ifdef CONFIG_GRKERNSEC
60941 + time_t shm_createtime;
60942 + pid_t shm_lapid;
60943 +#endif
60944 };
60945
60946 /* shm_mode upper byte flags */
60947 diff -urNp linux-2.6.32.44/include/linux/skbuff.h linux-2.6.32.44/include/linux/skbuff.h
60948 --- linux-2.6.32.44/include/linux/skbuff.h 2011-03-27 14:31:47.000000000 -0400
60949 +++ linux-2.6.32.44/include/linux/skbuff.h 2011-07-06 19:53:33.000000000 -0400
60950 @@ -544,7 +544,7 @@ static inline union skb_shared_tx *skb_t
60951 */
60952 static inline int skb_queue_empty(const struct sk_buff_head *list)
60953 {
60954 - return list->next == (struct sk_buff *)list;
60955 + return list->next == (const struct sk_buff *)list;
60956 }
60957
60958 /**
60959 @@ -557,7 +557,7 @@ static inline int skb_queue_empty(const
60960 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
60961 const struct sk_buff *skb)
60962 {
60963 - return (skb->next == (struct sk_buff *) list);
60964 + return (skb->next == (const struct sk_buff *) list);
60965 }
60966
60967 /**
60968 @@ -570,7 +570,7 @@ static inline bool skb_queue_is_last(con
60969 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
60970 const struct sk_buff *skb)
60971 {
60972 - return (skb->prev == (struct sk_buff *) list);
60973 + return (skb->prev == (const struct sk_buff *) list);
60974 }
60975
60976 /**
60977 @@ -1367,7 +1367,7 @@ static inline int skb_network_offset(con
60978 * headroom, you should not reduce this.
60979 */
60980 #ifndef NET_SKB_PAD
60981 -#define NET_SKB_PAD 32
60982 +#define NET_SKB_PAD (_AC(32,UL))
60983 #endif
60984
60985 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
60986 diff -urNp linux-2.6.32.44/include/linux/slab_def.h linux-2.6.32.44/include/linux/slab_def.h
60987 --- linux-2.6.32.44/include/linux/slab_def.h 2011-03-27 14:31:47.000000000 -0400
60988 +++ linux-2.6.32.44/include/linux/slab_def.h 2011-05-04 17:56:28.000000000 -0400
60989 @@ -69,10 +69,10 @@ struct kmem_cache {
60990 unsigned long node_allocs;
60991 unsigned long node_frees;
60992 unsigned long node_overflow;
60993 - atomic_t allochit;
60994 - atomic_t allocmiss;
60995 - atomic_t freehit;
60996 - atomic_t freemiss;
60997 + atomic_unchecked_t allochit;
60998 + atomic_unchecked_t allocmiss;
60999 + atomic_unchecked_t freehit;
61000 + atomic_unchecked_t freemiss;
61001
61002 /*
61003 * If debugging is enabled, then the allocator can add additional
61004 diff -urNp linux-2.6.32.44/include/linux/slab.h linux-2.6.32.44/include/linux/slab.h
61005 --- linux-2.6.32.44/include/linux/slab.h 2011-03-27 14:31:47.000000000 -0400
61006 +++ linux-2.6.32.44/include/linux/slab.h 2011-04-17 15:56:46.000000000 -0400
61007 @@ -11,12 +11,20 @@
61008
61009 #include <linux/gfp.h>
61010 #include <linux/types.h>
61011 +#include <linux/err.h>
61012
61013 /*
61014 * Flags to pass to kmem_cache_create().
61015 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
61016 */
61017 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
61018 +
61019 +#ifdef CONFIG_PAX_USERCOPY
61020 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
61021 +#else
61022 +#define SLAB_USERCOPY 0x00000000UL
61023 +#endif
61024 +
61025 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
61026 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
61027 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
61028 @@ -82,10 +90,13 @@
61029 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
61030 * Both make kfree a no-op.
61031 */
61032 -#define ZERO_SIZE_PTR ((void *)16)
61033 +#define ZERO_SIZE_PTR \
61034 +({ \
61035 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
61036 + (void *)(-MAX_ERRNO-1L); \
61037 +})
61038
61039 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
61040 - (unsigned long)ZERO_SIZE_PTR)
61041 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
61042
61043 /*
61044 * struct kmem_cache related prototypes
61045 @@ -138,6 +149,7 @@ void * __must_check krealloc(const void
61046 void kfree(const void *);
61047 void kzfree(const void *);
61048 size_t ksize(const void *);
61049 +void check_object_size(const void *ptr, unsigned long n, bool to);
61050
61051 /*
61052 * Allocator specific definitions. These are mainly used to establish optimized
61053 @@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t
61054
61055 void __init kmem_cache_init_late(void);
61056
61057 +#define kmalloc(x, y) \
61058 +({ \
61059 + void *___retval; \
61060 + intoverflow_t ___x = (intoverflow_t)x; \
61061 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
61062 + ___retval = NULL; \
61063 + else \
61064 + ___retval = kmalloc((size_t)___x, (y)); \
61065 + ___retval; \
61066 +})
61067 +
61068 +#define kmalloc_node(x, y, z) \
61069 +({ \
61070 + void *___retval; \
61071 + intoverflow_t ___x = (intoverflow_t)x; \
61072 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
61073 + ___retval = NULL; \
61074 + else \
61075 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
61076 + ___retval; \
61077 +})
61078 +
61079 +#define kzalloc(x, y) \
61080 +({ \
61081 + void *___retval; \
61082 + intoverflow_t ___x = (intoverflow_t)x; \
61083 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
61084 + ___retval = NULL; \
61085 + else \
61086 + ___retval = kzalloc((size_t)___x, (y)); \
61087 + ___retval; \
61088 +})
61089 +
61090 #endif /* _LINUX_SLAB_H */
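The slab.h hunk above moves ZERO_SIZE_PTR from (void *)16 up to (void *)(-MAX_ERRNO-1L) and rewrites ZERO_OR_NULL_PTR() as a single unsigned comparison, so NULL, the zero-size cookie and the ERR_PTR range above it are all caught in one test; the size-overflow wrapper macros are sketched separately after the vmalloc.h hunks further down. A minimal user-space sketch of the wrap-around trick, not part of the patch, with the constants standing in for the kernel's:

#include <stdio.h>

#define MAX_ERRNO      4095UL
#define ZERO_SIZE_VAL  (0UL - MAX_ERRNO - 1UL)  /* analogue of (void *)(-MAX_ERRNO-1L) */

/* (x - 1) wraps to ULONG_MAX when x == 0, so one unsigned comparison flags
 * x == 0 as well as every x >= ZERO_SIZE_VAL (zero-size and error pointers
 * live at the very top of the address space). */
static int zero_or_null(unsigned long x)
{
	return x - 1 >= ZERO_SIZE_VAL - 1;
}

int main(void)
{
	printf("NULL             -> %d\n", zero_or_null(0));              /* 1 */
	printf("ZERO_SIZE_PTR    -> %d\n", zero_or_null(ZERO_SIZE_VAL));  /* 1 */
	printf("ERR_PTR(-ENOMEM) -> %d\n", zero_or_null(0UL - 12));       /* 1 */
	printf("real pointer     -> %d\n", zero_or_null(0x1000));         /* 0 */
	return 0;
}

Because the zero-size cookie now sits above the ERR_PTR range, values in that error range also fall into the "ignore" side of the test rather than looking like ordinary pointers.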
61091 diff -urNp linux-2.6.32.44/include/linux/slub_def.h linux-2.6.32.44/include/linux/slub_def.h
61092 --- linux-2.6.32.44/include/linux/slub_def.h 2011-03-27 14:31:47.000000000 -0400
61093 +++ linux-2.6.32.44/include/linux/slub_def.h 2011-08-05 20:33:55.000000000 -0400
61094 @@ -86,7 +86,7 @@ struct kmem_cache {
61095 struct kmem_cache_order_objects max;
61096 struct kmem_cache_order_objects min;
61097 gfp_t allocflags; /* gfp flags to use on each alloc */
61098 - int refcount; /* Refcount for slab cache destroy */
61099 + atomic_t refcount; /* Refcount for slab cache destroy */
61100 void (*ctor)(void *);
61101 int inuse; /* Offset to metadata */
61102 int align; /* Alignment */
61103 @@ -215,7 +215,7 @@ static __always_inline struct kmem_cache
61104 #endif
61105
61106 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
61107 -void *__kmalloc(size_t size, gfp_t flags);
61108 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
61109
61110 #ifdef CONFIG_KMEMTRACE
61111 extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
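The __alloc_size(1) annotation added to __kmalloc() above presumably maps to GCC's alloc_size attribute (the macro itself is defined elsewhere in the patch), telling the compiler that argument 1 gives the size of the returned object. A hedged stand-alone sketch of what that enables:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative only: my_alloc stands in for __kmalloc.  The attribute lets
 * __builtin_object_size() see through the allocator, which compile-time
 * size checking (fortified string/copy helpers) can build on. */
static void *my_alloc(size_t n) __attribute__((alloc_size(1)));

static void *my_alloc(size_t n)
{
	return malloc(n);
}

int main(void)
{
	char *p = my_alloc(16);

	/* Typically prints 16 when built with -O2; (size_t)-1 when the
	 * compiler cannot determine the allocation size. */
	printf("%zu\n", __builtin_object_size(p, 0));
	free(p);
	return 0;
}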
61112 diff -urNp linux-2.6.32.44/include/linux/sonet.h linux-2.6.32.44/include/linux/sonet.h
61113 --- linux-2.6.32.44/include/linux/sonet.h 2011-03-27 14:31:47.000000000 -0400
61114 +++ linux-2.6.32.44/include/linux/sonet.h 2011-04-17 15:56:46.000000000 -0400
61115 @@ -61,7 +61,7 @@ struct sonet_stats {
61116 #include <asm/atomic.h>
61117
61118 struct k_sonet_stats {
61119 -#define __HANDLE_ITEM(i) atomic_t i
61120 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
61121 __SONET_ITEMS
61122 #undef __HANDLE_ITEM
61123 };
61124 diff -urNp linux-2.6.32.44/include/linux/sunrpc/cache.h linux-2.6.32.44/include/linux/sunrpc/cache.h
61125 --- linux-2.6.32.44/include/linux/sunrpc/cache.h 2011-03-27 14:31:47.000000000 -0400
61126 +++ linux-2.6.32.44/include/linux/sunrpc/cache.h 2011-08-05 20:33:55.000000000 -0400
61127 @@ -125,7 +125,7 @@ struct cache_detail {
61128 */
61129 struct cache_req {
61130 struct cache_deferred_req *(*defer)(struct cache_req *req);
61131 -};
61132 +} __no_const;
61133 /* this must be embedded in a deferred_request that is being
61134 * delayed awaiting cache-fill
61135 */
61136 diff -urNp linux-2.6.32.44/include/linux/sunrpc/clnt.h linux-2.6.32.44/include/linux/sunrpc/clnt.h
61137 --- linux-2.6.32.44/include/linux/sunrpc/clnt.h 2011-03-27 14:31:47.000000000 -0400
61138 +++ linux-2.6.32.44/include/linux/sunrpc/clnt.h 2011-04-17 15:56:46.000000000 -0400
61139 @@ -167,9 +167,9 @@ static inline unsigned short rpc_get_por
61140 {
61141 switch (sap->sa_family) {
61142 case AF_INET:
61143 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
61144 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
61145 case AF_INET6:
61146 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
61147 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
61148 }
61149 return 0;
61150 }
61151 @@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const
61152 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
61153 const struct sockaddr *src)
61154 {
61155 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
61156 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
61157 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
61158
61159 dsin->sin_family = ssin->sin_family;
61160 @@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const
61161 if (sa->sa_family != AF_INET6)
61162 return 0;
61163
61164 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
61165 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
61166 }
61167
61168 #endif /* __KERNEL__ */
61169 diff -urNp linux-2.6.32.44/include/linux/sunrpc/svc_rdma.h linux-2.6.32.44/include/linux/sunrpc/svc_rdma.h
61170 --- linux-2.6.32.44/include/linux/sunrpc/svc_rdma.h 2011-03-27 14:31:47.000000000 -0400
61171 +++ linux-2.6.32.44/include/linux/sunrpc/svc_rdma.h 2011-05-04 17:56:28.000000000 -0400
61172 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
61173 extern unsigned int svcrdma_max_requests;
61174 extern unsigned int svcrdma_max_req_size;
61175
61176 -extern atomic_t rdma_stat_recv;
61177 -extern atomic_t rdma_stat_read;
61178 -extern atomic_t rdma_stat_write;
61179 -extern atomic_t rdma_stat_sq_starve;
61180 -extern atomic_t rdma_stat_rq_starve;
61181 -extern atomic_t rdma_stat_rq_poll;
61182 -extern atomic_t rdma_stat_rq_prod;
61183 -extern atomic_t rdma_stat_sq_poll;
61184 -extern atomic_t rdma_stat_sq_prod;
61185 +extern atomic_unchecked_t rdma_stat_recv;
61186 +extern atomic_unchecked_t rdma_stat_read;
61187 +extern atomic_unchecked_t rdma_stat_write;
61188 +extern atomic_unchecked_t rdma_stat_sq_starve;
61189 +extern atomic_unchecked_t rdma_stat_rq_starve;
61190 +extern atomic_unchecked_t rdma_stat_rq_poll;
61191 +extern atomic_unchecked_t rdma_stat_rq_prod;
61192 +extern atomic_unchecked_t rdma_stat_sq_poll;
61193 +extern atomic_unchecked_t rdma_stat_sq_prod;
61194
61195 #define RPCRDMA_VERSION 1
61196
61197 diff -urNp linux-2.6.32.44/include/linux/suspend.h linux-2.6.32.44/include/linux/suspend.h
61198 --- linux-2.6.32.44/include/linux/suspend.h 2011-03-27 14:31:47.000000000 -0400
61199 +++ linux-2.6.32.44/include/linux/suspend.h 2011-04-17 15:56:46.000000000 -0400
61200 @@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
61201 * which require special recovery actions in that situation.
61202 */
61203 struct platform_suspend_ops {
61204 - int (*valid)(suspend_state_t state);
61205 - int (*begin)(suspend_state_t state);
61206 - int (*prepare)(void);
61207 - int (*prepare_late)(void);
61208 - int (*enter)(suspend_state_t state);
61209 - void (*wake)(void);
61210 - void (*finish)(void);
61211 - void (*end)(void);
61212 - void (*recover)(void);
61213 + int (* const valid)(suspend_state_t state);
61214 + int (* const begin)(suspend_state_t state);
61215 + int (* const prepare)(void);
61216 + int (* const prepare_late)(void);
61217 + int (* const enter)(suspend_state_t state);
61218 + void (* const wake)(void);
61219 + void (* const finish)(void);
61220 + void (* const end)(void);
61221 + void (* const recover)(void);
61222 };
61223
61224 #ifdef CONFIG_SUSPEND
61225 @@ -120,7 +120,7 @@ struct platform_suspend_ops {
61226 * suspend_set_ops - set platform dependent suspend operations
61227 * @ops: The new suspend operations to set.
61228 */
61229 -extern void suspend_set_ops(struct platform_suspend_ops *ops);
61230 +extern void suspend_set_ops(const struct platform_suspend_ops *ops);
61231 extern int suspend_valid_only_mem(suspend_state_t state);
61232
61233 /**
61234 @@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t st
61235 #else /* !CONFIG_SUSPEND */
61236 #define suspend_valid_only_mem NULL
61237
61238 -static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
61239 +static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
61240 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
61241 #endif /* !CONFIG_SUSPEND */
61242
61243 @@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone
61244 * platforms which require special recovery actions in that situation.
61245 */
61246 struct platform_hibernation_ops {
61247 - int (*begin)(void);
61248 - void (*end)(void);
61249 - int (*pre_snapshot)(void);
61250 - void (*finish)(void);
61251 - int (*prepare)(void);
61252 - int (*enter)(void);
61253 - void (*leave)(void);
61254 - int (*pre_restore)(void);
61255 - void (*restore_cleanup)(void);
61256 - void (*recover)(void);
61257 + int (* const begin)(void);
61258 + void (* const end)(void);
61259 + int (* const pre_snapshot)(void);
61260 + void (* const finish)(void);
61261 + int (* const prepare)(void);
61262 + int (* const enter)(void);
61263 + void (* const leave)(void);
61264 + int (* const pre_restore)(void);
61265 + void (* const restore_cleanup)(void);
61266 + void (* const recover)(void);
61267 };
61268
61269 #ifdef CONFIG_HIBERNATION
61270 @@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct
61271 extern void swsusp_unset_page_free(struct page *);
61272 extern unsigned long get_safe_page(gfp_t gfp_mask);
61273
61274 -extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
61275 +extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
61276 extern int hibernate(void);
61277 extern bool system_entering_hibernation(void);
61278 #else /* CONFIG_HIBERNATION */
61279 @@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidd
61280 static inline void swsusp_set_page_free(struct page *p) {}
61281 static inline void swsusp_unset_page_free(struct page *p) {}
61282
61283 -static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
61284 +static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
61285 static inline int hibernate(void) { return -ENOSYS; }
61286 static inline bool system_entering_hibernation(void) { return false; }
61287 #endif /* CONFIG_HIBERNATION */
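The suspend and hibernation hunks above (like the sysfs_ops, neigh_ops and snd_ac97_build_ops ones later in this section) apply one idea: each hook is declared as a "* const" pointer, so the operations table cannot be retargeted after it is initialised, and the matching set_ops() prototypes take a const pointer so the table itself can live in read-only data. A small sketch of the compile-time effect, with made-up names:

#include <stdio.h>

/* demo_ops mirrors the pattern above: the function pointer itself is const,
 * so it can only be set by an initialiser. */
struct demo_ops {
	int (* const enter)(int state);
};

static int real_enter(int state)
{
	return state + 1;
}

int main(void)
{
	struct demo_ops ops = { .enter = real_enter };

	/* ops.enter = other_fn;  would be rejected at compile time:
	 * "assignment of read-only member 'enter'" */
	printf("%d\n", ops.enter(41));
	return 0;
}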
61288 diff -urNp linux-2.6.32.44/include/linux/sysctl.h linux-2.6.32.44/include/linux/sysctl.h
61289 --- linux-2.6.32.44/include/linux/sysctl.h 2011-03-27 14:31:47.000000000 -0400
61290 +++ linux-2.6.32.44/include/linux/sysctl.h 2011-04-17 15:56:46.000000000 -0400
61291 @@ -164,7 +164,11 @@ enum
61292 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
61293 };
61294
61295 -
61296 +#ifdef CONFIG_PAX_SOFTMODE
61297 +enum {
61298 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
61299 +};
61300 +#endif
61301
61302 /* CTL_VM names: */
61303 enum
61304 @@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_tab
61305
61306 extern int proc_dostring(struct ctl_table *, int,
61307 void __user *, size_t *, loff_t *);
61308 +extern int proc_dostring_modpriv(struct ctl_table *, int,
61309 + void __user *, size_t *, loff_t *);
61310 extern int proc_dointvec(struct ctl_table *, int,
61311 void __user *, size_t *, loff_t *);
61312 extern int proc_dointvec_minmax(struct ctl_table *, int,
61313 @@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name,
61314
61315 extern ctl_handler sysctl_data;
61316 extern ctl_handler sysctl_string;
61317 +extern ctl_handler sysctl_string_modpriv;
61318 extern ctl_handler sysctl_intvec;
61319 extern ctl_handler sysctl_jiffies;
61320 extern ctl_handler sysctl_ms_jiffies;
61321 diff -urNp linux-2.6.32.44/include/linux/sysfs.h linux-2.6.32.44/include/linux/sysfs.h
61322 --- linux-2.6.32.44/include/linux/sysfs.h 2011-03-27 14:31:47.000000000 -0400
61323 +++ linux-2.6.32.44/include/linux/sysfs.h 2011-04-17 15:56:46.000000000 -0400
61324 @@ -75,8 +75,8 @@ struct bin_attribute {
61325 };
61326
61327 struct sysfs_ops {
61328 - ssize_t (*show)(struct kobject *, struct attribute *,char *);
61329 - ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
61330 + ssize_t (* const show)(struct kobject *, struct attribute *,char *);
61331 + ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
61332 };
61333
61334 struct sysfs_dirent;
61335 diff -urNp linux-2.6.32.44/include/linux/thread_info.h linux-2.6.32.44/include/linux/thread_info.h
61336 --- linux-2.6.32.44/include/linux/thread_info.h 2011-03-27 14:31:47.000000000 -0400
61337 +++ linux-2.6.32.44/include/linux/thread_info.h 2011-04-17 15:56:46.000000000 -0400
61338 @@ -23,7 +23,7 @@ struct restart_block {
61339 };
61340 /* For futex_wait and futex_wait_requeue_pi */
61341 struct {
61342 - u32 *uaddr;
61343 + u32 __user *uaddr;
61344 u32 val;
61345 u32 flags;
61346 u32 bitset;
61347 diff -urNp linux-2.6.32.44/include/linux/tty.h linux-2.6.32.44/include/linux/tty.h
61348 --- linux-2.6.32.44/include/linux/tty.h 2011-03-27 14:31:47.000000000 -0400
61349 +++ linux-2.6.32.44/include/linux/tty.h 2011-08-05 20:33:55.000000000 -0400
61350 @@ -493,7 +493,6 @@ extern void tty_ldisc_begin(void);
61351 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
61352 extern void tty_ldisc_enable(struct tty_struct *tty);
61353
61354 -
61355 /* n_tty.c */
61356 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
61357
61358 diff -urNp linux-2.6.32.44/include/linux/tty_ldisc.h linux-2.6.32.44/include/linux/tty_ldisc.h
61359 --- linux-2.6.32.44/include/linux/tty_ldisc.h 2011-03-27 14:31:47.000000000 -0400
61360 +++ linux-2.6.32.44/include/linux/tty_ldisc.h 2011-04-17 15:56:46.000000000 -0400
61361 @@ -139,7 +139,7 @@ struct tty_ldisc_ops {
61362
61363 struct module *owner;
61364
61365 - int refcount;
61366 + atomic_t refcount;
61367 };
61368
61369 struct tty_ldisc {
61370 diff -urNp linux-2.6.32.44/include/linux/types.h linux-2.6.32.44/include/linux/types.h
61371 --- linux-2.6.32.44/include/linux/types.h 2011-03-27 14:31:47.000000000 -0400
61372 +++ linux-2.6.32.44/include/linux/types.h 2011-04-17 15:56:46.000000000 -0400
61373 @@ -191,10 +191,26 @@ typedef struct {
61374 volatile int counter;
61375 } atomic_t;
61376
61377 +#ifdef CONFIG_PAX_REFCOUNT
61378 +typedef struct {
61379 + volatile int counter;
61380 +} atomic_unchecked_t;
61381 +#else
61382 +typedef atomic_t atomic_unchecked_t;
61383 +#endif
61384 +
61385 #ifdef CONFIG_64BIT
61386 typedef struct {
61387 volatile long counter;
61388 } atomic64_t;
61389 +
61390 +#ifdef CONFIG_PAX_REFCOUNT
61391 +typedef struct {
61392 + volatile long counter;
61393 +} atomic64_unchecked_t;
61394 +#else
61395 +typedef atomic64_t atomic64_unchecked_t;
61396 +#endif
61397 #endif
61398
61399 struct ustat {
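The types.h hunk above is the anchor for the many atomic_t to atomic_unchecked_t conversions in this section: under CONFIG_PAX_REFCOUNT the ordinary atomic operations gain overflow detection, while atomic_unchecked_t opts a counter out of that check where wrap-around is harmless (statistics, ID generators); without the option the two types are identical. The real implementation is arch-specific; the sketch below only illustrates the checked/unchecked distinction in user space, using a GCC builtin:

#include <stdio.h>
#include <limits.h>

struct checked_counter   { volatile int counter; };  /* analogue of atomic_t */
struct unchecked_counter { volatile int counter; };  /* analogue of atomic_unchecked_t */

/* Refuses to increment past INT_MAX, loosely mirroring the behaviour a
 * checked refcount needs (the kernel does this atomically, in asm). */
static int checked_inc(struct checked_counter *c)
{
	int old = c->counter, next;

	if (__builtin_add_overflow(old, 1, &next))
		return -1;
	c->counter = next;
	return 0;
}

/* Plain increment: fine for counters where wrapping has no security impact. */
static void unchecked_inc(struct unchecked_counter *c)
{
	c->counter++;
}

int main(void)
{
	struct checked_counter   ref  = { INT_MAX };
	struct unchecked_counter stat = { 0 };

	unchecked_inc(&stat);
	printf("stat = %d\n", stat.counter);                            /* 1 */
	printf("inc at INT_MAX -> %d (refused)\n", checked_inc(&ref));  /* -1 */
	ref.counter = 0;
	printf("inc at 0       -> %d, ref = %d\n", checked_inc(&ref), ref.counter);
	return 0;
}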
61400 diff -urNp linux-2.6.32.44/include/linux/uaccess.h linux-2.6.32.44/include/linux/uaccess.h
61401 --- linux-2.6.32.44/include/linux/uaccess.h 2011-03-27 14:31:47.000000000 -0400
61402 +++ linux-2.6.32.44/include/linux/uaccess.h 2011-04-17 15:56:46.000000000 -0400
61403 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
61404 long ret; \
61405 mm_segment_t old_fs = get_fs(); \
61406 \
61407 - set_fs(KERNEL_DS); \
61408 pagefault_disable(); \
61409 + set_fs(KERNEL_DS); \
61410 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
61411 - pagefault_enable(); \
61412 set_fs(old_fs); \
61413 + pagefault_enable(); \
61414 ret; \
61415 })
61416
61417 @@ -93,7 +93,7 @@ static inline unsigned long __copy_from_
61418 * Safely read from address @src to the buffer at @dst. If a kernel fault
61419 * happens, handle that and return -EFAULT.
61420 */
61421 -extern long probe_kernel_read(void *dst, void *src, size_t size);
61422 +extern long probe_kernel_read(void *dst, const void *src, size_t size);
61423
61424 /*
61425 * probe_kernel_write(): safely attempt to write to a location
61426 @@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst,
61427 * Safely write to address @dst from the buffer at @src. If a kernel fault
61428 * happens, handle that and return -EFAULT.
61429 */
61430 -extern long probe_kernel_write(void *dst, void *src, size_t size);
61431 +extern long probe_kernel_write(void *dst, const void *src, size_t size);
61432
61433 #endif /* __LINUX_UACCESS_H__ */
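Two things change in the uaccess.h hunk above: probe_kernel_read()/probe_kernel_write() take a const source, and inside the inatomic copy helper the set_fs(KERNEL_DS) window is moved strictly inside the pagefault_disable()/pagefault_enable() bracket, apparently so the widened address limit never exists while a page fault could still be serviced. The nesting it establishes, as a trivial stand-alone sketch with stand-in functions:

#include <stdio.h>

static int faults_disabled;   /* stand-in for the pagefault-disable count */
static int kernel_limit;      /* stand-in for the KERNEL_DS address limit */

static void pagefault_disable_(void) { faults_disabled++; }
static void pagefault_enable_(void)  { faults_disabled--; }
static void set_fs_kernel(void)      { kernel_limit = 1; }
static void set_fs_user(void)        { kernel_limit = 0; }

int main(void)
{
	pagefault_disable_();   /* outer bracket opens first ...           */
	set_fs_kernel();        /* ... limit is widened only inside it     */

	/* __copy_from_user_inatomic() would run here */

	set_fs_user();          /* inner bracket closes first (LIFO order) */
	pagefault_enable_();

	printf("faults_disabled=%d kernel_limit=%d\n",
	       faults_disabled, kernel_limit);
	return 0;
}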
61434 diff -urNp linux-2.6.32.44/include/linux/unaligned/access_ok.h linux-2.6.32.44/include/linux/unaligned/access_ok.h
61435 --- linux-2.6.32.44/include/linux/unaligned/access_ok.h 2011-03-27 14:31:47.000000000 -0400
61436 +++ linux-2.6.32.44/include/linux/unaligned/access_ok.h 2011-04-17 15:56:46.000000000 -0400
61437 @@ -6,32 +6,32 @@
61438
61439 static inline u16 get_unaligned_le16(const void *p)
61440 {
61441 - return le16_to_cpup((__le16 *)p);
61442 + return le16_to_cpup((const __le16 *)p);
61443 }
61444
61445 static inline u32 get_unaligned_le32(const void *p)
61446 {
61447 - return le32_to_cpup((__le32 *)p);
61448 + return le32_to_cpup((const __le32 *)p);
61449 }
61450
61451 static inline u64 get_unaligned_le64(const void *p)
61452 {
61453 - return le64_to_cpup((__le64 *)p);
61454 + return le64_to_cpup((const __le64 *)p);
61455 }
61456
61457 static inline u16 get_unaligned_be16(const void *p)
61458 {
61459 - return be16_to_cpup((__be16 *)p);
61460 + return be16_to_cpup((const __be16 *)p);
61461 }
61462
61463 static inline u32 get_unaligned_be32(const void *p)
61464 {
61465 - return be32_to_cpup((__be32 *)p);
61466 + return be32_to_cpup((const __be32 *)p);
61467 }
61468
61469 static inline u64 get_unaligned_be64(const void *p)
61470 {
61471 - return be64_to_cpup((__be64 *)p);
61472 + return be64_to_cpup((const __be64 *)p);
61473 }
61474
61475 static inline void put_unaligned_le16(u16 val, void *p)
61476 diff -urNp linux-2.6.32.44/include/linux/vmalloc.h linux-2.6.32.44/include/linux/vmalloc.h
61477 --- linux-2.6.32.44/include/linux/vmalloc.h 2011-03-27 14:31:47.000000000 -0400
61478 +++ linux-2.6.32.44/include/linux/vmalloc.h 2011-04-17 15:56:46.000000000 -0400
61479 @@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
61480 #define VM_MAP 0x00000004 /* vmap()ed pages */
61481 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
61482 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
61483 +
61484 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
61485 +#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
61486 +#endif
61487 +
61488 /* bits [20..32] reserved for arch specific ioremap internals */
61489
61490 /*
61491 @@ -123,4 +128,81 @@ struct vm_struct **pcpu_get_vm_areas(con
61492
61493 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
61494
61495 +#define vmalloc(x) \
61496 +({ \
61497 + void *___retval; \
61498 + intoverflow_t ___x = (intoverflow_t)x; \
61499 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
61500 + ___retval = NULL; \
61501 + else \
61502 + ___retval = vmalloc((unsigned long)___x); \
61503 + ___retval; \
61504 +})
61505 +
61506 +#define __vmalloc(x, y, z) \
61507 +({ \
61508 + void *___retval; \
61509 + intoverflow_t ___x = (intoverflow_t)x; \
61510 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
61511 + ___retval = NULL; \
61512 + else \
61513 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
61514 + ___retval; \
61515 +})
61516 +
61517 +#define vmalloc_user(x) \
61518 +({ \
61519 + void *___retval; \
61520 + intoverflow_t ___x = (intoverflow_t)x; \
61521 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
61522 + ___retval = NULL; \
61523 + else \
61524 + ___retval = vmalloc_user((unsigned long)___x); \
61525 + ___retval; \
61526 +})
61527 +
61528 +#define vmalloc_exec(x) \
61529 +({ \
61530 + void *___retval; \
61531 + intoverflow_t ___x = (intoverflow_t)x; \
61532 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
61533 + ___retval = NULL; \
61534 + else \
61535 + ___retval = vmalloc_exec((unsigned long)___x); \
61536 + ___retval; \
61537 +})
61538 +
61539 +#define vmalloc_node(x, y) \
61540 +({ \
61541 + void *___retval; \
61542 + intoverflow_t ___x = (intoverflow_t)x; \
61543 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
61544 + ___retval = NULL; \
61545 + else \
61546 + ___retval = vmalloc_node((unsigned long)___x, (y));\
61547 + ___retval; \
61548 +})
61549 +
61550 +#define vmalloc_32(x) \
61551 +({ \
61552 + void *___retval; \
61553 + intoverflow_t ___x = (intoverflow_t)x; \
61554 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
61555 + ___retval = NULL; \
61556 + else \
61557 + ___retval = vmalloc_32((unsigned long)___x); \
61558 + ___retval; \
61559 +})
61560 +
61561 +#define vmalloc_32_user(x) \
61562 +({ \
61563 + void *___retval; \
61564 + intoverflow_t ___x = (intoverflow_t)x; \
61565 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
61566 + ___retval = NULL; \
61567 + else \
61568 + ___retval = vmalloc_32_user((unsigned long)___x);\
61569 + ___retval; \
61570 +})
61571 +
61572 #endif /* _LINUX_VMALLOC_H */
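The vmalloc.h wrappers above follow the same pattern as the kmalloc()/kzalloc() ones earlier: a statement-expression macro shadows the real allocator, evaluates the requested size as an intoverflow_t (presumably defined elsewhere in the patch as a wider integer type), and returns NULL with a warning when the size does not fit the native range. A compressed user-space sketch of the pattern, assuming a 32-bit native size type and using malloc as the stand-in allocator:

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

/* Assumption: a 32-bit "native" size type so the demo can overflow it; the
 * kernel wrappers use unsigned long and intoverflow_t instead. */
typedef unsigned int       native_size_t;
typedef unsigned long long intoverflow_t;

static void *real_alloc(native_size_t n)
{
	return malloc(n);
}

/* Mirrors the shape of the wrappers above.  Note that x is substituted
 * without surrounding parentheses, so the cast lands on the first operand
 * of a caller expression like "count * size" and the multiplication itself
 * is carried out in the wide type, exposing the overflow. */
#define checked_alloc(x)                                             \
({                                                                   \
	void *___retval;                                             \
	intoverflow_t ___x = (intoverflow_t)x;                       \
	if (___x > UINT_MAX) {                                       \
		fprintf(stderr, "alloc size overflow\n");            \
		___retval = NULL;                                    \
	} else                                                       \
		___retval = real_alloc((native_size_t)___x);         \
	___retval;                                                   \
})

int main(void)
{
	unsigned int count = 70000, size = 70000;   /* ~4.9e9: wraps 32 bits */
	void *p = checked_alloc(count * size);      /* caught by the wrapper */
	void *q = checked_alloc(4096);              /* ordinary allocation   */

	printf("p=%p q=%p\n", p, q);
	free(q);
	return 0;
}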
61573 diff -urNp linux-2.6.32.44/include/linux/vmstat.h linux-2.6.32.44/include/linux/vmstat.h
61574 --- linux-2.6.32.44/include/linux/vmstat.h 2011-03-27 14:31:47.000000000 -0400
61575 +++ linux-2.6.32.44/include/linux/vmstat.h 2011-04-17 15:56:46.000000000 -0400
61576 @@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(in
61577 /*
61578 * Zone based page accounting with per cpu differentials.
61579 */
61580 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61581 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61582
61583 static inline void zone_page_state_add(long x, struct zone *zone,
61584 enum zone_stat_item item)
61585 {
61586 - atomic_long_add(x, &zone->vm_stat[item]);
61587 - atomic_long_add(x, &vm_stat[item]);
61588 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
61589 + atomic_long_add_unchecked(x, &vm_stat[item]);
61590 }
61591
61592 static inline unsigned long global_page_state(enum zone_stat_item item)
61593 {
61594 - long x = atomic_long_read(&vm_stat[item]);
61595 + long x = atomic_long_read_unchecked(&vm_stat[item]);
61596 #ifdef CONFIG_SMP
61597 if (x < 0)
61598 x = 0;
61599 @@ -158,7 +158,7 @@ static inline unsigned long global_page_
61600 static inline unsigned long zone_page_state(struct zone *zone,
61601 enum zone_stat_item item)
61602 {
61603 - long x = atomic_long_read(&zone->vm_stat[item]);
61604 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61605 #ifdef CONFIG_SMP
61606 if (x < 0)
61607 x = 0;
61608 @@ -175,7 +175,7 @@ static inline unsigned long zone_page_st
61609 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
61610 enum zone_stat_item item)
61611 {
61612 - long x = atomic_long_read(&zone->vm_stat[item]);
61613 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61614
61615 #ifdef CONFIG_SMP
61616 int cpu;
61617 @@ -264,8 +264,8 @@ static inline void __mod_zone_page_state
61618
61619 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
61620 {
61621 - atomic_long_inc(&zone->vm_stat[item]);
61622 - atomic_long_inc(&vm_stat[item]);
61623 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
61624 + atomic_long_inc_unchecked(&vm_stat[item]);
61625 }
61626
61627 static inline void __inc_zone_page_state(struct page *page,
61628 @@ -276,8 +276,8 @@ static inline void __inc_zone_page_state
61629
61630 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
61631 {
61632 - atomic_long_dec(&zone->vm_stat[item]);
61633 - atomic_long_dec(&vm_stat[item]);
61634 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
61635 + atomic_long_dec_unchecked(&vm_stat[item]);
61636 }
61637
61638 static inline void __dec_zone_page_state(struct page *page,
61639 diff -urNp linux-2.6.32.44/include/media/v4l2-dev.h linux-2.6.32.44/include/media/v4l2-dev.h
61640 --- linux-2.6.32.44/include/media/v4l2-dev.h 2011-03-27 14:31:47.000000000 -0400
61641 +++ linux-2.6.32.44/include/media/v4l2-dev.h 2011-08-05 20:33:55.000000000 -0400
61642 @@ -34,7 +34,7 @@ struct v4l2_device;
61643 #define V4L2_FL_UNREGISTERED (0)
61644
61645 struct v4l2_file_operations {
61646 - struct module *owner;
61647 + struct module * const owner;
61648 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
61649 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
61650 unsigned int (*poll) (struct file *, struct poll_table_struct *);
61651 diff -urNp linux-2.6.32.44/include/media/v4l2-device.h linux-2.6.32.44/include/media/v4l2-device.h
61652 --- linux-2.6.32.44/include/media/v4l2-device.h 2011-03-27 14:31:47.000000000 -0400
61653 +++ linux-2.6.32.44/include/media/v4l2-device.h 2011-05-04 17:56:28.000000000 -0400
61654 @@ -71,7 +71,7 @@ int __must_check v4l2_device_register(st
61655 this function returns 0. If the name ends with a digit (e.g. cx18),
61656 then the name will be set to cx18-0 since cx180 looks really odd. */
61657 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
61658 - atomic_t *instance);
61659 + atomic_unchecked_t *instance);
61660
61661 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
61662 Since the parent disappears this ensures that v4l2_dev doesn't have an
61663 diff -urNp linux-2.6.32.44/include/net/flow.h linux-2.6.32.44/include/net/flow.h
61664 --- linux-2.6.32.44/include/net/flow.h 2011-03-27 14:31:47.000000000 -0400
61665 +++ linux-2.6.32.44/include/net/flow.h 2011-05-04 17:56:28.000000000 -0400
61666 @@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net
61667 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
61668 u8 dir, flow_resolve_t resolver);
61669 extern void flow_cache_flush(void);
61670 -extern atomic_t flow_cache_genid;
61671 +extern atomic_unchecked_t flow_cache_genid;
61672
61673 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
61674 {
61675 diff -urNp linux-2.6.32.44/include/net/inetpeer.h linux-2.6.32.44/include/net/inetpeer.h
61676 --- linux-2.6.32.44/include/net/inetpeer.h 2011-03-27 14:31:47.000000000 -0400
61677 +++ linux-2.6.32.44/include/net/inetpeer.h 2011-04-17 15:56:46.000000000 -0400
61678 @@ -24,7 +24,7 @@ struct inet_peer
61679 __u32 dtime; /* the time of last use of not
61680 * referenced entries */
61681 atomic_t refcnt;
61682 - atomic_t rid; /* Frag reception counter */
61683 + atomic_unchecked_t rid; /* Frag reception counter */
61684 __u32 tcp_ts;
61685 unsigned long tcp_ts_stamp;
61686 };
61687 diff -urNp linux-2.6.32.44/include/net/ip_vs.h linux-2.6.32.44/include/net/ip_vs.h
61688 --- linux-2.6.32.44/include/net/ip_vs.h 2011-03-27 14:31:47.000000000 -0400
61689 +++ linux-2.6.32.44/include/net/ip_vs.h 2011-05-04 17:56:28.000000000 -0400
61690 @@ -365,7 +365,7 @@ struct ip_vs_conn {
61691 struct ip_vs_conn *control; /* Master control connection */
61692 atomic_t n_control; /* Number of controlled ones */
61693 struct ip_vs_dest *dest; /* real server */
61694 - atomic_t in_pkts; /* incoming packet counter */
61695 + atomic_unchecked_t in_pkts; /* incoming packet counter */
61696
61697 /* packet transmitter for different forwarding methods. If it
61698 mangles the packet, it must return NF_DROP or better NF_STOLEN,
61699 @@ -466,7 +466,7 @@ struct ip_vs_dest {
61700 union nf_inet_addr addr; /* IP address of the server */
61701 __be16 port; /* port number of the server */
61702 volatile unsigned flags; /* dest status flags */
61703 - atomic_t conn_flags; /* flags to copy to conn */
61704 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
61705 atomic_t weight; /* server weight */
61706
61707 atomic_t refcnt; /* reference counter */
61708 diff -urNp linux-2.6.32.44/include/net/irda/ircomm_core.h linux-2.6.32.44/include/net/irda/ircomm_core.h
61709 --- linux-2.6.32.44/include/net/irda/ircomm_core.h 2011-03-27 14:31:47.000000000 -0400
61710 +++ linux-2.6.32.44/include/net/irda/ircomm_core.h 2011-08-05 20:33:55.000000000 -0400
61711 @@ -51,7 +51,7 @@ typedef struct {
61712 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
61713 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
61714 struct ircomm_info *);
61715 -} call_t;
61716 +} __no_const call_t;
61717
61718 struct ircomm_cb {
61719 irda_queue_t queue;
61720 diff -urNp linux-2.6.32.44/include/net/irda/ircomm_tty.h linux-2.6.32.44/include/net/irda/ircomm_tty.h
61721 --- linux-2.6.32.44/include/net/irda/ircomm_tty.h 2011-03-27 14:31:47.000000000 -0400
61722 +++ linux-2.6.32.44/include/net/irda/ircomm_tty.h 2011-04-17 15:56:46.000000000 -0400
61723 @@ -35,6 +35,7 @@
61724 #include <linux/termios.h>
61725 #include <linux/timer.h>
61726 #include <linux/tty.h> /* struct tty_struct */
61727 +#include <asm/local.h>
61728
61729 #include <net/irda/irias_object.h>
61730 #include <net/irda/ircomm_core.h>
61731 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
61732 unsigned short close_delay;
61733 unsigned short closing_wait; /* time to wait before closing */
61734
61735 - int open_count;
61736 - int blocked_open; /* # of blocked opens */
61737 + local_t open_count;
61738 + local_t blocked_open; /* # of blocked opens */
61739
61740 /* Protect concurent access to :
61741 * o self->open_count
61742 diff -urNp linux-2.6.32.44/include/net/iucv/af_iucv.h linux-2.6.32.44/include/net/iucv/af_iucv.h
61743 --- linux-2.6.32.44/include/net/iucv/af_iucv.h 2011-03-27 14:31:47.000000000 -0400
61744 +++ linux-2.6.32.44/include/net/iucv/af_iucv.h 2011-05-04 17:56:28.000000000 -0400
61745 @@ -87,7 +87,7 @@ struct iucv_sock {
61746 struct iucv_sock_list {
61747 struct hlist_head head;
61748 rwlock_t lock;
61749 - atomic_t autobind_name;
61750 + atomic_unchecked_t autobind_name;
61751 };
61752
61753 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
61754 diff -urNp linux-2.6.32.44/include/net/lapb.h linux-2.6.32.44/include/net/lapb.h
61755 --- linux-2.6.32.44/include/net/lapb.h 2011-03-27 14:31:47.000000000 -0400
61756 +++ linux-2.6.32.44/include/net/lapb.h 2011-08-05 20:33:55.000000000 -0400
61757 @@ -95,7 +95,7 @@ struct lapb_cb {
61758 struct sk_buff_head write_queue;
61759 struct sk_buff_head ack_queue;
61760 unsigned char window;
61761 - struct lapb_register_struct callbacks;
61762 + struct lapb_register_struct *callbacks;
61763
61764 /* FRMR control information */
61765 struct lapb_frame frmr_data;
61766 diff -urNp linux-2.6.32.44/include/net/neighbour.h linux-2.6.32.44/include/net/neighbour.h
61767 --- linux-2.6.32.44/include/net/neighbour.h 2011-03-27 14:31:47.000000000 -0400
61768 +++ linux-2.6.32.44/include/net/neighbour.h 2011-04-17 15:56:46.000000000 -0400
61769 @@ -125,12 +125,12 @@ struct neighbour
61770 struct neigh_ops
61771 {
61772 int family;
61773 - void (*solicit)(struct neighbour *, struct sk_buff*);
61774 - void (*error_report)(struct neighbour *, struct sk_buff*);
61775 - int (*output)(struct sk_buff*);
61776 - int (*connected_output)(struct sk_buff*);
61777 - int (*hh_output)(struct sk_buff*);
61778 - int (*queue_xmit)(struct sk_buff*);
61779 + void (* const solicit)(struct neighbour *, struct sk_buff*);
61780 + void (* const error_report)(struct neighbour *, struct sk_buff*);
61781 + int (* const output)(struct sk_buff*);
61782 + int (* const connected_output)(struct sk_buff*);
61783 + int (* const hh_output)(struct sk_buff*);
61784 + int (* const queue_xmit)(struct sk_buff*);
61785 };
61786
61787 struct pneigh_entry
61788 diff -urNp linux-2.6.32.44/include/net/netlink.h linux-2.6.32.44/include/net/netlink.h
61789 --- linux-2.6.32.44/include/net/netlink.h 2011-07-13 17:23:04.000000000 -0400
61790 +++ linux-2.6.32.44/include/net/netlink.h 2011-07-13 17:23:19.000000000 -0400
61791 @@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct
61792 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
61793 {
61794 if (mark)
61795 - skb_trim(skb, (unsigned char *) mark - skb->data);
61796 + skb_trim(skb, (const unsigned char *) mark - skb->data);
61797 }
61798
61799 /**
61800 diff -urNp linux-2.6.32.44/include/net/netns/ipv4.h linux-2.6.32.44/include/net/netns/ipv4.h
61801 --- linux-2.6.32.44/include/net/netns/ipv4.h 2011-03-27 14:31:47.000000000 -0400
61802 +++ linux-2.6.32.44/include/net/netns/ipv4.h 2011-05-04 17:56:28.000000000 -0400
61803 @@ -54,7 +54,7 @@ struct netns_ipv4 {
61804 int current_rt_cache_rebuild_count;
61805
61806 struct timer_list rt_secret_timer;
61807 - atomic_t rt_genid;
61808 + atomic_unchecked_t rt_genid;
61809
61810 #ifdef CONFIG_IP_MROUTE
61811 struct sock *mroute_sk;
61812 diff -urNp linux-2.6.32.44/include/net/sctp/sctp.h linux-2.6.32.44/include/net/sctp/sctp.h
61813 --- linux-2.6.32.44/include/net/sctp/sctp.h 2011-03-27 14:31:47.000000000 -0400
61814 +++ linux-2.6.32.44/include/net/sctp/sctp.h 2011-04-17 15:56:46.000000000 -0400
61815 @@ -305,8 +305,8 @@ extern int sctp_debug_flag;
61816
61817 #else /* SCTP_DEBUG */
61818
61819 -#define SCTP_DEBUG_PRINTK(whatever...)
61820 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
61821 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
61822 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
61823 #define SCTP_ENABLE_DEBUG
61824 #define SCTP_DISABLE_DEBUG
61825 #define SCTP_ASSERT(expr, str, func)
61826 diff -urNp linux-2.6.32.44/include/net/secure_seq.h linux-2.6.32.44/include/net/secure_seq.h
61827 --- linux-2.6.32.44/include/net/secure_seq.h 1969-12-31 19:00:00.000000000 -0500
61828 +++ linux-2.6.32.44/include/net/secure_seq.h 2011-08-07 19:48:09.000000000 -0400
61829 @@ -0,0 +1,20 @@
61830 +#ifndef _NET_SECURE_SEQ
61831 +#define _NET_SECURE_SEQ
61832 +
61833 +#include <linux/types.h>
61834 +
61835 +extern __u32 secure_ip_id(__be32 daddr);
61836 +extern __u32 secure_ipv6_id(const __be32 daddr[4]);
61837 +extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
61838 +extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
61839 + __be16 dport);
61840 +extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
61841 + __be16 sport, __be16 dport);
61842 +extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
61843 + __be16 sport, __be16 dport);
61844 +extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
61845 + __be16 sport, __be16 dport);
61846 +extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
61847 + __be16 sport, __be16 dport);
61848 +
61849 +#endif /* _NET_SECURE_SEQ */
61850 diff -urNp linux-2.6.32.44/include/net/sock.h linux-2.6.32.44/include/net/sock.h
61851 --- linux-2.6.32.44/include/net/sock.h 2011-03-27 14:31:47.000000000 -0400
61852 +++ linux-2.6.32.44/include/net/sock.h 2011-05-04 17:56:28.000000000 -0400
61853 @@ -272,7 +272,7 @@ struct sock {
61854 rwlock_t sk_callback_lock;
61855 int sk_err,
61856 sk_err_soft;
61857 - atomic_t sk_drops;
61858 + atomic_unchecked_t sk_drops;
61859 unsigned short sk_ack_backlog;
61860 unsigned short sk_max_ack_backlog;
61861 __u32 sk_priority;
61862 diff -urNp linux-2.6.32.44/include/net/tcp.h linux-2.6.32.44/include/net/tcp.h
61863 --- linux-2.6.32.44/include/net/tcp.h 2011-03-27 14:31:47.000000000 -0400
61864 +++ linux-2.6.32.44/include/net/tcp.h 2011-04-17 15:56:46.000000000 -0400
61865 @@ -1444,6 +1444,7 @@ enum tcp_seq_states {
61866 struct tcp_seq_afinfo {
61867 char *name;
61868 sa_family_t family;
61869 + /* cannot be const */
61870 struct file_operations seq_fops;
61871 struct seq_operations seq_ops;
61872 };
61873 diff -urNp linux-2.6.32.44/include/net/udp.h linux-2.6.32.44/include/net/udp.h
61874 --- linux-2.6.32.44/include/net/udp.h 2011-03-27 14:31:47.000000000 -0400
61875 +++ linux-2.6.32.44/include/net/udp.h 2011-04-17 15:56:46.000000000 -0400
61876 @@ -187,6 +187,7 @@ struct udp_seq_afinfo {
61877 char *name;
61878 sa_family_t family;
61879 struct udp_table *udp_table;
61880 + /* cannot be const */
61881 struct file_operations seq_fops;
61882 struct seq_operations seq_ops;
61883 };
61884 diff -urNp linux-2.6.32.44/include/rdma/iw_cm.h linux-2.6.32.44/include/rdma/iw_cm.h
61885 --- linux-2.6.32.44/include/rdma/iw_cm.h 2011-03-27 14:31:47.000000000 -0400
61886 +++ linux-2.6.32.44/include/rdma/iw_cm.h 2011-08-05 20:33:55.000000000 -0400
61887 @@ -129,7 +129,7 @@ struct iw_cm_verbs {
61888 int backlog);
61889
61890 int (*destroy_listen)(struct iw_cm_id *cm_id);
61891 -};
61892 +} __no_const;
61893
61894 /**
61895 * iw_create_cm_id - Create an IW CM identifier.
61896 diff -urNp linux-2.6.32.44/include/scsi/scsi_device.h linux-2.6.32.44/include/scsi/scsi_device.h
61897 --- linux-2.6.32.44/include/scsi/scsi_device.h 2011-04-17 17:00:52.000000000 -0400
61898 +++ linux-2.6.32.44/include/scsi/scsi_device.h 2011-05-04 17:56:28.000000000 -0400
61899 @@ -156,9 +156,9 @@ struct scsi_device {
61900 unsigned int max_device_blocked; /* what device_blocked counts down from */
61901 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
61902
61903 - atomic_t iorequest_cnt;
61904 - atomic_t iodone_cnt;
61905 - atomic_t ioerr_cnt;
61906 + atomic_unchecked_t iorequest_cnt;
61907 + atomic_unchecked_t iodone_cnt;
61908 + atomic_unchecked_t ioerr_cnt;
61909
61910 struct device sdev_gendev,
61911 sdev_dev;
61912 diff -urNp linux-2.6.32.44/include/scsi/scsi_transport_fc.h linux-2.6.32.44/include/scsi/scsi_transport_fc.h
61913 --- linux-2.6.32.44/include/scsi/scsi_transport_fc.h 2011-03-27 14:31:47.000000000 -0400
61914 +++ linux-2.6.32.44/include/scsi/scsi_transport_fc.h 2011-08-05 20:33:55.000000000 -0400
61915 @@ -663,9 +663,9 @@ struct fc_function_template {
61916 int (*bsg_timeout)(struct fc_bsg_job *);
61917
61918 /* allocation lengths for host-specific data */
61919 - u32 dd_fcrport_size;
61920 - u32 dd_fcvport_size;
61921 - u32 dd_bsg_size;
61922 + const u32 dd_fcrport_size;
61923 + const u32 dd_fcvport_size;
61924 + const u32 dd_bsg_size;
61925
61926 /*
61927 * The driver sets these to tell the transport class it
61928 @@ -675,39 +675,39 @@ struct fc_function_template {
61929 */
61930
61931 /* remote port fixed attributes */
61932 - unsigned long show_rport_maxframe_size:1;
61933 - unsigned long show_rport_supported_classes:1;
61934 - unsigned long show_rport_dev_loss_tmo:1;
61935 + const unsigned long show_rport_maxframe_size:1;
61936 + const unsigned long show_rport_supported_classes:1;
61937 + const unsigned long show_rport_dev_loss_tmo:1;
61938
61939 /*
61940 * target dynamic attributes
61941 * These should all be "1" if the driver uses the remote port
61942 * add/delete functions (so attributes reflect rport values).
61943 */
61944 - unsigned long show_starget_node_name:1;
61945 - unsigned long show_starget_port_name:1;
61946 - unsigned long show_starget_port_id:1;
61947 + const unsigned long show_starget_node_name:1;
61948 + const unsigned long show_starget_port_name:1;
61949 + const unsigned long show_starget_port_id:1;
61950
61951 /* host fixed attributes */
61952 - unsigned long show_host_node_name:1;
61953 - unsigned long show_host_port_name:1;
61954 - unsigned long show_host_permanent_port_name:1;
61955 - unsigned long show_host_supported_classes:1;
61956 - unsigned long show_host_supported_fc4s:1;
61957 - unsigned long show_host_supported_speeds:1;
61958 - unsigned long show_host_maxframe_size:1;
61959 - unsigned long show_host_serial_number:1;
61960 + const unsigned long show_host_node_name:1;
61961 + const unsigned long show_host_port_name:1;
61962 + const unsigned long show_host_permanent_port_name:1;
61963 + const unsigned long show_host_supported_classes:1;
61964 + const unsigned long show_host_supported_fc4s:1;
61965 + const unsigned long show_host_supported_speeds:1;
61966 + const unsigned long show_host_maxframe_size:1;
61967 + const unsigned long show_host_serial_number:1;
61968 /* host dynamic attributes */
61969 - unsigned long show_host_port_id:1;
61970 - unsigned long show_host_port_type:1;
61971 - unsigned long show_host_port_state:1;
61972 - unsigned long show_host_active_fc4s:1;
61973 - unsigned long show_host_speed:1;
61974 - unsigned long show_host_fabric_name:1;
61975 - unsigned long show_host_symbolic_name:1;
61976 - unsigned long show_host_system_hostname:1;
61977 + const unsigned long show_host_port_id:1;
61978 + const unsigned long show_host_port_type:1;
61979 + const unsigned long show_host_port_state:1;
61980 + const unsigned long show_host_active_fc4s:1;
61981 + const unsigned long show_host_speed:1;
61982 + const unsigned long show_host_fabric_name:1;
61983 + const unsigned long show_host_symbolic_name:1;
61984 + const unsigned long show_host_system_hostname:1;
61985
61986 - unsigned long disable_target_scan:1;
61987 + const unsigned long disable_target_scan:1;
61988 };
61989
61990
61991 diff -urNp linux-2.6.32.44/include/sound/ac97_codec.h linux-2.6.32.44/include/sound/ac97_codec.h
61992 --- linux-2.6.32.44/include/sound/ac97_codec.h 2011-03-27 14:31:47.000000000 -0400
61993 +++ linux-2.6.32.44/include/sound/ac97_codec.h 2011-04-17 15:56:46.000000000 -0400
61994 @@ -419,15 +419,15 @@
61995 struct snd_ac97;
61996
61997 struct snd_ac97_build_ops {
61998 - int (*build_3d) (struct snd_ac97 *ac97);
61999 - int (*build_specific) (struct snd_ac97 *ac97);
62000 - int (*build_spdif) (struct snd_ac97 *ac97);
62001 - int (*build_post_spdif) (struct snd_ac97 *ac97);
62002 + int (* const build_3d) (struct snd_ac97 *ac97);
62003 + int (* const build_specific) (struct snd_ac97 *ac97);
62004 + int (* const build_spdif) (struct snd_ac97 *ac97);
62005 + int (* const build_post_spdif) (struct snd_ac97 *ac97);
62006 #ifdef CONFIG_PM
62007 - void (*suspend) (struct snd_ac97 *ac97);
62008 - void (*resume) (struct snd_ac97 *ac97);
62009 + void (* const suspend) (struct snd_ac97 *ac97);
62010 + void (* const resume) (struct snd_ac97 *ac97);
62011 #endif
62012 - void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
62013 + void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
62014 };
62015
62016 struct snd_ac97_bus_ops {
62017 @@ -477,7 +477,7 @@ struct snd_ac97_template {
62018
62019 struct snd_ac97 {
62020 /* -- lowlevel (hardware) driver specific -- */
62021 - struct snd_ac97_build_ops * build_ops;
62022 + const struct snd_ac97_build_ops * build_ops;
62023 void *private_data;
62024 void (*private_free) (struct snd_ac97 *ac97);
62025 /* --- */
62026 diff -urNp linux-2.6.32.44/include/sound/ak4xxx-adda.h linux-2.6.32.44/include/sound/ak4xxx-adda.h
62027 --- linux-2.6.32.44/include/sound/ak4xxx-adda.h 2011-03-27 14:31:47.000000000 -0400
62028 +++ linux-2.6.32.44/include/sound/ak4xxx-adda.h 2011-08-05 20:33:55.000000000 -0400
62029 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
62030 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
62031 unsigned char val);
62032 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
62033 -};
62034 +} __no_const;
62035
62036 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
62037
62038 diff -urNp linux-2.6.32.44/include/sound/hwdep.h linux-2.6.32.44/include/sound/hwdep.h
62039 --- linux-2.6.32.44/include/sound/hwdep.h 2011-03-27 14:31:47.000000000 -0400
62040 +++ linux-2.6.32.44/include/sound/hwdep.h 2011-08-05 20:33:55.000000000 -0400
62041 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
62042 struct snd_hwdep_dsp_status *status);
62043 int (*dsp_load)(struct snd_hwdep *hw,
62044 struct snd_hwdep_dsp_image *image);
62045 -};
62046 +} __no_const;
62047
62048 struct snd_hwdep {
62049 struct snd_card *card;
62050 diff -urNp linux-2.6.32.44/include/sound/info.h linux-2.6.32.44/include/sound/info.h
62051 --- linux-2.6.32.44/include/sound/info.h 2011-03-27 14:31:47.000000000 -0400
62052 +++ linux-2.6.32.44/include/sound/info.h 2011-08-05 20:33:55.000000000 -0400
62053 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
62054 struct snd_info_buffer *buffer);
62055 void (*write)(struct snd_info_entry *entry,
62056 struct snd_info_buffer *buffer);
62057 -};
62058 +} __no_const;
62059
62060 struct snd_info_entry_ops {
62061 int (*open)(struct snd_info_entry *entry,
62062 diff -urNp linux-2.6.32.44/include/sound/sb16_csp.h linux-2.6.32.44/include/sound/sb16_csp.h
62063 --- linux-2.6.32.44/include/sound/sb16_csp.h 2011-03-27 14:31:47.000000000 -0400
62064 +++ linux-2.6.32.44/include/sound/sb16_csp.h 2011-08-05 20:33:55.000000000 -0400
62065 @@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
62066 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
62067 int (*csp_stop) (struct snd_sb_csp * p);
62068 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
62069 -};
62070 +} __no_const;
62071
62072 /*
62073 * CSP private data
62074 diff -urNp linux-2.6.32.44/include/sound/ymfpci.h linux-2.6.32.44/include/sound/ymfpci.h
62075 --- linux-2.6.32.44/include/sound/ymfpci.h 2011-03-27 14:31:47.000000000 -0400
62076 +++ linux-2.6.32.44/include/sound/ymfpci.h 2011-05-04 17:56:28.000000000 -0400
62077 @@ -358,7 +358,7 @@ struct snd_ymfpci {
62078 spinlock_t reg_lock;
62079 spinlock_t voice_lock;
62080 wait_queue_head_t interrupt_sleep;
62081 - atomic_t interrupt_sleep_count;
62082 + atomic_unchecked_t interrupt_sleep_count;
62083 struct snd_info_entry *proc_entry;
62084 const struct firmware *dsp_microcode;
62085 const struct firmware *controller_microcode;
62086 diff -urNp linux-2.6.32.44/include/trace/events/irq.h linux-2.6.32.44/include/trace/events/irq.h
62087 --- linux-2.6.32.44/include/trace/events/irq.h 2011-03-27 14:31:47.000000000 -0400
62088 +++ linux-2.6.32.44/include/trace/events/irq.h 2011-04-17 15:56:46.000000000 -0400
62089 @@ -34,7 +34,7 @@
62090 */
62091 TRACE_EVENT(irq_handler_entry,
62092
62093 - TP_PROTO(int irq, struct irqaction *action),
62094 + TP_PROTO(int irq, const struct irqaction *action),
62095
62096 TP_ARGS(irq, action),
62097
62098 @@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
62099 */
62100 TRACE_EVENT(irq_handler_exit,
62101
62102 - TP_PROTO(int irq, struct irqaction *action, int ret),
62103 + TP_PROTO(int irq, const struct irqaction *action, int ret),
62104
62105 TP_ARGS(irq, action, ret),
62106
62107 @@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
62108 */
62109 TRACE_EVENT(softirq_entry,
62110
62111 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
62112 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
62113
62114 TP_ARGS(h, vec),
62115
62116 @@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
62117 */
62118 TRACE_EVENT(softirq_exit,
62119
62120 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
62121 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
62122
62123 TP_ARGS(h, vec),
62124
62125 diff -urNp linux-2.6.32.44/include/video/uvesafb.h linux-2.6.32.44/include/video/uvesafb.h
62126 --- linux-2.6.32.44/include/video/uvesafb.h 2011-03-27 14:31:47.000000000 -0400
62127 +++ linux-2.6.32.44/include/video/uvesafb.h 2011-04-17 15:56:46.000000000 -0400
62128 @@ -177,6 +177,7 @@ struct uvesafb_par {
62129 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
62130 u8 pmi_setpal; /* PMI for palette changes */
62131 u16 *pmi_base; /* protected mode interface location */
62132 + u8 *pmi_code; /* protected mode code location */
62133 void *pmi_start;
62134 void *pmi_pal;
62135 u8 *vbe_state_orig; /*
62136 diff -urNp linux-2.6.32.44/init/do_mounts.c linux-2.6.32.44/init/do_mounts.c
62137 --- linux-2.6.32.44/init/do_mounts.c 2011-03-27 14:31:47.000000000 -0400
62138 +++ linux-2.6.32.44/init/do_mounts.c 2011-04-17 15:56:46.000000000 -0400
62139 @@ -216,11 +216,11 @@ static void __init get_fs_names(char *pa
62140
62141 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
62142 {
62143 - int err = sys_mount(name, "/root", fs, flags, data);
62144 + int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
62145 if (err)
62146 return err;
62147
62148 - sys_chdir("/root");
62149 + sys_chdir((__force const char __user *)"/root");
62150 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
62151 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
62152 current->fs->pwd.mnt->mnt_sb->s_type->name,
62153 @@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...
62154 va_start(args, fmt);
62155 vsprintf(buf, fmt, args);
62156 va_end(args);
62157 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
62158 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
62159 if (fd >= 0) {
62160 sys_ioctl(fd, FDEJECT, 0);
62161 sys_close(fd);
62162 }
62163 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
62164 - fd = sys_open("/dev/console", O_RDWR, 0);
62165 + fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
62166 if (fd >= 0) {
62167 sys_ioctl(fd, TCGETS, (long)&termios);
62168 termios.c_lflag &= ~ICANON;
62169 sys_ioctl(fd, TCSETSF, (long)&termios);
62170 - sys_read(fd, &c, 1);
62171 + sys_read(fd, (char __user *)&c, 1);
62172 termios.c_lflag |= ICANON;
62173 sys_ioctl(fd, TCSETSF, (long)&termios);
62174 sys_close(fd);
62175 @@ -416,6 +416,6 @@ void __init prepare_namespace(void)
62176 mount_root();
62177 out:
62178 devtmpfs_mount("dev");
62179 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
62180 - sys_chroot(".");
62181 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
62182 + sys_chroot((__force char __user *)".");
62183 }
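The do_mounts*.c and initramfs.c hunks above and below mostly add (__force ... __user *) casts to strings and buffers that live in kernel space but are handed to sys_* calls declared with __user pointers; these early-boot calls run in a kernel thread whose address limit still covers kernel memory, and __force marks the deliberate address-space crossing for sparse. A stand-alone sketch of the annotation machinery, assuming the usual compiler.h-style definitions:

#include <stdio.h>

/* Assumption: these mirror the usual <linux/compiler.h> definitions.  Under
 * sparse (__CHECKER__) __user puts the pointer in a separate address space
 * and __force permits the otherwise-warned conversion; with a normal
 * compiler both expand to nothing, so the casts cost nothing at runtime. */
#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

/* Stand-in for a syscall prototype such as sys_chdir(const char __user *). */
static long fake_sys_chdir(const char __user *path)
{
	(void)path;
	return 0;
}

int main(void)
{
	/* "/root" is a kernel-space literal in the real code; the explicit
	 * __force cast tells sparse the crossing is intentional. */
	long err = fake_sys_chdir((__force const char __user *)"/root");

	printf("err=%ld\n", err);
	return 0;
}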
62184 diff -urNp linux-2.6.32.44/init/do_mounts.h linux-2.6.32.44/init/do_mounts.h
62185 --- linux-2.6.32.44/init/do_mounts.h 2011-03-27 14:31:47.000000000 -0400
62186 +++ linux-2.6.32.44/init/do_mounts.h 2011-04-17 15:56:46.000000000 -0400
62187 @@ -15,15 +15,15 @@ extern int root_mountflags;
62188
62189 static inline int create_dev(char *name, dev_t dev)
62190 {
62191 - sys_unlink(name);
62192 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
62193 + sys_unlink((__force char __user *)name);
62194 + return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
62195 }
62196
62197 #if BITS_PER_LONG == 32
62198 static inline u32 bstat(char *name)
62199 {
62200 struct stat64 stat;
62201 - if (sys_stat64(name, &stat) != 0)
62202 + if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
62203 return 0;
62204 if (!S_ISBLK(stat.st_mode))
62205 return 0;
62206 diff -urNp linux-2.6.32.44/init/do_mounts_initrd.c linux-2.6.32.44/init/do_mounts_initrd.c
62207 --- linux-2.6.32.44/init/do_mounts_initrd.c 2011-03-27 14:31:47.000000000 -0400
62208 +++ linux-2.6.32.44/init/do_mounts_initrd.c 2011-04-17 15:56:46.000000000 -0400
62209 @@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shel
62210 sys_close(old_fd);sys_close(root_fd);
62211 sys_close(0);sys_close(1);sys_close(2);
62212 sys_setsid();
62213 - (void) sys_open("/dev/console",O_RDWR,0);
62214 + (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
62215 (void) sys_dup(0);
62216 (void) sys_dup(0);
62217 return kernel_execve(shell, argv, envp_init);
62218 @@ -47,13 +47,13 @@ static void __init handle_initrd(void)
62219 create_dev("/dev/root.old", Root_RAM0);
62220 /* mount initrd on rootfs' /root */
62221 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
62222 - sys_mkdir("/old", 0700);
62223 - root_fd = sys_open("/", 0, 0);
62224 - old_fd = sys_open("/old", 0, 0);
62225 + sys_mkdir((__force const char __user *)"/old", 0700);
62226 + root_fd = sys_open((__force const char __user *)"/", 0, 0);
62227 + old_fd = sys_open((__force const char __user *)"/old", 0, 0);
62228 /* move initrd over / and chdir/chroot in initrd root */
62229 - sys_chdir("/root");
62230 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
62231 - sys_chroot(".");
62232 + sys_chdir((__force const char __user *)"/root");
62233 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
62234 + sys_chroot((__force const char __user *)".");
62235
62236 /*
62237 * In case that a resume from disk is carried out by linuxrc or one of
62238 @@ -70,15 +70,15 @@ static void __init handle_initrd(void)
62239
62240 /* move initrd to rootfs' /old */
62241 sys_fchdir(old_fd);
62242 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
62243 + sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
62244 /* switch root and cwd back to / of rootfs */
62245 sys_fchdir(root_fd);
62246 - sys_chroot(".");
62247 + sys_chroot((__force const char __user *)".");
62248 sys_close(old_fd);
62249 sys_close(root_fd);
62250
62251 if (new_decode_dev(real_root_dev) == Root_RAM0) {
62252 - sys_chdir("/old");
62253 + sys_chdir((__force const char __user *)"/old");
62254 return;
62255 }
62256
62257 @@ -86,17 +86,17 @@ static void __init handle_initrd(void)
62258 mount_root();
62259
62260 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
62261 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
62262 + error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
62263 if (!error)
62264 printk("okay\n");
62265 else {
62266 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
62267 + int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
62268 if (error == -ENOENT)
62269 printk("/initrd does not exist. Ignored.\n");
62270 else
62271 printk("failed\n");
62272 printk(KERN_NOTICE "Unmounting old root\n");
62273 - sys_umount("/old", MNT_DETACH);
62274 + sys_umount((__force char __user *)"/old", MNT_DETACH);
62275 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
62276 if (fd < 0) {
62277 error = fd;
62278 @@ -119,11 +119,11 @@ int __init initrd_load(void)
62279 * mounted in the normal path.
62280 */
62281 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
62282 - sys_unlink("/initrd.image");
62283 + sys_unlink((__force const char __user *)"/initrd.image");
62284 handle_initrd();
62285 return 1;
62286 }
62287 }
62288 - sys_unlink("/initrd.image");
62289 + sys_unlink((__force const char __user *)"/initrd.image");
62290 return 0;
62291 }
62292 diff -urNp linux-2.6.32.44/init/do_mounts_md.c linux-2.6.32.44/init/do_mounts_md.c
62293 --- linux-2.6.32.44/init/do_mounts_md.c 2011-03-27 14:31:47.000000000 -0400
62294 +++ linux-2.6.32.44/init/do_mounts_md.c 2011-04-17 15:56:46.000000000 -0400
62295 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
62296 partitioned ? "_d" : "", minor,
62297 md_setup_args[ent].device_names);
62298
62299 - fd = sys_open(name, 0, 0);
62300 + fd = sys_open((__force char __user *)name, 0, 0);
62301 if (fd < 0) {
62302 printk(KERN_ERR "md: open failed - cannot start "
62303 "array %s\n", name);
62304 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
62305 * array without it
62306 */
62307 sys_close(fd);
62308 - fd = sys_open(name, 0, 0);
62309 + fd = sys_open((__force char __user *)name, 0, 0);
62310 sys_ioctl(fd, BLKRRPART, 0);
62311 }
62312 sys_close(fd);
62313 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
62314
62315 wait_for_device_probe();
62316
62317 - fd = sys_open("/dev/md0", 0, 0);
62318 + fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
62319 if (fd >= 0) {
62320 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
62321 sys_close(fd);
62322 diff -urNp linux-2.6.32.44/init/initramfs.c linux-2.6.32.44/init/initramfs.c
62323 --- linux-2.6.32.44/init/initramfs.c 2011-03-27 14:31:47.000000000 -0400
62324 +++ linux-2.6.32.44/init/initramfs.c 2011-04-17 15:56:46.000000000 -0400
62325 @@ -74,7 +74,7 @@ static void __init free_hash(void)
62326 }
62327 }
62328
62329 -static long __init do_utime(char __user *filename, time_t mtime)
62330 +static long __init do_utime(__force char __user *filename, time_t mtime)
62331 {
62332 struct timespec t[2];
62333
62334 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
62335 struct dir_entry *de, *tmp;
62336 list_for_each_entry_safe(de, tmp, &dir_list, list) {
62337 list_del(&de->list);
62338 - do_utime(de->name, de->mtime);
62339 + do_utime((__force char __user *)de->name, de->mtime);
62340 kfree(de->name);
62341 kfree(de);
62342 }
62343 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
62344 if (nlink >= 2) {
62345 char *old = find_link(major, minor, ino, mode, collected);
62346 if (old)
62347 - return (sys_link(old, collected) < 0) ? -1 : 1;
62348 + return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
62349 }
62350 return 0;
62351 }
62352 @@ -280,11 +280,11 @@ static void __init clean_path(char *path
62353 {
62354 struct stat st;
62355
62356 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
62357 + if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
62358 if (S_ISDIR(st.st_mode))
62359 - sys_rmdir(path);
62360 + sys_rmdir((__force char __user *)path);
62361 else
62362 - sys_unlink(path);
62363 + sys_unlink((__force char __user *)path);
62364 }
62365 }
62366
62367 @@ -305,7 +305,7 @@ static int __init do_name(void)
62368 int openflags = O_WRONLY|O_CREAT;
62369 if (ml != 1)
62370 openflags |= O_TRUNC;
62371 - wfd = sys_open(collected, openflags, mode);
62372 + wfd = sys_open((__force char __user *)collected, openflags, mode);
62373
62374 if (wfd >= 0) {
62375 sys_fchown(wfd, uid, gid);
62376 @@ -317,17 +317,17 @@ static int __init do_name(void)
62377 }
62378 }
62379 } else if (S_ISDIR(mode)) {
62380 - sys_mkdir(collected, mode);
62381 - sys_chown(collected, uid, gid);
62382 - sys_chmod(collected, mode);
62383 + sys_mkdir((__force char __user *)collected, mode);
62384 + sys_chown((__force char __user *)collected, uid, gid);
62385 + sys_chmod((__force char __user *)collected, mode);
62386 dir_add(collected, mtime);
62387 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
62388 S_ISFIFO(mode) || S_ISSOCK(mode)) {
62389 if (maybe_link() == 0) {
62390 - sys_mknod(collected, mode, rdev);
62391 - sys_chown(collected, uid, gid);
62392 - sys_chmod(collected, mode);
62393 - do_utime(collected, mtime);
62394 + sys_mknod((__force char __user *)collected, mode, rdev);
62395 + sys_chown((__force char __user *)collected, uid, gid);
62396 + sys_chmod((__force char __user *)collected, mode);
62397 + do_utime((__force char __user *)collected, mtime);
62398 }
62399 }
62400 return 0;
62401 @@ -336,15 +336,15 @@ static int __init do_name(void)
62402 static int __init do_copy(void)
62403 {
62404 if (count >= body_len) {
62405 - sys_write(wfd, victim, body_len);
62406 + sys_write(wfd, (__force char __user *)victim, body_len);
62407 sys_close(wfd);
62408 - do_utime(vcollected, mtime);
62409 + do_utime((__force char __user *)vcollected, mtime);
62410 kfree(vcollected);
62411 eat(body_len);
62412 state = SkipIt;
62413 return 0;
62414 } else {
62415 - sys_write(wfd, victim, count);
62416 + sys_write(wfd, (__force char __user *)victim, count);
62417 body_len -= count;
62418 eat(count);
62419 return 1;
62420 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
62421 {
62422 collected[N_ALIGN(name_len) + body_len] = '\0';
62423 clean_path(collected, 0);
62424 - sys_symlink(collected + N_ALIGN(name_len), collected);
62425 - sys_lchown(collected, uid, gid);
62426 - do_utime(collected, mtime);
62427 + sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
62428 + sys_lchown((__force char __user *)collected, uid, gid);
62429 + do_utime((__force char __user *)collected, mtime);
62430 state = SkipIt;
62431 next_state = Reset;
62432 return 0;
62433 diff -urNp linux-2.6.32.44/init/Kconfig linux-2.6.32.44/init/Kconfig
62434 --- linux-2.6.32.44/init/Kconfig 2011-05-10 22:12:01.000000000 -0400
62435 +++ linux-2.6.32.44/init/Kconfig 2011-05-10 22:12:34.000000000 -0400
62436 @@ -1004,7 +1004,7 @@ config SLUB_DEBUG
62437
62438 config COMPAT_BRK
62439 bool "Disable heap randomization"
62440 - default y
62441 + default n
62442 help
62443 Randomizing heap placement makes heap exploits harder, but it
62444 also breaks ancient binaries (including anything libc5 based).
62445 diff -urNp linux-2.6.32.44/init/main.c linux-2.6.32.44/init/main.c
62446 --- linux-2.6.32.44/init/main.c 2011-05-10 22:12:01.000000000 -0400
62447 +++ linux-2.6.32.44/init/main.c 2011-08-05 20:33:55.000000000 -0400
62448 @@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void)
62449 #ifdef CONFIG_TC
62450 extern void tc_init(void);
62451 #endif
62452 +extern void grsecurity_init(void);
62453
62454 enum system_states system_state __read_mostly;
62455 EXPORT_SYMBOL(system_state);
62456 @@ -183,6 +184,49 @@ static int __init set_reset_devices(char
62457
62458 __setup("reset_devices", set_reset_devices);
62459
62460 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
62461 +extern char pax_enter_kernel_user[];
62462 +extern char pax_exit_kernel_user[];
62463 +extern pgdval_t clone_pgd_mask;
62464 +#endif
62465 +
62466 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
62467 +static int __init setup_pax_nouderef(char *str)
62468 +{
62469 +#ifdef CONFIG_X86_32
62470 + unsigned int cpu;
62471 + struct desc_struct *gdt;
62472 +
62473 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
62474 + gdt = get_cpu_gdt_table(cpu);
62475 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
62476 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
62477 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
62478 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
62479 + }
62480 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
62481 +#else
62482 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
62483 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
62484 + clone_pgd_mask = ~(pgdval_t)0UL;
62485 +#endif
62486 +
62487 + return 0;
62488 +}
62489 +early_param("pax_nouderef", setup_pax_nouderef);
62490 +#endif
62491 +
62492 +#ifdef CONFIG_PAX_SOFTMODE
62493 +int pax_softmode;
62494 +
62495 +static int __init setup_pax_softmode(char *str)
62496 +{
62497 + get_option(&str, &pax_softmode);
62498 + return 1;
62499 +}
62500 +__setup("pax_softmode=", setup_pax_softmode);
62501 +#endif
62502 +
62503 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
62504 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
62505 static const char *panic_later, *panic_param;
62506 @@ -705,52 +749,53 @@ int initcall_debug;
62507 core_param(initcall_debug, initcall_debug, bool, 0644);
62508
62509 static char msgbuf[64];
62510 -static struct boot_trace_call call;
62511 -static struct boot_trace_ret ret;
62512 +static struct boot_trace_call trace_call;
62513 +static struct boot_trace_ret trace_ret;
62514
62515 int do_one_initcall(initcall_t fn)
62516 {
62517 int count = preempt_count();
62518 ktime_t calltime, delta, rettime;
62519 + const char *msg1 = "", *msg2 = "";
62520
62521 if (initcall_debug) {
62522 - call.caller = task_pid_nr(current);
62523 - printk("calling %pF @ %i\n", fn, call.caller);
62524 + trace_call.caller = task_pid_nr(current);
62525 + printk("calling %pF @ %i\n", fn, trace_call.caller);
62526 calltime = ktime_get();
62527 - trace_boot_call(&call, fn);
62528 + trace_boot_call(&trace_call, fn);
62529 enable_boot_trace();
62530 }
62531
62532 - ret.result = fn();
62533 + trace_ret.result = fn();
62534
62535 if (initcall_debug) {
62536 disable_boot_trace();
62537 rettime = ktime_get();
62538 delta = ktime_sub(rettime, calltime);
62539 - ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
62540 - trace_boot_ret(&ret, fn);
62541 + trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
62542 + trace_boot_ret(&trace_ret, fn);
62543 printk("initcall %pF returned %d after %Ld usecs\n", fn,
62544 - ret.result, ret.duration);
62545 + trace_ret.result, trace_ret.duration);
62546 }
62547
62548 msgbuf[0] = 0;
62549
62550 - if (ret.result && ret.result != -ENODEV && initcall_debug)
62551 - sprintf(msgbuf, "error code %d ", ret.result);
62552 + if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
62553 + sprintf(msgbuf, "error code %d ", trace_ret.result);
62554
62555 if (preempt_count() != count) {
62556 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
62557 + msg1 = " preemption imbalance";
62558 preempt_count() = count;
62559 }
62560 if (irqs_disabled()) {
62561 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
62562 + msg2 = " disabled interrupts";
62563 local_irq_enable();
62564 }
62565 - if (msgbuf[0]) {
62566 - printk("initcall %pF returned with %s\n", fn, msgbuf);
62567 + if (msgbuf[0] || *msg1 || *msg2) {
62568 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
62569 }
62570
62571 - return ret.result;
62572 + return trace_ret.result;
62573 }
62574
62575
62576 @@ -893,11 +938,13 @@ static int __init kernel_init(void * unu
62577 if (!ramdisk_execute_command)
62578 ramdisk_execute_command = "/init";
62579
62580 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
62581 + if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
62582 ramdisk_execute_command = NULL;
62583 prepare_namespace();
62584 }
62585
62586 + grsecurity_init();
62587 +
62588 /*
62589 * Ok, we have completed the initial bootup, and
62590 * we're essentially up and running. Get rid of the
62591 diff -urNp linux-2.6.32.44/init/noinitramfs.c linux-2.6.32.44/init/noinitramfs.c
62592 --- linux-2.6.32.44/init/noinitramfs.c 2011-03-27 14:31:47.000000000 -0400
62593 +++ linux-2.6.32.44/init/noinitramfs.c 2011-04-17 15:56:46.000000000 -0400
62594 @@ -29,7 +29,7 @@ static int __init default_rootfs(void)
62595 {
62596 int err;
62597
62598 - err = sys_mkdir("/dev", 0755);
62599 + err = sys_mkdir((const char __user *)"/dev", 0755);
62600 if (err < 0)
62601 goto out;
62602
62603 @@ -39,7 +39,7 @@ static int __init default_rootfs(void)
62604 if (err < 0)
62605 goto out;
62606
62607 - err = sys_mkdir("/root", 0700);
62608 + err = sys_mkdir((const char __user *)"/root", 0700);
62609 if (err < 0)
62610 goto out;
62611
62612 diff -urNp linux-2.6.32.44/ipc/mqueue.c linux-2.6.32.44/ipc/mqueue.c
62613 --- linux-2.6.32.44/ipc/mqueue.c 2011-03-27 14:31:47.000000000 -0400
62614 +++ linux-2.6.32.44/ipc/mqueue.c 2011-04-17 15:56:46.000000000 -0400
62615 @@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(st
62616 mq_bytes = (mq_msg_tblsz +
62617 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
62618
62619 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
62620 spin_lock(&mq_lock);
62621 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
62622 u->mq_bytes + mq_bytes >
62623 diff -urNp linux-2.6.32.44/ipc/msg.c linux-2.6.32.44/ipc/msg.c
62624 --- linux-2.6.32.44/ipc/msg.c 2011-03-27 14:31:47.000000000 -0400
62625 +++ linux-2.6.32.44/ipc/msg.c 2011-08-05 20:33:55.000000000 -0400
62626 @@ -310,18 +310,19 @@ static inline int msg_security(struct ke
62627 return security_msg_queue_associate(msq, msgflg);
62628 }
62629
62630 +static struct ipc_ops msg_ops = {
62631 + .getnew = newque,
62632 + .associate = msg_security,
62633 + .more_checks = NULL
62634 +};
62635 +
62636 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
62637 {
62638 struct ipc_namespace *ns;
62639 - struct ipc_ops msg_ops;
62640 struct ipc_params msg_params;
62641
62642 ns = current->nsproxy->ipc_ns;
62643
62644 - msg_ops.getnew = newque;
62645 - msg_ops.associate = msg_security;
62646 - msg_ops.more_checks = NULL;
62647 -
62648 msg_params.key = key;
62649 msg_params.flg = msgflg;
62650
62651 diff -urNp linux-2.6.32.44/ipc/sem.c linux-2.6.32.44/ipc/sem.c
62652 --- linux-2.6.32.44/ipc/sem.c 2011-03-27 14:31:47.000000000 -0400
62653 +++ linux-2.6.32.44/ipc/sem.c 2011-08-05 20:33:55.000000000 -0400
62654 @@ -309,10 +309,15 @@ static inline int sem_more_checks(struct
62655 return 0;
62656 }
62657
62658 +static struct ipc_ops sem_ops = {
62659 + .getnew = newary,
62660 + .associate = sem_security,
62661 + .more_checks = sem_more_checks
62662 +};
62663 +
62664 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62665 {
62666 struct ipc_namespace *ns;
62667 - struct ipc_ops sem_ops;
62668 struct ipc_params sem_params;
62669
62670 ns = current->nsproxy->ipc_ns;
62671 @@ -320,10 +325,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
62672 if (nsems < 0 || nsems > ns->sc_semmsl)
62673 return -EINVAL;
62674
62675 - sem_ops.getnew = newary;
62676 - sem_ops.associate = sem_security;
62677 - sem_ops.more_checks = sem_more_checks;
62678 -
62679 sem_params.key = key;
62680 sem_params.flg = semflg;
62681 sem_params.u.nsems = nsems;
62682 @@ -671,6 +672,8 @@ static int semctl_main(struct ipc_namesp
62683 ushort* sem_io = fast_sem_io;
62684 int nsems;
62685
62686 + pax_track_stack();
62687 +
62688 sma = sem_lock_check(ns, semid);
62689 if (IS_ERR(sma))
62690 return PTR_ERR(sma);
62691 @@ -1071,6 +1074,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
62692 unsigned long jiffies_left = 0;
62693 struct ipc_namespace *ns;
62694
62695 + pax_track_stack();
62696 +
62697 ns = current->nsproxy->ipc_ns;
62698
62699 if (nsops < 1 || semid < 0)
62700 diff -urNp linux-2.6.32.44/ipc/shm.c linux-2.6.32.44/ipc/shm.c
62701 --- linux-2.6.32.44/ipc/shm.c 2011-03-27 14:31:47.000000000 -0400
62702 +++ linux-2.6.32.44/ipc/shm.c 2011-08-05 20:33:55.000000000 -0400
62703 @@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_name
62704 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
62705 #endif
62706
62707 +#ifdef CONFIG_GRKERNSEC
62708 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62709 + const time_t shm_createtime, const uid_t cuid,
62710 + const int shmid);
62711 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62712 + const time_t shm_createtime);
62713 +#endif
62714 +
62715 void shm_init_ns(struct ipc_namespace *ns)
62716 {
62717 ns->shm_ctlmax = SHMMAX;
62718 @@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *
62719 shp->shm_lprid = 0;
62720 shp->shm_atim = shp->shm_dtim = 0;
62721 shp->shm_ctim = get_seconds();
62722 +#ifdef CONFIG_GRKERNSEC
62723 + {
62724 + struct timespec timeval;
62725 + do_posix_clock_monotonic_gettime(&timeval);
62726 +
62727 + shp->shm_createtime = timeval.tv_sec;
62728 + }
62729 +#endif
62730 shp->shm_segsz = size;
62731 shp->shm_nattch = 0;
62732 shp->shm_file = file;
62733 @@ -446,18 +462,19 @@ static inline int shm_more_checks(struct
62734 return 0;
62735 }
62736
62737 +static struct ipc_ops shm_ops = {
62738 + .getnew = newseg,
62739 + .associate = shm_security,
62740 + .more_checks = shm_more_checks
62741 +};
62742 +
62743 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
62744 {
62745 struct ipc_namespace *ns;
62746 - struct ipc_ops shm_ops;
62747 struct ipc_params shm_params;
62748
62749 ns = current->nsproxy->ipc_ns;
62750
62751 - shm_ops.getnew = newseg;
62752 - shm_ops.associate = shm_security;
62753 - shm_ops.more_checks = shm_more_checks;
62754 -
62755 shm_params.key = key;
62756 shm_params.flg = shmflg;
62757 shm_params.u.size = size;
62758 @@ -880,9 +897,21 @@ long do_shmat(int shmid, char __user *sh
62759 if (err)
62760 goto out_unlock;
62761
62762 +#ifdef CONFIG_GRKERNSEC
62763 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
62764 + shp->shm_perm.cuid, shmid) ||
62765 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
62766 + err = -EACCES;
62767 + goto out_unlock;
62768 + }
62769 +#endif
62770 +
62771 path.dentry = dget(shp->shm_file->f_path.dentry);
62772 path.mnt = shp->shm_file->f_path.mnt;
62773 shp->shm_nattch++;
62774 +#ifdef CONFIG_GRKERNSEC
62775 + shp->shm_lapid = current->pid;
62776 +#endif
62777 size = i_size_read(path.dentry->d_inode);
62778 shm_unlock(shp);
62779
62780 diff -urNp linux-2.6.32.44/kernel/acct.c linux-2.6.32.44/kernel/acct.c
62781 --- linux-2.6.32.44/kernel/acct.c 2011-03-27 14:31:47.000000000 -0400
62782 +++ linux-2.6.32.44/kernel/acct.c 2011-04-17 15:56:46.000000000 -0400
62783 @@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_a
62784 */
62785 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
62786 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
62787 - file->f_op->write(file, (char *)&ac,
62788 + file->f_op->write(file, (__force char __user *)&ac,
62789 sizeof(acct_t), &file->f_pos);
62790 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
62791 set_fs(fs);
62792 diff -urNp linux-2.6.32.44/kernel/audit.c linux-2.6.32.44/kernel/audit.c
62793 --- linux-2.6.32.44/kernel/audit.c 2011-03-27 14:31:47.000000000 -0400
62794 +++ linux-2.6.32.44/kernel/audit.c 2011-05-04 17:56:28.000000000 -0400
62795 @@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
62796 3) suppressed due to audit_rate_limit
62797 4) suppressed due to audit_backlog_limit
62798 */
62799 -static atomic_t audit_lost = ATOMIC_INIT(0);
62800 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
62801
62802 /* The netlink socket. */
62803 static struct sock *audit_sock;
62804 @@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
62805 unsigned long now;
62806 int print;
62807
62808 - atomic_inc(&audit_lost);
62809 + atomic_inc_unchecked(&audit_lost);
62810
62811 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
62812
62813 @@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
62814 printk(KERN_WARNING
62815 "audit: audit_lost=%d audit_rate_limit=%d "
62816 "audit_backlog_limit=%d\n",
62817 - atomic_read(&audit_lost),
62818 + atomic_read_unchecked(&audit_lost),
62819 audit_rate_limit,
62820 audit_backlog_limit);
62821 audit_panic(message);
62822 @@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_b
62823 status_set.pid = audit_pid;
62824 status_set.rate_limit = audit_rate_limit;
62825 status_set.backlog_limit = audit_backlog_limit;
62826 - status_set.lost = atomic_read(&audit_lost);
62827 + status_set.lost = atomic_read_unchecked(&audit_lost);
62828 status_set.backlog = skb_queue_len(&audit_skb_queue);
62829 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
62830 &status_set, sizeof(status_set));
62831 @@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_b
62832 spin_unlock_irq(&tsk->sighand->siglock);
62833 }
62834 read_unlock(&tasklist_lock);
62835 - audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
62836 - &s, sizeof(s));
62837 +
62838 + if (!err)
62839 + audit_send_reply(NETLINK_CB(skb).pid, seq,
62840 + AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
62841 break;
62842 }
62843 case AUDIT_TTY_SET: {
62844 diff -urNp linux-2.6.32.44/kernel/auditsc.c linux-2.6.32.44/kernel/auditsc.c
62845 --- linux-2.6.32.44/kernel/auditsc.c 2011-03-27 14:31:47.000000000 -0400
62846 +++ linux-2.6.32.44/kernel/auditsc.c 2011-05-04 17:56:28.000000000 -0400
62847 @@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_conte
62848 }
62849
62850 /* global counter which is incremented every time something logs in */
62851 -static atomic_t session_id = ATOMIC_INIT(0);
62852 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
62853
62854 /**
62855 * audit_set_loginuid - set a task's audit_context loginuid
62856 @@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT
62857 */
62858 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
62859 {
62860 - unsigned int sessionid = atomic_inc_return(&session_id);
62861 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
62862 struct audit_context *context = task->audit_context;
62863
62864 if (context && context->in_syscall) {
62865 diff -urNp linux-2.6.32.44/kernel/capability.c linux-2.6.32.44/kernel/capability.c
62866 --- linux-2.6.32.44/kernel/capability.c 2011-03-27 14:31:47.000000000 -0400
62867 +++ linux-2.6.32.44/kernel/capability.c 2011-04-17 15:56:46.000000000 -0400
62868 @@ -305,10 +305,26 @@ int capable(int cap)
62869 BUG();
62870 }
62871
62872 - if (security_capable(cap) == 0) {
62873 + if (security_capable(cap) == 0 && gr_is_capable(cap)) {
62874 current->flags |= PF_SUPERPRIV;
62875 return 1;
62876 }
62877 return 0;
62878 }
62879 +
62880 +int capable_nolog(int cap)
62881 +{
62882 + if (unlikely(!cap_valid(cap))) {
62883 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
62884 + BUG();
62885 + }
62886 +
62887 + if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
62888 + current->flags |= PF_SUPERPRIV;
62889 + return 1;
62890 + }
62891 + return 0;
62892 +}
62893 +
62894 EXPORT_SYMBOL(capable);
62895 +EXPORT_SYMBOL(capable_nolog);
62896 diff -urNp linux-2.6.32.44/kernel/cgroup.c linux-2.6.32.44/kernel/cgroup.c
62897 --- linux-2.6.32.44/kernel/cgroup.c 2011-03-27 14:31:47.000000000 -0400
62898 +++ linux-2.6.32.44/kernel/cgroup.c 2011-05-16 21:46:57.000000000 -0400
62899 @@ -536,6 +536,8 @@ static struct css_set *find_css_set(
62900 struct hlist_head *hhead;
62901 struct cg_cgroup_link *link;
62902
62903 + pax_track_stack();
62904 +
62905 /* First see if we already have a cgroup group that matches
62906 * the desired set */
62907 read_lock(&css_set_lock);
62908 diff -urNp linux-2.6.32.44/kernel/configs.c linux-2.6.32.44/kernel/configs.c
62909 --- linux-2.6.32.44/kernel/configs.c 2011-03-27 14:31:47.000000000 -0400
62910 +++ linux-2.6.32.44/kernel/configs.c 2011-04-17 15:56:46.000000000 -0400
62911 @@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
62912 struct proc_dir_entry *entry;
62913
62914 /* create the current config file */
62915 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
62916 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
62917 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
62918 + &ikconfig_file_ops);
62919 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62920 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
62921 + &ikconfig_file_ops);
62922 +#endif
62923 +#else
62924 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
62925 &ikconfig_file_ops);
62926 +#endif
62927 +
62928 if (!entry)
62929 return -ENOMEM;
62930
62931 diff -urNp linux-2.6.32.44/kernel/cpu.c linux-2.6.32.44/kernel/cpu.c
62932 --- linux-2.6.32.44/kernel/cpu.c 2011-03-27 14:31:47.000000000 -0400
62933 +++ linux-2.6.32.44/kernel/cpu.c 2011-04-17 15:56:46.000000000 -0400
62934 @@ -19,7 +19,7 @@
62935 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
62936 static DEFINE_MUTEX(cpu_add_remove_lock);
62937
62938 -static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
62939 +static RAW_NOTIFIER_HEAD(cpu_chain);
62940
62941 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
62942 * Should always be manipulated under cpu_add_remove_lock
62943 diff -urNp linux-2.6.32.44/kernel/cred.c linux-2.6.32.44/kernel/cred.c
62944 --- linux-2.6.32.44/kernel/cred.c 2011-03-27 14:31:47.000000000 -0400
62945 +++ linux-2.6.32.44/kernel/cred.c 2011-05-17 19:26:34.000000000 -0400
62946 @@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head
62947 */
62948 void __put_cred(struct cred *cred)
62949 {
62950 + pax_track_stack();
62951 +
62952 kdebug("__put_cred(%p{%d,%d})", cred,
62953 atomic_read(&cred->usage),
62954 read_cred_subscribers(cred));
62955 @@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
62956 {
62957 struct cred *cred;
62958
62959 + pax_track_stack();
62960 +
62961 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
62962 atomic_read(&tsk->cred->usage),
62963 read_cred_subscribers(tsk->cred));
62964 @@ -222,6 +226,8 @@ const struct cred *get_task_cred(struct
62965 {
62966 const struct cred *cred;
62967
62968 + pax_track_stack();
62969 +
62970 rcu_read_lock();
62971
62972 do {
62973 @@ -241,6 +247,8 @@ struct cred *cred_alloc_blank(void)
62974 {
62975 struct cred *new;
62976
62977 + pax_track_stack();
62978 +
62979 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
62980 if (!new)
62981 return NULL;
62982 @@ -289,6 +297,8 @@ struct cred *prepare_creds(void)
62983 const struct cred *old;
62984 struct cred *new;
62985
62986 + pax_track_stack();
62987 +
62988 validate_process_creds();
62989
62990 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62991 @@ -335,6 +345,8 @@ struct cred *prepare_exec_creds(void)
62992 struct thread_group_cred *tgcred = NULL;
62993 struct cred *new;
62994
62995 + pax_track_stack();
62996 +
62997 #ifdef CONFIG_KEYS
62998 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
62999 if (!tgcred)
63000 @@ -441,6 +453,8 @@ int copy_creds(struct task_struct *p, un
63001 struct cred *new;
63002 int ret;
63003
63004 + pax_track_stack();
63005 +
63006 mutex_init(&p->cred_guard_mutex);
63007
63008 if (
63009 @@ -528,6 +542,8 @@ int commit_creds(struct cred *new)
63010 struct task_struct *task = current;
63011 const struct cred *old = task->real_cred;
63012
63013 + pax_track_stack();
63014 +
63015 kdebug("commit_creds(%p{%d,%d})", new,
63016 atomic_read(&new->usage),
63017 read_cred_subscribers(new));
63018 @@ -544,6 +560,8 @@ int commit_creds(struct cred *new)
63019
63020 get_cred(new); /* we will require a ref for the subj creds too */
63021
63022 + gr_set_role_label(task, new->uid, new->gid);
63023 +
63024 /* dumpability changes */
63025 if (old->euid != new->euid ||
63026 old->egid != new->egid ||
63027 @@ -606,6 +624,8 @@ EXPORT_SYMBOL(commit_creds);
63028 */
63029 void abort_creds(struct cred *new)
63030 {
63031 + pax_track_stack();
63032 +
63033 kdebug("abort_creds(%p{%d,%d})", new,
63034 atomic_read(&new->usage),
63035 read_cred_subscribers(new));
63036 @@ -629,6 +649,8 @@ const struct cred *override_creds(const
63037 {
63038 const struct cred *old = current->cred;
63039
63040 + pax_track_stack();
63041 +
63042 kdebug("override_creds(%p{%d,%d})", new,
63043 atomic_read(&new->usage),
63044 read_cred_subscribers(new));
63045 @@ -658,6 +680,8 @@ void revert_creds(const struct cred *old
63046 {
63047 const struct cred *override = current->cred;
63048
63049 + pax_track_stack();
63050 +
63051 kdebug("revert_creds(%p{%d,%d})", old,
63052 atomic_read(&old->usage),
63053 read_cred_subscribers(old));
63054 @@ -704,6 +728,8 @@ struct cred *prepare_kernel_cred(struct
63055 const struct cred *old;
63056 struct cred *new;
63057
63058 + pax_track_stack();
63059 +
63060 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
63061 if (!new)
63062 return NULL;
63063 @@ -758,6 +784,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
63064 */
63065 int set_security_override(struct cred *new, u32 secid)
63066 {
63067 + pax_track_stack();
63068 +
63069 return security_kernel_act_as(new, secid);
63070 }
63071 EXPORT_SYMBOL(set_security_override);
63072 @@ -777,6 +805,8 @@ int set_security_override_from_ctx(struc
63073 u32 secid;
63074 int ret;
63075
63076 + pax_track_stack();
63077 +
63078 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
63079 if (ret < 0)
63080 return ret;
63081 diff -urNp linux-2.6.32.44/kernel/exit.c linux-2.6.32.44/kernel/exit.c
63082 --- linux-2.6.32.44/kernel/exit.c 2011-03-27 14:31:47.000000000 -0400
63083 +++ linux-2.6.32.44/kernel/exit.c 2011-04-17 15:56:46.000000000 -0400
63084 @@ -55,6 +55,10 @@
63085 #include <asm/pgtable.h>
63086 #include <asm/mmu_context.h>
63087
63088 +#ifdef CONFIG_GRKERNSEC
63089 +extern rwlock_t grsec_exec_file_lock;
63090 +#endif
63091 +
63092 static void exit_mm(struct task_struct * tsk);
63093
63094 static void __unhash_process(struct task_struct *p)
63095 @@ -174,6 +178,8 @@ void release_task(struct task_struct * p
63096 struct task_struct *leader;
63097 int zap_leader;
63098 repeat:
63099 + gr_del_task_from_ip_table(p);
63100 +
63101 tracehook_prepare_release_task(p);
63102 /* don't need to get the RCU readlock here - the process is dead and
63103 * can't be modifying its own credentials */
63104 @@ -341,11 +347,22 @@ static void reparent_to_kthreadd(void)
63105 {
63106 write_lock_irq(&tasklist_lock);
63107
63108 +#ifdef CONFIG_GRKERNSEC
63109 + write_lock(&grsec_exec_file_lock);
63110 + if (current->exec_file) {
63111 + fput(current->exec_file);
63112 + current->exec_file = NULL;
63113 + }
63114 + write_unlock(&grsec_exec_file_lock);
63115 +#endif
63116 +
63117 ptrace_unlink(current);
63118 /* Reparent to init */
63119 current->real_parent = current->parent = kthreadd_task;
63120 list_move_tail(&current->sibling, &current->real_parent->children);
63121
63122 + gr_set_kernel_label(current);
63123 +
63124 /* Set the exit signal to SIGCHLD so we signal init on exit */
63125 current->exit_signal = SIGCHLD;
63126
63127 @@ -397,7 +414,7 @@ int allow_signal(int sig)
63128 * know it'll be handled, so that they don't get converted to
63129 * SIGKILL or just silently dropped.
63130 */
63131 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
63132 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
63133 recalc_sigpending();
63134 spin_unlock_irq(&current->sighand->siglock);
63135 return 0;
63136 @@ -433,6 +450,17 @@ void daemonize(const char *name, ...)
63137 vsnprintf(current->comm, sizeof(current->comm), name, args);
63138 va_end(args);
63139
63140 +#ifdef CONFIG_GRKERNSEC
63141 + write_lock(&grsec_exec_file_lock);
63142 + if (current->exec_file) {
63143 + fput(current->exec_file);
63144 + current->exec_file = NULL;
63145 + }
63146 + write_unlock(&grsec_exec_file_lock);
63147 +#endif
63148 +
63149 + gr_set_kernel_label(current);
63150 +
63151 /*
63152 * If we were started as result of loading a module, close all of the
63153 * user space pages. We don't need them, and if we didn't close them
63154 @@ -897,17 +925,17 @@ NORET_TYPE void do_exit(long code)
63155 struct task_struct *tsk = current;
63156 int group_dead;
63157
63158 - profile_task_exit(tsk);
63159 -
63160 - WARN_ON(atomic_read(&tsk->fs_excl));
63161 -
63162 + /*
63163 + * Check this first since set_fs() below depends on
63164 + * current_thread_info(), which we better not access when we're in
63165 + * interrupt context. Other than that, we want to do the set_fs()
63166 + * as early as possible.
63167 + */
63168 if (unlikely(in_interrupt()))
63169 panic("Aiee, killing interrupt handler!");
63170 - if (unlikely(!tsk->pid))
63171 - panic("Attempted to kill the idle task!");
63172
63173 /*
63174 - * If do_exit is called because this processes oopsed, it's possible
63175 + * If do_exit is called because this processes Oops'ed, it's possible
63176 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
63177 * continuing. Amongst other possible reasons, this is to prevent
63178 * mm_release()->clear_child_tid() from writing to a user-controlled
63179 @@ -915,6 +943,13 @@ NORET_TYPE void do_exit(long code)
63180 */
63181 set_fs(USER_DS);
63182
63183 + profile_task_exit(tsk);
63184 +
63185 + WARN_ON(atomic_read(&tsk->fs_excl));
63186 +
63187 + if (unlikely(!tsk->pid))
63188 + panic("Attempted to kill the idle task!");
63189 +
63190 tracehook_report_exit(&code);
63191
63192 validate_creds_for_do_exit(tsk);
63193 @@ -973,6 +1008,9 @@ NORET_TYPE void do_exit(long code)
63194 tsk->exit_code = code;
63195 taskstats_exit(tsk, group_dead);
63196
63197 + gr_acl_handle_psacct(tsk, code);
63198 + gr_acl_handle_exit();
63199 +
63200 exit_mm(tsk);
63201
63202 if (group_dead)
63203 @@ -1188,7 +1226,7 @@ static int wait_task_zombie(struct wait_
63204
63205 if (unlikely(wo->wo_flags & WNOWAIT)) {
63206 int exit_code = p->exit_code;
63207 - int why, status;
63208 + int why;
63209
63210 get_task_struct(p);
63211 read_unlock(&tasklist_lock);
63212 diff -urNp linux-2.6.32.44/kernel/fork.c linux-2.6.32.44/kernel/fork.c
63213 --- linux-2.6.32.44/kernel/fork.c 2011-03-27 14:31:47.000000000 -0400
63214 +++ linux-2.6.32.44/kernel/fork.c 2011-04-17 15:56:46.000000000 -0400
63215 @@ -253,7 +253,7 @@ static struct task_struct *dup_task_stru
63216 *stackend = STACK_END_MAGIC; /* for overflow detection */
63217
63218 #ifdef CONFIG_CC_STACKPROTECTOR
63219 - tsk->stack_canary = get_random_int();
63220 + tsk->stack_canary = pax_get_random_long();
63221 #endif
63222
63223 /* One for us, one for whoever does the "release_task()" (usually parent) */
63224 @@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm
63225 mm->locked_vm = 0;
63226 mm->mmap = NULL;
63227 mm->mmap_cache = NULL;
63228 - mm->free_area_cache = oldmm->mmap_base;
63229 - mm->cached_hole_size = ~0UL;
63230 + mm->free_area_cache = oldmm->free_area_cache;
63231 + mm->cached_hole_size = oldmm->cached_hole_size;
63232 mm->map_count = 0;
63233 cpumask_clear(mm_cpumask(mm));
63234 mm->mm_rb = RB_ROOT;
63235 @@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm
63236 tmp->vm_flags &= ~VM_LOCKED;
63237 tmp->vm_mm = mm;
63238 tmp->vm_next = tmp->vm_prev = NULL;
63239 + tmp->vm_mirror = NULL;
63240 anon_vma_link(tmp);
63241 file = tmp->vm_file;
63242 if (file) {
63243 @@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm
63244 if (retval)
63245 goto out;
63246 }
63247 +
63248 +#ifdef CONFIG_PAX_SEGMEXEC
63249 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
63250 + struct vm_area_struct *mpnt_m;
63251 +
63252 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
63253 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
63254 +
63255 + if (!mpnt->vm_mirror)
63256 + continue;
63257 +
63258 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
63259 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
63260 + mpnt->vm_mirror = mpnt_m;
63261 + } else {
63262 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
63263 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
63264 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
63265 + mpnt->vm_mirror->vm_mirror = mpnt;
63266 + }
63267 + }
63268 + BUG_ON(mpnt_m);
63269 + }
63270 +#endif
63271 +
63272 /* a new mm has just been created */
63273 arch_dup_mmap(oldmm, mm);
63274 retval = 0;
63275 @@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_f
63276 write_unlock(&fs->lock);
63277 return -EAGAIN;
63278 }
63279 - fs->users++;
63280 + atomic_inc(&fs->users);
63281 write_unlock(&fs->lock);
63282 return 0;
63283 }
63284 tsk->fs = copy_fs_struct(fs);
63285 if (!tsk->fs)
63286 return -ENOMEM;
63287 + gr_set_chroot_entries(tsk, &tsk->fs->root);
63288 return 0;
63289 }
63290
63291 @@ -1033,10 +1060,13 @@ static struct task_struct *copy_process(
63292 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
63293 #endif
63294 retval = -EAGAIN;
63295 +
63296 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
63297 +
63298 if (atomic_read(&p->real_cred->user->processes) >=
63299 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
63300 - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
63301 - p->real_cred->user != INIT_USER)
63302 + if (p->real_cred->user != INIT_USER &&
63303 + !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
63304 goto bad_fork_free;
63305 }
63306
63307 @@ -1183,6 +1213,8 @@ static struct task_struct *copy_process(
63308 goto bad_fork_free_pid;
63309 }
63310
63311 + gr_copy_label(p);
63312 +
63313 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
63314 /*
63315 * Clear TID on mm_release()?
63316 @@ -1333,6 +1365,8 @@ bad_fork_cleanup_count:
63317 bad_fork_free:
63318 free_task(p);
63319 fork_out:
63320 + gr_log_forkfail(retval);
63321 +
63322 return ERR_PTR(retval);
63323 }
63324
63325 @@ -1426,6 +1460,8 @@ long do_fork(unsigned long clone_flags,
63326 if (clone_flags & CLONE_PARENT_SETTID)
63327 put_user(nr, parent_tidptr);
63328
63329 + gr_handle_brute_check();
63330 +
63331 if (clone_flags & CLONE_VFORK) {
63332 p->vfork_done = &vfork;
63333 init_completion(&vfork);
63334 @@ -1558,7 +1594,7 @@ static int unshare_fs(unsigned long unsh
63335 return 0;
63336
63337 /* don't need lock here; in the worst case we'll do useless copy */
63338 - if (fs->users == 1)
63339 + if (atomic_read(&fs->users) == 1)
63340 return 0;
63341
63342 *new_fsp = copy_fs_struct(fs);
63343 @@ -1681,7 +1717,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
63344 fs = current->fs;
63345 write_lock(&fs->lock);
63346 current->fs = new_fs;
63347 - if (--fs->users)
63348 + gr_set_chroot_entries(current, &current->fs->root);
63349 + if (atomic_dec_return(&fs->users))
63350 new_fs = NULL;
63351 else
63352 new_fs = fs;
63353 diff -urNp linux-2.6.32.44/kernel/futex.c linux-2.6.32.44/kernel/futex.c
63354 --- linux-2.6.32.44/kernel/futex.c 2011-03-27 14:31:47.000000000 -0400
63355 +++ linux-2.6.32.44/kernel/futex.c 2011-05-16 21:46:57.000000000 -0400
63356 @@ -54,6 +54,7 @@
63357 #include <linux/mount.h>
63358 #include <linux/pagemap.h>
63359 #include <linux/syscalls.h>
63360 +#include <linux/ptrace.h>
63361 #include <linux/signal.h>
63362 #include <linux/module.h>
63363 #include <linux/magic.h>
63364 @@ -221,6 +222,11 @@ get_futex_key(u32 __user *uaddr, int fsh
63365 struct page *page;
63366 int err;
63367
63368 +#ifdef CONFIG_PAX_SEGMEXEC
63369 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
63370 + return -EFAULT;
63371 +#endif
63372 +
63373 /*
63374 * The futex address must be "naturally" aligned.
63375 */
63376 @@ -1789,6 +1795,8 @@ static int futex_wait(u32 __user *uaddr,
63377 struct futex_q q;
63378 int ret;
63379
63380 + pax_track_stack();
63381 +
63382 if (!bitset)
63383 return -EINVAL;
63384
63385 @@ -1841,7 +1849,7 @@ retry:
63386
63387 restart = &current_thread_info()->restart_block;
63388 restart->fn = futex_wait_restart;
63389 - restart->futex.uaddr = (u32 *)uaddr;
63390 + restart->futex.uaddr = uaddr;
63391 restart->futex.val = val;
63392 restart->futex.time = abs_time->tv64;
63393 restart->futex.bitset = bitset;
63394 @@ -2203,6 +2211,8 @@ static int futex_wait_requeue_pi(u32 __u
63395 struct futex_q q;
63396 int res, ret;
63397
63398 + pax_track_stack();
63399 +
63400 if (!bitset)
63401 return -EINVAL;
63402
63403 @@ -2377,7 +2387,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
63404 {
63405 struct robust_list_head __user *head;
63406 unsigned long ret;
63407 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
63408 const struct cred *cred = current_cred(), *pcred;
63409 +#endif
63410
63411 if (!futex_cmpxchg_enabled)
63412 return -ENOSYS;
63413 @@ -2393,11 +2405,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
63414 if (!p)
63415 goto err_unlock;
63416 ret = -EPERM;
63417 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63418 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
63419 + goto err_unlock;
63420 +#else
63421 pcred = __task_cred(p);
63422 if (cred->euid != pcred->euid &&
63423 cred->euid != pcred->uid &&
63424 !capable(CAP_SYS_PTRACE))
63425 goto err_unlock;
63426 +#endif
63427 head = p->robust_list;
63428 rcu_read_unlock();
63429 }
63430 @@ -2459,7 +2476,7 @@ retry:
63431 */
63432 static inline int fetch_robust_entry(struct robust_list __user **entry,
63433 struct robust_list __user * __user *head,
63434 - int *pi)
63435 + unsigned int *pi)
63436 {
63437 unsigned long uentry;
63438
63439 @@ -2640,6 +2657,7 @@ static int __init futex_init(void)
63440 {
63441 u32 curval;
63442 int i;
63443 + mm_segment_t oldfs;
63444
63445 /*
63446 * This will fail and we want it. Some arch implementations do
63447 @@ -2651,7 +2669,10 @@ static int __init futex_init(void)
63448 * implementation, the non functional ones will return
63449 * -ENOSYS.
63450 */
63451 + oldfs = get_fs();
63452 + set_fs(USER_DS);
63453 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
63454 + set_fs(oldfs);
63455 if (curval == -EFAULT)
63456 futex_cmpxchg_enabled = 1;
63457
63458 diff -urNp linux-2.6.32.44/kernel/futex_compat.c linux-2.6.32.44/kernel/futex_compat.c
63459 --- linux-2.6.32.44/kernel/futex_compat.c 2011-03-27 14:31:47.000000000 -0400
63460 +++ linux-2.6.32.44/kernel/futex_compat.c 2011-04-17 15:56:46.000000000 -0400
63461 @@ -10,6 +10,7 @@
63462 #include <linux/compat.h>
63463 #include <linux/nsproxy.h>
63464 #include <linux/futex.h>
63465 +#include <linux/ptrace.h>
63466
63467 #include <asm/uaccess.h>
63468
63469 @@ -135,7 +136,10 @@ compat_sys_get_robust_list(int pid, comp
63470 {
63471 struct compat_robust_list_head __user *head;
63472 unsigned long ret;
63473 - const struct cred *cred = current_cred(), *pcred;
63474 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
63475 + const struct cred *cred = current_cred();
63476 + const struct cred *pcred;
63477 +#endif
63478
63479 if (!futex_cmpxchg_enabled)
63480 return -ENOSYS;
63481 @@ -151,11 +155,16 @@ compat_sys_get_robust_list(int pid, comp
63482 if (!p)
63483 goto err_unlock;
63484 ret = -EPERM;
63485 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63486 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
63487 + goto err_unlock;
63488 +#else
63489 pcred = __task_cred(p);
63490 if (cred->euid != pcred->euid &&
63491 cred->euid != pcred->uid &&
63492 !capable(CAP_SYS_PTRACE))
63493 goto err_unlock;
63494 +#endif
63495 head = p->compat_robust_list;
63496 read_unlock(&tasklist_lock);
63497 }
63498 diff -urNp linux-2.6.32.44/kernel/gcov/base.c linux-2.6.32.44/kernel/gcov/base.c
63499 --- linux-2.6.32.44/kernel/gcov/base.c 2011-03-27 14:31:47.000000000 -0400
63500 +++ linux-2.6.32.44/kernel/gcov/base.c 2011-04-17 15:56:46.000000000 -0400
63501 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
63502 }
63503
63504 #ifdef CONFIG_MODULES
63505 -static inline int within(void *addr, void *start, unsigned long size)
63506 -{
63507 - return ((addr >= start) && (addr < start + size));
63508 -}
63509 -
63510 /* Update list and generate events when modules are unloaded. */
63511 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
63512 void *data)
63513 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
63514 prev = NULL;
63515 /* Remove entries located in module from linked list. */
63516 for (info = gcov_info_head; info; info = info->next) {
63517 - if (within(info, mod->module_core, mod->core_size)) {
63518 + if (within_module_core_rw((unsigned long)info, mod)) {
63519 if (prev)
63520 prev->next = info->next;
63521 else
63522 diff -urNp linux-2.6.32.44/kernel/hrtimer.c linux-2.6.32.44/kernel/hrtimer.c
63523 --- linux-2.6.32.44/kernel/hrtimer.c 2011-03-27 14:31:47.000000000 -0400
63524 +++ linux-2.6.32.44/kernel/hrtimer.c 2011-04-17 15:56:46.000000000 -0400
63525 @@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
63526 local_irq_restore(flags);
63527 }
63528
63529 -static void run_hrtimer_softirq(struct softirq_action *h)
63530 +static void run_hrtimer_softirq(void)
63531 {
63532 hrtimer_peek_ahead_timers();
63533 }
63534 diff -urNp linux-2.6.32.44/kernel/kallsyms.c linux-2.6.32.44/kernel/kallsyms.c
63535 --- linux-2.6.32.44/kernel/kallsyms.c 2011-03-27 14:31:47.000000000 -0400
63536 +++ linux-2.6.32.44/kernel/kallsyms.c 2011-04-17 15:56:46.000000000 -0400
63537 @@ -11,6 +11,9 @@
63538 * Changed the compression method from stem compression to "table lookup"
63539 * compression (see scripts/kallsyms.c for a more complete description)
63540 */
63541 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63542 +#define __INCLUDED_BY_HIDESYM 1
63543 +#endif
63544 #include <linux/kallsyms.h>
63545 #include <linux/module.h>
63546 #include <linux/init.h>
63547 @@ -51,12 +54,33 @@ extern const unsigned long kallsyms_mark
63548
63549 static inline int is_kernel_inittext(unsigned long addr)
63550 {
63551 + if (system_state != SYSTEM_BOOTING)
63552 + return 0;
63553 +
63554 if (addr >= (unsigned long)_sinittext
63555 && addr <= (unsigned long)_einittext)
63556 return 1;
63557 return 0;
63558 }
63559
63560 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63561 +#ifdef CONFIG_MODULES
63562 +static inline int is_module_text(unsigned long addr)
63563 +{
63564 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
63565 + return 1;
63566 +
63567 + addr = ktla_ktva(addr);
63568 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
63569 +}
63570 +#else
63571 +static inline int is_module_text(unsigned long addr)
63572 +{
63573 + return 0;
63574 +}
63575 +#endif
63576 +#endif
63577 +
63578 static inline int is_kernel_text(unsigned long addr)
63579 {
63580 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
63581 @@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigne
63582
63583 static inline int is_kernel(unsigned long addr)
63584 {
63585 +
63586 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63587 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
63588 + return 1;
63589 +
63590 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
63591 +#else
63592 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
63593 +#endif
63594 +
63595 return 1;
63596 return in_gate_area_no_task(addr);
63597 }
63598
63599 static int is_ksym_addr(unsigned long addr)
63600 {
63601 +
63602 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63603 + if (is_module_text(addr))
63604 + return 0;
63605 +#endif
63606 +
63607 if (all_var)
63608 return is_kernel(addr);
63609
63610 @@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(st
63611
63612 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
63613 {
63614 - iter->name[0] = '\0';
63615 iter->nameoff = get_symbol_offset(new_pos);
63616 iter->pos = new_pos;
63617 }
63618 @@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, vo
63619 {
63620 struct kallsym_iter *iter = m->private;
63621
63622 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63623 + if (current_uid())
63624 + return 0;
63625 +#endif
63626 +
63627 /* Some debugging symbols have no name. Ignore them. */
63628 if (!iter->name[0])
63629 return 0;
63630 @@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *i
63631 struct kallsym_iter *iter;
63632 int ret;
63633
63634 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
63635 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
63636 if (!iter)
63637 return -ENOMEM;
63638 reset_iter(iter, 0);
63639 diff -urNp linux-2.6.32.44/kernel/kgdb.c linux-2.6.32.44/kernel/kgdb.c
63640 --- linux-2.6.32.44/kernel/kgdb.c 2011-04-17 17:00:52.000000000 -0400
63641 +++ linux-2.6.32.44/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
63642 @@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
63643 /* Guard for recursive entry */
63644 static int exception_level;
63645
63646 -static struct kgdb_io *kgdb_io_ops;
63647 +static const struct kgdb_io *kgdb_io_ops;
63648 static DEFINE_SPINLOCK(kgdb_registration_lock);
63649
63650 /* kgdb console driver is loaded */
63651 @@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1)
63652 */
63653 static atomic_t passive_cpu_wait[NR_CPUS];
63654 static atomic_t cpu_in_kgdb[NR_CPUS];
63655 -atomic_t kgdb_setting_breakpoint;
63656 +atomic_unchecked_t kgdb_setting_breakpoint;
63657
63658 struct task_struct *kgdb_usethread;
63659 struct task_struct *kgdb_contthread;
63660 @@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBY
63661 sizeof(unsigned long)];
63662
63663 /* to keep track of the CPU which is doing the single stepping*/
63664 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63665 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63666
63667 /*
63668 * If you are debugging a problem where roundup (the collection of
63669 @@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
63670 return 0;
63671 if (kgdb_connected)
63672 return 1;
63673 - if (atomic_read(&kgdb_setting_breakpoint))
63674 + if (atomic_read_unchecked(&kgdb_setting_breakpoint))
63675 return 1;
63676 if (print_wait)
63677 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
63678 @@ -1426,8 +1426,8 @@ acquirelock:
63679 * instance of the exception handler wanted to come into the
63680 * debugger on a different CPU via a single step
63681 */
63682 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
63683 - atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
63684 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
63685 + atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
63686
63687 atomic_set(&kgdb_active, -1);
63688 touch_softlockup_watchdog();
63689 @@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void
63690 *
63691 * Register it with the KGDB core.
63692 */
63693 -int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
63694 +int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
63695 {
63696 int err;
63697
63698 @@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_modul
63699 *
63700 * Unregister it with the KGDB core.
63701 */
63702 -void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
63703 +void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
63704 {
63705 BUG_ON(kgdb_connected);
63706
63707 @@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_mod
63708 */
63709 void kgdb_breakpoint(void)
63710 {
63711 - atomic_set(&kgdb_setting_breakpoint, 1);
63712 + atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
63713 wmb(); /* Sync point before breakpoint */
63714 arch_kgdb_breakpoint();
63715 wmb(); /* Sync point after breakpoint */
63716 - atomic_set(&kgdb_setting_breakpoint, 0);
63717 + atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
63718 }
63719 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
63720
63721 diff -urNp linux-2.6.32.44/kernel/kmod.c linux-2.6.32.44/kernel/kmod.c
63722 --- linux-2.6.32.44/kernel/kmod.c 2011-03-27 14:31:47.000000000 -0400
63723 +++ linux-2.6.32.44/kernel/kmod.c 2011-04-17 15:56:46.000000000 -0400
63724 @@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
63725 * If module auto-loading support is disabled then this function
63726 * becomes a no-operation.
63727 */
63728 -int __request_module(bool wait, const char *fmt, ...)
63729 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
63730 {
63731 - va_list args;
63732 char module_name[MODULE_NAME_LEN];
63733 unsigned int max_modprobes;
63734 int ret;
63735 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
63736 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
63737 static char *envp[] = { "HOME=/",
63738 "TERM=linux",
63739 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
63740 @@ -84,12 +83,24 @@ int __request_module(bool wait, const ch
63741 if (ret)
63742 return ret;
63743
63744 - va_start(args, fmt);
63745 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
63746 - va_end(args);
63747 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
63748 if (ret >= MODULE_NAME_LEN)
63749 return -ENAMETOOLONG;
63750
63751 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63752 + if (!current_uid()) {
63753 + /* hack to workaround consolekit/udisks stupidity */
63754 + read_lock(&tasklist_lock);
63755 + if (!strcmp(current->comm, "mount") &&
63756 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
63757 + read_unlock(&tasklist_lock);
63758 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
63759 + return -EPERM;
63760 + }
63761 + read_unlock(&tasklist_lock);
63762 + }
63763 +#endif
63764 +
63765 /* If modprobe needs a service that is in a module, we get a recursive
63766 * loop. Limit the number of running kmod threads to max_threads/2 or
63767 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
63768 @@ -121,6 +132,48 @@ int __request_module(bool wait, const ch
63769 atomic_dec(&kmod_concurrent);
63770 return ret;
63771 }
63772 +
63773 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
63774 +{
63775 + va_list args;
63776 + int ret;
63777 +
63778 + va_start(args, fmt);
63779 + ret = ____request_module(wait, module_param, fmt, args);
63780 + va_end(args);
63781 +
63782 + return ret;
63783 +}
63784 +
63785 +int __request_module(bool wait, const char *fmt, ...)
63786 +{
63787 + va_list args;
63788 + int ret;
63789 +
63790 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63791 + if (current_uid()) {
63792 + char module_param[MODULE_NAME_LEN];
63793 +
63794 + memset(module_param, 0, sizeof(module_param));
63795 +
63796 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
63797 +
63798 + va_start(args, fmt);
63799 + ret = ____request_module(wait, module_param, fmt, args);
63800 + va_end(args);
63801 +
63802 + return ret;
63803 + }
63804 +#endif
63805 +
63806 + va_start(args, fmt);
63807 + ret = ____request_module(wait, NULL, fmt, args);
63808 + va_end(args);
63809 +
63810 + return ret;
63811 +}
63812 +
63813 +
63814 EXPORT_SYMBOL(__request_module);
63815 #endif /* CONFIG_MODULES */
63816
63817 diff -urNp linux-2.6.32.44/kernel/kprobes.c linux-2.6.32.44/kernel/kprobes.c
63818 --- linux-2.6.32.44/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
63819 +++ linux-2.6.32.44/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
63820 @@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_
63821 * kernel image and loaded module images reside. This is required
63822 * so x86_64 can correctly handle the %rip-relative fixups.
63823 */
63824 - kip->insns = module_alloc(PAGE_SIZE);
63825 + kip->insns = module_alloc_exec(PAGE_SIZE);
63826 if (!kip->insns) {
63827 kfree(kip);
63828 return NULL;
63829 @@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(st
63830 */
63831 if (!list_is_singular(&kprobe_insn_pages)) {
63832 list_del(&kip->list);
63833 - module_free(NULL, kip->insns);
63834 + module_free_exec(NULL, kip->insns);
63835 kfree(kip);
63836 }
63837 return 1;
63838 @@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
63839 {
63840 int i, err = 0;
63841 unsigned long offset = 0, size = 0;
63842 - char *modname, namebuf[128];
63843 + char *modname, namebuf[KSYM_NAME_LEN];
63844 const char *symbol_name;
63845 void *addr;
63846 struct kprobe_blackpoint *kb;
63847 @@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(st
63848 const char *sym = NULL;
63849 unsigned int i = *(loff_t *) v;
63850 unsigned long offset = 0;
63851 - char *modname, namebuf[128];
63852 + char *modname, namebuf[KSYM_NAME_LEN];
63853
63854 head = &kprobe_table[i];
63855 preempt_disable();
63856 diff -urNp linux-2.6.32.44/kernel/lockdep.c linux-2.6.32.44/kernel/lockdep.c
63857 --- linux-2.6.32.44/kernel/lockdep.c 2011-06-25 12:55:35.000000000 -0400
63858 +++ linux-2.6.32.44/kernel/lockdep.c 2011-06-25 12:56:37.000000000 -0400
63859 @@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_t
63860 /*
63861 * Various lockdep statistics:
63862 */
63863 -atomic_t chain_lookup_hits;
63864 -atomic_t chain_lookup_misses;
63865 -atomic_t hardirqs_on_events;
63866 -atomic_t hardirqs_off_events;
63867 -atomic_t redundant_hardirqs_on;
63868 -atomic_t redundant_hardirqs_off;
63869 -atomic_t softirqs_on_events;
63870 -atomic_t softirqs_off_events;
63871 -atomic_t redundant_softirqs_on;
63872 -atomic_t redundant_softirqs_off;
63873 -atomic_t nr_unused_locks;
63874 -atomic_t nr_cyclic_checks;
63875 -atomic_t nr_find_usage_forwards_checks;
63876 -atomic_t nr_find_usage_backwards_checks;
63877 +atomic_unchecked_t chain_lookup_hits;
63878 +atomic_unchecked_t chain_lookup_misses;
63879 +atomic_unchecked_t hardirqs_on_events;
63880 +atomic_unchecked_t hardirqs_off_events;
63881 +atomic_unchecked_t redundant_hardirqs_on;
63882 +atomic_unchecked_t redundant_hardirqs_off;
63883 +atomic_unchecked_t softirqs_on_events;
63884 +atomic_unchecked_t softirqs_off_events;
63885 +atomic_unchecked_t redundant_softirqs_on;
63886 +atomic_unchecked_t redundant_softirqs_off;
63887 +atomic_unchecked_t nr_unused_locks;
63888 +atomic_unchecked_t nr_cyclic_checks;
63889 +atomic_unchecked_t nr_find_usage_forwards_checks;
63890 +atomic_unchecked_t nr_find_usage_backwards_checks;
63891 #endif
63892
63893 /*
63894 @@ -577,6 +577,10 @@ static int static_obj(void *obj)
63895 int i;
63896 #endif
63897
63898 +#ifdef CONFIG_PAX_KERNEXEC
63899 + start = ktla_ktva(start);
63900 +#endif
63901 +
63902 /*
63903 * static variable?
63904 */
63905 @@ -592,8 +596,7 @@ static int static_obj(void *obj)
63906 */
63907 for_each_possible_cpu(i) {
63908 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
63909 - end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
63910 - + per_cpu_offset(i);
63911 + end = start + PERCPU_ENOUGH_ROOM;
63912
63913 if ((addr >= start) && (addr < end))
63914 return 1;
63915 @@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *
63916 if (!static_obj(lock->key)) {
63917 debug_locks_off();
63918 printk("INFO: trying to register non-static key.\n");
63919 + printk("lock:%pS key:%pS.\n", lock, lock->key);
63920 printk("the code is fine but needs lockdep annotation.\n");
63921 printk("turning off the locking correctness validator.\n");
63922 dump_stack();
63923 @@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep
63924 if (!class)
63925 return 0;
63926 }
63927 - debug_atomic_inc((atomic_t *)&class->ops);
63928 + debug_atomic_inc((atomic_unchecked_t *)&class->ops);
63929 if (very_verbose(class)) {
63930 printk("\nacquire class [%p] %s", class->key, class->name);
63931 if (class->name_version > 1)
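
The kernel/lockdep.c hunk above and the lockdep_internals.h and lockdep_proc.c hunks that follow convert lockdep's statistics counters from atomic_t to atomic_unchecked_t and route them through debug_atomic_inc/dec/read wrappers built on the *_unchecked operations. Under the PAX_REFCOUNT hardening added elsewhere in this patch, the ordinary atomic_t operations trap on signed overflow; counters that are purely statistical and may legitimately wrap are moved to the unchecked variants so they cannot trigger that trap. A rough illustrative sketch of the distinction (simplified x86 flavor, not a hunk from this patch):

typedef struct { int counter; } atomic_unchecked_t;

/* plain increment, no overflow detection: fine for statistics */
static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	asm volatile("lock; incl %0" : "+m" (v->counter));
}

/* roughly what PAX_REFCOUNT turns the checked increment into:
   undo the increment and trap if it overflowed */
static inline void atomic_inc(atomic_t *v)
{
	asm volatile("lock; incl %0\n"
		     "jno 0f\n"
		     "lock; decl %0\n"
		     "int $4\n"
		     "0:\n"
		     : "+m" (v->counter));
}
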
63932 diff -urNp linux-2.6.32.44/kernel/lockdep_internals.h linux-2.6.32.44/kernel/lockdep_internals.h
63933 --- linux-2.6.32.44/kernel/lockdep_internals.h 2011-03-27 14:31:47.000000000 -0400
63934 +++ linux-2.6.32.44/kernel/lockdep_internals.h 2011-04-17 15:56:46.000000000 -0400
63935 @@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_
63936 /*
63937 * Various lockdep statistics:
63938 */
63939 -extern atomic_t chain_lookup_hits;
63940 -extern atomic_t chain_lookup_misses;
63941 -extern atomic_t hardirqs_on_events;
63942 -extern atomic_t hardirqs_off_events;
63943 -extern atomic_t redundant_hardirqs_on;
63944 -extern atomic_t redundant_hardirqs_off;
63945 -extern atomic_t softirqs_on_events;
63946 -extern atomic_t softirqs_off_events;
63947 -extern atomic_t redundant_softirqs_on;
63948 -extern atomic_t redundant_softirqs_off;
63949 -extern atomic_t nr_unused_locks;
63950 -extern atomic_t nr_cyclic_checks;
63951 -extern atomic_t nr_cyclic_check_recursions;
63952 -extern atomic_t nr_find_usage_forwards_checks;
63953 -extern atomic_t nr_find_usage_forwards_recursions;
63954 -extern atomic_t nr_find_usage_backwards_checks;
63955 -extern atomic_t nr_find_usage_backwards_recursions;
63956 -# define debug_atomic_inc(ptr) atomic_inc(ptr)
63957 -# define debug_atomic_dec(ptr) atomic_dec(ptr)
63958 -# define debug_atomic_read(ptr) atomic_read(ptr)
63959 +extern atomic_unchecked_t chain_lookup_hits;
63960 +extern atomic_unchecked_t chain_lookup_misses;
63961 +extern atomic_unchecked_t hardirqs_on_events;
63962 +extern atomic_unchecked_t hardirqs_off_events;
63963 +extern atomic_unchecked_t redundant_hardirqs_on;
63964 +extern atomic_unchecked_t redundant_hardirqs_off;
63965 +extern atomic_unchecked_t softirqs_on_events;
63966 +extern atomic_unchecked_t softirqs_off_events;
63967 +extern atomic_unchecked_t redundant_softirqs_on;
63968 +extern atomic_unchecked_t redundant_softirqs_off;
63969 +extern atomic_unchecked_t nr_unused_locks;
63970 +extern atomic_unchecked_t nr_cyclic_checks;
63971 +extern atomic_unchecked_t nr_cyclic_check_recursions;
63972 +extern atomic_unchecked_t nr_find_usage_forwards_checks;
63973 +extern atomic_unchecked_t nr_find_usage_forwards_recursions;
63974 +extern atomic_unchecked_t nr_find_usage_backwards_checks;
63975 +extern atomic_unchecked_t nr_find_usage_backwards_recursions;
63976 +# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
63977 +# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
63978 +# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
63979 #else
63980 # define debug_atomic_inc(ptr) do { } while (0)
63981 # define debug_atomic_dec(ptr) do { } while (0)
63982 diff -urNp linux-2.6.32.44/kernel/lockdep_proc.c linux-2.6.32.44/kernel/lockdep_proc.c
63983 --- linux-2.6.32.44/kernel/lockdep_proc.c 2011-03-27 14:31:47.000000000 -0400
63984 +++ linux-2.6.32.44/kernel/lockdep_proc.c 2011-04-17 15:56:46.000000000 -0400
63985 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
63986
63987 static void print_name(struct seq_file *m, struct lock_class *class)
63988 {
63989 - char str[128];
63990 + char str[KSYM_NAME_LEN];
63991 const char *name = class->name;
63992
63993 if (!name) {
63994 diff -urNp linux-2.6.32.44/kernel/module.c linux-2.6.32.44/kernel/module.c
63995 --- linux-2.6.32.44/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
63996 +++ linux-2.6.32.44/kernel/module.c 2011-04-29 18:52:40.000000000 -0400
63997 @@ -55,6 +55,7 @@
63998 #include <linux/async.h>
63999 #include <linux/percpu.h>
64000 #include <linux/kmemleak.h>
64001 +#include <linux/grsecurity.h>
64002
64003 #define CREATE_TRACE_POINTS
64004 #include <trace/events/module.h>
64005 @@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq
64006 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
64007
64008 /* Bounds of module allocation, for speeding __module_address */
64009 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
64010 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
64011 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
64012
64013 int register_module_notifier(struct notifier_block * nb)
64014 {
64015 @@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct
64016 return true;
64017
64018 list_for_each_entry_rcu(mod, &modules, list) {
64019 - struct symsearch arr[] = {
64020 + struct symsearch modarr[] = {
64021 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
64022 NOT_GPL_ONLY, false },
64023 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
64024 @@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct
64025 #endif
64026 };
64027
64028 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
64029 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
64030 return true;
64031 }
64032 return false;
64033 @@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned lo
64034 void *ptr;
64035 int cpu;
64036
64037 - if (align > PAGE_SIZE) {
64038 + if (align-1 >= PAGE_SIZE) {
64039 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
64040 name, align, PAGE_SIZE);
64041 align = PAGE_SIZE;
64042 @@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resol
64043 * /sys/module/foo/sections stuff
64044 * J. Corbet <corbet@lwn.net>
64045 */
64046 -#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
64047 +#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
64048
64049 static inline bool sect_empty(const Elf_Shdr *sect)
64050 {
64051 @@ -1545,7 +1547,8 @@ static void free_module(struct module *m
64052 destroy_params(mod->kp, mod->num_kp);
64053
64054 /* This may be NULL, but that's OK */
64055 - module_free(mod, mod->module_init);
64056 + module_free(mod, mod->module_init_rw);
64057 + module_free_exec(mod, mod->module_init_rx);
64058 kfree(mod->args);
64059 if (mod->percpu)
64060 percpu_modfree(mod->percpu);
64061 @@ -1554,10 +1557,12 @@ static void free_module(struct module *m
64062 percpu_modfree(mod->refptr);
64063 #endif
64064 /* Free lock-classes: */
64065 - lockdep_free_key_range(mod->module_core, mod->core_size);
64066 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
64067 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
64068
64069 /* Finally, free the core (containing the module structure) */
64070 - module_free(mod, mod->module_core);
64071 + module_free_exec(mod, mod->module_core_rx);
64072 + module_free(mod, mod->module_core_rw);
64073
64074 #ifdef CONFIG_MPU
64075 update_protections(current->mm);
64076 @@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *se
64077 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
64078 int ret = 0;
64079 const struct kernel_symbol *ksym;
64080 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64081 + int is_fs_load = 0;
64082 + int register_filesystem_found = 0;
64083 + char *p;
64084 +
64085 + p = strstr(mod->args, "grsec_modharden_fs");
64086 +
64087 + if (p) {
64088 + char *endptr = p + strlen("grsec_modharden_fs");
64089 + /* copy \0 as well */
64090 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
64091 + is_fs_load = 1;
64092 + }
64093 +#endif
64094 +
64095
64096 for (i = 1; i < n; i++) {
64097 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64098 + const char *name = strtab + sym[i].st_name;
64099 +
64100 + /* it's a real shame this will never get ripped and copied
64101 + upstream! ;(
64102 + */
64103 + if (is_fs_load && !strcmp(name, "register_filesystem"))
64104 + register_filesystem_found = 1;
64105 +#endif
64106 switch (sym[i].st_shndx) {
64107 case SHN_COMMON:
64108 /* We compiled with -fno-common. These are not
64109 @@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *se
64110 strtab + sym[i].st_name, mod);
64111 /* Ok if resolved. */
64112 if (ksym) {
64113 + pax_open_kernel();
64114 sym[i].st_value = ksym->value;
64115 + pax_close_kernel();
64116 break;
64117 }
64118
64119 @@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *se
64120 secbase = (unsigned long)mod->percpu;
64121 else
64122 secbase = sechdrs[sym[i].st_shndx].sh_addr;
64123 + pax_open_kernel();
64124 sym[i].st_value += secbase;
64125 + pax_close_kernel();
64126 break;
64127 }
64128 }
64129
64130 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64131 + if (is_fs_load && !register_filesystem_found) {
64132 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
64133 + ret = -EPERM;
64134 + }
64135 +#endif
64136 +
64137 return ret;
64138 }
64139
64140 @@ -1731,11 +1771,12 @@ static void layout_sections(struct modul
64141 || s->sh_entsize != ~0UL
64142 || strstarts(secstrings + s->sh_name, ".init"))
64143 continue;
64144 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
64145 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
64146 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
64147 + else
64148 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
64149 DEBUGP("\t%s\n", secstrings + s->sh_name);
64150 }
64151 - if (m == 0)
64152 - mod->core_text_size = mod->core_size;
64153 }
64154
64155 DEBUGP("Init section allocation order:\n");
64156 @@ -1748,12 +1789,13 @@ static void layout_sections(struct modul
64157 || s->sh_entsize != ~0UL
64158 || !strstarts(secstrings + s->sh_name, ".init"))
64159 continue;
64160 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
64161 - | INIT_OFFSET_MASK);
64162 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
64163 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
64164 + else
64165 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
64166 + s->sh_entsize |= INIT_OFFSET_MASK;
64167 DEBUGP("\t%s\n", secstrings + s->sh_name);
64168 }
64169 - if (m == 0)
64170 - mod->init_text_size = mod->init_size;
64171 }
64172 }
64173
64174 @@ -1857,9 +1899,8 @@ static int is_exported(const char *name,
64175
64176 /* As per nm */
64177 static char elf_type(const Elf_Sym *sym,
64178 - Elf_Shdr *sechdrs,
64179 - const char *secstrings,
64180 - struct module *mod)
64181 + const Elf_Shdr *sechdrs,
64182 + const char *secstrings)
64183 {
64184 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
64185 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
64186 @@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struc
64187
64188 /* Put symbol section at end of init part of module. */
64189 symsect->sh_flags |= SHF_ALLOC;
64190 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
64191 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
64192 symindex) | INIT_OFFSET_MASK;
64193 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
64194
64195 @@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struc
64196 }
64197
64198 /* Append room for core symbols at end of core part. */
64199 - symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
64200 - mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
64201 + symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
64202 + mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
64203
64204 /* Put string table section at end of init part of module. */
64205 strsect->sh_flags |= SHF_ALLOC;
64206 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
64207 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
64208 strindex) | INIT_OFFSET_MASK;
64209 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
64210
64211 /* Append room for core symbols' strings at end of core part. */
64212 - *pstroffs = mod->core_size;
64213 + *pstroffs = mod->core_size_rx;
64214 __set_bit(0, strmap);
64215 - mod->core_size += bitmap_weight(strmap, strsect->sh_size);
64216 + mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
64217
64218 return symoffs;
64219 }
64220 @@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *
64221 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
64222 mod->strtab = (void *)sechdrs[strindex].sh_addr;
64223
64224 + pax_open_kernel();
64225 +
64226 /* Set types up while we still have access to sections. */
64227 for (i = 0; i < mod->num_symtab; i++)
64228 mod->symtab[i].st_info
64229 - = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
64230 + = elf_type(&mod->symtab[i], sechdrs, secstrings);
64231
64232 - mod->core_symtab = dst = mod->module_core + symoffs;
64233 + mod->core_symtab = dst = mod->module_core_rx + symoffs;
64234 src = mod->symtab;
64235 *dst = *src;
64236 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
64237 @@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *
64238 }
64239 mod->core_num_syms = ndst;
64240
64241 - mod->core_strtab = s = mod->module_core + stroffs;
64242 + mod->core_strtab = s = mod->module_core_rx + stroffs;
64243 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
64244 if (test_bit(i, strmap))
64245 *++s = mod->strtab[i];
64246 +
64247 + pax_close_kernel();
64248 }
64249 #else
64250 static inline unsigned long layout_symtab(struct module *mod,
64251 @@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _
64252 #endif
64253 }
64254
64255 -static void *module_alloc_update_bounds(unsigned long size)
64256 +static void *module_alloc_update_bounds_rw(unsigned long size)
64257 {
64258 void *ret = module_alloc(size);
64259
64260 if (ret) {
64261 /* Update module bounds. */
64262 - if ((unsigned long)ret < module_addr_min)
64263 - module_addr_min = (unsigned long)ret;
64264 - if ((unsigned long)ret + size > module_addr_max)
64265 - module_addr_max = (unsigned long)ret + size;
64266 + if ((unsigned long)ret < module_addr_min_rw)
64267 + module_addr_min_rw = (unsigned long)ret;
64268 + if ((unsigned long)ret + size > module_addr_max_rw)
64269 + module_addr_max_rw = (unsigned long)ret + size;
64270 + }
64271 + return ret;
64272 +}
64273 +
64274 +static void *module_alloc_update_bounds_rx(unsigned long size)
64275 +{
64276 + void *ret = module_alloc_exec(size);
64277 +
64278 + if (ret) {
64279 + /* Update module bounds. */
64280 + if ((unsigned long)ret < module_addr_min_rx)
64281 + module_addr_min_rx = (unsigned long)ret;
64282 + if ((unsigned long)ret + size > module_addr_max_rx)
64283 + module_addr_max_rx = (unsigned long)ret + size;
64284 }
64285 return ret;
64286 }
64287 @@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct
64288 unsigned int i;
64289
64290 /* only scan the sections containing data */
64291 - kmemleak_scan_area(mod->module_core, (unsigned long)mod -
64292 - (unsigned long)mod->module_core,
64293 + kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
64294 + (unsigned long)mod->module_core_rw,
64295 sizeof(struct module), GFP_KERNEL);
64296
64297 for (i = 1; i < hdr->e_shnum; i++) {
64298 @@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct
64299 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
64300 continue;
64301
64302 - kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
64303 - (unsigned long)mod->module_core,
64304 + kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
64305 + (unsigned long)mod->module_core_rw,
64306 sechdrs[i].sh_size, GFP_KERNEL);
64307 }
64308 }
64309 @@ -2263,7 +2322,7 @@ static noinline struct module *load_modu
64310 secstrings, &stroffs, strmap);
64311
64312 /* Do the allocs. */
64313 - ptr = module_alloc_update_bounds(mod->core_size);
64314 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
64315 /*
64316 * The pointer to this block is stored in the module structure
64317 * which is inside the block. Just mark it as not being a
64318 @@ -2274,23 +2333,47 @@ static noinline struct module *load_modu
64319 err = -ENOMEM;
64320 goto free_percpu;
64321 }
64322 - memset(ptr, 0, mod->core_size);
64323 - mod->module_core = ptr;
64324 + memset(ptr, 0, mod->core_size_rw);
64325 + mod->module_core_rw = ptr;
64326
64327 - ptr = module_alloc_update_bounds(mod->init_size);
64328 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
64329 /*
64330 * The pointer to this block is stored in the module structure
64331 * which is inside the block. This block doesn't need to be
64332 * scanned as it contains data and code that will be freed
64333 * after the module is initialized.
64334 */
64335 - kmemleak_ignore(ptr);
64336 - if (!ptr && mod->init_size) {
64337 + kmemleak_not_leak(ptr);
64338 + if (!ptr && mod->init_size_rw) {
64339 + err = -ENOMEM;
64340 + goto free_core_rw;
64341 + }
64342 + memset(ptr, 0, mod->init_size_rw);
64343 + mod->module_init_rw = ptr;
64344 +
64345 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
64346 + kmemleak_not_leak(ptr);
64347 + if (!ptr) {
64348 err = -ENOMEM;
64349 - goto free_core;
64350 + goto free_init_rw;
64351 }
64352 - memset(ptr, 0, mod->init_size);
64353 - mod->module_init = ptr;
64354 +
64355 + pax_open_kernel();
64356 + memset(ptr, 0, mod->core_size_rx);
64357 + pax_close_kernel();
64358 + mod->module_core_rx = ptr;
64359 +
64360 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
64361 + kmemleak_not_leak(ptr);
64362 + if (!ptr && mod->init_size_rx) {
64363 + err = -ENOMEM;
64364 + goto free_core_rx;
64365 + }
64366 +
64367 + pax_open_kernel();
64368 + memset(ptr, 0, mod->init_size_rx);
64369 + pax_close_kernel();
64370 + mod->module_init_rx = ptr;
64371
64372 /* Transfer each section which specifies SHF_ALLOC */
64373 DEBUGP("final section addresses:\n");
64374 @@ -2300,17 +2383,45 @@ static noinline struct module *load_modu
64375 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
64376 continue;
64377
64378 - if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
64379 - dest = mod->module_init
64380 - + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
64381 - else
64382 - dest = mod->module_core + sechdrs[i].sh_entsize;
64383 + if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
64384 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
64385 + dest = mod->module_init_rw
64386 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
64387 + else
64388 + dest = mod->module_init_rx
64389 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
64390 + } else {
64391 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
64392 + dest = mod->module_core_rw + sechdrs[i].sh_entsize;
64393 + else
64394 + dest = mod->module_core_rx + sechdrs[i].sh_entsize;
64395 + }
64396 +
64397 + if (sechdrs[i].sh_type != SHT_NOBITS) {
64398
64399 - if (sechdrs[i].sh_type != SHT_NOBITS)
64400 - memcpy(dest, (void *)sechdrs[i].sh_addr,
64401 - sechdrs[i].sh_size);
64402 +#ifdef CONFIG_PAX_KERNEXEC
64403 +#ifdef CONFIG_X86_64
64404 + if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
64405 + set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
64406 +#endif
64407 + if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
64408 + pax_open_kernel();
64409 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
64410 + pax_close_kernel();
64411 + } else
64412 +#endif
64413 +
64414 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
64415 + }
64416 /* Update sh_addr to point to copy in image. */
64417 - sechdrs[i].sh_addr = (unsigned long)dest;
64418 +
64419 +#ifdef CONFIG_PAX_KERNEXEC
64420 + if (sechdrs[i].sh_flags & SHF_EXECINSTR)
64421 + sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
64422 + else
64423 +#endif
64424 +
64425 + sechdrs[i].sh_addr = (unsigned long)dest;
64426 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
64427 }
64428 /* Module has been moved. */
64429 @@ -2322,7 +2433,7 @@ static noinline struct module *load_modu
64430 mod->name);
64431 if (!mod->refptr) {
64432 err = -ENOMEM;
64433 - goto free_init;
64434 + goto free_init_rx;
64435 }
64436 #endif
64437 /* Now we've moved module, initialize linked lists, etc. */
64438 @@ -2351,6 +2462,31 @@ static noinline struct module *load_modu
64439 /* Set up MODINFO_ATTR fields */
64440 setup_modinfo(mod, sechdrs, infoindex);
64441
64442 + mod->args = args;
64443 +
64444 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64445 + {
64446 + char *p, *p2;
64447 +
64448 + if (strstr(mod->args, "grsec_modharden_netdev")) {
64449 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
64450 + err = -EPERM;
64451 + goto cleanup;
64452 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
64453 + p += strlen("grsec_modharden_normal");
64454 + p2 = strstr(p, "_");
64455 + if (p2) {
64456 + *p2 = '\0';
64457 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
64458 + *p2 = '_';
64459 + }
64460 + err = -EPERM;
64461 + goto cleanup;
64462 + }
64463 + }
64464 +#endif
64465 +
64466 +
64467 /* Fix up syms, so that st_value is a pointer to location. */
64468 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
64469 mod);
64470 @@ -2431,8 +2567,8 @@ static noinline struct module *load_modu
64471
64472 /* Now do relocations. */
64473 for (i = 1; i < hdr->e_shnum; i++) {
64474 - const char *strtab = (char *)sechdrs[strindex].sh_addr;
64475 unsigned int info = sechdrs[i].sh_info;
64476 + strtab = (char *)sechdrs[strindex].sh_addr;
64477
64478 /* Not a valid relocation section? */
64479 if (info >= hdr->e_shnum)
64480 @@ -2493,16 +2629,15 @@ static noinline struct module *load_modu
64481 * Do it before processing of module parameters, so the module
64482 * can provide parameter accessor functions of its own.
64483 */
64484 - if (mod->module_init)
64485 - flush_icache_range((unsigned long)mod->module_init,
64486 - (unsigned long)mod->module_init
64487 - + mod->init_size);
64488 - flush_icache_range((unsigned long)mod->module_core,
64489 - (unsigned long)mod->module_core + mod->core_size);
64490 + if (mod->module_init_rx)
64491 + flush_icache_range((unsigned long)mod->module_init_rx,
64492 + (unsigned long)mod->module_init_rx
64493 + + mod->init_size_rx);
64494 + flush_icache_range((unsigned long)mod->module_core_rx,
64495 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
64496
64497 set_fs(old_fs);
64498
64499 - mod->args = args;
64500 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
64501 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
64502 mod->name);
64503 @@ -2546,12 +2681,16 @@ static noinline struct module *load_modu
64504 free_unload:
64505 module_unload_free(mod);
64506 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
64507 + free_init_rx:
64508 percpu_modfree(mod->refptr);
64509 - free_init:
64510 #endif
64511 - module_free(mod, mod->module_init);
64512 - free_core:
64513 - module_free(mod, mod->module_core);
64514 + module_free_exec(mod, mod->module_init_rx);
64515 + free_core_rx:
64516 + module_free_exec(mod, mod->module_core_rx);
64517 + free_init_rw:
64518 + module_free(mod, mod->module_init_rw);
64519 + free_core_rw:
64520 + module_free(mod, mod->module_core_rw);
64521 /* mod will be freed with core. Don't access it beyond this line! */
64522 free_percpu:
64523 if (percpu)
64524 @@ -2653,10 +2792,12 @@ SYSCALL_DEFINE3(init_module, void __user
64525 mod->symtab = mod->core_symtab;
64526 mod->strtab = mod->core_strtab;
64527 #endif
64528 - module_free(mod, mod->module_init);
64529 - mod->module_init = NULL;
64530 - mod->init_size = 0;
64531 - mod->init_text_size = 0;
64532 + module_free(mod, mod->module_init_rw);
64533 + module_free_exec(mod, mod->module_init_rx);
64534 + mod->module_init_rw = NULL;
64535 + mod->module_init_rx = NULL;
64536 + mod->init_size_rw = 0;
64537 + mod->init_size_rx = 0;
64538 mutex_unlock(&module_mutex);
64539
64540 return 0;
64541 @@ -2687,10 +2828,16 @@ static const char *get_ksymbol(struct mo
64542 unsigned long nextval;
64543
64544 /* At worse, next value is at end of module */
64545 - if (within_module_init(addr, mod))
64546 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
64547 + if (within_module_init_rx(addr, mod))
64548 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
64549 + else if (within_module_init_rw(addr, mod))
64550 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
64551 + else if (within_module_core_rx(addr, mod))
64552 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
64553 + else if (within_module_core_rw(addr, mod))
64554 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
64555 else
64556 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
64557 + return NULL;
64558
64559 /* Scan for closest preceeding symbol, and next symbol. (ELF
64560 starts real symbols at 1). */
64561 @@ -2936,7 +3083,7 @@ static int m_show(struct seq_file *m, vo
64562 char buf[8];
64563
64564 seq_printf(m, "%s %u",
64565 - mod->name, mod->init_size + mod->core_size);
64566 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
64567 print_unload_info(m, mod);
64568
64569 /* Informative for users. */
64570 @@ -2945,7 +3092,7 @@ static int m_show(struct seq_file *m, vo
64571 mod->state == MODULE_STATE_COMING ? "Loading":
64572 "Live");
64573 /* Used by oprofile and other similar tools. */
64574 - seq_printf(m, " 0x%p", mod->module_core);
64575 + seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
64576
64577 /* Taints info */
64578 if (mod->taints)
64579 @@ -2981,7 +3128,17 @@ static const struct file_operations proc
64580
64581 static int __init proc_modules_init(void)
64582 {
64583 +#ifndef CONFIG_GRKERNSEC_HIDESYM
64584 +#ifdef CONFIG_GRKERNSEC_PROC_USER
64585 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64586 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64587 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
64588 +#else
64589 proc_create("modules", 0, NULL, &proc_modules_operations);
64590 +#endif
64591 +#else
64592 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64593 +#endif
64594 return 0;
64595 }
64596 module_init(proc_modules_init);
64597 @@ -3040,12 +3197,12 @@ struct module *__module_address(unsigned
64598 {
64599 struct module *mod;
64600
64601 - if (addr < module_addr_min || addr > module_addr_max)
64602 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
64603 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
64604 return NULL;
64605
64606 list_for_each_entry_rcu(mod, &modules, list)
64607 - if (within_module_core(addr, mod)
64608 - || within_module_init(addr, mod))
64609 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
64610 return mod;
64611 return NULL;
64612 }
64613 @@ -3079,11 +3236,20 @@ bool is_module_text_address(unsigned lon
64614 */
64615 struct module *__module_text_address(unsigned long addr)
64616 {
64617 - struct module *mod = __module_address(addr);
64618 + struct module *mod;
64619 +
64620 +#ifdef CONFIG_X86_32
64621 + addr = ktla_ktva(addr);
64622 +#endif
64623 +
64624 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
64625 + return NULL;
64626 +
64627 + mod = __module_address(addr);
64628 +
64629 if (mod) {
64630 /* Make sure it's within the text section. */
64631 - if (!within(addr, mod->module_init, mod->init_text_size)
64632 - && !within(addr, mod->module_core, mod->core_text_size))
64633 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
64634 mod = NULL;
64635 }
64636 return mod;
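
The kernel/module.c changes above split every module into two mappings: a writable one (module_core_rw/module_init_rw, obtained with module_alloc()) and an executable one (module_core_rx/module_init_rx, obtained with module_alloc_exec()), with writes into the executable half bracketed by pax_open_kernel()/pax_close_kernel(). Both layout_sections() and the SHF_ALLOC copy loop decide which half an ELF section belongs to with the same test; restated as a helper for clarity (illustrative only, the patch open-codes the condition):

#include <linux/module.h>

/* Writable or non-allocated sections go to the RW block; everything
   else (code and read-only data) goes to the RX block. */
static inline bool module_section_is_rw(const Elf_Shdr *s)
{
	return (s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC);
}

The error paths mirror the split: the free_core_rw/free_core_rx/free_init_rw/free_init_rx labels above tear the four regions down separately, using module_free() for the RW halves and module_free_exec() for the RX halves.
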
64637 diff -urNp linux-2.6.32.44/kernel/mutex.c linux-2.6.32.44/kernel/mutex.c
64638 --- linux-2.6.32.44/kernel/mutex.c 2011-03-27 14:31:47.000000000 -0400
64639 +++ linux-2.6.32.44/kernel/mutex.c 2011-04-17 15:56:46.000000000 -0400
64640 @@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock,
64641 */
64642
64643 for (;;) {
64644 - struct thread_info *owner;
64645 + struct task_struct *owner;
64646
64647 /*
64648 * If we own the BKL, then don't spin. The owner of
64649 @@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock,
64650 spin_lock_mutex(&lock->wait_lock, flags);
64651
64652 debug_mutex_lock_common(lock, &waiter);
64653 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
64654 + debug_mutex_add_waiter(lock, &waiter, task);
64655
64656 /* add waiting tasks to the end of the waitqueue (FIFO): */
64657 list_add_tail(&waiter.list, &lock->wait_list);
64658 @@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock,
64659 * TASK_UNINTERRUPTIBLE case.)
64660 */
64661 if (unlikely(signal_pending_state(state, task))) {
64662 - mutex_remove_waiter(lock, &waiter,
64663 - task_thread_info(task));
64664 + mutex_remove_waiter(lock, &waiter, task);
64665 mutex_release(&lock->dep_map, 1, ip);
64666 spin_unlock_mutex(&lock->wait_lock, flags);
64667
64668 @@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock,
64669 done:
64670 lock_acquired(&lock->dep_map, ip);
64671 /* got the lock - rejoice! */
64672 - mutex_remove_waiter(lock, &waiter, current_thread_info());
64673 + mutex_remove_waiter(lock, &waiter, task);
64674 mutex_set_owner(lock);
64675
64676 /* set it to 0 if there are no waiters left: */
64677 diff -urNp linux-2.6.32.44/kernel/mutex-debug.c linux-2.6.32.44/kernel/mutex-debug.c
64678 --- linux-2.6.32.44/kernel/mutex-debug.c 2011-03-27 14:31:47.000000000 -0400
64679 +++ linux-2.6.32.44/kernel/mutex-debug.c 2011-04-17 15:56:46.000000000 -0400
64680 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
64681 }
64682
64683 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64684 - struct thread_info *ti)
64685 + struct task_struct *task)
64686 {
64687 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
64688
64689 /* Mark the current thread as blocked on the lock: */
64690 - ti->task->blocked_on = waiter;
64691 + task->blocked_on = waiter;
64692 }
64693
64694 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64695 - struct thread_info *ti)
64696 + struct task_struct *task)
64697 {
64698 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
64699 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
64700 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
64701 - ti->task->blocked_on = NULL;
64702 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
64703 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
64704 + task->blocked_on = NULL;
64705
64706 list_del_init(&waiter->list);
64707 waiter->task = NULL;
64708 @@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lo
64709 return;
64710
64711 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
64712 - DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
64713 + DEBUG_LOCKS_WARN_ON(lock->owner != current);
64714 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
64715 mutex_clear_owner(lock);
64716 }
64717 diff -urNp linux-2.6.32.44/kernel/mutex-debug.h linux-2.6.32.44/kernel/mutex-debug.h
64718 --- linux-2.6.32.44/kernel/mutex-debug.h 2011-03-27 14:31:47.000000000 -0400
64719 +++ linux-2.6.32.44/kernel/mutex-debug.h 2011-04-17 15:56:46.000000000 -0400
64720 @@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(stru
64721 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
64722 extern void debug_mutex_add_waiter(struct mutex *lock,
64723 struct mutex_waiter *waiter,
64724 - struct thread_info *ti);
64725 + struct task_struct *task);
64726 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64727 - struct thread_info *ti);
64728 + struct task_struct *task);
64729 extern void debug_mutex_unlock(struct mutex *lock);
64730 extern void debug_mutex_init(struct mutex *lock, const char *name,
64731 struct lock_class_key *key);
64732
64733 static inline void mutex_set_owner(struct mutex *lock)
64734 {
64735 - lock->owner = current_thread_info();
64736 + lock->owner = current;
64737 }
64738
64739 static inline void mutex_clear_owner(struct mutex *lock)
64740 diff -urNp linux-2.6.32.44/kernel/mutex.h linux-2.6.32.44/kernel/mutex.h
64741 --- linux-2.6.32.44/kernel/mutex.h 2011-03-27 14:31:47.000000000 -0400
64742 +++ linux-2.6.32.44/kernel/mutex.h 2011-04-17 15:56:46.000000000 -0400
64743 @@ -19,7 +19,7 @@
64744 #ifdef CONFIG_SMP
64745 static inline void mutex_set_owner(struct mutex *lock)
64746 {
64747 - lock->owner = current_thread_info();
64748 + lock->owner = current;
64749 }
64750
64751 static inline void mutex_clear_owner(struct mutex *lock)
64752 diff -urNp linux-2.6.32.44/kernel/panic.c linux-2.6.32.44/kernel/panic.c
64753 --- linux-2.6.32.44/kernel/panic.c 2011-03-27 14:31:47.000000000 -0400
64754 +++ linux-2.6.32.44/kernel/panic.c 2011-04-17 15:56:46.000000000 -0400
64755 @@ -352,7 +352,7 @@ static void warn_slowpath_common(const c
64756 const char *board;
64757
64758 printk(KERN_WARNING "------------[ cut here ]------------\n");
64759 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
64760 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
64761 board = dmi_get_system_info(DMI_PRODUCT_NAME);
64762 if (board)
64763 printk(KERN_WARNING "Hardware name: %s\n", board);
64764 @@ -392,7 +392,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
64765 */
64766 void __stack_chk_fail(void)
64767 {
64768 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
64769 + dump_stack();
64770 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
64771 __builtin_return_address(0));
64772 }
64773 EXPORT_SYMBOL(__stack_chk_fail);
64774 diff -urNp linux-2.6.32.44/kernel/params.c linux-2.6.32.44/kernel/params.c
64775 --- linux-2.6.32.44/kernel/params.c 2011-03-27 14:31:47.000000000 -0400
64776 +++ linux-2.6.32.44/kernel/params.c 2011-04-17 15:56:46.000000000 -0400
64777 @@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct
64778 return ret;
64779 }
64780
64781 -static struct sysfs_ops module_sysfs_ops = {
64782 +static const struct sysfs_ops module_sysfs_ops = {
64783 .show = module_attr_show,
64784 .store = module_attr_store,
64785 };
64786 @@ -739,7 +739,7 @@ static int uevent_filter(struct kset *ks
64787 return 0;
64788 }
64789
64790 -static struct kset_uevent_ops module_uevent_ops = {
64791 +static const struct kset_uevent_ops module_uevent_ops = {
64792 .filter = uevent_filter,
64793 };
64794
64795 diff -urNp linux-2.6.32.44/kernel/perf_event.c linux-2.6.32.44/kernel/perf_event.c
64796 --- linux-2.6.32.44/kernel/perf_event.c 2011-08-09 18:35:30.000000000 -0400
64797 +++ linux-2.6.32.44/kernel/perf_event.c 2011-08-09 18:34:01.000000000 -0400
64798 @@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostl
64799 */
64800 int sysctl_perf_event_sample_rate __read_mostly = 100000;
64801
64802 -static atomic64_t perf_event_id;
64803 +static atomic64_unchecked_t perf_event_id;
64804
64805 /*
64806 * Lock for (sysadmin-configurable) event reservations:
64807 @@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struc
64808 * In order to keep per-task stats reliable we need to flip the event
64809 * values when we flip the contexts.
64810 */
64811 - value = atomic64_read(&next_event->count);
64812 - value = atomic64_xchg(&event->count, value);
64813 - atomic64_set(&next_event->count, value);
64814 + value = atomic64_read_unchecked(&next_event->count);
64815 + value = atomic64_xchg_unchecked(&event->count, value);
64816 + atomic64_set_unchecked(&next_event->count, value);
64817
64818 swap(event->total_time_enabled, next_event->total_time_enabled);
64819 swap(event->total_time_running, next_event->total_time_running);
64820 @@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_e
64821 update_event_times(event);
64822 }
64823
64824 - return atomic64_read(&event->count);
64825 + return atomic64_read_unchecked(&event->count);
64826 }
64827
64828 /*
64829 @@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct
64830 values[n++] = 1 + leader->nr_siblings;
64831 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64832 values[n++] = leader->total_time_enabled +
64833 - atomic64_read(&leader->child_total_time_enabled);
64834 + atomic64_read_unchecked(&leader->child_total_time_enabled);
64835 }
64836 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64837 values[n++] = leader->total_time_running +
64838 - atomic64_read(&leader->child_total_time_running);
64839 + atomic64_read_unchecked(&leader->child_total_time_running);
64840 }
64841
64842 size = n * sizeof(u64);
64843 @@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct pe
64844 values[n++] = perf_event_read_value(event);
64845 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64846 values[n++] = event->total_time_enabled +
64847 - atomic64_read(&event->child_total_time_enabled);
64848 + atomic64_read_unchecked(&event->child_total_time_enabled);
64849 }
64850 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64851 values[n++] = event->total_time_running +
64852 - atomic64_read(&event->child_total_time_running);
64853 + atomic64_read_unchecked(&event->child_total_time_running);
64854 }
64855 if (read_format & PERF_FORMAT_ID)
64856 values[n++] = primary_event_id(event);
64857 @@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct fil
64858 static void perf_event_reset(struct perf_event *event)
64859 {
64860 (void)perf_event_read(event);
64861 - atomic64_set(&event->count, 0);
64862 + atomic64_set_unchecked(&event->count, 0);
64863 perf_event_update_userpage(event);
64864 }
64865
64866 @@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct p
64867 ++userpg->lock;
64868 barrier();
64869 userpg->index = perf_event_index(event);
64870 - userpg->offset = atomic64_read(&event->count);
64871 + userpg->offset = atomic64_read_unchecked(&event->count);
64872 if (event->state == PERF_EVENT_STATE_ACTIVE)
64873 - userpg->offset -= atomic64_read(&event->hw.prev_count);
64874 + userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
64875
64876 userpg->time_enabled = event->total_time_enabled +
64877 - atomic64_read(&event->child_total_time_enabled);
64878 + atomic64_read_unchecked(&event->child_total_time_enabled);
64879
64880 userpg->time_running = event->total_time_running +
64881 - atomic64_read(&event->child_total_time_running);
64882 + atomic64_read_unchecked(&event->child_total_time_running);
64883
64884 barrier();
64885 ++userpg->lock;
64886 @@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct
64887 u64 values[4];
64888 int n = 0;
64889
64890 - values[n++] = atomic64_read(&event->count);
64891 + values[n++] = atomic64_read_unchecked(&event->count);
64892 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64893 values[n++] = event->total_time_enabled +
64894 - atomic64_read(&event->child_total_time_enabled);
64895 + atomic64_read_unchecked(&event->child_total_time_enabled);
64896 }
64897 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64898 values[n++] = event->total_time_running +
64899 - atomic64_read(&event->child_total_time_running);
64900 + atomic64_read_unchecked(&event->child_total_time_running);
64901 }
64902 if (read_format & PERF_FORMAT_ID)
64903 values[n++] = primary_event_id(event);
64904 @@ -2940,7 +2940,7 @@ static void perf_output_read_group(struc
64905 if (leader != event)
64906 leader->pmu->read(leader);
64907
64908 - values[n++] = atomic64_read(&leader->count);
64909 + values[n++] = atomic64_read_unchecked(&leader->count);
64910 if (read_format & PERF_FORMAT_ID)
64911 values[n++] = primary_event_id(leader);
64912
64913 @@ -2952,7 +2952,7 @@ static void perf_output_read_group(struc
64914 if (sub != event)
64915 sub->pmu->read(sub);
64916
64917 - values[n++] = atomic64_read(&sub->count);
64918 + values[n++] = atomic64_read_unchecked(&sub->count);
64919 if (read_format & PERF_FORMAT_ID)
64920 values[n++] = primary_event_id(sub);
64921
64922 @@ -3783,7 +3783,7 @@ static void perf_swevent_add(struct perf
64923 {
64924 struct hw_perf_event *hwc = &event->hw;
64925
64926 - atomic64_add(nr, &event->count);
64927 + atomic64_add_unchecked(nr, &event->count);
64928
64929 if (!hwc->sample_period)
64930 return;
64931 @@ -4040,9 +4040,9 @@ static void cpu_clock_perf_event_update(
64932 u64 now;
64933
64934 now = cpu_clock(cpu);
64935 - prev = atomic64_read(&event->hw.prev_count);
64936 - atomic64_set(&event->hw.prev_count, now);
64937 - atomic64_add(now - prev, &event->count);
64938 + prev = atomic64_read_unchecked(&event->hw.prev_count);
64939 + atomic64_set_unchecked(&event->hw.prev_count, now);
64940 + atomic64_add_unchecked(now - prev, &event->count);
64941 }
64942
64943 static int cpu_clock_perf_event_enable(struct perf_event *event)
64944 @@ -4050,7 +4050,7 @@ static int cpu_clock_perf_event_enable(s
64945 struct hw_perf_event *hwc = &event->hw;
64946 int cpu = raw_smp_processor_id();
64947
64948 - atomic64_set(&hwc->prev_count, cpu_clock(cpu));
64949 + atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
64950 perf_swevent_start_hrtimer(event);
64951
64952 return 0;
64953 @@ -4082,9 +4082,9 @@ static void task_clock_perf_event_update
64954 u64 prev;
64955 s64 delta;
64956
64957 - prev = atomic64_xchg(&event->hw.prev_count, now);
64958 + prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
64959 delta = now - prev;
64960 - atomic64_add(delta, &event->count);
64961 + atomic64_add_unchecked(delta, &event->count);
64962 }
64963
64964 static int task_clock_perf_event_enable(struct perf_event *event)
64965 @@ -4094,7 +4094,7 @@ static int task_clock_perf_event_enable(
64966
64967 now = event->ctx->time;
64968
64969 - atomic64_set(&hwc->prev_count, now);
64970 + atomic64_set_unchecked(&hwc->prev_count, now);
64971
64972 perf_swevent_start_hrtimer(event);
64973
64974 @@ -4289,7 +4289,7 @@ perf_event_alloc(struct perf_event_attr
64975 event->parent = parent_event;
64976
64977 event->ns = get_pid_ns(current->nsproxy->pid_ns);
64978 - event->id = atomic64_inc_return(&perf_event_id);
64979 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
64980
64981 event->state = PERF_EVENT_STATE_INACTIVE;
64982
64983 @@ -4720,15 +4720,15 @@ static void sync_child_event(struct perf
64984 if (child_event->attr.inherit_stat)
64985 perf_event_read_event(child_event, child);
64986
64987 - child_val = atomic64_read(&child_event->count);
64988 + child_val = atomic64_read_unchecked(&child_event->count);
64989
64990 /*
64991 * Add back the child's count to the parent's count:
64992 */
64993 - atomic64_add(child_val, &parent_event->count);
64994 - atomic64_add(child_event->total_time_enabled,
64995 + atomic64_add_unchecked(child_val, &parent_event->count);
64996 + atomic64_add_unchecked(child_event->total_time_enabled,
64997 &parent_event->child_total_time_enabled);
64998 - atomic64_add(child_event->total_time_running,
64999 + atomic64_add_unchecked(child_event->total_time_running,
65000 &parent_event->child_total_time_running);
65001
65002 /*
65003 diff -urNp linux-2.6.32.44/kernel/pid.c linux-2.6.32.44/kernel/pid.c
65004 --- linux-2.6.32.44/kernel/pid.c 2011-04-22 19:16:29.000000000 -0400
65005 +++ linux-2.6.32.44/kernel/pid.c 2011-07-14 19:15:33.000000000 -0400
65006 @@ -33,6 +33,7 @@
65007 #include <linux/rculist.h>
65008 #include <linux/bootmem.h>
65009 #include <linux/hash.h>
65010 +#include <linux/security.h>
65011 #include <linux/pid_namespace.h>
65012 #include <linux/init_task.h>
65013 #include <linux/syscalls.h>
65014 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
65015
65016 int pid_max = PID_MAX_DEFAULT;
65017
65018 -#define RESERVED_PIDS 300
65019 +#define RESERVED_PIDS 500
65020
65021 int pid_max_min = RESERVED_PIDS + 1;
65022 int pid_max_max = PID_MAX_LIMIT;
65023 @@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
65024 */
65025 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
65026 {
65027 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
65028 + struct task_struct *task;
65029 +
65030 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
65031 +
65032 + if (gr_pid_is_chrooted(task))
65033 + return NULL;
65034 +
65035 + return task;
65036 }
65037
65038 struct task_struct *find_task_by_vpid(pid_t vnr)
65039 @@ -391,6 +399,13 @@ struct task_struct *find_task_by_vpid(pi
65040 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
65041 }
65042
65043 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
65044 +{
65045 + struct task_struct *task;
65046 +
65047 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
65048 +}
65049 +
65050 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
65051 {
65052 struct pid *pid;
65053 diff -urNp linux-2.6.32.44/kernel/posix-cpu-timers.c linux-2.6.32.44/kernel/posix-cpu-timers.c
65054 --- linux-2.6.32.44/kernel/posix-cpu-timers.c 2011-03-27 14:31:47.000000000 -0400
65055 +++ linux-2.6.32.44/kernel/posix-cpu-timers.c 2011-08-06 09:33:44.000000000 -0400
65056 @@ -6,6 +6,7 @@
65057 #include <linux/posix-timers.h>
65058 #include <linux/errno.h>
65059 #include <linux/math64.h>
65060 +#include <linux/security.h>
65061 #include <asm/uaccess.h>
65062 #include <linux/kernel_stat.h>
65063 #include <trace/events/timer.h>
65064 @@ -1697,7 +1698,7 @@ static long thread_cpu_nsleep_restart(st
65065
65066 static __init int init_posix_cpu_timers(void)
65067 {
65068 - struct k_clock process = {
65069 + static struct k_clock process = {
65070 .clock_getres = process_cpu_clock_getres,
65071 .clock_get = process_cpu_clock_get,
65072 .clock_set = do_posix_clock_nosettime,
65073 @@ -1705,7 +1706,7 @@ static __init int init_posix_cpu_timers(
65074 .nsleep = process_cpu_nsleep,
65075 .nsleep_restart = process_cpu_nsleep_restart,
65076 };
65077 - struct k_clock thread = {
65078 + static struct k_clock thread = {
65079 .clock_getres = thread_cpu_clock_getres,
65080 .clock_get = thread_cpu_clock_get,
65081 .clock_set = do_posix_clock_nosettime,
65082 diff -urNp linux-2.6.32.44/kernel/posix-timers.c linux-2.6.32.44/kernel/posix-timers.c
65083 --- linux-2.6.32.44/kernel/posix-timers.c 2011-03-27 14:31:47.000000000 -0400
65084 +++ linux-2.6.32.44/kernel/posix-timers.c 2011-08-06 09:34:14.000000000 -0400
65085 @@ -42,6 +42,7 @@
65086 #include <linux/compiler.h>
65087 #include <linux/idr.h>
65088 #include <linux/posix-timers.h>
65089 +#include <linux/grsecurity.h>
65090 #include <linux/syscalls.h>
65091 #include <linux/wait.h>
65092 #include <linux/workqueue.h>
65093 @@ -131,7 +132,7 @@ static DEFINE_SPINLOCK(idr_lock);
65094 * which we beg off on and pass to do_sys_settimeofday().
65095 */
65096
65097 -static struct k_clock posix_clocks[MAX_CLOCKS];
65098 +static struct k_clock *posix_clocks[MAX_CLOCKS];
65099
65100 /*
65101 * These ones are defined below.
65102 @@ -157,8 +158,8 @@ static inline void unlock_timer(struct k
65103 */
65104 #define CLOCK_DISPATCH(clock, call, arglist) \
65105 ((clock) < 0 ? posix_cpu_##call arglist : \
65106 - (posix_clocks[clock].call != NULL \
65107 - ? (*posix_clocks[clock].call) arglist : common_##call arglist))
65108 + (posix_clocks[clock]->call != NULL \
65109 + ? (*posix_clocks[clock]->call) arglist : common_##call arglist))
65110
65111 /*
65112 * Default clock hook functions when the struct k_clock passed
65113 @@ -172,7 +173,7 @@ static inline int common_clock_getres(co
65114 struct timespec *tp)
65115 {
65116 tp->tv_sec = 0;
65117 - tp->tv_nsec = posix_clocks[which_clock].res;
65118 + tp->tv_nsec = posix_clocks[which_clock]->res;
65119 return 0;
65120 }
65121
65122 @@ -217,9 +218,11 @@ static inline int invalid_clockid(const
65123 return 0;
65124 if ((unsigned) which_clock >= MAX_CLOCKS)
65125 return 1;
65126 - if (posix_clocks[which_clock].clock_getres != NULL)
65127 + if (!posix_clocks[which_clock])
65128 return 0;
65129 - if (posix_clocks[which_clock].res != 0)
65130 + if (posix_clocks[which_clock]->clock_getres != NULL)
65131 + return 0;
65132 + if (posix_clocks[which_clock]->res != 0)
65133 return 0;
65134 return 1;
65135 }
65136 @@ -266,29 +269,29 @@ int posix_get_coarse_res(const clockid_t
65137 */
65138 static __init int init_posix_timers(void)
65139 {
65140 - struct k_clock clock_realtime = {
65141 + static struct k_clock clock_realtime = {
65142 .clock_getres = hrtimer_get_res,
65143 };
65144 - struct k_clock clock_monotonic = {
65145 + static struct k_clock clock_monotonic = {
65146 .clock_getres = hrtimer_get_res,
65147 .clock_get = posix_ktime_get_ts,
65148 .clock_set = do_posix_clock_nosettime,
65149 };
65150 - struct k_clock clock_monotonic_raw = {
65151 + static struct k_clock clock_monotonic_raw = {
65152 .clock_getres = hrtimer_get_res,
65153 .clock_get = posix_get_monotonic_raw,
65154 .clock_set = do_posix_clock_nosettime,
65155 .timer_create = no_timer_create,
65156 .nsleep = no_nsleep,
65157 };
65158 - struct k_clock clock_realtime_coarse = {
65159 + static struct k_clock clock_realtime_coarse = {
65160 .clock_getres = posix_get_coarse_res,
65161 .clock_get = posix_get_realtime_coarse,
65162 .clock_set = do_posix_clock_nosettime,
65163 .timer_create = no_timer_create,
65164 .nsleep = no_nsleep,
65165 };
65166 - struct k_clock clock_monotonic_coarse = {
65167 + static struct k_clock clock_monotonic_coarse = {
65168 .clock_getres = posix_get_coarse_res,
65169 .clock_get = posix_get_monotonic_coarse,
65170 .clock_set = do_posix_clock_nosettime,
65171 @@ -296,6 +299,8 @@ static __init int init_posix_timers(void
65172 .nsleep = no_nsleep,
65173 };
65174
65175 + pax_track_stack();
65176 +
65177 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
65178 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
65179 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
65180 @@ -484,7 +489,7 @@ void register_posix_clock(const clockid_
65181 return;
65182 }
65183
65184 - posix_clocks[clock_id] = *new_clock;
65185 + posix_clocks[clock_id] = new_clock;
65186 }
65187 EXPORT_SYMBOL_GPL(register_posix_clock);
65188
65189 @@ -948,6 +953,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
65190 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
65191 return -EFAULT;
65192
65193 + /* only the CLOCK_REALTIME clock can be set, all other clocks
65194 + have their clock_set fptr set to a nosettime dummy function
65195 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
65196 + call common_clock_set, which calls do_sys_settimeofday, which
65197 + we hook
65198 + */
65199 +
65200 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
65201 }
65202
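The kernel/posix-timers.c hunks above change posix_clocks[] from an array of struct k_clock values into an array of pointers, so register_posix_clock() stores the caller's pointer instead of copying the structure and CLOCK_DISPATCH() dereferences it. That is also why the k_clock initializers in init_posix_timers() and in the kernel/posix-cpu-timers.c hunk become static: a stored pointer to an on-stack structure would dangle as soon as the init function returned. A compressed sketch of the registration path after the change, keeping the signature visible in the hunk header and dropping the error printk (illustrative, not a hunk from the patch):

static struct k_clock *posix_clocks[MAX_CLOCKS];

void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
{
	if ((unsigned) clock_id >= MAX_CLOCKS)
		return;

	/* store the pointer; the caller must pass a k_clock with static
	   storage duration */
	posix_clocks[clock_id] = new_clock;
}
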
65203 diff -urNp linux-2.6.32.44/kernel/power/hibernate.c linux-2.6.32.44/kernel/power/hibernate.c
65204 --- linux-2.6.32.44/kernel/power/hibernate.c 2011-03-27 14:31:47.000000000 -0400
65205 +++ linux-2.6.32.44/kernel/power/hibernate.c 2011-04-17 15:56:46.000000000 -0400
65206 @@ -48,14 +48,14 @@ enum {
65207
65208 static int hibernation_mode = HIBERNATION_SHUTDOWN;
65209
65210 -static struct platform_hibernation_ops *hibernation_ops;
65211 +static const struct platform_hibernation_ops *hibernation_ops;
65212
65213 /**
65214 * hibernation_set_ops - set the global hibernate operations
65215 * @ops: the hibernation operations to use in subsequent hibernation transitions
65216 */
65217
65218 -void hibernation_set_ops(struct platform_hibernation_ops *ops)
65219 +void hibernation_set_ops(const struct platform_hibernation_ops *ops)
65220 {
65221 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
65222 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
65223 diff -urNp linux-2.6.32.44/kernel/power/poweroff.c linux-2.6.32.44/kernel/power/poweroff.c
65224 --- linux-2.6.32.44/kernel/power/poweroff.c 2011-03-27 14:31:47.000000000 -0400
65225 +++ linux-2.6.32.44/kernel/power/poweroff.c 2011-04-17 15:56:46.000000000 -0400
65226 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
65227 .enable_mask = SYSRQ_ENABLE_BOOT,
65228 };
65229
65230 -static int pm_sysrq_init(void)
65231 +static int __init pm_sysrq_init(void)
65232 {
65233 register_sysrq_key('o', &sysrq_poweroff_op);
65234 return 0;
65235 diff -urNp linux-2.6.32.44/kernel/power/process.c linux-2.6.32.44/kernel/power/process.c
65236 --- linux-2.6.32.44/kernel/power/process.c 2011-03-27 14:31:47.000000000 -0400
65237 +++ linux-2.6.32.44/kernel/power/process.c 2011-04-17 15:56:46.000000000 -0400
65238 @@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_
65239 struct timeval start, end;
65240 u64 elapsed_csecs64;
65241 unsigned int elapsed_csecs;
65242 + bool timedout = false;
65243
65244 do_gettimeofday(&start);
65245
65246 end_time = jiffies + TIMEOUT;
65247 do {
65248 todo = 0;
65249 + if (time_after(jiffies, end_time))
65250 + timedout = true;
65251 read_lock(&tasklist_lock);
65252 do_each_thread(g, p) {
65253 if (frozen(p) || !freezeable(p))
65254 @@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_
65255 * It is "frozen enough". If the task does wake
65256 * up, it will immediately call try_to_freeze.
65257 */
65258 - if (!task_is_stopped_or_traced(p) &&
65259 - !freezer_should_skip(p))
65260 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
65261 todo++;
65262 + if (timedout) {
65263 + printk(KERN_ERR "Task refusing to freeze:\n");
65264 + sched_show_task(p);
65265 + }
65266 + }
65267 } while_each_thread(g, p);
65268 read_unlock(&tasklist_lock);
65269 yield(); /* Yield is okay here */
65270 - if (time_after(jiffies, end_time))
65271 - break;
65272 - } while (todo);
65273 + } while (todo && !timedout);
65274
65275 do_gettimeofday(&end);
65276 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
65277 diff -urNp linux-2.6.32.44/kernel/power/suspend.c linux-2.6.32.44/kernel/power/suspend.c
65278 --- linux-2.6.32.44/kernel/power/suspend.c 2011-03-27 14:31:47.000000000 -0400
65279 +++ linux-2.6.32.44/kernel/power/suspend.c 2011-04-17 15:56:46.000000000 -0400
65280 @@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_M
65281 [PM_SUSPEND_MEM] = "mem",
65282 };
65283
65284 -static struct platform_suspend_ops *suspend_ops;
65285 +static const struct platform_suspend_ops *suspend_ops;
65286
65287 /**
65288 * suspend_set_ops - Set the global suspend method table.
65289 * @ops: Pointer to ops structure.
65290 */
65291 -void suspend_set_ops(struct platform_suspend_ops *ops)
65292 +void suspend_set_ops(const struct platform_suspend_ops *ops)
65293 {
65294 mutex_lock(&pm_mutex);
65295 suspend_ops = ops;
65296 diff -urNp linux-2.6.32.44/kernel/printk.c linux-2.6.32.44/kernel/printk.c
65297 --- linux-2.6.32.44/kernel/printk.c 2011-03-27 14:31:47.000000000 -0400
65298 +++ linux-2.6.32.44/kernel/printk.c 2011-04-17 15:56:46.000000000 -0400
65299 @@ -278,6 +278,11 @@ int do_syslog(int type, char __user *buf
65300 char c;
65301 int error = 0;
65302
65303 +#ifdef CONFIG_GRKERNSEC_DMESG
65304 + if (grsec_enable_dmesg && !capable(CAP_SYS_ADMIN))
65305 + return -EPERM;
65306 +#endif
65307 +
65308 error = security_syslog(type);
65309 if (error)
65310 return error;
65311 diff -urNp linux-2.6.32.44/kernel/profile.c linux-2.6.32.44/kernel/profile.c
65312 --- linux-2.6.32.44/kernel/profile.c 2011-03-27 14:31:47.000000000 -0400
65313 +++ linux-2.6.32.44/kernel/profile.c 2011-05-04 17:56:28.000000000 -0400
65314 @@ -39,7 +39,7 @@ struct profile_hit {
65315 /* Oprofile timer tick hook */
65316 static int (*timer_hook)(struct pt_regs *) __read_mostly;
65317
65318 -static atomic_t *prof_buffer;
65319 +static atomic_unchecked_t *prof_buffer;
65320 static unsigned long prof_len, prof_shift;
65321
65322 int prof_on __read_mostly;
65323 @@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
65324 hits[i].pc = 0;
65325 continue;
65326 }
65327 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
65328 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
65329 hits[i].hits = hits[i].pc = 0;
65330 }
65331 }
65332 @@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc,
65333 * Add the current hit(s) and flush the write-queue out
65334 * to the global buffer:
65335 */
65336 - atomic_add(nr_hits, &prof_buffer[pc]);
65337 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
65338 for (i = 0; i < NR_PROFILE_HIT; ++i) {
65339 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
65340 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
65341 hits[i].pc = hits[i].hits = 0;
65342 }
65343 out:
65344 @@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc,
65345 if (prof_on != type || !prof_buffer)
65346 return;
65347 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
65348 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
65349 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
65350 }
65351 #endif /* !CONFIG_SMP */
65352 EXPORT_SYMBOL_GPL(profile_hits);
65353 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
65354 return -EFAULT;
65355 buf++; p++; count--; read++;
65356 }
65357 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
65358 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
65359 if (copy_to_user(buf, (void *)pnt, count))
65360 return -EFAULT;
65361 read += count;
65362 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
65363 }
65364 #endif
65365 profile_discard_flip_buffers();
65366 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
65367 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
65368 return count;
65369 }
65370
65371 diff -urNp linux-2.6.32.44/kernel/ptrace.c linux-2.6.32.44/kernel/ptrace.c
65372 --- linux-2.6.32.44/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
65373 +++ linux-2.6.32.44/kernel/ptrace.c 2011-05-22 23:02:06.000000000 -0400
65374 @@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_stru
65375 return ret;
65376 }
65377
65378 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
65379 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
65380 + unsigned int log)
65381 {
65382 const struct cred *cred = current_cred(), *tcred;
65383
65384 @@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_stru
65385 cred->gid != tcred->egid ||
65386 cred->gid != tcred->sgid ||
65387 cred->gid != tcred->gid) &&
65388 - !capable(CAP_SYS_PTRACE)) {
65389 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
65390 + (log && !capable(CAP_SYS_PTRACE)))
65391 + ) {
65392 rcu_read_unlock();
65393 return -EPERM;
65394 }
65395 @@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_stru
65396 smp_rmb();
65397 if (task->mm)
65398 dumpable = get_dumpable(task->mm);
65399 - if (!dumpable && !capable(CAP_SYS_PTRACE))
65400 + if (!dumpable &&
65401 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
65402 + (log && !capable(CAP_SYS_PTRACE))))
65403 return -EPERM;
65404
65405 return security_ptrace_access_check(task, mode);
65406 @@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struc
65407 {
65408 int err;
65409 task_lock(task);
65410 - err = __ptrace_may_access(task, mode);
65411 + err = __ptrace_may_access(task, mode, 0);
65412 + task_unlock(task);
65413 + return !err;
65414 +}
65415 +
65416 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
65417 +{
65418 + int err;
65419 + task_lock(task);
65420 + err = __ptrace_may_access(task, mode, 1);
65421 task_unlock(task);
65422 return !err;
65423 }
65424 @@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *ta
65425 goto out;
65426
65427 task_lock(task);
65428 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
65429 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
65430 task_unlock(task);
65431 if (retval)
65432 goto unlock_creds;
65433 @@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *ta
65434 goto unlock_tasklist;
65435
65436 task->ptrace = PT_PTRACED;
65437 - if (capable(CAP_SYS_PTRACE))
65438 + if (capable_nolog(CAP_SYS_PTRACE))
65439 task->ptrace |= PT_PTRACE_CAP;
65440
65441 __ptrace_link(task, current);
65442 @@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *
65443 {
65444 int copied = 0;
65445
65446 + pax_track_stack();
65447 +
65448 while (len > 0) {
65449 char buf[128];
65450 int this_len, retval;
65451 @@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct
65452 {
65453 int copied = 0;
65454
65455 + pax_track_stack();
65456 +
65457 while (len > 0) {
65458 char buf[128];
65459 int this_len, retval;
65460 @@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *c
65461 int ret = -EIO;
65462 siginfo_t siginfo;
65463
65464 + pax_track_stack();
65465 +
65466 switch (request) {
65467 case PTRACE_PEEKTEXT:
65468 case PTRACE_PEEKDATA:
65469 @@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *c
65470 ret = ptrace_setoptions(child, data);
65471 break;
65472 case PTRACE_GETEVENTMSG:
65473 - ret = put_user(child->ptrace_message, (unsigned long __user *) data);
65474 + ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
65475 break;
65476
65477 case PTRACE_GETSIGINFO:
65478 ret = ptrace_getsiginfo(child, &siginfo);
65479 if (!ret)
65480 - ret = copy_siginfo_to_user((siginfo_t __user *) data,
65481 + ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
65482 &siginfo);
65483 break;
65484
65485 case PTRACE_SETSIGINFO:
65486 - if (copy_from_user(&siginfo, (siginfo_t __user *) data,
65487 + if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
65488 sizeof siginfo))
65489 ret = -EFAULT;
65490 else
65491 @@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
65492 goto out;
65493 }
65494
65495 + if (gr_handle_ptrace(child, request)) {
65496 + ret = -EPERM;
65497 + goto out_put_task_struct;
65498 + }
65499 +
65500 if (request == PTRACE_ATTACH) {
65501 ret = ptrace_attach(child);
65502 /*
65503 * Some architectures need to do book-keeping after
65504 * a ptrace attach.
65505 */
65506 - if (!ret)
65507 + if (!ret) {
65508 arch_ptrace_attach(child);
65509 + gr_audit_ptrace(child);
65510 + }
65511 goto out_put_task_struct;
65512 }
65513
65514 @@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_
65515 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
65516 if (copied != sizeof(tmp))
65517 return -EIO;
65518 - return put_user(tmp, (unsigned long __user *)data);
65519 + return put_user(tmp, (__force unsigned long __user *)data);
65520 }
65521
65522 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
65523 @@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_st
65524 siginfo_t siginfo;
65525 int ret;
65526
65527 + pax_track_stack();
65528 +
65529 switch (request) {
65530 case PTRACE_PEEKTEXT:
65531 case PTRACE_PEEKDATA:
65532 @@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat
65533 goto out;
65534 }
65535
65536 + if (gr_handle_ptrace(child, request)) {
65537 + ret = -EPERM;
65538 + goto out_put_task_struct;
65539 + }
65540 +
65541 if (request == PTRACE_ATTACH) {
65542 ret = ptrace_attach(child);
65543 /*
65544 * Some architectures need to do book-keeping after
65545 * a ptrace attach.
65546 */
65547 - if (!ret)
65548 + if (!ret) {
65549 arch_ptrace_attach(child);
65550 + gr_audit_ptrace(child);
65551 + }
65552 goto out_put_task_struct;
65553 }
65554
65555 diff -urNp linux-2.6.32.44/kernel/rcutorture.c linux-2.6.32.44/kernel/rcutorture.c
65556 --- linux-2.6.32.44/kernel/rcutorture.c 2011-03-27 14:31:47.000000000 -0400
65557 +++ linux-2.6.32.44/kernel/rcutorture.c 2011-05-04 17:56:28.000000000 -0400
65558 @@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
65559 { 0 };
65560 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
65561 { 0 };
65562 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65563 -static atomic_t n_rcu_torture_alloc;
65564 -static atomic_t n_rcu_torture_alloc_fail;
65565 -static atomic_t n_rcu_torture_free;
65566 -static atomic_t n_rcu_torture_mberror;
65567 -static atomic_t n_rcu_torture_error;
65568 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65569 +static atomic_unchecked_t n_rcu_torture_alloc;
65570 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
65571 +static atomic_unchecked_t n_rcu_torture_free;
65572 +static atomic_unchecked_t n_rcu_torture_mberror;
65573 +static atomic_unchecked_t n_rcu_torture_error;
65574 static long n_rcu_torture_timers;
65575 static struct list_head rcu_torture_removed;
65576 static cpumask_var_t shuffle_tmp_mask;
65577 @@ -187,11 +187,11 @@ rcu_torture_alloc(void)
65578
65579 spin_lock_bh(&rcu_torture_lock);
65580 if (list_empty(&rcu_torture_freelist)) {
65581 - atomic_inc(&n_rcu_torture_alloc_fail);
65582 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
65583 spin_unlock_bh(&rcu_torture_lock);
65584 return NULL;
65585 }
65586 - atomic_inc(&n_rcu_torture_alloc);
65587 + atomic_inc_unchecked(&n_rcu_torture_alloc);
65588 p = rcu_torture_freelist.next;
65589 list_del_init(p);
65590 spin_unlock_bh(&rcu_torture_lock);
65591 @@ -204,7 +204,7 @@ rcu_torture_alloc(void)
65592 static void
65593 rcu_torture_free(struct rcu_torture *p)
65594 {
65595 - atomic_inc(&n_rcu_torture_free);
65596 + atomic_inc_unchecked(&n_rcu_torture_free);
65597 spin_lock_bh(&rcu_torture_lock);
65598 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
65599 spin_unlock_bh(&rcu_torture_lock);
65600 @@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
65601 i = rp->rtort_pipe_count;
65602 if (i > RCU_TORTURE_PIPE_LEN)
65603 i = RCU_TORTURE_PIPE_LEN;
65604 - atomic_inc(&rcu_torture_wcount[i]);
65605 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
65606 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65607 rp->rtort_mbtest = 0;
65608 rcu_torture_free(rp);
65609 @@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_fr
65610 i = rp->rtort_pipe_count;
65611 if (i > RCU_TORTURE_PIPE_LEN)
65612 i = RCU_TORTURE_PIPE_LEN;
65613 - atomic_inc(&rcu_torture_wcount[i]);
65614 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
65615 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65616 rp->rtort_mbtest = 0;
65617 list_del(&rp->rtort_free);
65618 @@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
65619 i = old_rp->rtort_pipe_count;
65620 if (i > RCU_TORTURE_PIPE_LEN)
65621 i = RCU_TORTURE_PIPE_LEN;
65622 - atomic_inc(&rcu_torture_wcount[i]);
65623 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
65624 old_rp->rtort_pipe_count++;
65625 cur_ops->deferred_free(old_rp);
65626 }
65627 @@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned l
65628 return;
65629 }
65630 if (p->rtort_mbtest == 0)
65631 - atomic_inc(&n_rcu_torture_mberror);
65632 + atomic_inc_unchecked(&n_rcu_torture_mberror);
65633 spin_lock(&rand_lock);
65634 cur_ops->read_delay(&rand);
65635 n_rcu_torture_timers++;
65636 @@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
65637 continue;
65638 }
65639 if (p->rtort_mbtest == 0)
65640 - atomic_inc(&n_rcu_torture_mberror);
65641 + atomic_inc_unchecked(&n_rcu_torture_mberror);
65642 cur_ops->read_delay(&rand);
65643 preempt_disable();
65644 pipe_count = p->rtort_pipe_count;
65645 @@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
65646 rcu_torture_current,
65647 rcu_torture_current_version,
65648 list_empty(&rcu_torture_freelist),
65649 - atomic_read(&n_rcu_torture_alloc),
65650 - atomic_read(&n_rcu_torture_alloc_fail),
65651 - atomic_read(&n_rcu_torture_free),
65652 - atomic_read(&n_rcu_torture_mberror),
65653 + atomic_read_unchecked(&n_rcu_torture_alloc),
65654 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
65655 + atomic_read_unchecked(&n_rcu_torture_free),
65656 + atomic_read_unchecked(&n_rcu_torture_mberror),
65657 n_rcu_torture_timers);
65658 - if (atomic_read(&n_rcu_torture_mberror) != 0)
65659 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
65660 cnt += sprintf(&page[cnt], " !!!");
65661 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
65662 if (i > 1) {
65663 cnt += sprintf(&page[cnt], "!!! ");
65664 - atomic_inc(&n_rcu_torture_error);
65665 + atomic_inc_unchecked(&n_rcu_torture_error);
65666 WARN_ON_ONCE(1);
65667 }
65668 cnt += sprintf(&page[cnt], "Reader Pipe: ");
65669 @@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
65670 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
65671 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65672 cnt += sprintf(&page[cnt], " %d",
65673 - atomic_read(&rcu_torture_wcount[i]));
65674 + atomic_read_unchecked(&rcu_torture_wcount[i]));
65675 }
65676 cnt += sprintf(&page[cnt], "\n");
65677 if (cur_ops->stats)
65678 @@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
65679
65680 if (cur_ops->cleanup)
65681 cur_ops->cleanup();
65682 - if (atomic_read(&n_rcu_torture_error))
65683 + if (atomic_read_unchecked(&n_rcu_torture_error))
65684 rcu_torture_print_module_parms("End of test: FAILURE");
65685 else
65686 rcu_torture_print_module_parms("End of test: SUCCESS");
65687 @@ -1138,13 +1138,13 @@ rcu_torture_init(void)
65688
65689 rcu_torture_current = NULL;
65690 rcu_torture_current_version = 0;
65691 - atomic_set(&n_rcu_torture_alloc, 0);
65692 - atomic_set(&n_rcu_torture_alloc_fail, 0);
65693 - atomic_set(&n_rcu_torture_free, 0);
65694 - atomic_set(&n_rcu_torture_mberror, 0);
65695 - atomic_set(&n_rcu_torture_error, 0);
65696 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
65697 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
65698 + atomic_set_unchecked(&n_rcu_torture_free, 0);
65699 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
65700 + atomic_set_unchecked(&n_rcu_torture_error, 0);
65701 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
65702 - atomic_set(&rcu_torture_wcount[i], 0);
65703 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
65704 for_each_possible_cpu(cpu) {
65705 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65706 per_cpu(rcu_torture_count, cpu)[i] = 0;
65707 diff -urNp linux-2.6.32.44/kernel/rcutree.c linux-2.6.32.44/kernel/rcutree.c
65708 --- linux-2.6.32.44/kernel/rcutree.c 2011-03-27 14:31:47.000000000 -0400
65709 +++ linux-2.6.32.44/kernel/rcutree.c 2011-04-17 15:56:46.000000000 -0400
65710 @@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state
65711 /*
65712 * Do softirq processing for the current CPU.
65713 */
65714 -static void rcu_process_callbacks(struct softirq_action *unused)
65715 +static void rcu_process_callbacks(void)
65716 {
65717 /*
65718 * Memory references from any prior RCU read-side critical sections
65719 diff -urNp linux-2.6.32.44/kernel/rcutree_plugin.h linux-2.6.32.44/kernel/rcutree_plugin.h
65720 --- linux-2.6.32.44/kernel/rcutree_plugin.h 2011-03-27 14:31:47.000000000 -0400
65721 +++ linux-2.6.32.44/kernel/rcutree_plugin.h 2011-04-17 15:56:46.000000000 -0400
65722 @@ -145,7 +145,7 @@ static void rcu_preempt_note_context_swi
65723 */
65724 void __rcu_read_lock(void)
65725 {
65726 - ACCESS_ONCE(current->rcu_read_lock_nesting)++;
65727 + ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
65728 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
65729 }
65730 EXPORT_SYMBOL_GPL(__rcu_read_lock);
65731 @@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
65732 struct task_struct *t = current;
65733
65734 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
65735 - if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
65736 + if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
65737 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
65738 rcu_read_unlock_special(t);
65739 }
65740 diff -urNp linux-2.6.32.44/kernel/relay.c linux-2.6.32.44/kernel/relay.c
65741 --- linux-2.6.32.44/kernel/relay.c 2011-03-27 14:31:47.000000000 -0400
65742 +++ linux-2.6.32.44/kernel/relay.c 2011-05-16 21:46:57.000000000 -0400
65743 @@ -1222,7 +1222,7 @@ static int subbuf_splice_actor(struct fi
65744 unsigned int flags,
65745 int *nonpad_ret)
65746 {
65747 - unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
65748 + unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
65749 struct rchan_buf *rbuf = in->private_data;
65750 unsigned int subbuf_size = rbuf->chan->subbuf_size;
65751 uint64_t pos = (uint64_t) *ppos;
65752 @@ -1241,6 +1241,9 @@ static int subbuf_splice_actor(struct fi
65753 .ops = &relay_pipe_buf_ops,
65754 .spd_release = relay_page_release,
65755 };
65756 + ssize_t ret;
65757 +
65758 + pax_track_stack();
65759
65760 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
65761 return 0;
65762 diff -urNp linux-2.6.32.44/kernel/resource.c linux-2.6.32.44/kernel/resource.c
65763 --- linux-2.6.32.44/kernel/resource.c 2011-03-27 14:31:47.000000000 -0400
65764 +++ linux-2.6.32.44/kernel/resource.c 2011-04-17 15:56:46.000000000 -0400
65765 @@ -132,8 +132,18 @@ static const struct file_operations proc
65766
65767 static int __init ioresources_init(void)
65768 {
65769 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
65770 +#ifdef CONFIG_GRKERNSEC_PROC_USER
65771 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
65772 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
65773 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65774 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
65775 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
65776 +#endif
65777 +#else
65778 proc_create("ioports", 0, NULL, &proc_ioports_operations);
65779 proc_create("iomem", 0, NULL, &proc_iomem_operations);
65780 +#endif
65781 return 0;
65782 }
65783 __initcall(ioresources_init);
65784 diff -urNp linux-2.6.32.44/kernel/rtmutex.c linux-2.6.32.44/kernel/rtmutex.c
65785 --- linux-2.6.32.44/kernel/rtmutex.c 2011-03-27 14:31:47.000000000 -0400
65786 +++ linux-2.6.32.44/kernel/rtmutex.c 2011-04-17 15:56:46.000000000 -0400
65787 @@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt
65788 */
65789 spin_lock_irqsave(&pendowner->pi_lock, flags);
65790
65791 - WARN_ON(!pendowner->pi_blocked_on);
65792 + BUG_ON(!pendowner->pi_blocked_on);
65793 WARN_ON(pendowner->pi_blocked_on != waiter);
65794 WARN_ON(pendowner->pi_blocked_on->lock != lock);
65795
65796 diff -urNp linux-2.6.32.44/kernel/rtmutex-tester.c linux-2.6.32.44/kernel/rtmutex-tester.c
65797 --- linux-2.6.32.44/kernel/rtmutex-tester.c 2011-03-27 14:31:47.000000000 -0400
65798 +++ linux-2.6.32.44/kernel/rtmutex-tester.c 2011-05-04 17:56:28.000000000 -0400
65799 @@ -21,7 +21,7 @@
65800 #define MAX_RT_TEST_MUTEXES 8
65801
65802 static spinlock_t rttest_lock;
65803 -static atomic_t rttest_event;
65804 +static atomic_unchecked_t rttest_event;
65805
65806 struct test_thread_data {
65807 int opcode;
65808 @@ -64,7 +64,7 @@ static int handle_op(struct test_thread_
65809
65810 case RTTEST_LOCKCONT:
65811 td->mutexes[td->opdata] = 1;
65812 - td->event = atomic_add_return(1, &rttest_event);
65813 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65814 return 0;
65815
65816 case RTTEST_RESET:
65817 @@ -82,7 +82,7 @@ static int handle_op(struct test_thread_
65818 return 0;
65819
65820 case RTTEST_RESETEVENT:
65821 - atomic_set(&rttest_event, 0);
65822 + atomic_set_unchecked(&rttest_event, 0);
65823 return 0;
65824
65825 default:
65826 @@ -99,9 +99,9 @@ static int handle_op(struct test_thread_
65827 return ret;
65828
65829 td->mutexes[id] = 1;
65830 - td->event = atomic_add_return(1, &rttest_event);
65831 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65832 rt_mutex_lock(&mutexes[id]);
65833 - td->event = atomic_add_return(1, &rttest_event);
65834 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65835 td->mutexes[id] = 4;
65836 return 0;
65837
65838 @@ -112,9 +112,9 @@ static int handle_op(struct test_thread_
65839 return ret;
65840
65841 td->mutexes[id] = 1;
65842 - td->event = atomic_add_return(1, &rttest_event);
65843 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65844 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
65845 - td->event = atomic_add_return(1, &rttest_event);
65846 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65847 td->mutexes[id] = ret ? 0 : 4;
65848 return ret ? -EINTR : 0;
65849
65850 @@ -123,9 +123,9 @@ static int handle_op(struct test_thread_
65851 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
65852 return ret;
65853
65854 - td->event = atomic_add_return(1, &rttest_event);
65855 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65856 rt_mutex_unlock(&mutexes[id]);
65857 - td->event = atomic_add_return(1, &rttest_event);
65858 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65859 td->mutexes[id] = 0;
65860 return 0;
65861
65862 @@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mu
65863 break;
65864
65865 td->mutexes[dat] = 2;
65866 - td->event = atomic_add_return(1, &rttest_event);
65867 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65868 break;
65869
65870 case RTTEST_LOCKBKL:
65871 @@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mu
65872 return;
65873
65874 td->mutexes[dat] = 3;
65875 - td->event = atomic_add_return(1, &rttest_event);
65876 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65877 break;
65878
65879 case RTTEST_LOCKNOWAIT:
65880 @@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mu
65881 return;
65882
65883 td->mutexes[dat] = 1;
65884 - td->event = atomic_add_return(1, &rttest_event);
65885 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65886 return;
65887
65888 case RTTEST_LOCKBKL:
65889 diff -urNp linux-2.6.32.44/kernel/sched.c linux-2.6.32.44/kernel/sched.c
65890 --- linux-2.6.32.44/kernel/sched.c 2011-03-27 14:31:47.000000000 -0400
65891 +++ linux-2.6.32.44/kernel/sched.c 2011-05-22 23:02:06.000000000 -0400
65892 @@ -5043,7 +5043,7 @@ out:
65893 * In CONFIG_NO_HZ case, the idle load balance owner will do the
65894 * rebalancing for all the cpus for whom scheduler ticks are stopped.
65895 */
65896 -static void run_rebalance_domains(struct softirq_action *h)
65897 +static void run_rebalance_domains(void)
65898 {
65899 int this_cpu = smp_processor_id();
65900 struct rq *this_rq = cpu_rq(this_cpu);
65901 @@ -5700,6 +5700,8 @@ asmlinkage void __sched schedule(void)
65902 struct rq *rq;
65903 int cpu;
65904
65905 + pax_track_stack();
65906 +
65907 need_resched:
65908 preempt_disable();
65909 cpu = smp_processor_id();
65910 @@ -5770,7 +5772,7 @@ EXPORT_SYMBOL(schedule);
65911 * Look out! "owner" is an entirely speculative pointer
65912 * access and not reliable.
65913 */
65914 -int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
65915 +int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
65916 {
65917 unsigned int cpu;
65918 struct rq *rq;
65919 @@ -5784,10 +5786,10 @@ int mutex_spin_on_owner(struct mutex *lo
65920 * DEBUG_PAGEALLOC could have unmapped it if
65921 * the mutex owner just released it and exited.
65922 */
65923 - if (probe_kernel_address(&owner->cpu, cpu))
65924 + if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
65925 return 0;
65926 #else
65927 - cpu = owner->cpu;
65928 + cpu = task_thread_info(owner)->cpu;
65929 #endif
65930
65931 /*
65932 @@ -5816,7 +5818,7 @@ int mutex_spin_on_owner(struct mutex *lo
65933 /*
65934 * Is that owner really running on that cpu?
65935 */
65936 - if (task_thread_info(rq->curr) != owner || need_resched())
65937 + if (rq->curr != owner || need_resched())
65938 return 0;
65939
65940 cpu_relax();
65941 @@ -6359,6 +6361,8 @@ int can_nice(const struct task_struct *p
65942 /* convert nice value [19,-20] to rlimit style value [1,40] */
65943 int nice_rlim = 20 - nice;
65944
65945 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
65946 +
65947 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
65948 capable(CAP_SYS_NICE));
65949 }
65950 @@ -6392,7 +6396,8 @@ SYSCALL_DEFINE1(nice, int, increment)
65951 if (nice > 19)
65952 nice = 19;
65953
65954 - if (increment < 0 && !can_nice(current, nice))
65955 + if (increment < 0 && (!can_nice(current, nice) ||
65956 + gr_handle_chroot_nice()))
65957 return -EPERM;
65958
65959 retval = security_task_setnice(current, nice);
65960 @@ -8774,7 +8779,7 @@ static void init_sched_groups_power(int
65961 long power;
65962 int weight;
65963
65964 - WARN_ON(!sd || !sd->groups);
65965 + BUG_ON(!sd || !sd->groups);
65966
65967 if (cpu != group_first_cpu(sd->groups))
65968 return;
65969 diff -urNp linux-2.6.32.44/kernel/signal.c linux-2.6.32.44/kernel/signal.c
65970 --- linux-2.6.32.44/kernel/signal.c 2011-04-17 17:00:52.000000000 -0400
65971 +++ linux-2.6.32.44/kernel/signal.c 2011-07-14 20:33:33.000000000 -0400
65972 @@ -41,12 +41,12 @@
65973
65974 static struct kmem_cache *sigqueue_cachep;
65975
65976 -static void __user *sig_handler(struct task_struct *t, int sig)
65977 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
65978 {
65979 return t->sighand->action[sig - 1].sa.sa_handler;
65980 }
65981
65982 -static int sig_handler_ignored(void __user *handler, int sig)
65983 +static int sig_handler_ignored(__sighandler_t handler, int sig)
65984 {
65985 /* Is it explicitly or implicitly ignored? */
65986 return handler == SIG_IGN ||
65987 @@ -56,7 +56,7 @@ static int sig_handler_ignored(void __us
65988 static int sig_task_ignored(struct task_struct *t, int sig,
65989 int from_ancestor_ns)
65990 {
65991 - void __user *handler;
65992 + __sighandler_t handler;
65993
65994 handler = sig_handler(t, sig);
65995
65996 @@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc
65997 */
65998 user = get_uid(__task_cred(t)->user);
65999 atomic_inc(&user->sigpending);
66000 +
66001 + if (!override_rlimit)
66002 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
66003 if (override_rlimit ||
66004 atomic_read(&user->sigpending) <=
66005 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
66006 @@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct
66007
66008 int unhandled_signal(struct task_struct *tsk, int sig)
66009 {
66010 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
66011 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
66012 if (is_global_init(tsk))
66013 return 1;
66014 if (handler != SIG_IGN && handler != SIG_DFL)
66015 @@ -627,6 +630,12 @@ static int check_kill_permission(int sig
66016 }
66017 }
66018
66019 + /* allow glibc communication via tgkill to other threads in our
66020 + thread group */
66021 + if ((info->si_code != SI_TKILL || sig != (SIGRTMIN+1) ||
66022 + task_tgid_vnr(t) != info->si_pid) && gr_handle_signal(t, sig))
66023 + return -EPERM;
66024 +
66025 return security_task_kill(t, info, sig, 0);
66026 }
66027
66028 @@ -968,7 +977,7 @@ __group_send_sig_info(int sig, struct si
66029 return send_signal(sig, info, p, 1);
66030 }
66031
66032 -static int
66033 +int
66034 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
66035 {
66036 return send_signal(sig, info, t, 0);
66037 @@ -1005,6 +1014,7 @@ force_sig_info(int sig, struct siginfo *
66038 unsigned long int flags;
66039 int ret, blocked, ignored;
66040 struct k_sigaction *action;
66041 + int is_unhandled = 0;
66042
66043 spin_lock_irqsave(&t->sighand->siglock, flags);
66044 action = &t->sighand->action[sig-1];
66045 @@ -1019,9 +1029,18 @@ force_sig_info(int sig, struct siginfo *
66046 }
66047 if (action->sa.sa_handler == SIG_DFL)
66048 t->signal->flags &= ~SIGNAL_UNKILLABLE;
66049 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
66050 + is_unhandled = 1;
66051 ret = specific_send_sig_info(sig, info, t);
66052 spin_unlock_irqrestore(&t->sighand->siglock, flags);
66053
66054 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
66055 + normal operation */
66056 + if (is_unhandled) {
66057 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
66058 + gr_handle_crash(t, sig);
66059 + }
66060 +
66061 return ret;
66062 }
66063
66064 @@ -1081,8 +1100,11 @@ int group_send_sig_info(int sig, struct
66065 {
66066 int ret = check_kill_permission(sig, info, p);
66067
66068 - if (!ret && sig)
66069 + if (!ret && sig) {
66070 ret = do_send_sig_info(sig, info, p, true);
66071 + if (!ret)
66072 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
66073 + }
66074
66075 return ret;
66076 }
66077 @@ -1644,6 +1666,8 @@ void ptrace_notify(int exit_code)
66078 {
66079 siginfo_t info;
66080
66081 + pax_track_stack();
66082 +
66083 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
66084
66085 memset(&info, 0, sizeof info);
66086 @@ -2275,7 +2299,15 @@ do_send_specific(pid_t tgid, pid_t pid,
66087 int error = -ESRCH;
66088
66089 rcu_read_lock();
66090 - p = find_task_by_vpid(pid);
66091 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
66092 + /* allow glibc communication via tgkill to other threads in our
66093 + thread group */
66094 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
66095 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
66096 + p = find_task_by_vpid_unrestricted(pid);
66097 + else
66098 +#endif
66099 + p = find_task_by_vpid(pid);
66100 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
66101 error = check_kill_permission(sig, info, p);
66102 /*
66103 diff -urNp linux-2.6.32.44/kernel/smp.c linux-2.6.32.44/kernel/smp.c
66104 --- linux-2.6.32.44/kernel/smp.c 2011-03-27 14:31:47.000000000 -0400
66105 +++ linux-2.6.32.44/kernel/smp.c 2011-04-17 15:56:46.000000000 -0400
66106 @@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void
66107 }
66108 EXPORT_SYMBOL(smp_call_function);
66109
66110 -void ipi_call_lock(void)
66111 +void ipi_call_lock(void) __acquires(call_function.lock)
66112 {
66113 spin_lock(&call_function.lock);
66114 }
66115
66116 -void ipi_call_unlock(void)
66117 +void ipi_call_unlock(void) __releases(call_function.lock)
66118 {
66119 spin_unlock(&call_function.lock);
66120 }
66121
66122 -void ipi_call_lock_irq(void)
66123 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
66124 {
66125 spin_lock_irq(&call_function.lock);
66126 }
66127
66128 -void ipi_call_unlock_irq(void)
66129 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
66130 {
66131 spin_unlock_irq(&call_function.lock);
66132 }
66133 diff -urNp linux-2.6.32.44/kernel/softirq.c linux-2.6.32.44/kernel/softirq.c
66134 --- linux-2.6.32.44/kernel/softirq.c 2011-03-27 14:31:47.000000000 -0400
66135 +++ linux-2.6.32.44/kernel/softirq.c 2011-08-05 20:33:55.000000000 -0400
66136 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
66137
66138 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
66139
66140 -char *softirq_to_name[NR_SOFTIRQS] = {
66141 +const char * const softirq_to_name[NR_SOFTIRQS] = {
66142 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
66143 "TASKLET", "SCHED", "HRTIMER", "RCU"
66144 };
66145 @@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
66146
66147 asmlinkage void __do_softirq(void)
66148 {
66149 - struct softirq_action *h;
66150 + const struct softirq_action *h;
66151 __u32 pending;
66152 int max_restart = MAX_SOFTIRQ_RESTART;
66153 int cpu;
66154 @@ -233,7 +233,7 @@ restart:
66155 kstat_incr_softirqs_this_cpu(h - softirq_vec);
66156
66157 trace_softirq_entry(h, softirq_vec);
66158 - h->action(h);
66159 + h->action();
66160 trace_softirq_exit(h, softirq_vec);
66161 if (unlikely(prev_count != preempt_count())) {
66162 printk(KERN_ERR "huh, entered softirq %td %s %p"
66163 @@ -363,9 +363,11 @@ void raise_softirq(unsigned int nr)
66164 local_irq_restore(flags);
66165 }
66166
66167 -void open_softirq(int nr, void (*action)(struct softirq_action *))
66168 +void open_softirq(int nr, void (*action)(void))
66169 {
66170 - softirq_vec[nr].action = action;
66171 + pax_open_kernel();
66172 + *(void **)&softirq_vec[nr].action = action;
66173 + pax_close_kernel();
66174 }
66175
66176 /*
66177 @@ -419,7 +421,7 @@ void __tasklet_hi_schedule_first(struct
66178
66179 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
66180
66181 -static void tasklet_action(struct softirq_action *a)
66182 +static void tasklet_action(void)
66183 {
66184 struct tasklet_struct *list;
66185
66186 @@ -454,7 +456,7 @@ static void tasklet_action(struct softir
66187 }
66188 }
66189
66190 -static void tasklet_hi_action(struct softirq_action *a)
66191 +static void tasklet_hi_action(void)
66192 {
66193 struct tasklet_struct *list;
66194
66195 diff -urNp linux-2.6.32.44/kernel/sys.c linux-2.6.32.44/kernel/sys.c
66196 --- linux-2.6.32.44/kernel/sys.c 2011-03-27 14:31:47.000000000 -0400
66197 +++ linux-2.6.32.44/kernel/sys.c 2011-04-17 15:56:46.000000000 -0400
66198 @@ -133,6 +133,12 @@ static int set_one_prio(struct task_stru
66199 error = -EACCES;
66200 goto out;
66201 }
66202 +
66203 + if (gr_handle_chroot_setpriority(p, niceval)) {
66204 + error = -EACCES;
66205 + goto out;
66206 + }
66207 +
66208 no_nice = security_task_setnice(p, niceval);
66209 if (no_nice) {
66210 error = no_nice;
66211 @@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which,
66212 !(user = find_user(who)))
66213 goto out_unlock; /* No processes for this user */
66214
66215 - do_each_thread(g, p)
66216 + do_each_thread(g, p) {
66217 if (__task_cred(p)->uid == who)
66218 error = set_one_prio(p, niceval, error);
66219 - while_each_thread(g, p);
66220 + } while_each_thread(g, p);
66221 if (who != cred->uid)
66222 free_uid(user); /* For find_user() */
66223 break;
66224 @@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which,
66225 !(user = find_user(who)))
66226 goto out_unlock; /* No processes for this user */
66227
66228 - do_each_thread(g, p)
66229 + do_each_thread(g, p) {
66230 if (__task_cred(p)->uid == who) {
66231 niceval = 20 - task_nice(p);
66232 if (niceval > retval)
66233 retval = niceval;
66234 }
66235 - while_each_thread(g, p);
66236 + } while_each_thread(g, p);
66237 if (who != cred->uid)
66238 free_uid(user); /* for find_user() */
66239 break;
66240 @@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
66241 goto error;
66242 }
66243
66244 + if (gr_check_group_change(new->gid, new->egid, -1))
66245 + goto error;
66246 +
66247 if (rgid != (gid_t) -1 ||
66248 (egid != (gid_t) -1 && egid != old->gid))
66249 new->sgid = new->egid;
66250 @@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
66251 goto error;
66252
66253 retval = -EPERM;
66254 +
66255 + if (gr_check_group_change(gid, gid, gid))
66256 + goto error;
66257 +
66258 if (capable(CAP_SETGID))
66259 new->gid = new->egid = new->sgid = new->fsgid = gid;
66260 else if (gid == old->gid || gid == old->sgid)
66261 @@ -627,6 +640,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
66262 goto error;
66263 }
66264
66265 + if (gr_check_user_change(new->uid, new->euid, -1))
66266 + goto error;
66267 +
66268 if (new->uid != old->uid) {
66269 retval = set_user(new);
66270 if (retval < 0)
66271 @@ -675,6 +691,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
66272 goto error;
66273
66274 retval = -EPERM;
66275 +
66276 + if (gr_check_crash_uid(uid))
66277 + goto error;
66278 + if (gr_check_user_change(uid, uid, uid))
66279 + goto error;
66280 +
66281 if (capable(CAP_SETUID)) {
66282 new->suid = new->uid = uid;
66283 if (uid != old->uid) {
66284 @@ -732,6 +754,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
66285 goto error;
66286 }
66287
66288 + if (gr_check_user_change(ruid, euid, -1))
66289 + goto error;
66290 +
66291 if (ruid != (uid_t) -1) {
66292 new->uid = ruid;
66293 if (ruid != old->uid) {
66294 @@ -800,6 +825,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
66295 goto error;
66296 }
66297
66298 + if (gr_check_group_change(rgid, egid, -1))
66299 + goto error;
66300 +
66301 if (rgid != (gid_t) -1)
66302 new->gid = rgid;
66303 if (egid != (gid_t) -1)
66304 @@ -849,6 +877,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
66305 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
66306 goto error;
66307
66308 + if (gr_check_user_change(-1, -1, uid))
66309 + goto error;
66310 +
66311 if (uid == old->uid || uid == old->euid ||
66312 uid == old->suid || uid == old->fsuid ||
66313 capable(CAP_SETUID)) {
66314 @@ -889,6 +920,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
66315 if (gid == old->gid || gid == old->egid ||
66316 gid == old->sgid || gid == old->fsgid ||
66317 capable(CAP_SETGID)) {
66318 + if (gr_check_group_change(-1, -1, gid))
66319 + goto error;
66320 +
66321 if (gid != old_fsgid) {
66322 new->fsgid = gid;
66323 goto change_okay;
66324 @@ -1454,7 +1488,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
66325 error = get_dumpable(me->mm);
66326 break;
66327 case PR_SET_DUMPABLE:
66328 - if (arg2 < 0 || arg2 > 1) {
66329 + if (arg2 > 1) {
66330 error = -EINVAL;
66331 break;
66332 }
66333 diff -urNp linux-2.6.32.44/kernel/sysctl.c linux-2.6.32.44/kernel/sysctl.c
66334 --- linux-2.6.32.44/kernel/sysctl.c 2011-03-27 14:31:47.000000000 -0400
66335 +++ linux-2.6.32.44/kernel/sysctl.c 2011-04-17 15:56:46.000000000 -0400
66336 @@ -63,6 +63,13 @@
66337 static int deprecated_sysctl_warning(struct __sysctl_args *args);
66338
66339 #if defined(CONFIG_SYSCTL)
66340 +#include <linux/grsecurity.h>
66341 +#include <linux/grinternal.h>
66342 +
66343 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
66344 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
66345 + const int op);
66346 +extern int gr_handle_chroot_sysctl(const int op);
66347
66348 /* External variables not in a header file. */
66349 extern int C_A_D;
66350 @@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_ta
66351 static int proc_taint(struct ctl_table *table, int write,
66352 void __user *buffer, size_t *lenp, loff_t *ppos);
66353 #endif
66354 +extern ctl_table grsecurity_table[];
66355
66356 static struct ctl_table root_table[];
66357 static struct ctl_table_root sysctl_table_root;
66358 @@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
66359 int sysctl_legacy_va_layout;
66360 #endif
66361
66362 +#ifdef CONFIG_PAX_SOFTMODE
66363 +static ctl_table pax_table[] = {
66364 + {
66365 + .ctl_name = CTL_UNNUMBERED,
66366 + .procname = "softmode",
66367 + .data = &pax_softmode,
66368 + .maxlen = sizeof(unsigned int),
66369 + .mode = 0600,
66370 + .proc_handler = &proc_dointvec,
66371 + },
66372 +
66373 + { .ctl_name = 0 }
66374 +};
66375 +#endif
66376 +
66377 extern int prove_locking;
66378 extern int lock_stat;
66379
66380 @@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = N
66381 #endif
66382
66383 static struct ctl_table kern_table[] = {
66384 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
66385 + {
66386 + .ctl_name = CTL_UNNUMBERED,
66387 + .procname = "grsecurity",
66388 + .mode = 0500,
66389 + .child = grsecurity_table,
66390 + },
66391 +#endif
66392 +
66393 +#ifdef CONFIG_PAX_SOFTMODE
66394 + {
66395 + .ctl_name = CTL_UNNUMBERED,
66396 + .procname = "pax",
66397 + .mode = 0500,
66398 + .child = pax_table,
66399 + },
66400 +#endif
66401 +
66402 {
66403 .ctl_name = CTL_UNNUMBERED,
66404 .procname = "sched_child_runs_first",
66405 @@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
66406 .data = &modprobe_path,
66407 .maxlen = KMOD_PATH_LEN,
66408 .mode = 0644,
66409 - .proc_handler = &proc_dostring,
66410 - .strategy = &sysctl_string,
66411 + .proc_handler = &proc_dostring_modpriv,
66412 + .strategy = &sysctl_string_modpriv,
66413 },
66414 {
66415 .ctl_name = CTL_UNNUMBERED,
66416 @@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
66417 .mode = 0644,
66418 .proc_handler = &proc_dointvec
66419 },
66420 + {
66421 + .procname = "heap_stack_gap",
66422 + .data = &sysctl_heap_stack_gap,
66423 + .maxlen = sizeof(sysctl_heap_stack_gap),
66424 + .mode = 0644,
66425 + .proc_handler = proc_doulongvec_minmax,
66426 + },
66427 #else
66428 {
66429 .ctl_name = CTL_UNNUMBERED,
66430 @@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl
66431 return 0;
66432 }
66433
66434 +static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
66435 +
66436 static int parse_table(int __user *name, int nlen,
66437 void __user *oldval, size_t __user *oldlenp,
66438 void __user *newval, size_t newlen,
66439 @@ -1821,7 +1871,7 @@ repeat:
66440 if (n == table->ctl_name) {
66441 int error;
66442 if (table->child) {
66443 - if (sysctl_perm(root, table, MAY_EXEC))
66444 + if (sysctl_perm_nochk(root, table, MAY_EXEC))
66445 return -EPERM;
66446 name++;
66447 nlen--;
66448 @@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *r
66449 int error;
66450 int mode;
66451
66452 + if (table->parent != NULL && table->parent->procname != NULL &&
66453 + table->procname != NULL &&
66454 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
66455 + return -EACCES;
66456 + if (gr_handle_chroot_sysctl(op))
66457 + return -EACCES;
66458 + error = gr_handle_sysctl(table, op);
66459 + if (error)
66460 + return error;
66461 +
66462 + error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
66463 + if (error)
66464 + return error;
66465 +
66466 + if (root->permissions)
66467 + mode = root->permissions(root, current->nsproxy, table);
66468 + else
66469 + mode = table->mode;
66470 +
66471 + return test_perm(mode, op);
66472 +}
66473 +
66474 +int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
66475 +{
66476 + int error;
66477 + int mode;
66478 +
66479 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
66480 if (error)
66481 return error;
66482 @@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *tabl
66483 buffer, lenp, ppos);
66484 }
66485
66486 +int proc_dostring_modpriv(struct ctl_table *table, int write,
66487 + void __user *buffer, size_t *lenp, loff_t *ppos)
66488 +{
66489 + if (write && !capable(CAP_SYS_MODULE))
66490 + return -EPERM;
66491 +
66492 + return _proc_do_string(table->data, table->maxlen, write,
66493 + buffer, lenp, ppos);
66494 +}
66495 +
66496
66497 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
66498 int *valp,
66499 @@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(v
66500 vleft = table->maxlen / sizeof(unsigned long);
66501 left = *lenp;
66502
66503 - for (; left && vleft--; i++, min++, max++, first=0) {
66504 + for (; left && vleft--; i++, first=0) {
66505 if (write) {
66506 while (left) {
66507 char c;
66508 @@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *tabl
66509 return -ENOSYS;
66510 }
66511
66512 +int proc_dostring_modpriv(struct ctl_table *table, int write,
66513 + void __user *buffer, size_t *lenp, loff_t *ppos)
66514 +{
66515 + return -ENOSYS;
66516 +}
66517 +
66518 int proc_dointvec(struct ctl_table *table, int write,
66519 void __user *buffer, size_t *lenp, loff_t *ppos)
66520 {
66521 @@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *tabl
66522 return 1;
66523 }
66524
66525 +int sysctl_string_modpriv(struct ctl_table *table,
66526 + void __user *oldval, size_t __user *oldlenp,
66527 + void __user *newval, size_t newlen)
66528 +{
66529 + if (newval && newlen && !capable(CAP_SYS_MODULE))
66530 + return -EPERM;
66531 +
66532 + return sysctl_string(table, oldval, oldlenp, newval, newlen);
66533 +}
66534 +
66535 /*
66536 * This function makes sure that all of the integers in the vector
66537 * are between the minimum and maximum values given in the arrays
66538 @@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *tabl
66539 return -ENOSYS;
66540 }
66541
66542 +int sysctl_string_modpriv(struct ctl_table *table,
66543 + void __user *oldval, size_t __user *oldlenp,
66544 + void __user *newval, size_t newlen)
66545 +{
66546 + return -ENOSYS;
66547 +}
66548 +
66549 int sysctl_intvec(struct ctl_table *table,
66550 void __user *oldval, size_t __user *oldlenp,
66551 void __user *newval, size_t newlen)
66552 @@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
66553 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
66554 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
66555 EXPORT_SYMBOL(proc_dostring);
66556 +EXPORT_SYMBOL(proc_dostring_modpriv);
66557 EXPORT_SYMBOL(proc_doulongvec_minmax);
66558 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
66559 EXPORT_SYMBOL(register_sysctl_table);
66560 @@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
66561 EXPORT_SYMBOL(sysctl_jiffies);
66562 EXPORT_SYMBOL(sysctl_ms_jiffies);
66563 EXPORT_SYMBOL(sysctl_string);
66564 +EXPORT_SYMBOL(sysctl_string_modpriv);
66565 EXPORT_SYMBOL(sysctl_data);
66566 EXPORT_SYMBOL(unregister_sysctl_table);
66567 diff -urNp linux-2.6.32.44/kernel/sysctl_check.c linux-2.6.32.44/kernel/sysctl_check.c
66568 --- linux-2.6.32.44/kernel/sysctl_check.c 2011-03-27 14:31:47.000000000 -0400
66569 +++ linux-2.6.32.44/kernel/sysctl_check.c 2011-04-17 15:56:46.000000000 -0400
66570 @@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *n
66571 } else {
66572 if ((table->strategy == sysctl_data) ||
66573 (table->strategy == sysctl_string) ||
66574 + (table->strategy == sysctl_string_modpriv) ||
66575 (table->strategy == sysctl_intvec) ||
66576 (table->strategy == sysctl_jiffies) ||
66577 (table->strategy == sysctl_ms_jiffies) ||
66578 (table->proc_handler == proc_dostring) ||
66579 + (table->proc_handler == proc_dostring_modpriv) ||
66580 (table->proc_handler == proc_dointvec) ||
66581 (table->proc_handler == proc_dointvec_minmax) ||
66582 (table->proc_handler == proc_dointvec_jiffies) ||
66583 diff -urNp linux-2.6.32.44/kernel/taskstats.c linux-2.6.32.44/kernel/taskstats.c
66584 --- linux-2.6.32.44/kernel/taskstats.c 2011-07-13 17:23:04.000000000 -0400
66585 +++ linux-2.6.32.44/kernel/taskstats.c 2011-07-13 17:23:19.000000000 -0400
66586 @@ -26,9 +26,12 @@
66587 #include <linux/cgroup.h>
66588 #include <linux/fs.h>
66589 #include <linux/file.h>
66590 +#include <linux/grsecurity.h>
66591 #include <net/genetlink.h>
66592 #include <asm/atomic.h>
66593
66594 +extern int gr_is_taskstats_denied(int pid);
66595 +
66596 /*
66597 * Maximum length of a cpumask that can be specified in
66598 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
66599 @@ -442,6 +445,9 @@ static int taskstats_user_cmd(struct sk_
66600 size_t size;
66601 cpumask_var_t mask;
66602
66603 + if (gr_is_taskstats_denied(current->pid))
66604 + return -EACCES;
66605 +
66606 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
66607 return -ENOMEM;
66608
66609 diff -urNp linux-2.6.32.44/kernel/time/tick-broadcast.c linux-2.6.32.44/kernel/time/tick-broadcast.c
66610 --- linux-2.6.32.44/kernel/time/tick-broadcast.c 2011-05-23 16:56:59.000000000 -0400
66611 +++ linux-2.6.32.44/kernel/time/tick-broadcast.c 2011-05-23 16:57:13.000000000 -0400
66612 @@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct cl
66613 * then clear the broadcast bit.
66614 */
66615 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
66616 - int cpu = smp_processor_id();
66617 + cpu = smp_processor_id();
66618
66619 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
66620 tick_broadcast_clear_oneshot(cpu);
66621 diff -urNp linux-2.6.32.44/kernel/time/timekeeping.c linux-2.6.32.44/kernel/time/timekeeping.c
66622 --- linux-2.6.32.44/kernel/time/timekeeping.c 2011-06-25 12:55:35.000000000 -0400
66623 +++ linux-2.6.32.44/kernel/time/timekeeping.c 2011-06-25 12:56:37.000000000 -0400
66624 @@ -14,6 +14,7 @@
66625 #include <linux/init.h>
66626 #include <linux/mm.h>
66627 #include <linux/sched.h>
66628 +#include <linux/grsecurity.h>
66629 #include <linux/sysdev.h>
66630 #include <linux/clocksource.h>
66631 #include <linux/jiffies.h>
66632 @@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
66633 */
66634 struct timespec ts = xtime;
66635 timespec_add_ns(&ts, nsec);
66636 - ACCESS_ONCE(xtime_cache) = ts;
66637 + ACCESS_ONCE_RW(xtime_cache) = ts;
66638 }
66639
66640 /* must hold xtime_lock */
66641 @@ -333,6 +334,8 @@ int do_settimeofday(struct timespec *tv)
66642 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
66643 return -EINVAL;
66644
66645 + gr_log_timechange();
66646 +
66647 write_seqlock_irqsave(&xtime_lock, flags);
66648
66649 timekeeping_forward_now();
66650 diff -urNp linux-2.6.32.44/kernel/time/timer_list.c linux-2.6.32.44/kernel/time/timer_list.c
66651 --- linux-2.6.32.44/kernel/time/timer_list.c 2011-03-27 14:31:47.000000000 -0400
66652 +++ linux-2.6.32.44/kernel/time/timer_list.c 2011-04-17 15:56:46.000000000 -0400
66653 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
66654
66655 static void print_name_offset(struct seq_file *m, void *sym)
66656 {
66657 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66658 + SEQ_printf(m, "<%p>", NULL);
66659 +#else
66660 char symname[KSYM_NAME_LEN];
66661
66662 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
66663 SEQ_printf(m, "<%p>", sym);
66664 else
66665 SEQ_printf(m, "%s", symname);
66666 +#endif
66667 }
66668
66669 static void
66670 @@ -112,7 +116,11 @@ next_one:
66671 static void
66672 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
66673 {
66674 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66675 + SEQ_printf(m, " .base: %p\n", NULL);
66676 +#else
66677 SEQ_printf(m, " .base: %p\n", base);
66678 +#endif
66679 SEQ_printf(m, " .index: %d\n",
66680 base->index);
66681 SEQ_printf(m, " .resolution: %Lu nsecs\n",
66682 @@ -289,7 +297,11 @@ static int __init init_timer_list_procfs
66683 {
66684 struct proc_dir_entry *pe;
66685
66686 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66687 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
66688 +#else
66689 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
66690 +#endif
66691 if (!pe)
66692 return -ENOMEM;
66693 return 0;
66694 diff -urNp linux-2.6.32.44/kernel/time/timer_stats.c linux-2.6.32.44/kernel/time/timer_stats.c
66695 --- linux-2.6.32.44/kernel/time/timer_stats.c 2011-03-27 14:31:47.000000000 -0400
66696 +++ linux-2.6.32.44/kernel/time/timer_stats.c 2011-05-04 17:56:28.000000000 -0400
66697 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
66698 static unsigned long nr_entries;
66699 static struct entry entries[MAX_ENTRIES];
66700
66701 -static atomic_t overflow_count;
66702 +static atomic_unchecked_t overflow_count;
66703
66704 /*
66705 * The entries are in a hash-table, for fast lookup:
66706 @@ -140,7 +140,7 @@ static void reset_entries(void)
66707 nr_entries = 0;
66708 memset(entries, 0, sizeof(entries));
66709 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
66710 - atomic_set(&overflow_count, 0);
66711 + atomic_set_unchecked(&overflow_count, 0);
66712 }
66713
66714 static struct entry *alloc_entry(void)
66715 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
66716 if (likely(entry))
66717 entry->count++;
66718 else
66719 - atomic_inc(&overflow_count);
66720 + atomic_inc_unchecked(&overflow_count);
66721
66722 out_unlock:
66723 spin_unlock_irqrestore(lock, flags);
66724 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
66725
66726 static void print_name_offset(struct seq_file *m, unsigned long addr)
66727 {
66728 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66729 + seq_printf(m, "<%p>", NULL);
66730 +#else
66731 char symname[KSYM_NAME_LEN];
66732
66733 if (lookup_symbol_name(addr, symname) < 0)
66734 seq_printf(m, "<%p>", (void *)addr);
66735 else
66736 seq_printf(m, "%s", symname);
66737 +#endif
66738 }
66739
66740 static int tstats_show(struct seq_file *m, void *v)
66741 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
66742
66743 seq_puts(m, "Timer Stats Version: v0.2\n");
66744 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
66745 - if (atomic_read(&overflow_count))
66746 + if (atomic_read_unchecked(&overflow_count))
66747 seq_printf(m, "Overflow: %d entries\n",
66748 - atomic_read(&overflow_count));
66749 + atomic_read_unchecked(&overflow_count));
66750
66751 for (i = 0; i < nr_entries; i++) {
66752 entry = entries + i;
66753 @@ -415,7 +419,11 @@ static int __init init_tstats_procfs(voi
66754 {
66755 struct proc_dir_entry *pe;
66756
66757 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66758 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
66759 +#else
66760 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
66761 +#endif
66762 if (!pe)
66763 return -ENOMEM;
66764 return 0;
66765 diff -urNp linux-2.6.32.44/kernel/time.c linux-2.6.32.44/kernel/time.c
66766 --- linux-2.6.32.44/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
66767 +++ linux-2.6.32.44/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
66768 @@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec
66769 return error;
66770
66771 if (tz) {
66772 + /* we log in do_settimeofday called below, so don't log twice
66773 + */
66774 + if (!tv)
66775 + gr_log_timechange();
66776 +
66777 /* SMP safe, global irq locking makes it work. */
66778 sys_tz = *tz;
66779 update_vsyscall_tz();
66780 @@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
66781 * Avoid unnecessary multiplications/divisions in the
66782 * two most common HZ cases:
66783 */
66784 -unsigned int inline jiffies_to_msecs(const unsigned long j)
66785 +inline unsigned int jiffies_to_msecs(const unsigned long j)
66786 {
66787 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
66788 return (MSEC_PER_SEC / HZ) * j;
66789 @@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(con
66790 }
66791 EXPORT_SYMBOL(jiffies_to_msecs);
66792
66793 -unsigned int inline jiffies_to_usecs(const unsigned long j)
66794 +inline unsigned int jiffies_to_usecs(const unsigned long j)
66795 {
66796 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
66797 return (USEC_PER_SEC / HZ) * j;
66798 diff -urNp linux-2.6.32.44/kernel/timer.c linux-2.6.32.44/kernel/timer.c
66799 --- linux-2.6.32.44/kernel/timer.c 2011-03-27 14:31:47.000000000 -0400
66800 +++ linux-2.6.32.44/kernel/timer.c 2011-04-17 15:56:46.000000000 -0400
66801 @@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
66802 /*
66803 * This function runs timers and the timer-tq in bottom half context.
66804 */
66805 -static void run_timer_softirq(struct softirq_action *h)
66806 +static void run_timer_softirq(void)
66807 {
66808 struct tvec_base *base = __get_cpu_var(tvec_bases);
66809
66810 diff -urNp linux-2.6.32.44/kernel/trace/blktrace.c linux-2.6.32.44/kernel/trace/blktrace.c
66811 --- linux-2.6.32.44/kernel/trace/blktrace.c 2011-03-27 14:31:47.000000000 -0400
66812 +++ linux-2.6.32.44/kernel/trace/blktrace.c 2011-05-04 17:56:28.000000000 -0400
66813 @@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct f
66814 struct blk_trace *bt = filp->private_data;
66815 char buf[16];
66816
66817 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
66818 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
66819
66820 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
66821 }
66822 @@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(str
66823 return 1;
66824
66825 bt = buf->chan->private_data;
66826 - atomic_inc(&bt->dropped);
66827 + atomic_inc_unchecked(&bt->dropped);
66828 return 0;
66829 }
66830
66831 @@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_qu
66832
66833 bt->dir = dir;
66834 bt->dev = dev;
66835 - atomic_set(&bt->dropped, 0);
66836 + atomic_set_unchecked(&bt->dropped, 0);
66837
66838 ret = -EIO;
66839 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
66840 diff -urNp linux-2.6.32.44/kernel/trace/ftrace.c linux-2.6.32.44/kernel/trace/ftrace.c
66841 --- linux-2.6.32.44/kernel/trace/ftrace.c 2011-06-25 12:55:35.000000000 -0400
66842 +++ linux-2.6.32.44/kernel/trace/ftrace.c 2011-06-25 12:56:37.000000000 -0400
66843 @@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod,
66844
66845 ip = rec->ip;
66846
66847 + ret = ftrace_arch_code_modify_prepare();
66848 + FTRACE_WARN_ON(ret);
66849 + if (ret)
66850 + return 0;
66851 +
66852 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
66853 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
66854 if (ret) {
66855 ftrace_bug(ret, ip);
66856 rec->flags |= FTRACE_FL_FAILED;
66857 - return 0;
66858 }
66859 - return 1;
66860 + return ret ? 0 : 1;
66861 }
66862
66863 /*
66864 diff -urNp linux-2.6.32.44/kernel/trace/ring_buffer.c linux-2.6.32.44/kernel/trace/ring_buffer.c
66865 --- linux-2.6.32.44/kernel/trace/ring_buffer.c 2011-03-27 14:31:47.000000000 -0400
66866 +++ linux-2.6.32.44/kernel/trace/ring_buffer.c 2011-04-17 15:56:46.000000000 -0400
66867 @@ -606,7 +606,7 @@ static struct list_head *rb_list_head(st
66868 * the reader page). But if the next page is a header page,
66869 * its flags will be non zero.
66870 */
66871 -static int inline
66872 +static inline int
66873 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
66874 struct buffer_page *page, struct list_head *list)
66875 {
66876 diff -urNp linux-2.6.32.44/kernel/trace/trace.c linux-2.6.32.44/kernel/trace/trace.c
66877 --- linux-2.6.32.44/kernel/trace/trace.c 2011-03-27 14:31:47.000000000 -0400
66878 +++ linux-2.6.32.44/kernel/trace/trace.c 2011-05-16 21:46:57.000000000 -0400
66879 @@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(
66880 size_t rem;
66881 unsigned int i;
66882
66883 + pax_track_stack();
66884 +
66885 /* copy the tracer to avoid using a global lock all around */
66886 mutex_lock(&trace_types_lock);
66887 if (unlikely(old_tracer != current_trace && current_trace)) {
66888 @@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file
66889 int entries, size, i;
66890 size_t ret;
66891
66892 + pax_track_stack();
66893 +
66894 if (*ppos & (PAGE_SIZE - 1)) {
66895 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
66896 return -EINVAL;
66897 @@ -3816,10 +3820,9 @@ static const struct file_operations trac
66898 };
66899 #endif
66900
66901 -static struct dentry *d_tracer;
66902 -
66903 struct dentry *tracing_init_dentry(void)
66904 {
66905 + static struct dentry *d_tracer;
66906 static int once;
66907
66908 if (d_tracer)
66909 @@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
66910 return d_tracer;
66911 }
66912
66913 -static struct dentry *d_percpu;
66914 -
66915 struct dentry *tracing_dentry_percpu(void)
66916 {
66917 + static struct dentry *d_percpu;
66918 static int once;
66919 struct dentry *d_tracer;
66920
66921 diff -urNp linux-2.6.32.44/kernel/trace/trace_events.c linux-2.6.32.44/kernel/trace/trace_events.c
66922 --- linux-2.6.32.44/kernel/trace/trace_events.c 2011-03-27 14:31:47.000000000 -0400
66923 +++ linux-2.6.32.44/kernel/trace/trace_events.c 2011-08-05 20:33:55.000000000 -0400
66924 @@ -951,13 +951,10 @@ static LIST_HEAD(ftrace_module_file_list
66925 * Modules must own their file_operations to keep up with
66926 * reference counting.
66927 */
66928 +
66929 struct ftrace_module_file_ops {
66930 struct list_head list;
66931 struct module *mod;
66932 - struct file_operations id;
66933 - struct file_operations enable;
66934 - struct file_operations format;
66935 - struct file_operations filter;
66936 };
66937
66938 static void remove_subsystem_dir(const char *name)
66939 @@ -1004,17 +1001,12 @@ trace_create_file_ops(struct module *mod
66940
66941 file_ops->mod = mod;
66942
66943 - file_ops->id = ftrace_event_id_fops;
66944 - file_ops->id.owner = mod;
66945 -
66946 - file_ops->enable = ftrace_enable_fops;
66947 - file_ops->enable.owner = mod;
66948 -
66949 - file_ops->filter = ftrace_event_filter_fops;
66950 - file_ops->filter.owner = mod;
66951 -
66952 - file_ops->format = ftrace_event_format_fops;
66953 - file_ops->format.owner = mod;
66954 + pax_open_kernel();
66955 + *(void **)&mod->trace_id.owner = mod;
66956 + *(void **)&mod->trace_enable.owner = mod;
66957 + *(void **)&mod->trace_filter.owner = mod;
66958 + *(void **)&mod->trace_format.owner = mod;
66959 + pax_close_kernel();
66960
66961 list_add(&file_ops->list, &ftrace_module_file_list);
66962
66963 @@ -1063,8 +1055,8 @@ static void trace_module_add_events(stru
66964 call->mod = mod;
66965 list_add(&call->list, &ftrace_events);
66966 event_create_dir(call, d_events,
66967 - &file_ops->id, &file_ops->enable,
66968 - &file_ops->filter, &file_ops->format);
66969 + &mod->trace_id, &mod->trace_enable,
66970 + &mod->trace_filter, &mod->trace_format);
66971 }
66972 }
66973
66974 diff -urNp linux-2.6.32.44/kernel/trace/trace_mmiotrace.c linux-2.6.32.44/kernel/trace/trace_mmiotrace.c
66975 --- linux-2.6.32.44/kernel/trace/trace_mmiotrace.c 2011-03-27 14:31:47.000000000 -0400
66976 +++ linux-2.6.32.44/kernel/trace/trace_mmiotrace.c 2011-05-04 17:56:28.000000000 -0400
66977 @@ -23,7 +23,7 @@ struct header_iter {
66978 static struct trace_array *mmio_trace_array;
66979 static bool overrun_detected;
66980 static unsigned long prev_overruns;
66981 -static atomic_t dropped_count;
66982 +static atomic_unchecked_t dropped_count;
66983
66984 static void mmio_reset_data(struct trace_array *tr)
66985 {
66986 @@ -126,7 +126,7 @@ static void mmio_close(struct trace_iter
66987
66988 static unsigned long count_overruns(struct trace_iterator *iter)
66989 {
66990 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
66991 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
66992 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
66993
66994 if (over > prev_overruns)
66995 @@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct
66996 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
66997 sizeof(*entry), 0, pc);
66998 if (!event) {
66999 - atomic_inc(&dropped_count);
67000 + atomic_inc_unchecked(&dropped_count);
67001 return;
67002 }
67003 entry = ring_buffer_event_data(event);
67004 @@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct
67005 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
67006 sizeof(*entry), 0, pc);
67007 if (!event) {
67008 - atomic_inc(&dropped_count);
67009 + atomic_inc_unchecked(&dropped_count);
67010 return;
67011 }
67012 entry = ring_buffer_event_data(event);
67013 diff -urNp linux-2.6.32.44/kernel/trace/trace_output.c linux-2.6.32.44/kernel/trace/trace_output.c
67014 --- linux-2.6.32.44/kernel/trace/trace_output.c 2011-03-27 14:31:47.000000000 -0400
67015 +++ linux-2.6.32.44/kernel/trace/trace_output.c 2011-04-17 15:56:46.000000000 -0400
67016 @@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s,
67017 return 0;
67018 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
67019 if (!IS_ERR(p)) {
67020 - p = mangle_path(s->buffer + s->len, p, "\n");
67021 + p = mangle_path(s->buffer + s->len, p, "\n\\");
67022 if (p) {
67023 s->len = p - s->buffer;
67024 return 1;
67025 diff -urNp linux-2.6.32.44/kernel/trace/trace_stack.c linux-2.6.32.44/kernel/trace/trace_stack.c
67026 --- linux-2.6.32.44/kernel/trace/trace_stack.c 2011-03-27 14:31:47.000000000 -0400
67027 +++ linux-2.6.32.44/kernel/trace/trace_stack.c 2011-04-17 15:56:46.000000000 -0400
67028 @@ -50,7 +50,7 @@ static inline void check_stack(void)
67029 return;
67030
67031 /* we do not handle interrupt stacks yet */
67032 - if (!object_is_on_stack(&this_size))
67033 + if (!object_starts_on_stack(&this_size))
67034 return;
67035
67036 local_irq_save(flags);
67037 diff -urNp linux-2.6.32.44/kernel/trace/trace_workqueue.c linux-2.6.32.44/kernel/trace/trace_workqueue.c
67038 --- linux-2.6.32.44/kernel/trace/trace_workqueue.c 2011-03-27 14:31:47.000000000 -0400
67039 +++ linux-2.6.32.44/kernel/trace/trace_workqueue.c 2011-04-17 15:56:46.000000000 -0400
67040 @@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
67041 int cpu;
67042 pid_t pid;
67043 /* Can be inserted from interrupt or user context, need to be atomic */
67044 - atomic_t inserted;
67045 + atomic_unchecked_t inserted;
67046 /*
67047 * Don't need to be atomic, works are serialized in a single workqueue thread
67048 * on a single CPU.
67049 @@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_st
67050 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
67051 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
67052 if (node->pid == wq_thread->pid) {
67053 - atomic_inc(&node->inserted);
67054 + atomic_inc_unchecked(&node->inserted);
67055 goto found;
67056 }
67057 }
67058 @@ -205,7 +205,7 @@ static int workqueue_stat_show(struct se
67059 tsk = get_pid_task(pid, PIDTYPE_PID);
67060 if (tsk) {
67061 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
67062 - atomic_read(&cws->inserted), cws->executed,
67063 + atomic_read_unchecked(&cws->inserted), cws->executed,
67064 tsk->comm);
67065 put_task_struct(tsk);
67066 }
67067 diff -urNp linux-2.6.32.44/kernel/user.c linux-2.6.32.44/kernel/user.c
67068 --- linux-2.6.32.44/kernel/user.c 2011-03-27 14:31:47.000000000 -0400
67069 +++ linux-2.6.32.44/kernel/user.c 2011-04-17 15:56:46.000000000 -0400
67070 @@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct use
67071 spin_lock_irq(&uidhash_lock);
67072 up = uid_hash_find(uid, hashent);
67073 if (up) {
67074 + put_user_ns(ns);
67075 key_put(new->uid_keyring);
67076 key_put(new->session_keyring);
67077 kmem_cache_free(uid_cachep, new);
67078 diff -urNp linux-2.6.32.44/lib/bug.c linux-2.6.32.44/lib/bug.c
67079 --- linux-2.6.32.44/lib/bug.c 2011-03-27 14:31:47.000000000 -0400
67080 +++ linux-2.6.32.44/lib/bug.c 2011-04-17 15:56:46.000000000 -0400
67081 @@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned l
67082 return BUG_TRAP_TYPE_NONE;
67083
67084 bug = find_bug(bugaddr);
67085 + if (!bug)
67086 + return BUG_TRAP_TYPE_NONE;
67087
67088 printk(KERN_EMERG "------------[ cut here ]------------\n");
67089
67090 diff -urNp linux-2.6.32.44/lib/debugobjects.c linux-2.6.32.44/lib/debugobjects.c
67091 --- linux-2.6.32.44/lib/debugobjects.c 2011-07-13 17:23:04.000000000 -0400
67092 +++ linux-2.6.32.44/lib/debugobjects.c 2011-07-13 17:23:19.000000000 -0400
67093 @@ -277,7 +277,7 @@ static void debug_object_is_on_stack(voi
67094 if (limit > 4)
67095 return;
67096
67097 - is_on_stack = object_is_on_stack(addr);
67098 + is_on_stack = object_starts_on_stack(addr);
67099 if (is_on_stack == onstack)
67100 return;
67101
67102 diff -urNp linux-2.6.32.44/lib/dma-debug.c linux-2.6.32.44/lib/dma-debug.c
67103 --- linux-2.6.32.44/lib/dma-debug.c 2011-03-27 14:31:47.000000000 -0400
67104 +++ linux-2.6.32.44/lib/dma-debug.c 2011-04-17 15:56:46.000000000 -0400
67105 @@ -861,7 +861,7 @@ out:
67106
67107 static void check_for_stack(struct device *dev, void *addr)
67108 {
67109 - if (object_is_on_stack(addr))
67110 + if (object_starts_on_stack(addr))
67111 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
67112 "stack [addr=%p]\n", addr);
67113 }
67114 diff -urNp linux-2.6.32.44/lib/idr.c linux-2.6.32.44/lib/idr.c
67115 --- linux-2.6.32.44/lib/idr.c 2011-03-27 14:31:47.000000000 -0400
67116 +++ linux-2.6.32.44/lib/idr.c 2011-04-17 15:56:46.000000000 -0400
67117 @@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, in
67118 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
67119
67120 /* if already at the top layer, we need to grow */
67121 - if (id >= 1 << (idp->layers * IDR_BITS)) {
67122 + if (id >= (1 << (idp->layers * IDR_BITS))) {
67123 *starting_id = id;
67124 return IDR_NEED_TO_GROW;
67125 }
67126 diff -urNp linux-2.6.32.44/lib/inflate.c linux-2.6.32.44/lib/inflate.c
67127 --- linux-2.6.32.44/lib/inflate.c 2011-03-27 14:31:47.000000000 -0400
67128 +++ linux-2.6.32.44/lib/inflate.c 2011-04-17 15:56:46.000000000 -0400
67129 @@ -266,7 +266,7 @@ static void free(void *where)
67130 malloc_ptr = free_mem_ptr;
67131 }
67132 #else
67133 -#define malloc(a) kmalloc(a, GFP_KERNEL)
67134 +#define malloc(a) kmalloc((a), GFP_KERNEL)
67135 #define free(a) kfree(a)
67136 #endif
67137
67138 diff -urNp linux-2.6.32.44/lib/Kconfig.debug linux-2.6.32.44/lib/Kconfig.debug
67139 --- linux-2.6.32.44/lib/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
67140 +++ linux-2.6.32.44/lib/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
67141 @@ -905,7 +905,7 @@ config LATENCYTOP
67142 select STACKTRACE
67143 select SCHEDSTATS
67144 select SCHED_DEBUG
67145 - depends on HAVE_LATENCYTOP_SUPPORT
67146 + depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
67147 help
67148 Enable this option if you want to use the LatencyTOP tool
67149 to find out which userspace is blocking on what kernel operations.
67150 diff -urNp linux-2.6.32.44/lib/kobject.c linux-2.6.32.44/lib/kobject.c
67151 --- linux-2.6.32.44/lib/kobject.c 2011-03-27 14:31:47.000000000 -0400
67152 +++ linux-2.6.32.44/lib/kobject.c 2011-04-17 15:56:46.000000000 -0400
67153 @@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct ko
67154 return ret;
67155 }
67156
67157 -struct sysfs_ops kobj_sysfs_ops = {
67158 +const struct sysfs_ops kobj_sysfs_ops = {
67159 .show = kobj_attr_show,
67160 .store = kobj_attr_store,
67161 };
67162 @@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
67163 * If the kset was not able to be created, NULL will be returned.
67164 */
67165 static struct kset *kset_create(const char *name,
67166 - struct kset_uevent_ops *uevent_ops,
67167 + const struct kset_uevent_ops *uevent_ops,
67168 struct kobject *parent_kobj)
67169 {
67170 struct kset *kset;
67171 @@ -832,7 +832,7 @@ static struct kset *kset_create(const ch
67172 * If the kset was not able to be created, NULL will be returned.
67173 */
67174 struct kset *kset_create_and_add(const char *name,
67175 - struct kset_uevent_ops *uevent_ops,
67176 + const struct kset_uevent_ops *uevent_ops,
67177 struct kobject *parent_kobj)
67178 {
67179 struct kset *kset;
67180 diff -urNp linux-2.6.32.44/lib/kobject_uevent.c linux-2.6.32.44/lib/kobject_uevent.c
67181 --- linux-2.6.32.44/lib/kobject_uevent.c 2011-03-27 14:31:47.000000000 -0400
67182 +++ linux-2.6.32.44/lib/kobject_uevent.c 2011-04-17 15:56:46.000000000 -0400
67183 @@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *k
67184 const char *subsystem;
67185 struct kobject *top_kobj;
67186 struct kset *kset;
67187 - struct kset_uevent_ops *uevent_ops;
67188 + const struct kset_uevent_ops *uevent_ops;
67189 u64 seq;
67190 int i = 0;
67191 int retval = 0;
67192 diff -urNp linux-2.6.32.44/lib/kref.c linux-2.6.32.44/lib/kref.c
67193 --- linux-2.6.32.44/lib/kref.c 2011-03-27 14:31:47.000000000 -0400
67194 +++ linux-2.6.32.44/lib/kref.c 2011-04-17 15:56:46.000000000 -0400
67195 @@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
67196 */
67197 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
67198 {
67199 - WARN_ON(release == NULL);
67200 + BUG_ON(release == NULL);
67201 WARN_ON(release == (void (*)(struct kref *))kfree);
67202
67203 if (atomic_dec_and_test(&kref->refcount)) {
67204 diff -urNp linux-2.6.32.44/lib/Makefile linux-2.6.32.44/lib/Makefile
67205 --- linux-2.6.32.44/lib/Makefile 2011-03-27 14:31:47.000000000 -0400
67206 +++ linux-2.6.32.44/lib/Makefile 2011-08-07 19:48:09.000000000 -0400
67207 @@ -10,7 +10,7 @@ endif
67208 lib-y := ctype.o string.o vsprintf.o cmdline.o \
67209 rbtree.o radix-tree.o dump_stack.o \
67210 idr.o int_sqrt.o extable.o prio_tree.o \
67211 - sha1.o irq_regs.o reciprocal_div.o argv_split.o \
67212 + sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \
67213 proportions.o prio_heap.o ratelimit.o show_mem.o \
67214 is_single_threaded.o plist.o decompress.o flex_array.o
67215
67216 diff -urNp linux-2.6.32.44/lib/md5.c linux-2.6.32.44/lib/md5.c
67217 --- linux-2.6.32.44/lib/md5.c 1969-12-31 19:00:00.000000000 -0500
67218 +++ linux-2.6.32.44/lib/md5.c 2011-08-07 19:48:09.000000000 -0400
67219 @@ -0,0 +1,95 @@
67220 +#include <linux/kernel.h>
67221 +#include <linux/module.h>
67222 +#include <linux/cryptohash.h>
67223 +
67224 +#define F1(x, y, z) (z ^ (x & (y ^ z)))
67225 +#define F2(x, y, z) F1(z, x, y)
67226 +#define F3(x, y, z) (x ^ y ^ z)
67227 +#define F4(x, y, z) (y ^ (x | ~z))
67228 +
67229 +#define MD5STEP(f, w, x, y, z, in, s) \
67230 + (w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x)
67231 +
67232 +void md5_transform(__u32 *hash, __u32 const *in)
67233 +{
67234 + u32 a, b, c, d;
67235 +
67236 + a = hash[0];
67237 + b = hash[1];
67238 + c = hash[2];
67239 + d = hash[3];
67240 +
67241 + MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
67242 + MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
67243 + MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
67244 + MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
67245 + MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
67246 + MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
67247 + MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
67248 + MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
67249 + MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
67250 + MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
67251 + MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
67252 + MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
67253 + MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
67254 + MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
67255 + MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
67256 + MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
67257 +
67258 + MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
67259 + MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
67260 + MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
67261 + MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
67262 + MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
67263 + MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
67264 + MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
67265 + MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
67266 + MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
67267 + MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
67268 + MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
67269 + MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
67270 + MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
67271 + MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
67272 + MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
67273 + MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
67274 +
67275 + MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
67276 + MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
67277 + MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
67278 + MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
67279 + MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
67280 + MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
67281 + MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
67282 + MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
67283 + MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
67284 + MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
67285 + MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
67286 + MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
67287 + MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
67288 + MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
67289 + MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
67290 + MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
67291 +
67292 + MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
67293 + MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
67294 + MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
67295 + MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
67296 + MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
67297 + MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
67298 + MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
67299 + MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
67300 + MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
67301 + MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
67302 + MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
67303 + MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
67304 + MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
67305 + MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
67306 + MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
67307 + MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
67308 +
67309 + hash[0] += a;
67310 + hash[1] += b;
67311 + hash[2] += c;
67312 + hash[3] += d;
67313 +}
67314 +EXPORT_SYMBOL(md5_transform);
67315 diff -urNp linux-2.6.32.44/lib/parser.c linux-2.6.32.44/lib/parser.c
67316 --- linux-2.6.32.44/lib/parser.c 2011-03-27 14:31:47.000000000 -0400
67317 +++ linux-2.6.32.44/lib/parser.c 2011-04-17 15:56:46.000000000 -0400
67318 @@ -126,7 +126,7 @@ static int match_number(substring_t *s,
67319 char *buf;
67320 int ret;
67321
67322 - buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
67323 + buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
67324 if (!buf)
67325 return -ENOMEM;
67326 memcpy(buf, s->from, s->to - s->from);
67327 diff -urNp linux-2.6.32.44/lib/radix-tree.c linux-2.6.32.44/lib/radix-tree.c
67328 --- linux-2.6.32.44/lib/radix-tree.c 2011-03-27 14:31:47.000000000 -0400
67329 +++ linux-2.6.32.44/lib/radix-tree.c 2011-04-17 15:56:46.000000000 -0400
67330 @@ -81,7 +81,7 @@ struct radix_tree_preload {
67331 int nr;
67332 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
67333 };
67334 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
67335 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
67336
67337 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
67338 {
67339 diff -urNp linux-2.6.32.44/lib/random32.c linux-2.6.32.44/lib/random32.c
67340 --- linux-2.6.32.44/lib/random32.c 2011-03-27 14:31:47.000000000 -0400
67341 +++ linux-2.6.32.44/lib/random32.c 2011-04-17 15:56:46.000000000 -0400
67342 @@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *
67343 */
67344 static inline u32 __seed(u32 x, u32 m)
67345 {
67346 - return (x < m) ? x + m : x;
67347 + return (x <= m) ? x + m + 1 : x;
67348 }
67349
67350 /**
67351 diff -urNp linux-2.6.32.44/lib/vsprintf.c linux-2.6.32.44/lib/vsprintf.c
67352 --- linux-2.6.32.44/lib/vsprintf.c 2011-03-27 14:31:47.000000000 -0400
67353 +++ linux-2.6.32.44/lib/vsprintf.c 2011-04-17 15:56:46.000000000 -0400
67354 @@ -16,6 +16,9 @@
67355 * - scnprintf and vscnprintf
67356 */
67357
67358 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67359 +#define __INCLUDED_BY_HIDESYM 1
67360 +#endif
67361 #include <stdarg.h>
67362 #include <linux/module.h>
67363 #include <linux/types.h>
67364 @@ -546,12 +549,12 @@ static char *number(char *buf, char *end
67365 return buf;
67366 }
67367
67368 -static char *string(char *buf, char *end, char *s, struct printf_spec spec)
67369 +static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
67370 {
67371 int len, i;
67372
67373 if ((unsigned long)s < PAGE_SIZE)
67374 - s = "<NULL>";
67375 + s = "(null)";
67376
67377 len = strnlen(s, spec.precision);
67378
67379 @@ -581,7 +584,7 @@ static char *symbol_string(char *buf, ch
67380 unsigned long value = (unsigned long) ptr;
67381 #ifdef CONFIG_KALLSYMS
67382 char sym[KSYM_SYMBOL_LEN];
67383 - if (ext != 'f' && ext != 's')
67384 + if (ext != 'f' && ext != 's' && ext != 'a')
67385 sprint_symbol(sym, value);
67386 else
67387 kallsyms_lookup(value, NULL, NULL, NULL, sym);
67388 @@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf,
67389 * - 'f' For simple symbolic function names without offset
67390 * - 'S' For symbolic direct pointers with offset
67391 * - 's' For symbolic direct pointers without offset
67392 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
67393 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
67394 * - 'R' For a struct resource pointer, it prints the range of
67395 * addresses (not the name nor the flags)
67396 * - 'M' For a 6-byte MAC address, it prints the address in the
67397 @@ -822,7 +827,7 @@ static char *pointer(const char *fmt, ch
67398 struct printf_spec spec)
67399 {
67400 if (!ptr)
67401 - return string(buf, end, "(null)", spec);
67402 + return string(buf, end, "(nil)", spec);
67403
67404 switch (*fmt) {
67405 case 'F':
67406 @@ -831,6 +836,14 @@ static char *pointer(const char *fmt, ch
67407 case 's':
67408 /* Fallthrough */
67409 case 'S':
67410 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67411 + break;
67412 +#else
67413 + return symbol_string(buf, end, ptr, spec, *fmt);
67414 +#endif
67415 + case 'a':
67416 + /* Fallthrough */
67417 + case 'A':
67418 return symbol_string(buf, end, ptr, spec, *fmt);
67419 case 'R':
67420 return resource_string(buf, end, ptr, spec);
67421 @@ -1445,7 +1458,7 @@ do { \
67422 size_t len;
67423 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
67424 || (unsigned long)save_str < PAGE_SIZE)
67425 - save_str = "<NULL>";
67426 + save_str = "(null)";
67427 len = strlen(save_str);
67428 if (str + len + 1 < end)
67429 memcpy(str, save_str, len + 1);
67430 @@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size,
67431 typeof(type) value; \
67432 if (sizeof(type) == 8) { \
67433 args = PTR_ALIGN(args, sizeof(u32)); \
67434 - *(u32 *)&value = *(u32 *)args; \
67435 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
67436 + *(u32 *)&value = *(const u32 *)args; \
67437 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
67438 } else { \
67439 args = PTR_ALIGN(args, sizeof(type)); \
67440 - value = *(typeof(type) *)args; \
67441 + value = *(const typeof(type) *)args; \
67442 } \
67443 args += sizeof(type); \
67444 value; \
67445 @@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size,
67446 const char *str_arg = args;
67447 size_t len = strlen(str_arg);
67448 args += len + 1;
67449 - str = string(str, end, (char *)str_arg, spec);
67450 + str = string(str, end, str_arg, spec);
67451 break;
67452 }
67453
67454 diff -urNp linux-2.6.32.44/localversion-grsec linux-2.6.32.44/localversion-grsec
67455 --- linux-2.6.32.44/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
67456 +++ linux-2.6.32.44/localversion-grsec 2011-04-17 15:56:46.000000000 -0400
67457 @@ -0,0 +1 @@
67458 +-grsec
67459 diff -urNp linux-2.6.32.44/Makefile linux-2.6.32.44/Makefile
67460 --- linux-2.6.32.44/Makefile 2011-08-09 18:35:28.000000000 -0400
67461 +++ linux-2.6.32.44/Makefile 2011-08-09 18:33:56.000000000 -0400
67462 @@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
67463
67464 HOSTCC = gcc
67465 HOSTCXX = g++
67466 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
67467 -HOSTCXXFLAGS = -O2
67468 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
67469 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
67470 +HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
67471
67472 # Decide whether to build built-in, modular, or both.
67473 # Normally, just do built-in.
67474 @@ -342,10 +343,12 @@ LINUXINCLUDE := -Iinclude \
67475 KBUILD_CPPFLAGS := -D__KERNEL__
67476
67477 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
67478 + -W -Wno-unused-parameter -Wno-missing-field-initializers \
67479 -fno-strict-aliasing -fno-common \
67480 -Werror-implicit-function-declaration \
67481 -Wno-format-security \
67482 -fno-delete-null-pointer-checks
67483 +KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
67484 KBUILD_AFLAGS := -D__ASSEMBLY__
67485
67486 # Read KERNELRELEASE from include/config/kernel.release (if it exists)
67487 @@ -376,8 +379,8 @@ export RCS_TAR_IGNORE := --exclude SCCS
67488 # Rules shared between *config targets and build targets
67489
67490 # Basic helpers built in scripts/
67491 -PHONY += scripts_basic
67492 -scripts_basic:
67493 +PHONY += scripts_basic gcc-plugins
67494 +scripts_basic: gcc-plugins
67495 $(Q)$(MAKE) $(build)=scripts/basic
67496
67497 # To avoid any implicit rule to kick in, define an empty command.
67498 @@ -403,7 +406,7 @@ endif
67499 # of make so .config is not included in this case either (for *config).
67500
67501 no-dot-config-targets := clean mrproper distclean \
67502 - cscope TAGS tags help %docs check% \
67503 + cscope gtags TAGS tags help %docs check% \
67504 include/linux/version.h headers_% \
67505 kernelrelease kernelversion
67506
67507 @@ -526,6 +529,25 @@ else
67508 KBUILD_CFLAGS += -O2
67509 endif
67510
67511 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh $(HOSTCC)), y)
67512 +CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so
67513 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
67514 +STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
67515 +endif
67516 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
67517 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN
67518 +gcc-plugins:
67519 + $(Q)$(MAKE) $(build)=tools/gcc
67520 +else
67521 +gcc-plugins:
67522 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
67523 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.))
67524 +else
67525 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
67526 +endif
67527 + $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
67528 +endif
67529 +
67530 include $(srctree)/arch/$(SRCARCH)/Makefile
67531
67532 ifneq ($(CONFIG_FRAME_WARN),0)
67533 @@ -644,7 +666,7 @@ export mod_strip_cmd
67534
67535
67536 ifeq ($(KBUILD_EXTMOD),)
67537 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
67538 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
67539
67540 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
67541 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
67542 @@ -970,7 +992,7 @@ ifneq ($(KBUILD_SRC),)
67543 endif
67544
67545 # prepare2 creates a makefile if using a separate output directory
67546 -prepare2: prepare3 outputmakefile
67547 +prepare2: prepare3 outputmakefile gcc-plugins
67548
67549 prepare1: prepare2 include/linux/version.h include/linux/utsrelease.h \
67550 include/asm include/config/auto.conf
67551 @@ -1198,7 +1220,7 @@ MRPROPER_FILES += .config .config.old in
67552 include/linux/autoconf.h include/linux/version.h \
67553 include/linux/utsrelease.h \
67554 include/linux/bounds.h include/asm*/asm-offsets.h \
67555 - Module.symvers Module.markers tags TAGS cscope*
67556 + Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
67557
67558 # clean - Delete most, but leave enough to build external modules
67559 #
67560 @@ -1289,6 +1311,7 @@ help:
67561 @echo ' modules_prepare - Set up for building external modules'
67562 @echo ' tags/TAGS - Generate tags file for editors'
67563 @echo ' cscope - Generate cscope index'
67564 + @echo ' gtags - Generate GNU GLOBAL index'
67565 @echo ' kernelrelease - Output the release version string'
67566 @echo ' kernelversion - Output the version stored in Makefile'
67567 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
67568 @@ -1421,7 +1444,7 @@ clean: $(clean-dirs)
67569 $(call cmd,rmdirs)
67570 $(call cmd,rmfiles)
67571 @find $(KBUILD_EXTMOD) $(RCS_FIND_IGNORE) \
67572 - \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
67573 + \( -name '*.[oas]' -o -name '*.[ks]o' -o -name '.*.cmd' \
67574 -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
67575 -o -name '*.gcno' \) -type f -print | xargs rm -f
67576
67577 @@ -1445,7 +1468,7 @@ endif # KBUILD_EXTMOD
67578 quiet_cmd_tags = GEN $@
67579 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
67580
67581 -tags TAGS cscope: FORCE
67582 +tags TAGS cscope gtags: FORCE
67583 $(call cmd,tags)
67584
67585 # Scripts to check various things for consistency
67586 diff -urNp linux-2.6.32.44/mm/backing-dev.c linux-2.6.32.44/mm/backing-dev.c
67587 --- linux-2.6.32.44/mm/backing-dev.c 2011-03-27 14:31:47.000000000 -0400
67588 +++ linux-2.6.32.44/mm/backing-dev.c 2011-05-04 17:56:28.000000000 -0400
67589 @@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rc
67590 * Add the default flusher task that gets created for any bdi
67591 * that has dirty data pending writeout
67592 */
67593 -void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
67594 +static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
67595 {
67596 if (!bdi_cap_writeback_dirty(bdi))
67597 return;
67598 diff -urNp linux-2.6.32.44/mm/filemap.c linux-2.6.32.44/mm/filemap.c
67599 --- linux-2.6.32.44/mm/filemap.c 2011-03-27 14:31:47.000000000 -0400
67600 +++ linux-2.6.32.44/mm/filemap.c 2011-04-17 15:56:46.000000000 -0400
67601 @@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file
67602 struct address_space *mapping = file->f_mapping;
67603
67604 if (!mapping->a_ops->readpage)
67605 - return -ENOEXEC;
67606 + return -ENODEV;
67607 file_accessed(file);
67608 vma->vm_ops = &generic_file_vm_ops;
67609 vma->vm_flags |= VM_CAN_NONLINEAR;
67610 @@ -2027,6 +2027,7 @@ inline int generic_write_checks(struct f
67611 *pos = i_size_read(inode);
67612
67613 if (limit != RLIM_INFINITY) {
67614 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
67615 if (*pos >= limit) {
67616 send_sig(SIGXFSZ, current, 0);
67617 return -EFBIG;
67618 diff -urNp linux-2.6.32.44/mm/fremap.c linux-2.6.32.44/mm/fremap.c
67619 --- linux-2.6.32.44/mm/fremap.c 2011-03-27 14:31:47.000000000 -0400
67620 +++ linux-2.6.32.44/mm/fremap.c 2011-04-17 15:56:46.000000000 -0400
67621 @@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
67622 retry:
67623 vma = find_vma(mm, start);
67624
67625 +#ifdef CONFIG_PAX_SEGMEXEC
67626 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
67627 + goto out;
67628 +#endif
67629 +
67630 /*
67631 * Make sure the vma is shared, that it supports prefaulting,
67632 * and that the remapped range is valid and fully within
67633 @@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
67634 /*
67635 * drop PG_Mlocked flag for over-mapped range
67636 */
67637 - unsigned int saved_flags = vma->vm_flags;
67638 + unsigned long saved_flags = vma->vm_flags;
67639 munlock_vma_pages_range(vma, start, start + size);
67640 vma->vm_flags = saved_flags;
67641 }
67642 diff -urNp linux-2.6.32.44/mm/highmem.c linux-2.6.32.44/mm/highmem.c
67643 --- linux-2.6.32.44/mm/highmem.c 2011-03-27 14:31:47.000000000 -0400
67644 +++ linux-2.6.32.44/mm/highmem.c 2011-04-17 15:56:46.000000000 -0400
67645 @@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
67646 * So no dangers, even with speculative execution.
67647 */
67648 page = pte_page(pkmap_page_table[i]);
67649 + pax_open_kernel();
67650 pte_clear(&init_mm, (unsigned long)page_address(page),
67651 &pkmap_page_table[i]);
67652 -
67653 + pax_close_kernel();
67654 set_page_address(page, NULL);
67655 need_flush = 1;
67656 }
67657 @@ -177,9 +178,11 @@ start:
67658 }
67659 }
67660 vaddr = PKMAP_ADDR(last_pkmap_nr);
67661 +
67662 + pax_open_kernel();
67663 set_pte_at(&init_mm, vaddr,
67664 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
67665 -
67666 + pax_close_kernel();
67667 pkmap_count[last_pkmap_nr] = 1;
67668 set_page_address(page, (void *)vaddr);
67669
67670 diff -urNp linux-2.6.32.44/mm/hugetlb.c linux-2.6.32.44/mm/hugetlb.c
67671 --- linux-2.6.32.44/mm/hugetlb.c 2011-07-13 17:23:04.000000000 -0400
67672 +++ linux-2.6.32.44/mm/hugetlb.c 2011-07-13 17:23:19.000000000 -0400
67673 @@ -1933,6 +1933,26 @@ static int unmap_ref_private(struct mm_s
67674 return 1;
67675 }
67676
67677 +#ifdef CONFIG_PAX_SEGMEXEC
67678 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
67679 +{
67680 + struct mm_struct *mm = vma->vm_mm;
67681 + struct vm_area_struct *vma_m;
67682 + unsigned long address_m;
67683 + pte_t *ptep_m;
67684 +
67685 + vma_m = pax_find_mirror_vma(vma);
67686 + if (!vma_m)
67687 + return;
67688 +
67689 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67690 + address_m = address + SEGMEXEC_TASK_SIZE;
67691 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
67692 + get_page(page_m);
67693 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
67694 +}
67695 +#endif
67696 +
67697 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
67698 unsigned long address, pte_t *ptep, pte_t pte,
67699 struct page *pagecache_page)
67700 @@ -2004,6 +2024,11 @@ retry_avoidcopy:
67701 huge_ptep_clear_flush(vma, address, ptep);
67702 set_huge_pte_at(mm, address, ptep,
67703 make_huge_pte(vma, new_page, 1));
67704 +
67705 +#ifdef CONFIG_PAX_SEGMEXEC
67706 + pax_mirror_huge_pte(vma, address, new_page);
67707 +#endif
67708 +
67709 /* Make the old page be freed below */
67710 new_page = old_page;
67711 }
67712 @@ -2135,6 +2160,10 @@ retry:
67713 && (vma->vm_flags & VM_SHARED)));
67714 set_huge_pte_at(mm, address, ptep, new_pte);
67715
67716 +#ifdef CONFIG_PAX_SEGMEXEC
67717 + pax_mirror_huge_pte(vma, address, page);
67718 +#endif
67719 +
67720 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
67721 /* Optimization, do the COW without a second fault */
67722 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
67723 @@ -2163,6 +2192,28 @@ int hugetlb_fault(struct mm_struct *mm,
67724 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
67725 struct hstate *h = hstate_vma(vma);
67726
67727 +#ifdef CONFIG_PAX_SEGMEXEC
67728 + struct vm_area_struct *vma_m;
67729 +
67730 + vma_m = pax_find_mirror_vma(vma);
67731 + if (vma_m) {
67732 + unsigned long address_m;
67733 +
67734 + if (vma->vm_start > vma_m->vm_start) {
67735 + address_m = address;
67736 + address -= SEGMEXEC_TASK_SIZE;
67737 + vma = vma_m;
67738 + h = hstate_vma(vma);
67739 + } else
67740 + address_m = address + SEGMEXEC_TASK_SIZE;
67741 +
67742 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
67743 + return VM_FAULT_OOM;
67744 + address_m &= HPAGE_MASK;
67745 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
67746 + }
67747 +#endif
67748 +
67749 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
67750 if (!ptep)
67751 return VM_FAULT_OOM;
67752 diff -urNp linux-2.6.32.44/mm/internal.h linux-2.6.32.44/mm/internal.h
67753 --- linux-2.6.32.44/mm/internal.h 2011-03-27 14:31:47.000000000 -0400
67754 +++ linux-2.6.32.44/mm/internal.h 2011-07-09 09:13:08.000000000 -0400
67755 @@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
67756 * in mm/page_alloc.c
67757 */
67758 extern void __free_pages_bootmem(struct page *page, unsigned int order);
67759 +extern void free_compound_page(struct page *page);
67760 extern void prep_compound_page(struct page *page, unsigned long order);
67761
67762
67763 diff -urNp linux-2.6.32.44/mm/Kconfig linux-2.6.32.44/mm/Kconfig
67764 --- linux-2.6.32.44/mm/Kconfig 2011-03-27 14:31:47.000000000 -0400
67765 +++ linux-2.6.32.44/mm/Kconfig 2011-04-17 15:56:46.000000000 -0400
67766 @@ -228,7 +228,7 @@ config KSM
67767 config DEFAULT_MMAP_MIN_ADDR
67768 int "Low address space to protect from user allocation"
67769 depends on MMU
67770 - default 4096
67771 + default 65536
67772 help
67773 This is the portion of low virtual memory which should be protected
67774 from userspace allocation. Keeping a user from writing to low pages
67775 diff -urNp linux-2.6.32.44/mm/kmemleak.c linux-2.6.32.44/mm/kmemleak.c
67776 --- linux-2.6.32.44/mm/kmemleak.c 2011-06-25 12:55:35.000000000 -0400
67777 +++ linux-2.6.32.44/mm/kmemleak.c 2011-06-25 12:56:37.000000000 -0400
67778 @@ -358,7 +358,7 @@ static void print_unreferenced(struct se
67779
67780 for (i = 0; i < object->trace_len; i++) {
67781 void *ptr = (void *)object->trace[i];
67782 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
67783 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
67784 }
67785 }
67786
67787 diff -urNp linux-2.6.32.44/mm/maccess.c linux-2.6.32.44/mm/maccess.c
67788 --- linux-2.6.32.44/mm/maccess.c 2011-03-27 14:31:47.000000000 -0400
67789 +++ linux-2.6.32.44/mm/maccess.c 2011-04-17 15:56:46.000000000 -0400
67790 @@ -14,7 +14,7 @@
67791 * Safely read from address @src to the buffer at @dst. If a kernel fault
67792 * happens, handle that and return -EFAULT.
67793 */
67794 -long probe_kernel_read(void *dst, void *src, size_t size)
67795 +long probe_kernel_read(void *dst, const void *src, size_t size)
67796 {
67797 long ret;
67798 mm_segment_t old_fs = get_fs();
67799 @@ -39,7 +39,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
67800 * Safely write to address @dst from the buffer at @src. If a kernel fault
67801 * happens, handle that and return -EFAULT.
67802 */
67803 -long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
67804 +long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
67805 {
67806 long ret;
67807 mm_segment_t old_fs = get_fs();
67808 diff -urNp linux-2.6.32.44/mm/madvise.c linux-2.6.32.44/mm/madvise.c
67809 --- linux-2.6.32.44/mm/madvise.c 2011-03-27 14:31:47.000000000 -0400
67810 +++ linux-2.6.32.44/mm/madvise.c 2011-04-17 15:56:46.000000000 -0400
67811 @@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_a
67812 pgoff_t pgoff;
67813 unsigned long new_flags = vma->vm_flags;
67814
67815 +#ifdef CONFIG_PAX_SEGMEXEC
67816 + struct vm_area_struct *vma_m;
67817 +#endif
67818 +
67819 switch (behavior) {
67820 case MADV_NORMAL:
67821 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
67822 @@ -103,6 +107,13 @@ success:
67823 /*
67824 * vm_flags is protected by the mmap_sem held in write mode.
67825 */
67826 +
67827 +#ifdef CONFIG_PAX_SEGMEXEC
67828 + vma_m = pax_find_mirror_vma(vma);
67829 + if (vma_m)
67830 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
67831 +#endif
67832 +
67833 vma->vm_flags = new_flags;
67834
67835 out:
67836 @@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_a
67837 struct vm_area_struct ** prev,
67838 unsigned long start, unsigned long end)
67839 {
67840 +
67841 +#ifdef CONFIG_PAX_SEGMEXEC
67842 + struct vm_area_struct *vma_m;
67843 +#endif
67844 +
67845 *prev = vma;
67846 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
67847 return -EINVAL;
67848 @@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_a
67849 zap_page_range(vma, start, end - start, &details);
67850 } else
67851 zap_page_range(vma, start, end - start, NULL);
67852 +
67853 +#ifdef CONFIG_PAX_SEGMEXEC
67854 + vma_m = pax_find_mirror_vma(vma);
67855 + if (vma_m) {
67856 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
67857 + struct zap_details details = {
67858 + .nonlinear_vma = vma_m,
67859 + .last_index = ULONG_MAX,
67860 + };
67861 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
67862 + } else
67863 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
67864 + }
67865 +#endif
67866 +
67867 return 0;
67868 }
67869
67870 @@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
67871 if (end < start)
67872 goto out;
67873
67874 +#ifdef CONFIG_PAX_SEGMEXEC
67875 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
67876 + if (end > SEGMEXEC_TASK_SIZE)
67877 + goto out;
67878 + } else
67879 +#endif
67880 +
67881 + if (end > TASK_SIZE)
67882 + goto out;
67883 +
67884 error = 0;
67885 if (end == start)
67886 goto out;
67887 diff -urNp linux-2.6.32.44/mm/memory.c linux-2.6.32.44/mm/memory.c
67888 --- linux-2.6.32.44/mm/memory.c 2011-07-13 17:23:04.000000000 -0400
67889 +++ linux-2.6.32.44/mm/memory.c 2011-07-13 17:23:23.000000000 -0400
67890 @@ -187,8 +187,12 @@ static inline void free_pmd_range(struct
67891 return;
67892
67893 pmd = pmd_offset(pud, start);
67894 +
67895 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
67896 pud_clear(pud);
67897 pmd_free_tlb(tlb, pmd, start);
67898 +#endif
67899 +
67900 }
67901
67902 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
67903 @@ -219,9 +223,12 @@ static inline void free_pud_range(struct
67904 if (end - 1 > ceiling - 1)
67905 return;
67906
67907 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
67908 pud = pud_offset(pgd, start);
67909 pgd_clear(pgd);
67910 pud_free_tlb(tlb, pud, start);
67911 +#endif
67912 +
67913 }
67914
67915 /*
67916 @@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct
67917 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
67918 i = 0;
67919
67920 - do {
67921 + while (nr_pages) {
67922 struct vm_area_struct *vma;
67923
67924 - vma = find_extend_vma(mm, start);
67925 + vma = find_vma(mm, start);
67926 if (!vma && in_gate_area(tsk, start)) {
67927 unsigned long pg = start & PAGE_MASK;
67928 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
67929 @@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct
67930 continue;
67931 }
67932
67933 - if (!vma ||
67934 + if (!vma || start < vma->vm_start ||
67935 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
67936 !(vm_flags & vma->vm_flags))
67937 return i ? : -EFAULT;
67938 @@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct
67939 start += PAGE_SIZE;
67940 nr_pages--;
67941 } while (nr_pages && start < vma->vm_end);
67942 - } while (nr_pages);
67943 + }
67944 return i;
67945 }
67946
67947 @@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_st
67948 page_add_file_rmap(page);
67949 set_pte_at(mm, addr, pte, mk_pte(page, prot));
67950
67951 +#ifdef CONFIG_PAX_SEGMEXEC
67952 + pax_mirror_file_pte(vma, addr, page, ptl);
67953 +#endif
67954 +
67955 retval = 0;
67956 pte_unmap_unlock(pte, ptl);
67957 return retval;
67958 @@ -1560,10 +1571,22 @@ out:
67959 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
67960 struct page *page)
67961 {
67962 +
67963 +#ifdef CONFIG_PAX_SEGMEXEC
67964 + struct vm_area_struct *vma_m;
67965 +#endif
67966 +
67967 if (addr < vma->vm_start || addr >= vma->vm_end)
67968 return -EFAULT;
67969 if (!page_count(page))
67970 return -EINVAL;
67971 +
67972 +#ifdef CONFIG_PAX_SEGMEXEC
67973 + vma_m = pax_find_mirror_vma(vma);
67974 + if (vma_m)
67975 + vma_m->vm_flags |= VM_INSERTPAGE;
67976 +#endif
67977 +
67978 vma->vm_flags |= VM_INSERTPAGE;
67979 return insert_page(vma, addr, page, vma->vm_page_prot);
67980 }
67981 @@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struc
67982 unsigned long pfn)
67983 {
67984 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
67985 + BUG_ON(vma->vm_mirror);
67986
67987 if (addr < vma->vm_start || addr >= vma->vm_end)
67988 return -EFAULT;
67989 @@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct
67990 copy_user_highpage(dst, src, va, vma);
67991 }
67992
67993 +#ifdef CONFIG_PAX_SEGMEXEC
67994 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
67995 +{
67996 + struct mm_struct *mm = vma->vm_mm;
67997 + spinlock_t *ptl;
67998 + pte_t *pte, entry;
67999 +
68000 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
68001 + entry = *pte;
68002 + if (!pte_present(entry)) {
68003 + if (!pte_none(entry)) {
68004 + BUG_ON(pte_file(entry));
68005 + free_swap_and_cache(pte_to_swp_entry(entry));
68006 + pte_clear_not_present_full(mm, address, pte, 0);
68007 + }
68008 + } else {
68009 + struct page *page;
68010 +
68011 + flush_cache_page(vma, address, pte_pfn(entry));
68012 + entry = ptep_clear_flush(vma, address, pte);
68013 + BUG_ON(pte_dirty(entry));
68014 + page = vm_normal_page(vma, address, entry);
68015 + if (page) {
68016 + update_hiwater_rss(mm);
68017 + if (PageAnon(page))
68018 + dec_mm_counter(mm, anon_rss);
68019 + else
68020 + dec_mm_counter(mm, file_rss);
68021 + page_remove_rmap(page);
68022 + page_cache_release(page);
68023 + }
68024 + }
68025 + pte_unmap_unlock(pte, ptl);
68026 +}
68027 +
68028 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
68029 + *
68030 + * the ptl of the lower mapped page is held on entry and is not released on exit
68031 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
68032 + */
68033 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
68034 +{
68035 + struct mm_struct *mm = vma->vm_mm;
68036 + unsigned long address_m;
68037 + spinlock_t *ptl_m;
68038 + struct vm_area_struct *vma_m;
68039 + pmd_t *pmd_m;
68040 + pte_t *pte_m, entry_m;
68041 +
68042 + BUG_ON(!page_m || !PageAnon(page_m));
68043 +
68044 + vma_m = pax_find_mirror_vma(vma);
68045 + if (!vma_m)
68046 + return;
68047 +
68048 + BUG_ON(!PageLocked(page_m));
68049 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68050 + address_m = address + SEGMEXEC_TASK_SIZE;
68051 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68052 + pte_m = pte_offset_map_nested(pmd_m, address_m);
68053 + ptl_m = pte_lockptr(mm, pmd_m);
68054 + if (ptl != ptl_m) {
68055 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68056 + if (!pte_none(*pte_m))
68057 + goto out;
68058 + }
68059 +
68060 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
68061 + page_cache_get(page_m);
68062 + page_add_anon_rmap(page_m, vma_m, address_m);
68063 + inc_mm_counter(mm, anon_rss);
68064 + set_pte_at(mm, address_m, pte_m, entry_m);
68065 + update_mmu_cache(vma_m, address_m, entry_m);
68066 +out:
68067 + if (ptl != ptl_m)
68068 + spin_unlock(ptl_m);
68069 + pte_unmap_nested(pte_m);
68070 + unlock_page(page_m);
68071 +}
68072 +
68073 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
68074 +{
68075 + struct mm_struct *mm = vma->vm_mm;
68076 + unsigned long address_m;
68077 + spinlock_t *ptl_m;
68078 + struct vm_area_struct *vma_m;
68079 + pmd_t *pmd_m;
68080 + pte_t *pte_m, entry_m;
68081 +
68082 + BUG_ON(!page_m || PageAnon(page_m));
68083 +
68084 + vma_m = pax_find_mirror_vma(vma);
68085 + if (!vma_m)
68086 + return;
68087 +
68088 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68089 + address_m = address + SEGMEXEC_TASK_SIZE;
68090 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68091 + pte_m = pte_offset_map_nested(pmd_m, address_m);
68092 + ptl_m = pte_lockptr(mm, pmd_m);
68093 + if (ptl != ptl_m) {
68094 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68095 + if (!pte_none(*pte_m))
68096 + goto out;
68097 + }
68098 +
68099 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
68100 + page_cache_get(page_m);
68101 + page_add_file_rmap(page_m);
68102 + inc_mm_counter(mm, file_rss);
68103 + set_pte_at(mm, address_m, pte_m, entry_m);
68104 + update_mmu_cache(vma_m, address_m, entry_m);
68105 +out:
68106 + if (ptl != ptl_m)
68107 + spin_unlock(ptl_m);
68108 + pte_unmap_nested(pte_m);
68109 +}
68110 +
68111 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
68112 +{
68113 + struct mm_struct *mm = vma->vm_mm;
68114 + unsigned long address_m;
68115 + spinlock_t *ptl_m;
68116 + struct vm_area_struct *vma_m;
68117 + pmd_t *pmd_m;
68118 + pte_t *pte_m, entry_m;
68119 +
68120 + vma_m = pax_find_mirror_vma(vma);
68121 + if (!vma_m)
68122 + return;
68123 +
68124 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68125 + address_m = address + SEGMEXEC_TASK_SIZE;
68126 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68127 + pte_m = pte_offset_map_nested(pmd_m, address_m);
68128 + ptl_m = pte_lockptr(mm, pmd_m);
68129 + if (ptl != ptl_m) {
68130 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68131 + if (!pte_none(*pte_m))
68132 + goto out;
68133 + }
68134 +
68135 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
68136 + set_pte_at(mm, address_m, pte_m, entry_m);
68137 +out:
68138 + if (ptl != ptl_m)
68139 + spin_unlock(ptl_m);
68140 + pte_unmap_nested(pte_m);
68141 +}
68142 +
68143 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
68144 +{
68145 + struct page *page_m;
68146 + pte_t entry;
68147 +
68148 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
68149 + goto out;
68150 +
68151 + entry = *pte;
68152 + page_m = vm_normal_page(vma, address, entry);
68153 + if (!page_m)
68154 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
68155 + else if (PageAnon(page_m)) {
68156 + if (pax_find_mirror_vma(vma)) {
68157 + pte_unmap_unlock(pte, ptl);
68158 + lock_page(page_m);
68159 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
68160 + if (pte_same(entry, *pte))
68161 + pax_mirror_anon_pte(vma, address, page_m, ptl);
68162 + else
68163 + unlock_page(page_m);
68164 + }
68165 + } else
68166 + pax_mirror_file_pte(vma, address, page_m, ptl);
68167 +
68168 +out:
68169 + pte_unmap_unlock(pte, ptl);
68170 +}
68171 +#endif
68172 +
68173 /*
68174 * This routine handles present pages, when users try to write
68175 * to a shared page. It is done by copying the page to a new address
68176 @@ -2156,6 +2360,12 @@ gotten:
68177 */
68178 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
68179 if (likely(pte_same(*page_table, orig_pte))) {
68180 +
68181 +#ifdef CONFIG_PAX_SEGMEXEC
68182 + if (pax_find_mirror_vma(vma))
68183 + BUG_ON(!trylock_page(new_page));
68184 +#endif
68185 +
68186 if (old_page) {
68187 if (!PageAnon(old_page)) {
68188 dec_mm_counter(mm, file_rss);
68189 @@ -2207,6 +2417,10 @@ gotten:
68190 page_remove_rmap(old_page);
68191 }
68192
68193 +#ifdef CONFIG_PAX_SEGMEXEC
68194 + pax_mirror_anon_pte(vma, address, new_page, ptl);
68195 +#endif
68196 +
68197 /* Free the old page.. */
68198 new_page = old_page;
68199 ret |= VM_FAULT_WRITE;
68200 @@ -2606,6 +2820,11 @@ static int do_swap_page(struct mm_struct
68201 swap_free(entry);
68202 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
68203 try_to_free_swap(page);
68204 +
68205 +#ifdef CONFIG_PAX_SEGMEXEC
68206 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
68207 +#endif
68208 +
68209 unlock_page(page);
68210
68211 if (flags & FAULT_FLAG_WRITE) {
68212 @@ -2617,6 +2836,11 @@ static int do_swap_page(struct mm_struct
68213
68214 /* No need to invalidate - it was non-present before */
68215 update_mmu_cache(vma, address, pte);
68216 +
68217 +#ifdef CONFIG_PAX_SEGMEXEC
68218 + pax_mirror_anon_pte(vma, address, page, ptl);
68219 +#endif
68220 +
68221 unlock:
68222 pte_unmap_unlock(page_table, ptl);
68223 out:
68224 @@ -2632,40 +2856,6 @@ out_release:
68225 }
68226
68227 /*
68228 - * This is like a special single-page "expand_{down|up}wards()",
68229 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
68230 - * doesn't hit another vma.
68231 - */
68232 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
68233 -{
68234 - address &= PAGE_MASK;
68235 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
68236 - struct vm_area_struct *prev = vma->vm_prev;
68237 -
68238 - /*
68239 - * Is there a mapping abutting this one below?
68240 - *
68241 - * That's only ok if it's the same stack mapping
68242 - * that has gotten split..
68243 - */
68244 - if (prev && prev->vm_end == address)
68245 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
68246 -
68247 - expand_stack(vma, address - PAGE_SIZE);
68248 - }
68249 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
68250 - struct vm_area_struct *next = vma->vm_next;
68251 -
68252 - /* As VM_GROWSDOWN but s/below/above/ */
68253 - if (next && next->vm_start == address + PAGE_SIZE)
68254 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
68255 -
68256 - expand_upwards(vma, address + PAGE_SIZE);
68257 - }
68258 - return 0;
68259 -}
68260 -
68261 -/*
68262 * We enter with non-exclusive mmap_sem (to exclude vma changes,
68263 * but allow concurrent faults), and pte mapped but not yet locked.
68264 * We return with mmap_sem still held, but pte unmapped and unlocked.
68265 @@ -2674,27 +2864,23 @@ static int do_anonymous_page(struct mm_s
68266 unsigned long address, pte_t *page_table, pmd_t *pmd,
68267 unsigned int flags)
68268 {
68269 - struct page *page;
68270 + struct page *page = NULL;
68271 spinlock_t *ptl;
68272 pte_t entry;
68273
68274 - pte_unmap(page_table);
68275 -
68276 - /* Check if we need to add a guard page to the stack */
68277 - if (check_stack_guard_page(vma, address) < 0)
68278 - return VM_FAULT_SIGBUS;
68279 -
68280 - /* Use the zero-page for reads */
68281 if (!(flags & FAULT_FLAG_WRITE)) {
68282 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
68283 vma->vm_page_prot));
68284 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
68285 + ptl = pte_lockptr(mm, pmd);
68286 + spin_lock(ptl);
68287 if (!pte_none(*page_table))
68288 goto unlock;
68289 goto setpte;
68290 }
68291
68292 /* Allocate our own private page. */
68293 + pte_unmap(page_table);
68294 +
68295 if (unlikely(anon_vma_prepare(vma)))
68296 goto oom;
68297 page = alloc_zeroed_user_highpage_movable(vma, address);
68298 @@ -2713,6 +2899,11 @@ static int do_anonymous_page(struct mm_s
68299 if (!pte_none(*page_table))
68300 goto release;
68301
68302 +#ifdef CONFIG_PAX_SEGMEXEC
68303 + if (pax_find_mirror_vma(vma))
68304 + BUG_ON(!trylock_page(page));
68305 +#endif
68306 +
68307 inc_mm_counter(mm, anon_rss);
68308 page_add_new_anon_rmap(page, vma, address);
68309 setpte:
68310 @@ -2720,6 +2911,12 @@ setpte:
68311
68312 /* No need to invalidate - it was non-present before */
68313 update_mmu_cache(vma, address, entry);
68314 +
68315 +#ifdef CONFIG_PAX_SEGMEXEC
68316 + if (page)
68317 + pax_mirror_anon_pte(vma, address, page, ptl);
68318 +#endif
68319 +
68320 unlock:
68321 pte_unmap_unlock(page_table, ptl);
68322 return 0;
68323 @@ -2862,6 +3059,12 @@ static int __do_fault(struct mm_struct *
68324 */
68325 /* Only go through if we didn't race with anybody else... */
68326 if (likely(pte_same(*page_table, orig_pte))) {
68327 +
68328 +#ifdef CONFIG_PAX_SEGMEXEC
68329 + if (anon && pax_find_mirror_vma(vma))
68330 + BUG_ON(!trylock_page(page));
68331 +#endif
68332 +
68333 flush_icache_page(vma, page);
68334 entry = mk_pte(page, vma->vm_page_prot);
68335 if (flags & FAULT_FLAG_WRITE)
68336 @@ -2881,6 +3084,14 @@ static int __do_fault(struct mm_struct *
68337
68338 /* no need to invalidate: a not-present page won't be cached */
68339 update_mmu_cache(vma, address, entry);
68340 +
68341 +#ifdef CONFIG_PAX_SEGMEXEC
68342 + if (anon)
68343 + pax_mirror_anon_pte(vma, address, page, ptl);
68344 + else
68345 + pax_mirror_file_pte(vma, address, page, ptl);
68346 +#endif
68347 +
68348 } else {
68349 if (charged)
68350 mem_cgroup_uncharge_page(page);
68351 @@ -3028,6 +3239,12 @@ static inline int handle_pte_fault(struc
68352 if (flags & FAULT_FLAG_WRITE)
68353 flush_tlb_page(vma, address);
68354 }
68355 +
68356 +#ifdef CONFIG_PAX_SEGMEXEC
68357 + pax_mirror_pte(vma, address, pte, pmd, ptl);
68358 + return 0;
68359 +#endif
68360 +
68361 unlock:
68362 pte_unmap_unlock(pte, ptl);
68363 return 0;
68364 @@ -3044,6 +3261,10 @@ int handle_mm_fault(struct mm_struct *mm
68365 pmd_t *pmd;
68366 pte_t *pte;
68367
68368 +#ifdef CONFIG_PAX_SEGMEXEC
68369 + struct vm_area_struct *vma_m;
68370 +#endif
68371 +
68372 __set_current_state(TASK_RUNNING);
68373
68374 count_vm_event(PGFAULT);
68375 @@ -3051,6 +3272,34 @@ int handle_mm_fault(struct mm_struct *mm
68376 if (unlikely(is_vm_hugetlb_page(vma)))
68377 return hugetlb_fault(mm, vma, address, flags);
68378
68379 +#ifdef CONFIG_PAX_SEGMEXEC
68380 + vma_m = pax_find_mirror_vma(vma);
68381 + if (vma_m) {
68382 + unsigned long address_m;
68383 + pgd_t *pgd_m;
68384 + pud_t *pud_m;
68385 + pmd_t *pmd_m;
68386 +
68387 + if (vma->vm_start > vma_m->vm_start) {
68388 + address_m = address;
68389 + address -= SEGMEXEC_TASK_SIZE;
68390 + vma = vma_m;
68391 + } else
68392 + address_m = address + SEGMEXEC_TASK_SIZE;
68393 +
68394 + pgd_m = pgd_offset(mm, address_m);
68395 + pud_m = pud_alloc(mm, pgd_m, address_m);
68396 + if (!pud_m)
68397 + return VM_FAULT_OOM;
68398 + pmd_m = pmd_alloc(mm, pud_m, address_m);
68399 + if (!pmd_m)
68400 + return VM_FAULT_OOM;
68401 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
68402 + return VM_FAULT_OOM;
68403 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
68404 + }
68405 +#endif
68406 +
68407 pgd = pgd_offset(mm, address);
68408 pud = pud_alloc(mm, pgd, address);
68409 if (!pud)
68410 @@ -3148,7 +3397,7 @@ static int __init gate_vma_init(void)
68411 gate_vma.vm_start = FIXADDR_USER_START;
68412 gate_vma.vm_end = FIXADDR_USER_END;
68413 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
68414 - gate_vma.vm_page_prot = __P101;
68415 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
68416 /*
68417 * Make sure the vDSO gets into every core dump.
68418 * Dumping its contents makes post-mortem fully interpretable later
68419 diff -urNp linux-2.6.32.44/mm/memory-failure.c linux-2.6.32.44/mm/memory-failure.c
68420 --- linux-2.6.32.44/mm/memory-failure.c 2011-03-27 14:31:47.000000000 -0400
68421 +++ linux-2.6.32.44/mm/memory-failure.c 2011-04-17 15:56:46.000000000 -0400
68422 @@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __r
68423
68424 int sysctl_memory_failure_recovery __read_mostly = 1;
68425
68426 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
68427 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
68428
68429 /*
68430 * Send all the processes who have the page mapped an ``action optional''
68431 @@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn,
68432 return 0;
68433 }
68434
68435 - atomic_long_add(1, &mce_bad_pages);
68436 + atomic_long_add_unchecked(1, &mce_bad_pages);
68437
68438 /*
68439 * We need/can do nothing about count=0 pages.
68440 diff -urNp linux-2.6.32.44/mm/mempolicy.c linux-2.6.32.44/mm/mempolicy.c
68441 --- linux-2.6.32.44/mm/mempolicy.c 2011-03-27 14:31:47.000000000 -0400
68442 +++ linux-2.6.32.44/mm/mempolicy.c 2011-04-17 15:56:46.000000000 -0400
68443 @@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_st
68444 struct vm_area_struct *next;
68445 int err;
68446
68447 +#ifdef CONFIG_PAX_SEGMEXEC
68448 + struct vm_area_struct *vma_m;
68449 +#endif
68450 +
68451 err = 0;
68452 for (; vma && vma->vm_start < end; vma = next) {
68453 next = vma->vm_next;
68454 @@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_st
68455 err = policy_vma(vma, new);
68456 if (err)
68457 break;
68458 +
68459 +#ifdef CONFIG_PAX_SEGMEXEC
68460 + vma_m = pax_find_mirror_vma(vma);
68461 + if (vma_m) {
68462 + err = policy_vma(vma_m, new);
68463 + if (err)
68464 + break;
68465 + }
68466 +#endif
68467 +
68468 }
68469 return err;
68470 }
68471 @@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start
68472
68473 if (end < start)
68474 return -EINVAL;
68475 +
68476 +#ifdef CONFIG_PAX_SEGMEXEC
68477 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
68478 + if (end > SEGMEXEC_TASK_SIZE)
68479 + return -EINVAL;
68480 + } else
68481 +#endif
68482 +
68483 + if (end > TASK_SIZE)
68484 + return -EINVAL;
68485 +
68486 if (end == start)
68487 return 0;
68488
68489 @@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
68490 if (!mm)
68491 return -EINVAL;
68492
68493 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68494 + if (mm != current->mm &&
68495 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68496 + err = -EPERM;
68497 + goto out;
68498 + }
68499 +#endif
68500 +
68501 /*
68502 * Check if this process has the right to modify the specified
68503 * process. The right exists if the process has administrative
68504 @@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
68505 rcu_read_lock();
68506 tcred = __task_cred(task);
68507 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68508 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
68509 - !capable(CAP_SYS_NICE)) {
68510 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68511 rcu_read_unlock();
68512 err = -EPERM;
68513 goto out;
68514 @@ -2396,7 +2428,7 @@ int show_numa_map(struct seq_file *m, vo
68515
68516 if (file) {
68517 seq_printf(m, " file=");
68518 - seq_path(m, &file->f_path, "\n\t= ");
68519 + seq_path(m, &file->f_path, "\n\t\\= ");
68520 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
68521 seq_printf(m, " heap");
68522 } else if (vma->vm_start <= mm->start_stack &&
68523 diff -urNp linux-2.6.32.44/mm/migrate.c linux-2.6.32.44/mm/migrate.c
68524 --- linux-2.6.32.44/mm/migrate.c 2011-07-13 17:23:04.000000000 -0400
68525 +++ linux-2.6.32.44/mm/migrate.c 2011-07-13 17:23:23.000000000 -0400
68526 @@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struc
68527 unsigned long chunk_start;
68528 int err;
68529
68530 + pax_track_stack();
68531 +
68532 task_nodes = cpuset_mems_allowed(task);
68533
68534 err = -ENOMEM;
68535 @@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
68536 if (!mm)
68537 return -EINVAL;
68538
68539 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68540 + if (mm != current->mm &&
68541 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68542 + err = -EPERM;
68543 + goto out;
68544 + }
68545 +#endif
68546 +
68547 /*
68548 * Check if this process has the right to modify the specified
68549 * process. The right exists if the process has administrative
68550 @@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
68551 rcu_read_lock();
68552 tcred = __task_cred(task);
68553 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68554 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
68555 - !capable(CAP_SYS_NICE)) {
68556 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68557 rcu_read_unlock();
68558 err = -EPERM;
68559 goto out;
68560 diff -urNp linux-2.6.32.44/mm/mlock.c linux-2.6.32.44/mm/mlock.c
68561 --- linux-2.6.32.44/mm/mlock.c 2011-03-27 14:31:47.000000000 -0400
68562 +++ linux-2.6.32.44/mm/mlock.c 2011-04-17 15:56:46.000000000 -0400
68563 @@ -13,6 +13,7 @@
68564 #include <linux/pagemap.h>
68565 #include <linux/mempolicy.h>
68566 #include <linux/syscalls.h>
68567 +#include <linux/security.h>
68568 #include <linux/sched.h>
68569 #include <linux/module.h>
68570 #include <linux/rmap.h>
68571 @@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
68572 }
68573 }
68574
68575 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
68576 -{
68577 - return (vma->vm_flags & VM_GROWSDOWN) &&
68578 - (vma->vm_start == addr) &&
68579 - !vma_stack_continue(vma->vm_prev, addr);
68580 -}
68581 -
68582 /**
68583 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
68584 * @vma: target vma
68585 @@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(stru
68586 if (vma->vm_flags & VM_WRITE)
68587 gup_flags |= FOLL_WRITE;
68588
68589 - /* We don't try to access the guard page of a stack vma */
68590 - if (stack_guard_page(vma, start)) {
68591 - addr += PAGE_SIZE;
68592 - nr_pages--;
68593 - }
68594 -
68595 while (nr_pages > 0) {
68596 int i;
68597
68598 @@ -440,7 +428,7 @@ static int do_mlock(unsigned long start,
68599 {
68600 unsigned long nstart, end, tmp;
68601 struct vm_area_struct * vma, * prev;
68602 - int error;
68603 + int error = -EINVAL;
68604
68605 len = PAGE_ALIGN(len);
68606 end = start + len;
68607 @@ -448,6 +436,9 @@ static int do_mlock(unsigned long start,
68608 return -EINVAL;
68609 if (end == start)
68610 return 0;
68611 + if (end > TASK_SIZE)
68612 + return -EINVAL;
68613 +
68614 vma = find_vma_prev(current->mm, start, &prev);
68615 if (!vma || vma->vm_start > start)
68616 return -ENOMEM;
68617 @@ -458,6 +449,11 @@ static int do_mlock(unsigned long start,
68618 for (nstart = start ; ; ) {
68619 unsigned int newflags;
68620
68621 +#ifdef CONFIG_PAX_SEGMEXEC
68622 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68623 + break;
68624 +#endif
68625 +
68626 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
68627
68628 newflags = vma->vm_flags | VM_LOCKED;
68629 @@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
68630 lock_limit >>= PAGE_SHIFT;
68631
68632 /* check against resource limits */
68633 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
68634 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
68635 error = do_mlock(start, len, 1);
68636 up_write(&current->mm->mmap_sem);
68637 @@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
68638 static int do_mlockall(int flags)
68639 {
68640 struct vm_area_struct * vma, * prev = NULL;
68641 - unsigned int def_flags = 0;
68642
68643 if (flags & MCL_FUTURE)
68644 - def_flags = VM_LOCKED;
68645 - current->mm->def_flags = def_flags;
68646 + current->mm->def_flags |= VM_LOCKED;
68647 + else
68648 + current->mm->def_flags &= ~VM_LOCKED;
68649 if (flags == MCL_FUTURE)
68650 goto out;
68651
68652 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
68653 - unsigned int newflags;
68654 + unsigned long newflags;
68655 +
68656 +#ifdef CONFIG_PAX_SEGMEXEC
68657 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68658 + break;
68659 +#endif
68660
68661 + BUG_ON(vma->vm_end > TASK_SIZE);
68662 newflags = vma->vm_flags | VM_LOCKED;
68663 if (!(flags & MCL_CURRENT))
68664 newflags &= ~VM_LOCKED;
68665 @@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
68666 lock_limit >>= PAGE_SHIFT;
68667
68668 ret = -ENOMEM;
68669 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
68670 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
68671 capable(CAP_IPC_LOCK))
68672 ret = do_mlockall(flags);
68673 diff -urNp linux-2.6.32.44/mm/mmap.c linux-2.6.32.44/mm/mmap.c
68674 --- linux-2.6.32.44/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
68675 +++ linux-2.6.32.44/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
68676 @@ -45,6 +45,16 @@
68677 #define arch_rebalance_pgtables(addr, len) (addr)
68678 #endif
68679
68680 +static inline void verify_mm_writelocked(struct mm_struct *mm)
68681 +{
68682 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
68683 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
68684 + up_read(&mm->mmap_sem);
68685 + BUG();
68686 + }
68687 +#endif
68688 +}
68689 +
68690 static void unmap_region(struct mm_struct *mm,
68691 struct vm_area_struct *vma, struct vm_area_struct *prev,
68692 unsigned long start, unsigned long end);
68693 @@ -70,22 +80,32 @@ static void unmap_region(struct mm_struc
68694 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
68695 *
68696 */
68697 -pgprot_t protection_map[16] = {
68698 +pgprot_t protection_map[16] __read_only = {
68699 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
68700 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
68701 };
68702
68703 pgprot_t vm_get_page_prot(unsigned long vm_flags)
68704 {
68705 - return __pgprot(pgprot_val(protection_map[vm_flags &
68706 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
68707 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
68708 pgprot_val(arch_vm_get_page_prot(vm_flags)));
68709 +
68710 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68711 + if (!nx_enabled &&
68712 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
68713 + (vm_flags & (VM_READ | VM_WRITE)))
68714 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
68715 +#endif
68716 +
68717 + return prot;
68718 }
68719 EXPORT_SYMBOL(vm_get_page_prot);
68720
68721 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
68722 int sysctl_overcommit_ratio = 50; /* default is 50% */
68723 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
68724 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
68725 struct percpu_counter vm_committed_as;
68726
68727 /*
68728 @@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma
68729 struct vm_area_struct *next = vma->vm_next;
68730
68731 might_sleep();
68732 + BUG_ON(vma->vm_mirror);
68733 if (vma->vm_ops && vma->vm_ops->close)
68734 vma->vm_ops->close(vma);
68735 if (vma->vm_file) {
68736 @@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
68737 * not page aligned -Ram Gupta
68738 */
68739 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
68740 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
68741 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
68742 (mm->end_data - mm->start_data) > rlim)
68743 goto out;
68744 @@ -704,6 +726,12 @@ static int
68745 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
68746 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68747 {
68748 +
68749 +#ifdef CONFIG_PAX_SEGMEXEC
68750 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
68751 + return 0;
68752 +#endif
68753 +
68754 if (is_mergeable_vma(vma, file, vm_flags) &&
68755 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
68756 if (vma->vm_pgoff == vm_pgoff)
68757 @@ -723,6 +751,12 @@ static int
68758 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
68759 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68760 {
68761 +
68762 +#ifdef CONFIG_PAX_SEGMEXEC
68763 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
68764 + return 0;
68765 +#endif
68766 +
68767 if (is_mergeable_vma(vma, file, vm_flags) &&
68768 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
68769 pgoff_t vm_pglen;
68770 @@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struc
68771 struct vm_area_struct *vma_merge(struct mm_struct *mm,
68772 struct vm_area_struct *prev, unsigned long addr,
68773 unsigned long end, unsigned long vm_flags,
68774 - struct anon_vma *anon_vma, struct file *file,
68775 + struct anon_vma *anon_vma, struct file *file,
68776 pgoff_t pgoff, struct mempolicy *policy)
68777 {
68778 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
68779 struct vm_area_struct *area, *next;
68780
68781 +#ifdef CONFIG_PAX_SEGMEXEC
68782 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
68783 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
68784 +
68785 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
68786 +#endif
68787 +
68788 /*
68789 * We later require that vma->vm_flags == vm_flags,
68790 * so this tests vma->vm_flags & VM_SPECIAL, too.
68791 @@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct
68792 if (next && next->vm_end == end) /* cases 6, 7, 8 */
68793 next = next->vm_next;
68794
68795 +#ifdef CONFIG_PAX_SEGMEXEC
68796 + if (prev)
68797 + prev_m = pax_find_mirror_vma(prev);
68798 + if (area)
68799 + area_m = pax_find_mirror_vma(area);
68800 + if (next)
68801 + next_m = pax_find_mirror_vma(next);
68802 +#endif
68803 +
68804 /*
68805 * Can it merge with the predecessor?
68806 */
68807 @@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct
68808 /* cases 1, 6 */
68809 vma_adjust(prev, prev->vm_start,
68810 next->vm_end, prev->vm_pgoff, NULL);
68811 - } else /* cases 2, 5, 7 */
68812 +
68813 +#ifdef CONFIG_PAX_SEGMEXEC
68814 + if (prev_m)
68815 + vma_adjust(prev_m, prev_m->vm_start,
68816 + next_m->vm_end, prev_m->vm_pgoff, NULL);
68817 +#endif
68818 +
68819 + } else { /* cases 2, 5, 7 */
68820 vma_adjust(prev, prev->vm_start,
68821 end, prev->vm_pgoff, NULL);
68822 +
68823 +#ifdef CONFIG_PAX_SEGMEXEC
68824 + if (prev_m)
68825 + vma_adjust(prev_m, prev_m->vm_start,
68826 + end_m, prev_m->vm_pgoff, NULL);
68827 +#endif
68828 +
68829 + }
68830 return prev;
68831 }
68832
68833 @@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct
68834 mpol_equal(policy, vma_policy(next)) &&
68835 can_vma_merge_before(next, vm_flags,
68836 anon_vma, file, pgoff+pglen)) {
68837 - if (prev && addr < prev->vm_end) /* case 4 */
68838 + if (prev && addr < prev->vm_end) { /* case 4 */
68839 vma_adjust(prev, prev->vm_start,
68840 addr, prev->vm_pgoff, NULL);
68841 - else /* cases 3, 8 */
68842 +
68843 +#ifdef CONFIG_PAX_SEGMEXEC
68844 + if (prev_m)
68845 + vma_adjust(prev_m, prev_m->vm_start,
68846 + addr_m, prev_m->vm_pgoff, NULL);
68847 +#endif
68848 +
68849 + } else { /* cases 3, 8 */
68850 vma_adjust(area, addr, next->vm_end,
68851 next->vm_pgoff - pglen, NULL);
68852 +
68853 +#ifdef CONFIG_PAX_SEGMEXEC
68854 + if (area_m)
68855 + vma_adjust(area_m, addr_m, next_m->vm_end,
68856 + next_m->vm_pgoff - pglen, NULL);
68857 +#endif
68858 +
68859 + }
68860 return area;
68861 }
68862
68863 @@ -898,14 +978,11 @@ none:
68864 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
68865 struct file *file, long pages)
68866 {
68867 - const unsigned long stack_flags
68868 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
68869 -
68870 if (file) {
68871 mm->shared_vm += pages;
68872 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
68873 mm->exec_vm += pages;
68874 - } else if (flags & stack_flags)
68875 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
68876 mm->stack_vm += pages;
68877 if (flags & (VM_RESERVED|VM_IO))
68878 mm->reserved_vm += pages;
68879 @@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file
68880 * (the exception is when the underlying filesystem is noexec
68881 * mounted, in which case we dont add PROT_EXEC.)
68882 */
68883 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
68884 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
68885 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
68886 prot |= PROT_EXEC;
68887
68888 @@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file
68889 /* Obtain the address to map to. we verify (or select) it and ensure
68890 * that it represents a valid section of the address space.
68891 */
68892 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
68893 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
68894 if (addr & ~PAGE_MASK)
68895 return addr;
68896
68897 @@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file
68898 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
68899 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
68900
68901 +#ifdef CONFIG_PAX_MPROTECT
68902 + if (mm->pax_flags & MF_PAX_MPROTECT) {
68903 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
68904 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
68905 + gr_log_rwxmmap(file);
68906 +
68907 +#ifdef CONFIG_PAX_EMUPLT
68908 + vm_flags &= ~VM_EXEC;
68909 +#else
68910 + return -EPERM;
68911 +#endif
68912 +
68913 + }
68914 +
68915 + if (!(vm_flags & VM_EXEC))
68916 + vm_flags &= ~VM_MAYEXEC;
68917 +#else
68918 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
68919 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
68920 +#endif
68921 + else
68922 + vm_flags &= ~VM_MAYWRITE;
68923 + }
68924 +#endif
68925 +
68926 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68927 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
68928 + vm_flags &= ~VM_PAGEEXEC;
68929 +#endif
68930 +
68931 if (flags & MAP_LOCKED)
68932 if (!can_do_mlock())
68933 return -EPERM;
68934 @@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file
68935 locked += mm->locked_vm;
68936 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
68937 lock_limit >>= PAGE_SHIFT;
68938 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68939 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
68940 return -EAGAIN;
68941 }
68942 @@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file
68943 if (error)
68944 return error;
68945
68946 + if (!gr_acl_handle_mmap(file, prot))
68947 + return -EACCES;
68948 +
68949 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
68950 }
68951 EXPORT_SYMBOL(do_mmap_pgoff);
68952 @@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
68953 */
68954 int vma_wants_writenotify(struct vm_area_struct *vma)
68955 {
68956 - unsigned int vm_flags = vma->vm_flags;
68957 + unsigned long vm_flags = vma->vm_flags;
68958
68959 /* If it was private or non-writable, the write bit is already clear */
68960 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
68961 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
68962 return 0;
68963
68964 /* The backer wishes to know when pages are first written to? */
68965 @@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *f
68966 unsigned long charged = 0;
68967 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
68968
68969 +#ifdef CONFIG_PAX_SEGMEXEC
68970 + struct vm_area_struct *vma_m = NULL;
68971 +#endif
68972 +
68973 + /*
68974 + * mm->mmap_sem is required to protect against another thread
68975 + * changing the mappings in case we sleep.
68976 + */
68977 + verify_mm_writelocked(mm);
68978 +
68979 /* Clear old maps */
68980 error = -ENOMEM;
68981 -munmap_back:
68982 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68983 if (vma && vma->vm_start < addr + len) {
68984 if (do_munmap(mm, addr, len))
68985 return -ENOMEM;
68986 - goto munmap_back;
68987 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68988 + BUG_ON(vma && vma->vm_start < addr + len);
68989 }
68990
68991 /* Check against address space limit. */
68992 @@ -1173,6 +1294,16 @@ munmap_back:
68993 goto unacct_error;
68994 }
68995
68996 +#ifdef CONFIG_PAX_SEGMEXEC
68997 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
68998 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
68999 + if (!vma_m) {
69000 + error = -ENOMEM;
69001 + goto free_vma;
69002 + }
69003 + }
69004 +#endif
69005 +
69006 vma->vm_mm = mm;
69007 vma->vm_start = addr;
69008 vma->vm_end = addr + len;
69009 @@ -1195,6 +1326,19 @@ munmap_back:
69010 error = file->f_op->mmap(file, vma);
69011 if (error)
69012 goto unmap_and_free_vma;
69013 +
69014 +#ifdef CONFIG_PAX_SEGMEXEC
69015 + if (vma_m && (vm_flags & VM_EXECUTABLE))
69016 + added_exe_file_vma(mm);
69017 +#endif
69018 +
69019 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
69020 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
69021 + vma->vm_flags |= VM_PAGEEXEC;
69022 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69023 + }
69024 +#endif
69025 +
69026 if (vm_flags & VM_EXECUTABLE)
69027 added_exe_file_vma(mm);
69028
69029 @@ -1218,6 +1362,11 @@ munmap_back:
69030 vma_link(mm, vma, prev, rb_link, rb_parent);
69031 file = vma->vm_file;
69032
69033 +#ifdef CONFIG_PAX_SEGMEXEC
69034 + if (vma_m)
69035 + pax_mirror_vma(vma_m, vma);
69036 +#endif
69037 +
69038 /* Once vma denies write, undo our temporary denial count */
69039 if (correct_wcount)
69040 atomic_inc(&inode->i_writecount);
69041 @@ -1226,6 +1375,7 @@ out:
69042
69043 mm->total_vm += len >> PAGE_SHIFT;
69044 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
69045 + track_exec_limit(mm, addr, addr + len, vm_flags);
69046 if (vm_flags & VM_LOCKED) {
69047 /*
69048 * makes pages present; downgrades, drops, reacquires mmap_sem
69049 @@ -1248,6 +1398,12 @@ unmap_and_free_vma:
69050 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
69051 charged = 0;
69052 free_vma:
69053 +
69054 +#ifdef CONFIG_PAX_SEGMEXEC
69055 + if (vma_m)
69056 + kmem_cache_free(vm_area_cachep, vma_m);
69057 +#endif
69058 +
69059 kmem_cache_free(vm_area_cachep, vma);
69060 unacct_error:
69061 if (charged)
69062 @@ -1255,6 +1411,44 @@ unacct_error:
69063 return error;
69064 }
69065
69066 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
69067 +{
69068 + if (!vma) {
69069 +#ifdef CONFIG_STACK_GROWSUP
69070 + if (addr > sysctl_heap_stack_gap)
69071 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
69072 + else
69073 + vma = find_vma(current->mm, 0);
69074 + if (vma && (vma->vm_flags & VM_GROWSUP))
69075 + return false;
69076 +#endif
69077 + return true;
69078 + }
69079 +
69080 + if (addr + len > vma->vm_start)
69081 + return false;
69082 +
69083 + if (vma->vm_flags & VM_GROWSDOWN)
69084 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
69085 +#ifdef CONFIG_STACK_GROWSUP
69086 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
69087 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
69088 +#endif
69089 +
69090 + return true;
69091 +}
69092 +
69093 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
69094 +{
69095 + if (vma->vm_start < len)
69096 + return -ENOMEM;
69097 + if (!(vma->vm_flags & VM_GROWSDOWN))
69098 + return vma->vm_start - len;
69099 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
69100 + return vma->vm_start - len - sysctl_heap_stack_gap;
69101 + return -ENOMEM;
69102 +}
69103 +
69104 /* Get an address range which is currently unmapped.
69105 * For shmat() with addr=0.
69106 *
69107 @@ -1281,18 +1475,23 @@ arch_get_unmapped_area(struct file *filp
69108 if (flags & MAP_FIXED)
69109 return addr;
69110
69111 +#ifdef CONFIG_PAX_RANDMMAP
69112 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
69113 +#endif
69114 +
69115 if (addr) {
69116 addr = PAGE_ALIGN(addr);
69117 - vma = find_vma(mm, addr);
69118 - if (TASK_SIZE - len >= addr &&
69119 - (!vma || addr + len <= vma->vm_start))
69120 - return addr;
69121 + if (TASK_SIZE - len >= addr) {
69122 + vma = find_vma(mm, addr);
69123 + if (check_heap_stack_gap(vma, addr, len))
69124 + return addr;
69125 + }
69126 }
69127 if (len > mm->cached_hole_size) {
69128 - start_addr = addr = mm->free_area_cache;
69129 + start_addr = addr = mm->free_area_cache;
69130 } else {
69131 - start_addr = addr = TASK_UNMAPPED_BASE;
69132 - mm->cached_hole_size = 0;
69133 + start_addr = addr = mm->mmap_base;
69134 + mm->cached_hole_size = 0;
69135 }
69136
69137 full_search:
69138 @@ -1303,34 +1502,40 @@ full_search:
69139 * Start a new search - just in case we missed
69140 * some holes.
69141 */
69142 - if (start_addr != TASK_UNMAPPED_BASE) {
69143 - addr = TASK_UNMAPPED_BASE;
69144 - start_addr = addr;
69145 + if (start_addr != mm->mmap_base) {
69146 + start_addr = addr = mm->mmap_base;
69147 mm->cached_hole_size = 0;
69148 goto full_search;
69149 }
69150 return -ENOMEM;
69151 }
69152 - if (!vma || addr + len <= vma->vm_start) {
69153 - /*
69154 - * Remember the place where we stopped the search:
69155 - */
69156 - mm->free_area_cache = addr + len;
69157 - return addr;
69158 - }
69159 + if (check_heap_stack_gap(vma, addr, len))
69160 + break;
69161 if (addr + mm->cached_hole_size < vma->vm_start)
69162 mm->cached_hole_size = vma->vm_start - addr;
69163 addr = vma->vm_end;
69164 }
69165 +
69166 + /*
69167 + * Remember the place where we stopped the search:
69168 + */
69169 + mm->free_area_cache = addr + len;
69170 + return addr;
69171 }
69172 #endif
69173
69174 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
69175 {
69176 +
69177 +#ifdef CONFIG_PAX_SEGMEXEC
69178 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
69179 + return;
69180 +#endif
69181 +
69182 /*
69183 * Is this a new hole at the lowest possible address?
69184 */
69185 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
69186 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
69187 mm->free_area_cache = addr;
69188 mm->cached_hole_size = ~0UL;
69189 }
69190 @@ -1348,7 +1553,7 @@ arch_get_unmapped_area_topdown(struct fi
69191 {
69192 struct vm_area_struct *vma;
69193 struct mm_struct *mm = current->mm;
69194 - unsigned long addr = addr0;
69195 + unsigned long base = mm->mmap_base, addr = addr0;
69196
69197 /* requested length too big for entire address space */
69198 if (len > TASK_SIZE)
69199 @@ -1357,13 +1562,18 @@ arch_get_unmapped_area_topdown(struct fi
69200 if (flags & MAP_FIXED)
69201 return addr;
69202
69203 +#ifdef CONFIG_PAX_RANDMMAP
69204 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
69205 +#endif
69206 +
69207 /* requesting a specific address */
69208 if (addr) {
69209 addr = PAGE_ALIGN(addr);
69210 - vma = find_vma(mm, addr);
69211 - if (TASK_SIZE - len >= addr &&
69212 - (!vma || addr + len <= vma->vm_start))
69213 - return addr;
69214 + if (TASK_SIZE - len >= addr) {
69215 + vma = find_vma(mm, addr);
69216 + if (check_heap_stack_gap(vma, addr, len))
69217 + return addr;
69218 + }
69219 }
69220
69221 /* check if free_area_cache is useful for us */
69222 @@ -1378,7 +1588,7 @@ arch_get_unmapped_area_topdown(struct fi
69223 /* make sure it can fit in the remaining address space */
69224 if (addr > len) {
69225 vma = find_vma(mm, addr-len);
69226 - if (!vma || addr <= vma->vm_start)
69227 + if (check_heap_stack_gap(vma, addr - len, len))
69228 /* remember the address as a hint for next time */
69229 return (mm->free_area_cache = addr-len);
69230 }
69231 @@ -1395,7 +1605,7 @@ arch_get_unmapped_area_topdown(struct fi
69232 * return with success:
69233 */
69234 vma = find_vma(mm, addr);
69235 - if (!vma || addr+len <= vma->vm_start)
69236 + if (check_heap_stack_gap(vma, addr, len))
69237 /* remember the address as a hint for next time */
69238 return (mm->free_area_cache = addr);
69239
69240 @@ -1404,8 +1614,8 @@ arch_get_unmapped_area_topdown(struct fi
69241 mm->cached_hole_size = vma->vm_start - addr;
69242
69243 /* try just below the current vma->vm_start */
69244 - addr = vma->vm_start-len;
69245 - } while (len < vma->vm_start);
69246 + addr = skip_heap_stack_gap(vma, len);
69247 + } while (!IS_ERR_VALUE(addr));
69248
69249 bottomup:
69250 /*
69251 @@ -1414,13 +1624,21 @@ bottomup:
69252 * can happen with large stack limits and large mmap()
69253 * allocations.
69254 */
69255 + mm->mmap_base = TASK_UNMAPPED_BASE;
69256 +
69257 +#ifdef CONFIG_PAX_RANDMMAP
69258 + if (mm->pax_flags & MF_PAX_RANDMMAP)
69259 + mm->mmap_base += mm->delta_mmap;
69260 +#endif
69261 +
69262 + mm->free_area_cache = mm->mmap_base;
69263 mm->cached_hole_size = ~0UL;
69264 - mm->free_area_cache = TASK_UNMAPPED_BASE;
69265 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
69266 /*
69267 * Restore the topdown base:
69268 */
69269 - mm->free_area_cache = mm->mmap_base;
69270 + mm->mmap_base = base;
69271 + mm->free_area_cache = base;
69272 mm->cached_hole_size = ~0UL;
69273
69274 return addr;
69275 @@ -1429,6 +1647,12 @@ bottomup:
69276
69277 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
69278 {
69279 +
69280 +#ifdef CONFIG_PAX_SEGMEXEC
69281 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
69282 + return;
69283 +#endif
69284 +
69285 /*
69286 * Is this a new hole at the highest possible address?
69287 */
69288 @@ -1436,8 +1660,10 @@ void arch_unmap_area_topdown(struct mm_s
69289 mm->free_area_cache = addr;
69290
69291 /* dont allow allocations above current base */
69292 - if (mm->free_area_cache > mm->mmap_base)
69293 + if (mm->free_area_cache > mm->mmap_base) {
69294 mm->free_area_cache = mm->mmap_base;
69295 + mm->cached_hole_size = ~0UL;
69296 + }
69297 }
69298
69299 unsigned long
69300 @@ -1545,6 +1771,27 @@ out:
69301 return prev ? prev->vm_next : vma;
69302 }
69303
69304 +#ifdef CONFIG_PAX_SEGMEXEC
69305 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
69306 +{
69307 + struct vm_area_struct *vma_m;
69308 +
69309 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
69310 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
69311 + BUG_ON(vma->vm_mirror);
69312 + return NULL;
69313 + }
69314 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
69315 + vma_m = vma->vm_mirror;
69316 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
69317 + BUG_ON(vma->vm_file != vma_m->vm_file);
69318 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
69319 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
69320 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
69321 + return vma_m;
69322 +}
69323 +#endif
69324 +
69325 /*
69326 * Verify that the stack growth is acceptable and
69327 * update accounting. This is shared with both the
69328 @@ -1561,6 +1808,7 @@ static int acct_stack_growth(struct vm_a
69329 return -ENOMEM;
69330
69331 /* Stack limit test */
69332 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
69333 if (size > rlim[RLIMIT_STACK].rlim_cur)
69334 return -ENOMEM;
69335
69336 @@ -1570,6 +1818,7 @@ static int acct_stack_growth(struct vm_a
69337 unsigned long limit;
69338 locked = mm->locked_vm + grow;
69339 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
69340 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
69341 if (locked > limit && !capable(CAP_IPC_LOCK))
69342 return -ENOMEM;
69343 }
69344 @@ -1600,37 +1849,48 @@ static int acct_stack_growth(struct vm_a
69345 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
69346 * vma is the last one with address > vma->vm_end. Have to extend vma.
69347 */
69348 +#ifndef CONFIG_IA64
69349 +static
69350 +#endif
69351 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
69352 {
69353 int error;
69354 + bool locknext;
69355
69356 if (!(vma->vm_flags & VM_GROWSUP))
69357 return -EFAULT;
69358
69359 + /* Also guard against wrapping around to address 0. */
69360 + if (address < PAGE_ALIGN(address+1))
69361 + address = PAGE_ALIGN(address+1);
69362 + else
69363 + return -ENOMEM;
69364 +
69365 /*
69366 * We must make sure the anon_vma is allocated
69367 * so that the anon_vma locking is not a noop.
69368 */
69369 if (unlikely(anon_vma_prepare(vma)))
69370 return -ENOMEM;
69371 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
69372 + if (locknext && anon_vma_prepare(vma->vm_next))
69373 + return -ENOMEM;
69374 anon_vma_lock(vma);
69375 + if (locknext)
69376 + anon_vma_lock(vma->vm_next);
69377
69378 /*
69379 * vma->vm_start/vm_end cannot change under us because the caller
69380 * is required to hold the mmap_sem in read mode. We need the
69381 - * anon_vma lock to serialize against concurrent expand_stacks.
69382 - * Also guard against wrapping around to address 0.
69383 + * anon_vma locks to serialize against concurrent expand_stacks
69384 + * and expand_upwards.
69385 */
69386 - if (address < PAGE_ALIGN(address+4))
69387 - address = PAGE_ALIGN(address+4);
69388 - else {
69389 - anon_vma_unlock(vma);
69390 - return -ENOMEM;
69391 - }
69392 error = 0;
69393
69394 /* Somebody else might have raced and expanded it already */
69395 - if (address > vma->vm_end) {
69396 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
69397 + error = -ENOMEM;
69398 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
69399 unsigned long size, grow;
69400
69401 size = address - vma->vm_start;
69402 @@ -1640,6 +1900,8 @@ int expand_upwards(struct vm_area_struct
69403 if (!error)
69404 vma->vm_end = address;
69405 }
69406 + if (locknext)
69407 + anon_vma_unlock(vma->vm_next);
69408 anon_vma_unlock(vma);
69409 return error;
69410 }
69411 @@ -1652,6 +1914,8 @@ static int expand_downwards(struct vm_ar
69412 unsigned long address)
69413 {
69414 int error;
69415 + bool lockprev = false;
69416 + struct vm_area_struct *prev;
69417
69418 /*
69419 * We must make sure the anon_vma is allocated
69420 @@ -1665,6 +1929,15 @@ static int expand_downwards(struct vm_ar
69421 if (error)
69422 return error;
69423
69424 + prev = vma->vm_prev;
69425 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
69426 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
69427 +#endif
69428 + if (lockprev && anon_vma_prepare(prev))
69429 + return -ENOMEM;
69430 + if (lockprev)
69431 + anon_vma_lock(prev);
69432 +
69433 anon_vma_lock(vma);
69434
69435 /*
69436 @@ -1674,9 +1947,17 @@ static int expand_downwards(struct vm_ar
69437 */
69438
69439 /* Somebody else might have raced and expanded it already */
69440 - if (address < vma->vm_start) {
69441 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
69442 + error = -ENOMEM;
69443 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
69444 unsigned long size, grow;
69445
69446 +#ifdef CONFIG_PAX_SEGMEXEC
69447 + struct vm_area_struct *vma_m;
69448 +
69449 + vma_m = pax_find_mirror_vma(vma);
69450 +#endif
69451 +
69452 size = vma->vm_end - address;
69453 grow = (vma->vm_start - address) >> PAGE_SHIFT;
69454
69455 @@ -1684,9 +1965,20 @@ static int expand_downwards(struct vm_ar
69456 if (!error) {
69457 vma->vm_start = address;
69458 vma->vm_pgoff -= grow;
69459 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
69460 +
69461 +#ifdef CONFIG_PAX_SEGMEXEC
69462 + if (vma_m) {
69463 + vma_m->vm_start -= grow << PAGE_SHIFT;
69464 + vma_m->vm_pgoff -= grow;
69465 + }
69466 +#endif
69467 +
69468 }
69469 }
69470 anon_vma_unlock(vma);
69471 + if (lockprev)
69472 + anon_vma_unlock(prev);
69473 return error;
69474 }
69475
69476 @@ -1762,6 +2054,13 @@ static void remove_vma_list(struct mm_st
69477 do {
69478 long nrpages = vma_pages(vma);
69479
69480 +#ifdef CONFIG_PAX_SEGMEXEC
69481 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
69482 + vma = remove_vma(vma);
69483 + continue;
69484 + }
69485 +#endif
69486 +
69487 mm->total_vm -= nrpages;
69488 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
69489 vma = remove_vma(vma);
69490 @@ -1807,6 +2106,16 @@ detach_vmas_to_be_unmapped(struct mm_str
69491 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
69492 vma->vm_prev = NULL;
69493 do {
69494 +
69495 +#ifdef CONFIG_PAX_SEGMEXEC
69496 + if (vma->vm_mirror) {
69497 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
69498 + vma->vm_mirror->vm_mirror = NULL;
69499 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
69500 + vma->vm_mirror = NULL;
69501 + }
69502 +#endif
69503 +
69504 rb_erase(&vma->vm_rb, &mm->mm_rb);
69505 mm->map_count--;
69506 tail_vma = vma;
69507 @@ -1834,10 +2143,25 @@ int split_vma(struct mm_struct * mm, str
69508 struct mempolicy *pol;
69509 struct vm_area_struct *new;
69510
69511 +#ifdef CONFIG_PAX_SEGMEXEC
69512 + struct vm_area_struct *vma_m, *new_m = NULL;
69513 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
69514 +#endif
69515 +
69516 if (is_vm_hugetlb_page(vma) && (addr &
69517 ~(huge_page_mask(hstate_vma(vma)))))
69518 return -EINVAL;
69519
69520 +#ifdef CONFIG_PAX_SEGMEXEC
69521 + vma_m = pax_find_mirror_vma(vma);
69522 +
69523 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
69524 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
69525 + if (mm->map_count >= sysctl_max_map_count-1)
69526 + return -ENOMEM;
69527 + } else
69528 +#endif
69529 +
69530 if (mm->map_count >= sysctl_max_map_count)
69531 return -ENOMEM;
69532
69533 @@ -1845,6 +2169,16 @@ int split_vma(struct mm_struct * mm, str
69534 if (!new)
69535 return -ENOMEM;
69536
69537 +#ifdef CONFIG_PAX_SEGMEXEC
69538 + if (vma_m) {
69539 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69540 + if (!new_m) {
69541 + kmem_cache_free(vm_area_cachep, new);
69542 + return -ENOMEM;
69543 + }
69544 + }
69545 +#endif
69546 +
69547 /* most fields are the same, copy all, and then fixup */
69548 *new = *vma;
69549
69550 @@ -1855,8 +2189,29 @@ int split_vma(struct mm_struct * mm, str
69551 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
69552 }
69553
69554 +#ifdef CONFIG_PAX_SEGMEXEC
69555 + if (vma_m) {
69556 + *new_m = *vma_m;
69557 + new_m->vm_mirror = new;
69558 + new->vm_mirror = new_m;
69559 +
69560 + if (new_below)
69561 + new_m->vm_end = addr_m;
69562 + else {
69563 + new_m->vm_start = addr_m;
69564 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
69565 + }
69566 + }
69567 +#endif
69568 +
69569 pol = mpol_dup(vma_policy(vma));
69570 if (IS_ERR(pol)) {
69571 +
69572 +#ifdef CONFIG_PAX_SEGMEXEC
69573 + if (new_m)
69574 + kmem_cache_free(vm_area_cachep, new_m);
69575 +#endif
69576 +
69577 kmem_cache_free(vm_area_cachep, new);
69578 return PTR_ERR(pol);
69579 }
69580 @@ -1877,6 +2232,28 @@ int split_vma(struct mm_struct * mm, str
69581 else
69582 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
69583
69584 +#ifdef CONFIG_PAX_SEGMEXEC
69585 + if (vma_m) {
69586 + mpol_get(pol);
69587 + vma_set_policy(new_m, pol);
69588 +
69589 + if (new_m->vm_file) {
69590 + get_file(new_m->vm_file);
69591 + if (vma_m->vm_flags & VM_EXECUTABLE)
69592 + added_exe_file_vma(mm);
69593 + }
69594 +
69595 + if (new_m->vm_ops && new_m->vm_ops->open)
69596 + new_m->vm_ops->open(new_m);
69597 +
69598 + if (new_below)
69599 + vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
69600 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
69601 + else
69602 + vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
69603 + }
69604 +#endif
69605 +
69606 return 0;
69607 }
69608
69609 @@ -1885,11 +2262,30 @@ int split_vma(struct mm_struct * mm, str
69610 * work. This now handles partial unmappings.
69611 * Jeremy Fitzhardinge <jeremy@goop.org>
69612 */
69613 +#ifdef CONFIG_PAX_SEGMEXEC
69614 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69615 +{
69616 + int ret = __do_munmap(mm, start, len);
69617 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
69618 + return ret;
69619 +
69620 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
69621 +}
69622 +
69623 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69624 +#else
69625 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69626 +#endif
69627 {
69628 unsigned long end;
69629 struct vm_area_struct *vma, *prev, *last;
69630
69631 + /*
69632 + * mm->mmap_sem is required to protect against another thread
69633 + * changing the mappings in case we sleep.
69634 + */
69635 + verify_mm_writelocked(mm);
69636 +
69637 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
69638 return -EINVAL;
69639
69640 @@ -1953,6 +2349,8 @@ int do_munmap(struct mm_struct *mm, unsi
69641 /* Fix up all other VM information */
69642 remove_vma_list(mm, vma);
69643
69644 + track_exec_limit(mm, start, end, 0UL);
69645 +
69646 return 0;
69647 }
69648
69649 @@ -1965,22 +2363,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
69650
69651 profile_munmap(addr);
69652
69653 +#ifdef CONFIG_PAX_SEGMEXEC
69654 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
69655 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
69656 + return -EINVAL;
69657 +#endif
69658 +
69659 down_write(&mm->mmap_sem);
69660 ret = do_munmap(mm, addr, len);
69661 up_write(&mm->mmap_sem);
69662 return ret;
69663 }
69664
69665 -static inline void verify_mm_writelocked(struct mm_struct *mm)
69666 -{
69667 -#ifdef CONFIG_DEBUG_VM
69668 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
69669 - WARN_ON(1);
69670 - up_read(&mm->mmap_sem);
69671 - }
69672 -#endif
69673 -}
69674 -
69675 /*
69676 * this is really a simplified "do_mmap". it only handles
69677 * anonymous maps. eventually we may be able to do some
69678 @@ -1994,6 +2388,7 @@ unsigned long do_brk(unsigned long addr,
69679 struct rb_node ** rb_link, * rb_parent;
69680 pgoff_t pgoff = addr >> PAGE_SHIFT;
69681 int error;
69682 + unsigned long charged;
69683
69684 len = PAGE_ALIGN(len);
69685 if (!len)
69686 @@ -2005,16 +2400,30 @@ unsigned long do_brk(unsigned long addr,
69687
69688 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
69689
69690 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
69691 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
69692 + flags &= ~VM_EXEC;
69693 +
69694 +#ifdef CONFIG_PAX_MPROTECT
69695 + if (mm->pax_flags & MF_PAX_MPROTECT)
69696 + flags &= ~VM_MAYEXEC;
69697 +#endif
69698 +
69699 + }
69700 +#endif
69701 +
69702 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
69703 if (error & ~PAGE_MASK)
69704 return error;
69705
69706 + charged = len >> PAGE_SHIFT;
69707 +
69708 /*
69709 * mlock MCL_FUTURE?
69710 */
69711 if (mm->def_flags & VM_LOCKED) {
69712 unsigned long locked, lock_limit;
69713 - locked = len >> PAGE_SHIFT;
69714 + locked = charged;
69715 locked += mm->locked_vm;
69716 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
69717 lock_limit >>= PAGE_SHIFT;
69718 @@ -2031,22 +2440,22 @@ unsigned long do_brk(unsigned long addr,
69719 /*
69720 * Clear old maps. this also does some error checking for us
69721 */
69722 - munmap_back:
69723 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69724 if (vma && vma->vm_start < addr + len) {
69725 if (do_munmap(mm, addr, len))
69726 return -ENOMEM;
69727 - goto munmap_back;
69728 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69729 + BUG_ON(vma && vma->vm_start < addr + len);
69730 }
69731
69732 /* Check against address space limits *after* clearing old maps... */
69733 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
69734 + if (!may_expand_vm(mm, charged))
69735 return -ENOMEM;
69736
69737 if (mm->map_count > sysctl_max_map_count)
69738 return -ENOMEM;
69739
69740 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
69741 + if (security_vm_enough_memory(charged))
69742 return -ENOMEM;
69743
69744 /* Can we just expand an old private anonymous mapping? */
69745 @@ -2060,7 +2469,7 @@ unsigned long do_brk(unsigned long addr,
69746 */
69747 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69748 if (!vma) {
69749 - vm_unacct_memory(len >> PAGE_SHIFT);
69750 + vm_unacct_memory(charged);
69751 return -ENOMEM;
69752 }
69753
69754 @@ -2072,11 +2481,12 @@ unsigned long do_brk(unsigned long addr,
69755 vma->vm_page_prot = vm_get_page_prot(flags);
69756 vma_link(mm, vma, prev, rb_link, rb_parent);
69757 out:
69758 - mm->total_vm += len >> PAGE_SHIFT;
69759 + mm->total_vm += charged;
69760 if (flags & VM_LOCKED) {
69761 if (!mlock_vma_pages_range(vma, addr, addr + len))
69762 - mm->locked_vm += (len >> PAGE_SHIFT);
69763 + mm->locked_vm += charged;
69764 }
69765 + track_exec_limit(mm, addr, addr + len, flags);
69766 return addr;
69767 }
69768
69769 @@ -2123,8 +2533,10 @@ void exit_mmap(struct mm_struct *mm)
69770 * Walk the list again, actually closing and freeing it,
69771 * with preemption enabled, without holding any MM locks.
69772 */
69773 - while (vma)
69774 + while (vma) {
69775 + vma->vm_mirror = NULL;
69776 vma = remove_vma(vma);
69777 + }
69778
69779 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
69780 }
69781 @@ -2138,6 +2550,10 @@ int insert_vm_struct(struct mm_struct *
69782 struct vm_area_struct * __vma, * prev;
69783 struct rb_node ** rb_link, * rb_parent;
69784
69785 +#ifdef CONFIG_PAX_SEGMEXEC
69786 + struct vm_area_struct *vma_m = NULL;
69787 +#endif
69788 +
69789 /*
69790 * The vm_pgoff of a purely anonymous vma should be irrelevant
69791 * until its first write fault, when page's anon_vma and index
69792 @@ -2160,7 +2576,22 @@ int insert_vm_struct(struct mm_struct *
69793 if ((vma->vm_flags & VM_ACCOUNT) &&
69794 security_vm_enough_memory_mm(mm, vma_pages(vma)))
69795 return -ENOMEM;
69796 +
69797 +#ifdef CONFIG_PAX_SEGMEXEC
69798 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
69799 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69800 + if (!vma_m)
69801 + return -ENOMEM;
69802 + }
69803 +#endif
69804 +
69805 vma_link(mm, vma, prev, rb_link, rb_parent);
69806 +
69807 +#ifdef CONFIG_PAX_SEGMEXEC
69808 + if (vma_m)
69809 + pax_mirror_vma(vma_m, vma);
69810 +#endif
69811 +
69812 return 0;
69813 }
69814
69815 @@ -2178,6 +2609,8 @@ struct vm_area_struct *copy_vma(struct v
69816 struct rb_node **rb_link, *rb_parent;
69817 struct mempolicy *pol;
69818
69819 + BUG_ON(vma->vm_mirror);
69820 +
69821 /*
69822 * If anonymous vma has not yet been faulted, update new pgoff
69823 * to match new location, to increase its chance of merging.
69824 @@ -2221,6 +2654,35 @@ struct vm_area_struct *copy_vma(struct v
69825 return new_vma;
69826 }
69827
69828 +#ifdef CONFIG_PAX_SEGMEXEC
69829 +void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
69830 +{
69831 + struct vm_area_struct *prev_m;
69832 + struct rb_node **rb_link_m, *rb_parent_m;
69833 + struct mempolicy *pol_m;
69834 +
69835 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
69836 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
69837 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
69838 + *vma_m = *vma;
69839 + pol_m = vma_policy(vma_m);
69840 + mpol_get(pol_m);
69841 + vma_set_policy(vma_m, pol_m);
69842 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
69843 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
69844 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
69845 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
69846 + if (vma_m->vm_file)
69847 + get_file(vma_m->vm_file);
69848 + if (vma_m->vm_ops && vma_m->vm_ops->open)
69849 + vma_m->vm_ops->open(vma_m);
69850 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
69851 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
69852 + vma_m->vm_mirror = vma;
69853 + vma->vm_mirror = vma_m;
69854 +}
69855 +#endif
69856 +
69857 /*
69858 * Return true if the calling process may expand its vm space by the passed
69859 * number of pages
69860 @@ -2231,7 +2693,7 @@ int may_expand_vm(struct mm_struct *mm,
69861 unsigned long lim;
69862
69863 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
69864 -
69865 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
69866 if (cur + npages > lim)
69867 return 0;
69868 return 1;
69869 @@ -2301,6 +2763,22 @@ int install_special_mapping(struct mm_st
69870 vma->vm_start = addr;
69871 vma->vm_end = addr + len;
69872
69873 +#ifdef CONFIG_PAX_MPROTECT
69874 + if (mm->pax_flags & MF_PAX_MPROTECT) {
69875 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
69876 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
69877 + return -EPERM;
69878 + if (!(vm_flags & VM_EXEC))
69879 + vm_flags &= ~VM_MAYEXEC;
69880 +#else
69881 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
69882 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
69883 +#endif
69884 + else
69885 + vm_flags &= ~VM_MAYWRITE;
69886 + }
69887 +#endif
69888 +
69889 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
69890 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69891
69892 diff -urNp linux-2.6.32.44/mm/mprotect.c linux-2.6.32.44/mm/mprotect.c
69893 --- linux-2.6.32.44/mm/mprotect.c 2011-03-27 14:31:47.000000000 -0400
69894 +++ linux-2.6.32.44/mm/mprotect.c 2011-04-17 15:56:46.000000000 -0400
69895 @@ -24,10 +24,16 @@
69896 #include <linux/mmu_notifier.h>
69897 #include <linux/migrate.h>
69898 #include <linux/perf_event.h>
69899 +
69900 +#ifdef CONFIG_PAX_MPROTECT
69901 +#include <linux/elf.h>
69902 +#endif
69903 +
69904 #include <asm/uaccess.h>
69905 #include <asm/pgtable.h>
69906 #include <asm/cacheflush.h>
69907 #include <asm/tlbflush.h>
69908 +#include <asm/mmu_context.h>
69909
69910 #ifndef pgprot_modify
69911 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
69912 @@ -132,6 +138,48 @@ static void change_protection(struct vm_
69913 flush_tlb_range(vma, start, end);
69914 }
69915
69916 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69917 +/* called while holding the mmap semaphore for writing except stack expansion */
69918 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
69919 +{
69920 + unsigned long oldlimit, newlimit = 0UL;
69921 +
69922 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
69923 + return;
69924 +
69925 + spin_lock(&mm->page_table_lock);
69926 + oldlimit = mm->context.user_cs_limit;
69927 + if ((prot & VM_EXEC) && oldlimit < end)
69928 + /* USER_CS limit moved up */
69929 + newlimit = end;
69930 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
69931 + /* USER_CS limit moved down */
69932 + newlimit = start;
69933 +
69934 + if (newlimit) {
69935 + mm->context.user_cs_limit = newlimit;
69936 +
69937 +#ifdef CONFIG_SMP
69938 + wmb();
69939 + cpus_clear(mm->context.cpu_user_cs_mask);
69940 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
69941 +#endif
69942 +
69943 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
69944 + }
69945 + spin_unlock(&mm->page_table_lock);
69946 + if (newlimit == end) {
69947 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
69948 +
69949 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
69950 + if (is_vm_hugetlb_page(vma))
69951 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
69952 + else
69953 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
69954 + }
69955 +}
69956 +#endif
69957 +
69958 int
69959 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
69960 unsigned long start, unsigned long end, unsigned long newflags)
69961 @@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vm
69962 int error;
69963 int dirty_accountable = 0;
69964
69965 +#ifdef CONFIG_PAX_SEGMEXEC
69966 + struct vm_area_struct *vma_m = NULL;
69967 + unsigned long start_m, end_m;
69968 +
69969 + start_m = start + SEGMEXEC_TASK_SIZE;
69970 + end_m = end + SEGMEXEC_TASK_SIZE;
69971 +#endif
69972 +
69973 if (newflags == oldflags) {
69974 *pprev = vma;
69975 return 0;
69976 }
69977
69978 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
69979 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
69980 +
69981 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
69982 + return -ENOMEM;
69983 +
69984 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
69985 + return -ENOMEM;
69986 + }
69987 +
69988 /*
69989 * If we make a private mapping writable we increase our commit;
69990 * but (without finer accounting) cannot reduce our commit if we
69991 @@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vm
69992 }
69993 }
69994
69995 +#ifdef CONFIG_PAX_SEGMEXEC
69996 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
69997 + if (start != vma->vm_start) {
69998 + error = split_vma(mm, vma, start, 1);
69999 + if (error)
70000 + goto fail;
70001 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
70002 + *pprev = (*pprev)->vm_next;
70003 + }
70004 +
70005 + if (end != vma->vm_end) {
70006 + error = split_vma(mm, vma, end, 0);
70007 + if (error)
70008 + goto fail;
70009 + }
70010 +
70011 + if (pax_find_mirror_vma(vma)) {
70012 + error = __do_munmap(mm, start_m, end_m - start_m);
70013 + if (error)
70014 + goto fail;
70015 + } else {
70016 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
70017 + if (!vma_m) {
70018 + error = -ENOMEM;
70019 + goto fail;
70020 + }
70021 + vma->vm_flags = newflags;
70022 + pax_mirror_vma(vma_m, vma);
70023 + }
70024 + }
70025 +#endif
70026 +
70027 /*
70028 * First try to merge with previous and/or next vma.
70029 */
70030 @@ -195,9 +293,21 @@ success:
70031 * vm_flags and vm_page_prot are protected by the mmap_sem
70032 * held in write mode.
70033 */
70034 +
70035 +#ifdef CONFIG_PAX_SEGMEXEC
70036 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
70037 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
70038 +#endif
70039 +
70040 vma->vm_flags = newflags;
70041 +
70042 +#ifdef CONFIG_PAX_MPROTECT
70043 + if (mm->binfmt && mm->binfmt->handle_mprotect)
70044 + mm->binfmt->handle_mprotect(vma, newflags);
70045 +#endif
70046 +
70047 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
70048 - vm_get_page_prot(newflags));
70049 + vm_get_page_prot(vma->vm_flags));
70050
70051 if (vma_wants_writenotify(vma)) {
70052 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
70053 @@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
70054 end = start + len;
70055 if (end <= start)
70056 return -ENOMEM;
70057 +
70058 +#ifdef CONFIG_PAX_SEGMEXEC
70059 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
70060 + if (end > SEGMEXEC_TASK_SIZE)
70061 + return -EINVAL;
70062 + } else
70063 +#endif
70064 +
70065 + if (end > TASK_SIZE)
70066 + return -EINVAL;
70067 +
70068 if (!arch_validate_prot(prot))
70069 return -EINVAL;
70070
70071 @@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
70072 /*
70073 * Does the application expect PROT_READ to imply PROT_EXEC:
70074 */
70075 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
70076 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
70077 prot |= PROT_EXEC;
70078
70079 vm_flags = calc_vm_prot_bits(prot);
70080 @@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
70081 if (start > vma->vm_start)
70082 prev = vma;
70083
70084 +#ifdef CONFIG_PAX_MPROTECT
70085 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
70086 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
70087 +#endif
70088 +
70089 for (nstart = start ; ; ) {
70090 unsigned long newflags;
70091
70092 @@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
70093
70094 /* newflags >> 4 shift VM_MAY% in place of VM_% */
70095 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
70096 + if (prot & (PROT_WRITE | PROT_EXEC))
70097 + gr_log_rwxmprotect(vma->vm_file);
70098 +
70099 + error = -EACCES;
70100 + goto out;
70101 + }
70102 +
70103 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
70104 error = -EACCES;
70105 goto out;
70106 }
70107 @@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
70108 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
70109 if (error)
70110 goto out;
70111 +
70112 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
70113 +
70114 nstart = tmp;
70115
70116 if (nstart < prev->vm_end)
70117 diff -urNp linux-2.6.32.44/mm/mremap.c linux-2.6.32.44/mm/mremap.c
70118 --- linux-2.6.32.44/mm/mremap.c 2011-04-17 17:00:52.000000000 -0400
70119 +++ linux-2.6.32.44/mm/mremap.c 2011-04-17 17:03:58.000000000 -0400
70120 @@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_str
70121 continue;
70122 pte = ptep_clear_flush(vma, old_addr, old_pte);
70123 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
70124 +
70125 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
70126 + if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
70127 + pte = pte_exprotect(pte);
70128 +#endif
70129 +
70130 set_pte_at(mm, new_addr, new_pte, pte);
70131 }
70132
70133 @@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_res
70134 if (is_vm_hugetlb_page(vma))
70135 goto Einval;
70136
70137 +#ifdef CONFIG_PAX_SEGMEXEC
70138 + if (pax_find_mirror_vma(vma))
70139 + goto Einval;
70140 +#endif
70141 +
70142 /* We can't remap across vm area boundaries */
70143 if (old_len > vma->vm_end - addr)
70144 goto Efault;
70145 @@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned
70146 unsigned long ret = -EINVAL;
70147 unsigned long charged = 0;
70148 unsigned long map_flags;
70149 + unsigned long pax_task_size = TASK_SIZE;
70150
70151 if (new_addr & ~PAGE_MASK)
70152 goto out;
70153
70154 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
70155 +#ifdef CONFIG_PAX_SEGMEXEC
70156 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
70157 + pax_task_size = SEGMEXEC_TASK_SIZE;
70158 +#endif
70159 +
70160 + pax_task_size -= PAGE_SIZE;
70161 +
70162 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
70163 goto out;
70164
70165 /* Check if the location we're moving into overlaps the
70166 * old location at all, and fail if it does.
70167 */
70168 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
70169 - goto out;
70170 -
70171 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
70172 + if (addr + old_len > new_addr && new_addr + new_len > addr)
70173 goto out;
70174
70175 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
70176 @@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long ad
70177 struct vm_area_struct *vma;
70178 unsigned long ret = -EINVAL;
70179 unsigned long charged = 0;
70180 + unsigned long pax_task_size = TASK_SIZE;
70181
70182 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
70183 goto out;
70184 @@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long ad
70185 if (!new_len)
70186 goto out;
70187
70188 +#ifdef CONFIG_PAX_SEGMEXEC
70189 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
70190 + pax_task_size = SEGMEXEC_TASK_SIZE;
70191 +#endif
70192 +
70193 + pax_task_size -= PAGE_SIZE;
70194 +
70195 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
70196 + old_len > pax_task_size || addr > pax_task_size-old_len)
70197 + goto out;
70198 +
70199 if (flags & MREMAP_FIXED) {
70200 if (flags & MREMAP_MAYMOVE)
70201 ret = mremap_to(addr, old_len, new_addr, new_len);
70202 @@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long ad
70203 addr + new_len);
70204 }
70205 ret = addr;
70206 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
70207 goto out;
70208 }
70209 }
70210 @@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long ad
70211 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
70212 if (ret)
70213 goto out;
70214 +
70215 + map_flags = vma->vm_flags;
70216 ret = move_vma(vma, addr, old_len, new_len, new_addr);
70217 + if (!(ret & ~PAGE_MASK)) {
70218 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
70219 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
70220 + }
70221 }
70222 out:
70223 if (ret & ~PAGE_MASK)
70224 diff -urNp linux-2.6.32.44/mm/nommu.c linux-2.6.32.44/mm/nommu.c
70225 --- linux-2.6.32.44/mm/nommu.c 2011-03-27 14:31:47.000000000 -0400
70226 +++ linux-2.6.32.44/mm/nommu.c 2011-04-17 15:56:46.000000000 -0400
70227 @@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMI
70228 int sysctl_overcommit_ratio = 50; /* default is 50% */
70229 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
70230 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
70231 -int heap_stack_gap = 0;
70232
70233 atomic_long_t mmap_pages_allocated;
70234
70235 @@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct m
70236 EXPORT_SYMBOL(find_vma);
70237
70238 /*
70239 - * find a VMA
70240 - * - we don't extend stack VMAs under NOMMU conditions
70241 - */
70242 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
70243 -{
70244 - return find_vma(mm, addr);
70245 -}
70246 -
70247 -/*
70248 * expand a stack to a given address
70249 * - not supported under NOMMU conditions
70250 */
70251 diff -urNp linux-2.6.32.44/mm/page_alloc.c linux-2.6.32.44/mm/page_alloc.c
70252 --- linux-2.6.32.44/mm/page_alloc.c 2011-06-25 12:55:35.000000000 -0400
70253 +++ linux-2.6.32.44/mm/page_alloc.c 2011-07-09 09:13:08.000000000 -0400
70254 @@ -289,7 +289,7 @@ out:
70255 * This usage means that zero-order pages may not be compound.
70256 */
70257
70258 -static void free_compound_page(struct page *page)
70259 +void free_compound_page(struct page *page)
70260 {
70261 __free_pages_ok(page, compound_order(page));
70262 }
70263 @@ -587,6 +587,10 @@ static void __free_pages_ok(struct page
70264 int bad = 0;
70265 int wasMlocked = __TestClearPageMlocked(page);
70266
70267 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
70268 + unsigned long index = 1UL << order;
70269 +#endif
70270 +
70271 kmemcheck_free_shadow(page, order);
70272
70273 for (i = 0 ; i < (1 << order) ; ++i)
70274 @@ -599,6 +603,12 @@ static void __free_pages_ok(struct page
70275 debug_check_no_obj_freed(page_address(page),
70276 PAGE_SIZE << order);
70277 }
70278 +
70279 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
70280 + for (; index; --index)
70281 + sanitize_highpage(page + index - 1);
70282 +#endif
70283 +
70284 arch_free_page(page, order);
70285 kernel_map_pages(page, 1 << order, 0);
70286
70287 @@ -702,8 +712,10 @@ static int prep_new_page(struct page *pa
70288 arch_alloc_page(page, order);
70289 kernel_map_pages(page, 1 << order, 1);
70290
70291 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
70292 if (gfp_flags & __GFP_ZERO)
70293 prep_zero_page(page, order, gfp_flags);
70294 +#endif
70295
70296 if (order && (gfp_flags & __GFP_COMP))
70297 prep_compound_page(page, order);
70298 @@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct pa
70299 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
70300 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
70301 }
70302 +
70303 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
70304 + sanitize_highpage(page);
70305 +#endif
70306 +
70307 arch_free_page(page, 0);
70308 kernel_map_pages(page, 1, 0);
70309
70310 @@ -2179,6 +2196,8 @@ void show_free_areas(void)
70311 int cpu;
70312 struct zone *zone;
70313
70314 + pax_track_stack();
70315 +
70316 for_each_populated_zone(zone) {
70317 show_node(zone);
70318 printk("%s per-cpu:\n", zone->name);
70319 @@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct p
70320 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
70321 }
70322 #else
70323 -static void inline setup_usemap(struct pglist_data *pgdat,
70324 +static inline void setup_usemap(struct pglist_data *pgdat,
70325 struct zone *zone, unsigned long zonesize) {}
70326 #endif /* CONFIG_SPARSEMEM */
70327
70328 diff -urNp linux-2.6.32.44/mm/percpu.c linux-2.6.32.44/mm/percpu.c
70329 --- linux-2.6.32.44/mm/percpu.c 2011-03-27 14:31:47.000000000 -0400
70330 +++ linux-2.6.32.44/mm/percpu.c 2011-04-17 15:56:46.000000000 -0400
70331 @@ -115,7 +115,7 @@ static unsigned int pcpu_first_unit_cpu
70332 static unsigned int pcpu_last_unit_cpu __read_mostly;
70333
70334 /* the address of the first chunk which starts with the kernel static area */
70335 -void *pcpu_base_addr __read_mostly;
70336 +void *pcpu_base_addr __read_only;
70337 EXPORT_SYMBOL_GPL(pcpu_base_addr);
70338
70339 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
70340 diff -urNp linux-2.6.32.44/mm/rmap.c linux-2.6.32.44/mm/rmap.c
70341 --- linux-2.6.32.44/mm/rmap.c 2011-03-27 14:31:47.000000000 -0400
70342 +++ linux-2.6.32.44/mm/rmap.c 2011-04-17 15:56:46.000000000 -0400
70343 @@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_stru
70344 /* page_table_lock to protect against threads */
70345 spin_lock(&mm->page_table_lock);
70346 if (likely(!vma->anon_vma)) {
70347 +
70348 +#ifdef CONFIG_PAX_SEGMEXEC
70349 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
70350 +
70351 + if (vma_m) {
70352 + BUG_ON(vma_m->anon_vma);
70353 + vma_m->anon_vma = anon_vma;
70354 + list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
70355 + }
70356 +#endif
70357 +
70358 vma->anon_vma = anon_vma;
70359 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
70360 allocated = NULL;
70361 diff -urNp linux-2.6.32.44/mm/shmem.c linux-2.6.32.44/mm/shmem.c
70362 --- linux-2.6.32.44/mm/shmem.c 2011-03-27 14:31:47.000000000 -0400
70363 +++ linux-2.6.32.44/mm/shmem.c 2011-05-18 20:09:37.000000000 -0400
70364 @@ -31,7 +31,7 @@
70365 #include <linux/swap.h>
70366 #include <linux/ima.h>
70367
70368 -static struct vfsmount *shm_mnt;
70369 +struct vfsmount *shm_mnt;
70370
70371 #ifdef CONFIG_SHMEM
70372 /*
70373 @@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *
70374 goto unlock;
70375 }
70376 entry = shmem_swp_entry(info, index, NULL);
70377 + if (!entry)
70378 + goto unlock;
70379 if (entry->val) {
70380 /*
70381 * The more uptodate page coming down from a stacked
70382 @@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_ent
70383 struct vm_area_struct pvma;
70384 struct page *page;
70385
70386 + pax_track_stack();
70387 +
70388 spol = mpol_cond_copy(&mpol,
70389 mpol_shared_policy_lookup(&info->policy, idx));
70390
70391 @@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *d
70392
70393 info = SHMEM_I(inode);
70394 inode->i_size = len-1;
70395 - if (len <= (char *)inode - (char *)info) {
70396 + if (len <= (char *)inode - (char *)info && len <= 64) {
70397 /* do it inline */
70398 memcpy(info, symname, len);
70399 inode->i_op = &shmem_symlink_inline_operations;
70400 @@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block
70401 int err = -ENOMEM;
70402
70403 /* Round up to L1_CACHE_BYTES to resist false sharing */
70404 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
70405 - L1_CACHE_BYTES), GFP_KERNEL);
70406 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
70407 if (!sbinfo)
70408 return -ENOMEM;
70409
70410 diff -urNp linux-2.6.32.44/mm/slab.c linux-2.6.32.44/mm/slab.c
70411 --- linux-2.6.32.44/mm/slab.c 2011-03-27 14:31:47.000000000 -0400
70412 +++ linux-2.6.32.44/mm/slab.c 2011-05-04 17:56:20.000000000 -0400
70413 @@ -174,7 +174,7 @@
70414
70415 /* Legal flag mask for kmem_cache_create(). */
70416 #if DEBUG
70417 -# define CREATE_MASK (SLAB_RED_ZONE | \
70418 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
70419 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
70420 SLAB_CACHE_DMA | \
70421 SLAB_STORE_USER | \
70422 @@ -182,7 +182,7 @@
70423 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70424 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
70425 #else
70426 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
70427 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
70428 SLAB_CACHE_DMA | \
70429 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
70430 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70431 @@ -308,7 +308,7 @@ struct kmem_list3 {
70432 * Need this for bootstrapping a per node allocator.
70433 */
70434 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
70435 -struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
70436 +struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
70437 #define CACHE_CACHE 0
70438 #define SIZE_AC MAX_NUMNODES
70439 #define SIZE_L3 (2 * MAX_NUMNODES)
70440 @@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_
70441 if ((x)->max_freeable < i) \
70442 (x)->max_freeable = i; \
70443 } while (0)
70444 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
70445 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
70446 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
70447 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
70448 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
70449 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
70450 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
70451 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
70452 #else
70453 #define STATS_INC_ACTIVE(x) do { } while (0)
70454 #define STATS_DEC_ACTIVE(x) do { } while (0)
70455 @@ -558,7 +558,7 @@ static inline void *index_to_obj(struct
70456 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
70457 */
70458 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
70459 - const struct slab *slab, void *obj)
70460 + const struct slab *slab, const void *obj)
70461 {
70462 u32 offset = (obj - slab->s_mem);
70463 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
70464 @@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
70465 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
70466 sizes[INDEX_AC].cs_size,
70467 ARCH_KMALLOC_MINALIGN,
70468 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70469 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70470 NULL);
70471
70472 if (INDEX_AC != INDEX_L3) {
70473 @@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
70474 kmem_cache_create(names[INDEX_L3].name,
70475 sizes[INDEX_L3].cs_size,
70476 ARCH_KMALLOC_MINALIGN,
70477 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70478 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70479 NULL);
70480 }
70481
70482 @@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
70483 sizes->cs_cachep = kmem_cache_create(names->name,
70484 sizes->cs_size,
70485 ARCH_KMALLOC_MINALIGN,
70486 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70487 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70488 NULL);
70489 }
70490 #ifdef CONFIG_ZONE_DMA
70491 @@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, vo
70492 }
70493 /* cpu stats */
70494 {
70495 - unsigned long allochit = atomic_read(&cachep->allochit);
70496 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
70497 - unsigned long freehit = atomic_read(&cachep->freehit);
70498 - unsigned long freemiss = atomic_read(&cachep->freemiss);
70499 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
70500 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
70501 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
70502 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
70503
70504 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
70505 allochit, allocmiss, freehit, freemiss);
70506 @@ -4471,15 +4471,66 @@ static const struct file_operations proc
70507
70508 static int __init slab_proc_init(void)
70509 {
70510 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
70511 + mode_t gr_mode = S_IRUGO;
70512 +
70513 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
70514 + gr_mode = S_IRUSR;
70515 +#endif
70516 +
70517 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
70518 #ifdef CONFIG_DEBUG_SLAB_LEAK
70519 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
70520 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
70521 #endif
70522 return 0;
70523 }
70524 module_init(slab_proc_init);
70525 #endif
70526
70527 +void check_object_size(const void *ptr, unsigned long n, bool to)
70528 +{
70529 +
70530 +#ifdef CONFIG_PAX_USERCOPY
70531 + struct page *page;
70532 + struct kmem_cache *cachep = NULL;
70533 + struct slab *slabp;
70534 + unsigned int objnr;
70535 + unsigned long offset;
70536 +
70537 + if (!n)
70538 + return;
70539 +
70540 + if (ZERO_OR_NULL_PTR(ptr))
70541 + goto report;
70542 +
70543 + if (!virt_addr_valid(ptr))
70544 + return;
70545 +
70546 + page = virt_to_head_page(ptr);
70547 +
70548 + if (!PageSlab(page)) {
70549 + if (object_is_on_stack(ptr, n) == -1)
70550 + goto report;
70551 + return;
70552 + }
70553 +
70554 + cachep = page_get_cache(page);
70555 + if (!(cachep->flags & SLAB_USERCOPY))
70556 + goto report;
70557 +
70558 + slabp = page_get_slab(page);
70559 + objnr = obj_to_index(cachep, slabp, ptr);
70560 + BUG_ON(objnr >= cachep->num);
70561 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
70562 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
70563 + return;
70564 +
70565 +report:
70566 + pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
70567 +#endif
70568 +
70569 +}
70570 +EXPORT_SYMBOL(check_object_size);
70571 +
70572 /**
70573 * ksize - get the actual amount of memory allocated for a given object
70574 * @objp: Pointer to the object
70575 diff -urNp linux-2.6.32.44/mm/slob.c linux-2.6.32.44/mm/slob.c
70576 --- linux-2.6.32.44/mm/slob.c 2011-03-27 14:31:47.000000000 -0400
70577 +++ linux-2.6.32.44/mm/slob.c 2011-07-06 19:53:33.000000000 -0400
70578 @@ -29,7 +29,7 @@
70579 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
70580 * alloc_pages() directly, allocating compound pages so the page order
70581 * does not have to be separately tracked, and also stores the exact
70582 - * allocation size in page->private so that it can be used to accurately
70583 + * allocation size in slob_page->size so that it can be used to accurately
70584 * provide ksize(). These objects are detected in kfree() because slob_page()
70585 * is false for them.
70586 *
70587 @@ -58,6 +58,7 @@
70588 */
70589
70590 #include <linux/kernel.h>
70591 +#include <linux/sched.h>
70592 #include <linux/slab.h>
70593 #include <linux/mm.h>
70594 #include <linux/swap.h> /* struct reclaim_state */
70595 @@ -100,7 +101,8 @@ struct slob_page {
70596 unsigned long flags; /* mandatory */
70597 atomic_t _count; /* mandatory */
70598 slobidx_t units; /* free units left in page */
70599 - unsigned long pad[2];
70600 + unsigned long pad[1];
70601 + unsigned long size; /* size when >=PAGE_SIZE */
70602 slob_t *free; /* first free slob_t in page */
70603 struct list_head list; /* linked list of free pages */
70604 };
70605 @@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
70606 */
70607 static inline int is_slob_page(struct slob_page *sp)
70608 {
70609 - return PageSlab((struct page *)sp);
70610 + return PageSlab((struct page *)sp) && !sp->size;
70611 }
70612
70613 static inline void set_slob_page(struct slob_page *sp)
70614 @@ -148,7 +150,7 @@ static inline void clear_slob_page(struc
70615
70616 static inline struct slob_page *slob_page(const void *addr)
70617 {
70618 - return (struct slob_page *)virt_to_page(addr);
70619 + return (struct slob_page *)virt_to_head_page(addr);
70620 }
70621
70622 /*
70623 @@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_
70624 /*
70625 * Return the size of a slob block.
70626 */
70627 -static slobidx_t slob_units(slob_t *s)
70628 +static slobidx_t slob_units(const slob_t *s)
70629 {
70630 if (s->units > 0)
70631 return s->units;
70632 @@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
70633 /*
70634 * Return the next free slob block pointer after this one.
70635 */
70636 -static slob_t *slob_next(slob_t *s)
70637 +static slob_t *slob_next(const slob_t *s)
70638 {
70639 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
70640 slobidx_t next;
70641 @@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
70642 /*
70643 * Returns true if s is the last free block in its page.
70644 */
70645 -static int slob_last(slob_t *s)
70646 +static int slob_last(const slob_t *s)
70647 {
70648 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
70649 }
70650 @@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, i
70651 if (!page)
70652 return NULL;
70653
70654 + set_slob_page(page);
70655 return page_address(page);
70656 }
70657
70658 @@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp
70659 if (!b)
70660 return NULL;
70661 sp = slob_page(b);
70662 - set_slob_page(sp);
70663
70664 spin_lock_irqsave(&slob_lock, flags);
70665 sp->units = SLOB_UNITS(PAGE_SIZE);
70666 sp->free = b;
70667 + sp->size = 0;
70668 INIT_LIST_HEAD(&sp->list);
70669 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
70670 set_slob_page_free(sp, slob_list);
70671 @@ -475,10 +478,9 @@ out:
70672 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
70673 #endif
70674
70675 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70676 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
70677 {
70678 - unsigned int *m;
70679 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70680 + slob_t *m;
70681 void *ret;
70682
70683 lockdep_trace_alloc(gfp);
70684 @@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t
70685
70686 if (!m)
70687 return NULL;
70688 - *m = size;
70689 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
70690 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
70691 + m[0].units = size;
70692 + m[1].units = align;
70693 ret = (void *)m + align;
70694
70695 trace_kmalloc_node(_RET_IP_, ret,
70696 @@ -501,16 +506,25 @@ void *__kmalloc_node(size_t size, gfp_t
70697
70698 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
70699 if (ret) {
70700 - struct page *page;
70701 - page = virt_to_page(ret);
70702 - page->private = size;
70703 + struct slob_page *sp;
70704 + sp = slob_page(ret);
70705 + sp->size = size;
70706 }
70707
70708 trace_kmalloc_node(_RET_IP_, ret,
70709 size, PAGE_SIZE << order, gfp, node);
70710 }
70711
70712 - kmemleak_alloc(ret, size, 1, gfp);
70713 + return ret;
70714 +}
70715 +
70716 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70717 +{
70718 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70719 + void *ret = __kmalloc_node_align(size, gfp, node, align);
70720 +
70721 + if (!ZERO_OR_NULL_PTR(ret))
70722 + kmemleak_alloc(ret, size, 1, gfp);
70723 return ret;
70724 }
70725 EXPORT_SYMBOL(__kmalloc_node);
70726 @@ -528,13 +542,88 @@ void kfree(const void *block)
70727 sp = slob_page(block);
70728 if (is_slob_page(sp)) {
70729 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70730 - unsigned int *m = (unsigned int *)(block - align);
70731 - slob_free(m, *m + align);
70732 - } else
70733 + slob_t *m = (slob_t *)(block - align);
70734 + slob_free(m, m[0].units + align);
70735 + } else {
70736 + clear_slob_page(sp);
70737 + free_slob_page(sp);
70738 + sp->size = 0;
70739 put_page(&sp->page);
70740 + }
70741 }
70742 EXPORT_SYMBOL(kfree);
70743
70744 +void check_object_size(const void *ptr, unsigned long n, bool to)
70745 +{
70746 +
70747 +#ifdef CONFIG_PAX_USERCOPY
70748 + struct slob_page *sp;
70749 + const slob_t *free;
70750 + const void *base;
70751 + unsigned long flags;
70752 +
70753 + if (!n)
70754 + return;
70755 +
70756 + if (ZERO_OR_NULL_PTR(ptr))
70757 + goto report;
70758 +
70759 + if (!virt_addr_valid(ptr))
70760 + return;
70761 +
70762 + sp = slob_page(ptr);
70763 + if (!PageSlab((struct page*)sp)) {
70764 + if (object_is_on_stack(ptr, n) == -1)
70765 + goto report;
70766 + return;
70767 + }
70768 +
70769 + if (sp->size) {
70770 + base = page_address(&sp->page);
70771 + if (base <= ptr && n <= sp->size - (ptr - base))
70772 + return;
70773 + goto report;
70774 + }
70775 +
70776 + /* some tricky double walking to find the chunk */
70777 + spin_lock_irqsave(&slob_lock, flags);
70778 + base = (void *)((unsigned long)ptr & PAGE_MASK);
70779 + free = sp->free;
70780 +
70781 + while (!slob_last(free) && (void *)free <= ptr) {
70782 + base = free + slob_units(free);
70783 + free = slob_next(free);
70784 + }
70785 +
70786 + while (base < (void *)free) {
70787 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
70788 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
70789 + int offset;
70790 +
70791 + if (ptr < base + align)
70792 + break;
70793 +
70794 + offset = ptr - base - align;
70795 + if (offset >= m) {
70796 + base += size;
70797 + continue;
70798 + }
70799 +
70800 + if (n > m - offset)
70801 + break;
70802 +
70803 + spin_unlock_irqrestore(&slob_lock, flags);
70804 + return;
70805 + }
70806 +
70807 + spin_unlock_irqrestore(&slob_lock, flags);
70808 +report:
70809 + pax_report_usercopy(ptr, n, to, NULL);
70810 +#endif
70811 +
70812 +}
70813 +EXPORT_SYMBOL(check_object_size);
70814 +
70815 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
70816 size_t ksize(const void *block)
70817 {
70818 @@ -547,10 +636,10 @@ size_t ksize(const void *block)
70819 sp = slob_page(block);
70820 if (is_slob_page(sp)) {
70821 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70822 - unsigned int *m = (unsigned int *)(block - align);
70823 - return SLOB_UNITS(*m) * SLOB_UNIT;
70824 + slob_t *m = (slob_t *)(block - align);
70825 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
70826 } else
70827 - return sp->page.private;
70828 + return sp->size;
70829 }
70830 EXPORT_SYMBOL(ksize);
70831
70832 @@ -566,8 +655,13 @@ struct kmem_cache *kmem_cache_create(con
70833 {
70834 struct kmem_cache *c;
70835
70836 +#ifdef CONFIG_PAX_USERCOPY
70837 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
70838 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
70839 +#else
70840 c = slob_alloc(sizeof(struct kmem_cache),
70841 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
70842 +#endif
70843
70844 if (c) {
70845 c->name = name;
70846 @@ -605,17 +699,25 @@ void *kmem_cache_alloc_node(struct kmem_
70847 {
70848 void *b;
70849
70850 +#ifdef CONFIG_PAX_USERCOPY
70851 + b = __kmalloc_node_align(c->size, flags, node, c->align);
70852 +#else
70853 if (c->size < PAGE_SIZE) {
70854 b = slob_alloc(c->size, flags, c->align, node);
70855 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70856 SLOB_UNITS(c->size) * SLOB_UNIT,
70857 flags, node);
70858 } else {
70859 + struct slob_page *sp;
70860 +
70861 b = slob_new_pages(flags, get_order(c->size), node);
70862 + sp = slob_page(b);
70863 + sp->size = c->size;
70864 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70865 PAGE_SIZE << get_order(c->size),
70866 flags, node);
70867 }
70868 +#endif
70869
70870 if (c->ctor)
70871 c->ctor(b);
70872 @@ -627,10 +729,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
70873
70874 static void __kmem_cache_free(void *b, int size)
70875 {
70876 - if (size < PAGE_SIZE)
70877 + struct slob_page *sp = slob_page(b);
70878 +
70879 + if (is_slob_page(sp))
70880 slob_free(b, size);
70881 - else
70882 + else {
70883 + clear_slob_page(sp);
70884 + free_slob_page(sp);
70885 + sp->size = 0;
70886 slob_free_pages(b, get_order(size));
70887 + }
70888 }
70889
70890 static void kmem_rcu_free(struct rcu_head *head)
70891 @@ -643,18 +751,32 @@ static void kmem_rcu_free(struct rcu_hea
70892
70893 void kmem_cache_free(struct kmem_cache *c, void *b)
70894 {
70895 + int size = c->size;
70896 +
70897 +#ifdef CONFIG_PAX_USERCOPY
70898 + if (size + c->align < PAGE_SIZE) {
70899 + size += c->align;
70900 + b -= c->align;
70901 + }
70902 +#endif
70903 +
70904 kmemleak_free_recursive(b, c->flags);
70905 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
70906 struct slob_rcu *slob_rcu;
70907 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
70908 + slob_rcu = b + (size - sizeof(struct slob_rcu));
70909 INIT_RCU_HEAD(&slob_rcu->head);
70910 - slob_rcu->size = c->size;
70911 + slob_rcu->size = size;
70912 call_rcu(&slob_rcu->head, kmem_rcu_free);
70913 } else {
70914 - __kmem_cache_free(b, c->size);
70915 + __kmem_cache_free(b, size);
70916 }
70917
70918 +#ifdef CONFIG_PAX_USERCOPY
70919 + trace_kfree(_RET_IP_, b);
70920 +#else
70921 trace_kmem_cache_free(_RET_IP_, b);
70922 +#endif
70923 +
70924 }
70925 EXPORT_SYMBOL(kmem_cache_free);
70926
70927 diff -urNp linux-2.6.32.44/mm/slub.c linux-2.6.32.44/mm/slub.c
70928 --- linux-2.6.32.44/mm/slub.c 2011-03-27 14:31:47.000000000 -0400
70929 +++ linux-2.6.32.44/mm/slub.c 2011-04-17 15:56:46.000000000 -0400
70930 @@ -410,7 +410,7 @@ static void print_track(const char *s, s
70931 if (!t->addr)
70932 return;
70933
70934 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
70935 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
70936 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
70937 }
70938
70939 @@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *
70940
70941 page = virt_to_head_page(x);
70942
70943 + BUG_ON(!PageSlab(page));
70944 +
70945 slab_free(s, page, x, _RET_IP_);
70946
70947 trace_kmem_cache_free(_RET_IP_, x);
70948 @@ -1937,7 +1939,7 @@ static int slub_min_objects;
70949 * Merge control. If this is set then no merging of slab caches will occur.
70950 * (Could be removed. This was introduced to pacify the merge skeptics.)
70951 */
70952 -static int slub_nomerge;
70953 +static int slub_nomerge = 1;
70954
70955 /*
70956 * Calculate the order of allocation given an slab object size.
70957 @@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_c
70958 * list to avoid pounding the page allocator excessively.
70959 */
70960 set_min_partial(s, ilog2(s->size));
70961 - s->refcount = 1;
70962 + atomic_set(&s->refcount, 1);
70963 #ifdef CONFIG_NUMA
70964 s->remote_node_defrag_ratio = 1000;
70965 #endif
70966 @@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struc
70967 void kmem_cache_destroy(struct kmem_cache *s)
70968 {
70969 down_write(&slub_lock);
70970 - s->refcount--;
70971 - if (!s->refcount) {
70972 + if (atomic_dec_and_test(&s->refcount)) {
70973 list_del(&s->list);
70974 up_write(&slub_lock);
70975 if (kmem_cache_close(s)) {
70976 @@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(cha
70977 __setup("slub_nomerge", setup_slub_nomerge);
70978
70979 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
70980 - const char *name, int size, gfp_t gfp_flags)
70981 + const char *name, int size, gfp_t gfp_flags, unsigned int flags)
70982 {
70983 - unsigned int flags = 0;
70984 -
70985 if (gfp_flags & SLUB_DMA)
70986 - flags = SLAB_CACHE_DMA;
70987 + flags |= SLAB_CACHE_DMA;
70988
70989 /*
70990 * This function is called with IRQs disabled during early-boot on
70991 @@ -2915,6 +2914,46 @@ void *__kmalloc_node(size_t size, gfp_t
70992 EXPORT_SYMBOL(__kmalloc_node);
70993 #endif
70994
70995 +void check_object_size(const void *ptr, unsigned long n, bool to)
70996 +{
70997 +
70998 +#ifdef CONFIG_PAX_USERCOPY
70999 + struct page *page;
71000 + struct kmem_cache *s = NULL;
71001 + unsigned long offset;
71002 +
71003 + if (!n)
71004 + return;
71005 +
71006 + if (ZERO_OR_NULL_PTR(ptr))
71007 + goto report;
71008 +
71009 + if (!virt_addr_valid(ptr))
71010 + return;
71011 +
71012 + page = get_object_page(ptr);
71013 +
71014 + if (!page) {
71015 + if (object_is_on_stack(ptr, n) == -1)
71016 + goto report;
71017 + return;
71018 + }
71019 +
71020 + s = page->slab;
71021 + if (!(s->flags & SLAB_USERCOPY))
71022 + goto report;
71023 +
71024 + offset = (ptr - page_address(page)) % s->size;
71025 + if (offset <= s->objsize && n <= s->objsize - offset)
71026 + return;
71027 +
71028 +report:
71029 + pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
71030 +#endif
71031 +
71032 +}
71033 +EXPORT_SYMBOL(check_object_size);
71034 +
71035 size_t ksize(const void *object)
71036 {
71037 struct page *page;
71038 @@ -3185,8 +3224,8 @@ void __init kmem_cache_init(void)
71039 * kmem_cache_open for slab_state == DOWN.
71040 */
71041 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
71042 - sizeof(struct kmem_cache_node), GFP_NOWAIT);
71043 - kmalloc_caches[0].refcount = -1;
71044 + sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
71045 + atomic_set(&kmalloc_caches[0].refcount, -1);
71046 caches++;
71047
71048 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
71049 @@ -3198,18 +3237,18 @@ void __init kmem_cache_init(void)
71050 /* Caches that are not of the two-to-the-power-of size */
71051 if (KMALLOC_MIN_SIZE <= 32) {
71052 create_kmalloc_cache(&kmalloc_caches[1],
71053 - "kmalloc-96", 96, GFP_NOWAIT);
71054 + "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
71055 caches++;
71056 }
71057 if (KMALLOC_MIN_SIZE <= 64) {
71058 create_kmalloc_cache(&kmalloc_caches[2],
71059 - "kmalloc-192", 192, GFP_NOWAIT);
71060 + "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
71061 caches++;
71062 }
71063
71064 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
71065 create_kmalloc_cache(&kmalloc_caches[i],
71066 - "kmalloc", 1 << i, GFP_NOWAIT);
71067 + "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
71068 caches++;
71069 }
71070
71071 @@ -3293,7 +3332,7 @@ static int slab_unmergeable(struct kmem_
71072 /*
71073 * We may have set a slab to be unmergeable during bootstrap.
71074 */
71075 - if (s->refcount < 0)
71076 + if (atomic_read(&s->refcount) < 0)
71077 return 1;
71078
71079 return 0;
71080 @@ -3353,7 +3392,7 @@ struct kmem_cache *kmem_cache_create(con
71081 if (s) {
71082 int cpu;
71083
71084 - s->refcount++;
71085 + atomic_inc(&s->refcount);
71086 /*
71087 * Adjust the object sizes so that we clear
71088 * the complete object on kzalloc.
71089 @@ -3372,7 +3411,7 @@ struct kmem_cache *kmem_cache_create(con
71090
71091 if (sysfs_slab_alias(s, name)) {
71092 down_write(&slub_lock);
71093 - s->refcount--;
71094 + atomic_dec(&s->refcount);
71095 up_write(&slub_lock);
71096 goto err;
71097 }
71098 @@ -4101,7 +4140,7 @@ SLAB_ATTR_RO(ctor);
71099
71100 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
71101 {
71102 - return sprintf(buf, "%d\n", s->refcount - 1);
71103 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
71104 }
71105 SLAB_ATTR_RO(aliases);
71106
71107 @@ -4503,7 +4542,7 @@ static void kmem_cache_release(struct ko
71108 kfree(s);
71109 }
71110
71111 -static struct sysfs_ops slab_sysfs_ops = {
71112 +static const struct sysfs_ops slab_sysfs_ops = {
71113 .show = slab_attr_show,
71114 .store = slab_attr_store,
71115 };
71116 @@ -4522,7 +4561,7 @@ static int uevent_filter(struct kset *ks
71117 return 0;
71118 }
71119
71120 -static struct kset_uevent_ops slab_uevent_ops = {
71121 +static const struct kset_uevent_ops slab_uevent_ops = {
71122 .filter = uevent_filter,
71123 };
71124
71125 @@ -4785,7 +4824,13 @@ static const struct file_operations proc
71126
71127 static int __init slab_proc_init(void)
71128 {
71129 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
71130 + mode_t gr_mode = S_IRUGO;
71131 +
71132 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
71133 + gr_mode = S_IRUSR;
71134 +#endif
71135 +
71136 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
71137 return 0;
71138 }
71139 module_init(slab_proc_init);
71140 diff -urNp linux-2.6.32.44/mm/swap.c linux-2.6.32.44/mm/swap.c
71141 --- linux-2.6.32.44/mm/swap.c 2011-03-27 14:31:47.000000000 -0400
71142 +++ linux-2.6.32.44/mm/swap.c 2011-07-09 09:15:19.000000000 -0400
71143 @@ -30,6 +30,7 @@
71144 #include <linux/notifier.h>
71145 #include <linux/backing-dev.h>
71146 #include <linux/memcontrol.h>
71147 +#include <linux/hugetlb.h>
71148
71149 #include "internal.h"
71150
71151 @@ -65,6 +66,8 @@ static void put_compound_page(struct pag
71152 compound_page_dtor *dtor;
71153
71154 dtor = get_compound_page_dtor(page);
71155 + if (!PageHuge(page))
71156 + BUG_ON(dtor != free_compound_page);
71157 (*dtor)(page);
71158 }
71159 }
71160 diff -urNp linux-2.6.32.44/mm/util.c linux-2.6.32.44/mm/util.c
71161 --- linux-2.6.32.44/mm/util.c 2011-03-27 14:31:47.000000000 -0400
71162 +++ linux-2.6.32.44/mm/util.c 2011-04-17 15:56:46.000000000 -0400
71163 @@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
71164 void arch_pick_mmap_layout(struct mm_struct *mm)
71165 {
71166 mm->mmap_base = TASK_UNMAPPED_BASE;
71167 +
71168 +#ifdef CONFIG_PAX_RANDMMAP
71169 + if (mm->pax_flags & MF_PAX_RANDMMAP)
71170 + mm->mmap_base += mm->delta_mmap;
71171 +#endif
71172 +
71173 mm->get_unmapped_area = arch_get_unmapped_area;
71174 mm->unmap_area = arch_unmap_area;
71175 }
71176 diff -urNp linux-2.6.32.44/mm/vmalloc.c linux-2.6.32.44/mm/vmalloc.c
71177 --- linux-2.6.32.44/mm/vmalloc.c 2011-03-27 14:31:47.000000000 -0400
71178 +++ linux-2.6.32.44/mm/vmalloc.c 2011-04-17 15:56:46.000000000 -0400
71179 @@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd,
71180
71181 pte = pte_offset_kernel(pmd, addr);
71182 do {
71183 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71184 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71185 +
71186 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71187 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
71188 + BUG_ON(!pte_exec(*pte));
71189 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
71190 + continue;
71191 + }
71192 +#endif
71193 +
71194 + {
71195 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71196 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71197 + }
71198 } while (pte++, addr += PAGE_SIZE, addr != end);
71199 }
71200
71201 @@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, un
71202 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
71203 {
71204 pte_t *pte;
71205 + int ret = -ENOMEM;
71206
71207 /*
71208 * nr is a running index into the array which helps higher level
71209 @@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, un
71210 pte = pte_alloc_kernel(pmd, addr);
71211 if (!pte)
71212 return -ENOMEM;
71213 +
71214 + pax_open_kernel();
71215 do {
71216 struct page *page = pages[*nr];
71217
71218 - if (WARN_ON(!pte_none(*pte)))
71219 - return -EBUSY;
71220 - if (WARN_ON(!page))
71221 - return -ENOMEM;
71222 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71223 + if (!(pgprot_val(prot) & _PAGE_NX))
71224 + BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
71225 + else
71226 +#endif
71227 +
71228 + if (WARN_ON(!pte_none(*pte))) {
71229 + ret = -EBUSY;
71230 + goto out;
71231 + }
71232 + if (WARN_ON(!page)) {
71233 + ret = -ENOMEM;
71234 + goto out;
71235 + }
71236 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
71237 (*nr)++;
71238 } while (pte++, addr += PAGE_SIZE, addr != end);
71239 - return 0;
71240 + ret = 0;
71241 +out:
71242 + pax_close_kernel();
71243 + return ret;
71244 }
71245
71246 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
71247 @@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void
71248 * and fall back on vmalloc() if that fails. Others
71249 * just put it in the vmalloc space.
71250 */
71251 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
71252 +#ifdef CONFIG_MODULES
71253 +#ifdef MODULES_VADDR
71254 unsigned long addr = (unsigned long)x;
71255 if (addr >= MODULES_VADDR && addr < MODULES_END)
71256 return 1;
71257 #endif
71258 +
71259 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71260 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
71261 + return 1;
71262 +#endif
71263 +
71264 +#endif
71265 +
71266 return is_vmalloc_addr(x);
71267 }
71268
71269 @@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void
71270
71271 if (!pgd_none(*pgd)) {
71272 pud_t *pud = pud_offset(pgd, addr);
71273 +#ifdef CONFIG_X86
71274 + if (!pud_large(*pud))
71275 +#endif
71276 if (!pud_none(*pud)) {
71277 pmd_t *pmd = pmd_offset(pud, addr);
71278 +#ifdef CONFIG_X86
71279 + if (!pmd_large(*pmd))
71280 +#endif
71281 if (!pmd_none(*pmd)) {
71282 pte_t *ptep, pte;
71283
71284 @@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vm
71285 struct rb_node *tmp;
71286
71287 while (*p) {
71288 - struct vmap_area *tmp;
71289 + struct vmap_area *varea;
71290
71291 parent = *p;
71292 - tmp = rb_entry(parent, struct vmap_area, rb_node);
71293 - if (va->va_start < tmp->va_end)
71294 + varea = rb_entry(parent, struct vmap_area, rb_node);
71295 + if (va->va_start < varea->va_end)
71296 p = &(*p)->rb_left;
71297 - else if (va->va_end > tmp->va_start)
71298 + else if (va->va_end > varea->va_start)
71299 p = &(*p)->rb_right;
71300 else
71301 BUG();
71302 @@ -1232,6 +1274,16 @@ static struct vm_struct *__get_vm_area_n
71303 struct vm_struct *area;
71304
71305 BUG_ON(in_interrupt());
71306 +
71307 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71308 + if (flags & VM_KERNEXEC) {
71309 + if (start != VMALLOC_START || end != VMALLOC_END)
71310 + return NULL;
71311 + start = (unsigned long)MODULES_EXEC_VADDR;
71312 + end = (unsigned long)MODULES_EXEC_END;
71313 + }
71314 +#endif
71315 +
71316 if (flags & VM_IOREMAP) {
71317 int bit = fls(size);
71318
71319 @@ -1457,6 +1509,11 @@ void *vmap(struct page **pages, unsigned
71320 if (count > totalram_pages)
71321 return NULL;
71322
71323 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71324 + if (!(pgprot_val(prot) & _PAGE_NX))
71325 + flags |= VM_KERNEXEC;
71326 +#endif
71327 +
71328 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
71329 __builtin_return_address(0));
71330 if (!area)
71331 @@ -1567,6 +1624,13 @@ static void *__vmalloc_node(unsigned lon
71332 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
71333 return NULL;
71334
71335 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71336 + if (!(pgprot_val(prot) & _PAGE_NX))
71337 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
71338 + node, gfp_mask, caller);
71339 + else
71340 +#endif
71341 +
71342 area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
71343 VMALLOC_END, node, gfp_mask, caller);
71344
71345 @@ -1585,6 +1649,7 @@ static void *__vmalloc_node(unsigned lon
71346 return addr;
71347 }
71348
71349 +#undef __vmalloc
71350 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
71351 {
71352 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
71353 @@ -1601,6 +1666,7 @@ EXPORT_SYMBOL(__vmalloc);
71354 * For tight control over page level allocator and protection flags
71355 * use __vmalloc() instead.
71356 */
71357 +#undef vmalloc
71358 void *vmalloc(unsigned long size)
71359 {
71360 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
71361 @@ -1615,6 +1681,7 @@ EXPORT_SYMBOL(vmalloc);
71362 * The resulting memory area is zeroed so it can be mapped to userspace
71363 * without leaking data.
71364 */
71365 +#undef vmalloc_user
71366 void *vmalloc_user(unsigned long size)
71367 {
71368 struct vm_struct *area;
71369 @@ -1642,6 +1709,7 @@ EXPORT_SYMBOL(vmalloc_user);
71370 * For tight control over page level allocator and protection flags
71371 * use __vmalloc() instead.
71372 */
71373 +#undef vmalloc_node
71374 void *vmalloc_node(unsigned long size, int node)
71375 {
71376 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
71377 @@ -1664,10 +1732,10 @@ EXPORT_SYMBOL(vmalloc_node);
71378 * For tight control over page level allocator and protection flags
71379 * use __vmalloc() instead.
71380 */
71381 -
71382 +#undef vmalloc_exec
71383 void *vmalloc_exec(unsigned long size)
71384 {
71385 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
71386 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
71387 -1, __builtin_return_address(0));
71388 }
71389
71390 @@ -1686,6 +1754,7 @@ void *vmalloc_exec(unsigned long size)
71391 * Allocate enough 32bit PA addressable pages to cover @size from the
71392 * page level allocator and map them into contiguous kernel virtual space.
71393 */
71394 +#undef vmalloc_32
71395 void *vmalloc_32(unsigned long size)
71396 {
71397 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
71398 @@ -1700,6 +1769,7 @@ EXPORT_SYMBOL(vmalloc_32);
71399 * The resulting memory area is 32bit addressable and zeroed so it can be
71400 * mapped to userspace without leaking data.
71401 */
71402 +#undef vmalloc_32_user
71403 void *vmalloc_32_user(unsigned long size)
71404 {
71405 struct vm_struct *area;
71406 @@ -1964,6 +2034,8 @@ int remap_vmalloc_range(struct vm_area_s
71407 unsigned long uaddr = vma->vm_start;
71408 unsigned long usize = vma->vm_end - vma->vm_start;
71409
71410 + BUG_ON(vma->vm_mirror);
71411 +
71412 if ((PAGE_SIZE-1) & (unsigned long)addr)
71413 return -EINVAL;
71414
71415 diff -urNp linux-2.6.32.44/mm/vmstat.c linux-2.6.32.44/mm/vmstat.c
71416 --- linux-2.6.32.44/mm/vmstat.c 2011-03-27 14:31:47.000000000 -0400
71417 +++ linux-2.6.32.44/mm/vmstat.c 2011-04-17 15:56:46.000000000 -0400
71418 @@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
71419 *
71420 * vm_stat contains the global counters
71421 */
71422 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
71423 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
71424 EXPORT_SYMBOL(vm_stat);
71425
71426 #ifdef CONFIG_SMP
71427 @@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
71428 v = p->vm_stat_diff[i];
71429 p->vm_stat_diff[i] = 0;
71430 local_irq_restore(flags);
71431 - atomic_long_add(v, &zone->vm_stat[i]);
71432 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
71433 global_diff[i] += v;
71434 #ifdef CONFIG_NUMA
71435 /* 3 seconds idle till flush */
71436 @@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
71437
71438 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
71439 if (global_diff[i])
71440 - atomic_long_add(global_diff[i], &vm_stat[i]);
71441 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
71442 }
71443
71444 #endif
71445 @@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
71446 start_cpu_timer(cpu);
71447 #endif
71448 #ifdef CONFIG_PROC_FS
71449 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
71450 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
71451 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
71452 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
71453 + {
71454 + mode_t gr_mode = S_IRUGO;
71455 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
71456 + gr_mode = S_IRUSR;
71457 +#endif
71458 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
71459 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
71460 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
71461 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
71462 +#else
71463 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
71464 +#endif
71465 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
71466 + }
71467 #endif
71468 return 0;
71469 }
71470 diff -urNp linux-2.6.32.44/net/8021q/vlan.c linux-2.6.32.44/net/8021q/vlan.c
71471 --- linux-2.6.32.44/net/8021q/vlan.c 2011-03-27 14:31:47.000000000 -0400
71472 +++ linux-2.6.32.44/net/8021q/vlan.c 2011-04-17 15:56:46.000000000 -0400
71473 @@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net
71474 err = -EPERM;
71475 if (!capable(CAP_NET_ADMIN))
71476 break;
71477 - if ((args.u.name_type >= 0) &&
71478 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
71479 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
71480 struct vlan_net *vn;
71481
71482 vn = net_generic(net, vlan_net_id);
71483 diff -urNp linux-2.6.32.44/net/atm/atm_misc.c linux-2.6.32.44/net/atm/atm_misc.c
71484 --- linux-2.6.32.44/net/atm/atm_misc.c 2011-03-27 14:31:47.000000000 -0400
71485 +++ linux-2.6.32.44/net/atm/atm_misc.c 2011-04-17 15:56:46.000000000 -0400
71486 @@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int t
71487 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
71488 return 1;
71489 atm_return(vcc,truesize);
71490 - atomic_inc(&vcc->stats->rx_drop);
71491 + atomic_inc_unchecked(&vcc->stats->rx_drop);
71492 return 0;
71493 }
71494
71495 @@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct
71496 }
71497 }
71498 atm_return(vcc,guess);
71499 - atomic_inc(&vcc->stats->rx_drop);
71500 + atomic_inc_unchecked(&vcc->stats->rx_drop);
71501 return NULL;
71502 }
71503
71504 @@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafpr
71505
71506 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
71507 {
71508 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71509 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71510 __SONET_ITEMS
71511 #undef __HANDLE_ITEM
71512 }
71513 @@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_sta
71514
71515 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
71516 {
71517 -#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
71518 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
71519 __SONET_ITEMS
71520 #undef __HANDLE_ITEM
71521 }
71522 diff -urNp linux-2.6.32.44/net/atm/lec.h linux-2.6.32.44/net/atm/lec.h
71523 --- linux-2.6.32.44/net/atm/lec.h 2011-03-27 14:31:47.000000000 -0400
71524 +++ linux-2.6.32.44/net/atm/lec.h 2011-08-05 20:33:55.000000000 -0400
71525 @@ -48,7 +48,7 @@ struct lane2_ops {
71526 const u8 *tlvs, u32 sizeoftlvs);
71527 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
71528 const u8 *tlvs, u32 sizeoftlvs);
71529 -};
71530 +} __no_const;
71531
71532 /*
71533 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
71534 diff -urNp linux-2.6.32.44/net/atm/mpc.c linux-2.6.32.44/net/atm/mpc.c
71535 --- linux-2.6.32.44/net/atm/mpc.c 2011-03-27 14:31:47.000000000 -0400
71536 +++ linux-2.6.32.44/net/atm/mpc.c 2011-08-05 20:33:55.000000000 -0400
71537 @@ -291,8 +291,8 @@ static void start_mpc(struct mpoa_client
71538 printk("mpoa: (%s) start_mpc not starting\n", dev->name);
71539 else {
71540 mpc->old_ops = dev->netdev_ops;
71541 - mpc->new_ops = *mpc->old_ops;
71542 - mpc->new_ops.ndo_start_xmit = mpc_send_packet;
71543 + memcpy((void *)&mpc->new_ops, mpc->old_ops, sizeof(mpc->new_ops));
71544 + *(void **)&mpc->new_ops.ndo_start_xmit = mpc_send_packet;
71545 dev->netdev_ops = &mpc->new_ops;
71546 }
71547 }
71548 diff -urNp linux-2.6.32.44/net/atm/mpoa_caches.c linux-2.6.32.44/net/atm/mpoa_caches.c
71549 --- linux-2.6.32.44/net/atm/mpoa_caches.c 2011-03-27 14:31:47.000000000 -0400
71550 +++ linux-2.6.32.44/net/atm/mpoa_caches.c 2011-05-16 21:46:57.000000000 -0400
71551 @@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_cl
71552 struct timeval now;
71553 struct k_message msg;
71554
71555 + pax_track_stack();
71556 +
71557 do_gettimeofday(&now);
71558
71559 write_lock_irq(&client->egress_lock);
71560 diff -urNp linux-2.6.32.44/net/atm/proc.c linux-2.6.32.44/net/atm/proc.c
71561 --- linux-2.6.32.44/net/atm/proc.c 2011-03-27 14:31:47.000000000 -0400
71562 +++ linux-2.6.32.44/net/atm/proc.c 2011-04-17 15:56:46.000000000 -0400
71563 @@ -43,9 +43,9 @@ static void add_stats(struct seq_file *s
71564 const struct k_atm_aal_stats *stats)
71565 {
71566 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
71567 - atomic_read(&stats->tx),atomic_read(&stats->tx_err),
71568 - atomic_read(&stats->rx),atomic_read(&stats->rx_err),
71569 - atomic_read(&stats->rx_drop));
71570 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
71571 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
71572 + atomic_read_unchecked(&stats->rx_drop));
71573 }
71574
71575 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
71576 @@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *se
71577 {
71578 struct sock *sk = sk_atm(vcc);
71579
71580 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71581 + seq_printf(seq, "%p ", NULL);
71582 +#else
71583 seq_printf(seq, "%p ", vcc);
71584 +#endif
71585 +
71586 if (!vcc->dev)
71587 seq_printf(seq, "Unassigned ");
71588 else
71589 @@ -214,7 +219,11 @@ static void svc_info(struct seq_file *se
71590 {
71591 if (!vcc->dev)
71592 seq_printf(seq, sizeof(void *) == 4 ?
71593 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71594 + "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
71595 +#else
71596 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
71597 +#endif
71598 else
71599 seq_printf(seq, "%3d %3d %5d ",
71600 vcc->dev->number, vcc->vpi, vcc->vci);
71601 diff -urNp linux-2.6.32.44/net/atm/resources.c linux-2.6.32.44/net/atm/resources.c
71602 --- linux-2.6.32.44/net/atm/resources.c 2011-03-27 14:31:47.000000000 -0400
71603 +++ linux-2.6.32.44/net/atm/resources.c 2011-04-17 15:56:46.000000000 -0400
71604 @@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *
71605 static void copy_aal_stats(struct k_atm_aal_stats *from,
71606 struct atm_aal_stats *to)
71607 {
71608 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71609 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71610 __AAL_STAT_ITEMS
71611 #undef __HANDLE_ITEM
71612 }
71613 @@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_
71614 static void subtract_aal_stats(struct k_atm_aal_stats *from,
71615 struct atm_aal_stats *to)
71616 {
71617 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71618 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
71619 __AAL_STAT_ITEMS
71620 #undef __HANDLE_ITEM
71621 }
71622 diff -urNp linux-2.6.32.44/net/bluetooth/l2cap.c linux-2.6.32.44/net/bluetooth/l2cap.c
71623 --- linux-2.6.32.44/net/bluetooth/l2cap.c 2011-03-27 14:31:47.000000000 -0400
71624 +++ linux-2.6.32.44/net/bluetooth/l2cap.c 2011-06-25 14:36:21.000000000 -0400
71625 @@ -1885,7 +1885,7 @@ static int l2cap_sock_getsockopt_old(str
71626 err = -ENOTCONN;
71627 break;
71628 }
71629 -
71630 + memset(&cinfo, 0, sizeof(cinfo));
71631 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
71632 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
71633
71634 @@ -2719,7 +2719,7 @@ static inline int l2cap_config_req(struc
71635
71636 /* Reject if config buffer is too small. */
71637 len = cmd_len - sizeof(*req);
71638 - if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
71639 + if (len < 0 || l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
71640 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
71641 l2cap_build_conf_rsp(sk, rsp,
71642 L2CAP_CONF_REJECT, flags), rsp);
71643 diff -urNp linux-2.6.32.44/net/bluetooth/rfcomm/sock.c linux-2.6.32.44/net/bluetooth/rfcomm/sock.c
71644 --- linux-2.6.32.44/net/bluetooth/rfcomm/sock.c 2011-03-27 14:31:47.000000000 -0400
71645 +++ linux-2.6.32.44/net/bluetooth/rfcomm/sock.c 2011-06-12 06:35:00.000000000 -0400
71646 @@ -878,6 +878,7 @@ static int rfcomm_sock_getsockopt_old(st
71647
71648 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
71649
71650 + memset(&cinfo, 0, sizeof(cinfo));
71651 cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle;
71652 memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3);
71653
71654 diff -urNp linux-2.6.32.44/net/bridge/br_private.h linux-2.6.32.44/net/bridge/br_private.h
71655 --- linux-2.6.32.44/net/bridge/br_private.h 2011-08-09 18:35:30.000000000 -0400
71656 +++ linux-2.6.32.44/net/bridge/br_private.h 2011-08-09 18:34:01.000000000 -0400
71657 @@ -255,7 +255,7 @@ extern void br_ifinfo_notify(int event,
71658
71659 #ifdef CONFIG_SYSFS
71660 /* br_sysfs_if.c */
71661 -extern struct sysfs_ops brport_sysfs_ops;
71662 +extern const struct sysfs_ops brport_sysfs_ops;
71663 extern int br_sysfs_addif(struct net_bridge_port *p);
71664
71665 /* br_sysfs_br.c */
71666 diff -urNp linux-2.6.32.44/net/bridge/br_stp_if.c linux-2.6.32.44/net/bridge/br_stp_if.c
71667 --- linux-2.6.32.44/net/bridge/br_stp_if.c 2011-03-27 14:31:47.000000000 -0400
71668 +++ linux-2.6.32.44/net/bridge/br_stp_if.c 2011-04-17 15:56:46.000000000 -0400
71669 @@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridg
71670 char *envp[] = { NULL };
71671
71672 if (br->stp_enabled == BR_USER_STP) {
71673 - r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
71674 + r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
71675 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
71676 br->dev->name, r);
71677
71678 diff -urNp linux-2.6.32.44/net/bridge/br_sysfs_if.c linux-2.6.32.44/net/bridge/br_sysfs_if.c
71679 --- linux-2.6.32.44/net/bridge/br_sysfs_if.c 2011-03-27 14:31:47.000000000 -0400
71680 +++ linux-2.6.32.44/net/bridge/br_sysfs_if.c 2011-04-17 15:56:46.000000000 -0400
71681 @@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobje
71682 return ret;
71683 }
71684
71685 -struct sysfs_ops brport_sysfs_ops = {
71686 +const struct sysfs_ops brport_sysfs_ops = {
71687 .show = brport_show,
71688 .store = brport_store,
71689 };
71690 diff -urNp linux-2.6.32.44/net/bridge/netfilter/ebtables.c linux-2.6.32.44/net/bridge/netfilter/ebtables.c
71691 --- linux-2.6.32.44/net/bridge/netfilter/ebtables.c 2011-04-17 17:00:52.000000000 -0400
71692 +++ linux-2.6.32.44/net/bridge/netfilter/ebtables.c 2011-05-16 21:46:57.000000000 -0400
71693 @@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struc
71694 unsigned int entries_size, nentries;
71695 char *entries;
71696
71697 + pax_track_stack();
71698 +
71699 if (cmd == EBT_SO_GET_ENTRIES) {
71700 entries_size = t->private->entries_size;
71701 nentries = t->private->nentries;
71702 diff -urNp linux-2.6.32.44/net/can/bcm.c linux-2.6.32.44/net/can/bcm.c
71703 --- linux-2.6.32.44/net/can/bcm.c 2011-05-10 22:12:01.000000000 -0400
71704 +++ linux-2.6.32.44/net/can/bcm.c 2011-05-10 22:12:34.000000000 -0400
71705 @@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file
71706 struct bcm_sock *bo = bcm_sk(sk);
71707 struct bcm_op *op;
71708
71709 +#ifdef CONFIG_GRKERNSEC_HIDESYM
71710 + seq_printf(m, ">>> socket %p", NULL);
71711 + seq_printf(m, " / sk %p", NULL);
71712 + seq_printf(m, " / bo %p", NULL);
71713 +#else
71714 seq_printf(m, ">>> socket %p", sk->sk_socket);
71715 seq_printf(m, " / sk %p", sk);
71716 seq_printf(m, " / bo %p", bo);
71717 +#endif
71718 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
71719 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
71720 seq_printf(m, " <<<\n");
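The CONFIG_GRKERNSEC_HIDESYM block above is the first of many in this patch that sanitize %p output in /proc and seq_file handlers: with the option enabled, pointers that would disclose kernel heap or stack addresses are printed as NULL while the rest of the record keeps its format. A minimal sketch of the pattern, with illustrative names rather than code taken from the patch:

#include <stdio.h>

#define CONFIG_GRKERNSEC_HIDESYM 1          /* assume the option is enabled */

static void show_sock(const void *sk, unsigned long dropped)
{
#ifdef CONFIG_GRKERNSEC_HIDESYM
	printf(">>> socket %p / dropped %lu <<<\n", (void *)NULL, dropped);
#else
	printf(">>> socket %p / dropped %lu <<<\n", sk, dropped);
#endif
}

int main(void)
{
	int dummy;
	show_sock(&dummy, 0);                   /* prints a null pointer, never &dummy */
	return 0;
}

Printing a literal NULL rather than dropping the field keeps the record layout stable for existing /proc parsers.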
71721 diff -urNp linux-2.6.32.44/net/core/dev.c linux-2.6.32.44/net/core/dev.c
71722 --- linux-2.6.32.44/net/core/dev.c 2011-04-17 17:00:52.000000000 -0400
71723 +++ linux-2.6.32.44/net/core/dev.c 2011-08-05 20:33:55.000000000 -0400
71724 @@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const cha
71725 if (no_module && capable(CAP_NET_ADMIN))
71726 no_module = request_module("netdev-%s", name);
71727 if (no_module && capable(CAP_SYS_MODULE)) {
71728 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
71729 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
71730 +#else
71731 if (!request_module("%s", name))
71732 pr_err("Loading kernel module for a network device "
71733 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
71734 "instead\n", name);
71735 +#endif
71736 }
71737 }
71738 EXPORT_SYMBOL(dev_load);
71739 @@ -1654,7 +1658,7 @@ static inline int illegal_highdma(struct
71740
71741 struct dev_gso_cb {
71742 void (*destructor)(struct sk_buff *skb);
71743 -};
71744 +} __no_const;
71745
71746 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
71747
71748 @@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
71749 }
71750 EXPORT_SYMBOL(netif_rx_ni);
71751
71752 -static void net_tx_action(struct softirq_action *h)
71753 +static void net_tx_action(void)
71754 {
71755 struct softnet_data *sd = &__get_cpu_var(softnet_data);
71756
71757 @@ -2826,7 +2830,7 @@ void netif_napi_del(struct napi_struct *
71758 EXPORT_SYMBOL(netif_napi_del);
71759
71760
71761 -static void net_rx_action(struct softirq_action *h)
71762 +static void net_rx_action(void)
71763 {
71764 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
71765 unsigned long time_limit = jiffies + 2;
71766 diff -urNp linux-2.6.32.44/net/core/flow.c linux-2.6.32.44/net/core/flow.c
71767 --- linux-2.6.32.44/net/core/flow.c 2011-03-27 14:31:47.000000000 -0400
71768 +++ linux-2.6.32.44/net/core/flow.c 2011-05-04 17:56:20.000000000 -0400
71769 @@ -35,11 +35,11 @@ struct flow_cache_entry {
71770 atomic_t *object_ref;
71771 };
71772
71773 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
71774 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
71775
71776 static u32 flow_hash_shift;
71777 #define flow_hash_size (1 << flow_hash_shift)
71778 -static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
71779 +static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
71780
71781 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
71782
71783 @@ -52,7 +52,7 @@ struct flow_percpu_info {
71784 u32 hash_rnd;
71785 int count;
71786 };
71787 -static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
71788 +static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
71789
71790 #define flow_hash_rnd_recalc(cpu) \
71791 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
71792 @@ -69,7 +69,7 @@ struct flow_flush_info {
71793 atomic_t cpuleft;
71794 struct completion completion;
71795 };
71796 -static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
71797 +static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
71798
71799 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
71800
71801 @@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net,
71802 if (fle->family == family &&
71803 fle->dir == dir &&
71804 flow_key_compare(key, &fle->key) == 0) {
71805 - if (fle->genid == atomic_read(&flow_cache_genid)) {
71806 + if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
71807 void *ret = fle->object;
71808
71809 if (ret)
71810 @@ -228,7 +228,7 @@ nocache:
71811 err = resolver(net, key, family, dir, &obj, &obj_ref);
71812
71813 if (fle && !err) {
71814 - fle->genid = atomic_read(&flow_cache_genid);
71815 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
71816
71817 if (fle->object)
71818 atomic_dec(fle->object_ref);
71819 @@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(uns
71820
71821 fle = flow_table(cpu)[i];
71822 for (; fle; fle = fle->next) {
71823 - unsigned genid = atomic_read(&flow_cache_genid);
71824 + unsigned genid = atomic_read_unchecked(&flow_cache_genid);
71825
71826 if (!fle->object || fle->genid == genid)
71827 continue;
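The flow.c hunks above convert flow_cache_genid from atomic_t to atomic_unchecked_t. Under the PaX REFCOUNT feature the plain atomic type traps on overflow, which is the right behaviour for reference counts but not for a generation counter that is bumped indefinitely and only ever compared for equality; the _unchecked variant opts such counters out. A tiny sketch of why equality-based generation checks survive wraparound (plain unsigned arithmetic, userspace, assumptions noted in the comments):

#include <stdio.h>
#include <limits.h>

int main(void)
{
	unsigned int genid  = UINT_MAX;     /* global generation, about to wrap */
	unsigned int cached = genid;        /* genid recorded in a cache entry */

	genid += 1;                         /* wraps to 0, well defined for unsigned */

	/* The lookup only asks "is the entry still current?", so the wrap simply
	   invalidates stale entries instead of corrupting anything. */
	printf("entry still valid: %s\n", cached == genid ? "yes" : "no");
	return 0;
}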
71828 diff -urNp linux-2.6.32.44/net/core/Makefile linux-2.6.32.44/net/core/Makefile
71829 --- linux-2.6.32.44/net/core/Makefile 2011-03-27 14:31:47.000000000 -0400
71830 +++ linux-2.6.32.44/net/core/Makefile 2011-08-07 19:48:09.000000000 -0400
71831 @@ -3,7 +3,7 @@
71832 #
71833
71834 obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \
71835 - gen_stats.o gen_estimator.o net_namespace.o
71836 + gen_stats.o gen_estimator.o net_namespace.o secure_seq.o
71837
71838 obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
71839 obj-$(CONFIG_HAS_DMA) += skb_dma_map.o
71840 diff -urNp linux-2.6.32.44/net/core/rtnetlink.c linux-2.6.32.44/net/core/rtnetlink.c
71841 --- linux-2.6.32.44/net/core/rtnetlink.c 2011-03-27 14:31:47.000000000 -0400
71842 +++ linux-2.6.32.44/net/core/rtnetlink.c 2011-08-05 20:33:55.000000000 -0400
71843 @@ -57,7 +57,7 @@ struct rtnl_link
71844 {
71845 rtnl_doit_func doit;
71846 rtnl_dumpit_func dumpit;
71847 -};
71848 +} __no_const;
71849
71850 static DEFINE_MUTEX(rtnl_mutex);
71851
71852 diff -urNp linux-2.6.32.44/net/core/secure_seq.c linux-2.6.32.44/net/core/secure_seq.c
71853 --- linux-2.6.32.44/net/core/secure_seq.c 1969-12-31 19:00:00.000000000 -0500
71854 +++ linux-2.6.32.44/net/core/secure_seq.c 2011-08-07 19:48:09.000000000 -0400
71855 @@ -0,0 +1,183 @@
71856 +#include <linux/kernel.h>
71857 +#include <linux/init.h>
71858 +#include <linux/cryptohash.h>
71859 +#include <linux/module.h>
71860 +#include <linux/cache.h>
71861 +#include <linux/random.h>
71862 +#include <linux/hrtimer.h>
71863 +#include <linux/ktime.h>
71864 +#include <linux/string.h>
71865 +
71866 +#include <net/secure_seq.h>
71867 +
71868 +static u32 net_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
71869 +
71870 +static int __init net_secret_init(void)
71871 +{
71872 + get_random_bytes(net_secret, sizeof(net_secret));
71873 + return 0;
71874 +}
71875 +late_initcall(net_secret_init);
71876 +
71877 +static u32 seq_scale(u32 seq)
71878 +{
71879 + /*
71880 + * As close as possible to RFC 793, which
71881 + * suggests using a 250 kHz clock.
71882 + * Further reading shows this assumes 2 Mb/s networks.
71883 + * For 10 Mb/s Ethernet, a 1 MHz clock is appropriate.
71884 + * For 10 Gb/s Ethernet, a 1 GHz clock should be ok, but
71885 + * we also need to limit the resolution so that the u32 seq
71886 + * overlaps less than one time per MSL (2 minutes).
71887 + * Choosing a clock of 64 ns period is OK. (period of 274 s)
71888 + */
71889 + return seq + (ktime_to_ns(ktime_get_real()) >> 6);
71890 +}
71891 +
71892 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
71893 +__u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
71894 + __be16 sport, __be16 dport)
71895 +{
71896 + u32 secret[MD5_MESSAGE_BYTES / 4];
71897 + u32 hash[MD5_DIGEST_WORDS];
71898 + u32 i;
71899 +
71900 + memcpy(hash, saddr, 16);
71901 + for (i = 0; i < 4; i++)
71902 + secret[i] = net_secret[i] + daddr[i];
71903 + secret[4] = net_secret[4] +
71904 + (((__force u16)sport << 16) + (__force u16)dport);
71905 + for (i = 5; i < MD5_MESSAGE_BYTES / 4; i++)
71906 + secret[i] = net_secret[i];
71907 +
71908 + md5_transform(hash, secret);
71909 +
71910 + return seq_scale(hash[0]);
71911 +}
71912 +EXPORT_SYMBOL(secure_tcpv6_sequence_number);
71913 +
71914 +u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
71915 + __be16 dport)
71916 +{
71917 + u32 secret[MD5_MESSAGE_BYTES / 4];
71918 + u32 hash[MD5_DIGEST_WORDS];
71919 + u32 i;
71920 +
71921 + memcpy(hash, saddr, 16);
71922 + for (i = 0; i < 4; i++)
71923 + secret[i] = net_secret[i] + (__force u32) daddr[i];
71924 + secret[4] = net_secret[4] + (__force u32)dport;
71925 + for (i = 5; i < MD5_MESSAGE_BYTES / 4; i++)
71926 + secret[i] = net_secret[i];
71927 +
71928 + md5_transform(hash, secret);
71929 + return hash[0];
71930 +}
71931 +#endif
71932 +
71933 +#ifdef CONFIG_INET
71934 +__u32 secure_ip_id(__be32 daddr)
71935 +{
71936 + u32 hash[MD5_DIGEST_WORDS];
71937 +
71938 + hash[0] = (__force __u32) daddr;
71939 + hash[1] = net_secret[13];
71940 + hash[2] = net_secret[14];
71941 + hash[3] = net_secret[15];
71942 +
71943 + md5_transform(hash, net_secret);
71944 +
71945 + return hash[0];
71946 +}
71947 +
71948 +__u32 secure_ipv6_id(const __be32 daddr[4])
71949 +{
71950 + __u32 hash[4];
71951 +
71952 + memcpy(hash, daddr, 16);
71953 + md5_transform(hash, net_secret);
71954 +
71955 + return hash[0];
71956 +}
71957 +
71958 +__u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
71959 + __be16 sport, __be16 dport)
71960 +{
71961 + u32 hash[MD5_DIGEST_WORDS];
71962 +
71963 + hash[0] = (__force u32)saddr;
71964 + hash[1] = (__force u32)daddr;
71965 + hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
71966 + hash[3] = net_secret[15];
71967 +
71968 + md5_transform(hash, net_secret);
71969 +
71970 + return seq_scale(hash[0]);
71971 +}
71972 +
71973 +u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
71974 +{
71975 + u32 hash[MD5_DIGEST_WORDS];
71976 +
71977 + hash[0] = (__force u32)saddr;
71978 + hash[1] = (__force u32)daddr;
71979 + hash[2] = (__force u32)dport ^ net_secret[14];
71980 + hash[3] = net_secret[15];
71981 +
71982 + md5_transform(hash, net_secret);
71983 +
71984 + return hash[0];
71985 +}
71986 +EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral);
71987 +#endif
71988 +
71989 +#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
71990 +u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
71991 + __be16 sport, __be16 dport)
71992 +{
71993 + u32 hash[MD5_DIGEST_WORDS];
71994 + u64 seq;
71995 +
71996 + hash[0] = (__force u32)saddr;
71997 + hash[1] = (__force u32)daddr;
71998 + hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
71999 + hash[3] = net_secret[15];
72000 +
72001 + md5_transform(hash, net_secret);
72002 +
72003 + seq = hash[0] | (((u64)hash[1]) << 32);
72004 + seq += ktime_to_ns(ktime_get_real());
72005 + seq &= (1ull << 48) - 1;
72006 +
72007 + return seq;
72008 +}
72009 +EXPORT_SYMBOL(secure_dccp_sequence_number);
72010 +
72011 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
72012 +u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
72013 + __be16 sport, __be16 dport)
72014 +{
72015 + u32 secret[MD5_MESSAGE_BYTES / 4];
72016 + u32 hash[MD5_DIGEST_WORDS];
72017 + u64 seq;
72018 + u32 i;
72019 +
72020 + memcpy(hash, saddr, 16);
72021 + for (i = 0; i < 4; i++)
72022 + secret[i] = net_secret[i] + daddr[i];
72023 + secret[4] = net_secret[4] +
72024 + (((__force u16)sport << 16) + (__force u16)dport);
72025 + for (i = 5; i < MD5_MESSAGE_BYTES / 4; i++)
72026 + secret[i] = net_secret[i];
72027 +
72028 + md5_transform(hash, secret);
72029 +
72030 + seq = hash[0] | (((u64)hash[1]) << 32);
72031 + seq += ktime_to_ns(ktime_get_real());
72032 + seq &= (1ull << 48) - 1;
72033 +
72034 + return seq;
72035 +}
72036 +EXPORT_SYMBOL(secure_dccpv6_sequence_number);
72037 +#endif
72038 +#endif
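The comment in seq_scale() above argues that a 64 ns tick keeps the 32-bit sequence space from wrapping within one MSL. The arithmetic checks out: 2^32 ticks x 64 ns is roughly 274.9 s, comfortably above the 2-minute MSL, which is the "(period of 274 s)" figure quoted in the comment. A few lines to reproduce the number (userspace, not part of the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t tick_ns = 64;                  /* ktime_to_ns(...) >> 6 advances in 64 ns steps */
	uint64_t ticks   = UINT64_C(1) << 32;   /* size of the u32 sequence space */
	double wrap_s    = (double)(ticks * tick_ns) / 1e9;

	printf("sequence space wraps every %.1f s\n", wrap_s);   /* ~274.9 s */
	return 0;
}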
72039 diff -urNp linux-2.6.32.44/net/core/skbuff.c linux-2.6.32.44/net/core/skbuff.c
72040 --- linux-2.6.32.44/net/core/skbuff.c 2011-03-27 14:31:47.000000000 -0400
72041 +++ linux-2.6.32.44/net/core/skbuff.c 2011-05-16 21:46:57.000000000 -0400
72042 @@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb,
72043 struct sk_buff *frag_iter;
72044 struct sock *sk = skb->sk;
72045
72046 + pax_track_stack();
72047 +
72048 /*
72049 * __skb_splice_bits() only fails if the output has no room left,
72050 * so no point in going over the frag_list for the error case.
72051 diff -urNp linux-2.6.32.44/net/core/sock.c linux-2.6.32.44/net/core/sock.c
72052 --- linux-2.6.32.44/net/core/sock.c 2011-03-27 14:31:47.000000000 -0400
72053 +++ linux-2.6.32.44/net/core/sock.c 2011-05-04 17:56:20.000000000 -0400
72054 @@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock,
72055 break;
72056
72057 case SO_PEERCRED:
72058 + {
72059 + struct ucred peercred;
72060 if (len > sizeof(sk->sk_peercred))
72061 len = sizeof(sk->sk_peercred);
72062 - if (copy_to_user(optval, &sk->sk_peercred, len))
72063 + peercred = sk->sk_peercred;
72064 + if (copy_to_user(optval, &peercred, len))
72065 return -EFAULT;
72066 goto lenout;
72067 + }
72068
72069 case SO_PEERNAME:
72070 {
72071 @@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock,
72072 */
72073 smp_wmb();
72074 atomic_set(&sk->sk_refcnt, 1);
72075 - atomic_set(&sk->sk_drops, 0);
72076 + atomic_set_unchecked(&sk->sk_drops, 0);
72077 }
72078 EXPORT_SYMBOL(sock_init_data);
72079
72080 diff -urNp linux-2.6.32.44/net/dccp/ipv4.c linux-2.6.32.44/net/dccp/ipv4.c
72081 --- linux-2.6.32.44/net/dccp/ipv4.c 2011-03-27 14:31:47.000000000 -0400
72082 +++ linux-2.6.32.44/net/dccp/ipv4.c 2011-08-07 19:48:09.000000000 -0400
72083 @@ -25,6 +25,7 @@
72084 #include <net/timewait_sock.h>
72085 #include <net/tcp_states.h>
72086 #include <net/xfrm.h>
72087 +#include <net/secure_seq.h>
72088
72089 #include "ackvec.h"
72090 #include "ccid.h"
72091 diff -urNp linux-2.6.32.44/net/dccp/ipv6.c linux-2.6.32.44/net/dccp/ipv6.c
72092 --- linux-2.6.32.44/net/dccp/ipv6.c 2011-03-27 14:31:47.000000000 -0400
72093 +++ linux-2.6.32.44/net/dccp/ipv6.c 2011-08-07 19:48:09.000000000 -0400
72094 @@ -28,6 +28,7 @@
72095 #include <net/transp_v6.h>
72096 #include <net/ip6_checksum.h>
72097 #include <net/xfrm.h>
72098 +#include <net/secure_seq.h>
72099
72100 #include "dccp.h"
72101 #include "ipv6.h"
72102 @@ -69,13 +70,7 @@ static inline void dccp_v6_send_check(st
72103 dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &np->daddr);
72104 }
72105
72106 -static inline __u32 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
72107 - __be16 sport, __be16 dport )
72108 -{
72109 - return secure_tcpv6_sequence_number(saddr, daddr, sport, dport);
72110 -}
72111 -
72112 -static inline __u32 dccp_v6_init_sequence(struct sk_buff *skb)
72113 +static inline __u64 dccp_v6_init_sequence(struct sk_buff *skb)
72114 {
72115 return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
72116 ipv6_hdr(skb)->saddr.s6_addr32,
72117 diff -urNp linux-2.6.32.44/net/decnet/sysctl_net_decnet.c linux-2.6.32.44/net/decnet/sysctl_net_decnet.c
72118 --- linux-2.6.32.44/net/decnet/sysctl_net_decnet.c 2011-03-27 14:31:47.000000000 -0400
72119 +++ linux-2.6.32.44/net/decnet/sysctl_net_decnet.c 2011-04-17 15:56:46.000000000 -0400
72120 @@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_t
72121
72122 if (len > *lenp) len = *lenp;
72123
72124 - if (copy_to_user(buffer, addr, len))
72125 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
72126 return -EFAULT;
72127
72128 *lenp = len;
72129 @@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table
72130
72131 if (len > *lenp) len = *lenp;
72132
72133 - if (copy_to_user(buffer, devname, len))
72134 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
72135 return -EFAULT;
72136
72137 *lenp = len;
72138 diff -urNp linux-2.6.32.44/net/econet/Kconfig linux-2.6.32.44/net/econet/Kconfig
72139 --- linux-2.6.32.44/net/econet/Kconfig 2011-03-27 14:31:47.000000000 -0400
72140 +++ linux-2.6.32.44/net/econet/Kconfig 2011-04-17 15:56:46.000000000 -0400
72141 @@ -4,7 +4,7 @@
72142
72143 config ECONET
72144 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
72145 - depends on EXPERIMENTAL && INET
72146 + depends on EXPERIMENTAL && INET && BROKEN
72147 ---help---
72148 Econet is a fairly old and slow networking protocol mainly used by
72149 Acorn computers to access file and print servers. It uses native
72150 diff -urNp linux-2.6.32.44/net/ieee802154/dgram.c linux-2.6.32.44/net/ieee802154/dgram.c
72151 --- linux-2.6.32.44/net/ieee802154/dgram.c 2011-03-27 14:31:47.000000000 -0400
72152 +++ linux-2.6.32.44/net/ieee802154/dgram.c 2011-05-04 17:56:28.000000000 -0400
72153 @@ -318,7 +318,7 @@ out:
72154 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
72155 {
72156 if (sock_queue_rcv_skb(sk, skb) < 0) {
72157 - atomic_inc(&sk->sk_drops);
72158 + atomic_inc_unchecked(&sk->sk_drops);
72159 kfree_skb(skb);
72160 return NET_RX_DROP;
72161 }
72162 diff -urNp linux-2.6.32.44/net/ieee802154/raw.c linux-2.6.32.44/net/ieee802154/raw.c
72163 --- linux-2.6.32.44/net/ieee802154/raw.c 2011-03-27 14:31:47.000000000 -0400
72164 +++ linux-2.6.32.44/net/ieee802154/raw.c 2011-05-04 17:56:28.000000000 -0400
72165 @@ -206,7 +206,7 @@ out:
72166 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
72167 {
72168 if (sock_queue_rcv_skb(sk, skb) < 0) {
72169 - atomic_inc(&sk->sk_drops);
72170 + atomic_inc_unchecked(&sk->sk_drops);
72171 kfree_skb(skb);
72172 return NET_RX_DROP;
72173 }
72174 diff -urNp linux-2.6.32.44/net/ipv4/inet_diag.c linux-2.6.32.44/net/ipv4/inet_diag.c
72175 --- linux-2.6.32.44/net/ipv4/inet_diag.c 2011-07-13 17:23:04.000000000 -0400
72176 +++ linux-2.6.32.44/net/ipv4/inet_diag.c 2011-06-20 19:31:13.000000000 -0400
72177 @@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct soc
72178 r->idiag_retrans = 0;
72179
72180 r->id.idiag_if = sk->sk_bound_dev_if;
72181 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72182 + r->id.idiag_cookie[0] = 0;
72183 + r->id.idiag_cookie[1] = 0;
72184 +#else
72185 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
72186 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
72187 +#endif
72188
72189 r->id.idiag_sport = inet->sport;
72190 r->id.idiag_dport = inet->dport;
72191 @@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct in
72192 r->idiag_family = tw->tw_family;
72193 r->idiag_retrans = 0;
72194 r->id.idiag_if = tw->tw_bound_dev_if;
72195 +
72196 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72197 + r->id.idiag_cookie[0] = 0;
72198 + r->id.idiag_cookie[1] = 0;
72199 +#else
72200 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
72201 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
72202 +#endif
72203 +
72204 r->id.idiag_sport = tw->tw_sport;
72205 r->id.idiag_dport = tw->tw_dport;
72206 r->id.idiag_src[0] = tw->tw_rcv_saddr;
72207 @@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk
72208 if (sk == NULL)
72209 goto unlock;
72210
72211 +#ifndef CONFIG_GRKERNSEC_HIDESYM
72212 err = -ESTALE;
72213 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
72214 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
72215 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
72216 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
72217 goto out;
72218 +#endif
72219
72220 err = -ENOMEM;
72221 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
72222 @@ -579,8 +593,14 @@ static int inet_diag_fill_req(struct sk_
72223 r->idiag_retrans = req->retrans;
72224
72225 r->id.idiag_if = sk->sk_bound_dev_if;
72226 +
72227 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72228 + r->id.idiag_cookie[0] = 0;
72229 + r->id.idiag_cookie[1] = 0;
72230 +#else
72231 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
72232 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
72233 +#endif
72234
72235 tmo = req->expires - jiffies;
72236 if (tmo < 0)
72237 diff -urNp linux-2.6.32.44/net/ipv4/inet_hashtables.c linux-2.6.32.44/net/ipv4/inet_hashtables.c
72238 --- linux-2.6.32.44/net/ipv4/inet_hashtables.c 2011-03-27 14:31:47.000000000 -0400
72239 +++ linux-2.6.32.44/net/ipv4/inet_hashtables.c 2011-08-07 19:48:09.000000000 -0400
72240 @@ -18,11 +18,15 @@
72241 #include <linux/sched.h>
72242 #include <linux/slab.h>
72243 #include <linux/wait.h>
72244 +#include <linux/security.h>
72245
72246 #include <net/inet_connection_sock.h>
72247 #include <net/inet_hashtables.h>
72248 +#include <net/secure_seq.h>
72249 #include <net/ip.h>
72250
72251 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
72252 +
72253 /*
72254 * Allocate and initialize a new local port bind bucket.
72255 * The bindhash mutex for snum's hash chain must be held here.
72256 @@ -490,6 +494,8 @@ ok:
72257 }
72258 spin_unlock(&head->lock);
72259
72260 + gr_update_task_in_ip_table(current, inet_sk(sk));
72261 +
72262 if (tw) {
72263 inet_twsk_deschedule(tw, death_row);
72264 inet_twsk_put(tw);
72265 diff -urNp linux-2.6.32.44/net/ipv4/inetpeer.c linux-2.6.32.44/net/ipv4/inetpeer.c
72266 --- linux-2.6.32.44/net/ipv4/inetpeer.c 2011-03-27 14:31:47.000000000 -0400
72267 +++ linux-2.6.32.44/net/ipv4/inetpeer.c 2011-08-07 19:48:09.000000000 -0400
72268 @@ -19,6 +19,7 @@
72269 #include <linux/net.h>
72270 #include <net/ip.h>
72271 #include <net/inetpeer.h>
72272 +#include <net/secure_seq.h>
72273
72274 /*
72275 * Theory of operations.
72276 @@ -366,6 +367,8 @@ struct inet_peer *inet_getpeer(__be32 da
72277 struct inet_peer *p, *n;
72278 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
72279
72280 + pax_track_stack();
72281 +
72282 /* Look up for the address quickly. */
72283 read_lock_bh(&peer_pool_lock);
72284 p = lookup(daddr, NULL);
72285 @@ -389,7 +392,7 @@ struct inet_peer *inet_getpeer(__be32 da
72286 return NULL;
72287 n->v4daddr = daddr;
72288 atomic_set(&n->refcnt, 1);
72289 - atomic_set(&n->rid, 0);
72290 + atomic_set_unchecked(&n->rid, 0);
72291 n->ip_id_count = secure_ip_id(daddr);
72292 n->tcp_ts_stamp = 0;
72293
72294 diff -urNp linux-2.6.32.44/net/ipv4/ip_fragment.c linux-2.6.32.44/net/ipv4/ip_fragment.c
72295 --- linux-2.6.32.44/net/ipv4/ip_fragment.c 2011-03-27 14:31:47.000000000 -0400
72296 +++ linux-2.6.32.44/net/ipv4/ip_fragment.c 2011-04-17 15:56:46.000000000 -0400
72297 @@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct
72298 return 0;
72299
72300 start = qp->rid;
72301 - end = atomic_inc_return(&peer->rid);
72302 + end = atomic_inc_return_unchecked(&peer->rid);
72303 qp->rid = end;
72304
72305 rc = qp->q.fragments && (end - start) > max;
72306 diff -urNp linux-2.6.32.44/net/ipv4/ip_sockglue.c linux-2.6.32.44/net/ipv4/ip_sockglue.c
72307 --- linux-2.6.32.44/net/ipv4/ip_sockglue.c 2011-03-27 14:31:47.000000000 -0400
72308 +++ linux-2.6.32.44/net/ipv4/ip_sockglue.c 2011-05-16 21:46:57.000000000 -0400
72309 @@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock
72310 int val;
72311 int len;
72312
72313 + pax_track_stack();
72314 +
72315 if (level != SOL_IP)
72316 return -EOPNOTSUPP;
72317
72318 diff -urNp linux-2.6.32.44/net/ipv4/netfilter/arp_tables.c linux-2.6.32.44/net/ipv4/netfilter/arp_tables.c
72319 --- linux-2.6.32.44/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:00:52.000000000 -0400
72320 +++ linux-2.6.32.44/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:04:18.000000000 -0400
72321 @@ -934,6 +934,7 @@ static int get_info(struct net *net, voi
72322 private = &tmp;
72323 }
72324 #endif
72325 + memset(&info, 0, sizeof(info));
72326 info.valid_hooks = t->valid_hooks;
72327 memcpy(info.hook_entry, private->hook_entry,
72328 sizeof(info.hook_entry));
72329 diff -urNp linux-2.6.32.44/net/ipv4/netfilter/ip_tables.c linux-2.6.32.44/net/ipv4/netfilter/ip_tables.c
72330 --- linux-2.6.32.44/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:00:52.000000000 -0400
72331 +++ linux-2.6.32.44/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:04:18.000000000 -0400
72332 @@ -1141,6 +1141,7 @@ static int get_info(struct net *net, voi
72333 private = &tmp;
72334 }
72335 #endif
72336 + memset(&info, 0, sizeof(info));
72337 info.valid_hooks = t->valid_hooks;
72338 memcpy(info.hook_entry, private->hook_entry,
72339 sizeof(info.hook_entry));
72340 diff -urNp linux-2.6.32.44/net/ipv4/netfilter/nf_nat_proto_common.c linux-2.6.32.44/net/ipv4/netfilter/nf_nat_proto_common.c
72341 --- linux-2.6.32.44/net/ipv4/netfilter/nf_nat_proto_common.c 2011-03-27 14:31:47.000000000 -0400
72342 +++ linux-2.6.32.44/net/ipv4/netfilter/nf_nat_proto_common.c 2011-08-07 19:48:09.000000000 -0400
72343 @@ -12,6 +12,7 @@
72344 #include <linux/ip.h>
72345
72346 #include <linux/netfilter.h>
72347 +#include <net/secure_seq.h>
72348 #include <net/netfilter/nf_nat.h>
72349 #include <net/netfilter/nf_nat_core.h>
72350 #include <net/netfilter/nf_nat_rule.h>
72351 diff -urNp linux-2.6.32.44/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-2.6.32.44/net/ipv4/netfilter/nf_nat_snmp_basic.c
72352 --- linux-2.6.32.44/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-03-27 14:31:47.000000000 -0400
72353 +++ linux-2.6.32.44/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-04-17 15:56:46.000000000 -0400
72354 @@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(
72355
72356 *len = 0;
72357
72358 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
72359 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
72360 if (*octets == NULL) {
72361 if (net_ratelimit())
72362 printk("OOM in bsalg (%d)\n", __LINE__);
72363 diff -urNp linux-2.6.32.44/net/ipv4/raw.c linux-2.6.32.44/net/ipv4/raw.c
72364 --- linux-2.6.32.44/net/ipv4/raw.c 2011-03-27 14:31:47.000000000 -0400
72365 +++ linux-2.6.32.44/net/ipv4/raw.c 2011-05-04 17:59:08.000000000 -0400
72366 @@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk,
72367 /* Charge it to the socket. */
72368
72369 if (sock_queue_rcv_skb(sk, skb) < 0) {
72370 - atomic_inc(&sk->sk_drops);
72371 + atomic_inc_unchecked(&sk->sk_drops);
72372 kfree_skb(skb);
72373 return NET_RX_DROP;
72374 }
72375 @@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk,
72376 int raw_rcv(struct sock *sk, struct sk_buff *skb)
72377 {
72378 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
72379 - atomic_inc(&sk->sk_drops);
72380 + atomic_inc_unchecked(&sk->sk_drops);
72381 kfree_skb(skb);
72382 return NET_RX_DROP;
72383 }
72384 @@ -724,15 +724,22 @@ static int raw_init(struct sock *sk)
72385
72386 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
72387 {
72388 + struct icmp_filter filter;
72389 +
72390 + if (optlen < 0)
72391 + return -EINVAL;
72392 if (optlen > sizeof(struct icmp_filter))
72393 optlen = sizeof(struct icmp_filter);
72394 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
72395 + if (copy_from_user(&filter, optval, optlen))
72396 return -EFAULT;
72397 + memcpy(&raw_sk(sk)->filter, &filter, optlen);
72398 +
72399 return 0;
72400 }
72401
72402 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
72403 {
72404 + struct icmp_filter filter;
72405 int len, ret = -EFAULT;
72406
72407 if (get_user(len, optlen))
72408 @@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock
72409 if (len > sizeof(struct icmp_filter))
72410 len = sizeof(struct icmp_filter);
72411 ret = -EFAULT;
72412 + memcpy(&filter, &raw_sk(sk)->filter, len);
72413 if (put_user(len, optlen) ||
72414 - copy_to_user(optval, &raw_sk(sk)->filter, len))
72415 + copy_to_user(optval, &filter, len))
72416 goto out;
72417 ret = 0;
72418 out: return ret;
72419 @@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq
72420 sk_wmem_alloc_get(sp),
72421 sk_rmem_alloc_get(sp),
72422 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72423 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
72424 + atomic_read(&sp->sk_refcnt),
72425 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72426 + NULL,
72427 +#else
72428 + sp,
72429 +#endif
72430 + atomic_read_unchecked(&sp->sk_drops));
72431 }
72432
72433 static int raw_seq_show(struct seq_file *seq, void *v)
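Besides the HIDESYM change in raw_sock_seq_show(), the raw.c hunks above rework raw_seticmpfilter() and raw_geticmpfilter() so that user data is bounced through a correctly sized local struct icmp_filter, with a negative optlen rejected and an oversized one clamped, instead of copying straight into (or out of) the socket. A reduced userspace-style sketch of that pattern; memcpy stands in for copy_from_user() and the structure is a stand-in, not the real definition:

#include <string.h>
#include <errno.h>

struct icmp_filter { unsigned int data; };      /* stand-in for the real filter */
static struct icmp_filter sock_filter;          /* stands in for raw_sk(sk)->filter */

static int set_filter(const void *optval, int optlen)
{
	struct icmp_filter filter;

	if (optlen < 0)
		return -EINVAL;                  /* reject nonsense lengths up front */
	if (optlen > (int)sizeof(filter))
		optlen = sizeof(filter);         /* clamp to the destination size */
	memcpy(&filter, optval, optlen);         /* copy_from_user() in the kernel */
	memcpy(&sock_filter, &filter, optlen);   /* only now touch socket state */
	return 0;
}

int main(void)
{
	unsigned int raw = 0xdeadbeef;
	return set_filter(&raw, (int)sizeof(raw));   /* returns 0 */
}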
72434 diff -urNp linux-2.6.32.44/net/ipv4/route.c linux-2.6.32.44/net/ipv4/route.c
72435 --- linux-2.6.32.44/net/ipv4/route.c 2011-03-27 14:31:47.000000000 -0400
72436 +++ linux-2.6.32.44/net/ipv4/route.c 2011-08-07 19:48:09.000000000 -0400
72437 @@ -107,6 +107,7 @@
72438 #ifdef CONFIG_SYSCTL
72439 #include <linux/sysctl.h>
72440 #endif
72441 +#include <net/secure_seq.h>
72442
72443 #define RT_FL_TOS(oldflp) \
72444 ((u32)(oldflp->fl4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))
72445 @@ -268,7 +269,7 @@ static inline unsigned int rt_hash(__be3
72446
72447 static inline int rt_genid(struct net *net)
72448 {
72449 - return atomic_read(&net->ipv4.rt_genid);
72450 + return atomic_read_unchecked(&net->ipv4.rt_genid);
72451 }
72452
72453 #ifdef CONFIG_PROC_FS
72454 @@ -888,7 +889,7 @@ static void rt_cache_invalidate(struct n
72455 unsigned char shuffle;
72456
72457 get_random_bytes(&shuffle, sizeof(shuffle));
72458 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
72459 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
72460 }
72461
72462 /*
72463 @@ -3356,7 +3357,7 @@ static __net_initdata struct pernet_oper
72464
72465 static __net_init int rt_secret_timer_init(struct net *net)
72466 {
72467 - atomic_set(&net->ipv4.rt_genid,
72468 + atomic_set_unchecked(&net->ipv4.rt_genid,
72469 (int) ((num_physpages ^ (num_physpages>>8)) ^
72470 (jiffies ^ (jiffies >> 7))));
72471
72472 diff -urNp linux-2.6.32.44/net/ipv4/tcp.c linux-2.6.32.44/net/ipv4/tcp.c
72473 --- linux-2.6.32.44/net/ipv4/tcp.c 2011-03-27 14:31:47.000000000 -0400
72474 +++ linux-2.6.32.44/net/ipv4/tcp.c 2011-05-16 21:46:57.000000000 -0400
72475 @@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock
72476 int val;
72477 int err = 0;
72478
72479 + pax_track_stack();
72480 +
72481 /* This is a string value all the others are int's */
72482 if (optname == TCP_CONGESTION) {
72483 char name[TCP_CA_NAME_MAX];
72484 @@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock
72485 struct tcp_sock *tp = tcp_sk(sk);
72486 int val, len;
72487
72488 + pax_track_stack();
72489 +
72490 if (get_user(len, optlen))
72491 return -EFAULT;
72492
72493 diff -urNp linux-2.6.32.44/net/ipv4/tcp_ipv4.c linux-2.6.32.44/net/ipv4/tcp_ipv4.c
72494 --- linux-2.6.32.44/net/ipv4/tcp_ipv4.c 2011-03-27 14:31:47.000000000 -0400
72495 +++ linux-2.6.32.44/net/ipv4/tcp_ipv4.c 2011-08-07 19:48:09.000000000 -0400
72496 @@ -71,6 +71,7 @@
72497 #include <net/timewait_sock.h>
72498 #include <net/xfrm.h>
72499 #include <net/netdma.h>
72500 +#include <net/secure_seq.h>
72501
72502 #include <linux/inet.h>
72503 #include <linux/ipv6.h>
72504 @@ -84,6 +85,9 @@
72505 int sysctl_tcp_tw_reuse __read_mostly;
72506 int sysctl_tcp_low_latency __read_mostly;
72507
72508 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72509 +extern int grsec_enable_blackhole;
72510 +#endif
72511
72512 #ifdef CONFIG_TCP_MD5SIG
72513 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
72514 @@ -1542,6 +1546,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
72515 return 0;
72516
72517 reset:
72518 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72519 + if (!grsec_enable_blackhole)
72520 +#endif
72521 tcp_v4_send_reset(rsk, skb);
72522 discard:
72523 kfree_skb(skb);
72524 @@ -1603,12 +1610,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
72525 TCP_SKB_CB(skb)->sacked = 0;
72526
72527 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
72528 - if (!sk)
72529 + if (!sk) {
72530 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72531 + ret = 1;
72532 +#endif
72533 goto no_tcp_socket;
72534 + }
72535
72536 process:
72537 - if (sk->sk_state == TCP_TIME_WAIT)
72538 + if (sk->sk_state == TCP_TIME_WAIT) {
72539 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72540 + ret = 2;
72541 +#endif
72542 goto do_time_wait;
72543 + }
72544
72545 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
72546 goto discard_and_relse;
72547 @@ -1650,6 +1665,10 @@ no_tcp_socket:
72548 bad_packet:
72549 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
72550 } else {
72551 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72552 + if (!grsec_enable_blackhole || (ret == 1 &&
72553 + (skb->dev->flags & IFF_LOOPBACK)))
72554 +#endif
72555 tcp_v4_send_reset(NULL, skb);
72556 }
72557
72558 @@ -2194,14 +2213,14 @@ int tcp_proc_register(struct net *net, s
72559 int rc = 0;
72560 struct proc_dir_entry *p;
72561
72562 - afinfo->seq_fops.open = tcp_seq_open;
72563 - afinfo->seq_fops.read = seq_read;
72564 - afinfo->seq_fops.llseek = seq_lseek;
72565 - afinfo->seq_fops.release = seq_release_net;
72566 -
72567 - afinfo->seq_ops.start = tcp_seq_start;
72568 - afinfo->seq_ops.next = tcp_seq_next;
72569 - afinfo->seq_ops.stop = tcp_seq_stop;
72570 + *(void **)&afinfo->seq_fops.open = tcp_seq_open;
72571 + *(void **)&afinfo->seq_fops.read = seq_read;
72572 + *(void **)&afinfo->seq_fops.llseek = seq_lseek;
72573 + *(void **)&afinfo->seq_fops.release = seq_release_net;
72574 +
72575 + *(void **)&afinfo->seq_ops.start = tcp_seq_start;
72576 + *(void **)&afinfo->seq_ops.next = tcp_seq_next;
72577 + *(void **)&afinfo->seq_ops.stop = tcp_seq_stop;
72578
72579 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
72580 &afinfo->seq_fops, afinfo);
72581 @@ -2237,7 +2256,11 @@ static void get_openreq4(struct sock *sk
72582 0, /* non standard timer */
72583 0, /* open_requests have no inode */
72584 atomic_read(&sk->sk_refcnt),
72585 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72586 + NULL,
72587 +#else
72588 req,
72589 +#endif
72590 len);
72591 }
72592
72593 @@ -2279,7 +2302,12 @@ static void get_tcp4_sock(struct sock *s
72594 sock_i_uid(sk),
72595 icsk->icsk_probes_out,
72596 sock_i_ino(sk),
72597 - atomic_read(&sk->sk_refcnt), sk,
72598 + atomic_read(&sk->sk_refcnt),
72599 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72600 + NULL,
72601 +#else
72602 + sk,
72603 +#endif
72604 jiffies_to_clock_t(icsk->icsk_rto),
72605 jiffies_to_clock_t(icsk->icsk_ack.ato),
72606 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
72607 @@ -2307,7 +2335,13 @@ static void get_timewait4_sock(struct in
72608 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
72609 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
72610 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
72611 - atomic_read(&tw->tw_refcnt), tw, len);
72612 + atomic_read(&tw->tw_refcnt),
72613 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72614 + NULL,
72615 +#else
72616 + tw,
72617 +#endif
72618 + len);
72619 }
72620
72621 #define TMPSZ 150
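The CONFIG_GRKERNSEC_BLACKHOLE additions above, together with the matching ones in tcp_minisocks.c, tcp_timer.c and udp.c further down, make the stack silent toward unsolicited probes: with the feature enabled, a TCP RST for a segment that matched no socket is only sent if the packet arrived over loopback, and the UDP path applies the same test before its ICMP port-unreachable. Condensed into one decision helper; this is illustrative, not code taken from the patch:

#include <stdbool.h>
#include <stdio.h>

#define IFF_LOOPBACK 0x8                        /* same bit as <linux/if.h> */

static int grsec_enable_blackhole = 1;          /* the sysctl the patch adds */

/* Reached when no socket matched the incoming segment (the ret == 1 case
 * in the tcp_v4_rcv() hunks above). */
static bool should_send_reset(unsigned int dev_flags)
{
	if (!grsec_enable_blackhole)
		return true;                        /* feature off: behave as stock kernel */
	return (dev_flags & IFF_LOOPBACK) != 0;     /* stay quiet toward remote scanners */
}

int main(void)
{
	printf("external iface: %s\n", should_send_reset(0) ? "RST" : "drop");
	printf("loopback:       %s\n", should_send_reset(IFF_LOOPBACK) ? "RST" : "drop");
	return 0;
}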
72622 diff -urNp linux-2.6.32.44/net/ipv4/tcp_minisocks.c linux-2.6.32.44/net/ipv4/tcp_minisocks.c
72623 --- linux-2.6.32.44/net/ipv4/tcp_minisocks.c 2011-03-27 14:31:47.000000000 -0400
72624 +++ linux-2.6.32.44/net/ipv4/tcp_minisocks.c 2011-04-17 15:56:46.000000000 -0400
72625 @@ -26,6 +26,10 @@
72626 #include <net/inet_common.h>
72627 #include <net/xfrm.h>
72628
72629 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72630 +extern int grsec_enable_blackhole;
72631 +#endif
72632 +
72633 #ifdef CONFIG_SYSCTL
72634 #define SYNC_INIT 0 /* let the user enable it */
72635 #else
72636 @@ -672,6 +676,10 @@ listen_overflow:
72637
72638 embryonic_reset:
72639 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
72640 +
72641 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72642 + if (!grsec_enable_blackhole)
72643 +#endif
72644 if (!(flg & TCP_FLAG_RST))
72645 req->rsk_ops->send_reset(sk, skb);
72646
72647 diff -urNp linux-2.6.32.44/net/ipv4/tcp_output.c linux-2.6.32.44/net/ipv4/tcp_output.c
72648 --- linux-2.6.32.44/net/ipv4/tcp_output.c 2011-03-27 14:31:47.000000000 -0400
72649 +++ linux-2.6.32.44/net/ipv4/tcp_output.c 2011-05-16 21:46:57.000000000 -0400
72650 @@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct s
72651 __u8 *md5_hash_location;
72652 int mss;
72653
72654 + pax_track_stack();
72655 +
72656 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
72657 if (skb == NULL)
72658 return NULL;
72659 diff -urNp linux-2.6.32.44/net/ipv4/tcp_probe.c linux-2.6.32.44/net/ipv4/tcp_probe.c
72660 --- linux-2.6.32.44/net/ipv4/tcp_probe.c 2011-03-27 14:31:47.000000000 -0400
72661 +++ linux-2.6.32.44/net/ipv4/tcp_probe.c 2011-04-17 15:56:46.000000000 -0400
72662 @@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file
72663 if (cnt + width >= len)
72664 break;
72665
72666 - if (copy_to_user(buf + cnt, tbuf, width))
72667 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
72668 return -EFAULT;
72669 cnt += width;
72670 }
72671 diff -urNp linux-2.6.32.44/net/ipv4/tcp_timer.c linux-2.6.32.44/net/ipv4/tcp_timer.c
72672 --- linux-2.6.32.44/net/ipv4/tcp_timer.c 2011-03-27 14:31:47.000000000 -0400
72673 +++ linux-2.6.32.44/net/ipv4/tcp_timer.c 2011-04-17 15:56:46.000000000 -0400
72674 @@ -21,6 +21,10 @@
72675 #include <linux/module.h>
72676 #include <net/tcp.h>
72677
72678 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72679 +extern int grsec_lastack_retries;
72680 +#endif
72681 +
72682 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
72683 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
72684 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
72685 @@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock
72686 }
72687 }
72688
72689 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72690 + if ((sk->sk_state == TCP_LAST_ACK) &&
72691 + (grsec_lastack_retries > 0) &&
72692 + (grsec_lastack_retries < retry_until))
72693 + retry_until = grsec_lastack_retries;
72694 +#endif
72695 +
72696 if (retransmits_timed_out(sk, retry_until)) {
72697 /* Has it gone just too far? */
72698 tcp_write_err(sk);
72699 diff -urNp linux-2.6.32.44/net/ipv4/udp.c linux-2.6.32.44/net/ipv4/udp.c
72700 --- linux-2.6.32.44/net/ipv4/udp.c 2011-07-13 17:23:04.000000000 -0400
72701 +++ linux-2.6.32.44/net/ipv4/udp.c 2011-08-05 20:33:55.000000000 -0400
72702 @@ -86,6 +86,7 @@
72703 #include <linux/types.h>
72704 #include <linux/fcntl.h>
72705 #include <linux/module.h>
72706 +#include <linux/security.h>
72707 #include <linux/socket.h>
72708 #include <linux/sockios.h>
72709 #include <linux/igmp.h>
72710 @@ -106,6 +107,10 @@
72711 #include <net/xfrm.h>
72712 #include "udp_impl.h"
72713
72714 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72715 +extern int grsec_enable_blackhole;
72716 +#endif
72717 +
72718 struct udp_table udp_table;
72719 EXPORT_SYMBOL(udp_table);
72720
72721 @@ -371,6 +376,9 @@ found:
72722 return s;
72723 }
72724
72725 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
72726 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
72727 +
72728 /*
72729 * This routine is called by the ICMP module when it gets some
72730 * sort of error condition. If err < 0 then the socket should
72731 @@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
72732 dport = usin->sin_port;
72733 if (dport == 0)
72734 return -EINVAL;
72735 +
72736 + err = gr_search_udp_sendmsg(sk, usin);
72737 + if (err)
72738 + return err;
72739 } else {
72740 if (sk->sk_state != TCP_ESTABLISHED)
72741 return -EDESTADDRREQ;
72742 +
72743 + err = gr_search_udp_sendmsg(sk, NULL);
72744 + if (err)
72745 + return err;
72746 +
72747 daddr = inet->daddr;
72748 dport = inet->dport;
72749 /* Open fast path for connected socket.
72750 @@ -945,6 +962,10 @@ try_again:
72751 if (!skb)
72752 goto out;
72753
72754 + err = gr_search_udp_recvmsg(sk, skb);
72755 + if (err)
72756 + goto out_free;
72757 +
72758 ulen = skb->len - sizeof(struct udphdr);
72759 copied = len;
72760 if (copied > ulen)
72761 @@ -1068,7 +1089,7 @@ static int __udp_queue_rcv_skb(struct so
72762 if (rc == -ENOMEM) {
72763 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
72764 is_udplite);
72765 - atomic_inc(&sk->sk_drops);
72766 + atomic_inc_unchecked(&sk->sk_drops);
72767 }
72768 goto drop;
72769 }
72770 @@ -1338,6 +1359,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
72771 goto csum_error;
72772
72773 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
72774 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72775 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
72776 +#endif
72777 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
72778
72779 /*
72780 @@ -1719,14 +1743,14 @@ int udp_proc_register(struct net *net, s
72781 struct proc_dir_entry *p;
72782 int rc = 0;
72783
72784 - afinfo->seq_fops.open = udp_seq_open;
72785 - afinfo->seq_fops.read = seq_read;
72786 - afinfo->seq_fops.llseek = seq_lseek;
72787 - afinfo->seq_fops.release = seq_release_net;
72788 -
72789 - afinfo->seq_ops.start = udp_seq_start;
72790 - afinfo->seq_ops.next = udp_seq_next;
72791 - afinfo->seq_ops.stop = udp_seq_stop;
72792 + *(void **)&afinfo->seq_fops.open = udp_seq_open;
72793 + *(void **)&afinfo->seq_fops.read = seq_read;
72794 + *(void **)&afinfo->seq_fops.llseek = seq_lseek;
72795 + *(void **)&afinfo->seq_fops.release = seq_release_net;
72796 +
72797 + *(void **)&afinfo->seq_ops.start = udp_seq_start;
72798 + *(void **)&afinfo->seq_ops.next = udp_seq_next;
72799 + *(void **)&afinfo->seq_ops.stop = udp_seq_stop;
72800
72801 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
72802 &afinfo->seq_fops, afinfo);
72803 @@ -1758,8 +1782,13 @@ static void udp4_format_sock(struct sock
72804 sk_wmem_alloc_get(sp),
72805 sk_rmem_alloc_get(sp),
72806 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72807 - atomic_read(&sp->sk_refcnt), sp,
72808 - atomic_read(&sp->sk_drops), len);
72809 + atomic_read(&sp->sk_refcnt),
72810 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72811 + NULL,
72812 +#else
72813 + sp,
72814 +#endif
72815 + atomic_read_unchecked(&sp->sk_drops), len);
72816 }
72817
72818 int udp4_seq_show(struct seq_file *seq, void *v)
72819 diff -urNp linux-2.6.32.44/net/ipv6/inet6_connection_sock.c linux-2.6.32.44/net/ipv6/inet6_connection_sock.c
72820 --- linux-2.6.32.44/net/ipv6/inet6_connection_sock.c 2011-03-27 14:31:47.000000000 -0400
72821 +++ linux-2.6.32.44/net/ipv6/inet6_connection_sock.c 2011-05-04 17:56:28.000000000 -0400
72822 @@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *
72823 #ifdef CONFIG_XFRM
72824 {
72825 struct rt6_info *rt = (struct rt6_info *)dst;
72826 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
72827 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
72828 }
72829 #endif
72830 }
72831 @@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(
72832 #ifdef CONFIG_XFRM
72833 if (dst) {
72834 struct rt6_info *rt = (struct rt6_info *)dst;
72835 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
72836 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
72837 sk->sk_dst_cache = NULL;
72838 dst_release(dst);
72839 dst = NULL;
72840 diff -urNp linux-2.6.32.44/net/ipv6/inet6_hashtables.c linux-2.6.32.44/net/ipv6/inet6_hashtables.c
72841 --- linux-2.6.32.44/net/ipv6/inet6_hashtables.c 2011-03-27 14:31:47.000000000 -0400
72842 +++ linux-2.6.32.44/net/ipv6/inet6_hashtables.c 2011-08-07 19:48:09.000000000 -0400
72843 @@ -20,6 +20,7 @@
72844 #include <net/inet_connection_sock.h>
72845 #include <net/inet_hashtables.h>
72846 #include <net/inet6_hashtables.h>
72847 +#include <net/secure_seq.h>
72848 #include <net/ip.h>
72849
72850 void __inet6_hash(struct sock *sk)
72851 @@ -118,7 +119,7 @@ out:
72852 }
72853 EXPORT_SYMBOL(__inet6_lookup_established);
72854
72855 -static int inline compute_score(struct sock *sk, struct net *net,
72856 +static inline int compute_score(struct sock *sk, struct net *net,
72857 const unsigned short hnum,
72858 const struct in6_addr *daddr,
72859 const int dif)
72860 diff -urNp linux-2.6.32.44/net/ipv6/ipv6_sockglue.c linux-2.6.32.44/net/ipv6/ipv6_sockglue.c
72861 --- linux-2.6.32.44/net/ipv6/ipv6_sockglue.c 2011-03-27 14:31:47.000000000 -0400
72862 +++ linux-2.6.32.44/net/ipv6/ipv6_sockglue.c 2011-05-16 21:46:57.000000000 -0400
72863 @@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct soc
72864 int val, valbool;
72865 int retv = -ENOPROTOOPT;
72866
72867 + pax_track_stack();
72868 +
72869 if (optval == NULL)
72870 val=0;
72871 else {
72872 @@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct soc
72873 int len;
72874 int val;
72875
72876 + pax_track_stack();
72877 +
72878 if (ip6_mroute_opt(optname))
72879 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
72880
72881 diff -urNp linux-2.6.32.44/net/ipv6/netfilter/ip6_tables.c linux-2.6.32.44/net/ipv6/netfilter/ip6_tables.c
72882 --- linux-2.6.32.44/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:00:52.000000000 -0400
72883 +++ linux-2.6.32.44/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:04:18.000000000 -0400
72884 @@ -1173,6 +1173,7 @@ static int get_info(struct net *net, voi
72885 private = &tmp;
72886 }
72887 #endif
72888 + memset(&info, 0, sizeof(info));
72889 info.valid_hooks = t->valid_hooks;
72890 memcpy(info.hook_entry, private->hook_entry,
72891 sizeof(info.hook_entry));
72892 diff -urNp linux-2.6.32.44/net/ipv6/raw.c linux-2.6.32.44/net/ipv6/raw.c
72893 --- linux-2.6.32.44/net/ipv6/raw.c 2011-03-27 14:31:47.000000000 -0400
72894 +++ linux-2.6.32.44/net/ipv6/raw.c 2011-05-16 21:46:57.000000000 -0400
72895 @@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct s
72896 {
72897 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
72898 skb_checksum_complete(skb)) {
72899 - atomic_inc(&sk->sk_drops);
72900 + atomic_inc_unchecked(&sk->sk_drops);
72901 kfree_skb(skb);
72902 return NET_RX_DROP;
72903 }
72904
72905 /* Charge it to the socket. */
72906 if (sock_queue_rcv_skb(sk,skb)<0) {
72907 - atomic_inc(&sk->sk_drops);
72908 + atomic_inc_unchecked(&sk->sk_drops);
72909 kfree_skb(skb);
72910 return NET_RX_DROP;
72911 }
72912 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
72913 struct raw6_sock *rp = raw6_sk(sk);
72914
72915 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
72916 - atomic_inc(&sk->sk_drops);
72917 + atomic_inc_unchecked(&sk->sk_drops);
72918 kfree_skb(skb);
72919 return NET_RX_DROP;
72920 }
72921 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
72922
72923 if (inet->hdrincl) {
72924 if (skb_checksum_complete(skb)) {
72925 - atomic_inc(&sk->sk_drops);
72926 + atomic_inc_unchecked(&sk->sk_drops);
72927 kfree_skb(skb);
72928 return NET_RX_DROP;
72929 }
72930 @@ -518,7 +518,7 @@ csum_copy_err:
72931 as some normal condition.
72932 */
72933 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
72934 - atomic_inc(&sk->sk_drops);
72935 + atomic_inc_unchecked(&sk->sk_drops);
72936 goto out;
72937 }
72938
72939 @@ -600,7 +600,7 @@ out:
72940 return err;
72941 }
72942
72943 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
72944 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
72945 struct flowi *fl, struct rt6_info *rt,
72946 unsigned int flags)
72947 {
72948 @@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *i
72949 u16 proto;
72950 int err;
72951
72952 + pax_track_stack();
72953 +
72954 /* Rough check on arithmetic overflow,
72955 better check is made in ip6_append_data().
72956 */
72957 @@ -916,12 +918,17 @@ do_confirm:
72958 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
72959 char __user *optval, int optlen)
72960 {
72961 + struct icmp6_filter filter;
72962 +
72963 switch (optname) {
72964 case ICMPV6_FILTER:
72965 + if (optlen < 0)
72966 + return -EINVAL;
72967 if (optlen > sizeof(struct icmp6_filter))
72968 optlen = sizeof(struct icmp6_filter);
72969 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
72970 + if (copy_from_user(&filter, optval, optlen))
72971 return -EFAULT;
72972 + memcpy(&raw6_sk(sk)->filter, &filter, optlen);
72973 return 0;
72974 default:
72975 return -ENOPROTOOPT;
72976 @@ -933,6 +940,7 @@ static int rawv6_seticmpfilter(struct so
72977 static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
72978 char __user *optval, int __user *optlen)
72979 {
72980 + struct icmp6_filter filter;
72981 int len;
72982
72983 switch (optname) {
72984 @@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct so
72985 len = sizeof(struct icmp6_filter);
72986 if (put_user(len, optlen))
72987 return -EFAULT;
72988 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
72989 + memcpy(&filter, &raw6_sk(sk)->filter, len);
72990 + if (copy_to_user(optval, &filter, len))
72991 return -EFAULT;
72992 return 0;
72993 default:
72994 @@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct se
72995 0, 0L, 0,
72996 sock_i_uid(sp), 0,
72997 sock_i_ino(sp),
72998 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
72999 + atomic_read(&sp->sk_refcnt),
73000 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73001 + NULL,
73002 +#else
73003 + sp,
73004 +#endif
73005 + atomic_read_unchecked(&sp->sk_drops));
73006 }
73007
73008 static int raw6_seq_show(struct seq_file *seq, void *v)
73009 diff -urNp linux-2.6.32.44/net/ipv6/tcp_ipv6.c linux-2.6.32.44/net/ipv6/tcp_ipv6.c
73010 --- linux-2.6.32.44/net/ipv6/tcp_ipv6.c 2011-03-27 14:31:47.000000000 -0400
73011 +++ linux-2.6.32.44/net/ipv6/tcp_ipv6.c 2011-08-07 19:48:09.000000000 -0400
73012 @@ -60,6 +60,7 @@
73013 #include <net/timewait_sock.h>
73014 #include <net/netdma.h>
73015 #include <net/inet_common.h>
73016 +#include <net/secure_seq.h>
73017
73018 #include <asm/uaccess.h>
73019
73020 @@ -88,6 +89,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
73021 }
73022 #endif
73023
73024 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73025 +extern int grsec_enable_blackhole;
73026 +#endif
73027 +
73028 static void tcp_v6_hash(struct sock *sk)
73029 {
73030 if (sk->sk_state != TCP_CLOSE) {
73031 @@ -1578,6 +1583,9 @@ static int tcp_v6_do_rcv(struct sock *sk
73032 return 0;
73033
73034 reset:
73035 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73036 + if (!grsec_enable_blackhole)
73037 +#endif
73038 tcp_v6_send_reset(sk, skb);
73039 discard:
73040 if (opt_skb)
73041 @@ -1655,12 +1663,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
73042 TCP_SKB_CB(skb)->sacked = 0;
73043
73044 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
73045 - if (!sk)
73046 + if (!sk) {
73047 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73048 + ret = 1;
73049 +#endif
73050 goto no_tcp_socket;
73051 + }
73052
73053 process:
73054 - if (sk->sk_state == TCP_TIME_WAIT)
73055 + if (sk->sk_state == TCP_TIME_WAIT) {
73056 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73057 + ret = 2;
73058 +#endif
73059 goto do_time_wait;
73060 + }
73061
73062 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
73063 goto discard_and_relse;
73064 @@ -1700,6 +1716,10 @@ no_tcp_socket:
73065 bad_packet:
73066 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
73067 } else {
73068 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73069 + if (!grsec_enable_blackhole || (ret == 1 &&
73070 + (skb->dev->flags & IFF_LOOPBACK)))
73071 +#endif
73072 tcp_v6_send_reset(NULL, skb);
73073 }
73074
73075 @@ -1915,7 +1935,13 @@ static void get_openreq6(struct seq_file
73076 uid,
73077 0, /* non standard timer */
73078 0, /* open_requests have no inode */
73079 - 0, req);
73080 + 0,
73081 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73082 + NULL
73083 +#else
73084 + req
73085 +#endif
73086 + );
73087 }
73088
73089 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
73090 @@ -1965,7 +1991,12 @@ static void get_tcp6_sock(struct seq_fil
73091 sock_i_uid(sp),
73092 icsk->icsk_probes_out,
73093 sock_i_ino(sp),
73094 - atomic_read(&sp->sk_refcnt), sp,
73095 + atomic_read(&sp->sk_refcnt),
73096 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73097 + NULL,
73098 +#else
73099 + sp,
73100 +#endif
73101 jiffies_to_clock_t(icsk->icsk_rto),
73102 jiffies_to_clock_t(icsk->icsk_ack.ato),
73103 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
73104 @@ -2000,7 +2031,13 @@ static void get_timewait6_sock(struct se
73105 dest->s6_addr32[2], dest->s6_addr32[3], destp,
73106 tw->tw_substate, 0, 0,
73107 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
73108 - atomic_read(&tw->tw_refcnt), tw);
73109 + atomic_read(&tw->tw_refcnt),
73110 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73111 + NULL
73112 +#else
73113 + tw
73114 +#endif
73115 + );
73116 }
73117
73118 static int tcp6_seq_show(struct seq_file *seq, void *v)
73119 diff -urNp linux-2.6.32.44/net/ipv6/udp.c linux-2.6.32.44/net/ipv6/udp.c
73120 --- linux-2.6.32.44/net/ipv6/udp.c 2011-07-13 17:23:04.000000000 -0400
73121 +++ linux-2.6.32.44/net/ipv6/udp.c 2011-07-13 17:23:27.000000000 -0400
73122 @@ -49,6 +49,10 @@
73123 #include <linux/seq_file.h>
73124 #include "udp_impl.h"
73125
73126 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73127 +extern int grsec_enable_blackhole;
73128 +#endif
73129 +
73130 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
73131 {
73132 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
73133 @@ -391,7 +395,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
73134 if (rc == -ENOMEM) {
73135 UDP6_INC_STATS_BH(sock_net(sk),
73136 UDP_MIB_RCVBUFERRORS, is_udplite);
73137 - atomic_inc(&sk->sk_drops);
73138 + atomic_inc_unchecked(&sk->sk_drops);
73139 }
73140 goto drop;
73141 }
73142 @@ -590,6 +594,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
73143 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
73144 proto == IPPROTO_UDPLITE);
73145
73146 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73147 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
73148 +#endif
73149 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
73150
73151 kfree_skb(skb);
73152 @@ -1209,8 +1216,13 @@ static void udp6_sock_seq_show(struct se
73153 0, 0L, 0,
73154 sock_i_uid(sp), 0,
73155 sock_i_ino(sp),
73156 - atomic_read(&sp->sk_refcnt), sp,
73157 - atomic_read(&sp->sk_drops));
73158 + atomic_read(&sp->sk_refcnt),
73159 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73160 + NULL,
73161 +#else
73162 + sp,
73163 +#endif
73164 + atomic_read_unchecked(&sp->sk_drops));
73165 }
73166
73167 int udp6_seq_show(struct seq_file *seq, void *v)
73168 diff -urNp linux-2.6.32.44/net/irda/ircomm/ircomm_tty.c linux-2.6.32.44/net/irda/ircomm/ircomm_tty.c
73169 --- linux-2.6.32.44/net/irda/ircomm/ircomm_tty.c 2011-03-27 14:31:47.000000000 -0400
73170 +++ linux-2.6.32.44/net/irda/ircomm/ircomm_tty.c 2011-04-17 15:56:46.000000000 -0400
73171 @@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(st
73172 add_wait_queue(&self->open_wait, &wait);
73173
73174 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
73175 - __FILE__,__LINE__, tty->driver->name, self->open_count );
73176 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
73177
73178 /* As far as I can see, we protect open_count - Jean II */
73179 spin_lock_irqsave(&self->spinlock, flags);
73180 if (!tty_hung_up_p(filp)) {
73181 extra_count = 1;
73182 - self->open_count--;
73183 + local_dec(&self->open_count);
73184 }
73185 spin_unlock_irqrestore(&self->spinlock, flags);
73186 - self->blocked_open++;
73187 + local_inc(&self->blocked_open);
73188
73189 while (1) {
73190 if (tty->termios->c_cflag & CBAUD) {
73191 @@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(st
73192 }
73193
73194 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
73195 - __FILE__,__LINE__, tty->driver->name, self->open_count );
73196 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
73197
73198 schedule();
73199 }
73200 @@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(st
73201 if (extra_count) {
73202 /* ++ is not atomic, so this should be protected - Jean II */
73203 spin_lock_irqsave(&self->spinlock, flags);
73204 - self->open_count++;
73205 + local_inc(&self->open_count);
73206 spin_unlock_irqrestore(&self->spinlock, flags);
73207 }
73208 - self->blocked_open--;
73209 + local_dec(&self->blocked_open);
73210
73211 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
73212 - __FILE__,__LINE__, tty->driver->name, self->open_count);
73213 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
73214
73215 if (!retval)
73216 self->flags |= ASYNC_NORMAL_ACTIVE;
73217 @@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_st
73218 }
73219 /* ++ is not atomic, so this should be protected - Jean II */
73220 spin_lock_irqsave(&self->spinlock, flags);
73221 - self->open_count++;
73222 + local_inc(&self->open_count);
73223
73224 tty->driver_data = self;
73225 self->tty = tty;
73226 spin_unlock_irqrestore(&self->spinlock, flags);
73227
73228 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
73229 - self->line, self->open_count);
73230 + self->line, local_read(&self->open_count));
73231
73232 /* Not really used by us, but lets do it anyway */
73233 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
73234 @@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_
73235 return;
73236 }
73237
73238 - if ((tty->count == 1) && (self->open_count != 1)) {
73239 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
73240 /*
73241 * Uh, oh. tty->count is 1, which means that the tty
73242 * structure will be freed. state->count should always
73243 @@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_
73244 */
73245 IRDA_DEBUG(0, "%s(), bad serial port count; "
73246 "tty->count is 1, state->count is %d\n", __func__ ,
73247 - self->open_count);
73248 - self->open_count = 1;
73249 + local_read(&self->open_count));
73250 + local_set(&self->open_count, 1);
73251 }
73252
73253 - if (--self->open_count < 0) {
73254 + if (local_dec_return(&self->open_count) < 0) {
73255 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
73256 - __func__, self->line, self->open_count);
73257 - self->open_count = 0;
73258 + __func__, self->line, local_read(&self->open_count));
73259 + local_set(&self->open_count, 0);
73260 }
73261 - if (self->open_count) {
73262 + if (local_read(&self->open_count)) {
73263 spin_unlock_irqrestore(&self->spinlock, flags);
73264
73265 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
73266 @@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_
73267 tty->closing = 0;
73268 self->tty = NULL;
73269
73270 - if (self->blocked_open) {
73271 + if (local_read(&self->blocked_open)) {
73272 if (self->close_delay)
73273 schedule_timeout_interruptible(self->close_delay);
73274 wake_up_interruptible(&self->open_wait);
73275 @@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty
73276 spin_lock_irqsave(&self->spinlock, flags);
73277 self->flags &= ~ASYNC_NORMAL_ACTIVE;
73278 self->tty = NULL;
73279 - self->open_count = 0;
73280 + local_set(&self->open_count, 0);
73281 spin_unlock_irqrestore(&self->spinlock, flags);
73282
73283 wake_up_interruptible(&self->open_wait);
73284 @@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct
73285 seq_putc(m, '\n');
73286
73287 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
73288 - seq_printf(m, "Open count: %d\n", self->open_count);
73289 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
73290 seq_printf(m, "Max data size: %d\n", self->max_data_size);
73291 seq_printf(m, "Max header size: %d\n", self->max_header_size);
73292
73293 diff -urNp linux-2.6.32.44/net/iucv/af_iucv.c linux-2.6.32.44/net/iucv/af_iucv.c
73294 --- linux-2.6.32.44/net/iucv/af_iucv.c 2011-03-27 14:31:47.000000000 -0400
73295 +++ linux-2.6.32.44/net/iucv/af_iucv.c 2011-05-04 17:56:28.000000000 -0400
73296 @@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct soc
73297
73298 write_lock_bh(&iucv_sk_list.lock);
73299
73300 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
73301 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
73302 while (__iucv_get_sock_by_name(name)) {
73303 sprintf(name, "%08x",
73304 - atomic_inc_return(&iucv_sk_list.autobind_name));
73305 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
73306 }
73307
73308 write_unlock_bh(&iucv_sk_list.lock);
73309 diff -urNp linux-2.6.32.44/net/key/af_key.c linux-2.6.32.44/net/key/af_key.c
73310 --- linux-2.6.32.44/net/key/af_key.c 2011-03-27 14:31:47.000000000 -0400
73311 +++ linux-2.6.32.44/net/key/af_key.c 2011-05-16 21:46:57.000000000 -0400
73312 @@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk
73313 struct xfrm_migrate m[XFRM_MAX_DEPTH];
73314 struct xfrm_kmaddress k;
73315
73316 + pax_track_stack();
73317 +
73318 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
73319 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
73320 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
73321 @@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_fil
73322 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
73323 else
73324 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
73325 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73326 + NULL,
73327 +#else
73328 s,
73329 +#endif
73330 atomic_read(&s->sk_refcnt),
73331 sk_rmem_alloc_get(s),
73332 sk_wmem_alloc_get(s),
73333 diff -urNp linux-2.6.32.44/net/lapb/lapb_iface.c linux-2.6.32.44/net/lapb/lapb_iface.c
73334 --- linux-2.6.32.44/net/lapb/lapb_iface.c 2011-03-27 14:31:47.000000000 -0400
73335 +++ linux-2.6.32.44/net/lapb/lapb_iface.c 2011-08-05 20:33:55.000000000 -0400
73336 @@ -157,7 +157,7 @@ int lapb_register(struct net_device *dev
73337 goto out;
73338
73339 lapb->dev = dev;
73340 - lapb->callbacks = *callbacks;
73341 + lapb->callbacks = callbacks;
73342
73343 __lapb_insert_cb(lapb);
73344
73345 @@ -379,32 +379,32 @@ int lapb_data_received(struct net_device
73346
73347 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
73348 {
73349 - if (lapb->callbacks.connect_confirmation)
73350 - lapb->callbacks.connect_confirmation(lapb->dev, reason);
73351 + if (lapb->callbacks->connect_confirmation)
73352 + lapb->callbacks->connect_confirmation(lapb->dev, reason);
73353 }
73354
73355 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
73356 {
73357 - if (lapb->callbacks.connect_indication)
73358 - lapb->callbacks.connect_indication(lapb->dev, reason);
73359 + if (lapb->callbacks->connect_indication)
73360 + lapb->callbacks->connect_indication(lapb->dev, reason);
73361 }
73362
73363 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
73364 {
73365 - if (lapb->callbacks.disconnect_confirmation)
73366 - lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
73367 + if (lapb->callbacks->disconnect_confirmation)
73368 + lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
73369 }
73370
73371 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
73372 {
73373 - if (lapb->callbacks.disconnect_indication)
73374 - lapb->callbacks.disconnect_indication(lapb->dev, reason);
73375 + if (lapb->callbacks->disconnect_indication)
73376 + lapb->callbacks->disconnect_indication(lapb->dev, reason);
73377 }
73378
73379 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
73380 {
73381 - if (lapb->callbacks.data_indication)
73382 - return lapb->callbacks.data_indication(lapb->dev, skb);
73383 + if (lapb->callbacks->data_indication)
73384 + return lapb->callbacks->data_indication(lapb->dev, skb);
73385
73386 kfree_skb(skb);
73387 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
73388 @@ -414,8 +414,8 @@ int lapb_data_transmit(struct lapb_cb *l
73389 {
73390 int used = 0;
73391
73392 - if (lapb->callbacks.data_transmit) {
73393 - lapb->callbacks.data_transmit(lapb->dev, skb);
73394 + if (lapb->callbacks->data_transmit) {
73395 + lapb->callbacks->data_transmit(lapb->dev, skb);
73396 used = 1;
73397 }
73398
73399 diff -urNp linux-2.6.32.44/net/mac80211/cfg.c linux-2.6.32.44/net/mac80211/cfg.c
73400 --- linux-2.6.32.44/net/mac80211/cfg.c 2011-03-27 14:31:47.000000000 -0400
73401 +++ linux-2.6.32.44/net/mac80211/cfg.c 2011-04-17 15:56:46.000000000 -0400
73402 @@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(st
73403 return err;
73404 }
73405
73406 -struct cfg80211_ops mac80211_config_ops = {
73407 +const struct cfg80211_ops mac80211_config_ops = {
73408 .add_virtual_intf = ieee80211_add_iface,
73409 .del_virtual_intf = ieee80211_del_iface,
73410 .change_virtual_intf = ieee80211_change_iface,
73411 diff -urNp linux-2.6.32.44/net/mac80211/cfg.h linux-2.6.32.44/net/mac80211/cfg.h
73412 --- linux-2.6.32.44/net/mac80211/cfg.h 2011-03-27 14:31:47.000000000 -0400
73413 +++ linux-2.6.32.44/net/mac80211/cfg.h 2011-04-17 15:56:46.000000000 -0400
73414 @@ -4,6 +4,6 @@
73415 #ifndef __CFG_H
73416 #define __CFG_H
73417
73418 -extern struct cfg80211_ops mac80211_config_ops;
73419 +extern const struct cfg80211_ops mac80211_config_ops;
73420
73421 #endif /* __CFG_H */
73422 diff -urNp linux-2.6.32.44/net/mac80211/debugfs_key.c linux-2.6.32.44/net/mac80211/debugfs_key.c
73423 --- linux-2.6.32.44/net/mac80211/debugfs_key.c 2011-03-27 14:31:47.000000000 -0400
73424 +++ linux-2.6.32.44/net/mac80211/debugfs_key.c 2011-04-17 15:56:46.000000000 -0400
73425 @@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file
73426 size_t count, loff_t *ppos)
73427 {
73428 struct ieee80211_key *key = file->private_data;
73429 - int i, res, bufsize = 2 * key->conf.keylen + 2;
73430 + int i, bufsize = 2 * key->conf.keylen + 2;
73431 char *buf = kmalloc(bufsize, GFP_KERNEL);
73432 char *p = buf;
73433 + ssize_t res;
73434 +
73435 + if (buf == NULL)
73436 + return -ENOMEM;
73437
73438 for (i = 0; i < key->conf.keylen; i++)
73439 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
73440 diff -urNp linux-2.6.32.44/net/mac80211/debugfs_sta.c linux-2.6.32.44/net/mac80211/debugfs_sta.c
73441 --- linux-2.6.32.44/net/mac80211/debugfs_sta.c 2011-03-27 14:31:47.000000000 -0400
73442 +++ linux-2.6.32.44/net/mac80211/debugfs_sta.c 2011-05-16 21:46:57.000000000 -0400
73443 @@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struc
73444 int i;
73445 struct sta_info *sta = file->private_data;
73446
73447 + pax_track_stack();
73448 +
73449 spin_lock_bh(&sta->lock);
73450 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
73451 sta->ampdu_mlme.dialog_token_allocator + 1);
73452 diff -urNp linux-2.6.32.44/net/mac80211/ieee80211_i.h linux-2.6.32.44/net/mac80211/ieee80211_i.h
73453 --- linux-2.6.32.44/net/mac80211/ieee80211_i.h 2011-03-27 14:31:47.000000000 -0400
73454 +++ linux-2.6.32.44/net/mac80211/ieee80211_i.h 2011-04-17 15:56:46.000000000 -0400
73455 @@ -25,6 +25,7 @@
73456 #include <linux/etherdevice.h>
73457 #include <net/cfg80211.h>
73458 #include <net/mac80211.h>
73459 +#include <asm/local.h>
73460 #include "key.h"
73461 #include "sta_info.h"
73462
73463 @@ -635,7 +636,7 @@ struct ieee80211_local {
73464 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
73465 spinlock_t queue_stop_reason_lock;
73466
73467 - int open_count;
73468 + local_t open_count;
73469 int monitors, cooked_mntrs;
73470 /* number of interfaces with corresponding FIF_ flags */
73471 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
73472 diff -urNp linux-2.6.32.44/net/mac80211/iface.c linux-2.6.32.44/net/mac80211/iface.c
73473 --- linux-2.6.32.44/net/mac80211/iface.c 2011-03-27 14:31:47.000000000 -0400
73474 +++ linux-2.6.32.44/net/mac80211/iface.c 2011-04-17 15:56:46.000000000 -0400
73475 @@ -166,7 +166,7 @@ static int ieee80211_open(struct net_dev
73476 break;
73477 }
73478
73479 - if (local->open_count == 0) {
73480 + if (local_read(&local->open_count) == 0) {
73481 res = drv_start(local);
73482 if (res)
73483 goto err_del_bss;
73484 @@ -196,7 +196,7 @@ static int ieee80211_open(struct net_dev
73485 * Validate the MAC address for this device.
73486 */
73487 if (!is_valid_ether_addr(dev->dev_addr)) {
73488 - if (!local->open_count)
73489 + if (!local_read(&local->open_count))
73490 drv_stop(local);
73491 return -EADDRNOTAVAIL;
73492 }
73493 @@ -292,7 +292,7 @@ static int ieee80211_open(struct net_dev
73494
73495 hw_reconf_flags |= __ieee80211_recalc_idle(local);
73496
73497 - local->open_count++;
73498 + local_inc(&local->open_count);
73499 if (hw_reconf_flags) {
73500 ieee80211_hw_config(local, hw_reconf_flags);
73501 /*
73502 @@ -320,7 +320,7 @@ static int ieee80211_open(struct net_dev
73503 err_del_interface:
73504 drv_remove_interface(local, &conf);
73505 err_stop:
73506 - if (!local->open_count)
73507 + if (!local_read(&local->open_count))
73508 drv_stop(local);
73509 err_del_bss:
73510 sdata->bss = NULL;
73511 @@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_dev
73512 WARN_ON(!list_empty(&sdata->u.ap.vlans));
73513 }
73514
73515 - local->open_count--;
73516 + local_dec(&local->open_count);
73517
73518 switch (sdata->vif.type) {
73519 case NL80211_IFTYPE_AP_VLAN:
73520 @@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_dev
73521
73522 ieee80211_recalc_ps(local, -1);
73523
73524 - if (local->open_count == 0) {
73525 + if (local_read(&local->open_count) == 0) {
73526 ieee80211_clear_tx_pending(local);
73527 ieee80211_stop_device(local);
73528
73529 diff -urNp linux-2.6.32.44/net/mac80211/main.c linux-2.6.32.44/net/mac80211/main.c
73530 --- linux-2.6.32.44/net/mac80211/main.c 2011-05-10 22:12:02.000000000 -0400
73531 +++ linux-2.6.32.44/net/mac80211/main.c 2011-05-10 22:12:34.000000000 -0400
73532 @@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211
73533 local->hw.conf.power_level = power;
73534 }
73535
73536 - if (changed && local->open_count) {
73537 + if (changed && local_read(&local->open_count)) {
73538 ret = drv_config(local, changed);
73539 /*
73540 * Goal:
73541 diff -urNp linux-2.6.32.44/net/mac80211/mlme.c linux-2.6.32.44/net/mac80211/mlme.c
73542 --- linux-2.6.32.44/net/mac80211/mlme.c 2011-08-09 18:35:30.000000000 -0400
73543 +++ linux-2.6.32.44/net/mac80211/mlme.c 2011-08-09 18:34:01.000000000 -0400
73544 @@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee
73545 bool have_higher_than_11mbit = false, newsta = false;
73546 u16 ap_ht_cap_flags;
73547
73548 + pax_track_stack();
73549 +
73550 /*
73551 * AssocResp and ReassocResp have identical structure, so process both
73552 * of them in this function.
73553 diff -urNp linux-2.6.32.44/net/mac80211/pm.c linux-2.6.32.44/net/mac80211/pm.c
73554 --- linux-2.6.32.44/net/mac80211/pm.c 2011-03-27 14:31:47.000000000 -0400
73555 +++ linux-2.6.32.44/net/mac80211/pm.c 2011-04-17 15:56:46.000000000 -0400
73556 @@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211
73557 }
73558
73559 /* stop hardware - this must stop RX */
73560 - if (local->open_count)
73561 + if (local_read(&local->open_count))
73562 ieee80211_stop_device(local);
73563
73564 local->suspended = true;
73565 diff -urNp linux-2.6.32.44/net/mac80211/rate.c linux-2.6.32.44/net/mac80211/rate.c
73566 --- linux-2.6.32.44/net/mac80211/rate.c 2011-03-27 14:31:47.000000000 -0400
73567 +++ linux-2.6.32.44/net/mac80211/rate.c 2011-04-17 15:56:46.000000000 -0400
73568 @@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct
73569 struct rate_control_ref *ref, *old;
73570
73571 ASSERT_RTNL();
73572 - if (local->open_count)
73573 + if (local_read(&local->open_count))
73574 return -EBUSY;
73575
73576 ref = rate_control_alloc(name, local);
73577 diff -urNp linux-2.6.32.44/net/mac80211/tx.c linux-2.6.32.44/net/mac80211/tx.c
73578 --- linux-2.6.32.44/net/mac80211/tx.c 2011-03-27 14:31:47.000000000 -0400
73579 +++ linux-2.6.32.44/net/mac80211/tx.c 2011-04-17 15:56:46.000000000 -0400
73580 @@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct
73581 return cpu_to_le16(dur);
73582 }
73583
73584 -static int inline is_ieee80211_device(struct ieee80211_local *local,
73585 +static inline int is_ieee80211_device(struct ieee80211_local *local,
73586 struct net_device *dev)
73587 {
73588 return local == wdev_priv(dev->ieee80211_ptr);
73589 diff -urNp linux-2.6.32.44/net/mac80211/util.c linux-2.6.32.44/net/mac80211/util.c
73590 --- linux-2.6.32.44/net/mac80211/util.c 2011-03-27 14:31:47.000000000 -0400
73591 +++ linux-2.6.32.44/net/mac80211/util.c 2011-04-17 15:56:46.000000000 -0400
73592 @@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_
73593 local->resuming = true;
73594
73595 /* restart hardware */
73596 - if (local->open_count) {
73597 + if (local_read(&local->open_count)) {
73598 /*
73599 * Upon resume hardware can sometimes be goofy due to
73600 * various platform / driver / bus issues, so restarting
73601 diff -urNp linux-2.6.32.44/net/netfilter/ipvs/ip_vs_app.c linux-2.6.32.44/net/netfilter/ipvs/ip_vs_app.c
73602 --- linux-2.6.32.44/net/netfilter/ipvs/ip_vs_app.c 2011-03-27 14:31:47.000000000 -0400
73603 +++ linux-2.6.32.44/net/netfilter/ipvs/ip_vs_app.c 2011-05-17 19:26:34.000000000 -0400
73604 @@ -564,7 +564,7 @@ static const struct file_operations ip_v
73605 .open = ip_vs_app_open,
73606 .read = seq_read,
73607 .llseek = seq_lseek,
73608 - .release = seq_release,
73609 + .release = seq_release_net,
73610 };
73611 #endif
73612
73613 diff -urNp linux-2.6.32.44/net/netfilter/ipvs/ip_vs_conn.c linux-2.6.32.44/net/netfilter/ipvs/ip_vs_conn.c
73614 --- linux-2.6.32.44/net/netfilter/ipvs/ip_vs_conn.c 2011-03-27 14:31:47.000000000 -0400
73615 +++ linux-2.6.32.44/net/netfilter/ipvs/ip_vs_conn.c 2011-05-17 19:26:34.000000000 -0400
73616 @@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
73617 /* if the connection is not template and is created
73618 * by sync, preserve the activity flag.
73619 */
73620 - cp->flags |= atomic_read(&dest->conn_flags) &
73621 + cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
73622 (~IP_VS_CONN_F_INACTIVE);
73623 else
73624 - cp->flags |= atomic_read(&dest->conn_flags);
73625 + cp->flags |= atomic_read_unchecked(&dest->conn_flags);
73626 cp->dest = dest;
73627
73628 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
73629 @@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const
73630 atomic_set(&cp->refcnt, 1);
73631
73632 atomic_set(&cp->n_control, 0);
73633 - atomic_set(&cp->in_pkts, 0);
73634 + atomic_set_unchecked(&cp->in_pkts, 0);
73635
73636 atomic_inc(&ip_vs_conn_count);
73637 if (flags & IP_VS_CONN_F_NO_CPORT)
73638 @@ -871,7 +871,7 @@ static const struct file_operations ip_v
73639 .open = ip_vs_conn_open,
73640 .read = seq_read,
73641 .llseek = seq_lseek,
73642 - .release = seq_release,
73643 + .release = seq_release_net,
73644 };
73645
73646 static const char *ip_vs_origin_name(unsigned flags)
73647 @@ -934,7 +934,7 @@ static const struct file_operations ip_v
73648 .open = ip_vs_conn_sync_open,
73649 .read = seq_read,
73650 .llseek = seq_lseek,
73651 - .release = seq_release,
73652 + .release = seq_release_net,
73653 };
73654
73655 #endif
73656 @@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip
73657
73658 /* Don't drop the entry if its number of incoming packets is not
73659 located in [0, 8] */
73660 - i = atomic_read(&cp->in_pkts);
73661 + i = atomic_read_unchecked(&cp->in_pkts);
73662 if (i > 8 || i < 0) return 0;
73663
73664 if (!todrop_rate[i]) return 0;
73665 diff -urNp linux-2.6.32.44/net/netfilter/ipvs/ip_vs_core.c linux-2.6.32.44/net/netfilter/ipvs/ip_vs_core.c
73666 --- linux-2.6.32.44/net/netfilter/ipvs/ip_vs_core.c 2011-03-27 14:31:47.000000000 -0400
73667 +++ linux-2.6.32.44/net/netfilter/ipvs/ip_vs_core.c 2011-05-04 17:56:28.000000000 -0400
73668 @@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *sv
73669 ret = cp->packet_xmit(skb, cp, pp);
73670 /* do not touch skb anymore */
73671
73672 - atomic_inc(&cp->in_pkts);
73673 + atomic_inc_unchecked(&cp->in_pkts);
73674 ip_vs_conn_put(cp);
73675 return ret;
73676 }
73677 @@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk
73678 * Sync connection if it is about to close to
73679 * encorage the standby servers to update the connections timeout
73680 */
73681 - pkts = atomic_add_return(1, &cp->in_pkts);
73682 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
73683 if (af == AF_INET &&
73684 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
73685 (((cp->protocol != IPPROTO_TCP ||
73686 diff -urNp linux-2.6.32.44/net/netfilter/ipvs/ip_vs_ctl.c linux-2.6.32.44/net/netfilter/ipvs/ip_vs_ctl.c
73687 --- linux-2.6.32.44/net/netfilter/ipvs/ip_vs_ctl.c 2011-03-27 14:31:47.000000000 -0400
73688 +++ linux-2.6.32.44/net/netfilter/ipvs/ip_vs_ctl.c 2011-05-17 19:26:34.000000000 -0400
73689 @@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service
73690 ip_vs_rs_hash(dest);
73691 write_unlock_bh(&__ip_vs_rs_lock);
73692 }
73693 - atomic_set(&dest->conn_flags, conn_flags);
73694 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
73695
73696 /* bind the service */
73697 if (!dest->svc) {
73698 @@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct se
73699 " %-7s %-6d %-10d %-10d\n",
73700 &dest->addr.in6,
73701 ntohs(dest->port),
73702 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
73703 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
73704 atomic_read(&dest->weight),
73705 atomic_read(&dest->activeconns),
73706 atomic_read(&dest->inactconns));
73707 @@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct se
73708 "%-7s %-6d %-10d %-10d\n",
73709 ntohl(dest->addr.ip),
73710 ntohs(dest->port),
73711 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
73712 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
73713 atomic_read(&dest->weight),
73714 atomic_read(&dest->activeconns),
73715 atomic_read(&dest->inactconns));
73716 @@ -1927,7 +1927,7 @@ static const struct file_operations ip_v
73717 .open = ip_vs_info_open,
73718 .read = seq_read,
73719 .llseek = seq_lseek,
73720 - .release = seq_release_private,
73721 + .release = seq_release_net,
73722 };
73723
73724 #endif
73725 @@ -1976,7 +1976,7 @@ static const struct file_operations ip_v
73726 .open = ip_vs_stats_seq_open,
73727 .read = seq_read,
73728 .llseek = seq_lseek,
73729 - .release = single_release,
73730 + .release = single_release_net,
73731 };
73732
73733 #endif
73734 @@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip
73735
73736 entry.addr = dest->addr.ip;
73737 entry.port = dest->port;
73738 - entry.conn_flags = atomic_read(&dest->conn_flags);
73739 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
73740 entry.weight = atomic_read(&dest->weight);
73741 entry.u_threshold = dest->u_threshold;
73742 entry.l_threshold = dest->l_threshold;
73743 @@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cm
73744 unsigned char arg[128];
73745 int ret = 0;
73746
73747 + pax_track_stack();
73748 +
73749 if (!capable(CAP_NET_ADMIN))
73750 return -EPERM;
73751
73752 @@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct s
73753 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
73754
73755 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
73756 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
73757 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
73758 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
73759 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
73760 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
73761 diff -urNp linux-2.6.32.44/net/netfilter/ipvs/ip_vs_sync.c linux-2.6.32.44/net/netfilter/ipvs/ip_vs_sync.c
73762 --- linux-2.6.32.44/net/netfilter/ipvs/ip_vs_sync.c 2011-03-27 14:31:47.000000000 -0400
73763 +++ linux-2.6.32.44/net/netfilter/ipvs/ip_vs_sync.c 2011-05-04 17:56:28.000000000 -0400
73764 @@ -438,7 +438,7 @@ static void ip_vs_process_message(const
73765
73766 if (opt)
73767 memcpy(&cp->in_seq, opt, sizeof(*opt));
73768 - atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
73769 + atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
73770 cp->state = state;
73771 cp->old_state = cp->state;
73772 /*
73773 diff -urNp linux-2.6.32.44/net/netfilter/ipvs/ip_vs_xmit.c linux-2.6.32.44/net/netfilter/ipvs/ip_vs_xmit.c
73774 --- linux-2.6.32.44/net/netfilter/ipvs/ip_vs_xmit.c 2011-03-27 14:31:47.000000000 -0400
73775 +++ linux-2.6.32.44/net/netfilter/ipvs/ip_vs_xmit.c 2011-05-04 17:56:28.000000000 -0400
73776 @@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
73777 else
73778 rc = NF_ACCEPT;
73779 /* do not touch skb anymore */
73780 - atomic_inc(&cp->in_pkts);
73781 + atomic_inc_unchecked(&cp->in_pkts);
73782 goto out;
73783 }
73784
73785 @@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
73786 else
73787 rc = NF_ACCEPT;
73788 /* do not touch skb anymore */
73789 - atomic_inc(&cp->in_pkts);
73790 + atomic_inc_unchecked(&cp->in_pkts);
73791 goto out;
73792 }
73793
73794 diff -urNp linux-2.6.32.44/net/netfilter/Kconfig linux-2.6.32.44/net/netfilter/Kconfig
73795 --- linux-2.6.32.44/net/netfilter/Kconfig 2011-03-27 14:31:47.000000000 -0400
73796 +++ linux-2.6.32.44/net/netfilter/Kconfig 2011-04-17 15:56:46.000000000 -0400
73797 @@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
73798
73799 To compile it as a module, choose M here. If unsure, say N.
73800
73801 +config NETFILTER_XT_MATCH_GRADM
73802 + tristate '"gradm" match support'
73803 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
73804 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
73805 + ---help---
73806 +	  The gradm match allows matching on whether the grsecurity RBAC
73807 +	  system is enabled. It is useful when iptables rules are applied
73808 +	  early during boot to prevent connections to the machine (except
73809 +	  from a trusted host) while the RBAC system is still disabled.
73810 +
73811 config NETFILTER_XT_MATCH_HASHLIMIT
73812 tristate '"hashlimit" match support'
73813 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
73814 diff -urNp linux-2.6.32.44/net/netfilter/Makefile linux-2.6.32.44/net/netfilter/Makefile
73815 --- linux-2.6.32.44/net/netfilter/Makefile 2011-03-27 14:31:47.000000000 -0400
73816 +++ linux-2.6.32.44/net/netfilter/Makefile 2011-04-17 15:56:46.000000000 -0400
73817 @@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRAC
73818 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
73819 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
73820 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
73821 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
73822 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
73823 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
73824 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
73825 diff -urNp linux-2.6.32.44/net/netfilter/nf_conntrack_netlink.c linux-2.6.32.44/net/netfilter/nf_conntrack_netlink.c
73826 --- linux-2.6.32.44/net/netfilter/nf_conntrack_netlink.c 2011-03-27 14:31:47.000000000 -0400
73827 +++ linux-2.6.32.44/net/netfilter/nf_conntrack_netlink.c 2011-04-17 15:56:46.000000000 -0400
73828 @@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlatt
73829 static int
73830 ctnetlink_parse_tuple(const struct nlattr * const cda[],
73831 struct nf_conntrack_tuple *tuple,
73832 - enum ctattr_tuple type, u_int8_t l3num)
73833 + enum ctattr_type type, u_int8_t l3num)
73834 {
73835 struct nlattr *tb[CTA_TUPLE_MAX+1];
73836 int err;
73837 diff -urNp linux-2.6.32.44/net/netfilter/nfnetlink_log.c linux-2.6.32.44/net/netfilter/nfnetlink_log.c
73838 --- linux-2.6.32.44/net/netfilter/nfnetlink_log.c 2011-03-27 14:31:47.000000000 -0400
73839 +++ linux-2.6.32.44/net/netfilter/nfnetlink_log.c 2011-05-04 17:56:28.000000000 -0400
73840 @@ -68,7 +68,7 @@ struct nfulnl_instance {
73841 };
73842
73843 static DEFINE_RWLOCK(instances_lock);
73844 -static atomic_t global_seq;
73845 +static atomic_unchecked_t global_seq;
73846
73847 #define INSTANCE_BUCKETS 16
73848 static struct hlist_head instance_table[INSTANCE_BUCKETS];
73849 @@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_ins
73850 /* global sequence number */
73851 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
73852 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
73853 - htonl(atomic_inc_return(&global_seq)));
73854 + htonl(atomic_inc_return_unchecked(&global_seq)));
73855
73856 if (data_len) {
73857 struct nlattr *nla;
73858 diff -urNp linux-2.6.32.44/net/netfilter/xt_gradm.c linux-2.6.32.44/net/netfilter/xt_gradm.c
73859 --- linux-2.6.32.44/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
73860 +++ linux-2.6.32.44/net/netfilter/xt_gradm.c 2011-04-17 15:56:46.000000000 -0400
73861 @@ -0,0 +1,51 @@
73862 +/*
73863 + * gradm match for netfilter
73864 + * Copyright © Zbigniew Krzystolik, 2010
73865 + *
73866 + * This program is free software; you can redistribute it and/or modify
73867 + * it under the terms of the GNU General Public License; either version
73868 + * 2 or 3 as published by the Free Software Foundation.
73869 + */
73870 +#include <linux/module.h>
73871 +#include <linux/moduleparam.h>
73872 +#include <linux/skbuff.h>
73873 +#include <linux/netfilter/x_tables.h>
73874 +#include <linux/grsecurity.h>
73875 +#include <linux/netfilter/xt_gradm.h>
73876 +
73877 +static bool
73878 +gradm_mt(const struct sk_buff *skb, const struct xt_match_param *par)
73879 +{
73880 + const struct xt_gradm_mtinfo *info = par->matchinfo;
73881 + bool retval = false;
73882 + if (gr_acl_is_enabled())
73883 + retval = true;
73884 + return retval ^ info->invflags;
73885 +}
73886 +
73887 +static struct xt_match gradm_mt_reg __read_mostly = {
73888 + .name = "gradm",
73889 + .revision = 0,
73890 + .family = NFPROTO_UNSPEC,
73891 + .match = gradm_mt,
73892 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
73893 + .me = THIS_MODULE,
73894 +};
73895 +
73896 +static int __init gradm_mt_init(void)
73897 +{
73898 + return xt_register_match(&gradm_mt_reg);
73899 +}
73900 +
73901 +static void __exit gradm_mt_exit(void)
73902 +{
73903 + xt_unregister_match(&gradm_mt_reg);
73904 +}
73905 +
73906 +module_init(gradm_mt_init);
73907 +module_exit(gradm_mt_exit);
73908 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
73909 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
73910 +MODULE_LICENSE("GPL");
73911 +MODULE_ALIAS("ipt_gradm");
73912 +MODULE_ALIAS("ip6t_gradm");
73913 diff -urNp linux-2.6.32.44/net/netlink/af_netlink.c linux-2.6.32.44/net/netlink/af_netlink.c
73914 --- linux-2.6.32.44/net/netlink/af_netlink.c 2011-03-27 14:31:47.000000000 -0400
73915 +++ linux-2.6.32.44/net/netlink/af_netlink.c 2011-05-04 17:56:28.000000000 -0400
73916 @@ -733,7 +733,7 @@ static void netlink_overrun(struct sock
73917 sk->sk_error_report(sk);
73918 }
73919 }
73920 - atomic_inc(&sk->sk_drops);
73921 + atomic_inc_unchecked(&sk->sk_drops);
73922 }
73923
73924 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
73925 @@ -1964,15 +1964,23 @@ static int netlink_seq_show(struct seq_f
73926 struct netlink_sock *nlk = nlk_sk(s);
73927
73928 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d\n",
73929 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73930 + NULL,
73931 +#else
73932 s,
73933 +#endif
73934 s->sk_protocol,
73935 nlk->pid,
73936 nlk->groups ? (u32)nlk->groups[0] : 0,
73937 sk_rmem_alloc_get(s),
73938 sk_wmem_alloc_get(s),
73939 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73940 + NULL,
73941 +#else
73942 nlk->cb,
73943 +#endif
73944 atomic_read(&s->sk_refcnt),
73945 - atomic_read(&s->sk_drops)
73946 + atomic_read_unchecked(&s->sk_drops)
73947 );
73948
73949 }
73950 diff -urNp linux-2.6.32.44/net/netrom/af_netrom.c linux-2.6.32.44/net/netrom/af_netrom.c
73951 --- linux-2.6.32.44/net/netrom/af_netrom.c 2011-03-27 14:31:47.000000000 -0400
73952 +++ linux-2.6.32.44/net/netrom/af_netrom.c 2011-04-17 15:56:46.000000000 -0400
73953 @@ -838,6 +838,7 @@ static int nr_getname(struct socket *soc
73954 struct sock *sk = sock->sk;
73955 struct nr_sock *nr = nr_sk(sk);
73956
73957 + memset(sax, 0, sizeof(*sax));
73958 lock_sock(sk);
73959 if (peer != 0) {
73960 if (sk->sk_state != TCP_ESTABLISHED) {
73961 @@ -852,7 +853,6 @@ static int nr_getname(struct socket *soc
73962 *uaddr_len = sizeof(struct full_sockaddr_ax25);
73963 } else {
73964 sax->fsa_ax25.sax25_family = AF_NETROM;
73965 - sax->fsa_ax25.sax25_ndigis = 0;
73966 sax->fsa_ax25.sax25_call = nr->source_addr;
73967 *uaddr_len = sizeof(struct sockaddr_ax25);
73968 }
73969 diff -urNp linux-2.6.32.44/net/packet/af_packet.c linux-2.6.32.44/net/packet/af_packet.c
73970 --- linux-2.6.32.44/net/packet/af_packet.c 2011-07-13 17:23:04.000000000 -0400
73971 +++ linux-2.6.32.44/net/packet/af_packet.c 2011-07-13 17:23:27.000000000 -0400
73972 @@ -2429,7 +2429,11 @@ static int packet_seq_show(struct seq_fi
73973
73974 seq_printf(seq,
73975 "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
73976 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73977 + NULL,
73978 +#else
73979 s,
73980 +#endif
73981 atomic_read(&s->sk_refcnt),
73982 s->sk_type,
73983 ntohs(po->num),
73984 diff -urNp linux-2.6.32.44/net/phonet/af_phonet.c linux-2.6.32.44/net/phonet/af_phonet.c
73985 --- linux-2.6.32.44/net/phonet/af_phonet.c 2011-03-27 14:31:47.000000000 -0400
73986 +++ linux-2.6.32.44/net/phonet/af_phonet.c 2011-04-17 15:56:46.000000000 -0400
73987 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_pr
73988 {
73989 struct phonet_protocol *pp;
73990
73991 - if (protocol >= PHONET_NPROTO)
73992 + if (protocol < 0 || protocol >= PHONET_NPROTO)
73993 return NULL;
73994
73995 spin_lock(&proto_tab_lock);
73996 @@ -402,7 +402,7 @@ int __init_or_module phonet_proto_regist
73997 {
73998 int err = 0;
73999
74000 - if (protocol >= PHONET_NPROTO)
74001 + if (protocol < 0 || protocol >= PHONET_NPROTO)
74002 return -EINVAL;
74003
74004 err = proto_register(pp->prot, 1);
74005 diff -urNp linux-2.6.32.44/net/phonet/datagram.c linux-2.6.32.44/net/phonet/datagram.c
74006 --- linux-2.6.32.44/net/phonet/datagram.c 2011-03-27 14:31:47.000000000 -0400
74007 +++ linux-2.6.32.44/net/phonet/datagram.c 2011-05-04 17:56:28.000000000 -0400
74008 @@ -162,7 +162,7 @@ static int pn_backlog_rcv(struct sock *s
74009 if (err < 0) {
74010 kfree_skb(skb);
74011 if (err == -ENOMEM)
74012 - atomic_inc(&sk->sk_drops);
74013 + atomic_inc_unchecked(&sk->sk_drops);
74014 }
74015 return err ? NET_RX_DROP : NET_RX_SUCCESS;
74016 }
74017 diff -urNp linux-2.6.32.44/net/phonet/pep.c linux-2.6.32.44/net/phonet/pep.c
74018 --- linux-2.6.32.44/net/phonet/pep.c 2011-03-27 14:31:47.000000000 -0400
74019 +++ linux-2.6.32.44/net/phonet/pep.c 2011-05-04 17:56:28.000000000 -0400
74020 @@ -348,7 +348,7 @@ static int pipe_do_rcv(struct sock *sk,
74021
74022 case PNS_PEP_CTRL_REQ:
74023 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
74024 - atomic_inc(&sk->sk_drops);
74025 + atomic_inc_unchecked(&sk->sk_drops);
74026 break;
74027 }
74028 __skb_pull(skb, 4);
74029 @@ -362,12 +362,12 @@ static int pipe_do_rcv(struct sock *sk,
74030 if (!err)
74031 return 0;
74032 if (err == -ENOMEM)
74033 - atomic_inc(&sk->sk_drops);
74034 + atomic_inc_unchecked(&sk->sk_drops);
74035 break;
74036 }
74037
74038 if (pn->rx_credits == 0) {
74039 - atomic_inc(&sk->sk_drops);
74040 + atomic_inc_unchecked(&sk->sk_drops);
74041 err = -ENOBUFS;
74042 break;
74043 }
74044 diff -urNp linux-2.6.32.44/net/phonet/socket.c linux-2.6.32.44/net/phonet/socket.c
74045 --- linux-2.6.32.44/net/phonet/socket.c 2011-03-27 14:31:47.000000000 -0400
74046 +++ linux-2.6.32.44/net/phonet/socket.c 2011-05-04 17:57:07.000000000 -0400
74047 @@ -482,8 +482,13 @@ static int pn_sock_seq_show(struct seq_f
74048 sk->sk_state,
74049 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
74050 sock_i_uid(sk), sock_i_ino(sk),
74051 - atomic_read(&sk->sk_refcnt), sk,
74052 - atomic_read(&sk->sk_drops), &len);
74053 + atomic_read(&sk->sk_refcnt),
74054 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74055 + NULL,
74056 +#else
74057 + sk,
74058 +#endif
74059 + atomic_read_unchecked(&sk->sk_drops), &len);
74060 }
74061 seq_printf(seq, "%*s\n", 127 - len, "");
74062 return 0;
74063 diff -urNp linux-2.6.32.44/net/rds/cong.c linux-2.6.32.44/net/rds/cong.c
74064 --- linux-2.6.32.44/net/rds/cong.c 2011-03-27 14:31:47.000000000 -0400
74065 +++ linux-2.6.32.44/net/rds/cong.c 2011-05-04 17:56:28.000000000 -0400
74066 @@ -77,7 +77,7 @@
74067 * finds that the saved generation number is smaller than the global generation
74068 * number, it wakes up the process.
74069 */
74070 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
74071 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
74072
74073 /*
74074 * Congestion monitoring
74075 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_con
74076 rdsdebug("waking map %p for %pI4\n",
74077 map, &map->m_addr);
74078 rds_stats_inc(s_cong_update_received);
74079 - atomic_inc(&rds_cong_generation);
74080 + atomic_inc_unchecked(&rds_cong_generation);
74081 if (waitqueue_active(&map->m_waitq))
74082 wake_up(&map->m_waitq);
74083 if (waitqueue_active(&rds_poll_waitq))
74084 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
74085
74086 int rds_cong_updated_since(unsigned long *recent)
74087 {
74088 - unsigned long gen = atomic_read(&rds_cong_generation);
74089 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
74090
74091 if (likely(*recent == gen))
74092 return 0;
74093 diff -urNp linux-2.6.32.44/net/rds/iw_rdma.c linux-2.6.32.44/net/rds/iw_rdma.c
74094 --- linux-2.6.32.44/net/rds/iw_rdma.c 2011-03-27 14:31:47.000000000 -0400
74095 +++ linux-2.6.32.44/net/rds/iw_rdma.c 2011-05-16 21:46:57.000000000 -0400
74096 @@ -181,6 +181,8 @@ int rds_iw_update_cm_id(struct rds_iw_de
74097 struct rdma_cm_id *pcm_id;
74098 int rc;
74099
74100 + pax_track_stack();
74101 +
74102 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
74103 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
74104
74105 diff -urNp linux-2.6.32.44/net/rds/Kconfig linux-2.6.32.44/net/rds/Kconfig
74106 --- linux-2.6.32.44/net/rds/Kconfig 2011-03-27 14:31:47.000000000 -0400
74107 +++ linux-2.6.32.44/net/rds/Kconfig 2011-04-17 15:56:46.000000000 -0400
74108 @@ -1,7 +1,7 @@
74109
74110 config RDS
74111 tristate "The RDS Protocol (EXPERIMENTAL)"
74112 - depends on INET && EXPERIMENTAL
74113 + depends on INET && EXPERIMENTAL && BROKEN
74114 ---help---
74115 The RDS (Reliable Datagram Sockets) protocol provides reliable,
74116 sequenced delivery of datagrams over Infiniband, iWARP,
74117 diff -urNp linux-2.6.32.44/net/rxrpc/af_rxrpc.c linux-2.6.32.44/net/rxrpc/af_rxrpc.c
74118 --- linux-2.6.32.44/net/rxrpc/af_rxrpc.c 2011-03-27 14:31:47.000000000 -0400
74119 +++ linux-2.6.32.44/net/rxrpc/af_rxrpc.c 2011-05-04 17:56:28.000000000 -0400
74120 @@ -38,7 +38,7 @@ static const struct proto_ops rxrpc_rpc_
74121 __be32 rxrpc_epoch;
74122
74123 /* current debugging ID */
74124 -atomic_t rxrpc_debug_id;
74125 +atomic_unchecked_t rxrpc_debug_id;
74126
74127 /* count of skbs currently in use */
74128 atomic_t rxrpc_n_skbs;
74129 diff -urNp linux-2.6.32.44/net/rxrpc/ar-ack.c linux-2.6.32.44/net/rxrpc/ar-ack.c
74130 --- linux-2.6.32.44/net/rxrpc/ar-ack.c 2011-03-27 14:31:47.000000000 -0400
74131 +++ linux-2.6.32.44/net/rxrpc/ar-ack.c 2011-05-16 21:46:57.000000000 -0400
74132 @@ -174,7 +174,7 @@ static void rxrpc_resend(struct rxrpc_ca
74133
74134 _enter("{%d,%d,%d,%d},",
74135 call->acks_hard, call->acks_unacked,
74136 - atomic_read(&call->sequence),
74137 + atomic_read_unchecked(&call->sequence),
74138 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
74139
74140 stop = 0;
74141 @@ -198,7 +198,7 @@ static void rxrpc_resend(struct rxrpc_ca
74142
74143 /* each Tx packet has a new serial number */
74144 sp->hdr.serial =
74145 - htonl(atomic_inc_return(&call->conn->serial));
74146 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
74147
74148 hdr = (struct rxrpc_header *) txb->head;
74149 hdr->serial = sp->hdr.serial;
74150 @@ -401,7 +401,7 @@ static void rxrpc_rotate_tx_window(struc
74151 */
74152 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
74153 {
74154 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
74155 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
74156 }
74157
74158 /*
74159 @@ -627,7 +627,7 @@ process_further:
74160
74161 latest = ntohl(sp->hdr.serial);
74162 hard = ntohl(ack.firstPacket);
74163 - tx = atomic_read(&call->sequence);
74164 + tx = atomic_read_unchecked(&call->sequence);
74165
74166 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
74167 latest,
74168 @@ -840,6 +840,8 @@ void rxrpc_process_call(struct work_stru
74169 u32 abort_code = RX_PROTOCOL_ERROR;
74170 u8 *acks = NULL;
74171
74172 + pax_track_stack();
74173 +
74174 //printk("\n--------------------\n");
74175 _enter("{%d,%s,%lx} [%lu]",
74176 call->debug_id, rxrpc_call_states[call->state], call->events,
74177 @@ -1159,7 +1161,7 @@ void rxrpc_process_call(struct work_stru
74178 goto maybe_reschedule;
74179
74180 send_ACK_with_skew:
74181 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
74182 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
74183 ntohl(ack.serial));
74184 send_ACK:
74185 mtu = call->conn->trans->peer->if_mtu;
74186 @@ -1171,7 +1173,7 @@ send_ACK:
74187 ackinfo.rxMTU = htonl(5692);
74188 ackinfo.jumbo_max = htonl(4);
74189
74190 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
74191 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
74192 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
74193 ntohl(hdr.serial),
74194 ntohs(ack.maxSkew),
74195 @@ -1189,7 +1191,7 @@ send_ACK:
74196 send_message:
74197 _debug("send message");
74198
74199 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
74200 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
74201 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
74202 send_message_2:
74203
74204 diff -urNp linux-2.6.32.44/net/rxrpc/ar-call.c linux-2.6.32.44/net/rxrpc/ar-call.c
74205 --- linux-2.6.32.44/net/rxrpc/ar-call.c 2011-03-27 14:31:47.000000000 -0400
74206 +++ linux-2.6.32.44/net/rxrpc/ar-call.c 2011-05-04 17:56:28.000000000 -0400
74207 @@ -82,7 +82,7 @@ static struct rxrpc_call *rxrpc_alloc_ca
74208 spin_lock_init(&call->lock);
74209 rwlock_init(&call->state_lock);
74210 atomic_set(&call->usage, 1);
74211 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
74212 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
74213 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
74214
74215 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
74216 diff -urNp linux-2.6.32.44/net/rxrpc/ar-connection.c linux-2.6.32.44/net/rxrpc/ar-connection.c
74217 --- linux-2.6.32.44/net/rxrpc/ar-connection.c 2011-03-27 14:31:47.000000000 -0400
74218 +++ linux-2.6.32.44/net/rxrpc/ar-connection.c 2011-05-04 17:56:28.000000000 -0400
74219 @@ -205,7 +205,7 @@ static struct rxrpc_connection *rxrpc_al
74220 rwlock_init(&conn->lock);
74221 spin_lock_init(&conn->state_lock);
74222 atomic_set(&conn->usage, 1);
74223 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
74224 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
74225 conn->avail_calls = RXRPC_MAXCALLS;
74226 conn->size_align = 4;
74227 conn->header_size = sizeof(struct rxrpc_header);
74228 diff -urNp linux-2.6.32.44/net/rxrpc/ar-connevent.c linux-2.6.32.44/net/rxrpc/ar-connevent.c
74229 --- linux-2.6.32.44/net/rxrpc/ar-connevent.c 2011-03-27 14:31:47.000000000 -0400
74230 +++ linux-2.6.32.44/net/rxrpc/ar-connevent.c 2011-05-04 17:56:28.000000000 -0400
74231 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct
74232
74233 len = iov[0].iov_len + iov[1].iov_len;
74234
74235 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
74236 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
74237 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
74238
74239 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
74240 diff -urNp linux-2.6.32.44/net/rxrpc/ar-input.c linux-2.6.32.44/net/rxrpc/ar-input.c
74241 --- linux-2.6.32.44/net/rxrpc/ar-input.c 2011-03-27 14:31:47.000000000 -0400
74242 +++ linux-2.6.32.44/net/rxrpc/ar-input.c 2011-05-04 17:56:28.000000000 -0400
74243 @@ -339,9 +339,9 @@ void rxrpc_fast_process_packet(struct rx
74244 /* track the latest serial number on this connection for ACK packet
74245 * information */
74246 serial = ntohl(sp->hdr.serial);
74247 - hi_serial = atomic_read(&call->conn->hi_serial);
74248 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
74249 while (serial > hi_serial)
74250 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
74251 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
74252 serial);
74253
74254 /* request ACK generation for any ACK or DATA packet that requests
74255 diff -urNp linux-2.6.32.44/net/rxrpc/ar-internal.h linux-2.6.32.44/net/rxrpc/ar-internal.h
74256 --- linux-2.6.32.44/net/rxrpc/ar-internal.h 2011-03-27 14:31:47.000000000 -0400
74257 +++ linux-2.6.32.44/net/rxrpc/ar-internal.h 2011-05-04 17:56:28.000000000 -0400
74258 @@ -272,8 +272,8 @@ struct rxrpc_connection {
74259 int error; /* error code for local abort */
74260 int debug_id; /* debug ID for printks */
74261 unsigned call_counter; /* call ID counter */
74262 - atomic_t serial; /* packet serial number counter */
74263 - atomic_t hi_serial; /* highest serial number received */
74264 + atomic_unchecked_t serial; /* packet serial number counter */
74265 + atomic_unchecked_t hi_serial; /* highest serial number received */
74266 u8 avail_calls; /* number of calls available */
74267 u8 size_align; /* data size alignment (for security) */
74268 u8 header_size; /* rxrpc + security header size */
74269 @@ -346,7 +346,7 @@ struct rxrpc_call {
74270 spinlock_t lock;
74271 rwlock_t state_lock; /* lock for state transition */
74272 atomic_t usage;
74273 - atomic_t sequence; /* Tx data packet sequence counter */
74274 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
74275 u32 abort_code; /* local/remote abort code */
74276 enum { /* current state of call */
74277 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
74278 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(stru
74279 */
74280 extern atomic_t rxrpc_n_skbs;
74281 extern __be32 rxrpc_epoch;
74282 -extern atomic_t rxrpc_debug_id;
74283 +extern atomic_unchecked_t rxrpc_debug_id;
74284 extern struct workqueue_struct *rxrpc_workqueue;
74285
74286 /*
74287 diff -urNp linux-2.6.32.44/net/rxrpc/ar-key.c linux-2.6.32.44/net/rxrpc/ar-key.c
74288 --- linux-2.6.32.44/net/rxrpc/ar-key.c 2011-03-27 14:31:47.000000000 -0400
74289 +++ linux-2.6.32.44/net/rxrpc/ar-key.c 2011-04-17 15:56:46.000000000 -0400
74290 @@ -88,11 +88,11 @@ static int rxrpc_instantiate_xdr_rxkad(s
74291 return ret;
74292
74293 plen -= sizeof(*token);
74294 - token = kmalloc(sizeof(*token), GFP_KERNEL);
74295 + token = kzalloc(sizeof(*token), GFP_KERNEL);
74296 if (!token)
74297 return -ENOMEM;
74298
74299 - token->kad = kmalloc(plen, GFP_KERNEL);
74300 + token->kad = kzalloc(plen, GFP_KERNEL);
74301 if (!token->kad) {
74302 kfree(token);
74303 return -ENOMEM;
74304 @@ -730,10 +730,10 @@ static int rxrpc_instantiate(struct key
74305 goto error;
74306
74307 ret = -ENOMEM;
74308 - token = kmalloc(sizeof(*token), GFP_KERNEL);
74309 + token = kzalloc(sizeof(*token), GFP_KERNEL);
74310 if (!token)
74311 goto error;
74312 - token->kad = kmalloc(plen, GFP_KERNEL);
74313 + token->kad = kzalloc(plen, GFP_KERNEL);
74314 if (!token->kad)
74315 goto error_free;
74316
74317 diff -urNp linux-2.6.32.44/net/rxrpc/ar-local.c linux-2.6.32.44/net/rxrpc/ar-local.c
74318 --- linux-2.6.32.44/net/rxrpc/ar-local.c 2011-03-27 14:31:47.000000000 -0400
74319 +++ linux-2.6.32.44/net/rxrpc/ar-local.c 2011-05-04 17:56:28.000000000 -0400
74320 @@ -44,7 +44,7 @@ struct rxrpc_local *rxrpc_alloc_local(st
74321 spin_lock_init(&local->lock);
74322 rwlock_init(&local->services_lock);
74323 atomic_set(&local->usage, 1);
74324 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
74325 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
74326 memcpy(&local->srx, srx, sizeof(*srx));
74327 }
74328
74329 diff -urNp linux-2.6.32.44/net/rxrpc/ar-output.c linux-2.6.32.44/net/rxrpc/ar-output.c
74330 --- linux-2.6.32.44/net/rxrpc/ar-output.c 2011-03-27 14:31:47.000000000 -0400
74331 +++ linux-2.6.32.44/net/rxrpc/ar-output.c 2011-05-04 17:56:28.000000000 -0400
74332 @@ -680,9 +680,9 @@ static int rxrpc_send_data(struct kiocb
74333 sp->hdr.cid = call->cid;
74334 sp->hdr.callNumber = call->call_id;
74335 sp->hdr.seq =
74336 - htonl(atomic_inc_return(&call->sequence));
74337 + htonl(atomic_inc_return_unchecked(&call->sequence));
74338 sp->hdr.serial =
74339 - htonl(atomic_inc_return(&conn->serial));
74340 + htonl(atomic_inc_return_unchecked(&conn->serial));
74341 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
74342 sp->hdr.userStatus = 0;
74343 sp->hdr.securityIndex = conn->security_ix;
74344 diff -urNp linux-2.6.32.44/net/rxrpc/ar-peer.c linux-2.6.32.44/net/rxrpc/ar-peer.c
74345 --- linux-2.6.32.44/net/rxrpc/ar-peer.c 2011-03-27 14:31:47.000000000 -0400
74346 +++ linux-2.6.32.44/net/rxrpc/ar-peer.c 2011-05-04 17:56:28.000000000 -0400
74347 @@ -86,7 +86,7 @@ static struct rxrpc_peer *rxrpc_alloc_pe
74348 INIT_LIST_HEAD(&peer->error_targets);
74349 spin_lock_init(&peer->lock);
74350 atomic_set(&peer->usage, 1);
74351 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
74352 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
74353 memcpy(&peer->srx, srx, sizeof(*srx));
74354
74355 rxrpc_assess_MTU_size(peer);
74356 diff -urNp linux-2.6.32.44/net/rxrpc/ar-proc.c linux-2.6.32.44/net/rxrpc/ar-proc.c
74357 --- linux-2.6.32.44/net/rxrpc/ar-proc.c 2011-03-27 14:31:47.000000000 -0400
74358 +++ linux-2.6.32.44/net/rxrpc/ar-proc.c 2011-05-04 17:56:28.000000000 -0400
74359 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(str
74360 atomic_read(&conn->usage),
74361 rxrpc_conn_states[conn->state],
74362 key_serial(conn->key),
74363 - atomic_read(&conn->serial),
74364 - atomic_read(&conn->hi_serial));
74365 + atomic_read_unchecked(&conn->serial),
74366 + atomic_read_unchecked(&conn->hi_serial));
74367
74368 return 0;
74369 }
74370 diff -urNp linux-2.6.32.44/net/rxrpc/ar-transport.c linux-2.6.32.44/net/rxrpc/ar-transport.c
74371 --- linux-2.6.32.44/net/rxrpc/ar-transport.c 2011-03-27 14:31:47.000000000 -0400
74372 +++ linux-2.6.32.44/net/rxrpc/ar-transport.c 2011-05-04 17:56:28.000000000 -0400
74373 @@ -46,7 +46,7 @@ static struct rxrpc_transport *rxrpc_all
74374 spin_lock_init(&trans->client_lock);
74375 rwlock_init(&trans->conn_lock);
74376 atomic_set(&trans->usage, 1);
74377 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
74378 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
74379
74380 if (peer->srx.transport.family == AF_INET) {
74381 switch (peer->srx.transport_type) {
74382 diff -urNp linux-2.6.32.44/net/rxrpc/rxkad.c linux-2.6.32.44/net/rxrpc/rxkad.c
74383 --- linux-2.6.32.44/net/rxrpc/rxkad.c 2011-03-27 14:31:47.000000000 -0400
74384 +++ linux-2.6.32.44/net/rxrpc/rxkad.c 2011-05-16 21:46:57.000000000 -0400
74385 @@ -210,6 +210,8 @@ static int rxkad_secure_packet_encrypt(c
74386 u16 check;
74387 int nsg;
74388
74389 + pax_track_stack();
74390 +
74391 sp = rxrpc_skb(skb);
74392
74393 _enter("");
74394 @@ -337,6 +339,8 @@ static int rxkad_verify_packet_auth(cons
74395 u16 check;
74396 int nsg;
74397
74398 + pax_track_stack();
74399 +
74400 _enter("");
74401
74402 sp = rxrpc_skb(skb);
74403 @@ -609,7 +613,7 @@ static int rxkad_issue_challenge(struct
74404
74405 len = iov[0].iov_len + iov[1].iov_len;
74406
74407 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
74408 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
74409 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
74410
74411 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
74412 @@ -659,7 +663,7 @@ static int rxkad_send_response(struct rx
74413
74414 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
74415
74416 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
74417 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
74418 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
74419
74420 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
74421 diff -urNp linux-2.6.32.44/net/sctp/proc.c linux-2.6.32.44/net/sctp/proc.c
74422 --- linux-2.6.32.44/net/sctp/proc.c 2011-03-27 14:31:47.000000000 -0400
74423 +++ linux-2.6.32.44/net/sctp/proc.c 2011-04-17 15:56:46.000000000 -0400
74424 @@ -213,7 +213,12 @@ static int sctp_eps_seq_show(struct seq_
74425 sctp_for_each_hentry(epb, node, &head->chain) {
74426 ep = sctp_ep(epb);
74427 sk = epb->sk;
74428 - seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
74429 + seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ",
74430 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74431 + NULL, NULL,
74432 +#else
74433 + ep, sk,
74434 +#endif
74435 sctp_sk(sk)->type, sk->sk_state, hash,
74436 epb->bind_addr.port,
74437 sock_i_uid(sk), sock_i_ino(sk));
74438 @@ -320,7 +325,12 @@ static int sctp_assocs_seq_show(struct s
74439 seq_printf(seq,
74440 "%8p %8p %-3d %-3d %-2d %-4d "
74441 "%4d %8d %8d %7d %5lu %-5d %5d ",
74442 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
74443 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74444 + NULL, NULL,
74445 +#else
74446 + assoc, sk,
74447 +#endif
74448 + sctp_sk(sk)->type, sk->sk_state,
74449 assoc->state, hash,
74450 assoc->assoc_id,
74451 assoc->sndbuf_used,
74452 diff -urNp linux-2.6.32.44/net/sctp/socket.c linux-2.6.32.44/net/sctp/socket.c
74453 --- linux-2.6.32.44/net/sctp/socket.c 2011-03-27 14:31:47.000000000 -0400
74454 +++ linux-2.6.32.44/net/sctp/socket.c 2011-04-23 12:56:11.000000000 -0400
74455 @@ -5802,7 +5802,6 @@ pp_found:
74456 */
74457 int reuse = sk->sk_reuse;
74458 struct sock *sk2;
74459 - struct hlist_node *node;
74460
74461 SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n");
74462 if (pp->fastreuse && sk->sk_reuse &&
74463 diff -urNp linux-2.6.32.44/net/socket.c linux-2.6.32.44/net/socket.c
74464 --- linux-2.6.32.44/net/socket.c 2011-03-27 14:31:47.000000000 -0400
74465 +++ linux-2.6.32.44/net/socket.c 2011-05-16 21:46:57.000000000 -0400
74466 @@ -87,6 +87,7 @@
74467 #include <linux/wireless.h>
74468 #include <linux/nsproxy.h>
74469 #include <linux/magic.h>
74470 +#include <linux/in.h>
74471
74472 #include <asm/uaccess.h>
74473 #include <asm/unistd.h>
74474 @@ -97,6 +98,21 @@
74475 #include <net/sock.h>
74476 #include <linux/netfilter.h>
74477
74478 +extern void gr_attach_curr_ip(const struct sock *sk);
74479 +extern int gr_handle_sock_all(const int family, const int type,
74480 + const int protocol);
74481 +extern int gr_handle_sock_server(const struct sockaddr *sck);
74482 +extern int gr_handle_sock_server_other(const struct sock *sck);
74483 +extern int gr_handle_sock_client(const struct sockaddr *sck);
74484 +extern int gr_search_connect(struct socket * sock,
74485 + struct sockaddr_in * addr);
74486 +extern int gr_search_bind(struct socket * sock,
74487 + struct sockaddr_in * addr);
74488 +extern int gr_search_listen(struct socket * sock);
74489 +extern int gr_search_accept(struct socket * sock);
74490 +extern int gr_search_socket(const int domain, const int type,
74491 + const int protocol);
74492 +
74493 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
74494 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
74495 unsigned long nr_segs, loff_t pos);
74496 @@ -298,7 +314,7 @@ static int sockfs_get_sb(struct file_sys
74497 mnt);
74498 }
74499
74500 -static struct vfsmount *sock_mnt __read_mostly;
74501 +struct vfsmount *sock_mnt __read_mostly;
74502
74503 static struct file_system_type sock_fs_type = {
74504 .name = "sockfs",
74505 @@ -1154,6 +1170,8 @@ static int __sock_create(struct net *net
74506 return -EAFNOSUPPORT;
74507 if (type < 0 || type >= SOCK_MAX)
74508 return -EINVAL;
74509 + if (protocol < 0)
74510 + return -EINVAL;
74511
74512 /* Compatibility.
74513
74514 @@ -1283,6 +1301,16 @@ SYSCALL_DEFINE3(socket, int, family, int
74515 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
74516 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
74517
74518 + if(!gr_search_socket(family, type, protocol)) {
74519 + retval = -EACCES;
74520 + goto out;
74521 + }
74522 +
74523 + if (gr_handle_sock_all(family, type, protocol)) {
74524 + retval = -EACCES;
74525 + goto out;
74526 + }
74527 +
74528 retval = sock_create(family, type, protocol, &sock);
74529 if (retval < 0)
74530 goto out;
74531 @@ -1415,6 +1443,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
74532 if (sock) {
74533 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
74534 if (err >= 0) {
74535 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
74536 + err = -EACCES;
74537 + goto error;
74538 + }
74539 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
74540 + if (err)
74541 + goto error;
74542 +
74543 err = security_socket_bind(sock,
74544 (struct sockaddr *)&address,
74545 addrlen);
74546 @@ -1423,6 +1459,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
74547 (struct sockaddr *)
74548 &address, addrlen);
74549 }
74550 +error:
74551 fput_light(sock->file, fput_needed);
74552 }
74553 return err;
74554 @@ -1446,10 +1483,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, ba
74555 if ((unsigned)backlog > somaxconn)
74556 backlog = somaxconn;
74557
74558 + if (gr_handle_sock_server_other(sock->sk)) {
74559 + err = -EPERM;
74560 + goto error;
74561 + }
74562 +
74563 + err = gr_search_listen(sock);
74564 + if (err)
74565 + goto error;
74566 +
74567 err = security_socket_listen(sock, backlog);
74568 if (!err)
74569 err = sock->ops->listen(sock, backlog);
74570
74571 +error:
74572 fput_light(sock->file, fput_needed);
74573 }
74574 return err;
74575 @@ -1492,6 +1539,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
74576 newsock->type = sock->type;
74577 newsock->ops = sock->ops;
74578
74579 + if (gr_handle_sock_server_other(sock->sk)) {
74580 + err = -EPERM;
74581 + sock_release(newsock);
74582 + goto out_put;
74583 + }
74584 +
74585 + err = gr_search_accept(sock);
74586 + if (err) {
74587 + sock_release(newsock);
74588 + goto out_put;
74589 + }
74590 +
74591 /*
74592 * We don't need try_module_get here, as the listening socket (sock)
74593 * has the protocol module (sock->ops->owner) held.
74594 @@ -1534,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
74595 fd_install(newfd, newfile);
74596 err = newfd;
74597
74598 + gr_attach_curr_ip(newsock->sk);
74599 +
74600 out_put:
74601 fput_light(sock->file, fput_needed);
74602 out:
74603 @@ -1571,6 +1632,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct
74604 int, addrlen)
74605 {
74606 struct socket *sock;
74607 + struct sockaddr *sck;
74608 struct sockaddr_storage address;
74609 int err, fput_needed;
74610
74611 @@ -1581,6 +1643,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct
74612 if (err < 0)
74613 goto out_put;
74614
74615 + sck = (struct sockaddr *)&address;
74616 +
74617 + if (gr_handle_sock_client(sck)) {
74618 + err = -EACCES;
74619 + goto out_put;
74620 + }
74621 +
74622 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
74623 + if (err)
74624 + goto out_put;
74625 +
74626 err =
74627 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
74628 if (err)
74629 @@ -1882,6 +1955,8 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct
74630 int err, ctl_len, iov_size, total_len;
74631 int fput_needed;
74632
74633 + pax_track_stack();
74634 +
74635 err = -EFAULT;
74636 if (MSG_CMSG_COMPAT & flags) {
74637 if (get_compat_msghdr(&msg_sys, msg_compat))
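
The net/socket.c hunks above follow one consistent convention at every call site: gr_handle_* hooks return nonzero to deny and the caller picks the errno (-EACCES or -EPERM), while gr_search_* hooks return 0 to allow or a negative errno that is propagated unchanged. A minimal standalone C sketch of that calling pattern — hypothetical stand-in names, simplified types and a made-up policy, not the grsecurity implementation — for the bind() path:

#include <errno.h>

/* Simplified stand-ins; the real hooks take struct sockaddr/struct socket. */
struct addr_model { unsigned short family; unsigned short port; };

/* gr_handle_*-style hook: nonzero means deny, caller chooses the errno. */
static int model_handle_sock_server(const struct addr_model *addr)
{
	return addr->port != 0 && addr->port < 1024; /* hypothetical policy */
}

/* gr_search_*-style hook: 0 allows, a negative errno is returned as-is. */
static int model_search_bind(const struct addr_model *addr)
{
	if (addr->family != 2 /* AF_INET */)
		return -EACCES;
	return 0;
}

/* Mirrors the order used in the bind() hunk: handle-hook, then search-hook,
 * then the normal LSM/protocol path. */
static int model_bind_path(const struct addr_model *addr)
{
	int err;

	if (model_handle_sock_server(addr))
		return -EACCES;
	err = model_search_bind(addr);
	if (err)
		return err;
	/* ...security_socket_bind() and sock->ops->bind() would run here... */
	return 0;
}
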
74638 diff -urNp linux-2.6.32.44/net/sunrpc/sched.c linux-2.6.32.44/net/sunrpc/sched.c
74639 --- linux-2.6.32.44/net/sunrpc/sched.c 2011-08-09 18:35:30.000000000 -0400
74640 +++ linux-2.6.32.44/net/sunrpc/sched.c 2011-08-09 18:34:01.000000000 -0400
74641 @@ -234,10 +234,10 @@ static int rpc_wait_bit_killable(void *w
74642 #ifdef RPC_DEBUG
74643 static void rpc_task_set_debuginfo(struct rpc_task *task)
74644 {
74645 - static atomic_t rpc_pid;
74646 + static atomic_unchecked_t rpc_pid;
74647
74648 task->tk_magic = RPC_TASK_MAGIC_ID;
74649 - task->tk_pid = atomic_inc_return(&rpc_pid);
74650 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
74651 }
74652 #else
74653 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
74654 diff -urNp linux-2.6.32.44/net/sunrpc/xprtrdma/svc_rdma.c linux-2.6.32.44/net/sunrpc/xprtrdma/svc_rdma.c
74655 --- linux-2.6.32.44/net/sunrpc/xprtrdma/svc_rdma.c 2011-03-27 14:31:47.000000000 -0400
74656 +++ linux-2.6.32.44/net/sunrpc/xprtrdma/svc_rdma.c 2011-05-04 17:56:20.000000000 -0400
74657 @@ -59,15 +59,15 @@ unsigned int svcrdma_max_req_size = RPCR
74658 static unsigned int min_max_inline = 4096;
74659 static unsigned int max_max_inline = 65536;
74660
74661 -atomic_t rdma_stat_recv;
74662 -atomic_t rdma_stat_read;
74663 -atomic_t rdma_stat_write;
74664 -atomic_t rdma_stat_sq_starve;
74665 -atomic_t rdma_stat_rq_starve;
74666 -atomic_t rdma_stat_rq_poll;
74667 -atomic_t rdma_stat_rq_prod;
74668 -atomic_t rdma_stat_sq_poll;
74669 -atomic_t rdma_stat_sq_prod;
74670 +atomic_unchecked_t rdma_stat_recv;
74671 +atomic_unchecked_t rdma_stat_read;
74672 +atomic_unchecked_t rdma_stat_write;
74673 +atomic_unchecked_t rdma_stat_sq_starve;
74674 +atomic_unchecked_t rdma_stat_rq_starve;
74675 +atomic_unchecked_t rdma_stat_rq_poll;
74676 +atomic_unchecked_t rdma_stat_rq_prod;
74677 +atomic_unchecked_t rdma_stat_sq_poll;
74678 +atomic_unchecked_t rdma_stat_sq_prod;
74679
74680 /* Temporary NFS request map and context caches */
74681 struct kmem_cache *svc_rdma_map_cachep;
74682 @@ -105,7 +105,7 @@ static int read_reset_stat(ctl_table *ta
74683 len -= *ppos;
74684 if (len > *lenp)
74685 len = *lenp;
74686 - if (len && copy_to_user(buffer, str_buf, len))
74687 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
74688 return -EFAULT;
74689 *lenp = len;
74690 *ppos += len;
74691 @@ -149,63 +149,63 @@ static ctl_table svcrdma_parm_table[] =
74692 {
74693 .procname = "rdma_stat_read",
74694 .data = &rdma_stat_read,
74695 - .maxlen = sizeof(atomic_t),
74696 + .maxlen = sizeof(atomic_unchecked_t),
74697 .mode = 0644,
74698 .proc_handler = &read_reset_stat,
74699 },
74700 {
74701 .procname = "rdma_stat_recv",
74702 .data = &rdma_stat_recv,
74703 - .maxlen = sizeof(atomic_t),
74704 + .maxlen = sizeof(atomic_unchecked_t),
74705 .mode = 0644,
74706 .proc_handler = &read_reset_stat,
74707 },
74708 {
74709 .procname = "rdma_stat_write",
74710 .data = &rdma_stat_write,
74711 - .maxlen = sizeof(atomic_t),
74712 + .maxlen = sizeof(atomic_unchecked_t),
74713 .mode = 0644,
74714 .proc_handler = &read_reset_stat,
74715 },
74716 {
74717 .procname = "rdma_stat_sq_starve",
74718 .data = &rdma_stat_sq_starve,
74719 - .maxlen = sizeof(atomic_t),
74720 + .maxlen = sizeof(atomic_unchecked_t),
74721 .mode = 0644,
74722 .proc_handler = &read_reset_stat,
74723 },
74724 {
74725 .procname = "rdma_stat_rq_starve",
74726 .data = &rdma_stat_rq_starve,
74727 - .maxlen = sizeof(atomic_t),
74728 + .maxlen = sizeof(atomic_unchecked_t),
74729 .mode = 0644,
74730 .proc_handler = &read_reset_stat,
74731 },
74732 {
74733 .procname = "rdma_stat_rq_poll",
74734 .data = &rdma_stat_rq_poll,
74735 - .maxlen = sizeof(atomic_t),
74736 + .maxlen = sizeof(atomic_unchecked_t),
74737 .mode = 0644,
74738 .proc_handler = &read_reset_stat,
74739 },
74740 {
74741 .procname = "rdma_stat_rq_prod",
74742 .data = &rdma_stat_rq_prod,
74743 - .maxlen = sizeof(atomic_t),
74744 + .maxlen = sizeof(atomic_unchecked_t),
74745 .mode = 0644,
74746 .proc_handler = &read_reset_stat,
74747 },
74748 {
74749 .procname = "rdma_stat_sq_poll",
74750 .data = &rdma_stat_sq_poll,
74751 - .maxlen = sizeof(atomic_t),
74752 + .maxlen = sizeof(atomic_unchecked_t),
74753 .mode = 0644,
74754 .proc_handler = &read_reset_stat,
74755 },
74756 {
74757 .procname = "rdma_stat_sq_prod",
74758 .data = &rdma_stat_sq_prod,
74759 - .maxlen = sizeof(atomic_t),
74760 + .maxlen = sizeof(atomic_unchecked_t),
74761 .mode = 0644,
74762 .proc_handler = &read_reset_stat,
74763 },
74764 diff -urNp linux-2.6.32.44/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c linux-2.6.32.44/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
74765 --- linux-2.6.32.44/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-03-27 14:31:47.000000000 -0400
74766 +++ linux-2.6.32.44/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-05-04 17:56:28.000000000 -0400
74767 @@ -495,7 +495,7 @@ next_sge:
74768 svc_rdma_put_context(ctxt, 0);
74769 goto out;
74770 }
74771 - atomic_inc(&rdma_stat_read);
74772 + atomic_inc_unchecked(&rdma_stat_read);
74773
74774 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
74775 chl_map->ch[ch_no].count -= read_wr.num_sge;
74776 @@ -606,7 +606,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
74777 dto_q);
74778 list_del_init(&ctxt->dto_q);
74779 } else {
74780 - atomic_inc(&rdma_stat_rq_starve);
74781 + atomic_inc_unchecked(&rdma_stat_rq_starve);
74782 clear_bit(XPT_DATA, &xprt->xpt_flags);
74783 ctxt = NULL;
74784 }
74785 @@ -626,7 +626,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
74786 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
74787 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
74788 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
74789 - atomic_inc(&rdma_stat_recv);
74790 + atomic_inc_unchecked(&rdma_stat_recv);
74791
74792 /* Build up the XDR from the receive buffers. */
74793 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
74794 diff -urNp linux-2.6.32.44/net/sunrpc/xprtrdma/svc_rdma_sendto.c linux-2.6.32.44/net/sunrpc/xprtrdma/svc_rdma_sendto.c
74795 --- linux-2.6.32.44/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-03-27 14:31:47.000000000 -0400
74796 +++ linux-2.6.32.44/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-05-04 17:56:28.000000000 -0400
74797 @@ -328,7 +328,7 @@ static int send_write(struct svcxprt_rdm
74798 write_wr.wr.rdma.remote_addr = to;
74799
74800 /* Post It */
74801 - atomic_inc(&rdma_stat_write);
74802 + atomic_inc_unchecked(&rdma_stat_write);
74803 if (svc_rdma_send(xprt, &write_wr))
74804 goto err;
74805 return 0;
74806 diff -urNp linux-2.6.32.44/net/sunrpc/xprtrdma/svc_rdma_transport.c linux-2.6.32.44/net/sunrpc/xprtrdma/svc_rdma_transport.c
74807 --- linux-2.6.32.44/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-03-27 14:31:47.000000000 -0400
74808 +++ linux-2.6.32.44/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-05-04 17:56:28.000000000 -0400
74809 @@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rd
74810 return;
74811
74812 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
74813 - atomic_inc(&rdma_stat_rq_poll);
74814 + atomic_inc_unchecked(&rdma_stat_rq_poll);
74815
74816 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
74817 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
74818 @@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rd
74819 }
74820
74821 if (ctxt)
74822 - atomic_inc(&rdma_stat_rq_prod);
74823 + atomic_inc_unchecked(&rdma_stat_rq_prod);
74824
74825 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
74826 /*
74827 @@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rd
74828 return;
74829
74830 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
74831 - atomic_inc(&rdma_stat_sq_poll);
74832 + atomic_inc_unchecked(&rdma_stat_sq_poll);
74833 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
74834 if (wc.status != IB_WC_SUCCESS)
74835 /* Close the transport */
74836 @@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rd
74837 }
74838
74839 if (ctxt)
74840 - atomic_inc(&rdma_stat_sq_prod);
74841 + atomic_inc_unchecked(&rdma_stat_sq_prod);
74842 }
74843
74844 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
74845 @@ -1260,7 +1260,7 @@ int svc_rdma_send(struct svcxprt_rdma *x
74846 spin_lock_bh(&xprt->sc_lock);
74847 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
74848 spin_unlock_bh(&xprt->sc_lock);
74849 - atomic_inc(&rdma_stat_sq_starve);
74850 + atomic_inc_unchecked(&rdma_stat_sq_starve);
74851
74852 /* See if we can opportunistically reap SQ WR to make room */
74853 sq_cq_reap(xprt);
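
The atomic_t to atomic_unchecked_t conversions above all target the rdma_stat_* counters, which are pure statistics: letting them wrap is harmless, so they are exempted from the overflow detection that the hardened atomic_t carries. A conceptual sketch of what such an exempt counter amounts to (an assumption about the mechanism, written with GCC builtins rather than the kernel's atomic primitives):

/* Assumed model of an overflow-exempt counter: same layout as atomic_t,
 * but its helpers perform a plain wrapping increment with no saturation
 * or trap on overflow. */
typedef struct {
	int counter;
} unchecked_counter_t;

static inline void counter_inc_unchecked(unchecked_counter_t *v)
{
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

static inline int counter_read_unchecked(const unchecked_counter_t *v)
{
	return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}
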
74854 diff -urNp linux-2.6.32.44/net/sysctl_net.c linux-2.6.32.44/net/sysctl_net.c
74855 --- linux-2.6.32.44/net/sysctl_net.c 2011-03-27 14:31:47.000000000 -0400
74856 +++ linux-2.6.32.44/net/sysctl_net.c 2011-04-17 15:56:46.000000000 -0400
74857 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ct
74858 struct ctl_table *table)
74859 {
74860 /* Allow network administrator to have same access as root. */
74861 - if (capable(CAP_NET_ADMIN)) {
74862 + if (capable_nolog(CAP_NET_ADMIN)) {
74863 int mode = (table->mode >> 6) & 7;
74864 return (mode << 6) | (mode << 3) | mode;
74865 }
74866 diff -urNp linux-2.6.32.44/net/unix/af_unix.c linux-2.6.32.44/net/unix/af_unix.c
74867 --- linux-2.6.32.44/net/unix/af_unix.c 2011-05-10 22:12:02.000000000 -0400
74868 +++ linux-2.6.32.44/net/unix/af_unix.c 2011-07-18 18:17:33.000000000 -0400
74869 @@ -745,6 +745,12 @@ static struct sock *unix_find_other(stru
74870 err = -ECONNREFUSED;
74871 if (!S_ISSOCK(inode->i_mode))
74872 goto put_fail;
74873 +
74874 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
74875 + err = -EACCES;
74876 + goto put_fail;
74877 + }
74878 +
74879 u = unix_find_socket_byinode(net, inode);
74880 if (!u)
74881 goto put_fail;
74882 @@ -765,6 +771,13 @@ static struct sock *unix_find_other(stru
74883 if (u) {
74884 struct dentry *dentry;
74885 dentry = unix_sk(u)->dentry;
74886 +
74887 + if (!gr_handle_chroot_unix(u->sk_peercred.pid)) {
74888 + err = -EPERM;
74889 + sock_put(u);
74890 + goto fail;
74891 + }
74892 +
74893 if (dentry)
74894 touch_atime(unix_sk(u)->mnt, dentry);
74895 } else
74896 @@ -850,11 +863,18 @@ static int unix_bind(struct socket *sock
74897 err = security_path_mknod(&nd.path, dentry, mode, 0);
74898 if (err)
74899 goto out_mknod_drop_write;
74900 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
74901 + err = -EACCES;
74902 + goto out_mknod_drop_write;
74903 + }
74904 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
74905 out_mknod_drop_write:
74906 mnt_drop_write(nd.path.mnt);
74907 if (err)
74908 goto out_mknod_dput;
74909 +
74910 + gr_handle_create(dentry, nd.path.mnt);
74911 +
74912 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
74913 dput(nd.path.dentry);
74914 nd.path.dentry = dentry;
74915 @@ -2211,7 +2231,11 @@ static int unix_seq_show(struct seq_file
74916 unix_state_lock(s);
74917
74918 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
74919 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74920 + NULL,
74921 +#else
74922 s,
74923 +#endif
74924 atomic_read(&s->sk_refcnt),
74925 0,
74926 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
74927 diff -urNp linux-2.6.32.44/net/wireless/core.c linux-2.6.32.44/net/wireless/core.c
74928 --- linux-2.6.32.44/net/wireless/core.c 2011-03-27 14:31:47.000000000 -0400
74929 +++ linux-2.6.32.44/net/wireless/core.c 2011-08-05 20:33:55.000000000 -0400
74930 @@ -367,7 +367,7 @@ struct wiphy *wiphy_new(const struct cfg
74931
74932 wiphy_net_set(&rdev->wiphy, &init_net);
74933
74934 - rdev->rfkill_ops.set_block = cfg80211_rfkill_set_block;
74935 + *(void **)&rdev->rfkill_ops.set_block = cfg80211_rfkill_set_block;
74936 rdev->rfkill = rfkill_alloc(dev_name(&rdev->wiphy.dev),
74937 &rdev->wiphy.dev, RFKILL_TYPE_WLAN,
74938 &rdev->rfkill_ops, rdev);
74939 @@ -505,7 +505,7 @@ void wiphy_rfkill_start_polling(struct w
74940
74941 if (!rdev->ops->rfkill_poll)
74942 return;
74943 - rdev->rfkill_ops.poll = cfg80211_rfkill_poll;
74944 + *(void **)&rdev->rfkill_ops.poll = cfg80211_rfkill_poll;
74945 rfkill_resume_polling(rdev->rfkill);
74946 }
74947 EXPORT_SYMBOL(wiphy_rfkill_start_polling);
74948 diff -urNp linux-2.6.32.44/net/wireless/wext.c linux-2.6.32.44/net/wireless/wext.c
74949 --- linux-2.6.32.44/net/wireless/wext.c 2011-03-27 14:31:47.000000000 -0400
74950 +++ linux-2.6.32.44/net/wireless/wext.c 2011-04-17 15:56:46.000000000 -0400
74951 @@ -816,8 +816,7 @@ static int ioctl_standard_iw_point(struc
74952 */
74953
74954 /* Support for very large requests */
74955 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
74956 - (user_length > descr->max_tokens)) {
74957 + if (user_length > descr->max_tokens) {
74958 /* Allow userspace to GET more than max so
74959 * we can support any size GET requests.
74960 * There is still a limit : -ENOMEM.
74961 @@ -854,22 +853,6 @@ static int ioctl_standard_iw_point(struc
74962 }
74963 }
74964
74965 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
74966 - /*
74967 - * If this is a GET, but not NOMAX, it means that the extra
74968 - * data is not bounded by userspace, but by max_tokens. Thus
74969 - * set the length to max_tokens. This matches the extra data
74970 - * allocation.
74971 - * The driver should fill it with the number of tokens it
74972 - * provided, and it may check iwp->length rather than having
74973 - * knowledge of max_tokens. If the driver doesn't change the
74974 - * iwp->length, this ioctl just copies back max_token tokens
74975 - * filled with zeroes. Hopefully the driver isn't claiming
74976 - * them to be valid data.
74977 - */
74978 - iwp->length = descr->max_tokens;
74979 - }
74980 -
74981 err = handler(dev, info, (union iwreq_data *) iwp, extra);
74982
74983 iwp->length += essid_compat;
74984 diff -urNp linux-2.6.32.44/net/xfrm/xfrm_policy.c linux-2.6.32.44/net/xfrm/xfrm_policy.c
74985 --- linux-2.6.32.44/net/xfrm/xfrm_policy.c 2011-03-27 14:31:47.000000000 -0400
74986 +++ linux-2.6.32.44/net/xfrm/xfrm_policy.c 2011-05-04 17:56:20.000000000 -0400
74987 @@ -586,7 +586,7 @@ int xfrm_policy_insert(int dir, struct x
74988 hlist_add_head(&policy->bydst, chain);
74989 xfrm_pol_hold(policy);
74990 net->xfrm.policy_count[dir]++;
74991 - atomic_inc(&flow_cache_genid);
74992 + atomic_inc_unchecked(&flow_cache_genid);
74993 if (delpol)
74994 __xfrm_policy_unlink(delpol, dir);
74995 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
74996 @@ -669,7 +669,7 @@ struct xfrm_policy *xfrm_policy_bysel_ct
74997 write_unlock_bh(&xfrm_policy_lock);
74998
74999 if (ret && delete) {
75000 - atomic_inc(&flow_cache_genid);
75001 + atomic_inc_unchecked(&flow_cache_genid);
75002 xfrm_policy_kill(ret);
75003 }
75004 return ret;
75005 @@ -710,7 +710,7 @@ struct xfrm_policy *xfrm_policy_byid(str
75006 write_unlock_bh(&xfrm_policy_lock);
75007
75008 if (ret && delete) {
75009 - atomic_inc(&flow_cache_genid);
75010 + atomic_inc_unchecked(&flow_cache_genid);
75011 xfrm_policy_kill(ret);
75012 }
75013 return ret;
75014 @@ -824,7 +824,7 @@ int xfrm_policy_flush(struct net *net, u
75015 }
75016
75017 }
75018 - atomic_inc(&flow_cache_genid);
75019 + atomic_inc_unchecked(&flow_cache_genid);
75020 out:
75021 write_unlock_bh(&xfrm_policy_lock);
75022 return err;
75023 @@ -1088,7 +1088,7 @@ int xfrm_policy_delete(struct xfrm_polic
75024 write_unlock_bh(&xfrm_policy_lock);
75025 if (pol) {
75026 if (dir < XFRM_POLICY_MAX)
75027 - atomic_inc(&flow_cache_genid);
75028 + atomic_inc_unchecked(&flow_cache_genid);
75029 xfrm_policy_kill(pol);
75030 return 0;
75031 }
75032 @@ -1477,7 +1477,7 @@ free_dst:
75033 goto out;
75034 }
75035
75036 -static int inline
75037 +static inline int
75038 xfrm_dst_alloc_copy(void **target, void *src, int size)
75039 {
75040 if (!*target) {
75041 @@ -1489,7 +1489,7 @@ xfrm_dst_alloc_copy(void **target, void
75042 return 0;
75043 }
75044
75045 -static int inline
75046 +static inline int
75047 xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
75048 {
75049 #ifdef CONFIG_XFRM_SUB_POLICY
75050 @@ -1501,7 +1501,7 @@ xfrm_dst_update_parent(struct dst_entry
75051 #endif
75052 }
75053
75054 -static int inline
75055 +static inline int
75056 xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
75057 {
75058 #ifdef CONFIG_XFRM_SUB_POLICY
75059 @@ -1537,7 +1537,7 @@ int __xfrm_lookup(struct net *net, struc
75060 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
75061
75062 restart:
75063 - genid = atomic_read(&flow_cache_genid);
75064 + genid = atomic_read_unchecked(&flow_cache_genid);
75065 policy = NULL;
75066 for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
75067 pols[pi] = NULL;
75068 @@ -1680,7 +1680,7 @@ restart:
75069 goto error;
75070 }
75071 if (nx == -EAGAIN ||
75072 - genid != atomic_read(&flow_cache_genid)) {
75073 + genid != atomic_read_unchecked(&flow_cache_genid)) {
75074 xfrm_pols_put(pols, npols);
75075 goto restart;
75076 }
75077 diff -urNp linux-2.6.32.44/net/xfrm/xfrm_user.c linux-2.6.32.44/net/xfrm/xfrm_user.c
75078 --- linux-2.6.32.44/net/xfrm/xfrm_user.c 2011-03-27 14:31:47.000000000 -0400
75079 +++ linux-2.6.32.44/net/xfrm/xfrm_user.c 2011-05-16 21:46:57.000000000 -0400
75080 @@ -1169,6 +1169,8 @@ static int copy_to_user_tmpl(struct xfrm
75081 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
75082 int i;
75083
75084 + pax_track_stack();
75085 +
75086 if (xp->xfrm_nr == 0)
75087 return 0;
75088
75089 @@ -1784,6 +1786,8 @@ static int xfrm_do_migrate(struct sk_buf
75090 int err;
75091 int n = 0;
75092
75093 + pax_track_stack();
75094 +
75095 if (attrs[XFRMA_MIGRATE] == NULL)
75096 return -EINVAL;
75097
75098 diff -urNp linux-2.6.32.44/samples/kobject/kset-example.c linux-2.6.32.44/samples/kobject/kset-example.c
75099 --- linux-2.6.32.44/samples/kobject/kset-example.c 2011-03-27 14:31:47.000000000 -0400
75100 +++ linux-2.6.32.44/samples/kobject/kset-example.c 2011-04-17 15:56:46.000000000 -0400
75101 @@ -87,7 +87,7 @@ static ssize_t foo_attr_store(struct kob
75102 }
75103
75104 /* Our custom sysfs_ops that we will associate with our ktype later on */
75105 -static struct sysfs_ops foo_sysfs_ops = {
75106 +static const struct sysfs_ops foo_sysfs_ops = {
75107 .show = foo_attr_show,
75108 .store = foo_attr_store,
75109 };
75110 diff -urNp linux-2.6.32.44/scripts/basic/fixdep.c linux-2.6.32.44/scripts/basic/fixdep.c
75111 --- linux-2.6.32.44/scripts/basic/fixdep.c 2011-03-27 14:31:47.000000000 -0400
75112 +++ linux-2.6.32.44/scripts/basic/fixdep.c 2011-04-17 15:56:46.000000000 -0400
75113 @@ -222,9 +222,9 @@ static void use_config(char *m, int slen
75114
75115 static void parse_config_file(char *map, size_t len)
75116 {
75117 - int *end = (int *) (map + len);
75118 + unsigned int *end = (unsigned int *) (map + len);
75119 /* start at +1, so that p can never be < map */
75120 - int *m = (int *) map + 1;
75121 + unsigned int *m = (unsigned int *) map + 1;
75122 char *p, *q;
75123
75124 for (; m < end; m++) {
75125 @@ -371,7 +371,7 @@ static void print_deps(void)
75126 static void traps(void)
75127 {
75128 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
75129 - int *p = (int *)test;
75130 + unsigned int *p = (unsigned int *)test;
75131
75132 if (*p != INT_CONF) {
75133 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
75134 diff -urNp linux-2.6.32.44/scripts/gcc-plugin.sh linux-2.6.32.44/scripts/gcc-plugin.sh
75135 --- linux-2.6.32.44/scripts/gcc-plugin.sh 1969-12-31 19:00:00.000000000 -0500
75136 +++ linux-2.6.32.44/scripts/gcc-plugin.sh 2011-08-05 20:33:55.000000000 -0400
75137 @@ -0,0 +1,3 @@
75138 +#!/bin/sh
75139 +
75140 +echo "#include \"gcc-plugin.h\"" | $* -x c - -c -o /dev/null -I`$* -print-file-name=plugin`/include>/dev/null 2>&1 && echo "y"
75141 diff -urNp linux-2.6.32.44/scripts/Makefile.build linux-2.6.32.44/scripts/Makefile.build
75142 --- linux-2.6.32.44/scripts/Makefile.build 2011-03-27 14:31:47.000000000 -0400
75143 +++ linux-2.6.32.44/scripts/Makefile.build 2011-06-04 20:46:51.000000000 -0400
75144 @@ -59,7 +59,7 @@ endif
75145 endif
75146
75147 # Do not include host rules unless needed
75148 -ifneq ($(hostprogs-y)$(hostprogs-m),)
75149 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
75150 include scripts/Makefile.host
75151 endif
75152
75153 diff -urNp linux-2.6.32.44/scripts/Makefile.clean linux-2.6.32.44/scripts/Makefile.clean
75154 --- linux-2.6.32.44/scripts/Makefile.clean 2011-03-27 14:31:47.000000000 -0400
75155 +++ linux-2.6.32.44/scripts/Makefile.clean 2011-06-04 20:47:19.000000000 -0400
75156 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subd
75157 __clean-files := $(extra-y) $(always) \
75158 $(targets) $(clean-files) \
75159 $(host-progs) \
75160 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
75161 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
75162 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
75163
75164 # as clean-files is given relative to the current directory, this adds
75165 # a $(obj) prefix, except for absolute paths
75166 diff -urNp linux-2.6.32.44/scripts/Makefile.host linux-2.6.32.44/scripts/Makefile.host
75167 --- linux-2.6.32.44/scripts/Makefile.host 2011-03-27 14:31:47.000000000 -0400
75168 +++ linux-2.6.32.44/scripts/Makefile.host 2011-06-04 20:48:22.000000000 -0400
75169 @@ -31,6 +31,7 @@
75170 # Note: Shared libraries consisting of C++ files are not supported
75171
75172 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
75173 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
75174
75175 # C code
75176 # Executables compiled from a single .c file
75177 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(hos
75178 # Shared libaries (only .c supported)
75179 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
75180 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
75181 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
75182 # Remove .so files from "xxx-objs"
75183 host-cobjs := $(filter-out %.so,$(host-cobjs))
75184
75185 diff -urNp linux-2.6.32.44/scripts/mod/file2alias.c linux-2.6.32.44/scripts/mod/file2alias.c
75186 --- linux-2.6.32.44/scripts/mod/file2alias.c 2011-03-27 14:31:47.000000000 -0400
75187 +++ linux-2.6.32.44/scripts/mod/file2alias.c 2011-04-17 15:56:46.000000000 -0400
75188 @@ -72,7 +72,7 @@ static void device_id_check(const char *
75189 unsigned long size, unsigned long id_size,
75190 void *symval)
75191 {
75192 - int i;
75193 + unsigned int i;
75194
75195 if (size % id_size || size < id_size) {
75196 if (cross_build != 0)
75197 @@ -102,7 +102,7 @@ static void device_id_check(const char *
75198 /* USB is special because the bcdDevice can be matched against a numeric range */
75199 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
75200 static void do_usb_entry(struct usb_device_id *id,
75201 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
75202 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
75203 unsigned char range_lo, unsigned char range_hi,
75204 struct module *mod)
75205 {
75206 @@ -368,7 +368,7 @@ static void do_pnp_device_entry(void *sy
75207 for (i = 0; i < count; i++) {
75208 const char *id = (char *)devs[i].id;
75209 char acpi_id[sizeof(devs[0].id)];
75210 - int j;
75211 + unsigned int j;
75212
75213 buf_printf(&mod->dev_table_buf,
75214 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
75215 @@ -398,7 +398,7 @@ static void do_pnp_card_entries(void *sy
75216
75217 for (j = 0; j < PNP_MAX_DEVICES; j++) {
75218 const char *id = (char *)card->devs[j].id;
75219 - int i2, j2;
75220 + unsigned int i2, j2;
75221 int dup = 0;
75222
75223 if (!id[0])
75224 @@ -424,7 +424,7 @@ static void do_pnp_card_entries(void *sy
75225 /* add an individual alias for every device entry */
75226 if (!dup) {
75227 char acpi_id[sizeof(card->devs[0].id)];
75228 - int k;
75229 + unsigned int k;
75230
75231 buf_printf(&mod->dev_table_buf,
75232 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
75233 @@ -699,7 +699,7 @@ static void dmi_ascii_filter(char *d, co
75234 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
75235 char *alias)
75236 {
75237 - int i, j;
75238 + unsigned int i, j;
75239
75240 sprintf(alias, "dmi*");
75241
75242 diff -urNp linux-2.6.32.44/scripts/mod/modpost.c linux-2.6.32.44/scripts/mod/modpost.c
75243 --- linux-2.6.32.44/scripts/mod/modpost.c 2011-03-27 14:31:47.000000000 -0400
75244 +++ linux-2.6.32.44/scripts/mod/modpost.c 2011-07-06 19:53:33.000000000 -0400
75245 @@ -835,6 +835,7 @@ enum mismatch {
75246 INIT_TO_EXIT,
75247 EXIT_TO_INIT,
75248 EXPORT_TO_INIT_EXIT,
75249 + DATA_TO_TEXT
75250 };
75251
75252 struct sectioncheck {
75253 @@ -920,6 +921,12 @@ const struct sectioncheck sectioncheck[]
75254 .fromsec = { "__ksymtab*", NULL },
75255 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
75256 .mismatch = EXPORT_TO_INIT_EXIT
75257 +},
75258 +/* Do not reference code from writable data */
75259 +{
75260 + .fromsec = { DATA_SECTIONS, NULL },
75261 + .tosec = { TEXT_SECTIONS, NULL },
75262 + .mismatch = DATA_TO_TEXT
75263 }
75264 };
75265
75266 @@ -1024,10 +1031,10 @@ static Elf_Sym *find_elf_symbol(struct e
75267 continue;
75268 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
75269 continue;
75270 - if (sym->st_value == addr)
75271 - return sym;
75272 /* Find a symbol nearby - addr are maybe negative */
75273 d = sym->st_value - addr;
75274 + if (d == 0)
75275 + return sym;
75276 if (d < 0)
75277 d = addr - sym->st_value;
75278 if (d < distance) {
75279 @@ -1268,6 +1275,14 @@ static void report_sec_mismatch(const ch
75280 "Fix this by removing the %sannotation of %s "
75281 "or drop the export.\n",
75282 tosym, sec2annotation(tosec), sec2annotation(tosec), tosym);
75283 + case DATA_TO_TEXT:
75284 +/*
75285 + fprintf(stderr,
75286 + "The variable %s references\n"
75287 + "the %s %s%s%s\n",
75288 + fromsym, to, sec2annotation(tosec), tosym, to_p);
75289 +*/
75290 + break;
75291 case NO_MISMATCH:
75292 /* To get warnings on missing members */
75293 break;
75294 @@ -1495,7 +1510,7 @@ static void section_rel(const char *modn
75295 static void check_sec_ref(struct module *mod, const char *modname,
75296 struct elf_info *elf)
75297 {
75298 - int i;
75299 + unsigned int i;
75300 Elf_Shdr *sechdrs = elf->sechdrs;
75301
75302 /* Walk through all sections */
75303 @@ -1651,7 +1666,7 @@ void __attribute__((format(printf, 2, 3)
75304 va_end(ap);
75305 }
75306
75307 -void buf_write(struct buffer *buf, const char *s, int len)
75308 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
75309 {
75310 if (buf->size - buf->pos < len) {
75311 buf->size += len + SZ;
75312 @@ -1863,7 +1878,7 @@ static void write_if_changed(struct buff
75313 if (fstat(fileno(file), &st) < 0)
75314 goto close_write;
75315
75316 - if (st.st_size != b->pos)
75317 + if (st.st_size != (off_t)b->pos)
75318 goto close_write;
75319
75320 tmp = NOFAIL(malloc(b->pos));
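
The find_elf_symbol change above folds the exact-match test into the distance computation, so a zero distance ends the scan immediately instead of being handled by a separate equality check before the distance is known. The same logic in isolation, with simplified types (an illustration, not the modpost code itself):

#include <stddef.h>
#include <stdint.h>

struct sym_model { uint64_t st_value; const char *name; };

static const struct sym_model *
nearest_symbol(const struct sym_model *syms, size_t n, uint64_t addr)
{
	const struct sym_model *best = NULL;
	uint64_t distance = UINT64_MAX;

	for (size_t i = 0; i < n; i++) {
		/* distance first; d == 0 doubles as the exact-match test */
		uint64_t d = syms[i].st_value > addr ?
			     syms[i].st_value - addr : addr - syms[i].st_value;
		if (d == 0)
			return &syms[i];
		if (d < distance) {
			distance = d;
			best = &syms[i];
		}
	}
	return best;
}
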
75321 diff -urNp linux-2.6.32.44/scripts/mod/modpost.h linux-2.6.32.44/scripts/mod/modpost.h
75322 --- linux-2.6.32.44/scripts/mod/modpost.h 2011-03-27 14:31:47.000000000 -0400
75323 +++ linux-2.6.32.44/scripts/mod/modpost.h 2011-04-17 15:56:46.000000000 -0400
75324 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *e
75325
75326 struct buffer {
75327 char *p;
75328 - int pos;
75329 - int size;
75330 + unsigned int pos;
75331 + unsigned int size;
75332 };
75333
75334 void __attribute__((format(printf, 2, 3)))
75335 buf_printf(struct buffer *buf, const char *fmt, ...);
75336
75337 void
75338 -buf_write(struct buffer *buf, const char *s, int len);
75339 +buf_write(struct buffer *buf, const char *s, unsigned int len);
75340
75341 struct module {
75342 struct module *next;
75343 diff -urNp linux-2.6.32.44/scripts/mod/sumversion.c linux-2.6.32.44/scripts/mod/sumversion.c
75344 --- linux-2.6.32.44/scripts/mod/sumversion.c 2011-03-27 14:31:47.000000000 -0400
75345 +++ linux-2.6.32.44/scripts/mod/sumversion.c 2011-04-17 15:56:46.000000000 -0400
75346 @@ -455,7 +455,7 @@ static void write_version(const char *fi
75347 goto out;
75348 }
75349
75350 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
75351 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
75352 warn("writing sum in %s failed: %s\n",
75353 filename, strerror(errno));
75354 goto out;
75355 diff -urNp linux-2.6.32.44/scripts/package/mkspec linux-2.6.32.44/scripts/package/mkspec
75356 --- linux-2.6.32.44/scripts/package/mkspec 2011-03-27 14:31:47.000000000 -0400
75357 +++ linux-2.6.32.44/scripts/package/mkspec 2011-07-19 18:19:12.000000000 -0400
75358 @@ -70,7 +70,7 @@ echo 'mkdir -p $RPM_BUILD_ROOT/boot $RPM
75359 echo 'mkdir -p $RPM_BUILD_ROOT/lib/firmware'
75360 echo "%endif"
75361
75362 -echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{_smp_mflags} KBUILD_SRC= modules_install'
75363 +echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{?_smp_mflags} KBUILD_SRC= modules_install'
75364 echo "%ifarch ia64"
75365 echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/efi/vmlinuz-$KERNELRELEASE"
75366 echo 'ln -s '"efi/vmlinuz-$KERNELRELEASE" '$RPM_BUILD_ROOT'"/boot/"
75367 diff -urNp linux-2.6.32.44/scripts/pnmtologo.c linux-2.6.32.44/scripts/pnmtologo.c
75368 --- linux-2.6.32.44/scripts/pnmtologo.c 2011-03-27 14:31:47.000000000 -0400
75369 +++ linux-2.6.32.44/scripts/pnmtologo.c 2011-04-17 15:56:46.000000000 -0400
75370 @@ -237,14 +237,14 @@ static void write_header(void)
75371 fprintf(out, " * Linux logo %s\n", logoname);
75372 fputs(" */\n\n", out);
75373 fputs("#include <linux/linux_logo.h>\n\n", out);
75374 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
75375 + fprintf(out, "static unsigned char %s_data[] = {\n",
75376 logoname);
75377 }
75378
75379 static void write_footer(void)
75380 {
75381 fputs("\n};\n\n", out);
75382 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
75383 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
75384 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
75385 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
75386 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
75387 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
75388 fputs("\n};\n\n", out);
75389
75390 /* write logo clut */
75391 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
75392 + fprintf(out, "static unsigned char %s_clut[] = {\n",
75393 logoname);
75394 write_hex_cnt = 0;
75395 for (i = 0; i < logo_clutsize; i++) {
75396 diff -urNp linux-2.6.32.44/scripts/tags.sh linux-2.6.32.44/scripts/tags.sh
75397 --- linux-2.6.32.44/scripts/tags.sh 2011-03-27 14:31:47.000000000 -0400
75398 +++ linux-2.6.32.44/scripts/tags.sh 2011-06-07 18:06:04.000000000 -0400
75399 @@ -93,6 +93,11 @@ docscope()
75400 cscope -b -f cscope.out
75401 }
75402
75403 +dogtags()
75404 +{
75405 + all_sources | gtags -f -
75406 +}
75407 +
75408 exuberant()
75409 {
75410 all_sources | xargs $1 -a \
75411 @@ -164,6 +169,10 @@ case "$1" in
75412 docscope
75413 ;;
75414
75415 + "gtags")
75416 + dogtags
75417 + ;;
75418 +
75419 "tags")
75420 rm -f tags
75421 xtags ctags
75422 diff -urNp linux-2.6.32.44/security/capability.c linux-2.6.32.44/security/capability.c
75423 --- linux-2.6.32.44/security/capability.c 2011-03-27 14:31:47.000000000 -0400
75424 +++ linux-2.6.32.44/security/capability.c 2011-04-17 15:56:46.000000000 -0400
75425 @@ -890,7 +890,7 @@ static void cap_audit_rule_free(void *ls
75426 }
75427 #endif /* CONFIG_AUDIT */
75428
75429 -struct security_operations default_security_ops = {
75430 +struct security_operations default_security_ops __read_only = {
75431 .name = "default",
75432 };
75433
75434 diff -urNp linux-2.6.32.44/security/commoncap.c linux-2.6.32.44/security/commoncap.c
75435 --- linux-2.6.32.44/security/commoncap.c 2011-03-27 14:31:47.000000000 -0400
75436 +++ linux-2.6.32.44/security/commoncap.c 2011-04-17 15:56:46.000000000 -0400
75437 @@ -27,7 +27,7 @@
75438 #include <linux/sched.h>
75439 #include <linux/prctl.h>
75440 #include <linux/securebits.h>
75441 -
75442 +#include <net/sock.h>
75443 /*
75444 * If a non-root user executes a setuid-root binary in
75445 * !secure(SECURE_NOROOT) mode, then we raise capabilities.
75446 @@ -50,9 +50,11 @@ static void warn_setuid_and_fcaps_mixed(
75447 }
75448 }
75449
75450 +extern kernel_cap_t gr_cap_rtnetlink(struct sock *sk);
75451 +
75452 int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
75453 {
75454 - NETLINK_CB(skb).eff_cap = current_cap();
75455 + NETLINK_CB(skb).eff_cap = gr_cap_rtnetlink(sk);
75456 return 0;
75457 }
75458
75459 @@ -582,6 +584,9 @@ int cap_bprm_secureexec(struct linux_bin
75460 {
75461 const struct cred *cred = current_cred();
75462
75463 + if (gr_acl_enable_at_secure())
75464 + return 1;
75465 +
75466 if (cred->uid != 0) {
75467 if (bprm->cap_effective)
75468 return 1;
75469 diff -urNp linux-2.6.32.44/security/integrity/ima/ima_api.c linux-2.6.32.44/security/integrity/ima/ima_api.c
75470 --- linux-2.6.32.44/security/integrity/ima/ima_api.c 2011-03-27 14:31:47.000000000 -0400
75471 +++ linux-2.6.32.44/security/integrity/ima/ima_api.c 2011-04-17 15:56:46.000000000 -0400
75472 @@ -74,7 +74,7 @@ void ima_add_violation(struct inode *ino
75473 int result;
75474
75475 /* can overflow, only indicator */
75476 - atomic_long_inc(&ima_htable.violations);
75477 + atomic_long_inc_unchecked(&ima_htable.violations);
75478
75479 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
75480 if (!entry) {
75481 diff -urNp linux-2.6.32.44/security/integrity/ima/ima_fs.c linux-2.6.32.44/security/integrity/ima/ima_fs.c
75482 --- linux-2.6.32.44/security/integrity/ima/ima_fs.c 2011-03-27 14:31:47.000000000 -0400
75483 +++ linux-2.6.32.44/security/integrity/ima/ima_fs.c 2011-04-17 15:56:46.000000000 -0400
75484 @@ -27,12 +27,12 @@
75485 static int valid_policy = 1;
75486 #define TMPBUFLEN 12
75487 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
75488 - loff_t *ppos, atomic_long_t *val)
75489 + loff_t *ppos, atomic_long_unchecked_t *val)
75490 {
75491 char tmpbuf[TMPBUFLEN];
75492 ssize_t len;
75493
75494 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
75495 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
75496 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
75497 }
75498
75499 diff -urNp linux-2.6.32.44/security/integrity/ima/ima.h linux-2.6.32.44/security/integrity/ima/ima.h
75500 --- linux-2.6.32.44/security/integrity/ima/ima.h 2011-03-27 14:31:47.000000000 -0400
75501 +++ linux-2.6.32.44/security/integrity/ima/ima.h 2011-04-17 15:56:46.000000000 -0400
75502 @@ -84,8 +84,8 @@ void ima_add_violation(struct inode *ino
75503 extern spinlock_t ima_queue_lock;
75504
75505 struct ima_h_table {
75506 - atomic_long_t len; /* number of stored measurements in the list */
75507 - atomic_long_t violations;
75508 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
75509 + atomic_long_unchecked_t violations;
75510 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
75511 };
75512 extern struct ima_h_table ima_htable;
75513 diff -urNp linux-2.6.32.44/security/integrity/ima/ima_queue.c linux-2.6.32.44/security/integrity/ima/ima_queue.c
75514 --- linux-2.6.32.44/security/integrity/ima/ima_queue.c 2011-03-27 14:31:47.000000000 -0400
75515 +++ linux-2.6.32.44/security/integrity/ima/ima_queue.c 2011-04-17 15:56:46.000000000 -0400
75516 @@ -78,7 +78,7 @@ static int ima_add_digest_entry(struct i
75517 INIT_LIST_HEAD(&qe->later);
75518 list_add_tail_rcu(&qe->later, &ima_measurements);
75519
75520 - atomic_long_inc(&ima_htable.len);
75521 + atomic_long_inc_unchecked(&ima_htable.len);
75522 key = ima_hash_key(entry->digest);
75523 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
75524 return 0;
75525 diff -urNp linux-2.6.32.44/security/Kconfig linux-2.6.32.44/security/Kconfig
75526 --- linux-2.6.32.44/security/Kconfig 2011-03-27 14:31:47.000000000 -0400
75527 +++ linux-2.6.32.44/security/Kconfig 2011-07-06 19:58:11.000000000 -0400
75528 @@ -4,6 +4,555 @@
75529
75530 menu "Security options"
75531
75532 +source grsecurity/Kconfig
75533 +
75534 +menu "PaX"
75535 +
75536 + config ARCH_TRACK_EXEC_LIMIT
75537 + bool
75538 +
75539 + config PAX_PER_CPU_PGD
75540 + bool
75541 +
75542 + config TASK_SIZE_MAX_SHIFT
75543 + int
75544 + depends on X86_64
75545 + default 47 if !PAX_PER_CPU_PGD
75546 + default 42 if PAX_PER_CPU_PGD
75547 +
75548 + config PAX_ENABLE_PAE
75549 + bool
75550 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
75551 +
75552 +config PAX
75553 + bool "Enable various PaX features"
75554 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
75555 + help
75556 + This allows you to enable various PaX features. PaX adds
75557 + intrusion prevention mechanisms to the kernel that reduce
75558 + the risks posed by exploitable memory corruption bugs.
75559 +
75560 +menu "PaX Control"
75561 + depends on PAX
75562 +
75563 +config PAX_SOFTMODE
75564 + bool 'Support soft mode'
75565 + select PAX_PT_PAX_FLAGS
75566 + help
75567 + Enabling this option will allow you to run PaX in soft mode, that
75568 + is, PaX features will not be enforced by default, only on executables
75569 + marked explicitly. You must also enable PT_PAX_FLAGS support as it
75570 + is the only way to mark executables for soft mode use.
75571 +
75572 + Soft mode can be activated by using the "pax_softmode=1" kernel command
75573 + line option on boot. Furthermore you can control various PaX features
75574 + at runtime via the entries in /proc/sys/kernel/pax.
75575 +
75576 +config PAX_EI_PAX
75577 + bool 'Use legacy ELF header marking'
75578 + help
75579 + Enabling this option will allow you to control PaX features on
75580 + a per executable basis via the 'chpax' utility available at
75581 + http://pax.grsecurity.net/. The control flags will be read from
75582 + an otherwise reserved part of the ELF header. This marking has
75583 + numerous drawbacks (no support for soft-mode, toolchain does not
75584 + know about the non-standard use of the ELF header) therefore it
75585 + has been deprecated in favour of PT_PAX_FLAGS support.
75586 +
75587 + Note that if you enable PT_PAX_FLAGS marking support as well,
75588 + the PT_PAX_FLAG marks will override the legacy EI_PAX marks.
75589 +
75590 +config PAX_PT_PAX_FLAGS
75591 + bool 'Use ELF program header marking'
75592 + help
75593 + Enabling this option will allow you to control PaX features on
75594 + a per executable basis via the 'paxctl' utility available at
75595 + http://pax.grsecurity.net/. The control flags will be read from
75596 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
75597 + has the benefits of supporting both soft mode and being fully
75598 + integrated into the toolchain (the binutils patch is available
75599 + from http://pax.grsecurity.net).
75600 +
75601 + If your toolchain does not support PT_PAX_FLAGS markings,
75602 + you can create one in most cases with 'paxctl -C'.
75603 +
75604 + Note that if you enable the legacy EI_PAX marking support as well,
75605 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
75606 +
75607 +choice
75608 + prompt 'MAC system integration'
75609 + default PAX_HAVE_ACL_FLAGS
75610 + help
75611 + Mandatory Access Control systems have the option of controlling
75612 + PaX flags on a per executable basis, choose the method supported
75613 + by your particular system.
75614 +
75615 + - "none": if your MAC system does not interact with PaX,
75616 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
75617 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
75618 +
75619 + NOTE: this option is for developers/integrators only.
75620 +
75621 + config PAX_NO_ACL_FLAGS
75622 + bool 'none'
75623 +
75624 + config PAX_HAVE_ACL_FLAGS
75625 + bool 'direct'
75626 +
75627 + config PAX_HOOK_ACL_FLAGS
75628 + bool 'hook'
75629 +endchoice
75630 +
75631 +endmenu
75632 +
75633 +menu "Non-executable pages"
75634 + depends on PAX
75635 +
75636 +config PAX_NOEXEC
75637 + bool "Enforce non-executable pages"
75638 + depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
75639 + help
75640 + By design some architectures do not allow for protecting memory
75641 + pages against execution or even if they do, Linux does not make
75642 + use of this feature. In practice this means that if a page is
75643 + readable (such as the stack or heap) it is also executable.
75644 +
75645 + There is a well known exploit technique that makes use of this
75646 + fact and a common programming mistake where an attacker can
75647 + introduce code of his choice somewhere in the attacked program's
75648 + memory (typically the stack or the heap) and then execute it.
75649 +
75650 + If the attacked program was running with different (typically
75651 +	  higher) privileges than those of the attacker, then he can elevate
75652 +	  his own privilege level (e.g. get a root shell, write to files to
75653 +	  which he does not have write access, etc).
75654 +
75655 + Enabling this option will let you choose from various features
75656 + that prevent the injection and execution of 'foreign' code in
75657 + a program.
75658 +
75659 + This will also break programs that rely on the old behaviour and
75660 + expect that dynamically allocated memory via the malloc() family
75661 + of functions is executable (which it is not). Notable examples
75662 + are the XFree86 4.x server, the java runtime and wine.
75663 +
75664 +config PAX_PAGEEXEC
75665 + bool "Paging based non-executable pages"
75666 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
75667 + select S390_SWITCH_AMODE if S390
75668 + select S390_EXEC_PROTECT if S390
75669 + select ARCH_TRACK_EXEC_LIMIT if X86_32
75670 + help
75671 + This implementation is based on the paging feature of the CPU.
75672 + On i386 without hardware non-executable bit support there is a
75673 +	  variable but usually low performance impact; however, on Intel's
75674 + P4 core based CPUs it is very high so you should not enable this
75675 + for kernels meant to be used on such CPUs.
75676 +
75677 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
75678 + with hardware non-executable bit support there is no performance
75679 + impact, on ppc the impact is negligible.
75680 +
75681 + Note that several architectures require various emulations due to
75682 +	  badly designed userland ABIs; this will cause a performance impact
75683 + but will disappear as soon as userland is fixed. For example, ppc
75684 + userland MUST have been built with secure-plt by a recent toolchain.
75685 +
75686 +config PAX_SEGMEXEC
75687 + bool "Segmentation based non-executable pages"
75688 + depends on PAX_NOEXEC && X86_32
75689 + help
75690 + This implementation is based on the segmentation feature of the
75691 + CPU and has a very small performance impact, however applications
75692 + will be limited to a 1.5 GB address space instead of the normal
75693 + 3 GB.
75694 +
75695 +config PAX_EMUTRAMP
75696 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
75697 + default y if PARISC
75698 + help
75699 + There are some programs and libraries that for one reason or
75700 + another attempt to execute special small code snippets from
75701 + non-executable memory pages. Most notable examples are the
75702 + signal handler return code generated by the kernel itself and
75703 + the GCC trampolines.
75704 +
75705 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
75706 + such programs will no longer work under your kernel.
75707 +
75708 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
75709 + utilities to enable trampoline emulation for the affected programs
75710 + yet still have the protection provided by the non-executable pages.
75711 +
75712 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
75713 + your system will not even boot.
75714 +
75715 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
75716 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
75717 + for the affected files.
75718 +
75719 + NOTE: enabling this feature *may* open up a loophole in the
75720 + protection provided by non-executable pages that an attacker
75721 + could abuse. Therefore the best solution is to not have any
75722 + files on your system that would require this option. This can
75723 + be achieved by not using libc5 (which relies on the kernel
75724 + signal handler return code) and not using or rewriting programs
75725 + that make use of the nested function implementation of GCC.
75726 + Skilled users can just fix GCC itself so that it implements
75727 + nested function calls in a way that does not interfere with PaX.
75728 +
75729 +config PAX_EMUSIGRT
75730 + bool "Automatically emulate sigreturn trampolines"
75731 + depends on PAX_EMUTRAMP && PARISC
75732 + default y
75733 + help
75734 + Enabling this option will have the kernel automatically detect
75735 + and emulate signal return trampolines executing on the stack
75736 + that would otherwise lead to task termination.
75737 +
75738 + This solution is intended as a temporary one for users with
75739 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
75740 + Modula-3 runtime, etc) or executables linked to such, basically
75741 + everything that does not specify its own SA_RESTORER function in
75742 + normal executable memory like glibc 2.1+ does.
75743 +
75744 + On parisc you MUST enable this option, otherwise your system will
75745 + not even boot.
75746 +
75747 + NOTE: this feature cannot be disabled on a per executable basis
75748 + and since it *does* open up a loophole in the protection provided
75749 + by non-executable pages, the best solution is to not have any
75750 + files on your system that would require this option.
75751 +
75752 +config PAX_MPROTECT
75753 + bool "Restrict mprotect()"
75754 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
75755 + help
75756 + Enabling this option will prevent programs from
75757 + - changing the executable status of memory pages that were
75758 + not originally created as executable,
75759 + - making read-only executable pages writable again,
75760 + - creating executable pages from anonymous memory,
75761 + - making read-only-after-relocations (RELRO) data pages writable again.
75762 +
75763 + You should say Y here to complete the protection provided by
75764 + the enforcement of non-executable pages.
75765 +
75766 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
75767 + this feature on a per file basis.
75768 +
75769 +config PAX_MPROTECT_COMPAT
75770 + bool "Use legacy/compat protection demoting (read help)"
75771 + depends on PAX_MPROTECT
75772 + default n
75773 + help
75774 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
75775 + by sending the proper error code to the application. For some broken
75776 + userland, this can cause problems with Python or other applications. The
75777 + current implementation however allows for applications like clamav to
75778 + detect if JIT compilation/execution is allowed and to fall back gracefully
75779 +	  to an interpreter-based mode if it is not. While we encourage everyone
75780 + to use the current implementation as-is and push upstream to fix broken
75781 + userland (note that the RWX logging option can assist with this), in some
75782 + environments this may not be possible. Having to disable MPROTECT
75783 + completely on certain binaries reduces the security benefit of PaX,
75784 + so this option is provided for those environments to revert to the old
75785 + behavior.
75786 +
75787 +config PAX_ELFRELOCS
75788 + bool "Allow ELF text relocations (read help)"
75789 + depends on PAX_MPROTECT
75790 + default n
75791 + help
75792 + Non-executable pages and mprotect() restrictions are effective
75793 + in preventing the introduction of new executable code into an
75794 + attacked task's address space. There remain only two venues
75795 + for this kind of attack: if the attacker can execute already
75796 + existing code in the attacked task then he can either have it
75797 + create and mmap() a file containing his code or have it mmap()
75798 + an already existing ELF library that does not have position
75799 + independent code in it and use mprotect() on it to make it
75800 + writable and copy his code there. While protecting against
75801 + the former approach is beyond PaX, the latter can be prevented
75802 + by having only PIC ELF libraries on one's system (which do not
75803 + need to relocate their code). If you are sure this is your case,
75804 + as is the case with all modern Linux distributions, then leave
75805 + this option disabled. You should say 'n' here.
75806 +
75807 +config PAX_ETEXECRELOCS
75808 + bool "Allow ELF ET_EXEC text relocations"
75809 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
75810 + select PAX_ELFRELOCS
75811 + default y
75812 + help
75813 + On some architectures there are incorrectly created applications
75814 + that require text relocations and would not work without enabling
75815 + this option. If you are an alpha, ia64 or parisc user, you should
75816 + enable this option and disable it once you have made sure that
75817 + none of your applications need it.
75818 +
75819 +config PAX_EMUPLT
75820 + bool "Automatically emulate ELF PLT"
75821 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
75822 + default y
75823 + help
75824 + Enabling this option will have the kernel automatically detect
75825 + and emulate the Procedure Linkage Table entries in ELF files.
75826 + On some architectures such entries are in writable memory, and
75827 + become non-executable leading to task termination. Therefore
75828 + it is mandatory that you enable this option on alpha, parisc,
75829 + sparc and sparc64, otherwise your system would not even boot.
75830 +
75831 + NOTE: this feature *does* open up a loophole in the protection
75832 + provided by the non-executable pages, therefore the proper
75833 + solution is to modify the toolchain to produce a PLT that does
75834 + not need to be writable.
75835 +
75836 +config PAX_DLRESOLVE
75837 + bool 'Emulate old glibc resolver stub'
75838 + depends on PAX_EMUPLT && SPARC
75839 + default n
75840 + help
75841 + This option is needed if userland has an old glibc (before 2.4)
75842 + that puts a 'save' instruction into the runtime generated resolver
75843 + stub that needs special emulation.
75844 +
75845 +config PAX_KERNEXEC
75846 + bool "Enforce non-executable kernel pages"
75847 + depends on PAX_NOEXEC && (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
75848 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
75849 + help
75850 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
75851 + that is, enabling this option will make it harder to inject
75852 + and execute 'foreign' code in kernel memory itself.
75853 +
75854 + Note that on x86_64 kernels there is a known regression when
75855 + this feature and KVM/VMX are both enabled in the host kernel.
75856 +
75857 +config PAX_KERNEXEC_MODULE_TEXT
75858 + int "Minimum amount of memory reserved for module code"
75859 + default "4"
75860 + depends on PAX_KERNEXEC && X86_32 && MODULES
75861 + help
75862 + Due to implementation details the kernel must reserve a fixed
75863 + amount of memory for module code at compile time that cannot be
75864 + changed at runtime. Here you can specify the minimum amount
75865 + in MB that will be reserved. Due to the same implementation
75866 + details this size will always be rounded up to the next 2/4 MB
75867 + boundary (depends on PAE) so the actually available memory for
75868 + module code will usually be more than this minimum.
75869 +
75870 + The default 4 MB should be enough for most users but if you have
75871 + an excessive number of modules (e.g., most distribution configs
75872 + compile many drivers as modules) or use huge modules such as
75873 + nvidia's kernel driver, you will need to adjust this amount.
75874 + A good rule of thumb is to look at your currently loaded kernel
75875 + modules and add up their sizes.
75876 +
75877 +endmenu
75878 +
75879 +menu "Address Space Layout Randomization"
75880 + depends on PAX
75881 +
75882 +config PAX_ASLR
75883 + bool "Address Space Layout Randomization"
75884 + depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
75885 + help
75886 + Many if not most exploit techniques rely on the knowledge of
75887 + certain addresses in the attacked program. The following options
75888 + will allow the kernel to apply a certain amount of randomization
75889 + to specific parts of the program thereby forcing an attacker to
75890 + guess them in most cases. Any failed guess will most likely crash
75891 + the attacked program which allows the kernel to detect such attempts
75892 + and react on them. PaX itself provides no reaction mechanisms,
75893 + instead it is strongly encouraged that you make use of Nergal's
75894 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
75895 + (http://www.grsecurity.net/) built-in crash detection features or
75896 + develop one yourself.
75897 +
75898 + By saying Y here you can choose to randomize the following areas:
75899 + - top of the task's kernel stack
75900 + - top of the task's userland stack
75901 + - base address for mmap() requests that do not specify one
75902 + (this includes all libraries)
75903 + - base address of the main executable
75904 +
75905 + It is strongly recommended to say Y here as address space layout
75906 + randomization has negligible impact on performance yet it provides
75907 + a very effective protection.
75908 +
75909 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
75910 + this feature on a per file basis.
75911 +
75912 +config PAX_RANDKSTACK
75913 + bool "Randomize kernel stack base"
75914 + depends on PAX_ASLR && X86_TSC && X86
75915 + help
75916 + By saying Y here the kernel will randomize every task's kernel
75917 + stack on every system call. This will not only force an attacker
75918 + to guess it but also prevent him from making use of possible
75919 + leaked information about it.
75920 +
75921 + Since the kernel stack is a rather scarce resource, randomization
75922 + may cause unexpected stack overflows, therefore you should very
75923 + carefully test your system. Note that once enabled in the kernel
75924 + configuration, this feature cannot be disabled on a per file basis.
75925 +
75926 +config PAX_RANDUSTACK
75927 + bool "Randomize user stack base"
75928 + depends on PAX_ASLR
75929 + help
75930 + By saying Y here the kernel will randomize every task's userland
75931 + stack. The randomization is done in two steps where the second
75932 + one may apply a big amount of shift to the top of the stack and
75933 + cause problems for programs that want to use lots of memory (more
75934 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
75935 + For this reason the second step can be controlled by 'chpax' or
75936 + 'paxctl' on a per file basis.
75937 +
75938 +config PAX_RANDMMAP
75939 + bool "Randomize mmap() base"
75940 + depends on PAX_ASLR
75941 + help
75942 + By saying Y here the kernel will use a randomized base address for
75943 +	  mmap() requests that do not specify one themselves. As a result,
75944 +	  all dynamically loaded libraries will appear at random addresses
75945 +	  and will therefore be harder to abuse by techniques where an attacker
75946 +	  attempts to execute library code for his own purposes (e.g. spawn a
75947 + shell from an exploited program that is running at an elevated
75948 + privilege level).
75949 +
75950 + Furthermore, if a program is relinked as a dynamic ELF file, its
75951 + base address will be randomized as well, completing the full
75952 + randomization of the address space layout. Attacking such programs
75953 +	  becomes a guessing game. You can find an example of doing this at
75954 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
75955 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
75956 +
75957 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
75958 + feature on a per file basis.
75959 +
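
A quick way to see the effect described above is to look at where a shared library ends up; the sketch below (illustrative only, not part of the patch) prints the address of a libc function, which is backed by an mmap() mapping and therefore changes between runs when the mmap() base is randomized.

#include <stdio.h>

/* Shared libraries are loaded via mmap() without a fixed address, so with
 * a randomized mmap() base the libc code below is found at a different
 * address on every execution.  Compare several runs, or cross-check with
 * /proc/self/maps.  Converting a function pointer to void * relies on the
 * usual POSIX allowance. */
int main(void)
{
	printf("printf() lives at %p\n", (void *)printf);
	return 0;
}
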
75960 +endmenu
75961 +
75962 +menu "Miscellaneous hardening features"
75963 +
75964 +config PAX_MEMORY_SANITIZE
75965 + bool "Sanitize all freed memory"
75966 + help
75967 + By saying Y here the kernel will erase memory pages as soon as they
75968 + are freed. This in turn reduces the lifetime of data stored in the
75969 + pages, making it less likely that sensitive information such as
75970 +	  passwords or cryptographic secrets stays in memory for too long.
75971 +
75972 +	  This is especially useful for programs whose runtime is short; long-
75973 +	  lived processes and the kernel itself also benefit from it as long as
75974 + they operate on whole memory pages and ensure timely freeing of pages
75975 + that may hold sensitive information.
75976 +
75977 +	  The tradeoff is a performance impact: on a single CPU system, kernel
75978 +	  compilation sees a 3% slowdown. Other systems and workloads may vary,
75979 +	  so you are advised to test this feature on your expected workload
75980 + before deploying it.
75981 +
75982 +	  Note that this feature does not protect data stored in live pages;
75983 + e.g., process memory swapped to disk may stay there for a long time.
75984 +
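
The same principle can be applied by hand in userland code that handles secrets. The sketch below is only an illustration of the idea, not code from the patch: it wipes a heap buffer through a volatile pointer before returning it to the allocator, so the compiler cannot optimize the clearing away.

#include <stdio.h>
#include <stdlib.h>

/* Zero a buffer in a way the optimizer is not allowed to elide, then free
 * it.  This mirrors, at the application level, what sanitizing freed
 * memory does for whole pages when they go back to the page allocator. */
static void wipe_and_free(void *buf, size_t len)
{
	volatile unsigned char *p = buf;

	while (len--)
		*p++ = 0;
	free(buf);
}

int main(void)
{
	char *secret = malloc(64);

	if (!secret)
		return 1;
	snprintf(secret, 64, "hunter2");   /* pretend password */
	/* ... use the secret ... */
	wipe_and_free(secret, 64);         /* no stale copy left in the heap chunk */
	return 0;
}
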
75985 +config PAX_MEMORY_STACKLEAK
75986 + bool "Sanitize kernel stack"
75987 + depends on X86
75988 + help
75989 + By saying Y here the kernel will erase the kernel stack before it
75990 + returns from a system call. This in turn reduces the information
75991 + that a kernel stack leak bug can reveal.
75992 +
75993 + Note that such a bug can still leak information that was put on
75994 + the stack by the current system call (the one eventually triggering
75995 +	  the bug), but traces of earlier system calls on the kernel stack
75996 +	  can no longer leak.
75997 +
75998 +	  The tradeoff is a performance impact: on a single CPU system, kernel
75999 +	  compilation sees a 1% slowdown. Other systems and workloads may vary,
76000 +	  so you are advised to test this feature on your expected workload
76001 + before deploying it.
76002 +
76003 +	  Note: full support for this feature requires gcc with plugin support,
76004 +	  so make sure your compiler is at least gcc 4.5.0 (cross compilation
76005 +	  is not supported). With older gcc versions, functions that have
76006 +	  large enough stack frames may leave uninitialized memory behind
76007 +	  that a later system call may then leak.
76008 +
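
A userland analogue of the idea is wiping sensitive stack buffers before a function returns; the following sketch is illustrative only and is not how the kernel feature is implemented.

#include <stdio.h>
#include <string.h>

/* Derive something from a sensitive value held in a stack buffer, then
 * wipe the buffer before returning so later calls that reuse the same
 * stack region cannot observe leftovers.  The volatile accesses keep the
 * compiler from dropping the wipe as a dead store. */
static unsigned int checksum_password(const char *pw)
{
	char tmp[128];
	unsigned int sum = 0;
	size_t i, len;

	strncpy(tmp, pw, sizeof(tmp) - 1);
	tmp[sizeof(tmp) - 1] = '\0';
	len = strlen(tmp);
	for (i = 0; i < len; i++)
		sum += (unsigned char)tmp[i];

	for (i = 0; i < sizeof(tmp); i++)
		*(volatile char *)&tmp[i] = 0;

	return sum;
}

int main(void)
{
	printf("checksum: %u\n", checksum_password("hunter2"));
	return 0;
}
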
76009 +config PAX_MEMORY_UDEREF
76010 + bool "Prevent invalid userland pointer dereference"
76011 + depends on X86 && !UML_X86 && !XEN
76012 + select PAX_PER_CPU_PGD if X86_64
76013 + help
76014 + By saying Y here the kernel will be prevented from dereferencing
76015 + userland pointers in contexts where the kernel expects only kernel
76016 + pointers. This is both a useful runtime debugging feature and a
76017 + security measure that prevents exploiting a class of kernel bugs.
76018 +
76019 + The tradeoff is that some virtualization solutions may experience
76020 +	  a huge slowdown, and therefore you should not enable this feature
76021 +	  for kernels meant to run in such environments. Whether a given VM
76022 +	  solution is affected is best determined by simply trying it
76023 +	  out: the performance impact will be obvious right from boot, as this
76024 +	  mechanism engages very early on. A good rule of thumb is that
76025 + VMs running on CPUs without hardware virtualization support (i.e.,
76026 + the majority of IA-32 CPUs) will likely experience the slowdown.
76027 +
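
The sketch below illustrates only the invariant being enforced, namely that a pointer originating from the untrusted (userland) side must never be dereferenced as if it pointed into kernel memory; the real implementation does not use an explicit range check like this.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* A stand-in "kernel" region and a helper that refuses to dereference any
 * pointer lying outside it.  Purely an illustration of the rule; the real
 * mechanism works at the MMU/segmentation level, not via explicit checks. */
static char kernel_pool[4096];

static int deref_kernel_byte(const char *p, char *out)
{
	uintptr_t lo = (uintptr_t)kernel_pool;
	uintptr_t hi = lo + sizeof(kernel_pool);
	uintptr_t up = (uintptr_t)p;

	if (up < lo || up >= hi)
		return -1;              /* refuse "userland" pointers */
	*out = *p;
	return 0;
}

int main(void)
{
	char user_buf[16] = "from userland";
	char c;

	strcpy(kernel_pool, "kernel data");

	if (deref_kernel_byte(kernel_pool, &c) == 0)
		printf("kernel pointer ok: '%c'\n", c);
	if (deref_kernel_byte(user_buf, &c) != 0)
		printf("userland pointer rejected\n");
	return 0;
}
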
76028 +config PAX_REFCOUNT
76029 + bool "Prevent various kernel object reference counter overflows"
76030 + depends on GRKERNSEC && (X86 || SPARC64)
76031 + help
76032 + By saying Y here the kernel will detect and prevent overflowing
76033 + various (but not all) kinds of object reference counters. Such
76034 +	  overflows normally occur only as a result of bugs and are often, if
76035 + not always, exploitable.
76036 +
76037 + The tradeoff is that data structures protected by an overflowed
76038 + refcount will never be freed and therefore will leak memory. Note
76039 +	  that such a leak happens even without this protection, but in
76040 + that case the overflow can eventually trigger the freeing of the
76041 + data structure while it is still being used elsewhere, resulting
76042 + in the exploitable situation that this feature prevents.
76043 +
76044 + Since this has a negligible performance impact, you should enable
76045 + this feature.
76046 +
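
The protection amounts to making a reference counter saturate instead of wrapping around. The following userland sketch shows that behaviour with a plain int and no locking, which is an obvious simplification compared to the kernel's atomic counters.

#include <stdio.h>
#include <limits.h>

/* A reference counter that saturates at INT_MAX instead of wrapping to a
 * negative value.  A saturated object is never freed (it leaks), which is
 * exactly the tradeoff described above, but it can no longer be freed
 * prematurely and reused while still referenced. */
struct obj {
	int refcount;
};

static void obj_get(struct obj *o)
{
	if (o->refcount == INT_MAX)
		return;                  /* saturated: leak instead of overflow */
	o->refcount++;
}

static int obj_put(struct obj *o)
{
	if (o->refcount == INT_MAX)
		return 0;                /* saturated objects are never released */
	if (--o->refcount == 0) {
		printf("last reference dropped, freeing object\n");
		return 1;
	}
	return 0;
}

int main(void)
{
	struct obj o = { .refcount = 1 };

	obj_get(&o);
	obj_put(&o);
	obj_put(&o);                     /* refcount went 1 -> 2 -> 1 -> 0 */
	return 0;
}
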
76047 +config PAX_USERCOPY
76048 + bool "Harden heap object copies between kernel and userland"
76049 + depends on X86 || PPC || SPARC || ARM
76050 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
76051 + help
76052 + By saying Y here the kernel will enforce the size of heap objects
76053 + when they are copied in either direction between the kernel and
76054 + userland, even if only a part of the heap object is copied.
76055 +
76056 + Specifically, this checking prevents information leaking from the
76057 + kernel heap during kernel to userland copies (if the kernel heap
76058 + object is otherwise fully initialized) and prevents kernel heap
76059 + overflows during userland to kernel copies.
76060 +
76061 + Note that the current implementation provides the strictest bounds
76062 + checks for the SLUB allocator.
76063 +
76064 +	  Enabling this option also enables per-slab-cache protection against
76065 +	  data in a given cache being copied into or out of userland via the
76066 +	  usercopy accessors. Though the whitelist of regions will be reduced
76067 +	  over time, it notably protects important data structures like task structs.
76068 +
76070 + If frame pointers are enabled on x86, this option will also
76071 + restrict copies into and out of the kernel stack to local variables
76072 + within a single frame.
76073 +
76074 + Since this has a negligible performance impact, you should enable
76075 + this feature.
76076 +
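
The check boils down to refusing a copy whose length exceeds the size of the heap object it targets. In the kernel the object size comes from the slab metadata; in the illustrative sketch below it is simply recorded next to the allocation.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy allocation header recording the object size, so a copy helper can
 * refuse requests that would spill past the object.  The same rule is
 * enforced above using the slab allocator's own bookkeeping. */
struct tracked_buf {
	size_t size;
	unsigned char data[];
};

static struct tracked_buf *tracked_alloc(size_t size)
{
	struct tracked_buf *b = malloc(sizeof(*b) + size);

	if (b)
		b->size = size;
	return b;
}

static int checked_copy_in(struct tracked_buf *dst, const void *src, size_t len)
{
	if (len > dst->size) {
		fprintf(stderr, "refusing copy of %zu bytes into %zu-byte object\n",
			len, dst->size);
		return -1;
	}
	memcpy(dst->data, src, len);
	return 0;
}

int main(void)
{
	struct tracked_buf *b = tracked_alloc(16);
	char input[64];

	memset(input, 'A', sizeof(input));
	if (!b)
		return 1;
	checked_copy_in(b, input, 8);             /* fits, accepted */
	checked_copy_in(b, input, sizeof(input)); /* would overflow, rejected */
	free(b);
	return 0;
}
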
76077 +endmenu
76078 +
76079 +endmenu
76080 +
76081 config KEYS
76082 bool "Enable access key retention support"
76083 help
76084 @@ -146,7 +695,7 @@ config INTEL_TXT
76085 config LSM_MMAP_MIN_ADDR
76086 int "Low address space for LSM to protect from user allocation"
76087 depends on SECURITY && SECURITY_SELINUX
76088 - default 65536
76089 + default 32768
76090 help
76091 This is the portion of low virtual memory which should be protected
76092 from userspace allocation. Keeping a user from writing to low pages
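
The effect of this limit is easy to observe from userland: an unprivileged attempt to map the protected low range fails. The program below is a small demonstration; the exact threshold is whatever vm.mmap_min_addr resolves to on the running system.

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/mman.h>

/* Try to create a fixed mapping in low virtual memory.  With a non-zero
 * mmap_min_addr an unprivileged process is refused, which keeps kernel
 * NULL-pointer-dereference bugs from being combined with attacker-mapped
 * page-zero contents. */
int main(void)
{
	void *p = mmap((void *)0x1000, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

	if (p == MAP_FAILED)
		printf("low mapping refused: %s\n", strerror(errno));
	else
		printf("low mapping succeeded at %p (limit not enforced?)\n", p);
	return 0;
}
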
76093 diff -urNp linux-2.6.32.44/security/keys/keyring.c linux-2.6.32.44/security/keys/keyring.c
76094 --- linux-2.6.32.44/security/keys/keyring.c 2011-03-27 14:31:47.000000000 -0400
76095 +++ linux-2.6.32.44/security/keys/keyring.c 2011-04-18 22:03:00.000000000 -0400
76096 @@ -214,15 +214,15 @@ static long keyring_read(const struct ke
76097 ret = -EFAULT;
76098
76099 for (loop = 0; loop < klist->nkeys; loop++) {
76100 + key_serial_t serial;
76101 key = klist->keys[loop];
76102 + serial = key->serial;
76103
76104 tmp = sizeof(key_serial_t);
76105 if (tmp > buflen)
76106 tmp = buflen;
76107
76108 - if (copy_to_user(buffer,
76109 - &key->serial,
76110 - tmp) != 0)
76111 + if (copy_to_user(buffer, &serial, tmp))
76112 goto error;
76113
76114 buflen -= tmp;
76115 diff -urNp linux-2.6.32.44/security/min_addr.c linux-2.6.32.44/security/min_addr.c
76116 --- linux-2.6.32.44/security/min_addr.c 2011-03-27 14:31:47.000000000 -0400
76117 +++ linux-2.6.32.44/security/min_addr.c 2011-04-17 15:56:46.000000000 -0400
76118 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG
76119 */
76120 static void update_mmap_min_addr(void)
76121 {
76122 +#ifndef SPARC
76123 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
76124 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
76125 mmap_min_addr = dac_mmap_min_addr;
76126 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
76127 #else
76128 mmap_min_addr = dac_mmap_min_addr;
76129 #endif
76130 +#endif
76131 }
76132
76133 /*
76134 diff -urNp linux-2.6.32.44/security/root_plug.c linux-2.6.32.44/security/root_plug.c
76135 --- linux-2.6.32.44/security/root_plug.c 2011-03-27 14:31:47.000000000 -0400
76136 +++ linux-2.6.32.44/security/root_plug.c 2011-04-17 15:56:46.000000000 -0400
76137 @@ -70,7 +70,7 @@ static int rootplug_bprm_check_security
76138 return 0;
76139 }
76140
76141 -static struct security_operations rootplug_security_ops = {
76142 +static struct security_operations rootplug_security_ops __read_only = {
76143 .bprm_check_security = rootplug_bprm_check_security,
76144 };
76145
76146 diff -urNp linux-2.6.32.44/security/security.c linux-2.6.32.44/security/security.c
76147 --- linux-2.6.32.44/security/security.c 2011-03-27 14:31:47.000000000 -0400
76148 +++ linux-2.6.32.44/security/security.c 2011-04-17 15:56:46.000000000 -0400
76149 @@ -24,7 +24,7 @@ static __initdata char chosen_lsm[SECURI
76150 extern struct security_operations default_security_ops;
76151 extern void security_fixup_ops(struct security_operations *ops);
76152
76153 -struct security_operations *security_ops; /* Initialized to NULL */
76154 +struct security_operations *security_ops __read_only; /* Initialized to NULL */
76155
76156 static inline int verify(struct security_operations *ops)
76157 {
76158 @@ -106,7 +106,7 @@ int __init security_module_enable(struct
76159 * If there is already a security module registered with the kernel,
76160 * an error will be returned. Otherwise %0 is returned on success.
76161 */
76162 -int register_security(struct security_operations *ops)
76163 +int __init register_security(struct security_operations *ops)
76164 {
76165 if (verify(ops)) {
76166 printk(KERN_DEBUG "%s could not verify "
76167 diff -urNp linux-2.6.32.44/security/selinux/hooks.c linux-2.6.32.44/security/selinux/hooks.c
76168 --- linux-2.6.32.44/security/selinux/hooks.c 2011-03-27 14:31:47.000000000 -0400
76169 +++ linux-2.6.32.44/security/selinux/hooks.c 2011-04-17 15:56:46.000000000 -0400
76170 @@ -131,7 +131,7 @@ int selinux_enabled = 1;
76171 * Minimal support for a secondary security module,
76172 * just to allow the use of the capability module.
76173 */
76174 -static struct security_operations *secondary_ops;
76175 +static struct security_operations *secondary_ops __read_only;
76176
76177 /* Lists of inode and superblock security structures initialized
76178 before the policy was loaded. */
76179 @@ -5457,7 +5457,7 @@ static int selinux_key_getsecurity(struc
76180
76181 #endif
76182
76183 -static struct security_operations selinux_ops = {
76184 +static struct security_operations selinux_ops __read_only = {
76185 .name = "selinux",
76186
76187 .ptrace_access_check = selinux_ptrace_access_check,
76188 @@ -5841,7 +5841,9 @@ int selinux_disable(void)
76189 avc_disable();
76190
76191 /* Reset security_ops to the secondary module, dummy or capability. */
76192 + pax_open_kernel();
76193 security_ops = secondary_ops;
76194 + pax_close_kernel();
76195
76196 /* Unregister netfilter hooks. */
76197 selinux_nf_ip_exit();
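
The hunk above brackets a write to otherwise read-only data with pax_open_kernel()/pax_close_kernel(). The same pattern has a direct userland analogue with mprotect(), sketched below purely as an illustration of the idea, not of the kernel mechanism itself.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Keep a table in memory that is read-only during normal operation and
 * open a short write window only when it legitimately has to change,
 * mirroring the open/close bracket around the security_ops update above. */
static long pagesz;

static void protect(void *p, int prot)
{
	if (mprotect(p, pagesz, prot) != 0)
		perror("mprotect");
}

int main(void)
{
	pagesz = sysconf(_SC_PAGESIZE);

	char *table = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (table == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	strcpy(table, "primary ops");
	protect(table, PROT_READ);               /* normally read-only */

	/* ... later, a legitimate update ... */
	protect(table, PROT_READ | PROT_WRITE);  /* "open" */
	strcpy(table, "secondary ops");
	protect(table, PROT_READ);               /* "close" */

	printf("table now: %s\n", table);
	munmap(table, pagesz);
	return 0;
}
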
76198 diff -urNp linux-2.6.32.44/security/selinux/include/xfrm.h linux-2.6.32.44/security/selinux/include/xfrm.h
76199 --- linux-2.6.32.44/security/selinux/include/xfrm.h 2011-03-27 14:31:47.000000000 -0400
76200 +++ linux-2.6.32.44/security/selinux/include/xfrm.h 2011-05-18 20:09:37.000000000 -0400
76201 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct s
76202
76203 static inline void selinux_xfrm_notify_policyload(void)
76204 {
76205 - atomic_inc(&flow_cache_genid);
76206 + atomic_inc_unchecked(&flow_cache_genid);
76207 }
76208 #else
76209 static inline int selinux_xfrm_enabled(void)
76210 diff -urNp linux-2.6.32.44/security/selinux/ss/services.c linux-2.6.32.44/security/selinux/ss/services.c
76211 --- linux-2.6.32.44/security/selinux/ss/services.c 2011-03-27 14:31:47.000000000 -0400
76212 +++ linux-2.6.32.44/security/selinux/ss/services.c 2011-05-16 21:46:57.000000000 -0400
76213 @@ -1715,6 +1715,8 @@ int security_load_policy(void *data, siz
76214 int rc = 0;
76215 struct policy_file file = { data, len }, *fp = &file;
76216
76217 + pax_track_stack();
76218 +
76219 if (!ss_initialized) {
76220 avtab_cache_init();
76221 if (policydb_read(&policydb, fp)) {
76222 diff -urNp linux-2.6.32.44/security/smack/smack_lsm.c linux-2.6.32.44/security/smack/smack_lsm.c
76223 --- linux-2.6.32.44/security/smack/smack_lsm.c 2011-03-27 14:31:47.000000000 -0400
76224 +++ linux-2.6.32.44/security/smack/smack_lsm.c 2011-04-17 15:56:46.000000000 -0400
76225 @@ -3073,7 +3073,7 @@ static int smack_inode_getsecctx(struct
76226 return 0;
76227 }
76228
76229 -struct security_operations smack_ops = {
76230 +struct security_operations smack_ops __read_only = {
76231 .name = "smack",
76232
76233 .ptrace_access_check = smack_ptrace_access_check,
76234 diff -urNp linux-2.6.32.44/security/tomoyo/tomoyo.c linux-2.6.32.44/security/tomoyo/tomoyo.c
76235 --- linux-2.6.32.44/security/tomoyo/tomoyo.c 2011-03-27 14:31:47.000000000 -0400
76236 +++ linux-2.6.32.44/security/tomoyo/tomoyo.c 2011-04-17 15:56:46.000000000 -0400
76237 @@ -275,7 +275,7 @@ static int tomoyo_dentry_open(struct fil
76238 * tomoyo_security_ops is a "struct security_operations" which is used for
76239 * registering TOMOYO.
76240 */
76241 -static struct security_operations tomoyo_security_ops = {
76242 +static struct security_operations tomoyo_security_ops __read_only = {
76243 .name = "tomoyo",
76244 .cred_alloc_blank = tomoyo_cred_alloc_blank,
76245 .cred_prepare = tomoyo_cred_prepare,
76246 diff -urNp linux-2.6.32.44/sound/aoa/codecs/onyx.c linux-2.6.32.44/sound/aoa/codecs/onyx.c
76247 --- linux-2.6.32.44/sound/aoa/codecs/onyx.c 2011-03-27 14:31:47.000000000 -0400
76248 +++ linux-2.6.32.44/sound/aoa/codecs/onyx.c 2011-04-17 15:56:46.000000000 -0400
76249 @@ -53,7 +53,7 @@ struct onyx {
76250 spdif_locked:1,
76251 analog_locked:1,
76252 original_mute:2;
76253 - int open_count;
76254 + local_t open_count;
76255 struct codec_info *codec_info;
76256
76257 /* mutex serializes concurrent access to the device
76258 @@ -752,7 +752,7 @@ static int onyx_open(struct codec_info_i
76259 struct onyx *onyx = cii->codec_data;
76260
76261 mutex_lock(&onyx->mutex);
76262 - onyx->open_count++;
76263 + local_inc(&onyx->open_count);
76264 mutex_unlock(&onyx->mutex);
76265
76266 return 0;
76267 @@ -764,8 +764,7 @@ static int onyx_close(struct codec_info_
76268 struct onyx *onyx = cii->codec_data;
76269
76270 mutex_lock(&onyx->mutex);
76271 - onyx->open_count--;
76272 - if (!onyx->open_count)
76273 + if (local_dec_and_test(&onyx->open_count))
76274 onyx->spdif_locked = onyx->analog_locked = 0;
76275 mutex_unlock(&onyx->mutex);
76276
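
The conversions above replace a plain int open counter with an atomic type driven by inc and dec-and-test primitives. A userland sketch of the same pattern using C11 atomics follows (local_t itself is a kernel-only type).

#include <stdatomic.h>
#include <stdio.h>

/* Open/close accounting with an atomic counter: the last closer, detected
 * by the decrement that reaches zero, performs the cleanup. */
static atomic_int open_count = ATOMIC_VAR_INIT(0);

static void device_open(void)
{
	atomic_fetch_add(&open_count, 1);
}

static void device_close(void)
{
	/* fetch_sub returns the old value; old value 1 means we just
	 * dropped the count to zero */
	if (atomic_fetch_sub(&open_count, 1) == 1)
		printf("last close: releasing device state\n");
}

int main(void)
{
	device_open();
	device_open();
	device_close();
	device_close();     /* prints the release message */
	return 0;
}
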
76277 diff -urNp linux-2.6.32.44/sound/aoa/codecs/onyx.h linux-2.6.32.44/sound/aoa/codecs/onyx.h
76278 --- linux-2.6.32.44/sound/aoa/codecs/onyx.h 2011-03-27 14:31:47.000000000 -0400
76279 +++ linux-2.6.32.44/sound/aoa/codecs/onyx.h 2011-04-17 15:56:46.000000000 -0400
76280 @@ -11,6 +11,7 @@
76281 #include <linux/i2c.h>
76282 #include <asm/pmac_low_i2c.h>
76283 #include <asm/prom.h>
76284 +#include <asm/local.h>
76285
76286 /* PCM3052 register definitions */
76287
76288 diff -urNp linux-2.6.32.44/sound/core/seq/seq_device.c linux-2.6.32.44/sound/core/seq/seq_device.c
76289 --- linux-2.6.32.44/sound/core/seq/seq_device.c 2011-03-27 14:31:47.000000000 -0400
76290 +++ linux-2.6.32.44/sound/core/seq/seq_device.c 2011-08-05 20:33:55.000000000 -0400
76291 @@ -63,7 +63,7 @@ struct ops_list {
76292 int argsize; /* argument size */
76293
76294 /* operators */
76295 - struct snd_seq_dev_ops ops;
76296 + struct snd_seq_dev_ops *ops;
76297
76298 /* registred devices */
76299 struct list_head dev_list; /* list of devices */
76300 @@ -332,7 +332,7 @@ int snd_seq_device_register_driver(char
76301
76302 mutex_lock(&ops->reg_mutex);
76303 /* copy driver operators */
76304 - ops->ops = *entry;
76305 + ops->ops = entry;
76306 ops->driver |= DRIVER_LOADED;
76307 ops->argsize = argsize;
76308
76309 @@ -462,7 +462,7 @@ static int init_device(struct snd_seq_de
76310 dev->name, ops->id, ops->argsize, dev->argsize);
76311 return -EINVAL;
76312 }
76313 - if (ops->ops.init_device(dev) >= 0) {
76314 + if (ops->ops->init_device(dev) >= 0) {
76315 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
76316 ops->num_init_devices++;
76317 } else {
76318 @@ -489,7 +489,7 @@ static int free_device(struct snd_seq_de
76319 dev->name, ops->id, ops->argsize, dev->argsize);
76320 return -EINVAL;
76321 }
76322 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
76323 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
76324 dev->status = SNDRV_SEQ_DEVICE_FREE;
76325 dev->driver_data = NULL;
76326 ops->num_init_devices--;
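
The change above stops copying the driver's operation table into writable storage and keeps a pointer to the caller's constant table instead, so the table can stay in read-only memory. A minimal sketch of that registration pattern:

#include <stdio.h>

/* Registration stores a pointer to the caller's constant operations table
 * rather than an embedded, writable copy of it. */
struct dev_ops {
	int (*init_device)(const char *name);
	int (*free_device)(const char *name);
};

struct ops_list {
	const struct dev_ops *ops;   /* pointer, not an embedded copy */
};

static int my_init(const char *name) { printf("init %s\n", name); return 0; }
static int my_free(const char *name) { printf("free %s\n", name); return 0; }

static const struct dev_ops my_ops = {
	.init_device = my_init,
	.free_device = my_free,
};

static void register_driver(struct ops_list *l, const struct dev_ops *entry)
{
	l->ops = entry;              /* was: an assignment copying *entry */
}

int main(void)
{
	struct ops_list l;

	register_driver(&l, &my_ops);
	l.ops->init_device("card0");
	l.ops->free_device("card0");
	return 0;
}
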
76327 diff -urNp linux-2.6.32.44/sound/drivers/mts64.c linux-2.6.32.44/sound/drivers/mts64.c
76328 --- linux-2.6.32.44/sound/drivers/mts64.c 2011-03-27 14:31:47.000000000 -0400
76329 +++ linux-2.6.32.44/sound/drivers/mts64.c 2011-04-17 15:56:46.000000000 -0400
76330 @@ -27,6 +27,7 @@
76331 #include <sound/initval.h>
76332 #include <sound/rawmidi.h>
76333 #include <sound/control.h>
76334 +#include <asm/local.h>
76335
76336 #define CARD_NAME "Miditerminal 4140"
76337 #define DRIVER_NAME "MTS64"
76338 @@ -65,7 +66,7 @@ struct mts64 {
76339 struct pardevice *pardev;
76340 int pardev_claimed;
76341
76342 - int open_count;
76343 + local_t open_count;
76344 int current_midi_output_port;
76345 int current_midi_input_port;
76346 u8 mode[MTS64_NUM_INPUT_PORTS];
76347 @@ -695,7 +696,7 @@ static int snd_mts64_rawmidi_open(struct
76348 {
76349 struct mts64 *mts = substream->rmidi->private_data;
76350
76351 - if (mts->open_count == 0) {
76352 + if (local_read(&mts->open_count) == 0) {
76353 /* We don't need a spinlock here, because this is just called
76354 if the device has not been opened before.
76355 So there aren't any IRQs from the device */
76356 @@ -703,7 +704,7 @@ static int snd_mts64_rawmidi_open(struct
76357
76358 msleep(50);
76359 }
76360 - ++(mts->open_count);
76361 + local_inc(&mts->open_count);
76362
76363 return 0;
76364 }
76365 @@ -713,8 +714,7 @@ static int snd_mts64_rawmidi_close(struc
76366 struct mts64 *mts = substream->rmidi->private_data;
76367 unsigned long flags;
76368
76369 - --(mts->open_count);
76370 - if (mts->open_count == 0) {
76371 + if (local_dec_return(&mts->open_count) == 0) {
76372 /* We need the spinlock_irqsave here because we can still
76373 have IRQs at this point */
76374 spin_lock_irqsave(&mts->lock, flags);
76375 @@ -723,8 +723,8 @@ static int snd_mts64_rawmidi_close(struc
76376
76377 msleep(500);
76378
76379 - } else if (mts->open_count < 0)
76380 - mts->open_count = 0;
76381 + } else if (local_read(&mts->open_count) < 0)
76382 + local_set(&mts->open_count, 0);
76383
76384 return 0;
76385 }
76386 diff -urNp linux-2.6.32.44/sound/drivers/opl4/opl4_lib.c linux-2.6.32.44/sound/drivers/opl4/opl4_lib.c
76387 --- linux-2.6.32.44/sound/drivers/opl4/opl4_lib.c 2011-03-27 14:31:47.000000000 -0400
76388 +++ linux-2.6.32.44/sound/drivers/opl4/opl4_lib.c 2011-08-05 20:33:55.000000000 -0400
76389 @@ -27,7 +27,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@
76390 MODULE_DESCRIPTION("OPL4 driver");
76391 MODULE_LICENSE("GPL");
76392
76393 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
76394 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
76395 {
76396 int timeout = 10;
76397 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
76398 diff -urNp linux-2.6.32.44/sound/drivers/portman2x4.c linux-2.6.32.44/sound/drivers/portman2x4.c
76399 --- linux-2.6.32.44/sound/drivers/portman2x4.c 2011-03-27 14:31:47.000000000 -0400
76400 +++ linux-2.6.32.44/sound/drivers/portman2x4.c 2011-04-17 15:56:46.000000000 -0400
76401 @@ -46,6 +46,7 @@
76402 #include <sound/initval.h>
76403 #include <sound/rawmidi.h>
76404 #include <sound/control.h>
76405 +#include <asm/local.h>
76406
76407 #define CARD_NAME "Portman 2x4"
76408 #define DRIVER_NAME "portman"
76409 @@ -83,7 +84,7 @@ struct portman {
76410 struct pardevice *pardev;
76411 int pardev_claimed;
76412
76413 - int open_count;
76414 + local_t open_count;
76415 int mode[PORTMAN_NUM_INPUT_PORTS];
76416 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
76417 };
76418 diff -urNp linux-2.6.32.44/sound/isa/cmi8330.c linux-2.6.32.44/sound/isa/cmi8330.c
76419 --- linux-2.6.32.44/sound/isa/cmi8330.c 2011-03-27 14:31:47.000000000 -0400
76420 +++ linux-2.6.32.44/sound/isa/cmi8330.c 2011-08-05 20:33:55.000000000 -0400
76421 @@ -455,16 +455,16 @@ static int __devinit snd_cmi8330_pcm(str
76422
76423 /* SB16 */
76424 ops = snd_sb16dsp_get_pcm_ops(CMI_SB_STREAM);
76425 - chip->streams[CMI_SB_STREAM].ops = *ops;
76426 + memcpy((void *)&chip->streams[CMI_SB_STREAM].ops, ops, sizeof(*ops));
76427 chip->streams[CMI_SB_STREAM].open = ops->open;
76428 - chip->streams[CMI_SB_STREAM].ops.open = cmi_open_callbacks[CMI_SB_STREAM];
76429 + *(void **)&chip->streams[CMI_SB_STREAM].ops.open = cmi_open_callbacks[CMI_SB_STREAM];
76430 chip->streams[CMI_SB_STREAM].private_data = chip->sb;
76431
76432 /* AD1848 */
76433 ops = snd_wss_get_pcm_ops(CMI_AD_STREAM);
76434 - chip->streams[CMI_AD_STREAM].ops = *ops;
76435 + memcpy((void *)&chip->streams[CMI_AD_STREAM].ops, ops, sizeof(*ops));
76436 chip->streams[CMI_AD_STREAM].open = ops->open;
76437 - chip->streams[CMI_AD_STREAM].ops.open = cmi_open_callbacks[CMI_AD_STREAM];
76438 + *(void **)&chip->streams[CMI_AD_STREAM].ops.open = cmi_open_callbacks[CMI_AD_STREAM];
76439 chip->streams[CMI_AD_STREAM].private_data = chip->wss;
76440
76441 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &chip->streams[SNDRV_PCM_STREAM_PLAYBACK].ops);
76442 diff -urNp linux-2.6.32.44/sound/oss/sb_audio.c linux-2.6.32.44/sound/oss/sb_audio.c
76443 --- linux-2.6.32.44/sound/oss/sb_audio.c 2011-03-27 14:31:47.000000000 -0400
76444 +++ linux-2.6.32.44/sound/oss/sb_audio.c 2011-04-17 15:56:46.000000000 -0400
76445 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
76446 buf16 = (signed short *)(localbuf + localoffs);
76447 while (c)
76448 {
76449 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
76450 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
76451 if (copy_from_user(lbuf8,
76452 userbuf+useroffs + p,
76453 locallen))
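
The cast above matters because c is signed: without it a negative c slips through the clamp and later becomes an enormous unsigned count, while with the cast a negative value compares as a huge unsigned number and is clamped to LBUFCOPYSIZE. A small demonstration of the conversion rule follows; the value used for LBUFCOPYSIZE is an assumption made for the illustration.

#include <stdio.h>

#define LBUFCOPYSIZE 4096   /* stand-in value for the illustration */

/* Show why the (unsigned) cast changes the outcome for negative lengths:
 * -1 >= 4096 is false in a signed compare, so -1 passes through, whereas
 * (unsigned)-1 >= 4096 is true, so the value is clamped. */
int main(void)
{
	int c = -1;

	int unclamped = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
	int clamped   = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);

	printf("signed compare   -> %d\n", unclamped);  /* -1, dangerous */
	printf("unsigned compare -> %d\n", clamped);    /* 4096, clamped */
	return 0;
}
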
76454 diff -urNp linux-2.6.32.44/sound/oss/swarm_cs4297a.c linux-2.6.32.44/sound/oss/swarm_cs4297a.c
76455 --- linux-2.6.32.44/sound/oss/swarm_cs4297a.c 2011-03-27 14:31:47.000000000 -0400
76456 +++ linux-2.6.32.44/sound/oss/swarm_cs4297a.c 2011-04-17 15:56:46.000000000 -0400
76457 @@ -2577,7 +2577,6 @@ static int __init cs4297a_init(void)
76458 {
76459 struct cs4297a_state *s;
76460 u32 pwr, id;
76461 - mm_segment_t fs;
76462 int rval;
76463 #ifndef CONFIG_BCM_CS4297A_CSWARM
76464 u64 cfg;
76465 @@ -2667,22 +2666,23 @@ static int __init cs4297a_init(void)
76466 if (!rval) {
76467 char *sb1250_duart_present;
76468
76469 +#if 0
76470 + mm_segment_t fs;
76471 fs = get_fs();
76472 set_fs(KERNEL_DS);
76473 -#if 0
76474 val = SOUND_MASK_LINE;
76475 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
76476 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
76477 val = initvol[i].vol;
76478 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
76479 }
76480 + set_fs(fs);
76481 // cs4297a_write_ac97(s, 0x18, 0x0808);
76482 #else
76483 // cs4297a_write_ac97(s, 0x5e, 0x180);
76484 cs4297a_write_ac97(s, 0x02, 0x0808);
76485 cs4297a_write_ac97(s, 0x18, 0x0808);
76486 #endif
76487 - set_fs(fs);
76488
76489 list_add(&s->list, &cs4297a_devs);
76490
76491 diff -urNp linux-2.6.32.44/sound/pci/ac97/ac97_codec.c linux-2.6.32.44/sound/pci/ac97/ac97_codec.c
76492 --- linux-2.6.32.44/sound/pci/ac97/ac97_codec.c 2011-03-27 14:31:47.000000000 -0400
76493 +++ linux-2.6.32.44/sound/pci/ac97/ac97_codec.c 2011-04-17 15:56:46.000000000 -0400
76494 @@ -1952,7 +1952,7 @@ static int snd_ac97_dev_disconnect(struc
76495 }
76496
76497 /* build_ops to do nothing */
76498 -static struct snd_ac97_build_ops null_build_ops;
76499 +static const struct snd_ac97_build_ops null_build_ops;
76500
76501 #ifdef CONFIG_SND_AC97_POWER_SAVE
76502 static void do_update_power(struct work_struct *work)
76503 diff -urNp linux-2.6.32.44/sound/pci/ac97/ac97_patch.c linux-2.6.32.44/sound/pci/ac97/ac97_patch.c
76504 --- linux-2.6.32.44/sound/pci/ac97/ac97_patch.c 2011-03-27 14:31:47.000000000 -0400
76505 +++ linux-2.6.32.44/sound/pci/ac97/ac97_patch.c 2011-04-23 12:56:12.000000000 -0400
76506 @@ -371,7 +371,7 @@ static int patch_yamaha_ymf743_build_spd
76507 return 0;
76508 }
76509
76510 -static struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
76511 +static const struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
76512 .build_spdif = patch_yamaha_ymf743_build_spdif,
76513 .build_3d = patch_yamaha_ymf7x3_3d,
76514 };
76515 @@ -455,7 +455,7 @@ static int patch_yamaha_ymf753_post_spdi
76516 return 0;
76517 }
76518
76519 -static struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
76520 +static const struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
76521 .build_3d = patch_yamaha_ymf7x3_3d,
76522 .build_post_spdif = patch_yamaha_ymf753_post_spdif
76523 };
76524 @@ -502,7 +502,7 @@ static int patch_wolfson_wm9703_specific
76525 return 0;
76526 }
76527
76528 -static struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
76529 +static const struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
76530 .build_specific = patch_wolfson_wm9703_specific,
76531 };
76532
76533 @@ -533,7 +533,7 @@ static int patch_wolfson_wm9704_specific
76534 return 0;
76535 }
76536
76537 -static struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
76538 +static const struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
76539 .build_specific = patch_wolfson_wm9704_specific,
76540 };
76541
76542 @@ -555,7 +555,7 @@ static int patch_wolfson_wm9705_specific
76543 return 0;
76544 }
76545
76546 -static struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
76547 +static const struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
76548 .build_specific = patch_wolfson_wm9705_specific,
76549 };
76550
76551 @@ -692,7 +692,7 @@ static int patch_wolfson_wm9711_specific
76552 return 0;
76553 }
76554
76555 -static struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
76556 +static const struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
76557 .build_specific = patch_wolfson_wm9711_specific,
76558 };
76559
76560 @@ -886,7 +886,7 @@ static void patch_wolfson_wm9713_resume
76561 }
76562 #endif
76563
76564 -static struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
76565 +static const struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
76566 .build_specific = patch_wolfson_wm9713_specific,
76567 .build_3d = patch_wolfson_wm9713_3d,
76568 #ifdef CONFIG_PM
76569 @@ -991,7 +991,7 @@ static int patch_sigmatel_stac97xx_speci
76570 return 0;
76571 }
76572
76573 -static struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
76574 +static const struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
76575 .build_3d = patch_sigmatel_stac9700_3d,
76576 .build_specific = patch_sigmatel_stac97xx_specific
76577 };
76578 @@ -1038,7 +1038,7 @@ static int patch_sigmatel_stac9708_speci
76579 return patch_sigmatel_stac97xx_specific(ac97);
76580 }
76581
76582 -static struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
76583 +static const struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
76584 .build_3d = patch_sigmatel_stac9708_3d,
76585 .build_specific = patch_sigmatel_stac9708_specific
76586 };
76587 @@ -1267,7 +1267,7 @@ static int patch_sigmatel_stac9758_speci
76588 return 0;
76589 }
76590
76591 -static struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
76592 +static const struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
76593 .build_3d = patch_sigmatel_stac9700_3d,
76594 .build_specific = patch_sigmatel_stac9758_specific
76595 };
76596 @@ -1342,7 +1342,7 @@ static int patch_cirrus_build_spdif(stru
76597 return 0;
76598 }
76599
76600 -static struct snd_ac97_build_ops patch_cirrus_ops = {
76601 +static const struct snd_ac97_build_ops patch_cirrus_ops = {
76602 .build_spdif = patch_cirrus_build_spdif
76603 };
76604
76605 @@ -1399,7 +1399,7 @@ static int patch_conexant_build_spdif(st
76606 return 0;
76607 }
76608
76609 -static struct snd_ac97_build_ops patch_conexant_ops = {
76610 +static const struct snd_ac97_build_ops patch_conexant_ops = {
76611 .build_spdif = patch_conexant_build_spdif
76612 };
76613
76614 @@ -1575,7 +1575,7 @@ static void patch_ad1881_chained(struct
76615 }
76616 }
76617
76618 -static struct snd_ac97_build_ops patch_ad1881_build_ops = {
76619 +static const struct snd_ac97_build_ops patch_ad1881_build_ops = {
76620 #ifdef CONFIG_PM
76621 .resume = ad18xx_resume
76622 #endif
76623 @@ -1662,7 +1662,7 @@ static int patch_ad1885_specific(struct
76624 return 0;
76625 }
76626
76627 -static struct snd_ac97_build_ops patch_ad1885_build_ops = {
76628 +static const struct snd_ac97_build_ops patch_ad1885_build_ops = {
76629 .build_specific = &patch_ad1885_specific,
76630 #ifdef CONFIG_PM
76631 .resume = ad18xx_resume
76632 @@ -1689,7 +1689,7 @@ static int patch_ad1886_specific(struct
76633 return 0;
76634 }
76635
76636 -static struct snd_ac97_build_ops patch_ad1886_build_ops = {
76637 +static const struct snd_ac97_build_ops patch_ad1886_build_ops = {
76638 .build_specific = &patch_ad1886_specific,
76639 #ifdef CONFIG_PM
76640 .resume = ad18xx_resume
76641 @@ -1896,7 +1896,7 @@ static int patch_ad1981a_specific(struct
76642 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
76643 }
76644
76645 -static struct snd_ac97_build_ops patch_ad1981a_build_ops = {
76646 +static const struct snd_ac97_build_ops patch_ad1981a_build_ops = {
76647 .build_post_spdif = patch_ad198x_post_spdif,
76648 .build_specific = patch_ad1981a_specific,
76649 #ifdef CONFIG_PM
76650 @@ -1951,7 +1951,7 @@ static int patch_ad1981b_specific(struct
76651 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
76652 }
76653
76654 -static struct snd_ac97_build_ops patch_ad1981b_build_ops = {
76655 +static const struct snd_ac97_build_ops patch_ad1981b_build_ops = {
76656 .build_post_spdif = patch_ad198x_post_spdif,
76657 .build_specific = patch_ad1981b_specific,
76658 #ifdef CONFIG_PM
76659 @@ -2090,7 +2090,7 @@ static int patch_ad1888_specific(struct
76660 return patch_build_controls(ac97, snd_ac97_ad1888_controls, ARRAY_SIZE(snd_ac97_ad1888_controls));
76661 }
76662
76663 -static struct snd_ac97_build_ops patch_ad1888_build_ops = {
76664 +static const struct snd_ac97_build_ops patch_ad1888_build_ops = {
76665 .build_post_spdif = patch_ad198x_post_spdif,
76666 .build_specific = patch_ad1888_specific,
76667 #ifdef CONFIG_PM
76668 @@ -2139,7 +2139,7 @@ static int patch_ad1980_specific(struct
76669 return patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1);
76670 }
76671
76672 -static struct snd_ac97_build_ops patch_ad1980_build_ops = {
76673 +static const struct snd_ac97_build_ops patch_ad1980_build_ops = {
76674 .build_post_spdif = patch_ad198x_post_spdif,
76675 .build_specific = patch_ad1980_specific,
76676 #ifdef CONFIG_PM
76677 @@ -2254,7 +2254,7 @@ static int patch_ad1985_specific(struct
76678 ARRAY_SIZE(snd_ac97_ad1985_controls));
76679 }
76680
76681 -static struct snd_ac97_build_ops patch_ad1985_build_ops = {
76682 +static const struct snd_ac97_build_ops patch_ad1985_build_ops = {
76683 .build_post_spdif = patch_ad198x_post_spdif,
76684 .build_specific = patch_ad1985_specific,
76685 #ifdef CONFIG_PM
76686 @@ -2546,7 +2546,7 @@ static int patch_ad1986_specific(struct
76687 ARRAY_SIZE(snd_ac97_ad1985_controls));
76688 }
76689
76690 -static struct snd_ac97_build_ops patch_ad1986_build_ops = {
76691 +static const struct snd_ac97_build_ops patch_ad1986_build_ops = {
76692 .build_post_spdif = patch_ad198x_post_spdif,
76693 .build_specific = patch_ad1986_specific,
76694 #ifdef CONFIG_PM
76695 @@ -2651,7 +2651,7 @@ static int patch_alc650_specific(struct
76696 return 0;
76697 }
76698
76699 -static struct snd_ac97_build_ops patch_alc650_ops = {
76700 +static const struct snd_ac97_build_ops patch_alc650_ops = {
76701 .build_specific = patch_alc650_specific,
76702 .update_jacks = alc650_update_jacks
76703 };
76704 @@ -2803,7 +2803,7 @@ static int patch_alc655_specific(struct
76705 return 0;
76706 }
76707
76708 -static struct snd_ac97_build_ops patch_alc655_ops = {
76709 +static const struct snd_ac97_build_ops patch_alc655_ops = {
76710 .build_specific = patch_alc655_specific,
76711 .update_jacks = alc655_update_jacks
76712 };
76713 @@ -2915,7 +2915,7 @@ static int patch_alc850_specific(struct
76714 return 0;
76715 }
76716
76717 -static struct snd_ac97_build_ops patch_alc850_ops = {
76718 +static const struct snd_ac97_build_ops patch_alc850_ops = {
76719 .build_specific = patch_alc850_specific,
76720 .update_jacks = alc850_update_jacks
76721 };
76722 @@ -2977,7 +2977,7 @@ static int patch_cm9738_specific(struct
76723 return patch_build_controls(ac97, snd_ac97_cm9738_controls, ARRAY_SIZE(snd_ac97_cm9738_controls));
76724 }
76725
76726 -static struct snd_ac97_build_ops patch_cm9738_ops = {
76727 +static const struct snd_ac97_build_ops patch_cm9738_ops = {
76728 .build_specific = patch_cm9738_specific,
76729 .update_jacks = cm9738_update_jacks
76730 };
76731 @@ -3068,7 +3068,7 @@ static int patch_cm9739_post_spdif(struc
76732 return patch_build_controls(ac97, snd_ac97_cm9739_controls_spdif, ARRAY_SIZE(snd_ac97_cm9739_controls_spdif));
76733 }
76734
76735 -static struct snd_ac97_build_ops patch_cm9739_ops = {
76736 +static const struct snd_ac97_build_ops patch_cm9739_ops = {
76737 .build_specific = patch_cm9739_specific,
76738 .build_post_spdif = patch_cm9739_post_spdif,
76739 .update_jacks = cm9739_update_jacks
76740 @@ -3242,7 +3242,7 @@ static int patch_cm9761_specific(struct
76741 return patch_build_controls(ac97, snd_ac97_cm9761_controls, ARRAY_SIZE(snd_ac97_cm9761_controls));
76742 }
76743
76744 -static struct snd_ac97_build_ops patch_cm9761_ops = {
76745 +static const struct snd_ac97_build_ops patch_cm9761_ops = {
76746 .build_specific = patch_cm9761_specific,
76747 .build_post_spdif = patch_cm9761_post_spdif,
76748 .update_jacks = cm9761_update_jacks
76749 @@ -3338,7 +3338,7 @@ static int patch_cm9780_specific(struct
76750 return patch_build_controls(ac97, cm9780_controls, ARRAY_SIZE(cm9780_controls));
76751 }
76752
76753 -static struct snd_ac97_build_ops patch_cm9780_ops = {
76754 +static const struct snd_ac97_build_ops patch_cm9780_ops = {
76755 .build_specific = patch_cm9780_specific,
76756 .build_post_spdif = patch_cm9761_post_spdif /* identical with CM9761 */
76757 };
76758 @@ -3458,7 +3458,7 @@ static int patch_vt1616_specific(struct
76759 return 0;
76760 }
76761
76762 -static struct snd_ac97_build_ops patch_vt1616_ops = {
76763 +static const struct snd_ac97_build_ops patch_vt1616_ops = {
76764 .build_specific = patch_vt1616_specific
76765 };
76766
76767 @@ -3812,7 +3812,7 @@ static int patch_it2646_specific(struct
76768 return 0;
76769 }
76770
76771 -static struct snd_ac97_build_ops patch_it2646_ops = {
76772 +static const struct snd_ac97_build_ops patch_it2646_ops = {
76773 .build_specific = patch_it2646_specific,
76774 .update_jacks = it2646_update_jacks
76775 };
76776 @@ -3846,7 +3846,7 @@ static int patch_si3036_specific(struct
76777 return 0;
76778 }
76779
76780 -static struct snd_ac97_build_ops patch_si3036_ops = {
76781 +static const struct snd_ac97_build_ops patch_si3036_ops = {
76782 .build_specific = patch_si3036_specific,
76783 };
76784
76785 @@ -3913,7 +3913,7 @@ static int patch_ucb1400_specific(struct
76786 return 0;
76787 }
76788
76789 -static struct snd_ac97_build_ops patch_ucb1400_ops = {
76790 +static const struct snd_ac97_build_ops patch_ucb1400_ops = {
76791 .build_specific = patch_ucb1400_specific,
76792 };
76793
76794 diff -urNp linux-2.6.32.44/sound/pci/hda/hda_codec.h linux-2.6.32.44/sound/pci/hda/hda_codec.h
76795 --- linux-2.6.32.44/sound/pci/hda/hda_codec.h 2011-03-27 14:31:47.000000000 -0400
76796 +++ linux-2.6.32.44/sound/pci/hda/hda_codec.h 2011-08-05 20:33:55.000000000 -0400
76797 @@ -580,7 +580,7 @@ struct hda_bus_ops {
76798 /* notify power-up/down from codec to controller */
76799 void (*pm_notify)(struct hda_bus *bus);
76800 #endif
76801 -};
76802 +} __no_const;
76803
76804 /* template to pass to the bus constructor */
76805 struct hda_bus_template {
76806 @@ -705,7 +705,7 @@ struct hda_pcm_ops {
76807 struct snd_pcm_substream *substream);
76808 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
76809 struct snd_pcm_substream *substream);
76810 -};
76811 +} __no_const;
76812
76813 /* PCM information for each substream */
76814 struct hda_pcm_stream {
76815 diff -urNp linux-2.6.32.44/sound/pci/hda/hda_generic.c linux-2.6.32.44/sound/pci/hda/hda_generic.c
76816 --- linux-2.6.32.44/sound/pci/hda/hda_generic.c 2011-03-27 14:31:47.000000000 -0400
76817 +++ linux-2.6.32.44/sound/pci/hda/hda_generic.c 2011-08-05 20:33:55.000000000 -0400
76818 @@ -1097,7 +1097,7 @@ int snd_hda_parse_generic_codec(struct h
76819 (err = parse_output(codec)) < 0)
76820 goto error;
76821
76822 - codec->patch_ops = generic_patch_ops;
76823 + memcpy((void *)&codec->patch_ops, &generic_patch_ops, sizeof(generic_patch_ops));
76824
76825 return 0;
76826
76827 diff -urNp linux-2.6.32.44/sound/pci/hda/patch_analog.c linux-2.6.32.44/sound/pci/hda/patch_analog.c
76828 --- linux-2.6.32.44/sound/pci/hda/patch_analog.c 2011-03-27 14:31:47.000000000 -0400
76829 +++ linux-2.6.32.44/sound/pci/hda/patch_analog.c 2011-08-05 20:33:55.000000000 -0400
76830 @@ -1069,7 +1069,7 @@ static int patch_ad1986a(struct hda_code
76831 #endif
76832 spec->vmaster_nid = 0x1b;
76833
76834 - codec->patch_ops = ad198x_patch_ops;
76835 + memcpy((void *)&codec->patch_ops, &ad198x_patch_ops, sizeof(ad198x_patch_ops));
76836
76837 /* override some parameters */
76838 board_config = snd_hda_check_board_config(codec, AD1986A_MODELS,
76839 @@ -1120,8 +1120,8 @@ static int patch_ad1986a(struct hda_code
76840 if (!is_jack_available(codec, 0x25))
76841 spec->multiout.dig_out_nid = 0;
76842 spec->input_mux = &ad1986a_automic_capture_source;
76843 - codec->patch_ops.unsol_event = ad1986a_automic_unsol_event;
76844 - codec->patch_ops.init = ad1986a_automic_init;
76845 + *(void **)&codec->patch_ops.unsol_event = ad1986a_automic_unsol_event;
76846 + *(void **)&codec->patch_ops.init = ad1986a_automic_init;
76847 break;
76848 case AD1986A_SAMSUNG_P50:
76849 spec->num_mixers = 2;
76850 @@ -1137,8 +1137,8 @@ static int patch_ad1986a(struct hda_code
76851 if (!is_jack_available(codec, 0x25))
76852 spec->multiout.dig_out_nid = 0;
76853 spec->input_mux = &ad1986a_automic_capture_source;
76854 - codec->patch_ops.unsol_event = ad1986a_samsung_p50_unsol_event;
76855 - codec->patch_ops.init = ad1986a_samsung_p50_init;
76856 + *(void **)&codec->patch_ops.unsol_event = ad1986a_samsung_p50_unsol_event;
76857 + *(void **)&codec->patch_ops.init = ad1986a_samsung_p50_init;
76858 break;
76859 case AD1986A_LAPTOP_AUTOMUTE:
76860 spec->num_mixers = 3;
76861 @@ -1154,8 +1154,8 @@ static int patch_ad1986a(struct hda_code
76862 if (!is_jack_available(codec, 0x25))
76863 spec->multiout.dig_out_nid = 0;
76864 spec->input_mux = &ad1986a_laptop_eapd_capture_source;
76865 - codec->patch_ops.unsol_event = ad1986a_hp_unsol_event;
76866 - codec->patch_ops.init = ad1986a_hp_init;
76867 + *(void **)&codec->patch_ops.unsol_event = ad1986a_hp_unsol_event;
76868 + *(void **)&codec->patch_ops.init = ad1986a_hp_init;
76869 /* Lenovo N100 seems to report the reversed bit
76870 * for HP jack-sensing
76871 */
76872 @@ -1363,7 +1363,7 @@ static int patch_ad1983(struct hda_codec
76873 #endif
76874 spec->vmaster_nid = 0x05;
76875
76876 - codec->patch_ops = ad198x_patch_ops;
76877 + memcpy((void *)&codec->patch_ops, &ad198x_patch_ops, sizeof(ad198x_patch_ops));
76878
76879 return 0;
76880 }
76881 @@ -1769,7 +1769,7 @@ static int patch_ad1981(struct hda_codec
76882 #endif
76883 spec->vmaster_nid = 0x05;
76884
76885 - codec->patch_ops = ad198x_patch_ops;
76886 + memcpy((void *)&codec->patch_ops, &ad198x_patch_ops, sizeof(ad198x_patch_ops));
76887
76888 /* override some parameters */
76889 board_config = snd_hda_check_board_config(codec, AD1981_MODELS,
76890 @@ -1783,8 +1783,8 @@ static int patch_ad1981(struct hda_codec
76891 spec->multiout.dig_out_nid = 0;
76892 spec->input_mux = &ad1981_hp_capture_source;
76893
76894 - codec->patch_ops.init = ad1981_hp_init;
76895 - codec->patch_ops.unsol_event = ad1981_hp_unsol_event;
76896 + *(void **)&codec->patch_ops.init = ad1981_hp_init;
76897 + *(void **)&codec->patch_ops.unsol_event = ad1981_hp_unsol_event;
76898 break;
76899 case AD1981_THINKPAD:
76900 spec->mixers[0] = ad1981_thinkpad_mixers;
76901 @@ -1805,8 +1805,8 @@ static int patch_ad1981(struct hda_codec
76902 spec->init_verbs[1] = ad1981_toshiba_init_verbs;
76903 spec->multiout.dig_out_nid = 0;
76904 spec->input_mux = &ad1981_hp_capture_source;
76905 - codec->patch_ops.init = ad1981_hp_init;
76906 - codec->patch_ops.unsol_event = ad1981_hp_unsol_event;
76907 + *(void **)&codec->patch_ops.init = ad1981_hp_init;
76908 + *(void **)&codec->patch_ops.unsol_event = ad1981_hp_unsol_event;
76909 break;
76910 }
76911 return 0;
76912 @@ -3096,14 +3096,14 @@ static int patch_ad1988(struct hda_codec
76913 if (spec->dig_in_nid && codec->vendor_id < 0x11d4989a)
76914 spec->mixers[spec->num_mixers++] = ad1988_spdif_in_mixers;
76915
76916 - codec->patch_ops = ad198x_patch_ops;
76917 + memcpy((void *)&codec->patch_ops, &ad198x_patch_ops, sizeof(ad198x_patch_ops));
76918 switch (board_config) {
76919 case AD1988_AUTO:
76920 - codec->patch_ops.init = ad1988_auto_init;
76921 + *(void **)&codec->patch_ops.init = ad1988_auto_init;
76922 break;
76923 case AD1988_LAPTOP:
76924 case AD1988_LAPTOP_DIG:
76925 - codec->patch_ops.unsol_event = ad1988_laptop_unsol_event;
76926 + *(void **)&codec->patch_ops.unsol_event = ad1988_laptop_unsol_event;
76927 break;
76928 }
76929 #ifdef CONFIG_SND_HDA_POWER_SAVE
76930 @@ -3321,7 +3321,7 @@ static int patch_ad1884(struct hda_codec
76931 /* we need to cover all playback volumes */
76932 spec->slave_vols = ad1884_slave_vols;
76933
76934 - codec->patch_ops = ad198x_patch_ops;
76935 + memcpy((void *)&codec->patch_ops, &ad198x_patch_ops, sizeof(ad198x_patch_ops));
76936
76937 return 0;
76938 }
76939 @@ -3529,7 +3529,7 @@ static int patch_ad1984(struct hda_codec
76940 case AD1984_BASIC:
76941 /* additional digital mics */
76942 spec->mixers[spec->num_mixers++] = ad1984_dmic_mixers;
76943 - codec->patch_ops.build_pcms = ad1984_build_pcms;
76944 + *(void **)&codec->patch_ops.build_pcms = ad1984_build_pcms;
76945 break;
76946 case AD1984_THINKPAD:
76947 spec->multiout.dig_out_nid = AD1884_SPDIF_OUT;
76948 @@ -4229,7 +4229,7 @@ static int patch_ad1884a(struct hda_code
76949 #ifdef CONFIG_SND_HDA_POWER_SAVE
76950 spec->loopback.amplist = ad1884a_loopbacks;
76951 #endif
76952 - codec->patch_ops = ad198x_patch_ops;
76953 + memcpy((void *)&codec->patch_ops, &ad198x_patch_ops, sizeof(ad198x_patch_ops));
76954
76955 /* override some parameters */
76956 board_config = snd_hda_check_board_config(codec, AD1884A_MODELS,
76957 @@ -4240,8 +4240,8 @@ static int patch_ad1884a(struct hda_code
76958 spec->mixers[0] = ad1884a_laptop_mixers;
76959 spec->init_verbs[spec->num_init_verbs++] = ad1884a_laptop_verbs;
76960 spec->multiout.dig_out_nid = 0;
76961 - codec->patch_ops.unsol_event = ad1884a_laptop_unsol_event;
76962 - codec->patch_ops.init = ad1884a_laptop_init;
76963 + *(void **)&codec->patch_ops.unsol_event = ad1884a_laptop_unsol_event;
76964 + *(void **)&codec->patch_ops.init = ad1884a_laptop_init;
76965 /* set the upper-limit for mixer amp to 0dB for avoiding the
76966 * possible damage by overloading
76967 */
76968 @@ -4255,8 +4255,8 @@ static int patch_ad1884a(struct hda_code
76969 spec->mixers[0] = ad1884a_mobile_mixers;
76970 spec->init_verbs[0] = ad1884a_mobile_verbs;
76971 spec->multiout.dig_out_nid = 0;
76972 - codec->patch_ops.unsol_event = ad1884a_hp_unsol_event;
76973 - codec->patch_ops.init = ad1884a_hp_init;
76974 + *(void **)&codec->patch_ops.unsol_event = ad1884a_hp_unsol_event;
76975 + *(void **)&codec->patch_ops.init = ad1884a_hp_init;
76976 /* set the upper-limit for mixer amp to 0dB for avoiding the
76977 * possible damage by overloading
76978 */
76979 @@ -4272,15 +4272,15 @@ static int patch_ad1884a(struct hda_code
76980 ad1984a_thinkpad_verbs;
76981 spec->multiout.dig_out_nid = 0;
76982 spec->input_mux = &ad1984a_thinkpad_capture_source;
76983 - codec->patch_ops.unsol_event = ad1984a_thinkpad_unsol_event;
76984 - codec->patch_ops.init = ad1984a_thinkpad_init;
76985 + *(void **)&codec->patch_ops.unsol_event = ad1984a_thinkpad_unsol_event;
76986 + *(void **)&codec->patch_ops.init = ad1984a_thinkpad_init;
76987 break;
76988 case AD1984A_TOUCHSMART:
76989 spec->mixers[0] = ad1984a_touchsmart_mixers;
76990 spec->init_verbs[0] = ad1984a_touchsmart_verbs;
76991 spec->multiout.dig_out_nid = 0;
76992 - codec->patch_ops.unsol_event = ad1984a_touchsmart_unsol_event;
76993 - codec->patch_ops.init = ad1984a_touchsmart_init;
76994 + *(void **)&codec->patch_ops.unsol_event = ad1984a_touchsmart_unsol_event;
76995 + *(void **)&codec->patch_ops.init = ad1984a_touchsmart_init;
76996 /* set the upper-limit for mixer amp to 0dB for avoiding the
76997 * possible damage by overloading
76998 */
76999 @@ -4607,7 +4607,7 @@ static int patch_ad1882(struct hda_codec
77000 #endif
77001 spec->vmaster_nid = 0x04;
77002
77003 - codec->patch_ops = ad198x_patch_ops;
77004 + memcpy((void *)&codec->patch_ops, &ad198x_patch_ops, sizeof(ad198x_patch_ops));
77005
77006 /* override some parameters */
77007 board_config = snd_hda_check_board_config(codec, AD1882_MODELS,
77008 diff -urNp linux-2.6.32.44/sound/pci/hda/patch_atihdmi.c linux-2.6.32.44/sound/pci/hda/patch_atihdmi.c
77009 --- linux-2.6.32.44/sound/pci/hda/patch_atihdmi.c 2011-03-27 14:31:47.000000000 -0400
77010 +++ linux-2.6.32.44/sound/pci/hda/patch_atihdmi.c 2011-08-05 20:33:55.000000000 -0400
77011 @@ -177,7 +177,7 @@ static int patch_atihdmi(struct hda_code
77012 */
77013 spec->multiout.dig_out_nid = CVT_NID;
77014
77015 - codec->patch_ops = atihdmi_patch_ops;
77016 + memcpy((void *)&codec->patch_ops, &atihdmi_patch_ops, sizeof(atihdmi_patch_ops));
77017
77018 return 0;
77019 }
77020 diff -urNp linux-2.6.32.44/sound/pci/hda/patch_ca0110.c linux-2.6.32.44/sound/pci/hda/patch_ca0110.c
77021 --- linux-2.6.32.44/sound/pci/hda/patch_ca0110.c 2011-03-27 14:31:47.000000000 -0400
77022 +++ linux-2.6.32.44/sound/pci/hda/patch_ca0110.c 2011-08-05 20:33:55.000000000 -0400
77023 @@ -525,7 +525,7 @@ static int patch_ca0110(struct hda_codec
77024 if (err < 0)
77025 goto error;
77026
77027 - codec->patch_ops = ca0110_patch_ops;
77028 + memcpy((void *)&codec->patch_ops, &ca0110_patch_ops, sizeof(ca0110_patch_ops));
77029
77030 return 0;
77031
77032 diff -urNp linux-2.6.32.44/sound/pci/hda/patch_cirrus.c linux-2.6.32.44/sound/pci/hda/patch_cirrus.c
77033 --- linux-2.6.32.44/sound/pci/hda/patch_cirrus.c 2011-05-10 22:12:02.000000000 -0400
77034 +++ linux-2.6.32.44/sound/pci/hda/patch_cirrus.c 2011-08-05 20:33:55.000000000 -0400
77035 @@ -1191,7 +1191,7 @@ static int patch_cs420x(struct hda_codec
77036 if (err < 0)
77037 goto error;
77038
77039 - codec->patch_ops = cs_patch_ops;
77040 + memcpy((void *)&codec->patch_ops, &cs_patch_ops, sizeof(cs_patch_ops));
77041
77042 return 0;
77043
77044 diff -urNp linux-2.6.32.44/sound/pci/hda/patch_cmedia.c linux-2.6.32.44/sound/pci/hda/patch_cmedia.c
77045 --- linux-2.6.32.44/sound/pci/hda/patch_cmedia.c 2011-03-27 14:31:47.000000000 -0400
77046 +++ linux-2.6.32.44/sound/pci/hda/patch_cmedia.c 2011-08-05 20:33:55.000000000 -0400
77047 @@ -728,7 +728,7 @@ static int patch_cmi9880(struct hda_code
77048
77049 spec->adc_nids = cmi9880_adc_nids;
77050
77051 - codec->patch_ops = cmi9880_patch_ops;
77052 + memcpy((void *)&codec->patch_ops, &cmi9880_patch_ops, sizeof(cmi9880_patch_ops));
77053
77054 return 0;
77055 }
77056 diff -urNp linux-2.6.32.44/sound/pci/hda/patch_conexant.c linux-2.6.32.44/sound/pci/hda/patch_conexant.c
77057 --- linux-2.6.32.44/sound/pci/hda/patch_conexant.c 2011-03-27 14:31:47.000000000 -0400
77058 +++ linux-2.6.32.44/sound/pci/hda/patch_conexant.c 2011-08-05 20:33:55.000000000 -0400
77059 @@ -1119,55 +1119,55 @@ static int patch_cxt5045(struct hda_code
77060 spec->channel_mode = cxt5045_modes,
77061
77062
77063 - codec->patch_ops = conexant_patch_ops;
77064 + memcpy((void *)&codec->patch_ops, &conexant_patch_ops, sizeof(conexant_patch_ops));
77065
77066 board_config = snd_hda_check_board_config(codec, CXT5045_MODELS,
77067 cxt5045_models,
77068 cxt5045_cfg_tbl);
77069 switch (board_config) {
77070 case CXT5045_LAPTOP_HPSENSE:
77071 - codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
77072 + *(void **)&codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
77073 spec->input_mux = &cxt5045_capture_source;
77074 spec->num_init_verbs = 2;
77075 spec->init_verbs[1] = cxt5045_hp_sense_init_verbs;
77076 spec->mixers[0] = cxt5045_mixers;
77077 - codec->patch_ops.init = cxt5045_init;
77078 + *(void **)&codec->patch_ops.init = cxt5045_init;
77079 break;
77080 case CXT5045_LAPTOP_MICSENSE:
77081 - codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
77082 + *(void **)&codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
77083 spec->input_mux = &cxt5045_capture_source;
77084 spec->num_init_verbs = 2;
77085 spec->init_verbs[1] = cxt5045_mic_sense_init_verbs;
77086 spec->mixers[0] = cxt5045_mixers;
77087 - codec->patch_ops.init = cxt5045_init;
77088 + *(void **)&codec->patch_ops.init = cxt5045_init;
77089 break;
77090 default:
77091 case CXT5045_LAPTOP_HPMICSENSE:
77092 - codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
77093 + *(void **)&codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
77094 spec->input_mux = &cxt5045_capture_source;
77095 spec->num_init_verbs = 3;
77096 spec->init_verbs[1] = cxt5045_hp_sense_init_verbs;
77097 spec->init_verbs[2] = cxt5045_mic_sense_init_verbs;
77098 spec->mixers[0] = cxt5045_mixers;
77099 - codec->patch_ops.init = cxt5045_init;
77100 + *(void **)&codec->patch_ops.init = cxt5045_init;
77101 break;
77102 case CXT5045_BENQ:
77103 - codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
77104 + *(void **)&codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
77105 spec->input_mux = &cxt5045_capture_source_benq;
77106 spec->num_init_verbs = 1;
77107 spec->init_verbs[0] = cxt5045_benq_init_verbs;
77108 spec->mixers[0] = cxt5045_mixers;
77109 spec->mixers[1] = cxt5045_benq_mixers;
77110 spec->num_mixers = 2;
77111 - codec->patch_ops.init = cxt5045_init;
77112 + *(void **)&codec->patch_ops.init = cxt5045_init;
77113 break;
77114 case CXT5045_LAPTOP_HP530:
77115 - codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
77116 + *(void **)&codec->patch_ops.unsol_event = cxt5045_hp_unsol_event;
77117 spec->input_mux = &cxt5045_capture_source_hp530;
77118 spec->num_init_verbs = 2;
77119 spec->init_verbs[1] = cxt5045_hp_sense_init_verbs;
77120 spec->mixers[0] = cxt5045_mixers_hp530;
77121 - codec->patch_ops.init = cxt5045_init;
77122 + *(void **)&codec->patch_ops.init = cxt5045_init;
77123 break;
77124 #ifdef CONFIG_SND_DEBUG
77125 case CXT5045_TEST:
77126 @@ -1556,7 +1556,7 @@ static int patch_cxt5047(struct hda_code
77127 spec->num_channel_mode = ARRAY_SIZE(cxt5047_modes),
77128 spec->channel_mode = cxt5047_modes,
77129
77130 - codec->patch_ops = conexant_patch_ops;
77131 + memcpy((void *)&codec->patch_ops, &conexant_patch_ops, sizeof(conexant_patch_ops));
77132
77133 board_config = snd_hda_check_board_config(codec, CXT5047_MODELS,
77134 cxt5047_models,
77135 @@ -1565,13 +1565,13 @@ static int patch_cxt5047(struct hda_code
77136 case CXT5047_LAPTOP:
77137 spec->num_mixers = 2;
77138 spec->mixers[1] = cxt5047_hp_spk_mixers;
77139 - codec->patch_ops.unsol_event = cxt5047_hp_unsol_event;
77140 + *(void **)&codec->patch_ops.unsol_event = cxt5047_hp_unsol_event;
77141 break;
77142 case CXT5047_LAPTOP_HP:
77143 spec->num_mixers = 2;
77144 spec->mixers[1] = cxt5047_hp_only_mixers;
77145 - codec->patch_ops.unsol_event = cxt5047_hp_unsol_event;
77146 - codec->patch_ops.init = cxt5047_hp_init;
77147 + *(void **)&codec->patch_ops.unsol_event = cxt5047_hp_unsol_event;
77148 + *(void **)&codec->patch_ops.init = cxt5047_hp_init;
77149 break;
77150 case CXT5047_LAPTOP_EAPD:
77151 spec->input_mux = &cxt5047_toshiba_capture_source;
77152 @@ -1579,14 +1579,14 @@ static int patch_cxt5047(struct hda_code
77153 spec->mixers[1] = cxt5047_hp_spk_mixers;
77154 spec->num_init_verbs = 2;
77155 spec->init_verbs[1] = cxt5047_toshiba_init_verbs;
77156 - codec->patch_ops.unsol_event = cxt5047_hp_unsol_event;
77157 + *(void **)&codec->patch_ops.unsol_event = cxt5047_hp_unsol_event;
77158 break;
77159 #ifdef CONFIG_SND_DEBUG
77160 case CXT5047_TEST:
77161 spec->input_mux = &cxt5047_test_capture_source;
77162 spec->mixers[0] = cxt5047_test_mixer;
77163 spec->init_verbs[0] = cxt5047_test_init_verbs;
77164 - codec->patch_ops.unsol_event = cxt5047_hp_unsol_event;
77165 + *(void **)&codec->patch_ops.unsol_event = cxt5047_hp_unsol_event;
77166 #endif
77167 }
77168 spec->vmaster_nid = 0x13;
77169 @@ -1904,8 +1904,8 @@ static int patch_cxt5051(struct hda_code
77170 codec->spec = spec;
77171 codec->pin_amp_workaround = 1;
77172
77173 - codec->patch_ops = conexant_patch_ops;
77174 - codec->patch_ops.init = cxt5051_init;
77175 + memcpy((void *)&codec->patch_ops, &conexant_patch_ops, sizeof(conexant_patch_ops));
77176 + *(void **)&codec->patch_ops.init = cxt5051_init;
77177
77178 spec->multiout.max_channels = 2;
77179 spec->multiout.num_dacs = ARRAY_SIZE(cxt5051_dac_nids);
77180 @@ -1923,7 +1923,7 @@ static int patch_cxt5051(struct hda_code
77181 spec->cur_adc = 0;
77182 spec->cur_adc_idx = 0;
77183
77184 - codec->patch_ops.unsol_event = cxt5051_hp_unsol_event;
77185 + *(void **)&codec->patch_ops.unsol_event = cxt5051_hp_unsol_event;
77186
77187 board_config = snd_hda_check_board_config(codec, CXT5051_MODELS,
77188 cxt5051_models,
77189 @@ -2372,8 +2372,8 @@ static int patch_cxt5066(struct hda_code
77190 return -ENOMEM;
77191 codec->spec = spec;
77192
77193 - codec->patch_ops = conexant_patch_ops;
77194 - codec->patch_ops.init = cxt5066_init;
77195 + memcpy((void *)&codec->patch_ops, &conexant_patch_ops, sizeof(conexant_patch_ops));
77196 + *(void **)&codec->patch_ops.init = cxt5066_init;
77197
77198 spec->dell_automute = 0;
77199 spec->multiout.max_channels = 2;
77200 @@ -2413,7 +2413,7 @@ static int patch_cxt5066(struct hda_code
77201 spec->dell_automute = 1;
77202 break;
77203 case CXT5066_OLPC_XO_1_5:
77204 - codec->patch_ops.unsol_event = cxt5066_unsol_event;
77205 + *(void **)&codec->patch_ops.unsol_event = cxt5066_unsol_event;
77206 spec->init_verbs[0] = cxt5066_init_verbs_olpc;
77207 spec->mixers[spec->num_mixers++] = cxt5066_mixer_master_olpc;
77208 spec->mixers[spec->num_mixers++] = cxt5066_mixers;
77209 diff -urNp linux-2.6.32.44/sound/pci/hda/patch_intelhdmi.c linux-2.6.32.44/sound/pci/hda/patch_intelhdmi.c
77210 --- linux-2.6.32.44/sound/pci/hda/patch_intelhdmi.c 2011-03-27 14:31:47.000000000 -0400
77211 +++ linux-2.6.32.44/sound/pci/hda/patch_intelhdmi.c 2011-08-05 20:33:55.000000000 -0400
77212 @@ -511,10 +511,10 @@ static void hdmi_non_intrinsic_event(str
77213 cp_ready);
77214
77215 /* TODO */
77216 - if (cp_state)
77217 - ;
77218 - if (cp_ready)
77219 - ;
77220 + if (cp_state) {
77221 + }
77222 + if (cp_ready) {
77223 + }
77224 }
77225
77226
77227 @@ -656,7 +656,7 @@ static int do_patch_intel_hdmi(struct hd
77228 spec->multiout.dig_out_nid = cvt_nid;
77229
77230 codec->spec = spec;
77231 - codec->patch_ops = intel_hdmi_patch_ops;
77232 + memcpy((void *)&codec->patch_ops, &intel_hdmi_patch_ops, sizeof(intel_hdmi_patch_ops));
77233
77234 snd_hda_eld_proc_new(codec, &spec->sink_eld);
77235
77236 diff -urNp linux-2.6.32.44/sound/pci/hda/patch_nvhdmi.c linux-2.6.32.44/sound/pci/hda/patch_nvhdmi.c
77237 --- linux-2.6.32.44/sound/pci/hda/patch_nvhdmi.c 2011-03-27 14:31:47.000000000 -0400
77238 +++ linux-2.6.32.44/sound/pci/hda/patch_nvhdmi.c 2011-08-05 20:33:55.000000000 -0400
77239 @@ -367,7 +367,7 @@ static int patch_nvhdmi_8ch(struct hda_c
77240 spec->multiout.max_channels = 8;
77241 spec->multiout.dig_out_nid = Nv_Master_Convert_nid;
77242
77243 - codec->patch_ops = nvhdmi_patch_ops_8ch;
77244 + memcpy((void *)&codec->patch_ops, &nvhdmi_patch_ops_8ch, sizeof(nvhdmi_patch_ops_8ch));
77245
77246 return 0;
77247 }
77248 @@ -386,7 +386,7 @@ static int patch_nvhdmi_2ch(struct hda_c
77249 spec->multiout.max_channels = 2;
77250 spec->multiout.dig_out_nid = Nv_Master_Convert_nid;
77251
77252 - codec->patch_ops = nvhdmi_patch_ops_2ch;
77253 + memcpy((void *)&codec->patch_ops, &nvhdmi_patch_ops_2ch, sizeof(nvhdmi_patch_ops_2ch));
77254
77255 return 0;
77256 }
77257 diff -urNp linux-2.6.32.44/sound/pci/hda/patch_realtek.c linux-2.6.32.44/sound/pci/hda/patch_realtek.c
77258 --- linux-2.6.32.44/sound/pci/hda/patch_realtek.c 2011-06-25 12:55:35.000000000 -0400
77259 +++ linux-2.6.32.44/sound/pci/hda/patch_realtek.c 2011-08-05 20:33:55.000000000 -0400
77260 @@ -4856,7 +4856,7 @@ static int patch_alc880(struct hda_codec
77261
77262 spec->vmaster_nid = 0x0c;
77263
77264 - codec->patch_ops = alc_patch_ops;
77265 + memcpy((void *)&codec->patch_ops, &alc_patch_ops, sizeof(alc_patch_ops));
77266 if (board_config == ALC880_AUTO)
77267 spec->init_hook = alc880_auto_init;
77268 #ifdef CONFIG_SND_HDA_POWER_SAVE
77269 @@ -6479,7 +6479,7 @@ static int patch_alc260(struct hda_codec
77270
77271 spec->vmaster_nid = 0x08;
77272
77273 - codec->patch_ops = alc_patch_ops;
77274 + memcpy((void *)&codec->patch_ops, &alc_patch_ops, sizeof(alc_patch_ops));
77275 if (board_config == ALC260_AUTO)
77276 spec->init_hook = alc260_auto_init;
77277 #ifdef CONFIG_SND_HDA_POWER_SAVE
77278 @@ -9997,7 +9997,7 @@ static int patch_alc882(struct hda_codec
77279
77280 spec->vmaster_nid = 0x0c;
77281
77282 - codec->patch_ops = alc_patch_ops;
77283 + memcpy((void *)&codec->patch_ops, &alc_patch_ops, sizeof(alc_patch_ops));
77284 if (board_config == ALC882_AUTO)
77285 spec->init_hook = alc882_auto_init;
77286 #ifdef CONFIG_SND_HDA_POWER_SAVE
77287 @@ -11871,7 +11871,7 @@ static int patch_alc262(struct hda_codec
77288
77289 spec->vmaster_nid = 0x0c;
77290
77291 - codec->patch_ops = alc_patch_ops;
77292 + memcpy((void *)&codec->patch_ops, &alc_patch_ops, sizeof(alc_patch_ops));
77293 if (board_config == ALC262_AUTO)
77294 spec->init_hook = alc262_auto_init;
77295 #ifdef CONFIG_SND_HDA_POWER_SAVE
77296 @@ -12950,7 +12950,7 @@ static int patch_alc268(struct hda_codec
77297
77298 spec->vmaster_nid = 0x02;
77299
77300 - codec->patch_ops = alc_patch_ops;
77301 + memcpy((void *)&codec->patch_ops, &alc_patch_ops, sizeof(alc_patch_ops));
77302 if (board_config == ALC268_AUTO)
77303 spec->init_hook = alc268_auto_init;
77304
77305 @@ -13636,7 +13636,7 @@ static int patch_alc269(struct hda_codec
77306
77307 spec->vmaster_nid = 0x02;
77308
77309 - codec->patch_ops = alc_patch_ops;
77310 + memcpy((void *)&codec->patch_ops, &alc_patch_ops, sizeof(alc_patch_ops));
77311 if (board_config == ALC269_AUTO)
77312 spec->init_hook = alc269_auto_init;
77313 #ifdef CONFIG_SND_HDA_POWER_SAVE
77314 @@ -14741,7 +14741,7 @@ static int patch_alc861(struct hda_codec
77315
77316 spec->vmaster_nid = 0x03;
77317
77318 - codec->patch_ops = alc_patch_ops;
77319 + memcpy((void *)&codec->patch_ops, &alc_patch_ops, sizeof(alc_patch_ops));
77320 if (board_config == ALC861_AUTO)
77321 spec->init_hook = alc861_auto_init;
77322 #ifdef CONFIG_SND_HDA_POWER_SAVE
77323 @@ -15727,7 +15727,7 @@ static int patch_alc861vd(struct hda_cod
77324
77325 spec->vmaster_nid = 0x02;
77326
77327 - codec->patch_ops = alc_patch_ops;
77328 + memcpy((void *)&codec->patch_ops, &alc_patch_ops, sizeof(alc_patch_ops));
77329
77330 if (board_config == ALC861VD_AUTO)
77331 spec->init_hook = alc861vd_auto_init;
77332 @@ -17652,7 +17652,7 @@ static int patch_alc662(struct hda_codec
77333
77334 spec->vmaster_nid = 0x02;
77335
77336 - codec->patch_ops = alc_patch_ops;
77337 + memcpy((void *)&codec->patch_ops, &alc_patch_ops, sizeof(alc_patch_ops));
77338 if (board_config == ALC662_AUTO)
77339 spec->init_hook = alc662_auto_init;
77340 #ifdef CONFIG_SND_HDA_POWER_SAVE
77341 diff -urNp linux-2.6.32.44/sound/pci/hda/patch_si3054.c linux-2.6.32.44/sound/pci/hda/patch_si3054.c
77342 --- linux-2.6.32.44/sound/pci/hda/patch_si3054.c 2011-03-27 14:31:47.000000000 -0400
77343 +++ linux-2.6.32.44/sound/pci/hda/patch_si3054.c 2011-08-05 20:33:55.000000000 -0400
77344 @@ -275,7 +275,7 @@ static int patch_si3054(struct hda_codec
77345 if (spec == NULL)
77346 return -ENOMEM;
77347 codec->spec = spec;
77348 - codec->patch_ops = si3054_patch_ops;
77349 + memcpy((void *)&codec->patch_ops, &si3054_patch_ops, sizeof(si3054_patch_ops));
77350 return 0;
77351 }
77352
77353 diff -urNp linux-2.6.32.44/sound/pci/hda/patch_sigmatel.c linux-2.6.32.44/sound/pci/hda/patch_sigmatel.c
77354 --- linux-2.6.32.44/sound/pci/hda/patch_sigmatel.c 2011-06-25 12:55:35.000000000 -0400
77355 +++ linux-2.6.32.44/sound/pci/hda/patch_sigmatel.c 2011-08-05 20:33:55.000000000 -0400
77356 @@ -4899,7 +4899,7 @@ static int patch_stac9200(struct hda_cod
77357 if (spec->board_config == STAC_9200_PANASONIC)
77358 spec->hp_detect = 0;
77359
77360 - codec->patch_ops = stac92xx_patch_ops;
77361 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
77362
77363 return 0;
77364 }
77365 @@ -4981,7 +4981,7 @@ static int patch_stac925x(struct hda_cod
77366 return err;
77367 }
77368
77369 - codec->patch_ops = stac92xx_patch_ops;
77370 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
77371
77372 return 0;
77373 }
77374 @@ -5125,7 +5125,7 @@ again:
77375 if (spec->board_config == STAC_92HD73XX_NO_JD)
77376 spec->hp_detect = 0;
77377
77378 - codec->patch_ops = stac92xx_patch_ops;
77379 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
77380
77381 codec->proc_widget_hook = stac92hd7x_proc_hook;
77382
77383 @@ -5220,7 +5220,7 @@ again:
77384 snd_hda_codec_write_cache(codec, nid, 0,
77385 AC_VERB_SET_CONNECT_SEL, num_dacs);
77386
77387 - codec->patch_ops = stac92xx_patch_ops;
77388 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
77389
77390 codec->proc_widget_hook = stac92hd_proc_hook;
77391
77392 @@ -5294,7 +5294,7 @@ static int patch_stac92hd71bxx(struct hd
77393 return -ENOMEM;
77394
77395 codec->spec = spec;
77396 - codec->patch_ops = stac92xx_patch_ops;
77397 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
77398 spec->num_pins = STAC92HD71BXX_NUM_PINS;
77399 switch (codec->vendor_id) {
77400 case 0x111d76b6:
77401 @@ -5515,7 +5515,7 @@ again:
77402 spec->gpio_dir |= spec->gpio_led;
77403 spec->gpio_data |= spec->gpio_led;
77404 /* register check_power_status callback. */
77405 - codec->patch_ops.check_power_status =
77406 + *(void **)&codec->patch_ops.check_power_status =
77407 stac92xx_hp_check_power_status;
77408 }
77409 #endif
77410 @@ -5634,7 +5634,7 @@ static int patch_stac922x(struct hda_cod
77411 return err;
77412 }
77413
77414 - codec->patch_ops = stac92xx_patch_ops;
77415 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
77416
77417 /* Fix Mux capture level; max to 2 */
77418 snd_hda_override_amp_caps(codec, 0x12, HDA_OUTPUT,
77419 @@ -5757,7 +5757,7 @@ static int patch_stac927x(struct hda_cod
77420 return err;
77421 }
77422
77423 - codec->patch_ops = stac92xx_patch_ops;
77424 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
77425
77426 codec->proc_widget_hook = stac927x_proc_hook;
77427
77428 @@ -5880,7 +5880,7 @@ static int patch_stac9205(struct hda_cod
77429 return err;
77430 }
77431
77432 - codec->patch_ops = stac92xx_patch_ops;
77433 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
77434
77435 codec->proc_widget_hook = stac9205_proc_hook;
77436
77437 @@ -5974,7 +5974,7 @@ static int patch_stac9872(struct hda_cod
77438 return -EINVAL;
77439 }
77440 spec->input_mux = &spec->private_imux;
77441 - codec->patch_ops = stac92xx_patch_ops;
77442 + memcpy((void *)&codec->patch_ops, &stac92xx_patch_ops, sizeof(stac92xx_patch_ops));
77443 return 0;
77444 }
77445
77446 diff -urNp linux-2.6.32.44/sound/pci/hda/patch_via.c linux-2.6.32.44/sound/pci/hda/patch_via.c
77447 --- linux-2.6.32.44/sound/pci/hda/patch_via.c 2011-03-27 14:31:47.000000000 -0400
77448 +++ linux-2.6.32.44/sound/pci/hda/patch_via.c 2011-08-05 20:33:55.000000000 -0400
77449 @@ -1399,9 +1399,9 @@ static int patch_vt1708(struct hda_codec
77450 spec->num_mixers++;
77451 }
77452
77453 - codec->patch_ops = via_patch_ops;
77454 + memcpy((void *)&codec->patch_ops, &via_patch_ops, sizeof(via_patch_ops));
77455
77456 - codec->patch_ops.init = via_auto_init;
77457 + *(void **)&codec->patch_ops.init = via_auto_init;
77458 #ifdef CONFIG_SND_HDA_POWER_SAVE
77459 spec->loopback.amplist = vt1708_loopbacks;
77460 #endif
77461 @@ -1870,10 +1870,10 @@ static int patch_vt1709_10ch(struct hda_
77462 spec->num_mixers++;
77463 }
77464
77465 - codec->patch_ops = via_patch_ops;
77466 + memcpy((void *)&codec->patch_ops, &via_patch_ops, sizeof(via_patch_ops));
77467
77468 - codec->patch_ops.init = via_auto_init;
77469 - codec->patch_ops.unsol_event = via_unsol_event;
77470 + *(void **)&codec->patch_ops.init = via_auto_init;
77471 + *(void **)&codec->patch_ops.unsol_event = via_unsol_event;
77472 #ifdef CONFIG_SND_HDA_POWER_SAVE
77473 spec->loopback.amplist = vt1709_loopbacks;
77474 #endif
77475 @@ -1964,10 +1964,10 @@ static int patch_vt1709_6ch(struct hda_c
77476 spec->num_mixers++;
77477 }
77478
77479 - codec->patch_ops = via_patch_ops;
77480 + memcpy((void *)&codec->patch_ops, &via_patch_ops, sizeof(via_patch_ops));
77481
77482 - codec->patch_ops.init = via_auto_init;
77483 - codec->patch_ops.unsol_event = via_unsol_event;
77484 + *(void **)&codec->patch_ops.init = via_auto_init;
77485 + *(void **)&codec->patch_ops.unsol_event = via_unsol_event;
77486 #ifdef CONFIG_SND_HDA_POWER_SAVE
77487 spec->loopback.amplist = vt1709_loopbacks;
77488 #endif
77489 @@ -2418,10 +2418,10 @@ static int patch_vt1708B_8ch(struct hda_
77490 spec->num_mixers++;
77491 }
77492
77493 - codec->patch_ops = via_patch_ops;
77494 + memcpy((void *)&codec->patch_ops, &via_patch_ops, sizeof(via_patch_ops));
77495
77496 - codec->patch_ops.init = via_auto_init;
77497 - codec->patch_ops.unsol_event = via_unsol_event;
77498 + *(void **)&codec->patch_ops.init = via_auto_init;
77499 + *(void **)&codec->patch_ops.unsol_event = via_unsol_event;
77500 #ifdef CONFIG_SND_HDA_POWER_SAVE
77501 spec->loopback.amplist = vt1708B_loopbacks;
77502 #endif
77503 @@ -2470,10 +2470,10 @@ static int patch_vt1708B_4ch(struct hda_
77504 spec->num_mixers++;
77505 }
77506
77507 - codec->patch_ops = via_patch_ops;
77508 + memcpy((void *)&codec->patch_ops, &via_patch_ops, sizeof(via_patch_ops));
77509
77510 - codec->patch_ops.init = via_auto_init;
77511 - codec->patch_ops.unsol_event = via_unsol_event;
77512 + *(void **)&codec->patch_ops.init = via_auto_init;
77513 + *(void **)&codec->patch_ops.unsol_event = via_unsol_event;
77514 #ifdef CONFIG_SND_HDA_POWER_SAVE
77515 spec->loopback.amplist = vt1708B_loopbacks;
77516 #endif
77517 @@ -2905,10 +2905,10 @@ static int patch_vt1708S(struct hda_code
77518 spec->num_mixers++;
77519 }
77520
77521 - codec->patch_ops = via_patch_ops;
77522 + memcpy((void *)&codec->patch_ops, &via_patch_ops, sizeof(via_patch_ops));
77523
77524 - codec->patch_ops.init = via_auto_init;
77525 - codec->patch_ops.unsol_event = via_unsol_event;
77526 + *(void **)&codec->patch_ops.init = via_auto_init;
77527 + *(void **)&codec->patch_ops.unsol_event = via_unsol_event;
77528 #ifdef CONFIG_SND_HDA_POWER_SAVE
77529 spec->loopback.amplist = vt1708S_loopbacks;
77530 #endif
77531 @@ -3223,10 +3223,10 @@ static int patch_vt1702(struct hda_codec
77532 spec->num_mixers++;
77533 }
77534
77535 - codec->patch_ops = via_patch_ops;
77536 + memcpy((void *)&codec->patch_ops, &via_patch_ops, sizeof(via_patch_ops));
77537
77538 - codec->patch_ops.init = via_auto_init;
77539 - codec->patch_ops.unsol_event = via_unsol_event;
77540 + *(void **)&codec->patch_ops.init = via_auto_init;
77541 + *(void **)&codec->patch_ops.unsol_event = via_unsol_event;
77542 #ifdef CONFIG_SND_HDA_POWER_SAVE
77543 spec->loopback.amplist = vt1702_loopbacks;
77544 #endif
77545 diff -urNp linux-2.6.32.44/sound/pci/ice1712/ice1712.h linux-2.6.32.44/sound/pci/ice1712/ice1712.h
77546 --- linux-2.6.32.44/sound/pci/ice1712/ice1712.h 2011-03-27 14:31:47.000000000 -0400
77547 +++ linux-2.6.32.44/sound/pci/ice1712/ice1712.h 2011-08-05 20:33:55.000000000 -0400
77548 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
77549 unsigned int mask_flags; /* total mask bits */
77550 struct snd_akm4xxx_ops {
77551 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
77552 - } ops;
77553 + } __no_const ops;
77554 };
77555
77556 struct snd_ice1712_spdif {
77557 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
77558 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
77559 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
77560 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
77561 - } ops;
77562 + } __no_const ops;
77563 };
77564
77565
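
The ice1712 ops structures take the opposite approach: rather than laundering every assignment, the embedded struct is tagged __no_const so the constify plugin (see tools/gcc/constify_plugin.c later in this patch) leaves the type writable even though it contains only function pointers. Elsewhere in the patch __no_const expands to the plugin's no_const attribute when the plugin is in use and to nothing otherwise; the sketch below mirrors that arrangement, with the CONSTIFY_PLUGIN guard name chosen here purely for illustration.

/* __no_const sketch - illustrative only; the kernel defines the macro in its
 * compiler headers, keyed to the plugin being enabled. */
#ifdef CONSTIFY_PLUGIN
#define __no_const __attribute__((no_const))
#else
#define __no_const
#endif

struct snd_akm4xxx;     /* opaque for this sketch */

struct akm4xxx_ops_demo {
        void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
} __no_const;           /* attribute after the closing brace applies to the type,
                           so the plugin skips constifying it */

static void set_rate(struct snd_akm4xxx *ak, unsigned int rate)
{
        (void)ak;
        (void)rate;
}

void install_rate_hook(struct akm4xxx_ops_demo *ops)
{
        ops->set_rate_val = set_rate;   /* plain store; no cast needed because
                                           the type opted out of constification */
}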
77566 diff -urNp linux-2.6.32.44/sound/pci/intel8x0m.c linux-2.6.32.44/sound/pci/intel8x0m.c
77567 --- linux-2.6.32.44/sound/pci/intel8x0m.c 2011-03-27 14:31:47.000000000 -0400
77568 +++ linux-2.6.32.44/sound/pci/intel8x0m.c 2011-04-23 12:56:12.000000000 -0400
77569 @@ -1264,7 +1264,7 @@ static struct shortname_table {
77570 { 0x5455, "ALi M5455" },
77571 { 0x746d, "AMD AMD8111" },
77572 #endif
77573 - { 0 },
77574 + { 0, },
77575 };
77576
77577 static int __devinit snd_intel8x0m_probe(struct pci_dev *pci,
77578 diff -urNp linux-2.6.32.44/sound/pci/ymfpci/ymfpci_main.c linux-2.6.32.44/sound/pci/ymfpci/ymfpci_main.c
77579 --- linux-2.6.32.44/sound/pci/ymfpci/ymfpci_main.c 2011-03-27 14:31:47.000000000 -0400
77580 +++ linux-2.6.32.44/sound/pci/ymfpci/ymfpci_main.c 2011-05-04 17:56:28.000000000 -0400
77581 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct sn
77582 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
77583 break;
77584 }
77585 - if (atomic_read(&chip->interrupt_sleep_count)) {
77586 - atomic_set(&chip->interrupt_sleep_count, 0);
77587 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
77588 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
77589 wake_up(&chip->interrupt_sleep);
77590 }
77591 __end:
77592 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct s
77593 continue;
77594 init_waitqueue_entry(&wait, current);
77595 add_wait_queue(&chip->interrupt_sleep, &wait);
77596 - atomic_inc(&chip->interrupt_sleep_count);
77597 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
77598 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
77599 remove_wait_queue(&chip->interrupt_sleep, &wait);
77600 }
77601 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(
77602 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
77603 spin_unlock(&chip->reg_lock);
77604
77605 - if (atomic_read(&chip->interrupt_sleep_count)) {
77606 - atomic_set(&chip->interrupt_sleep_count, 0);
77607 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
77608 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
77609 wake_up(&chip->interrupt_sleep);
77610 }
77611 }
77612 @@ -2369,7 +2369,7 @@ int __devinit snd_ymfpci_create(struct s
77613 spin_lock_init(&chip->reg_lock);
77614 spin_lock_init(&chip->voice_lock);
77615 init_waitqueue_head(&chip->interrupt_sleep);
77616 - atomic_set(&chip->interrupt_sleep_count, 0);
77617 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
77618 chip->card = card;
77619 chip->pci = pci;
77620 chip->irq = -1;
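
The ymfpci hunks convert interrupt_sleep_count from atomic_t to PaX's atomic_unchecked_t via the *_unchecked helpers. Under PaX's REFCOUNT hardening, the regular atomic_t operations detect overflow so a wrapped reference count cannot be exploited; counters like this one, which are reset at will and do not guard an object's lifetime, move to the unchecked variants so they stay out of the detector's way. The sketch below only illustrates the checked-versus-unchecked split; it is not how PaX implements it (that is architecture-specific), it provides no real atomicity, and the function names are invented for the example.

/* refcount_check_demo.c - conceptual illustration only; not the PaX code. */
#include <assert.h>
#include <limits.h>
#include <stdio.h>

struct atomic_demo { unsigned int counter; };   /* no real atomicity here */

/* "checked": refuse to wrap, as REFCOUNT effectively does for atomic_t */
static void atomic_inc_checked(struct atomic_demo *v)
{
        assert(v->counter != UINT_MAX && "reference count overflow");
        v->counter++;
}

/* "unchecked": counters such as interrupt_sleep_count may grow and reset freely */
static void atomic_inc_unchecked(struct atomic_demo *v)
{
        v->counter++;
}

int main(void)
{
        struct atomic_demo sleepers = { 0 };
        struct atomic_demo refs = { 0 };

        atomic_inc_unchecked(&sleepers);
        atomic_inc_checked(&refs);
        printf("sleepers = %u, refs = %u\n", sleepers.counter, refs.counter);
        return 0;
}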
77621 diff -urNp linux-2.6.32.44/sound/soc/soc-core.c linux-2.6.32.44/sound/soc/soc-core.c
77622 --- linux-2.6.32.44/sound/soc/soc-core.c 2011-03-27 14:31:47.000000000 -0400
77623 +++ linux-2.6.32.44/sound/soc/soc-core.c 2011-08-05 20:33:55.000000000 -0400
77624 @@ -1107,13 +1107,13 @@ static int soc_new_pcm(struct snd_soc_de
77625
77626 dai_link->pcm = pcm;
77627 pcm->private_data = rtd;
77628 - soc_pcm_ops.mmap = platform->pcm_ops->mmap;
77629 - soc_pcm_ops.pointer = platform->pcm_ops->pointer;
77630 - soc_pcm_ops.ioctl = platform->pcm_ops->ioctl;
77631 - soc_pcm_ops.copy = platform->pcm_ops->copy;
77632 - soc_pcm_ops.silence = platform->pcm_ops->silence;
77633 - soc_pcm_ops.ack = platform->pcm_ops->ack;
77634 - soc_pcm_ops.page = platform->pcm_ops->page;
77635 + *(void **)&soc_pcm_ops.mmap = platform->pcm_ops->mmap;
77636 + *(void **)&soc_pcm_ops.pointer = platform->pcm_ops->pointer;
77637 + *(void **)&soc_pcm_ops.ioctl = platform->pcm_ops->ioctl;
77638 + *(void **)&soc_pcm_ops.copy = platform->pcm_ops->copy;
77639 + *(void **)&soc_pcm_ops.silence = platform->pcm_ops->silence;
77640 + *(void **)&soc_pcm_ops.ack = platform->pcm_ops->ack;
77641 + *(void **)&soc_pcm_ops.page = platform->pcm_ops->page;
77642
77643 if (playback)
77644 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &soc_pcm_ops);
77645 diff -urNp linux-2.6.32.44/sound/usb/usbaudio.c linux-2.6.32.44/sound/usb/usbaudio.c
77646 --- linux-2.6.32.44/sound/usb/usbaudio.c 2011-03-27 14:31:47.000000000 -0400
77647 +++ linux-2.6.32.44/sound/usb/usbaudio.c 2011-08-05 20:33:55.000000000 -0400
77648 @@ -963,12 +963,12 @@ static int snd_usb_pcm_playback_trigger(
77649 switch (cmd) {
77650 case SNDRV_PCM_TRIGGER_START:
77651 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
77652 - subs->ops.prepare = prepare_playback_urb;
77653 + *(void **)&subs->ops.prepare = prepare_playback_urb;
77654 return 0;
77655 case SNDRV_PCM_TRIGGER_STOP:
77656 return deactivate_urbs(subs, 0, 0);
77657 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
77658 - subs->ops.prepare = prepare_nodata_playback_urb;
77659 + *(void **)&subs->ops.prepare = prepare_nodata_playback_urb;
77660 return 0;
77661 default:
77662 return -EINVAL;
77663 @@ -985,15 +985,15 @@ static int snd_usb_pcm_capture_trigger(s
77664
77665 switch (cmd) {
77666 case SNDRV_PCM_TRIGGER_START:
77667 - subs->ops.retire = retire_capture_urb;
77668 + *(void **)&subs->ops.retire = retire_capture_urb;
77669 return start_urbs(subs, substream->runtime);
77670 case SNDRV_PCM_TRIGGER_STOP:
77671 return deactivate_urbs(subs, 0, 0);
77672 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
77673 - subs->ops.retire = retire_paused_capture_urb;
77674 + *(void **)&subs->ops.retire = retire_paused_capture_urb;
77675 return 0;
77676 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
77677 - subs->ops.retire = retire_capture_urb;
77678 + *(void **)&subs->ops.retire = retire_capture_urb;
77679 return 0;
77680 default:
77681 return -EINVAL;
77682 @@ -1542,7 +1542,7 @@ static int snd_usb_pcm_prepare(struct sn
77683 /* for playback, submit the URBs now; otherwise, the first hwptr_done
77684 * updates for all URBs would happen at the same time when starting */
77685 if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) {
77686 - subs->ops.prepare = prepare_nodata_playback_urb;
77687 + *(void **)&subs->ops.prepare = prepare_nodata_playback_urb;
77688 return start_urbs(subs, runtime);
77689 } else
77690 return 0;
77691 @@ -2228,14 +2228,14 @@ static void init_substream(struct snd_us
77692 subs->direction = stream;
77693 subs->dev = as->chip->dev;
77694 if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL) {
77695 - subs->ops = audio_urb_ops[stream];
77696 + memcpy((void *)&subs->ops, &audio_urb_ops[stream], sizeof(subs->ops));
77697 } else {
77698 - subs->ops = audio_urb_ops_high_speed[stream];
77699 + memcpy((void *)&subs->ops, &audio_urb_ops_high_speed[stream], sizeof(subs->ops));
77700 switch (as->chip->usb_id) {
77701 case USB_ID(0x041e, 0x3f02): /* E-Mu 0202 USB */
77702 case USB_ID(0x041e, 0x3f04): /* E-Mu 0404 USB */
77703 case USB_ID(0x041e, 0x3f0a): /* E-Mu Tracker Pre */
77704 - subs->ops.retire_sync = retire_playback_sync_urb_hs_emu;
77705 + *(void **)&subs->ops.retire_sync = retire_playback_sync_urb_hs_emu;
77706 break;
77707 }
77708 }
77709 diff -urNp linux-2.6.32.44/tools/gcc/constify_plugin.c linux-2.6.32.44/tools/gcc/constify_plugin.c
77710 --- linux-2.6.32.44/tools/gcc/constify_plugin.c 1969-12-31 19:00:00.000000000 -0500
77711 +++ linux-2.6.32.44/tools/gcc/constify_plugin.c 2011-08-05 20:33:55.000000000 -0400
77712 @@ -0,0 +1,147 @@
77713 +/*
77714 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
77715 + * Licensed under the GPL v2, or (at your option) v3
77716 + *
77717 + * This gcc plugin constifies all structures which contain only function pointers and const fields.
77718 + *
77719 + * Usage:
77720 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o const_plugin.so const_plugin.c
77721 + * $ gcc -fplugin=const_plugin.so test.c -O2
77722 + */
77723 +
77724 +#include "gcc-plugin.h"
77725 +#include "config.h"
77726 +#include "system.h"
77727 +#include "coretypes.h"
77728 +#include "tree.h"
77729 +#include "tree-pass.h"
77730 +#include "intl.h"
77731 +#include "plugin-version.h"
77732 +#include "tm.h"
77733 +#include "toplev.h"
77734 +#include "function.h"
77735 +#include "tree-flow.h"
77736 +#include "plugin.h"
77737 +
77738 +int plugin_is_GPL_compatible;
77739 +
77740 +static struct plugin_info const_plugin_info = {
77741 + .version = "20110706",
77742 + .help = "no-constify\tturn off constification\n",
77743 +};
77744 +
77745 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
77746 +{
77747 + return NULL_TREE;
77748 +}
77749 +
77750 +static struct attribute_spec no_const_attr = {
77751 + .name = "no_const",
77752 + .min_length = 0,
77753 + .max_length = 0,
77754 + .decl_required = false,
77755 + .type_required = false,
77756 + .function_type_required = false,
77757 + .handler = handle_no_const_attribute
77758 +};
77759 +
77760 +static void register_attributes(void *event_data, void *data)
77761 +{
77762 + register_attribute(&no_const_attr);
77763 +}
77764 +
77765 +/*
77766 +static void printnode(char *prefix, tree node)
77767 +{
77768 + enum tree_code code;
77769 + enum tree_code_class tclass;
77770 +
77771 + tclass = TREE_CODE_CLASS(TREE_CODE (node));
77772 +
77773 + code = TREE_CODE(node);
77774 + fprintf(stderr, "\n%s node: %p, code: %d type: %s\n", prefix, node, code, tree_code_name[(int)code]);
77775 + if (DECL_CONTEXT(node) != NULL_TREE && TYPE_NAME(DECL_CONTEXT(node)) != NULL_TREE)
77776 + fprintf(stderr, "struct name: %s\n", IDENTIFIER_POINTER(TYPE_NAME(DECL_CONTEXT(node))));
77777 + if (tclass == tcc_declaration && DECL_NAME(node) != NULL_TREE)
77778 + fprintf(stderr, "field name: %s\n", IDENTIFIER_POINTER(DECL_NAME(node)));
77779 +}
77780 +*/
77781 +
77782 +static void constify_node(tree node)
77783 +{
77784 + TREE_READONLY(node) = 1;
77785 +}
77786 +
77787 +static bool is_fptr(tree field)
77788 +{
77789 + tree ptr = TREE_TYPE(field);
77790 +
77791 + if (TREE_CODE(ptr) != POINTER_TYPE)
77792 + return false;
77793 +
77794 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
77795 +}
77796 +
77797 +static bool walk_struct(tree node)
77798 +{
77799 + tree field;
77800 +
77801 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
77802 + enum tree_code code = TREE_CODE(TREE_TYPE(field));
77803 + if (code == RECORD_TYPE) {
77804 + if (!(walk_struct(TREE_TYPE(field))))
77805 + return false;
77806 + } else if (is_fptr(field) == false && !TREE_READONLY(field))
77807 + return false;
77808 + }
77809 + return true;
77810 +}
77811 +
77812 +static void finish_type(void *event_data, void *data)
77813 +{
77814 + tree node = (tree)event_data;
77815 +
77816 + if (node == NULL_TREE)
77817 + return;
77818 +
77819 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
77820 + return;
77821 +
77822 + if (TREE_READONLY(node))
77823 + return;
77824 +
77825 + if (TYPE_FIELDS(node) == NULL_TREE)
77826 + return;
77827 +
77828 + if (walk_struct(node))
77829 + constify_node(node);
77830 +}
77831 +
77832 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
77833 +{
77834 + const char * const plugin_name = plugin_info->base_name;
77835 + const int argc = plugin_info->argc;
77836 + const struct plugin_argument * const argv = plugin_info->argv;
77837 + int i;
77838 + bool constify = true;
77839 +
77840 + if (!plugin_default_version_check(version, &gcc_version)) {
77841 + error(G_("incompatible gcc/plugin versions"));
77842 + return 1;
77843 + }
77844 +
77845 + for (i = 0; i < argc; ++i) {
77846 + if (!(strcmp(argv[i].key, "no-constify"))) {
77847 + constify = false;
77848 + continue;
77849 + }
77850 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
77851 + }
77852 +
77853 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
77854 + if (constify)
77855 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
77856 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
77857 +
77858 + return 0;
77859 +}
77860 Binary files linux-2.6.32.44/tools/gcc/constify_plugin.so and linux-2.6.32.44/tools/gcc/constify_plugin.so differ
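
To summarize the plugin above: finish_type() fires once for every completed struct or union type; walk_struct() accepts the type only if every field is a function pointer, is already const, or is a nested record that passes the same test; constify_node() then marks the type read-only, and the no_const attribute (wrapped as __no_const elsewhere in this patch) opts a type back out. With the plugin loaded as in the Usage comment, the types below behave as annotated; they are examples made up for illustration, not kernel structures, and without the plugin GCC merely warns that the no_const attribute is unknown.

/* constify_demo.c - which of these types the plugin would mark read-only.
 * Build with the plugin: gcc -fplugin=./constify_plugin.so -O2 -c constify_demo.c
 * (type names are illustrative only) */

struct ops_only {                  /* all fields are function pointers:
                                      walk_struct() accepts it, so it is constified */
        int  (*open)(void);
        void (*release)(void);
};

struct with_data {                 /* non-const data field: left writable */
        int  (*open)(void);
        int   refcount;
};

struct nested {                    /* nested record is walked recursively;
                                      constified only if the inner struct
                                      also qualifies (here it does) */
        struct ops_only ops;
        void (*probe)(void);
};

struct opted_out {                 /* would qualify, but no_const opts it out;
                                      this is what __no_const expands to */
        int (*open)(void);
} __attribute__((no_const));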
77861 diff -urNp linux-2.6.32.44/tools/gcc/Makefile linux-2.6.32.44/tools/gcc/Makefile
77862 --- linux-2.6.32.44/tools/gcc/Makefile 1969-12-31 19:00:00.000000000 -0500
77863 +++ linux-2.6.32.44/tools/gcc/Makefile 2011-08-05 20:33:55.000000000 -0400
77864 @@ -0,0 +1,12 @@
77865 +#CC := gcc
77866 +#PLUGIN_SOURCE_FILES := pax_plugin.c
77867 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
77868 +GCCPLUGINS_DIR := $(shell $(HOSTCC) -print-file-name=plugin)
77869 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
77870 +
77871 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
77872 +
77873 +hostlibs-y := stackleak_plugin.so constify_plugin.so
77874 +always := $(hostlibs-y)
77875 +stackleak_plugin-objs := stackleak_plugin.o
77876 +constify_plugin-objs := constify_plugin.o
77877 Binary files linux-2.6.32.44/tools/gcc/pax_plugin.so and linux-2.6.32.44/tools/gcc/pax_plugin.so differ
77878 diff -urNp linux-2.6.32.44/tools/gcc/stackleak_plugin.c linux-2.6.32.44/tools/gcc/stackleak_plugin.c
77879 --- linux-2.6.32.44/tools/gcc/stackleak_plugin.c 1969-12-31 19:00:00.000000000 -0500
77880 +++ linux-2.6.32.44/tools/gcc/stackleak_plugin.c 2011-08-05 20:33:55.000000000 -0400
77881 @@ -0,0 +1,243 @@
77882 +/*
77883 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
77884 + * Licensed under the GPL v2
77885 + *
77886 + * Note: the choice of the license means that the compilation process is
77887 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
77888 + * but for the kernel it doesn't matter since it doesn't link against
77889 + * any of the gcc libraries
77890 + *
77891 + * gcc plugin to help implement various PaX features
77892 + *
77893 + * - track lowest stack pointer
77894 + *
77895 + * TODO:
77896 + * - initialize all local variables
77897 + *
77898 + * BUGS:
77899 + * - cloned functions are instrumented twice
77900 + */
77901 +#include "gcc-plugin.h"
77902 +#include "plugin-version.h"
77903 +#include "config.h"
77904 +#include "system.h"
77905 +#include "coretypes.h"
77906 +#include "tm.h"
77907 +#include "toplev.h"
77908 +#include "basic-block.h"
77909 +#include "gimple.h"
77910 +//#include "expr.h" where are you...
77911 +#include "diagnostic.h"
77912 +#include "rtl.h"
77913 +#include "emit-rtl.h"
77914 +#include "function.h"
77915 +#include "tree.h"
77916 +#include "tree-pass.h"
77917 +#include "intl.h"
77918 +
77919 +int plugin_is_GPL_compatible;
77920 +
77921 +static int track_frame_size = -1;
77922 +static const char track_function[] = "pax_track_stack";
77923 +static bool init_locals;
77924 +
77925 +static struct plugin_info stackleak_plugin_info = {
77926 + .version = "201106030000",
77927 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
77928 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
77929 +};
77930 +
77931 +static bool gate_stackleak_track_stack(void);
77932 +static unsigned int execute_stackleak_tree_instrument(void);
77933 +static unsigned int execute_stackleak_final(void);
77934 +
77935 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
77936 + .pass = {
77937 + .type = GIMPLE_PASS,
77938 + .name = "stackleak_tree_instrument",
77939 + .gate = gate_stackleak_track_stack,
77940 + .execute = execute_stackleak_tree_instrument,
77941 + .sub = NULL,
77942 + .next = NULL,
77943 + .static_pass_number = 0,
77944 + .tv_id = TV_NONE,
77945 + .properties_required = PROP_gimple_leh | PROP_cfg,
77946 + .properties_provided = 0,
77947 + .properties_destroyed = 0,
77948 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
77949 + .todo_flags_finish = TODO_verify_stmts // | TODO_dump_func
77950 + }
77951 +};
77952 +
77953 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
77954 + .pass = {
77955 + .type = RTL_PASS,
77956 + .name = "stackleak_final",
77957 + .gate = gate_stackleak_track_stack,
77958 + .execute = execute_stackleak_final,
77959 + .sub = NULL,
77960 + .next = NULL,
77961 + .static_pass_number = 0,
77962 + .tv_id = TV_NONE,
77963 + .properties_required = 0,
77964 + .properties_provided = 0,
77965 + .properties_destroyed = 0,
77966 + .todo_flags_start = 0,
77967 + .todo_flags_finish = 0
77968 + }
77969 +};
77970 +
77971 +static bool gate_stackleak_track_stack(void)
77972 +{
77973 + return track_frame_size >= 0;
77974 +}
77975 +
77976 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi, bool before)
77977 +{
77978 + gimple call;
77979 + tree decl, type;
77980 +
77981 + // insert call to void pax_track_stack(void)
77982 + type = build_function_type_list(void_type_node, NULL_TREE);
77983 + decl = build_fn_decl(track_function, type);
77984 + DECL_ASSEMBLER_NAME(decl); // for LTO
77985 + call = gimple_build_call(decl, 0);
77986 + if (before)
77987 + gsi_insert_before(gsi, call, GSI_CONTINUE_LINKING);
77988 + else
77989 + gsi_insert_after(gsi, call, GSI_CONTINUE_LINKING);
77990 +}
77991 +
77992 +static unsigned int execute_stackleak_tree_instrument(void)
77993 +{
77994 + basic_block bb;
77995 + gimple_stmt_iterator gsi;
77996 +
77997 + // 1. loop through BBs and GIMPLE statements
77998 + FOR_EACH_BB(bb) {
77999 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
78000 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
78001 + tree decl;
78002 + gimple stmt = gsi_stmt(gsi);
78003 +
78004 + if (!is_gimple_call(stmt))
78005 + continue;
78006 + decl = gimple_call_fndecl(stmt);
78007 + if (!decl)
78008 + continue;
78009 + if (TREE_CODE(decl) != FUNCTION_DECL)
78010 + continue;
78011 + if (!DECL_BUILT_IN(decl))
78012 + continue;
78013 + if (DECL_BUILT_IN_CLASS(decl) != BUILT_IN_NORMAL)
78014 + continue;
78015 + if (DECL_FUNCTION_CODE(decl) != BUILT_IN_ALLOCA)
78016 + continue;
78017 +
78018 + // 2. insert track call after each __builtin_alloca call
78019 + stackleak_add_instrumentation(&gsi, false);
78020 +// print_node(stderr, "pax", decl, 4);
78021 + }
78022 + }
78023 +
78024 + // 3. insert track call at the beginning
78025 + bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
78026 + gsi = gsi_start_bb(bb);
78027 + stackleak_add_instrumentation(&gsi, true);
78028 +
78029 + return 0;
78030 +}
78031 +
78032 +static unsigned int execute_stackleak_final(void)
78033 +{
78034 + rtx insn;
78035 +
78036 + if (cfun->calls_alloca)
78037 + return 0;
78038 +
78039 + // 1. find pax_track_stack calls
78040 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
78041 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
78042 + rtx body;
78043 +
78044 + if (!CALL_P(insn))
78045 + continue;
78046 + body = PATTERN(insn);
78047 + if (GET_CODE(body) != CALL)
78048 + continue;
78049 + body = XEXP(body, 0);
78050 + if (GET_CODE(body) != MEM)
78051 + continue;
78052 + body = XEXP(body, 0);
78053 + if (GET_CODE(body) != SYMBOL_REF)
78054 + continue;
78055 + if (strcmp(XSTR(body, 0), track_function))
78056 + continue;
78057 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
78058 + // 2. delete call if function frame is not big enough
78059 + if (get_frame_size() >= track_frame_size)
78060 + continue;
78061 + delete_insn_and_edges(insn);
78062 + }
78063 +
78064 +// print_simple_rtl(stderr, get_insns());
78065 +// print_rtl(stderr, get_insns());
78066 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
78067 +
78068 + return 0;
78069 +}
78070 +
78071 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
78072 +{
78073 + const char * const plugin_name = plugin_info->base_name;
78074 + const int argc = plugin_info->argc;
78075 + const struct plugin_argument * const argv = plugin_info->argv;
78076 + int i;
78077 + struct register_pass_info stackleak_tree_instrument_pass_info = {
78078 + .pass = &stackleak_tree_instrument_pass.pass,
78079 +// .reference_pass_name = "tree_profile",
78080 + .reference_pass_name = "optimized",
78081 + .ref_pass_instance_number = 0,
78082 + .pos_op = PASS_POS_INSERT_AFTER
78083 + };
78084 + struct register_pass_info stackleak_final_pass_info = {
78085 + .pass = &stackleak_final_rtl_opt_pass.pass,
78086 + .reference_pass_name = "final",
78087 + .ref_pass_instance_number = 0,
78088 + .pos_op = PASS_POS_INSERT_BEFORE
78089 + };
78090 +
78091 + if (!plugin_default_version_check(version, &gcc_version)) {
78092 + error(G_("incompatible gcc/plugin versions"));
78093 + return 1;
78094 + }
78095 +
78096 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
78097 +
78098 + for (i = 0; i < argc; ++i) {
78099 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
78100 + if (!argv[i].value) {
78101 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
78102 + continue;
78103 + }
78104 + track_frame_size = atoi(argv[i].value);
78105 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
78106 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
78107 + continue;
78108 + }
78109 + if (!strcmp(argv[i].key, "initialize-locals")) {
78110 + if (argv[i].value) {
78111 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
78112 + continue;
78113 + }
78114 + init_locals = true;
78115 + continue;
78116 + }
78117 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
78118 + }
78119 +
78120 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
78121 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
78122 +
78123 + return 0;
78124 +}
78125 Binary files linux-2.6.32.44/tools/gcc/stackleak_plugin.so and linux-2.6.32.44/tools/gcc/stackleak_plugin.so differ
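
The stackleak plugin above works in two passes. The GIMPLE pass, inserted after the pass named "optimized", adds a call to pax_track_stack() at the start of each function and again after every __builtin_alloca(); the RTL pass, inserted just before "final", deletes those calls again in functions that neither call alloca nor have a frame of at least track-lowest-sp bytes. Both passes are gated on the track-lowest-sp argument being supplied. The sketch below shows what the instrumentation amounts to; pax_track_stack() is provided by the PaX kernel patch (it records the lowest stack pointer so the kernel can later clear that stack region), and the stub definition here exists only so the example builds on its own.

/* stackleak_demo.c - what the instrumentation amounts to, conceptually.
 * Build with the plugin:
 *   gcc -fplugin=./stackleak_plugin.so \
 *       -fplugin-arg-stackleak_plugin-track-lowest-sp=100 -O2 -c stackleak_demo.c
 * The file also compiles without the plugin; it just stays uninstrumented. */
#include <string.h>

void pax_track_stack(void) { /* stand-in for the PaX kernel function */ }

int big_frame(const char *src)
{
        /* the GIMPLE pass inserts pax_track_stack() here; with a 512-byte
         * frame and a 100-byte threshold the RTL pass keeps the call */
        char buf[512];

        strncpy(buf, src, sizeof(buf) - 1);
        buf[sizeof(buf) - 1] = '\0';
        return (int)strlen(buf);
}

int small_frame(int x)
{
        /* a call is inserted here too, but the RTL pass deletes it: no alloca
         * and the frame is smaller than the threshold */
        return x * 2 + 1;
}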
78126 diff -urNp linux-2.6.32.44/usr/gen_init_cpio.c linux-2.6.32.44/usr/gen_init_cpio.c
78127 --- linux-2.6.32.44/usr/gen_init_cpio.c 2011-03-27 14:31:47.000000000 -0400
78128 +++ linux-2.6.32.44/usr/gen_init_cpio.c 2011-04-17 15:56:46.000000000 -0400
78129 @@ -299,7 +299,7 @@ static int cpio_mkfile(const char *name,
78130 int retval;
78131 int rc = -1;
78132 int namesize;
78133 - int i;
78134 + unsigned int i;
78135
78136 mode |= S_IFREG;
78137
78138 @@ -383,9 +383,10 @@ static char *cpio_replace_env(char *new_
78139 *env_var = *expanded = '\0';
78140 strncat(env_var, start + 2, end - start - 2);
78141 strncat(expanded, new_location, start - new_location);
78142 - strncat(expanded, getenv(env_var), PATH_MAX);
78143 - strncat(expanded, end + 1, PATH_MAX);
78144 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
78145 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
78146 strncpy(new_location, expanded, PATH_MAX);
78147 + new_location[PATH_MAX] = 0;
78148 } else
78149 break;
78150 }
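
The gen_init_cpio change fixes a classic strncat() misuse: the third argument bounds how many bytes may be appended, not the total size of the destination, so passing PATH_MAX on every call can overflow once the buffer already holds data. The patch subtracts the current length first, and explicitly NUL-terminates new_location after the strncpy(), whose bound alone does not guarantee termination. A self-contained illustration of the safe idiom follows; the buffer size and helper name are made up for the example.

/* strncat_bound_demo.c - why the bound must shrink as the buffer fills. */
#include <stdio.h>
#include <string.h>

#define BUF_MAX 16

/* Append src to dst (capacity cap bytes including the NUL), truncating safely. */
static void append_bounded(char *dst, size_t cap, const char *src)
{
        size_t used = strlen(dst);

        if (used + 1 >= cap)
                return;                         /* already full */
        strncat(dst, src, cap - used - 1);      /* remaining room, minus the NUL */
}

int main(void)
{
        char expanded[BUF_MAX + 1] = "";

        append_bounded(expanded, sizeof(expanded), "/usr/src/");
        append_bounded(expanded, sizeof(expanded), "linux-2.6.32.44");

        /* strncat(expanded, "...", BUF_MAX) here could write past the end once
         * expanded is partly filled - that is the bug being fixed above. */
        printf("%s\n", expanded);
        return 0;
}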
78151 diff -urNp linux-2.6.32.44/virt/kvm/kvm_main.c linux-2.6.32.44/virt/kvm/kvm_main.c
78152 --- linux-2.6.32.44/virt/kvm/kvm_main.c 2011-03-27 14:31:47.000000000 -0400
78153 +++ linux-2.6.32.44/virt/kvm/kvm_main.c 2011-08-05 20:33:55.000000000 -0400
78154 @@ -2494,7 +2494,7 @@ asmlinkage void kvm_handle_fault_on_rebo
78155 if (kvm_rebooting)
78156 /* spin while reset goes on */
78157 while (true)
78158 - ;
78159 + cpu_relax();
78160 /* Fault while not rebooting. We want the trace. */
78161 BUG();
78162 }
78163 @@ -2714,7 +2714,7 @@ static void kvm_sched_out(struct preempt
78164 kvm_arch_vcpu_put(vcpu);
78165 }
78166
78167 -int kvm_init(void *opaque, unsigned int vcpu_size,
78168 +int kvm_init(const void *opaque, unsigned int vcpu_size,
78169 struct module *module)
78170 {
78171 int r;
78172 @@ -2767,15 +2767,17 @@ int kvm_init(void *opaque, unsigned int
78173 /* A kmem cache lets us meet the alignment requirements of fx_save. */
78174 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
78175 __alignof__(struct kvm_vcpu),
78176 - 0, NULL);
78177 + SLAB_USERCOPY, NULL);
78178 if (!kvm_vcpu_cache) {
78179 r = -ENOMEM;
78180 goto out_free_5;
78181 }
78182
78183 - kvm_chardev_ops.owner = module;
78184 - kvm_vm_fops.owner = module;
78185 - kvm_vcpu_fops.owner = module;
78186 + pax_open_kernel();
78187 + *(void **)&kvm_chardev_ops.owner = module;
78188 + *(void **)&kvm_vm_fops.owner = module;
78189 + *(void **)&kvm_vcpu_fops.owner = module;
78190 + pax_close_kernel();
78191
78192 r = misc_register(&kvm_dev);
78193 if (r) {
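
The kvm_main.c hunks bundle three separate hardening tweaks: the reboot busy-wait spins on cpu_relax(), the usual pause and compiler-barrier hint, instead of an empty statement; the vcpu kmem cache is created with SLAB_USERCOPY so PaX's USERCOPY checks permit copies between that cache and user space; and the .owner fields of the constified file_operations are patched through *(void **)& casts inside a pax_open_kernel()/pax_close_kernel() pair, the bracket PaX uses when the target may sit in read-only kernel data. The user-space sketch below mirrors only the open/patch/close shape using mprotect(); the real pax_open_kernel() works differently (it toggles the CPU's write-protection enforcement), and every name in the sketch is illustrative.

/* pax_open_kernel_demo.c - user-space analogue of the open/patch/close pattern:
 * flip a read-only mapping writable just long enough to patch one pointer. */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct file_ops_like {
        void *owner;
        int (*open)(void);
};

static int demo_open(void) { return 0; }

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        struct file_ops_like *fops;

        /* stand-in for a constified, read-only fops structure */
        fops = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (fops == MAP_FAILED)
                return 1;
        fops->open = demo_open;
        mprotect(fops, pagesz, PROT_READ);            /* now read-only */

        /* "pax_open_kernel()" */
        mprotect(fops, pagesz, PROT_READ | PROT_WRITE);
        *(void **)&fops->owner = (void *)main;        /* the field being patched */
        /* "pax_close_kernel()" */
        mprotect(fops, pagesz, PROT_READ);

        printf("owner patched: %p\n", fops->owner);
        return 0;
}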